aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorElliott Hughes <enh@google.com>2023-11-10 19:23:18 +0000
committerElliott Hughes <enh@google.com>2023-11-10 19:23:18 +0000
commit02cec46c7a3918f19153e4e2de707f9d7de83fc8 (patch)
treec927c514a071f3a5ec125b3f474ad4ce4d39a7fa
parenta936b27b9394502de80c116f46aff5b1a1cc3925 (diff)
downloadfonttools-02cec46c7a3918f19153e4e2de707f9d7de83fc8.tar.gz
Upgrade fonttools to 4.44.0
This project was upgraded with external_updater. Usage: tools/external_updater/updater.sh update fonttools For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md Test: TreeHugger Change-Id: I5de68c96999d1b8671c251a2555948da63de5bc6
-rw-r--r--.git-blame-ignore-revs2
-rw-r--r--.github/workflows/publish.yml34
-rw-r--r--.github/workflows/test.yml37
-rw-r--r--.github/workflows/wheels.yml120
-rw-r--r--.readthedocs.yml5
-rw-r--r--Doc/docs-requirements.txt8
-rw-r--r--Doc/source/conf.py33
-rw-r--r--Doc/source/designspaceLib/index.rst22
-rw-r--r--Doc/source/designspaceLib/python.rst4
-rw-r--r--Doc/source/designspaceLib/scripting.rst19
-rw-r--r--Doc/source/designspaceLib/xml.rst125
-rw-r--r--Doc/source/developer.rst4
-rw-r--r--Doc/source/index.rst13
-rw-r--r--Doc/source/voltLib/index.rst (renamed from Doc/source/voltLib.rst)11
-rw-r--r--Doc/source/voltLib/voltToFea.rst8
-rw-r--r--Lib/fontTools/__init__.py2
-rw-r--r--Lib/fontTools/__main__.py61
-rw-r--r--Lib/fontTools/afmLib.py709
-rw-r--r--Lib/fontTools/agl.py251
-rw-r--r--Lib/fontTools/cffLib/__init__.py6124
-rw-r--r--Lib/fontTools/cffLib/specializer.py1504
-rw-r--r--Lib/fontTools/cffLib/width.py312
-rw-r--r--Lib/fontTools/colorLib/errors.py1
-rw-r--r--Lib/fontTools/colorLib/table_builder.py4
-rw-r--r--Lib/fontTools/config/__init__.py15
-rw-r--r--Lib/fontTools/cu2qu/benchmark.py55
-rw-r--r--Lib/fontTools/cu2qu/cli.py55
-rw-r--r--Lib/fontTools/cu2qu/cu2qu.py300
-rw-r--r--Lib/fontTools/cu2qu/errors.py1
-rw-r--r--Lib/fontTools/cu2qu/ufo.py103
-rw-r--r--Lib/fontTools/designspaceLib/__init__.py1100
-rw-r--r--Lib/fontTools/designspaceLib/split.py44
-rw-r--r--Lib/fontTools/designspaceLib/statNames.py31
-rw-r--r--Lib/fontTools/encodings/MacRoman.py292
-rw-r--r--Lib/fontTools/encodings/StandardEncoding.py304
-rw-r--r--Lib/fontTools/encodings/codecs.py210
-rw-r--r--Lib/fontTools/feaLib/ast.py50
-rw-r--r--Lib/fontTools/feaLib/builder.py171
-rw-r--r--Lib/fontTools/feaLib/lexer.py8
-rw-r--r--Lib/fontTools/feaLib/lookupDebugInfo.py3
-rw-r--r--Lib/fontTools/feaLib/parser.py89
-rw-r--r--Lib/fontTools/feaLib/variableScalar.py37
-rw-r--r--Lib/fontTools/fontBuilder.py77
-rw-r--r--Lib/fontTools/help.py2
-rw-r--r--Lib/fontTools/merge/__init__.py363
-rw-r--r--Lib/fontTools/merge/__main__.py2
-rw-r--r--Lib/fontTools/merge/base.py119
-rw-r--r--Lib/fontTools/merge/cmap.py214
-rw-r--r--Lib/fontTools/merge/layout.py776
-rw-r--r--Lib/fontTools/merge/options.py141
-rw-r--r--Lib/fontTools/merge/tables.py558
-rw-r--r--Lib/fontTools/merge/unicode.py137
-rw-r--r--Lib/fontTools/merge/util.py166
-rw-r--r--Lib/fontTools/misc/arrayTools.py65
-rw-r--r--Lib/fontTools/misc/bezierTools.py269
-rw-r--r--Lib/fontTools/misc/classifyTools.py318
-rw-r--r--Lib/fontTools/misc/cliTools.py7
-rw-r--r--Lib/fontTools/misc/cython.py2
-rw-r--r--Lib/fontTools/misc/dictTools.py56
-rw-r--r--Lib/fontTools/misc/eexec.py166
-rw-r--r--Lib/fontTools/misc/encodingTools.py123
-rw-r--r--Lib/fontTools/misc/etree.py7
-rw-r--r--Lib/fontTools/misc/filenames.py412
-rw-r--r--Lib/fontTools/misc/fixedTools.py320
-rw-r--r--Lib/fontTools/misc/intTools.py4
-rw-r--r--Lib/fontTools/misc/loggingTools.py989
-rw-r--r--Lib/fontTools/misc/macCreatorType.py86
-rw-r--r--Lib/fontTools/misc/macRes.py410
-rw-r--r--Lib/fontTools/misc/plistlib/__init__.py10
-rw-r--r--Lib/fontTools/misc/psCharStrings.py2576
-rw-r--r--Lib/fontTools/misc/psLib.py687
-rw-r--r--Lib/fontTools/misc/psOperators.py1001
-rw-r--r--Lib/fontTools/misc/roundTools.py57
-rw-r--r--Lib/fontTools/misc/sstruct.py228
-rw-r--r--Lib/fontTools/misc/symfont.py275
-rw-r--r--Lib/fontTools/misc/testTools.py21
-rw-r--r--Lib/fontTools/misc/textTools.py135
-rw-r--r--Lib/fontTools/misc/timeTools.py102
-rw-r--r--Lib/fontTools/misc/transform.py739
-rw-r--r--Lib/fontTools/misc/vector.py5
-rw-r--r--Lib/fontTools/misc/visitor.py2
-rw-r--r--Lib/fontTools/misc/xmlReader.py317
-rw-r--r--Lib/fontTools/misc/xmlWriter.py358
-rw-r--r--Lib/fontTools/mtiLib/__init__.py2356
-rw-r--r--Lib/fontTools/mtiLib/__main__.py4
-rw-r--r--Lib/fontTools/otlLib/builder.py57
-rw-r--r--Lib/fontTools/otlLib/optimize/__main__.py4
-rw-r--r--Lib/fontTools/otlLib/optimize/gpos.py3
-rw-r--r--Lib/fontTools/pens/areaPen.py91
-rw-r--r--Lib/fontTools/pens/basePen.py683
-rw-r--r--Lib/fontTools/pens/boundsPen.py170
-rw-r--r--Lib/fontTools/pens/cocoaPen.py28
-rw-r--r--Lib/fontTools/pens/cu2quPen.py307
-rw-r--r--Lib/fontTools/pens/explicitClosingLinePen.py101
-rw-r--r--Lib/fontTools/pens/filterPen.py12
-rw-r--r--Lib/fontTools/pens/hashPointPen.py4
-rw-r--r--Lib/fontTools/pens/momentsPen.py1339
-rw-r--r--Lib/fontTools/pens/perimeterPen.py101
-rw-r--r--Lib/fontTools/pens/pointInsidePen.py356
-rw-r--r--Lib/fontTools/pens/pointPen.py942
-rw-r--r--Lib/fontTools/pens/qtPen.py32
-rw-r--r--Lib/fontTools/pens/qu2cuPen.py105
-rw-r--r--Lib/fontTools/pens/quartzPen.py67
-rw-r--r--Lib/fontTools/pens/recordingPen.py262
-rw-r--r--Lib/fontTools/pens/reportLabPen.py135
-rw-r--r--Lib/fontTools/pens/reverseContourPen.py47
-rw-r--r--Lib/fontTools/pens/statisticsPen.py275
-rw-r--r--Lib/fontTools/pens/svgPathPen.py55
-rw-r--r--Lib/fontTools/pens/t2CharStringPen.py25
-rw-r--r--Lib/fontTools/pens/teePen.py78
-rw-r--r--Lib/fontTools/pens/transformPen.py191
-rw-r--r--Lib/fontTools/pens/ttGlyphPen.py90
-rw-r--r--Lib/fontTools/pens/wxPen.py32
-rw-r--r--Lib/fontTools/qu2cu/__init__.py15
-rw-r--r--Lib/fontTools/qu2cu/__main__.py7
-rw-r--r--Lib/fontTools/qu2cu/benchmark.py57
-rw-r--r--Lib/fontTools/qu2cu/cli.py125
-rw-r--r--Lib/fontTools/qu2cu/qu2cu.py408
-rw-r--r--Lib/fontTools/subset/__init__.py5326
-rw-r--r--Lib/fontTools/subset/__main__.py2
-rw-r--r--Lib/fontTools/subset/cff.py952
-rw-r--r--Lib/fontTools/subset/svg.py6
-rw-r--r--Lib/fontTools/svgLib/path/__init__.py2
-rw-r--r--Lib/fontTools/svgLib/path/arc.py1
-rw-r--r--Lib/fontTools/svgLib/path/parser.py55
-rw-r--r--Lib/fontTools/svgLib/path/shapes.py102
-rw-r--r--Lib/fontTools/t1Lib/__init__.py982
-rw-r--r--Lib/fontTools/ttLib/__init__.py15
-rw-r--r--Lib/fontTools/ttLib/__main__.py108
-rw-r--r--Lib/fontTools/ttLib/macUtils.py80
-rw-r--r--Lib/fontTools/ttLib/scaleUpem.py86
-rw-r--r--Lib/fontTools/ttLib/sfnt.py1007
-rw-r--r--Lib/fontTools/ttLib/standardGlyphOrder.py520
-rw-r--r--Lib/fontTools/ttLib/tables/B_A_S_E_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py49
-rw-r--r--Lib/fontTools/ttLib/tables/C_B_D_T_.py117
-rw-r--r--Lib/fontTools/ttLib/tables/C_B_L_C_.py4
-rw-r--r--Lib/fontTools/ttLib/tables/C_F_F_.py80
-rw-r--r--Lib/fontTools/ttLib/tables/C_F_F__2.py1
-rw-r--r--Lib/fontTools/ttLib/tables/C_O_L_R_.py294
-rw-r--r--Lib/fontTools/ttLib/tables/C_P_A_L_.py492
-rw-r--r--Lib/fontTools/ttLib/tables/D_S_I_G_.py176
-rw-r--r--Lib/fontTools/ttLib/tables/D__e_b_g.py2
-rw-r--r--Lib/fontTools/ttLib/tables/DefaultTable.py89
-rw-r--r--Lib/fontTools/ttLib/tables/E_B_D_T_.py1375
-rw-r--r--Lib/fontTools/ttLib/tables/E_B_L_C_.py1139
-rw-r--r--Lib/fontTools/ttLib/tables/F_F_T_M_.py48
-rw-r--r--Lib/fontTools/ttLib/tables/F__e_a_t.py97
-rw-r--r--Lib/fontTools/ttLib/tables/G_D_E_F_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/G_M_A_P_.py191
-rw-r--r--Lib/fontTools/ttLib/tables/G_P_K_G_.py195
-rw-r--r--Lib/fontTools/ttLib/tables/G_P_O_S_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/G_S_U_B_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/G__l_a_t.py128
-rw-r--r--Lib/fontTools/ttLib/tables/G__l_o_c.py51
-rw-r--r--Lib/fontTools/ttLib/tables/H_V_A_R_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/J_S_T_F_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/L_T_S_H_.py70
-rw-r--r--Lib/fontTools/ttLib/tables/M_A_T_H_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/M_E_T_A_.py514
-rw-r--r--Lib/fontTools/ttLib/tables/M_V_A_R_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/O_S_2f_2.py933
-rw-r--r--Lib/fontTools/ttLib/tables/S_I_N_G_.py127
-rw-r--r--Lib/fontTools/ttLib/tables/S_V_G_.py290
-rw-r--r--Lib/fontTools/ttLib/tables/S__i_l_f.py672
-rw-r--r--Lib/fontTools/ttLib/tables/S__i_l_l.py57
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I_B_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I_D_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I_J_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I_P_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I_S_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I_V_.py30
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I__0.py80
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I__1.py279
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I__2.py4
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I__3.py11
-rw-r--r--Lib/fontTools/ttLib/tables/T_S_I__5.py59
-rw-r--r--Lib/fontTools/ttLib/tables/T_T_F_A_.py3
-rw-r--r--Lib/fontTools/ttLib/tables/TupleVariation.py1500
-rw-r--r--Lib/fontTools/ttLib/tables/V_D_M_X_.py385
-rw-r--r--Lib/fontTools/ttLib/tables/V_O_R_G_.py280
-rw-r--r--Lib/fontTools/ttLib/tables/V_V_A_R_.py2
-rw-r--r--Lib/fontTools/ttLib/tables/__init__.py181
-rw-r--r--Lib/fontTools/ttLib/tables/_a_n_k_r.py2
-rw-r--r--Lib/fontTools/ttLib/tables/_a_v_a_r.py104
-rw-r--r--Lib/fontTools/ttLib/tables/_c_i_d_g.py20
-rw-r--r--Lib/fontTools/ttLib/tables/_c_m_a_p.py2825
-rw-r--r--Lib/fontTools/ttLib/tables/_c_v_a_r.py30
-rw-r--r--Lib/fontTools/ttLib/tables/_c_v_t.py80
-rw-r--r--Lib/fontTools/ttLib/tables/_f_e_a_t.py15
-rw-r--r--Lib/fontTools/ttLib/tables/_f_p_g_m.py83
-rw-r--r--Lib/fontTools/ttLib/tables/_f_v_a_r.py83
-rw-r--r--Lib/fontTools/ttLib/tables/_g_a_s_p.py76
-rw-r--r--Lib/fontTools/ttLib/tables/_g_l_y_f.py4350
-rw-r--r--Lib/fontTools/ttLib/tables/_g_v_a_r.py450
-rw-r--r--Lib/fontTools/ttLib/tables/_h_d_m_x.py185
-rw-r--r--Lib/fontTools/ttLib/tables/_h_e_a_d.py166
-rw-r--r--Lib/fontTools/ttLib/tables/_h_h_e_a.py198
-rw-r--r--Lib/fontTools/ttLib/tables/_h_m_t_x.py241
-rw-r--r--Lib/fontTools/ttLib/tables/_k_e_r_n.py520
-rw-r--r--Lib/fontTools/ttLib/tables/_l_c_a_r.py2
-rw-r--r--Lib/fontTools/ttLib/tables/_l_o_c_a.py104
-rw-r--r--Lib/fontTools/ttLib/tables/_l_t_a_g.py113
-rw-r--r--Lib/fontTools/ttLib/tables/_m_a_x_p.py207
-rw-r--r--Lib/fontTools/ttLib/tables/_m_e_t_a.py29
-rw-r--r--Lib/fontTools/ttLib/tables/_n_a_m_e.py1913
-rw-r--r--Lib/fontTools/ttLib/tables/_p_o_s_t.py490
-rw-r--r--Lib/fontTools/ttLib/tables/_p_r_e_p.py3
-rw-r--r--Lib/fontTools/ttLib/tables/_s_b_i_x.py164
-rw-r--r--Lib/fontTools/ttLib/tables/_t_r_a_k.py515
-rw-r--r--Lib/fontTools/ttLib/tables/_v_h_e_a.py185
-rw-r--r--Lib/fontTools/ttLib/tables/_v_m_t_x.py10
-rw-r--r--Lib/fontTools/ttLib/tables/asciiTable.py29
-rw-r--r--Lib/fontTools/ttLib/tables/grUtils.py39
-rw-r--r--Lib/fontTools/ttLib/tables/otBase.py2628
-rw-r--r--Lib/fontTools/ttLib/tables/otConverters.py3339
-rw-r--r--[-rwxr-xr-x]Lib/fontTools/ttLib/tables/otData.py8189
-rw-r--r--Lib/fontTools/ttLib/tables/otTables.py3964
-rw-r--r--Lib/fontTools/ttLib/tables/otTraverse.py32
-rw-r--r--Lib/fontTools/ttLib/tables/sbixGlyph.py218
-rw-r--r--Lib/fontTools/ttLib/tables/sbixStrike.py277
-rw-r--r--Lib/fontTools/ttLib/tables/ttProgram.py1011
-rw-r--r--Lib/fontTools/ttLib/ttCollection.py226
-rw-r--r--Lib/fontTools/ttLib/ttFont.py2002
-rw-r--r--Lib/fontTools/ttLib/ttGlyphSet.py519
-rw-r--r--Lib/fontTools/ttLib/woff2.py2947
-rw-r--r--Lib/fontTools/ttx.py602
-rwxr-xr-xLib/fontTools/ufoLib/__init__.py4301
-rw-r--r--Lib/fontTools/ufoLib/converters.py14
-rw-r--r--Lib/fontTools/ufoLib/errors.py8
-rw-r--r--Lib/fontTools/ufoLib/filenames.py360
-rwxr-xr-xLib/fontTools/ufoLib/glifLib.py3339
-rw-r--r--Lib/fontTools/ufoLib/kerning.py172
-rw-r--r--Lib/fontTools/ufoLib/validators.py1890
-rw-r--r--Lib/fontTools/unicode.py71
-rw-r--r--Lib/fontTools/unicodedata/Blocks.py784
-rw-r--r--Lib/fontTools/unicodedata/OTTags.py5
-rw-r--r--Lib/fontTools/unicodedata/ScriptExtensions.py531
-rw-r--r--Lib/fontTools/unicodedata/Scripts.py3742
-rw-r--r--Lib/fontTools/unicodedata/__init__.py154
-rw-r--r--Lib/fontTools/varLib/__init__.py2346
-rw-r--r--Lib/fontTools/varLib/__main__.py4
-rw-r--r--Lib/fontTools/varLib/avar.py70
-rw-r--r--Lib/fontTools/varLib/avarPlanner.py1004
-rw-r--r--Lib/fontTools/varLib/builder.py214
-rw-r--r--Lib/fontTools/varLib/cff.py1239
-rw-r--r--Lib/fontTools/varLib/featureVars.py204
-rw-r--r--Lib/fontTools/varLib/instancer/__init__.py891
-rw-r--r--Lib/fontTools/varLib/instancer/featureVars.py190
-rw-r--r--Lib/fontTools/varLib/instancer/names.py35
-rw-r--r--Lib/fontTools/varLib/instancer/solver.py307
-rw-r--r--Lib/fontTools/varLib/interpolatable.py447
-rw-r--r--Lib/fontTools/varLib/interpolate_layout.py173
-rw-r--r--Lib/fontTools/varLib/iup.py860
-rw-r--r--Lib/fontTools/varLib/merger.py2762
-rw-r--r--Lib/fontTools/varLib/models.py169
-rw-r--r--Lib/fontTools/varLib/mutator.py881
-rw-r--r--Lib/fontTools/varLib/mvar.py76
-rw-r--r--Lib/fontTools/varLib/plot.py331
-rw-r--r--Lib/fontTools/varLib/varStore.py1169
-rw-r--r--Lib/fontTools/voltLib/ast.py189
-rw-r--r--Lib/fontTools/voltLib/error.py2
-rw-r--r--Lib/fontTools/voltLib/lexer.py22
-rw-r--r--Lib/fontTools/voltLib/parser.py147
-rw-r--r--Lib/fontTools/voltLib/voltToFea.py726
-rw-r--r--METADATA14
-rwxr-xr-xMetaTools/buildTableList.py42
-rwxr-xr-xMetaTools/buildUCD.py70
-rwxr-xr-xMetaTools/roundTrip.py137
-rw-r--r--NEWS.rst353
-rw-r--r--README.rst48
-rw-r--r--SECURITY.md18
-rw-r--r--Snippets/checksum.py100
-rwxr-xr-xSnippets/cmap-format.py24
-rw-r--r--Snippets/dump_woff_metadata.py5
-rw-r--r--Snippets/fix-dflt-langsys.py26
-rwxr-xr-xSnippets/interpolate.py38
-rwxr-xr-xSnippets/layout-features.py78
-rw-r--r--Snippets/merge_woff_metadata.py8
-rwxr-xr-xSnippets/otf2ttf.py45
-rw-r--r--Snippets/print-json.py1
-rwxr-xr-xSnippets/rename-fonts.py8
-rw-r--r--Snippets/statShape.py85
-rwxr-xr-xSnippets/subset-fpgm.py51
-rwxr-xr-xSnippets/svg2glif.py94
-rw-r--r--Tests/afmLib/afmLib_test.py94
-rw-r--r--Tests/agl_test.py4
-rw-r--r--Tests/cffLib/cffLib_test.py35
-rw-r--r--Tests/cffLib/data/TestCFF2Widths.ttx1
-rw-r--r--Tests/cffLib/data/TestSparseCFF2VF.ttx1
-rw-r--r--Tests/cffLib/specializer_test.py783
-rw-r--r--Tests/cu2qu/cli_test.py26
-rw-r--r--Tests/cu2qu/cu2qu_test.py94
-rw-r--r--Tests/cu2qu/ufo_test.py115
-rw-r--r--Tests/designspaceLib/data/test_avar2.designspace117
-rw-r--r--Tests/designspaceLib/data/test_v5.designspace7
-rw-r--r--Tests/designspaceLib/designspace_test.py401
-rw-r--r--Tests/designspaceLib/designspace_v5_test.py9
-rw-r--r--Tests/designspaceLib/split_test.py89
-rw-r--r--Tests/designspaceLib/statNames_test.py22
-rw-r--r--Tests/encodings/codecs_test.py38
-rw-r--r--Tests/feaLib/ast_test.py1
-rw-r--r--Tests/feaLib/builder_test.py170
-rw-r--r--Tests/feaLib/data/GPOS_1_zero.ttx2
-rw-r--r--Tests/feaLib/data/GSUB_2.fea21
-rw-r--r--Tests/feaLib/data/GSUB_2.ttx121
-rw-r--r--Tests/feaLib/data/GSUB_5_formats.fea12
-rw-r--r--Tests/feaLib/data/PairPosSubtable.ttx16
-rw-r--r--Tests/feaLib/data/STAT_test.ttx6
-rw-r--r--Tests/feaLib/data/bug2949.fea20
-rw-r--r--Tests/feaLib/data/bug2949.ttx133
-rw-r--r--Tests/feaLib/data/bug509.fea2
-rw-r--r--Tests/feaLib/data/bug512.ttx3
-rw-r--r--Tests/feaLib/data/bug633.ttx12
-rw-r--r--Tests/feaLib/data/name.ttx18
-rw-r--r--Tests/feaLib/data/spec5f_ii_3.ttx22
-rw-r--r--Tests/feaLib/data/spec8b.ttx8
-rw-r--r--Tests/feaLib/data/spec8c.ttx14
-rw-r--r--Tests/feaLib/data/spec8d.ttx28
-rw-r--r--Tests/feaLib/data/spec9e.ttx4
-rw-r--r--Tests/feaLib/data/variable_bug2772.fea4
-rw-r--r--Tests/feaLib/data/variable_bug2772.ttx103
-rw-r--r--Tests/feaLib/data/variable_scalar_valuerecord.fea1
-rw-r--r--Tests/feaLib/data/variable_scalar_valuerecord.ttx9
-rw-r--r--Tests/feaLib/error_test.py1
-rw-r--r--Tests/feaLib/lexer_test.py171
-rw-r--r--Tests/feaLib/parser_test.py68
-rw-r--r--Tests/fontBuilder/data/test_var.otf.ttx10
-rw-r--r--Tests/fontBuilder/fontBuilder_test.py202
-rw-r--r--Tests/merge/data/CFFFont_expected.ttx558
-rw-r--r--Tests/merge/merge_test.py393
-rw-r--r--Tests/misc/arrayTools_test.py38
-rw-r--r--Tests/misc/bezierTools_test.py179
-rw-r--r--Tests/misc/classifyTools_test.py37
-rw-r--r--Tests/misc/eexec_test.py4
-rw-r--r--Tests/misc/encodingTools_test.py43
-rw-r--r--Tests/misc/filenames_test.py245
-rw-r--r--Tests/misc/fixedTools_test.py60
-rw-r--r--Tests/misc/loggingTools_test.py73
-rw-r--r--Tests/misc/macRes_test.py132
-rw-r--r--Tests/misc/plistlib_test.py67
-rw-r--r--Tests/misc/psCharStrings_test.py123
-rw-r--r--Tests/misc/py23_test.py722
-rw-r--r--Tests/misc/testTools_test.py109
-rw-r--r--Tests/misc/textTools_test.py10
-rw-r--r--Tests/misc/timeTools_test.py12
-rw-r--r--Tests/misc/transform_test.py107
-rw-r--r--Tests/misc/treeTools_test.py2
-rw-r--r--Tests/misc/visitor_test.py1
-rw-r--r--Tests/misc/xmlReader_test.py339
-rw-r--r--Tests/misc/xmlWriter_test.py260
-rw-r--r--Tests/mtiLib/data/featurename-backward.ttx.GSUB1
-rw-r--r--Tests/mtiLib/data/featurename-forward.ttx.GSUB1
-rw-r--r--Tests/mtiLib/data/lookupnames-backward.ttx.GSUB2
-rw-r--r--Tests/mtiLib/data/lookupnames-forward.ttx.GSUB2
-rw-r--r--Tests/mtiLib/data/mixed-toplevels.ttx.GSUB2
-rw-r--r--Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS2
-rw-r--r--Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB2
-rw-r--r--Tests/mtiLib/data/mti/chainedclass.ttx.GSUB2
-rw-r--r--Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB2
-rw-r--r--Tests/mtiLib/data/mti/gposcursive.ttx.GPOS1
-rw-r--r--Tests/mtiLib/data/mti/gposkernset.ttx.GPOS1
-rw-r--r--Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS1
-rw-r--r--Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS1
-rw-r--r--Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS1
-rw-r--r--Tests/mtiLib/data/mti/gpossingle.ttx.GPOS1
-rw-r--r--Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB1
-rw-r--r--Tests/mtiLib/data/mti/gsubligature.ttx.GSUB1
-rw-r--r--Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB1
-rw-r--r--Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB1
-rw-r--r--Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB1
-rw-r--r--Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS1
-rw-r--r--Tests/mtiLib/mti_test.py565
-rw-r--r--Tests/otlLib/builder_test.py2
-rw-r--r--Tests/otlLib/maxContextCalc_test.py27
-rw-r--r--Tests/otlLib/mock_builder_test.py8
-rw-r--r--Tests/pens/__init__.py11
-rw-r--r--Tests/pens/areaPen_test.py195
-rw-r--r--Tests/pens/basePen_test.py78
-rw-r--r--Tests/pens/boundsPen_test.py3
-rw-r--r--Tests/pens/cocoaPen_test.py13
-rw-r--r--Tests/pens/cu2quPen_test.py386
-rw-r--r--Tests/pens/perimeterPen_test.py195
-rw-r--r--Tests/pens/pointInsidePen_test.py190
-rw-r--r--Tests/pens/pointPen_test.py253
-rw-r--r--Tests/pens/qu2cuPen_test.py253
-rw-r--r--Tests/pens/quartzPen_test.py15
-rw-r--r--Tests/pens/reverseContourPen_test.py589
-rw-r--r--Tests/pens/t2CharStringPen_test.py225
-rw-r--r--Tests/pens/ttGlyphPen_test.py256
-rw-r--r--Tests/pens/utils.py62
-rw-r--r--Tests/qu2cu/data/NotoSansArabic-Regular.quadratic.subset.ttfbin0 -> 2612 bytes
-rw-r--r--Tests/qu2cu/qu2cu_cli_test.py62
-rw-r--r--Tests/qu2cu/qu2cu_test.py104
-rw-r--r--Tests/subset/data/NotoSansCJKjp-Regular.subset.ttx417
-rw-r--r--Tests/subset/data/TestGVAR.ttx1
-rw-r--r--Tests/subset/data/TestHVVAR.ttx1
-rw-r--r--Tests/subset/data/expect_HVVAR.ttx1
-rw-r--r--Tests/subset/data/expect_HVVAR_retain_gids.ttx1
-rw-r--r--Tests/subset/data/expect_keep_gvar.ttx1
-rw-r--r--Tests/subset/data/expect_keep_gvar_notdef_outline.ttx1
-rw-r--r--Tests/subset/subset_test.py829
-rw-r--r--Tests/svgLib/path/parser_test.py170
-rw-r--r--Tests/svgLib/path/path_test.py24
-rw-r--r--Tests/svgLib/path/shapes_test.py76
-rw-r--r--Tests/t1Lib/t1Lib_test.py335
-rw-r--r--Tests/ttLib/data/I-512upem.ttx3
-rw-r--r--Tests/ttLib/data/I.otfbin0 -> 3716 bytes
-rw-r--r--Tests/ttLib/data/TestOTF-Regular.otx4
-rw-r--r--Tests/ttLib/data/TestTTF-Regular.ttx4
-rw-r--r--Tests/ttLib/data/TestTTF_normalizeLocation.ttx28
-rw-r--r--Tests/ttLib/data/bogus_post_format_1.ttfbin0 -> 3840 bytes
-rw-r--r--Tests/ttLib/data/dot-cubic.ttfbin0 -> 476 bytes
-rw-r--r--Tests/ttLib/data/issue2824.ttfbin0 -> 1216 bytes
-rw-r--r--Tests/ttLib/data/varc-6868.ttfbin0 -> 10848 bytes
-rw-r--r--Tests/ttLib/data/varc-ac00-ac01-500upem.ttx2055
-rw-r--r--Tests/ttLib/data/varc-ac00-ac01.ttfbin0 -> 4808 bytes
-rw-r--r--Tests/ttLib/main_test.py105
-rw-r--r--Tests/ttLib/scaleUpem_test.py22
-rw-r--r--Tests/ttLib/sfnt_test.py39
-rw-r--r--Tests/ttLib/tables/C_F_F__2_test.py22
-rw-r--r--Tests/ttLib/tables/C_F_F_test.py18
-rw-r--r--Tests/ttLib/tables/C_O_L_R_test.py124
-rw-r--r--Tests/ttLib/tables/C_P_A_L_test.py282
-rw-r--r--Tests/ttLib/tables/M_V_A_R_test.py149
-rw-r--r--Tests/ttLib/tables/O_S_2f_2_test.py105
-rw-r--r--Tests/ttLib/tables/S_T_A_T_test.py236
-rw-r--r--Tests/ttLib/tables/T_S_I__0_test.py44
-rw-r--r--Tests/ttLib/tables/T_S_I__1_test.py111
-rw-r--r--Tests/ttLib/tables/TupleVariation_test.py1843
-rw-r--r--Tests/ttLib/tables/_a_n_k_r_test.py142
-rw-r--r--Tests/ttLib/tables/_a_v_a_r_test.py133
-rw-r--r--Tests/ttLib/tables/_b_s_l_n_test.py135
-rw-r--r--Tests/ttLib/tables/_c_i_d_g_test.py70
-rw-r--r--Tests/ttLib/tables/_c_m_a_p_test.py322
-rw-r--r--Tests/ttLib/tables/_c_v_a_r_test.py63
-rw-r--r--Tests/ttLib/tables/_f_v_a_r_test.py153
-rw-r--r--Tests/ttLib/tables/_g_c_i_d_test.py68
-rw-r--r--Tests/ttLib/tables/_g_l_y_f_test.py632
-rw-r--r--Tests/ttLib/tables/_g_v_a_r_test.py301
-rw-r--r--Tests/ttLib/tables/_h_h_e_a_test.py140
-rw-r--r--Tests/ttLib/tables/_h_m_t_x_test.py109
-rw-r--r--Tests/ttLib/tables/_k_e_r_n_test.py264
-rw-r--r--Tests/ttLib/tables/_l_c_a_r_test.py88
-rw-r--r--Tests/ttLib/tables/_l_t_a_g_test.py111
-rw-r--r--Tests/ttLib/tables/_m_e_t_a_test.py64
-rw-r--r--Tests/ttLib/tables/_m_o_r_t_test.py99
-rw-r--r--Tests/ttLib/tables/_m_o_r_x_test.py873
-rw-r--r--Tests/ttLib/tables/_n_a_m_e_test.py1150
-rw-r--r--Tests/ttLib/tables/_o_p_b_d_test.py184
-rw-r--r--Tests/ttLib/tables/_p_r_o_p_test.py54
-rw-r--r--Tests/ttLib/tables/_t_r_a_k_test.py602
-rw-r--r--Tests/ttLib/tables/_v_h_e_a_test.py182
-rw-r--r--Tests/ttLib/tables/_v_m_t_x_test.py2
-rw-r--r--Tests/ttLib/tables/data/COLRv1-clip-boxes-cff.ttx1213
-rw-r--r--Tests/ttLib/tables/data/COLRv1-clip-boxes-glyf.ttx1414
-rw-r--r--Tests/ttLib/tables/data/COLRv1-clip-boxes-q1-expected.ttx919
-rw-r--r--Tests/ttLib/tables/data/COLRv1-clip-boxes-q10-expected.ttx911
-rw-r--r--Tests/ttLib/tables/data/COLRv1-clip-boxes-q100-expected.ttx863
-rw-r--r--Tests/ttLib/tables/data/NotoSans-VF-cubic.subset.ttfbin0 -> 4248 bytes
-rw-r--r--Tests/ttLib/tables/data/_g_l_y_f_instructions.ttx82
-rw-r--r--Tests/ttLib/tables/otBase_test.py4
-rw-r--r--Tests/ttLib/tables/otConverters_test.py382
-rw-r--r--Tests/ttLib/tables/otTables_test.py456
-rw-r--r--Tests/ttLib/tables/tables_test.py449
-rw-r--r--Tests/ttLib/tables/ttProgram_test.py136
-rw-r--r--Tests/ttLib/ttFont_test.py114
-rw-r--r--Tests/ttLib/ttGlyphSet_test.py658
-rw-r--r--Tests/ttLib/ttVisitor_test.py2
-rw-r--r--Tests/ttLib/woff2_test.py2683
-rw-r--r--Tests/ttx/data/TestOTF.ttx4
-rw-r--r--Tests/ttx/data/TestTTF.ttx4
-rw-r--r--Tests/ttx/data/roundtrip_DSIG_split_at_XML_parse_buffer_size.ttx224
-rw-r--r--Tests/ttx/ttx_test.py67
-rw-r--r--Tests/ufoLib/GLIF1_test.py908
-rw-r--r--Tests/ufoLib/GLIF2_test.py1370
-rw-r--r--Tests/ufoLib/UFO1_test.py254
-rw-r--r--Tests/ufoLib/UFO2_test.py2946
-rw-r--r--Tests/ufoLib/UFO3_test.py9680
-rw-r--r--Tests/ufoLib/UFOConversion_test.py633
-rw-r--r--Tests/ufoLib/UFOZ_test.py8
-rw-r--r--Tests/ufoLib/__init__.py3
-rw-r--r--Tests/ufoLib/filenames_test.py27
-rw-r--r--Tests/ufoLib/glifLib_test.py527
-rwxr-xr-xTests/ufoLib/testSupport.py1226
-rw-r--r--Tests/unicodedata_test.py316
-rw-r--r--Tests/varLib/builder_test.py191
-rw-r--r--Tests/varLib/data/BuildAvar2.designspace55
-rw-r--r--Tests/varLib/data/DropOnCurves.designspace20
-rw-r--r--Tests/varLib/data/InterpolateLayout.glyphs2402
-rw-r--r--Tests/varLib/data/SparseCFF2.designspace23
-rw-r--r--Tests/varLib/data/SparseMasters.glyphs486
-rw-r--r--Tests/varLib/data/SparseMasters_ufo.designspace23
-rw-r--r--Tests/varLib/data/TestNoOverwriteSTAT.designspace36
-rw-r--r--Tests/varLib/data/master_no_overwrite_stat/Test-CondensedBlack.ttx243
-rw-r--r--Tests/varLib/data/master_no_overwrite_stat/Test-CondensedThin.ttx373
-rw-r--r--Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedBlack.ttx243
-rw-r--r--Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedThin.ttx243
-rw-r--r--Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Bold.ttx302
-rw-r--r--Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Medium.ttx100
-rw-r--r--Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Regular.ttx302
-rw-r--r--Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master1.ttx312
-rw-r--r--Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master2.ttx313
-rw-r--r--Tests/varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx1
-rw-r--r--Tests/varLib/data/master_ttx_varfont_ttf/SparseMasters-VF.ttx501
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/fontinfo.plist20
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/_notdef.glif18
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/a.glif29
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/contents.plist18
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/dotabovecomb.glif12
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/e.glif22
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/edotabove.glif9
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/s.glif21
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/layercontents.plist10
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/lib.plist15
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/metainfo.plist10
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/fontinfo.plist20
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/_notdef.glif18
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/contents.plist10
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/e.glif21
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/layercontents.plist10
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/lib.plist11
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/metainfo.plist10
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/fontinfo.plist20
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/_notdef.glif18
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/a.glif29
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/contents.plist18
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/dotabovecomb.glif12
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/e.glif22
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/edotabove.glif9
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/s.glif21
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/layercontents.plist10
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/lib.plist15
-rw-r--r--Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/metainfo.plist10
-rw-r--r--Tests/varLib/data/test_results/Build.ttx99
-rw-r--r--Tests/varLib/data/test_results/BuildAvar2.ttx41
-rw-r--r--Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx1
-rw-r--r--Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx1
-rw-r--r--Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx1
-rw-r--r--Tests/varLib/data/test_results/BuildMain.ttx99
-rw-r--r--Tests/varLib/data/test_results/DropOnCurves.ttx498
-rw-r--r--Tests/varLib/data/test_results/FeatureVars_rclt.ttx2
-rw-r--r--Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_diff.ttx116
-rw-r--r--Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_same.ttx116
-rw-r--r--Tests/varLib/data/test_results/SparseCFF2-VF.ttx157
-rw-r--r--Tests/varLib/data/test_results/SparseMasters.ttx25
-rw-r--r--Tests/varLib/data/test_results/TestSparseCFF2VF.ttx808
-rw-r--r--Tests/varLib/data/test_results/TestVVAR.ttx11
-rw-r--r--Tests/varLib/featureVars_test.py93
-rw-r--r--Tests/varLib/instancer/data/PartialInstancerTest-VF.ttx17
-rw-r--r--Tests/varLib/instancer/data/PartialInstancerTest2-VF.ttx1
-rw-r--r--Tests/varLib/instancer/data/STATInstancerTest.ttx1
-rw-r--r--Tests/varLib/instancer/data/SinglePos.ttx1
-rw-r--r--Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,100.ttx2
-rw-r--r--Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,62.5.ttx2
-rw-r--r--Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,100.ttx2
-rw-r--r--Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,62.5.ttx2
-rw-r--r--Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,100.ttx2
-rw-r--r--Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,62.5.ttx2
-rw-r--r--Tests/varLib/instancer/instancer_test.py364
-rw-r--r--Tests/varLib/instancer/names_test.py37
-rw-r--r--Tests/varLib/instancer/solver_test.py300
-rw-r--r--Tests/varLib/interpolatable_test.py192
-rw-r--r--Tests/varLib/interpolate_layout_test.py655
-rw-r--r--Tests/varLib/iup_test.py114
-rw-r--r--Tests/varLib/merger_test.py100
-rw-r--r--Tests/varLib/models_test.py123
-rw-r--r--Tests/varLib/mutator_test.py88
-rw-r--r--Tests/varLib/stat_test.py24
-rw-r--r--Tests/varLib/varLib_test.py343
-rw-r--r--Tests/varLib/varStore_test.py206
-rw-r--r--Tests/voltLib/data/Empty.ttfbin0 -> 1432 bytes
-rw-r--r--Tests/voltLib/data/NamdhinggoSIL1006.fea506
-rw-r--r--Tests/voltLib/data/NamdhinggoSIL1006.vtp1
-rw-r--r--Tests/voltLib/data/Nutso.fea328
-rw-r--r--Tests/voltLib/data/Nutso.ttfbin0 -> 29456 bytes
-rw-r--r--Tests/voltLib/data/Nutso.vtp1
-rw-r--r--Tests/voltLib/lexer_test.py15
-rw-r--r--Tests/voltLib/parser_test.py1219
-rw-r--r--Tests/voltLib/volttofea_test.py1253
-rw-r--r--dev-requirements.txt3
-rw-r--r--pyproject.toml2
-rw-r--r--requirements.txt25
-rw-r--r--setup.cfg9
-rwxr-xr-xsetup.py794
-rw-r--r--tox.ini20
586 files changed, 116020 insertions, 73652 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 00000000..c0c6efac
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# First blackening of code
+d584daa8fdc71030f92ee665472d6c7cddd49283
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
deleted file mode 100644
index ea5ebc9f..00000000
--- a/.github/workflows/publish.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-# This workflows will upload a Python Package using Twine when a tag is created
-# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
-
-name: Upload Python Package
-
-on:
- push:
- # Sequence of patterns matched against refs/tags
- tags:
- - '*.*.*' # e.g. 1.0.0 or 20.15.10
-
-permissions:
- contents: read
-
-jobs:
- deploy:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v3
- - name: Set up Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.x'
- - name: Install dependencies
- run: |
- pip install setuptools wheel twine
- - name: Build and publish
- env:
- TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
- TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
- run: |
- python setup.py sdist bdist_wheel
- twine upload dist/*
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 0ce1c2d5..d97c77f2 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -9,6 +9,10 @@ on:
permissions:
contents: read
+env:
+ # turns off tox's output redirection so we can debug package installation
+ TOX_OPTIONS: -vv
+
jobs:
lint:
runs-on: ubuntu-latest
@@ -23,32 +27,34 @@ jobs:
- name: Install packages
run: pip install tox
- name: Run Tox
- run: tox -e mypy,package_readme
+ run: tox $TOX_OPTIONS -e lint,package_readme
test:
runs-on: ${{ matrix.platform }}
if: "! contains(toJSON(github.event.commits.*.message), '[skip ci]')"
strategy:
+ fail-fast: false
matrix:
- python-version: ["3.7", "3.10"]
- platform: [ubuntu-latest, macos-latest, windows-latest]
- exclude: # Only test on the latest supported stable Python on macOS and Windows.
+ python-version: ["3.8", "3.11", "3.12"]
+ platform: [ubuntu-latest]
+ include: # Only test on the latest supported stable Python on macOS and Windows.
- platform: macos-latest
- python-version: 3.7
+ python-version: 3.11
- platform: windows-latest
- python-version: 3.7
+ python-version: 3.11
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
+ allow-prereleases: true
- name: Install packages
run: pip install tox coverage
- name: Run Tox
- run: tox -e py-cov
- - name: Run Tox without lxml
- run: tox -e py-cov-nolxml
+ run: tox $TOX_OPTIONS -e py-cov
+ - name: Run Tox without extra dependencies
+ run: tox $TOX_OPTIONS -e py-cov-noextra
- name: Produce coverage files
run: |
coverage combine
@@ -59,7 +65,10 @@ jobs:
file: coverage.xml
flags: unittests
name: codecov-umbrella
- fail_ci_if_error: true
+ # TODO(anthrotype): Set fail_ci_if_error: true if/when Codecov becomes less flaky
+ fail_ci_if_error: false
+ # see https://github.com/codecov/codecov-action/issues/557
+ token: ${{ secrets.CODECOV_TOKEN }}
test-cython:
runs-on: ubuntu-latest
@@ -69,11 +78,11 @@ jobs:
- name: Set up Python 3.x
uses: actions/setup-python@v4
with:
- python-version: "3.10"
+ python-version: "3.11"
- name: Install packages
run: pip install tox
- name: Run Tox
- run: tox -e py-cy-nolxml
+ run: tox $TOX_OPTIONS -e py-cy
test-pypy3:
runs-on: ubuntu-latest
@@ -83,8 +92,8 @@ jobs:
- name: Set up Python pypy3
uses: actions/setup-python@v4
with:
- python-version: "pypy-3.7"
+ python-version: "pypy-3.9"
- name: Install packages
run: pip install tox
- name: Run Tox
- run: tox -e pypy3-nolxml
+ run: tox $TOX_OPTIONS -e pypy3
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
new file mode 100644
index 00000000..59ba1b0b
--- /dev/null
+++ b/.github/workflows/wheels.yml
@@ -0,0 +1,120 @@
+name: Build + Deploy
+
+on:
+ push:
+ tags: ["*.*.*"]
+ # enables workflow to be run manually
+ # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch
+ workflow_dispatch:
+
+env:
+ # skip binary wheels for pypy (preferable to use pure-python) and 32-bit Linux
+ CIBW_SKIP: pp* cp*linux_i686
+ CIBW_ENVIRONMENT: FONTTOOLS_WITH_CYTHON=1
+ CIBW_TEST_REQUIRES: tox
+ # only test core fonttools library without extras for now, stuff like lxml or scipy
+ # create too many issues when testing on a large matrix of environments...
+ CIBW_TEST_COMMAND: "tox -c {package}/tox.ini -e py-cy-noextra --installpkg {wheel}"
+
+jobs:
+
+ build_pure:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+ - name: Install dependencies
+ run: |
+ pip install setuptools wheel twine
+ - name: Build source distribution and pure-python wheel
+ run: |
+ python setup.py sdist bdist_wheel
+ - uses: actions/upload-artifact@v3
+ with:
+ path: |
+ dist/*.whl
+ dist/*.tar.gz
+
+ build_wheels:
+ name: ${{ matrix.type }} ${{ matrix.arch }} on ${{ matrix.os }}
+ runs-on: ${{ matrix.os }}
+ defaults:
+ run:
+ shell: bash
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [macos-latest, windows-latest, ubuntu-latest]
+ arch: [auto64]
+ include:
+ - os: macos-latest
+ arch: universal2
+ - os: windows-latest
+ arch: auto32
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
+ - name: Install dependencies
+ run: pip install cibuildwheel
+
+ - name: Build Wheels
+ run: python -m cibuildwheel --output-dir wheelhouse .
+ env:
+ CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014
+ CIBW_ARCHS: ${{ matrix.arch }}
+ - uses: actions/upload-artifact@v3
+ with:
+ path: wheelhouse/*.whl
+
+ build_arch_wheels:
+ name: py${{ matrix.python }} on ${{ matrix.arch }}
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ # aarch64 uses qemu so it's slow, build each py version in parallel jobs
+ python: [38, 39, 310, 311, 312]
+ arch: [aarch64]
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ submodules: true
+ - uses: docker/setup-qemu-action@v2.2.0
+ with:
+ platforms: all
+ - name: Install dependencies
+ run: pip install cibuildwheel
+ - name: Build Wheels
+ run: python -m cibuildwheel --output-dir wheelhouse .
+ env:
+ CIBW_BUILD: cp${{ matrix.python }}-*
+ CIBW_ARCHS: ${{ matrix.arch }}
+ - uses: actions/upload-artifact@v3
+ with:
+ path: wheelhouse/*.whl
+
+ deploy:
+ name: Upload to PyPI on tagged commit
+ runs-on: ubuntu-latest
+ needs:
+ - build_pure
+ - build_wheels
+ - build_arch_wheels
+ # only run if the commit is tagged...
+ if: startsWith(github.ref, 'refs/tags/')
+ steps:
+ - uses: actions/download-artifact@v3
+ with:
+ name: artifact
+ path: dist
+ - uses: pypa/gh-action-pypi-publish@v1.8.8
+ with:
+ user: __token__
+ password: ${{ secrets.PYPI_PASSWORD }}
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 928d6587..b05ada48 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -6,7 +6,9 @@
version: 2
build:
- image: latest
+ os: ubuntu-22.04
+ tools:
+ python: "3.10"
# Build documentation in the docs/ directory with Sphinx
sphinx:
@@ -20,7 +22,6 @@ formats:
# Optionally set the version of Python and requirements required to build your docs
python:
- version: 3.8
install:
- requirements: Doc/docs-requirements.txt
- method: pip
diff --git a/Doc/docs-requirements.txt b/Doc/docs-requirements.txt
index 59f1cd19..f8f93c15 100644
--- a/Doc/docs-requirements.txt
+++ b/Doc/docs-requirements.txt
@@ -1,4 +1,4 @@
-sphinx==5.1.1
-sphinx_rtd_theme==1.0.0
-reportlab==3.6.11
-freetype-py==2.3.0
+sphinx==7.2.6
+sphinx_rtd_theme==1.3.0
+reportlab==4.0.6
+freetype-py==2.4.0
diff --git a/Doc/source/conf.py b/Doc/source/conf.py
index b95119db..982af803 100644
--- a/Doc/source/conf.py
+++ b/Doc/source/conf.py
@@ -30,14 +30,17 @@ needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinx.ext.coverage", "sphinx.ext.autosectionlabel"]
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.viewcode",
+ "sphinx.ext.napoleon",
+ "sphinx.ext.coverage",
+ "sphinx.ext.autosectionlabel",
+]
autodoc_mock_imports = ["gtk", "reportlab"]
-autodoc_default_options = {
- 'members': True,
- 'inherited-members': True
-}
+autodoc_default_options = {"members": True, "inherited-members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@@ -52,9 +55,11 @@ source_suffix = ".rst"
master_doc = "index"
# General information about the project.
-project = u"fontTools"
-copyright = u"2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
-author = u"Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
+project = "fontTools"
+copyright = (
+ "2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
+)
+author = "Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
# HTML page title
html_title = "fontTools Documentation"
@@ -64,9 +69,9 @@ html_title = "fontTools Documentation"
# built documents.
#
# The short X.Y version.
-version = u"4.0"
+version = "4.0"
# The full version, including alpha/beta/rc tags.
-release = u"4.0"
+release = "4.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -142,8 +147,8 @@ latex_documents = [
(
master_doc,
"fontTools.tex",
- u"fontTools Documentation",
- u"Just van Rossum, Behdad Esfahbod et al.",
+ "fontTools Documentation",
+ "Just van Rossum, Behdad Esfahbod et al.",
"manual",
)
]
@@ -153,7 +158,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, "fonttools", u"fontTools Documentation", [author], 1)]
+man_pages = [(master_doc, "fonttools", "fontTools Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
@@ -165,7 +170,7 @@ texinfo_documents = [
(
master_doc,
"fontTools",
- u"fontTools Documentation",
+ "fontTools Documentation",
author,
"fontTools",
"A library for manipulating fonts, written in Python.",
diff --git a/Doc/source/designspaceLib/index.rst b/Doc/source/designspaceLib/index.rst
index 5d17dc16..7b8b4878 100644
--- a/Doc/source/designspaceLib/index.rst
+++ b/Doc/source/designspaceLib/index.rst
@@ -127,20 +127,20 @@ Implementation and differences
The designspace format has gone through considerable development.
- the format was originally written for MutatorMath.
- - the format is now also used in fontTools.varlib.
+ - the format is now also used in fontTools.varLib.
- not all values are be required by all implementations.
-Varlib vs. MutatorMath
+varLib vs. MutatorMath
----------------------
-There are some differences between the way MutatorMath and fontTools.varlib handle designspaces.
+There are some differences between the way MutatorMath and fontTools.varLib handle designspaces.
- - Varlib does not support anisotropic interpolations.
+ - varLib does not support anisotropic interpolations.
- MutatorMath will extrapolate over the boundaries of
- the axes. Varlib can not (at the moment).
- - Varlib requires much less data to define an instance than
+ the axes. varLib can not (at the moment).
+ - varLib requires much less data to define an instance than
MutatorMath.
- - The goals of Varlib and MutatorMath are different, so not all
+ - The goals of varLib and MutatorMath are different, so not all
attributes are always needed.
@@ -174,6 +174,14 @@ it can become complex very quickly. So proceed with caution.
Version history
===============
+Version 5.1
+-----------
+
+The format was extended to support arbitrary mapping between input and output
+designspace locations. The ``<axes>`` elements now can have a ``<mappings>``
+element that specifies such mappings, which when present carries data that is
+used to compile to an ``avar`` version 2 table.
+
Version 5.0
-----------
diff --git a/Doc/source/designspaceLib/python.rst b/Doc/source/designspaceLib/python.rst
index 6a43bdcc..c998911c 100644
--- a/Doc/source/designspaceLib/python.rst
+++ b/Doc/source/designspaceLib/python.rst
@@ -187,10 +187,10 @@ for more information.
.. automodule:: fontTools.designspaceLib.split
-fontTools.designspaceLib.stat
+fontTools.varLib.stat
=============================
-.. automodule:: fontTools.designspaceLib.stat
+.. automodule:: fontTools.varLib.stat
fontTools.designspaceLib.statNames
diff --git a/Doc/source/designspaceLib/scripting.rst b/Doc/source/designspaceLib/scripting.rst
index 63235eec..52ddbd6e 100644
--- a/Doc/source/designspaceLib/scripting.rst
+++ b/Doc/source/designspaceLib/scripting.rst
@@ -221,23 +221,6 @@ Saving
path = "myprototype.designspace"
doc.write(path)
-************************
-Reading old designspaces
-************************
-
-Old designspace files might not contain ``axes`` definitions. This is
-how you reconstruct the axes from the extremes of the source locations
-
-.. code:: python
-
- doc.checkAxes()
-
-This is how you check the default font.
-
-.. code:: python
-
- doc.checkDefault()
-
***********
Generating?
***********
@@ -251,7 +234,7 @@ You can generate the UFOs with MutatorMath:
- Assuming the outline data in the masters is compatible.
-Or you can use the file in making a **variable font** with varlib.
+Or you can use the file in making a **variable font** with varLib.
.. _working_with_v5:
diff --git a/Doc/source/designspaceLib/xml.rst b/Doc/source/designspaceLib/xml.rst
index 6267b025..4e3492ef 100644
--- a/Doc/source/designspaceLib/xml.rst
+++ b/Doc/source/designspaceLib/xml.rst
@@ -20,10 +20,15 @@ Overview
.. code:: xml
<?xml version='1.0' encoding='utf-8'?>
- <designspace format="5.0">
+ <designspace format="5.1">
<axes>
<!-- define axes here -->
<axis... />
+ <mappings>
+ <!-- define axis mappings here -->
+ <!-- New in version 5.1 -->
+ <mapping... />
+ </mappings>
</axes>
<labels>
<!-- define STAT format 4 labels here -->
@@ -162,10 +167,17 @@ For a discrete axis:
``<labels>`` element (axis)
---------------------------
-The ``<labels>`` element contains one or more ``<label>`` elements.
+The ``<labels>`` element contains one or more ``<label>`` elements, and can
+indicate this axis' STAT ordering.
.. versionadded:: 5.0
+.. rubric:: Attributes
+
+- ``ordering``: optional, int, default: natural position of this axis in the list
+ of axes. STAT table field ``axisOrdering`` for this axis.
+
+ See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_
``<label>`` element (axis)
..........................
@@ -248,6 +260,64 @@ Example of all axis elements together
</axes>
+``<mappings>`` element
+======================
+
+- Define axis mappings.
+- Child element of ``axes``
+
+
+ .. versionadded:: 5.1
+
+
+``<mapping>`` element
+---------------------
+
+- Defines an axis mapping.
+- Child element of ``<mappings>``
+
+
+ .. versionadded:: 5.1
+
+
+``<input>`` element
+...................
+
+- Defines the input location of an axis mapping.
+- Child element of ``<mapping>``
+- Contains one or more ``<dimension>`` elements with designspace locations.
+
+ .. versionadded:: 5.1
+
+
+``<output>`` element
+...................
+
+- Defines the output location of an axis mapping.
+- Child element of ``<mapping>``
+- Contains one or more ``<dimension>`` elements with designspace locations.
+
+ .. versionadded:: 5.1
+
+
+Example of all mappings elements together
+=========================================
+
+.. code:: xml
+
+ <mappings>
+ <mapping>
+ <input>
+ <dimension name="weight" xvalue="900"/>
+ <dimension name="width" xvalue="150"/>
+ </input>
+ <output>
+ <dimension name="weight" xvalue="870"/>
+ </output>
+ </mapping>
+ </mappings>
+
+
================================
``<labels>`` element (top-level)
================================
@@ -297,7 +367,7 @@ See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/t
``<dimension>`` element
.......................
-- Child element of ``<location>``
+- Child element of ``<location>``, ``input``, or ``output`` elements
.. rubric:: Attributes
@@ -387,7 +457,7 @@ glyphname pairs: the glyphs that need to be substituted. For a rule to be trigge
- Defines a named rule.
- Each ``<rule>`` element contains one or more ``<conditionset>`` elements.
-- **Only one** ``<conditionset>`` needs to be true to trigger the rule (logical OR).
+- **Only one** ``<conditionset>`` needs to be true to trigger the rule (logical OR). An empty condition set is considered to be true, as in, the rule will be always-on.
- **All** conditions in a ``<conditionset>`` must be true to make the ``<conditionset>`` true. (logical AND)
- For backwards compatibility a ``<rule>`` can contain ``<condition>`` elements outside of a conditionset. These are then understood to be part of a single, implied, ``<conditionset>``. Note: these conditions should be written wrapped in a conditionset.
- A rule element needs to contain one or more ``<sub>`` elements in order to be compiled to a variable font.
@@ -405,7 +475,7 @@ glyphname pairs: the glyphs that need to be substituted. For a rule to be trigge
--------------------------
- Child element of ``<rule>``
-- Contains one or more ``<condition>`` elements.
+- Contains zero or more ``<condition>`` elements.
``<condition>`` element
@@ -504,7 +574,7 @@ The ``<sources>`` element contains one or more ``<source>`` elements.
While this could be extracted from the font data itself, it can be
more efficient to add it here.
- ``stylename``: optional, string. The style name of the source font.
-- ``name``: required, string. A unique name that can be used to
+- ``name``: optional, string. A unique name that can be used to
identify this font if it needs to be referenced elsewhere.
- ``filename``: required, string. A path to the source file, relative
to the root path of this document. The path can be at the same level
@@ -535,13 +605,13 @@ element with an ``xml:lang`` attribute:
Defines the coordinates of this source in the design space.
-.. seealso:: `Full documentation of the <location> element <location>`__
+.. seealso:: :ref:`Full documentation of the \<location\> element <location>`
``<dimension>`` element (source)
................................
-.. seealso:: `Full documentation of the <dimension> element <dimension>`__
+.. seealso:: :ref:`Full documentation of the \<dimension\> element <dimension>`
``<lib>`` element (source)
@@ -797,7 +867,7 @@ The ``<instances>`` element contains one or more ``<instance>`` elements.
- Defines a single font that can be calculated with the designspace.
- Child element of ``<instances>``
-- For use in Varlib the instance element really only needs the names
+- For use in varLib the instance element really only needs the names
and the location. The ``<glyphs>`` element is not required.
- MutatorMath uses the ``<glyphs>`` element to describe how certain
glyphs need different masters, mainly to describe the effects of
@@ -822,6 +892,11 @@ The ``<instances>`` element contains one or more ``<instance>`` elements.
with ``styleMapFamilyName``
- ``stylemapstylename``: string. Optional for MutatorMath. Corresponds
with ``styleMapStyleName``
+- ``location``: string. Optional. Describes the location of this instance,
+ taking it from the root level ``<labels>`` (STAT format 4) element with the
+ same name as the string.
+
+ .. versionadded:: 5.0
``<location>`` element (instance)
@@ -829,13 +904,13 @@ The ``<instances>`` element contains one or more ``<instance>`` elements.
Defines the coordinates of this instance in the design space.
-.. seealso:: `Full documentation of the <location> element <location>`__
+.. seealso:: :ref:`Full documentation of the \<location\> element <location>`
``<dimension>`` element (instance)
..................................
-.. seealso:: `Full documentation of the <dimension> element <dimension>`__
+.. seealso:: :ref:`Full documentation of the \<dimension\> element <dimension>`
``<lib>`` element (instance)
@@ -872,7 +947,7 @@ with an ``xml:lang`` attribute:
<stylemapfamilyname xml:lang="ja">モンセラート SemiBold</stylemapfamilyname>
-Example for varlib
+Example for varLib
------------------
.. code:: xml
@@ -891,6 +966,30 @@ Example for varlib
</instance>
+Here is an example using STAT format 4 labels to define the location of the
+instance directly.
+
+.. code:: xml
+
+ <?xml version='1.0' encoding='utf-8'?>
+ <designspace format="5.0">
+ <!-- ... -->
+ <labels>
+ <!-- define STAT format 4 labels here -->
+ <!-- New in version 5.0 -->
+ <label name="Extra Light">
+ <location>
+ <dimension name="weight" uservalue="123" />
+ </location>
+ </label>
+ </labels>
+ <!-- ... -->
+ <instances>
+ <instance filename="instances/labelled.ufo" location="Extra Light" />
+ </instances>
+ </designspace>
+
+
``<glyphs>`` element (instance)
-------------------------------
@@ -998,6 +1097,8 @@ Example for MutatorMath
The ``<lib>`` element contains arbitrary data.
- Child element of ``<designspace>``, ``<variable-font>`` and ``<instance>``
+- If present, content must be an XML Property List (plist).
+ <https://en.wikipedia.org/wiki/Property_list>__
- Contains arbitrary data about the whole document or about a specific
variable font or instance.
- Items in the dict need to use **reverse domain name notation**
diff --git a/Doc/source/developer.rst b/Doc/source/developer.rst
index dc4d425c..e480706a 100644
--- a/Doc/source/developer.rst
+++ b/Doc/source/developer.rst
@@ -101,13 +101,13 @@ Paul Wise.
License
-------
-`MIT license <https://github.com/fonttools/fonttools/blob/master/LICENSE>`_. See the full text of the license for details.
+`MIT license <https://github.com/fonttools/fonttools/blob/main/LICENSE>`_. See the full text of the license for details.
.. |Travis Build Status| image:: https://travis-ci.org/fonttools/fonttools.svg
:target: https://travis-ci.org/fonttools/fonttools
.. |Appveyor Build status| image:: https://ci.appveyor.com/api/projects/status/0f7fmee9as744sl7/branch/master?svg=true
:target: https://ci.appveyor.com/project/fonttools/fonttools/branch/master
-.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/master/graph/badge.svg
+.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/main/graph/badge.svg
:target: https://codecov.io/gh/fonttools/fonttools
.. |PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg
:target: https://pypi.org/project/FontTools
diff --git a/Doc/source/index.rst b/Doc/source/index.rst
index 571ef8dd..e9b1dd8e 100644
--- a/Doc/source/index.rst
+++ b/Doc/source/index.rst
@@ -13,14 +13,14 @@ About
fontTools is a family of libraries and utilities for manipulating fonts in Python.
-The project has an `MIT open-source license <https://github.com/fonttools/fonttools/blob/master/LICENSE>`_. Among other things this means you can use it free of charge.
+The project has an `MIT open-source license <https://github.com/fonttools/fonttools/blob/main/LICENSE>`_. Among other things this means you can use it free of charge.
Installation
------------
.. note::
- fontTools requires `Python <http://www.python.org/download/>`_ 3.6 or later.
+ fontTools requires `Python <http://www.python.org/download/>`_ 3.8 or later.
The package is listed in the Python Package Index (PyPI), so you can install it with `pip <https://pip.pypa.io/>`_::
@@ -56,6 +56,7 @@ This last utility takes a subcommand, which could be one of:
- ``varLib.models``: Normalize locations on a given designspace
- ``varLib.mutator``: Instantiate a variation font
- ``varLib.varStore``: Optimize a font's GDEF variation store
+- ``voltLib.voltToFea``: Convert MS VOLT to AFDKO feature files.
Libraries
---------
@@ -88,7 +89,7 @@ libraries in the fontTools suite:
- :py:mod:`fontTools.varLib`: Module for dealing with 'gvar'-style font variations
- :py:mod:`fontTools.voltLib`: Module for dealing with Visual OpenType Layout Tool (VOLT) files
-A selection of sample Python programs using these libaries can be found in the `Snippets directory <https://github.com/fonttools/fonttools/blob/master/Snippets/>`_ of the fontTools repository.
+A selection of sample Python programs using these libaries can be found in the `Snippets directory <https://github.com/fonttools/fonttools/blob/main/Snippets/>`_ of the fontTools repository.
Optional Dependencies
---------------------
@@ -107,7 +108,7 @@ Information for developers can be found :doc:`here <./developer>`.
License
-------
-`MIT license <https://github.com/fonttools/fonttools/blob/master/LICENSE>`_. See the full text of the license for details.
+`MIT license <https://github.com/fonttools/fonttools/blob/main/LICENSE>`_. See the full text of the license for details.
Table of Contents
@@ -141,14 +142,14 @@ Table of Contents
unicode
unicodedata/index
varLib/index
- voltLib
+ voltLib/index
.. |Travis Build Status| image:: https://travis-ci.org/fonttools/fonttools.svg
:target: https://travis-ci.org/fonttools/fonttools
.. |Appveyor Build status| image:: https://ci.appveyor.com/api/projects/status/0f7fmee9as744sl7/branch/master?svg=true
:target: https://ci.appveyor.com/project/fonttools/fonttools/branch/master
-.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/master/graph/badge.svg
+.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/main/graph/badge.svg
:target: https://codecov.io/gh/fonttools/fonttools
.. |PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg
:target: https://pypi.org/project/FontTools
diff --git a/Doc/source/voltLib.rst b/Doc/source/voltLib/index.rst
index be9e8024..00d067a2 100644
--- a/Doc/source/voltLib.rst
+++ b/Doc/source/voltLib/index.rst
@@ -1,6 +1,11 @@
-#######
-voltLib
-#######
+####################################
+voltLib: Read/write MS VOLT projects
+####################################
+
+.. toctree::
+ :maxdepth: 2
+
+ voltToFea
.. automodule:: fontTools.voltLib
diff --git a/Doc/source/voltLib/voltToFea.rst b/Doc/source/voltLib/voltToFea.rst
new file mode 100644
index 00000000..178dd68d
--- /dev/null
+++ b/Doc/source/voltLib/voltToFea.rst
@@ -0,0 +1,8 @@
+#################################################
+voltToFea: Convert MS VOLT to AFDKO feature files
+#################################################
+
+.. automodule:: fontTools.voltLib.voltToFea
+ :inherited-members:
+ :members:
+ :undoc-members:
diff --git a/Lib/fontTools/__init__.py b/Lib/fontTools/__init__.py
index 5b2cca1f..9a59504e 100644
--- a/Lib/fontTools/__init__.py
+++ b/Lib/fontTools/__init__.py
@@ -3,6 +3,6 @@ from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
-version = __version__ = "4.37.1"
+version = __version__ = "4.44.0"
__all__ = ["version", "log", "configLogger"]
diff --git a/Lib/fontTools/__main__.py b/Lib/fontTools/__main__.py
index 9b978aaa..7c74ad3c 100644
--- a/Lib/fontTools/__main__.py
+++ b/Lib/fontTools/__main__.py
@@ -2,33 +2,34 @@ import sys
def main(args=None):
- if args is None:
- args = sys.argv[1:]
-
- # TODO Handle library-wide options. Eg.:
- # --unicodedata
- # --verbose / other logging stuff
-
- # TODO Allow a way to run arbitrary modules? Useful for setting
- # library-wide options and calling another library. Eg.:
- #
- # $ fonttools --unicodedata=... fontmake ...
- #
- # This allows for a git-like command where thirdparty commands
- # can be added. Should we just try importing the fonttools
- # module first and try without if it fails?
-
- if len(sys.argv) < 2:
- sys.argv.append("help")
- if sys.argv[1] == "-h" or sys.argv[1] == "--help":
- sys.argv[1] = "help"
- mod = 'fontTools.'+sys.argv[1]
- sys.argv[1] = sys.argv[0] + ' ' + sys.argv[1]
- del sys.argv[0]
-
- import runpy
- runpy.run_module(mod, run_name='__main__')
-
-
-if __name__ == '__main__':
- sys.exit(main())
+ if args is None:
+ args = sys.argv[1:]
+
+ # TODO Handle library-wide options. Eg.:
+ # --unicodedata
+ # --verbose / other logging stuff
+
+ # TODO Allow a way to run arbitrary modules? Useful for setting
+ # library-wide options and calling another library. Eg.:
+ #
+ # $ fonttools --unicodedata=... fontmake ...
+ #
+ # This allows for a git-like command where thirdparty commands
+ # can be added. Should we just try importing the fonttools
+ # module first and try without if it fails?
+
+ if len(sys.argv) < 2:
+ sys.argv.append("help")
+ if sys.argv[1] == "-h" or sys.argv[1] == "--help":
+ sys.argv[1] = "help"
+ mod = "fontTools." + sys.argv[1]
+ sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
+ del sys.argv[0]
+
+ import runpy
+
+ runpy.run_module(mod, run_name="__main__")
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/afmLib.py b/Lib/fontTools/afmLib.py
index 49d99512..935a1e8e 100644
--- a/Lib/fontTools/afmLib.py
+++ b/Lib/fontTools/afmLib.py
@@ -53,378 +53,385 @@ identifierRE = re.compile(r"^([A-Za-z]+).*")
# regular expression to parse char lines
charRE = re.compile(
- r"(-?\d+)" # charnum
- r"\s*;\s*WX\s+" # ; WX
- r"(-?\d+)" # width
- r"\s*;\s*N\s+" # ; N
- r"([.A-Za-z0-9_]+)" # charname
- r"\s*;\s*B\s+" # ; B
- r"(-?\d+)" # left
- r"\s+"
- r"(-?\d+)" # bottom
- r"\s+"
- r"(-?\d+)" # right
- r"\s+"
- r"(-?\d+)" # top
- r"\s*;\s*" # ;
- )
+ r"(-?\d+)" # charnum
+ r"\s*;\s*WX\s+" # ; WX
+ r"(-?\d+)" # width
+ r"\s*;\s*N\s+" # ; N
+ r"([.A-Za-z0-9_]+)" # charname
+ r"\s*;\s*B\s+" # ; B
+ r"(-?\d+)" # left
+ r"\s+"
+ r"(-?\d+)" # bottom
+ r"\s+"
+ r"(-?\d+)" # right
+ r"\s+"
+ r"(-?\d+)" # top
+ r"\s*;\s*" # ;
+)
# regular expression to parse kerning lines
kernRE = re.compile(
- r"([.A-Za-z0-9_]+)" # leftchar
- r"\s+"
- r"([.A-Za-z0-9_]+)" # rightchar
- r"\s+"
- r"(-?\d+)" # value
- r"\s*"
- )
+ r"([.A-Za-z0-9_]+)" # leftchar
+ r"\s+"
+ r"([.A-Za-z0-9_]+)" # rightchar
+ r"\s+"
+ r"(-?\d+)" # value
+ r"\s*"
+)
# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
- r"([.A-Za-z0-9_]+)" # char name
- r"\s+"
- r"(\d+)" # number of parts
- r"\s*;\s*"
- )
+ r"([.A-Za-z0-9_]+)" r"\s+" r"(\d+)" r"\s*;\s*" # char name # number of parts
+)
componentRE = re.compile(
- r"PCC\s+" # PPC
- r"([.A-Za-z0-9_]+)" # base char name
- r"\s+"
- r"(-?\d+)" # x offset
- r"\s+"
- r"(-?\d+)" # y offset
- r"\s*;\s*"
- )
+ r"PCC\s+" # PPC
+ r"([.A-Za-z0-9_]+)" # base char name
+ r"\s+"
+ r"(-?\d+)" # x offset
+ r"\s+"
+ r"(-?\d+)" # y offset
+ r"\s*;\s*"
+)
preferredAttributeOrder = [
- "FontName",
- "FullName",
- "FamilyName",
- "Weight",
- "ItalicAngle",
- "IsFixedPitch",
- "FontBBox",
- "UnderlinePosition",
- "UnderlineThickness",
- "Version",
- "Notice",
- "EncodingScheme",
- "CapHeight",
- "XHeight",
- "Ascender",
- "Descender",
+ "FontName",
+ "FullName",
+ "FamilyName",
+ "Weight",
+ "ItalicAngle",
+ "IsFixedPitch",
+ "FontBBox",
+ "UnderlinePosition",
+ "UnderlineThickness",
+ "Version",
+ "Notice",
+ "EncodingScheme",
+ "CapHeight",
+ "XHeight",
+ "Ascender",
+ "Descender",
]
class error(Exception):
- pass
+ pass
class AFM(object):
-
- _attrs = None
-
- _keywords = ['StartFontMetrics',
- 'EndFontMetrics',
- 'StartCharMetrics',
- 'EndCharMetrics',
- 'StartKernData',
- 'StartKernPairs',
- 'EndKernPairs',
- 'EndKernData',
- 'StartComposites',
- 'EndComposites',
- ]
-
- def __init__(self, path=None):
- """AFM file reader.
-
- Instantiating an object with a path name will cause the file to be opened,
- read, and parsed. Alternatively the path can be left unspecified, and a
- file can be parsed later with the :meth:`read` method."""
- self._attrs = {}
- self._chars = {}
- self._kerning = {}
- self._index = {}
- self._comments = []
- self._composites = {}
- if path is not None:
- self.read(path)
-
- def read(self, path):
- """Opens, reads and parses a file."""
- lines = readlines(path)
- for line in lines:
- if not line.strip():
- continue
- m = identifierRE.match(line)
- if m is None:
- raise error("syntax error in AFM file: " + repr(line))
-
- pos = m.regs[1][1]
- word = line[:pos]
- rest = line[pos:].strip()
- if word in self._keywords:
- continue
- if word == "C":
- self.parsechar(rest)
- elif word == "KPX":
- self.parsekernpair(rest)
- elif word == "CC":
- self.parsecomposite(rest)
- else:
- self.parseattr(word, rest)
-
- def parsechar(self, rest):
- m = charRE.match(rest)
- if m is None:
- raise error("syntax error in AFM file: " + repr(rest))
- things = []
- for fr, to in m.regs[1:]:
- things.append(rest[fr:to])
- charname = things[2]
- del things[2]
- charnum, width, l, b, r, t = (int(thing) for thing in things)
- self._chars[charname] = charnum, width, (l, b, r, t)
-
- def parsekernpair(self, rest):
- m = kernRE.match(rest)
- if m is None:
- raise error("syntax error in AFM file: " + repr(rest))
- things = []
- for fr, to in m.regs[1:]:
- things.append(rest[fr:to])
- leftchar, rightchar, value = things
- value = int(value)
- self._kerning[(leftchar, rightchar)] = value
-
- def parseattr(self, word, rest):
- if word == "FontBBox":
- l, b, r, t = [int(thing) for thing in rest.split()]
- self._attrs[word] = l, b, r, t
- elif word == "Comment":
- self._comments.append(rest)
- else:
- try:
- value = int(rest)
- except (ValueError, OverflowError):
- self._attrs[word] = rest
- else:
- self._attrs[word] = value
-
- def parsecomposite(self, rest):
- m = compositeRE.match(rest)
- if m is None:
- raise error("syntax error in AFM file: " + repr(rest))
- charname = m.group(1)
- ncomponents = int(m.group(2))
- rest = rest[m.regs[0][1]:]
- components = []
- while True:
- m = componentRE.match(rest)
- if m is None:
- raise error("syntax error in AFM file: " + repr(rest))
- basechar = m.group(1)
- xoffset = int(m.group(2))
- yoffset = int(m.group(3))
- components.append((basechar, xoffset, yoffset))
- rest = rest[m.regs[0][1]:]
- if not rest:
- break
- assert len(components) == ncomponents
- self._composites[charname] = components
-
- def write(self, path, sep='\r'):
- """Writes out an AFM font to the given path."""
- import time
- lines = [ "StartFontMetrics 2.0",
- "Comment Generated by afmLib; at %s" % (
- time.strftime("%m/%d/%Y %H:%M:%S",
- time.localtime(time.time())))]
-
- # write comments, assuming (possibly wrongly!) they should
- # all appear at the top
- for comment in self._comments:
- lines.append("Comment " + comment)
-
- # write attributes, first the ones we know about, in
- # a preferred order
- attrs = self._attrs
- for attr in preferredAttributeOrder:
- if attr in attrs:
- value = attrs[attr]
- if attr == "FontBBox":
- value = "%s %s %s %s" % value
- lines.append(attr + " " + str(value))
- # then write the attributes we don't know about,
- # in alphabetical order
- items = sorted(attrs.items())
- for attr, value in items:
- if attr in preferredAttributeOrder:
- continue
- lines.append(attr + " " + str(value))
-
- # write char metrics
- lines.append("StartCharMetrics " + repr(len(self._chars)))
- items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()]
-
- def myKey(a):
- """Custom key function to make sure unencoded chars (-1)
- end up at the end of the list after sorting."""
- if a[0] == -1:
- a = (0xffff,) + a[1:] # 0xffff is an arbitrary large number
- return a
- items.sort(key=myKey)
-
- for charnum, (charname, width, (l, b, r, t)) in items:
- lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" %
- (charnum, width, charname, l, b, r, t))
- lines.append("EndCharMetrics")
-
- # write kerning info
- lines.append("StartKernData")
- lines.append("StartKernPairs " + repr(len(self._kerning)))
- items = sorted(self._kerning.items())
- for (leftchar, rightchar), value in items:
- lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
- lines.append("EndKernPairs")
- lines.append("EndKernData")
-
- if self._composites:
- composites = sorted(self._composites.items())
- lines.append("StartComposites %s" % len(self._composites))
- for charname, components in composites:
- line = "CC %s %s ;" % (charname, len(components))
- for basechar, xoffset, yoffset in components:
- line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
- lines.append(line)
- lines.append("EndComposites")
-
- lines.append("EndFontMetrics")
-
- writelines(path, lines, sep)
-
- def has_kernpair(self, pair):
- """Returns `True` if the given glyph pair (specified as a tuple) exists
- in the kerning dictionary."""
- return pair in self._kerning
-
- def kernpairs(self):
- """Returns a list of all kern pairs in the kerning dictionary."""
- return list(self._kerning.keys())
-
- def has_char(self, char):
- """Returns `True` if the given glyph exists in the font."""
- return char in self._chars
-
- def chars(self):
- """Returns a list of all glyph names in the font."""
- return list(self._chars.keys())
-
- def comments(self):
- """Returns all comments from the file."""
- return self._comments
-
- def addComment(self, comment):
- """Adds a new comment to the file."""
- self._comments.append(comment)
-
- def addComposite(self, glyphName, components):
- """Specifies that the glyph `glyphName` is made up of the given components.
- The components list should be of the following form::
-
- [
- (glyphname, xOffset, yOffset),
- ...
- ]
-
- """
- self._composites[glyphName] = components
-
- def __getattr__(self, attr):
- if attr in self._attrs:
- return self._attrs[attr]
- else:
- raise AttributeError(attr)
-
- def __setattr__(self, attr, value):
- # all attrs *not* starting with "_" are consider to be AFM keywords
- if attr[:1] == "_":
- self.__dict__[attr] = value
- else:
- self._attrs[attr] = value
-
- def __delattr__(self, attr):
- # all attrs *not* starting with "_" are consider to be AFM keywords
- if attr[:1] == "_":
- try:
- del self.__dict__[attr]
- except KeyError:
- raise AttributeError(attr)
- else:
- try:
- del self._attrs[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getitem__(self, key):
- if isinstance(key, tuple):
- # key is a tuple, return the kernpair
- return self._kerning[key]
- else:
- # return the metrics instead
- return self._chars[key]
-
- def __setitem__(self, key, value):
- if isinstance(key, tuple):
- # key is a tuple, set kernpair
- self._kerning[key] = value
- else:
- # set char metrics
- self._chars[key] = value
-
- def __delitem__(self, key):
- if isinstance(key, tuple):
- # key is a tuple, del kernpair
- del self._kerning[key]
- else:
- # del char metrics
- del self._chars[key]
-
- def __repr__(self):
- if hasattr(self, "FullName"):
- return '<AFM object for %s>' % self.FullName
- else:
- return '<AFM object at %x>' % id(self)
+ _attrs = None
+
+ _keywords = [
+ "StartFontMetrics",
+ "EndFontMetrics",
+ "StartCharMetrics",
+ "EndCharMetrics",
+ "StartKernData",
+ "StartKernPairs",
+ "EndKernPairs",
+ "EndKernData",
+ "StartComposites",
+ "EndComposites",
+ ]
+
+ def __init__(self, path=None):
+ """AFM file reader.
+
+ Instantiating an object with a path name will cause the file to be opened,
+ read, and parsed. Alternatively the path can be left unspecified, and a
+ file can be parsed later with the :meth:`read` method."""
+ self._attrs = {}
+ self._chars = {}
+ self._kerning = {}
+ self._index = {}
+ self._comments = []
+ self._composites = {}
+ if path is not None:
+ self.read(path)
+
+ def read(self, path):
+ """Opens, reads and parses a file."""
+ lines = readlines(path)
+ for line in lines:
+ if not line.strip():
+ continue
+ m = identifierRE.match(line)
+ if m is None:
+ raise error("syntax error in AFM file: " + repr(line))
+
+ pos = m.regs[1][1]
+ word = line[:pos]
+ rest = line[pos:].strip()
+ if word in self._keywords:
+ continue
+ if word == "C":
+ self.parsechar(rest)
+ elif word == "KPX":
+ self.parsekernpair(rest)
+ elif word == "CC":
+ self.parsecomposite(rest)
+ else:
+ self.parseattr(word, rest)
+
+ def parsechar(self, rest):
+ m = charRE.match(rest)
+ if m is None:
+ raise error("syntax error in AFM file: " + repr(rest))
+ things = []
+ for fr, to in m.regs[1:]:
+ things.append(rest[fr:to])
+ charname = things[2]
+ del things[2]
+ charnum, width, l, b, r, t = (int(thing) for thing in things)
+ self._chars[charname] = charnum, width, (l, b, r, t)
+
+ def parsekernpair(self, rest):
+ m = kernRE.match(rest)
+ if m is None:
+ raise error("syntax error in AFM file: " + repr(rest))
+ things = []
+ for fr, to in m.regs[1:]:
+ things.append(rest[fr:to])
+ leftchar, rightchar, value = things
+ value = int(value)
+ self._kerning[(leftchar, rightchar)] = value
+
+ def parseattr(self, word, rest):
+ if word == "FontBBox":
+ l, b, r, t = [int(thing) for thing in rest.split()]
+ self._attrs[word] = l, b, r, t
+ elif word == "Comment":
+ self._comments.append(rest)
+ else:
+ try:
+ value = int(rest)
+ except (ValueError, OverflowError):
+ self._attrs[word] = rest
+ else:
+ self._attrs[word] = value
+
+ def parsecomposite(self, rest):
+ m = compositeRE.match(rest)
+ if m is None:
+ raise error("syntax error in AFM file: " + repr(rest))
+ charname = m.group(1)
+ ncomponents = int(m.group(2))
+ rest = rest[m.regs[0][1] :]
+ components = []
+ while True:
+ m = componentRE.match(rest)
+ if m is None:
+ raise error("syntax error in AFM file: " + repr(rest))
+ basechar = m.group(1)
+ xoffset = int(m.group(2))
+ yoffset = int(m.group(3))
+ components.append((basechar, xoffset, yoffset))
+ rest = rest[m.regs[0][1] :]
+ if not rest:
+ break
+ assert len(components) == ncomponents
+ self._composites[charname] = components
+
+ def write(self, path, sep="\r"):
+ """Writes out an AFM font to the given path."""
+ import time
+
+ lines = [
+ "StartFontMetrics 2.0",
+ "Comment Generated by afmLib; at %s"
+ % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
+ ]
+
+ # write comments, assuming (possibly wrongly!) they should
+ # all appear at the top
+ for comment in self._comments:
+ lines.append("Comment " + comment)
+
+ # write attributes, first the ones we know about, in
+ # a preferred order
+ attrs = self._attrs
+ for attr in preferredAttributeOrder:
+ if attr in attrs:
+ value = attrs[attr]
+ if attr == "FontBBox":
+ value = "%s %s %s %s" % value
+ lines.append(attr + " " + str(value))
+ # then write the attributes we don't know about,
+ # in alphabetical order
+ items = sorted(attrs.items())
+ for attr, value in items:
+ if attr in preferredAttributeOrder:
+ continue
+ lines.append(attr + " " + str(value))
+
+ # write char metrics
+ lines.append("StartCharMetrics " + repr(len(self._chars)))
+ items = [
+ (charnum, (charname, width, box))
+ for charname, (charnum, width, box) in self._chars.items()
+ ]
+
+ def myKey(a):
+ """Custom key function to make sure unencoded chars (-1)
+ end up at the end of the list after sorting."""
+ if a[0] == -1:
+ a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number
+ return a
+
+ items.sort(key=myKey)
+
+ for charnum, (charname, width, (l, b, r, t)) in items:
+ lines.append(
+ "C %d ; WX %d ; N %s ; B %d %d %d %d ;"
+ % (charnum, width, charname, l, b, r, t)
+ )
+ lines.append("EndCharMetrics")
+
+ # write kerning info
+ lines.append("StartKernData")
+ lines.append("StartKernPairs " + repr(len(self._kerning)))
+ items = sorted(self._kerning.items())
+ for (leftchar, rightchar), value in items:
+ lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
+ lines.append("EndKernPairs")
+ lines.append("EndKernData")
+
+ if self._composites:
+ composites = sorted(self._composites.items())
+ lines.append("StartComposites %s" % len(self._composites))
+ for charname, components in composites:
+ line = "CC %s %s ;" % (charname, len(components))
+ for basechar, xoffset, yoffset in components:
+ line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
+ lines.append(line)
+ lines.append("EndComposites")
+
+ lines.append("EndFontMetrics")
+
+ writelines(path, lines, sep)
+
+ def has_kernpair(self, pair):
+ """Returns `True` if the given glyph pair (specified as a tuple) exists
+ in the kerning dictionary."""
+ return pair in self._kerning
+
+ def kernpairs(self):
+ """Returns a list of all kern pairs in the kerning dictionary."""
+ return list(self._kerning.keys())
+
+ def has_char(self, char):
+ """Returns `True` if the given glyph exists in the font."""
+ return char in self._chars
+
+ def chars(self):
+ """Returns a list of all glyph names in the font."""
+ return list(self._chars.keys())
+
+ def comments(self):
+ """Returns all comments from the file."""
+ return self._comments
+
+ def addComment(self, comment):
+ """Adds a new comment to the file."""
+ self._comments.append(comment)
+
+ def addComposite(self, glyphName, components):
+ """Specifies that the glyph `glyphName` is made up of the given components.
+ The components list should be of the following form::
+
+ [
+ (glyphname, xOffset, yOffset),
+ ...
+ ]
+
+ """
+ self._composites[glyphName] = components
+
+ def __getattr__(self, attr):
+ if attr in self._attrs:
+ return self._attrs[attr]
+ else:
+ raise AttributeError(attr)
+
+ def __setattr__(self, attr, value):
+ # all attrs *not* starting with "_" are consider to be AFM keywords
+ if attr[:1] == "_":
+ self.__dict__[attr] = value
+ else:
+ self._attrs[attr] = value
+
+ def __delattr__(self, attr):
+ # all attrs *not* starting with "_" are consider to be AFM keywords
+ if attr[:1] == "_":
+ try:
+ del self.__dict__[attr]
+ except KeyError:
+ raise AttributeError(attr)
+ else:
+ try:
+ del self._attrs[attr]
+ except KeyError:
+ raise AttributeError(attr)
+
+ def __getitem__(self, key):
+ if isinstance(key, tuple):
+ # key is a tuple, return the kernpair
+ return self._kerning[key]
+ else:
+ # return the metrics instead
+ return self._chars[key]
+
+ def __setitem__(self, key, value):
+ if isinstance(key, tuple):
+ # key is a tuple, set kernpair
+ self._kerning[key] = value
+ else:
+ # set char metrics
+ self._chars[key] = value
+
+ def __delitem__(self, key):
+ if isinstance(key, tuple):
+ # key is a tuple, del kernpair
+ del self._kerning[key]
+ else:
+ # del char metrics
+ del self._chars[key]
+
+ def __repr__(self):
+ if hasattr(self, "FullName"):
+ return "<AFM object for %s>" % self.FullName
+ else:
+ return "<AFM object at %x>" % id(self)
def readlines(path):
- with open(path, "r", encoding="ascii") as f:
- data = f.read()
- return data.splitlines()
+ with open(path, "r", encoding="ascii") as f:
+ data = f.read()
+ return data.splitlines()
+
-def writelines(path, lines, sep='\r'):
- with open(path, "w", encoding="ascii", newline=sep) as f:
- f.write("\n".join(lines) + "\n")
+def writelines(path, lines, sep="\r"):
+ with open(path, "w", encoding="ascii", newline=sep) as f:
+ f.write("\n".join(lines) + "\n")
if __name__ == "__main__":
- import EasyDialogs
- path = EasyDialogs.AskFileForOpen()
- if path:
- afm = AFM(path)
- char = 'A'
- if afm.has_char(char):
- print(afm[char]) # print charnum, width and boundingbox
- pair = ('A', 'V')
- if afm.has_kernpair(pair):
- print(afm[pair]) # print kerning value for pair
- print(afm.Version) # various other afm entries have become attributes
- print(afm.Weight)
- # afm.comments() returns a list of all Comment lines found in the AFM
- print(afm.comments())
- #print afm.chars()
- #print afm.kernpairs()
- print(afm)
- afm.write(path + ".muck")
+ import EasyDialogs
+
+ path = EasyDialogs.AskFileForOpen()
+ if path:
+ afm = AFM(path)
+ char = "A"
+ if afm.has_char(char):
+ print(afm[char]) # print charnum, width and boundingbox
+ pair = ("A", "V")
+ if afm.has_kernpair(pair):
+ print(afm[pair]) # print kerning value for pair
+ print(afm.Version) # various other afm entries have become attributes
+ print(afm.Weight)
+ # afm.comments() returns a list of all Comment lines found in the AFM
+ print(afm.comments())
+ # print afm.chars()
+ # print afm.kernpairs()
+ print(afm)
+ afm.write(path + ".muck")
diff --git a/Lib/fontTools/agl.py b/Lib/fontTools/agl.py
index cc286e42..d6994628 100644
--- a/Lib/fontTools/agl.py
+++ b/Lib/fontTools/agl.py
@@ -5059,174 +5059,175 @@ _aglfnText = """\
class AGLError(Exception):
- pass
+ pass
+
LEGACY_AGL2UV = {}
AGL2UV = {}
UV2AGL = {}
+
def _builddicts():
- import re
+ import re
+
+ lines = _aglText.splitlines()
- lines = _aglText.splitlines()
+ parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
- parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
+ for line in lines:
+ if not line or line[:1] == "#":
+ continue
+ m = parseAGL_RE.match(line)
+ if not m:
+ raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
+ unicodes = m.group(2)
+ assert len(unicodes) % 5 == 4
+ unicodes = [int(unicode, 16) for unicode in unicodes.split()]
+ glyphName = tostr(m.group(1))
+ LEGACY_AGL2UV[glyphName] = unicodes
- for line in lines:
- if not line or line[:1] == '#':
- continue
- m = parseAGL_RE.match(line)
- if not m:
- raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
- unicodes = m.group(2)
- assert len(unicodes) % 5 == 4
- unicodes = [int(unicode, 16) for unicode in unicodes.split()]
- glyphName = tostr(m.group(1))
- LEGACY_AGL2UV[glyphName] = unicodes
+ lines = _aglfnText.splitlines()
- lines = _aglfnText.splitlines()
+ parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
- parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
+ for line in lines:
+ if not line or line[:1] == "#":
+ continue
+ m = parseAGLFN_RE.match(line)
+ if not m:
+ raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20]))
+ unicode = m.group(1)
+ assert len(unicode) == 4
+ unicode = int(unicode, 16)
+ glyphName = tostr(m.group(2))
+ AGL2UV[glyphName] = unicode
+ UV2AGL[unicode] = glyphName
- for line in lines:
- if not line or line[:1] == '#':
- continue
- m = parseAGLFN_RE.match(line)
- if not m:
- raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20]))
- unicode = m.group(1)
- assert len(unicode) == 4
- unicode = int(unicode, 16)
- glyphName = tostr(m.group(2))
- AGL2UV[glyphName] = unicode
- UV2AGL[unicode] = glyphName
_builddicts()
def toUnicode(glyph, isZapfDingbats=False):
- """Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``
+ """Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``
- If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
- glyph names (as required by the AGL specification).
- """
- # https://github.com/adobe-type-tools/agl-specification#2-the-mapping
- #
- # 1. Drop all the characters from the glyph name starting with
- # the first occurrence of a period (U+002E; FULL STOP), if any.
- glyph = glyph.split(".", 1)[0]
+ If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
+ glyph names (as required by the AGL specification).
+ """
+ # https://github.com/adobe-type-tools/agl-specification#2-the-mapping
+ #
+ # 1. Drop all the characters from the glyph name starting with
+ # the first occurrence of a period (U+002E; FULL STOP), if any.
+ glyph = glyph.split(".", 1)[0]
- # 2. Split the remaining string into a sequence of components,
- # using underscore (U+005F; LOW LINE) as the delimiter.
- components = glyph.split("_")
+ # 2. Split the remaining string into a sequence of components,
+ # using underscore (U+005F; LOW LINE) as the delimiter.
+ components = glyph.split("_")
- # 3. Map each component to a character string according to the
- # procedure below, and concatenate those strings; the result
- # is the character string to which the glyph name is mapped.
- result = [_glyphComponentToUnicode(c, isZapfDingbats)
- for c in components]
- return "".join(result)
+ # 3. Map each component to a character string according to the
+ # procedure below, and concatenate those strings; the result
+ # is the character string to which the glyph name is mapped.
+ result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components]
+ return "".join(result)
def _glyphComponentToUnicode(component, isZapfDingbats):
- # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats),
- # and the component is in the ITC Zapf Dingbats Glyph List, then
- # map it to the corresponding character in that list.
- dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None
- if dingbat:
- return dingbat
+ # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats),
+ # and the component is in the ITC Zapf Dingbats Glyph List, then
+ # map it to the corresponding character in that list.
+ dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None
+ if dingbat:
+ return dingbat
- # Otherwise, if the component is in AGL, then map it
- # to the corresponding character in that list.
- uchars = LEGACY_AGL2UV.get(component)
- if uchars:
- return "".join(map(chr, uchars))
+ # Otherwise, if the component is in AGL, then map it
+ # to the corresponding character in that list.
+ uchars = LEGACY_AGL2UV.get(component)
+ if uchars:
+ return "".join(map(chr, uchars))
- # Otherwise, if the component is of the form "uni" (U+0075,
- # U+006E, and U+0069) followed by a sequence of uppercase
- # hexadecimal digits (0–9 and A–F, meaning U+0030 through
- # U+0039 and U+0041 through U+0046), if the length of that
- # sequence is a multiple of four, and if each group of four
- # digits represents a value in the ranges 0000 through D7FF
- # or E000 through FFFF, then interpret each as a Unicode scalar
- # value and map the component to the string made of those
- # scalar values. Note that the range and digit-length
- # restrictions mean that the "uni" glyph name prefix can be
- # used only with UVs in the Basic Multilingual Plane (BMP).
- uni = _uniToUnicode(component)
- if uni:
- return uni
+ # Otherwise, if the component is of the form "uni" (U+0075,
+ # U+006E, and U+0069) followed by a sequence of uppercase
+ # hexadecimal digits (0–9 and A–F, meaning U+0030 through
+ # U+0039 and U+0041 through U+0046), if the length of that
+ # sequence is a multiple of four, and if each group of four
+ # digits represents a value in the ranges 0000 through D7FF
+ # or E000 through FFFF, then interpret each as a Unicode scalar
+ # value and map the component to the string made of those
+ # scalar values. Note that the range and digit-length
+ # restrictions mean that the "uni" glyph name prefix can be
+ # used only with UVs in the Basic Multilingual Plane (BMP).
+ uni = _uniToUnicode(component)
+ if uni:
+ return uni
- # Otherwise, if the component is of the form "u" (U+0075)
- # followed by a sequence of four to six uppercase hexadecimal
- # digits (0–9 and A–F, meaning U+0030 through U+0039 and
- # U+0041 through U+0046), and those digits represents a value
- # in the ranges 0000 through D7FF or E000 through 10FFFF, then
- # interpret it as a Unicode scalar value and map the component
- # to the string made of this scalar value.
- uni = _uToUnicode(component)
- if uni:
- return uni
+ # Otherwise, if the component is of the form "u" (U+0075)
+ # followed by a sequence of four to six uppercase hexadecimal
+ # digits (0–9 and A–F, meaning U+0030 through U+0039 and
+ # U+0041 through U+0046), and those digits represents a value
+ # in the ranges 0000 through D7FF or E000 through 10FFFF, then
+ # interpret it as a Unicode scalar value and map the component
+ # to the string made of this scalar value.
+ uni = _uToUnicode(component)
+ if uni:
+ return uni
- # Otherwise, map the component to an empty string.
- return ''
+ # Otherwise, map the component to an empty string.
+ return ""
# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
_AGL_ZAPF_DINGBATS = (
- " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
- "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
- "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
- "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰")
+ " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
+ "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
+ "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
+ "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰"
+)
def _zapfDingbatsToUnicode(glyph):
- """Helper for toUnicode()."""
- if len(glyph) < 2 or glyph[0] != 'a':
- return None
- try:
- gid = int(glyph[1:])
- except ValueError:
- return None
- if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
- return None
- uchar = _AGL_ZAPF_DINGBATS[gid]
- return uchar if uchar != ' ' else None
+ """Helper for toUnicode()."""
+ if len(glyph) < 2 or glyph[0] != "a":
+ return None
+ try:
+ gid = int(glyph[1:])
+ except ValueError:
+ return None
+ if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
+ return None
+ uchar = _AGL_ZAPF_DINGBATS[gid]
+ return uchar if uchar != " " else None
_re_uni = re.compile("^uni([0-9A-F]+)$")
def _uniToUnicode(component):
- """Helper for toUnicode() to handle "uniABCD" components."""
- match = _re_uni.match(component)
- if match is None:
- return None
- digits = match.group(1)
- if len(digits) % 4 != 0:
- return None
- chars = [int(digits[i : i + 4], 16)
- for i in range(0, len(digits), 4)]
- if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
- # The AGL specification explicitly excluded surrogate pairs.
- return None
- return ''.join([chr(c) for c in chars])
+ """Helper for toUnicode() to handle "uniABCD" components."""
+ match = _re_uni.match(component)
+ if match is None:
+ return None
+ digits = match.group(1)
+ if len(digits) % 4 != 0:
+ return None
+ chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)]
+ if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
+ # The AGL specification explicitly excluded surrogate pairs.
+ return None
+ return "".join([chr(c) for c in chars])
_re_u = re.compile("^u([0-9A-F]{4,6})$")
def _uToUnicode(component):
- """Helper for toUnicode() to handle "u1ABCD" components."""
- match = _re_u.match(component)
- if match is None:
- return None
- digits = match.group(1)
- try:
- value = int(digits, 16)
- except ValueError:
- return None
- if ((value >= 0x0000 and value <= 0xD7FF) or
- (value >= 0xE000 and value <= 0x10FFFF)):
- return chr(value)
- return None
+ """Helper for toUnicode() to handle "u1ABCD" components."""
+ match = _re_u.match(component)
+ if match is None:
+ return None
+ digits = match.group(1)
+ try:
+ value = int(digits, 16)
+ except ValueError:
+ return None
+ if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF):
+ return chr(value)
+ return None
diff --git a/Lib/fontTools/cffLib/__init__.py b/Lib/fontTools/cffLib/__init__.py
index 3eda9ba4..644508c1 100644
--- a/Lib/fontTools/cffLib/__init__.py
+++ b/Lib/fontTools/cffLib/__init__.py
@@ -14,7 +14,14 @@ the demands of variable fonts. This module parses both original CFF and CFF2.
from fontTools.misc import sstruct
from fontTools.misc import psCharStrings
from fontTools.misc.arrayTools import unionRect, intRect
-from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr, safeEval
+from fontTools.misc.textTools import (
+ bytechr,
+ byteord,
+ bytesjoin,
+ tobytes,
+ tostr,
+ safeEval,
+)
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.otBase import OTTableWriter
from fontTools.ttLib.tables.otBase import OTTableReader
@@ -39,2128 +46,2171 @@ maxStackLimit = 513
class StopHintCountEvent(Exception):
- pass
+ pass
class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
- stop_hintcount_ops = ("op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto",
- "op_vmoveto")
-
- def __init__(self, localSubrs, globalSubrs, private=None):
- psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs,
- private)
-
- def execute(self, charString):
- self.need_hintcount = True # until proven otherwise
- for op_name in self.stop_hintcount_ops:
- setattr(self, op_name, self.stop_hint_count)
-
- if hasattr(charString, '_desubroutinized'):
- # If a charstring has already been desubroutinized, we will still
- # need to execute it if we need to count hints in order to
- # compute the byte length for mask arguments, and haven't finished
- # counting hints pairs.
- if self.need_hintcount and self.callingStack:
- try:
- psCharStrings.SimpleT2Decompiler.execute(self, charString)
- except StopHintCountEvent:
- del self.callingStack[-1]
- return
-
- charString._patches = []
- psCharStrings.SimpleT2Decompiler.execute(self, charString)
- desubroutinized = charString.program[:]
- for idx, expansion in reversed(charString._patches):
- assert idx >= 2
- assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1]
- assert type(desubroutinized[idx - 2]) == int
- if expansion[-1] == 'return':
- expansion = expansion[:-1]
- desubroutinized[idx-2:idx] = expansion
- if not self.private.in_cff2:
- if 'endchar' in desubroutinized:
- # Cut off after first endchar
- desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1]
- else:
- if not len(desubroutinized) or desubroutinized[-1] != 'return':
- desubroutinized.append('return')
-
- charString._desubroutinized = desubroutinized
- del charString._patches
-
- def op_callsubr(self, index):
- subr = self.localSubrs[self.operandStack[-1]+self.localBias]
- psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
- self.processSubr(index, subr)
-
- def op_callgsubr(self, index):
- subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
- psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
- self.processSubr(index, subr)
-
- def stop_hint_count(self, *args):
- self.need_hintcount = False
- for op_name in self.stop_hintcount_ops:
- setattr(self, op_name, None)
- cs = self.callingStack[-1]
- if hasattr(cs, '_desubroutinized'):
- raise StopHintCountEvent()
-
- def op_hintmask(self, index):
- psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
- if self.need_hintcount:
- self.stop_hint_count()
-
- def processSubr(self, index, subr):
- cs = self.callingStack[-1]
- if not hasattr(cs, '_desubroutinized'):
- cs._patches.append((index, subr._desubroutinized))
+ stop_hintcount_ops = (
+ "op_hintmask",
+ "op_cntrmask",
+ "op_rmoveto",
+ "op_hmoveto",
+ "op_vmoveto",
+ )
+
+ def __init__(self, localSubrs, globalSubrs, private=None):
+ psCharStrings.SimpleT2Decompiler.__init__(
+ self, localSubrs, globalSubrs, private
+ )
+
+ def execute(self, charString):
+ self.need_hintcount = True # until proven otherwise
+ for op_name in self.stop_hintcount_ops:
+ setattr(self, op_name, self.stop_hint_count)
+
+ if hasattr(charString, "_desubroutinized"):
+ # If a charstring has already been desubroutinized, we will still
+ # need to execute it if we need to count hints in order to
+ # compute the byte length for mask arguments, and haven't finished
+ # counting hints pairs.
+ if self.need_hintcount and self.callingStack:
+ try:
+ psCharStrings.SimpleT2Decompiler.execute(self, charString)
+ except StopHintCountEvent:
+ del self.callingStack[-1]
+ return
+
+ charString._patches = []
+ psCharStrings.SimpleT2Decompiler.execute(self, charString)
+ desubroutinized = charString.program[:]
+ for idx, expansion in reversed(charString._patches):
+ assert idx >= 2
+ assert desubroutinized[idx - 1] in [
+ "callsubr",
+ "callgsubr",
+ ], desubroutinized[idx - 1]
+ assert type(desubroutinized[idx - 2]) == int
+ if expansion[-1] == "return":
+ expansion = expansion[:-1]
+ desubroutinized[idx - 2 : idx] = expansion
+ if not self.private.in_cff2:
+ if "endchar" in desubroutinized:
+ # Cut off after first endchar
+ desubroutinized = desubroutinized[
+ : desubroutinized.index("endchar") + 1
+ ]
+ else:
+ if not len(desubroutinized) or desubroutinized[-1] != "return":
+ desubroutinized.append("return")
+
+ charString._desubroutinized = desubroutinized
+ del charString._patches
+
+ def op_callsubr(self, index):
+ subr = self.localSubrs[self.operandStack[-1] + self.localBias]
+ psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
+ self.processSubr(index, subr)
+
+ def op_callgsubr(self, index):
+ subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
+ psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
+ self.processSubr(index, subr)
+
+ def stop_hint_count(self, *args):
+ self.need_hintcount = False
+ for op_name in self.stop_hintcount_ops:
+ setattr(self, op_name, None)
+ cs = self.callingStack[-1]
+ if hasattr(cs, "_desubroutinized"):
+ raise StopHintCountEvent()
+
+ def op_hintmask(self, index):
+ psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
+ if self.need_hintcount:
+ self.stop_hint_count()
+
+ def processSubr(self, index, subr):
+ cs = self.callingStack[-1]
+ if not hasattr(cs, "_desubroutinized"):
+ cs._patches.append((index, subr._desubroutinized))
class CFFFontSet(object):
- """A CFF font "file" can contain more than one font, although this is
- extremely rare (and not allowed within OpenType fonts).
-
- This class is the entry point for parsing a CFF table. To actually
- manipulate the data inside the CFF font, you will want to access the
- ``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet``
- object can either be treated as a dictionary (with appropriate
- ``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict`
- objects, or as a list.
-
- .. code:: python
-
- from fontTools import ttLib
- tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf")
- tt["CFF "].cff
- # <fontTools.cffLib.CFFFontSet object at 0x101e24c90>
- tt["CFF "].cff[0] # Here's your actual font data
- # <fontTools.cffLib.TopDict object at 0x1020f1fd0>
-
- """
-
- def decompile(self, file, otFont, isCFF2=None):
- """Parse a binary CFF file into an internal representation. ``file``
- should be a file handle object. ``otFont`` is the top-level
- :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
-
- If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
- library makes an assertion that the CFF header is of the appropriate
- version.
- """
-
- self.otFont = otFont
- sstruct.unpack(cffHeaderFormat, file.read(3), self)
- if isCFF2 is not None:
- # called from ttLib: assert 'major' as read from file matches the
- # expected version
- expected_major = (2 if isCFF2 else 1)
- if self.major != expected_major:
- raise ValueError(
- "Invalid CFF 'major' version: expected %d, found %d" %
- (expected_major, self.major))
- else:
- # use 'major' version from file to determine if isCFF2
- assert self.major in (1, 2), "Unknown CFF format"
- isCFF2 = self.major == 2
- if not isCFF2:
- self.offSize = struct.unpack("B", file.read(1))[0]
- file.seek(self.hdrSize)
- self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2))
- self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2)
- self.strings = IndexedStrings(file)
- else: # isCFF2
- self.topDictSize = struct.unpack(">H", file.read(2))[0]
- file.seek(self.hdrSize)
- self.fontNames = ["CFF2Font"]
- cff2GetGlyphOrder = otFont.getGlyphOrder
- # in CFF2, offsetSize is the size of the TopDict data.
- self.topDictIndex = TopDictIndex(
- file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2)
- self.strings = None
- self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2)
- self.topDictIndex.strings = self.strings
- self.topDictIndex.GlobalSubrs = self.GlobalSubrs
-
- def __len__(self):
- return len(self.fontNames)
-
- def keys(self):
- return list(self.fontNames)
-
- def values(self):
- return self.topDictIndex
-
- def __getitem__(self, nameOrIndex):
- """ Return TopDict instance identified by name (str) or index (int
- or any object that implements `__index__`).
- """
- if hasattr(nameOrIndex, "__index__"):
- index = nameOrIndex.__index__()
- elif isinstance(nameOrIndex, str):
- name = nameOrIndex
- try:
- index = self.fontNames.index(name)
- except ValueError:
- raise KeyError(nameOrIndex)
- else:
- raise TypeError(nameOrIndex)
- return self.topDictIndex[index]
-
- def compile(self, file, otFont, isCFF2=None):
- """Write the object back into binary representation onto the given file.
- ``file`` should be a file handle object. ``otFont`` is the top-level
- :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
-
- If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
- library makes an assertion that the CFF header is of the appropriate
- version.
- """
- self.otFont = otFont
- if isCFF2 is not None:
- # called from ttLib: assert 'major' value matches expected version
- expected_major = (2 if isCFF2 else 1)
- if self.major != expected_major:
- raise ValueError(
- "Invalid CFF 'major' version: expected %d, found %d" %
- (expected_major, self.major))
- else:
- # use current 'major' value to determine output format
- assert self.major in (1, 2), "Unknown CFF format"
- isCFF2 = self.major == 2
-
- if otFont.recalcBBoxes and not isCFF2:
- for topDict in self.topDictIndex:
- topDict.recalcFontBBox()
-
- if not isCFF2:
- strings = IndexedStrings()
- else:
- strings = None
- writer = CFFWriter(isCFF2)
- topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2)
- if isCFF2:
- self.hdrSize = 5
- writer.add(sstruct.pack(cffHeaderFormat, self))
- # Note: topDictSize will most likely change in CFFWriter.toFile().
- self.topDictSize = topCompiler.getDataLength()
- writer.add(struct.pack(">H", self.topDictSize))
- else:
- self.hdrSize = 4
- self.offSize = 4 # will most likely change in CFFWriter.toFile().
- writer.add(sstruct.pack(cffHeaderFormat, self))
- writer.add(struct.pack("B", self.offSize))
- if not isCFF2:
- fontNames = Index()
- for name in self.fontNames:
- fontNames.append(name)
- writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2))
- writer.add(topCompiler)
- if not isCFF2:
- writer.add(strings.getCompiler())
- writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2))
-
- for topDict in self.topDictIndex:
- if not hasattr(topDict, "charset") or topDict.charset is None:
- charset = otFont.getGlyphOrder()
- topDict.charset = charset
- children = topCompiler.getChildren(strings)
- for child in children:
- writer.add(child)
-
- writer.toFile(file)
-
- def toXML(self, xmlWriter):
- """Write the object into XML representation onto the given
- :class:`fontTools.misc.xmlWriter.XMLWriter`.
-
- .. code:: python
-
- writer = xmlWriter.XMLWriter(sys.stdout)
- tt["CFF "].cff.toXML(writer)
-
- """
-
- xmlWriter.simpletag("major", value=self.major)
- xmlWriter.newline()
- xmlWriter.simpletag("minor", value=self.minor)
- xmlWriter.newline()
- for fontName in self.fontNames:
- xmlWriter.begintag("CFFFont", name=tostr(fontName))
- xmlWriter.newline()
- font = self[fontName]
- font.toXML(xmlWriter)
- xmlWriter.endtag("CFFFont")
- xmlWriter.newline()
- xmlWriter.newline()
- xmlWriter.begintag("GlobalSubrs")
- xmlWriter.newline()
- self.GlobalSubrs.toXML(xmlWriter)
- xmlWriter.endtag("GlobalSubrs")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, otFont=None):
- """Reads data from the XML element into the ``CFFFontSet`` object."""
- self.otFont = otFont
-
- # set defaults. These will be replaced if there are entries for them
- # in the XML file.
- if not hasattr(self, "major"):
- self.major = 1
- if not hasattr(self, "minor"):
- self.minor = 0
-
- if name == "CFFFont":
- if self.major == 1:
- if not hasattr(self, "offSize"):
- # this will be recalculated when the cff is compiled.
- self.offSize = 4
- if not hasattr(self, "hdrSize"):
- self.hdrSize = 4
- if not hasattr(self, "GlobalSubrs"):
- self.GlobalSubrs = GlobalSubrsIndex()
- if not hasattr(self, "fontNames"):
- self.fontNames = []
- self.topDictIndex = TopDictIndex()
- fontName = attrs["name"]
- self.fontNames.append(fontName)
- topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
- topDict.charset = None # gets filled in later
- elif self.major == 2:
- if not hasattr(self, "hdrSize"):
- self.hdrSize = 5
- if not hasattr(self, "GlobalSubrs"):
- self.GlobalSubrs = GlobalSubrsIndex()
- if not hasattr(self, "fontNames"):
- self.fontNames = ["CFF2Font"]
- cff2GetGlyphOrder = self.otFont.getGlyphOrder
- topDict = TopDict(
- GlobalSubrs=self.GlobalSubrs,
- cff2GetGlyphOrder=cff2GetGlyphOrder)
- self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder)
- self.topDictIndex.append(topDict)
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- topDict.fromXML(name, attrs, content)
-
- if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
- fdArray = topDict.FDArray
- for fontDict in fdArray:
- if hasattr(fontDict, "Private"):
- fontDict.Private.vstore = topDict.VarStore
-
- elif name == "GlobalSubrs":
- subrCharStringClass = psCharStrings.T2CharString
- if not hasattr(self, "GlobalSubrs"):
- self.GlobalSubrs = GlobalSubrsIndex()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- subr = subrCharStringClass()
- subr.fromXML(name, attrs, content)
- self.GlobalSubrs.append(subr)
- elif name == "major":
- self.major = int(attrs['value'])
- elif name == "minor":
- self.minor = int(attrs['value'])
-
- def convertCFFToCFF2(self, otFont):
- """Converts this object from CFF format to CFF2 format. This conversion
- is done 'in-place'. The conversion cannot be reversed.
-
- This assumes a decompiled CFF table. (i.e. that the object has been
- filled via :meth:`decompile`.)"""
- self.major = 2
- cff2GetGlyphOrder = self.otFont.getGlyphOrder
- topDictData = TopDictIndex(None, cff2GetGlyphOrder)
- topDictData.items = self.topDictIndex.items
- self.topDictIndex = topDictData
- topDict = topDictData[0]
- if hasattr(topDict, 'Private'):
- privateDict = topDict.Private
- else:
- privateDict = None
- opOrder = buildOrder(topDictOperators2)
- topDict.order = opOrder
- topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
- for entry in topDictOperators:
- key = entry[1]
- if key not in opOrder:
- if key in topDict.rawDict:
- del topDict.rawDict[key]
- if hasattr(topDict, key):
- delattr(topDict, key)
-
- if not hasattr(topDict, "FDArray"):
- fdArray = topDict.FDArray = FDArrayIndex()
- fdArray.strings = None
- fdArray.GlobalSubrs = topDict.GlobalSubrs
- topDict.GlobalSubrs.fdArray = fdArray
- charStrings = topDict.CharStrings
- if charStrings.charStringsAreIndexed:
- charStrings.charStringsIndex.fdArray = fdArray
- else:
- charStrings.fdArray = fdArray
- fontDict = FontDict()
- fontDict.setCFF2(True)
- fdArray.append(fontDict)
- fontDict.Private = privateDict
- privateOpOrder = buildOrder(privateDictOperators2)
- for entry in privateDictOperators:
- key = entry[1]
- if key not in privateOpOrder:
- if key in privateDict.rawDict:
- # print "Removing private dict", key
- del privateDict.rawDict[key]
- if hasattr(privateDict, key):
- delattr(privateDict, key)
- # print "Removing privateDict attr", key
- else:
- # clean up the PrivateDicts in the fdArray
- fdArray = topDict.FDArray
- privateOpOrder = buildOrder(privateDictOperators2)
- for fontDict in fdArray:
- fontDict.setCFF2(True)
- for key in fontDict.rawDict.keys():
- if key not in fontDict.order:
- del fontDict.rawDict[key]
- if hasattr(fontDict, key):
- delattr(fontDict, key)
-
- privateDict = fontDict.Private
- for entry in privateDictOperators:
- key = entry[1]
- if key not in privateOpOrder:
- if key in privateDict.rawDict:
- # print "Removing private dict", key
- del privateDict.rawDict[key]
- if hasattr(privateDict, key):
- delattr(privateDict, key)
- # print "Removing privateDict attr", key
- # At this point, the Subrs and Charstrings are all still T2Charstring class
- # easiest to fix this by compiling, then decompiling again
- file = BytesIO()
- self.compile(file, otFont, isCFF2=True)
- file.seek(0)
- self.decompile(file, otFont, isCFF2=True)
-
- def desubroutinize(self):
- for fontName in self.fontNames:
- font = self[fontName]
- cs = font.CharStrings
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- c.decompile()
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
- decompiler.execute(c)
- c.program = c._desubroutinized
- del c._desubroutinized
- # Delete all the local subrs
- if hasattr(font, 'FDArray'):
- for fd in font.FDArray:
- pd = fd.Private
- if hasattr(pd, 'Subrs'):
- del pd.Subrs
- if 'Subrs' in pd.rawDict:
- del pd.rawDict['Subrs']
- else:
- pd = font.Private
- if hasattr(pd, 'Subrs'):
- del pd.Subrs
- if 'Subrs' in pd.rawDict:
- del pd.rawDict['Subrs']
- # as well as the global subrs
- self.GlobalSubrs.clear()
+ """A CFF font "file" can contain more than one font, although this is
+ extremely rare (and not allowed within OpenType fonts).
+
+ This class is the entry point for parsing a CFF table. To actually
+ manipulate the data inside the CFF font, you will want to access the
+ ``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet``
+ object can either be treated as a dictionary (with appropriate
+ ``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict`
+ objects, or as a list.
+
+ .. code:: python
+
+ from fontTools import ttLib
+ tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf")
+ tt["CFF "].cff
+ # <fontTools.cffLib.CFFFontSet object at 0x101e24c90>
+ tt["CFF "].cff[0] # Here's your actual font data
+ # <fontTools.cffLib.TopDict object at 0x1020f1fd0>
+
+ """
+
+ def decompile(self, file, otFont, isCFF2=None):
+ """Parse a binary CFF file into an internal representation. ``file``
+ should be a file handle object. ``otFont`` is the top-level
+ :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
+
+ If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
+ library makes an assertion that the CFF header is of the appropriate
+ version.
+ """
+
+ self.otFont = otFont
+ sstruct.unpack(cffHeaderFormat, file.read(3), self)
+ if isCFF2 is not None:
+ # called from ttLib: assert 'major' as read from file matches the
+ # expected version
+ expected_major = 2 if isCFF2 else 1
+ if self.major != expected_major:
+ raise ValueError(
+ "Invalid CFF 'major' version: expected %d, found %d"
+ % (expected_major, self.major)
+ )
+ else:
+ # use 'major' version from file to determine if isCFF2
+ assert self.major in (1, 2), "Unknown CFF format"
+ isCFF2 = self.major == 2
+ if not isCFF2:
+ self.offSize = struct.unpack("B", file.read(1))[0]
+ file.seek(self.hdrSize)
+ self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2))
+ self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2)
+ self.strings = IndexedStrings(file)
+ else: # isCFF2
+ self.topDictSize = struct.unpack(">H", file.read(2))[0]
+ file.seek(self.hdrSize)
+ self.fontNames = ["CFF2Font"]
+ cff2GetGlyphOrder = otFont.getGlyphOrder
+ # in CFF2, offsetSize is the size of the TopDict data.
+ self.topDictIndex = TopDictIndex(
+ file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2
+ )
+ self.strings = None
+ self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2)
+ self.topDictIndex.strings = self.strings
+ self.topDictIndex.GlobalSubrs = self.GlobalSubrs
+
+ def __len__(self):
+ return len(self.fontNames)
+
+ def keys(self):
+ return list(self.fontNames)
+
+ def values(self):
+ return self.topDictIndex
+
+ def __getitem__(self, nameOrIndex):
+ """Return TopDict instance identified by name (str) or index (int
+ or any object that implements `__index__`).
+ """
+ if hasattr(nameOrIndex, "__index__"):
+ index = nameOrIndex.__index__()
+ elif isinstance(nameOrIndex, str):
+ name = nameOrIndex
+ try:
+ index = self.fontNames.index(name)
+ except ValueError:
+ raise KeyError(nameOrIndex)
+ else:
+ raise TypeError(nameOrIndex)
+ return self.topDictIndex[index]
+
+ def compile(self, file, otFont, isCFF2=None):
+ """Write the object back into binary representation onto the given file.
+ ``file`` should be a file handle object. ``otFont`` is the top-level
+ :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
+
+ If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
+ library makes an assertion that the CFF header is of the appropriate
+ version.
+ """
+ self.otFont = otFont
+ if isCFF2 is not None:
+ # called from ttLib: assert 'major' value matches expected version
+ expected_major = 2 if isCFF2 else 1
+ if self.major != expected_major:
+ raise ValueError(
+ "Invalid CFF 'major' version: expected %d, found %d"
+ % (expected_major, self.major)
+ )
+ else:
+ # use current 'major' value to determine output format
+ assert self.major in (1, 2), "Unknown CFF format"
+ isCFF2 = self.major == 2
+
+ if otFont.recalcBBoxes and not isCFF2:
+ for topDict in self.topDictIndex:
+ topDict.recalcFontBBox()
+
+ if not isCFF2:
+ strings = IndexedStrings()
+ else:
+ strings = None
+ writer = CFFWriter(isCFF2)
+ topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2)
+ if isCFF2:
+ self.hdrSize = 5
+ writer.add(sstruct.pack(cffHeaderFormat, self))
+ # Note: topDictSize will most likely change in CFFWriter.toFile().
+ self.topDictSize = topCompiler.getDataLength()
+ writer.add(struct.pack(">H", self.topDictSize))
+ else:
+ self.hdrSize = 4
+ self.offSize = 4 # will most likely change in CFFWriter.toFile().
+ writer.add(sstruct.pack(cffHeaderFormat, self))
+ writer.add(struct.pack("B", self.offSize))
+ if not isCFF2:
+ fontNames = Index()
+ for name in self.fontNames:
+ fontNames.append(name)
+ writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2))
+ writer.add(topCompiler)
+ if not isCFF2:
+ writer.add(strings.getCompiler())
+ writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2))
+
+ for topDict in self.topDictIndex:
+ if not hasattr(topDict, "charset") or topDict.charset is None:
+ charset = otFont.getGlyphOrder()
+ topDict.charset = charset
+ children = topCompiler.getChildren(strings)
+ for child in children:
+ writer.add(child)
+
+ writer.toFile(file)
+
+ def toXML(self, xmlWriter):
+ """Write the object into XML representation onto the given
+ :class:`fontTools.misc.xmlWriter.XMLWriter`.
+
+ .. code:: python
+
+ writer = xmlWriter.XMLWriter(sys.stdout)
+ tt["CFF "].cff.toXML(writer)
+
+ """
+
+ xmlWriter.simpletag("major", value=self.major)
+ xmlWriter.newline()
+ xmlWriter.simpletag("minor", value=self.minor)
+ xmlWriter.newline()
+ for fontName in self.fontNames:
+ xmlWriter.begintag("CFFFont", name=tostr(fontName))
+ xmlWriter.newline()
+ font = self[fontName]
+ font.toXML(xmlWriter)
+ xmlWriter.endtag("CFFFont")
+ xmlWriter.newline()
+ xmlWriter.newline()
+ xmlWriter.begintag("GlobalSubrs")
+ xmlWriter.newline()
+ self.GlobalSubrs.toXML(xmlWriter)
+ xmlWriter.endtag("GlobalSubrs")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, otFont=None):
+ """Reads data from the XML element into the ``CFFFontSet`` object."""
+ self.otFont = otFont
+
+ # set defaults. These will be replaced if there are entries for them
+ # in the XML file.
+ if not hasattr(self, "major"):
+ self.major = 1
+ if not hasattr(self, "minor"):
+ self.minor = 0
+
+ if name == "CFFFont":
+ if self.major == 1:
+ if not hasattr(self, "offSize"):
+ # this will be recalculated when the cff is compiled.
+ self.offSize = 4
+ if not hasattr(self, "hdrSize"):
+ self.hdrSize = 4
+ if not hasattr(self, "GlobalSubrs"):
+ self.GlobalSubrs = GlobalSubrsIndex()
+ if not hasattr(self, "fontNames"):
+ self.fontNames = []
+ self.topDictIndex = TopDictIndex()
+ fontName = attrs["name"]
+ self.fontNames.append(fontName)
+ topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
+ topDict.charset = None # gets filled in later
+ elif self.major == 2:
+ if not hasattr(self, "hdrSize"):
+ self.hdrSize = 5
+ if not hasattr(self, "GlobalSubrs"):
+ self.GlobalSubrs = GlobalSubrsIndex()
+ if not hasattr(self, "fontNames"):
+ self.fontNames = ["CFF2Font"]
+ cff2GetGlyphOrder = self.otFont.getGlyphOrder
+ topDict = TopDict(
+ GlobalSubrs=self.GlobalSubrs, cff2GetGlyphOrder=cff2GetGlyphOrder
+ )
+ self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder)
+ self.topDictIndex.append(topDict)
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ topDict.fromXML(name, attrs, content)
+
+ if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
+ fdArray = topDict.FDArray
+ for fontDict in fdArray:
+ if hasattr(fontDict, "Private"):
+ fontDict.Private.vstore = topDict.VarStore
+
+ elif name == "GlobalSubrs":
+ subrCharStringClass = psCharStrings.T2CharString
+ if not hasattr(self, "GlobalSubrs"):
+ self.GlobalSubrs = GlobalSubrsIndex()
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ subr = subrCharStringClass()
+ subr.fromXML(name, attrs, content)
+ self.GlobalSubrs.append(subr)
+ elif name == "major":
+ self.major = int(attrs["value"])
+ elif name == "minor":
+ self.minor = int(attrs["value"])
+
+ def convertCFFToCFF2(self, otFont):
+ """Converts this object from CFF format to CFF2 format. This conversion
+ is done 'in-place'. The conversion cannot be reversed.
+
+ This assumes a decompiled CFF table. (i.e. that the object has been
+ filled via :meth:`decompile`.)"""
+ self.major = 2
+ cff2GetGlyphOrder = self.otFont.getGlyphOrder
+ topDictData = TopDictIndex(None, cff2GetGlyphOrder)
+ topDictData.items = self.topDictIndex.items
+ self.topDictIndex = topDictData
+ topDict = topDictData[0]
+ if hasattr(topDict, "Private"):
+ privateDict = topDict.Private
+ else:
+ privateDict = None
+ opOrder = buildOrder(topDictOperators2)
+ topDict.order = opOrder
+ topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
+ for entry in topDictOperators:
+ key = entry[1]
+ if key not in opOrder:
+ if key in topDict.rawDict:
+ del topDict.rawDict[key]
+ if hasattr(topDict, key):
+ delattr(topDict, key)
+
+ if not hasattr(topDict, "FDArray"):
+ fdArray = topDict.FDArray = FDArrayIndex()
+ fdArray.strings = None
+ fdArray.GlobalSubrs = topDict.GlobalSubrs
+ topDict.GlobalSubrs.fdArray = fdArray
+ charStrings = topDict.CharStrings
+ if charStrings.charStringsAreIndexed:
+ charStrings.charStringsIndex.fdArray = fdArray
+ else:
+ charStrings.fdArray = fdArray
+ fontDict = FontDict()
+ fontDict.setCFF2(True)
+ fdArray.append(fontDict)
+ fontDict.Private = privateDict
+ privateOpOrder = buildOrder(privateDictOperators2)
+ for entry in privateDictOperators:
+ key = entry[1]
+ if key not in privateOpOrder:
+ if key in privateDict.rawDict:
+ # print "Removing private dict", key
+ del privateDict.rawDict[key]
+ if hasattr(privateDict, key):
+ delattr(privateDict, key)
+ # print "Removing privateDict attr", key
+ else:
+ # clean up the PrivateDicts in the fdArray
+ fdArray = topDict.FDArray
+ privateOpOrder = buildOrder(privateDictOperators2)
+ for fontDict in fdArray:
+ fontDict.setCFF2(True)
+ for key in fontDict.rawDict.keys():
+ if key not in fontDict.order:
+ del fontDict.rawDict[key]
+ if hasattr(fontDict, key):
+ delattr(fontDict, key)
+
+ privateDict = fontDict.Private
+ for entry in privateDictOperators:
+ key = entry[1]
+ if key not in privateOpOrder:
+ if key in privateDict.rawDict:
+ # print "Removing private dict", key
+ del privateDict.rawDict[key]
+ if hasattr(privateDict, key):
+ delattr(privateDict, key)
+ # print "Removing privateDict attr", key
+ # At this point, the Subrs and Charstrings are all still T2Charstring class
+ # easiest to fix this by compiling, then decompiling again
+ file = BytesIO()
+ self.compile(file, otFont, isCFF2=True)
+ file.seek(0)
+ self.decompile(file, otFont, isCFF2=True)
+
+ def desubroutinize(self):
+ for fontName in self.fontNames:
+ font = self[fontName]
+ cs = font.CharStrings
+ for g in font.charset:
+ c, _ = cs.getItemAndSelector(g)
+ c.decompile()
+ subrs = getattr(c.private, "Subrs", [])
+ decompiler = _DesubroutinizingT2Decompiler(
+ subrs, c.globalSubrs, c.private
+ )
+ decompiler.execute(c)
+ c.program = c._desubroutinized
+ del c._desubroutinized
+ # Delete all the local subrs
+ if hasattr(font, "FDArray"):
+ for fd in font.FDArray:
+ pd = fd.Private
+ if hasattr(pd, "Subrs"):
+ del pd.Subrs
+ if "Subrs" in pd.rawDict:
+ del pd.rawDict["Subrs"]
+ else:
+ pd = font.Private
+ if hasattr(pd, "Subrs"):
+ del pd.Subrs
+ if "Subrs" in pd.rawDict:
+ del pd.rawDict["Subrs"]
+ # as well as the global subrs
+ self.GlobalSubrs.clear()
class CFFWriter(object):
- """Helper class for serializing CFF data to binary. Used by
- :meth:`CFFFontSet.compile`."""
- def __init__(self, isCFF2):
- self.data = []
- self.isCFF2 = isCFF2
-
- def add(self, table):
- self.data.append(table)
-
- def toFile(self, file):
- lastPosList = None
- count = 1
- while True:
- log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
- count = count + 1
- pos = 0
- posList = [pos]
- for item in self.data:
- if hasattr(item, "getDataLength"):
- endPos = pos + item.getDataLength()
- if isinstance(item, TopDictIndexCompiler) and item.isCFF2:
- self.topDictSize = item.getDataLength()
- else:
- endPos = pos + len(item)
- if hasattr(item, "setPos"):
- item.setPos(pos, endPos)
- pos = endPos
- posList.append(pos)
- if posList == lastPosList:
- break
- lastPosList = posList
- log.log(DEBUG, "CFFWriter.toFile() writing to file.")
- begin = file.tell()
- if self.isCFF2:
- self.data[1] = struct.pack(">H", self.topDictSize)
- else:
- self.offSize = calcOffSize(lastPosList[-1])
- self.data[1] = struct.pack("B", self.offSize)
- posList = [0]
- for item in self.data:
- if hasattr(item, "toFile"):
- item.toFile(file)
- else:
- file.write(item)
- posList.append(file.tell() - begin)
- assert posList == lastPosList
+ """Helper class for serializing CFF data to binary. Used by
+ :meth:`CFFFontSet.compile`."""
+
+ def __init__(self, isCFF2):
+ self.data = []
+ self.isCFF2 = isCFF2
+
+ def add(self, table):
+ self.data.append(table)
+
+ def toFile(self, file):
+ lastPosList = None
+ count = 1
+ while True:
+ log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
+ count = count + 1
+ pos = 0
+ posList = [pos]
+ for item in self.data:
+ if hasattr(item, "getDataLength"):
+ endPos = pos + item.getDataLength()
+ if isinstance(item, TopDictIndexCompiler) and item.isCFF2:
+ self.topDictSize = item.getDataLength()
+ else:
+ endPos = pos + len(item)
+ if hasattr(item, "setPos"):
+ item.setPos(pos, endPos)
+ pos = endPos
+ posList.append(pos)
+ if posList == lastPosList:
+ break
+ lastPosList = posList
+ log.log(DEBUG, "CFFWriter.toFile() writing to file.")
+ begin = file.tell()
+ if self.isCFF2:
+ self.data[1] = struct.pack(">H", self.topDictSize)
+ else:
+ self.offSize = calcOffSize(lastPosList[-1])
+ self.data[1] = struct.pack("B", self.offSize)
+ posList = [0]
+ for item in self.data:
+ if hasattr(item, "toFile"):
+ item.toFile(file)
+ else:
+ file.write(item)
+ posList.append(file.tell() - begin)
+ assert posList == lastPosList
def calcOffSize(largestOffset):
- if largestOffset < 0x100:
- offSize = 1
- elif largestOffset < 0x10000:
- offSize = 2
- elif largestOffset < 0x1000000:
- offSize = 3
- else:
- offSize = 4
- return offSize
+ if largestOffset < 0x100:
+ offSize = 1
+ elif largestOffset < 0x10000:
+ offSize = 2
+ elif largestOffset < 0x1000000:
+ offSize = 3
+ else:
+ offSize = 4
+ return offSize
class IndexCompiler(object):
- """Base class for writing CFF `INDEX data <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#5-index-data>`_
- to binary."""
-
- def __init__(self, items, strings, parent, isCFF2=None):
- if isCFF2 is None and hasattr(parent, "isCFF2"):
- isCFF2 = parent.isCFF2
- assert isCFF2 is not None
- self.isCFF2 = isCFF2
- self.items = self.getItems(items, strings)
- self.parent = parent
-
- def getItems(self, items, strings):
- return items
-
- def getOffsets(self):
- # An empty INDEX contains only the count field.
- if self.items:
- pos = 1
- offsets = [pos]
- for item in self.items:
- if hasattr(item, "getDataLength"):
- pos = pos + item.getDataLength()
- else:
- pos = pos + len(item)
- offsets.append(pos)
- else:
- offsets = []
- return offsets
-
- def getDataLength(self):
- if self.isCFF2:
- countSize = 4
- else:
- countSize = 2
-
- if self.items:
- lastOffset = self.getOffsets()[-1]
- offSize = calcOffSize(lastOffset)
- dataLength = (
- countSize + # count
- 1 + # offSize
- (len(self.items) + 1) * offSize + # the offsets
- lastOffset - 1 # size of object data
- )
- else:
- # count. For empty INDEX tables, this is the only entry.
- dataLength = countSize
-
- return dataLength
-
- def toFile(self, file):
- offsets = self.getOffsets()
- if self.isCFF2:
- writeCard32(file, len(self.items))
- else:
- writeCard16(file, len(self.items))
- # An empty INDEX contains only the count field.
- if self.items:
- offSize = calcOffSize(offsets[-1])
- writeCard8(file, offSize)
- offSize = -offSize
- pack = struct.pack
- for offset in offsets:
- binOffset = pack(">l", offset)[offSize:]
- assert len(binOffset) == -offSize
- file.write(binOffset)
- for item in self.items:
- if hasattr(item, "toFile"):
- item.toFile(file)
- else:
- data = tobytes(item, encoding="latin1")
- file.write(data)
+ """Base class for writing CFF `INDEX data <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#5-index-data>`_
+ to binary."""
+
+ def __init__(self, items, strings, parent, isCFF2=None):
+ if isCFF2 is None and hasattr(parent, "isCFF2"):
+ isCFF2 = parent.isCFF2
+ assert isCFF2 is not None
+ self.isCFF2 = isCFF2
+ self.items = self.getItems(items, strings)
+ self.parent = parent
+
+ def getItems(self, items, strings):
+ return items
+
+ def getOffsets(self):
+ # An empty INDEX contains only the count field.
+ if self.items:
+ pos = 1
+ offsets = [pos]
+ for item in self.items:
+ if hasattr(item, "getDataLength"):
+ pos = pos + item.getDataLength()
+ else:
+ pos = pos + len(item)
+ offsets.append(pos)
+ else:
+ offsets = []
+ return offsets
+
+ def getDataLength(self):
+ if self.isCFF2:
+ countSize = 4
+ else:
+ countSize = 2
+
+ if self.items:
+ lastOffset = self.getOffsets()[-1]
+ offSize = calcOffSize(lastOffset)
+ dataLength = (
+ countSize
+ + 1 # count
+ + (len(self.items) + 1) * offSize # offSize
+ + lastOffset # the offsets
+ - 1 # size of object data
+ )
+ else:
+ # count. For empty INDEX tables, this is the only entry.
+ dataLength = countSize
+
+ return dataLength
+
+ def toFile(self, file):
+ offsets = self.getOffsets()
+ if self.isCFF2:
+ writeCard32(file, len(self.items))
+ else:
+ writeCard16(file, len(self.items))
+ # An empty INDEX contains only the count field.
+ if self.items:
+ offSize = calcOffSize(offsets[-1])
+ writeCard8(file, offSize)
+ offSize = -offSize
+ pack = struct.pack
+ for offset in offsets:
+ binOffset = pack(">l", offset)[offSize:]
+ assert len(binOffset) == -offSize
+ file.write(binOffset)
+ for item in self.items:
+ if hasattr(item, "toFile"):
+ item.toFile(file)
+ else:
+ data = tobytes(item, encoding="latin1")
+ file.write(data)
class IndexedStringsCompiler(IndexCompiler):
-
- def getItems(self, items, strings):
- return items.strings
+ def getItems(self, items, strings):
+ return items.strings
class TopDictIndexCompiler(IndexCompiler):
- """Helper class for writing the TopDict to binary."""
-
- def getItems(self, items, strings):
- out = []
- for item in items:
- out.append(item.getCompiler(strings, self))
- return out
-
- def getChildren(self, strings):
- children = []
- for topDict in self.items:
- children.extend(topDict.getChildren(strings))
- return children
-
- def getOffsets(self):
- if self.isCFF2:
- offsets = [0, self.items[0].getDataLength()]
- return offsets
- else:
- return super(TopDictIndexCompiler, self).getOffsets()
-
- def getDataLength(self):
- if self.isCFF2:
- dataLength = self.items[0].getDataLength()
- return dataLength
- else:
- return super(TopDictIndexCompiler, self).getDataLength()
-
- def toFile(self, file):
- if self.isCFF2:
- self.items[0].toFile(file)
- else:
- super(TopDictIndexCompiler, self).toFile(file)
+ """Helper class for writing the TopDict to binary."""
+
+ def getItems(self, items, strings):
+ out = []
+ for item in items:
+ out.append(item.getCompiler(strings, self))
+ return out
+
+ def getChildren(self, strings):
+ children = []
+ for topDict in self.items:
+ children.extend(topDict.getChildren(strings))
+ return children
+
+ def getOffsets(self):
+ if self.isCFF2:
+ offsets = [0, self.items[0].getDataLength()]
+ return offsets
+ else:
+ return super(TopDictIndexCompiler, self).getOffsets()
+
+ def getDataLength(self):
+ if self.isCFF2:
+ dataLength = self.items[0].getDataLength()
+ return dataLength
+ else:
+ return super(TopDictIndexCompiler, self).getDataLength()
+
+ def toFile(self, file):
+ if self.isCFF2:
+ self.items[0].toFile(file)
+ else:
+ super(TopDictIndexCompiler, self).toFile(file)
class FDArrayIndexCompiler(IndexCompiler):
- """Helper class for writing the
- `Font DICT INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#10-font-dict-index-font-dicts-and-fdselect>`_
- to binary."""
-
- def getItems(self, items, strings):
- out = []
- for item in items:
- out.append(item.getCompiler(strings, self))
- return out
-
- def getChildren(self, strings):
- children = []
- for fontDict in self.items:
- children.extend(fontDict.getChildren(strings))
- return children
-
- def toFile(self, file):
- offsets = self.getOffsets()
- if self.isCFF2:
- writeCard32(file, len(self.items))
- else:
- writeCard16(file, len(self.items))
- offSize = calcOffSize(offsets[-1])
- writeCard8(file, offSize)
- offSize = -offSize
- pack = struct.pack
- for offset in offsets:
- binOffset = pack(">l", offset)[offSize:]
- assert len(binOffset) == -offSize
- file.write(binOffset)
- for item in self.items:
- if hasattr(item, "toFile"):
- item.toFile(file)
- else:
- file.write(item)
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["FDArray"] = pos
+ """Helper class for writing the
+ `Font DICT INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#10-font-dict-index-font-dicts-and-fdselect>`_
+ to binary."""
+
+ def getItems(self, items, strings):
+ out = []
+ for item in items:
+ out.append(item.getCompiler(strings, self))
+ return out
+
+ def getChildren(self, strings):
+ children = []
+ for fontDict in self.items:
+ children.extend(fontDict.getChildren(strings))
+ return children
+
+ def toFile(self, file):
+ offsets = self.getOffsets()
+ if self.isCFF2:
+ writeCard32(file, len(self.items))
+ else:
+ writeCard16(file, len(self.items))
+ offSize = calcOffSize(offsets[-1])
+ writeCard8(file, offSize)
+ offSize = -offSize
+ pack = struct.pack
+ for offset in offsets:
+ binOffset = pack(">l", offset)[offSize:]
+ assert len(binOffset) == -offSize
+ file.write(binOffset)
+ for item in self.items:
+ if hasattr(item, "toFile"):
+ item.toFile(file)
+ else:
+ file.write(item)
+
+ def setPos(self, pos, endPos):
+ self.parent.rawDict["FDArray"] = pos
class GlobalSubrsCompiler(IndexCompiler):
- """Helper class for writing the `global subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
- to binary."""
+ """Helper class for writing the `global subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
+ to binary."""
- def getItems(self, items, strings):
- out = []
- for cs in items:
- cs.compile(self.isCFF2)
- out.append(cs.bytecode)
- return out
+ def getItems(self, items, strings):
+ out = []
+ for cs in items:
+ cs.compile(self.isCFF2)
+ out.append(cs.bytecode)
+ return out
class SubrsCompiler(GlobalSubrsCompiler):
- """Helper class for writing the `local subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
- to binary."""
-
- def setPos(self, pos, endPos):
- offset = pos - self.parent.pos
- self.parent.rawDict["Subrs"] = offset
+ """Helper class for writing the `local subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
+ to binary."""
+
+ def setPos(self, pos, endPos):
+ offset = pos - self.parent.pos
+ self.parent.rawDict["Subrs"] = offset
class CharStringsCompiler(GlobalSubrsCompiler):
- """Helper class for writing the `CharStrings INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
- to binary."""
- def getItems(self, items, strings):
- out = []
- for cs in items:
- cs.compile(self.isCFF2)
- out.append(cs.bytecode)
- return out
+ """Helper class for writing the `CharStrings INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
+ to binary."""
- def setPos(self, pos, endPos):
- self.parent.rawDict["CharStrings"] = pos
+ def getItems(self, items, strings):
+ out = []
+ for cs in items:
+ cs.compile(self.isCFF2)
+ out.append(cs.bytecode)
+ return out
+
+ def setPos(self, pos, endPos):
+ self.parent.rawDict["CharStrings"] = pos
class Index(object):
- """This class represents what the CFF spec calls an INDEX (an array of
- variable-sized objects). `Index` items can be addressed and set using
- Python list indexing."""
-
- compilerClass = IndexCompiler
-
- def __init__(self, file=None, isCFF2=None):
- assert (isCFF2 is None) == (file is None)
- self.items = []
- name = self.__class__.__name__
- if file is None:
- return
- self._isCFF2 = isCFF2
- log.log(DEBUG, "loading %s at %s", name, file.tell())
- self.file = file
- if isCFF2:
- count = readCard32(file)
- else:
- count = readCard16(file)
- if count == 0:
- return
- self.items = [None] * count
- offSize = readCard8(file)
- log.log(DEBUG, " index count: %s offSize: %s", count, offSize)
- assert offSize <= 4, "offSize too large: %s" % offSize
- self.offsets = offsets = []
- pad = b'\0' * (4 - offSize)
- for index in range(count + 1):
- chunk = file.read(offSize)
- chunk = pad + chunk
- offset, = struct.unpack(">L", chunk)
- offsets.append(int(offset))
- self.offsetBase = file.tell() - 1
- file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot
- log.log(DEBUG, " end of %s at %s", name, file.tell())
-
- def __len__(self):
- return len(self.items)
-
- def __getitem__(self, index):
- item = self.items[index]
- if item is not None:
- return item
- offset = self.offsets[index] + self.offsetBase
- size = self.offsets[index + 1] - self.offsets[index]
- file = self.file
- file.seek(offset)
- data = file.read(size)
- assert len(data) == size
- item = self.produceItem(index, data, file, offset)
- self.items[index] = item
- return item
-
- def __setitem__(self, index, item):
- self.items[index] = item
-
- def produceItem(self, index, data, file, offset):
- return data
-
- def append(self, item):
- """Add an item to an INDEX."""
- self.items.append(item)
-
- def getCompiler(self, strings, parent, isCFF2=None):
- return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
-
- def clear(self):
- """Empty the INDEX."""
- del self.items[:]
+ """This class represents what the CFF spec calls an INDEX (an array of
+ variable-sized objects). `Index` items can be addressed and set using
+ Python list indexing."""
+
+ compilerClass = IndexCompiler
+
+ def __init__(self, file=None, isCFF2=None):
+ assert (isCFF2 is None) == (file is None)
+ self.items = []
+ name = self.__class__.__name__
+ if file is None:
+ return
+ self._isCFF2 = isCFF2
+ log.log(DEBUG, "loading %s at %s", name, file.tell())
+ self.file = file
+ if isCFF2:
+ count = readCard32(file)
+ else:
+ count = readCard16(file)
+ if count == 0:
+ return
+ self.items = [None] * count
+ offSize = readCard8(file)
+ log.log(DEBUG, " index count: %s offSize: %s", count, offSize)
+ assert offSize <= 4, "offSize too large: %s" % offSize
+ self.offsets = offsets = []
+ pad = b"\0" * (4 - offSize)
+ for index in range(count + 1):
+ chunk = file.read(offSize)
+ chunk = pad + chunk
+ (offset,) = struct.unpack(">L", chunk)
+ offsets.append(int(offset))
+ self.offsetBase = file.tell() - 1
+ file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot
+ log.log(DEBUG, " end of %s at %s", name, file.tell())
+
+ def __len__(self):
+ return len(self.items)
+
+ def __getitem__(self, index):
+ item = self.items[index]
+ if item is not None:
+ return item
+ offset = self.offsets[index] + self.offsetBase
+ size = self.offsets[index + 1] - self.offsets[index]
+ file = self.file
+ file.seek(offset)
+ data = file.read(size)
+ assert len(data) == size
+ item = self.produceItem(index, data, file, offset)
+ self.items[index] = item
+ return item
+
+ def __setitem__(self, index, item):
+ self.items[index] = item
+
+ def produceItem(self, index, data, file, offset):
+ return data
+
+ def append(self, item):
+ """Add an item to an INDEX."""
+ self.items.append(item)
+
+ def getCompiler(self, strings, parent, isCFF2=None):
+ return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
+
+ def clear(self):
+ """Empty the INDEX."""
+ del self.items[:]
class GlobalSubrsIndex(Index):
- """This index contains all the global subroutines in the font. A global
- subroutine is a set of ``CharString`` data which is accessible to any
- glyph in the font, and are used to store repeated instructions - for
- example, components may be encoded as global subroutines, but so could
- hinting instructions.
-
- Remember that when interpreting a ``callgsubr`` instruction (or indeed
- a ``callsubr`` instruction) that you will need to add the "subroutine
- number bias" to number given:
-
- .. code:: python
-
- tt = ttLib.TTFont("Almendra-Bold.otf")
- u = tt["CFF "].cff[0].CharStrings["udieresis"]
- u.decompile()
-
- u.toXML(XMLWriter(sys.stdout))
- # <some stuff>
- # -64 callgsubr <-- Subroutine which implements the dieresis mark
- # <other stuff>
-
- tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG
- # <T2CharString (bytecode) at 103451d10>
-
- tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT
- # <T2CharString (source) at 103451390>
-
- ("The bias applied depends on the number of subrs (gsubrs). If the number of
- subrs (gsubrs) is less than 1240, the bias is 107. Otherwise if it is less
- than 33900, it is 1131; otherwise it is 32768.",
- `Subroutine Operators <https://docs.microsoft.com/en-us/typography/opentype/otspec180/cff2charstr#section4.4>`)
- """
-
- compilerClass = GlobalSubrsCompiler
- subrClass = psCharStrings.T2CharString
- charStringClass = psCharStrings.T2CharString
-
- def __init__(self, file=None, globalSubrs=None, private=None,
- fdSelect=None, fdArray=None, isCFF2=None):
- super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2)
- self.globalSubrs = globalSubrs
- self.private = private
- if fdSelect:
- self.fdSelect = fdSelect
- if fdArray:
- self.fdArray = fdArray
-
- def produceItem(self, index, data, file, offset):
- if self.private is not None:
- private = self.private
- elif hasattr(self, 'fdArray') and self.fdArray is not None:
- if hasattr(self, 'fdSelect') and self.fdSelect is not None:
- fdIndex = self.fdSelect[index]
- else:
- fdIndex = 0
- private = self.fdArray[fdIndex].Private
- else:
- private = None
- return self.subrClass(data, private=private, globalSubrs=self.globalSubrs)
-
- def toXML(self, xmlWriter):
- """Write the subroutines index into XML representation onto the given
- :class:`fontTools.misc.xmlWriter.XMLWriter`.
-
- .. code:: python
-
- writer = xmlWriter.XMLWriter(sys.stdout)
- tt["CFF "].cff[0].GlobalSubrs.toXML(writer)
-
- """
- xmlWriter.comment(
- "The 'index' attribute is only for humans; "
- "it is ignored when parsed.")
- xmlWriter.newline()
- for i in range(len(self)):
- subr = self[i]
- if subr.needsDecompilation():
- xmlWriter.begintag("CharString", index=i, raw=1)
- else:
- xmlWriter.begintag("CharString", index=i)
- xmlWriter.newline()
- subr.toXML(xmlWriter)
- xmlWriter.endtag("CharString")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content):
- if name != "CharString":
- return
- subr = self.subrClass()
- subr.fromXML(name, attrs, content)
- self.append(subr)
-
- def getItemAndSelector(self, index):
- sel = None
- if hasattr(self, 'fdSelect'):
- sel = self.fdSelect[index]
- return self[index], sel
+ """This index contains all the global subroutines in the font. A global
+ subroutine is a set of ``CharString`` data which is accessible to any
+ glyph in the font, and are used to store repeated instructions - for
+ example, components may be encoded as global subroutines, but so could
+ hinting instructions.
+
+ Remember that when interpreting a ``callgsubr`` instruction (or indeed
+ a ``callsubr`` instruction) that you will need to add the "subroutine
+ number bias" to number given:
+
+ .. code:: python
+
+ tt = ttLib.TTFont("Almendra-Bold.otf")
+ u = tt["CFF "].cff[0].CharStrings["udieresis"]
+ u.decompile()
+
+ u.toXML(XMLWriter(sys.stdout))
+ # <some stuff>
+ # -64 callgsubr <-- Subroutine which implements the dieresis mark
+ # <other stuff>
+
+ tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG
+ # <T2CharString (bytecode) at 103451d10>
+
+ tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT
+ # <T2CharString (source) at 103451390>
+
+ ("The bias applied depends on the number of subrs (gsubrs). If the number of
+ subrs (gsubrs) is less than 1240, the bias is 107. Otherwise if it is less
+ than 33900, it is 1131; otherwise it is 32768.",
+ `Subroutine Operators <https://docs.microsoft.com/en-us/typography/opentype/otspec180/cff2charstr#section4.4>`)
+ """
+
+ compilerClass = GlobalSubrsCompiler
+ subrClass = psCharStrings.T2CharString
+ charStringClass = psCharStrings.T2CharString
+
+ def __init__(
+ self,
+ file=None,
+ globalSubrs=None,
+ private=None,
+ fdSelect=None,
+ fdArray=None,
+ isCFF2=None,
+ ):
+ super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2)
+ self.globalSubrs = globalSubrs
+ self.private = private
+ if fdSelect:
+ self.fdSelect = fdSelect
+ if fdArray:
+ self.fdArray = fdArray
+
+ def produceItem(self, index, data, file, offset):
+ if self.private is not None:
+ private = self.private
+ elif hasattr(self, "fdArray") and self.fdArray is not None:
+ if hasattr(self, "fdSelect") and self.fdSelect is not None:
+ fdIndex = self.fdSelect[index]
+ else:
+ fdIndex = 0
+ private = self.fdArray[fdIndex].Private
+ else:
+ private = None
+ return self.subrClass(data, private=private, globalSubrs=self.globalSubrs)
+
+ def toXML(self, xmlWriter):
+ """Write the subroutines index into XML representation onto the given
+ :class:`fontTools.misc.xmlWriter.XMLWriter`.
+
+ .. code:: python
+
+ writer = xmlWriter.XMLWriter(sys.stdout)
+ tt["CFF "].cff[0].GlobalSubrs.toXML(writer)
+
+ """
+ xmlWriter.comment(
+ "The 'index' attribute is only for humans; " "it is ignored when parsed."
+ )
+ xmlWriter.newline()
+ for i in range(len(self)):
+ subr = self[i]
+ if subr.needsDecompilation():
+ xmlWriter.begintag("CharString", index=i, raw=1)
+ else:
+ xmlWriter.begintag("CharString", index=i)
+ xmlWriter.newline()
+ subr.toXML(xmlWriter)
+ xmlWriter.endtag("CharString")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content):
+ if name != "CharString":
+ return
+ subr = self.subrClass()
+ subr.fromXML(name, attrs, content)
+ self.append(subr)
+
+ def getItemAndSelector(self, index):
+ sel = None
+ if hasattr(self, "fdSelect"):
+ sel = self.fdSelect[index]
+ return self[index], sel
class SubrsIndex(GlobalSubrsIndex):
- """This index contains a glyph's local subroutines. A local subroutine is a
- private set of ``CharString`` data which is accessible only to the glyph to
- which the index is attached."""
+ """This index contains a glyph's local subroutines. A local subroutine is a
+ private set of ``CharString`` data which is accessible only to the glyph to
+ which the index is attached."""
- compilerClass = SubrsCompiler
+ compilerClass = SubrsCompiler
class TopDictIndex(Index):
- """This index represents the array of ``TopDict`` structures in the font
- (again, usually only one entry is present). Hence the following calls are
- equivalent:
-
- .. code:: python
-
- tt["CFF "].cff[0]
- # <fontTools.cffLib.TopDict object at 0x102ed6e50>
- tt["CFF "].cff.topDictIndex[0]
- # <fontTools.cffLib.TopDict object at 0x102ed6e50>
-
- """
-
- compilerClass = TopDictIndexCompiler
-
- def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0,
- isCFF2=None):
- assert (isCFF2 is None) == (file is None)
- self.cff2GetGlyphOrder = cff2GetGlyphOrder
- if file is not None and isCFF2:
- self._isCFF2 = isCFF2
- self.items = []
- name = self.__class__.__name__
- log.log(DEBUG, "loading %s at %s", name, file.tell())
- self.file = file
- count = 1
- self.items = [None] * count
- self.offsets = [0, topSize]
- self.offsetBase = file.tell()
- # pretend we've read the whole lot
- file.seek(self.offsetBase + topSize)
- log.log(DEBUG, " end of %s at %s", name, file.tell())
- else:
- super(TopDictIndex, self).__init__(file, isCFF2=isCFF2)
-
- def produceItem(self, index, data, file, offset):
- top = TopDict(
- self.strings, file, offset, self.GlobalSubrs,
- self.cff2GetGlyphOrder, isCFF2=self._isCFF2)
- top.decompile(data)
- return top
-
- def toXML(self, xmlWriter):
- for i in range(len(self)):
- xmlWriter.begintag("FontDict", index=i)
- xmlWriter.newline()
- self[i].toXML(xmlWriter)
- xmlWriter.endtag("FontDict")
- xmlWriter.newline()
+ """This index represents the array of ``TopDict`` structures in the font
+ (again, usually only one entry is present). Hence the following calls are
+ equivalent:
+
+ .. code:: python
+
+ tt["CFF "].cff[0]
+ # <fontTools.cffLib.TopDict object at 0x102ed6e50>
+ tt["CFF "].cff.topDictIndex[0]
+ # <fontTools.cffLib.TopDict object at 0x102ed6e50>
+
+ """
+
+ compilerClass = TopDictIndexCompiler
+
+ def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, isCFF2=None):
+ assert (isCFF2 is None) == (file is None)
+ self.cff2GetGlyphOrder = cff2GetGlyphOrder
+ if file is not None and isCFF2:
+ self._isCFF2 = isCFF2
+ self.items = []
+ name = self.__class__.__name__
+ log.log(DEBUG, "loading %s at %s", name, file.tell())
+ self.file = file
+ count = 1
+ self.items = [None] * count
+ self.offsets = [0, topSize]
+ self.offsetBase = file.tell()
+ # pretend we've read the whole lot
+ file.seek(self.offsetBase + topSize)
+ log.log(DEBUG, " end of %s at %s", name, file.tell())
+ else:
+ super(TopDictIndex, self).__init__(file, isCFF2=isCFF2)
+
+ def produceItem(self, index, data, file, offset):
+ top = TopDict(
+ self.strings,
+ file,
+ offset,
+ self.GlobalSubrs,
+ self.cff2GetGlyphOrder,
+ isCFF2=self._isCFF2,
+ )
+ top.decompile(data)
+ return top
+
+ def toXML(self, xmlWriter):
+ for i in range(len(self)):
+ xmlWriter.begintag("FontDict", index=i)
+ xmlWriter.newline()
+ self[i].toXML(xmlWriter)
+ xmlWriter.endtag("FontDict")
+ xmlWriter.newline()
class FDArrayIndex(Index):
-
- compilerClass = FDArrayIndexCompiler
-
- def toXML(self, xmlWriter):
- for i in range(len(self)):
- xmlWriter.begintag("FontDict", index=i)
- xmlWriter.newline()
- self[i].toXML(xmlWriter)
- xmlWriter.endtag("FontDict")
- xmlWriter.newline()
-
- def produceItem(self, index, data, file, offset):
- fontDict = FontDict(
- self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2,
- vstore=self.vstore)
- fontDict.decompile(data)
- return fontDict
-
- def fromXML(self, name, attrs, content):
- if name != "FontDict":
- return
- fontDict = FontDict()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- fontDict.fromXML(name, attrs, content)
- self.append(fontDict)
+ compilerClass = FDArrayIndexCompiler
+
+ def toXML(self, xmlWriter):
+ for i in range(len(self)):
+ xmlWriter.begintag("FontDict", index=i)
+ xmlWriter.newline()
+ self[i].toXML(xmlWriter)
+ xmlWriter.endtag("FontDict")
+ xmlWriter.newline()
+
+ def produceItem(self, index, data, file, offset):
+ fontDict = FontDict(
+ self.strings,
+ file,
+ offset,
+ self.GlobalSubrs,
+ isCFF2=self._isCFF2,
+ vstore=self.vstore,
+ )
+ fontDict.decompile(data)
+ return fontDict
+
+ def fromXML(self, name, attrs, content):
+ if name != "FontDict":
+ return
+ fontDict = FontDict()
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ fontDict.fromXML(name, attrs, content)
+ self.append(fontDict)
class VarStoreData(object):
-
- def __init__(self, file=None, otVarStore=None):
- self.file = file
- self.data = None
- self.otVarStore = otVarStore
- self.font = TTFont() # dummy font for the decompile function.
-
- def decompile(self):
- if self.file:
- # read data in from file. Assume position is correct.
- length = readCard16(self.file)
- self.data = self.file.read(length)
- globalState = {}
- reader = OTTableReader(self.data, globalState)
- self.otVarStore = ot.VarStore()
- self.otVarStore.decompile(reader, self.font)
- return self
-
- def compile(self):
- writer = OTTableWriter()
- self.otVarStore.compile(writer, self.font)
- # Note that this omits the initial Card16 length from the CFF2
- # VarStore data block
- self.data = writer.getAllData()
-
- def writeXML(self, xmlWriter, name):
- self.otVarStore.toXML(xmlWriter, self.font)
-
- def xmlRead(self, name, attrs, content, parent):
- self.otVarStore = ot.VarStore()
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- self.otVarStore.fromXML(name, attrs, content, self.font)
- else:
- pass
- return None
-
- def __len__(self):
- return len(self.data)
-
- def getNumRegions(self, vsIndex):
- if vsIndex is None:
- vsIndex = 0
- varData = self.otVarStore.VarData[vsIndex]
- numRegions = varData.VarRegionCount
- return numRegions
+ def __init__(self, file=None, otVarStore=None):
+ self.file = file
+ self.data = None
+ self.otVarStore = otVarStore
+ self.font = TTFont() # dummy font for the decompile function.
+
+ def decompile(self):
+ if self.file:
+ # read data in from file. Assume position is correct.
+ length = readCard16(self.file)
+ self.data = self.file.read(length)
+ globalState = {}
+ reader = OTTableReader(self.data, globalState)
+ self.otVarStore = ot.VarStore()
+ self.otVarStore.decompile(reader, self.font)
+ return self
+
+ def compile(self):
+ writer = OTTableWriter()
+ self.otVarStore.compile(writer, self.font)
+ # Note that this omits the initial Card16 length from the CFF2
+ # VarStore data block
+ self.data = writer.getAllData()
+
+ def writeXML(self, xmlWriter, name):
+ self.otVarStore.toXML(xmlWriter, self.font)
+
+ def xmlRead(self, name, attrs, content, parent):
+ self.otVarStore = ot.VarStore()
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ self.otVarStore.fromXML(name, attrs, content, self.font)
+ else:
+ pass
+ return None
+
+ def __len__(self):
+ return len(self.data)
+
+ def getNumRegions(self, vsIndex):
+ if vsIndex is None:
+ vsIndex = 0
+ varData = self.otVarStore.VarData[vsIndex]
+ numRegions = varData.VarRegionCount
+ return numRegions
class FDSelect(object):
-
- def __init__(self, file=None, numGlyphs=None, format=None):
- if file:
- # read data in from file
- self.format = readCard8(file)
- if self.format == 0:
- from array import array
- self.gidArray = array("B", file.read(numGlyphs)).tolist()
- elif self.format == 3:
- gidArray = [None] * numGlyphs
- nRanges = readCard16(file)
- fd = None
- prev = None
- for i in range(nRanges):
- first = readCard16(file)
- if prev is not None:
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- prev = first
- fd = readCard8(file)
- if prev is not None:
- first = readCard16(file)
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- self.gidArray = gidArray
- elif self.format == 4:
- gidArray = [None] * numGlyphs
- nRanges = readCard32(file)
- fd = None
- prev = None
- for i in range(nRanges):
- first = readCard32(file)
- if prev is not None:
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- prev = first
- fd = readCard16(file)
- if prev is not None:
- first = readCard32(file)
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- self.gidArray = gidArray
- else:
- assert False, "unsupported FDSelect format: %s" % format
- else:
- # reading from XML. Make empty gidArray, and leave format as passed in.
- # format is None will result in the smallest representation being used.
- self.format = format
- self.gidArray = []
-
- def __len__(self):
- return len(self.gidArray)
-
- def __getitem__(self, index):
- return self.gidArray[index]
-
- def __setitem__(self, index, fdSelectValue):
- self.gidArray[index] = fdSelectValue
-
- def append(self, fdSelectValue):
- self.gidArray.append(fdSelectValue)
+ def __init__(self, file=None, numGlyphs=None, format=None):
+ if file:
+ # read data in from file
+ self.format = readCard8(file)
+ if self.format == 0:
+ from array import array
+
+ self.gidArray = array("B", file.read(numGlyphs)).tolist()
+ elif self.format == 3:
+ gidArray = [None] * numGlyphs
+ nRanges = readCard16(file)
+ fd = None
+ prev = None
+ for i in range(nRanges):
+ first = readCard16(file)
+ if prev is not None:
+ for glyphID in range(prev, first):
+ gidArray[glyphID] = fd
+ prev = first
+ fd = readCard8(file)
+ if prev is not None:
+ first = readCard16(file)
+ for glyphID in range(prev, first):
+ gidArray[glyphID] = fd
+ self.gidArray = gidArray
+ elif self.format == 4:
+ gidArray = [None] * numGlyphs
+ nRanges = readCard32(file)
+ fd = None
+ prev = None
+ for i in range(nRanges):
+ first = readCard32(file)
+ if prev is not None:
+ for glyphID in range(prev, first):
+ gidArray[glyphID] = fd
+ prev = first
+ fd = readCard16(file)
+ if prev is not None:
+ first = readCard32(file)
+ for glyphID in range(prev, first):
+ gidArray[glyphID] = fd
+ self.gidArray = gidArray
+ else:
+ assert False, "unsupported FDSelect format: %s" % format
+ else:
+ # reading from XML. Make empty gidArray, and leave format as passed in.
+ # format is None will result in the smallest representation being used.
+ self.format = format
+ self.gidArray = []
+
+ def __len__(self):
+ return len(self.gidArray)
+
+ def __getitem__(self, index):
+ return self.gidArray[index]
+
+ def __setitem__(self, index, fdSelectValue):
+ self.gidArray[index] = fdSelectValue
+
+ def append(self, fdSelectValue):
+ self.gidArray.append(fdSelectValue)
class CharStrings(object):
- """The ``CharStrings`` in the font represent the instructions for drawing
- each glyph. This object presents a dictionary interface to the font's
- CharStrings, indexed by glyph name:
-
- .. code:: python
-
- tt["CFF "].cff[0].CharStrings["a"]
- # <T2CharString (bytecode) at 103451e90>
-
- See :class:`fontTools.misc.psCharStrings.T1CharString` and
- :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile,
- compile and interpret the glyph drawing instructions in the returned objects.
-
- """
-
- def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray,
- isCFF2=None):
- self.globalSubrs = globalSubrs
- if file is not None:
- self.charStringsIndex = SubrsIndex(
- file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2)
- self.charStrings = charStrings = {}
- for i in range(len(charset)):
- charStrings[charset[i]] = i
- # read from OTF file: charStrings.values() are indices into
- # charStringsIndex.
- self.charStringsAreIndexed = 1
- else:
- self.charStrings = {}
- # read from ttx file: charStrings.values() are actual charstrings
- self.charStringsAreIndexed = 0
- self.private = private
- if fdSelect is not None:
- self.fdSelect = fdSelect
- if fdArray is not None:
- self.fdArray = fdArray
-
- def keys(self):
- return list(self.charStrings.keys())
-
- def values(self):
- if self.charStringsAreIndexed:
- return self.charStringsIndex
- else:
- return list(self.charStrings.values())
-
- def has_key(self, name):
- return name in self.charStrings
-
- __contains__ = has_key
-
- def __len__(self):
- return len(self.charStrings)
-
- def __getitem__(self, name):
- charString = self.charStrings[name]
- if self.charStringsAreIndexed:
- charString = self.charStringsIndex[charString]
- return charString
-
- def __setitem__(self, name, charString):
- if self.charStringsAreIndexed:
- index = self.charStrings[name]
- self.charStringsIndex[index] = charString
- else:
- self.charStrings[name] = charString
-
- def getItemAndSelector(self, name):
- if self.charStringsAreIndexed:
- index = self.charStrings[name]
- return self.charStringsIndex.getItemAndSelector(index)
- else:
- if hasattr(self, 'fdArray'):
- if hasattr(self, 'fdSelect'):
- sel = self.charStrings[name].fdSelectIndex
- else:
- sel = 0
- else:
- sel = None
- return self.charStrings[name], sel
-
- def toXML(self, xmlWriter):
- names = sorted(self.keys())
- for name in names:
- charStr, fdSelectIndex = self.getItemAndSelector(name)
- if charStr.needsDecompilation():
- raw = [("raw", 1)]
- else:
- raw = []
- if fdSelectIndex is None:
- xmlWriter.begintag("CharString", [('name', name)] + raw)
- else:
- xmlWriter.begintag(
- "CharString",
- [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw)
- xmlWriter.newline()
- charStr.toXML(xmlWriter)
- xmlWriter.endtag("CharString")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content):
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- if name != "CharString":
- continue
- fdID = -1
- if hasattr(self, "fdArray"):
- try:
- fdID = safeEval(attrs["fdSelectIndex"])
- except KeyError:
- fdID = 0
- private = self.fdArray[fdID].Private
- else:
- private = self.private
-
- glyphName = attrs["name"]
- charStringClass = psCharStrings.T2CharString
- charString = charStringClass(
- private=private,
- globalSubrs=self.globalSubrs)
- charString.fromXML(name, attrs, content)
- if fdID >= 0:
- charString.fdSelectIndex = fdID
- self[glyphName] = charString
+ """The ``CharStrings`` in the font represent the instructions for drawing
+ each glyph. This object presents a dictionary interface to the font's
+ CharStrings, indexed by glyph name:
+
+ .. code:: python
+
+ tt["CFF "].cff[0].CharStrings["a"]
+ # <T2CharString (bytecode) at 103451e90>
+
+ See :class:`fontTools.misc.psCharStrings.T1CharString` and
+ :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile,
+ compile and interpret the glyph drawing instructions in the returned objects.
+
+ """
+
+ def __init__(
+ self,
+ file,
+ charset,
+ globalSubrs,
+ private,
+ fdSelect,
+ fdArray,
+ isCFF2=None,
+ varStore=None,
+ ):
+ self.globalSubrs = globalSubrs
+ self.varStore = varStore
+ if file is not None:
+ self.charStringsIndex = SubrsIndex(
+ file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2
+ )
+ self.charStrings = charStrings = {}
+ for i in range(len(charset)):
+ charStrings[charset[i]] = i
+ # read from OTF file: charStrings.values() are indices into
+ # charStringsIndex.
+ self.charStringsAreIndexed = 1
+ else:
+ self.charStrings = {}
+ # read from ttx file: charStrings.values() are actual charstrings
+ self.charStringsAreIndexed = 0
+ self.private = private
+ if fdSelect is not None:
+ self.fdSelect = fdSelect
+ if fdArray is not None:
+ self.fdArray = fdArray
+
+ def keys(self):
+ return list(self.charStrings.keys())
+
+ def values(self):
+ if self.charStringsAreIndexed:
+ return self.charStringsIndex
+ else:
+ return list(self.charStrings.values())
+
+ def has_key(self, name):
+ return name in self.charStrings
+
+ __contains__ = has_key
+
+ def __len__(self):
+ return len(self.charStrings)
+
+ def __getitem__(self, name):
+ charString = self.charStrings[name]
+ if self.charStringsAreIndexed:
+ charString = self.charStringsIndex[charString]
+ return charString
+
+ def __setitem__(self, name, charString):
+ if self.charStringsAreIndexed:
+ index = self.charStrings[name]
+ self.charStringsIndex[index] = charString
+ else:
+ self.charStrings[name] = charString
+
+ def getItemAndSelector(self, name):
+ if self.charStringsAreIndexed:
+ index = self.charStrings[name]
+ return self.charStringsIndex.getItemAndSelector(index)
+ else:
+ if hasattr(self, "fdArray"):
+ if hasattr(self, "fdSelect"):
+ sel = self.charStrings[name].fdSelectIndex
+ else:
+ sel = 0
+ else:
+ sel = None
+ return self.charStrings[name], sel
+
+ def toXML(self, xmlWriter):
+ names = sorted(self.keys())
+ for name in names:
+ charStr, fdSelectIndex = self.getItemAndSelector(name)
+ if charStr.needsDecompilation():
+ raw = [("raw", 1)]
+ else:
+ raw = []
+ if fdSelectIndex is None:
+ xmlWriter.begintag("CharString", [("name", name)] + raw)
+ else:
+ xmlWriter.begintag(
+ "CharString",
+ [("name", name), ("fdSelectIndex", fdSelectIndex)] + raw,
+ )
+ xmlWriter.newline()
+ charStr.toXML(xmlWriter)
+ xmlWriter.endtag("CharString")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content):
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ if name != "CharString":
+ continue
+ fdID = -1
+ if hasattr(self, "fdArray"):
+ try:
+ fdID = safeEval(attrs["fdSelectIndex"])
+ except KeyError:
+ fdID = 0
+ private = self.fdArray[fdID].Private
+ else:
+ private = self.private
+
+ glyphName = attrs["name"]
+ charStringClass = psCharStrings.T2CharString
+ charString = charStringClass(private=private, globalSubrs=self.globalSubrs)
+ charString.fromXML(name, attrs, content)
+ if fdID >= 0:
+ charString.fdSelectIndex = fdID
+ self[glyphName] = charString
def readCard8(file):
- return byteord(file.read(1))
+ return byteord(file.read(1))
def readCard16(file):
- value, = struct.unpack(">H", file.read(2))
- return value
+ (value,) = struct.unpack(">H", file.read(2))
+ return value
def readCard32(file):
- value, = struct.unpack(">L", file.read(4))
- return value
+ (value,) = struct.unpack(">L", file.read(4))
+ return value
def writeCard8(file, value):
- file.write(bytechr(value))
+ file.write(bytechr(value))
def writeCard16(file, value):
- file.write(struct.pack(">H", value))
+ file.write(struct.pack(">H", value))
def writeCard32(file, value):
- file.write(struct.pack(">L", value))
+ file.write(struct.pack(">L", value))
def packCard8(value):
- return bytechr(value)
+ return bytechr(value)
def packCard16(value):
- return struct.pack(">H", value)
+ return struct.pack(">H", value)
def packCard32(value):
- return struct.pack(">L", value)
+ return struct.pack(">L", value)
def buildOperatorDict(table):
- d = {}
- for op, name, arg, default, conv in table:
- d[op] = (name, arg)
- return d
+ d = {}
+ for op, name, arg, default, conv in table:
+ d[op] = (name, arg)
+ return d
def buildOpcodeDict(table):
- d = {}
- for op, name, arg, default, conv in table:
- if isinstance(op, tuple):
- op = bytechr(op[0]) + bytechr(op[1])
- else:
- op = bytechr(op)
- d[name] = (op, arg)
- return d
+ d = {}
+ for op, name, arg, default, conv in table:
+ if isinstance(op, tuple):
+ op = bytechr(op[0]) + bytechr(op[1])
+ else:
+ op = bytechr(op)
+ d[name] = (op, arg)
+ return d
def buildOrder(table):
- l = []
- for op, name, arg, default, conv in table:
- l.append(name)
- return l
+ l = []
+ for op, name, arg, default, conv in table:
+ l.append(name)
+ return l
def buildDefaults(table):
- d = {}
- for op, name, arg, default, conv in table:
- if default is not None:
- d[name] = default
- return d
+ d = {}
+ for op, name, arg, default, conv in table:
+ if default is not None:
+ d[name] = default
+ return d
def buildConverters(table):
- d = {}
- for op, name, arg, default, conv in table:
- d[name] = conv
- return d
+ d = {}
+ for op, name, arg, default, conv in table:
+ d[name] = conv
+ return d
class SimpleConverter(object):
+ def read(self, parent, value):
+ if not hasattr(parent, "file"):
+ return self._read(parent, value)
+ file = parent.file
+ pos = file.tell()
+ try:
+ return self._read(parent, value)
+ finally:
+ file.seek(pos)
- def read(self, parent, value):
- if not hasattr(parent, "file"):
- return self._read(parent, value)
- file = parent.file
- pos = file.tell()
- try:
- return self._read(parent, value)
- finally:
- file.seek(pos)
+ def _read(self, parent, value):
+ return value
- def _read(self, parent, value):
- return value
+ def write(self, parent, value):
+ return value
- def write(self, parent, value):
- return value
+ def xmlWrite(self, xmlWriter, name, value):
+ xmlWriter.simpletag(name, value=value)
+ xmlWriter.newline()
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- return attrs["value"]
+ def xmlRead(self, name, attrs, content, parent):
+ return attrs["value"]
class ASCIIConverter(SimpleConverter):
+ def _read(self, parent, value):
+ return tostr(value, encoding="ascii")
- def _read(self, parent, value):
- return tostr(value, encoding='ascii')
-
- def write(self, parent, value):
- return tobytes(value, encoding='ascii')
+ def write(self, parent, value):
+ return tobytes(value, encoding="ascii")
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.simpletag(name, value=tostr(value, encoding="ascii"))
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, name, value):
+ xmlWriter.simpletag(name, value=tostr(value, encoding="ascii"))
+ xmlWriter.newline()
- def xmlRead(self, name, attrs, content, parent):
- return tobytes(attrs["value"], encoding=("ascii"))
+ def xmlRead(self, name, attrs, content, parent):
+ return tobytes(attrs["value"], encoding=("ascii"))
class Latin1Converter(SimpleConverter):
+ def _read(self, parent, value):
+ return tostr(value, encoding="latin1")
- def _read(self, parent, value):
- return tostr(value, encoding='latin1')
+ def write(self, parent, value):
+ return tobytes(value, encoding="latin1")
- def write(self, parent, value):
- return tobytes(value, encoding='latin1')
+ def xmlWrite(self, xmlWriter, name, value):
+ value = tostr(value, encoding="latin1")
+ if name in ["Notice", "Copyright"]:
+ value = re.sub(r"[\r\n]\s+", " ", value)
+ xmlWriter.simpletag(name, value=value)
+ xmlWriter.newline()
- def xmlWrite(self, xmlWriter, name, value):
- value = tostr(value, encoding="latin1")
- if name in ['Notice', 'Copyright']:
- value = re.sub(r"[\r\n]\s+", " ", value)
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- return tobytes(attrs["value"], encoding=("latin1"))
+ def xmlRead(self, name, attrs, content, parent):
+ return tobytes(attrs["value"], encoding=("latin1"))
def parseNum(s):
- try:
- value = int(s)
- except:
- value = float(s)
- return value
+ try:
+ value = int(s)
+ except:
+ value = float(s)
+ return value
def parseBlendList(s):
- valueList = []
- for element in s:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- blendList = attrs["value"].split()
- blendList = [eval(val) for val in blendList]
- valueList.append(blendList)
- if len(valueList) == 1:
- valueList = valueList[0]
- return valueList
+ valueList = []
+ for element in s:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ blendList = attrs["value"].split()
+ blendList = [eval(val) for val in blendList]
+ valueList.append(blendList)
+ if len(valueList) == 1:
+ valueList = valueList[0]
+ return valueList
class NumberConverter(SimpleConverter):
- def xmlWrite(self, xmlWriter, name, value):
- if isinstance(value, list):
- xmlWriter.begintag(name)
- xmlWriter.newline()
- xmlWriter.indent()
- blendValue = " ".join([str(val) for val in value])
- xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
- xmlWriter.newline()
- xmlWriter.dedent()
- xmlWriter.endtag(name)
- xmlWriter.newline()
- else:
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- valueString = attrs.get("value", None)
- if valueString is None:
- value = parseBlendList(content)
- else:
- value = parseNum(attrs["value"])
- return value
+ def xmlWrite(self, xmlWriter, name, value):
+ if isinstance(value, list):
+ xmlWriter.begintag(name)
+ xmlWriter.newline()
+ xmlWriter.indent()
+ blendValue = " ".join([str(val) for val in value])
+ xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
+ xmlWriter.newline()
+ xmlWriter.dedent()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+ else:
+ xmlWriter.simpletag(name, value=value)
+ xmlWriter.newline()
+
+ def xmlRead(self, name, attrs, content, parent):
+ valueString = attrs.get("value", None)
+ if valueString is None:
+ value = parseBlendList(content)
+ else:
+ value = parseNum(attrs["value"])
+ return value
class ArrayConverter(SimpleConverter):
- def xmlWrite(self, xmlWriter, name, value):
- if value and isinstance(value[0], list):
- xmlWriter.begintag(name)
- xmlWriter.newline()
- xmlWriter.indent()
- for valueList in value:
- blendValue = " ".join([str(val) for val in valueList])
- xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
- xmlWriter.newline()
- xmlWriter.dedent()
- xmlWriter.endtag(name)
- xmlWriter.newline()
- else:
- value = " ".join([str(val) for val in value])
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- valueString = attrs.get("value", None)
- if valueString is None:
- valueList = parseBlendList(content)
- else:
- values = valueString.split()
- valueList = [parseNum(value) for value in values]
- return valueList
+ def xmlWrite(self, xmlWriter, name, value):
+ if value and isinstance(value[0], list):
+ xmlWriter.begintag(name)
+ xmlWriter.newline()
+ xmlWriter.indent()
+ for valueList in value:
+ blendValue = " ".join([str(val) for val in valueList])
+ xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
+ xmlWriter.newline()
+ xmlWriter.dedent()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+ else:
+ value = " ".join([str(val) for val in value])
+ xmlWriter.simpletag(name, value=value)
+ xmlWriter.newline()
+
+ def xmlRead(self, name, attrs, content, parent):
+ valueString = attrs.get("value", None)
+ if valueString is None:
+ valueList = parseBlendList(content)
+ else:
+ values = valueString.split()
+ valueList = [parseNum(value) for value in values]
+ return valueList
class TableConverter(SimpleConverter):
-
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.begintag(name)
- xmlWriter.newline()
- value.toXML(xmlWriter)
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- ob = self.getClass()()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- ob.fromXML(name, attrs, content)
- return ob
+ def xmlWrite(self, xmlWriter, name, value):
+ xmlWriter.begintag(name)
+ xmlWriter.newline()
+ value.toXML(xmlWriter)
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def xmlRead(self, name, attrs, content, parent):
+ ob = self.getClass()()
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ ob.fromXML(name, attrs, content)
+ return ob
class PrivateDictConverter(TableConverter):
-
- def getClass(self):
- return PrivateDict
-
- def _read(self, parent, value):
- size, offset = value
- file = parent.file
- isCFF2 = parent._isCFF2
- try:
- vstore = parent.vstore
- except AttributeError:
- vstore = None
- priv = PrivateDict(
- parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore)
- file.seek(offset)
- data = file.read(size)
- assert len(data) == size
- priv.decompile(data)
- return priv
-
- def write(self, parent, value):
- return (0, 0) # dummy value
+ def getClass(self):
+ return PrivateDict
+
+ def _read(self, parent, value):
+ size, offset = value
+ file = parent.file
+ isCFF2 = parent._isCFF2
+ try:
+ vstore = parent.vstore
+ except AttributeError:
+ vstore = None
+ priv = PrivateDict(parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore)
+ file.seek(offset)
+ data = file.read(size)
+ assert len(data) == size
+ priv.decompile(data)
+ return priv
+
+ def write(self, parent, value):
+ return (0, 0) # dummy value
class SubrsConverter(TableConverter):
+ def getClass(self):
+ return SubrsIndex
- def getClass(self):
- return SubrsIndex
+ def _read(self, parent, value):
+ file = parent.file
+ isCFF2 = parent._isCFF2
+ file.seek(parent.offset + value) # Offset(self)
+ return SubrsIndex(file, isCFF2=isCFF2)
- def _read(self, parent, value):
- file = parent.file
- isCFF2 = parent._isCFF2
- file.seek(parent.offset + value) # Offset(self)
- return SubrsIndex(file, isCFF2=isCFF2)
-
- def write(self, parent, value):
- return 0 # dummy value
+ def write(self, parent, value):
+ return 0 # dummy value
class CharStringsConverter(TableConverter):
-
- def _read(self, parent, value):
- file = parent.file
- isCFF2 = parent._isCFF2
- charset = parent.charset
- globalSubrs = parent.GlobalSubrs
- if hasattr(parent, "FDArray"):
- fdArray = parent.FDArray
- if hasattr(parent, "FDSelect"):
- fdSelect = parent.FDSelect
- else:
- fdSelect = None
- private = None
- else:
- fdSelect, fdArray = None, None
- private = parent.Private
- file.seek(value) # Offset(0)
- charStrings = CharStrings(
- file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2)
- return charStrings
-
- def write(self, parent, value):
- return 0 # dummy value
-
- def xmlRead(self, name, attrs, content, parent):
- if hasattr(parent, "FDArray"):
- # if it is a CID-keyed font, then the private Dict is extracted from the
- # parent.FDArray
- fdArray = parent.FDArray
- if hasattr(parent, "FDSelect"):
- fdSelect = parent.FDSelect
- else:
- fdSelect = None
- private = None
- else:
- # if it is a name-keyed font, then the private dict is in the top dict,
- # and
- # there is no fdArray.
- private, fdSelect, fdArray = parent.Private, None, None
- charStrings = CharStrings(
- None, None, parent.GlobalSubrs, private, fdSelect, fdArray)
- charStrings.fromXML(name, attrs, content)
- return charStrings
+ def _read(self, parent, value):
+ file = parent.file
+ isCFF2 = parent._isCFF2
+ charset = parent.charset
+ varStore = getattr(parent, "VarStore", None)
+ globalSubrs = parent.GlobalSubrs
+ if hasattr(parent, "FDArray"):
+ fdArray = parent.FDArray
+ if hasattr(parent, "FDSelect"):
+ fdSelect = parent.FDSelect
+ else:
+ fdSelect = None
+ private = None
+ else:
+ fdSelect, fdArray = None, None
+ private = parent.Private
+ file.seek(value) # Offset(0)
+ charStrings = CharStrings(
+ file,
+ charset,
+ globalSubrs,
+ private,
+ fdSelect,
+ fdArray,
+ isCFF2=isCFF2,
+ varStore=varStore,
+ )
+ return charStrings
+
+ def write(self, parent, value):
+ return 0 # dummy value
+
+ def xmlRead(self, name, attrs, content, parent):
+ if hasattr(parent, "FDArray"):
+ # if it is a CID-keyed font, then the private Dict is extracted from the
+ # parent.FDArray
+ fdArray = parent.FDArray
+ if hasattr(parent, "FDSelect"):
+ fdSelect = parent.FDSelect
+ else:
+ fdSelect = None
+ private = None
+ else:
+ # if it is a name-keyed font, then the private dict is in the top dict,
+ # and
+ # there is no fdArray.
+ private, fdSelect, fdArray = parent.Private, None, None
+ charStrings = CharStrings(
+ None,
+ None,
+ parent.GlobalSubrs,
+ private,
+ fdSelect,
+ fdArray,
+ varStore=getattr(parent, "VarStore", None),
+ )
+ charStrings.fromXML(name, attrs, content)
+ return charStrings
class CharsetConverter(SimpleConverter):
- def _read(self, parent, value):
- isCID = hasattr(parent, "ROS")
- if value > 2:
- numGlyphs = parent.numGlyphs
- file = parent.file
- file.seek(value)
- log.log(DEBUG, "loading charset at %s", value)
- format = readCard8(file)
- if format == 0:
- charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
- elif format == 1 or format == 2:
- charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
- else:
- raise NotImplementedError
- assert len(charset) == numGlyphs
- log.log(DEBUG, " charset end at %s", file.tell())
- # make sure glyph names are unique
- allNames = {}
- newCharset = []
- for glyphName in charset:
- if glyphName in allNames:
- # make up a new glyphName that's unique
- n = allNames[glyphName]
- while (glyphName + "#" + str(n)) in allNames:
- n += 1
- allNames[glyphName] = n + 1
- glyphName = glyphName + "#" + str(n)
- allNames[glyphName] = 1
- newCharset.append(glyphName)
- charset = newCharset
- else: # offset == 0 -> no charset data.
- if isCID or "CharStrings" not in parent.rawDict:
- # We get here only when processing fontDicts from the FDArray of
- # CFF-CID fonts. Only the real topDict references the chrset.
- assert value == 0
- charset = None
- elif value == 0:
- charset = cffISOAdobeStrings
- elif value == 1:
- charset = cffIExpertStrings
- elif value == 2:
- charset = cffExpertSubsetStrings
- if charset and (len(charset) != parent.numGlyphs):
- charset = charset[:parent.numGlyphs]
- return charset
-
- def write(self, parent, value):
- return 0 # dummy value
-
- def xmlWrite(self, xmlWriter, name, value):
- # XXX only write charset when not in OT/TTX context, where we
- # dump charset as a separate "GlyphOrder" table.
- # # xmlWriter.simpletag("charset")
- xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- pass
+ def _read(self, parent, value):
+ isCID = hasattr(parent, "ROS")
+ if value > 2:
+ numGlyphs = parent.numGlyphs
+ file = parent.file
+ file.seek(value)
+ log.log(DEBUG, "loading charset at %s", value)
+ format = readCard8(file)
+ if format == 0:
+ charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
+ elif format == 1 or format == 2:
+ charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
+ else:
+ raise NotImplementedError
+ assert len(charset) == numGlyphs
+ log.log(DEBUG, " charset end at %s", file.tell())
+ # make sure glyph names are unique
+ allNames = {}
+ newCharset = []
+ for glyphName in charset:
+ if glyphName in allNames:
+ # make up a new glyphName that's unique
+ n = allNames[glyphName]
+ while (glyphName + "#" + str(n)) in allNames:
+ n += 1
+ allNames[glyphName] = n + 1
+ glyphName = glyphName + "#" + str(n)
+ allNames[glyphName] = 1
+ newCharset.append(glyphName)
+ charset = newCharset
+ else: # offset == 0 -> no charset data.
+ if isCID or "CharStrings" not in parent.rawDict:
+ # We get here only when processing fontDicts from the FDArray of
+ # CFF-CID fonts. Only the real topDict references the chrset.
+ assert value == 0
+ charset = None
+ elif value == 0:
+ charset = cffISOAdobeStrings
+ elif value == 1:
+ charset = cffIExpertStrings
+ elif value == 2:
+ charset = cffExpertSubsetStrings
+ if charset and (len(charset) != parent.numGlyphs):
+ charset = charset[: parent.numGlyphs]
+ return charset
+
+ def write(self, parent, value):
+ return 0 # dummy value
+
+ def xmlWrite(self, xmlWriter, name, value):
+ # XXX only write charset when not in OT/TTX context, where we
+ # dump charset as a separate "GlyphOrder" table.
+ # # xmlWriter.simpletag("charset")
+ xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
+ xmlWriter.newline()
+
+ def xmlRead(self, name, attrs, content, parent):
+ pass
class CharsetCompiler(object):
+ def __init__(self, strings, charset, parent):
+ assert charset[0] == ".notdef"
+ isCID = hasattr(parent.dictObj, "ROS")
+ data0 = packCharset0(charset, isCID, strings)
+ data = packCharset(charset, isCID, strings)
+ if len(data) < len(data0):
+ self.data = data
+ else:
+ self.data = data0
+ self.parent = parent
- def __init__(self, strings, charset, parent):
- assert charset[0] == '.notdef'
- isCID = hasattr(parent.dictObj, "ROS")
- data0 = packCharset0(charset, isCID, strings)
- data = packCharset(charset, isCID, strings)
- if len(data) < len(data0):
- self.data = data
- else:
- self.data = data0
- self.parent = parent
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["charset"] = pos
+ def setPos(self, pos, endPos):
+ self.parent.rawDict["charset"] = pos
- def getDataLength(self):
- return len(self.data)
+ def getDataLength(self):
+ return len(self.data)
- def toFile(self, file):
- file.write(self.data)
+ def toFile(self, file):
+ file.write(self.data)
def getStdCharSet(charset):
- # check to see if we can use a predefined charset value.
- predefinedCharSetVal = None
- predefinedCharSets = [
- (cffISOAdobeStringCount, cffISOAdobeStrings, 0),
- (cffExpertStringCount, cffIExpertStrings, 1),
- (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2)]
- lcs = len(charset)
- for cnt, pcs, csv in predefinedCharSets:
- if predefinedCharSetVal is not None:
- break
- if lcs > cnt:
- continue
- predefinedCharSetVal = csv
- for i in range(lcs):
- if charset[i] != pcs[i]:
- predefinedCharSetVal = None
- break
- return predefinedCharSetVal
+ # check to see if we can use a predefined charset value.
+ predefinedCharSetVal = None
+ predefinedCharSets = [
+ (cffISOAdobeStringCount, cffISOAdobeStrings, 0),
+ (cffExpertStringCount, cffIExpertStrings, 1),
+ (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2),
+ ]
+ lcs = len(charset)
+ for cnt, pcs, csv in predefinedCharSets:
+ if predefinedCharSetVal is not None:
+ break
+ if lcs > cnt:
+ continue
+ predefinedCharSetVal = csv
+ for i in range(lcs):
+ if charset[i] != pcs[i]:
+ predefinedCharSetVal = None
+ break
+ return predefinedCharSetVal
def getCIDfromName(name, strings):
- return int(name[3:])
+ return int(name[3:])
def getSIDfromName(name, strings):
- return strings.getSID(name)
+ return strings.getSID(name)
def packCharset0(charset, isCID, strings):
- fmt = 0
- data = [packCard8(fmt)]
- if isCID:
- getNameID = getCIDfromName
- else:
- getNameID = getSIDfromName
+ fmt = 0
+ data = [packCard8(fmt)]
+ if isCID:
+ getNameID = getCIDfromName
+ else:
+ getNameID = getSIDfromName
- for name in charset[1:]:
- data.append(packCard16(getNameID(name, strings)))
- return bytesjoin(data)
+ for name in charset[1:]:
+ data.append(packCard16(getNameID(name, strings)))
+ return bytesjoin(data)
def packCharset(charset, isCID, strings):
- fmt = 1
- ranges = []
- first = None
- end = 0
- if isCID:
- getNameID = getCIDfromName
- else:
- getNameID = getSIDfromName
-
- for name in charset[1:]:
- SID = getNameID(name, strings)
- if first is None:
- first = SID
- elif end + 1 != SID:
- nLeft = end - first
- if nLeft > 255:
- fmt = 2
- ranges.append((first, nLeft))
- first = SID
- end = SID
- if end:
- nLeft = end - first
- if nLeft > 255:
- fmt = 2
- ranges.append((first, nLeft))
-
- data = [packCard8(fmt)]
- if fmt == 1:
- nLeftFunc = packCard8
- else:
- nLeftFunc = packCard16
- for first, nLeft in ranges:
- data.append(packCard16(first) + nLeftFunc(nLeft))
- return bytesjoin(data)
+ fmt = 1
+ ranges = []
+ first = None
+ end = 0
+ if isCID:
+ getNameID = getCIDfromName
+ else:
+ getNameID = getSIDfromName
+
+ for name in charset[1:]:
+ SID = getNameID(name, strings)
+ if first is None:
+ first = SID
+ elif end + 1 != SID:
+ nLeft = end - first
+ if nLeft > 255:
+ fmt = 2
+ ranges.append((first, nLeft))
+ first = SID
+ end = SID
+ if end:
+ nLeft = end - first
+ if nLeft > 255:
+ fmt = 2
+ ranges.append((first, nLeft))
+
+ data = [packCard8(fmt)]
+ if fmt == 1:
+ nLeftFunc = packCard8
+ else:
+ nLeftFunc = packCard16
+ for first, nLeft in ranges:
+ data.append(packCard16(first) + nLeftFunc(nLeft))
+ return bytesjoin(data)
def parseCharset0(numGlyphs, file, strings, isCID):
- charset = [".notdef"]
- if isCID:
- for i in range(numGlyphs - 1):
- CID = readCard16(file)
- charset.append("cid" + str(CID).zfill(5))
- else:
- for i in range(numGlyphs - 1):
- SID = readCard16(file)
- charset.append(strings[SID])
- return charset
+ charset = [".notdef"]
+ if isCID:
+ for i in range(numGlyphs - 1):
+ CID = readCard16(file)
+ charset.append("cid" + str(CID).zfill(5))
+ else:
+ for i in range(numGlyphs - 1):
+ SID = readCard16(file)
+ charset.append(strings[SID])
+ return charset
def parseCharset(numGlyphs, file, strings, isCID, fmt):
- charset = ['.notdef']
- count = 1
- if fmt == 1:
- nLeftFunc = readCard8
- else:
- nLeftFunc = readCard16
- while count < numGlyphs:
- first = readCard16(file)
- nLeft = nLeftFunc(file)
- if isCID:
- for CID in range(first, first + nLeft + 1):
- charset.append("cid" + str(CID).zfill(5))
- else:
- for SID in range(first, first + nLeft + 1):
- charset.append(strings[SID])
- count = count + nLeft + 1
- return charset
+ charset = [".notdef"]
+ count = 1
+ if fmt == 1:
+ nLeftFunc = readCard8
+ else:
+ nLeftFunc = readCard16
+ while count < numGlyphs:
+ first = readCard16(file)
+ nLeft = nLeftFunc(file)
+ if isCID:
+ for CID in range(first, first + nLeft + 1):
+ charset.append("cid" + str(CID).zfill(5))
+ else:
+ for SID in range(first, first + nLeft + 1):
+ charset.append(strings[SID])
+ count = count + nLeft + 1
+ return charset
class EncodingCompiler(object):
+ def __init__(self, strings, encoding, parent):
+ assert not isinstance(encoding, str)
+ data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
+ data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
+ if len(data0) < len(data1):
+ self.data = data0
+ else:
+ self.data = data1
+ self.parent = parent
- def __init__(self, strings, encoding, parent):
- assert not isinstance(encoding, str)
- data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
- data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
- if len(data0) < len(data1):
- self.data = data0
- else:
- self.data = data1
- self.parent = parent
+ def setPos(self, pos, endPos):
+ self.parent.rawDict["Encoding"] = pos
- def setPos(self, pos, endPos):
- self.parent.rawDict["Encoding"] = pos
+ def getDataLength(self):
+ return len(self.data)
- def getDataLength(self):
- return len(self.data)
-
- def toFile(self, file):
- file.write(self.data)
+ def toFile(self, file):
+ file.write(self.data)
class EncodingConverter(SimpleConverter):
-
- def _read(self, parent, value):
- if value == 0:
- return "StandardEncoding"
- elif value == 1:
- return "ExpertEncoding"
- else:
- assert value > 1
- file = parent.file
- file.seek(value)
- log.log(DEBUG, "loading Encoding at %s", value)
- fmt = readCard8(file)
- haveSupplement = fmt & 0x80
- if haveSupplement:
- raise NotImplementedError("Encoding supplements are not yet supported")
- fmt = fmt & 0x7f
- if fmt == 0:
- encoding = parseEncoding0(parent.charset, file, haveSupplement,
- parent.strings)
- elif fmt == 1:
- encoding = parseEncoding1(parent.charset, file, haveSupplement,
- parent.strings)
- return encoding
-
- def write(self, parent, value):
- if value == "StandardEncoding":
- return 0
- elif value == "ExpertEncoding":
- return 1
- return 0 # dummy value
-
- def xmlWrite(self, xmlWriter, name, value):
- if value in ("StandardEncoding", "ExpertEncoding"):
- xmlWriter.simpletag(name, name=value)
- xmlWriter.newline()
- return
- xmlWriter.begintag(name)
- xmlWriter.newline()
- for code in range(len(value)):
- glyphName = value[code]
- if glyphName != ".notdef":
- xmlWriter.simpletag("map", code=hex(code), name=glyphName)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- if "name" in attrs:
- return attrs["name"]
- encoding = [".notdef"] * 256
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- code = safeEval(attrs["code"])
- glyphName = attrs["name"]
- encoding[code] = glyphName
- return encoding
+ def _read(self, parent, value):
+ if value == 0:
+ return "StandardEncoding"
+ elif value == 1:
+ return "ExpertEncoding"
+ else:
+ assert value > 1
+ file = parent.file
+ file.seek(value)
+ log.log(DEBUG, "loading Encoding at %s", value)
+ fmt = readCard8(file)
+ haveSupplement = fmt & 0x80
+ if haveSupplement:
+ raise NotImplementedError("Encoding supplements are not yet supported")
+ fmt = fmt & 0x7F
+ if fmt == 0:
+ encoding = parseEncoding0(
+ parent.charset, file, haveSupplement, parent.strings
+ )
+ elif fmt == 1:
+ encoding = parseEncoding1(
+ parent.charset, file, haveSupplement, parent.strings
+ )
+ return encoding
+
+ def write(self, parent, value):
+ if value == "StandardEncoding":
+ return 0
+ elif value == "ExpertEncoding":
+ return 1
+ return 0 # dummy value
+
+ def xmlWrite(self, xmlWriter, name, value):
+ if value in ("StandardEncoding", "ExpertEncoding"):
+ xmlWriter.simpletag(name, name=value)
+ xmlWriter.newline()
+ return
+ xmlWriter.begintag(name)
+ xmlWriter.newline()
+ for code in range(len(value)):
+ glyphName = value[code]
+ if glyphName != ".notdef":
+ xmlWriter.simpletag("map", code=hex(code), name=glyphName)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def xmlRead(self, name, attrs, content, parent):
+ if "name" in attrs:
+ return attrs["name"]
+ encoding = [".notdef"] * 256
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ code = safeEval(attrs["code"])
+ glyphName = attrs["name"]
+ encoding[code] = glyphName
+ return encoding
def parseEncoding0(charset, file, haveSupplement, strings):
- nCodes = readCard8(file)
- encoding = [".notdef"] * 256
- for glyphID in range(1, nCodes + 1):
- code = readCard8(file)
- if code != 0:
- encoding[code] = charset[glyphID]
- return encoding
+ nCodes = readCard8(file)
+ encoding = [".notdef"] * 256
+ for glyphID in range(1, nCodes + 1):
+ code = readCard8(file)
+ if code != 0:
+ encoding[code] = charset[glyphID]
+ return encoding
def parseEncoding1(charset, file, haveSupplement, strings):
- nRanges = readCard8(file)
- encoding = [".notdef"] * 256
- glyphID = 1
- for i in range(nRanges):
- code = readCard8(file)
- nLeft = readCard8(file)
- for glyphID in range(glyphID, glyphID + nLeft + 1):
- encoding[code] = charset[glyphID]
- code = code + 1
- glyphID = glyphID + 1
- return encoding
+ nRanges = readCard8(file)
+ encoding = [".notdef"] * 256
+ glyphID = 1
+ for i in range(nRanges):
+ code = readCard8(file)
+ nLeft = readCard8(file)
+ for glyphID in range(glyphID, glyphID + nLeft + 1):
+ encoding[code] = charset[glyphID]
+ code = code + 1
+ glyphID = glyphID + 1
+ return encoding
def packEncoding0(charset, encoding, strings):
- fmt = 0
- m = {}
- for code in range(len(encoding)):
- name = encoding[code]
- if name != ".notdef":
- m[name] = code
- codes = []
- for name in charset[1:]:
- code = m.get(name)
- codes.append(code)
-
- while codes and codes[-1] is None:
- codes.pop()
-
- data = [packCard8(fmt), packCard8(len(codes))]
- for code in codes:
- if code is None:
- code = 0
- data.append(packCard8(code))
- return bytesjoin(data)
+ fmt = 0
+ m = {}
+ for code in range(len(encoding)):
+ name = encoding[code]
+ if name != ".notdef":
+ m[name] = code
+ codes = []
+ for name in charset[1:]:
+ code = m.get(name)
+ codes.append(code)
+
+ while codes and codes[-1] is None:
+ codes.pop()
+
+ data = [packCard8(fmt), packCard8(len(codes))]
+ for code in codes:
+ if code is None:
+ code = 0
+ data.append(packCard8(code))
+ return bytesjoin(data)
def packEncoding1(charset, encoding, strings):
- fmt = 1
- m = {}
- for code in range(len(encoding)):
- name = encoding[code]
- if name != ".notdef":
- m[name] = code
- ranges = []
- first = None
- end = 0
- for name in charset[1:]:
- code = m.get(name, -1)
- if first is None:
- first = code
- elif end + 1 != code:
- nLeft = end - first
- ranges.append((first, nLeft))
- first = code
- end = code
- nLeft = end - first
- ranges.append((first, nLeft))
-
- # remove unencoded glyphs at the end.
- while ranges and ranges[-1][0] == -1:
- ranges.pop()
-
- data = [packCard8(fmt), packCard8(len(ranges))]
- for first, nLeft in ranges:
- if first == -1: # unencoded
- first = 0
- data.append(packCard8(first) + packCard8(nLeft))
- return bytesjoin(data)
+ fmt = 1
+ m = {}
+ for code in range(len(encoding)):
+ name = encoding[code]
+ if name != ".notdef":
+ m[name] = code
+ ranges = []
+ first = None
+ end = 0
+ for name in charset[1:]:
+ code = m.get(name, -1)
+ if first is None:
+ first = code
+ elif end + 1 != code:
+ nLeft = end - first
+ ranges.append((first, nLeft))
+ first = code
+ end = code
+ nLeft = end - first
+ ranges.append((first, nLeft))
+
+ # remove unencoded glyphs at the end.
+ while ranges and ranges[-1][0] == -1:
+ ranges.pop()
+
+ data = [packCard8(fmt), packCard8(len(ranges))]
+ for first, nLeft in ranges:
+ if first == -1: # unencoded
+ first = 0
+ data.append(packCard8(first) + packCard8(nLeft))
+ return bytesjoin(data)
class FDArrayConverter(TableConverter):
-
- def _read(self, parent, value):
- try:
- vstore = parent.VarStore
- except AttributeError:
- vstore = None
- file = parent.file
- isCFF2 = parent._isCFF2
- file.seek(value)
- fdArray = FDArrayIndex(file, isCFF2=isCFF2)
- fdArray.vstore = vstore
- fdArray.strings = parent.strings
- fdArray.GlobalSubrs = parent.GlobalSubrs
- return fdArray
-
- def write(self, parent, value):
- return 0 # dummy value
-
- def xmlRead(self, name, attrs, content, parent):
- fdArray = FDArrayIndex()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- fdArray.fromXML(name, attrs, content)
- return fdArray
+ def _read(self, parent, value):
+ try:
+ vstore = parent.VarStore
+ except AttributeError:
+ vstore = None
+ file = parent.file
+ isCFF2 = parent._isCFF2
+ file.seek(value)
+ fdArray = FDArrayIndex(file, isCFF2=isCFF2)
+ fdArray.vstore = vstore
+ fdArray.strings = parent.strings
+ fdArray.GlobalSubrs = parent.GlobalSubrs
+ return fdArray
+
+ def write(self, parent, value):
+ return 0 # dummy value
+
+ def xmlRead(self, name, attrs, content, parent):
+ fdArray = FDArrayIndex()
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ fdArray.fromXML(name, attrs, content)
+ return fdArray
class FDSelectConverter(SimpleConverter):
+ def _read(self, parent, value):
+ file = parent.file
+ file.seek(value)
+ fdSelect = FDSelect(file, parent.numGlyphs)
+ return fdSelect
- def _read(self, parent, value):
- file = parent.file
- file.seek(value)
- fdSelect = FDSelect(file, parent.numGlyphs)
- return fdSelect
-
- def write(self, parent, value):
- return 0 # dummy value
+ def write(self, parent, value):
+ return 0 # dummy value
- # The FDSelect glyph data is written out to XML in the charstring keys,
- # so we write out only the format selector
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.simpletag(name, [('format', value.format)])
- xmlWriter.newline()
+ # The FDSelect glyph data is written out to XML in the charstring keys,
+ # so we write out only the format selector
+ def xmlWrite(self, xmlWriter, name, value):
+ xmlWriter.simpletag(name, [("format", value.format)])
+ xmlWriter.newline()
- def xmlRead(self, name, attrs, content, parent):
- fmt = safeEval(attrs["format"])
- file = None
- numGlyphs = None
- fdSelect = FDSelect(file, numGlyphs, fmt)
- return fdSelect
+ def xmlRead(self, name, attrs, content, parent):
+ fmt = safeEval(attrs["format"])
+ file = None
+ numGlyphs = None
+ fdSelect = FDSelect(file, numGlyphs, fmt)
+ return fdSelect
class VarStoreConverter(SimpleConverter):
+ def _read(self, parent, value):
+ file = parent.file
+ file.seek(value)
+ varStore = VarStoreData(file)
+ varStore.decompile()
+ return varStore
- def _read(self, parent, value):
- file = parent.file
- file.seek(value)
- varStore = VarStoreData(file)
- varStore.decompile()
- return varStore
+ def write(self, parent, value):
+ return 0 # dummy value
- def write(self, parent, value):
- return 0 # dummy value
+ def xmlWrite(self, xmlWriter, name, value):
+ value.writeXML(xmlWriter, name)
- def xmlWrite(self, xmlWriter, name, value):
- value.writeXML(xmlWriter, name)
-
- def xmlRead(self, name, attrs, content, parent):
- varStore = VarStoreData()
- varStore.xmlRead(name, attrs, content, parent)
- return varStore
+ def xmlRead(self, name, attrs, content, parent):
+ varStore = VarStoreData()
+ varStore.xmlRead(name, attrs, content, parent)
+ return varStore
def packFDSelect0(fdSelectArray):
- fmt = 0
- data = [packCard8(fmt)]
- for index in fdSelectArray:
- data.append(packCard8(index))
- return bytesjoin(data)
+ fmt = 0
+ data = [packCard8(fmt)]
+ for index in fdSelectArray:
+ data.append(packCard8(index))
+ return bytesjoin(data)
def packFDSelect3(fdSelectArray):
- fmt = 3
- fdRanges = []
- lenArray = len(fdSelectArray)
- lastFDIndex = -1
- for i in range(lenArray):
- fdIndex = fdSelectArray[i]
- if lastFDIndex != fdIndex:
- fdRanges.append([i, fdIndex])
- lastFDIndex = fdIndex
- sentinelGID = i + 1
-
- data = [packCard8(fmt)]
- data.append(packCard16(len(fdRanges)))
- for fdRange in fdRanges:
- data.append(packCard16(fdRange[0]))
- data.append(packCard8(fdRange[1]))
- data.append(packCard16(sentinelGID))
- return bytesjoin(data)
+ fmt = 3
+ fdRanges = []
+ lenArray = len(fdSelectArray)
+ lastFDIndex = -1
+ for i in range(lenArray):
+ fdIndex = fdSelectArray[i]
+ if lastFDIndex != fdIndex:
+ fdRanges.append([i, fdIndex])
+ lastFDIndex = fdIndex
+ sentinelGID = i + 1
+
+ data = [packCard8(fmt)]
+ data.append(packCard16(len(fdRanges)))
+ for fdRange in fdRanges:
+ data.append(packCard16(fdRange[0]))
+ data.append(packCard8(fdRange[1]))
+ data.append(packCard16(sentinelGID))
+ return bytesjoin(data)
def packFDSelect4(fdSelectArray):
- fmt = 4
- fdRanges = []
- lenArray = len(fdSelectArray)
- lastFDIndex = -1
- for i in range(lenArray):
- fdIndex = fdSelectArray[i]
- if lastFDIndex != fdIndex:
- fdRanges.append([i, fdIndex])
- lastFDIndex = fdIndex
- sentinelGID = i + 1
-
- data = [packCard8(fmt)]
- data.append(packCard32(len(fdRanges)))
- for fdRange in fdRanges:
- data.append(packCard32(fdRange[0]))
- data.append(packCard16(fdRange[1]))
- data.append(packCard32(sentinelGID))
- return bytesjoin(data)
+ fmt = 4
+ fdRanges = []
+ lenArray = len(fdSelectArray)
+ lastFDIndex = -1
+ for i in range(lenArray):
+ fdIndex = fdSelectArray[i]
+ if lastFDIndex != fdIndex:
+ fdRanges.append([i, fdIndex])
+ lastFDIndex = fdIndex
+ sentinelGID = i + 1
+
+ data = [packCard8(fmt)]
+ data.append(packCard32(len(fdRanges)))
+ for fdRange in fdRanges:
+ data.append(packCard32(fdRange[0]))
+ data.append(packCard16(fdRange[1]))
+ data.append(packCard32(sentinelGID))
+ return bytesjoin(data)
class FDSelectCompiler(object):
-
- def __init__(self, fdSelect, parent):
- fmt = fdSelect.format
- fdSelectArray = fdSelect.gidArray
- if fmt == 0:
- self.data = packFDSelect0(fdSelectArray)
- elif fmt == 3:
- self.data = packFDSelect3(fdSelectArray)
- elif fmt == 4:
- self.data = packFDSelect4(fdSelectArray)
- else:
- # choose smaller of the two formats
- data0 = packFDSelect0(fdSelectArray)
- data3 = packFDSelect3(fdSelectArray)
- if len(data0) < len(data3):
- self.data = data0
- fdSelect.format = 0
- else:
- self.data = data3
- fdSelect.format = 3
-
- self.parent = parent
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["FDSelect"] = pos
-
- def getDataLength(self):
- return len(self.data)
-
- def toFile(self, file):
- file.write(self.data)
+ def __init__(self, fdSelect, parent):
+ fmt = fdSelect.format
+ fdSelectArray = fdSelect.gidArray
+ if fmt == 0:
+ self.data = packFDSelect0(fdSelectArray)
+ elif fmt == 3:
+ self.data = packFDSelect3(fdSelectArray)
+ elif fmt == 4:
+ self.data = packFDSelect4(fdSelectArray)
+ else:
+ # choose smaller of the two formats
+ data0 = packFDSelect0(fdSelectArray)
+ data3 = packFDSelect3(fdSelectArray)
+ if len(data0) < len(data3):
+ self.data = data0
+ fdSelect.format = 0
+ else:
+ self.data = data3
+ fdSelect.format = 3
+
+ self.parent = parent
+
+ def setPos(self, pos, endPos):
+ self.parent.rawDict["FDSelect"] = pos
+
+ def getDataLength(self):
+ return len(self.data)
+
+ def toFile(self, file):
+ file.write(self.data)
class VarStoreCompiler(object):
+ def __init__(self, varStoreData, parent):
+ self.parent = parent
+ if not varStoreData.data:
+ varStoreData.compile()
+ data = [packCard16(len(varStoreData.data)), varStoreData.data]
+ self.data = bytesjoin(data)
- def __init__(self, varStoreData, parent):
- self.parent = parent
- if not varStoreData.data:
- varStoreData.compile()
- data = [
- packCard16(len(varStoreData.data)),
- varStoreData.data
- ]
- self.data = bytesjoin(data)
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["VarStore"] = pos
+ def setPos(self, pos, endPos):
+ self.parent.rawDict["VarStore"] = pos
- def getDataLength(self):
- return len(self.data)
+ def getDataLength(self):
+ return len(self.data)
- def toFile(self, file):
- file.write(self.data)
+ def toFile(self, file):
+ file.write(self.data)
class ROSConverter(SimpleConverter):
+ def xmlWrite(self, xmlWriter, name, value):
+ registry, order, supplement = value
+ xmlWriter.simpletag(
+ name,
+ [
+ ("Registry", tostr(registry)),
+ ("Order", tostr(order)),
+ ("Supplement", supplement),
+ ],
+ )
+ xmlWriter.newline()
+
+ def xmlRead(self, name, attrs, content, parent):
+ return (attrs["Registry"], attrs["Order"], safeEval(attrs["Supplement"]))
- def xmlWrite(self, xmlWriter, name, value):
- registry, order, supplement = value
- xmlWriter.simpletag(
- name,
- [
- ('Registry', tostr(registry)),
- ('Order', tostr(order)),
- ('Supplement', supplement)
- ])
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement']))
topDictOperators = [
-# opcode name argument type default converter
- (25, 'maxstack', 'number', None, None),
- ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()),
- ((12, 20), 'SyntheticBase', 'number', None, None),
- (0, 'version', 'SID', None, None),
- (1, 'Notice', 'SID', None, Latin1Converter()),
- ((12, 0), 'Copyright', 'SID', None, Latin1Converter()),
- (2, 'FullName', 'SID', None, None),
- ((12, 38), 'FontName', 'SID', None, None),
- (3, 'FamilyName', 'SID', None, None),
- (4, 'Weight', 'SID', None, None),
- ((12, 1), 'isFixedPitch', 'number', 0, None),
- ((12, 2), 'ItalicAngle', 'number', 0, None),
- ((12, 3), 'UnderlinePosition', 'number', -100, None),
- ((12, 4), 'UnderlineThickness', 'number', 50, None),
- ((12, 5), 'PaintType', 'number', 0, None),
- ((12, 6), 'CharstringType', 'number', 2, None),
- ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None),
- (13, 'UniqueID', 'number', None, None),
- (5, 'FontBBox', 'array', [0, 0, 0, 0], None),
- ((12, 8), 'StrokeWidth', 'number', 0, None),
- (14, 'XUID', 'array', None, None),
- ((12, 21), 'PostScript', 'SID', None, None),
- ((12, 22), 'BaseFontName', 'SID', None, None),
- ((12, 23), 'BaseFontBlend', 'delta', None, None),
- ((12, 31), 'CIDFontVersion', 'number', 0, None),
- ((12, 32), 'CIDFontRevision', 'number', 0, None),
- ((12, 33), 'CIDFontType', 'number', 0, None),
- ((12, 34), 'CIDCount', 'number', 8720, None),
- (15, 'charset', 'number', None, CharsetConverter()),
- ((12, 35), 'UIDBase', 'number', None, None),
- (16, 'Encoding', 'number', 0, EncodingConverter()),
- (18, 'Private', ('number', 'number'), None, PrivateDictConverter()),
- ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()),
- ((12, 36), 'FDArray', 'number', None, FDArrayConverter()),
- (17, 'CharStrings', 'number', None, CharStringsConverter()),
- (24, 'VarStore', 'number', None, VarStoreConverter()),
+ # opcode name argument type default converter
+ (25, "maxstack", "number", None, None),
+ ((12, 30), "ROS", ("SID", "SID", "number"), None, ROSConverter()),
+ ((12, 20), "SyntheticBase", "number", None, None),
+ (0, "version", "SID", None, None),
+ (1, "Notice", "SID", None, Latin1Converter()),
+ ((12, 0), "Copyright", "SID", None, Latin1Converter()),
+ (2, "FullName", "SID", None, Latin1Converter()),
+ ((12, 38), "FontName", "SID", None, Latin1Converter()),
+ (3, "FamilyName", "SID", None, Latin1Converter()),
+ (4, "Weight", "SID", None, None),
+ ((12, 1), "isFixedPitch", "number", 0, None),
+ ((12, 2), "ItalicAngle", "number", 0, None),
+ ((12, 3), "UnderlinePosition", "number", -100, None),
+ ((12, 4), "UnderlineThickness", "number", 50, None),
+ ((12, 5), "PaintType", "number", 0, None),
+ ((12, 6), "CharstringType", "number", 2, None),
+ ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
+ (13, "UniqueID", "number", None, None),
+ (5, "FontBBox", "array", [0, 0, 0, 0], None),
+ ((12, 8), "StrokeWidth", "number", 0, None),
+ (14, "XUID", "array", None, None),
+ ((12, 21), "PostScript", "SID", None, None),
+ ((12, 22), "BaseFontName", "SID", None, None),
+ ((12, 23), "BaseFontBlend", "delta", None, None),
+ ((12, 31), "CIDFontVersion", "number", 0, None),
+ ((12, 32), "CIDFontRevision", "number", 0, None),
+ ((12, 33), "CIDFontType", "number", 0, None),
+ ((12, 34), "CIDCount", "number", 8720, None),
+ (15, "charset", "number", None, CharsetConverter()),
+ ((12, 35), "UIDBase", "number", None, None),
+ (16, "Encoding", "number", 0, EncodingConverter()),
+ (18, "Private", ("number", "number"), None, PrivateDictConverter()),
+ ((12, 37), "FDSelect", "number", None, FDSelectConverter()),
+ ((12, 36), "FDArray", "number", None, FDArrayConverter()),
+ (17, "CharStrings", "number", None, CharStringsConverter()),
+ (24, "VarStore", "number", None, VarStoreConverter()),
]
topDictOperators2 = [
-# opcode name argument type default converter
- (25, 'maxstack', 'number', None, None),
- ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None),
- ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()),
- ((12, 36), 'FDArray', 'number', None, FDArrayConverter()),
- (17, 'CharStrings', 'number', None, CharStringsConverter()),
- (24, 'VarStore', 'number', None, VarStoreConverter()),
+ # opcode name argument type default converter
+ (25, "maxstack", "number", None, None),
+ ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
+ ((12, 37), "FDSelect", "number", None, FDSelectConverter()),
+ ((12, 36), "FDArray", "number", None, FDArrayConverter()),
+ (17, "CharStrings", "number", None, CharStringsConverter()),
+ (24, "VarStore", "number", None, VarStoreConverter()),
]
# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order,
@@ -2170,68 +2220,80 @@ kBlendDictOpName = "blend"
blendOp = 23
privateDictOperators = [
-# opcode name argument type default converter
- (22, "vsindex", 'number', None, None),
- (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF.
- (6, 'BlueValues', 'delta', None, None),
- (7, 'OtherBlues', 'delta', None, None),
- (8, 'FamilyBlues', 'delta', None, None),
- (9, 'FamilyOtherBlues', 'delta', None, None),
- ((12, 9), 'BlueScale', 'number', 0.039625, None),
- ((12, 10), 'BlueShift', 'number', 7, None),
- ((12, 11), 'BlueFuzz', 'number', 1, None),
- (10, 'StdHW', 'number', None, None),
- (11, 'StdVW', 'number', None, None),
- ((12, 12), 'StemSnapH', 'delta', None, None),
- ((12, 13), 'StemSnapV', 'delta', None, None),
- ((12, 14), 'ForceBold', 'number', 0, None),
- ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated
- ((12, 16), 'lenIV', 'number', None, None), # deprecated
- ((12, 17), 'LanguageGroup', 'number', 0, None),
- ((12, 18), 'ExpansionFactor', 'number', 0.06, None),
- ((12, 19), 'initialRandomSeed', 'number', 0, None),
- (20, 'defaultWidthX', 'number', 0, None),
- (21, 'nominalWidthX', 'number', 0, None),
- (19, 'Subrs', 'number', None, SubrsConverter()),
+ # opcode name argument type default converter
+ (22, "vsindex", "number", None, None),
+ (
+ blendOp,
+ kBlendDictOpName,
+ "blendList",
+ None,
+ None,
+ ), # This is for reading to/from XML: it not written to CFF.
+ (6, "BlueValues", "delta", None, None),
+ (7, "OtherBlues", "delta", None, None),
+ (8, "FamilyBlues", "delta", None, None),
+ (9, "FamilyOtherBlues", "delta", None, None),
+ ((12, 9), "BlueScale", "number", 0.039625, None),
+ ((12, 10), "BlueShift", "number", 7, None),
+ ((12, 11), "BlueFuzz", "number", 1, None),
+ (10, "StdHW", "number", None, None),
+ (11, "StdVW", "number", None, None),
+ ((12, 12), "StemSnapH", "delta", None, None),
+ ((12, 13), "StemSnapV", "delta", None, None),
+ ((12, 14), "ForceBold", "number", 0, None),
+ ((12, 15), "ForceBoldThreshold", "number", None, None), # deprecated
+ ((12, 16), "lenIV", "number", None, None), # deprecated
+ ((12, 17), "LanguageGroup", "number", 0, None),
+ ((12, 18), "ExpansionFactor", "number", 0.06, None),
+ ((12, 19), "initialRandomSeed", "number", 0, None),
+ (20, "defaultWidthX", "number", 0, None),
+ (21, "nominalWidthX", "number", 0, None),
+ (19, "Subrs", "number", None, SubrsConverter()),
]
privateDictOperators2 = [
-# opcode name argument type default converter
- (22, "vsindex", 'number', None, None),
- (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF.
- (6, 'BlueValues', 'delta', None, None),
- (7, 'OtherBlues', 'delta', None, None),
- (8, 'FamilyBlues', 'delta', None, None),
- (9, 'FamilyOtherBlues', 'delta', None, None),
- ((12, 9), 'BlueScale', 'number', 0.039625, None),
- ((12, 10), 'BlueShift', 'number', 7, None),
- ((12, 11), 'BlueFuzz', 'number', 1, None),
- (10, 'StdHW', 'number', None, None),
- (11, 'StdVW', 'number', None, None),
- ((12, 12), 'StemSnapH', 'delta', None, None),
- ((12, 13), 'StemSnapV', 'delta', None, None),
- ((12, 17), 'LanguageGroup', 'number', 0, None),
- ((12, 18), 'ExpansionFactor', 'number', 0.06, None),
- (19, 'Subrs', 'number', None, SubrsConverter()),
+ # opcode name argument type default converter
+ (22, "vsindex", "number", None, None),
+ (
+ blendOp,
+ kBlendDictOpName,
+ "blendList",
+ None,
+ None,
+ ), # This is for reading to/from XML: it not written to CFF.
+ (6, "BlueValues", "delta", None, None),
+ (7, "OtherBlues", "delta", None, None),
+ (8, "FamilyBlues", "delta", None, None),
+ (9, "FamilyOtherBlues", "delta", None, None),
+ ((12, 9), "BlueScale", "number", 0.039625, None),
+ ((12, 10), "BlueShift", "number", 7, None),
+ ((12, 11), "BlueFuzz", "number", 1, None),
+ (10, "StdHW", "number", None, None),
+ (11, "StdVW", "number", None, None),
+ ((12, 12), "StemSnapH", "delta", None, None),
+ ((12, 13), "StemSnapV", "delta", None, None),
+ ((12, 17), "LanguageGroup", "number", 0, None),
+ ((12, 18), "ExpansionFactor", "number", 0.06, None),
+ (19, "Subrs", "number", None, SubrsConverter()),
]
def addConverters(table):
- for i in range(len(table)):
- op, name, arg, default, conv = table[i]
- if conv is not None:
- continue
- if arg in ("delta", "array"):
- conv = ArrayConverter()
- elif arg == "number":
- conv = NumberConverter()
- elif arg == "SID":
- conv = ASCIIConverter()
- elif arg == 'blendList':
- conv = None
- else:
- assert False
- table[i] = op, name, arg, default, conv
+ for i in range(len(table)):
+ op, name, arg, default, conv = table[i]
+ if conv is not None:
+ continue
+ if arg in ("delta", "array"):
+ conv = ArrayConverter()
+ elif arg == "number":
+ conv = NumberConverter()
+ elif arg == "SID":
+ conv = ASCIIConverter()
+ elif arg == "blendList":
+ conv = None
+ else:
+ assert False
+ table[i] = op, name, arg, default, conv
addConverters(privateDictOperators)
@@ -2239,683 +2301,1025 @@ addConverters(topDictOperators)
class TopDictDecompiler(psCharStrings.DictDecompiler):
- operators = buildOperatorDict(topDictOperators)
+ operators = buildOperatorDict(topDictOperators)
class PrivateDictDecompiler(psCharStrings.DictDecompiler):
- operators = buildOperatorDict(privateDictOperators)
+ operators = buildOperatorDict(privateDictOperators)
class DictCompiler(object):
- maxBlendStack = 0
-
- def __init__(self, dictObj, strings, parent, isCFF2=None):
- if strings:
- assert isinstance(strings, IndexedStrings)
- if isCFF2 is None and hasattr(parent, "isCFF2"):
- isCFF2 = parent.isCFF2
- assert isCFF2 is not None
- self.isCFF2 = isCFF2
- self.dictObj = dictObj
- self.strings = strings
- self.parent = parent
- rawDict = {}
- for name in dictObj.order:
- value = getattr(dictObj, name, None)
- if value is None:
- continue
- conv = dictObj.converters[name]
- value = conv.write(dictObj, value)
- if value == dictObj.defaults.get(name):
- continue
- rawDict[name] = value
- self.rawDict = rawDict
-
- def setPos(self, pos, endPos):
- pass
-
- def getDataLength(self):
- return len(self.compile("getDataLength"))
-
- def compile(self, reason):
- log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
- rawDict = self.rawDict
- data = []
- for name in self.dictObj.order:
- value = rawDict.get(name)
- if value is None:
- continue
- op, argType = self.opcodes[name]
- if isinstance(argType, tuple):
- l = len(argType)
- assert len(value) == l, "value doesn't match arg type"
- for i in range(l):
- arg = argType[i]
- v = value[i]
- arghandler = getattr(self, "arg_" + arg)
- data.append(arghandler(v))
- else:
- arghandler = getattr(self, "arg_" + argType)
- data.append(arghandler(value))
- data.append(op)
- data = bytesjoin(data)
- return data
-
- def toFile(self, file):
- data = self.compile("toFile")
- file.write(data)
-
- def arg_number(self, num):
- if isinstance(num, list):
- data = [encodeNumber(val) for val in num]
- data.append(encodeNumber(1))
- data.append(bytechr(blendOp))
- datum = bytesjoin(data)
- else:
- datum = encodeNumber(num)
- return datum
-
- def arg_SID(self, s):
- return psCharStrings.encodeIntCFF(self.strings.getSID(s))
-
- def arg_array(self, value):
- data = []
- for num in value:
- data.append(self.arg_number(num))
- return bytesjoin(data)
-
- def arg_delta(self, value):
- if not value:
- return b""
- val0 = value[0]
- if isinstance(val0, list):
- data = self.arg_delta_blend(value)
- else:
- out = []
- last = 0
- for v in value:
- out.append(v - last)
- last = v
- data = []
- for num in out:
- data.append(encodeNumber(num))
- return bytesjoin(data)
-
-
- def arg_delta_blend(self, value):
- """A delta list with blend lists has to be *all* blend lists.
-
- The value is a list is arranged as follows::
-
- [
- [V0, d0..dn]
- [V1, d0..dn]
- ...
- [Vm, d0..dn]
- ]
-
- ``V`` is the absolute coordinate value from the default font, and ``d0-dn``
- are the delta values from the *n* regions. Each ``V`` is an absolute
- coordinate from the default font.
-
- We want to return a list::
-
- [
- [v0, v1..vm]
- [d0..dn]
- ...
- [d0..dn]
- numBlends
- blendOp
- ]
-
- where each ``v`` is relative to the previous default font value.
- """
- numMasters = len(value[0])
- numBlends = len(value)
- numStack = (numBlends * numMasters) + 1
- if numStack > self.maxBlendStack:
- # Figure out the max number of value we can blend
- # and divide this list up into chunks of that size.
-
- numBlendValues = int((self.maxBlendStack - 1) / numMasters)
- out = []
- while True:
- numVal = min(len(value), numBlendValues)
- if numVal == 0:
- break
- valList = value[0:numVal]
- out1 = self.arg_delta_blend(valList)
- out.extend(out1)
- value = value[numVal:]
- else:
- firstList = [0] * numBlends
- deltaList = [None] * numBlends
- i = 0
- prevVal = 0
- while i < numBlends:
- # For PrivateDict BlueValues, the default font
- # values are absolute, not relative.
- # Must convert these back to relative coordinates
- # befor writing to CFF2.
- defaultValue = value[i][0]
- firstList[i] = defaultValue - prevVal
- prevVal = defaultValue
- deltaList[i] = value[i][1:]
- i += 1
-
- relValueList = firstList
- for blendList in deltaList:
- relValueList.extend(blendList)
- out = [encodeNumber(val) for val in relValueList]
- out.append(encodeNumber(numBlends))
- out.append(bytechr(blendOp))
- return out
+ maxBlendStack = 0
+
+ def __init__(self, dictObj, strings, parent, isCFF2=None):
+ if strings:
+ assert isinstance(strings, IndexedStrings)
+ if isCFF2 is None and hasattr(parent, "isCFF2"):
+ isCFF2 = parent.isCFF2
+ assert isCFF2 is not None
+ self.isCFF2 = isCFF2
+ self.dictObj = dictObj
+ self.strings = strings
+ self.parent = parent
+ rawDict = {}
+ for name in dictObj.order:
+ value = getattr(dictObj, name, None)
+ if value is None:
+ continue
+ conv = dictObj.converters[name]
+ value = conv.write(dictObj, value)
+ if value == dictObj.defaults.get(name):
+ continue
+ rawDict[name] = value
+ self.rawDict = rawDict
+
+ def setPos(self, pos, endPos):
+ pass
+
+ def getDataLength(self):
+ return len(self.compile("getDataLength"))
+
+ def compile(self, reason):
+ log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
+ rawDict = self.rawDict
+ data = []
+ for name in self.dictObj.order:
+ value = rawDict.get(name)
+ if value is None:
+ continue
+ op, argType = self.opcodes[name]
+ if isinstance(argType, tuple):
+ l = len(argType)
+ assert len(value) == l, "value doesn't match arg type"
+ for i in range(l):
+ arg = argType[i]
+ v = value[i]
+ arghandler = getattr(self, "arg_" + arg)
+ data.append(arghandler(v))
+ else:
+ arghandler = getattr(self, "arg_" + argType)
+ data.append(arghandler(value))
+ data.append(op)
+ data = bytesjoin(data)
+ return data
+
+ def toFile(self, file):
+ data = self.compile("toFile")
+ file.write(data)
+
+ def arg_number(self, num):
+ if isinstance(num, list):
+ data = [encodeNumber(val) for val in num]
+ data.append(encodeNumber(1))
+ data.append(bytechr(blendOp))
+ datum = bytesjoin(data)
+ else:
+ datum = encodeNumber(num)
+ return datum
+
+ def arg_SID(self, s):
+ return psCharStrings.encodeIntCFF(self.strings.getSID(s))
+
+ def arg_array(self, value):
+ data = []
+ for num in value:
+ data.append(self.arg_number(num))
+ return bytesjoin(data)
+
+ def arg_delta(self, value):
+ if not value:
+ return b""
+ val0 = value[0]
+ if isinstance(val0, list):
+ data = self.arg_delta_blend(value)
+ else:
+ out = []
+ last = 0
+ for v in value:
+ out.append(v - last)
+ last = v
+ data = []
+ for num in out:
+ data.append(encodeNumber(num))
+ return bytesjoin(data)
+
+ def arg_delta_blend(self, value):
+ """A delta list with blend lists has to be *all* blend lists.
+
+ The value is a list is arranged as follows::
+
+ [
+ [V0, d0..dn]
+ [V1, d0..dn]
+ ...
+ [Vm, d0..dn]
+ ]
+
+ ``V`` is the absolute coordinate value from the default font, and ``d0-dn``
+ are the delta values from the *n* regions. Each ``V`` is an absolute
+ coordinate from the default font.
+
+ We want to return a list::
+
+ [
+ [v0, v1..vm]
+ [d0..dn]
+ ...
+ [d0..dn]
+ numBlends
+ blendOp
+ ]
+
+ where each ``v`` is relative to the previous default font value.
+ """
+ numMasters = len(value[0])
+ numBlends = len(value)
+ numStack = (numBlends * numMasters) + 1
+ if numStack > self.maxBlendStack:
+ # Figure out the max number of value we can blend
+ # and divide this list up into chunks of that size.
+
+ numBlendValues = int((self.maxBlendStack - 1) / numMasters)
+ out = []
+ while True:
+ numVal = min(len(value), numBlendValues)
+ if numVal == 0:
+ break
+ valList = value[0:numVal]
+ out1 = self.arg_delta_blend(valList)
+ out.extend(out1)
+ value = value[numVal:]
+ else:
+ firstList = [0] * numBlends
+ deltaList = [None] * numBlends
+ i = 0
+ prevVal = 0
+ while i < numBlends:
+ # For PrivateDict BlueValues, the default font
+ # values are absolute, not relative.
+ # Must convert these back to relative coordinates
+ # befor writing to CFF2.
+ defaultValue = value[i][0]
+ firstList[i] = defaultValue - prevVal
+ prevVal = defaultValue
+ deltaList[i] = value[i][1:]
+ i += 1
+
+ relValueList = firstList
+ for blendList in deltaList:
+ relValueList.extend(blendList)
+ out = [encodeNumber(val) for val in relValueList]
+ out.append(encodeNumber(numBlends))
+ out.append(bytechr(blendOp))
+ return out
def encodeNumber(num):
- if isinstance(num, float):
- return psCharStrings.encodeFloat(num)
- else:
- return psCharStrings.encodeIntCFF(num)
+ if isinstance(num, float):
+ return psCharStrings.encodeFloat(num)
+ else:
+ return psCharStrings.encodeIntCFF(num)
class TopDictCompiler(DictCompiler):
-
- opcodes = buildOpcodeDict(topDictOperators)
-
- def getChildren(self, strings):
- isCFF2 = self.isCFF2
- children = []
- if self.dictObj.cff2GetGlyphOrder is None:
- if hasattr(self.dictObj, "charset") and self.dictObj.charset:
- if hasattr(self.dictObj, "ROS"): # aka isCID
- charsetCode = None
- else:
- charsetCode = getStdCharSet(self.dictObj.charset)
- if charsetCode is None:
- children.append(CharsetCompiler(strings, self.dictObj.charset, self))
- else:
- self.rawDict["charset"] = charsetCode
- if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
- encoding = self.dictObj.Encoding
- if not isinstance(encoding, str):
- children.append(EncodingCompiler(strings, encoding, self))
- else:
- if hasattr(self.dictObj, "VarStore"):
- varStoreData = self.dictObj.VarStore
- varStoreComp = VarStoreCompiler(varStoreData, self)
- children.append(varStoreComp)
- if hasattr(self.dictObj, "FDSelect"):
- # I have not yet supported merging a ttx CFF-CID font, as there are
- # interesting issues about merging the FDArrays. Here I assume that
- # either the font was read from XML, and the FDSelect indices are all
- # in the charstring data, or the FDSelect array is already fully defined.
- fdSelect = self.dictObj.FDSelect
- # probably read in from XML; assume fdIndex in CharString data
- if len(fdSelect) == 0:
- charStrings = self.dictObj.CharStrings
- for name in self.dictObj.charset:
- fdSelect.append(charStrings[name].fdSelectIndex)
- fdSelectComp = FDSelectCompiler(fdSelect, self)
- children.append(fdSelectComp)
- if hasattr(self.dictObj, "CharStrings"):
- items = []
- charStrings = self.dictObj.CharStrings
- for name in self.dictObj.charset:
- items.append(charStrings[name])
- charStringsComp = CharStringsCompiler(
- items, strings, self, isCFF2=isCFF2)
- children.append(charStringsComp)
- if hasattr(self.dictObj, "FDArray"):
- # I have not yet supported merging a ttx CFF-CID font, as there are
- # interesting issues about merging the FDArrays. Here I assume that the
- # FDArray info is correct and complete.
- fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
- children.append(fdArrayIndexComp)
- children.extend(fdArrayIndexComp.getChildren(strings))
- if hasattr(self.dictObj, "Private"):
- privComp = self.dictObj.Private.getCompiler(strings, self)
- children.append(privComp)
- children.extend(privComp.getChildren(strings))
- return children
+ opcodes = buildOpcodeDict(topDictOperators)
+
+ def getChildren(self, strings):
+ isCFF2 = self.isCFF2
+ children = []
+ if self.dictObj.cff2GetGlyphOrder is None:
+ if hasattr(self.dictObj, "charset") and self.dictObj.charset:
+ if hasattr(self.dictObj, "ROS"): # aka isCID
+ charsetCode = None
+ else:
+ charsetCode = getStdCharSet(self.dictObj.charset)
+ if charsetCode is None:
+ children.append(
+ CharsetCompiler(strings, self.dictObj.charset, self)
+ )
+ else:
+ self.rawDict["charset"] = charsetCode
+ if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
+ encoding = self.dictObj.Encoding
+ if not isinstance(encoding, str):
+ children.append(EncodingCompiler(strings, encoding, self))
+ else:
+ if hasattr(self.dictObj, "VarStore"):
+ varStoreData = self.dictObj.VarStore
+ varStoreComp = VarStoreCompiler(varStoreData, self)
+ children.append(varStoreComp)
+ if hasattr(self.dictObj, "FDSelect"):
+ # I have not yet supported merging a ttx CFF-CID font, as there are
+ # interesting issues about merging the FDArrays. Here I assume that
+ # either the font was read from XML, and the FDSelect indices are all
+ # in the charstring data, or the FDSelect array is already fully defined.
+ fdSelect = self.dictObj.FDSelect
+ # probably read in from XML; assume fdIndex in CharString data
+ if len(fdSelect) == 0:
+ charStrings = self.dictObj.CharStrings
+ for name in self.dictObj.charset:
+ fdSelect.append(charStrings[name].fdSelectIndex)
+ fdSelectComp = FDSelectCompiler(fdSelect, self)
+ children.append(fdSelectComp)
+ if hasattr(self.dictObj, "CharStrings"):
+ items = []
+ charStrings = self.dictObj.CharStrings
+ for name in self.dictObj.charset:
+ items.append(charStrings[name])
+ charStringsComp = CharStringsCompiler(items, strings, self, isCFF2=isCFF2)
+ children.append(charStringsComp)
+ if hasattr(self.dictObj, "FDArray"):
+ # I have not yet supported merging a ttx CFF-CID font, as there are
+ # interesting issues about merging the FDArrays. Here I assume that the
+ # FDArray info is correct and complete.
+ fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
+ children.append(fdArrayIndexComp)
+ children.extend(fdArrayIndexComp.getChildren(strings))
+ if hasattr(self.dictObj, "Private"):
+ privComp = self.dictObj.Private.getCompiler(strings, self)
+ children.append(privComp)
+ children.extend(privComp.getChildren(strings))
+ return children
class FontDictCompiler(DictCompiler):
- opcodes = buildOpcodeDict(topDictOperators)
-
- def __init__(self, dictObj, strings, parent, isCFF2=None):
- super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
- #
- # We now take some effort to detect if there were any key/value pairs
- # supplied that were ignored in the FontDict context, and issue a warning
- # for those cases.
- #
- ignoredNames = []
- dictObj = self.dictObj
- for name in sorted(set(dictObj.converters) - set(dictObj.order)):
- if name in dictObj.rawDict:
- # The font was directly read from binary. In this
- # case, we want to report *all* "useless" key/value
- # pairs that are in the font, not just the ones that
- # are different from the default.
- ignoredNames.append(name)
- else:
- # The font was probably read from a TTX file. We only
- # warn about keys whos value is not the default. The
- # ones that have the default value will not be written
- # to binary anyway.
- default = dictObj.defaults.get(name)
- if default is not None:
- conv = dictObj.converters[name]
- default = conv.read(dictObj, default)
- if getattr(dictObj, name, None) != default:
- ignoredNames.append(name)
- if ignoredNames:
- log.warning(
- "Some CFF FDArray/FontDict keys were ignored upon compile: " +
- " ".join(sorted(ignoredNames)))
-
- def getChildren(self, strings):
- children = []
- if hasattr(self.dictObj, "Private"):
- privComp = self.dictObj.Private.getCompiler(strings, self)
- children.append(privComp)
- children.extend(privComp.getChildren(strings))
- return children
+ opcodes = buildOpcodeDict(topDictOperators)
+
+ def __init__(self, dictObj, strings, parent, isCFF2=None):
+ super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
+ #
+ # We now take some effort to detect if there were any key/value pairs
+ # supplied that were ignored in the FontDict context, and issue a warning
+ # for those cases.
+ #
+ ignoredNames = []
+ dictObj = self.dictObj
+ for name in sorted(set(dictObj.converters) - set(dictObj.order)):
+ if name in dictObj.rawDict:
+ # The font was directly read from binary. In this
+ # case, we want to report *all* "useless" key/value
+ # pairs that are in the font, not just the ones that
+ # are different from the default.
+ ignoredNames.append(name)
+ else:
+ # The font was probably read from a TTX file. We only
+ # warn about keys whos value is not the default. The
+ # ones that have the default value will not be written
+ # to binary anyway.
+ default = dictObj.defaults.get(name)
+ if default is not None:
+ conv = dictObj.converters[name]
+ default = conv.read(dictObj, default)
+ if getattr(dictObj, name, None) != default:
+ ignoredNames.append(name)
+ if ignoredNames:
+ log.warning(
+ "Some CFF FDArray/FontDict keys were ignored upon compile: "
+ + " ".join(sorted(ignoredNames))
+ )
+
+ def getChildren(self, strings):
+ children = []
+ if hasattr(self.dictObj, "Private"):
+ privComp = self.dictObj.Private.getCompiler(strings, self)
+ children.append(privComp)
+ children.extend(privComp.getChildren(strings))
+ return children
class PrivateDictCompiler(DictCompiler):
+ maxBlendStack = maxStackLimit
+ opcodes = buildOpcodeDict(privateDictOperators)
- maxBlendStack = maxStackLimit
- opcodes = buildOpcodeDict(privateDictOperators)
+ def setPos(self, pos, endPos):
+ size = endPos - pos
+ self.parent.rawDict["Private"] = size, pos
+ self.pos = pos
- def setPos(self, pos, endPos):
- size = endPos - pos
- self.parent.rawDict["Private"] = size, pos
- self.pos = pos
-
- def getChildren(self, strings):
- children = []
- if hasattr(self.dictObj, "Subrs"):
- children.append(self.dictObj.Subrs.getCompiler(strings, self))
- return children
+ def getChildren(self, strings):
+ children = []
+ if hasattr(self.dictObj, "Subrs"):
+ children.append(self.dictObj.Subrs.getCompiler(strings, self))
+ return children
class BaseDict(object):
-
- def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
- assert (isCFF2 is None) == (file is None)
- self.rawDict = {}
- self.skipNames = []
- self.strings = strings
- if file is None:
- return
- self._isCFF2 = isCFF2
- self.file = file
- if offset is not None:
- log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
- self.offset = offset
-
- def decompile(self, data):
- log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
- dec = self.decompilerClass(self.strings, self)
- dec.decompile(data)
- self.rawDict = dec.getDict()
- self.postDecompile()
-
- def postDecompile(self):
- pass
-
- def getCompiler(self, strings, parent, isCFF2=None):
- return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
-
- def __getattr__(self, name):
- if name[:2] == name[-2:] == "__":
- # to make deepcopy() and pickle.load() work, we need to signal with
- # AttributeError that dunder methods like '__deepcopy__' or '__getstate__'
- # aren't implemented. For more details, see:
- # https://github.com/fonttools/fonttools/pull/1488
- raise AttributeError(name)
- value = self.rawDict.get(name, None)
- if value is None:
- value = self.defaults.get(name)
- if value is None:
- raise AttributeError(name)
- conv = self.converters[name]
- value = conv.read(self, value)
- setattr(self, name, value)
- return value
-
- def toXML(self, xmlWriter):
- for name in self.order:
- if name in self.skipNames:
- continue
- value = getattr(self, name, None)
- # XXX For "charset" we never skip calling xmlWrite even if the
- # value is None, so we always write the following XML comment:
- #
- # <!-- charset is dumped separately as the 'GlyphOrder' element -->
- #
- # Charset is None when 'CFF ' table is imported from XML into an
- # empty TTFont(). By writing this comment all the time, we obtain
- # the same XML output whether roundtripping XML-to-XML or
- # dumping binary-to-XML
- if value is None and name != "charset":
- continue
- conv = self.converters[name]
- conv.xmlWrite(xmlWriter, name, value)
- ignoredNames = set(self.rawDict) - set(self.order)
- if ignoredNames:
- xmlWriter.comment(
- "some keys were ignored: %s" % " ".join(sorted(ignoredNames)))
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content):
- conv = self.converters[name]
- value = conv.xmlRead(name, attrs, content, self)
- setattr(self, name, value)
+ def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
+ assert (isCFF2 is None) == (file is None)
+ self.rawDict = {}
+ self.skipNames = []
+ self.strings = strings
+ if file is None:
+ return
+ self._isCFF2 = isCFF2
+ self.file = file
+ if offset is not None:
+ log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
+ self.offset = offset
+
+ def decompile(self, data):
+ log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
+ dec = self.decompilerClass(self.strings, self)
+ dec.decompile(data)
+ self.rawDict = dec.getDict()
+ self.postDecompile()
+
+ def postDecompile(self):
+ pass
+
+ def getCompiler(self, strings, parent, isCFF2=None):
+ return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
+
+ def __getattr__(self, name):
+ if name[:2] == name[-2:] == "__":
+ # to make deepcopy() and pickle.load() work, we need to signal with
+ # AttributeError that dunder methods like '__deepcopy__' or '__getstate__'
+ # aren't implemented. For more details, see:
+ # https://github.com/fonttools/fonttools/pull/1488
+ raise AttributeError(name)
+ value = self.rawDict.get(name, None)
+ if value is None:
+ value = self.defaults.get(name)
+ if value is None:
+ raise AttributeError(name)
+ conv = self.converters[name]
+ value = conv.read(self, value)
+ setattr(self, name, value)
+ return value
+
+ def toXML(self, xmlWriter):
+ for name in self.order:
+ if name in self.skipNames:
+ continue
+ value = getattr(self, name, None)
+ # XXX For "charset" we never skip calling xmlWrite even if the
+ # value is None, so we always write the following XML comment:
+ #
+ # <!-- charset is dumped separately as the 'GlyphOrder' element -->
+ #
+ # Charset is None when 'CFF ' table is imported from XML into an
+ # empty TTFont(). By writing this comment all the time, we obtain
+ # the same XML output whether roundtripping XML-to-XML or
+ # dumping binary-to-XML
+ if value is None and name != "charset":
+ continue
+ conv = self.converters[name]
+ conv.xmlWrite(xmlWriter, name, value)
+ ignoredNames = set(self.rawDict) - set(self.order)
+ if ignoredNames:
+ xmlWriter.comment(
+ "some keys were ignored: %s" % " ".join(sorted(ignoredNames))
+ )
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content):
+ conv = self.converters[name]
+ value = conv.xmlRead(name, attrs, content, self)
+ setattr(self, name, value)
class TopDict(BaseDict):
- """The ``TopDict`` represents the top-level dictionary holding font
- information. CFF2 tables contain a restricted set of top-level entries
- as described `here <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#7-top-dict-data>`_,
- but CFF tables may contain a wider range of information. This information
- can be accessed through attributes or through the dictionary returned
- through the ``rawDict`` property:
-
- .. code:: python
-
- font = tt["CFF "].cff[0]
- font.FamilyName
- # 'Linux Libertine O'
- font.rawDict["FamilyName"]
- # 'Linux Libertine O'
-
- More information is available in the CFF file's private dictionary, accessed
- via the ``Private`` property:
-
- .. code:: python
-
- tt["CFF "].cff[0].Private.BlueValues
- # [-15, 0, 515, 515, 666, 666]
-
- """
-
- defaults = buildDefaults(topDictOperators)
- converters = buildConverters(topDictOperators)
- compilerClass = TopDictCompiler
- order = buildOrder(topDictOperators)
- decompilerClass = TopDictDecompiler
-
- def __init__(self, strings=None, file=None, offset=None,
- GlobalSubrs=None, cff2GetGlyphOrder=None, isCFF2=None):
- super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
- self.cff2GetGlyphOrder = cff2GetGlyphOrder
- self.GlobalSubrs = GlobalSubrs
- if isCFF2:
- self.defaults = buildDefaults(topDictOperators2)
- self.charset = cff2GetGlyphOrder()
- self.order = buildOrder(topDictOperators2)
- else:
- self.defaults = buildDefaults(topDictOperators)
- self.order = buildOrder(topDictOperators)
-
- def getGlyphOrder(self):
- """Returns a list of glyph names in the CFF font."""
- return self.charset
-
- def postDecompile(self):
- offset = self.rawDict.get("CharStrings")
- if offset is None:
- return
- # get the number of glyphs beforehand.
- self.file.seek(offset)
- if self._isCFF2:
- self.numGlyphs = readCard32(self.file)
- else:
- self.numGlyphs = readCard16(self.file)
-
- def toXML(self, xmlWriter):
- if hasattr(self, "CharStrings"):
- self.decompileAllCharStrings()
- if hasattr(self, "ROS"):
- self.skipNames = ['Encoding']
- if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
- # these values have default values, but I only want them to show up
- # in CID fonts.
- self.skipNames = [
- 'CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount']
- BaseDict.toXML(self, xmlWriter)
-
- def decompileAllCharStrings(self):
- # Make sure that all the Private Dicts have been instantiated.
- for i, charString in enumerate(self.CharStrings.values()):
- try:
- charString.decompile()
- except:
- log.error("Error in charstring %s", i)
- raise
-
- def recalcFontBBox(self):
- fontBBox = None
- for charString in self.CharStrings.values():
- bounds = charString.calcBounds(self.CharStrings)
- if bounds is not None:
- if fontBBox is not None:
- fontBBox = unionRect(fontBBox, bounds)
- else:
- fontBBox = bounds
-
- if fontBBox is None:
- self.FontBBox = self.defaults['FontBBox'][:]
- else:
- self.FontBBox = list(intRect(fontBBox))
+ """The ``TopDict`` represents the top-level dictionary holding font
+ information. CFF2 tables contain a restricted set of top-level entries
+ as described `here <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#7-top-dict-data>`_,
+ but CFF tables may contain a wider range of information. This information
+ can be accessed through attributes or through the dictionary returned
+ through the ``rawDict`` property:
+
+ .. code:: python
+
+ font = tt["CFF "].cff[0]
+ font.FamilyName
+ # 'Linux Libertine O'
+ font.rawDict["FamilyName"]
+ # 'Linux Libertine O'
+
+ More information is available in the CFF file's private dictionary, accessed
+ via the ``Private`` property:
+
+ .. code:: python
+
+ tt["CFF "].cff[0].Private.BlueValues
+ # [-15, 0, 515, 515, 666, 666]
+
+ """
+
+ defaults = buildDefaults(topDictOperators)
+ converters = buildConverters(topDictOperators)
+ compilerClass = TopDictCompiler
+ order = buildOrder(topDictOperators)
+ decompilerClass = TopDictDecompiler
+
+ def __init__(
+ self,
+ strings=None,
+ file=None,
+ offset=None,
+ GlobalSubrs=None,
+ cff2GetGlyphOrder=None,
+ isCFF2=None,
+ ):
+ super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
+ self.cff2GetGlyphOrder = cff2GetGlyphOrder
+ self.GlobalSubrs = GlobalSubrs
+ if isCFF2:
+ self.defaults = buildDefaults(topDictOperators2)
+ self.charset = cff2GetGlyphOrder()
+ self.order = buildOrder(topDictOperators2)
+ else:
+ self.defaults = buildDefaults(topDictOperators)
+ self.order = buildOrder(topDictOperators)
+
+ def getGlyphOrder(self):
+ """Returns a list of glyph names in the CFF font."""
+ return self.charset
+
+ def postDecompile(self):
+ offset = self.rawDict.get("CharStrings")
+ if offset is None:
+ return
+ # get the number of glyphs beforehand.
+ self.file.seek(offset)
+ if self._isCFF2:
+ self.numGlyphs = readCard32(self.file)
+ else:
+ self.numGlyphs = readCard16(self.file)
+
+ def toXML(self, xmlWriter):
+ if hasattr(self, "CharStrings"):
+ self.decompileAllCharStrings()
+ if hasattr(self, "ROS"):
+ self.skipNames = ["Encoding"]
+ if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
+ # these values have default values, but I only want them to show up
+ # in CID fonts.
+ self.skipNames = [
+ "CIDFontVersion",
+ "CIDFontRevision",
+ "CIDFontType",
+ "CIDCount",
+ ]
+ BaseDict.toXML(self, xmlWriter)
+
+ def decompileAllCharStrings(self):
+ # Make sure that all the Private Dicts have been instantiated.
+ for i, charString in enumerate(self.CharStrings.values()):
+ try:
+ charString.decompile()
+ except:
+ log.error("Error in charstring %s", i)
+ raise
+
+ def recalcFontBBox(self):
+ fontBBox = None
+ for charString in self.CharStrings.values():
+ bounds = charString.calcBounds(self.CharStrings)
+ if bounds is not None:
+ if fontBBox is not None:
+ fontBBox = unionRect(fontBBox, bounds)
+ else:
+ fontBBox = bounds
+
+ if fontBBox is None:
+ self.FontBBox = self.defaults["FontBBox"][:]
+ else:
+ self.FontBBox = list(intRect(fontBBox))
class FontDict(BaseDict):
- #
- # Since fonttools used to pass a lot of fields that are not relevant in the FDArray
- # FontDict, there are 'ttx' files in the wild that contain all these. These got in
- # the ttx files because fonttools writes explicit values for all the TopDict default
- # values. These are not actually illegal in the context of an FDArray FontDict - you
- # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are
- # useless since current major company CFF interpreters ignore anything but the set
- # listed in this file. So, we just silently skip them. An exception is Weight: this
- # is not used by any interpreter, but some foundries have asked that this be
- # supported in FDArray FontDicts just to preserve information about the design when
- # the font is being inspected.
- #
- # On top of that, there are fonts out there that contain such useless FontDict values.
- #
- # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading
- # from binary or when reading from XML, but by overriding `order` with a limited
- # list of names, we ensure that only the useful names ever get exported to XML and
- # ever get compiled into the binary font.
- #
- # We override compilerClass so we can warn about "useless" key/value pairs, either
- # from the original binary font or from TTX input.
- #
- # See:
- # - https://github.com/fonttools/fonttools/issues/740
- # - https://github.com/fonttools/fonttools/issues/601
- # - https://github.com/adobe-type-tools/afdko/issues/137
- #
- defaults = {}
- converters = buildConverters(topDictOperators)
- compilerClass = FontDictCompiler
- orderCFF = ['FontName', 'FontMatrix', 'Weight', 'Private']
- orderCFF2 = ['Private']
- decompilerClass = TopDictDecompiler
-
- def __init__(self, strings=None, file=None, offset=None,
- GlobalSubrs=None, isCFF2=None, vstore=None):
- super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
- self.vstore = vstore
- self.setCFF2(isCFF2)
-
- def setCFF2(self, isCFF2):
- # isCFF2 may be None.
- if isCFF2:
- self.order = self.orderCFF2
- self._isCFF2 = True
- else:
- self.order = self.orderCFF
- self._isCFF2 = False
+ #
+ # Since fonttools used to pass a lot of fields that are not relevant in the FDArray
+ # FontDict, there are 'ttx' files in the wild that contain all these. These got in
+ # the ttx files because fonttools writes explicit values for all the TopDict default
+ # values. These are not actually illegal in the context of an FDArray FontDict - you
+ # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are
+ # useless since current major company CFF interpreters ignore anything but the set
+ # listed in this file. So, we just silently skip them. An exception is Weight: this
+ # is not used by any interpreter, but some foundries have asked that this be
+ # supported in FDArray FontDicts just to preserve information about the design when
+ # the font is being inspected.
+ #
+ # On top of that, there are fonts out there that contain such useless FontDict values.
+ #
+ # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading
+ # from binary or when reading from XML, but by overriding `order` with a limited
+ # list of names, we ensure that only the useful names ever get exported to XML and
+ # ever get compiled into the binary font.
+ #
+ # We override compilerClass so we can warn about "useless" key/value pairs, either
+ # from the original binary font or from TTX input.
+ #
+ # See:
+ # - https://github.com/fonttools/fonttools/issues/740
+ # - https://github.com/fonttools/fonttools/issues/601
+ # - https://github.com/adobe-type-tools/afdko/issues/137
+ #
+ defaults = {}
+ converters = buildConverters(topDictOperators)
+ compilerClass = FontDictCompiler
+ orderCFF = ["FontName", "FontMatrix", "Weight", "Private"]
+ orderCFF2 = ["Private"]
+ decompilerClass = TopDictDecompiler
+
+ def __init__(
+ self,
+ strings=None,
+ file=None,
+ offset=None,
+ GlobalSubrs=None,
+ isCFF2=None,
+ vstore=None,
+ ):
+ super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
+ self.vstore = vstore
+ self.setCFF2(isCFF2)
+
+ def setCFF2(self, isCFF2):
+ # isCFF2 may be None.
+ if isCFF2:
+ self.order = self.orderCFF2
+ self._isCFF2 = True
+ else:
+ self.order = self.orderCFF
+ self._isCFF2 = False
class PrivateDict(BaseDict):
- defaults = buildDefaults(privateDictOperators)
- converters = buildConverters(privateDictOperators)
- order = buildOrder(privateDictOperators)
- decompilerClass = PrivateDictDecompiler
- compilerClass = PrivateDictCompiler
-
- def __init__(self, strings=None, file=None, offset=None, isCFF2=None,
- vstore=None):
- super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
- self.vstore = vstore
- if isCFF2:
- self.defaults = buildDefaults(privateDictOperators2)
- self.order = buildOrder(privateDictOperators2)
- # Provide dummy values. This avoids needing to provide
- # an isCFF2 state in a lot of places.
- self.nominalWidthX = self.defaultWidthX = None
- else:
- self.defaults = buildDefaults(privateDictOperators)
- self.order = buildOrder(privateDictOperators)
-
- @property
- def in_cff2(self):
- return self._isCFF2
-
- def getNumRegions(self, vi=None): # called from misc/psCharStrings.py
- # if getNumRegions is being called, we can assume that VarStore exists.
- if vi is None:
- if hasattr(self, 'vsindex'):
- vi = self.vsindex
- else:
- vi = 0
- numRegions = self.vstore.getNumRegions(vi)
- return numRegions
+ defaults = buildDefaults(privateDictOperators)
+ converters = buildConverters(privateDictOperators)
+ order = buildOrder(privateDictOperators)
+ decompilerClass = PrivateDictDecompiler
+ compilerClass = PrivateDictCompiler
+
+ def __init__(self, strings=None, file=None, offset=None, isCFF2=None, vstore=None):
+ super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
+ self.vstore = vstore
+ if isCFF2:
+ self.defaults = buildDefaults(privateDictOperators2)
+ self.order = buildOrder(privateDictOperators2)
+ # Provide dummy values. This avoids needing to provide
+ # an isCFF2 state in a lot of places.
+ self.nominalWidthX = self.defaultWidthX = None
+ else:
+ self.defaults = buildDefaults(privateDictOperators)
+ self.order = buildOrder(privateDictOperators)
+
+ @property
+ def in_cff2(self):
+ return self._isCFF2
+
+ def getNumRegions(self, vi=None): # called from misc/psCharStrings.py
+ # if getNumRegions is being called, we can assume that VarStore exists.
+ if vi is None:
+ if hasattr(self, "vsindex"):
+ vi = self.vsindex
+ else:
+ vi = 0
+ numRegions = self.vstore.getNumRegions(vi)
+ return numRegions
class IndexedStrings(object):
- """SID -> string mapping."""
-
- def __init__(self, file=None):
- if file is None:
- strings = []
- else:
- strings = [
- tostr(s, encoding="latin1")
- for s in Index(file, isCFF2=False)
- ]
- self.strings = strings
-
- def getCompiler(self):
- return IndexedStringsCompiler(self, None, self, isCFF2=False)
-
- def __len__(self):
- return len(self.strings)
-
- def __getitem__(self, SID):
- if SID < cffStandardStringCount:
- return cffStandardStrings[SID]
- else:
- return self.strings[SID - cffStandardStringCount]
-
- def getSID(self, s):
- if not hasattr(self, "stringMapping"):
- self.buildStringMapping()
- s = tostr(s, encoding="latin1")
- if s in cffStandardStringMapping:
- SID = cffStandardStringMapping[s]
- elif s in self.stringMapping:
- SID = self.stringMapping[s]
- else:
- SID = len(self.strings) + cffStandardStringCount
- self.strings.append(s)
- self.stringMapping[s] = SID
- return SID
-
- def getStrings(self):
- return self.strings
-
- def buildStringMapping(self):
- self.stringMapping = {}
- for index in range(len(self.strings)):
- self.stringMapping[self.strings[index]] = index + cffStandardStringCount
+ """SID -> string mapping."""
+
+ def __init__(self, file=None):
+ if file is None:
+ strings = []
+ else:
+ strings = [tostr(s, encoding="latin1") for s in Index(file, isCFF2=False)]
+ self.strings = strings
+
+ def getCompiler(self):
+ return IndexedStringsCompiler(self, None, self, isCFF2=False)
+
+ def __len__(self):
+ return len(self.strings)
+
+ def __getitem__(self, SID):
+ if SID < cffStandardStringCount:
+ return cffStandardStrings[SID]
+ else:
+ return self.strings[SID - cffStandardStringCount]
+
+ def getSID(self, s):
+ if not hasattr(self, "stringMapping"):
+ self.buildStringMapping()
+ s = tostr(s, encoding="latin1")
+ if s in cffStandardStringMapping:
+ SID = cffStandardStringMapping[s]
+ elif s in self.stringMapping:
+ SID = self.stringMapping[s]
+ else:
+ SID = len(self.strings) + cffStandardStringCount
+ self.strings.append(s)
+ self.stringMapping[s] = SID
+ return SID
+
+ def getStrings(self):
+ return self.strings
+
+ def buildStringMapping(self):
+ self.stringMapping = {}
+ for index in range(len(self.strings)):
+ self.stringMapping[self.strings[index]] = index + cffStandardStringCount
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical None #5176, version 1.0, 18 March 1998
-cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
- 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright',
- 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one',
- 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon',
- 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C',
- 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
- 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
- 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c',
- 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
- 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
- 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin',
- 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
- 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger',
- 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase',
- 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand',
- 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
- 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron',
- 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae',
- 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior',
- 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
- 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters',
- 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior',
- 'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring',
- 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
- 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
- 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
- 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
- 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
- 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
- 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
- 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
- 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
- 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
- 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
- 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
- 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
- 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
- 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
- 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
- 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
- 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
- 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
- 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
- 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
- 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
- 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
- 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
- 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
- 'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
- 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior',
- 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior',
- 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior',
- 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior',
- 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall',
- 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall',
- 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
- 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
- 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
- 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
- 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
- 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
- '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
- 'Semibold'
+cffStandardStrings = [
+ ".notdef",
+ "space",
+ "exclam",
+ "quotedbl",
+ "numbersign",
+ "dollar",
+ "percent",
+ "ampersand",
+ "quoteright",
+ "parenleft",
+ "parenright",
+ "asterisk",
+ "plus",
+ "comma",
+ "hyphen",
+ "period",
+ "slash",
+ "zero",
+ "one",
+ "two",
+ "three",
+ "four",
+ "five",
+ "six",
+ "seven",
+ "eight",
+ "nine",
+ "colon",
+ "semicolon",
+ "less",
+ "equal",
+ "greater",
+ "question",
+ "at",
+ "A",
+ "B",
+ "C",
+ "D",
+ "E",
+ "F",
+ "G",
+ "H",
+ "I",
+ "J",
+ "K",
+ "L",
+ "M",
+ "N",
+ "O",
+ "P",
+ "Q",
+ "R",
+ "S",
+ "T",
+ "U",
+ "V",
+ "W",
+ "X",
+ "Y",
+ "Z",
+ "bracketleft",
+ "backslash",
+ "bracketright",
+ "asciicircum",
+ "underscore",
+ "quoteleft",
+ "a",
+ "b",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ "h",
+ "i",
+ "j",
+ "k",
+ "l",
+ "m",
+ "n",
+ "o",
+ "p",
+ "q",
+ "r",
+ "s",
+ "t",
+ "u",
+ "v",
+ "w",
+ "x",
+ "y",
+ "z",
+ "braceleft",
+ "bar",
+ "braceright",
+ "asciitilde",
+ "exclamdown",
+ "cent",
+ "sterling",
+ "fraction",
+ "yen",
+ "florin",
+ "section",
+ "currency",
+ "quotesingle",
+ "quotedblleft",
+ "guillemotleft",
+ "guilsinglleft",
+ "guilsinglright",
+ "fi",
+ "fl",
+ "endash",
+ "dagger",
+ "daggerdbl",
+ "periodcentered",
+ "paragraph",
+ "bullet",
+ "quotesinglbase",
+ "quotedblbase",
+ "quotedblright",
+ "guillemotright",
+ "ellipsis",
+ "perthousand",
+ "questiondown",
+ "grave",
+ "acute",
+ "circumflex",
+ "tilde",
+ "macron",
+ "breve",
+ "dotaccent",
+ "dieresis",
+ "ring",
+ "cedilla",
+ "hungarumlaut",
+ "ogonek",
+ "caron",
+ "emdash",
+ "AE",
+ "ordfeminine",
+ "Lslash",
+ "Oslash",
+ "OE",
+ "ordmasculine",
+ "ae",
+ "dotlessi",
+ "lslash",
+ "oslash",
+ "oe",
+ "germandbls",
+ "onesuperior",
+ "logicalnot",
+ "mu",
+ "trademark",
+ "Eth",
+ "onehalf",
+ "plusminus",
+ "Thorn",
+ "onequarter",
+ "divide",
+ "brokenbar",
+ "degree",
+ "thorn",
+ "threequarters",
+ "twosuperior",
+ "registered",
+ "minus",
+ "eth",
+ "multiply",
+ "threesuperior",
+ "copyright",
+ "Aacute",
+ "Acircumflex",
+ "Adieresis",
+ "Agrave",
+ "Aring",
+ "Atilde",
+ "Ccedilla",
+ "Eacute",
+ "Ecircumflex",
+ "Edieresis",
+ "Egrave",
+ "Iacute",
+ "Icircumflex",
+ "Idieresis",
+ "Igrave",
+ "Ntilde",
+ "Oacute",
+ "Ocircumflex",
+ "Odieresis",
+ "Ograve",
+ "Otilde",
+ "Scaron",
+ "Uacute",
+ "Ucircumflex",
+ "Udieresis",
+ "Ugrave",
+ "Yacute",
+ "Ydieresis",
+ "Zcaron",
+ "aacute",
+ "acircumflex",
+ "adieresis",
+ "agrave",
+ "aring",
+ "atilde",
+ "ccedilla",
+ "eacute",
+ "ecircumflex",
+ "edieresis",
+ "egrave",
+ "iacute",
+ "icircumflex",
+ "idieresis",
+ "igrave",
+ "ntilde",
+ "oacute",
+ "ocircumflex",
+ "odieresis",
+ "ograve",
+ "otilde",
+ "scaron",
+ "uacute",
+ "ucircumflex",
+ "udieresis",
+ "ugrave",
+ "yacute",
+ "ydieresis",
+ "zcaron",
+ "exclamsmall",
+ "Hungarumlautsmall",
+ "dollaroldstyle",
+ "dollarsuperior",
+ "ampersandsmall",
+ "Acutesmall",
+ "parenleftsuperior",
+ "parenrightsuperior",
+ "twodotenleader",
+ "onedotenleader",
+ "zerooldstyle",
+ "oneoldstyle",
+ "twooldstyle",
+ "threeoldstyle",
+ "fouroldstyle",
+ "fiveoldstyle",
+ "sixoldstyle",
+ "sevenoldstyle",
+ "eightoldstyle",
+ "nineoldstyle",
+ "commasuperior",
+ "threequartersemdash",
+ "periodsuperior",
+ "questionsmall",
+ "asuperior",
+ "bsuperior",
+ "centsuperior",
+ "dsuperior",
+ "esuperior",
+ "isuperior",
+ "lsuperior",
+ "msuperior",
+ "nsuperior",
+ "osuperior",
+ "rsuperior",
+ "ssuperior",
+ "tsuperior",
+ "ff",
+ "ffi",
+ "ffl",
+ "parenleftinferior",
+ "parenrightinferior",
+ "Circumflexsmall",
+ "hyphensuperior",
+ "Gravesmall",
+ "Asmall",
+ "Bsmall",
+ "Csmall",
+ "Dsmall",
+ "Esmall",
+ "Fsmall",
+ "Gsmall",
+ "Hsmall",
+ "Ismall",
+ "Jsmall",
+ "Ksmall",
+ "Lsmall",
+ "Msmall",
+ "Nsmall",
+ "Osmall",
+ "Psmall",
+ "Qsmall",
+ "Rsmall",
+ "Ssmall",
+ "Tsmall",
+ "Usmall",
+ "Vsmall",
+ "Wsmall",
+ "Xsmall",
+ "Ysmall",
+ "Zsmall",
+ "colonmonetary",
+ "onefitted",
+ "rupiah",
+ "Tildesmall",
+ "exclamdownsmall",
+ "centoldstyle",
+ "Lslashsmall",
+ "Scaronsmall",
+ "Zcaronsmall",
+ "Dieresissmall",
+ "Brevesmall",
+ "Caronsmall",
+ "Dotaccentsmall",
+ "Macronsmall",
+ "figuredash",
+ "hypheninferior",
+ "Ogoneksmall",
+ "Ringsmall",
+ "Cedillasmall",
+ "questiondownsmall",
+ "oneeighth",
+ "threeeighths",
+ "fiveeighths",
+ "seveneighths",
+ "onethird",
+ "twothirds",
+ "zerosuperior",
+ "foursuperior",
+ "fivesuperior",
+ "sixsuperior",
+ "sevensuperior",
+ "eightsuperior",
+ "ninesuperior",
+ "zeroinferior",
+ "oneinferior",
+ "twoinferior",
+ "threeinferior",
+ "fourinferior",
+ "fiveinferior",
+ "sixinferior",
+ "seveninferior",
+ "eightinferior",
+ "nineinferior",
+ "centinferior",
+ "dollarinferior",
+ "periodinferior",
+ "commainferior",
+ "Agravesmall",
+ "Aacutesmall",
+ "Acircumflexsmall",
+ "Atildesmall",
+ "Adieresissmall",
+ "Aringsmall",
+ "AEsmall",
+ "Ccedillasmall",
+ "Egravesmall",
+ "Eacutesmall",
+ "Ecircumflexsmall",
+ "Edieresissmall",
+ "Igravesmall",
+ "Iacutesmall",
+ "Icircumflexsmall",
+ "Idieresissmall",
+ "Ethsmall",
+ "Ntildesmall",
+ "Ogravesmall",
+ "Oacutesmall",
+ "Ocircumflexsmall",
+ "Otildesmall",
+ "Odieresissmall",
+ "OEsmall",
+ "Oslashsmall",
+ "Ugravesmall",
+ "Uacutesmall",
+ "Ucircumflexsmall",
+ "Udieresissmall",
+ "Yacutesmall",
+ "Thornsmall",
+ "Ydieresissmall",
+ "001.000",
+ "001.001",
+ "001.002",
+ "001.003",
+ "Black",
+ "Bold",
+ "Book",
+ "Light",
+ "Medium",
+ "Regular",
+ "Roman",
+ "Semibold",
]
cffStandardStringCount = 391
@@ -2923,98 +3327,504 @@ assert len(cffStandardStrings) == cffStandardStringCount
# build reverse mapping
cffStandardStringMapping = {}
for _i in range(cffStandardStringCount):
- cffStandardStringMapping[cffStandardStrings[_i]] = _i
-
-cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign",
-"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright",
-"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two",
-"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon",
-"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G",
-"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W",
-"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum",
-"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
-"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
-"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent",
-"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle",
-"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl",
-"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet",
-"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis",
-"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde",
-"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
-"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE",
-"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls",
-"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus",
-"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn",
-"threequarters", "twosuperior", "registered", "minus", "eth", "multiply",
-"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
-"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave",
-"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute",
-"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute",
-"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute",
-"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute",
-"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis",
-"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde",
-"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis",
-"zcaron"]
+ cffStandardStringMapping[cffStandardStrings[_i]] = _i
+
+cffISOAdobeStrings = [
+ ".notdef",
+ "space",
+ "exclam",
+ "quotedbl",
+ "numbersign",
+ "dollar",
+ "percent",
+ "ampersand",
+ "quoteright",
+ "parenleft",
+ "parenright",
+ "asterisk",
+ "plus",
+ "comma",
+ "hyphen",
+ "period",
+ "slash",
+ "zero",
+ "one",
+ "two",
+ "three",
+ "four",
+ "five",
+ "six",
+ "seven",
+ "eight",
+ "nine",
+ "colon",
+ "semicolon",
+ "less",
+ "equal",
+ "greater",
+ "question",
+ "at",
+ "A",
+ "B",
+ "C",
+ "D",
+ "E",
+ "F",
+ "G",
+ "H",
+ "I",
+ "J",
+ "K",
+ "L",
+ "M",
+ "N",
+ "O",
+ "P",
+ "Q",
+ "R",
+ "S",
+ "T",
+ "U",
+ "V",
+ "W",
+ "X",
+ "Y",
+ "Z",
+ "bracketleft",
+ "backslash",
+ "bracketright",
+ "asciicircum",
+ "underscore",
+ "quoteleft",
+ "a",
+ "b",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ "h",
+ "i",
+ "j",
+ "k",
+ "l",
+ "m",
+ "n",
+ "o",
+ "p",
+ "q",
+ "r",
+ "s",
+ "t",
+ "u",
+ "v",
+ "w",
+ "x",
+ "y",
+ "z",
+ "braceleft",
+ "bar",
+ "braceright",
+ "asciitilde",
+ "exclamdown",
+ "cent",
+ "sterling",
+ "fraction",
+ "yen",
+ "florin",
+ "section",
+ "currency",
+ "quotesingle",
+ "quotedblleft",
+ "guillemotleft",
+ "guilsinglleft",
+ "guilsinglright",
+ "fi",
+ "fl",
+ "endash",
+ "dagger",
+ "daggerdbl",
+ "periodcentered",
+ "paragraph",
+ "bullet",
+ "quotesinglbase",
+ "quotedblbase",
+ "quotedblright",
+ "guillemotright",
+ "ellipsis",
+ "perthousand",
+ "questiondown",
+ "grave",
+ "acute",
+ "circumflex",
+ "tilde",
+ "macron",
+ "breve",
+ "dotaccent",
+ "dieresis",
+ "ring",
+ "cedilla",
+ "hungarumlaut",
+ "ogonek",
+ "caron",
+ "emdash",
+ "AE",
+ "ordfeminine",
+ "Lslash",
+ "Oslash",
+ "OE",
+ "ordmasculine",
+ "ae",
+ "dotlessi",
+ "lslash",
+ "oslash",
+ "oe",
+ "germandbls",
+ "onesuperior",
+ "logicalnot",
+ "mu",
+ "trademark",
+ "Eth",
+ "onehalf",
+ "plusminus",
+ "Thorn",
+ "onequarter",
+ "divide",
+ "brokenbar",
+ "degree",
+ "thorn",
+ "threequarters",
+ "twosuperior",
+ "registered",
+ "minus",
+ "eth",
+ "multiply",
+ "threesuperior",
+ "copyright",
+ "Aacute",
+ "Acircumflex",
+ "Adieresis",
+ "Agrave",
+ "Aring",
+ "Atilde",
+ "Ccedilla",
+ "Eacute",
+ "Ecircumflex",
+ "Edieresis",
+ "Egrave",
+ "Iacute",
+ "Icircumflex",
+ "Idieresis",
+ "Igrave",
+ "Ntilde",
+ "Oacute",
+ "Ocircumflex",
+ "Odieresis",
+ "Ograve",
+ "Otilde",
+ "Scaron",
+ "Uacute",
+ "Ucircumflex",
+ "Udieresis",
+ "Ugrave",
+ "Yacute",
+ "Ydieresis",
+ "Zcaron",
+ "aacute",
+ "acircumflex",
+ "adieresis",
+ "agrave",
+ "aring",
+ "atilde",
+ "ccedilla",
+ "eacute",
+ "ecircumflex",
+ "edieresis",
+ "egrave",
+ "iacute",
+ "icircumflex",
+ "idieresis",
+ "igrave",
+ "ntilde",
+ "oacute",
+ "ocircumflex",
+ "odieresis",
+ "ograve",
+ "otilde",
+ "scaron",
+ "uacute",
+ "ucircumflex",
+ "udieresis",
+ "ugrave",
+ "yacute",
+ "ydieresis",
+ "zcaron",
+]
cffISOAdobeStringCount = 229
assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
-cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall",
-"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall",
-"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader",
-"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle",
-"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle",
-"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon",
-"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall",
-"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
-"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
-"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
-"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
-"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall",
-"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
-"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall",
-"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall",
-"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
-"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall",
-"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
-"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth",
-"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds",
-"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior",
-"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior",
-"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior",
-"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior",
-"centinferior", "dollarinferior", "periodinferior", "commainferior",
-"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall",
-"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall",
-"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall",
-"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall",
-"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
-"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
-"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall",
-"Ydieresissmall"]
+cffIExpertStrings = [
+ ".notdef",
+ "space",
+ "exclamsmall",
+ "Hungarumlautsmall",
+ "dollaroldstyle",
+ "dollarsuperior",
+ "ampersandsmall",
+ "Acutesmall",
+ "parenleftsuperior",
+ "parenrightsuperior",
+ "twodotenleader",
+ "onedotenleader",
+ "comma",
+ "hyphen",
+ "period",
+ "fraction",
+ "zerooldstyle",
+ "oneoldstyle",
+ "twooldstyle",
+ "threeoldstyle",
+ "fouroldstyle",
+ "fiveoldstyle",
+ "sixoldstyle",
+ "sevenoldstyle",
+ "eightoldstyle",
+ "nineoldstyle",
+ "colon",
+ "semicolon",
+ "commasuperior",
+ "threequartersemdash",
+ "periodsuperior",
+ "questionsmall",
+ "asuperior",
+ "bsuperior",
+ "centsuperior",
+ "dsuperior",
+ "esuperior",
+ "isuperior",
+ "lsuperior",
+ "msuperior",
+ "nsuperior",
+ "osuperior",
+ "rsuperior",
+ "ssuperior",
+ "tsuperior",
+ "ff",
+ "fi",
+ "fl",
+ "ffi",
+ "ffl",
+ "parenleftinferior",
+ "parenrightinferior",
+ "Circumflexsmall",
+ "hyphensuperior",
+ "Gravesmall",
+ "Asmall",
+ "Bsmall",
+ "Csmall",
+ "Dsmall",
+ "Esmall",
+ "Fsmall",
+ "Gsmall",
+ "Hsmall",
+ "Ismall",
+ "Jsmall",
+ "Ksmall",
+ "Lsmall",
+ "Msmall",
+ "Nsmall",
+ "Osmall",
+ "Psmall",
+ "Qsmall",
+ "Rsmall",
+ "Ssmall",
+ "Tsmall",
+ "Usmall",
+ "Vsmall",
+ "Wsmall",
+ "Xsmall",
+ "Ysmall",
+ "Zsmall",
+ "colonmonetary",
+ "onefitted",
+ "rupiah",
+ "Tildesmall",
+ "exclamdownsmall",
+ "centoldstyle",
+ "Lslashsmall",
+ "Scaronsmall",
+ "Zcaronsmall",
+ "Dieresissmall",
+ "Brevesmall",
+ "Caronsmall",
+ "Dotaccentsmall",
+ "Macronsmall",
+ "figuredash",
+ "hypheninferior",
+ "Ogoneksmall",
+ "Ringsmall",
+ "Cedillasmall",
+ "onequarter",
+ "onehalf",
+ "threequarters",
+ "questiondownsmall",
+ "oneeighth",
+ "threeeighths",
+ "fiveeighths",
+ "seveneighths",
+ "onethird",
+ "twothirds",
+ "zerosuperior",
+ "onesuperior",
+ "twosuperior",
+ "threesuperior",
+ "foursuperior",
+ "fivesuperior",
+ "sixsuperior",
+ "sevensuperior",
+ "eightsuperior",
+ "ninesuperior",
+ "zeroinferior",
+ "oneinferior",
+ "twoinferior",
+ "threeinferior",
+ "fourinferior",
+ "fiveinferior",
+ "sixinferior",
+ "seveninferior",
+ "eightinferior",
+ "nineinferior",
+ "centinferior",
+ "dollarinferior",
+ "periodinferior",
+ "commainferior",
+ "Agravesmall",
+ "Aacutesmall",
+ "Acircumflexsmall",
+ "Atildesmall",
+ "Adieresissmall",
+ "Aringsmall",
+ "AEsmall",
+ "Ccedillasmall",
+ "Egravesmall",
+ "Eacutesmall",
+ "Ecircumflexsmall",
+ "Edieresissmall",
+ "Igravesmall",
+ "Iacutesmall",
+ "Icircumflexsmall",
+ "Idieresissmall",
+ "Ethsmall",
+ "Ntildesmall",
+ "Ogravesmall",
+ "Oacutesmall",
+ "Ocircumflexsmall",
+ "Otildesmall",
+ "Odieresissmall",
+ "OEsmall",
+ "Oslashsmall",
+ "Ugravesmall",
+ "Uacutesmall",
+ "Ucircumflexsmall",
+ "Udieresissmall",
+ "Yacutesmall",
+ "Thornsmall",
+ "Ydieresissmall",
+]
cffExpertStringCount = 166
assert len(cffIExpertStrings) == cffExpertStringCount
-cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle",
-"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader",
-"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle",
-"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle",
-"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon",
-"semicolon", "commasuperior", "threequartersemdash", "periodsuperior",
-"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
-"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
-"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
-"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah",
-"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf",
-"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths",
-"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior",
-"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior",
-"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior",
-"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior",
-"eightinferior", "nineinferior", "centinferior", "dollarinferior",
-"periodinferior", "commainferior"]
+cffExpertSubsetStrings = [
+ ".notdef",
+ "space",
+ "dollaroldstyle",
+ "dollarsuperior",
+ "parenleftsuperior",
+ "parenrightsuperior",
+ "twodotenleader",
+ "onedotenleader",
+ "comma",
+ "hyphen",
+ "period",
+ "fraction",
+ "zerooldstyle",
+ "oneoldstyle",
+ "twooldstyle",
+ "threeoldstyle",
+ "fouroldstyle",
+ "fiveoldstyle",
+ "sixoldstyle",
+ "sevenoldstyle",
+ "eightoldstyle",
+ "nineoldstyle",
+ "colon",
+ "semicolon",
+ "commasuperior",
+ "threequartersemdash",
+ "periodsuperior",
+ "asuperior",
+ "bsuperior",
+ "centsuperior",
+ "dsuperior",
+ "esuperior",
+ "isuperior",
+ "lsuperior",
+ "msuperior",
+ "nsuperior",
+ "osuperior",
+ "rsuperior",
+ "ssuperior",
+ "tsuperior",
+ "ff",
+ "fi",
+ "fl",
+ "ffi",
+ "ffl",
+ "parenleftinferior",
+ "parenrightinferior",
+ "hyphensuperior",
+ "colonmonetary",
+ "onefitted",
+ "rupiah",
+ "centoldstyle",
+ "figuredash",
+ "hypheninferior",
+ "onequarter",
+ "onehalf",
+ "threequarters",
+ "oneeighth",
+ "threeeighths",
+ "fiveeighths",
+ "seveneighths",
+ "onethird",
+ "twothirds",
+ "zerosuperior",
+ "onesuperior",
+ "twosuperior",
+ "threesuperior",
+ "foursuperior",
+ "fivesuperior",
+ "sixsuperior",
+ "sevensuperior",
+ "eightsuperior",
+ "ninesuperior",
+ "zeroinferior",
+ "oneinferior",
+ "twoinferior",
+ "threeinferior",
+ "fourinferior",
+ "fiveinferior",
+ "sixinferior",
+ "seveninferior",
+ "eightinferior",
+ "nineinferior",
+ "centinferior",
+ "dollarinferior",
+ "periodinferior",
+ "commainferior",
+]
cffExpertSubsetStringCount = 87
assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
diff --git a/Lib/fontTools/cffLib/specializer.py b/Lib/fontTools/cffLib/specializer.py
index 677f03b7..efc15af7 100644
--- a/Lib/fontTools/cffLib/specializer.py
+++ b/Lib/fontTools/cffLib/specializer.py
@@ -17,751 +17,833 @@ from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
- if isinstance(string, str):
- string = string.split()
- program = []
- for token in string:
- try:
- token = int(token)
- except ValueError:
- try:
- token = float(token)
- except ValueError:
- pass
- program.append(token)
- return program
+ if isinstance(string, str):
+ string = string.split()
+ program = []
+ for token in string:
+ try:
+ token = int(token)
+ except ValueError:
+ try:
+ token = float(token)
+ except ValueError:
+ pass
+ program.append(token)
+ return program
def programToString(program):
- return ' '.join(str(x) for x in program)
+ return " ".join(str(x) for x in program)
def programToCommands(program, getNumRegions=None):
- """Takes a T2CharString program list and returns list of commands.
- Each command is a two-tuple of commandname,arg-list. The commandname might
- be empty string if no commandname shall be emitted (used for glyph width,
- hintmask/cntrmask argument, as well as stray arguments at the end of the
- program (¯\_(ツ)_/¯).
- 'getNumRegions' may be None, or a callable object. It must return the
- number of regions. 'getNumRegions' takes a single argument, vsindex. If
- the vsindex argument is None, getNumRegions returns the default number
- of regions for the charstring, else it returns the numRegions for
- the vsindex.
- The Charstring may or may not start with a width value. If the first
- non-blend operator has an odd number of arguments, then the first argument is
- a width, and is popped off. This is complicated with blend operators, as
- there may be more than one before the first hint or moveto operator, and each
- one reduces several arguments to just one list argument. We have to sum the
- number of arguments that are not part of the blend arguments, and all the
- 'numBlends' values. We could instead have said that by definition, if there
- is a blend operator, there is no width value, since CFF2 Charstrings don't
- have width values. I discussed this with Behdad, and we are allowing for an
- initial width value in this case because developers may assemble a CFF2
- charstring from CFF Charstrings, which could have width values.
- """
-
- seenWidthOp = False
- vsIndex = None
- lenBlendStack = 0
- lastBlendIndex = 0
- commands = []
- stack = []
- it = iter(program)
-
- for token in it:
- if not isinstance(token, str):
- stack.append(token)
- continue
-
- if token == 'blend':
- assert getNumRegions is not None
- numSourceFonts = 1 + getNumRegions(vsIndex)
- # replace the blend op args on the stack with a single list
- # containing all the blend op args.
- numBlends = stack[-1]
- numBlendArgs = numBlends * numSourceFonts + 1
- # replace first blend op by a list of the blend ops.
- stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
- lenBlendStack += numBlends + len(stack) - 1
- lastBlendIndex = len(stack)
- # if a blend op exists, this is or will be a CFF2 charstring.
- continue
-
- elif token == 'vsindex':
- vsIndex = stack[-1]
- assert type(vsIndex) is int
-
- elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm',
- 'cntrmask', 'hintmask',
- 'hmoveto', 'vmoveto', 'rmoveto',
- 'endchar'}:
- seenWidthOp = True
- parity = token in {'hmoveto', 'vmoveto'}
- if lenBlendStack:
- # lenBlendStack has the number of args represented by the last blend
- # arg and all the preceding args. We need to now add the number of
- # args following the last blend arg.
- numArgs = lenBlendStack + len(stack[lastBlendIndex:])
- else:
- numArgs = len(stack)
- if numArgs and (numArgs % 2) ^ parity:
- width = stack.pop(0)
- commands.append(('', [width]))
-
- if token in {'hintmask', 'cntrmask'}:
- if stack:
- commands.append(('', stack))
- commands.append((token, []))
- commands.append(('', [next(it)]))
- else:
- commands.append((token, stack))
- stack = []
- if stack:
- commands.append(('', stack))
- return commands
+ """Takes a T2CharString program list and returns list of commands.
+ Each command is a two-tuple of commandname,arg-list. The commandname might
+ be empty string if no commandname shall be emitted (used for glyph width,
+ hintmask/cntrmask argument, as well as stray arguments at the end of the
+ program (🤷).
+ 'getNumRegions' may be None, or a callable object. It must return the
+ number of regions. 'getNumRegions' takes a single argument, vsindex. If
+ the vsindex argument is None, getNumRegions returns the default number
+ of regions for the charstring, else it returns the numRegions for
+ the vsindex.
+ The Charstring may or may not start with a width value. If the first
+ non-blend operator has an odd number of arguments, then the first argument is
+ a width, and is popped off. This is complicated with blend operators, as
+ there may be more than one before the first hint or moveto operator, and each
+ one reduces several arguments to just one list argument. We have to sum the
+ number of arguments that are not part of the blend arguments, and all the
+ 'numBlends' values. We could instead have said that by definition, if there
+ is a blend operator, there is no width value, since CFF2 Charstrings don't
+ have width values. I discussed this with Behdad, and we are allowing for an
+ initial width value in this case because developers may assemble a CFF2
+ charstring from CFF Charstrings, which could have width values.
+ """
+
+ seenWidthOp = False
+ vsIndex = None
+ lenBlendStack = 0
+ lastBlendIndex = 0
+ commands = []
+ stack = []
+ it = iter(program)
+
+ for token in it:
+ if not isinstance(token, str):
+ stack.append(token)
+ continue
+
+ if token == "blend":
+ assert getNumRegions is not None
+ numSourceFonts = 1 + getNumRegions(vsIndex)
+ # replace the blend op args on the stack with a single list
+ # containing all the blend op args.
+ numBlends = stack[-1]
+ numBlendArgs = numBlends * numSourceFonts + 1
+ # replace first blend op by a list of the blend ops.
+ stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
+ lenBlendStack += numBlends + len(stack) - 1
+ lastBlendIndex = len(stack)
+ # if a blend op exists, this is or will be a CFF2 charstring.
+ continue
+
+ elif token == "vsindex":
+ vsIndex = stack[-1]
+ assert type(vsIndex) is int
+
+ elif (not seenWidthOp) and token in {
+ "hstem",
+ "hstemhm",
+ "vstem",
+ "vstemhm",
+ "cntrmask",
+ "hintmask",
+ "hmoveto",
+ "vmoveto",
+ "rmoveto",
+ "endchar",
+ }:
+ seenWidthOp = True
+ parity = token in {"hmoveto", "vmoveto"}
+ if lenBlendStack:
+ # lenBlendStack has the number of args represented by the last blend
+ # arg and all the preceding args. We need to now add the number of
+ # args following the last blend arg.
+ numArgs = lenBlendStack + len(stack[lastBlendIndex:])
+ else:
+ numArgs = len(stack)
+ if numArgs and (numArgs % 2) ^ parity:
+ width = stack.pop(0)
+ commands.append(("", [width]))
+
+ if token in {"hintmask", "cntrmask"}:
+ if stack:
+ commands.append(("", stack))
+ commands.append((token, []))
+ commands.append(("", [next(it)]))
+ else:
+ commands.append((token, stack))
+ stack = []
+ if stack:
+ commands.append(("", stack))
+ return commands
def _flattenBlendArgs(args):
- token_list = []
- for arg in args:
- if isinstance(arg, list):
- token_list.extend(arg)
- token_list.append('blend')
- else:
- token_list.append(arg)
- return token_list
+ token_list = []
+ for arg in args:
+ if isinstance(arg, list):
+ token_list.extend(arg)
+ token_list.append("blend")
+ else:
+ token_list.append(arg)
+ return token_list
+
def commandsToProgram(commands):
- """Takes a commands list as returned by programToCommands() and converts
- it back to a T2CharString program list."""
- program = []
- for op,args in commands:
- if any(isinstance(arg, list) for arg in args):
- args = _flattenBlendArgs(args)
- program.extend(args)
- if op:
- program.append(op)
- return program
+ """Takes a commands list as returned by programToCommands() and converts
+ it back to a T2CharString program list."""
+ program = []
+ for op, args in commands:
+ if any(isinstance(arg, list) for arg in args):
+ args = _flattenBlendArgs(args)
+ program.extend(args)
+ if op:
+ program.append(op)
+ return program
def _everyN(el, n):
- """Group the list el into groups of size n"""
- if len(el) % n != 0: raise ValueError(el)
- for i in range(0, len(el), n):
- yield el[i:i+n]
+ """Group the list el into groups of size n"""
+ if len(el) % n != 0:
+ raise ValueError(el)
+ for i in range(0, len(el), n):
+ yield el[i : i + n]
class _GeneralizerDecombinerCommandsMap(object):
+ @staticmethod
+ def rmoveto(args):
+ if len(args) != 2:
+ raise ValueError(args)
+ yield ("rmoveto", args)
+
+ @staticmethod
+ def hmoveto(args):
+ if len(args) != 1:
+ raise ValueError(args)
+ yield ("rmoveto", [args[0], 0])
+
+ @staticmethod
+ def vmoveto(args):
+ if len(args) != 1:
+ raise ValueError(args)
+ yield ("rmoveto", [0, args[0]])
+
+ @staticmethod
+ def rlineto(args):
+ if not args:
+ raise ValueError(args)
+ for args in _everyN(args, 2):
+ yield ("rlineto", args)
+
+ @staticmethod
+ def hlineto(args):
+ if not args:
+ raise ValueError(args)
+ it = iter(args)
+ try:
+ while True:
+ yield ("rlineto", [next(it), 0])
+ yield ("rlineto", [0, next(it)])
+ except StopIteration:
+ pass
+
+ @staticmethod
+ def vlineto(args):
+ if not args:
+ raise ValueError(args)
+ it = iter(args)
+ try:
+ while True:
+ yield ("rlineto", [0, next(it)])
+ yield ("rlineto", [next(it), 0])
+ except StopIteration:
+ pass
+
+ @staticmethod
+ def rrcurveto(args):
+ if not args:
+ raise ValueError(args)
+ for args in _everyN(args, 6):
+ yield ("rrcurveto", args)
+
+ @staticmethod
+ def hhcurveto(args):
+ if len(args) < 4 or len(args) % 4 > 1:
+ raise ValueError(args)
+ if len(args) % 2 == 1:
+ yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
+ args = args[5:]
+ for args in _everyN(args, 4):
+ yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
+
+ @staticmethod
+ def vvcurveto(args):
+ if len(args) < 4 or len(args) % 4 > 1:
+ raise ValueError(args)
+ if len(args) % 2 == 1:
+ yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
+ args = args[5:]
+ for args in _everyN(args, 4):
+ yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
+
+ @staticmethod
+ def hvcurveto(args):
+ if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
+ raise ValueError(args)
+ last_args = None
+ if len(args) % 2 == 1:
+ lastStraight = len(args) % 8 == 5
+ args, last_args = args[:-5], args[-5:]
+ it = _everyN(args, 4)
+ try:
+ while True:
+ args = next(it)
+ yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
+ args = next(it)
+ yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
+ except StopIteration:
+ pass
+ if last_args:
+ args = last_args
+ if lastStraight:
+ yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
+ else:
+ yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
+
+ @staticmethod
+ def vhcurveto(args):
+ if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
+ raise ValueError(args)
+ last_args = None
+ if len(args) % 2 == 1:
+ lastStraight = len(args) % 8 == 5
+ args, last_args = args[:-5], args[-5:]
+ it = _everyN(args, 4)
+ try:
+ while True:
+ args = next(it)
+ yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
+ args = next(it)
+ yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
+ except StopIteration:
+ pass
+ if last_args:
+ args = last_args
+ if lastStraight:
+ yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
+ else:
+ yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
+
+ @staticmethod
+ def rcurveline(args):
+ if len(args) < 8 or len(args) % 6 != 2:
+ raise ValueError(args)
+ args, last_args = args[:-2], args[-2:]
+ for args in _everyN(args, 6):
+ yield ("rrcurveto", args)
+ yield ("rlineto", last_args)
+
+ @staticmethod
+ def rlinecurve(args):
+ if len(args) < 8 or len(args) % 2 != 0:
+ raise ValueError(args)
+ args, last_args = args[:-6], args[-6:]
+ for args in _everyN(args, 2):
+ yield ("rlineto", args)
+ yield ("rrcurveto", last_args)
- @staticmethod
- def rmoveto(args):
- if len(args) != 2: raise ValueError(args)
- yield ('rmoveto', args)
- @staticmethod
- def hmoveto(args):
- if len(args) != 1: raise ValueError(args)
- yield ('rmoveto', [args[0], 0])
- @staticmethod
- def vmoveto(args):
- if len(args) != 1: raise ValueError(args)
- yield ('rmoveto', [0, args[0]])
-
- @staticmethod
- def rlineto(args):
- if not args: raise ValueError(args)
- for args in _everyN(args, 2):
- yield ('rlineto', args)
- @staticmethod
- def hlineto(args):
- if not args: raise ValueError(args)
- it = iter(args)
- try:
- while True:
- yield ('rlineto', [next(it), 0])
- yield ('rlineto', [0, next(it)])
- except StopIteration:
- pass
- @staticmethod
- def vlineto(args):
- if not args: raise ValueError(args)
- it = iter(args)
- try:
- while True:
- yield ('rlineto', [0, next(it)])
- yield ('rlineto', [next(it), 0])
- except StopIteration:
- pass
- @staticmethod
- def rrcurveto(args):
- if not args: raise ValueError(args)
- for args in _everyN(args, 6):
- yield ('rrcurveto', args)
- @staticmethod
- def hhcurveto(args):
- if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
- if len(args) % 2 == 1:
- yield ('rrcurveto', [args[1], args[0], args[2], args[3], args[4], 0])
- args = args[5:]
- for args in _everyN(args, 4):
- yield ('rrcurveto', [args[0], 0, args[1], args[2], args[3], 0])
- @staticmethod
- def vvcurveto(args):
- if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
- if len(args) % 2 == 1:
- yield ('rrcurveto', [args[0], args[1], args[2], args[3], 0, args[4]])
- args = args[5:]
- for args in _everyN(args, 4):
- yield ('rrcurveto', [0, args[0], args[1], args[2], 0, args[3]])
- @staticmethod
- def hvcurveto(args):
- if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
- last_args = None
- if len(args) % 2 == 1:
- lastStraight = len(args) % 8 == 5
- args, last_args = args[:-5], args[-5:]
- it = _everyN(args, 4)
- try:
- while True:
- args = next(it)
- yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
- args = next(it)
- yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
- except StopIteration:
- pass
- if last_args:
- args = last_args
- if lastStraight:
- yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
- else:
- yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
- @staticmethod
- def vhcurveto(args):
- if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
- last_args = None
- if len(args) % 2 == 1:
- lastStraight = len(args) % 8 == 5
- args, last_args = args[:-5], args[-5:]
- it = _everyN(args, 4)
- try:
- while True:
- args = next(it)
- yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
- args = next(it)
- yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
- except StopIteration:
- pass
- if last_args:
- args = last_args
- if lastStraight:
- yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
- else:
- yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
-
- @staticmethod
- def rcurveline(args):
- if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args)
- args, last_args = args[:-2], args[-2:]
- for args in _everyN(args, 6):
- yield ('rrcurveto', args)
- yield ('rlineto', last_args)
- @staticmethod
- def rlinecurve(args):
- if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args)
- args, last_args = args[:-6], args[-6:]
- for args in _everyN(args, 2):
- yield ('rlineto', args)
- yield ('rrcurveto', last_args)
def _convertBlendOpToArgs(blendList):
- # args is list of blend op args. Since we are supporting
- # recursive blend op calls, some of these args may also
- # be a list of blend op args, and need to be converted before
- # we convert the current list.
- if any([isinstance(arg, list) for arg in blendList]):
- args = [i for e in blendList for i in
- (_convertBlendOpToArgs(e) if isinstance(e,list) else [e]) ]
- else:
- args = blendList
-
- # We now know that blendList contains a blend op argument list, even if
- # some of the args are lists that each contain a blend op argument list.
- # Convert from:
- # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
- # to:
- # [ [x0] + [delta tuple for x0],
- # ...,
- # [xn] + [delta tuple for xn] ]
- numBlends = args[-1]
- # Can't use args.pop() when the args are being used in a nested list
- # comprehension. See calling context
- args = args[:-1]
-
- numRegions = len(args)//numBlends - 1
- if not (numBlends*(numRegions + 1) == len(args)):
- raise ValueError(blendList)
-
- defaultArgs = [[arg] for arg in args[:numBlends]]
- deltaArgs = args[numBlends:]
- numDeltaValues = len(deltaArgs)
- deltaList = [ deltaArgs[i:i + numRegions] for i in range(0, numDeltaValues, numRegions) ]
- blend_args = [ a + b + [1] for a, b in zip(defaultArgs,deltaList)]
- return blend_args
+ # args is list of blend op args. Since we are supporting
+ # recursive blend op calls, some of these args may also
+ # be a list of blend op args, and need to be converted before
+ # we convert the current list.
+ if any([isinstance(arg, list) for arg in blendList]):
+ args = [
+ i
+ for e in blendList
+ for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
+ ]
+ else:
+ args = blendList
+
+ # We now know that blendList contains a blend op argument list, even if
+ # some of the args are lists that each contain a blend op argument list.
+ # Convert from:
+ # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
+ # to:
+ # [ [x0] + [delta tuple for x0],
+ # ...,
+ # [xn] + [delta tuple for xn] ]
+ numBlends = args[-1]
+ # Can't use args.pop() when the args are being used in a nested list
+ # comprehension. See calling context
+ args = args[:-1]
+
+ numRegions = len(args) // numBlends - 1
+ if not (numBlends * (numRegions + 1) == len(args)):
+ raise ValueError(blendList)
+
+ defaultArgs = [[arg] for arg in args[:numBlends]]
+ deltaArgs = args[numBlends:]
+ numDeltaValues = len(deltaArgs)
+ deltaList = [
+ deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
+ ]
+ blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
+ return blend_args
+
def generalizeCommands(commands, ignoreErrors=False):
- result = []
- mapping = _GeneralizerDecombinerCommandsMap
- for op, args in commands:
- # First, generalize any blend args in the arg list.
- if any([isinstance(arg, list) for arg in args]):
- try:
- args = [n for arg in args for n in (_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg])]
- except ValueError:
- if ignoreErrors:
- # Store op as data, such that consumers of commands do not have to
- # deal with incorrect number of arguments.
- result.append(('', args))
- result.append(('', [op]))
- else:
- raise
-
- func = getattr(mapping, op, None)
- if not func:
- result.append((op,args))
- continue
- try:
- for command in func(args):
- result.append(command)
- except ValueError:
- if ignoreErrors:
- # Store op as data, such that consumers of commands do not have to
- # deal with incorrect number of arguments.
- result.append(('', args))
- result.append(('', [op]))
- else:
- raise
- return result
+ result = []
+ mapping = _GeneralizerDecombinerCommandsMap
+ for op, args in commands:
+ # First, generalize any blend args in the arg list.
+ if any([isinstance(arg, list) for arg in args]):
+ try:
+ args = [
+ n
+ for arg in args
+ for n in (
+ _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
+ )
+ ]
+ except ValueError:
+ if ignoreErrors:
+ # Store op as data, such that consumers of commands do not have to
+ # deal with incorrect number of arguments.
+ result.append(("", args))
+ result.append(("", [op]))
+ else:
+ raise
+
+ func = getattr(mapping, op, None)
+ if not func:
+ result.append((op, args))
+ continue
+ try:
+ for command in func(args):
+ result.append(command)
+ except ValueError:
+ if ignoreErrors:
+ # Store op as data, such that consumers of commands do not have to
+ # deal with incorrect number of arguments.
+ result.append(("", args))
+ result.append(("", [op]))
+ else:
+ raise
+ return result
+
def generalizeProgram(program, getNumRegions=None, **kwargs):
- return commandsToProgram(generalizeCommands(programToCommands(program, getNumRegions), **kwargs))
+ return commandsToProgram(
+ generalizeCommands(programToCommands(program, getNumRegions), **kwargs)
+ )
def _categorizeVector(v):
- """
- Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
- of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
- it returns a single zero still.
-
- >>> _categorizeVector((0,0))
- ('0', (0,))
- >>> _categorizeVector((1,0))
- ('h', (1,))
- >>> _categorizeVector((0,2))
- ('v', (2,))
- >>> _categorizeVector((1,2))
- ('r', (1, 2))
- """
- if not v[0]:
- if not v[1]:
- return '0', v[:1]
- else:
- return 'v', v[1:]
- else:
- if not v[1]:
- return 'h', v[:1]
- else:
- return 'r', v
+ """
+ Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
+ of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
+ it returns a single zero still.
+
+ >>> _categorizeVector((0,0))
+ ('0', (0,))
+ >>> _categorizeVector((1,0))
+ ('h', (1,))
+ >>> _categorizeVector((0,2))
+ ('v', (2,))
+ >>> _categorizeVector((1,2))
+ ('r', (1, 2))
+ """
+ if not v[0]:
+ if not v[1]:
+ return "0", v[:1]
+ else:
+ return "v", v[1:]
+ else:
+ if not v[1]:
+ return "h", v[:1]
+ else:
+ return "r", v
+
def _mergeCategories(a, b):
- if a == '0': return b
- if b == '0': return a
- if a == b: return a
- return None
+ if a == "0":
+ return b
+ if b == "0":
+ return a
+ if a == b:
+ return a
+ return None
+
def _negateCategory(a):
- if a == 'h': return 'v'
- if a == 'v': return 'h'
- assert a in '0r'
- return a
+ if a == "h":
+ return "v"
+ if a == "v":
+ return "h"
+ assert a in "0r"
+ return a
+
def _convertToBlendCmds(args):
- # return a list of blend commands, and
- # the remaining non-blended args, if any.
- num_args = len(args)
- stack_use = 0
- new_args = []
- i = 0
- while i < num_args:
- arg = args[i]
- if not isinstance(arg, list):
- new_args.append(arg)
- i += 1
- stack_use += 1
- else:
- prev_stack_use = stack_use
- # The arg is a tuple of blend values.
- # These are each (master 0,delta 1..delta n, 1)
- # Combine as many successive tuples as we can,
- # up to the max stack limit.
- num_sources = len(arg) - 1
- blendlist = [arg]
- i += 1
- stack_use += 1 + num_sources # 1 for the num_blends arg
- while (i < num_args) and isinstance(args[i], list):
- blendlist.append(args[i])
- i += 1
- stack_use += num_sources
- if stack_use + num_sources > maxStackLimit:
- # if we are here, max stack is the CFF2 max stack.
- # I use the CFF2 max stack limit here rather than
- # the 'maxstack' chosen by the client, as the default
- # maxstack may have been used unintentionally. For all
- # the other operators, this just produces a little less
- # optimization, but here it puts a hard (and low) limit
- # on the number of source fonts that can be used.
- break
- # blendList now contains as many single blend tuples as can be
- # combined without exceeding the CFF2 stack limit.
- num_blends = len(blendlist)
- # append the 'num_blends' default font values
- blend_args = []
- for arg in blendlist:
- blend_args.append(arg[0])
- for arg in blendlist:
- assert arg[-1] == 1
- blend_args.extend(arg[1:-1])
- blend_args.append(num_blends)
- new_args.append(blend_args)
- stack_use = prev_stack_use + num_blends
-
- return new_args
+ # return a list of blend commands, and
+ # the remaining non-blended args, if any.
+ num_args = len(args)
+ stack_use = 0
+ new_args = []
+ i = 0
+ while i < num_args:
+ arg = args[i]
+ if not isinstance(arg, list):
+ new_args.append(arg)
+ i += 1
+ stack_use += 1
+ else:
+ prev_stack_use = stack_use
+ # The arg is a tuple of blend values.
+ # These are each (master 0,delta 1..delta n, 1)
+ # Combine as many successive tuples as we can,
+ # up to the max stack limit.
+ num_sources = len(arg) - 1
+ blendlist = [arg]
+ i += 1
+ stack_use += 1 + num_sources # 1 for the num_blends arg
+ while (i < num_args) and isinstance(args[i], list):
+ blendlist.append(args[i])
+ i += 1
+ stack_use += num_sources
+ if stack_use + num_sources > maxStackLimit:
+ # if we are here, max stack is the CFF2 max stack.
+ # I use the CFF2 max stack limit here rather than
+ # the 'maxstack' chosen by the client, as the default
+ # maxstack may have been used unintentionally. For all
+ # the other operators, this just produces a little less
+ # optimization, but here it puts a hard (and low) limit
+ # on the number of source fonts that can be used.
+ break
+ # blendList now contains as many single blend tuples as can be
+ # combined without exceeding the CFF2 stack limit.
+ num_blends = len(blendlist)
+ # append the 'num_blends' default font values
+ blend_args = []
+ for arg in blendlist:
+ blend_args.append(arg[0])
+ for arg in blendlist:
+ assert arg[-1] == 1
+ blend_args.extend(arg[1:-1])
+ blend_args.append(num_blends)
+ new_args.append(blend_args)
+ stack_use = prev_stack_use + num_blends
+
+ return new_args
+
def _addArgs(a, b):
- if isinstance(b, list):
- if isinstance(a, list):
- if len(a) != len(b) or a[-1] != b[-1]:
- raise ValueError()
- return [_addArgs(va, vb) for va,vb in zip(a[:-1], b[:-1])] + [a[-1]]
- else:
- a, b = b, a
- if isinstance(a, list):
- assert a[-1] == 1
- return [_addArgs(a[0], b)] + a[1:]
- return a + b
-
-
-def specializeCommands(commands,
- ignoreErrors=False,
- generalizeFirst=True,
- preserveTopology=False,
- maxstack=48):
-
- # We perform several rounds of optimizations. They are carefully ordered and are:
- #
- # 0. Generalize commands.
- # This ensures that they are in our expected simple form, with each line/curve only
- # having arguments for one segment, and using the generic form (rlineto/rrcurveto).
- # If caller is sure the input is in this form, they can turn off generalization to
- # save time.
- #
- # 1. Combine successive rmoveto operations.
- #
- # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
- # We specialize into some, made-up, variants as well, which simplifies following
- # passes.
- #
- # 3. Merge or delete redundant operations, to the extent requested.
- # OpenType spec declares point numbers in CFF undefined. As such, we happily
- # change topology. If client relies on point numbers (in GPOS anchors, or for
- # hinting purposes(what?)) they can turn this off.
- #
- # 4. Peephole optimization to revert back some of the h/v variants back into their
- # original "relative" operator (rline/rrcurveto) if that saves a byte.
- #
- # 5. Combine adjacent operators when possible, minding not to go over max stack size.
- #
- # 6. Resolve any remaining made-up operators into real operators.
- #
- # I have convinced myself that this produces optimal bytecode (except for, possibly
- # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-)
- # A dynamic-programming approach can do the same but would be significantly slower.
- #
- # 7. For any args which are blend lists, convert them to a blend command.
-
-
- # 0. Generalize commands.
- if generalizeFirst:
- commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
- else:
- commands = list(commands) # Make copy since we modify in-place later.
-
- # 1. Combine successive rmoveto operations.
- for i in range(len(commands)-1, 0, -1):
- if 'rmoveto' == commands[i][0] == commands[i-1][0]:
- v1, v2 = commands[i-1][1], commands[i][1]
- commands[i-1] = ('rmoveto', [v1[0]+v2[0], v1[1]+v2[1]])
- del commands[i]
-
- # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
- #
- # We, in fact, specialize into more, made-up, variants that special-case when both
- # X and Y components are zero. This simplifies the following optimization passes.
- # This case is rare, but OCD does not let me skip it.
- #
- # After this round, we will have four variants that use the following mnemonics:
- #
- # - 'r' for relative, ie. non-zero X and non-zero Y,
- # - 'h' for horizontal, ie. zero X and non-zero Y,
- # - 'v' for vertical, ie. non-zero X and zero Y,
- # - '0' for zeros, ie. zero X and zero Y.
- #
- # The '0' pseudo-operators are not part of the spec, but help simplify the following
- # optimization rounds. We resolve them at the end. So, after this, we will have four
- # moveto and four lineto variants:
- #
- # - 0moveto, 0lineto
- # - hmoveto, hlineto
- # - vmoveto, vlineto
- # - rmoveto, rlineto
- #
- # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve
- # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3.
- # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
- #
- # There are nine different variants of curves without the '0'. Those nine map exactly
- # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
- # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
- # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of
- # arguments) is in fact an rhcurveto. The operators in the spec are designed such that
- # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
- #
- # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest
- # of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be
- # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always
- # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute
- # the '0' with either 'h' or 'v' and it works.
- #
- # When we get to curve splines however, things become more complicated... XXX finish this.
- # There's one more complexity with splines. If one side of the spline is not horizontal or
- # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
- # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
- # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
- # This limits our merge opportunities later.
- #
- for i in range(len(commands)):
- op,args = commands[i]
-
- if op in {'rmoveto', 'rlineto'}:
- c, args = _categorizeVector(args)
- commands[i] = c+op[1:], args
- continue
-
- if op == 'rrcurveto':
- c1, args1 = _categorizeVector(args[:2])
- c2, args2 = _categorizeVector(args[-2:])
- commands[i] = c1+c2+'curveto', args1+args[2:4]+args2
- continue
-
- # 3. Merge or delete redundant operations, to the extent requested.
- #
- # TODO
- # A 0moveto that comes before all other path operations can be removed.
- # though I find conflicting evidence for this.
- #
- # TODO
- # "If hstem and vstem hints are both declared at the beginning of a
- # CharString, and this sequence is followed directly by the hintmask or
- # cntrmask operators, then the vstem hint operator (or, if applicable,
- # the vstemhm operator) need not be included."
- #
- # "The sequence and form of a CFF2 CharString program may be represented as:
- # {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
- #
- # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
- #
- # For Type2 CharStrings the sequence is:
- # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
-
-
- # Some other redundancies change topology (point numbers).
- if not preserveTopology:
- for i in range(len(commands)-1, -1, -1):
- op, args = commands[i]
-
- # A 00curveto is demoted to a (specialized) lineto.
- if op == '00curveto':
- assert len(args) == 4
- c, args = _categorizeVector(args[1:3])
- op = c+'lineto'
- commands[i] = op, args
- # and then...
-
- # A 0lineto can be deleted.
- if op == '0lineto':
- del commands[i]
- continue
-
- # Merge adjacent hlineto's and vlineto's.
- # In CFF2 charstrings from variable fonts, each
- # arg item may be a list of blendable values, one from
- # each source font.
- if (i and op in {'hlineto', 'vlineto'} and
- (op == commands[i-1][0])):
- _, other_args = commands[i-1]
- assert len(args) == 1 and len(other_args) == 1
- try:
- new_args = [_addArgs(args[0], other_args[0])]
- except ValueError:
- continue
- commands[i-1] = (op, new_args)
- del commands[i]
- continue
-
- # 4. Peephole optimization to revert back some of the h/v variants back into their
- # original "relative" operator (rline/rrcurveto) if that saves a byte.
- for i in range(1, len(commands)-1):
- op,args = commands[i]
- prv,nxt = commands[i-1][0], commands[i+1][0]
-
- if op in {'0lineto', 'hlineto', 'vlineto'} and prv == nxt == 'rlineto':
- assert len(args) == 1
- args = [0, args[0]] if op[0] == 'v' else [args[0], 0]
- commands[i] = ('rlineto', args)
- continue
-
- if op[2:] == 'curveto' and len(args) == 5 and prv == nxt == 'rrcurveto':
- assert (op[0] == 'r') ^ (op[1] == 'r')
- if op[0] == 'v':
- pos = 0
- elif op[0] != 'r':
- pos = 1
- elif op[1] == 'v':
- pos = 4
- else:
- pos = 5
- # Insert, while maintaining the type of args (can be tuple or list).
- args = args[:pos] + type(args)((0,)) + args[pos:]
- commands[i] = ('rrcurveto', args)
- continue
-
- # 5. Combine adjacent operators when possible, minding not to go over max stack size.
- for i in range(len(commands)-1, 0, -1):
- op1,args1 = commands[i-1]
- op2,args2 = commands[i]
- new_op = None
-
- # Merge logic...
- if {op1, op2} <= {'rlineto', 'rrcurveto'}:
- if op1 == op2:
- new_op = op1
- else:
- if op2 == 'rrcurveto' and len(args2) == 6:
- new_op = 'rlinecurve'
- elif len(args2) == 2:
- new_op = 'rcurveline'
-
- elif (op1, op2) in {('rlineto', 'rlinecurve'), ('rrcurveto', 'rcurveline')}:
- new_op = op2
-
- elif {op1, op2} == {'vlineto', 'hlineto'}:
- new_op = op1
-
- elif 'curveto' == op1[2:] == op2[2:]:
- d0, d1 = op1[:2]
- d2, d3 = op2[:2]
-
- if d1 == 'r' or d2 == 'r' or d0 == d3 == 'r':
- continue
-
- d = _mergeCategories(d1, d2)
- if d is None: continue
- if d0 == 'r':
- d = _mergeCategories(d, d3)
- if d is None: continue
- new_op = 'r'+d+'curveto'
- elif d3 == 'r':
- d0 = _mergeCategories(d0, _negateCategory(d))
- if d0 is None: continue
- new_op = d0+'r'+'curveto'
- else:
- d0 = _mergeCategories(d0, d3)
- if d0 is None: continue
- new_op = d0+d+'curveto'
-
- # Make sure the stack depth does not exceed (maxstack - 1), so
- # that subroutinizer can insert subroutine calls at any point.
- if new_op and len(args1) + len(args2) < maxstack:
- commands[i-1] = (new_op, args1+args2)
- del commands[i]
-
- # 6. Resolve any remaining made-up operators into real operators.
- for i in range(len(commands)):
- op,args = commands[i]
-
- if op in {'0moveto', '0lineto'}:
- commands[i] = 'h'+op[1:], args
- continue
-
- if op[2:] == 'curveto' and op[:2] not in {'rr', 'hh', 'vv', 'vh', 'hv'}:
- op0, op1 = op[:2]
- if (op0 == 'r') ^ (op1 == 'r'):
- assert len(args) % 2 == 1
- if op0 == '0': op0 = 'h'
- if op1 == '0': op1 = 'h'
- if op0 == 'r': op0 = op1
- if op1 == 'r': op1 = _negateCategory(op0)
- assert {op0,op1} <= {'h','v'}, (op0, op1)
-
- if len(args) % 2:
- if op0 != op1: # vhcurveto / hvcurveto
- if (op0 == 'h') ^ (len(args) % 8 == 1):
- # Swap last two args order
- args = args[:-2]+args[-1:]+args[-2:-1]
- else: # hhcurveto / vvcurveto
- if op0 == 'h': # hhcurveto
- # Swap first two args order
- args = args[1:2]+args[:1]+args[2:]
-
- commands[i] = op0+op1+'curveto', args
- continue
-
- # 7. For any series of args which are blend lists, convert the series to a single blend arg.
- for i in range(len(commands)):
- op, args = commands[i]
- if any(isinstance(arg, list) for arg in args):
- commands[i] = op, _convertToBlendCmds(args)
-
- return commands
+ if isinstance(b, list):
+ if isinstance(a, list):
+ if len(a) != len(b) or a[-1] != b[-1]:
+ raise ValueError()
+ return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
+ else:
+ a, b = b, a
+ if isinstance(a, list):
+ assert a[-1] == 1
+ return [_addArgs(a[0], b)] + a[1:]
+ return a + b
+
+
+def specializeCommands(
+ commands,
+ ignoreErrors=False,
+ generalizeFirst=True,
+ preserveTopology=False,
+ maxstack=48,
+):
+ # We perform several rounds of optimizations. They are carefully ordered and are:
+ #
+ # 0. Generalize commands.
+ # This ensures that they are in our expected simple form, with each line/curve only
+ # having arguments for one segment, and using the generic form (rlineto/rrcurveto).
+ # If caller is sure the input is in this form, they can turn off generalization to
+ # save time.
+ #
+ # 1. Combine successive rmoveto operations.
+ #
+ # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
+ # We specialize into some, made-up, variants as well, which simplifies following
+ # passes.
+ #
+ # 3. Merge or delete redundant operations, to the extent requested.
+ # OpenType spec declares point numbers in CFF undefined. As such, we happily
+ # change topology. If client relies on point numbers (in GPOS anchors, or for
+ # hinting purposes(what?)) they can turn this off.
+ #
+ # 4. Peephole optimization to revert back some of the h/v variants back into their
+ # original "relative" operator (rline/rrcurveto) if that saves a byte.
+ #
+ # 5. Combine adjacent operators when possible, minding not to go over max stack size.
+ #
+ # 6. Resolve any remaining made-up operators into real operators.
+ #
+ # I have convinced myself that this produces optimal bytecode (except for, possibly
+ # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-)
+ # A dynamic-programming approach can do the same but would be significantly slower.
+ #
+ # 7. For any args which are blend lists, convert them to a blend command.
+
+ # 0. Generalize commands.
+ if generalizeFirst:
+ commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
+ else:
+ commands = list(commands) # Make copy since we modify in-place later.
+
+ # 1. Combine successive rmoveto operations.
+ for i in range(len(commands) - 1, 0, -1):
+ if "rmoveto" == commands[i][0] == commands[i - 1][0]:
+ v1, v2 = commands[i - 1][1], commands[i][1]
+ commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
+ del commands[i]
+
+ # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
+ #
+ # We, in fact, specialize into more, made-up, variants that special-case when both
+ # X and Y components are zero. This simplifies the following optimization passes.
+ # This case is rare, but OCD does not let me skip it.
+ #
+ # After this round, we will have four variants that use the following mnemonics:
+ #
+ # - 'r' for relative, ie. non-zero X and non-zero Y,
+ # - 'h' for horizontal, ie. zero X and non-zero Y,
+ # - 'v' for vertical, ie. non-zero X and zero Y,
+ # - '0' for zeros, ie. zero X and zero Y.
+ #
+ # The '0' pseudo-operators are not part of the spec, but help simplify the following
+ # optimization rounds. We resolve them at the end. So, after this, we will have four
+ # moveto and four lineto variants:
+ #
+ # - 0moveto, 0lineto
+ # - hmoveto, hlineto
+ # - vmoveto, vlineto
+ # - rmoveto, rlineto
+ #
+ # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve
+ # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3.
+ # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
+ #
+ # There are nine different variants of curves without the '0'. Those nine map exactly
+ # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
+ # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
+ # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of
+ # arguments) is in fact an rhcurveto. The operators in the spec are designed such that
+ # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
+ #
+ # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest
+ # of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be
+ # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always
+ # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute
+ # the '0' with either 'h' or 'v' and it works.
+ #
+ # When we get to curve splines however, things become more complicated... XXX finish this.
+ # There's one more complexity with splines. If one side of the spline is not horizontal or
+ # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
+ # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
+ # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
+ # This limits our merge opportunities later.
+ #
+ for i in range(len(commands)):
+ op, args = commands[i]
+
+ if op in {"rmoveto", "rlineto"}:
+ c, args = _categorizeVector(args)
+ commands[i] = c + op[1:], args
+ continue
+
+ if op == "rrcurveto":
+ c1, args1 = _categorizeVector(args[:2])
+ c2, args2 = _categorizeVector(args[-2:])
+ commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
+ continue
+
+ # 3. Merge or delete redundant operations, to the extent requested.
+ #
+ # TODO
+ # A 0moveto that comes before all other path operations can be removed.
+ # though I find conflicting evidence for this.
+ #
+ # TODO
+ # "If hstem and vstem hints are both declared at the beginning of a
+ # CharString, and this sequence is followed directly by the hintmask or
+ # cntrmask operators, then the vstem hint operator (or, if applicable,
+ # the vstemhm operator) need not be included."
+ #
+ # "The sequence and form of a CFF2 CharString program may be represented as:
+ # {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
+ #
+ # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
+ #
+ # For Type2 CharStrings the sequence is:
+ # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
+
+ # Some other redundancies change topology (point numbers).
+ if not preserveTopology:
+ for i in range(len(commands) - 1, -1, -1):
+ op, args = commands[i]
+
+ # A 00curveto is demoted to a (specialized) lineto.
+ if op == "00curveto":
+ assert len(args) == 4
+ c, args = _categorizeVector(args[1:3])
+ op = c + "lineto"
+ commands[i] = op, args
+ # and then...
+
+ # A 0lineto can be deleted.
+ if op == "0lineto":
+ del commands[i]
+ continue
+
+ # Merge adjacent hlineto's and vlineto's.
+ # In CFF2 charstrings from variable fonts, each
+ # arg item may be a list of blendable values, one from
+ # each source font.
+ if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
+ _, other_args = commands[i - 1]
+ assert len(args) == 1 and len(other_args) == 1
+ try:
+ new_args = [_addArgs(args[0], other_args[0])]
+ except ValueError:
+ continue
+ commands[i - 1] = (op, new_args)
+ del commands[i]
+ continue
+
+ # 4. Peephole optimization to revert back some of the h/v variants back into their
+ # original "relative" operator (rline/rrcurveto) if that saves a byte.
+ for i in range(1, len(commands) - 1):
+ op, args = commands[i]
+ prv, nxt = commands[i - 1][0], commands[i + 1][0]
+
+ if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
+ assert len(args) == 1
+ args = [0, args[0]] if op[0] == "v" else [args[0], 0]
+ commands[i] = ("rlineto", args)
+ continue
+
+ if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
+ assert (op[0] == "r") ^ (op[1] == "r")
+ if op[0] == "v":
+ pos = 0
+ elif op[0] != "r":
+ pos = 1
+ elif op[1] == "v":
+ pos = 4
+ else:
+ pos = 5
+ # Insert, while maintaining the type of args (can be tuple or list).
+ args = args[:pos] + type(args)((0,)) + args[pos:]
+ commands[i] = ("rrcurveto", args)
+ continue
+
+ # 5. Combine adjacent operators when possible, minding not to go over max stack size.
+ for i in range(len(commands) - 1, 0, -1):
+ op1, args1 = commands[i - 1]
+ op2, args2 = commands[i]
+ new_op = None
+
+ # Merge logic...
+ if {op1, op2} <= {"rlineto", "rrcurveto"}:
+ if op1 == op2:
+ new_op = op1
+ else:
+ if op2 == "rrcurveto" and len(args2) == 6:
+ new_op = "rlinecurve"
+ elif len(args2) == 2:
+ new_op = "rcurveline"
+
+ elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
+ new_op = op2
+
+ elif {op1, op2} == {"vlineto", "hlineto"}:
+ new_op = op1
+
+ elif "curveto" == op1[2:] == op2[2:]:
+ d0, d1 = op1[:2]
+ d2, d3 = op2[:2]
+
+ if d1 == "r" or d2 == "r" or d0 == d3 == "r":
+ continue
+
+ d = _mergeCategories(d1, d2)
+ if d is None:
+ continue
+ if d0 == "r":
+ d = _mergeCategories(d, d3)
+ if d is None:
+ continue
+ new_op = "r" + d + "curveto"
+ elif d3 == "r":
+ d0 = _mergeCategories(d0, _negateCategory(d))
+ if d0 is None:
+ continue
+ new_op = d0 + "r" + "curveto"
+ else:
+ d0 = _mergeCategories(d0, d3)
+ if d0 is None:
+ continue
+ new_op = d0 + d + "curveto"
+
+ # Make sure the stack depth does not exceed (maxstack - 1), so
+ # that subroutinizer can insert subroutine calls at any point.
+ if new_op and len(args1) + len(args2) < maxstack:
+ commands[i - 1] = (new_op, args1 + args2)
+ del commands[i]
+
+ # 6. Resolve any remaining made-up operators into real operators.
+ for i in range(len(commands)):
+ op, args = commands[i]
+
+ if op in {"0moveto", "0lineto"}:
+ commands[i] = "h" + op[1:], args
+ continue
+
+ if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
+ op0, op1 = op[:2]
+ if (op0 == "r") ^ (op1 == "r"):
+ assert len(args) % 2 == 1
+ if op0 == "0":
+ op0 = "h"
+ if op1 == "0":
+ op1 = "h"
+ if op0 == "r":
+ op0 = op1
+ if op1 == "r":
+ op1 = _negateCategory(op0)
+ assert {op0, op1} <= {"h", "v"}, (op0, op1)
+
+ if len(args) % 2:
+ if op0 != op1: # vhcurveto / hvcurveto
+ if (op0 == "h") ^ (len(args) % 8 == 1):
+ # Swap last two args order
+ args = args[:-2] + args[-1:] + args[-2:-1]
+ else: # hhcurveto / vvcurveto
+ if op0 == "h": # hhcurveto
+ # Swap first two args order
+ args = args[1:2] + args[:1] + args[2:]
+
+ commands[i] = op0 + op1 + "curveto", args
+ continue
+
+ # 7. For any series of args which are blend lists, convert the series to a single blend arg.
+ for i in range(len(commands)):
+ op, args = commands[i]
+ if any(isinstance(arg, list) for arg in args):
+ commands[i] = op, _convertToBlendCmds(args)
+
+ return commands
+
def specializeProgram(program, getNumRegions=None, **kwargs):
- return commandsToProgram(specializeCommands(programToCommands(program, getNumRegions), **kwargs))
-
-
-if __name__ == '__main__':
- import sys
- if len(sys.argv) == 1:
- import doctest
- sys.exit(doctest.testmod().failed)
-
- import argparse
-
- parser = argparse.ArgumentParser(
- "fonttools cffLib.specialer", description="CFF CharString generalizer/specializer")
- parser.add_argument(
- "program", metavar="command", nargs="*", help="Commands.")
- parser.add_argument(
- "--num-regions", metavar="NumRegions", nargs="*", default=None,
- help="Number of variable-font regions for blend opertaions.")
-
- options = parser.parse_args(sys.argv[1:])
-
- getNumRegions = None if options.num_regions is None else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
-
- program = stringToProgram(options.program)
- print("Program:"); print(programToString(program))
- commands = programToCommands(program, getNumRegions)
- print("Commands:"); print(commands)
- program2 = commandsToProgram(commands)
- print("Program from commands:"); print(programToString(program2))
- assert program == program2
- print("Generalized program:"); print(programToString(generalizeProgram(program, getNumRegions)))
- print("Specialized program:"); print(programToString(specializeProgram(program, getNumRegions)))
+ return commandsToProgram(
+ specializeCommands(programToCommands(program, getNumRegions), **kwargs)
+ )
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) == 1:
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
+
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools cffLib.specialer",
+ description="CFF CharString generalizer/specializer",
+ )
+ parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
+ parser.add_argument(
+ "--num-regions",
+ metavar="NumRegions",
+ nargs="*",
+ default=None,
+ help="Number of variable-font regions for blend opertaions.",
+ )
+
+ options = parser.parse_args(sys.argv[1:])
+
+ getNumRegions = (
+ None
+ if options.num_regions is None
+ else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
+ )
+
+ program = stringToProgram(options.program)
+ print("Program:")
+ print(programToString(program))
+ commands = programToCommands(program, getNumRegions)
+ print("Commands:")
+ print(commands)
+ program2 = commandsToProgram(commands)
+ print("Program from commands:")
+ print(programToString(program2))
+ assert program == program2
+ print("Generalized program:")
+ print(programToString(generalizeProgram(program, getNumRegions)))
+ print("Specialized program:")
+ print(programToString(specializeProgram(program, getNumRegions)))
diff --git a/Lib/fontTools/cffLib/width.py b/Lib/fontTools/cffLib/width.py
index 303c9462..0ba3ed39 100644
--- a/Lib/fontTools/cffLib/width.py
+++ b/Lib/fontTools/cffLib/width.py
@@ -14,170 +14,194 @@ from functools import reduce
class missingdict(dict):
- def __init__(self, missing_func):
- self.missing_func = missing_func
- def __missing__(self, v):
- return self.missing_func(v)
+ def __init__(self, missing_func):
+ self.missing_func = missing_func
+
+ def __missing__(self, v):
+ return self.missing_func(v)
+
def cumSum(f, op=add, start=0, decreasing=False):
-
- keys = sorted(f.keys())
- minx, maxx = keys[0], keys[-1]
+ keys = sorted(f.keys())
+ minx, maxx = keys[0], keys[-1]
- total = reduce(op, f.values(), start)
+ total = reduce(op, f.values(), start)
- if decreasing:
- missing = lambda x: start if x > maxx else total
- domain = range(maxx, minx - 1, -1)
- else:
- missing = lambda x: start if x < minx else total
- domain = range(minx, maxx + 1)
+ if decreasing:
+ missing = lambda x: start if x > maxx else total
+ domain = range(maxx, minx - 1, -1)
+ else:
+ missing = lambda x: start if x < minx else total
+ domain = range(minx, maxx + 1)
- out = missingdict(missing)
+ out = missingdict(missing)
- v = start
- for x in domain:
- v = op(v, f[x])
- out[x] = v
+ v = start
+ for x in domain:
+ v = op(v, f[x])
+ out[x] = v
- return out
+ return out
-def byteCost(widths, default, nominal):
- if not hasattr(widths, 'items'):
- d = defaultdict(int)
- for w in widths:
- d[w] += 1
- widths = d
-
- cost = 0
- for w,freq in widths.items():
- if w == default: continue
- diff = abs(w - nominal)
- if diff <= 107:
- cost += freq
- elif diff <= 1131:
- cost += freq * 2
- else:
- cost += freq * 5
- return cost
+def byteCost(widths, default, nominal):
+ if not hasattr(widths, "items"):
+ d = defaultdict(int)
+ for w in widths:
+ d[w] += 1
+ widths = d
+
+ cost = 0
+ for w, freq in widths.items():
+ if w == default:
+ continue
+ diff = abs(w - nominal)
+ if diff <= 107:
+ cost += freq
+ elif diff <= 1131:
+ cost += freq * 2
+ else:
+ cost += freq * 5
+ return cost
def optimizeWidthsBruteforce(widths):
- """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
+ """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
- d = defaultdict(int)
- for w in widths:
- d[w] += 1
+ d = defaultdict(int)
+ for w in widths:
+ d[w] += 1
- # Maximum number of bytes using default can possibly save
- maxDefaultAdvantage = 5 * max(d.values())
+ # Maximum number of bytes using default can possibly save
+ maxDefaultAdvantage = 5 * max(d.values())
- minw, maxw = min(widths), max(widths)
- domain = list(range(minw, maxw+1))
+ minw, maxw = min(widths), max(widths)
+ domain = list(range(minw, maxw + 1))
- bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
+ bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
- bestCost = len(widths) * 5 + 1
- for nominal in domain:
- if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
- continue
- for default in domain:
- cost = byteCost(widths, default, nominal)
- if cost < bestCost:
- bestCost = cost
- bestDefault = default
- bestNominal = nominal
+ bestCost = len(widths) * 5 + 1
+ for nominal in domain:
+ if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
+ continue
+ for default in domain:
+ cost = byteCost(widths, default, nominal)
+ if cost < bestCost:
+ bestCost = cost
+ bestDefault = default
+ bestNominal = nominal
- return bestDefault, bestNominal
+ return bestDefault, bestNominal
def optimizeWidths(widths):
- """Given a list of glyph widths, or dictionary mapping glyph width to number of
- glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
-
- This algorithm is linear in UPEM+numGlyphs."""
-
- if not hasattr(widths, 'items'):
- d = defaultdict(int)
- for w in widths:
- d[w] += 1
- widths = d
-
- keys = sorted(widths.keys())
- minw, maxw = keys[0], keys[-1]
- domain = list(range(minw, maxw+1))
-
- # Cumulative sum/max forward/backward.
- cumFrqU = cumSum(widths, op=add)
- cumMaxU = cumSum(widths, op=max)
- cumFrqD = cumSum(widths, op=add, decreasing=True)
- cumMaxD = cumSum(widths, op=max, decreasing=True)
-
- # Cost per nominal choice, without default consideration.
- nomnCostU = missingdict(lambda x: cumFrqU[x] + cumFrqU[x-108] + cumFrqU[x-1132]*3)
- nomnCostD = missingdict(lambda x: cumFrqD[x] + cumFrqD[x+108] + cumFrqD[x+1132]*3)
- nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
-
- # Cost-saving per nominal choice, by best default choice.
- dfltCostU = missingdict(lambda x: max(cumMaxU[x], cumMaxU[x-108]*2, cumMaxU[x-1132]*5))
- dfltCostD = missingdict(lambda x: max(cumMaxD[x], cumMaxD[x+108]*2, cumMaxD[x+1132]*5))
- dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
-
- # Combined cost per nominal choice.
- bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
-
- # Best nominal.
- nominal = min(domain, key=lambda x: bestCost[x])
-
- # Work back the best default.
- bestC = bestCost[nominal]
- dfltC = nomnCost[nominal] - bestCost[nominal]
- ends = []
- if dfltC == dfltCostU[nominal]:
- starts = [nominal, nominal-108, nominal-1132]
- for start in starts:
- while cumMaxU[start] and cumMaxU[start] == cumMaxU[start-1]:
- start -= 1
- ends.append(start)
- else:
- starts = [nominal, nominal+108, nominal+1132]
- for start in starts:
- while cumMaxD[start] and cumMaxD[start] == cumMaxD[start+1]:
- start += 1
- ends.append(start)
- default = min(ends, key=lambda default: byteCost(widths, default, nominal))
-
- return default, nominal
+ """Given a list of glyph widths, or dictionary mapping glyph width to number of
+ glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
+
+ This algorithm is linear in UPEM+numGlyphs."""
+
+ if not hasattr(widths, "items"):
+ d = defaultdict(int)
+ for w in widths:
+ d[w] += 1
+ widths = d
+
+ keys = sorted(widths.keys())
+ minw, maxw = keys[0], keys[-1]
+ domain = list(range(minw, maxw + 1))
+
+ # Cumulative sum/max forward/backward.
+ cumFrqU = cumSum(widths, op=add)
+ cumMaxU = cumSum(widths, op=max)
+ cumFrqD = cumSum(widths, op=add, decreasing=True)
+ cumMaxD = cumSum(widths, op=max, decreasing=True)
+
+ # Cost per nominal choice, without default consideration.
+ nomnCostU = missingdict(
+ lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
+ )
+ nomnCostD = missingdict(
+ lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
+ )
+ nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
+
+ # Cost-saving per nominal choice, by best default choice.
+ dfltCostU = missingdict(
+ lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
+ )
+ dfltCostD = missingdict(
+ lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
+ )
+ dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
+
+ # Combined cost per nominal choice.
+ bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
+
+ # Best nominal.
+ nominal = min(domain, key=lambda x: bestCost[x])
+
+ # Work back the best default.
+ bestC = bestCost[nominal]
+ dfltC = nomnCost[nominal] - bestCost[nominal]
+ ends = []
+ if dfltC == dfltCostU[nominal]:
+ starts = [nominal, nominal - 108, nominal - 1132]
+ for start in starts:
+ while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
+ start -= 1
+ ends.append(start)
+ else:
+ starts = [nominal, nominal + 108, nominal + 1132]
+ for start in starts:
+ while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
+ start += 1
+ ends.append(start)
+ default = min(ends, key=lambda default: byteCost(widths, default, nominal))
+
+ return default, nominal
+
def main(args=None):
- """Calculate optimum defaultWidthX/nominalWidthX values"""
-
- import argparse
- parser = argparse.ArgumentParser(
- "fonttools cffLib.width",
- description=main.__doc__,
- )
- parser.add_argument('inputs', metavar='FILE', type=str, nargs='+',
- help="Input TTF files")
- parser.add_argument('-b', '--brute-force', dest="brute", action="store_true",
- help="Use brute-force approach (VERY slow)")
-
- args = parser.parse_args(args)
-
- for fontfile in args.inputs:
- font = TTFont(fontfile)
- hmtx = font['hmtx']
- widths = [m[0] for m in hmtx.metrics.values()]
- if args.brute:
- default, nominal = optimizeWidthsBruteforce(widths)
- else:
- default, nominal = optimizeWidths(widths)
- print("glyphs=%d default=%d nominal=%d byteCost=%d" % (len(widths), default, nominal, byteCost(widths, default, nominal)))
-
-if __name__ == '__main__':
- import sys
- if len(sys.argv) == 1:
- import doctest
- sys.exit(doctest.testmod().failed)
- main()
+ """Calculate optimum defaultWidthX/nominalWidthX values"""
+
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools cffLib.width",
+ description=main.__doc__,
+ )
+ parser.add_argument(
+ "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
+ )
+ parser.add_argument(
+ "-b",
+ "--brute-force",
+ dest="brute",
+ action="store_true",
+ help="Use brute-force approach (VERY slow)",
+ )
+
+ args = parser.parse_args(args)
+
+ for fontfile in args.inputs:
+ font = TTFont(fontfile)
+ hmtx = font["hmtx"]
+ widths = [m[0] for m in hmtx.metrics.values()]
+ if args.brute:
+ default, nominal = optimizeWidthsBruteforce(widths)
+ else:
+ default, nominal = optimizeWidths(widths)
+ print(
+ "glyphs=%d default=%d nominal=%d byteCost=%d"
+ % (len(widths), default, nominal, byteCost(widths, default, nominal))
+ )
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) == 1:
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
+ main()
diff --git a/Lib/fontTools/colorLib/errors.py b/Lib/fontTools/colorLib/errors.py
index a0bdda17..18cbebba 100644
--- a/Lib/fontTools/colorLib/errors.py
+++ b/Lib/fontTools/colorLib/errors.py
@@ -1,3 +1,2 @@
-
class ColorLibError(Exception):
pass
diff --git a/Lib/fontTools/colorLib/table_builder.py b/Lib/fontTools/colorLib/table_builder.py
index 763115b9..f1e182c4 100644
--- a/Lib/fontTools/colorLib/table_builder.py
+++ b/Lib/fontTools/colorLib/table_builder.py
@@ -67,9 +67,7 @@ def _split_format(cls, source):
assert isinstance(
fmt, collections.abc.Hashable
), f"{cls} Format is not hashable: {fmt!r}"
- assert (
- fmt in cls.convertersByName
- ), f"{cls} invalid Format: {fmt!r}"
+ assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
return fmt, remainder
diff --git a/Lib/fontTools/config/__init__.py b/Lib/fontTools/config/__init__.py
index f5a62eaf..c106fe51 100644
--- a/Lib/fontTools/config/__init__.py
+++ b/Lib/fontTools/config/__init__.py
@@ -57,3 +57,18 @@ Config.register_option(
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
+
+Config.register_option(
+ name="fontTools.otlLib.builder:WRITE_GPOS7",
+ help=dedent(
+ """\
+ macOS before 13.2 didn’t support GPOS LookupType 7 (non-chaining
+ ContextPos lookups), so FontTools.otlLib.builder disables a file size
+ optimisation that would use LookupType 7 instead of 8 when there is no
+ chaining (no prefix or suffix). Set to True to enable the optimization.
+ """
+ ),
+ default=False,
+ parse=Option.parse_optional_bool,
+ validate=Option.validate_optional_bool,
+)
diff --git a/Lib/fontTools/cu2qu/benchmark.py b/Lib/fontTools/cu2qu/benchmark.py
new file mode 100644
index 00000000..2ab1e966
--- /dev/null
+++ b/Lib/fontTools/cu2qu/benchmark.py
@@ -0,0 +1,55 @@
+"""Benchmark the cu2qu algorithm performance."""
+
+from .cu2qu import *
+import random
+import timeit
+
+MAX_ERR = 0.05
+
+
+def generate_curve():
+ return [
+ tuple(float(random.randint(0, 2048)) for coord in range(2))
+ for point in range(4)
+ ]
+
+
+def setup_curve_to_quadratic():
+ return generate_curve(), MAX_ERR
+
+
+def setup_curves_to_quadratic():
+ num_curves = 3
+ return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves)
+
+
+def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000):
+ setup_func = "setup_" + function
+ if setup_suffix:
+ print("%s with %s:" % (function, setup_suffix), end="")
+ setup_func += "_" + setup_suffix
+ else:
+ print("%s:" % function, end="")
+
+ def wrapper(function, setup_func):
+ function = globals()[function]
+ setup_func = globals()[setup_func]
+
+ def wrapped():
+ return function(*setup_func())
+
+ return wrapped
+
+ results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
+ print("\t%5.1fus" % (min(results) * 1000000.0 / number))
+
+
+def main():
+ """Benchmark the cu2qu algorithm performance."""
+ run_benchmark("cu2qu", "curve_to_quadratic")
+ run_benchmark("cu2qu", "curves_to_quadratic")
+
+
+if __name__ == "__main__":
+ random.seed(1)
+ main()
diff --git a/Lib/fontTools/cu2qu/cli.py b/Lib/fontTools/cu2qu/cli.py
index 34520fc0..9144043f 100644
--- a/Lib/fontTools/cu2qu/cli.py
+++ b/Lib/fontTools/cu2qu/cli.py
@@ -37,7 +37,7 @@ def open_ufo(path):
def _font_to_quadratic(input_path, output_path=None, **kwargs):
ufo = open_ufo(input_path)
- logger.info('Converting curves for %s', input_path)
+ logger.info("Converting curves for %s", input_path)
if font_to_quadratic(ufo, **kwargs):
logger.info("Saving %s", output_path)
if output_path:
@@ -67,13 +67,13 @@ def _copytree(input_path, output_path):
def main(args=None):
"""Convert a UFO font from cubic to quadratic curves"""
parser = argparse.ArgumentParser(prog="cu2qu")
- parser.add_argument(
- "--version", action="version", version=fontTools.__version__)
+ parser.add_argument("--version", action="version", version=fontTools.__version__)
parser.add_argument(
"infiles",
nargs="+",
metavar="INPUT",
- help="one or more input UFO source file(s).")
+ help="one or more input UFO source file(s).",
+ )
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument(
"-e",
@@ -81,19 +81,28 @@ def main(args=None):
type=float,
metavar="ERROR",
default=None,
- help="maxiumum approximation error measured in EM (default: 0.001)")
+ help="maxiumum approximation error measured in EM (default: 0.001)",
+ )
+ parser.add_argument(
+ "-m",
+ "--mixed",
+ default=False,
+ action="store_true",
+ help="whether to used mixed quadratic and cubic curves",
+ )
parser.add_argument(
"--keep-direction",
dest="reverse_direction",
action="store_false",
- help="do not reverse the contour direction")
+ help="do not reverse the contour direction",
+ )
mode_parser = parser.add_mutually_exclusive_group()
mode_parser.add_argument(
"-i",
"--interpolatable",
action="store_true",
- help="whether curve conversion should keep interpolation compatibility"
+ help="whether curve conversion should keep interpolation compatibility",
)
mode_parser.add_argument(
"-j",
@@ -103,7 +112,8 @@ def main(args=None):
default=1,
const=_cpu_count(),
metavar="N",
- help="Convert using N multiple processes (default: %(default)s)")
+ help="Convert using N multiple processes (default: %(default)s)",
+ )
output_parser = parser.add_mutually_exclusive_group()
output_parser.add_argument(
@@ -111,14 +121,18 @@ def main(args=None):
"--output-file",
default=None,
metavar="OUTPUT",
- help=("output filename for the converted UFO. By default fonts are "
- "modified in place. This only works with a single input."))
+ help=(
+ "output filename for the converted UFO. By default fonts are "
+ "modified in place. This only works with a single input."
+ ),
+ )
output_parser.add_argument(
"-d",
"--output-dir",
default=None,
metavar="DIRECTORY",
- help="output directory where to save converted UFOs")
+ help="output directory where to save converted UFOs",
+ )
options = parser.parse_args(args)
@@ -143,8 +157,7 @@ def main(args=None):
elif not os.path.isdir(output_dir):
parser.error("'%s' is not a directory" % output_dir)
output_paths = [
- os.path.join(output_dir, os.path.basename(p))
- for p in options.infiles
+ os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
]
elif options.output_file:
output_paths = [options.output_file]
@@ -152,12 +165,15 @@ def main(args=None):
# save in-place
output_paths = [None] * len(options.infiles)
- kwargs = dict(dump_stats=options.verbose > 0,
- max_err_em=options.conversion_error,
- reverse_direction=options.reverse_direction)
+ kwargs = dict(
+ dump_stats=options.verbose > 0,
+ max_err_em=options.conversion_error,
+ reverse_direction=options.reverse_direction,
+ all_quadratic=False if options.mixed else True,
+ )
if options.interpolatable:
- logger.info('Converting curves compatibly')
+ logger.info("Converting curves compatibly")
ufos = [open_ufo(infile) for infile in options.infiles]
if fonts_to_quadratic(ufos, **kwargs):
for ufo, output_path in zip(ufos, output_paths):
@@ -171,11 +187,10 @@ def main(args=None):
if output_path:
_copytree(input_path, output_path)
else:
- jobs = min(len(options.infiles),
- options.jobs) if options.jobs > 1 else 1
+ jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
if jobs > 1:
func = partial(_font_to_quadratic, **kwargs)
- logger.info('Running %d parallel processes', jobs)
+ logger.info("Running %d parallel processes", jobs)
with closing(mp.Pool(jobs)) as pool:
pool.starmap(func, zip(options.infiles, output_paths))
else:
diff --git a/Lib/fontTools/cu2qu/cu2qu.py b/Lib/fontTools/cu2qu/cu2qu.py
index c9ce93ae..e620b48a 100644
--- a/Lib/fontTools/cu2qu/cu2qu.py
+++ b/Lib/fontTools/cu2qu/cu2qu.py
@@ -1,5 +1,5 @@
-#cython: language_level=3
-#distutils: define_macros=CYTHON_TRACE_NOGIL=1
+# cython: language_level=3
+# distutils: define_macros=CYTHON_TRACE_NOGIL=1
# Copyright 2015 Google Inc. All Rights Reserved.
#
@@ -17,30 +17,26 @@
try:
import cython
-except ImportError:
+
+ COMPILED = cython.compiled
+except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
+ COMPILED = False
+
import math
from .errors import Error as Cu2QuError, ApproxNotFoundError
-__all__ = ['curve_to_quadratic', 'curves_to_quadratic']
+__all__ = ["curve_to_quadratic", "curves_to_quadratic"]
MAX_N = 100
NAN = float("NaN")
-if cython.compiled:
- # Yep, I'm compiled.
- COMPILED = True
-else:
- # Just a lowly interpreted script.
- COMPILED = False
-
-
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@@ -61,7 +57,9 @@ def dot(v1, v2):
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
-@cython.locals(_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex)
+@cython.locals(
+ _1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
+)
def calc_cubic_points(a, b, c, d):
_1 = d
_2 = (c / 3.0) + d
@@ -72,7 +70,9 @@ def calc_cubic_points(a, b, c, d):
@cython.cfunc
@cython.inline
-@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
+@cython.locals(
+ p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
+)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3):
c = (p1 - p0) * 3.0
@@ -83,7 +83,10 @@ def calc_cubic_parameters(p0, p1, p2, p3):
@cython.cfunc
-@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
+@cython.inline
+@cython.locals(
+ p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
+)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
"""Split a cubic Bezier into n equal parts.
@@ -107,18 +110,34 @@ def split_cubic_into_n_iter(p0, p1, p2, p3, n):
return iter(split_cubic_into_three(p0, p1, p2, p3))
if n == 4:
a, b = split_cubic_into_two(p0, p1, p2, p3)
- return iter(split_cubic_into_two(*a) + split_cubic_into_two(*b))
+ return iter(
+ split_cubic_into_two(a[0], a[1], a[2], a[3])
+ + split_cubic_into_two(b[0], b[1], b[2], b[3])
+ )
if n == 6:
a, b = split_cubic_into_two(p0, p1, p2, p3)
- return iter(split_cubic_into_three(*a) + split_cubic_into_three(*b))
+ return iter(
+ split_cubic_into_three(a[0], a[1], a[2], a[3])
+ + split_cubic_into_three(b[0], b[1], b[2], b[3])
+ )
- return _split_cubic_into_n_gen(p0,p1,p2,p3,n)
+ return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
-@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, n=cython.int)
+@cython.locals(
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+ n=cython.int,
+)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
-@cython.locals(dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int)
-@cython.locals(a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex)
+@cython.locals(
+ dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
+)
+@cython.locals(
+ a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
+)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
dt = 1 / n
@@ -129,13 +148,17 @@ def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
t1_2 = t1 * t1
# calc new a, b, c and d
a1 = a * delta_3
- b1 = (3*a*t1 + b) * delta_2
- c1 = (2*b*t1 + c + 3*a*t1_2) * dt
- d1 = a*t1*t1_2 + b*t1_2 + c*t1 + d
+ b1 = (3 * a * t1 + b) * delta_2
+ c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
+ d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
yield calc_cubic_points(a1, b1, c1, d1)
-@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
+@cython.cfunc
+@cython.inline
+@cython.locals(
+ p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
+)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
"""Split a cubic Bezier into two equal parts.
@@ -152,15 +175,29 @@ def split_cubic_into_two(p0, p1, p2, p3):
tuple: Two cubic Beziers (each expressed as a tuple of four complex
values).
"""
- mid = (p0 + 3 * (p1 + p2) + p3) * .125
- deriv3 = (p3 + p2 - p1 - p0) * .125
- return ((p0, (p0 + p1) * .5, mid - deriv3, mid),
- (mid, mid + deriv3, (p2 + p3) * .5, p3))
+ mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
+ deriv3 = (p3 + p2 - p1 - p0) * 0.125
+ return (
+ (p0, (p0 + p1) * 0.5, mid - deriv3, mid),
+ (mid, mid + deriv3, (p2 + p3) * 0.5, p3),
+ )
-@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, _27=cython.double)
-@cython.locals(mid1=cython.complex, deriv1=cython.complex, mid2=cython.complex, deriv2=cython.complex)
-def split_cubic_into_three(p0, p1, p2, p3, _27=1/27):
+@cython.cfunc
+@cython.inline
+@cython.locals(
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+)
+@cython.locals(
+ mid1=cython.complex,
+ deriv1=cython.complex,
+ mid2=cython.complex,
+ deriv2=cython.complex,
+)
+def split_cubic_into_three(p0, p1, p2, p3):
"""Split a cubic Bezier into three equal parts.
Splits the curve into three equal parts at t = 1/3 and t = 2/3
@@ -175,19 +212,27 @@ def split_cubic_into_three(p0, p1, p2, p3, _27=1/27):
tuple: Three cubic Beziers (each expressed as a tuple of four complex
values).
"""
- # we define 1/27 as a keyword argument so that it will be evaluated only
- # once but still in the scope of this function
- mid1 = (8*p0 + 12*p1 + 6*p2 + p3) * _27
- deriv1 = (p3 + 3*p2 - 4*p0) * _27
- mid2 = (p0 + 6*p1 + 12*p2 + 8*p3) * _27
- deriv2 = (4*p3 - 3*p1 - p0) * _27
- return ((p0, (2*p0 + p1) / 3.0, mid1 - deriv1, mid1),
- (mid1, mid1 + deriv1, mid2 - deriv2, mid2),
- (mid2, mid2 + deriv2, (p2 + 2*p3) / 3.0, p3))
+ mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27)
+ deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27)
+ mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27)
+ deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27)
+ return (
+ (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
+ (mid1, mid1 + deriv1, mid2 - deriv2, mid2),
+ (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
+ )
+@cython.cfunc
+@cython.inline
@cython.returns(cython.complex)
-@cython.locals(t=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
+@cython.locals(
+ t=cython.double,
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
"""Approximate a cubic Bezier using a quadratic one.
@@ -207,6 +252,8 @@ def cubic_approx_control(t, p0, p1, p2, p3):
return _p1 + (_p2 - _p1) * t
+@cython.cfunc
+@cython.inline
@cython.returns(cython.complex)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double)
@@ -235,7 +282,13 @@ def calc_intersect(a, b, c, d):
@cython.cfunc
@cython.returns(cython.int)
-@cython.locals(tolerance=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
+@cython.locals(
+ tolerance=cython.double,
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
"""Check if a cubic Bezier lies within a given distance of the origin.
@@ -260,18 +313,26 @@ def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
return True
# Split.
- mid = (p0 + 3 * (p1 + p2) + p3) * .125
+ mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
if abs(mid) > tolerance:
return False
- deriv3 = (p3 + p2 - p1 - p0) * .125
- return (cubic_farthest_fit_inside(p0, (p0+p1)*.5, mid-deriv3, mid, tolerance) and
- cubic_farthest_fit_inside(mid, mid+deriv3, (p2+p3)*.5, p3, tolerance))
+ deriv3 = (p3 + p2 - p1 - p0) * 0.125
+ return cubic_farthest_fit_inside(
+ p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
+ ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.cfunc
-@cython.locals(tolerance=cython.double, _2_3=cython.double)
-@cython.locals(q1=cython.complex, c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex)
-def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
+@cython.inline
+@cython.locals(tolerance=cython.double)
+@cython.locals(
+ q1=cython.complex,
+ c0=cython.complex,
+ c1=cython.complex,
+ c2=cython.complex,
+ c3=cython.complex,
+)
+def cubic_approx_quadratic(cubic, tolerance):
"""Approximate a cubic Bezier with a single quadratic within a given tolerance.
Args:
@@ -284,30 +345,34 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
curve if it fits within the given tolerance, or ``None`` if no suitable
curve could be calculated.
"""
- # we define 2/3 as a keyword argument so that it will be evaluated only
- # once but still in the scope of this function
- q1 = calc_intersect(*cubic)
+ q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3])
if math.isnan(q1.imag):
return None
c0 = cubic[0]
c3 = cubic[3]
- c1 = c0 + (q1 - c0) * _2_3
- c2 = c3 + (q1 - c3) * _2_3
- if not cubic_farthest_fit_inside(0,
- c1 - cubic[1],
- c2 - cubic[2],
- 0, tolerance):
+ c1 = c0 + (q1 - c0) * (2 / 3)
+ c2 = c3 + (q1 - c3) * (2 / 3)
+ if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
return None
return c0, q1, c3
@cython.cfunc
-@cython.locals(n=cython.int, tolerance=cython.double, _2_3=cython.double)
+@cython.locals(n=cython.int, tolerance=cython.double)
@cython.locals(i=cython.int)
-@cython.locals(c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex)
-@cython.locals(q0=cython.complex, q1=cython.complex, next_q1=cython.complex, q2=cython.complex, d1=cython.complex)
-def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
+@cython.locals(all_quadratic=cython.int)
+@cython.locals(
+ c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
+)
+@cython.locals(
+ q0=cython.complex,
+ q1=cython.complex,
+ next_q1=cython.complex,
+ q2=cython.complex,
+ d1=cython.complex,
+)
+def cubic_approx_spline(cubic, n, tolerance, all_quadratic):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
Args:
@@ -321,22 +386,23 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
quadratic spline if it fits within the given tolerance, or ``None`` if
no suitable spline could be calculated.
"""
- # we define 2/3 as a keyword argument so that it will be evaluated only
- # once but still in the scope of this function
if n == 1:
return cubic_approx_quadratic(cubic, tolerance)
+ if n == 2 and all_quadratic == False:
+ return cubic
cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n)
# calculate the spline of quadratics and check errors at the same time.
next_cubic = next(cubics)
- next_q1 = cubic_approx_control(0, *next_cubic)
+ next_q1 = cubic_approx_control(
+ 0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
+ )
q2 = cubic[0]
d1 = 0j
spline = [cubic[0], next_q1]
- for i in range(1, n+1):
-
+ for i in range(1, n + 1):
# Current cubic to convert
c0, c1, c2, c3 = next_cubic
@@ -345,9 +411,11 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
q1 = next_q1
if i < n:
next_cubic = next(cubics)
- next_q1 = cubic_approx_control(i / (n-1), *next_cubic)
+ next_q1 = cubic_approx_control(
+ i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
+ )
spline.append(next_q1)
- q2 = (q1 + next_q1) * .5
+ q2 = (q1 + next_q1) * 0.5
else:
q2 = c3
@@ -355,12 +423,13 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
d0 = d1
d1 = q2 - c3
- if (abs(d1) > tolerance or
- not cubic_farthest_fit_inside(d0,
- q0 + (q1 - q0) * _2_3 - c1,
- q2 + (q1 - q2) * _2_3 - c2,
- d1,
- tolerance)):
+ if abs(d1) > tolerance or not cubic_farthest_fit_inside(
+ d0,
+ q0 + (q1 - q0) * (2 / 3) - c1,
+ q2 + (q1 - q2) * (2 / 3) - c2,
+ d1,
+ tolerance,
+ ):
return None
spline.append(cubic[3])
@@ -369,24 +438,32 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
@cython.locals(max_err=cython.double)
@cython.locals(n=cython.int)
-def curve_to_quadratic(curve, max_err):
+@cython.locals(all_quadratic=cython.int)
+def curve_to_quadratic(curve, max_err, all_quadratic=True):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
Args:
cubic (sequence): Four 2D tuples representing control points of
the cubic Bezier curve.
max_err (double): Permitted deviation from the original curve.
+ all_quadratic (bool): If True (default) returned value is a
+ quadratic spline. If False, it's either a single quadratic
+ curve or a single cubic curve.
Returns:
- A list of 2D tuples, representing control points of the quadratic
- spline if it fits within the given tolerance, or ``None`` if no
- suitable spline could be calculated.
+ If all_quadratic is True: A list of 2D tuples, representing
+ control points of the quadratic spline if it fits within the
+ given tolerance, or ``None`` if no suitable spline could be
+ calculated.
+
+ If all_quadratic is False: Either a quadratic curve (if length
+ of output is 3), or a cubic curve (if length of output is 4).
"""
curve = [complex(*p) for p in curve]
for n in range(1, MAX_N + 1):
- spline = cubic_approx_spline(curve, n, max_err)
+ spline = cubic_approx_spline(curve, n, max_err, all_quadratic)
if spline is not None:
# done. go home
return [(s.real, s.imag) for s in spline]
@@ -394,9 +471,9 @@ def curve_to_quadratic(curve, max_err):
raise ApproxNotFoundError(curve)
-
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
-def curves_to_quadratic(curves, max_errors):
+@cython.locals(all_quadratic=cython.int)
+def curves_to_quadratic(curves, max_errors, all_quadratic=True):
"""Return quadratic Bezier splines approximating the input cubic Beziers.
Args:
@@ -404,6 +481,9 @@ def curves_to_quadratic(curves, max_errors):
2D tuples.
max_errors: A sequence of *n* floats representing the maximum permissible
deviation from each of the cubic Bezier curves.
+ all_quadratic (bool): If True (default) returned values are a
+ quadratic spline. If False, they are either a single quadratic
+ curve or a single cubic curve.
Example::
@@ -419,7 +499,11 @@ def curves_to_quadratic(curves, max_errors):
( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...).
Returns:
- A list of splines, each spline being a list of 2D tuples.
+ If all_quadratic is True, a list of splines, each spline being a list
+ of 2D tuples.
+
+ If all_quadratic is False, a list of curves, each curve being a quadratic
+ (length 3), or cubic (length 4).
Raises:
fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation
@@ -434,7 +518,7 @@ def curves_to_quadratic(curves, max_errors):
last_i = i = 0
n = 1
while True:
- spline = cubic_approx_spline(curves[i], n, max_errors[i])
+ spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
if spline is None:
if n == MAX_N:
break
@@ -448,49 +532,3 @@ def curves_to_quadratic(curves, max_errors):
return [[(s.real, s.imag) for s in spline] for spline in splines]
raise ApproxNotFoundError(curves)
-
-
-if __name__ == '__main__':
- import random
- import timeit
-
- MAX_ERR = 5
-
- def generate_curve():
- return [
- tuple(float(random.randint(0, 2048)) for coord in range(2))
- for point in range(4)]
-
- def setup_curve_to_quadratic():
- return generate_curve(), MAX_ERR
-
- def setup_curves_to_quadratic():
- num_curves = 3
- return (
- [generate_curve() for curve in range(num_curves)],
- [MAX_ERR] * num_curves)
-
- def run_benchmark(
- benchmark_module, module, function, setup_suffix='', repeat=5, number=1000):
- setup_func = 'setup_' + function
- if setup_suffix:
- print('%s with %s:' % (function, setup_suffix), end='')
- setup_func += '_' + setup_suffix
- else:
- print('%s:' % function, end='')
-
- def wrapper(function, setup_func):
- function = globals()[function]
- setup_func = globals()[setup_func]
- def wrapped():
- return function(*setup_func())
- return wrapped
- results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
- print('\t%5.1fus' % (min(results) * 1000000. / number))
-
- def main():
- run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic')
- run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic')
-
- random.seed(1)
- main()
diff --git a/Lib/fontTools/cu2qu/errors.py b/Lib/fontTools/cu2qu/errors.py
index 74c4c227..fa3dc429 100644
--- a/Lib/fontTools/cu2qu/errors.py
+++ b/Lib/fontTools/cu2qu/errors.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+
class Error(Exception):
"""Base Cu2Qu exception class for all other errors."""
diff --git a/Lib/fontTools/cu2qu/ufo.py b/Lib/fontTools/cu2qu/ufo.py
index 447de7bb..10367cfe 100644
--- a/Lib/fontTools/cu2qu/ufo.py
+++ b/Lib/fontTools/cu2qu/ufo.py
@@ -30,12 +30,15 @@ from fontTools.pens.reverseContourPen import ReverseContourPen
from . import curves_to_quadratic
from .errors import (
- UnequalZipLengthsError, IncompatibleSegmentNumberError,
- IncompatibleSegmentTypesError, IncompatibleGlyphsError,
- IncompatibleFontsError)
+ UnequalZipLengthsError,
+ IncompatibleSegmentNumberError,
+ IncompatibleSegmentTypesError,
+ IncompatibleGlyphsError,
+ IncompatibleFontsError,
+)
-__all__ = ['fonts_to_quadratic', 'font_to_quadratic']
+__all__ = ["fonts_to_quadratic", "font_to_quadratic"]
# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
@@ -47,6 +50,8 @@ logger = logging.getLogger(__name__)
_zip = zip
+
+
def zip(*args):
"""Ensure each argument to zip has the same length. Also make sure a list is
returned for python 2/3 compatibility.
@@ -69,27 +74,27 @@ class GetSegmentsPen(AbstractPen):
self.segments = []
def _add_segment(self, tag, *args):
- if tag in ['move', 'line', 'qcurve', 'curve']:
+ if tag in ["move", "line", "qcurve", "curve"]:
self._last_pt = args[-1]
self.segments.append((tag, args))
def moveTo(self, pt):
- self._add_segment('move', pt)
+ self._add_segment("move", pt)
def lineTo(self, pt):
- self._add_segment('line', pt)
+ self._add_segment("line", pt)
def qCurveTo(self, *points):
- self._add_segment('qcurve', self._last_pt, *points)
+ self._add_segment("qcurve", self._last_pt, *points)
def curveTo(self, *points):
- self._add_segment('curve', self._last_pt, *points)
+ self._add_segment("curve", self._last_pt, *points)
def closePath(self):
- self._add_segment('close')
+ self._add_segment("close")
def endPath(self):
- self._add_segment('end')
+ self._add_segment("end")
def addComponent(self, glyphName, transformation):
pass
@@ -122,38 +127,41 @@ def _set_segments(glyph, segments, reverse_direction):
if reverse_direction:
pen = ReverseContourPen(pen)
for tag, args in segments:
- if tag == 'move':
+ if tag == "move":
pen.moveTo(*args)
- elif tag == 'line':
+ elif tag == "line":
pen.lineTo(*args)
- elif tag == 'curve':
+ elif tag == "curve":
pen.curveTo(*args[1:])
- elif tag == 'qcurve':
+ elif tag == "qcurve":
pen.qCurveTo(*args[1:])
- elif tag == 'close':
+ elif tag == "close":
pen.closePath()
- elif tag == 'end':
+ elif tag == "end":
pen.endPath()
else:
raise AssertionError('Unhandled segment type "%s"' % tag)
-def _segments_to_quadratic(segments, max_err, stats):
+def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True):
"""Return quadratic approximations of cubic segments."""
- assert all(s[0] == 'curve' for s in segments), 'Non-cubic given to convert'
+ assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"
- new_points = curves_to_quadratic([s[1] for s in segments], max_err)
+ new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic)
n = len(new_points[0])
- assert all(len(s) == n for s in new_points[1:]), 'Converted incompatibly'
+ assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"
spline_length = str(n - 2)
stats[spline_length] = stats.get(spline_length, 0) + 1
- return [('qcurve', p) for p in new_points]
+ if all_quadratic or n == 3:
+ return [("qcurve", p) for p in new_points]
+ else:
+ return [("curve", p) for p in new_points]
-def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
+def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True):
"""Do the actual conversion of a set of compatible glyphs, after arguments
have been set up.
@@ -176,9 +184,13 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
tag = segments[0][0]
if not all(s[0] == tag for s in segments[1:]):
incompatible[i] = [s[0] for s in segments]
- elif tag == 'curve':
- segments = _segments_to_quadratic(segments, max_err, stats)
- glyphs_modified = True
+ elif tag == "curve":
+ new_segments = _segments_to_quadratic(
+ segments, max_err, stats, all_quadratic
+ )
+ if all_quadratic or new_segments != segments:
+ glyphs_modified = True
+ segments = new_segments
new_segments_by_location.append(segments)
if glyphs_modified:
@@ -192,7 +204,8 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
def glyphs_to_quadratic(
- glyphs, max_err=None, reverse_direction=False, stats=None):
+ glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True
+):
"""Convert the curves of a set of compatible of glyphs to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
@@ -216,12 +229,21 @@ def glyphs_to_quadratic(
max_errors = [max_err] * len(glyphs)
assert len(max_errors) == len(glyphs)
- return _glyphs_to_quadratic(glyphs, max_errors, reverse_direction, stats)
+ return _glyphs_to_quadratic(
+ glyphs, max_errors, reverse_direction, stats, all_quadratic
+ )
def fonts_to_quadratic(
- fonts, max_err_em=None, max_err=None, reverse_direction=False,
- stats=None, dump_stats=False, remember_curve_type=True):
+ fonts,
+ max_err_em=None,
+ max_err=None,
+ reverse_direction=False,
+ stats=None,
+ dump_stats=False,
+ remember_curve_type=True,
+ all_quadratic=True,
+):
"""Convert the curves of a collection of fonts to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
@@ -243,7 +265,7 @@ def fonts_to_quadratic(
curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts}
if len(curve_types) == 1:
curve_type = next(iter(curve_types))
- if curve_type == "quadratic":
+ if curve_type in ("quadratic", "mixed"):
logger.info("Curves already converted to quadratic")
return False
elif curve_type == "cubic":
@@ -258,7 +280,7 @@ def fonts_to_quadratic(
stats = {}
if max_err_em and max_err:
- raise TypeError('Only one of max_err and max_err_em can be specified.')
+ raise TypeError("Only one of max_err and max_err_em can be specified.")
if not (max_err_em or max_err):
max_err_em = DEFAULT_MAX_ERR
@@ -270,8 +292,7 @@ def fonts_to_quadratic(
if isinstance(max_err_em, (list, tuple)):
assert len(fonts) == len(max_err_em)
- max_errors = [f.info.unitsPerEm * e
- for f, e in zip(fonts, max_err_em)]
+ max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
elif max_err_em:
max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]
@@ -286,7 +307,8 @@ def fonts_to_quadratic(
cur_max_errors.append(error)
try:
modified |= _glyphs_to_quadratic(
- glyphs, cur_max_errors, reverse_direction, stats)
+ glyphs, cur_max_errors, reverse_direction, stats, all_quadratic
+ )
except IncompatibleGlyphsError as exc:
logger.error(exc)
glyph_errors[name] = exc
@@ -296,14 +318,17 @@ def fonts_to_quadratic(
if modified and dump_stats:
spline_lengths = sorted(stats.keys())
- logger.info('New spline lengths: %s' % (', '.join(
- '%s: %d' % (l, stats[l]) for l in spline_lengths)))
+ logger.info(
+ "New spline lengths: %s"
+ % (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
+ )
if remember_curve_type:
for font in fonts:
curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
- if curve_type != "quadratic":
- font.lib[CURVE_TYPE_LIB_KEY] = "quadratic"
+ new_curve_type = "quadratic" if all_quadratic else "mixed"
+ if curve_type != new_curve_type:
+ font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type
modified = True
return modified
diff --git a/Lib/fontTools/designspaceLib/__init__.py b/Lib/fontTools/designspaceLib/__init__.py
index c74b5509..1c71fd00 100644
--- a/Lib/fontTools/designspaceLib/__init__.py
+++ b/Lib/fontTools/designspaceLib/__init__.py
@@ -22,20 +22,21 @@ from fontTools.misc.textTools import tobytes, tostr
"""
__all__ = [
- 'AxisDescriptor',
- 'AxisLabelDescriptor',
- 'BaseDocReader',
- 'BaseDocWriter',
- 'DesignSpaceDocument',
- 'DesignSpaceDocumentError',
- 'DiscreteAxisDescriptor',
- 'InstanceDescriptor',
- 'LocationLabelDescriptor',
- 'RangeAxisSubsetDescriptor',
- 'RuleDescriptor',
- 'SourceDescriptor',
- 'ValueAxisSubsetDescriptor',
- 'VariableFontDescriptor',
+ "AxisDescriptor",
+ "AxisLabelDescriptor",
+ "AxisMappingDescriptor",
+ "BaseDocReader",
+ "BaseDocWriter",
+ "DesignSpaceDocument",
+ "DesignSpaceDocumentError",
+ "DiscreteAxisDescriptor",
+ "InstanceDescriptor",
+ "LocationLabelDescriptor",
+ "RangeAxisSubsetDescriptor",
+ "RuleDescriptor",
+ "SourceDescriptor",
+ "ValueAxisSubsetDescriptor",
+ "VariableFontDescriptor",
]
# ElementTree allows to find namespace-prefixed elements, but not attributes
@@ -47,17 +48,18 @@ XML_LANG = XML_NS + "lang"
def posix(path):
"""Normalize paths using forward slash to work also on Windows."""
new_path = posixpath.join(*path.split(os.path.sep))
- if path.startswith('/'):
+ if path.startswith("/"):
# The above transformation loses absolute paths
- new_path = '/' + new_path
- elif path.startswith(r'\\'):
+ new_path = "/" + new_path
+ elif path.startswith(r"\\"):
# The above transformation loses leading slashes of UNC path mounts
- new_path = '//' + new_path
+ new_path = "//" + new_path
return new_path
def posixpath_property(private_name):
"""Generate a propery that holds a path always using forward slashes."""
+
def getter(self):
# Normal getter
return getattr(self, private_name)
@@ -77,12 +79,10 @@ class DesignSpaceDocumentError(Exception):
self.obj = obj
def __str__(self):
- return str(self.msg) + (
- ": %r" % self.obj if self.obj is not None else "")
+ return str(self.msg) + (": %r" % self.obj if self.obj is not None else "")
class AsDictMixin(object):
-
def asdict(self):
d = {}
for attr, value in self.__dict__.items():
@@ -91,15 +91,13 @@ class AsDictMixin(object):
if hasattr(value, "asdict"):
value = value.asdict()
elif isinstance(value, list):
- value = [
- v.asdict() if hasattr(v, "asdict") else v for v in value
- ]
+ value = [v.asdict() if hasattr(v, "asdict") else v for v in value]
d[attr] = value
return d
class SimpleDescriptor(AsDictMixin):
- """ Containers for a bunch of attributes"""
+ """Containers for a bunch of attributes"""
# XXX this is ugly. The 'print' is inappropriate here, and instead of
# assert, it should simply return True/False
@@ -107,13 +105,19 @@ class SimpleDescriptor(AsDictMixin):
# test if this object contains the same data as the other
for attr in self._attrs:
try:
- assert(getattr(self, attr) == getattr(other, attr))
+ assert getattr(self, attr) == getattr(other, attr)
except AssertionError:
- print("failed attribute", attr, getattr(self, attr), "!=", getattr(other, attr))
+ print(
+ "failed attribute",
+ attr,
+ getattr(self, attr),
+ "!=",
+ getattr(other, attr),
+ )
def __repr__(self):
attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
- attrs = indent('\n'.join(attrs), ' ')
+ attrs = indent("\n".join(attrs), " ")
return f"{self.__class__.__name__}(\n{attrs}\n)"
@@ -136,13 +140,24 @@ class SourceDescriptor(SimpleDescriptor):
doc.addSource(s1)
"""
+
flavor = "source"
- _attrs = ['filename', 'path', 'name', 'layerName',
- 'location', 'copyLib',
- 'copyGroups', 'copyFeatures',
- 'muteKerning', 'muteInfo',
- 'mutedGlyphNames',
- 'familyName', 'styleName', 'localisedFamilyName']
+ _attrs = [
+ "filename",
+ "path",
+ "name",
+ "layerName",
+ "location",
+ "copyLib",
+ "copyGroups",
+ "copyFeatures",
+ "muteKerning",
+ "muteInfo",
+ "mutedGlyphNames",
+ "familyName",
+ "styleName",
+ "localisedFamilyName",
+ ]
filename = posixpath_property("_filename")
path = posixpath_property("_path")
@@ -191,13 +206,15 @@ class SourceDescriptor(SimpleDescriptor):
self.name = name
"""string. Optional. Unique identifier name for this source.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
- self.designLocation = designLocation if designLocation is not None else location or {}
+ self.designLocation = (
+ designLocation if designLocation is not None else location or {}
+ )
"""dict. Axis values for this source, in design space coordinates.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
This may be only part of the full design location.
See :meth:`getFullDesignLocation()`
@@ -214,14 +231,14 @@ class SourceDescriptor(SimpleDescriptor):
can be extracted from the font, it can be efficient to have it right
here.
- Varlib.
+ varLib.
"""
self.styleName = styleName
"""string. Style name of this source. Though this data
can be extracted from the font, it can be efficient to have it right
here.
- Varlib.
+ varLib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name strings, keyed by
@@ -287,7 +304,7 @@ class SourceDescriptor(SimpleDescriptor):
def location(self):
"""dict. Axis values for this source, in design space coordinates.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
@@ -312,8 +329,9 @@ class SourceDescriptor(SimpleDescriptor):
"""
return self.localisedFamilyName.get(languageCode)
-
- def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
+ def getFullDesignLocation(
+ self, doc: "DesignSpaceDocument"
+ ) -> AnisotropicLocationDict:
"""Get the complete design location of this source, from its
:attr:`designLocation` and the document's axis defaults.
@@ -355,7 +373,8 @@ class RuleDescriptor(SimpleDescriptor):
</rule>
</rules>
"""
- _attrs = ['name', 'conditionSets', 'subs'] # what do we need here
+
+ _attrs = ["name", "conditionSets", "subs"] # what do we need here
def __init__(self, *, name=None, conditionSets=None, subs=None):
self.name = name
@@ -391,14 +410,14 @@ def evaluateConditions(conditions, location):
- If a condition has no maximum, check for > minimum.
"""
for cd in conditions:
- value = location[cd['name']]
- if cd.get('minimum') is None:
- if value > cd['maximum']:
+ value = location[cd["name"]]
+ if cd.get("minimum") is None:
+ if value > cd["maximum"]:
return False
- elif cd.get('maximum') is None:
- if cd['minimum'] > value:
+ elif cd.get("maximum") is None:
+ if cd["minimum"] > value:
return False
- elif not cd['minimum'] <= value <= cd['maximum']:
+ elif not cd["minimum"] <= value <= cd["maximum"]:
return False
return True
@@ -432,6 +451,50 @@ AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
SimpleLocationDict = Dict[str, float]
+class AxisMappingDescriptor(SimpleDescriptor):
+ """Represents the axis mapping element: mapping an input location
+ to an output location in the designspace.
+
+ .. code:: python
+
+ m1 = AxisMappingDescriptor()
+ m1.inputLocation = {"weight": 900, "width": 150}
+ m1.outputLocation = {"weight": 870}
+
+ .. code:: xml
+
+ <mappings>
+ <mapping>
+ <input>
+ <dimension name="weight" xvalue="900"/>
+ <dimension name="width" xvalue="150"/>
+ </input>
+ <output>
+ <dimension name="weight" xvalue="870"/>
+ </output>
+ </mapping>
+ </mappings>
+ """
+
+ _attrs = ["inputLocation", "outputLocation"]
+
+ def __init__(self, *, inputLocation=None, outputLocation=None):
+ self.inputLocation: SimpleLocationDict = inputLocation or {}
+ """dict. Axis values for the input of the mapping, in design space coordinates.
+
+ varLib.
+
+ .. versionadded:: 5.1
+ """
+ self.outputLocation: SimpleLocationDict = outputLocation or {}
+ """dict. Axis values for the output of the mapping, in design space coordinates.
+
+ varLib.
+
+ .. versionadded:: 5.1
+ """
+
+
class InstanceDescriptor(SimpleDescriptor):
"""Simple container for data related to the instance
@@ -451,27 +514,30 @@ class InstanceDescriptor(SimpleDescriptor):
i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
doc.addInstance(i2)
"""
+
flavor = "instance"
_defaultLanguageCode = "en"
- _attrs = ['filename',
- 'path',
- 'name',
- 'locationLabel',
- 'designLocation',
- 'userLocation',
- 'familyName',
- 'styleName',
- 'postScriptFontName',
- 'styleMapFamilyName',
- 'styleMapStyleName',
- 'localisedFamilyName',
- 'localisedStyleName',
- 'localisedStyleMapFamilyName',
- 'localisedStyleMapStyleName',
- 'glyphs',
- 'kerning',
- 'info',
- 'lib']
+ _attrs = [
+ "filename",
+ "path",
+ "name",
+ "locationLabel",
+ "designLocation",
+ "userLocation",
+ "familyName",
+ "styleName",
+ "postScriptFontName",
+ "styleMapFamilyName",
+ "styleMapStyleName",
+ "localisedFamilyName",
+ "localisedStyleName",
+ "localisedStyleMapFamilyName",
+ "localisedStyleMapStyleName",
+ "glyphs",
+ "kerning",
+ "info",
+ "lib",
+ ]
filename = posixpath_property("_filename")
path = posixpath_property("_path")
@@ -535,10 +601,12 @@ class InstanceDescriptor(SimpleDescriptor):
.. versionadded:: 5.0
"""
- self.designLocation: AnisotropicLocationDict = designLocation if designLocation is not None else (location or {})
+ self.designLocation: AnisotropicLocationDict = (
+ designLocation if designLocation is not None else (location or {})
+ )
"""dict. Axis values for this instance, in design space coordinates.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
@@ -549,7 +617,7 @@ class InstanceDescriptor(SimpleDescriptor):
self.userLocation: SimpleLocationDict = userLocation or {}
"""dict. Axis values for this instance, in user space coordinates.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
@@ -560,27 +628,27 @@ class InstanceDescriptor(SimpleDescriptor):
self.familyName = familyName
"""string. Family name of this instance.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
self.styleName = styleName
"""string. Style name of this instance.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
self.postScriptFontName = postScriptFontName
"""string. Postscript fontname for this instance.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
self.styleMapFamilyName = styleMapFamilyName
"""string. StyleMap familyname for this instance.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
self.styleMapStyleName = styleMapStyleName
"""string. StyleMap stylename for this instance.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name
@@ -630,7 +698,7 @@ class InstanceDescriptor(SimpleDescriptor):
def location(self):
"""dict. Axis values for this instance.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
@@ -708,7 +776,9 @@ class InstanceDescriptor(SimpleDescriptor):
if axisName in self.userLocation:
del self.userLocation[axisName]
- def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]:
+ def getLocationLabelDescriptor(
+ self, doc: "DesignSpaceDocument"
+ ) -> Optional[LocationLabelDescriptor]:
"""Get the :class:`LocationLabelDescriptor` instance that matches
this instances's :attr:`locationLabel`.
@@ -721,12 +791,14 @@ class InstanceDescriptor(SimpleDescriptor):
label = doc.getLocationLabel(self.locationLabel)
if label is None:
raise DesignSpaceDocumentError(
- 'InstanceDescriptor.getLocationLabelDescriptor(): '
- f'unknown location label `{self.locationLabel}` in instance `{self.name}`.'
+ "InstanceDescriptor.getLocationLabelDescriptor(): "
+ f"unknown location label `{self.locationLabel}` in instance `{self.name}`."
)
return label
- def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
+ def getFullDesignLocation(
+ self, doc: "DesignSpaceDocument"
+ ) -> AnisotropicLocationDict:
"""Get the complete design location of this instance, by combining data
from the various location fields, default axis values and mappings, and
top-level location labels.
@@ -757,7 +829,7 @@ class InstanceDescriptor(SimpleDescriptor):
result[axis.name] = axis.map_forward(axis.default)
return result
- def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
+ def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
"""Get the complete user location for this instance.
.. seealso:: :meth:`getFullDesignLocation`
@@ -770,11 +842,11 @@ class InstanceDescriptor(SimpleDescriptor):
def tagForAxisName(name):
# try to find or make a tag name for this axis name
names = {
- 'weight': ('wght', dict(en = 'Weight')),
- 'width': ('wdth', dict(en = 'Width')),
- 'optical': ('opsz', dict(en = 'Optical Size')),
- 'slant': ('slnt', dict(en = 'Slant')),
- 'italic': ('ital', dict(en = 'Italic')),
+ "weight": ("wght", dict(en="Weight")),
+ "width": ("wdth", dict(en="Width")),
+ "optical": ("opsz", dict(en="Optical Size")),
+ "slant": ("slnt", dict(en="Slant")),
+ "italic": ("ital", dict(en="Italic")),
}
if name.lower() in names:
return names[name.lower()]
@@ -811,7 +883,7 @@ class AbstractAxisDescriptor(SimpleDescriptor):
self.name = name
"""string. Name of the axis as it is used in the location dicts.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
# names for UI purposes, if this is not a standard axis,
self.labelNames = labelNames or {}
@@ -829,7 +901,7 @@ class AbstractAxisDescriptor(SimpleDescriptor):
user space is the same as design space, as in [(minimum, minimum),
(maximum, maximum)].
- Varlib.
+ varLib.
"""
self.axisOrdering = axisOrdering
"""STAT table field ``axisOrdering``.
@@ -848,7 +920,7 @@ class AbstractAxisDescriptor(SimpleDescriptor):
class AxisDescriptor(AbstractAxisDescriptor):
- """ Simple container for the axis data.
+ """Simple container for the axis data.
Add more localisations?
@@ -869,7 +941,17 @@ class AxisDescriptor(AbstractAxisDescriptor):
]
doc.addAxis(a1)
"""
- _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels']
+
+ _attrs = [
+ "tag",
+ "name",
+ "maximum",
+ "minimum",
+ "default",
+ "map",
+ "axisOrdering",
+ "axisLabels",
+ ]
def __init__(
self,
@@ -897,18 +979,18 @@ class AxisDescriptor(AbstractAxisDescriptor):
self.minimum = minimum
"""number. The minimum value for this axis in user space.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
self.maximum = maximum
"""number. The maximum value for this axis in user space.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
self.default = default
"""number. The default value for this axis, i.e. when a new location is
created, this is the value this axis will get in user space.
- MutatorMath + Varlib.
+ MutatorMath + varLib.
"""
def serialize(self):
@@ -976,7 +1058,7 @@ class DiscreteAxisDescriptor(AbstractAxisDescriptor):
"""
flavor = "axis"
- _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels')
+ _attrs = ("tag", "name", "values", "default", "map", "axisOrdering", "axisLabels")
def __init__(
self,
@@ -1053,7 +1135,16 @@ class AxisLabelDescriptor(SimpleDescriptor):
"""
flavor = "label"
- _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames')
+ _attrs = (
+ "userMinimum",
+ "userValue",
+ "userMaximum",
+ "name",
+ "elidable",
+ "olderSibling",
+ "linkedUserValue",
+ "labelNames",
+ )
def __init__(
self,
@@ -1127,7 +1218,7 @@ class LocationLabelDescriptor(SimpleDescriptor):
"""
flavor = "label"
- _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')
+ _attrs = ("name", "elidable", "olderSibling", "userLocation", "labelNames")
def __init__(
self,
@@ -1168,7 +1259,7 @@ class LocationLabelDescriptor(SimpleDescriptor):
"""Return the English name from :attr:`labelNames` or the :attr:`name`."""
return self.labelNames.get("en") or self.name
- def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
+ def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
"""Get the complete user location of this label, by combining data
from the explicit user location and default axis values.
@@ -1195,7 +1286,7 @@ class VariableFontDescriptor(SimpleDescriptor):
"""
flavor = "variable-font"
- _attrs = ('filename', 'axisSubsets', 'lib')
+ _attrs = ("filename", "axisSubsets", "lib")
filename = posixpath_property("_filename")
@@ -1213,7 +1304,9 @@ class VariableFontDescriptor(SimpleDescriptor):
If not specified, the :attr:`name` will be used as a basename for the file.
"""
- self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
+ self.axisSubsets: List[
+ Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]
+ ] = (axisSubsets or [])
"""Axis subsets to include in this variable font.
If an axis is not mentioned, assume that we only want the default
@@ -1228,10 +1321,13 @@ class RangeAxisSubsetDescriptor(SimpleDescriptor):
.. versionadded:: 5.0
"""
+
flavor = "axis-subset"
- _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')
+ _attrs = ("name", "userMinimum", "userDefault", "userMaximum")
- def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf):
+ def __init__(
+ self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf
+ ):
self.name: str = name
"""Name of the :class:`AxisDescriptor` to subset."""
self.userMinimum: float = userMinimum
@@ -1256,8 +1352,9 @@ class ValueAxisSubsetDescriptor(SimpleDescriptor):
.. versionadded:: 5.0
"""
+
flavor = "axis-subset"
- _attrs = ('name', 'userValue')
+ _attrs = ("name", "userValue")
def __init__(self, *, name, userValue):
self.name: str = name
@@ -1273,6 +1370,7 @@ class BaseDocWriter(object):
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
+ axisMappingDescriptorClass = AxisMappingDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
@@ -1286,6 +1384,10 @@ class BaseDocWriter(object):
return cls.axisDescriptorClass()
@classmethod
+ def getAxisMappingDescriptor(cls):
+ return cls.axisMappingDescriptorClass()
+
+ @classmethod
def getSourceDescriptor(cls):
return cls.sourceDescriptorClass()
@@ -1304,16 +1406,28 @@ class BaseDocWriter(object):
self.root = ET.Element("designspace")
def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
- self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple)
+ self.root.attrib["format"] = ".".join(str(i) for i in self.effectiveFormatTuple)
- if self.documentObject.axes or self.documentObject.elidedFallbackName is not None:
+ if (
+ self.documentObject.axes
+ or self.documentObject.axisMappings
+ or self.documentObject.elidedFallbackName is not None
+ ):
axesElement = ET.Element("axes")
if self.documentObject.elidedFallbackName is not None:
- axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName
+ axesElement.attrib[
+ "elidedfallbackname"
+ ] = self.documentObject.elidedFallbackName
self.root.append(axesElement)
for axisObject in self.documentObject.axes:
self._addAxis(axisObject)
+ if self.documentObject.axisMappings:
+ mappingsElement = ET.Element("mappings")
+ self.root.findall(".axes")[0].append(mappingsElement)
+ for mappingObject in self.documentObject.axisMappings:
+ self._addAxisMapping(mappingsElement, mappingObject)
+
if self.documentObject.locationLabels:
labelsElement = ET.Element("labels")
for labelObject in self.documentObject.locationLabels:
@@ -1352,7 +1466,7 @@ class BaseDocWriter(object):
tree.write(
self.path,
encoding=encoding,
- method='xml',
+ method="xml",
xml_declaration=xml_declaration,
pretty_print=pretty,
)
@@ -1364,140 +1478,168 @@ class BaseDocWriter(object):
minVersion = self.documentObject.formatTuple
if (
any(
- hasattr(axis, 'values') or
- axis.axisOrdering is not None or
- axis.axisLabels
+ hasattr(axis, "values")
+ or axis.axisOrdering is not None
+ or axis.axisLabels
for axis in self.documentObject.axes
- ) or
- self.documentObject.locationLabels or
- any(
- source.localisedFamilyName
- for source in self.documentObject.sources
- ) or
- self.documentObject.variableFonts or
- any(
- instance.locationLabel or
- instance.userLocation
+ )
+ or self.documentObject.locationLabels
+ or any(source.localisedFamilyName for source in self.documentObject.sources)
+ or self.documentObject.variableFonts
+ or any(
+ instance.locationLabel or instance.userLocation
for instance in self.documentObject.instances
)
):
if minVersion < (5, 0):
minVersion = (5, 0)
+ if self.documentObject.axisMappings:
+ if minVersion < (5, 1):
+ minVersion = (5, 1)
return minVersion
def _makeLocationElement(self, locationObject, name=None):
- """ Convert Location dict to a locationElement."""
+ """Convert Location dict to a locationElement."""
locElement = ET.Element("location")
if name is not None:
- locElement.attrib['name'] = name
+ locElement.attrib["name"] = name
validatedLocation = self.documentObject.newDefaultLocation()
for axisName, axisValue in locationObject.items():
if axisName in validatedLocation:
# only accept values we know
validatedLocation[axisName] = axisValue
for dimensionName, dimensionValue in validatedLocation.items():
- dimElement = ET.Element('dimension')
- dimElement.attrib['name'] = dimensionName
+ dimElement = ET.Element("dimension")
+ dimElement.attrib["name"] = dimensionName
if type(dimensionValue) == tuple:
- dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0])
- dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1])
+ dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue[0])
+ dimElement.attrib["yvalue"] = self.intOrFloat(dimensionValue[1])
else:
- dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue)
+ dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue)
locElement.append(dimElement)
return locElement, validatedLocation
def intOrFloat(self, num):
if int(num) == num:
return "%d" % num
- return ("%f" % num).rstrip('0').rstrip('.')
+ return ("%f" % num).rstrip("0").rstrip(".")
def _addRule(self, ruleObject):
# if none of the conditions have minimum or maximum values, do not add the rule.
- ruleElement = ET.Element('rule')
+ ruleElement = ET.Element("rule")
if ruleObject.name is not None:
- ruleElement.attrib['name'] = ruleObject.name
+ ruleElement.attrib["name"] = ruleObject.name
for conditions in ruleObject.conditionSets:
- conditionsetElement = ET.Element('conditionset')
+ conditionsetElement = ET.Element("conditionset")
for cond in conditions:
- if cond.get('minimum') is None and cond.get('maximum') is None:
+ if cond.get("minimum") is None and cond.get("maximum") is None:
# neither is defined, don't add this condition
continue
- conditionElement = ET.Element('condition')
- conditionElement.attrib['name'] = cond.get('name')
- if cond.get('minimum') is not None:
- conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum'))
- if cond.get('maximum') is not None:
- conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum'))
+ conditionElement = ET.Element("condition")
+ conditionElement.attrib["name"] = cond.get("name")
+ if cond.get("minimum") is not None:
+ conditionElement.attrib["minimum"] = self.intOrFloat(
+ cond.get("minimum")
+ )
+ if cond.get("maximum") is not None:
+ conditionElement.attrib["maximum"] = self.intOrFloat(
+ cond.get("maximum")
+ )
conditionsetElement.append(conditionElement)
if len(conditionsetElement):
ruleElement.append(conditionsetElement)
for sub in ruleObject.subs:
- subElement = ET.Element('sub')
- subElement.attrib['name'] = sub[0]
- subElement.attrib['with'] = sub[1]
+ subElement = ET.Element("sub")
+ subElement.attrib["name"] = sub[0]
+ subElement.attrib["with"] = sub[1]
ruleElement.append(subElement)
if len(ruleElement):
- self.root.findall('.rules')[0].append(ruleElement)
+ self.root.findall(".rules")[0].append(ruleElement)
def _addAxis(self, axisObject):
- axisElement = ET.Element('axis')
- axisElement.attrib['tag'] = axisObject.tag
- axisElement.attrib['name'] = axisObject.name
+ axisElement = ET.Element("axis")
+ axisElement.attrib["tag"] = axisObject.tag
+ axisElement.attrib["name"] = axisObject.name
self._addLabelNames(axisElement, axisObject.labelNames)
if axisObject.map:
for inputValue, outputValue in axisObject.map:
- mapElement = ET.Element('map')
- mapElement.attrib['input'] = self.intOrFloat(inputValue)
- mapElement.attrib['output'] = self.intOrFloat(outputValue)
+ mapElement = ET.Element("map")
+ mapElement.attrib["input"] = self.intOrFloat(inputValue)
+ mapElement.attrib["output"] = self.intOrFloat(outputValue)
axisElement.append(mapElement)
if axisObject.axisOrdering or axisObject.axisLabels:
- labelsElement = ET.Element('labels')
+ labelsElement = ET.Element("labels")
if axisObject.axisOrdering is not None:
- labelsElement.attrib['ordering'] = str(axisObject.axisOrdering)
+ labelsElement.attrib["ordering"] = str(axisObject.axisOrdering)
for label in axisObject.axisLabels:
self._addAxisLabel(labelsElement, label)
axisElement.append(labelsElement)
if hasattr(axisObject, "minimum"):
- axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum)
- axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum)
+ axisElement.attrib["minimum"] = self.intOrFloat(axisObject.minimum)
+ axisElement.attrib["maximum"] = self.intOrFloat(axisObject.maximum)
elif hasattr(axisObject, "values"):
- axisElement.attrib['values'] = " ".join(self.intOrFloat(v) for v in axisObject.values)
- axisElement.attrib['default'] = self.intOrFloat(axisObject.default)
+ axisElement.attrib["values"] = " ".join(
+ self.intOrFloat(v) for v in axisObject.values
+ )
+ axisElement.attrib["default"] = self.intOrFloat(axisObject.default)
if axisObject.hidden:
- axisElement.attrib['hidden'] = "1"
- self.root.findall('.axes')[0].append(axisElement)
-
- def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) -> None:
- labelElement = ET.Element('label')
- labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue)
+ axisElement.attrib["hidden"] = "1"
+ self.root.findall(".axes")[0].append(axisElement)
+
+ def _addAxisMapping(self, mappingsElement, mappingObject):
+ mappingElement = ET.Element("mapping")
+ for what in ("inputLocation", "outputLocation"):
+ whatObject = getattr(mappingObject, what, None)
+ if whatObject is None:
+ continue
+ whatElement = ET.Element(what[:-8])
+ mappingElement.append(whatElement)
+
+ for name, value in whatObject.items():
+ dimensionElement = ET.Element("dimension")
+ dimensionElement.attrib["name"] = name
+ dimensionElement.attrib["xvalue"] = self.intOrFloat(value)
+ whatElement.append(dimensionElement)
+
+ mappingsElement.append(mappingElement)
+
+ def _addAxisLabel(
+ self, axisElement: ET.Element, label: AxisLabelDescriptor
+ ) -> None:
+ labelElement = ET.Element("label")
+ labelElement.attrib["uservalue"] = self.intOrFloat(label.userValue)
if label.userMinimum is not None:
- labelElement.attrib['userminimum'] = self.intOrFloat(label.userMinimum)
+ labelElement.attrib["userminimum"] = self.intOrFloat(label.userMinimum)
if label.userMaximum is not None:
- labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum)
- labelElement.attrib['name'] = label.name
+ labelElement.attrib["usermaximum"] = self.intOrFloat(label.userMaximum)
+ labelElement.attrib["name"] = label.name
if label.elidable:
- labelElement.attrib['elidable'] = "true"
+ labelElement.attrib["elidable"] = "true"
if label.olderSibling:
- labelElement.attrib['oldersibling'] = "true"
+ labelElement.attrib["oldersibling"] = "true"
if label.linkedUserValue is not None:
- labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue)
+ labelElement.attrib["linkeduservalue"] = self.intOrFloat(
+ label.linkedUserValue
+ )
self._addLabelNames(labelElement, label.labelNames)
axisElement.append(labelElement)
def _addLabelNames(self, parentElement, labelNames):
for languageCode, labelName in sorted(labelNames.items()):
- languageElement = ET.Element('labelname')
+ languageElement = ET.Element("labelname")
languageElement.attrib[XML_LANG] = languageCode
languageElement.text = labelName
parentElement.append(languageElement)
- def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None:
- labelElement = ET.Element('label')
- labelElement.attrib['name'] = label.name
+ def _addLocationLabel(
+ self, parentElement: ET.Element, label: LocationLabelDescriptor
+ ) -> None:
+ labelElement = ET.Element("label")
+ labelElement.attrib["name"] = label.name
if label.elidable:
- labelElement.attrib['elidable'] = "true"
+ labelElement.attrib["elidable"] = "true"
if label.olderSibling:
- labelElement.attrib['oldersibling'] = "true"
+ labelElement.attrib["oldersibling"] = "true"
self._addLabelNames(labelElement, label.labelNames)
self._addLocationElement(labelElement, userLocation=label.userLocation)
parentElement.append(labelElement)
@@ -1507,39 +1649,39 @@ class BaseDocWriter(object):
parentElement,
*,
designLocation: AnisotropicLocationDict = None,
- userLocation: SimpleLocationDict = None
+ userLocation: SimpleLocationDict = None,
):
locElement = ET.Element("location")
for axis in self.documentObject.axes:
if designLocation is not None and axis.name in designLocation:
- dimElement = ET.Element('dimension')
- dimElement.attrib['name'] = axis.name
+ dimElement = ET.Element("dimension")
+ dimElement.attrib["name"] = axis.name
value = designLocation[axis.name]
if isinstance(value, tuple):
- dimElement.attrib['xvalue'] = self.intOrFloat(value[0])
- dimElement.attrib['yvalue'] = self.intOrFloat(value[1])
+ dimElement.attrib["xvalue"] = self.intOrFloat(value[0])
+ dimElement.attrib["yvalue"] = self.intOrFloat(value[1])
else:
- dimElement.attrib['xvalue'] = self.intOrFloat(value)
+ dimElement.attrib["xvalue"] = self.intOrFloat(value)
locElement.append(dimElement)
elif userLocation is not None and axis.name in userLocation:
- dimElement = ET.Element('dimension')
- dimElement.attrib['name'] = axis.name
+ dimElement = ET.Element("dimension")
+ dimElement.attrib["name"] = axis.name
value = userLocation[axis.name]
- dimElement.attrib['uservalue'] = self.intOrFloat(value)
+ dimElement.attrib["uservalue"] = self.intOrFloat(value)
locElement.append(dimElement)
if len(locElement) > 0:
parentElement.append(locElement)
def _addInstance(self, instanceObject):
- instanceElement = ET.Element('instance')
+ instanceElement = ET.Element("instance")
if instanceObject.name is not None:
- instanceElement.attrib['name'] = instanceObject.name
+ instanceElement.attrib["name"] = instanceObject.name
if instanceObject.locationLabel is not None:
- instanceElement.attrib['location'] = instanceObject.locationLabel
+ instanceElement.attrib["location"] = instanceObject.locationLabel
if instanceObject.familyName is not None:
- instanceElement.attrib['familyname'] = instanceObject.familyName
+ instanceElement.attrib["familyname"] = instanceObject.familyName
if instanceObject.styleName is not None:
- instanceElement.attrib['stylename'] = instanceObject.styleName
+ instanceElement.attrib["stylename"] = instanceObject.styleName
# add localisations
if instanceObject.localisedStyleName:
languageCodes = list(instanceObject.localisedStyleName.keys())
@@ -1547,7 +1689,7 @@ class BaseDocWriter(object):
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
- localisedStyleNameElement = ET.Element('stylename')
+ localisedStyleNameElement = ET.Element("stylename")
localisedStyleNameElement.attrib[XML_LANG] = code
localisedStyleNameElement.text = instanceObject.getStyleName(code)
instanceElement.append(localisedStyleNameElement)
@@ -1557,7 +1699,7 @@ class BaseDocWriter(object):
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
- localisedFamilyNameElement = ET.Element('familyname')
+ localisedFamilyNameElement = ET.Element("familyname")
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = instanceObject.getFamilyName(code)
instanceElement.append(localisedFamilyNameElement)
@@ -1567,9 +1709,11 @@ class BaseDocWriter(object):
for code in languageCodes:
if code == "en":
continue
- localisedStyleMapStyleNameElement = ET.Element('stylemapstylename')
+ localisedStyleMapStyleNameElement = ET.Element("stylemapstylename")
localisedStyleMapStyleNameElement.attrib[XML_LANG] = code
- localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code)
+ localisedStyleMapStyleNameElement.text = (
+ instanceObject.getStyleMapStyleName(code)
+ )
instanceElement.append(localisedStyleMapStyleNameElement)
if instanceObject.localisedStyleMapFamilyName:
languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys())
@@ -1577,9 +1721,11 @@ class BaseDocWriter(object):
for code in languageCodes:
if code == "en":
continue
- localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname')
+ localisedStyleMapFamilyNameElement = ET.Element("stylemapfamilyname")
localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code
- localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code)
+ localisedStyleMapFamilyNameElement.text = (
+ instanceObject.getStyleMapFamilyName(code)
+ )
instanceElement.append(localisedStyleMapFamilyNameElement)
if self.effectiveFormatTuple >= (5, 0):
@@ -1587,127 +1733,151 @@ class BaseDocWriter(object):
self._addLocationElement(
instanceElement,
designLocation=instanceObject.designLocation,
- userLocation=instanceObject.userLocation
+ userLocation=instanceObject.userLocation,
)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
if instanceObject.location is not None:
- locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location)
+ locationElement, instanceObject.location = self._makeLocationElement(
+ instanceObject.location
+ )
instanceElement.append(locationElement)
if instanceObject.filename is not None:
- instanceElement.attrib['filename'] = instanceObject.filename
+ instanceElement.attrib["filename"] = instanceObject.filename
if instanceObject.postScriptFontName is not None:
- instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName
+ instanceElement.attrib[
+ "postscriptfontname"
+ ] = instanceObject.postScriptFontName
if instanceObject.styleMapFamilyName is not None:
- instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName
+ instanceElement.attrib[
+ "stylemapfamilyname"
+ ] = instanceObject.styleMapFamilyName
if instanceObject.styleMapStyleName is not None:
- instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName
+ instanceElement.attrib[
+ "stylemapstylename"
+ ] = instanceObject.styleMapStyleName
if self.effectiveFormatTuple < (5, 0):
# Deprecated members as of version 5.0
if instanceObject.glyphs:
- if instanceElement.findall('.glyphs') == []:
- glyphsElement = ET.Element('glyphs')
+ if instanceElement.findall(".glyphs") == []:
+ glyphsElement = ET.Element("glyphs")
instanceElement.append(glyphsElement)
- glyphsElement = instanceElement.findall('.glyphs')[0]
+ glyphsElement = instanceElement.findall(".glyphs")[0]
for glyphName, data in sorted(instanceObject.glyphs.items()):
- glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data)
+ glyphElement = self._writeGlyphElement(
+ instanceElement, instanceObject, glyphName, data
+ )
glyphsElement.append(glyphElement)
if instanceObject.kerning:
- kerningElement = ET.Element('kerning')
+ kerningElement = ET.Element("kerning")
instanceElement.append(kerningElement)
if instanceObject.info:
- infoElement = ET.Element('info')
+ infoElement = ET.Element("info")
instanceElement.append(infoElement)
self._addLib(instanceElement, instanceObject.lib, 4)
- self.root.findall('.instances')[0].append(instanceElement)
+ self.root.findall(".instances")[0].append(instanceElement)
def _addSource(self, sourceObject):
sourceElement = ET.Element("source")
if sourceObject.filename is not None:
- sourceElement.attrib['filename'] = sourceObject.filename
+ sourceElement.attrib["filename"] = sourceObject.filename
if sourceObject.name is not None:
if sourceObject.name.find("temp_master") != 0:
# do not save temporary source names
- sourceElement.attrib['name'] = sourceObject.name
+ sourceElement.attrib["name"] = sourceObject.name
if sourceObject.familyName is not None:
- sourceElement.attrib['familyname'] = sourceObject.familyName
+ sourceElement.attrib["familyname"] = sourceObject.familyName
if sourceObject.styleName is not None:
- sourceElement.attrib['stylename'] = sourceObject.styleName
+ sourceElement.attrib["stylename"] = sourceObject.styleName
if sourceObject.layerName is not None:
- sourceElement.attrib['layer'] = sourceObject.layerName
+ sourceElement.attrib["layer"] = sourceObject.layerName
if sourceObject.localisedFamilyName:
languageCodes = list(sourceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
- localisedFamilyNameElement = ET.Element('familyname')
+ localisedFamilyNameElement = ET.Element("familyname")
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
sourceElement.append(localisedFamilyNameElement)
if sourceObject.copyLib:
- libElement = ET.Element('lib')
- libElement.attrib['copy'] = "1"
+ libElement = ET.Element("lib")
+ libElement.attrib["copy"] = "1"
sourceElement.append(libElement)
if sourceObject.copyGroups:
- groupsElement = ET.Element('groups')
- groupsElement.attrib['copy'] = "1"
+ groupsElement = ET.Element("groups")
+ groupsElement.attrib["copy"] = "1"
sourceElement.append(groupsElement)
if sourceObject.copyFeatures:
- featuresElement = ET.Element('features')
- featuresElement.attrib['copy'] = "1"
+ featuresElement = ET.Element("features")
+ featuresElement.attrib["copy"] = "1"
sourceElement.append(featuresElement)
if sourceObject.copyInfo or sourceObject.muteInfo:
- infoElement = ET.Element('info')
+ infoElement = ET.Element("info")
if sourceObject.copyInfo:
- infoElement.attrib['copy'] = "1"
+ infoElement.attrib["copy"] = "1"
if sourceObject.muteInfo:
- infoElement.attrib['mute'] = "1"
+ infoElement.attrib["mute"] = "1"
sourceElement.append(infoElement)
if sourceObject.muteKerning:
kerningElement = ET.Element("kerning")
- kerningElement.attrib["mute"] = '1'
+ kerningElement.attrib["mute"] = "1"
sourceElement.append(kerningElement)
if sourceObject.mutedGlyphNames:
for name in sourceObject.mutedGlyphNames:
glyphElement = ET.Element("glyph")
glyphElement.attrib["name"] = name
- glyphElement.attrib["mute"] = '1'
+ glyphElement.attrib["mute"] = "1"
sourceElement.append(glyphElement)
if self.effectiveFormatTuple >= (5, 0):
- self._addLocationElement(sourceElement, designLocation=sourceObject.location)
+ self._addLocationElement(
+ sourceElement, designLocation=sourceObject.location
+ )
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
- locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location)
+ locationElement, sourceObject.location = self._makeLocationElement(
+ sourceObject.location
+ )
sourceElement.append(locationElement)
- self.root.findall('.sources')[0].append(sourceElement)
+ self.root.findall(".sources")[0].append(sourceElement)
- def _addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None:
- vfElement = ET.Element('variable-font')
- vfElement.attrib['name'] = vf.name
+ def _addVariableFont(
+ self, parentElement: ET.Element, vf: VariableFontDescriptor
+ ) -> None:
+ vfElement = ET.Element("variable-font")
+ vfElement.attrib["name"] = vf.name
if vf.filename is not None:
- vfElement.attrib['filename'] = vf.filename
+ vfElement.attrib["filename"] = vf.filename
if vf.axisSubsets:
- subsetsElement = ET.Element('axis-subsets')
+ subsetsElement = ET.Element("axis-subsets")
for subset in vf.axisSubsets:
- subsetElement = ET.Element('axis-subset')
- subsetElement.attrib['name'] = subset.name
+ subsetElement = ET.Element("axis-subset")
+ subsetElement.attrib["name"] = subset.name
# Mypy doesn't support narrowing union types via hasattr()
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
# TODO(Python 3.10): use TypeGuard
if hasattr(subset, "userMinimum"):
subset = cast(RangeAxisSubsetDescriptor, subset)
if subset.userMinimum != -math.inf:
- subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum)
+ subsetElement.attrib["userminimum"] = self.intOrFloat(
+ subset.userMinimum
+ )
if subset.userMaximum != math.inf:
- subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum)
+ subsetElement.attrib["usermaximum"] = self.intOrFloat(
+ subset.userMaximum
+ )
if subset.userDefault is not None:
- subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault)
+ subsetElement.attrib["userdefault"] = self.intOrFloat(
+ subset.userDefault
+ )
elif hasattr(subset, "userValue"):
subset = cast(ValueAxisSubsetDescriptor, subset)
- subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue)
+ subsetElement.attrib["uservalue"] = self.intOrFloat(
+ subset.userValue
+ )
subsetsElement.append(subsetElement)
vfElement.append(subsetsElement)
self._addLib(vfElement, vf.lib, 4)
@@ -1716,35 +1886,41 @@ class BaseDocWriter(object):
def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
if not data:
return
- libElement = ET.Element('lib')
+ libElement = ET.Element("lib")
libElement.append(plistlib.totree(data, indent_level=indent_level))
parentElement.append(libElement)
def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
- glyphElement = ET.Element('glyph')
- if data.get('mute'):
- glyphElement.attrib['mute'] = "1"
- if data.get('unicodes') is not None:
- glyphElement.attrib['unicode'] = " ".join([hex(u) for u in data.get('unicodes')])
- if data.get('instanceLocation') is not None:
- locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation'))
+ glyphElement = ET.Element("glyph")
+ if data.get("mute"):
+ glyphElement.attrib["mute"] = "1"
+ if data.get("unicodes") is not None:
+ glyphElement.attrib["unicode"] = " ".join(
+ [hex(u) for u in data.get("unicodes")]
+ )
+ if data.get("instanceLocation") is not None:
+ locationElement, data["instanceLocation"] = self._makeLocationElement(
+ data.get("instanceLocation")
+ )
glyphElement.append(locationElement)
if glyphName is not None:
- glyphElement.attrib['name'] = glyphName
- if data.get('note') is not None:
- noteElement = ET.Element('note')
- noteElement.text = data.get('note')
+ glyphElement.attrib["name"] = glyphName
+ if data.get("note") is not None:
+ noteElement = ET.Element("note")
+ noteElement.text = data.get("note")
glyphElement.append(noteElement)
- if data.get('masters') is not None:
+ if data.get("masters") is not None:
mastersElement = ET.Element("masters")
- for m in data.get('masters'):
+ for m in data.get("masters"):
masterElement = ET.Element("master")
- if m.get('glyphName') is not None:
- masterElement.attrib['glyphname'] = m.get('glyphName')
- if m.get('font') is not None:
- masterElement.attrib['source'] = m.get('font')
- if m.get('location') is not None:
- locationElement, m['location'] = self._makeLocationElement(m.get('location'))
+ if m.get("glyphName") is not None:
+ masterElement.attrib["glyphname"] = m.get("glyphName")
+ if m.get("font") is not None:
+ masterElement.attrib["source"] = m.get("font")
+ if m.get("location") is not None:
+ locationElement, m["location"] = self._makeLocationElement(
+ m.get("location")
+ )
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
@@ -1755,6 +1931,7 @@ class BaseDocReader(LogMixin):
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
+ axisMappingDescriptorClass = AxisMappingDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
@@ -1801,7 +1978,8 @@ class BaseDocReader(LogMixin):
if processingValue not in {"first", "last"}:
raise DesignSpaceDocumentError(
"<rules> processing attribute value is not valid: %r, "
- "expected 'first' or 'last'" % processingValue)
+ "expected 'first' or 'last'" % processingValue
+ )
self.documentObject.rulesProcessingLast = processingValue == "last"
for ruleElement in self.root.findall(".rules/rule"):
ruleObject = self.ruleDescriptorClass()
@@ -1818,71 +1996,79 @@ class BaseDocReader(LogMixin):
"Wrapped them in a new conditionset."
)
# read the conditionsets
- for conditionSetElement in ruleElement.findall('.conditionset'):
+ for conditionSetElement in ruleElement.findall(".conditionset"):
conditionSet = self._readConditionElements(
conditionSetElement,
ruleName,
)
if conditionSet is not None:
ruleObject.conditionSets.append(conditionSet)
- for subElement in ruleElement.findall('.sub'):
- a = subElement.attrib['name']
- b = subElement.attrib['with']
+ for subElement in ruleElement.findall(".sub"):
+ a = subElement.attrib["name"]
+ b = subElement.attrib["with"]
ruleObject.subs.append((a, b))
rules.append(ruleObject)
self.documentObject.rules = rules
def _readConditionElements(self, parentElement, ruleName=None):
cds = []
- for conditionElement in parentElement.findall('.condition'):
+ for conditionElement in parentElement.findall(".condition"):
cd = {}
cdMin = conditionElement.attrib.get("minimum")
if cdMin is not None:
- cd['minimum'] = float(cdMin)
+ cd["minimum"] = float(cdMin)
else:
# will allow these to be None, assume axis.minimum
- cd['minimum'] = None
+ cd["minimum"] = None
cdMax = conditionElement.attrib.get("maximum")
if cdMax is not None:
- cd['maximum'] = float(cdMax)
+ cd["maximum"] = float(cdMax)
else:
# will allow these to be None, assume axis.maximum
- cd['maximum'] = None
- cd['name'] = conditionElement.attrib.get("name")
+ cd["maximum"] = None
+ cd["name"] = conditionElement.attrib.get("name")
# # test for things
- if cd.get('minimum') is None and cd.get('maximum') is None:
+ if cd.get("minimum") is None and cd.get("maximum") is None:
raise DesignSpaceDocumentError(
- "condition missing required minimum or maximum in rule" +
- (" '%s'" % ruleName if ruleName is not None else ""))
+ "condition missing required minimum or maximum in rule"
+ + (" '%s'" % ruleName if ruleName is not None else "")
+ )
cds.append(cd)
return cds
def readAxes(self):
# read the axes elements, including the warp map.
axesElement = self.root.find(".axes")
- if axesElement is not None and 'elidedfallbackname' in axesElement.attrib:
- self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname']
+ if axesElement is not None and "elidedfallbackname" in axesElement.attrib:
+ self.documentObject.elidedFallbackName = axesElement.attrib[
+ "elidedfallbackname"
+ ]
axisElements = self.root.findall(".axes/axis")
if not axisElements:
return
for axisElement in axisElements:
- if self.documentObject.formatTuple >= (5, 0) and "values" in axisElement.attrib:
+ if (
+ self.documentObject.formatTuple >= (5, 0)
+ and "values" in axisElement.attrib
+ ):
axisObject = self.discreteAxisDescriptorClass()
- axisObject.values = [float(s) for s in axisElement.attrib["values"].split(" ")]
+ axisObject.values = [
+ float(s) for s in axisElement.attrib["values"].split(" ")
+ ]
else:
axisObject = self.axisDescriptorClass()
axisObject.minimum = float(axisElement.attrib.get("minimum"))
axisObject.maximum = float(axisElement.attrib.get("maximum"))
axisObject.default = float(axisElement.attrib.get("default"))
axisObject.name = axisElement.attrib.get("name")
- if axisElement.attrib.get('hidden', False):
+ if axisElement.attrib.get("hidden", False):
axisObject.hidden = True
axisObject.tag = axisElement.attrib.get("tag")
- for mapElement in axisElement.findall('map'):
- a = float(mapElement.attrib['input'])
- b = float(mapElement.attrib['output'])
+ for mapElement in axisElement.findall("map"):
+ a = float(mapElement.attrib["input"])
+ b = float(mapElement.attrib["output"])
axisObject.map.append((a, b))
- for labelNameElement in axisElement.findall('labelname'):
+ for labelNameElement in axisElement.findall("labelname"):
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
for key, lang in labelNameElement.items():
@@ -1897,18 +2083,51 @@ class BaseDocReader(LogMixin):
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
+ mappingsElement = self.root.find(".axes/mappings")
+ self.documentObject.axisMappings = []
+ if mappingsElement is not None:
+ for mappingElement in mappingsElement.findall("mapping"):
+ inputElement = mappingElement.find("input")
+ outputElement = mappingElement.find("output")
+ inputLoc = {}
+ outputLoc = {}
+ for dimElement in inputElement.findall(".dimension"):
+ name = dimElement.attrib["name"]
+ value = float(dimElement.attrib["xvalue"])
+ inputLoc[name] = value
+ for dimElement in outputElement.findall(".dimension"):
+ name = dimElement.attrib["name"]
+ value = float(dimElement.attrib["xvalue"])
+ outputLoc[name] = value
+ axisMappingObject = self.axisMappingDescriptorClass(
+ inputLocation=inputLoc, outputLocation=outputLoc
+ )
+ self.documentObject.axisMappings.append(axisMappingObject)
+
def readAxisLabel(self, element: ET.Element):
- xml_attrs = {'userminimum', 'uservalue', 'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'}
+ xml_attrs = {
+ "userminimum",
+ "uservalue",
+ "usermaximum",
+ "name",
+ "elidable",
+ "oldersibling",
+ "linkeduservalue",
+ }
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
- raise DesignSpaceDocumentError(f"label element contains unknown attributes: {', '.join(unknown_attrs)}")
+ raise DesignSpaceDocumentError(
+ f"label element contains unknown attributes: {', '.join(unknown_attrs)}"
+ )
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
valueStr = element.get("uservalue")
if valueStr is None:
- raise DesignSpaceDocumentError("label element must have a uservalue attribute.")
+ raise DesignSpaceDocumentError(
+ "label element must have a uservalue attribute."
+ )
value = float(valueStr)
minimumStr = element.get("userminimum")
minimum = float(minimumStr) if minimumStr is not None else None
@@ -1941,18 +2160,24 @@ class BaseDocReader(LogMixin):
if self.documentObject.formatTuple < (5, 0):
return
- xml_attrs = {'name', 'elidable', 'oldersibling'}
+ xml_attrs = {"name", "elidable", "oldersibling"}
for labelElement in self.root.findall(".labels/label"):
unknown_attrs = set(labelElement.attrib) - xml_attrs
if unknown_attrs:
- raise DesignSpaceDocumentError(f"Label element contains unknown attributes: {', '.join(unknown_attrs)}")
+ raise DesignSpaceDocumentError(
+ f"Label element contains unknown attributes: {', '.join(unknown_attrs)}"
+ )
name = labelElement.get("name")
if name is None:
- raise DesignSpaceDocumentError("label element must have a name attribute.")
+ raise DesignSpaceDocumentError(
+ "label element must have a name attribute."
+ )
designLocation, userLocation = self.locationFromElement(labelElement)
if designLocation:
- raise DesignSpaceDocumentError(f'<label> element "{name}" must only have user locations (using uservalue="").')
+ raise DesignSpaceDocumentError(
+ f'<label> element "{name}" must only have user locations (using uservalue="").'
+ )
elidable = True if labelElement.get("elidable") == "true" else False
olderSibling = True if labelElement.get("oldersibling") == "true" else False
labelNames = {
@@ -1976,21 +2201,27 @@ class BaseDocReader(LogMixin):
if self.documentObject.formatTuple < (5, 0):
return
- xml_attrs = {'name', 'filename'}
+ xml_attrs = {"name", "filename"}
for variableFontElement in self.root.findall(".variable-fonts/variable-font"):
unknown_attrs = set(variableFontElement.attrib) - xml_attrs
if unknown_attrs:
- raise DesignSpaceDocumentError(f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}")
+ raise DesignSpaceDocumentError(
+ f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}"
+ )
name = variableFontElement.get("name")
if name is None:
- raise DesignSpaceDocumentError("variable-font element must have a name attribute.")
+ raise DesignSpaceDocumentError(
+ "variable-font element must have a name attribute."
+ )
filename = variableFontElement.get("filename")
axisSubsetsElement = variableFontElement.find(".axis-subsets")
if axisSubsetsElement is None:
- raise DesignSpaceDocumentError("variable-font element must contain an axis-subsets element.")
+ raise DesignSpaceDocumentError(
+ "variable-font element must contain an axis-subsets element."
+ )
axisSubsets = []
for axisSubset in axisSubsetsElement.iterfind(".axis-subset"):
axisSubsets.append(self.readAxisSubset(axisSubset))
@@ -2010,14 +2241,18 @@ class BaseDocReader(LogMixin):
def readAxisSubset(self, element: ET.Element):
if "uservalue" in element.attrib:
- xml_attrs = {'name', 'uservalue'}
+ xml_attrs = {"name", "uservalue"}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
- raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
+ raise DesignSpaceDocumentError(
+ f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}"
+ )
name = element.get("name")
if name is None:
- raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
+ raise DesignSpaceDocumentError(
+ "axis-subset element must have a name attribute."
+ )
userValueStr = element.get("uservalue")
if userValueStr is None:
raise DesignSpaceDocumentError(
@@ -2027,19 +2262,27 @@ class BaseDocReader(LogMixin):
return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue)
else:
- xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'}
+ xml_attrs = {"name", "userminimum", "userdefault", "usermaximum"}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
- raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
+ raise DesignSpaceDocumentError(
+ f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}"
+ )
name = element.get("name")
if name is None:
- raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
+ raise DesignSpaceDocumentError(
+ "axis-subset element must have a name attribute."
+ )
userMinimum = element.get("userminimum")
userDefault = element.get("userdefault")
userMaximum = element.get("usermaximum")
- if userMinimum is not None and userDefault is not None and userMaximum is not None:
+ if (
+ userMinimum is not None
+ and userDefault is not None
+ and userMaximum is not None
+ ):
return self.rangeAxisSubsetDescriptorClass(
name=name,
userMinimum=float(userMinimum),
@@ -2053,21 +2296,24 @@ class BaseDocReader(LogMixin):
"axis-subset element must have min/max/default values or none at all."
)
-
def readSources(self):
- for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")):
- filename = sourceElement.attrib.get('filename')
+ for sourceCount, sourceElement in enumerate(
+ self.root.findall(".sources/source")
+ ):
+ filename = sourceElement.attrib.get("filename")
if filename is not None and self.path is not None:
- sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))
+ sourcePath = os.path.abspath(
+ os.path.join(os.path.dirname(self.path), filename)
+ )
else:
sourcePath = None
- sourceName = sourceElement.attrib.get('name')
+ sourceName = sourceElement.attrib.get("name")
if sourceName is None:
# add a temporary source name
sourceName = "temp_master.%d" % (sourceCount)
sourceObject = self.sourceDescriptorClass()
- sourceObject.path = sourcePath # absolute path to the ufo source
- sourceObject.filename = filename # path as it is stored in the document
+ sourceObject.path = sourcePath # absolute path to the ufo source
+ sourceObject.filename = filename # path as it is stored in the document
sourceObject.name = sourceName
familyName = sourceElement.attrib.get("familyname")
if familyName is not None:
@@ -2075,40 +2321,42 @@ class BaseDocReader(LogMixin):
styleName = sourceElement.attrib.get("stylename")
if styleName is not None:
sourceObject.styleName = styleName
- for familyNameElement in sourceElement.findall('familyname'):
+ for familyNameElement in sourceElement.findall("familyname"):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
sourceObject.setFamilyName(familyName, lang)
designLocation, userLocation = self.locationFromElement(sourceElement)
if userLocation:
- raise DesignSpaceDocumentError(f'<source> element "{sourceName}" must only have design locations (using xvalue="").')
+ raise DesignSpaceDocumentError(
+ f'<source> element "{sourceName}" must only have design locations (using xvalue="").'
+ )
sourceObject.location = designLocation
- layerName = sourceElement.attrib.get('layer')
+ layerName = sourceElement.attrib.get("layer")
if layerName is not None:
sourceObject.layerName = layerName
- for libElement in sourceElement.findall('.lib'):
- if libElement.attrib.get('copy') == '1':
+ for libElement in sourceElement.findall(".lib"):
+ if libElement.attrib.get("copy") == "1":
sourceObject.copyLib = True
- for groupsElement in sourceElement.findall('.groups'):
- if groupsElement.attrib.get('copy') == '1':
+ for groupsElement in sourceElement.findall(".groups"):
+ if groupsElement.attrib.get("copy") == "1":
sourceObject.copyGroups = True
for infoElement in sourceElement.findall(".info"):
- if infoElement.attrib.get('copy') == '1':
+ if infoElement.attrib.get("copy") == "1":
sourceObject.copyInfo = True
- if infoElement.attrib.get('mute') == '1':
+ if infoElement.attrib.get("mute") == "1":
sourceObject.muteInfo = True
for featuresElement in sourceElement.findall(".features"):
- if featuresElement.attrib.get('copy') == '1':
+ if featuresElement.attrib.get("copy") == "1":
sourceObject.copyFeatures = True
for glyphElement in sourceElement.findall(".glyph"):
- glyphName = glyphElement.attrib.get('name')
+ glyphName = glyphElement.attrib.get("name")
if glyphName is None:
continue
- if glyphElement.attrib.get('mute') == '1':
+ if glyphElement.attrib.get("mute") == "1":
sourceObject.mutedGlyphNames.append(glyphName)
for kerningElement in sourceElement.findall(".kerning"):
- if kerningElement.attrib.get('mute') == '1':
+ if kerningElement.attrib.get("mute") == "1":
sourceObject.muteKerning = True
self.documentObject.sources.append(sourceObject)
@@ -2119,7 +2367,7 @@ class BaseDocReader(LogMixin):
Return a tuple of (designLocation, userLocation)
"""
elementLocation = (None, None)
- for locationElement in element.findall('.location'):
+ for locationElement in element.findall(".location"):
elementLocation = self.readLocationElement(locationElement)
break
return elementLocation
@@ -2138,32 +2386,38 @@ class BaseDocReader(LogMixin):
dimName = dimensionElement.attrib.get("name")
if self._strictAxisNames and dimName not in self.axisDefaults:
# In case the document contains no axis definitions,
- self.log.warning("Location with undefined axis: \"%s\".", dimName)
+ self.log.warning('Location with undefined axis: "%s".', dimName)
continue
userValue = xValue = yValue = None
try:
- userValue = dimensionElement.attrib.get('uservalue')
+ userValue = dimensionElement.attrib.get("uservalue")
if userValue is not None:
userValue = float(userValue)
except ValueError:
- self.log.warning("ValueError in readLocation userValue %3.3f", userValue)
+ self.log.warning(
+ "ValueError in readLocation userValue %3.3f", userValue
+ )
try:
- xValue = dimensionElement.attrib.get('xvalue')
+ xValue = dimensionElement.attrib.get("xvalue")
if xValue is not None:
xValue = float(xValue)
except ValueError:
self.log.warning("ValueError in readLocation xValue %3.3f", xValue)
try:
- yValue = dimensionElement.attrib.get('yvalue')
+ yValue = dimensionElement.attrib.get("yvalue")
if yValue is not None:
yValue = float(yValue)
except ValueError:
self.log.warning("ValueError in readLocation yValue %3.3f", yValue)
if userValue is None == xValue is None:
- raise DesignSpaceDocumentError(f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"')
+ raise DesignSpaceDocumentError(
+ f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"'
+ )
if yValue is not None:
if xValue is None:
- raise DesignSpaceDocumentError(f'Missing xvalue="" for the location dimension "{dimName}"" with yvalue="{yValue}"')
+ raise DesignSpaceDocumentError(
+ f'Missing xvalue="" for the location dimension "{dimName}"" with yvalue="{yValue}"'
+ )
designLoc[dimName] = (xValue, yValue)
elif xValue is not None:
designLoc[dimName] = xValue
@@ -2172,70 +2426,81 @@ class BaseDocReader(LogMixin):
return designLoc, userLoc
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
- instanceElements = self.root.findall('.instances/instance')
+ instanceElements = self.root.findall(".instances/instance")
for instanceElement in instanceElements:
- self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo)
+ self._readSingleInstanceElement(
+ instanceElement,
+ makeGlyphs=makeGlyphs,
+ makeKerning=makeKerning,
+ makeInfo=makeInfo,
+ )
- def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True):
- filename = instanceElement.attrib.get('filename')
+ def _readSingleInstanceElement(
+ self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True
+ ):
+ filename = instanceElement.attrib.get("filename")
if filename is not None and self.documentObject.path is not None:
- instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename)
+ instancePath = os.path.join(
+ os.path.dirname(self.documentObject.path), filename
+ )
else:
instancePath = None
instanceObject = self.instanceDescriptorClass()
- instanceObject.path = instancePath # absolute path to the instance
- instanceObject.filename = filename # path as it is stored in the document
+ instanceObject.path = instancePath # absolute path to the instance
+ instanceObject.filename = filename # path as it is stored in the document
name = instanceElement.attrib.get("name")
if name is not None:
instanceObject.name = name
- familyname = instanceElement.attrib.get('familyname')
+ familyname = instanceElement.attrib.get("familyname")
if familyname is not None:
instanceObject.familyName = familyname
- stylename = instanceElement.attrib.get('stylename')
+ stylename = instanceElement.attrib.get("stylename")
if stylename is not None:
instanceObject.styleName = stylename
- postScriptFontName = instanceElement.attrib.get('postscriptfontname')
+ postScriptFontName = instanceElement.attrib.get("postscriptfontname")
if postScriptFontName is not None:
instanceObject.postScriptFontName = postScriptFontName
- styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname')
+ styleMapFamilyName = instanceElement.attrib.get("stylemapfamilyname")
if styleMapFamilyName is not None:
instanceObject.styleMapFamilyName = styleMapFamilyName
- styleMapStyleName = instanceElement.attrib.get('stylemapstylename')
+ styleMapStyleName = instanceElement.attrib.get("stylemapstylename")
if styleMapStyleName is not None:
instanceObject.styleMapStyleName = styleMapStyleName
# read localised names
- for styleNameElement in instanceElement.findall('stylename'):
+ for styleNameElement in instanceElement.findall("stylename"):
for key, lang in styleNameElement.items():
if key == XML_LANG:
styleName = styleNameElement.text
instanceObject.setStyleName(styleName, lang)
- for familyNameElement in instanceElement.findall('familyname'):
+ for familyNameElement in instanceElement.findall("familyname"):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
instanceObject.setFamilyName(familyName, lang)
- for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'):
+ for styleMapStyleNameElement in instanceElement.findall("stylemapstylename"):
for key, lang in styleMapStyleNameElement.items():
if key == XML_LANG:
styleMapStyleName = styleMapStyleNameElement.text
instanceObject.setStyleMapStyleName(styleMapStyleName, lang)
- for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'):
+ for styleMapFamilyNameElement in instanceElement.findall("stylemapfamilyname"):
for key, lang in styleMapFamilyNameElement.items():
if key == XML_LANG:
styleMapFamilyName = styleMapFamilyNameElement.text
instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang)
designLocation, userLocation = self.locationFromElement(instanceElement)
- locationLabel = instanceElement.attrib.get('location')
+ locationLabel = instanceElement.attrib.get("location")
if (designLocation or userLocation) and locationLabel is not None:
- raise DesignSpaceDocumentError('instance element must have at most one of the location="..." attribute or the nested location element')
+ raise DesignSpaceDocumentError(
+ 'instance element must have at most one of the location="..." attribute or the nested location element'
+ )
instanceObject.locationLabel = locationLabel
instanceObject.userLocation = userLocation or {}
instanceObject.designLocation = designLocation or {}
- for glyphElement in instanceElement.findall('.glyphs/glyph'):
+ for glyphElement in instanceElement.findall(".glyphs/glyph"):
self.readGlyphElement(glyphElement, instanceObject)
for infoElement in instanceElement.findall("info"):
self.readInfoElement(infoElement, instanceObject)
- for libElement in instanceElement.findall('lib'):
+ for libElement in instanceElement.findall("lib"):
self.readLibElement(libElement, instanceObject)
self.documentObject.instances.append(instanceObject)
@@ -2244,7 +2509,7 @@ class BaseDocReader(LogMixin):
instanceObject.lib = plistlib.fromtree(libElement[0])
def readInfoElement(self, infoElement, instanceObject):
- """ Read the info element."""
+ """Read the info element."""
instanceObject.info = True
def readGlyphElement(self, glyphElement, instanceObject):
@@ -2266,47 +2531,53 @@ class BaseDocReader(LogMixin):
</glyph>
"""
glyphData = {}
- glyphName = glyphElement.attrib.get('name')
+ glyphName = glyphElement.attrib.get("name")
if glyphName is None:
raise DesignSpaceDocumentError("Glyph object without name attribute")
mute = glyphElement.attrib.get("mute")
if mute == "1":
- glyphData['mute'] = True
+ glyphData["mute"] = True
# unicode
- unicodes = glyphElement.attrib.get('unicode')
+ unicodes = glyphElement.attrib.get("unicode")
if unicodes is not None:
try:
unicodes = [int(u, 16) for u in unicodes.split(" ")]
- glyphData['unicodes'] = unicodes
+ glyphData["unicodes"] = unicodes
except ValueError:
- raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes)
+ raise DesignSpaceDocumentError(
+ "unicode values %s are not integers" % unicodes
+ )
- for noteElement in glyphElement.findall('.note'):
- glyphData['note'] = noteElement.text
+ for noteElement in glyphElement.findall(".note"):
+ glyphData["note"] = noteElement.text
break
designLocation, userLocation = self.locationFromElement(glyphElement)
if userLocation:
- raise DesignSpaceDocumentError(f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").')
+ raise DesignSpaceDocumentError(
+ f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").'
+ )
if designLocation is not None:
- glyphData['instanceLocation'] = designLocation
+ glyphData["instanceLocation"] = designLocation
glyphSources = None
- for masterElement in glyphElement.findall('.masters/master'):
- fontSourceName = masterElement.attrib.get('source')
+ for masterElement in glyphElement.findall(".masters/master"):
+ fontSourceName = masterElement.attrib.get("source")
designLocation, userLocation = self.locationFromElement(masterElement)
if userLocation:
- raise DesignSpaceDocumentError(f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").')
- masterGlyphName = masterElement.attrib.get('glyphname')
+ raise DesignSpaceDocumentError(
+ f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").'
+ )
+ masterGlyphName = masterElement.attrib.get("glyphname")
if masterGlyphName is None:
# if we don't read a glyphname, use the one we have
masterGlyphName = glyphName
- d = dict(font=fontSourceName,
- location=designLocation,
- glyphName=masterGlyphName)
+ d = dict(
+ font=fontSourceName, location=designLocation, glyphName=masterGlyphName
+ )
if glyphSources is None:
glyphSources = []
glyphSources.append(d)
if glyphSources is not None:
- glyphData['masters'] = glyphSources
+ glyphData["masters"] = glyphSources
instanceObject.glyphs[glyphName] = glyphData
def readLib(self):
@@ -2338,6 +2609,7 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
doc.formatVersion
doc.elidedFallbackName
doc.axes
+ doc.axisMappings
doc.locationLabels
doc.rules
doc.rulesProcessingLast
@@ -2375,6 +2647,10 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
"""List of this document's axes."""
+
+ self.axisMappings: List[AxisMappingDescriptor] = []
+ """List of this document's axis mappings."""
+
self.locationLabels: List[LocationLabelDescriptor] = []
"""List of this document's STAT format 4 labels.
@@ -2455,9 +2731,7 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
def tostring(self, encoding=None):
"""Returns the designspace as a string. Default encoding ``utf-8``."""
- if encoding is str or (
- encoding is not None and encoding.lower() == "unicode"
- ):
+ if encoding is str or (encoding is not None and encoding.lower() == "unicode"):
f = StringIO()
xml_declaration = False
elif encoding is None or encoding == "utf-8":
@@ -2587,6 +2861,18 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
self.addAxis(axis)
return axis
+ def addAxisMapping(self, axisMappingDescriptor: AxisMappingDescriptor):
+ """Add the given ``axisMappingDescriptor`` to :attr:`axisMappings`."""
+ self.axisMappings.append(axisMappingDescriptor)
+
+ def addAxisMappingDescriptor(self, **kwargs):
+ """Instantiate a new :class:`AxisMappingDescriptor` using the given
+ ``kwargs`` and add it to :attr:`rules`.
+ """
+ axisMapping = self.writerClass.axisMappingDescriptorClass(**kwargs)
+ self.addAxisMapping(axisMapping)
+ return axisMapping
+
def addRule(self, ruleDescriptor: RuleDescriptor):
"""Add the given ``ruleDescriptor`` to :attr:`rules`."""
self.rules.append(ruleDescriptor)
@@ -2644,14 +2930,21 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
)
return loc
- def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]:
+ def labelForUserLocation(
+ self, userLocation: SimpleLocationDict
+ ) -> Optional[LocationLabelDescriptor]:
"""Return the :class:`LocationLabel` that matches the given
``userLocation``, or ``None`` if no such label exists.
.. versionadded:: 5.0
"""
return next(
- (label for label in self.locationLabels if label.userLocation == userLocation), None
+ (
+ label
+ for label in self.locationLabels
+ if label.userLocation == userLocation
+ ),
+ None,
)
def updateFilenameFromPath(self, masters=True, instances=True, force=False):
@@ -2691,12 +2984,13 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
names.append(axisDescriptor.name)
return names
- def getAxis(self, name):
+ def getAxis(self, name: str) -> AxisDescriptor | DiscreteAxisDescriptor | None:
"""Return the axis with the given ``name``, or ``None`` if no such axis exists."""
- for axisDescriptor in self.axes:
- if axisDescriptor.name == name:
- return axisDescriptor
- return None
+ return next((axis for axis in self.axes if axis.name == name), None)
+
+ def getAxisByTag(self, tag: str) -> AxisDescriptor | DiscreteAxisDescriptor | None:
+ """Return the axis with the given ``tag``, or ``None`` if no such axis exists."""
+ return next((axis for axis in self.axes if axis.tag == tag), None)
def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
"""Return the top-level location label with the given ``name``, or
@@ -2723,7 +3017,9 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
for axis in self.axes
}
- def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict:
+ def map_backward(
+ self, designLocation: AnisotropicLocationDict
+ ) -> SimpleLocationDict:
"""Map a design location to a user location.
Assume that missing coordinates are at the default location for that axis.
@@ -2800,16 +3096,22 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
for item in self.instances:
# glyph masters for this instance
for _, glyphData in item.glyphs.items():
- glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation'])
- for glyphMaster in glyphData['masters']:
- glyphMaster['location'] = self.normalizeLocation(glyphMaster['location'])
+ glyphData["instanceLocation"] = self.normalizeLocation(
+ glyphData["instanceLocation"]
+ )
+ for glyphMaster in glyphData["masters"]:
+ glyphMaster["location"] = self.normalizeLocation(
+ glyphMaster["location"]
+ )
item.location = self.normalizeLocation(item.location)
# the axes
for axis in self.axes:
# scale the map first
newMap = []
for inputValue, outputValue in axis.map:
- newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name)
+ newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(
+ axis.name
+ )
newMap.append((inputValue, newOutputValue))
if newMap:
axis.map = newMap
@@ -2827,15 +3129,21 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
for conditions in rule.conditionSets:
newConditions = []
for cond in conditions:
- if cond.get('minimum') is not None:
- minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name'])
+ if cond.get("minimum") is not None:
+ minimum = self.normalizeLocation(
+ {cond["name"]: cond["minimum"]}
+ ).get(cond["name"])
else:
minimum = None
- if cond.get('maximum') is not None:
- maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name'])
+ if cond.get("maximum") is not None:
+ maximum = self.normalizeLocation(
+ {cond["name"]: cond["maximum"]}
+ ).get(cond["name"])
else:
maximum = None
- newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum))
+ newConditions.append(
+ dict(name=cond["name"], minimum=minimum, maximum=maximum)
+ )
newConditionSets.append(newConditions)
rule.conditionSets = newConditionSets
@@ -2919,7 +3227,9 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
variableFonts = []
discreteAxes = []
- rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = []
+ rangeAxisSubsets: List[
+ Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]
+ ] = []
for axis in self.axes:
if hasattr(axis, "values"):
# Mypy doesn't support narrowing union types via hasattr()
@@ -2938,14 +3248,19 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF"
if basename is None:
basename = "VF"
- axisNames = "".join([f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)])
- variableFonts.append(VariableFontDescriptor(
- name=f"{basename}{axisNames}",
- axisSubsets=rangeAxisSubsets + [
- ValueAxisSubsetDescriptor(name=axis.name, userValue=value)
- for axis, value in zip(discreteAxes, values)
- ]
- ))
+ axisNames = "".join(
+ [f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)]
+ )
+ variableFonts.append(
+ VariableFontDescriptor(
+ name=f"{basename}{axisNames}",
+ axisSubsets=rangeAxisSubsets
+ + [
+ ValueAxisSubsetDescriptor(name=axis.name, userValue=value)
+ for axis, value in zip(discreteAxes, values)
+ ],
+ )
+ )
return variableFonts
def deepcopyExceptFonts(self):
@@ -2966,4 +3281,3 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
finally:
for source, font in zip(self.sources, fonts):
source.font = font
-
diff --git a/Lib/fontTools/designspaceLib/split.py b/Lib/fontTools/designspaceLib/split.py
index 408de70a..0b7cdf4b 100644
--- a/Lib/fontTools/designspaceLib/split.py
+++ b/Lib/fontTools/designspaceLib/split.py
@@ -11,6 +11,7 @@ from typing import Any, Callable, Dict, Iterator, List, Tuple, cast
from fontTools.designspaceLib import (
AxisDescriptor,
+ AxisMappingDescriptor,
DesignSpaceDocument,
DiscreteAxisDescriptor,
InstanceDescriptor,
@@ -225,6 +226,44 @@ def _extractSubSpace(
)
)
+ subDoc.axisMappings = mappings = []
+ subDocAxes = {axis.name for axis in subDoc.axes}
+ for mapping in doc.axisMappings:
+ if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()):
+ continue
+ if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()):
+ LOGGER.error(
+ "In axis mapping from input %s, some output axes are not in the variable-font: %s",
+ mapping.inputLocation,
+ mapping.outputLocation,
+ )
+ continue
+
+ mappingAxes = set()
+ mappingAxes.update(mapping.inputLocation.keys())
+ mappingAxes.update(mapping.outputLocation.keys())
+ for axis in doc.axes:
+ if axis.name not in mappingAxes:
+ continue
+ range = userRegion[axis.name]
+ if (
+ range.minimum != axis.minimum
+ or (range.default is not None and range.default != axis.default)
+ or range.maximum != axis.maximum
+ ):
+ LOGGER.error(
+ "Limiting axis ranges used in <mapping> elements not supported: %s",
+ axis.name,
+ )
+ continue
+
+ mappings.append(
+ AxisMappingDescriptor(
+ inputLocation=mapping.inputLocation,
+ outputLocation=mapping.outputLocation,
+ )
+ )
+
# Don't include STAT info
# subDoc.locationLabels = doc.locationLabels
@@ -352,9 +391,10 @@ def _extractSubSpace(
def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet:
c: Dict[str, Range] = {}
for condition in conditionSet:
+ minimum, maximum = condition.get("minimum"), condition.get("maximum")
c[condition["name"]] = Range(
- condition.get("minimum", -math.inf),
- condition.get("maximum", math.inf),
+ minimum if minimum is not None else -math.inf,
+ maximum if maximum is not None else math.inf,
)
return c
diff --git a/Lib/fontTools/designspaceLib/statNames.py b/Lib/fontTools/designspaceLib/statNames.py
index 1b672703..a164169d 100644
--- a/Lib/fontTools/designspaceLib/statNames.py
+++ b/Lib/fontTools/designspaceLib/statNames.py
@@ -48,7 +48,6 @@ class StatNames:
styleMapStyleName: Optional[RibbiStyle]
-
def getStatNames(
doc: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> StatNames:
@@ -89,7 +88,9 @@ def getStatNames(
# whenever a translation is missing.
labels = _getAxisLabelsForUserLocation(doc.axes, userLocation)
if labels:
- languages = set(language for label in labels for language in label.labelNames)
+ languages = set(
+ language for label in labels for language in label.labelNames
+ )
languages.add("en")
for language in languages:
styleName = " ".join(
@@ -214,16 +215,34 @@ def _getRibbiStyle(
axis = axes_by_tag.get("wght")
if axis is not None:
for regular_label in axis.axisLabels:
- if regular_label.linkedUserValue == userLocation[axis.name]:
+ if (
+ regular_label.linkedUserValue == userLocation[axis.name]
+ # In the "recursive" case where both the Regular has
+ # linkedUserValue pointing the Bold, and the Bold has
+ # linkedUserValue pointing to the Regular, only consider the
+ # first case: Regular (e.g. 400) has linkedUserValue pointing to
+ # Bold (e.g. 700, higher than Regular)
+ and regular_label.userValue < regular_label.linkedUserValue
+ ):
regularUserLocation[axis.name] = regular_label.userValue
bold = True
break
axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt")
if axis is not None:
- for urpright_label in axis.axisLabels:
- if urpright_label.linkedUserValue == userLocation[axis.name]:
- regularUserLocation[axis.name] = urpright_label.userValue
+ for upright_label in axis.axisLabels:
+ if (
+ upright_label.linkedUserValue == userLocation[axis.name]
+ # In the "recursive" case where both the Upright has
+ # linkedUserValue pointing the Italic, and the Italic has
+ # linkedUserValue pointing to the Upright, only consider the
+ # first case: Upright (e.g. ital=0, slant=0) has
+ # linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or
+ # slant=12 for backwards italics, in any case higher than
+ # Upright in absolute value, hence the abs() below.
+ and abs(upright_label.userValue) < abs(upright_label.linkedUserValue)
+ ):
+ regularUserLocation[axis.name] = upright_label.userValue
italic = True
break
diff --git a/Lib/fontTools/encodings/MacRoman.py b/Lib/fontTools/encodings/MacRoman.py
index 25232d38..ba8bf14e 100644
--- a/Lib/fontTools/encodings/MacRoman.py
+++ b/Lib/fontTools/encodings/MacRoman.py
@@ -1,36 +1,258 @@
MacRoman = [
- 'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute',
- 'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1',
- 'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters',
- 'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US',
- 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand',
- 'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
- 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five',
- 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
- 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
- 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
- 'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore',
- 'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
- 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar',
- 'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute',
- 'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex',
- 'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex',
- 'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde',
- 'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave',
- 'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section',
- 'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark',
- 'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus',
- 'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation',
- 'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae',
- 'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin',
- 'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis',
- 'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash',
- 'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge',
- 'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft',
- 'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase',
- 'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute',
- 'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute',
- 'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi',
- 'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla',
- 'hungarumlaut', 'ogonek', 'caron'
- ]
+ "NUL",
+ "Eth",
+ "eth",
+ "Lslash",
+ "lslash",
+ "Scaron",
+ "scaron",
+ "Yacute",
+ "yacute",
+ "HT",
+ "LF",
+ "Thorn",
+ "thorn",
+ "CR",
+ "Zcaron",
+ "zcaron",
+ "DLE",
+ "DC1",
+ "DC2",
+ "DC3",
+ "DC4",
+ "onehalf",
+ "onequarter",
+ "onesuperior",
+ "threequarters",
+ "threesuperior",
+ "twosuperior",
+ "brokenbar",
+ "minus",
+ "multiply",
+ "RS",
+ "US",
+ "space",
+ "exclam",
+ "quotedbl",
+ "numbersign",
+ "dollar",
+ "percent",
+ "ampersand",
+ "quotesingle",
+ "parenleft",
+ "parenright",
+ "asterisk",
+ "plus",
+ "comma",
+ "hyphen",
+ "period",
+ "slash",
+ "zero",
+ "one",
+ "two",
+ "three",
+ "four",
+ "five",
+ "six",
+ "seven",
+ "eight",
+ "nine",
+ "colon",
+ "semicolon",
+ "less",
+ "equal",
+ "greater",
+ "question",
+ "at",
+ "A",
+ "B",
+ "C",
+ "D",
+ "E",
+ "F",
+ "G",
+ "H",
+ "I",
+ "J",
+ "K",
+ "L",
+ "M",
+ "N",
+ "O",
+ "P",
+ "Q",
+ "R",
+ "S",
+ "T",
+ "U",
+ "V",
+ "W",
+ "X",
+ "Y",
+ "Z",
+ "bracketleft",
+ "backslash",
+ "bracketright",
+ "asciicircum",
+ "underscore",
+ "grave",
+ "a",
+ "b",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ "h",
+ "i",
+ "j",
+ "k",
+ "l",
+ "m",
+ "n",
+ "o",
+ "p",
+ "q",
+ "r",
+ "s",
+ "t",
+ "u",
+ "v",
+ "w",
+ "x",
+ "y",
+ "z",
+ "braceleft",
+ "bar",
+ "braceright",
+ "asciitilde",
+ "DEL",
+ "Adieresis",
+ "Aring",
+ "Ccedilla",
+ "Eacute",
+ "Ntilde",
+ "Odieresis",
+ "Udieresis",
+ "aacute",
+ "agrave",
+ "acircumflex",
+ "adieresis",
+ "atilde",
+ "aring",
+ "ccedilla",
+ "eacute",
+ "egrave",
+ "ecircumflex",
+ "edieresis",
+ "iacute",
+ "igrave",
+ "icircumflex",
+ "idieresis",
+ "ntilde",
+ "oacute",
+ "ograve",
+ "ocircumflex",
+ "odieresis",
+ "otilde",
+ "uacute",
+ "ugrave",
+ "ucircumflex",
+ "udieresis",
+ "dagger",
+ "degree",
+ "cent",
+ "sterling",
+ "section",
+ "bullet",
+ "paragraph",
+ "germandbls",
+ "registered",
+ "copyright",
+ "trademark",
+ "acute",
+ "dieresis",
+ "notequal",
+ "AE",
+ "Oslash",
+ "infinity",
+ "plusminus",
+ "lessequal",
+ "greaterequal",
+ "yen",
+ "mu",
+ "partialdiff",
+ "summation",
+ "product",
+ "pi",
+ "integral",
+ "ordfeminine",
+ "ordmasculine",
+ "Omega",
+ "ae",
+ "oslash",
+ "questiondown",
+ "exclamdown",
+ "logicalnot",
+ "radical",
+ "florin",
+ "approxequal",
+ "Delta",
+ "guillemotleft",
+ "guillemotright",
+ "ellipsis",
+ "nbspace",
+ "Agrave",
+ "Atilde",
+ "Otilde",
+ "OE",
+ "oe",
+ "endash",
+ "emdash",
+ "quotedblleft",
+ "quotedblright",
+ "quoteleft",
+ "quoteright",
+ "divide",
+ "lozenge",
+ "ydieresis",
+ "Ydieresis",
+ "fraction",
+ "currency",
+ "guilsinglleft",
+ "guilsinglright",
+ "fi",
+ "fl",
+ "daggerdbl",
+ "periodcentered",
+ "quotesinglbase",
+ "quotedblbase",
+ "perthousand",
+ "Acircumflex",
+ "Ecircumflex",
+ "Aacute",
+ "Edieresis",
+ "Egrave",
+ "Iacute",
+ "Icircumflex",
+ "Idieresis",
+ "Igrave",
+ "Oacute",
+ "Ocircumflex",
+ "apple",
+ "Ograve",
+ "Uacute",
+ "Ucircumflex",
+ "Ugrave",
+ "dotlessi",
+ "circumflex",
+ "tilde",
+ "macron",
+ "breve",
+ "dotaccent",
+ "ring",
+ "cedilla",
+ "hungarumlaut",
+ "ogonek",
+ "caron",
+]
diff --git a/Lib/fontTools/encodings/StandardEncoding.py b/Lib/fontTools/encodings/StandardEncoding.py
index 810b2a09..bf138862 100644
--- a/Lib/fontTools/encodings/StandardEncoding.py
+++ b/Lib/fontTools/encodings/StandardEncoding.py
@@ -1,48 +1,258 @@
StandardEncoding = [
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', 'space', 'exclam', 'quotedbl',
- 'numbersign', 'dollar', 'percent', 'ampersand',
- 'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus',
- 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two',
- 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
- 'colon', 'semicolon', 'less', 'equal', 'greater',
- 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
- 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
- 'bracketright', 'asciicircum', 'underscore', 'quoteleft',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
- 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
- 'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown',
- 'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
- 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
- 'guilsinglleft', 'guilsinglright', 'fi', 'fl', '.notdef',
- 'endash', 'dagger', 'daggerdbl', 'periodcentered',
- '.notdef', 'paragraph', 'bullet', 'quotesinglbase',
- 'quotedblbase', 'quotedblright', 'guillemotright',
- 'ellipsis', 'perthousand', '.notdef', 'questiondown',
- '.notdef', 'grave', 'acute', 'circumflex', 'tilde',
- 'macron', 'breve', 'dotaccent', 'dieresis', '.notdef',
- 'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek',
- 'caron', 'emdash', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
- '.notdef', '.notdef', '.notdef', 'AE', '.notdef',
- 'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef',
- 'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef',
- '.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef',
- '.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef',
- 'lslash', 'oslash', 'oe', 'germandbls', '.notdef',
- '.notdef', '.notdef', '.notdef'
- ]
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ "space",
+ "exclam",
+ "quotedbl",
+ "numbersign",
+ "dollar",
+ "percent",
+ "ampersand",
+ "quoteright",
+ "parenleft",
+ "parenright",
+ "asterisk",
+ "plus",
+ "comma",
+ "hyphen",
+ "period",
+ "slash",
+ "zero",
+ "one",
+ "two",
+ "three",
+ "four",
+ "five",
+ "six",
+ "seven",
+ "eight",
+ "nine",
+ "colon",
+ "semicolon",
+ "less",
+ "equal",
+ "greater",
+ "question",
+ "at",
+ "A",
+ "B",
+ "C",
+ "D",
+ "E",
+ "F",
+ "G",
+ "H",
+ "I",
+ "J",
+ "K",
+ "L",
+ "M",
+ "N",
+ "O",
+ "P",
+ "Q",
+ "R",
+ "S",
+ "T",
+ "U",
+ "V",
+ "W",
+ "X",
+ "Y",
+ "Z",
+ "bracketleft",
+ "backslash",
+ "bracketright",
+ "asciicircum",
+ "underscore",
+ "quoteleft",
+ "a",
+ "b",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ "h",
+ "i",
+ "j",
+ "k",
+ "l",
+ "m",
+ "n",
+ "o",
+ "p",
+ "q",
+ "r",
+ "s",
+ "t",
+ "u",
+ "v",
+ "w",
+ "x",
+ "y",
+ "z",
+ "braceleft",
+ "bar",
+ "braceright",
+ "asciitilde",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ "exclamdown",
+ "cent",
+ "sterling",
+ "fraction",
+ "yen",
+ "florin",
+ "section",
+ "currency",
+ "quotesingle",
+ "quotedblleft",
+ "guillemotleft",
+ "guilsinglleft",
+ "guilsinglright",
+ "fi",
+ "fl",
+ ".notdef",
+ "endash",
+ "dagger",
+ "daggerdbl",
+ "periodcentered",
+ ".notdef",
+ "paragraph",
+ "bullet",
+ "quotesinglbase",
+ "quotedblbase",
+ "quotedblright",
+ "guillemotright",
+ "ellipsis",
+ "perthousand",
+ ".notdef",
+ "questiondown",
+ ".notdef",
+ "grave",
+ "acute",
+ "circumflex",
+ "tilde",
+ "macron",
+ "breve",
+ "dotaccent",
+ "dieresis",
+ ".notdef",
+ "ring",
+ "cedilla",
+ ".notdef",
+ "hungarumlaut",
+ "ogonek",
+ "caron",
+ "emdash",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ "AE",
+ ".notdef",
+ "ordfeminine",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ "Lslash",
+ "Oslash",
+ "OE",
+ "ordmasculine",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ "ae",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ "dotlessi",
+ ".notdef",
+ ".notdef",
+ "lslash",
+ "oslash",
+ "oe",
+ "germandbls",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+ ".notdef",
+]
diff --git a/Lib/fontTools/encodings/codecs.py b/Lib/fontTools/encodings/codecs.py
index 3b1a8256..3ac0268d 100644
--- a/Lib/fontTools/encodings/codecs.py
+++ b/Lib/fontTools/encodings/codecs.py
@@ -4,116 +4,132 @@ but missing from Python. See https://github.com/fonttools/fonttools/issues/236
import codecs
import encodings
-class ExtendCodec(codecs.Codec):
- def __init__(self, name, base_encoding, mapping):
- self.name = name
- self.base_encoding = base_encoding
- self.mapping = mapping
- self.reverse = {v:k for k,v in mapping.items()}
- self.max_len = max(len(v) for v in mapping.values())
- self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode)
- codecs.register_error(name, self.error)
+class ExtendCodec(codecs.Codec):
+ def __init__(self, name, base_encoding, mapping):
+ self.name = name
+ self.base_encoding = base_encoding
+ self.mapping = mapping
+ self.reverse = {v: k for k, v in mapping.items()}
+ self.max_len = max(len(v) for v in mapping.values())
+ self.info = codecs.CodecInfo(
+ name=self.name, encode=self.encode, decode=self.decode
+ )
+ codecs.register_error(name, self.error)
- def _map(self, mapper, output_type, exc_type, input, errors):
- base_error_handler = codecs.lookup_error(errors)
- length = len(input)
- out = output_type()
- while input:
- # first try to use self.error as the error handler
- try:
- part = mapper(input, self.base_encoding, errors=self.name)
- out += part
- break # All converted
- except exc_type as e:
- # else convert the correct part, handle error as requested and continue
- out += mapper(input[:e.start], self.base_encoding, self.name)
- replacement, pos = base_error_handler(e)
- out += replacement
- input = input[pos:]
- return out, length
+ def _map(self, mapper, output_type, exc_type, input, errors):
+ base_error_handler = codecs.lookup_error(errors)
+ length = len(input)
+ out = output_type()
+ while input:
+ # first try to use self.error as the error handler
+ try:
+ part = mapper(input, self.base_encoding, errors=self.name)
+ out += part
+ break # All converted
+ except exc_type as e:
+ # else convert the correct part, handle error as requested and continue
+ out += mapper(input[: e.start], self.base_encoding, self.name)
+ replacement, pos = base_error_handler(e)
+ out += replacement
+ input = input[pos:]
+ return out, length
- def encode(self, input, errors='strict'):
- return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
+ def encode(self, input, errors="strict"):
+ return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
- def decode(self, input, errors='strict'):
- return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
+ def decode(self, input, errors="strict"):
+ return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
- def error(self, e):
- if isinstance(e, UnicodeDecodeError):
- for end in range(e.start + 1, e.end + 1):
- s = e.object[e.start:end]
- if s in self.mapping:
- return self.mapping[s], end
- elif isinstance(e, UnicodeEncodeError):
- for end in range(e.start + 1, e.start + self.max_len + 1):
- s = e.object[e.start:end]
- if s in self.reverse:
- return self.reverse[s], end
- e.encoding = self.name
- raise e
+ def error(self, e):
+ if isinstance(e, UnicodeDecodeError):
+ for end in range(e.start + 1, e.end + 1):
+ s = e.object[e.start : end]
+ if s in self.mapping:
+ return self.mapping[s], end
+ elif isinstance(e, UnicodeEncodeError):
+ for end in range(e.start + 1, e.start + self.max_len + 1):
+ s = e.object[e.start : end]
+ if s in self.reverse:
+ return self.reverse[s], end
+ e.encoding = self.name
+ raise e
_extended_encodings = {
- "x_mac_japanese_ttx": ("shift_jis", {
- b"\xFC": chr(0x007C),
- b"\x7E": chr(0x007E),
- b"\x80": chr(0x005C),
- b"\xA0": chr(0x00A0),
- b"\xFD": chr(0x00A9),
- b"\xFE": chr(0x2122),
- b"\xFF": chr(0x2026),
- }),
- "x_mac_trad_chinese_ttx": ("big5", {
- b"\x80": chr(0x005C),
- b"\xA0": chr(0x00A0),
- b"\xFD": chr(0x00A9),
- b"\xFE": chr(0x2122),
- b"\xFF": chr(0x2026),
- }),
- "x_mac_korean_ttx": ("euc_kr", {
- b"\x80": chr(0x00A0),
- b"\x81": chr(0x20A9),
- b"\x82": chr(0x2014),
- b"\x83": chr(0x00A9),
- b"\xFE": chr(0x2122),
- b"\xFF": chr(0x2026),
- }),
- "x_mac_simp_chinese_ttx": ("gb2312", {
- b"\x80": chr(0x00FC),
- b"\xA0": chr(0x00A0),
- b"\xFD": chr(0x00A9),
- b"\xFE": chr(0x2122),
- b"\xFF": chr(0x2026),
- }),
+ "x_mac_japanese_ttx": (
+ "shift_jis",
+ {
+ b"\xFC": chr(0x007C),
+ b"\x7E": chr(0x007E),
+ b"\x80": chr(0x005C),
+ b"\xA0": chr(0x00A0),
+ b"\xFD": chr(0x00A9),
+ b"\xFE": chr(0x2122),
+ b"\xFF": chr(0x2026),
+ },
+ ),
+ "x_mac_trad_chinese_ttx": (
+ "big5",
+ {
+ b"\x80": chr(0x005C),
+ b"\xA0": chr(0x00A0),
+ b"\xFD": chr(0x00A9),
+ b"\xFE": chr(0x2122),
+ b"\xFF": chr(0x2026),
+ },
+ ),
+ "x_mac_korean_ttx": (
+ "euc_kr",
+ {
+ b"\x80": chr(0x00A0),
+ b"\x81": chr(0x20A9),
+ b"\x82": chr(0x2014),
+ b"\x83": chr(0x00A9),
+ b"\xFE": chr(0x2122),
+ b"\xFF": chr(0x2026),
+ },
+ ),
+ "x_mac_simp_chinese_ttx": (
+ "gb2312",
+ {
+ b"\x80": chr(0x00FC),
+ b"\xA0": chr(0x00A0),
+ b"\xFD": chr(0x00A9),
+ b"\xFE": chr(0x2122),
+ b"\xFF": chr(0x2026),
+ },
+ ),
}
_cache = {}
+
def search_function(name):
- name = encodings.normalize_encoding(name) # Rather undocumented...
- if name in _extended_encodings:
- if name not in _cache:
- base_encoding, mapping = _extended_encodings[name]
- assert(name[-4:] == "_ttx")
- # Python 2 didn't have any of the encodings that we are implementing
- # in this file. Python 3 added aliases for the East Asian ones, mapping
- # them "temporarily" to the same base encoding as us, with a comment
- # suggesting that full implementation will appear some time later.
- # As such, try the Python version of the x_mac_... first, if that is found,
- # use *that* as our base encoding. This would make our encoding upgrade
- # to the full encoding when and if Python finally implements that.
- # http://bugs.python.org/issue24041
- base_encodings = [name[:-4], base_encoding]
- for base_encoding in base_encodings:
- try:
- codecs.lookup(base_encoding)
- except LookupError:
- continue
- _cache[name] = ExtendCodec(name, base_encoding, mapping)
- break
- return _cache[name].info
+ name = encodings.normalize_encoding(name) # Rather undocumented...
+ if name in _extended_encodings:
+ if name not in _cache:
+ base_encoding, mapping = _extended_encodings[name]
+ assert name[-4:] == "_ttx"
+ # Python 2 didn't have any of the encodings that we are implementing
+ # in this file. Python 3 added aliases for the East Asian ones, mapping
+ # them "temporarily" to the same base encoding as us, with a comment
+ # suggesting that full implementation will appear some time later.
+ # As such, try the Python version of the x_mac_... first, if that is found,
+ # use *that* as our base encoding. This would make our encoding upgrade
+ # to the full encoding when and if Python finally implements that.
+ # http://bugs.python.org/issue24041
+ base_encodings = [name[:-4], base_encoding]
+ for base_encoding in base_encodings:
+ try:
+ codecs.lookup(base_encoding)
+ except LookupError:
+ continue
+ _cache[name] = ExtendCodec(name, base_encoding, mapping)
+ break
+ return _cache[name].info
+
+ return None
- return None
codecs.register(search_function)
diff --git a/Lib/fontTools/feaLib/ast.py b/Lib/fontTools/feaLib/ast.py
index 1273343d..17c6cc3f 100644
--- a/Lib/fontTools/feaLib/ast.py
+++ b/Lib/fontTools/feaLib/ast.py
@@ -912,14 +912,11 @@ class IgnoreSubstStatement(Statement):
contexts = []
for prefix, glyphs, suffix in self.chainContexts:
res = ""
- if len(prefix) or len(suffix):
- if len(prefix):
- res += " ".join(map(asFea, prefix)) + " "
- res += " ".join(g.asFea() + "'" for g in glyphs)
- if len(suffix):
- res += " " + " ".join(map(asFea, suffix))
- else:
- res += " ".join(map(asFea, glyphs))
+ if len(prefix):
+ res += " ".join(map(asFea, prefix)) + " "
+ res += " ".join(g.asFea() + "'" for g in glyphs)
+ if len(suffix):
+ res += " " + " ".join(map(asFea, suffix))
contexts.append(res)
return "ignore sub " + ", ".join(contexts) + ";"
@@ -1259,25 +1256,34 @@ class MultipleSubstStatement(Statement):
"""Calls the builder object's ``add_multiple_subst`` callback."""
prefix = [p.glyphSet() for p in self.prefix]
suffix = [s.glyphSet() for s in self.suffix]
- if not self.replacement and hasattr(self.glyph, "glyphSet"):
- for glyph in self.glyph.glyphSet():
+ if hasattr(self.glyph, "glyphSet"):
+ originals = self.glyph.glyphSet()
+ else:
+ originals = [self.glyph]
+ count = len(originals)
+ replaces = []
+ for r in self.replacement:
+ if hasattr(r, "glyphSet"):
+ replace = r.glyphSet()
+ else:
+ replace = [r]
+ if len(replace) == 1 and len(replace) != count:
+ replace = replace * count
+ replaces.append(replace)
+ replaces = list(zip(*replaces))
+
+ seen_originals = set()
+ for i, original in enumerate(originals):
+ if original not in seen_originals:
+ seen_originals.add(original)
builder.add_multiple_subst(
self.location,
prefix,
- glyph,
+ original,
suffix,
- self.replacement,
+ replaces and replaces[i] or (),
self.forceChain,
)
- else:
- builder.add_multiple_subst(
- self.location,
- prefix,
- self.glyph,
- suffix,
- self.replacement,
- self.forceChain,
- )
def asFea(self, indent=""):
res = "sub "
@@ -2068,7 +2074,7 @@ class ConditionsetStatement(Statement):
self.conditions = conditions
def build(self, builder):
- builder.add_conditionset(self.name, self.conditions)
+ builder.add_conditionset(self.location, self.name, self.conditions)
def asFea(self, res="", indent=""):
res += indent + f"conditionset {self.name} " + "{\n"
diff --git a/Lib/fontTools/feaLib/builder.py b/Lib/fontTools/feaLib/builder.py
index 0a991761..cfaf54d4 100644
--- a/Lib/fontTools/feaLib/builder.py
+++ b/Lib/fontTools/feaLib/builder.py
@@ -34,7 +34,7 @@ from fontTools.otlLib.error import OpenTypeLibError
from fontTools.varLib.varStore import OnlineVarStoreBuilder
from fontTools.varLib.builder import buildVarDevTable
from fontTools.varLib.featureVars import addFeatureVariationsRaw
-from fontTools.varLib.models import normalizeValue
+from fontTools.varLib.models import normalizeValue, piecewiseLinearMap
from collections import defaultdict
import itertools
from io import StringIO
@@ -90,7 +90,6 @@ def addOpenTypeFeaturesFromString(
class Builder(object):
-
supportedTables = frozenset(
Tag(tag)
for tag in [
@@ -176,6 +175,10 @@ class Builder(object):
self.stat_ = {}
# for conditionsets
self.conditionsets_ = {}
+ # We will often use exactly the same locations (i.e. the font's masters)
+ # for a large number of variable scalars. Instead of creating a model
+ # for each, let's share the models.
+ self.model_cache = {}
def build(self, tables=None, debug=False):
if self.parseTree is None:
@@ -290,9 +293,8 @@ class Builder(object):
]
# "aalt" does not have to specify its own lookups, but it might.
if not feature and name != "aalt":
- raise FeatureLibError(
- "Feature %s has not been defined" % name, location
- )
+ warnings.warn("%s: Feature %s has not been defined" % (location, name))
+ continue
for script, lang, feature, lookups in feature:
for lookuplist in lookups:
if not isinstance(lookuplist, list):
@@ -446,6 +448,7 @@ class Builder(object):
assert self.cv_parameters_ids_[tag] is not None
nameID = self.cv_parameters_ids_[tag]
table.setName(string, nameID, platformID, platEncID, langID)
+ table.names.sort()
def build_OS_2(self):
if not self.os2_:
@@ -768,8 +771,9 @@ class Builder(object):
varidx_map = store.optimize()
gdef.remap_device_varidxes(varidx_map)
- if 'GPOS' in self.font:
- self.font['GPOS'].table.remap_device_varidxes(varidx_map)
+ if "GPOS" in self.font:
+ self.font["GPOS"].table.remap_device_varidxes(varidx_map)
+ self.model_cache.clear()
if any(
(
gdef.GlyphClassDef,
@@ -840,10 +844,15 @@ class Builder(object):
feature=None,
)
lookups.append(lookup)
- try:
- otLookups = [l.build() for l in lookups]
- except OpenTypeLibError as e:
- raise FeatureLibError(str(e), e.location) from e
+ otLookups = []
+ for l in lookups:
+ try:
+ otLookups.append(l.build())
+ except OpenTypeLibError as e:
+ raise FeatureLibError(str(e), e.location) from e
+ except Exception as e:
+ location = self.lookup_locations[tag][str(l.lookup_index)].location
+ raise FeatureLibError(str(e), location) from e
return otLookups
def makeTable(self, tag):
@@ -945,11 +954,7 @@ class Builder(object):
feature_vars = {}
has_any_variations = False
# Sort out which lookups to build, gather their indices
- for (
- script_,
- language,
- feature_tag,
- ), variations in self.feature_variations_.items():
+ for (_, _, feature_tag), variations in self.feature_variations_.items():
feature_vars[feature_tag] = []
for conditionset, builders in variations.items():
raw_conditionset = self.conditionsets_[conditionset]
@@ -1242,7 +1247,7 @@ class Builder(object):
# GSUB 1
def add_single_subst(self, location, prefix, suffix, mapping, forceChain):
if self.cur_feature_name_ == "aalt":
- for (from_glyph, to_glyph) in mapping.items():
+ for from_glyph, to_glyph in mapping.items():
alts = self.aalt_alternates_.setdefault(from_glyph, set())
alts.add(to_glyph)
return
@@ -1250,7 +1255,7 @@ class Builder(object):
self.add_single_subst_chained_(location, prefix, suffix, mapping)
return
lookup = self.get_lookup_(location, SingleSubstBuilder)
- for (from_glyph, to_glyph) in mapping.items():
+ for from_glyph, to_glyph in mapping.items():
if from_glyph in lookup.mapping:
if to_glyph == lookup.mapping[from_glyph]:
log.info(
@@ -1338,7 +1343,9 @@ class Builder(object):
# GSUB 5/6
def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix):
- raise FeatureLibError("Empty glyph class in contextual substitution", location)
+ raise FeatureLibError(
+ "Empty glyph class in contextual substitution", location
+ )
lookup = self.get_lookup_(location, ChainContextSubstBuilder)
lookup.rules.append(
ChainContextualRule(
@@ -1348,10 +1355,13 @@ class Builder(object):
def add_single_subst_chained_(self, location, prefix, suffix, mapping):
if not mapping or not all(prefix) or not all(suffix):
- raise FeatureLibError("Empty glyph class in contextual substitution", location)
+ raise FeatureLibError(
+ "Empty glyph class in contextual substitution", location
+ )
# https://github.com/fonttools/fonttools/issues/512
+ # https://github.com/fonttools/fonttools/issues/2150
chain = self.get_lookup_(location, ChainContextSubstBuilder)
- sub = chain.find_chainable_single_subst(set(mapping.keys()))
+ sub = chain.find_chainable_single_subst(mapping)
if sub is None:
sub = self.get_chained_lookup_(location, SingleSubstBuilder)
sub.mapping.update(mapping)
@@ -1376,8 +1386,12 @@ class Builder(object):
lookup = self.get_lookup_(location, SinglePosBuilder)
for glyphs, value in pos:
if not glyphs:
- raise FeatureLibError("Empty glyph class in positioning rule", location)
- otValueRecord = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
+ raise FeatureLibError(
+ "Empty glyph class in positioning rule", location
+ )
+ otValueRecord = self.makeOpenTypeValueRecord(
+ location, value, pairPosContext=False
+ )
for glyph in glyphs:
try:
lookup.add_pos(location, glyph, otValueRecord)
@@ -1387,9 +1401,7 @@ class Builder(object):
# GPOS 2
def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
if not glyphclass1 or not glyphclass2:
- raise FeatureLibError(
- "Empty glyph class in positioning rule", location
- )
+ raise FeatureLibError("Empty glyph class in positioning rule", location)
lookup = self.get_lookup_(location, PairPosBuilder)
v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
@@ -1457,7 +1469,9 @@ class Builder(object):
# GPOS 7/8
def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix):
- raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
+ raise FeatureLibError(
+ "Empty glyph class in contextual positioning rule", location
+ )
lookup = self.get_lookup_(location, ChainContextPosBuilder)
lookup.rules.append(
ChainContextualRule(
@@ -1467,7 +1481,9 @@ class Builder(object):
def add_single_pos_chained_(self, location, prefix, suffix, pos):
if not pos or not all(prefix) or not all(suffix):
- raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
+ raise FeatureLibError(
+ "Empty glyph class in contextual positioning rule", location
+ )
# https://github.com/fonttools/fonttools/issues/514
chain = self.get_lookup_(location, ChainContextPosBuilder)
targets = []
@@ -1478,7 +1494,9 @@ class Builder(object):
if value is None:
subs.append(None)
continue
- otValue = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
+ otValue = self.makeOpenTypeValueRecord(
+ location, value, pairPosContext=False
+ )
sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
if sub is None:
sub = self.get_chained_lookup_(location, SinglePosBuilder)
@@ -1497,7 +1515,9 @@ class Builder(object):
for markClassDef in markClass.definitions:
for mark in markClassDef.glyphs.glyphSet():
if mark not in lookupBuilder.marks:
- otMarkAnchor = self.makeOpenTypeAnchor(location, markClassDef.anchor)
+ otMarkAnchor = self.makeOpenTypeAnchor(
+ location, markClassDef.anchor
+ )
lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
else:
existingMarkClass = lookupBuilder.marks[mark][0]
@@ -1538,7 +1558,16 @@ class Builder(object):
if glyph not in self.ligCaretPoints_:
self.ligCaretPoints_[glyph] = carets
+ def makeLigCaret(self, location, caret):
+ if not isinstance(caret, VariableScalar):
+ return caret
+ default, device = self.makeVariablePos(location, caret)
+ if device is not None:
+ return (default, device)
+ return default
+
def add_ligatureCaretByPos_(self, location, glyphs, carets):
+ carets = [self.makeLigCaret(location, caret) for caret in carets]
for glyph in glyphs:
if glyph not in self.ligCaretCoords_:
self.ligCaretCoords_[glyph] = carets
@@ -1555,10 +1584,11 @@ class Builder(object):
def add_vhea_field(self, key, value):
self.vhea_[key] = value
- def add_conditionset(self, key, value):
- if not "fvar" in self.font:
+ def add_conditionset(self, location, key, value):
+ if "fvar" not in self.font:
raise FeatureLibError(
- "Cannot add feature variations to a font without an 'fvar' table"
+ "Cannot add feature variations to a font without an 'fvar' table",
+ location,
)
# Normalize
@@ -1575,8 +1605,41 @@ class Builder(object):
for tag, (bottom, top) in value.items()
}
+ # NOTE: This might result in rounding errors (off-by-ones) compared to
+ # rules in Designspace files, since we're working with what's in the
+ # `avar` table rather than the original values.
+ if "avar" in self.font:
+ mapping = self.font["avar"].segments
+ value = {
+ axis: tuple(
+ piecewiseLinearMap(v, mapping[axis]) if axis in mapping else v
+ for v in condition_range
+ )
+ for axis, condition_range in value.items()
+ }
+
self.conditionsets_[key] = value
+ def makeVariablePos(self, location, varscalar):
+ if not self.varstorebuilder:
+ raise FeatureLibError(
+ "Can't define a variable scalar in a non-variable font", location
+ )
+
+ varscalar.axes = self.axes
+ if not varscalar.does_vary:
+ return varscalar.default, None
+
+ default, index = varscalar.add_to_variation_store(
+ self.varstorebuilder, self.model_cache, self.font.get("avar")
+ )
+
+ device = None
+ if index is not None and index != 0xFFFFFFFF:
+ device = buildVarDevTable(index)
+
+ return default, device
+
def makeOpenTypeAnchor(self, location, anchor):
"""ast.Anchor --> otTables.Anchor"""
if anchor is None:
@@ -1588,24 +1651,25 @@ class Builder(object):
if anchor.yDeviceTable is not None:
deviceY = otl.buildDevice(dict(anchor.yDeviceTable))
for dim in ("x", "y"):
- if not isinstance(getattr(anchor, dim), VariableScalar):
+ varscalar = getattr(anchor, dim)
+ if not isinstance(varscalar, VariableScalar):
continue
- if getattr(anchor, dim+"DeviceTable") is not None:
- raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
- if not self.varstorebuilder:
- raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
- varscalar = getattr(anchor,dim)
- varscalar.axes = self.axes
- default, index = varscalar.add_to_variation_store(self.varstorebuilder)
+ if getattr(anchor, dim + "DeviceTable") is not None:
+ raise FeatureLibError(
+ "Can't define a device coordinate and variable scalar", location
+ )
+ default, device = self.makeVariablePos(location, varscalar)
setattr(anchor, dim, default)
- if index is not None and index != 0xFFFFFFFF:
+ if device is not None:
if dim == "x":
- deviceX = buildVarDevTable(index)
+ deviceX = device
else:
- deviceY = buildVarDevTable(index)
+ deviceY = device
variable = True
- otlanchor = otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY)
+ otlanchor = otl.buildAnchor(
+ anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY
+ )
if variable:
otlanchor.Format = 3
return otlanchor
@@ -1616,14 +1680,12 @@ class Builder(object):
if not name.startswith("Reserved")
}
-
def makeOpenTypeValueRecord(self, location, v, pairPosContext):
"""ast.ValueRecord --> otBase.ValueRecord"""
if not v:
return None
vr = {}
- variable = False
for astName, (otName, isDevice) in self._VALUEREC_ATTRS.items():
val = getattr(v, astName, None)
if not val:
@@ -1634,15 +1696,12 @@ class Builder(object):
otDeviceName = otName[0:4] + "Device"
feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:]
if getattr(v, feaDeviceName):
- raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
- if not self.varstorebuilder:
- raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
- val.axes = self.axes
- default, index = val.add_to_variation_store(self.varstorebuilder)
- vr[otName] = default
- if index is not None and index != 0xFFFFFFFF:
- vr[otDeviceName] = buildVarDevTable(index)
- variable = True
+ raise FeatureLibError(
+ "Can't define a device coordinate and variable scalar", location
+ )
+ vr[otName], device = self.makeVariablePos(location, val)
+ if device is not None:
+ vr[otDeviceName] = device
else:
vr[otName] = val
diff --git a/Lib/fontTools/feaLib/lexer.py b/Lib/fontTools/feaLib/lexer.py
index 140fbd82..e0ae0aef 100644
--- a/Lib/fontTools/feaLib/lexer.py
+++ b/Lib/fontTools/feaLib/lexer.py
@@ -3,6 +3,12 @@ from fontTools.feaLib.location import FeatureLibLocation
import re
import os
+try:
+ import cython
+except ImportError:
+ # if cython not installed, use mock module with no-op decorators and types
+ from fontTools.misc import cython
+
class Lexer(object):
NUMBER = "NUMBER"
@@ -191,7 +197,7 @@ class IncludingLexer(object):
"""A Lexer that follows include statements.
The OpenType feature file specification states that due to
- historical reasons, relative imports should be resolved in this
+ historical reasons, relative imports should be resolved in this
order:
1. If the source font is UFO format, then relative to the UFO's
diff --git a/Lib/fontTools/feaLib/lookupDebugInfo.py b/Lib/fontTools/feaLib/lookupDebugInfo.py
index 876cadff..d4da7de0 100644
--- a/Lib/fontTools/feaLib/lookupDebugInfo.py
+++ b/Lib/fontTools/feaLib/lookupDebugInfo.py
@@ -1,7 +1,8 @@
from typing import NamedTuple
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
-LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
+LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
+
class LookupDebugInfo(NamedTuple):
"""Information about where a lookup came from, to be embedded in a font"""
diff --git a/Lib/fontTools/feaLib/parser.py b/Lib/fontTools/feaLib/parser.py
index 04ff6030..8ffdf644 100644
--- a/Lib/fontTools/feaLib/parser.py
+++ b/Lib/fontTools/feaLib/parser.py
@@ -45,7 +45,6 @@ class Parser(object):
def __init__(
self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs
):
-
if "glyphMap" in kwargs:
from fontTools.misc.loggingTools import deprecateArgument
@@ -134,7 +133,8 @@ class Parser(object):
]
raise FeatureLibError(
"The following glyph names are referenced but are missing from the "
- "glyph set:\n" + ("\n".join(error)), None
+ "glyph set:\n" + ("\n".join(error)),
+ None,
)
return self.doc_
@@ -396,7 +396,8 @@ class Parser(object):
self.expect_symbol_("-")
range_end = self.expect_cid_()
self.check_glyph_name_in_glyph_set(
- f"cid{range_start:05d}", f"cid{range_end:05d}",
+ f"cid{range_start:05d}",
+ f"cid{range_end:05d}",
)
glyphs.add_cid_range(
range_start,
@@ -522,27 +523,33 @@ class Parser(object):
)
return (prefix, glyphs, lookups, values, suffix, hasMarks)
- def parse_chain_context_(self):
+ def parse_ignore_glyph_pattern_(self, sub):
location = self.cur_token_location_
prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
vertical=False
)
- chainContext = [(prefix, glyphs, suffix)]
- hasLookups = any(lookups)
+ if any(lookups):
+ raise FeatureLibError(
+ f'No lookups can be specified for "ignore {sub}"', location
+ )
+ if not hasMarks:
+ error = FeatureLibError(
+ f'Ambiguous "ignore {sub}", there should be least one marked glyph',
+ location,
+ )
+ log.warning(str(error))
+ suffix, glyphs = glyphs[1:], glyphs[0:1]
+ chainContext = (prefix, glyphs, suffix)
+ return chainContext
+
+ def parse_ignore_context_(self, sub):
+ location = self.cur_token_location_
+ chainContext = [self.parse_ignore_glyph_pattern_(sub)]
while self.next_token_ == ",":
self.expect_symbol_(",")
- (
- prefix,
- glyphs,
- lookups,
- values,
- suffix,
- hasMarks,
- ) = self.parse_glyph_pattern_(vertical=False)
- chainContext.append((prefix, glyphs, suffix))
- hasLookups = hasLookups or any(lookups)
+ chainContext.append(self.parse_ignore_glyph_pattern_(sub))
self.expect_symbol_(";")
- return chainContext, hasLookups
+ return chainContext
def parse_ignore_(self):
# Parses an ignore sub/pos rule.
@@ -550,18 +557,10 @@ class Parser(object):
location = self.cur_token_location_
self.advance_lexer_()
if self.cur_token_ in ["substitute", "sub"]:
- chainContext, hasLookups = self.parse_chain_context_()
- if hasLookups:
- raise FeatureLibError(
- 'No lookups can be specified for "ignore sub"', location
- )
+ chainContext = self.parse_ignore_context_("sub")
return self.ast.IgnoreSubstStatement(chainContext, location=location)
if self.cur_token_ in ["position", "pos"]:
- chainContext, hasLookups = self.parse_chain_context_()
- if hasLookups:
- raise FeatureLibError(
- 'No lookups can be specified for "ignore pos"', location
- )
+ chainContext = self.parse_ignore_context_("pos")
return self.ast.IgnorePosStatement(chainContext, location=location)
raise FeatureLibError(
'Expected "substitute" or "position"', self.cur_token_location_
@@ -603,9 +602,9 @@ class Parser(object):
assert self.is_cur_keyword_("LigatureCaretByPos")
location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True)
- carets = [self.expect_number_()]
+ carets = [self.expect_number_(variable=True)]
while self.next_token_ != ";":
- carets.append(self.expect_number_())
+ carets.append(self.expect_number_(variable=True))
self.expect_symbol_(";")
return self.ast.LigatureCaretByPosStatement(glyphs, carets, location=location)
@@ -696,7 +695,9 @@ class Parser(object):
location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True)
if not glyphs.glyphSet():
- raise FeatureLibError("Empty glyph class in mark class definition", location)
+ raise FeatureLibError(
+ "Empty glyph class in mark class definition", location
+ )
anchor = self.parse_anchor_()
name = self.expect_class_name_()
self.expect_symbol_(";")
@@ -923,22 +924,27 @@ class Parser(object):
# GSUB lookup type 2: Multiple substitution.
# Format: "substitute f_f_i by f f i;"
- if (
- not reverse
- and len(old) == 1
- and len(old[0].glyphSet()) == 1
- and len(new) > 1
- and max([len(n.glyphSet()) for n in new]) == 1
- and num_lookups == 0
- ):
+ #
+ # GlyphsApp introduces two additional formats:
+ # Format 1: "substitute [f_i f_l] by [f f] [i l];"
+ # Format 2: "substitute [f_i f_l] by f [i l];"
+ # http://handbook.glyphsapp.com/en/layout/multiple-substitution-with-classes/
+ if not reverse and len(old) == 1 and len(new) > 1 and num_lookups == 0:
+ count = len(old[0].glyphSet())
for n in new:
if not list(n.glyphSet()):
raise FeatureLibError("Empty class in replacement", location)
+ if len(n.glyphSet()) != 1 and len(n.glyphSet()) != count:
+ raise FeatureLibError(
+ f'Expected a glyph class with 1 or {count} elements after "by", '
+ f"but found a glyph class with {len(n.glyphSet())} elements",
+ location,
+ )
return self.ast.MultipleSubstStatement(
old_prefix,
- tuple(old[0].glyphSet())[0],
+ old[0],
old_suffix,
- tuple([list(n.glyphSet())[0] for n in new]),
+ new,
forceChain=hasMarks,
location=location,
)
@@ -1747,7 +1753,8 @@ class Parser(object):
def parse_featureNames_(self, tag):
"""Parses a ``featureNames`` statement found in stylistic set features.
- See section `8.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.c>`_."""
+ See section `8.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.c>`_.
+ """
assert self.cur_token_ == "featureNames", self.cur_token_
block = self.ast.NestedBlock(
tag, self.cur_token_, location=self.cur_token_location_
diff --git a/Lib/fontTools/feaLib/variableScalar.py b/Lib/fontTools/feaLib/variableScalar.py
index a286568e..c97b4354 100644
--- a/Lib/fontTools/feaLib/variableScalar.py
+++ b/Lib/fontTools/feaLib/variableScalar.py
@@ -1,4 +1,4 @@
-from fontTools.varLib.models import VariationModel, normalizeValue
+from fontTools.varLib.models import VariationModel, normalizeValue, piecewiseLinearMap
def Location(loc):
@@ -74,24 +74,39 @@ class VariableScalar:
# I *guess* we could interpolate one, but I don't know how.
return self.values[key]
- def value_at_location(self, location):
+ def value_at_location(self, location, model_cache=None, avar=None):
loc = location
if loc in self.values.keys():
return self.values[loc]
values = list(self.values.values())
- return self.model.interpolateFromMasters(loc, values)
+ return self.model(model_cache, avar).interpolateFromMasters(loc, values)
- @property
- def model(self):
+ def model(self, model_cache=None, avar=None):
+ if model_cache is not None:
+ key = tuple(self.values.keys())
+ if key in model_cache:
+ return model_cache[key]
locations = [dict(self._normalized_location(k)) for k in self.values.keys()]
- return VariationModel(locations)
-
- def get_deltas_and_supports(self):
+ if avar is not None:
+ mapping = avar.segments
+ locations = [
+ {
+ k: piecewiseLinearMap(v, mapping[k]) if k in mapping else v
+ for k, v in location.items()
+ }
+ for location in locations
+ ]
+ m = VariationModel(locations)
+ if model_cache is not None:
+ model_cache[key] = m
+ return m
+
+ def get_deltas_and_supports(self, model_cache=None, avar=None):
values = list(self.values.values())
- return self.model.getDeltasAndSupports(values)
+ return self.model(model_cache, avar).getDeltasAndSupports(values)
- def add_to_variation_store(self, store_builder):
- deltas, supports = self.get_deltas_and_supports()
+ def add_to_variation_store(self, store_builder, model_cache=None, avar=None):
+ deltas, supports = self.get_deltas_and_supports(model_cache, avar)
store_builder.setSupports(supports)
index = store_builder.storeDeltas(deltas)
return int(self.default), index
diff --git a/Lib/fontTools/fontBuilder.py b/Lib/fontTools/fontBuilder.py
index 60382683..dd57a050 100644
--- a/Lib/fontTools/fontBuilder.py
+++ b/Lib/fontTools/fontBuilder.py
@@ -131,6 +131,8 @@ fb.save("test.otf")
from .ttLib import TTFont, newTable
from .ttLib.tables._c_m_a_p import cmap_classes
+from .ttLib.tables._g_l_y_f import flagCubic
+from .ttLib.tables.O_S_2f_2 import Panose
from .misc.timeTools import timestampNow
import struct
from collections import OrderedDict
@@ -262,18 +264,7 @@ _nameIDs = dict(
# to insert in setupNameTable doc string:
# print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1])))
-_panoseDefaults = dict(
- bFamilyType=0,
- bSerifStyle=0,
- bWeight=0,
- bProportion=0,
- bContrast=0,
- bStrokeVariation=0,
- bArmStyle=0,
- bLetterForm=0,
- bMidline=0,
- bXHeight=0,
-)
+_panoseDefaults = Panose()
_OS2Defaults = dict(
version=3,
@@ -319,7 +310,7 @@ _OS2Defaults = dict(
class FontBuilder(object):
- def __init__(self, unitsPerEm=None, font=None, isTTF=True):
+ def __init__(self, unitsPerEm=None, font=None, isTTF=True, glyphDataFormat=0):
"""Initialize a FontBuilder instance.
If the `font` argument is not given, a new `TTFont` will be
@@ -327,15 +318,31 @@ class FontBuilder(object):
the font will be a glyf-based TTF; if `isTTF` is False it will be
a CFF-based OTF.
+ The `glyphDataFormat` argument corresponds to the `head` table field
+ that defines the format of the TrueType `glyf` table (default=0).
+ TrueType glyphs historically can only contain quadratic splines and static
+ components, but there's a proposal to add support for cubic Bezier curves as well
+ as variable composites/components at
+ https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md
+ You can experiment with the new features by setting `glyphDataFormat` to 1.
+ A ValueError is raised if `glyphDataFormat` is left at 0 but glyphs are added
+ that contain cubic splines or varcomposites. This is to prevent accidentally
+ creating fonts that are incompatible with existing TrueType implementations.
+
If `font` is given, it must be a `TTFont` instance and `unitsPerEm`
- must _not_ be given. The `isTTF` argument will be ignored.
+ must _not_ be given. The `isTTF` and `glyphDataFormat` arguments will be ignored.
"""
if font is None:
self.font = TTFont(recalcTimestamp=False)
self.isTTF = isTTF
now = timestampNow()
assert unitsPerEm is not None
- self.setupHead(unitsPerEm=unitsPerEm, created=now, modified=now)
+ self.setupHead(
+ unitsPerEm=unitsPerEm,
+ created=now,
+ modified=now,
+ glyphDataFormat=glyphDataFormat,
+ )
self.setupMaxp()
else:
assert unitsPerEm is None
@@ -391,7 +398,7 @@ class FontBuilder(object):
sequence, but this is not policed.
"""
subTables = []
- highestUnicode = max(cmapping)
+ highestUnicode = max(cmapping) if cmapping else 0
if highestUnicode > 0xFFFF:
cmapping_3_1 = dict((k, v) for k, v in cmapping.items() if k < 0x10000)
subTable_3_10 = buildCmapSubTable(cmapping, 12, 3, 10)
@@ -631,7 +638,7 @@ class FontBuilder(object):
for fontDict in topDict.FDArray:
fontDict.Private.vstore = vstore
- def setupGlyf(self, glyphs, calcGlyphBounds=True):
+ def setupGlyf(self, glyphs, calcGlyphBounds=True, validateGlyphFormat=True):
"""Create the `glyf` table from a dict, that maps glyph names
to `fontTools.ttLib.tables._g_l_y_f.Glyph` objects, for example
as made by `fontTools.pens.ttGlyphPen.TTGlyphPen`.
@@ -639,8 +646,26 @@ class FontBuilder(object):
If `calcGlyphBounds` is True, the bounds of all glyphs will be
calculated. Only pass False if your glyph objects already have
their bounding box values set.
+
+ If `validateGlyphFormat` is True, raise ValueError if any of the glyphs contains
+ cubic curves or is a variable composite but head.glyphDataFormat=0.
+ Set it to False to skip the check if you know in advance all the glyphs are
+ compatible with the specified glyphDataFormat.
"""
assert self.isTTF
+
+ if validateGlyphFormat and self.font["head"].glyphDataFormat == 0:
+ for name, g in glyphs.items():
+ if g.isVarComposite():
+ raise ValueError(
+ f"Glyph {name!r} is a variable composite, but glyphDataFormat=0"
+ )
+ elif g.numberOfContours > 0 and any(f & flagCubic for f in g.flags):
+ raise ValueError(
+ f"Glyph {name!r} has cubic Bezier outlines, but glyphDataFormat=0; "
+ "either convert to quadratics with cu2qu or set glyphDataFormat=1."
+ )
+
self.font["loca"] = newTable("loca")
self.font["glyf"] = newTable("glyf")
self.font["glyf"].glyphs = glyphs
@@ -672,7 +697,7 @@ class FontBuilder(object):
addFvar(self.font, axes, instances)
- def setupAvar(self, axes):
+ def setupAvar(self, axes, mappings=None):
"""Adds an axis variations table to the font.
Args:
@@ -680,7 +705,12 @@ class FontBuilder(object):
"""
from .varLib import _add_avar
- _add_avar(self.font, OrderedDict(enumerate(axes))) # Only values are used
+ if "fvar" not in self.font:
+ raise KeyError("'fvar' table is missing; can't add 'avar'.")
+
+ axisTags = [axis.axisTag for axis in self.font["fvar"].axes]
+ axes = OrderedDict(enumerate(axes)) # Only values are used
+ _add_avar(self.font, axes, mappings, axisTags)
def setupGvar(self, variations):
gvar = self.font["gvar"] = newTable("gvar")
@@ -800,7 +830,7 @@ class FontBuilder(object):
)
self._initTableWithValues("DSIG", {}, values)
- def addOpenTypeFeatures(self, features, filename=None, tables=None):
+ def addOpenTypeFeatures(self, features, filename=None, tables=None, debug=False):
"""Add OpenType features to the font from a string containing
Feature File syntax.
@@ -810,11 +840,14 @@ class FontBuilder(object):
The optional `tables` argument can be a list of OTL tables tags to
build, allowing the caller to only build selected OTL tables. See
`fontTools.feaLib` for details.
+
+ The optional `debug` argument controls whether to add source debugging
+ information to the font in the `Debg` table.
"""
from .feaLib.builder import addOpenTypeFeaturesFromString
addOpenTypeFeaturesFromString(
- self.font, features, filename=filename, tables=tables
+ self.font, features, filename=filename, tables=tables, debug=debug
)
def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"):
@@ -928,6 +961,8 @@ def addFvar(font, axes, instances):
axis_def.maximum,
axis_def.name,
)
+ if axis_def.hidden:
+ axis.flags = 0x0001 # HIDDEN_AXIS
if isinstance(name, str):
name = dict(en=name)
diff --git a/Lib/fontTools/help.py b/Lib/fontTools/help.py
index 4334e500..2a238de3 100644
--- a/Lib/fontTools/help.py
+++ b/Lib/fontTools/help.py
@@ -27,7 +27,7 @@ def main():
except AttributeError as e:
pass
for pkg, description in descriptions.items():
- print("fonttools %-12s %s" % (pkg, description), file=sys.stderr)
+ print("fonttools %-25s %s" % (pkg, description), file=sys.stderr)
if __name__ == "__main__":
diff --git a/Lib/fontTools/merge/__init__.py b/Lib/fontTools/merge/__init__.py
index 97106489..8d8a5213 100644
--- a/Lib/fontTools/merge/__init__.py
+++ b/Lib/fontTools/merge/__init__.py
@@ -4,7 +4,11 @@
from fontTools import ttLib
import fontTools.merge.base
-from fontTools.merge.cmap import computeMegaGlyphOrder, computeMegaCmap, renameCFFCharStrings
+from fontTools.merge.cmap import (
+ computeMegaGlyphOrder,
+ computeMegaCmap,
+ renameCFFCharStrings,
+)
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options
import fontTools.merge.tables
@@ -15,191 +19,192 @@ import logging
log = logging.getLogger("fontTools.merge")
-timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO)
+timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
class Merger(object):
- """Font merger.
-
- This class merges multiple files into a single OpenType font, taking into
- account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
- cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
- all the fonts).
+ """Font merger.
+
+ This class merges multiple files into a single OpenType font, taking into
+ account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
+ cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
+ all the fonts).
+
+ If multiple glyphs map to the same Unicode value, and the glyphs are considered
+ sufficiently different (that is, they differ in any of paths, widths, or
+ height), then subsequent glyphs are renamed and a lookup in the ``locl``
+ feature will be created to disambiguate them. For example, if the arguments
+ are an Arabic font and a Latin font and both contain a set of parentheses,
+ the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
+ and a lookup will be inserted into the to ``locl`` feature (creating it if
+ necessary) under the ``latn`` script to substitute ``parenleft`` with
+ ``parenleft#1`` etc.
+
+ Restrictions:
+
+ - All fonts must have the same units per em.
+ - If duplicate glyph disambiguation takes place as described above then the
+ fonts must have a ``GSUB`` table.
+
+ Attributes:
+ options: Currently unused.
+ """
+
+ def __init__(self, options=None):
+ if not options:
+ options = Options()
+
+ self.options = options
+
+ def _openFonts(self, fontfiles):
+ fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
+ for font, fontfile in zip(fonts, fontfiles):
+ font._merger__fontfile = fontfile
+ font._merger__name = font["name"].getDebugName(4)
+ return fonts
+
+ def merge(self, fontfiles):
+ """Merges fonts together.
+
+ Args:
+ fontfiles: A list of file names to be merged
+
+ Returns:
+ A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
+ this to write it out to an OTF file.
+ """
+ #
+ # Settle on a mega glyph order.
+ #
+ fonts = self._openFonts(fontfiles)
+ glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
+ computeMegaGlyphOrder(self, glyphOrders)
+
+ # Take first input file sfntVersion
+ sfntVersion = fonts[0].sfntVersion
+
+ # Reload fonts and set new glyph names on them.
+ fonts = self._openFonts(fontfiles)
+ for font, glyphOrder in zip(fonts, glyphOrders):
+ font.setGlyphOrder(glyphOrder)
+ if "CFF " in font:
+ renameCFFCharStrings(self, glyphOrder, font["CFF "])
+
+ cmaps = [font["cmap"] for font in fonts]
+ self.duplicateGlyphsPerFont = [{} for _ in fonts]
+ computeMegaCmap(self, cmaps)
+
+ mega = ttLib.TTFont(sfntVersion=sfntVersion)
+ mega.setGlyphOrder(self.glyphOrder)
+
+ for font in fonts:
+ self._preMerge(font)
+
+ self.fonts = fonts
+
+ allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
+ allTags.remove("GlyphOrder")
+
+ for tag in sorted(allTags):
+ if tag in self.options.drop_tables:
+ continue
+
+ with timer("merge '%s'" % tag):
+ tables = [font.get(tag, NotImplemented) for font in fonts]
+
+ log.info("Merging '%s'.", tag)
+ clazz = ttLib.getTableClass(tag)
+ table = clazz(tag).merge(self, tables)
+ # XXX Clean this up and use: table = mergeObjects(tables)
+
+ if table is not NotImplemented and table is not False:
+ mega[tag] = table
+ log.info("Merged '%s'.", tag)
+ else:
+ log.info("Dropped '%s'.", tag)
+
+ del self.duplicateGlyphsPerFont
+ del self.fonts
+
+ self._postMerge(mega)
+
+ return mega
+
+ def mergeObjects(self, returnTable, logic, tables):
+ # Right now we don't use self at all. Will use in the future
+ # for options and logging.
+
+ allKeys = set.union(
+ set(),
+ *(vars(table).keys() for table in tables if table is not NotImplemented),
+ )
+ for key in allKeys:
+ try:
+ mergeLogic = logic[key]
+ except KeyError:
+ try:
+ mergeLogic = logic["*"]
+ except KeyError:
+ raise Exception(
+ "Don't know how to merge key %s of class %s"
+ % (key, returnTable.__class__.__name__)
+ )
+ if mergeLogic is NotImplemented:
+ continue
+ value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
+ if value is not NotImplemented:
+ setattr(returnTable, key, value)
+
+ return returnTable
+
+ def _preMerge(self, font):
+ layoutPreMerge(font)
+
+ def _postMerge(self, font):
+ layoutPostMerge(font)
+
+ if "OS/2" in font:
+ # https://github.com/fonttools/fonttools/issues/2538
+ # TODO: Add an option to disable this?
+ font["OS/2"].recalcAvgCharWidth(font)
+
+
+__all__ = ["Options", "Merger", "main"]
- If multiple glyphs map to the same Unicode value, and the glyphs are considered
- sufficiently different (that is, they differ in any of paths, widths, or
- height), then subsequent glyphs are renamed and a lookup in the ``locl``
- feature will be created to disambiguate them. For example, if the arguments
- are an Arabic font and a Latin font and both contain a set of parentheses,
- the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
- and a lookup will be inserted into the to ``locl`` feature (creating it if
- necessary) under the ``latn`` script to substitute ``parenleft`` with
- ``parenleft#1`` etc.
-
- Restrictions:
-
- - All fonts must have the same units per em.
- - If duplicate glyph disambiguation takes place as described above then the
- fonts must have a ``GSUB`` table.
-
- Attributes:
- options: Currently unused.
- """
-
- def __init__(self, options=None):
-
- if not options:
- options = Options()
-
- self.options = options
-
- def _openFonts(self, fontfiles):
- fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
- for font,fontfile in zip(fonts, fontfiles):
- font._merger__fontfile = fontfile
- font._merger__name = font['name'].getDebugName(4)
- return fonts
-
- def merge(self, fontfiles):
- """Merges fonts together.
-
- Args:
- fontfiles: A list of file names to be merged
-
- Returns:
- A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
- this to write it out to an OTF file.
- """
- #
- # Settle on a mega glyph order.
- #
- fonts = self._openFonts(fontfiles)
- glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
- computeMegaGlyphOrder(self, glyphOrders)
-
- # Take first input file sfntVersion
- sfntVersion = fonts[0].sfntVersion
-
- # Reload fonts and set new glyph names on them.
- fonts = self._openFonts(fontfiles)
- for font,glyphOrder in zip(fonts, glyphOrders):
- font.setGlyphOrder(glyphOrder)
- if 'CFF ' in font:
- renameCFFCharStrings(self, glyphOrder, font['CFF '])
-
- cmaps = [font['cmap'] for font in fonts]
- self.duplicateGlyphsPerFont = [{} for _ in fonts]
- computeMegaCmap(self, cmaps)
-
- mega = ttLib.TTFont(sfntVersion=sfntVersion)
- mega.setGlyphOrder(self.glyphOrder)
-
- for font in fonts:
- self._preMerge(font)
-
- self.fonts = fonts
-
- allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
- allTags.remove('GlyphOrder')
-
- for tag in allTags:
- if tag in self.options.drop_tables:
- continue
-
- with timer("merge '%s'" % tag):
- tables = [font.get(tag, NotImplemented) for font in fonts]
-
- log.info("Merging '%s'.", tag)
- clazz = ttLib.getTableClass(tag)
- table = clazz(tag).merge(self, tables)
- # XXX Clean this up and use: table = mergeObjects(tables)
-
- if table is not NotImplemented and table is not False:
- mega[tag] = table
- log.info("Merged '%s'.", tag)
- else:
- log.info("Dropped '%s'.", tag)
-
- del self.duplicateGlyphsPerFont
- del self.fonts
-
- self._postMerge(mega)
-
- return mega
-
- def mergeObjects(self, returnTable, logic, tables):
- # Right now we don't use self at all. Will use in the future
- # for options and logging.
-
- allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
- for key in allKeys:
- try:
- mergeLogic = logic[key]
- except KeyError:
- try:
- mergeLogic = logic['*']
- except KeyError:
- raise Exception("Don't know how to merge key %s of class %s" %
- (key, returnTable.__class__.__name__))
- if mergeLogic is NotImplemented:
- continue
- value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
- if value is not NotImplemented:
- setattr(returnTable, key, value)
-
- return returnTable
-
- def _preMerge(self, font):
- layoutPreMerge(font)
-
- def _postMerge(self, font):
- layoutPostMerge(font)
-
- if "OS/2" in font:
- # https://github.com/fonttools/fonttools/issues/2538
- # TODO: Add an option to disable this?
- font["OS/2"].recalcAvgCharWidth(font)
-
-
-__all__ = [
- 'Options',
- 'Merger',
- 'main'
-]
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
- """Merge multiple fonts into one"""
- from fontTools import configLogger
-
- if args is None:
- args = sys.argv[1:]
-
- options = Options()
- args = options.parse_opts(args, ignore_unknown=['output-file'])
- outfile = 'merged.ttf'
- fontfiles = []
- for g in args:
- if g.startswith('--output-file='):
- outfile = g[14:]
- continue
- fontfiles.append(g)
-
- if len(args) < 1:
- print("usage: pyftmerge font...", file=sys.stderr)
- return 1
-
- configLogger(level=logging.INFO if options.verbose else logging.WARNING)
- if options.timing:
- timer.logger.setLevel(logging.DEBUG)
- else:
- timer.logger.disabled = True
-
- merger = Merger(options=options)
- font = merger.merge(fontfiles)
- with timer("compile and save font"):
- font.save(outfile)
+ """Merge multiple fonts into one"""
+ from fontTools import configLogger
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = Options()
+ args = options.parse_opts(args, ignore_unknown=["output-file"])
+ outfile = "merged.ttf"
+ fontfiles = []
+ for g in args:
+ if g.startswith("--output-file="):
+ outfile = g[14:]
+ continue
+ fontfiles.append(g)
+
+ if len(args) < 1:
+ print("usage: pyftmerge font...", file=sys.stderr)
+ return 1
+
+ configLogger(level=logging.INFO if options.verbose else logging.WARNING)
+ if options.timing:
+ timer.logger.setLevel(logging.DEBUG)
+ else:
+ timer.logger.disabled = True
+
+ merger = Merger(options=options)
+ font = merger.merge(fontfiles)
+ with timer("compile and save font"):
+ font.save(outfile)
if __name__ == "__main__":
- sys.exit(main())
+ sys.exit(main())
diff --git a/Lib/fontTools/merge/__main__.py b/Lib/fontTools/merge/__main__.py
index 623ca7d2..ff632d49 100644
--- a/Lib/fontTools/merge/__main__.py
+++ b/Lib/fontTools/merge/__main__.py
@@ -2,5 +2,5 @@ import sys
from fontTools.merge import main
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/Lib/fontTools/merge/base.py b/Lib/fontTools/merge/base.py
index 868b51a4..37f9097a 100644
--- a/Lib/fontTools/merge/base.py
+++ b/Lib/fontTools/merge/base.py
@@ -10,67 +10,72 @@ log = logging.getLogger("fontTools.merge")
def add_method(*clazzes, **kwargs):
- """Returns a decorator function that adds a new method to one or
- more classes."""
- allowDefault = kwargs.get('allowDefaultTable', False)
- def wrapper(method):
- done = []
- for clazz in clazzes:
- if clazz in done: continue # Support multiple names of a clazz
- done.append(clazz)
- assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
- assert method.__name__ not in clazz.__dict__, \
- "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
- setattr(clazz, method.__name__, method)
- return None
- return wrapper
+ """Returns a decorator function that adds a new method to one or
+ more classes."""
+ allowDefault = kwargs.get("allowDefaultTable", False)
+
+ def wrapper(method):
+ done = []
+ for clazz in clazzes:
+ if clazz in done:
+ continue # Support multiple names of a clazz
+ done.append(clazz)
+ assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
+ assert (
+ method.__name__ not in clazz.__dict__
+ ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
+ setattr(clazz, method.__name__, method)
+ return None
+
+ return wrapper
+
def mergeObjects(lst):
- lst = [item for item in lst if item is not NotImplemented]
- if not lst:
- return NotImplemented
- lst = [item for item in lst if item is not None]
- if not lst:
- return None
-
- clazz = lst[0].__class__
- assert all(type(item) == clazz for item in lst), lst
-
- logic = clazz.mergeMap
- returnTable = clazz()
- returnDict = {}
-
- allKeys = set.union(set(), *(vars(table).keys() for table in lst))
- for key in allKeys:
- try:
- mergeLogic = logic[key]
- except KeyError:
- try:
- mergeLogic = logic['*']
- except KeyError:
- raise Exception("Don't know how to merge key %s of class %s" %
- (key, clazz.__name__))
- if mergeLogic is NotImplemented:
- continue
- value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
- if value is not NotImplemented:
- returnDict[key] = value
-
- returnTable.__dict__ = returnDict
-
- return returnTable
+ lst = [item for item in lst if item is not NotImplemented]
+ if not lst:
+ return NotImplemented
+ lst = [item for item in lst if item is not None]
+ if not lst:
+ return None
+
+ clazz = lst[0].__class__
+ assert all(type(item) == clazz for item in lst), lst
+
+ logic = clazz.mergeMap
+ returnTable = clazz()
+ returnDict = {}
+
+ allKeys = set.union(set(), *(vars(table).keys() for table in lst))
+ for key in allKeys:
+ try:
+ mergeLogic = logic[key]
+ except KeyError:
+ try:
+ mergeLogic = logic["*"]
+ except KeyError:
+ raise Exception(
+ "Don't know how to merge key %s of class %s" % (key, clazz.__name__)
+ )
+ if mergeLogic is NotImplemented:
+ continue
+ value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
+ if value is not NotImplemented:
+ returnDict[key] = value
+
+ returnTable.__dict__ = returnDict
+
+ return returnTable
+
@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
- if not hasattr(self, 'mergeMap'):
- log.info("Don't know how to merge '%s'.", self.tableTag)
- return NotImplemented
-
- logic = self.mergeMap
-
- if isinstance(logic, dict):
- return m.mergeObjects(self, self.mergeMap, tables)
- else:
- return logic(tables)
+ if not hasattr(self, "mergeMap"):
+ log.info("Don't know how to merge '%s'.", self.tableTag)
+ return NotImplemented
+ logic = self.mergeMap
+ if isinstance(logic, dict):
+ return m.mergeObjects(self, self.mergeMap, tables)
+ else:
+ return logic(tables)
diff --git a/Lib/fontTools/merge/cmap.py b/Lib/fontTools/merge/cmap.py
index 7d98b588..3209a5d7 100644
--- a/Lib/fontTools/merge/cmap.py
+++ b/Lib/fontTools/merge/cmap.py
@@ -11,119 +11,131 @@ log = logging.getLogger("fontTools.merge")
def computeMegaGlyphOrder(merger, glyphOrders):
- """Modifies passed-in glyphOrders to reflect new glyph names.
+ """Modifies passed-in glyphOrders to reflect new glyph names.
Stores merger.glyphOrder."""
- megaOrder = {}
- for glyphOrder in glyphOrders:
- for i,glyphName in enumerate(glyphOrder):
- if glyphName in megaOrder:
- n = megaOrder[glyphName]
- while (glyphName + "." + repr(n)) in megaOrder:
- n += 1
- megaOrder[glyphName] = n
- glyphName += "." + repr(n)
- glyphOrder[i] = glyphName
- megaOrder[glyphName] = 1
- merger.glyphOrder = megaOrder = list(megaOrder.keys())
-
-
-def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2,
- advanceTolerance=.05,
- advanceToleranceEmpty=.20):
- pen1 = DecomposingRecordingPen(glyphSet1)
- pen2 = DecomposingRecordingPen(glyphSet2)
- g1 = glyphSet1[glyph1]
- g2 = glyphSet2[glyph2]
- g1.draw(pen1)
- g2.draw(pen2)
- if pen1.value != pen2.value:
- return False
- # Allow more width tolerance for glyphs with no ink
- tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty
+ megaOrder = {}
+ for glyphOrder in glyphOrders:
+ for i, glyphName in enumerate(glyphOrder):
+ if glyphName in megaOrder:
+ n = megaOrder[glyphName]
+ while (glyphName + "." + repr(n)) in megaOrder:
+ n += 1
+ megaOrder[glyphName] = n
+ glyphName += "." + repr(n)
+ glyphOrder[i] = glyphName
+ megaOrder[glyphName] = 1
+ merger.glyphOrder = megaOrder = list(megaOrder.keys())
+
+
+def _glyphsAreSame(
+ glyphSet1,
+ glyphSet2,
+ glyph1,
+ glyph2,
+ advanceTolerance=0.05,
+ advanceToleranceEmpty=0.20,
+):
+ pen1 = DecomposingRecordingPen(glyphSet1)
+ pen2 = DecomposingRecordingPen(glyphSet2)
+ g1 = glyphSet1[glyph1]
+ g2 = glyphSet2[glyph2]
+ g1.draw(pen1)
+ g2.draw(pen2)
+ if pen1.value != pen2.value:
+ return False
+ # Allow more width tolerance for glyphs with no ink
+ tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty
# TODO Warn if advances not the same but within tolerance.
- if abs(g1.width - g2.width) > g1.width * tolerance:
- return False
- if hasattr(g1, 'height') and g1.height is not None:
- if abs(g1.height - g2.height) > g1.height * tolerance:
- return False
- return True
+ if abs(g1.width - g2.width) > g1.width * tolerance:
+ return False
+ if hasattr(g1, "height") and g1.height is not None:
+ if abs(g1.height - g2.height) > g1.height * tolerance:
+ return False
+ return True
+
# Valid (format, platformID, platEncID) triplets for cmap subtables containing
# Unicode BMP-only and Unicode Full Repertoire semantics.
# Cf. OpenType spec for "Platform specific encodings":
# https://docs.microsoft.com/en-us/typography/opentype/spec/name
class _CmapUnicodePlatEncodings:
- BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
- FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
+ BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
+ FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
+
def computeMegaCmap(merger, cmapTables):
- """Sets merger.cmap and merger.glyphOrder."""
-
- # TODO Handle format=14.
- # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
- # If there is a format 12 table for a font, ignore the format 4 table of it
- chosenCmapTables = []
- for fontIdx,table in enumerate(cmapTables):
- format4 = None
- format12 = None
- for subtable in table.tables:
- properties = (subtable.format, subtable.platformID, subtable.platEncID)
- if properties in _CmapUnicodePlatEncodings.BMP:
- format4 = subtable
- elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
- format12 = subtable
- else:
- log.warning(
- "Dropped cmap subtable from font '%s':\t"
- "format %2s, platformID %2s, platEncID %2s",
- fontIdx, subtable.format, subtable.platformID, subtable.platEncID
- )
- if format12 is not None:
- chosenCmapTables.append((format12, fontIdx))
- elif format4 is not None:
- chosenCmapTables.append((format4, fontIdx))
-
- # Build the unicode mapping
- merger.cmap = cmap = {}
- fontIndexForGlyph = {}
- glyphSets = [None for f in merger.fonts] if hasattr(merger, 'fonts') else None
-
- for table,fontIdx in chosenCmapTables:
- # handle duplicates
- for uni,gid in table.cmap.items():
- oldgid = cmap.get(uni, None)
- if oldgid is None:
- cmap[uni] = gid
- fontIndexForGlyph[gid] = fontIdx
- elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE
- continue
- elif oldgid != gid:
- # Char previously mapped to oldgid, now to gid.
- # Record, to fix up in GSUB 'locl' later.
- if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
- if glyphSets is not None:
- oldFontIdx = fontIndexForGlyph[oldgid]
- for idx in (fontIdx, oldFontIdx):
- if glyphSets[idx] is None:
- glyphSets[idx] = merger.fonts[idx].getGlyphSet()
- #if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
- # continue
- merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
- elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
- # Char previously mapped to oldgid but oldgid is already remapped to a different
- # gid, because of another Unicode character.
- # TODO: Try harder to do something about these.
- log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)
+ """Sets merger.cmap and merger.glyphOrder."""
+
+ # TODO Handle format=14.
+ # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
+ # If there is a format 12 table for a font, ignore the format 4 table of it
+ chosenCmapTables = []
+ for fontIdx, table in enumerate(cmapTables):
+ format4 = None
+ format12 = None
+ for subtable in table.tables:
+ properties = (subtable.format, subtable.platformID, subtable.platEncID)
+ if properties in _CmapUnicodePlatEncodings.BMP:
+ format4 = subtable
+ elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
+ format12 = subtable
+ else:
+ log.warning(
+ "Dropped cmap subtable from font '%s':\t"
+ "format %2s, platformID %2s, platEncID %2s",
+ fontIdx,
+ subtable.format,
+ subtable.platformID,
+ subtable.platEncID,
+ )
+ if format12 is not None:
+ chosenCmapTables.append((format12, fontIdx))
+ elif format4 is not None:
+ chosenCmapTables.append((format4, fontIdx))
+
+ # Build the unicode mapping
+ merger.cmap = cmap = {}
+ fontIndexForGlyph = {}
+ glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None
+
+ for table, fontIdx in chosenCmapTables:
+ # handle duplicates
+ for uni, gid in table.cmap.items():
+ oldgid = cmap.get(uni, None)
+ if oldgid is None:
+ cmap[uni] = gid
+ fontIndexForGlyph[gid] = fontIdx
+ elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE
+ continue
+ elif oldgid != gid:
+ # Char previously mapped to oldgid, now to gid.
+ # Record, to fix up in GSUB 'locl' later.
+ if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
+ if glyphSets is not None:
+ oldFontIdx = fontIndexForGlyph[oldgid]
+ for idx in (fontIdx, oldFontIdx):
+ if glyphSets[idx] is None:
+ glyphSets[idx] = merger.fonts[idx].getGlyphSet()
+ # if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
+ # continue
+ merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
+ elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
+ # Char previously mapped to oldgid but oldgid is already remapped to a different
+ # gid, because of another Unicode character.
+ # TODO: Try harder to do something about these.
+ log.warning(
+ "Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid
+ )
def renameCFFCharStrings(merger, glyphOrder, cffTable):
- """Rename topDictIndex charStrings based on glyphOrder."""
- td = cffTable.cff.topDictIndex[0]
+ """Rename topDictIndex charStrings based on glyphOrder."""
+ td = cffTable.cff.topDictIndex[0]
- charStrings = {}
- for i, v in enumerate(td.CharStrings.charStrings.values()):
- glyphName = glyphOrder[i]
- charStrings[glyphName] = v
- td.CharStrings.charStrings = charStrings
+ charStrings = {}
+ for i, v in enumerate(td.CharStrings.charStrings.values()):
+ glyphName = glyphOrder[i]
+ charStrings[glyphName] = v
+ td.CharStrings.charStrings = charStrings
- td.charset = list(glyphOrder)
+ td.charset = list(glyphOrder)
diff --git a/Lib/fontTools/merge/layout.py b/Lib/fontTools/merge/layout.py
index 4bf01c37..6b85cd50 100644
--- a/Lib/fontTools/merge/layout.py
+++ b/Lib/fontTools/merge/layout.py
@@ -14,453 +14,517 @@ log = logging.getLogger("fontTools.merge")
def mergeLookupLists(lst):
- # TODO Do smarter merge.
- return sumLists(lst)
+ # TODO Do smarter merge.
+ return sumLists(lst)
+
def mergeFeatures(lst):
- assert lst
- self = otTables.Feature()
- self.FeatureParams = None
- self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
- self.LookupCount = len(self.LookupListIndex)
- return self
+ assert lst
+ self = otTables.Feature()
+ self.FeatureParams = None
+ self.LookupListIndex = mergeLookupLists(
+ [l.LookupListIndex for l in lst if l.LookupListIndex]
+ )
+ self.LookupCount = len(self.LookupListIndex)
+ return self
+
def mergeFeatureLists(lst):
- d = {}
- for l in lst:
- for f in l:
- tag = f.FeatureTag
- if tag not in d:
- d[tag] = []
- d[tag].append(f.Feature)
- ret = []
- for tag in sorted(d.keys()):
- rec = otTables.FeatureRecord()
- rec.FeatureTag = tag
- rec.Feature = mergeFeatures(d[tag])
- ret.append(rec)
- return ret
+ d = {}
+ for l in lst:
+ for f in l:
+ tag = f.FeatureTag
+ if tag not in d:
+ d[tag] = []
+ d[tag].append(f.Feature)
+ ret = []
+ for tag in sorted(d.keys()):
+ rec = otTables.FeatureRecord()
+ rec.FeatureTag = tag
+ rec.Feature = mergeFeatures(d[tag])
+ ret.append(rec)
+ return ret
+
def mergeLangSyses(lst):
- assert lst
+ assert lst
- # TODO Support merging ReqFeatureIndex
- assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
+ # TODO Support merging ReqFeatureIndex
+ assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
+
+ self = otTables.LangSys()
+ self.LookupOrder = None
+ self.ReqFeatureIndex = 0xFFFF
+ self.FeatureIndex = mergeFeatureLists(
+ [l.FeatureIndex for l in lst if l.FeatureIndex]
+ )
+ self.FeatureCount = len(self.FeatureIndex)
+ return self
- self = otTables.LangSys()
- self.LookupOrder = None
- self.ReqFeatureIndex = 0xFFFF
- self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
- self.FeatureCount = len(self.FeatureIndex)
- return self
def mergeScripts(lst):
- assert lst
-
- if len(lst) == 1:
- return lst[0]
- langSyses = {}
- for sr in lst:
- for lsr in sr.LangSysRecord:
- if lsr.LangSysTag not in langSyses:
- langSyses[lsr.LangSysTag] = []
- langSyses[lsr.LangSysTag].append(lsr.LangSys)
- lsrecords = []
- for tag, langSys_list in sorted(langSyses.items()):
- lsr = otTables.LangSysRecord()
- lsr.LangSys = mergeLangSyses(langSys_list)
- lsr.LangSysTag = tag
- lsrecords.append(lsr)
-
- self = otTables.Script()
- self.LangSysRecord = lsrecords
- self.LangSysCount = len(lsrecords)
- dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
- if dfltLangSyses:
- self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
- else:
- self.DefaultLangSys = None
- return self
+ assert lst
+
+ if len(lst) == 1:
+ return lst[0]
+ langSyses = {}
+ for sr in lst:
+ for lsr in sr.LangSysRecord:
+ if lsr.LangSysTag not in langSyses:
+ langSyses[lsr.LangSysTag] = []
+ langSyses[lsr.LangSysTag].append(lsr.LangSys)
+ lsrecords = []
+ for tag, langSys_list in sorted(langSyses.items()):
+ lsr = otTables.LangSysRecord()
+ lsr.LangSys = mergeLangSyses(langSys_list)
+ lsr.LangSysTag = tag
+ lsrecords.append(lsr)
+
+ self = otTables.Script()
+ self.LangSysRecord = lsrecords
+ self.LangSysCount = len(lsrecords)
+ dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
+ if dfltLangSyses:
+ self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
+ else:
+ self.DefaultLangSys = None
+ return self
+
def mergeScriptRecords(lst):
- d = {}
- for l in lst:
- for s in l:
- tag = s.ScriptTag
- if tag not in d:
- d[tag] = []
- d[tag].append(s.Script)
- ret = []
- for tag in sorted(d.keys()):
- rec = otTables.ScriptRecord()
- rec.ScriptTag = tag
- rec.Script = mergeScripts(d[tag])
- ret.append(rec)
- return ret
+ d = {}
+ for l in lst:
+ for s in l:
+ tag = s.ScriptTag
+ if tag not in d:
+ d[tag] = []
+ d[tag].append(s.Script)
+ ret = []
+ for tag in sorted(d.keys()):
+ rec = otTables.ScriptRecord()
+ rec.ScriptTag = tag
+ rec.Script = mergeScripts(d[tag])
+ ret.append(rec)
+ return ret
+
otTables.ScriptList.mergeMap = {
- 'ScriptCount': lambda lst: None, # TODO
- 'ScriptRecord': mergeScriptRecords,
+ "ScriptCount": lambda lst: None, # TODO
+ "ScriptRecord": mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
- 'BaseScriptCount': lambda lst: None, # TODO
- # TODO: Merge duplicate entries
- 'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
+ "BaseScriptCount": lambda lst: None, # TODO
+ # TODO: Merge duplicate entries
+ "BaseScriptRecord": lambda lst: sorted(
+ sumLists(lst), key=lambda s: s.BaseScriptTag
+ ),
}
otTables.FeatureList.mergeMap = {
- 'FeatureCount': sum,
- 'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
+ "FeatureCount": sum,
+ "FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}
otTables.LookupList.mergeMap = {
- 'LookupCount': sum,
- 'Lookup': sumLists,
+ "LookupCount": sum,
+ "Lookup": sumLists,
}
otTables.Coverage.mergeMap = {
- 'Format': min,
- 'glyphs': sumLists,
+ "Format": min,
+ "glyphs": sumLists,
}
otTables.ClassDef.mergeMap = {
- 'Format': min,
- 'classDefs': sumDicts,
+ "Format": min,
+ "classDefs": sumDicts,
}
otTables.LigCaretList.mergeMap = {
- 'Coverage': mergeObjects,
- 'LigGlyphCount': sum,
- 'LigGlyph': sumLists,
+ "Coverage": mergeObjects,
+ "LigGlyphCount": sum,
+ "LigGlyph": sumLists,
}
otTables.AttachList.mergeMap = {
- 'Coverage': mergeObjects,
- 'GlyphCount': sum,
- 'AttachPoint': sumLists,
+ "Coverage": mergeObjects,
+ "GlyphCount": sum,
+ "AttachPoint": sumLists,
}
# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
- 'MarkSetTableFormat': equal,
- 'MarkSetCount': sum,
- 'Coverage': sumLists,
+ "MarkSetTableFormat": equal,
+ "MarkSetCount": sum,
+ "Coverage": sumLists,
}
otTables.Axis.mergeMap = {
- '*': mergeObjects,
+ "*": mergeObjects,
}
# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
- 'BaseTagCount': sum,
- 'BaselineTag': sumLists,
+ "BaseTagCount": sum,
+ "BaselineTag": sumLists,
}
-otTables.GDEF.mergeMap = \
-otTables.GSUB.mergeMap = \
-otTables.GPOS.mergeMap = \
-otTables.BASE.mergeMap = \
-otTables.JSTF.mergeMap = \
-otTables.MATH.mergeMap = \
-{
- '*': mergeObjects,
- 'Version': max,
+otTables.GDEF.mergeMap = (
+ otTables.GSUB.mergeMap
+) = (
+ otTables.GPOS.mergeMap
+) = otTables.BASE.mergeMap = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
+ "*": mergeObjects,
+ "Version": max,
}
-ttLib.getTableClass('GDEF').mergeMap = \
-ttLib.getTableClass('GSUB').mergeMap = \
-ttLib.getTableClass('GPOS').mergeMap = \
-ttLib.getTableClass('BASE').mergeMap = \
-ttLib.getTableClass('JSTF').mergeMap = \
-ttLib.getTableClass('MATH').mergeMap = \
-{
- 'tableTag': onlyExisting(equal), # XXX clean me up
- 'table': mergeObjects,
+ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass(
+ "GSUB"
+).mergeMap = ttLib.getTableClass("GPOS").mergeMap = ttLib.getTableClass(
+ "BASE"
+).mergeMap = ttLib.getTableClass(
+ "JSTF"
+).mergeMap = ttLib.getTableClass(
+ "MATH"
+).mergeMap = {
+ "tableTag": onlyExisting(equal), # XXX clean me up
+ "table": mergeObjects,
}
-@add_method(ttLib.getTableClass('GSUB'))
-def merge(self, m, tables):
- assert len(tables) == len(m.duplicateGlyphsPerFont)
- for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
- if not dups: continue
- if table is None or table is NotImplemented:
- log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups)
- continue
-
- synthFeature = None
- synthLookup = None
- for script in table.table.ScriptList.ScriptRecord:
- if script.ScriptTag == 'DFLT': continue # XXX
- for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
- if langsys is None: continue # XXX Create!
- feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
- assert len(feature) <= 1
- if feature:
- feature = feature[0]
- else:
- if not synthFeature:
- synthFeature = otTables.FeatureRecord()
- synthFeature.FeatureTag = 'locl'
- f = synthFeature.Feature = otTables.Feature()
- f.FeatureParams = None
- f.LookupCount = 0
- f.LookupListIndex = []
- table.table.FeatureList.FeatureRecord.append(synthFeature)
- table.table.FeatureList.FeatureCount += 1
- feature = synthFeature
- langsys.FeatureIndex.append(feature)
- langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
-
- if not synthLookup:
- subtable = otTables.SingleSubst()
- subtable.mapping = dups
- synthLookup = otTables.Lookup()
- synthLookup.LookupFlag = 0
- synthLookup.LookupType = 1
- synthLookup.SubTableCount = 1
- synthLookup.SubTable = [subtable]
- if table.table.LookupList is None:
- # mtiLib uses None as default value for LookupList,
- # while feaLib points to an empty array with count 0
- # TODO: make them do the same
- table.table.LookupList = otTables.LookupList()
- table.table.LookupList.Lookup = []
- table.table.LookupList.LookupCount = 0
- table.table.LookupList.Lookup.append(synthLookup)
- table.table.LookupList.LookupCount += 1
-
- if feature.Feature.LookupListIndex[:1] != [synthLookup]:
- feature.Feature.LookupListIndex[:0] = [synthLookup]
- feature.Feature.LookupCount += 1
-
- DefaultTable.merge(self, m, tables)
- return self
-
-@add_method(otTables.SingleSubst,
- otTables.MultipleSubst,
- otTables.AlternateSubst,
- otTables.LigatureSubst,
- otTables.ReverseChainSingleSubst,
- otTables.SinglePos,
- otTables.PairPos,
- otTables.CursivePos,
- otTables.MarkBasePos,
- otTables.MarkLigPos,
- otTables.MarkMarkPos)
+@add_method(ttLib.getTableClass("GSUB"))
+def merge(self, m, tables):
+ assert len(tables) == len(m.duplicateGlyphsPerFont)
+ for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
+ if not dups:
+ continue
+ if table is None or table is NotImplemented:
+ log.warning(
+ "Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
+ m.fonts[i]._merger__name,
+ dups,
+ )
+ continue
+
+ synthFeature = None
+ synthLookup = None
+ for script in table.table.ScriptList.ScriptRecord:
+ if script.ScriptTag == "DFLT":
+ continue # XXX
+ for langsys in [script.Script.DefaultLangSys] + [
+ l.LangSys for l in script.Script.LangSysRecord
+ ]:
+ if langsys is None:
+ continue # XXX Create!
+ feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
+ assert len(feature) <= 1
+ if feature:
+ feature = feature[0]
+ else:
+ if not synthFeature:
+ synthFeature = otTables.FeatureRecord()
+ synthFeature.FeatureTag = "locl"
+ f = synthFeature.Feature = otTables.Feature()
+ f.FeatureParams = None
+ f.LookupCount = 0
+ f.LookupListIndex = []
+ table.table.FeatureList.FeatureRecord.append(synthFeature)
+ table.table.FeatureList.FeatureCount += 1
+ feature = synthFeature
+ langsys.FeatureIndex.append(feature)
+ langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
+
+ if not synthLookup:
+ subtable = otTables.SingleSubst()
+ subtable.mapping = dups
+ synthLookup = otTables.Lookup()
+ synthLookup.LookupFlag = 0
+ synthLookup.LookupType = 1
+ synthLookup.SubTableCount = 1
+ synthLookup.SubTable = [subtable]
+ if table.table.LookupList is None:
+ # mtiLib uses None as default value for LookupList,
+ # while feaLib points to an empty array with count 0
+ # TODO: make them do the same
+ table.table.LookupList = otTables.LookupList()
+ table.table.LookupList.Lookup = []
+ table.table.LookupList.LookupCount = 0
+ table.table.LookupList.Lookup.append(synthLookup)
+ table.table.LookupList.LookupCount += 1
+
+ if feature.Feature.LookupListIndex[:1] != [synthLookup]:
+ feature.Feature.LookupListIndex[:0] = [synthLookup]
+ feature.Feature.LookupCount += 1
+
+ DefaultTable.merge(self, m, tables)
+ return self
+
+
+@add_method(
+ otTables.SingleSubst,
+ otTables.MultipleSubst,
+ otTables.AlternateSubst,
+ otTables.LigatureSubst,
+ otTables.ReverseChainSingleSubst,
+ otTables.SinglePos,
+ otTables.PairPos,
+ otTables.CursivePos,
+ otTables.MarkBasePos,
+ otTables.MarkLigPos,
+ otTables.MarkMarkPos,
+)
def mapLookups(self, lookupMap):
- pass
+ pass
+
# Copied and trimmed down from subset.py
-@add_method(otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
+@add_method(
+ otTables.ContextSubst,
+ otTables.ChainContextSubst,
+ otTables.ContextPos,
+ otTables.ChainContextPos,
+)
def __merge_classify_context(self):
-
- class ContextHelper(object):
- def __init__(self, klass, Format):
- if klass.__name__.endswith('Subst'):
- Typ = 'Sub'
- Type = 'Subst'
- else:
- Typ = 'Pos'
- Type = 'Pos'
- if klass.__name__.startswith('Chain'):
- Chain = 'Chain'
- else:
- Chain = ''
- ChainTyp = Chain+Typ
-
- self.Typ = Typ
- self.Type = Type
- self.Chain = Chain
- self.ChainTyp = ChainTyp
-
- self.LookupRecord = Type+'LookupRecord'
-
- if Format == 1:
- self.Rule = ChainTyp+'Rule'
- self.RuleSet = ChainTyp+'RuleSet'
- elif Format == 2:
- self.Rule = ChainTyp+'ClassRule'
- self.RuleSet = ChainTyp+'ClassSet'
-
- if self.Format not in [1, 2, 3]:
- return None # Don't shoot the messenger; let it go
- if not hasattr(self.__class__, "_merge__ContextHelpers"):
- self.__class__._merge__ContextHelpers = {}
- if self.Format not in self.__class__._merge__ContextHelpers:
- helper = ContextHelper(self.__class__, self.Format)
- self.__class__._merge__ContextHelpers[self.Format] = helper
- return self.__class__._merge__ContextHelpers[self.Format]
-
-
-@add_method(otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
+ class ContextHelper(object):
+ def __init__(self, klass, Format):
+ if klass.__name__.endswith("Subst"):
+ Typ = "Sub"
+ Type = "Subst"
+ else:
+ Typ = "Pos"
+ Type = "Pos"
+ if klass.__name__.startswith("Chain"):
+ Chain = "Chain"
+ else:
+ Chain = ""
+ ChainTyp = Chain + Typ
+
+ self.Typ = Typ
+ self.Type = Type
+ self.Chain = Chain
+ self.ChainTyp = ChainTyp
+
+ self.LookupRecord = Type + "LookupRecord"
+
+ if Format == 1:
+ self.Rule = ChainTyp + "Rule"
+ self.RuleSet = ChainTyp + "RuleSet"
+ elif Format == 2:
+ self.Rule = ChainTyp + "ClassRule"
+ self.RuleSet = ChainTyp + "ClassSet"
+
+ if self.Format not in [1, 2, 3]:
+ return None # Don't shoot the messenger; let it go
+ if not hasattr(self.__class__, "_merge__ContextHelpers"):
+ self.__class__._merge__ContextHelpers = {}
+ if self.Format not in self.__class__._merge__ContextHelpers:
+ helper = ContextHelper(self.__class__, self.Format)
+ self.__class__._merge__ContextHelpers[self.Format] = helper
+ return self.__class__._merge__ContextHelpers[self.Format]
+
+
+@add_method(
+ otTables.ContextSubst,
+ otTables.ChainContextSubst,
+ otTables.ContextPos,
+ otTables.ChainContextPos,
+)
def mapLookups(self, lookupMap):
- c = self.__merge_classify_context()
-
- if self.Format in [1, 2]:
- for rs in getattr(self, c.RuleSet):
- if not rs: continue
- for r in getattr(rs, c.Rule):
- if not r: continue
- for ll in getattr(r, c.LookupRecord):
- if not ll: continue
- ll.LookupListIndex = lookupMap[ll.LookupListIndex]
- elif self.Format == 3:
- for ll in getattr(self, c.LookupRecord):
- if not ll: continue
- ll.LookupListIndex = lookupMap[ll.LookupListIndex]
- else:
- assert 0, "unknown format: %s" % self.Format
-
-@add_method(otTables.ExtensionSubst,
- otTables.ExtensionPos)
+ c = self.__merge_classify_context()
+
+ if self.Format in [1, 2]:
+ for rs in getattr(self, c.RuleSet):
+ if not rs:
+ continue
+ for r in getattr(rs, c.Rule):
+ if not r:
+ continue
+ for ll in getattr(r, c.LookupRecord):
+ if not ll:
+ continue
+ ll.LookupListIndex = lookupMap[ll.LookupListIndex]
+ elif self.Format == 3:
+ for ll in getattr(self, c.LookupRecord):
+ if not ll:
+ continue
+ ll.LookupListIndex = lookupMap[ll.LookupListIndex]
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
+
+@add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def mapLookups(self, lookupMap):
- if self.Format == 1:
- self.ExtSubTable.mapLookups(lookupMap)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ self.ExtSubTable.mapLookups(lookupMap)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
- for st in self.SubTable:
- if not st: continue
- st.mapLookups(lookupMap)
+ for st in self.SubTable:
+ if not st:
+ continue
+ st.mapLookups(lookupMap)
+
@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
- for l in self.Lookup:
- if not l: continue
- l.mapLookups(lookupMap)
+ for l in self.Lookup:
+ if not l:
+ continue
+ l.mapLookups(lookupMap)
+
@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
- if self.LookupFlag & 0x0010:
- self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
+ if self.LookupFlag & 0x0010:
+ self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
+
@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
- for l in self.Lookup:
- if not l: continue
- l.mapMarkFilteringSets(markFilteringSetMap)
+ for l in self.Lookup:
+ if not l:
+ continue
+ l.mapMarkFilteringSets(markFilteringSetMap)
+
@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
- self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
+ self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
+
@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
- for f in self.FeatureRecord:
- if not f or not f.Feature: continue
- f.Feature.mapLookups(lookupMap)
+ for f in self.FeatureRecord:
+ if not f or not f.Feature:
+ continue
+ f.Feature.mapLookups(lookupMap)
+
-@add_method(otTables.DefaultLangSys,
- otTables.LangSys)
+@add_method(otTables.DefaultLangSys, otTables.LangSys)
def mapFeatures(self, featureMap):
- self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
- if self.ReqFeatureIndex != 65535:
- self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
+ self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
+ if self.ReqFeatureIndex != 65535:
+ self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
+
@add_method(otTables.Script)
def mapFeatures(self, featureMap):
- if self.DefaultLangSys:
- self.DefaultLangSys.mapFeatures(featureMap)
- for l in self.LangSysRecord:
- if not l or not l.LangSys: continue
- l.LangSys.mapFeatures(featureMap)
+ if self.DefaultLangSys:
+ self.DefaultLangSys.mapFeatures(featureMap)
+ for l in self.LangSysRecord:
+ if not l or not l.LangSys:
+ continue
+ l.LangSys.mapFeatures(featureMap)
+
@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
- for s in self.ScriptRecord:
- if not s or not s.Script: continue
- s.Script.mapFeatures(featureMap)
+ for s in self.ScriptRecord:
+ if not s or not s.Script:
+ continue
+ s.Script.mapFeatures(featureMap)
-def layoutPreMerge(font):
- # Map indices to references
-
- GDEF = font.get('GDEF')
- GSUB = font.get('GSUB')
- GPOS = font.get('GPOS')
-
- for t in [GSUB, GPOS]:
- if not t: continue
-
- if t.table.LookupList:
- lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
- t.table.LookupList.mapLookups(lookupMap)
- t.table.FeatureList.mapLookups(lookupMap)
-
- if GDEF and GDEF.table.Version >= 0x00010002:
- markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)}
- t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
-
- if t.table.FeatureList and t.table.ScriptList:
- featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
- t.table.ScriptList.mapFeatures(featureMap)
-
- # TODO FeatureParams nameIDs
-
-def layoutPostMerge(font):
- # Map references back to indices
- GDEF = font.get('GDEF')
- GSUB = font.get('GSUB')
- GPOS = font.get('GPOS')
-
- for t in [GSUB, GPOS]:
- if not t: continue
-
- if t.table.FeatureList and t.table.ScriptList:
-
- # Collect unregistered (new) features.
- featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
- t.table.ScriptList.mapFeatures(featureMap)
-
- # Record used features.
- featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
- t.table.ScriptList.mapFeatures(featureMap)
- usedIndices = featureMap.s
-
- # Remove unused features
- t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]
-
- # Map back to indices.
- featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
- t.table.ScriptList.mapFeatures(featureMap)
-
- t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)
+def layoutPreMerge(font):
+ # Map indices to references
- if t.table.LookupList:
+ GDEF = font.get("GDEF")
+ GSUB = font.get("GSUB")
+ GPOS = font.get("GPOS")
- # Collect unregistered (new) lookups.
- lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
- t.table.FeatureList.mapLookups(lookupMap)
- t.table.LookupList.mapLookups(lookupMap)
+ for t in [GSUB, GPOS]:
+ if not t:
+ continue
- # Record used lookups.
- lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
- t.table.FeatureList.mapLookups(lookupMap)
- t.table.LookupList.mapLookups(lookupMap)
- usedIndices = lookupMap.s
+ if t.table.LookupList:
+ lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
+ t.table.LookupList.mapLookups(lookupMap)
+ t.table.FeatureList.mapLookups(lookupMap)
- # Remove unused lookups
- t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]
+ if (
+ GDEF
+ and GDEF.table.Version >= 0x00010002
+ and GDEF.table.MarkGlyphSetsDef
+ ):
+ markFilteringSetMap = {
+ i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
+ }
+ t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
- # Map back to indices.
- lookupMap = NonhashableDict(t.table.LookupList.Lookup)
- t.table.FeatureList.mapLookups(lookupMap)
- t.table.LookupList.mapLookups(lookupMap)
+ if t.table.FeatureList and t.table.ScriptList:
+ featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)}
+ t.table.ScriptList.mapFeatures(featureMap)
- t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
+ # TODO FeatureParams nameIDs
- if GDEF and GDEF.table.Version >= 0x00010002:
- markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage)
- t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
- # TODO FeatureParams nameIDs
+def layoutPostMerge(font):
+ # Map references back to indices
+
+ GDEF = font.get("GDEF")
+ GSUB = font.get("GSUB")
+ GPOS = font.get("GPOS")
+
+ for t in [GSUB, GPOS]:
+ if not t:
+ continue
+
+ if t.table.FeatureList and t.table.ScriptList:
+ # Collect unregistered (new) features.
+ featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
+ t.table.ScriptList.mapFeatures(featureMap)
+
+ # Record used features.
+ featureMap = AttendanceRecordingIdentityDict(
+ t.table.FeatureList.FeatureRecord
+ )
+ t.table.ScriptList.mapFeatures(featureMap)
+ usedIndices = featureMap.s
+
+ # Remove unused features
+ t.table.FeatureList.FeatureRecord = [
+ f
+ for i, f in enumerate(t.table.FeatureList.FeatureRecord)
+ if i in usedIndices
+ ]
+
+ # Map back to indices.
+ featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
+ t.table.ScriptList.mapFeatures(featureMap)
+
+ t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)
+
+ if t.table.LookupList:
+ # Collect unregistered (new) lookups.
+ lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
+ t.table.FeatureList.mapLookups(lookupMap)
+ t.table.LookupList.mapLookups(lookupMap)
+
+ # Record used lookups.
+ lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
+ t.table.FeatureList.mapLookups(lookupMap)
+ t.table.LookupList.mapLookups(lookupMap)
+ usedIndices = lookupMap.s
+
+ # Remove unused lookups
+ t.table.LookupList.Lookup = [
+ l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
+ ]
+
+ # Map back to indices.
+ lookupMap = NonhashableDict(t.table.LookupList.Lookup)
+ t.table.FeatureList.mapLookups(lookupMap)
+ t.table.LookupList.mapLookups(lookupMap)
+
+ t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
+
+ if GDEF and GDEF.table.Version >= 0x00010002:
+ markFilteringSetMap = NonhashableDict(
+ GDEF.table.MarkGlyphSetsDef.Coverage
+ )
+ t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
+
+ # TODO FeatureParams nameIDs
diff --git a/Lib/fontTools/merge/options.py b/Lib/fontTools/merge/options.py
index 02dcf4b7..f1340093 100644
--- a/Lib/fontTools/merge/options.py
+++ b/Lib/fontTools/merge/options.py
@@ -4,82 +4,79 @@
class Options(object):
+ class UnknownOptionError(Exception):
+ pass
- class UnknownOptionError(Exception):
- pass
+ def __init__(self, **kwargs):
+ self.verbose = False
+ self.timing = False
+ self.drop_tables = []
- def __init__(self, **kwargs):
+ self.set(**kwargs)
- self.verbose = False
- self.timing = False
- self.drop_tables = []
+ def set(self, **kwargs):
+ for k, v in kwargs.items():
+ if not hasattr(self, k):
+ raise self.UnknownOptionError("Unknown option '%s'" % k)
+ setattr(self, k, v)
- self.set(**kwargs)
+ def parse_opts(self, argv, ignore_unknown=[]):
+ ret = []
+ opts = {}
+ for a in argv:
+ orig_a = a
+ if not a.startswith("--"):
+ ret.append(a)
+ continue
+ a = a[2:]
+ i = a.find("=")
+ op = "="
+ if i == -1:
+ if a.startswith("no-"):
+ k = a[3:]
+ v = False
+ else:
+ k = a
+ v = True
+ else:
+ k = a[:i]
+ if k[-1] in "-+":
+ op = k[-1] + "=" # Ops is '-=' or '+=' now.
+ k = k[:-1]
+ v = a[i + 1 :]
+ ok = k
+ k = k.replace("-", "_")
+ if not hasattr(self, k):
+ if ignore_unknown is True or ok in ignore_unknown:
+ ret.append(orig_a)
+ continue
+ else:
+ raise self.UnknownOptionError("Unknown option '%s'" % a)
- def set(self, **kwargs):
- for k,v in kwargs.items():
- if not hasattr(self, k):
- raise self.UnknownOptionError("Unknown option '%s'" % k)
- setattr(self, k, v)
+ ov = getattr(self, k)
+ if isinstance(ov, bool):
+ v = bool(v)
+ elif isinstance(ov, int):
+ v = int(v)
+ elif isinstance(ov, list):
+ vv = v.split(",")
+ if vv == [""]:
+ vv = []
+ vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
+ if op == "=":
+ v = vv
+ elif op == "+=":
+ v = ov
+ v.extend(vv)
+ elif op == "-=":
+ v = ov
+ for x in vv:
+ if x in v:
+ v.remove(x)
+ else:
+ assert 0
- def parse_opts(self, argv, ignore_unknown=[]):
- ret = []
- opts = {}
- for a in argv:
- orig_a = a
- if not a.startswith('--'):
- ret.append(a)
- continue
- a = a[2:]
- i = a.find('=')
- op = '='
- if i == -1:
- if a.startswith("no-"):
- k = a[3:]
- v = False
- else:
- k = a
- v = True
- else:
- k = a[:i]
- if k[-1] in "-+":
- op = k[-1]+'=' # Ops is '-=' or '+=' now.
- k = k[:-1]
- v = a[i+1:]
- ok = k
- k = k.replace('-', '_')
- if not hasattr(self, k):
- if ignore_unknown is True or ok in ignore_unknown:
- ret.append(orig_a)
- continue
- else:
- raise self.UnknownOptionError("Unknown option '%s'" % a)
-
- ov = getattr(self, k)
- if isinstance(ov, bool):
- v = bool(v)
- elif isinstance(ov, int):
- v = int(v)
- elif isinstance(ov, list):
- vv = v.split(',')
- if vv == ['']:
- vv = []
- vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
- if op == '=':
- v = vv
- elif op == '+=':
- v = ov
- v.extend(vv)
- elif op == '-=':
- v = ov
- for x in vv:
- if x in v:
- v.remove(x)
- else:
- assert 0
-
- opts[k] = v
- self.set(**opts)
-
- return ret
+ opts[k] = v
+ self.set(**opts)
+ return ret
diff --git a/Lib/fontTools/merge/tables.py b/Lib/fontTools/merge/tables.py
index ac6d59b5..57ed64d3 100644
--- a/Lib/fontTools/merge/tables.py
+++ b/Lib/fontTools/merge/tables.py
@@ -3,6 +3,7 @@
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools import ttLib, cffLib
+from fontTools.misc.psCharStrings import T2WidthExtractor
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.cmap import computeMegaCmap
@@ -13,299 +14,326 @@ import logging
log = logging.getLogger("fontTools.merge")
-ttLib.getTableClass('maxp').mergeMap = {
- '*': max,
- 'tableTag': equal,
- 'tableVersion': equal,
- 'numGlyphs': sum,
- 'maxStorage': first,
- 'maxFunctionDefs': first,
- 'maxInstructionDefs': first,
- # TODO When we correctly merge hinting data, update these values:
- # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
+ttLib.getTableClass("maxp").mergeMap = {
+ "*": max,
+ "tableTag": equal,
+ "tableVersion": equal,
+ "numGlyphs": sum,
+ "maxStorage": first,
+ "maxFunctionDefs": first,
+ "maxInstructionDefs": first,
+ # TODO When we correctly merge hinting data, update these values:
+ # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}
headFlagsMergeBitMap = {
- 'size': 16,
- '*': bitwise_or,
- 1: bitwise_and, # Baseline at y = 0
- 2: bitwise_and, # lsb at x = 0
- 3: bitwise_and, # Force ppem to integer values. FIXME?
- 5: bitwise_and, # Font is vertical
- 6: lambda bit: 0, # Always set to zero
- 11: bitwise_and, # Font data is 'lossless'
- 13: bitwise_and, # Optimized for ClearType
- 14: bitwise_and, # Last resort font. FIXME? equal or first may be better
- 15: lambda bit: 0, # Always set to zero
+ "size": 16,
+ "*": bitwise_or,
+ 1: bitwise_and, # Baseline at y = 0
+ 2: bitwise_and, # lsb at x = 0
+ 3: bitwise_and, # Force ppem to integer values. FIXME?
+ 5: bitwise_and, # Font is vertical
+ 6: lambda bit: 0, # Always set to zero
+ 11: bitwise_and, # Font data is 'lossless'
+ 13: bitwise_and, # Optimized for ClearType
+ 14: bitwise_and, # Last resort font. FIXME? equal or first may be better
+ 15: lambda bit: 0, # Always set to zero
}
-ttLib.getTableClass('head').mergeMap = {
- 'tableTag': equal,
- 'tableVersion': max,
- 'fontRevision': max,
- 'checkSumAdjustment': lambda lst: 0, # We need *something* here
- 'magicNumber': equal,
- 'flags': mergeBits(headFlagsMergeBitMap),
- 'unitsPerEm': equal,
- 'created': current_time,
- 'modified': current_time,
- 'xMin': min,
- 'yMin': min,
- 'xMax': max,
- 'yMax': max,
- 'macStyle': first,
- 'lowestRecPPEM': max,
- 'fontDirectionHint': lambda lst: 2,
- 'indexToLocFormat': first,
- 'glyphDataFormat': equal,
+ttLib.getTableClass("head").mergeMap = {
+ "tableTag": equal,
+ "tableVersion": max,
+ "fontRevision": max,
+ "checkSumAdjustment": lambda lst: 0, # We need *something* here
+ "magicNumber": equal,
+ "flags": mergeBits(headFlagsMergeBitMap),
+ "unitsPerEm": equal,
+ "created": current_time,
+ "modified": current_time,
+ "xMin": min,
+ "yMin": min,
+ "xMax": max,
+ "yMax": max,
+ "macStyle": first,
+ "lowestRecPPEM": max,
+ "fontDirectionHint": lambda lst: 2,
+ "indexToLocFormat": first,
+ "glyphDataFormat": equal,
}
-ttLib.getTableClass('hhea').mergeMap = {
- '*': equal,
- 'tableTag': equal,
- 'tableVersion': max,
- 'ascent': max,
- 'descent': min,
- 'lineGap': max,
- 'advanceWidthMax': max,
- 'minLeftSideBearing': min,
- 'minRightSideBearing': min,
- 'xMaxExtent': max,
- 'caretSlopeRise': first,
- 'caretSlopeRun': first,
- 'caretOffset': first,
- 'numberOfHMetrics': recalculate,
+ttLib.getTableClass("hhea").mergeMap = {
+ "*": equal,
+ "tableTag": equal,
+ "tableVersion": max,
+ "ascent": max,
+ "descent": min,
+ "lineGap": max,
+ "advanceWidthMax": max,
+ "minLeftSideBearing": min,
+ "minRightSideBearing": min,
+ "xMaxExtent": max,
+ "caretSlopeRise": first,
+ "caretSlopeRun": first,
+ "caretOffset": first,
+ "numberOfHMetrics": recalculate,
}
-ttLib.getTableClass('vhea').mergeMap = {
- '*': equal,
- 'tableTag': equal,
- 'tableVersion': max,
- 'ascent': max,
- 'descent': min,
- 'lineGap': max,
- 'advanceHeightMax': max,
- 'minTopSideBearing': min,
- 'minBottomSideBearing': min,
- 'yMaxExtent': max,
- 'caretSlopeRise': first,
- 'caretSlopeRun': first,
- 'caretOffset': first,
- 'numberOfVMetrics': recalculate,
+ttLib.getTableClass("vhea").mergeMap = {
+ "*": equal,
+ "tableTag": equal,
+ "tableVersion": max,
+ "ascent": max,
+ "descent": min,
+ "lineGap": max,
+ "advanceHeightMax": max,
+ "minTopSideBearing": min,
+ "minBottomSideBearing": min,
+ "yMaxExtent": max,
+ "caretSlopeRise": first,
+ "caretSlopeRun": first,
+ "caretOffset": first,
+ "numberOfVMetrics": recalculate,
}
os2FsTypeMergeBitMap = {
- 'size': 16,
- '*': lambda bit: 0,
- 1: bitwise_or, # no embedding permitted
- 2: bitwise_and, # allow previewing and printing documents
- 3: bitwise_and, # allow editing documents
- 8: bitwise_or, # no subsetting permitted
- 9: bitwise_or, # no embedding of outlines permitted
+ "size": 16,
+ "*": lambda bit: 0,
+ 1: bitwise_or, # no embedding permitted
+ 2: bitwise_and, # allow previewing and printing documents
+ 3: bitwise_and, # allow editing documents
+ 8: bitwise_or, # no subsetting permitted
+ 9: bitwise_or, # no embedding of outlines permitted
}
+
def mergeOs2FsType(lst):
- lst = list(lst)
- if all(item == 0 for item in lst):
- return 0
-
- # Compute least restrictive logic for each fsType value
- for i in range(len(lst)):
- # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
- if lst[i] & 0x000C:
- lst[i] &= ~0x0002
- # set bit 2 (allow previewing) if bit 3 is set (allow editing)
- elif lst[i] & 0x0008:
- lst[i] |= 0x0004
- # set bits 2 and 3 if everything is allowed
- elif lst[i] == 0:
- lst[i] = 0x000C
-
- fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
- # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
- if fsType & 0x0002:
- fsType &= ~0x000C
- return fsType
-
-
-ttLib.getTableClass('OS/2').mergeMap = {
- '*': first,
- 'tableTag': equal,
- 'version': max,
- 'xAvgCharWidth': first, # Will be recalculated at the end on the merged font
- 'fsType': mergeOs2FsType, # Will be overwritten
- 'panose': first, # FIXME: should really be the first Latin font
- 'ulUnicodeRange1': bitwise_or,
- 'ulUnicodeRange2': bitwise_or,
- 'ulUnicodeRange3': bitwise_or,
- 'ulUnicodeRange4': bitwise_or,
- 'fsFirstCharIndex': min,
- 'fsLastCharIndex': max,
- 'sTypoAscender': max,
- 'sTypoDescender': min,
- 'sTypoLineGap': max,
- 'usWinAscent': max,
- 'usWinDescent': max,
- # Version 1
- 'ulCodePageRange1': onlyExisting(bitwise_or),
- 'ulCodePageRange2': onlyExisting(bitwise_or),
- # Version 2, 3, 4
- 'sxHeight': onlyExisting(max),
- 'sCapHeight': onlyExisting(max),
- 'usDefaultChar': onlyExisting(first),
- 'usBreakChar': onlyExisting(first),
- 'usMaxContext': onlyExisting(max),
- # version 5
- 'usLowerOpticalPointSize': onlyExisting(min),
- 'usUpperOpticalPointSize': onlyExisting(max),
+ lst = list(lst)
+ if all(item == 0 for item in lst):
+ return 0
+
+ # Compute least restrictive logic for each fsType value
+ for i in range(len(lst)):
+ # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
+ if lst[i] & 0x000C:
+ lst[i] &= ~0x0002
+ # set bit 2 (allow previewing) if bit 3 is set (allow editing)
+ elif lst[i] & 0x0008:
+ lst[i] |= 0x0004
+ # set bits 2 and 3 if everything is allowed
+ elif lst[i] == 0:
+ lst[i] = 0x000C
+
+ fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
+ # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
+ if fsType & 0x0002:
+ fsType &= ~0x000C
+ return fsType
+
+
+ttLib.getTableClass("OS/2").mergeMap = {
+ "*": first,
+ "tableTag": equal,
+ "version": max,
+ "xAvgCharWidth": first, # Will be recalculated at the end on the merged font
+ "fsType": mergeOs2FsType, # Will be overwritten
+ "panose": first, # FIXME: should really be the first Latin font
+ "ulUnicodeRange1": bitwise_or,
+ "ulUnicodeRange2": bitwise_or,
+ "ulUnicodeRange3": bitwise_or,
+ "ulUnicodeRange4": bitwise_or,
+ "fsFirstCharIndex": min,
+ "fsLastCharIndex": max,
+ "sTypoAscender": max,
+ "sTypoDescender": min,
+ "sTypoLineGap": max,
+ "usWinAscent": max,
+ "usWinDescent": max,
+ # Version 1
+ "ulCodePageRange1": onlyExisting(bitwise_or),
+ "ulCodePageRange2": onlyExisting(bitwise_or),
+ # Version 2, 3, 4
+ "sxHeight": onlyExisting(max),
+ "sCapHeight": onlyExisting(max),
+ "usDefaultChar": onlyExisting(first),
+ "usBreakChar": onlyExisting(first),
+ "usMaxContext": onlyExisting(max),
+ # version 5
+ "usLowerOpticalPointSize": onlyExisting(min),
+ "usUpperOpticalPointSize": onlyExisting(max),
}
-@add_method(ttLib.getTableClass('OS/2'))
+
+@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables):
- DefaultTable.merge(self, m, tables)
- if self.version < 2:
- # bits 8 and 9 are reserved and should be set to zero
- self.fsType &= ~0x0300
- if self.version >= 3:
- # Only one of bits 1, 2, and 3 may be set. We already take
- # care of bit 1 implications in mergeOs2FsType. So unset
- # bit 2 if bit 3 is already set.
- if self.fsType & 0x0008:
- self.fsType &= ~0x0004
- return self
-
-ttLib.getTableClass('post').mergeMap = {
- '*': first,
- 'tableTag': equal,
- 'formatType': max,
- 'isFixedPitch': min,
- 'minMemType42': max,
- 'maxMemType42': lambda lst: 0,
- 'minMemType1': max,
- 'maxMemType1': lambda lst: 0,
- 'mapping': onlyExisting(sumDicts),
- 'extraNames': lambda lst: [],
+ DefaultTable.merge(self, m, tables)
+ if self.version < 2:
+ # bits 8 and 9 are reserved and should be set to zero
+ self.fsType &= ~0x0300
+ if self.version >= 3:
+ # Only one of bits 1, 2, and 3 may be set. We already take
+ # care of bit 1 implications in mergeOs2FsType. So unset
+ # bit 2 if bit 3 is already set.
+ if self.fsType & 0x0008:
+ self.fsType &= ~0x0004
+ return self
+
+
+ttLib.getTableClass("post").mergeMap = {
+ "*": first,
+ "tableTag": equal,
+ "formatType": max,
+ "isFixedPitch": min,
+ "minMemType42": max,
+ "maxMemType42": lambda lst: 0,
+ "minMemType1": max,
+ "maxMemType1": lambda lst: 0,
+ "mapping": onlyExisting(sumDicts),
+ "extraNames": lambda lst: [],
}
-ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
- 'tableTag': equal,
- 'metrics': sumDicts,
+ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
+ "tableTag": equal,
+ "metrics": sumDicts,
}
-ttLib.getTableClass('name').mergeMap = {
- 'tableTag': equal,
- 'names': first, # FIXME? Does mixing name records make sense?
+ttLib.getTableClass("name").mergeMap = {
+ "tableTag": equal,
+ "names": first, # FIXME? Does mixing name records make sense?
}
-ttLib.getTableClass('loca').mergeMap = {
- '*': recalculate,
- 'tableTag': equal,
+ttLib.getTableClass("loca").mergeMap = {
+ "*": recalculate,
+ "tableTag": equal,
}
-ttLib.getTableClass('glyf').mergeMap = {
- 'tableTag': equal,
- 'glyphs': sumDicts,
- 'glyphOrder': sumLists,
+ttLib.getTableClass("glyf").mergeMap = {
+ "tableTag": equal,
+ "glyphs": sumDicts,
+ "glyphOrder": sumLists,
+ "_reverseGlyphOrder": recalculate,
+ "axisTags": equal,
}
-@add_method(ttLib.getTableClass('glyf'))
+
+@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables):
- for i,table in enumerate(tables):
- for g in table.glyphs.values():
- if i:
- # Drop hints for all but first font, since
- # we don't map functions / CVT values.
- g.removeHinting()
- # Expand composite glyphs to load their
- # composite glyph names.
- if g.isComposite():
- g.expand(table)
- return DefaultTable.merge(self, m, tables)
-
-ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
-ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
-ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
-ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
-
-@add_method(ttLib.getTableClass('CFF '))
+ for i, table in enumerate(tables):
+ for g in table.glyphs.values():
+ if i:
+ # Drop hints for all but first font, since
+ # we don't map functions / CVT values.
+ g.removeHinting()
+ # Expand composite glyphs to load their
+ # composite glyph names.
+ if g.isComposite() or g.isVarComposite():
+ g.expand(table)
+ return DefaultTable.merge(self, m, tables)
+
+
+ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
+ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
+ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
+ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
+ lst
+) # FIXME? Appears irreconcilable
+
+
+@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables):
-
- if any(hasattr(table, "FDSelect") for table in tables):
- raise NotImplementedError(
- "Merging CID-keyed CFF tables is not supported yet"
- )
-
- for table in tables:
- table.cff.desubroutinize()
-
- newcff = tables[0]
- newfont = newcff.cff[0]
- private = newfont.Private
- storedNamesStrings = []
- glyphOrderStrings = []
- glyphOrder = set(newfont.getGlyphOrder())
-
- for name in newfont.strings.strings:
- if name not in glyphOrder:
- storedNamesStrings.append(name)
- else:
- glyphOrderStrings.append(name)
-
- chrset = list(newfont.charset)
- newcs = newfont.CharStrings
- log.debug("FONT 0 CharStrings: %d.", len(newcs))
-
- for i, table in enumerate(tables[1:], start=1):
- font = table.cff[0]
- font.Private = private
- fontGlyphOrder = set(font.getGlyphOrder())
- for name in font.strings.strings:
- if name in fontGlyphOrder:
- glyphOrderStrings.append(name)
- cs = font.CharStrings
- gs = table.cff.GlobalSubrs
- log.debug("Font %d CharStrings: %d.", i, len(cs))
- chrset.extend(font.charset)
- if newcs.charStringsAreIndexed:
- for i, name in enumerate(cs.charStrings, start=len(newcs)):
- newcs.charStrings[name] = i
- newcs.charStringsIndex.items.append(None)
- for name in cs.charStrings:
- newcs[name] = cs[name]
-
- newfont.charset = chrset
- newfont.numGlyphs = len(chrset)
- newfont.strings.strings = glyphOrderStrings + storedNamesStrings
-
- return newcff
-
-@add_method(ttLib.getTableClass('cmap'))
+ if any(hasattr(table.cff[0], "FDSelect") for table in tables):
+ raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")
+
+ for table in tables:
+ table.cff.desubroutinize()
+
+ newcff = tables[0]
+ newfont = newcff.cff[0]
+ private = newfont.Private
+ newDefaultWidthX, newNominalWidthX = private.defaultWidthX, private.nominalWidthX
+ storedNamesStrings = []
+ glyphOrderStrings = []
+ glyphOrder = set(newfont.getGlyphOrder())
+
+ for name in newfont.strings.strings:
+ if name not in glyphOrder:
+ storedNamesStrings.append(name)
+ else:
+ glyphOrderStrings.append(name)
+
+ chrset = list(newfont.charset)
+ newcs = newfont.CharStrings
+ log.debug("FONT 0 CharStrings: %d.", len(newcs))
+
+ for i, table in enumerate(tables[1:], start=1):
+ font = table.cff[0]
+ defaultWidthX, nominalWidthX = (
+ font.Private.defaultWidthX,
+ font.Private.nominalWidthX,
+ )
+ widthsDiffer = (
+ defaultWidthX != newDefaultWidthX or nominalWidthX != newNominalWidthX
+ )
+ font.Private = private
+ fontGlyphOrder = set(font.getGlyphOrder())
+ for name in font.strings.strings:
+ if name in fontGlyphOrder:
+ glyphOrderStrings.append(name)
+ cs = font.CharStrings
+ gs = table.cff.GlobalSubrs
+ log.debug("Font %d CharStrings: %d.", i, len(cs))
+ chrset.extend(font.charset)
+ if newcs.charStringsAreIndexed:
+ for i, name in enumerate(cs.charStrings, start=len(newcs)):
+ newcs.charStrings[name] = i
+ newcs.charStringsIndex.items.append(None)
+ for name in cs.charStrings:
+ if widthsDiffer:
+ c = cs[name]
+ defaultWidthXToken = object()
+ extractor = T2WidthExtractor([], [], nominalWidthX, defaultWidthXToken)
+ extractor.execute(c)
+ width = extractor.width
+ if width is not defaultWidthXToken:
+ c.program.pop(0)
+ else:
+ width = defaultWidthX
+ if width != newDefaultWidthX:
+ c.program.insert(0, width - newNominalWidthX)
+ newcs[name] = cs[name]
+
+ newfont.charset = chrset
+ newfont.numGlyphs = len(chrset)
+ newfont.strings.strings = glyphOrderStrings + storedNamesStrings
+
+ return newcff
+
+
+@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables):
-
- # TODO Handle format=14.
- if not hasattr(m, 'cmap'):
- computeMegaCmap(m, tables)
- cmap = m.cmap
-
- cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF}
- self.tables = []
- module = ttLib.getTableModule('cmap')
- if len(cmapBmpOnly) != len(cmap):
- # format-12 required.
- cmapTable = module.cmap_classes[12](12)
- cmapTable.platformID = 3
- cmapTable.platEncID = 10
- cmapTable.language = 0
- cmapTable.cmap = cmap
- self.tables.append(cmapTable)
- # always create format-4
- cmapTable = module.cmap_classes[4](4)
- cmapTable.platformID = 3
- cmapTable.platEncID = 1
- cmapTable.language = 0
- cmapTable.cmap = cmapBmpOnly
- # ordered by platform then encoding
- self.tables.insert(0, cmapTable)
- self.tableVersion = 0
- self.numSubTables = len(self.tables)
- return self
+ # TODO Handle format=14.
+ if not hasattr(m, "cmap"):
+ computeMegaCmap(m, tables)
+ cmap = m.cmap
+
+ cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
+ self.tables = []
+ module = ttLib.getTableModule("cmap")
+ if len(cmapBmpOnly) != len(cmap):
+ # format-12 required.
+ cmapTable = module.cmap_classes[12](12)
+ cmapTable.platformID = 3
+ cmapTable.platEncID = 10
+ cmapTable.language = 0
+ cmapTable.cmap = cmap
+ self.tables.append(cmapTable)
+ # always create format-4
+ cmapTable = module.cmap_classes[4](4)
+ cmapTable.platformID = 3
+ cmapTable.platEncID = 1
+ cmapTable.language = 0
+ cmapTable.cmap = cmapBmpOnly
+ # ordered by platform then encoding
+ self.tables.insert(0, cmapTable)
+ self.tableVersion = 0
+ self.numSubTables = len(self.tables)
+ return self
diff --git a/Lib/fontTools/merge/unicode.py b/Lib/fontTools/merge/unicode.py
index f91baee8..65ae6c49 100644
--- a/Lib/fontTools/merge/unicode.py
+++ b/Lib/fontTools/merge/unicode.py
@@ -1,65 +1,78 @@
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.
+
def is_Default_Ignorable(u):
- # http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
- #
- # TODO Move me to unicodedata module and autogenerate.
- #
- # Unicode 14.0:
- # $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/'
- # 00AD # Cf SOFT HYPHEN
- # 034F # Mn COMBINING GRAPHEME JOINER
- # 061C # Cf ARABIC LETTER MARK
- # 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
- # 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
- # 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
- # 180E # Cf MONGOLIAN VOWEL SEPARATOR
- # 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
- # 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
- # 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
- # 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS
- # 2065 # Cn <reserved-2065>
- # 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
- # 3164 # Lo HANGUL FILLER
- # FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
- # FEFF # Cf ZERO WIDTH NO-BREAK SPACE
- # FFA0 # Lo HALFWIDTH HANGUL FILLER
- # FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
- # 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
- # 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
- # E0000 # Cn <reserved-E0000>
- # E0001 # Cf LANGUAGE TAG
- # E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F>
- # E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG
- # E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF>
- # E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
- # E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
- return (
- u == 0x00AD or # Cf SOFT HYPHEN
- u == 0x034F or # Mn COMBINING GRAPHEME JOINER
- u == 0x061C or # Cf ARABIC LETTER MARK
- 0x115F <= u <= 0x1160 or # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
- 0x17B4 <= u <= 0x17B5 or # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
- 0x180B <= u <= 0x180D or # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
- u == 0x180E or # Cf MONGOLIAN VOWEL SEPARATOR
- u == 0x180F or # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
- 0x200B <= u <= 0x200F or # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
- 0x202A <= u <= 0x202E or # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
- 0x2060 <= u <= 0x2064 or # Cf [5] WORD JOINER..INVISIBLE PLUS
- u == 0x2065 or # Cn <reserved-2065>
- 0x2066 <= u <= 0x206F or # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
- u == 0x3164 or # Lo HANGUL FILLER
- 0xFE00 <= u <= 0xFE0F or # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
- u == 0xFEFF or # Cf ZERO WIDTH NO-BREAK SPACE
- u == 0xFFA0 or # Lo HALFWIDTH HANGUL FILLER
- 0xFFF0 <= u <= 0xFFF8 or # Cn [9] <reserved-FFF0>..<reserved-FFF8>
- 0x1BCA0 <= u <= 0x1BCA3 or # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
- 0x1D173 <= u <= 0x1D17A or # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
- u == 0xE0000 or # Cn <reserved-E0000>
- u == 0xE0001 or # Cf LANGUAGE TAG
- 0xE0002 <= u <= 0xE001F or # Cn [30] <reserved-E0002>..<reserved-E001F>
- 0xE0020 <= u <= 0xE007F or # Cf [96] TAG SPACE..CANCEL TAG
- 0xE0080 <= u <= 0xE00FF or # Cn [128] <reserved-E0080>..<reserved-E00FF>
- 0xE0100 <= u <= 0xE01EF or # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
- 0xE01F0 <= u <= 0xE0FFF or # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
- False)
+ # http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
+ #
+ # TODO Move me to unicodedata module and autogenerate.
+ #
+ # Unicode 14.0:
+ # $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/'
+ # 00AD # Cf SOFT HYPHEN
+ # 034F # Mn COMBINING GRAPHEME JOINER
+ # 061C # Cf ARABIC LETTER MARK
+ # 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
+ # 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
+ # 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
+ # 180E # Cf MONGOLIAN VOWEL SEPARATOR
+ # 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
+ # 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
+ # 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
+ # 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS
+ # 2065 # Cn <reserved-2065>
+ # 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
+ # 3164 # Lo HANGUL FILLER
+ # FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
+ # FEFF # Cf ZERO WIDTH NO-BREAK SPACE
+ # FFA0 # Lo HALFWIDTH HANGUL FILLER
+ # FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
+ # 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
+ # 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
+ # E0000 # Cn <reserved-E0000>
+ # E0001 # Cf LANGUAGE TAG
+ # E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F>
+ # E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG
+ # E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF>
+ # E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
+ # E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
+ return (
+ u == 0x00AD
+ or u == 0x034F # Cf SOFT HYPHEN
+ or u == 0x061C # Mn COMBINING GRAPHEME JOINER
+ or 0x115F <= u <= 0x1160 # Cf ARABIC LETTER MARK
+ or 0x17B4 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
+ <= u
+ <= 0x17B5
+ or 0x180B # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
+ <= u
+ <= 0x180D
+ or u # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
+ == 0x180E
+ or u == 0x180F # Cf MONGOLIAN VOWEL SEPARATOR
+ or 0x200B <= u <= 0x200F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
+ or 0x202A <= u <= 0x202E # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
+ or 0x2060 # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
+ <= u
+ <= 0x2064
+ or u == 0x2065 # Cf [5] WORD JOINER..INVISIBLE PLUS
+ or 0x2066 <= u <= 0x206F # Cn <reserved-2065>
+ or u == 0x3164 # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
+ or 0xFE00 <= u <= 0xFE0F # Lo HANGUL FILLER
+ or u == 0xFEFF # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
+ or u == 0xFFA0 # Cf ZERO WIDTH NO-BREAK SPACE
+ or 0xFFF0 <= u <= 0xFFF8 # Lo HALFWIDTH HANGUL FILLER
+ or 0x1BCA0 <= u <= 0x1BCA3 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
+ or 0x1D173 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
+ <= u
+ <= 0x1D17A
+ or u == 0xE0000 # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
+ or u == 0xE0001 # Cn <reserved-E0000>
+ or 0xE0002 <= u <= 0xE001F # Cf LANGUAGE TAG
+ or 0xE0020 <= u <= 0xE007F # Cn [30] <reserved-E0002>..<reserved-E001F>
+ or 0xE0080 <= u <= 0xE00FF # Cf [96] TAG SPACE..CANCEL TAG
+ or 0xE0100 <= u <= 0xE01EF # Cn [128] <reserved-E0080>..<reserved-E00FF>
+ or 0xE01F0 # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
+ <= u
+ <= 0xE0FFF
+ or False # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
+ )
diff --git a/Lib/fontTools/merge/util.py b/Lib/fontTools/merge/util.py
index 66cea4d5..42fe39d5 100644
--- a/Lib/fontTools/merge/util.py
+++ b/Lib/fontTools/merge/util.py
@@ -14,118 +14,130 @@ log = logging.getLogger("fontTools.merge")
# General utility functions for merging values from different fonts
+
def equal(lst):
- lst = list(lst)
- t = iter(lst)
- first = next(t)
- assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
- return first
+ lst = list(lst)
+ t = iter(lst)
+ first = next(t)
+ assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
+ return first
+
def first(lst):
- return next(iter(lst))
+ return next(iter(lst))
+
def recalculate(lst):
- return NotImplemented
+ return NotImplemented
+
def current_time(lst):
- return timestampNow()
+ return timestampNow()
+
def bitwise_and(lst):
- return reduce(operator.and_, lst)
+ return reduce(operator.and_, lst)
+
def bitwise_or(lst):
- return reduce(operator.or_, lst)
+ return reduce(operator.or_, lst)
+
def avg_int(lst):
- lst = list(lst)
- return sum(lst) // len(lst)
+ lst = list(lst)
+ return sum(lst) // len(lst)
+
def onlyExisting(func):
- """Returns a filter func that when called with a list,
- only calls func on the non-NotImplemented items of the list,
- and only so if there's at least one item remaining.
- Otherwise returns NotImplemented."""
+ """Returns a filter func that when called with a list,
+ only calls func on the non-NotImplemented items of the list,
+ and only so if there's at least one item remaining.
+ Otherwise returns NotImplemented."""
+
+ def wrapper(lst):
+ items = [item for item in lst if item is not NotImplemented]
+ return func(items) if items else NotImplemented
- def wrapper(lst):
- items = [item for item in lst if item is not NotImplemented]
- return func(items) if items else NotImplemented
+ return wrapper
- return wrapper
def sumLists(lst):
- l = []
- for item in lst:
- l.extend(item)
- return l
+ l = []
+ for item in lst:
+ l.extend(item)
+ return l
+
def sumDicts(lst):
- d = {}
- for item in lst:
- d.update(item)
- return d
+ d = {}
+ for item in lst:
+ d.update(item)
+ return d
-def mergeBits(bitmap):
- def wrapper(lst):
- lst = list(lst)
- returnValue = 0
- for bitNumber in range(bitmap['size']):
- try:
- mergeLogic = bitmap[bitNumber]
- except KeyError:
- try:
- mergeLogic = bitmap['*']
- except KeyError:
- raise Exception("Don't know how to merge bit %s" % bitNumber)
- shiftedBit = 1 << bitNumber
- mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
- returnValue |= mergedValue << bitNumber
- return returnValue
-
- return wrapper
+def mergeBits(bitmap):
+ def wrapper(lst):
+ lst = list(lst)
+ returnValue = 0
+ for bitNumber in range(bitmap["size"]):
+ try:
+ mergeLogic = bitmap[bitNumber]
+ except KeyError:
+ try:
+ mergeLogic = bitmap["*"]
+ except KeyError:
+ raise Exception("Don't know how to merge bit %s" % bitNumber)
+ shiftedBit = 1 << bitNumber
+ mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
+ returnValue |= mergedValue << bitNumber
+ return returnValue
+
+ return wrapper
class AttendanceRecordingIdentityDict(object):
- """A dictionary-like object that records indices of items actually accessed
- from a list."""
+ """A dictionary-like object that records indices of items actually accessed
+ from a list."""
+
+ def __init__(self, lst):
+ self.l = lst
+ self.d = {id(v): i for i, v in enumerate(lst)}
+ self.s = set()
- def __init__(self, lst):
- self.l = lst
- self.d = {id(v):i for i,v in enumerate(lst)}
- self.s = set()
+ def __getitem__(self, v):
+ self.s.add(self.d[id(v)])
+ return v
- def __getitem__(self, v):
- self.s.add(self.d[id(v)])
- return v
class GregariousIdentityDict(object):
- """A dictionary-like object that welcomes guests without reservations and
- adds them to the end of the guest list."""
+ """A dictionary-like object that welcomes guests without reservations and
+ adds them to the end of the guest list."""
+
+ def __init__(self, lst):
+ self.l = lst
+ self.s = set(id(v) for v in lst)
- def __init__(self, lst):
- self.l = lst
- self.s = set(id(v) for v in lst)
+ def __getitem__(self, v):
+ if id(v) not in self.s:
+ self.s.add(id(v))
+ self.l.append(v)
+ return v
- def __getitem__(self, v):
- if id(v) not in self.s:
- self.s.add(id(v))
- self.l.append(v)
- return v
class NonhashableDict(object):
- """A dictionary-like object mapping objects to values."""
+ """A dictionary-like object mapping objects to values."""
- def __init__(self, keys, values=None):
- if values is None:
- self.d = {id(v):i for i,v in enumerate(keys)}
- else:
- self.d = {id(k):v for k,v in zip(keys, values)}
+ def __init__(self, keys, values=None):
+ if values is None:
+ self.d = {id(v): i for i, v in enumerate(keys)}
+ else:
+ self.d = {id(k): v for k, v in zip(keys, values)}
- def __getitem__(self, k):
- return self.d[id(k)]
+ def __getitem__(self, k):
+ return self.d[id(k)]
- def __setitem__(self, k, v):
- self.d[id(k)] = v
+ def __setitem__(self, k, v):
+ self.d[id(k)] = v
- def __delitem__(self, k):
- del self.d[id(k)]
+ def __delitem__(self, k):
+ del self.d[id(k)]
diff --git a/Lib/fontTools/misc/arrayTools.py b/Lib/fontTools/misc/arrayTools.py
index 01ccbe82..ced8d87a 100644
--- a/Lib/fontTools/misc/arrayTools.py
+++ b/Lib/fontTools/misc/arrayTools.py
@@ -23,6 +23,7 @@ def calcBounds(array):
ys = [y for x, y in array]
return min(xs), min(ys), max(xs), max(ys)
+
def calcIntBounds(array, round=otRound):
"""Calculate the integer bounding rectangle of a 2D points array.
@@ -46,7 +47,7 @@ def updateBounds(bounds, p, min=min, max=max):
Args:
bounds: A bounding rectangle expressed as a tuple
- ``(xMin, yMin, xMax, yMax)``.
+ ``(xMin, yMin, xMax, yMax), or None``.
p: A 2D tuple representing a point.
min,max: functions to compute the minimum and maximum.
@@ -54,9 +55,12 @@ def updateBounds(bounds, p, min=min, max=max):
The updated bounding rectangle ``(xMin, yMin, xMax, yMax)``.
"""
(x, y) = p
+ if bounds is None:
+ return x, y, x, y
xMin, yMin, xMax, yMax = bounds
return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)
+
def pointInRect(p, rect):
"""Test if a point is inside a bounding rectangle.
@@ -72,6 +76,7 @@ def pointInRect(p, rect):
xMin, yMin, xMax, yMax = rect
return (xMin <= x <= xMax) and (yMin <= y <= yMax)
+
def pointsInRect(array, rect):
"""Determine which points are inside a bounding rectangle.
@@ -88,6 +93,7 @@ def pointsInRect(array, rect):
xMin, yMin, xMax, yMax = rect
return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]
+
def vectorLength(vector):
"""Calculate the length of the given vector.
@@ -100,6 +106,7 @@ def vectorLength(vector):
x, y = vector
return math.sqrt(x**2 + y**2)
+
def asInt16(array):
"""Round a list of floats to 16-bit signed integers.
@@ -109,7 +116,7 @@ def asInt16(array):
Returns:
A list of rounded integers.
"""
- return [int(math.floor(i+0.5)) for i in array]
+ return [int(math.floor(i + 0.5)) for i in array]
def normRect(rect):
@@ -130,6 +137,7 @@ def normRect(rect):
(xMin, yMin, xMax, yMax) = rect
return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax)
+
def scaleRect(rect, x, y):
"""Scale a bounding box rectangle.
@@ -145,6 +153,7 @@ def scaleRect(rect, x, y):
(xMin, yMin, xMax, yMax) = rect
return xMin * x, yMin * y, xMax * x, yMax * y
+
def offsetRect(rect, dx, dy):
"""Offset a bounding box rectangle.
@@ -158,7 +167,8 @@ def offsetRect(rect, dx, dy):
An offset bounding rectangle.
"""
(xMin, yMin, xMax, yMax) = rect
- return xMin+dx, yMin+dy, xMax+dx, yMax+dy
+ return xMin + dx, yMin + dy, xMax + dx, yMax + dy
+
def insetRect(rect, dx, dy):
"""Inset a bounding box rectangle on all sides.
@@ -173,7 +183,8 @@ def insetRect(rect, dx, dy):
An inset bounding rectangle.
"""
(xMin, yMin, xMax, yMax) = rect
- return xMin+dx, yMin+dy, xMax-dx, yMax-dy
+ return xMin + dx, yMin + dy, xMax - dx, yMax - dy
+
def sectRect(rect1, rect2):
"""Test for rectangle-rectangle intersection.
@@ -191,12 +202,17 @@ def sectRect(rect1, rect2):
"""
(xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2
- xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2),
- min(xMax1, xMax2), min(yMax1, yMax2))
+ xMin, yMin, xMax, yMax = (
+ max(xMin1, xMin2),
+ max(yMin1, yMin2),
+ min(xMax1, xMax2),
+ min(yMax1, yMax2),
+ )
if xMin >= xMax or yMin >= yMax:
return False, (0, 0, 0, 0)
return True, (xMin, yMin, xMax, yMax)
+
def unionRect(rect1, rect2):
"""Determine union of bounding rectangles.
@@ -211,10 +227,15 @@ def unionRect(rect1, rect2):
"""
(xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2
- xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2),
- max(xMax1, xMax2), max(yMax1, yMax2))
+ xMin, yMin, xMax, yMax = (
+ min(xMin1, xMin2),
+ min(yMin1, yMin2),
+ max(xMax1, xMax2),
+ max(yMax1, yMax2),
+ )
return (xMin, yMin, xMax, yMax)
+
def rectCenter(rect):
"""Determine rectangle center.
@@ -226,7 +247,8 @@ def rectCenter(rect):
A 2D tuple representing the point at the center of the rectangle.
"""
(xMin, yMin, xMax, yMax) = rect
- return (xMin+xMax)/2, (yMin+yMax)/2
+ return (xMin + xMax) / 2, (yMin + yMax) / 2
+
def rectArea(rect):
"""Determine rectangle area.
@@ -241,6 +263,7 @@ def rectArea(rect):
(xMin, yMin, xMax, yMax) = rect
return (yMax - yMin) * (xMax - xMin)
+
def intRect(rect):
"""Round a rectangle to integer values.
@@ -261,8 +284,28 @@ def intRect(rect):
return (xMin, yMin, xMax, yMax)
-class Vector(_Vector):
+def quantizeRect(rect, factor=1):
+ """
+ >>> bounds = (72.3, -218.4, 1201.3, 919.1)
+ >>> quantizeRect(bounds)
+ (72, -219, 1202, 920)
+ >>> quantizeRect(bounds, factor=10)
+ (70, -220, 1210, 920)
+ >>> quantizeRect(bounds, factor=100)
+ (0, -300, 1300, 1000)
+ """
+ if factor < 1:
+ raise ValueError(f"Expected quantization factor >= 1, found: {factor!r}")
+ xMin, yMin, xMax, yMax = normRect(rect)
+ return (
+ int(math.floor(xMin / factor) * factor),
+ int(math.floor(yMin / factor) * factor),
+ int(math.ceil(xMax / factor) * factor),
+ int(math.ceil(yMax / factor) * factor),
+ )
+
+class Vector(_Vector):
def __init__(self, *args, **kwargs):
warnings.warn(
"fontTools.misc.arrayTools.Vector has been deprecated, please use "
@@ -373,7 +416,9 @@ def _test():
(0, 2, 4, 5)
"""
+
if __name__ == "__main__":
import sys
import doctest
+
sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/misc/bezierTools.py b/Lib/fontTools/misc/bezierTools.py
index 25e5c548..21ab0a5d 100644
--- a/Lib/fontTools/misc/bezierTools.py
+++ b/Lib/fontTools/misc/bezierTools.py
@@ -7,6 +7,17 @@ from fontTools.misc.transform import Identity
import math
from collections import namedtuple
+try:
+ import cython
+
+ COMPILED = cython.compiled
+except (AttributeError, ImportError):
+ # if cython not installed, use mock module with no-op decorators and types
+ from fontTools.misc import cython
+
+ COMPILED = False
+
+
Intersection = namedtuple("Intersection", ["pt", "t1", "t2"])
@@ -26,10 +37,13 @@ __all__ = [
"splitCubic",
"splitQuadraticAtT",
"splitCubicAtT",
+ "splitCubicAtTC",
+ "splitCubicIntoTwoAtTC",
"solveQuadratic",
"solveCubic",
"quadraticPointAtT",
"cubicPointAtT",
+ "cubicPointAtTC",
"linePointAtT",
"segmentPointAtT",
"lineLineIntersections",
@@ -67,6 +81,14 @@ def _split_cubic_into_two(p0, p1, p2, p3):
)
+@cython.returns(cython.double)
+@cython.locals(
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+)
+@cython.locals(mult=cython.double, arch=cython.double, box=cython.double)
def _calcCubicArcLengthCRecurse(mult, p0, p1, p2, p3):
arch = abs(p0 - p3)
box = abs(p0 - p1) + abs(p1 - p2) + abs(p2 - p3)
@@ -79,6 +101,17 @@ def _calcCubicArcLengthCRecurse(mult, p0, p1, p2, p3):
)
+@cython.returns(cython.double)
+@cython.locals(
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+ pt4=cython.complex,
+)
+@cython.locals(
+ tolerance=cython.double,
+ mult=cython.double,
+)
def calcCubicArcLengthC(pt1, pt2, pt3, pt4, tolerance=0.005):
"""Calculates the arc length for a cubic Bezier segment.
@@ -97,14 +130,22 @@ epsilonDigits = 6
epsilon = 1e-10
+@cython.cfunc
+@cython.inline
+@cython.returns(cython.double)
+@cython.locals(v1=cython.complex, v2=cython.complex)
def _dot(v1, v2):
return (v1 * v2.conjugate()).real
+@cython.cfunc
+@cython.inline
+@cython.returns(cython.double)
+@cython.locals(x=cython.double)
def _intSecAtan(x):
# In : sympy.integrate(sp.sec(sp.atan(x)))
# Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2
- return x * math.sqrt(x ** 2 + 1) / 2 + math.asinh(x) / 2
+ return x * math.sqrt(x**2 + 1) / 2 + math.asinh(x) / 2
def calcQuadraticArcLength(pt1, pt2, pt3):
@@ -142,6 +183,25 @@ def calcQuadraticArcLength(pt1, pt2, pt3):
return calcQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3))
+@cython.returns(cython.double)
+@cython.locals(
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+ d0=cython.complex,
+ d1=cython.complex,
+ d=cython.complex,
+ n=cython.complex,
+)
+@cython.locals(
+ scale=cython.double,
+ origDist=cython.double,
+ a=cython.double,
+ b=cython.double,
+ x0=cython.double,
+ x1=cython.double,
+ Len=cython.double,
+)
def calcQuadraticArcLengthC(pt1, pt2, pt3):
"""Calculates the arc length for a quadratic Bezier segment.
@@ -154,7 +214,7 @@ def calcQuadraticArcLengthC(pt1, pt2, pt3):
Arc length value.
"""
# Analytical solution to the length of a quadratic bezier.
- # I'll explain how I arrived at this later.
+ # Documentation: https://github.com/fonttools/fonttools/issues/3055
d0 = pt2 - pt1
d1 = pt3 - pt2
d = d1 - d0
@@ -191,6 +251,17 @@ def approximateQuadraticArcLength(pt1, pt2, pt3):
return approximateQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3))
+@cython.returns(cython.double)
+@cython.locals(
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+)
+@cython.locals(
+ v0=cython.double,
+ v1=cython.double,
+ v2=cython.double,
+)
def approximateQuadraticArcLengthC(pt1, pt2, pt3):
"""Calculates the arc length for a quadratic Bezier segment.
@@ -288,6 +359,20 @@ def approximateCubicArcLength(pt1, pt2, pt3, pt4):
)
+@cython.returns(cython.double)
+@cython.locals(
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+ pt4=cython.complex,
+)
+@cython.locals(
+ v0=cython.double,
+ v1=cython.double,
+ v2=cython.double,
+ v3=cython.double,
+ v4=cython.double,
+)
def approximateCubicArcLengthC(pt1, pt2, pt3, pt4):
"""Approximates the arc length for a cubic Bezier segment.
@@ -549,6 +634,70 @@ def splitCubicAtT(pt1, pt2, pt3, pt4, *ts):
return _splitCubicAtT(a, b, c, d, *ts)
+@cython.locals(
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+ pt4=cython.complex,
+ a=cython.complex,
+ b=cython.complex,
+ c=cython.complex,
+ d=cython.complex,
+)
+def splitCubicAtTC(pt1, pt2, pt3, pt4, *ts):
+ """Split a cubic Bezier curve at one or more values of t.
+
+ Args:
+ pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers..
+ *ts: Positions at which to split the curve.
+
+ Yields:
+ Curve segments (each curve segment being four complex numbers).
+ """
+ a, b, c, d = calcCubicParametersC(pt1, pt2, pt3, pt4)
+ yield from _splitCubicAtTC(a, b, c, d, *ts)
+
+
+@cython.returns(cython.complex)
+@cython.locals(
+ t=cython.double,
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+ pt4=cython.complex,
+ pointAtT=cython.complex,
+ off1=cython.complex,
+ off2=cython.complex,
+)
+@cython.locals(
+ t2=cython.double, _1_t=cython.double, _1_t_2=cython.double, _2_t_1_t=cython.double
+)
+def splitCubicIntoTwoAtTC(pt1, pt2, pt3, pt4, t):
+ """Split a cubic Bezier curve at t.
+
+ Args:
+ pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers.
+ t: Position at which to split the curve.
+
+ Returns:
+ A tuple of two curve segments (each curve segment being four complex numbers).
+ """
+ t2 = t * t
+ _1_t = 1 - t
+ _1_t_2 = _1_t * _1_t
+ _2_t_1_t = 2 * t * _1_t
+ pointAtT = (
+ _1_t_2 * _1_t * pt1 + 3 * (_1_t_2 * t * pt2 + _1_t * t2 * pt3) + t2 * t * pt4
+ )
+ off1 = _1_t_2 * pt1 + _2_t_1_t * pt2 + t2 * pt3
+ off2 = _1_t_2 * pt2 + _2_t_1_t * pt3 + t2 * pt4
+
+ pt2 = pt1 + (pt2 - pt1) * t
+ pt3 = pt4 + (pt3 - pt4) * _1_t
+
+ return ((pt1, pt2, off1, pointAtT), (pointAtT, off2, pt3, pt4))
+
+
def _splitQuadraticAtT(a, b, c, *ts):
ts = list(ts)
segments = []
@@ -611,6 +760,44 @@ def _splitCubicAtT(a, b, c, d, *ts):
return segments
+@cython.locals(
+ a=cython.complex,
+ b=cython.complex,
+ c=cython.complex,
+ d=cython.complex,
+ t1=cython.double,
+ t2=cython.double,
+ delta=cython.double,
+ delta_2=cython.double,
+ delta_3=cython.double,
+ a1=cython.complex,
+ b1=cython.complex,
+ c1=cython.complex,
+ d1=cython.complex,
+)
+def _splitCubicAtTC(a, b, c, d, *ts):
+ ts = list(ts)
+ ts.insert(0, 0.0)
+ ts.append(1.0)
+ for i in range(len(ts) - 1):
+ t1 = ts[i]
+ t2 = ts[i + 1]
+ delta = t2 - t1
+
+ delta_2 = delta * delta
+ delta_3 = delta * delta_2
+ t1_2 = t1 * t1
+ t1_3 = t1 * t1_2
+
+ # calc new a, b, c and d
+ a1 = a * delta_3
+ b1 = (3 * a * t1 + b) * delta_2
+ c1 = (2 * b * t1 + c + 3 * a * t1_2) * delta
+ d1 = a * t1_3 + b * t1_2 + c * t1 + d
+ pt1, pt2, pt3, pt4 = calcCubicPointsC(a1, b1, c1, d1)
+ yield (pt1, pt2, pt3, pt4)
+
+
#
# Equation solvers.
#
@@ -773,6 +960,24 @@ def calcCubicParameters(pt1, pt2, pt3, pt4):
return (ax, ay), (bx, by), (cx, cy), (dx, dy)
+@cython.cfunc
+@cython.inline
+@cython.locals(
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+ pt4=cython.complex,
+ a=cython.complex,
+ b=cython.complex,
+ c=cython.complex,
+)
+def calcCubicParametersC(pt1, pt2, pt3, pt4):
+ c = (pt2 - pt1) * 3.0
+ b = (pt3 - pt2) * 3.0 - c
+ a = pt4 - pt1 - c - b
+ return (a, b, c, pt1)
+
+
def calcQuadraticPoints(a, b, c):
ax, ay = a
bx, by = b
@@ -802,6 +1007,24 @@ def calcCubicPoints(a, b, c, d):
return (x1, y1), (x2, y2), (x3, y3), (x4, y4)
+@cython.cfunc
+@cython.inline
+@cython.locals(
+ a=cython.complex,
+ b=cython.complex,
+ c=cython.complex,
+ d=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+ p4=cython.complex,
+)
+def calcCubicPointsC(a, b, c, d):
+ p2 = c * (1 / 3) + d
+ p3 = (b + c) * (1 / 3) + p2
+ p4 = a + b + c + d
+ return (d, p2, p3, p4)
+
+
#
# Point at time
#
@@ -845,21 +1068,47 @@ def cubicPointAtT(pt1, pt2, pt3, pt4, t):
Returns:
A 2D tuple with the coordinates of the point.
"""
+ t2 = t * t
+ _1_t = 1 - t
+ _1_t_2 = _1_t * _1_t
x = (
- (1 - t) * (1 - t) * (1 - t) * pt1[0]
- + 3 * (1 - t) * (1 - t) * t * pt2[0]
- + 3 * (1 - t) * t * t * pt3[0]
- + t * t * t * pt4[0]
+ _1_t_2 * _1_t * pt1[0]
+ + 3 * (_1_t_2 * t * pt2[0] + _1_t * t2 * pt3[0])
+ + t2 * t * pt4[0]
)
y = (
- (1 - t) * (1 - t) * (1 - t) * pt1[1]
- + 3 * (1 - t) * (1 - t) * t * pt2[1]
- + 3 * (1 - t) * t * t * pt3[1]
- + t * t * t * pt4[1]
+ _1_t_2 * _1_t * pt1[1]
+ + 3 * (_1_t_2 * t * pt2[1] + _1_t * t2 * pt3[1])
+ + t2 * t * pt4[1]
)
return (x, y)
+@cython.returns(cython.complex)
+@cython.locals(
+ t=cython.double,
+ pt1=cython.complex,
+ pt2=cython.complex,
+ pt3=cython.complex,
+ pt4=cython.complex,
+)
+@cython.locals(t2=cython.double, _1_t=cython.double, _1_t_2=cython.double)
+def cubicPointAtTC(pt1, pt2, pt3, pt4, t):
+ """Finds the point at time `t` on a cubic curve.
+
+ Args:
+ pt1, pt2, pt3, pt4: Coordinates of the curve as complex numbers.
+ t: The time along the curve.
+
+ Returns:
+ A complex number with the coordinates of the point.
+ """
+ t2 = t * t
+ _1_t = 1 - t
+ _1_t_2 = _1_t * _1_t
+ return _1_t_2 * _1_t * pt1 + 3 * (_1_t_2 * t * pt2 + _1_t * t2 * pt3) + t2 * t * pt4
+
+
def segmentPointAtT(seg, t):
if len(seg) == 2:
return linePointAtT(*seg, t)
diff --git a/Lib/fontTools/misc/classifyTools.py b/Lib/fontTools/misc/classifyTools.py
index ae88a8f7..2235bbd7 100644
--- a/Lib/fontTools/misc/classifyTools.py
+++ b/Lib/fontTools/misc/classifyTools.py
@@ -4,168 +4,168 @@
class Classifier(object):
- """
- Main Classifier object, used to classify things into similar sets.
- """
-
- def __init__(self, sort=True):
-
- self._things = set() # set of all things known so far
- self._sets = [] # list of class sets produced so far
- self._mapping = {} # map from things to their class set
- self._dirty = False
- self._sort = sort
-
- def add(self, set_of_things):
- """
- Add a set to the classifier. Any iterable is accepted.
- """
- if not set_of_things:
- return
-
- self._dirty = True
-
- things, sets, mapping = self._things, self._sets, self._mapping
-
- s = set(set_of_things)
- intersection = s.intersection(things) # existing things
- s.difference_update(intersection) # new things
- difference = s
- del s
-
- # Add new class for new things
- if difference:
- things.update(difference)
- sets.append(difference)
- for thing in difference:
- mapping[thing] = difference
- del difference
-
- while intersection:
- # Take one item and process the old class it belongs to
- old_class = mapping[next(iter(intersection))]
- old_class_intersection = old_class.intersection(intersection)
-
- # Update old class to remove items from new set
- old_class.difference_update(old_class_intersection)
-
- # Remove processed items from todo list
- intersection.difference_update(old_class_intersection)
-
- # Add new class for the intersection with old class
- sets.append(old_class_intersection)
- for thing in old_class_intersection:
- mapping[thing] = old_class_intersection
- del old_class_intersection
-
- def update(self, list_of_sets):
- """
- Add a a list of sets to the classifier. Any iterable of iterables is accepted.
- """
- for s in list_of_sets:
- self.add(s)
-
- def _process(self):
- if not self._dirty:
- return
-
- # Do any deferred processing
- sets = self._sets
- self._sets = [s for s in sets if s]
-
- if self._sort:
- self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))
-
- self._dirty = False
-
- # Output methods
-
- def getThings(self):
- """Returns the set of all things known so far.
-
- The return value belongs to the Classifier object and should NOT
- be modified while the classifier is still in use.
- """
- self._process()
- return self._things
-
- def getMapping(self):
- """Returns the mapping from things to their class set.
-
- The return value belongs to the Classifier object and should NOT
- be modified while the classifier is still in use.
- """
- self._process()
- return self._mapping
-
- def getClasses(self):
- """Returns the list of class sets.
-
- The return value belongs to the Classifier object and should NOT
- be modified while the classifier is still in use.
- """
- self._process()
- return self._sets
+ """
+ Main Classifier object, used to classify things into similar sets.
+ """
+
+ def __init__(self, sort=True):
+ self._things = set() # set of all things known so far
+ self._sets = [] # list of class sets produced so far
+ self._mapping = {} # map from things to their class set
+ self._dirty = False
+ self._sort = sort
+
+ def add(self, set_of_things):
+ """
+ Add a set to the classifier. Any iterable is accepted.
+ """
+ if not set_of_things:
+ return
+
+ self._dirty = True
+
+ things, sets, mapping = self._things, self._sets, self._mapping
+
+ s = set(set_of_things)
+ intersection = s.intersection(things) # existing things
+ s.difference_update(intersection) # new things
+ difference = s
+ del s
+
+ # Add new class for new things
+ if difference:
+ things.update(difference)
+ sets.append(difference)
+ for thing in difference:
+ mapping[thing] = difference
+ del difference
+
+ while intersection:
+ # Take one item and process the old class it belongs to
+ old_class = mapping[next(iter(intersection))]
+ old_class_intersection = old_class.intersection(intersection)
+
+ # Update old class to remove items from new set
+ old_class.difference_update(old_class_intersection)
+
+ # Remove processed items from todo list
+ intersection.difference_update(old_class_intersection)
+
+ # Add new class for the intersection with old class
+ sets.append(old_class_intersection)
+ for thing in old_class_intersection:
+ mapping[thing] = old_class_intersection
+ del old_class_intersection
+
+ def update(self, list_of_sets):
+ """
+ Add a a list of sets to the classifier. Any iterable of iterables is accepted.
+ """
+ for s in list_of_sets:
+ self.add(s)
+
+ def _process(self):
+ if not self._dirty:
+ return
+
+ # Do any deferred processing
+ sets = self._sets
+ self._sets = [s for s in sets if s]
+
+ if self._sort:
+ self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))
+
+ self._dirty = False
+
+ # Output methods
+
+ def getThings(self):
+ """Returns the set of all things known so far.
+
+ The return value belongs to the Classifier object and should NOT
+ be modified while the classifier is still in use.
+ """
+ self._process()
+ return self._things
+
+ def getMapping(self):
+ """Returns the mapping from things to their class set.
+
+ The return value belongs to the Classifier object and should NOT
+ be modified while the classifier is still in use.
+ """
+ self._process()
+ return self._mapping
+
+ def getClasses(self):
+ """Returns the list of class sets.
+
+ The return value belongs to the Classifier object and should NOT
+ be modified while the classifier is still in use.
+ """
+ self._process()
+ return self._sets
def classify(list_of_sets, sort=True):
- """
- Takes a iterable of iterables (list of sets from here on; but any
- iterable works.), and returns the smallest list of sets such that
- each set, is either a subset, or is disjoint from, each of the input
- sets.
-
- In other words, this function classifies all the things present in
- any of the input sets, into similar classes, based on which sets
- things are a member of.
-
- If sort=True, return class sets are sorted by decreasing size and
- their natural sort order within each class size. Otherwise, class
- sets are returned in the order that they were identified, which is
- generally not significant.
-
- >>> classify([]) == ([], {})
- True
- >>> classify([[]]) == ([], {})
- True
- >>> classify([[], []]) == ([], {})
- True
- >>> classify([[1]]) == ([{1}], {1: {1}})
- True
- >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
- True
- >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
- True
- >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
- True
- >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
- True
- >>> classify([[1,2],[2,4,5]]) == (
- ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
- True
- >>> classify([[1,2],[2,4,5]], sort=False) == (
- ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
- True
- >>> classify([[1,2,9],[2,4,5]], sort=False) == (
- ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5},
- ... 9: {1, 9}})
- True
- >>> classify([[1,2,9,15],[2,4,5]], sort=False) == (
- ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5},
- ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}})
- True
- >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)
- >>> set([frozenset(c) for c in classes]) == set(
- ... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])
- True
- >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}
- True
- """
- classifier = Classifier(sort=sort)
- classifier.update(list_of_sets)
- return classifier.getClasses(), classifier.getMapping()
+ """
+ Takes a iterable of iterables (list of sets from here on; but any
+ iterable works.), and returns the smallest list of sets such that
+ each set, is either a subset, or is disjoint from, each of the input
+ sets.
+
+ In other words, this function classifies all the things present in
+ any of the input sets, into similar classes, based on which sets
+ things are a member of.
+
+ If sort=True, return class sets are sorted by decreasing size and
+ their natural sort order within each class size. Otherwise, class
+ sets are returned in the order that they were identified, which is
+ generally not significant.
+
+ >>> classify([]) == ([], {})
+ True
+ >>> classify([[]]) == ([], {})
+ True
+ >>> classify([[], []]) == ([], {})
+ True
+ >>> classify([[1]]) == ([{1}], {1: {1}})
+ True
+ >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
+ True
+ >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
+ True
+ >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
+ True
+ >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
+ True
+ >>> classify([[1,2],[2,4,5]]) == (
+ ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
+ True
+ >>> classify([[1,2],[2,4,5]], sort=False) == (
+ ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
+ True
+ >>> classify([[1,2,9],[2,4,5]], sort=False) == (
+ ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5},
+ ... 9: {1, 9}})
+ True
+ >>> classify([[1,2,9,15],[2,4,5]], sort=False) == (
+ ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5},
+ ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}})
+ True
+ >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)
+ >>> set([frozenset(c) for c in classes]) == set(
+ ... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])
+ True
+ >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}
+ True
+ """
+ classifier = Classifier(sort=sort)
+ classifier.update(list_of_sets)
+ return classifier.getClasses(), classifier.getMapping()
if __name__ == "__main__":
- import sys, doctest
- sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
+ import sys, doctest
+
+ sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
diff --git a/Lib/fontTools/misc/cliTools.py b/Lib/fontTools/misc/cliTools.py
index e7dadf98..8322ea9e 100644
--- a/Lib/fontTools/misc/cliTools.py
+++ b/Lib/fontTools/misc/cliTools.py
@@ -6,7 +6,9 @@ import re
numberAddedRE = re.compile(r"#\d+$")
-def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, suffix=""):
+def makeOutputFileName(
+ input, outputDir=None, extension=None, overWrite=False, suffix=""
+):
"""Generates a suitable file name for writing output.
Often tools will want to take a file, do some kind of transformation to it,
@@ -44,6 +46,7 @@ def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, s
if not overWrite:
while os.path.exists(output):
output = os.path.join(
- dirName, fileName + suffix + "#" + repr(n) + extension)
+ dirName, fileName + suffix + "#" + repr(n) + extension
+ )
n += 1
return output
diff --git a/Lib/fontTools/misc/cython.py b/Lib/fontTools/misc/cython.py
index 0ba659f6..2a42d94a 100644
--- a/Lib/fontTools/misc/cython.py
+++ b/Lib/fontTools/misc/cython.py
@@ -10,9 +10,11 @@ We only define the symbols that we use. E.g. see fontTools.cu2qu
from types import SimpleNamespace
+
def _empty_decorator(x):
return x
+
compiled = False
for name in ("double", "complex", "int"):
diff --git a/Lib/fontTools/misc/dictTools.py b/Lib/fontTools/misc/dictTools.py
index ae7932c9..e3c0df73 100644
--- a/Lib/fontTools/misc/dictTools.py
+++ b/Lib/fontTools/misc/dictTools.py
@@ -1,7 +1,8 @@
"""Misc dict tools."""
-__all__ = ['hashdict']
+__all__ = ["hashdict"]
+
# https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict):
@@ -26,36 +27,54 @@ class hashdict(dict):
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
+
def __key(self):
return tuple(sorted(self.items()))
+
def __repr__(self):
- return "{0}({1})".format(self.__class__.__name__,
- ", ".join("{0}={1}".format(
- str(i[0]),repr(i[1])) for i in self.__key()))
+ return "{0}({1})".format(
+ self.__class__.__name__,
+ ", ".join("{0}={1}".format(str(i[0]), repr(i[1])) for i in self.__key()),
+ )
def __hash__(self):
return hash(self.__key())
+
def __setitem__(self, key, value):
- raise TypeError("{0} does not support item assignment"
- .format(self.__class__.__name__))
+ raise TypeError(
+ "{0} does not support item assignment".format(self.__class__.__name__)
+ )
+
def __delitem__(self, key):
- raise TypeError("{0} does not support item assignment"
- .format(self.__class__.__name__))
+ raise TypeError(
+ "{0} does not support item assignment".format(self.__class__.__name__)
+ )
+
def clear(self):
- raise TypeError("{0} does not support item assignment"
- .format(self.__class__.__name__))
+ raise TypeError(
+ "{0} does not support item assignment".format(self.__class__.__name__)
+ )
+
def pop(self, *args, **kwargs):
- raise TypeError("{0} does not support item assignment"
- .format(self.__class__.__name__))
+ raise TypeError(
+ "{0} does not support item assignment".format(self.__class__.__name__)
+ )
+
def popitem(self, *args, **kwargs):
- raise TypeError("{0} does not support item assignment"
- .format(self.__class__.__name__))
+ raise TypeError(
+ "{0} does not support item assignment".format(self.__class__.__name__)
+ )
+
def setdefault(self, *args, **kwargs):
- raise TypeError("{0} does not support item assignment"
- .format(self.__class__.__name__))
+ raise TypeError(
+ "{0} does not support item assignment".format(self.__class__.__name__)
+ )
+
def update(self, *args, **kwargs):
- raise TypeError("{0} does not support item assignment"
- .format(self.__class__.__name__))
+ raise TypeError(
+ "{0} does not support item assignment".format(self.__class__.__name__)
+ )
+
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
@@ -63,4 +82,3 @@ class hashdict(dict):
result = hashdict(self)
dict.update(result, right)
return result
-
diff --git a/Lib/fontTools/misc/eexec.py b/Lib/fontTools/misc/eexec.py
index d1d4bb6a..cafa312c 100644
--- a/Lib/fontTools/misc/eexec.py
+++ b/Lib/fontTools/misc/eexec.py
@@ -16,98 +16,104 @@ from fontTools.misc.textTools import bytechr, bytesjoin, byteord
def _decryptChar(cipher, R):
- cipher = byteord(cipher)
- plain = ( (cipher ^ (R>>8)) ) & 0xFF
- R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF
- return bytechr(plain), R
+ cipher = byteord(cipher)
+ plain = ((cipher ^ (R >> 8))) & 0xFF
+ R = ((cipher + R) * 52845 + 22719) & 0xFFFF
+ return bytechr(plain), R
+
def _encryptChar(plain, R):
- plain = byteord(plain)
- cipher = ( (plain ^ (R>>8)) ) & 0xFF
- R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF
- return bytechr(cipher), R
+ plain = byteord(plain)
+ cipher = ((plain ^ (R >> 8))) & 0xFF
+ R = ((cipher + R) * 52845 + 22719) & 0xFFFF
+ return bytechr(cipher), R
def decrypt(cipherstring, R):
- r"""
- Decrypts a string using the Type 1 encryption algorithm.
-
- Args:
- cipherstring: String of ciphertext.
- R: Initial key.
-
- Returns:
- decryptedStr: Plaintext string.
- R: Output key for subsequent decryptions.
-
- Examples::
-
- >>> testStr = b"\0\0asdadads asds\265"
- >>> decryptedStr, R = decrypt(testStr, 12321)
- >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
- True
- >>> R == 36142
- True
- """
- plainList = []
- for cipher in cipherstring:
- plain, R = _decryptChar(cipher, R)
- plainList.append(plain)
- plainstring = bytesjoin(plainList)
- return plainstring, int(R)
+ r"""
+ Decrypts a string using the Type 1 encryption algorithm.
+
+ Args:
+ cipherstring: String of ciphertext.
+ R: Initial key.
+
+ Returns:
+ decryptedStr: Plaintext string.
+ R: Output key for subsequent decryptions.
+
+ Examples::
+
+ >>> testStr = b"\0\0asdadads asds\265"
+ >>> decryptedStr, R = decrypt(testStr, 12321)
+ >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
+ True
+ >>> R == 36142
+ True
+ """
+ plainList = []
+ for cipher in cipherstring:
+ plain, R = _decryptChar(cipher, R)
+ plainList.append(plain)
+ plainstring = bytesjoin(plainList)
+ return plainstring, int(R)
+
def encrypt(plainstring, R):
- r"""
- Encrypts a string using the Type 1 encryption algorithm.
-
- Note that the algorithm as described in the Type 1 specification requires the
- plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
- number of random bytes is set to 4.) This routine does *not* add the random
- prefix to its input.
-
- Args:
- plainstring: String of plaintext.
- R: Initial key.
-
- Returns:
- cipherstring: Ciphertext string.
- R: Output key for subsequent encryptions.
-
- Examples::
-
- >>> testStr = b"\0\0asdadads asds\265"
- >>> decryptedStr, R = decrypt(testStr, 12321)
- >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
- True
- >>> R == 36142
- True
-
- >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
- >>> encryptedStr, R = encrypt(testStr, 12321)
- >>> encryptedStr == b"\0\0asdadads asds\265"
- True
- >>> R == 36142
- True
- """
- cipherList = []
- for plain in plainstring:
- cipher, R = _encryptChar(plain, R)
- cipherList.append(cipher)
- cipherstring = bytesjoin(cipherList)
- return cipherstring, int(R)
+ r"""
+ Encrypts a string using the Type 1 encryption algorithm.
+
+ Note that the algorithm as described in the Type 1 specification requires the
+ plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
+ number of random bytes is set to 4.) This routine does *not* add the random
+ prefix to its input.
+
+ Args:
+ plainstring: String of plaintext.
+ R: Initial key.
+
+ Returns:
+ cipherstring: Ciphertext string.
+ R: Output key for subsequent encryptions.
+
+ Examples::
+
+ >>> testStr = b"\0\0asdadads asds\265"
+ >>> decryptedStr, R = decrypt(testStr, 12321)
+ >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
+ True
+ >>> R == 36142
+ True
+
+ >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
+ >>> encryptedStr, R = encrypt(testStr, 12321)
+ >>> encryptedStr == b"\0\0asdadads asds\265"
+ True
+ >>> R == 36142
+ True
+ """
+ cipherList = []
+ for plain in plainstring:
+ cipher, R = _encryptChar(plain, R)
+ cipherList.append(cipher)
+ cipherstring = bytesjoin(cipherList)
+ return cipherstring, int(R)
def hexString(s):
- import binascii
- return binascii.hexlify(s)
+ import binascii
+
+ return binascii.hexlify(s)
+
def deHexString(h):
- import binascii
- h = bytesjoin(h.split())
- return binascii.unhexlify(h)
+ import binascii
+
+ h = bytesjoin(h.split())
+ return binascii.unhexlify(h)
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/misc/encodingTools.py b/Lib/fontTools/misc/encodingTools.py
index eccf951d..3b2651d3 100644
--- a/Lib/fontTools/misc/encodingTools.py
+++ b/Lib/fontTools/misc/encodingTools.py
@@ -5,67 +5,68 @@ import fontTools.encodings.codecs
# Map keyed by platformID, then platEncID, then possibly langID
_encodingMap = {
- 0: { # Unicode
- 0: 'utf_16_be',
- 1: 'utf_16_be',
- 2: 'utf_16_be',
- 3: 'utf_16_be',
- 4: 'utf_16_be',
- 5: 'utf_16_be',
- 6: 'utf_16_be',
- },
- 1: { # Macintosh
- # See
- # https://github.com/fonttools/fonttools/issues/236
- 0: { # Macintosh, platEncID==0, keyed by langID
- 15: "mac_iceland",
- 17: "mac_turkish",
- 18: "mac_croatian",
- 24: "mac_latin2",
- 25: "mac_latin2",
- 26: "mac_latin2",
- 27: "mac_latin2",
- 28: "mac_latin2",
- 36: "mac_latin2",
- 37: "mac_romanian",
- 38: "mac_latin2",
- 39: "mac_latin2",
- 40: "mac_latin2",
- Ellipsis: 'mac_roman', # Other
- },
- 1: 'x_mac_japanese_ttx',
- 2: 'x_mac_trad_chinese_ttx',
- 3: 'x_mac_korean_ttx',
- 6: 'mac_greek',
- 7: 'mac_cyrillic',
- 25: 'x_mac_simp_chinese_ttx',
- 29: 'mac_latin2',
- 35: 'mac_turkish',
- 37: 'mac_iceland',
- },
- 2: { # ISO
- 0: 'ascii',
- 1: 'utf_16_be',
- 2: 'latin1',
- },
- 3: { # Microsoft
- 0: 'utf_16_be',
- 1: 'utf_16_be',
- 2: 'shift_jis',
- 3: 'gb2312',
- 4: 'big5',
- 5: 'euc_kr',
- 6: 'johab',
- 10: 'utf_16_be',
- },
+ 0: { # Unicode
+ 0: "utf_16_be",
+ 1: "utf_16_be",
+ 2: "utf_16_be",
+ 3: "utf_16_be",
+ 4: "utf_16_be",
+ 5: "utf_16_be",
+ 6: "utf_16_be",
+ },
+ 1: { # Macintosh
+ # See
+ # https://github.com/fonttools/fonttools/issues/236
+ 0: { # Macintosh, platEncID==0, keyed by langID
+ 15: "mac_iceland",
+ 17: "mac_turkish",
+ 18: "mac_croatian",
+ 24: "mac_latin2",
+ 25: "mac_latin2",
+ 26: "mac_latin2",
+ 27: "mac_latin2",
+ 28: "mac_latin2",
+ 36: "mac_latin2",
+ 37: "mac_romanian",
+ 38: "mac_latin2",
+ 39: "mac_latin2",
+ 40: "mac_latin2",
+ Ellipsis: "mac_roman", # Other
+ },
+ 1: "x_mac_japanese_ttx",
+ 2: "x_mac_trad_chinese_ttx",
+ 3: "x_mac_korean_ttx",
+ 6: "mac_greek",
+ 7: "mac_cyrillic",
+ 25: "x_mac_simp_chinese_ttx",
+ 29: "mac_latin2",
+ 35: "mac_turkish",
+ 37: "mac_iceland",
+ },
+ 2: { # ISO
+ 0: "ascii",
+ 1: "utf_16_be",
+ 2: "latin1",
+ },
+ 3: { # Microsoft
+ 0: "utf_16_be",
+ 1: "utf_16_be",
+ 2: "shift_jis",
+ 3: "gb2312",
+ 4: "big5",
+ 5: "euc_kr",
+ 6: "johab",
+ 10: "utf_16_be",
+ },
}
+
def getEncoding(platformID, platEncID, langID, default=None):
- """Returns the Python encoding name for OpenType platformID/encodingID/langID
- triplet. If encoding for these values is not known, by default None is
- returned. That can be overriden by passing a value to the default argument.
- """
- encoding = _encodingMap.get(platformID, {}).get(platEncID, default)
- if isinstance(encoding, dict):
- encoding = encoding.get(langID, encoding[Ellipsis])
- return encoding
+ """Returns the Python encoding name for OpenType platformID/encodingID/langID
+ triplet. If encoding for these values is not known, by default None is
+ returned. That can be overriden by passing a value to the default argument.
+ """
+ encoding = _encodingMap.get(platformID, {}).get(platEncID, default)
+ if isinstance(encoding, dict):
+ encoding = encoding.get(langID, encoding[Ellipsis])
+ return encoding
diff --git a/Lib/fontTools/misc/etree.py b/Lib/fontTools/misc/etree.py
index cd4df365..9d4a65c3 100644
--- a/Lib/fontTools/misc/etree.py
+++ b/Lib/fontTools/misc/etree.py
@@ -244,7 +244,8 @@ except ImportError:
except UnicodeDecodeError:
raise ValueError(
"Bytes strings can only contain ASCII characters. "
- "Use unicode strings for non-ASCII characters.")
+ "Use unicode strings for non-ASCII characters."
+ )
except AttributeError:
_raise_serialization_error(s)
if s and _invalid_xml_string.search(s):
@@ -425,9 +426,7 @@ except ImportError:
write(_escape_cdata(elem.tail))
def _raise_serialization_error(text):
- raise TypeError(
- "cannot serialize %r (type %s)" % (text, type(text).__name__)
- )
+ raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
def _escape_cdata(text):
# escape character data
diff --git a/Lib/fontTools/misc/filenames.py b/Lib/fontTools/misc/filenames.py
index 0f010008..d279f89c 100644
--- a/Lib/fontTools/misc/filenames.py
+++ b/Lib/fontTools/misc/filenames.py
@@ -27,216 +27,220 @@ maxFileNameLength = 255
class NameTranslationError(Exception):
- pass
+ pass
def userNameToFileName(userName, existing=[], prefix="", suffix=""):
- """Converts from a user name to a file name.
-
- Takes care to avoid illegal characters, reserved file names, ambiguity between
- upper- and lower-case characters, and clashes with existing files.
-
- Args:
- userName (str): The input file name.
- existing: A case-insensitive list of all existing file names.
- prefix: Prefix to be prepended to the file name.
- suffix: Suffix to be appended to the file name.
-
- Returns:
- A suitable filename.
-
- Raises:
- NameTranslationError: If no suitable name could be generated.
-
- Examples::
-
- >>> userNameToFileName("a") == "a"
- True
- >>> userNameToFileName("A") == "A_"
- True
- >>> userNameToFileName("AE") == "A_E_"
- True
- >>> userNameToFileName("Ae") == "A_e"
- True
- >>> userNameToFileName("ae") == "ae"
- True
- >>> userNameToFileName("aE") == "aE_"
- True
- >>> userNameToFileName("a.alt") == "a.alt"
- True
- >>> userNameToFileName("A.alt") == "A_.alt"
- True
- >>> userNameToFileName("A.Alt") == "A_.A_lt"
- True
- >>> userNameToFileName("A.aLt") == "A_.aL_t"
- True
- >>> userNameToFileName(u"A.alT") == "A_.alT_"
- True
- >>> userNameToFileName("T_H") == "T__H_"
- True
- >>> userNameToFileName("T_h") == "T__h"
- True
- >>> userNameToFileName("t_h") == "t_h"
- True
- >>> userNameToFileName("F_F_I") == "F__F__I_"
- True
- >>> userNameToFileName("f_f_i") == "f_f_i"
- True
- >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
- True
- >>> userNameToFileName(".notdef") == "_notdef"
- True
- >>> userNameToFileName("con") == "_con"
- True
- >>> userNameToFileName("CON") == "C_O_N_"
- True
- >>> userNameToFileName("con.alt") == "_con.alt"
- True
- >>> userNameToFileName("alt.con") == "alt._con"
- True
- """
- # the incoming name must be a str
- if not isinstance(userName, str):
- raise ValueError("The value for userName must be a string.")
- # establish the prefix and suffix lengths
- prefixLength = len(prefix)
- suffixLength = len(suffix)
- # replace an initial period with an _
- # if no prefix is to be added
- if not prefix and userName[0] == ".":
- userName = "_" + userName[1:]
- # filter the user name
- filteredUserName = []
- for character in userName:
- # replace illegal characters with _
- if character in illegalCharacters:
- character = "_"
- # add _ to all non-lower characters
- elif character != character.lower():
- character += "_"
- filteredUserName.append(character)
- userName = "".join(filteredUserName)
- # clip to 255
- sliceLength = maxFileNameLength - prefixLength - suffixLength
- userName = userName[:sliceLength]
- # test for illegal files names
- parts = []
- for part in userName.split("."):
- if part.lower() in reservedFileNames:
- part = "_" + part
- parts.append(part)
- userName = ".".join(parts)
- # test for clash
- fullName = prefix + userName + suffix
- if fullName.lower() in existing:
- fullName = handleClash1(userName, existing, prefix, suffix)
- # finished
- return fullName
+ """Converts from a user name to a file name.
+
+ Takes care to avoid illegal characters, reserved file names, ambiguity between
+ upper- and lower-case characters, and clashes with existing files.
+
+ Args:
+ userName (str): The input file name.
+ existing: A case-insensitive list of all existing file names.
+ prefix: Prefix to be prepended to the file name.
+ suffix: Suffix to be appended to the file name.
+
+ Returns:
+ A suitable filename.
+
+ Raises:
+ NameTranslationError: If no suitable name could be generated.
+
+ Examples::
+
+ >>> userNameToFileName("a") == "a"
+ True
+ >>> userNameToFileName("A") == "A_"
+ True
+ >>> userNameToFileName("AE") == "A_E_"
+ True
+ >>> userNameToFileName("Ae") == "A_e"
+ True
+ >>> userNameToFileName("ae") == "ae"
+ True
+ >>> userNameToFileName("aE") == "aE_"
+ True
+ >>> userNameToFileName("a.alt") == "a.alt"
+ True
+ >>> userNameToFileName("A.alt") == "A_.alt"
+ True
+ >>> userNameToFileName("A.Alt") == "A_.A_lt"
+ True
+ >>> userNameToFileName("A.aLt") == "A_.aL_t"
+ True
+ >>> userNameToFileName(u"A.alT") == "A_.alT_"
+ True
+ >>> userNameToFileName("T_H") == "T__H_"
+ True
+ >>> userNameToFileName("T_h") == "T__h"
+ True
+ >>> userNameToFileName("t_h") == "t_h"
+ True
+ >>> userNameToFileName("F_F_I") == "F__F__I_"
+ True
+ >>> userNameToFileName("f_f_i") == "f_f_i"
+ True
+ >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
+ True
+ >>> userNameToFileName(".notdef") == "_notdef"
+ True
+ >>> userNameToFileName("con") == "_con"
+ True
+ >>> userNameToFileName("CON") == "C_O_N_"
+ True
+ >>> userNameToFileName("con.alt") == "_con.alt"
+ True
+ >>> userNameToFileName("alt.con") == "alt._con"
+ True
+ """
+ # the incoming name must be a str
+ if not isinstance(userName, str):
+ raise ValueError("The value for userName must be a string.")
+ # establish the prefix and suffix lengths
+ prefixLength = len(prefix)
+ suffixLength = len(suffix)
+ # replace an initial period with an _
+ # if no prefix is to be added
+ if not prefix and userName[0] == ".":
+ userName = "_" + userName[1:]
+ # filter the user name
+ filteredUserName = []
+ for character in userName:
+ # replace illegal characters with _
+ if character in illegalCharacters:
+ character = "_"
+ # add _ to all non-lower characters
+ elif character != character.lower():
+ character += "_"
+ filteredUserName.append(character)
+ userName = "".join(filteredUserName)
+ # clip to 255
+ sliceLength = maxFileNameLength - prefixLength - suffixLength
+ userName = userName[:sliceLength]
+ # test for illegal files names
+ parts = []
+ for part in userName.split("."):
+ if part.lower() in reservedFileNames:
+ part = "_" + part
+ parts.append(part)
+ userName = ".".join(parts)
+ # test for clash
+ fullName = prefix + userName + suffix
+ if fullName.lower() in existing:
+ fullName = handleClash1(userName, existing, prefix, suffix)
+ # finished
+ return fullName
+
def handleClash1(userName, existing=[], prefix="", suffix=""):
- """
- existing should be a case-insensitive list
- of all existing file names.
-
- >>> prefix = ("0" * 5) + "."
- >>> suffix = "." + ("0" * 10)
- >>> existing = ["a" * 5]
-
- >>> e = list(existing)
- >>> handleClash1(userName="A" * 5, existing=e,
- ... prefix=prefix, suffix=suffix) == (
- ... '00000.AAAAA000000000000001.0000000000')
- True
-
- >>> e = list(existing)
- >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
- >>> handleClash1(userName="A" * 5, existing=e,
- ... prefix=prefix, suffix=suffix) == (
- ... '00000.AAAAA000000000000002.0000000000')
- True
-
- >>> e = list(existing)
- >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
- >>> handleClash1(userName="A" * 5, existing=e,
- ... prefix=prefix, suffix=suffix) == (
- ... '00000.AAAAA000000000000001.0000000000')
- True
- """
- # if the prefix length + user name length + suffix length + 15 is at
- # or past the maximum length, silce 15 characters off of the user name
- prefixLength = len(prefix)
- suffixLength = len(suffix)
- if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
- l = (prefixLength + len(userName) + suffixLength + 15)
- sliceLength = maxFileNameLength - l
- userName = userName[:sliceLength]
- finalName = None
- # try to add numbers to create a unique name
- counter = 1
- while finalName is None:
- name = userName + str(counter).zfill(15)
- fullName = prefix + name + suffix
- if fullName.lower() not in existing:
- finalName = fullName
- break
- else:
- counter += 1
- if counter >= 999999999999999:
- break
- # if there is a clash, go to the next fallback
- if finalName is None:
- finalName = handleClash2(existing, prefix, suffix)
- # finished
- return finalName
+ """
+ existing should be a case-insensitive list
+ of all existing file names.
+
+ >>> prefix = ("0" * 5) + "."
+ >>> suffix = "." + ("0" * 10)
+ >>> existing = ["a" * 5]
+
+ >>> e = list(existing)
+ >>> handleClash1(userName="A" * 5, existing=e,
+ ... prefix=prefix, suffix=suffix) == (
+ ... '00000.AAAAA000000000000001.0000000000')
+ True
+
+ >>> e = list(existing)
+ >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
+ >>> handleClash1(userName="A" * 5, existing=e,
+ ... prefix=prefix, suffix=suffix) == (
+ ... '00000.AAAAA000000000000002.0000000000')
+ True
+
+ >>> e = list(existing)
+ >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
+ >>> handleClash1(userName="A" * 5, existing=e,
+ ... prefix=prefix, suffix=suffix) == (
+ ... '00000.AAAAA000000000000001.0000000000')
+ True
+ """
+ # if the prefix length + user name length + suffix length + 15 is at
+ # or past the maximum length, silce 15 characters off of the user name
+ prefixLength = len(prefix)
+ suffixLength = len(suffix)
+ if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
+ l = prefixLength + len(userName) + suffixLength + 15
+ sliceLength = maxFileNameLength - l
+ userName = userName[:sliceLength]
+ finalName = None
+ # try to add numbers to create a unique name
+ counter = 1
+ while finalName is None:
+ name = userName + str(counter).zfill(15)
+ fullName = prefix + name + suffix
+ if fullName.lower() not in existing:
+ finalName = fullName
+ break
+ else:
+ counter += 1
+ if counter >= 999999999999999:
+ break
+ # if there is a clash, go to the next fallback
+ if finalName is None:
+ finalName = handleClash2(existing, prefix, suffix)
+ # finished
+ return finalName
+
def handleClash2(existing=[], prefix="", suffix=""):
- """
- existing should be a case-insensitive list
- of all existing file names.
-
- >>> prefix = ("0" * 5) + "."
- >>> suffix = "." + ("0" * 10)
- >>> existing = [prefix + str(i) + suffix for i in range(100)]
-
- >>> e = list(existing)
- >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
- ... '00000.100.0000000000')
- True
-
- >>> e = list(existing)
- >>> e.remove(prefix + "1" + suffix)
- >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
- ... '00000.1.0000000000')
- True
-
- >>> e = list(existing)
- >>> e.remove(prefix + "2" + suffix)
- >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
- ... '00000.2.0000000000')
- True
- """
- # calculate the longest possible string
- maxLength = maxFileNameLength - len(prefix) - len(suffix)
- maxValue = int("9" * maxLength)
- # try to find a number
- finalName = None
- counter = 1
- while finalName is None:
- fullName = prefix + str(counter) + suffix
- if fullName.lower() not in existing:
- finalName = fullName
- break
- else:
- counter += 1
- if counter >= maxValue:
- break
- # raise an error if nothing has been found
- if finalName is None:
- raise NameTranslationError("No unique name could be found.")
- # finished
- return finalName
+ """
+ existing should be a case-insensitive list
+ of all existing file names.
+
+ >>> prefix = ("0" * 5) + "."
+ >>> suffix = "." + ("0" * 10)
+ >>> existing = [prefix + str(i) + suffix for i in range(100)]
+
+ >>> e = list(existing)
+ >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
+ ... '00000.100.0000000000')
+ True
+
+ >>> e = list(existing)
+ >>> e.remove(prefix + "1" + suffix)
+ >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
+ ... '00000.1.0000000000')
+ True
+
+ >>> e = list(existing)
+ >>> e.remove(prefix + "2" + suffix)
+ >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
+ ... '00000.2.0000000000')
+ True
+ """
+ # calculate the longest possible string
+ maxLength = maxFileNameLength - len(prefix) - len(suffix)
+ maxValue = int("9" * maxLength)
+ # try to find a number
+ finalName = None
+ counter = 1
+ while finalName is None:
+ fullName = prefix + str(counter) + suffix
+ if fullName.lower() not in existing:
+ finalName = fullName
+ break
+ else:
+ counter += 1
+ if counter >= maxValue:
+ break
+ # raise an error if nothing has been found
+ if finalName is None:
+ raise NameTranslationError("No unique name could be found.")
+ # finished
+ return finalName
+
if __name__ == "__main__":
- import doctest
- import sys
- sys.exit(doctest.testmod().failed)
+ import doctest
+ import sys
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/misc/fixedTools.py b/Lib/fontTools/misc/fixedTools.py
index 6ec7d06e..33004287 100644
--- a/Lib/fontTools/misc/fixedTools.py
+++ b/Lib/fontTools/misc/fixedTools.py
@@ -23,16 +23,16 @@ import logging
log = logging.getLogger(__name__)
__all__ = [
- "MAX_F2DOT14",
- "fixedToFloat",
- "floatToFixed",
- "floatToFixedToFloat",
- "floatToFixedToStr",
- "fixedToStr",
- "strToFixed",
- "strToFixedToFloat",
- "ensureVersionIsLong",
- "versionToFixed",
+ "MAX_F2DOT14",
+ "fixedToFloat",
+ "floatToFixed",
+ "floatToFixedToFloat",
+ "floatToFixedToStr",
+ "fixedToStr",
+ "strToFixed",
+ "strToFixedToFloat",
+ "ensureVersionIsLong",
+ "versionToFixed",
]
@@ -40,212 +40,214 @@ MAX_F2DOT14 = 0x7FFF / (1 << 14)
def fixedToFloat(value, precisionBits):
- """Converts a fixed-point number to a float given the number of
- precision bits.
+ """Converts a fixed-point number to a float given the number of
+ precision bits.
- Args:
- value (int): Number in fixed-point format.
- precisionBits (int): Number of precision bits.
+ Args:
+ value (int): Number in fixed-point format.
+ precisionBits (int): Number of precision bits.
- Returns:
- Floating point value.
+ Returns:
+ Floating point value.
- Examples::
+ Examples::
- >>> import math
- >>> f = fixedToFloat(-10139, precisionBits=14)
- >>> math.isclose(f, -0.61883544921875)
- True
- """
- return value / (1 << precisionBits)
+ >>> import math
+ >>> f = fixedToFloat(-10139, precisionBits=14)
+ >>> math.isclose(f, -0.61883544921875)
+ True
+ """
+ return value / (1 << precisionBits)
def floatToFixed(value, precisionBits):
- """Converts a float to a fixed-point number given the number of
- precision bits.
+ """Converts a float to a fixed-point number given the number of
+ precision bits.
- Args:
- value (float): Floating point value.
- precisionBits (int): Number of precision bits.
+ Args:
+ value (float): Floating point value.
+ precisionBits (int): Number of precision bits.
- Returns:
- int: Fixed-point representation.
+ Returns:
+ int: Fixed-point representation.
- Examples::
+ Examples::
- >>> floatToFixed(-0.61883544921875, precisionBits=14)
- -10139
- >>> floatToFixed(-0.61884, precisionBits=14)
- -10139
- """
- return otRound(value * (1 << precisionBits))
+ >>> floatToFixed(-0.61883544921875, precisionBits=14)
+ -10139
+ >>> floatToFixed(-0.61884, precisionBits=14)
+ -10139
+ """
+ return otRound(value * (1 << precisionBits))
def floatToFixedToFloat(value, precisionBits):
- """Converts a float to a fixed-point number and back again.
+ """Converts a float to a fixed-point number and back again.
- By converting the float to fixed, rounding it, and converting it back
- to float again, this returns a floating point values which is exactly
- representable in fixed-point format.
+ By converting the float to fixed, rounding it, and converting it back
+ to float again, this returns a floating point values which is exactly
+ representable in fixed-point format.
- Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``.
+ Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``.
- Args:
- value (float): The input floating point value.
- precisionBits (int): Number of precision bits.
+ Args:
+ value (float): The input floating point value.
+ precisionBits (int): Number of precision bits.
- Returns:
- float: The transformed and rounded value.
+ Returns:
+ float: The transformed and rounded value.
- Examples::
- >>> import math
- >>> f1 = -0.61884
- >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14)
- >>> f1 != f2
- True
- >>> math.isclose(f2, -0.61883544921875)
- True
- """
- scale = 1 << precisionBits
- return otRound(value * scale) / scale
+ Examples::
+ >>> import math
+ >>> f1 = -0.61884
+ >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14)
+ >>> f1 != f2
+ True
+ >>> math.isclose(f2, -0.61883544921875)
+ True
+ """
+ scale = 1 << precisionBits
+ return otRound(value * scale) / scale
def fixedToStr(value, precisionBits):
- """Converts a fixed-point number to a string representing a decimal float.
+ """Converts a fixed-point number to a string representing a decimal float.
- This chooses the float that has the shortest decimal representation (the least
- number of fractional decimal digits).
+ This chooses the float that has the shortest decimal representation (the least
+ number of fractional decimal digits).
- For example, to convert a fixed-point number in a 2.14 format, use
- ``precisionBits=14``::
+ For example, to convert a fixed-point number in a 2.14 format, use
+ ``precisionBits=14``::
- >>> fixedToStr(-10139, precisionBits=14)
- '-0.61884'
+ >>> fixedToStr(-10139, precisionBits=14)
+ '-0.61884'
- This is pretty slow compared to the simple division used in ``fixedToFloat``.
- Use sporadically when you need to serialize or print the fixed-point number in
- a human-readable form.
- It uses nearestMultipleShortestRepr under the hood.
+ This is pretty slow compared to the simple division used in ``fixedToFloat``.
+ Use sporadically when you need to serialize or print the fixed-point number in
+ a human-readable form.
+ It uses nearestMultipleShortestRepr under the hood.
- Args:
- value (int): The fixed-point value to convert.
- precisionBits (int): Number of precision bits, *up to a maximum of 16*.
+ Args:
+ value (int): The fixed-point value to convert.
+ precisionBits (int): Number of precision bits, *up to a maximum of 16*.
- Returns:
- str: A string representation of the value.
- """
- scale = 1 << precisionBits
- return nearestMultipleShortestRepr(value/scale, factor=1.0/scale)
+ Returns:
+ str: A string representation of the value.
+ """
+ scale = 1 << precisionBits
+ return nearestMultipleShortestRepr(value / scale, factor=1.0 / scale)
def strToFixed(string, precisionBits):
- """Converts a string representing a decimal float to a fixed-point number.
+ """Converts a string representing a decimal float to a fixed-point number.
- Args:
- string (str): A string representing a decimal float.
- precisionBits (int): Number of precision bits, *up to a maximum of 16*.
+ Args:
+ string (str): A string representing a decimal float.
+ precisionBits (int): Number of precision bits, *up to a maximum of 16*.
- Returns:
- int: Fixed-point representation.
+ Returns:
+ int: Fixed-point representation.
- Examples::
+ Examples::
- >>> ## to convert a float string to a 2.14 fixed-point number:
- >>> strToFixed('-0.61884', precisionBits=14)
- -10139
- """
- value = float(string)
- return otRound(value * (1 << precisionBits))
+ >>> ## to convert a float string to a 2.14 fixed-point number:
+ >>> strToFixed('-0.61884', precisionBits=14)
+ -10139
+ """
+ value = float(string)
+ return otRound(value * (1 << precisionBits))
def strToFixedToFloat(string, precisionBits):
- """Convert a string to a decimal float with fixed-point rounding.
+ """Convert a string to a decimal float with fixed-point rounding.
- This first converts string to a float, then turns it into a fixed-point
- number with ``precisionBits`` fractional binary digits, then back to a
- float again.
+ This first converts string to a float, then turns it into a fixed-point
+ number with ``precisionBits`` fractional binary digits, then back to a
+ float again.
- This is simply a shorthand for fixedToFloat(floatToFixed(float(s))).
+ This is simply a shorthand for fixedToFloat(floatToFixed(float(s))).
- Args:
- string (str): A string representing a decimal float.
- precisionBits (int): Number of precision bits.
+ Args:
+ string (str): A string representing a decimal float.
+ precisionBits (int): Number of precision bits.
- Returns:
- float: The transformed and rounded value.
+ Returns:
+ float: The transformed and rounded value.
- Examples::
+ Examples::
- >>> import math
- >>> s = '-0.61884'
- >>> bits = 14
- >>> f = strToFixedToFloat(s, precisionBits=bits)
- >>> math.isclose(f, -0.61883544921875)
- True
- >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits)
- True
- """
- value = float(string)
- scale = 1 << precisionBits
- return otRound(value * scale) / scale
+ >>> import math
+ >>> s = '-0.61884'
+ >>> bits = 14
+ >>> f = strToFixedToFloat(s, precisionBits=bits)
+ >>> math.isclose(f, -0.61883544921875)
+ True
+ >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits)
+ True
+ """
+ value = float(string)
+ scale = 1 << precisionBits
+ return otRound(value * scale) / scale
def floatToFixedToStr(value, precisionBits):
- """Convert float to string with fixed-point rounding.
+ """Convert float to string with fixed-point rounding.
- This uses the shortest decimal representation (ie. the least
- number of fractional decimal digits) to represent the equivalent
- fixed-point number with ``precisionBits`` fractional binary digits.
- It uses nearestMultipleShortestRepr under the hood.
+ This uses the shortest decimal representation (ie. the least
+ number of fractional decimal digits) to represent the equivalent
+ fixed-point number with ``precisionBits`` fractional binary digits.
+ It uses nearestMultipleShortestRepr under the hood.
- >>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
- '-0.61884'
+ >>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
+ '-0.61884'
- Args:
- value (float): The float value to convert.
- precisionBits (int): Number of precision bits, *up to a maximum of 16*.
+ Args:
+ value (float): The float value to convert.
+ precisionBits (int): Number of precision bits, *up to a maximum of 16*.
- Returns:
- str: A string representation of the value.
+ Returns:
+ str: A string representation of the value.
- """
- scale = 1 << precisionBits
- return nearestMultipleShortestRepr(value, factor=1.0/scale)
+ """
+ scale = 1 << precisionBits
+ return nearestMultipleShortestRepr(value, factor=1.0 / scale)
def ensureVersionIsLong(value):
- """Ensure a table version is an unsigned long.
-
- OpenType table version numbers are expressed as a single unsigned long
- comprising of an unsigned short major version and unsigned short minor
- version. This function detects if the value to be used as a version number
- looks too small (i.e. is less than ``0x10000``), and converts it to
- fixed-point using :func:`floatToFixed` if so.
-
- Args:
- value (Number): a candidate table version number.
-
- Returns:
- int: A table version number, possibly corrected to fixed-point.
- """
- if value < 0x10000:
- newValue = floatToFixed(value, 16)
- log.warning(
- "Table version value is a float: %.4f; "
- "fix to use hex instead: 0x%08x", value, newValue)
- value = newValue
- return value
+ """Ensure a table version is an unsigned long.
+
+ OpenType table version numbers are expressed as a single unsigned long
+ comprising of an unsigned short major version and unsigned short minor
+ version. This function detects if the value to be used as a version number
+ looks too small (i.e. is less than ``0x10000``), and converts it to
+ fixed-point using :func:`floatToFixed` if so.
+
+ Args:
+ value (Number): a candidate table version number.
+
+ Returns:
+ int: A table version number, possibly corrected to fixed-point.
+ """
+ if value < 0x10000:
+ newValue = floatToFixed(value, 16)
+ log.warning(
+ "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x",
+ value,
+ newValue,
+ )
+ value = newValue
+ return value
def versionToFixed(value):
- """Ensure a table version number is fixed-point.
+ """Ensure a table version number is fixed-point.
- Args:
- value (str): a candidate table version number.
+ Args:
+ value (str): a candidate table version number.
- Returns:
- int: A table version number, possibly corrected to fixed-point.
- """
- value = int(value, 0) if value.startswith("0") else float(value)
- value = ensureVersionIsLong(value)
- return value
+ Returns:
+ int: A table version number, possibly corrected to fixed-point.
+ """
+ value = int(value, 0) if value.startswith("0") else float(value)
+ value = ensureVersionIsLong(value)
+ return value
diff --git a/Lib/fontTools/misc/intTools.py b/Lib/fontTools/misc/intTools.py
index 6ba03e16..0ca29854 100644
--- a/Lib/fontTools/misc/intTools.py
+++ b/Lib/fontTools/misc/intTools.py
@@ -1,4 +1,4 @@
-__all__ = ["popCount"]
+__all__ = ["popCount", "bit_count", "bit_indices"]
try:
@@ -13,7 +13,7 @@ except AttributeError:
See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count
"""
-popCount = bit_count
+popCount = bit_count # alias
def bit_indices(v):
diff --git a/Lib/fontTools/misc/loggingTools.py b/Lib/fontTools/misc/loggingTools.py
index d1baa839..78704f5a 100644
--- a/Lib/fontTools/misc/loggingTools.py
+++ b/Lib/fontTools/misc/loggingTools.py
@@ -13,524 +13,531 @@ TIME_LEVEL = logging.DEBUG
# per-level format strings used by the default formatter
# (the level name is not printed for INFO and DEBUG messages)
DEFAULT_FORMATS = {
- "*": "%(levelname)s: %(message)s",
- "INFO": "%(message)s",
- "DEBUG": "%(message)s",
- }
+ "*": "%(levelname)s: %(message)s",
+ "INFO": "%(message)s",
+ "DEBUG": "%(message)s",
+}
class LevelFormatter(logging.Formatter):
- """Log formatter with level-specific formatting.
-
- Formatter class which optionally takes a dict of logging levels to
- format strings, allowing to customise the log records appearance for
- specific levels.
-
-
- Attributes:
- fmt: A dictionary mapping logging levels to format strings.
- The ``*`` key identifies the default format string.
- datefmt: As per py:class:`logging.Formatter`
- style: As per py:class:`logging.Formatter`
-
- >>> import sys
- >>> handler = logging.StreamHandler(sys.stdout)
- >>> formatter = LevelFormatter(
- ... fmt={
- ... '*': '[%(levelname)s] %(message)s',
- ... 'DEBUG': '%(name)s [%(levelname)s] %(message)s',
- ... 'INFO': '%(message)s',
- ... })
- >>> handler.setFormatter(formatter)
- >>> log = logging.getLogger('test')
- >>> log.setLevel(logging.DEBUG)
- >>> log.addHandler(handler)
- >>> log.debug('this uses a custom format string')
- test [DEBUG] this uses a custom format string
- >>> log.info('this also uses a custom format string')
- this also uses a custom format string
- >>> log.warning("this one uses the default format string")
- [WARNING] this one uses the default format string
- """
-
- def __init__(self, fmt=None, datefmt=None, style="%"):
- if style != '%':
- raise ValueError(
- "only '%' percent style is supported in both python 2 and 3")
- if fmt is None:
- fmt = DEFAULT_FORMATS
- if isinstance(fmt, str):
- default_format = fmt
- custom_formats = {}
- elif isinstance(fmt, Mapping):
- custom_formats = dict(fmt)
- default_format = custom_formats.pop("*", None)
- else:
- raise TypeError('fmt must be a str or a dict of str: %r' % fmt)
- super(LevelFormatter, self).__init__(default_format, datefmt)
- self.default_format = self._fmt
- self.custom_formats = {}
- for level, fmt in custom_formats.items():
- level = logging._checkLevel(level)
- self.custom_formats[level] = fmt
-
- def format(self, record):
- if self.custom_formats:
- fmt = self.custom_formats.get(record.levelno, self.default_format)
- if self._fmt != fmt:
- self._fmt = fmt
- # for python >= 3.2, _style needs to be set if _fmt changes
- if PercentStyle:
- self._style = PercentStyle(fmt)
- return super(LevelFormatter, self).format(record)
+ """Log formatter with level-specific formatting.
+
+ Formatter class which optionally takes a dict of logging levels to
+ format strings, allowing to customise the log records appearance for
+ specific levels.
+
+
+ Attributes:
+ fmt: A dictionary mapping logging levels to format strings.
+ The ``*`` key identifies the default format string.
+ datefmt: As per py:class:`logging.Formatter`
+ style: As per py:class:`logging.Formatter`
+
+ >>> import sys
+ >>> handler = logging.StreamHandler(sys.stdout)
+ >>> formatter = LevelFormatter(
+ ... fmt={
+ ... '*': '[%(levelname)s] %(message)s',
+ ... 'DEBUG': '%(name)s [%(levelname)s] %(message)s',
+ ... 'INFO': '%(message)s',
+ ... })
+ >>> handler.setFormatter(formatter)
+ >>> log = logging.getLogger('test')
+ >>> log.setLevel(logging.DEBUG)
+ >>> log.addHandler(handler)
+ >>> log.debug('this uses a custom format string')
+ test [DEBUG] this uses a custom format string
+ >>> log.info('this also uses a custom format string')
+ this also uses a custom format string
+ >>> log.warning("this one uses the default format string")
+ [WARNING] this one uses the default format string
+ """
+
+ def __init__(self, fmt=None, datefmt=None, style="%"):
+ if style != "%":
+ raise ValueError(
+ "only '%' percent style is supported in both python 2 and 3"
+ )
+ if fmt is None:
+ fmt = DEFAULT_FORMATS
+ if isinstance(fmt, str):
+ default_format = fmt
+ custom_formats = {}
+ elif isinstance(fmt, Mapping):
+ custom_formats = dict(fmt)
+ default_format = custom_formats.pop("*", None)
+ else:
+ raise TypeError("fmt must be a str or a dict of str: %r" % fmt)
+ super(LevelFormatter, self).__init__(default_format, datefmt)
+ self.default_format = self._fmt
+ self.custom_formats = {}
+ for level, fmt in custom_formats.items():
+ level = logging._checkLevel(level)
+ self.custom_formats[level] = fmt
+
+ def format(self, record):
+ if self.custom_formats:
+ fmt = self.custom_formats.get(record.levelno, self.default_format)
+ if self._fmt != fmt:
+ self._fmt = fmt
+ # for python >= 3.2, _style needs to be set if _fmt changes
+ if PercentStyle:
+ self._style = PercentStyle(fmt)
+ return super(LevelFormatter, self).format(record)
def configLogger(**kwargs):
- """A more sophisticated logging system configuation manager.
-
- This is more or less the same as :py:func:`logging.basicConfig`,
- with some additional options and defaults.
-
- The default behaviour is to create a ``StreamHandler`` which writes to
- sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add
- the handler to the top-level library logger ("fontTools").
-
- A number of optional keyword arguments may be specified, which can alter
- the default behaviour.
-
- Args:
-
- logger: Specifies the logger name or a Logger instance to be
- configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``,
- this function can be called multiple times to reconfigure a logger.
- If the logger or any of its children already exists before the call is
- made, they will be reset before the new configuration is applied.
- filename: Specifies that a ``FileHandler`` be created, using the
- specified filename, rather than a ``StreamHandler``.
- filemode: Specifies the mode to open the file, if filename is
- specified. (If filemode is unspecified, it defaults to ``a``).
- format: Use the specified format string for the handler. This
- argument also accepts a dictionary of format strings keyed by
- level name, to allow customising the records appearance for
- specific levels. The special ``'*'`` key is for 'any other' level.
- datefmt: Use the specified date/time format.
- level: Set the logger level to the specified level.
- stream: Use the specified stream to initialize the StreamHandler. Note
- that this argument is incompatible with ``filename`` - if both
- are present, ``stream`` is ignored.
- handlers: If specified, this should be an iterable of already created
- handlers, which will be added to the logger. Any handler in the
- list which does not have a formatter assigned will be assigned the
- formatter created in this function.
- filters: If specified, this should be an iterable of already created
- filters. If the ``handlers`` do not already have filters assigned,
- these filters will be added to them.
- propagate: All loggers have a ``propagate`` attribute which determines
- whether to continue searching for handlers up the logging hierarchy.
- If not provided, the "propagate" attribute will be set to ``False``.
- """
- # using kwargs to enforce keyword-only arguments in py2.
- handlers = kwargs.pop("handlers", None)
- if handlers is None:
- if "stream" in kwargs and "filename" in kwargs:
- raise ValueError("'stream' and 'filename' should not be "
- "specified together")
- else:
- if "stream" in kwargs or "filename" in kwargs:
- raise ValueError("'stream' or 'filename' should not be "
- "specified together with 'handlers'")
- if handlers is None:
- filename = kwargs.pop("filename", None)
- mode = kwargs.pop("filemode", 'a')
- if filename:
- h = logging.FileHandler(filename, mode)
- else:
- stream = kwargs.pop("stream", None)
- h = logging.StreamHandler(stream)
- handlers = [h]
- # By default, the top-level library logger is configured.
- logger = kwargs.pop("logger", "fontTools")
- if not logger or isinstance(logger, str):
- # empty "" or None means the 'root' logger
- logger = logging.getLogger(logger)
- # before (re)configuring, reset named logger and its children (if exist)
- _resetExistingLoggers(parent=logger.name)
- # use DEFAULT_FORMATS if 'format' is None
- fs = kwargs.pop("format", None)
- dfs = kwargs.pop("datefmt", None)
- # XXX: '%' is the only format style supported on both py2 and 3
- style = kwargs.pop("style", '%')
- fmt = LevelFormatter(fs, dfs, style)
- filters = kwargs.pop("filters", [])
- for h in handlers:
- if h.formatter is None:
- h.setFormatter(fmt)
- if not h.filters:
- for f in filters:
- h.addFilter(f)
- logger.addHandler(h)
- if logger.name != "root":
- # stop searching up the hierarchy for handlers
- logger.propagate = kwargs.pop("propagate", False)
- # set a custom severity level
- level = kwargs.pop("level", None)
- if level is not None:
- logger.setLevel(level)
- if kwargs:
- keys = ', '.join(kwargs.keys())
- raise ValueError('Unrecognised argument(s): %s' % keys)
+ """A more sophisticated logging system configuation manager.
+
+ This is more or less the same as :py:func:`logging.basicConfig`,
+ with some additional options and defaults.
+
+ The default behaviour is to create a ``StreamHandler`` which writes to
+ sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add
+ the handler to the top-level library logger ("fontTools").
+
+ A number of optional keyword arguments may be specified, which can alter
+ the default behaviour.
+
+ Args:
+
+ logger: Specifies the logger name or a Logger instance to be
+ configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``,
+ this function can be called multiple times to reconfigure a logger.
+ If the logger or any of its children already exists before the call is
+ made, they will be reset before the new configuration is applied.
+ filename: Specifies that a ``FileHandler`` be created, using the
+ specified filename, rather than a ``StreamHandler``.
+ filemode: Specifies the mode to open the file, if filename is
+ specified. (If filemode is unspecified, it defaults to ``a``).
+ format: Use the specified format string for the handler. This
+ argument also accepts a dictionary of format strings keyed by
+ level name, to allow customising the records appearance for
+ specific levels. The special ``'*'`` key is for 'any other' level.
+ datefmt: Use the specified date/time format.
+ level: Set the logger level to the specified level.
+ stream: Use the specified stream to initialize the StreamHandler. Note
+ that this argument is incompatible with ``filename`` - if both
+ are present, ``stream`` is ignored.
+ handlers: If specified, this should be an iterable of already created
+ handlers, which will be added to the logger. Any handler in the
+ list which does not have a formatter assigned will be assigned the
+ formatter created in this function.
+ filters: If specified, this should be an iterable of already created
+ filters. If the ``handlers`` do not already have filters assigned,
+ these filters will be added to them.
+ propagate: All loggers have a ``propagate`` attribute which determines
+ whether to continue searching for handlers up the logging hierarchy.
+ If not provided, the "propagate" attribute will be set to ``False``.
+ """
+ # using kwargs to enforce keyword-only arguments in py2.
+ handlers = kwargs.pop("handlers", None)
+ if handlers is None:
+ if "stream" in kwargs and "filename" in kwargs:
+ raise ValueError(
+ "'stream' and 'filename' should not be " "specified together"
+ )
+ else:
+ if "stream" in kwargs or "filename" in kwargs:
+ raise ValueError(
+ "'stream' or 'filename' should not be "
+ "specified together with 'handlers'"
+ )
+ if handlers is None:
+ filename = kwargs.pop("filename", None)
+ mode = kwargs.pop("filemode", "a")
+ if filename:
+ h = logging.FileHandler(filename, mode)
+ else:
+ stream = kwargs.pop("stream", None)
+ h = logging.StreamHandler(stream)
+ handlers = [h]
+ # By default, the top-level library logger is configured.
+ logger = kwargs.pop("logger", "fontTools")
+ if not logger or isinstance(logger, str):
+ # empty "" or None means the 'root' logger
+ logger = logging.getLogger(logger)
+ # before (re)configuring, reset named logger and its children (if exist)
+ _resetExistingLoggers(parent=logger.name)
+ # use DEFAULT_FORMATS if 'format' is None
+ fs = kwargs.pop("format", None)
+ dfs = kwargs.pop("datefmt", None)
+ # XXX: '%' is the only format style supported on both py2 and 3
+ style = kwargs.pop("style", "%")
+ fmt = LevelFormatter(fs, dfs, style)
+ filters = kwargs.pop("filters", [])
+ for h in handlers:
+ if h.formatter is None:
+ h.setFormatter(fmt)
+ if not h.filters:
+ for f in filters:
+ h.addFilter(f)
+ logger.addHandler(h)
+ if logger.name != "root":
+ # stop searching up the hierarchy for handlers
+ logger.propagate = kwargs.pop("propagate", False)
+ # set a custom severity level
+ level = kwargs.pop("level", None)
+ if level is not None:
+ logger.setLevel(level)
+ if kwargs:
+ keys = ", ".join(kwargs.keys())
+ raise ValueError("Unrecognised argument(s): %s" % keys)
def _resetExistingLoggers(parent="root"):
- """ Reset the logger named 'parent' and all its children to their initial
- state, if they already exist in the current configuration.
- """
- root = logging.root
- # get sorted list of all existing loggers
- existing = sorted(root.manager.loggerDict.keys())
- if parent == "root":
- # all the existing loggers are children of 'root'
- loggers_to_reset = [parent] + existing
- elif parent not in existing:
- # nothing to do
- return
- elif parent in existing:
- loggers_to_reset = [parent]
- # collect children, starting with the entry after parent name
- i = existing.index(parent) + 1
- prefixed = parent + "."
- pflen = len(prefixed)
- num_existing = len(existing)
- while i < num_existing:
- if existing[i][:pflen] == prefixed:
- loggers_to_reset.append(existing[i])
- i += 1
- for name in loggers_to_reset:
- if name == "root":
- root.setLevel(logging.WARNING)
- for h in root.handlers[:]:
- root.removeHandler(h)
- for f in root.filters[:]:
- root.removeFilters(f)
- root.disabled = False
- else:
- logger = root.manager.loggerDict[name]
- logger.level = logging.NOTSET
- logger.handlers = []
- logger.filters = []
- logger.propagate = True
- logger.disabled = False
+ """Reset the logger named 'parent' and all its children to their initial
+ state, if they already exist in the current configuration.
+ """
+ root = logging.root
+ # get sorted list of all existing loggers
+ existing = sorted(root.manager.loggerDict.keys())
+ if parent == "root":
+ # all the existing loggers are children of 'root'
+ loggers_to_reset = [parent] + existing
+ elif parent not in existing:
+ # nothing to do
+ return
+ elif parent in existing:
+ loggers_to_reset = [parent]
+ # collect children, starting with the entry after parent name
+ i = existing.index(parent) + 1
+ prefixed = parent + "."
+ pflen = len(prefixed)
+ num_existing = len(existing)
+ while i < num_existing:
+ if existing[i][:pflen] == prefixed:
+ loggers_to_reset.append(existing[i])
+ i += 1
+ for name in loggers_to_reset:
+ if name == "root":
+ root.setLevel(logging.WARNING)
+ for h in root.handlers[:]:
+ root.removeHandler(h)
+ for f in root.filters[:]:
+ root.removeFilters(f)
+ root.disabled = False
+ else:
+ logger = root.manager.loggerDict[name]
+ logger.level = logging.NOTSET
+ logger.handlers = []
+ logger.filters = []
+ logger.propagate = True
+ logger.disabled = False
class Timer(object):
- """ Keeps track of overall time and split/lap times.
-
- >>> import time
- >>> timer = Timer()
- >>> time.sleep(0.01)
- >>> print("First lap:", timer.split())
- First lap: ...
- >>> time.sleep(0.02)
- >>> print("Second lap:", timer.split())
- Second lap: ...
- >>> print("Overall time:", timer.time())
- Overall time: ...
-
- Can be used as a context manager inside with-statements.
-
- >>> with Timer() as t:
- ... time.sleep(0.01)
- >>> print("%0.3f seconds" % t.elapsed)
- 0... seconds
-
- If initialised with a logger, it can log the elapsed time automatically
- upon exiting the with-statement.
-
- >>> import logging
- >>> log = logging.getLogger("my-fancy-timer-logger")
- >>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout)
- >>> with Timer(log, 'do something'):
- ... time.sleep(0.01)
- Took ... to do something
-
- The same Timer instance, holding a reference to a logger, can be reused
- in multiple with-statements, optionally with different messages or levels.
-
- >>> timer = Timer(log)
- >>> with timer():
- ... time.sleep(0.01)
- elapsed time: ...s
- >>> with timer('redo it', level=logging.INFO):
- ... time.sleep(0.02)
- Took ... to redo it
-
- It can also be used as a function decorator to log the time elapsed to run
- the decorated function.
-
- >>> @timer()
- ... def test1():
- ... time.sleep(0.01)
- >>> @timer('run test 2', level=logging.INFO)
- ... def test2():
- ... time.sleep(0.02)
- >>> test1()
- Took ... to run 'test1'
- >>> test2()
- Took ... to run test 2
- """
-
- # timeit.default_timer choses the most accurate clock for each platform
- _time = timeit.default_timer
- default_msg = "elapsed time: %(time).3fs"
- default_format = "Took %(time).3fs to %(msg)s"
-
- def __init__(self, logger=None, msg=None, level=None, start=None):
- self.reset(start)
- if logger is None:
- for arg in ('msg', 'level'):
- if locals().get(arg) is not None:
- raise ValueError(
- "'%s' can't be specified without a 'logger'" % arg)
- self.logger = logger
- self.level = level if level is not None else TIME_LEVEL
- self.msg = msg
-
- def reset(self, start=None):
- """ Reset timer to 'start_time' or the current time. """
- if start is None:
- self.start = self._time()
- else:
- self.start = start
- self.last = self.start
- self.elapsed = 0.0
-
- def time(self):
- """ Return the overall time (in seconds) since the timer started. """
- return self._time() - self.start
-
- def split(self):
- """ Split and return the lap time (in seconds) in between splits. """
- current = self._time()
- self.elapsed = current - self.last
- self.last = current
- return self.elapsed
-
- def formatTime(self, msg, time):
- """ Format 'time' value in 'msg' and return formatted string.
- If 'msg' contains a '%(time)' format string, try to use that.
- Otherwise, use the predefined 'default_format'.
- If 'msg' is empty or None, fall back to 'default_msg'.
- """
- if not msg:
- msg = self.default_msg
- if msg.find("%(time)") < 0:
- msg = self.default_format % {"msg": msg, "time": time}
- else:
- try:
- msg = msg % {"time": time}
- except (KeyError, ValueError):
- pass # skip if the format string is malformed
- return msg
-
- def __enter__(self):
- """ Start a new lap """
- self.last = self._time()
- self.elapsed = 0.0
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- """ End the current lap. If timer has a logger, log the time elapsed,
- using the format string in self.msg (or the default one).
- """
- time = self.split()
- if self.logger is None or exc_type:
- # if there's no logger attached, or if any exception occurred in
- # the with-statement, exit without logging the time
- return
- message = self.formatTime(self.msg, time)
- # Allow log handlers to see the individual parts to facilitate things
- # like a server accumulating aggregate stats.
- msg_parts = { 'msg': self.msg, 'time': time }
- self.logger.log(self.level, message, msg_parts)
-
- def __call__(self, func_or_msg=None, **kwargs):
- """ If the first argument is a function, return a decorator which runs
- the wrapped function inside Timer's context manager.
- Otherwise, treat the first argument as a 'msg' string and return an updated
- Timer instance, referencing the same logger.
- A 'level' keyword can also be passed to override self.level.
- """
- if isinstance(func_or_msg, Callable):
- func = func_or_msg
- # use the function name when no explicit 'msg' is provided
- if not self.msg:
- self.msg = "run '%s'" % func.__name__
-
- @wraps(func)
- def wrapper(*args, **kwds):
- with self:
- return func(*args, **kwds)
- return wrapper
- else:
- msg = func_or_msg or kwargs.get("msg")
- level = kwargs.get("level", self.level)
- return self.__class__(self.logger, msg, level)
-
- def __float__(self):
- return self.elapsed
-
- def __int__(self):
- return int(self.elapsed)
-
- def __str__(self):
- return "%.3f" % self.elapsed
+ """Keeps track of overall time and split/lap times.
+
+ >>> import time
+ >>> timer = Timer()
+ >>> time.sleep(0.01)
+ >>> print("First lap:", timer.split())
+ First lap: ...
+ >>> time.sleep(0.02)
+ >>> print("Second lap:", timer.split())
+ Second lap: ...
+ >>> print("Overall time:", timer.time())
+ Overall time: ...
+
+ Can be used as a context manager inside with-statements.
+
+ >>> with Timer() as t:
+ ... time.sleep(0.01)
+ >>> print("%0.3f seconds" % t.elapsed)
+ 0... seconds
+
+ If initialised with a logger, it can log the elapsed time automatically
+ upon exiting the with-statement.
+
+ >>> import logging
+ >>> log = logging.getLogger("my-fancy-timer-logger")
+ >>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout)
+ >>> with Timer(log, 'do something'):
+ ... time.sleep(0.01)
+ Took ... to do something
+
+ The same Timer instance, holding a reference to a logger, can be reused
+ in multiple with-statements, optionally with different messages or levels.
+
+ >>> timer = Timer(log)
+ >>> with timer():
+ ... time.sleep(0.01)
+ elapsed time: ...s
+ >>> with timer('redo it', level=logging.INFO):
+ ... time.sleep(0.02)
+ Took ... to redo it
+
+ It can also be used as a function decorator to log the time elapsed to run
+ the decorated function.
+
+ >>> @timer()
+ ... def test1():
+ ... time.sleep(0.01)
+ >>> @timer('run test 2', level=logging.INFO)
+ ... def test2():
+ ... time.sleep(0.02)
+ >>> test1()
+ Took ... to run 'test1'
+ >>> test2()
+ Took ... to run test 2
+ """
+
+ # timeit.default_timer choses the most accurate clock for each platform
+ _time = timeit.default_timer
+ default_msg = "elapsed time: %(time).3fs"
+ default_format = "Took %(time).3fs to %(msg)s"
+
+ def __init__(self, logger=None, msg=None, level=None, start=None):
+ self.reset(start)
+ if logger is None:
+ for arg in ("msg", "level"):
+ if locals().get(arg) is not None:
+ raise ValueError("'%s' can't be specified without a 'logger'" % arg)
+ self.logger = logger
+ self.level = level if level is not None else TIME_LEVEL
+ self.msg = msg
+
+ def reset(self, start=None):
+ """Reset timer to 'start_time' or the current time."""
+ if start is None:
+ self.start = self._time()
+ else:
+ self.start = start
+ self.last = self.start
+ self.elapsed = 0.0
+
+ def time(self):
+ """Return the overall time (in seconds) since the timer started."""
+ return self._time() - self.start
+
+ def split(self):
+ """Split and return the lap time (in seconds) in between splits."""
+ current = self._time()
+ self.elapsed = current - self.last
+ self.last = current
+ return self.elapsed
+
+ def formatTime(self, msg, time):
+ """Format 'time' value in 'msg' and return formatted string.
+ If 'msg' contains a '%(time)' format string, try to use that.
+ Otherwise, use the predefined 'default_format'.
+ If 'msg' is empty or None, fall back to 'default_msg'.
+ """
+ if not msg:
+ msg = self.default_msg
+ if msg.find("%(time)") < 0:
+ msg = self.default_format % {"msg": msg, "time": time}
+ else:
+ try:
+ msg = msg % {"time": time}
+ except (KeyError, ValueError):
+ pass # skip if the format string is malformed
+ return msg
+
+ def __enter__(self):
+ """Start a new lap"""
+ self.last = self._time()
+ self.elapsed = 0.0
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """End the current lap. If timer has a logger, log the time elapsed,
+ using the format string in self.msg (or the default one).
+ """
+ time = self.split()
+ if self.logger is None or exc_type:
+ # if there's no logger attached, or if any exception occurred in
+ # the with-statement, exit without logging the time
+ return
+ message = self.formatTime(self.msg, time)
+ # Allow log handlers to see the individual parts to facilitate things
+ # like a server accumulating aggregate stats.
+ msg_parts = {"msg": self.msg, "time": time}
+ self.logger.log(self.level, message, msg_parts)
+
+ def __call__(self, func_or_msg=None, **kwargs):
+ """If the first argument is a function, return a decorator which runs
+ the wrapped function inside Timer's context manager.
+ Otherwise, treat the first argument as a 'msg' string and return an updated
+ Timer instance, referencing the same logger.
+ A 'level' keyword can also be passed to override self.level.
+ """
+ if isinstance(func_or_msg, Callable):
+ func = func_or_msg
+ # use the function name when no explicit 'msg' is provided
+ if not self.msg:
+ self.msg = "run '%s'" % func.__name__
+
+ @wraps(func)
+ def wrapper(*args, **kwds):
+ with self:
+ return func(*args, **kwds)
+
+ return wrapper
+ else:
+ msg = func_or_msg or kwargs.get("msg")
+ level = kwargs.get("level", self.level)
+ return self.__class__(self.logger, msg, level)
+
+ def __float__(self):
+ return self.elapsed
+
+ def __int__(self):
+ return int(self.elapsed)
+
+ def __str__(self):
+ return "%.3f" % self.elapsed
class ChannelsFilter(logging.Filter):
- """Provides a hierarchical filter for log entries based on channel names.
-
- Filters out records emitted from a list of enabled channel names,
- including their children. It works the same as the ``logging.Filter``
- class, but allows the user to specify multiple channel names.
-
- >>> import sys
- >>> handler = logging.StreamHandler(sys.stdout)
- >>> handler.setFormatter(logging.Formatter("%(message)s"))
- >>> filter = ChannelsFilter("A.B", "C.D")
- >>> handler.addFilter(filter)
- >>> root = logging.getLogger()
- >>> root.addHandler(handler)
- >>> root.setLevel(level=logging.DEBUG)
- >>> logging.getLogger('A.B').debug('this record passes through')
- this record passes through
- >>> logging.getLogger('A.B.C').debug('records from children also pass')
- records from children also pass
- >>> logging.getLogger('C.D').debug('this one as well')
- this one as well
- >>> logging.getLogger('A.B.').debug('also this one')
- also this one
- >>> logging.getLogger('A.F').debug('but this one does not!')
- >>> logging.getLogger('C.DE').debug('neither this one!')
- """
-
- def __init__(self, *names):
- self.names = names
- self.num = len(names)
- self.lengths = {n: len(n) for n in names}
-
- def filter(self, record):
- if self.num == 0:
- return True
- for name in self.names:
- nlen = self.lengths[name]
- if name == record.name:
- return True
- elif (record.name.find(name, 0, nlen) == 0
- and record.name[nlen] == "."):
- return True
- return False
+ """Provides a hierarchical filter for log entries based on channel names.
+
+ Filters out records emitted from a list of enabled channel names,
+ including their children. It works the same as the ``logging.Filter``
+ class, but allows the user to specify multiple channel names.
+
+ >>> import sys
+ >>> handler = logging.StreamHandler(sys.stdout)
+ >>> handler.setFormatter(logging.Formatter("%(message)s"))
+ >>> filter = ChannelsFilter("A.B", "C.D")
+ >>> handler.addFilter(filter)
+ >>> root = logging.getLogger()
+ >>> root.addHandler(handler)
+ >>> root.setLevel(level=logging.DEBUG)
+ >>> logging.getLogger('A.B').debug('this record passes through')
+ this record passes through
+ >>> logging.getLogger('A.B.C').debug('records from children also pass')
+ records from children also pass
+ >>> logging.getLogger('C.D').debug('this one as well')
+ this one as well
+ >>> logging.getLogger('A.B.').debug('also this one')
+ also this one
+ >>> logging.getLogger('A.F').debug('but this one does not!')
+ >>> logging.getLogger('C.DE').debug('neither this one!')
+ """
+
+ def __init__(self, *names):
+ self.names = names
+ self.num = len(names)
+ self.lengths = {n: len(n) for n in names}
+
+ def filter(self, record):
+ if self.num == 0:
+ return True
+ for name in self.names:
+ nlen = self.lengths[name]
+ if name == record.name:
+ return True
+ elif record.name.find(name, 0, nlen) == 0 and record.name[nlen] == ".":
+ return True
+ return False
class CapturingLogHandler(logging.Handler):
- def __init__(self, logger, level):
- super(CapturingLogHandler, self).__init__(level=level)
- self.records = []
- if isinstance(logger, str):
- self.logger = logging.getLogger(logger)
- else:
- self.logger = logger
-
- def __enter__(self):
- self.original_disabled = self.logger.disabled
- self.original_level = self.logger.level
- self.original_propagate = self.logger.propagate
-
- self.logger.addHandler(self)
- self.logger.setLevel(self.level)
- self.logger.disabled = False
- self.logger.propagate = False
-
- return self
-
- def __exit__(self, type, value, traceback):
- self.logger.removeHandler(self)
- self.logger.setLevel(self.original_level)
- self.logger.disabled = self.original_disabled
- self.logger.propagate = self.original_propagate
-
- return self
-
- def emit(self, record):
- self.records.append(record)
-
- def assertRegex(self, regexp, msg=None):
- import re
- pattern = re.compile(regexp)
- for r in self.records:
- if pattern.search(r.getMessage()):
- return True
- if msg is None:
- msg = "Pattern '%s' not found in logger records" % regexp
- assert 0, msg
+ def __init__(self, logger, level):
+ super(CapturingLogHandler, self).__init__(level=level)
+ self.records = []
+ if isinstance(logger, str):
+ self.logger = logging.getLogger(logger)
+ else:
+ self.logger = logger
+
+ def __enter__(self):
+ self.original_disabled = self.logger.disabled
+ self.original_level = self.logger.level
+ self.original_propagate = self.logger.propagate
+
+ self.logger.addHandler(self)
+ self.logger.setLevel(self.level)
+ self.logger.disabled = False
+ self.logger.propagate = False
+
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.logger.removeHandler(self)
+ self.logger.setLevel(self.original_level)
+ self.logger.disabled = self.original_disabled
+ self.logger.propagate = self.original_propagate
+
+ return self
+
+ def emit(self, record):
+ self.records.append(record)
+
+ def assertRegex(self, regexp, msg=None):
+ import re
+
+ pattern = re.compile(regexp)
+ for r in self.records:
+ if pattern.search(r.getMessage()):
+ return True
+ if msg is None:
+ msg = "Pattern '%s' not found in logger records" % regexp
+ assert 0, msg
class LogMixin(object):
- """ Mixin class that adds logging functionality to another class.
-
- You can define a new class that subclasses from ``LogMixin`` as well as
- other base classes through multiple inheritance.
- All instances of that class will have a ``log`` property that returns
- a ``logging.Logger`` named after their respective ``<module>.<class>``.
-
- For example:
-
- >>> class BaseClass(object):
- ... pass
- >>> class MyClass(LogMixin, BaseClass):
- ... pass
- >>> a = MyClass()
- >>> isinstance(a.log, logging.Logger)
- True
- >>> print(a.log.name)
- fontTools.misc.loggingTools.MyClass
- >>> class AnotherClass(MyClass):
- ... pass
- >>> b = AnotherClass()
- >>> isinstance(b.log, logging.Logger)
- True
- >>> print(b.log.name)
- fontTools.misc.loggingTools.AnotherClass
- """
-
- @property
- def log(self):
- if not hasattr(self, "_log"):
- name = ".".join(
- (self.__class__.__module__, self.__class__.__name__)
- )
- self._log = logging.getLogger(name)
- return self._log
+ """Mixin class that adds logging functionality to another class.
+
+ You can define a new class that subclasses from ``LogMixin`` as well as
+ other base classes through multiple inheritance.
+ All instances of that class will have a ``log`` property that returns
+ a ``logging.Logger`` named after their respective ``<module>.<class>``.
+
+ For example:
+
+ >>> class BaseClass(object):
+ ... pass
+ >>> class MyClass(LogMixin, BaseClass):
+ ... pass
+ >>> a = MyClass()
+ >>> isinstance(a.log, logging.Logger)
+ True
+ >>> print(a.log.name)
+ fontTools.misc.loggingTools.MyClass
+ >>> class AnotherClass(MyClass):
+ ... pass
+ >>> b = AnotherClass()
+ >>> isinstance(b.log, logging.Logger)
+ True
+ >>> print(b.log.name)
+ fontTools.misc.loggingTools.AnotherClass
+ """
+
+ @property
+ def log(self):
+ if not hasattr(self, "_log"):
+ name = ".".join((self.__class__.__module__, self.__class__.__name__))
+ self._log = logging.getLogger(name)
+ return self._log
def deprecateArgument(name, msg, category=UserWarning):
- """ Raise a warning about deprecated function argument 'name'. """
- warnings.warn(
- "%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)
+ """Raise a warning about deprecated function argument 'name'."""
+ warnings.warn("%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)
def deprecateFunction(msg, category=UserWarning):
- """ Decorator to raise a warning when a deprecated function is called. """
- def decorator(func):
- @wraps(func)
- def wrapper(*args, **kwargs):
- warnings.warn(
- "%r is deprecated; %s" % (func.__name__, msg),
- category=category, stacklevel=2)
- return func(*args, **kwargs)
- return wrapper
- return decorator
+ """Decorator to raise a warning when a deprecated function is called."""
+
+ def decorator(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ warnings.warn(
+ "%r is deprecated; %s" % (func.__name__, msg),
+ category=category,
+ stacklevel=2,
+ )
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorator
if __name__ == "__main__":
- import doctest
- sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
+ import doctest
+
+ sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
diff --git a/Lib/fontTools/misc/macCreatorType.py b/Lib/fontTools/misc/macCreatorType.py
index 6b191054..36b15aca 100644
--- a/Lib/fontTools/misc/macCreatorType.py
+++ b/Lib/fontTools/misc/macCreatorType.py
@@ -1,54 +1,56 @@
from fontTools.misc.textTools import Tag, bytesjoin, strjoin
+
try:
- import xattr
+ import xattr
except ImportError:
- xattr = None
+ xattr = None
def _reverseString(s):
- s = list(s)
- s.reverse()
- return strjoin(s)
+ s = list(s)
+ s.reverse()
+ return strjoin(s)
def getMacCreatorAndType(path):
- """Returns file creator and file type codes for a path.
-
- Args:
- path (str): A file path.
-
- Returns:
- A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
- representing the file creator and the second representing the
- file type.
- """
- if xattr is not None:
- try:
- finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo')
- except (KeyError, IOError):
- pass
- else:
- fileType = Tag(finderInfo[:4])
- fileCreator = Tag(finderInfo[4:8])
- return fileCreator, fileType
- return None, None
+ """Returns file creator and file type codes for a path.
+
+ Args:
+ path (str): A file path.
+
+ Returns:
+ A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
+ representing the file creator and the second representing the
+ file type.
+ """
+ if xattr is not None:
+ try:
+ finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")
+ except (KeyError, IOError):
+ pass
+ else:
+ fileType = Tag(finderInfo[:4])
+ fileCreator = Tag(finderInfo[4:8])
+ return fileCreator, fileType
+ return None, None
def setMacCreatorAndType(path, fileCreator, fileType):
- """Set file creator and file type codes for a path.
-
- Note that if the ``xattr`` module is not installed, no action is
- taken but no error is raised.
-
- Args:
- path (str): A file path.
- fileCreator: A four-character file creator tag.
- fileType: A four-character file type tag.
-
- """
- if xattr is not None:
- from fontTools.misc.textTools import pad
- if not all(len(s) == 4 for s in (fileCreator, fileType)):
- raise TypeError('arg must be string of 4 chars')
- finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
- xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo)
+ """Set file creator and file type codes for a path.
+
+ Note that if the ``xattr`` module is not installed, no action is
+ taken but no error is raised.
+
+ Args:
+ path (str): A file path.
+ fileCreator: A four-character file creator tag.
+ fileType: A four-character file type tag.
+
+ """
+ if xattr is not None:
+ from fontTools.misc.textTools import pad
+
+ if not all(len(s) == 4 for s in (fileCreator, fileType)):
+ raise TypeError("arg must be string of 4 chars")
+ finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
+ xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)
diff --git a/Lib/fontTools/misc/macRes.py b/Lib/fontTools/misc/macRes.py
index 895ca1b8..f5a6cfe4 100644
--- a/Lib/fontTools/misc/macRes.py
+++ b/Lib/fontTools/misc/macRes.py
@@ -7,216 +7,218 @@ from collections.abc import MutableMapping
class ResourceError(Exception):
- pass
+ pass
class ResourceReader(MutableMapping):
- """Reader for Mac OS resource forks.
-
- Parses a resource fork and returns resources according to their type.
- If run on OS X, this will open the resource fork in the filesystem.
- Otherwise, it will open the file itself and attempt to read it as
- though it were a resource fork.
-
- The returned object can be indexed by type and iterated over,
- returning in each case a list of py:class:`Resource` objects
- representing all the resources of a certain type.
-
- """
- def __init__(self, fileOrPath):
- """Open a file
-
- Args:
- fileOrPath: Either an object supporting a ``read`` method, an
- ``os.PathLike`` object, or a string.
- """
- self._resources = OrderedDict()
- if hasattr(fileOrPath, 'read'):
- self.file = fileOrPath
- else:
- try:
- # try reading from the resource fork (only works on OS X)
- self.file = self.openResourceFork(fileOrPath)
- self._readFile()
- return
- except (ResourceError, IOError):
- # if it fails, use the data fork
- self.file = self.openDataFork(fileOrPath)
- self._readFile()
-
- @staticmethod
- def openResourceFork(path):
- if hasattr(path, "__fspath__"): # support os.PathLike objects
- path = path.__fspath__()
- with open(path + '/..namedfork/rsrc', 'rb') as resfork:
- data = resfork.read()
- infile = BytesIO(data)
- infile.name = path
- return infile
-
- @staticmethod
- def openDataFork(path):
- with open(path, 'rb') as datafork:
- data = datafork.read()
- infile = BytesIO(data)
- infile.name = path
- return infile
-
- def _readFile(self):
- self._readHeaderAndMap()
- self._readTypeList()
-
- def _read(self, numBytes, offset=None):
- if offset is not None:
- try:
- self.file.seek(offset)
- except OverflowError:
- raise ResourceError("Failed to seek offset ('offset' is too large)")
- if self.file.tell() != offset:
- raise ResourceError('Failed to seek offset (reached EOF)')
- try:
- data = self.file.read(numBytes)
- except OverflowError:
- raise ResourceError("Cannot read resource ('numBytes' is too large)")
- if len(data) != numBytes:
- raise ResourceError('Cannot read resource (not enough data)')
- return data
-
- def _readHeaderAndMap(self):
- self.file.seek(0)
- headerData = self._read(ResourceForkHeaderSize)
- sstruct.unpack(ResourceForkHeader, headerData, self)
- # seek to resource map, skip reserved
- mapOffset = self.mapOffset + 22
- resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
- sstruct.unpack(ResourceMapHeader, resourceMapData, self)
- self.absTypeListOffset = self.mapOffset + self.typeListOffset
- self.absNameListOffset = self.mapOffset + self.nameListOffset
-
- def _readTypeList(self):
- absTypeListOffset = self.absTypeListOffset
- numTypesData = self._read(2, absTypeListOffset)
- self.numTypes, = struct.unpack('>H', numTypesData)
- absTypeListOffset2 = absTypeListOffset + 2
- for i in range(self.numTypes + 1):
- resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
- resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
- item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
- resType = tostr(item['type'], encoding='mac-roman')
- refListOffset = absTypeListOffset + item['refListOffset']
- numRes = item['numRes'] + 1
- resources = self._readReferenceList(resType, refListOffset, numRes)
- self._resources[resType] = resources
-
- def _readReferenceList(self, resType, refListOffset, numRes):
- resources = []
- for i in range(numRes):
- refOffset = refListOffset + ResourceRefItemSize * i
- refData = self._read(ResourceRefItemSize, refOffset)
- res = Resource(resType)
- res.decompile(refData, self)
- resources.append(res)
- return resources
-
- def __getitem__(self, resType):
- return self._resources[resType]
-
- def __delitem__(self, resType):
- del self._resources[resType]
-
- def __setitem__(self, resType, resources):
- self._resources[resType] = resources
-
- def __len__(self):
- return len(self._resources)
-
- def __iter__(self):
- return iter(self._resources)
-
- def keys(self):
- return self._resources.keys()
-
- @property
- def types(self):
- """A list of the types of resources in the resource fork."""
- return list(self._resources.keys())
-
- def countResources(self, resType):
- """Return the number of resources of a given type."""
- try:
- return len(self[resType])
- except KeyError:
- return 0
-
- def getIndices(self, resType):
- """Returns a list of indices of resources of a given type."""
- numRes = self.countResources(resType)
- if numRes:
- return list(range(1, numRes+1))
- else:
- return []
-
- def getNames(self, resType):
- """Return list of names of all resources of a given type."""
- return [res.name for res in self.get(resType, []) if res.name is not None]
-
- def getIndResource(self, resType, index):
- """Return resource of given type located at an index ranging from 1
- to the number of resources for that type, or None if not found.
- """
- if index < 1:
- return None
- try:
- res = self[resType][index-1]
- except (KeyError, IndexError):
- return None
- return res
-
- def getNamedResource(self, resType, name):
- """Return the named resource of given type, else return None."""
- name = tostr(name, encoding='mac-roman')
- for res in self.get(resType, []):
- if res.name == name:
- return res
- return None
-
- def close(self):
- if not self.file.closed:
- self.file.close()
+ """Reader for Mac OS resource forks.
+
+ Parses a resource fork and returns resources according to their type.
+ If run on OS X, this will open the resource fork in the filesystem.
+ Otherwise, it will open the file itself and attempt to read it as
+ though it were a resource fork.
+
+ The returned object can be indexed by type and iterated over,
+ returning in each case a list of py:class:`Resource` objects
+ representing all the resources of a certain type.
+
+ """
+
+ def __init__(self, fileOrPath):
+ """Open a file
+
+ Args:
+ fileOrPath: Either an object supporting a ``read`` method, an
+ ``os.PathLike`` object, or a string.
+ """
+ self._resources = OrderedDict()
+ if hasattr(fileOrPath, "read"):
+ self.file = fileOrPath
+ else:
+ try:
+ # try reading from the resource fork (only works on OS X)
+ self.file = self.openResourceFork(fileOrPath)
+ self._readFile()
+ return
+ except (ResourceError, IOError):
+ # if it fails, use the data fork
+ self.file = self.openDataFork(fileOrPath)
+ self._readFile()
+
+ @staticmethod
+ def openResourceFork(path):
+ if hasattr(path, "__fspath__"): # support os.PathLike objects
+ path = path.__fspath__()
+ with open(path + "/..namedfork/rsrc", "rb") as resfork:
+ data = resfork.read()
+ infile = BytesIO(data)
+ infile.name = path
+ return infile
+
+ @staticmethod
+ def openDataFork(path):
+ with open(path, "rb") as datafork:
+ data = datafork.read()
+ infile = BytesIO(data)
+ infile.name = path
+ return infile
+
+ def _readFile(self):
+ self._readHeaderAndMap()
+ self._readTypeList()
+
+ def _read(self, numBytes, offset=None):
+ if offset is not None:
+ try:
+ self.file.seek(offset)
+ except OverflowError:
+ raise ResourceError("Failed to seek offset ('offset' is too large)")
+ if self.file.tell() != offset:
+ raise ResourceError("Failed to seek offset (reached EOF)")
+ try:
+ data = self.file.read(numBytes)
+ except OverflowError:
+ raise ResourceError("Cannot read resource ('numBytes' is too large)")
+ if len(data) != numBytes:
+ raise ResourceError("Cannot read resource (not enough data)")
+ return data
+
+ def _readHeaderAndMap(self):
+ self.file.seek(0)
+ headerData = self._read(ResourceForkHeaderSize)
+ sstruct.unpack(ResourceForkHeader, headerData, self)
+ # seek to resource map, skip reserved
+ mapOffset = self.mapOffset + 22
+ resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
+ sstruct.unpack(ResourceMapHeader, resourceMapData, self)
+ self.absTypeListOffset = self.mapOffset + self.typeListOffset
+ self.absNameListOffset = self.mapOffset + self.nameListOffset
+
+ def _readTypeList(self):
+ absTypeListOffset = self.absTypeListOffset
+ numTypesData = self._read(2, absTypeListOffset)
+ (self.numTypes,) = struct.unpack(">H", numTypesData)
+ absTypeListOffset2 = absTypeListOffset + 2
+ for i in range(self.numTypes + 1):
+ resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
+ resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
+ item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
+ resType = tostr(item["type"], encoding="mac-roman")
+ refListOffset = absTypeListOffset + item["refListOffset"]
+ numRes = item["numRes"] + 1
+ resources = self._readReferenceList(resType, refListOffset, numRes)
+ self._resources[resType] = resources
+
+ def _readReferenceList(self, resType, refListOffset, numRes):
+ resources = []
+ for i in range(numRes):
+ refOffset = refListOffset + ResourceRefItemSize * i
+ refData = self._read(ResourceRefItemSize, refOffset)
+ res = Resource(resType)
+ res.decompile(refData, self)
+ resources.append(res)
+ return resources
+
+ def __getitem__(self, resType):
+ return self._resources[resType]
+
+ def __delitem__(self, resType):
+ del self._resources[resType]
+
+ def __setitem__(self, resType, resources):
+ self._resources[resType] = resources
+
+ def __len__(self):
+ return len(self._resources)
+
+ def __iter__(self):
+ return iter(self._resources)
+
+ def keys(self):
+ return self._resources.keys()
+
+ @property
+ def types(self):
+ """A list of the types of resources in the resource fork."""
+ return list(self._resources.keys())
+
+ def countResources(self, resType):
+ """Return the number of resources of a given type."""
+ try:
+ return len(self[resType])
+ except KeyError:
+ return 0
+
+ def getIndices(self, resType):
+ """Returns a list of indices of resources of a given type."""
+ numRes = self.countResources(resType)
+ if numRes:
+ return list(range(1, numRes + 1))
+ else:
+ return []
+
+ def getNames(self, resType):
+ """Return list of names of all resources of a given type."""
+ return [res.name for res in self.get(resType, []) if res.name is not None]
+
+ def getIndResource(self, resType, index):
+ """Return resource of given type located at an index ranging from 1
+ to the number of resources for that type, or None if not found.
+ """
+ if index < 1:
+ return None
+ try:
+ res = self[resType][index - 1]
+ except (KeyError, IndexError):
+ return None
+ return res
+
+ def getNamedResource(self, resType, name):
+ """Return the named resource of given type, else return None."""
+ name = tostr(name, encoding="mac-roman")
+ for res in self.get(resType, []):
+ if res.name == name:
+ return res
+ return None
+
+ def close(self):
+ if not self.file.closed:
+ self.file.close()
class Resource(object):
- """Represents a resource stored within a resource fork.
-
- Attributes:
- type: resource type.
- data: resource data.
- id: ID.
- name: resource name.
- attr: attributes.
- """
-
- def __init__(self, resType=None, resData=None, resID=None, resName=None,
- resAttr=None):
- self.type = resType
- self.data = resData
- self.id = resID
- self.name = resName
- self.attr = resAttr
-
- def decompile(self, refData, reader):
- sstruct.unpack(ResourceRefItem, refData, self)
- # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
- self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
- absDataOffset = reader.dataOffset + self.dataOffset
- dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
- self.data = reader._read(dataLength)
- if self.nameOffset == -1:
- return
- absNameOffset = reader.absNameListOffset + self.nameOffset
- nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
- name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
- self.name = tostr(name, encoding='mac-roman')
+ """Represents a resource stored within a resource fork.
+
+ Attributes:
+ type: resource type.
+ data: resource data.
+ id: ID.
+ name: resource name.
+ attr: attributes.
+ """
+
+ def __init__(
+ self, resType=None, resData=None, resID=None, resName=None, resAttr=None
+ ):
+ self.type = resType
+ self.data = resData
+ self.id = resID
+ self.name = resName
+ self.attr = resAttr
+
+ def decompile(self, refData, reader):
+ sstruct.unpack(ResourceRefItem, refData, self)
+ # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
+ (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
+ absDataOffset = reader.dataOffset + self.dataOffset
+ (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
+ self.data = reader._read(dataLength)
+ if self.nameOffset == -1:
+ return
+ absNameOffset = reader.absNameListOffset + self.nameOffset
+ (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
+ (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
+ self.name = tostr(name, encoding="mac-roman")
ResourceForkHeader = """
diff --git a/Lib/fontTools/misc/plistlib/__init__.py b/Lib/fontTools/misc/plistlib/__init__.py
index eb4b5259..066eef38 100644
--- a/Lib/fontTools/misc/plistlib/__init__.py
+++ b/Lib/fontTools/misc/plistlib/__init__.py
@@ -176,7 +176,7 @@ class PlistTarget:
True
Links:
- https://github.com/python/cpython/blob/master/Lib/plistlib.py
+ https://github.com/python/cpython/blob/main/Lib/plistlib.py
http://lxml.de/parsing.html#the-target-parser-interface
"""
@@ -353,7 +353,9 @@ def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element:
return el
-def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
+def _dict_element(
+ d: Mapping[str, PlistEncodable], ctx: SimpleNamespace
+) -> etree.Element:
el = etree.Element("dict")
items = d.items()
if ctx.sort_keys:
@@ -371,7 +373,9 @@ def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etre
return el
-def _array_element(array: Sequence[PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
+def _array_element(
+ array: Sequence[PlistEncodable], ctx: SimpleNamespace
+) -> etree.Element:
el = etree.Element("array")
if len(array) == 0:
return el
diff --git a/Lib/fontTools/misc/psCharStrings.py b/Lib/fontTools/misc/psCharStrings.py
index 549dae25..cc9ca01c 100644
--- a/Lib/fontTools/misc/psCharStrings.py
+++ b/Lib/fontTools/misc/psCharStrings.py
@@ -3,7 +3,10 @@ CFF dictionary data and Type1/Type2 CharStrings.
"""
from fontTools.misc.fixedTools import (
- fixedToFloat, floatToFixed, floatToFixedToStr, strToFixedToFloat,
+ fixedToFloat,
+ floatToFixed,
+ floatToFixedToStr,
+ strToFixedToFloat,
)
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin
from fontTools.pens.boundsPen import BoundsPen
@@ -15,59 +18,67 @@ log = logging.getLogger(__name__)
def read_operator(self, b0, data, index):
- if b0 == 12:
- op = (b0, byteord(data[index]))
- index = index+1
- else:
- op = b0
- try:
- operator = self.operators[op]
- except KeyError:
- return None, index
- value = self.handle_operator(operator)
- return value, index
+ if b0 == 12:
+ op = (b0, byteord(data[index]))
+ index = index + 1
+ else:
+ op = b0
+ try:
+ operator = self.operators[op]
+ except KeyError:
+ return None, index
+ value = self.handle_operator(operator)
+ return value, index
+
def read_byte(self, b0, data, index):
- return b0 - 139, index
+ return b0 - 139, index
+
def read_smallInt1(self, b0, data, index):
- b1 = byteord(data[index])
- return (b0-247)*256 + b1 + 108, index+1
+ b1 = byteord(data[index])
+ return (b0 - 247) * 256 + b1 + 108, index + 1
+
def read_smallInt2(self, b0, data, index):
- b1 = byteord(data[index])
- return -(b0-251)*256 - b1 - 108, index+1
+ b1 = byteord(data[index])
+ return -(b0 - 251) * 256 - b1 - 108, index + 1
+
def read_shortInt(self, b0, data, index):
- value, = struct.unpack(">h", data[index:index+2])
- return value, index+2
+ (value,) = struct.unpack(">h", data[index : index + 2])
+ return value, index + 2
+
def read_longInt(self, b0, data, index):
- value, = struct.unpack(">l", data[index:index+4])
- return value, index+4
+ (value,) = struct.unpack(">l", data[index : index + 4])
+ return value, index + 4
+
def read_fixed1616(self, b0, data, index):
- value, = struct.unpack(">l", data[index:index+4])
- return fixedToFloat(value, precisionBits=16), index+4
+ (value,) = struct.unpack(">l", data[index : index + 4])
+ return fixedToFloat(value, precisionBits=16), index + 4
+
def read_reserved(self, b0, data, index):
- assert NotImplementedError
- return NotImplemented, index
+ assert NotImplementedError
+ return NotImplemented, index
+
def read_realNumber(self, b0, data, index):
- number = ''
- while True:
- b = byteord(data[index])
- index = index + 1
- nibble0 = (b & 0xf0) >> 4
- nibble1 = b & 0x0f
- if nibble0 == 0xf:
- break
- number = number + realNibbles[nibble0]
- if nibble1 == 0xf:
- break
- number = number + realNibbles[nibble1]
- return float(number), index
+ number = ""
+ while True:
+ b = byteord(data[index])
+ index = index + 1
+ nibble0 = (b & 0xF0) >> 4
+ nibble1 = b & 0x0F
+ if nibble0 == 0xF:
+ break
+ number = number + realNibbles[nibble0]
+ if nibble1 == 0xF:
+ break
+ number = number + realNibbles[nibble1]
+ return float(number), index
t1OperandEncoding = [None] * 256
@@ -88,1229 +99,1378 @@ cffDictOperandEncoding[30] = read_realNumber
cffDictOperandEncoding[255] = read_reserved
-realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
- '.', 'E', 'E-', None, '-']
-realNibblesDict = {v:i for i,v in enumerate(realNibbles)}
+realNibbles = [
+ "0",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+ "6",
+ "7",
+ "8",
+ "9",
+ ".",
+ "E",
+ "E-",
+ None,
+ "-",
+]
+realNibblesDict = {v: i for i, v in enumerate(realNibbles)}
maxOpStack = 193
def buildOperatorDict(operatorList):
- oper = {}
- opc = {}
- for item in operatorList:
- if len(item) == 2:
- oper[item[0]] = item[1]
- else:
- oper[item[0]] = item[1:]
- if isinstance(item[0], tuple):
- opc[item[1]] = item[0]
- else:
- opc[item[1]] = (item[0],)
- return oper, opc
+ oper = {}
+ opc = {}
+ for item in operatorList:
+ if len(item) == 2:
+ oper[item[0]] = item[1]
+ else:
+ oper[item[0]] = item[1:]
+ if isinstance(item[0], tuple):
+ opc[item[1]] = item[0]
+ else:
+ opc[item[1]] = (item[0],)
+ return oper, opc
t2Operators = [
-# opcode name
- (1, 'hstem'),
- (3, 'vstem'),
- (4, 'vmoveto'),
- (5, 'rlineto'),
- (6, 'hlineto'),
- (7, 'vlineto'),
- (8, 'rrcurveto'),
- (10, 'callsubr'),
- (11, 'return'),
- (14, 'endchar'),
- (15, 'vsindex'),
- (16, 'blend'),
- (18, 'hstemhm'),
- (19, 'hintmask'),
- (20, 'cntrmask'),
- (21, 'rmoveto'),
- (22, 'hmoveto'),
- (23, 'vstemhm'),
- (24, 'rcurveline'),
- (25, 'rlinecurve'),
- (26, 'vvcurveto'),
- (27, 'hhcurveto'),
-# (28, 'shortint'), # not really an operator
- (29, 'callgsubr'),
- (30, 'vhcurveto'),
- (31, 'hvcurveto'),
- ((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF
- # fonts with this deprecated operator. Just ignore it.
- ((12, 3), 'and'),
- ((12, 4), 'or'),
- ((12, 5), 'not'),
- ((12, 8), 'store'),
- ((12, 9), 'abs'),
- ((12, 10), 'add'),
- ((12, 11), 'sub'),
- ((12, 12), 'div'),
- ((12, 13), 'load'),
- ((12, 14), 'neg'),
- ((12, 15), 'eq'),
- ((12, 18), 'drop'),
- ((12, 20), 'put'),
- ((12, 21), 'get'),
- ((12, 22), 'ifelse'),
- ((12, 23), 'random'),
- ((12, 24), 'mul'),
- ((12, 26), 'sqrt'),
- ((12, 27), 'dup'),
- ((12, 28), 'exch'),
- ((12, 29), 'index'),
- ((12, 30), 'roll'),
- ((12, 34), 'hflex'),
- ((12, 35), 'flex'),
- ((12, 36), 'hflex1'),
- ((12, 37), 'flex1'),
+ # opcode name
+ (1, "hstem"),
+ (3, "vstem"),
+ (4, "vmoveto"),
+ (5, "rlineto"),
+ (6, "hlineto"),
+ (7, "vlineto"),
+ (8, "rrcurveto"),
+ (10, "callsubr"),
+ (11, "return"),
+ (14, "endchar"),
+ (15, "vsindex"),
+ (16, "blend"),
+ (18, "hstemhm"),
+ (19, "hintmask"),
+ (20, "cntrmask"),
+ (21, "rmoveto"),
+ (22, "hmoveto"),
+ (23, "vstemhm"),
+ (24, "rcurveline"),
+ (25, "rlinecurve"),
+ (26, "vvcurveto"),
+ (27, "hhcurveto"),
+ # (28, 'shortint'), # not really an operator
+ (29, "callgsubr"),
+ (30, "vhcurveto"),
+ (31, "hvcurveto"),
+ ((12, 0), "ignore"), # dotsection. Yes, there a few very early OTF/CFF
+ # fonts with this deprecated operator. Just ignore it.
+ ((12, 3), "and"),
+ ((12, 4), "or"),
+ ((12, 5), "not"),
+ ((12, 8), "store"),
+ ((12, 9), "abs"),
+ ((12, 10), "add"),
+ ((12, 11), "sub"),
+ ((12, 12), "div"),
+ ((12, 13), "load"),
+ ((12, 14), "neg"),
+ ((12, 15), "eq"),
+ ((12, 18), "drop"),
+ ((12, 20), "put"),
+ ((12, 21), "get"),
+ ((12, 22), "ifelse"),
+ ((12, 23), "random"),
+ ((12, 24), "mul"),
+ ((12, 26), "sqrt"),
+ ((12, 27), "dup"),
+ ((12, 28), "exch"),
+ ((12, 29), "index"),
+ ((12, 30), "roll"),
+ ((12, 34), "hflex"),
+ ((12, 35), "flex"),
+ ((12, 36), "hflex1"),
+ ((12, 37), "flex1"),
]
+
def getIntEncoder(format):
- if format == "cff":
- fourByteOp = bytechr(29)
- elif format == "t1":
- fourByteOp = bytechr(255)
- else:
- assert format == "t2"
- fourByteOp = None
-
- def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr,
- pack=struct.pack, unpack=struct.unpack):
- if -107 <= value <= 107:
- code = bytechr(value + 139)
- elif 108 <= value <= 1131:
- value = value - 108
- code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF)
- elif -1131 <= value <= -108:
- value = -value - 108
- code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF)
- elif fourByteOp is None:
- # T2 only supports 2 byte ints
- if -32768 <= value <= 32767:
- code = bytechr(28) + pack(">h", value)
- else:
- # Backwards compatible hack: due to a previous bug in FontTools,
- # 16.16 fixed numbers were written out as 4-byte ints. When
- # these numbers were small, they were wrongly written back as
- # small ints instead of 4-byte ints, breaking round-tripping.
- # This here workaround doesn't do it any better, since we can't
- # distinguish anymore between small ints that were supposed to
- # be small fixed numbers and small ints that were just small
- # ints. Hence the warning.
- log.warning("4-byte T2 number got passed to the "
- "IntType handler. This should happen only when reading in "
- "old XML files.\n")
- code = bytechr(255) + pack(">l", value)
- else:
- code = fourByteOp + pack(">l", value)
- return code
-
- return encodeInt
+ if format == "cff":
+ twoByteOp = bytechr(28)
+ fourByteOp = bytechr(29)
+ elif format == "t1":
+ twoByteOp = None
+ fourByteOp = bytechr(255)
+ else:
+ assert format == "t2"
+ twoByteOp = bytechr(28)
+ fourByteOp = None
+
+ def encodeInt(
+ value,
+ fourByteOp=fourByteOp,
+ bytechr=bytechr,
+ pack=struct.pack,
+ unpack=struct.unpack,
+ twoByteOp=twoByteOp,
+ ):
+ if -107 <= value <= 107:
+ code = bytechr(value + 139)
+ elif 108 <= value <= 1131:
+ value = value - 108
+ code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF)
+ elif -1131 <= value <= -108:
+ value = -value - 108
+ code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF)
+ elif twoByteOp is not None and -32768 <= value <= 32767:
+ code = twoByteOp + pack(">h", value)
+ elif fourByteOp is None:
+ # Backwards compatible hack: due to a previous bug in FontTools,
+ # 16.16 fixed numbers were written out as 4-byte ints. When
+ # these numbers were small, they were wrongly written back as
+ # small ints instead of 4-byte ints, breaking round-tripping.
+ # This here workaround doesn't do it any better, since we can't
+ # distinguish anymore between small ints that were supposed to
+ # be small fixed numbers and small ints that were just small
+ # ints. Hence the warning.
+ log.warning(
+ "4-byte T2 number got passed to the "
+ "IntType handler. This should happen only when reading in "
+ "old XML files.\n"
+ )
+ code = bytechr(255) + pack(">l", value)
+ else:
+ code = fourByteOp + pack(">l", value)
+ return code
+
+ return encodeInt
encodeIntCFF = getIntEncoder("cff")
encodeIntT1 = getIntEncoder("t1")
encodeIntT2 = getIntEncoder("t2")
+
def encodeFixed(f, pack=struct.pack):
- """For T2 only"""
- value = floatToFixed(f, precisionBits=16)
- if value & 0xFFFF == 0: # check if the fractional part is zero
- return encodeIntT2(value >> 16) # encode only the integer part
- else:
- return b"\xff" + pack(">l", value) # encode the entire fixed point value
+ """For T2 only"""
+ value = floatToFixed(f, precisionBits=16)
+ if value & 0xFFFF == 0: # check if the fractional part is zero
+ return encodeIntT2(value >> 16) # encode only the integer part
+ else:
+ return b"\xff" + pack(">l", value) # encode the entire fixed point value
+
+realZeroBytes = bytechr(30) + bytechr(0xF)
-realZeroBytes = bytechr(30) + bytechr(0xf)
def encodeFloat(f):
- # For CFF only, used in cffLib
- if f == 0.0: # 0.0 == +0.0 == -0.0
- return realZeroBytes
- # Note: 14 decimal digits seems to be the limitation for CFF real numbers
- # in macOS. However, we use 8 here to match the implementation of AFDKO.
- s = "%.8G" % f
- if s[:2] == "0.":
- s = s[1:]
- elif s[:3] == "-0.":
- s = "-" + s[2:]
- nibbles = []
- while s:
- c = s[0]
- s = s[1:]
- if c == "E":
- c2 = s[:1]
- if c2 == "-":
- s = s[1:]
- c = "E-"
- elif c2 == "+":
- s = s[1:]
- nibbles.append(realNibblesDict[c])
- nibbles.append(0xf)
- if len(nibbles) % 2:
- nibbles.append(0xf)
- d = bytechr(30)
- for i in range(0, len(nibbles), 2):
- d = d + bytechr(nibbles[i] << 4 | nibbles[i+1])
- return d
-
-
-class CharStringCompileError(Exception): pass
+ # For CFF only, used in cffLib
+ if f == 0.0: # 0.0 == +0.0 == -0.0
+ return realZeroBytes
+ # Note: 14 decimal digits seems to be the limitation for CFF real numbers
+ # in macOS. However, we use 8 here to match the implementation of AFDKO.
+ s = "%.8G" % f
+ if s[:2] == "0.":
+ s = s[1:]
+ elif s[:3] == "-0.":
+ s = "-" + s[2:]
+ nibbles = []
+ while s:
+ c = s[0]
+ s = s[1:]
+ if c == "E":
+ c2 = s[:1]
+ if c2 == "-":
+ s = s[1:]
+ c = "E-"
+ elif c2 == "+":
+ s = s[1:]
+ nibbles.append(realNibblesDict[c])
+ nibbles.append(0xF)
+ if len(nibbles) % 2:
+ nibbles.append(0xF)
+ d = bytechr(30)
+ for i in range(0, len(nibbles), 2):
+ d = d + bytechr(nibbles[i] << 4 | nibbles[i + 1])
+ return d
+
+
+class CharStringCompileError(Exception):
+ pass
class SimpleT2Decompiler(object):
+ def __init__(self, localSubrs, globalSubrs, private=None, blender=None):
+ self.localSubrs = localSubrs
+ self.localBias = calcSubrBias(localSubrs)
+ self.globalSubrs = globalSubrs
+ self.globalBias = calcSubrBias(globalSubrs)
+ self.private = private
+ self.blender = blender
+ self.reset()
+
+ def reset(self):
+ self.callingStack = []
+ self.operandStack = []
+ self.hintCount = 0
+ self.hintMaskBytes = 0
+ self.numRegions = 0
+ self.vsIndex = 0
+
+ def execute(self, charString):
+ self.callingStack.append(charString)
+ needsDecompilation = charString.needsDecompilation()
+ if needsDecompilation:
+ program = []
+ pushToProgram = program.append
+ else:
+ pushToProgram = lambda x: None
+ pushToStack = self.operandStack.append
+ index = 0
+ while True:
+ token, isOperator, index = charString.getToken(index)
+ if token is None:
+ break # we're done!
+ pushToProgram(token)
+ if isOperator:
+ handlerName = "op_" + token
+ handler = getattr(self, handlerName, None)
+ if handler is not None:
+ rv = handler(index)
+ if rv:
+ hintMaskBytes, index = rv
+ pushToProgram(hintMaskBytes)
+ else:
+ self.popall()
+ else:
+ pushToStack(token)
+ if needsDecompilation:
+ charString.setProgram(program)
+ del self.callingStack[-1]
+
+ def pop(self):
+ value = self.operandStack[-1]
+ del self.operandStack[-1]
+ return value
+
+ def popall(self):
+ stack = self.operandStack[:]
+ self.operandStack[:] = []
+ return stack
+
+ def push(self, value):
+ self.operandStack.append(value)
+
+ def op_return(self, index):
+ if self.operandStack:
+ pass
+
+ def op_endchar(self, index):
+ pass
+
+ def op_ignore(self, index):
+ pass
+
+ def op_callsubr(self, index):
+ subrIndex = self.pop()
+ subr = self.localSubrs[subrIndex + self.localBias]
+ self.execute(subr)
+
+ def op_callgsubr(self, index):
+ subrIndex = self.pop()
+ subr = self.globalSubrs[subrIndex + self.globalBias]
+ self.execute(subr)
+
+ def op_hstem(self, index):
+ self.countHints()
+
+ def op_vstem(self, index):
+ self.countHints()
+
+ def op_hstemhm(self, index):
+ self.countHints()
+
+ def op_vstemhm(self, index):
+ self.countHints()
+
+ def op_hintmask(self, index):
+ if not self.hintMaskBytes:
+ self.countHints()
+ self.hintMaskBytes = (self.hintCount + 7) // 8
+ hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)
+ return hintMaskBytes, index
+
+ op_cntrmask = op_hintmask
+
+ def countHints(self):
+ args = self.popall()
+ self.hintCount = self.hintCount + len(args) // 2
+
+ # misc
+ def op_and(self, index):
+ raise NotImplementedError
+
+ def op_or(self, index):
+ raise NotImplementedError
+
+ def op_not(self, index):
+ raise NotImplementedError
+
+ def op_store(self, index):
+ raise NotImplementedError
+
+ def op_abs(self, index):
+ raise NotImplementedError
+
+ def op_add(self, index):
+ raise NotImplementedError
+
+ def op_sub(self, index):
+ raise NotImplementedError
+
+ def op_div(self, index):
+ raise NotImplementedError
+
+ def op_load(self, index):
+ raise NotImplementedError
- def __init__(self, localSubrs, globalSubrs, private=None):
- self.localSubrs = localSubrs
- self.localBias = calcSubrBias(localSubrs)
- self.globalSubrs = globalSubrs
- self.globalBias = calcSubrBias(globalSubrs)
- self.private = private
- self.reset()
-
- def reset(self):
- self.callingStack = []
- self.operandStack = []
- self.hintCount = 0
- self.hintMaskBytes = 0
- self.numRegions = 0
-
- def execute(self, charString):
- self.callingStack.append(charString)
- needsDecompilation = charString.needsDecompilation()
- if needsDecompilation:
- program = []
- pushToProgram = program.append
- else:
- pushToProgram = lambda x: None
- pushToStack = self.operandStack.append
- index = 0
- while True:
- token, isOperator, index = charString.getToken(index)
- if token is None:
- break # we're done!
- pushToProgram(token)
- if isOperator:
- handlerName = "op_" + token
- handler = getattr(self, handlerName, None)
- if handler is not None:
- rv = handler(index)
- if rv:
- hintMaskBytes, index = rv
- pushToProgram(hintMaskBytes)
- else:
- self.popall()
- else:
- pushToStack(token)
- if needsDecompilation:
- charString.setProgram(program)
- del self.callingStack[-1]
-
- def pop(self):
- value = self.operandStack[-1]
- del self.operandStack[-1]
- return value
-
- def popall(self):
- stack = self.operandStack[:]
- self.operandStack[:] = []
- return stack
-
- def push(self, value):
- self.operandStack.append(value)
-
- def op_return(self, index):
- if self.operandStack:
- pass
-
- def op_endchar(self, index):
- pass
-
- def op_ignore(self, index):
- pass
-
- def op_callsubr(self, index):
- subrIndex = self.pop()
- subr = self.localSubrs[subrIndex+self.localBias]
- self.execute(subr)
-
- def op_callgsubr(self, index):
- subrIndex = self.pop()
- subr = self.globalSubrs[subrIndex+self.globalBias]
- self.execute(subr)
-
- def op_hstem(self, index):
- self.countHints()
- def op_vstem(self, index):
- self.countHints()
- def op_hstemhm(self, index):
- self.countHints()
- def op_vstemhm(self, index):
- self.countHints()
-
- def op_hintmask(self, index):
- if not self.hintMaskBytes:
- self.countHints()
- self.hintMaskBytes = (self.hintCount + 7) // 8
- hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)
- return hintMaskBytes, index
-
- op_cntrmask = op_hintmask
-
- def countHints(self):
- args = self.popall()
- self.hintCount = self.hintCount + len(args) // 2
-
- # misc
- def op_and(self, index):
- raise NotImplementedError
- def op_or(self, index):
- raise NotImplementedError
- def op_not(self, index):
- raise NotImplementedError
- def op_store(self, index):
- raise NotImplementedError
- def op_abs(self, index):
- raise NotImplementedError
- def op_add(self, index):
- raise NotImplementedError
- def op_sub(self, index):
- raise NotImplementedError
- def op_div(self, index):
- raise NotImplementedError
- def op_load(self, index):
- raise NotImplementedError
- def op_neg(self, index):
- raise NotImplementedError
- def op_eq(self, index):
- raise NotImplementedError
- def op_drop(self, index):
- raise NotImplementedError
- def op_put(self, index):
- raise NotImplementedError
- def op_get(self, index):
- raise NotImplementedError
- def op_ifelse(self, index):
- raise NotImplementedError
- def op_random(self, index):
- raise NotImplementedError
- def op_mul(self, index):
- raise NotImplementedError
- def op_sqrt(self, index):
- raise NotImplementedError
- def op_dup(self, index):
- raise NotImplementedError
- def op_exch(self, index):
- raise NotImplementedError
- def op_index(self, index):
- raise NotImplementedError
- def op_roll(self, index):
- raise NotImplementedError
-
- # TODO(behdad): move to T2OutlineExtractor and add a 'setVariation'
- # method that takes VarStoreData and a location
- def op_blend(self, index):
- if self.numRegions == 0:
- self.numRegions = self.private.getNumRegions()
- numBlends = self.pop()
- numOps = numBlends * (self.numRegions + 1)
- del self.operandStack[-(numOps-numBlends):] # Leave the default operands on the stack.
-
- def op_vsindex(self, index):
- vi = self.pop()
- self.numRegions = self.private.getNumRegions(vi)
+ def op_neg(self, index):
+ raise NotImplementedError
+ def op_eq(self, index):
+ raise NotImplementedError
-t1Operators = [
-# opcode name
- (1, 'hstem'),
- (3, 'vstem'),
- (4, 'vmoveto'),
- (5, 'rlineto'),
- (6, 'hlineto'),
- (7, 'vlineto'),
- (8, 'rrcurveto'),
- (9, 'closepath'),
- (10, 'callsubr'),
- (11, 'return'),
- (13, 'hsbw'),
- (14, 'endchar'),
- (21, 'rmoveto'),
- (22, 'hmoveto'),
- (30, 'vhcurveto'),
- (31, 'hvcurveto'),
- ((12, 0), 'dotsection'),
- ((12, 1), 'vstem3'),
- ((12, 2), 'hstem3'),
- ((12, 6), 'seac'),
- ((12, 7), 'sbw'),
- ((12, 12), 'div'),
- ((12, 16), 'callothersubr'),
- ((12, 17), 'pop'),
- ((12, 33), 'setcurrentpoint'),
-]
+ def op_drop(self, index):
+ raise NotImplementedError
+ def op_put(self, index):
+ raise NotImplementedError
-class T2WidthExtractor(SimpleT2Decompiler):
+ def op_get(self, index):
+ raise NotImplementedError
+
+ def op_ifelse(self, index):
+ raise NotImplementedError
+
+ def op_random(self, index):
+ raise NotImplementedError
+
+ def op_mul(self, index):
+ raise NotImplementedError
+
+ def op_sqrt(self, index):
+ raise NotImplementedError
- def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None):
- SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
- self.nominalWidthX = nominalWidthX
- self.defaultWidthX = defaultWidthX
+ def op_dup(self, index):
+ raise NotImplementedError
- def reset(self):
- SimpleT2Decompiler.reset(self)
- self.gotWidth = 0
- self.width = 0
+ def op_exch(self, index):
+ raise NotImplementedError
- def popallWidth(self, evenOdd=0):
- args = self.popall()
- if not self.gotWidth:
- if evenOdd ^ (len(args) % 2):
- # For CFF2 charstrings, this should never happen
- assert self.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
- self.width = self.nominalWidthX + args[0]
- args = args[1:]
- else:
- self.width = self.defaultWidthX
- self.gotWidth = 1
- return args
+ def op_index(self, index):
+ raise NotImplementedError
- def countHints(self):
- args = self.popallWidth()
- self.hintCount = self.hintCount + len(args) // 2
+ def op_roll(self, index):
+ raise NotImplementedError
- def op_rmoveto(self, index):
- self.popallWidth()
+ def op_blend(self, index):
+ if self.numRegions == 0:
+ self.numRegions = self.private.getNumRegions()
+ numBlends = self.pop()
+ numOps = numBlends * (self.numRegions + 1)
+ if self.blender is None:
+ del self.operandStack[
+ -(numOps - numBlends) :
+ ] # Leave the default operands on the stack.
+ else:
+ argi = len(self.operandStack) - numOps
+ end_args = tuplei = argi + numBlends
+ while argi < end_args:
+ next_ti = tuplei + self.numRegions
+ deltas = self.operandStack[tuplei:next_ti]
+ delta = self.blender(self.vsIndex, deltas)
+ self.operandStack[argi] += delta
+ tuplei = next_ti
+ argi += 1
+ self.operandStack[end_args:] = []
- def op_hmoveto(self, index):
- self.popallWidth(1)
+ def op_vsindex(self, index):
+ vi = self.pop()
+ self.vsIndex = vi
+ self.numRegions = self.private.getNumRegions(vi)
- def op_vmoveto(self, index):
- self.popallWidth(1)
- def op_endchar(self, index):
- self.popallWidth()
+t1Operators = [
+ # opcode name
+ (1, "hstem"),
+ (3, "vstem"),
+ (4, "vmoveto"),
+ (5, "rlineto"),
+ (6, "hlineto"),
+ (7, "vlineto"),
+ (8, "rrcurveto"),
+ (9, "closepath"),
+ (10, "callsubr"),
+ (11, "return"),
+ (13, "hsbw"),
+ (14, "endchar"),
+ (21, "rmoveto"),
+ (22, "hmoveto"),
+ (30, "vhcurveto"),
+ (31, "hvcurveto"),
+ ((12, 0), "dotsection"),
+ ((12, 1), "vstem3"),
+ ((12, 2), "hstem3"),
+ ((12, 6), "seac"),
+ ((12, 7), "sbw"),
+ ((12, 12), "div"),
+ ((12, 16), "callothersubr"),
+ ((12, 17), "pop"),
+ ((12, 33), "setcurrentpoint"),
+]
+
+
+class T2WidthExtractor(SimpleT2Decompiler):
+ def __init__(
+ self,
+ localSubrs,
+ globalSubrs,
+ nominalWidthX,
+ defaultWidthX,
+ private=None,
+ blender=None,
+ ):
+ SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private, blender)
+ self.nominalWidthX = nominalWidthX
+ self.defaultWidthX = defaultWidthX
+
+ def reset(self):
+ SimpleT2Decompiler.reset(self)
+ self.gotWidth = 0
+ self.width = 0
+
+ def popallWidth(self, evenOdd=0):
+ args = self.popall()
+ if not self.gotWidth:
+ if evenOdd ^ (len(args) % 2):
+ # For CFF2 charstrings, this should never happen
+ assert (
+ self.defaultWidthX is not None
+ ), "CFF2 CharStrings must not have an initial width value"
+ self.width = self.nominalWidthX + args[0]
+ args = args[1:]
+ else:
+ self.width = self.defaultWidthX
+ self.gotWidth = 1
+ return args
+
+ def countHints(self):
+ args = self.popallWidth()
+ self.hintCount = self.hintCount + len(args) // 2
+
+ def op_rmoveto(self, index):
+ self.popallWidth()
+
+ def op_hmoveto(self, index):
+ self.popallWidth(1)
+
+ def op_vmoveto(self, index):
+ self.popallWidth(1)
+
+ def op_endchar(self, index):
+ self.popallWidth()
class T2OutlineExtractor(T2WidthExtractor):
+ def __init__(
+ self,
+ pen,
+ localSubrs,
+ globalSubrs,
+ nominalWidthX,
+ defaultWidthX,
+ private=None,
+ blender=None,
+ ):
+ T2WidthExtractor.__init__(
+ self,
+ localSubrs,
+ globalSubrs,
+ nominalWidthX,
+ defaultWidthX,
+ private,
+ blender,
+ )
+ self.pen = pen
+ self.subrLevel = 0
+
+ def reset(self):
+ T2WidthExtractor.reset(self)
+ self.currentPoint = (0, 0)
+ self.sawMoveTo = 0
+ self.subrLevel = 0
+
+ def execute(self, charString):
+ self.subrLevel += 1
+ super().execute(charString)
+ self.subrLevel -= 1
+ if self.subrLevel == 0:
+ self.endPath()
+
+ def _nextPoint(self, point):
+ x, y = self.currentPoint
+ point = x + point[0], y + point[1]
+ self.currentPoint = point
+ return point
+
+ def rMoveTo(self, point):
+ self.pen.moveTo(self._nextPoint(point))
+ self.sawMoveTo = 1
+
+ def rLineTo(self, point):
+ if not self.sawMoveTo:
+ self.rMoveTo((0, 0))
+ self.pen.lineTo(self._nextPoint(point))
+
+ def rCurveTo(self, pt1, pt2, pt3):
+ if not self.sawMoveTo:
+ self.rMoveTo((0, 0))
+ nextPoint = self._nextPoint
+ self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3))
+
+ def closePath(self):
+ if self.sawMoveTo:
+ self.pen.closePath()
+ self.sawMoveTo = 0
+
+ def endPath(self):
+ # In T2 there are no open paths, so always do a closePath when
+ # finishing a sub path. We avoid spurious calls to closePath()
+ # because its a real T1 op we're emulating in T2 whereas
+ # endPath() is just a means to that emulation
+ if self.sawMoveTo:
+ self.closePath()
+
+ #
+ # hint operators
+ #
+ # def op_hstem(self, index):
+ # self.countHints()
+ # def op_vstem(self, index):
+ # self.countHints()
+ # def op_hstemhm(self, index):
+ # self.countHints()
+ # def op_vstemhm(self, index):
+ # self.countHints()
+ # def op_hintmask(self, index):
+ # self.countHints()
+ # def op_cntrmask(self, index):
+ # self.countHints()
+
+ #
+ # path constructors, moveto
+ #
+ def op_rmoveto(self, index):
+ self.endPath()
+ self.rMoveTo(self.popallWidth())
+
+ def op_hmoveto(self, index):
+ self.endPath()
+ self.rMoveTo((self.popallWidth(1)[0], 0))
+
+ def op_vmoveto(self, index):
+ self.endPath()
+ self.rMoveTo((0, self.popallWidth(1)[0]))
+
+ def op_endchar(self, index):
+ self.endPath()
+ args = self.popallWidth()
+ if args:
+ from fontTools.encodings.StandardEncoding import StandardEncoding
+
+ # endchar can do seac accent bulding; The T2 spec says it's deprecated,
+ # but recent software that shall remain nameless does output it.
+ adx, ady, bchar, achar = args
+ baseGlyph = StandardEncoding[bchar]
+ self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
+ accentGlyph = StandardEncoding[achar]
+ self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
+
+ #
+ # path constructors, lines
+ #
+ def op_rlineto(self, index):
+ args = self.popall()
+ for i in range(0, len(args), 2):
+ point = args[i : i + 2]
+ self.rLineTo(point)
+
+ def op_hlineto(self, index):
+ self.alternatingLineto(1)
+
+ def op_vlineto(self, index):
+ self.alternatingLineto(0)
+
+ #
+ # path constructors, curves
+ #
+ def op_rrcurveto(self, index):
+ """{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
+ args = self.popall()
+ for i in range(0, len(args), 6):
+ (
+ dxa,
+ dya,
+ dxb,
+ dyb,
+ dxc,
+ dyc,
+ ) = args[i : i + 6]
+ self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))
+
+ def op_rcurveline(self, index):
+ """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline"""
+ args = self.popall()
+ for i in range(0, len(args) - 2, 6):
+ dxb, dyb, dxc, dyc, dxd, dyd = args[i : i + 6]
+ self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
+ self.rLineTo(args[-2:])
+
+ def op_rlinecurve(self, index):
+ """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve"""
+ args = self.popall()
+ lineArgs = args[:-6]
+ for i in range(0, len(lineArgs), 2):
+ self.rLineTo(lineArgs[i : i + 2])
+ dxb, dyb, dxc, dyc, dxd, dyd = args[-6:]
+ self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
+
+ def op_vvcurveto(self, index):
+ "dx1? {dya dxb dyb dyc}+ vvcurveto"
+ args = self.popall()
+ if len(args) % 2:
+ dx1 = args[0]
+ args = args[1:]
+ else:
+ dx1 = 0
+ for i in range(0, len(args), 4):
+ dya, dxb, dyb, dyc = args[i : i + 4]
+ self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc))
+ dx1 = 0
+
+ def op_hhcurveto(self, index):
+ """dy1? {dxa dxb dyb dxc}+ hhcurveto"""
+ args = self.popall()
+ if len(args) % 2:
+ dy1 = args[0]
+ args = args[1:]
+ else:
+ dy1 = 0
+ for i in range(0, len(args), 4):
+ dxa, dxb, dyb, dxc = args[i : i + 4]
+ self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0))
+ dy1 = 0
+
+ def op_vhcurveto(self, index):
+ """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30)
+ {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto
+ """
+ args = self.popall()
+ while args:
+ args = self.vcurveto(args)
+ if args:
+ args = self.hcurveto(args)
+
+ def op_hvcurveto(self, index):
+ """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf?
+ {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf?
+ """
+ args = self.popall()
+ while args:
+ args = self.hcurveto(args)
+ if args:
+ args = self.vcurveto(args)
+
+ #
+ # path constructors, flex
+ #
+ def op_hflex(self, index):
+ dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall()
+ dy1 = dy3 = dy4 = dy6 = 0
+ dy5 = -dy2
+ self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
+ self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
+
+ def op_flex(self, index):
+ dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()
+ self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
+ self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
+
+ def op_hflex1(self, index):
+ dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()
+ dy3 = dy4 = 0
+ dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5)
+
+ self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
+ self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
+
+ def op_flex1(self, index):
+ dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()
+ dx = dx1 + dx2 + dx3 + dx4 + dx5
+ dy = dy1 + dy2 + dy3 + dy4 + dy5
+ if abs(dx) > abs(dy):
+ dx6 = d6
+ dy6 = -dy
+ else:
+ dx6 = -dx
+ dy6 = d6
+ self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
+ self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
+
+ # misc
+ def op_and(self, index):
+ raise NotImplementedError
+
+ def op_or(self, index):
+ raise NotImplementedError
+
+ def op_not(self, index):
+ raise NotImplementedError
+
+ def op_store(self, index):
+ raise NotImplementedError
+
+ def op_abs(self, index):
+ raise NotImplementedError
+
+ def op_add(self, index):
+ raise NotImplementedError
+
+ def op_sub(self, index):
+ raise NotImplementedError
+
+ def op_div(self, index):
+ num2 = self.pop()
+ num1 = self.pop()
+ d1 = num1 // num2
+ d2 = num1 / num2
+ if d1 == d2:
+ self.push(d1)
+ else:
+ self.push(d2)
+
+ def op_load(self, index):
+ raise NotImplementedError
+
+ def op_neg(self, index):
+ raise NotImplementedError
+
+ def op_eq(self, index):
+ raise NotImplementedError
+
+ def op_drop(self, index):
+ raise NotImplementedError
+
+ def op_put(self, index):
+ raise NotImplementedError
+
+ def op_get(self, index):
+ raise NotImplementedError
+
+ def op_ifelse(self, index):
+ raise NotImplementedError
+
+ def op_random(self, index):
+ raise NotImplementedError
+
+ def op_mul(self, index):
+ raise NotImplementedError
+
+ def op_sqrt(self, index):
+ raise NotImplementedError
+
+ def op_dup(self, index):
+ raise NotImplementedError
+
+ def op_exch(self, index):
+ raise NotImplementedError
+
+ def op_index(self, index):
+ raise NotImplementedError
+
+ def op_roll(self, index):
+ raise NotImplementedError
+
+ #
+ # miscellaneous helpers
+ #
+ def alternatingLineto(self, isHorizontal):
+ args = self.popall()
+ for arg in args:
+ if isHorizontal:
+ point = (arg, 0)
+ else:
+ point = (0, arg)
+ self.rLineTo(point)
+ isHorizontal = not isHorizontal
+
+ def vcurveto(self, args):
+ dya, dxb, dyb, dxc = args[:4]
+ args = args[4:]
+ if len(args) == 1:
+ dyc = args[0]
+ args = []
+ else:
+ dyc = 0
+ self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc))
+ return args
+
+ def hcurveto(self, args):
+ dxa, dxb, dyb, dyc = args[:4]
+ args = args[4:]
+ if len(args) == 1:
+ dxc = args[0]
+ args = []
+ else:
+ dxc = 0
+ self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))
+ return args
- def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None):
- T2WidthExtractor.__init__(
- self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private)
- self.pen = pen
- self.subrLevel = 0
-
- def reset(self):
- T2WidthExtractor.reset(self)
- self.currentPoint = (0, 0)
- self.sawMoveTo = 0
- self.subrLevel = 0
-
- def execute(self, charString):
- self.subrLevel += 1
- super().execute(charString)
- self.subrLevel -= 1
- if self.subrLevel == 0:
- self.endPath()
-
- def _nextPoint(self, point):
- x, y = self.currentPoint
- point = x + point[0], y + point[1]
- self.currentPoint = point
- return point
-
- def rMoveTo(self, point):
- self.pen.moveTo(self._nextPoint(point))
- self.sawMoveTo = 1
-
- def rLineTo(self, point):
- if not self.sawMoveTo:
- self.rMoveTo((0, 0))
- self.pen.lineTo(self._nextPoint(point))
-
- def rCurveTo(self, pt1, pt2, pt3):
- if not self.sawMoveTo:
- self.rMoveTo((0, 0))
- nextPoint = self._nextPoint
- self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3))
-
- def closePath(self):
- if self.sawMoveTo:
- self.pen.closePath()
- self.sawMoveTo = 0
-
- def endPath(self):
- # In T2 there are no open paths, so always do a closePath when
- # finishing a sub path. We avoid spurious calls to closePath()
- # because its a real T1 op we're emulating in T2 whereas
- # endPath() is just a means to that emulation
- if self.sawMoveTo:
- self.closePath()
-
- #
- # hint operators
- #
- #def op_hstem(self, index):
- # self.countHints()
- #def op_vstem(self, index):
- # self.countHints()
- #def op_hstemhm(self, index):
- # self.countHints()
- #def op_vstemhm(self, index):
- # self.countHints()
- #def op_hintmask(self, index):
- # self.countHints()
- #def op_cntrmask(self, index):
- # self.countHints()
-
- #
- # path constructors, moveto
- #
- def op_rmoveto(self, index):
- self.endPath()
- self.rMoveTo(self.popallWidth())
- def op_hmoveto(self, index):
- self.endPath()
- self.rMoveTo((self.popallWidth(1)[0], 0))
- def op_vmoveto(self, index):
- self.endPath()
- self.rMoveTo((0, self.popallWidth(1)[0]))
- def op_endchar(self, index):
- self.endPath()
- args = self.popallWidth()
- if args:
- from fontTools.encodings.StandardEncoding import StandardEncoding
- # endchar can do seac accent bulding; The T2 spec says it's deprecated,
- # but recent software that shall remain nameless does output it.
- adx, ady, bchar, achar = args
- baseGlyph = StandardEncoding[bchar]
- self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
- accentGlyph = StandardEncoding[achar]
- self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
-
- #
- # path constructors, lines
- #
- def op_rlineto(self, index):
- args = self.popall()
- for i in range(0, len(args), 2):
- point = args[i:i+2]
- self.rLineTo(point)
-
- def op_hlineto(self, index):
- self.alternatingLineto(1)
- def op_vlineto(self, index):
- self.alternatingLineto(0)
-
- #
- # path constructors, curves
- #
- def op_rrcurveto(self, index):
- """{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
- args = self.popall()
- for i in range(0, len(args), 6):
- dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6]
- self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))
-
- def op_rcurveline(self, index):
- """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline"""
- args = self.popall()
- for i in range(0, len(args)-2, 6):
- dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6]
- self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
- self.rLineTo(args[-2:])
-
- def op_rlinecurve(self, index):
- """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve"""
- args = self.popall()
- lineArgs = args[:-6]
- for i in range(0, len(lineArgs), 2):
- self.rLineTo(lineArgs[i:i+2])
- dxb, dyb, dxc, dyc, dxd, dyd = args[-6:]
- self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
-
- def op_vvcurveto(self, index):
- "dx1? {dya dxb dyb dyc}+ vvcurveto"
- args = self.popall()
- if len(args) % 2:
- dx1 = args[0]
- args = args[1:]
- else:
- dx1 = 0
- for i in range(0, len(args), 4):
- dya, dxb, dyb, dyc = args[i:i+4]
- self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc))
- dx1 = 0
-
- def op_hhcurveto(self, index):
- """dy1? {dxa dxb dyb dxc}+ hhcurveto"""
- args = self.popall()
- if len(args) % 2:
- dy1 = args[0]
- args = args[1:]
- else:
- dy1 = 0
- for i in range(0, len(args), 4):
- dxa, dxb, dyb, dxc = args[i:i+4]
- self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0))
- dy1 = 0
-
- def op_vhcurveto(self, index):
- """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30)
- {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto
- """
- args = self.popall()
- while args:
- args = self.vcurveto(args)
- if args:
- args = self.hcurveto(args)
-
- def op_hvcurveto(self, index):
- """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf?
- {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf?
- """
- args = self.popall()
- while args:
- args = self.hcurveto(args)
- if args:
- args = self.vcurveto(args)
-
- #
- # path constructors, flex
- #
- def op_hflex(self, index):
- dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall()
- dy1 = dy3 = dy4 = dy6 = 0
- dy5 = -dy2
- self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
- self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
- def op_flex(self, index):
- dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()
- self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
- self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
- def op_hflex1(self, index):
- dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()
- dy3 = dy4 = 0
- dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5)
-
- self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
- self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
- def op_flex1(self, index):
- dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()
- dx = dx1 + dx2 + dx3 + dx4 + dx5
- dy = dy1 + dy2 + dy3 + dy4 + dy5
- if abs(dx) > abs(dy):
- dx6 = d6
- dy6 = -dy
- else:
- dx6 = -dx
- dy6 = d6
- self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
- self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
-
- # misc
- def op_and(self, index):
- raise NotImplementedError
- def op_or(self, index):
- raise NotImplementedError
- def op_not(self, index):
- raise NotImplementedError
- def op_store(self, index):
- raise NotImplementedError
- def op_abs(self, index):
- raise NotImplementedError
- def op_add(self, index):
- raise NotImplementedError
- def op_sub(self, index):
- raise NotImplementedError
- def op_div(self, index):
- num2 = self.pop()
- num1 = self.pop()
- d1 = num1//num2
- d2 = num1/num2
- if d1 == d2:
- self.push(d1)
- else:
- self.push(d2)
- def op_load(self, index):
- raise NotImplementedError
- def op_neg(self, index):
- raise NotImplementedError
- def op_eq(self, index):
- raise NotImplementedError
- def op_drop(self, index):
- raise NotImplementedError
- def op_put(self, index):
- raise NotImplementedError
- def op_get(self, index):
- raise NotImplementedError
- def op_ifelse(self, index):
- raise NotImplementedError
- def op_random(self, index):
- raise NotImplementedError
- def op_mul(self, index):
- raise NotImplementedError
- def op_sqrt(self, index):
- raise NotImplementedError
- def op_dup(self, index):
- raise NotImplementedError
- def op_exch(self, index):
- raise NotImplementedError
- def op_index(self, index):
- raise NotImplementedError
- def op_roll(self, index):
- raise NotImplementedError
-
- #
- # miscellaneous helpers
- #
- def alternatingLineto(self, isHorizontal):
- args = self.popall()
- for arg in args:
- if isHorizontal:
- point = (arg, 0)
- else:
- point = (0, arg)
- self.rLineTo(point)
- isHorizontal = not isHorizontal
-
- def vcurveto(self, args):
- dya, dxb, dyb, dxc = args[:4]
- args = args[4:]
- if len(args) == 1:
- dyc = args[0]
- args = []
- else:
- dyc = 0
- self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc))
- return args
-
- def hcurveto(self, args):
- dxa, dxb, dyb, dyc = args[:4]
- args = args[4:]
- if len(args) == 1:
- dxc = args[0]
- args = []
- else:
- dxc = 0
- self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))
- return args
class T1OutlineExtractor(T2OutlineExtractor):
+ def __init__(self, pen, subrs):
+ self.pen = pen
+ self.subrs = subrs
+ self.reset()
+
+ def reset(self):
+ self.flexing = 0
+ self.width = 0
+ self.sbx = 0
+ T2OutlineExtractor.reset(self)
+
+ def endPath(self):
+ if self.sawMoveTo:
+ self.pen.endPath()
+ self.sawMoveTo = 0
+
+ def popallWidth(self, evenOdd=0):
+ return self.popall()
+
+ def exch(self):
+ stack = self.operandStack
+ stack[-1], stack[-2] = stack[-2], stack[-1]
+
+ #
+ # path constructors
+ #
+ def op_rmoveto(self, index):
+ if self.flexing:
+ return
+ self.endPath()
+ self.rMoveTo(self.popall())
+
+ def op_hmoveto(self, index):
+ if self.flexing:
+ # We must add a parameter to the stack if we are flexing
+ self.push(0)
+ return
+ self.endPath()
+ self.rMoveTo((self.popall()[0], 0))
+
+ def op_vmoveto(self, index):
+ if self.flexing:
+ # We must add a parameter to the stack if we are flexing
+ self.push(0)
+ self.exch()
+ return
+ self.endPath()
+ self.rMoveTo((0, self.popall()[0]))
+
+ def op_closepath(self, index):
+ self.closePath()
+
+ def op_setcurrentpoint(self, index):
+ args = self.popall()
+ x, y = args
+ self.currentPoint = x, y
+
+ def op_endchar(self, index):
+ self.endPath()
+
+ def op_hsbw(self, index):
+ sbx, wx = self.popall()
+ self.width = wx
+ self.sbx = sbx
+ self.currentPoint = sbx, self.currentPoint[1]
+
+ def op_sbw(self, index):
+ self.popall() # XXX
+
+ #
+ def op_callsubr(self, index):
+ subrIndex = self.pop()
+ subr = self.subrs[subrIndex]
+ self.execute(subr)
+
+ def op_callothersubr(self, index):
+ subrIndex = self.pop()
+ nArgs = self.pop()
+ # print nArgs, subrIndex, "callothersubr"
+ if subrIndex == 0 and nArgs == 3:
+ self.doFlex()
+ self.flexing = 0
+ elif subrIndex == 1 and nArgs == 0:
+ self.flexing = 1
+ # ignore...
+
+ def op_pop(self, index):
+ pass # ignore...
+
+ def doFlex(self):
+ finaly = self.pop()
+ finalx = self.pop()
+ self.pop() # flex height is unused
+
+ p3y = self.pop()
+ p3x = self.pop()
+ bcp4y = self.pop()
+ bcp4x = self.pop()
+ bcp3y = self.pop()
+ bcp3x = self.pop()
+ p2y = self.pop()
+ p2x = self.pop()
+ bcp2y = self.pop()
+ bcp2x = self.pop()
+ bcp1y = self.pop()
+ bcp1x = self.pop()
+ rpy = self.pop()
+ rpx = self.pop()
+
+ # call rrcurveto
+ self.push(bcp1x + rpx)
+ self.push(bcp1y + rpy)
+ self.push(bcp2x)
+ self.push(bcp2y)
+ self.push(p2x)
+ self.push(p2y)
+ self.op_rrcurveto(None)
+
+ # call rrcurveto
+ self.push(bcp3x)
+ self.push(bcp3y)
+ self.push(bcp4x)
+ self.push(bcp4y)
+ self.push(p3x)
+ self.push(p3y)
+ self.op_rrcurveto(None)
+
+ # Push back final coords so subr 0 can find them
+ self.push(finalx)
+ self.push(finaly)
+
+ def op_dotsection(self, index):
+ self.popall() # XXX
+
+ def op_hstem3(self, index):
+ self.popall() # XXX
+
+ def op_seac(self, index):
+ "asb adx ady bchar achar seac"
+ from fontTools.encodings.StandardEncoding import StandardEncoding
+
+ asb, adx, ady, bchar, achar = self.popall()
+ baseGlyph = StandardEncoding[bchar]
+ self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
+ accentGlyph = StandardEncoding[achar]
+ adx = adx + self.sbx - asb # seac weirdness
+ self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
+
+ def op_vstem3(self, index):
+ self.popall() # XXX
- def __init__(self, pen, subrs):
- self.pen = pen
- self.subrs = subrs
- self.reset()
-
- def reset(self):
- self.flexing = 0
- self.width = 0
- self.sbx = 0
- T2OutlineExtractor.reset(self)
-
- def endPath(self):
- if self.sawMoveTo:
- self.pen.endPath()
- self.sawMoveTo = 0
-
- def popallWidth(self, evenOdd=0):
- return self.popall()
-
- def exch(self):
- stack = self.operandStack
- stack[-1], stack[-2] = stack[-2], stack[-1]
-
- #
- # path constructors
- #
- def op_rmoveto(self, index):
- if self.flexing:
- return
- self.endPath()
- self.rMoveTo(self.popall())
- def op_hmoveto(self, index):
- if self.flexing:
- # We must add a parameter to the stack if we are flexing
- self.push(0)
- return
- self.endPath()
- self.rMoveTo((self.popall()[0], 0))
- def op_vmoveto(self, index):
- if self.flexing:
- # We must add a parameter to the stack if we are flexing
- self.push(0)
- self.exch()
- return
- self.endPath()
- self.rMoveTo((0, self.popall()[0]))
- def op_closepath(self, index):
- self.closePath()
- def op_setcurrentpoint(self, index):
- args = self.popall()
- x, y = args
- self.currentPoint = x, y
-
- def op_endchar(self, index):
- self.endPath()
-
- def op_hsbw(self, index):
- sbx, wx = self.popall()
- self.width = wx
- self.sbx = sbx
- self.currentPoint = sbx, self.currentPoint[1]
- def op_sbw(self, index):
- self.popall() # XXX
-
- #
- def op_callsubr(self, index):
- subrIndex = self.pop()
- subr = self.subrs[subrIndex]
- self.execute(subr)
- def op_callothersubr(self, index):
- subrIndex = self.pop()
- nArgs = self.pop()
- #print nArgs, subrIndex, "callothersubr"
- if subrIndex == 0 and nArgs == 3:
- self.doFlex()
- self.flexing = 0
- elif subrIndex == 1 and nArgs == 0:
- self.flexing = 1
- # ignore...
- def op_pop(self, index):
- pass # ignore...
-
- def doFlex(self):
- finaly = self.pop()
- finalx = self.pop()
- self.pop() # flex height is unused
-
- p3y = self.pop()
- p3x = self.pop()
- bcp4y = self.pop()
- bcp4x = self.pop()
- bcp3y = self.pop()
- bcp3x = self.pop()
- p2y = self.pop()
- p2x = self.pop()
- bcp2y = self.pop()
- bcp2x = self.pop()
- bcp1y = self.pop()
- bcp1x = self.pop()
- rpy = self.pop()
- rpx = self.pop()
-
- # call rrcurveto
- self.push(bcp1x+rpx)
- self.push(bcp1y+rpy)
- self.push(bcp2x)
- self.push(bcp2y)
- self.push(p2x)
- self.push(p2y)
- self.op_rrcurveto(None)
-
- # call rrcurveto
- self.push(bcp3x)
- self.push(bcp3y)
- self.push(bcp4x)
- self.push(bcp4y)
- self.push(p3x)
- self.push(p3y)
- self.op_rrcurveto(None)
-
- # Push back final coords so subr 0 can find them
- self.push(finalx)
- self.push(finaly)
-
- def op_dotsection(self, index):
- self.popall() # XXX
- def op_hstem3(self, index):
- self.popall() # XXX
- def op_seac(self, index):
- "asb adx ady bchar achar seac"
- from fontTools.encodings.StandardEncoding import StandardEncoding
- asb, adx, ady, bchar, achar = self.popall()
- baseGlyph = StandardEncoding[bchar]
- self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
- accentGlyph = StandardEncoding[achar]
- adx = adx + self.sbx - asb # seac weirdness
- self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
- def op_vstem3(self, index):
- self.popall() # XXX
class T2CharString(object):
+ operandEncoding = t2OperandEncoding
+ operators, opcodes = buildOperatorDict(t2Operators)
+ decompilerClass = SimpleT2Decompiler
+ outlineExtractor = T2OutlineExtractor
+
+ def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None):
+ if program is None:
+ program = []
+ self.bytecode = bytecode
+ self.program = program
+ self.private = private
+ self.globalSubrs = globalSubrs if globalSubrs is not None else []
+ self._cur_vsindex = None
+
+ def getNumRegions(self, vsindex=None):
+ pd = self.private
+ assert pd is not None
+ if vsindex is not None:
+ self._cur_vsindex = vsindex
+ elif self._cur_vsindex is None:
+ self._cur_vsindex = pd.vsindex if hasattr(pd, "vsindex") else 0
+ return pd.getNumRegions(self._cur_vsindex)
+
+ def __repr__(self):
+ if self.bytecode is None:
+ return "<%s (source) at %x>" % (self.__class__.__name__, id(self))
+ else:
+ return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self))
+
+ def getIntEncoder(self):
+ return encodeIntT2
+
+ def getFixedEncoder(self):
+ return encodeFixed
+
+ def decompile(self):
+ if not self.needsDecompilation():
+ return
+ subrs = getattr(self.private, "Subrs", [])
+ decompiler = self.decompilerClass(subrs, self.globalSubrs, self.private)
+ decompiler.execute(self)
+
+ def draw(self, pen, blender=None):
+ subrs = getattr(self.private, "Subrs", [])
+ extractor = self.outlineExtractor(
+ pen,
+ subrs,
+ self.globalSubrs,
+ self.private.nominalWidthX,
+ self.private.defaultWidthX,
+ self.private,
+ blender,
+ )
+ extractor.execute(self)
+ self.width = extractor.width
+
+ def calcBounds(self, glyphSet):
+ boundsPen = BoundsPen(glyphSet)
+ self.draw(boundsPen)
+ return boundsPen.bounds
+
+ def compile(self, isCFF2=False):
+ if self.bytecode is not None:
+ return
+ opcodes = self.opcodes
+ program = self.program
+
+ if isCFF2:
+ # If present, remove return and endchar operators.
+ if program and program[-1] in ("return", "endchar"):
+ program = program[:-1]
+ elif program and not isinstance(program[-1], str):
+ raise CharStringCompileError(
+ "T2CharString or Subr has items on the stack after last operator."
+ )
+
+ bytecode = []
+ encodeInt = self.getIntEncoder()
+ encodeFixed = self.getFixedEncoder()
+ i = 0
+ end = len(program)
+ while i < end:
+ token = program[i]
+ i = i + 1
+ if isinstance(token, str):
+ try:
+ bytecode.extend(bytechr(b) for b in opcodes[token])
+ except KeyError:
+ raise CharStringCompileError("illegal operator: %s" % token)
+ if token in ("hintmask", "cntrmask"):
+ bytecode.append(program[i]) # hint mask
+ i = i + 1
+ elif isinstance(token, int):
+ bytecode.append(encodeInt(token))
+ elif isinstance(token, float):
+ bytecode.append(encodeFixed(token))
+ else:
+ assert 0, "unsupported type: %s" % type(token)
+ try:
+ bytecode = bytesjoin(bytecode)
+ except TypeError:
+ log.error(bytecode)
+ raise
+ self.setBytecode(bytecode)
+
+ def needsDecompilation(self):
+ return self.bytecode is not None
+
+ def setProgram(self, program):
+ self.program = program
+ self.bytecode = None
+
+ def setBytecode(self, bytecode):
+ self.bytecode = bytecode
+ self.program = None
+
+ def getToken(self, index, len=len, byteord=byteord, isinstance=isinstance):
+ if self.bytecode is not None:
+ if index >= len(self.bytecode):
+ return None, 0, 0
+ b0 = byteord(self.bytecode[index])
+ index = index + 1
+ handler = self.operandEncoding[b0]
+ token, index = handler(self, b0, self.bytecode, index)
+ else:
+ if index >= len(self.program):
+ return None, 0, 0
+ token = self.program[index]
+ index = index + 1
+ isOperator = isinstance(token, str)
+ return token, isOperator, index
+
+ def getBytes(self, index, nBytes):
+ if self.bytecode is not None:
+ newIndex = index + nBytes
+ bytes = self.bytecode[index:newIndex]
+ index = newIndex
+ else:
+ bytes = self.program[index]
+ index = index + 1
+ assert len(bytes) == nBytes
+ return bytes, index
+
+ def handle_operator(self, operator):
+ return operator
+
+ def toXML(self, xmlWriter, ttFont=None):
+ from fontTools.misc.textTools import num2binary
+
+ if self.bytecode is not None:
+ xmlWriter.dumphex(self.bytecode)
+ else:
+ index = 0
+ args = []
+ while True:
+ token, isOperator, index = self.getToken(index)
+ if token is None:
+ break
+ if isOperator:
+ if token in ("hintmask", "cntrmask"):
+ hintMask, isOperator, index = self.getToken(index)
+ bits = []
+ for byte in hintMask:
+ bits.append(num2binary(byteord(byte), 8))
+ hintMask = strjoin(bits)
+ line = " ".join(args + [token, hintMask])
+ else:
+ line = " ".join(args + [token])
+ xmlWriter.write(line)
+ xmlWriter.newline()
+ args = []
+ else:
+ if isinstance(token, float):
+ token = floatToFixedToStr(token, precisionBits=16)
+ else:
+ token = str(token)
+ args.append(token)
+ if args:
+ # NOTE: only CFF2 charstrings/subrs can have numeric arguments on
+ # the stack after the last operator. Compiling this would fail if
+ # this is part of CFF 1.0 table.
+ line = " ".join(args)
+ xmlWriter.write(line)
+
+ def fromXML(self, name, attrs, content):
+ from fontTools.misc.textTools import binary2num, readHex
+
+ if attrs.get("raw"):
+ self.setBytecode(readHex(content))
+ return
+ content = strjoin(content)
+ content = content.split()
+ program = []
+ end = len(content)
+ i = 0
+ while i < end:
+ token = content[i]
+ i = i + 1
+ try:
+ token = int(token)
+ except ValueError:
+ try:
+ token = strToFixedToFloat(token, precisionBits=16)
+ except ValueError:
+ program.append(token)
+ if token in ("hintmask", "cntrmask"):
+ mask = content[i]
+ maskBytes = b""
+ for j in range(0, len(mask), 8):
+ maskBytes = maskBytes + bytechr(binary2num(mask[j : j + 8]))
+ program.append(maskBytes)
+ i = i + 1
+ else:
+ program.append(token)
+ else:
+ program.append(token)
+ self.setProgram(program)
- operandEncoding = t2OperandEncoding
- operators, opcodes = buildOperatorDict(t2Operators)
- decompilerClass = SimpleT2Decompiler
- outlineExtractor = T2OutlineExtractor
-
- def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None):
- if program is None:
- program = []
- self.bytecode = bytecode
- self.program = program
- self.private = private
- self.globalSubrs = globalSubrs if globalSubrs is not None else []
- self._cur_vsindex = None
-
- def getNumRegions(self, vsindex=None):
- pd = self.private
- assert(pd is not None)
- if vsindex is not None:
- self._cur_vsindex = vsindex
- elif self._cur_vsindex is None:
- self._cur_vsindex = pd.vsindex if hasattr(pd, 'vsindex') else 0
- return pd.getNumRegions(self._cur_vsindex)
-
- def __repr__(self):
- if self.bytecode is None:
- return "<%s (source) at %x>" % (self.__class__.__name__, id(self))
- else:
- return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self))
-
- def getIntEncoder(self):
- return encodeIntT2
-
- def getFixedEncoder(self):
- return encodeFixed
-
- def decompile(self):
- if not self.needsDecompilation():
- return
- subrs = getattr(self.private, "Subrs", [])
- decompiler = self.decompilerClass(subrs, self.globalSubrs, self.private)
- decompiler.execute(self)
-
- def draw(self, pen):
- subrs = getattr(self.private, "Subrs", [])
- extractor = self.outlineExtractor(pen, subrs, self.globalSubrs,
- self.private.nominalWidthX, self.private.defaultWidthX,
- self.private)
- extractor.execute(self)
- self.width = extractor.width
-
- def calcBounds(self, glyphSet):
- boundsPen = BoundsPen(glyphSet)
- self.draw(boundsPen)
- return boundsPen.bounds
-
- def compile(self, isCFF2=False):
- if self.bytecode is not None:
- return
- opcodes = self.opcodes
- program = self.program
-
- if isCFF2:
- # If present, remove return and endchar operators.
- if program and program[-1] in ("return", "endchar"):
- program = program[:-1]
- elif program and not isinstance(program[-1], str):
- raise CharStringCompileError(
- "T2CharString or Subr has items on the stack after last operator."
- )
-
- bytecode = []
- encodeInt = self.getIntEncoder()
- encodeFixed = self.getFixedEncoder()
- i = 0
- end = len(program)
- while i < end:
- token = program[i]
- i = i + 1
- if isinstance(token, str):
- try:
- bytecode.extend(bytechr(b) for b in opcodes[token])
- except KeyError:
- raise CharStringCompileError("illegal operator: %s" % token)
- if token in ('hintmask', 'cntrmask'):
- bytecode.append(program[i]) # hint mask
- i = i + 1
- elif isinstance(token, int):
- bytecode.append(encodeInt(token))
- elif isinstance(token, float):
- bytecode.append(encodeFixed(token))
- else:
- assert 0, "unsupported type: %s" % type(token)
- try:
- bytecode = bytesjoin(bytecode)
- except TypeError:
- log.error(bytecode)
- raise
- self.setBytecode(bytecode)
-
- def needsDecompilation(self):
- return self.bytecode is not None
-
- def setProgram(self, program):
- self.program = program
- self.bytecode = None
-
- def setBytecode(self, bytecode):
- self.bytecode = bytecode
- self.program = None
-
- def getToken(self, index,
- len=len, byteord=byteord, isinstance=isinstance):
- if self.bytecode is not None:
- if index >= len(self.bytecode):
- return None, 0, 0
- b0 = byteord(self.bytecode[index])
- index = index + 1
- handler = self.operandEncoding[b0]
- token, index = handler(self, b0, self.bytecode, index)
- else:
- if index >= len(self.program):
- return None, 0, 0
- token = self.program[index]
- index = index + 1
- isOperator = isinstance(token, str)
- return token, isOperator, index
-
- def getBytes(self, index, nBytes):
- if self.bytecode is not None:
- newIndex = index + nBytes
- bytes = self.bytecode[index:newIndex]
- index = newIndex
- else:
- bytes = self.program[index]
- index = index + 1
- assert len(bytes) == nBytes
- return bytes, index
-
- def handle_operator(self, operator):
- return operator
-
- def toXML(self, xmlWriter, ttFont=None):
- from fontTools.misc.textTools import num2binary
- if self.bytecode is not None:
- xmlWriter.dumphex(self.bytecode)
- else:
- index = 0
- args = []
- while True:
- token, isOperator, index = self.getToken(index)
- if token is None:
- break
- if isOperator:
- if token in ('hintmask', 'cntrmask'):
- hintMask, isOperator, index = self.getToken(index)
- bits = []
- for byte in hintMask:
- bits.append(num2binary(byteord(byte), 8))
- hintMask = strjoin(bits)
- line = ' '.join(args + [token, hintMask])
- else:
- line = ' '.join(args + [token])
- xmlWriter.write(line)
- xmlWriter.newline()
- args = []
- else:
- if isinstance(token, float):
- token = floatToFixedToStr(token, precisionBits=16)
- else:
- token = str(token)
- args.append(token)
- if args:
- # NOTE: only CFF2 charstrings/subrs can have numeric arguments on
- # the stack after the last operator. Compiling this would fail if
- # this is part of CFF 1.0 table.
- line = ' '.join(args)
- xmlWriter.write(line)
-
- def fromXML(self, name, attrs, content):
- from fontTools.misc.textTools import binary2num, readHex
- if attrs.get("raw"):
- self.setBytecode(readHex(content))
- return
- content = strjoin(content)
- content = content.split()
- program = []
- end = len(content)
- i = 0
- while i < end:
- token = content[i]
- i = i + 1
- try:
- token = int(token)
- except ValueError:
- try:
- token = strToFixedToFloat(token, precisionBits=16)
- except ValueError:
- program.append(token)
- if token in ('hintmask', 'cntrmask'):
- mask = content[i]
- maskBytes = b""
- for j in range(0, len(mask), 8):
- maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8]))
- program.append(maskBytes)
- i = i + 1
- else:
- program.append(token)
- else:
- program.append(token)
- self.setProgram(program)
class T1CharString(T2CharString):
+ operandEncoding = t1OperandEncoding
+ operators, opcodes = buildOperatorDict(t1Operators)
+
+ def __init__(self, bytecode=None, program=None, subrs=None):
+ super().__init__(bytecode, program)
+ self.subrs = subrs
+
+ def getIntEncoder(self):
+ return encodeIntT1
+
+ def getFixedEncoder(self):
+ def encodeFixed(value):
+ raise TypeError("Type 1 charstrings don't support floating point operands")
+
+ def decompile(self):
+ if self.bytecode is None:
+ return
+ program = []
+ index = 0
+ while True:
+ token, isOperator, index = self.getToken(index)
+ if token is None:
+ break
+ program.append(token)
+ self.setProgram(program)
+
+ def draw(self, pen):
+ extractor = T1OutlineExtractor(pen, self.subrs)
+ extractor.execute(self)
+ self.width = extractor.width
- operandEncoding = t1OperandEncoding
- operators, opcodes = buildOperatorDict(t1Operators)
-
- def __init__(self, bytecode=None, program=None, subrs=None):
- super().__init__(bytecode, program)
- self.subrs = subrs
-
- def getIntEncoder(self):
- return encodeIntT1
-
- def getFixedEncoder(self):
- def encodeFixed(value):
- raise TypeError("Type 1 charstrings don't support floating point operands")
-
- def decompile(self):
- if self.bytecode is None:
- return
- program = []
- index = 0
- while True:
- token, isOperator, index = self.getToken(index)
- if token is None:
- break
- program.append(token)
- self.setProgram(program)
-
- def draw(self, pen):
- extractor = T1OutlineExtractor(pen, self.subrs)
- extractor.execute(self)
- self.width = extractor.width
class DictDecompiler(object):
-
- operandEncoding = cffDictOperandEncoding
-
- def __init__(self, strings, parent=None):
- self.stack = []
- self.strings = strings
- self.dict = {}
- self.parent = parent
-
- def getDict(self):
- assert len(self.stack) == 0, "non-empty stack"
- return self.dict
-
- def decompile(self, data):
- index = 0
- lenData = len(data)
- push = self.stack.append
- while index < lenData:
- b0 = byteord(data[index])
- index = index + 1
- handler = self.operandEncoding[b0]
- value, index = handler(self, b0, data, index)
- if value is not None:
- push(value)
- def pop(self):
- value = self.stack[-1]
- del self.stack[-1]
- return value
-
- def popall(self):
- args = self.stack[:]
- del self.stack[:]
- return args
-
- def handle_operator(self, operator):
- operator, argType = operator
- if isinstance(argType, tuple):
- value = ()
- for i in range(len(argType)-1, -1, -1):
- arg = argType[i]
- arghandler = getattr(self, "arg_" + arg)
- value = (arghandler(operator),) + value
- else:
- arghandler = getattr(self, "arg_" + argType)
- value = arghandler(operator)
- if operator == "blend":
- self.stack.extend(value)
- else:
- self.dict[operator] = value
-
- def arg_number(self, name):
- if isinstance(self.stack[0], list):
- out = self.arg_blend_number(self.stack)
- else:
- out = self.pop()
- return out
-
- def arg_blend_number(self, name):
- out = []
- blendArgs = self.pop()
- numMasters = len(blendArgs)
- out.append(blendArgs)
- out.append("blend")
- dummy = self.popall()
- return blendArgs
-
- def arg_SID(self, name):
- return self.strings[self.pop()]
- def arg_array(self, name):
- return self.popall()
- def arg_blendList(self, name):
- """
- There may be non-blend args at the top of the stack. We first calculate
- where the blend args start in the stack. These are the last
- numMasters*numBlends) +1 args.
- The blend args starts with numMasters relative coordinate values, the BlueValues in the list from the default master font. This is followed by
- numBlends list of values. Each of value in one of these lists is the
- Variable Font delta for the matching region.
-
- We re-arrange this to be a list of numMaster entries. Each entry starts with the corresponding default font relative value, and is followed by
- the delta values. We then convert the default values, the first item in each entry, to an absolute value.
- """
- vsindex = self.dict.get('vsindex', 0)
- numMasters = self.parent.getNumRegions(vsindex) + 1 # only a PrivateDict has blended ops.
- numBlends = self.pop()
- args = self.popall()
- numArgs = len(args)
- # The spec says that there should be no non-blended Blue Values,.
- assert(numArgs == numMasters * numBlends)
- value = [None]*numBlends
- numDeltas = numMasters-1
- i = 0
- prevVal = 0
- while i < numBlends:
- newVal = args[i] + prevVal
- prevVal = newVal
- masterOffset = numBlends + (i* numDeltas)
- blendList = [newVal] + args[masterOffset:masterOffset+numDeltas]
- value[i] = blendList
- i += 1
- return value
-
- def arg_delta(self, name):
- valueList = self.popall()
- out = []
- if valueList and isinstance(valueList[0], list):
- # arg_blendList() has already converted these to absolute values.
- out = valueList
- else:
- current = 0
- for v in valueList:
- current = current + v
- out.append(current)
- return out
+ operandEncoding = cffDictOperandEncoding
+
+ def __init__(self, strings, parent=None):
+ self.stack = []
+ self.strings = strings
+ self.dict = {}
+ self.parent = parent
+
+ def getDict(self):
+ assert len(self.stack) == 0, "non-empty stack"
+ return self.dict
+
+ def decompile(self, data):
+ index = 0
+ lenData = len(data)
+ push = self.stack.append
+ while index < lenData:
+ b0 = byteord(data[index])
+ index = index + 1
+ handler = self.operandEncoding[b0]
+ value, index = handler(self, b0, data, index)
+ if value is not None:
+ push(value)
+
+ def pop(self):
+ value = self.stack[-1]
+ del self.stack[-1]
+ return value
+
+ def popall(self):
+ args = self.stack[:]
+ del self.stack[:]
+ return args
+
+ def handle_operator(self, operator):
+ operator, argType = operator
+ if isinstance(argType, tuple):
+ value = ()
+ for i in range(len(argType) - 1, -1, -1):
+ arg = argType[i]
+ arghandler = getattr(self, "arg_" + arg)
+ value = (arghandler(operator),) + value
+ else:
+ arghandler = getattr(self, "arg_" + argType)
+ value = arghandler(operator)
+ if operator == "blend":
+ self.stack.extend(value)
+ else:
+ self.dict[operator] = value
+
+ def arg_number(self, name):
+ if isinstance(self.stack[0], list):
+ out = self.arg_blend_number(self.stack)
+ else:
+ out = self.pop()
+ return out
+
+ def arg_blend_number(self, name):
+ out = []
+ blendArgs = self.pop()
+ numMasters = len(blendArgs)
+ out.append(blendArgs)
+ out.append("blend")
+ dummy = self.popall()
+ return blendArgs
+
+ def arg_SID(self, name):
+ return self.strings[self.pop()]
+
+ def arg_array(self, name):
+ return self.popall()
+
+ def arg_blendList(self, name):
+ """
+ There may be non-blend args at the top of the stack. We first calculate
+ where the blend args start in the stack. These are the last
+ numMasters*numBlends) +1 args.
+ The blend args starts with numMasters relative coordinate values, the BlueValues in the list from the default master font. This is followed by
+ numBlends list of values. Each of value in one of these lists is the
+ Variable Font delta for the matching region.
+
+ We re-arrange this to be a list of numMaster entries. Each entry starts with the corresponding default font relative value, and is followed by
+ the delta values. We then convert the default values, the first item in each entry, to an absolute value.
+ """
+ vsindex = self.dict.get("vsindex", 0)
+ numMasters = (
+ self.parent.getNumRegions(vsindex) + 1
+ ) # only a PrivateDict has blended ops.
+ numBlends = self.pop()
+ args = self.popall()
+ numArgs = len(args)
+ # The spec says that there should be no non-blended Blue Values,.
+ assert numArgs == numMasters * numBlends
+ value = [None] * numBlends
+ numDeltas = numMasters - 1
+ i = 0
+ prevVal = 0
+ while i < numBlends:
+ newVal = args[i] + prevVal
+ prevVal = newVal
+ masterOffset = numBlends + (i * numDeltas)
+ blendList = [newVal] + args[masterOffset : masterOffset + numDeltas]
+ value[i] = blendList
+ i += 1
+ return value
+
+ def arg_delta(self, name):
+ valueList = self.popall()
+ out = []
+ if valueList and isinstance(valueList[0], list):
+ # arg_blendList() has already converted these to absolute values.
+ out = valueList
+ else:
+ current = 0
+ for v in valueList:
+ current = current + v
+ out.append(current)
+ return out
def calcSubrBias(subrs):
- nSubrs = len(subrs)
- if nSubrs < 1240:
- bias = 107
- elif nSubrs < 33900:
- bias = 1131
- else:
- bias = 32768
- return bias
+ nSubrs = len(subrs)
+ if nSubrs < 1240:
+ bias = 107
+ elif nSubrs < 33900:
+ bias = 1131
+ else:
+ bias = 32768
+ return bias
diff --git a/Lib/fontTools/misc/psLib.py b/Lib/fontTools/misc/psLib.py
index a6c8b8b5..3bfdb4ae 100644
--- a/Lib/fontTools/misc/psLib.py
+++ b/Lib/fontTools/misc/psLib.py
@@ -1,20 +1,20 @@
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr
from fontTools.misc import eexec
from .psOperators import (
- PSOperators,
- ps_StandardEncoding,
- ps_array,
- ps_boolean,
- ps_dict,
- ps_integer,
- ps_literal,
- ps_mark,
- ps_name,
- ps_operator,
- ps_procedure,
- ps_procmark,
- ps_real,
- ps_string,
+ PSOperators,
+ ps_StandardEncoding,
+ ps_array,
+ ps_boolean,
+ ps_dict,
+ ps_integer,
+ ps_literal,
+ ps_mark,
+ ps_name,
+ ps_operator,
+ ps_procedure,
+ ps_procmark,
+ ps_real,
+ ps_string,
)
import re
from collections.abc import Callable
@@ -24,7 +24,7 @@ import logging
log = logging.getLogger(__name__)
-ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently
+ps_special = b"()<>[]{}%" # / is one too, but we take care of that one differently
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
@@ -32,7 +32,7 @@ endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*")
# XXX This not entirely correct as it doesn't allow *nested* embedded parens:
-stringPat = br"""
+stringPat = rb"""
\(
(
(
@@ -51,335 +51,348 @@ stringRE = re.compile(stringPat)
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
-class PSTokenError(Exception): pass
-class PSError(Exception): pass
+class PSTokenError(Exception):
+ pass
-class PSTokenizer(object):
- def __init__(self, buf=b'', encoding="ascii"):
- # Force self.buf to be a byte string
- buf = tobytes(buf)
- self.buf = buf
- self.len = len(buf)
- self.pos = 0
- self.closed = False
- self.encoding = encoding
-
- def read(self, n=-1):
- """Read at most 'n' bytes from the buffer, or less if the read
- hits EOF before obtaining 'n' bytes.
- If 'n' is negative or omitted, read all data until EOF is reached.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
- if n is None or n < 0:
- newpos = self.len
- else:
- newpos = min(self.pos+n, self.len)
- r = self.buf[self.pos:newpos]
- self.pos = newpos
- return r
-
- def close(self):
- if not self.closed:
- self.closed = True
- del self.buf, self.pos
-
- def getnexttoken(self,
- # localize some stuff, for performance
- len=len,
- ps_special=ps_special,
- stringmatch=stringRE.match,
- hexstringmatch=hexstringRE.match,
- commentmatch=commentRE.match,
- endmatch=endofthingRE.match):
-
- self.skipwhite()
- if self.pos >= self.len:
- return None, None
- pos = self.pos
- buf = self.buf
- char = bytechr(byteord(buf[pos]))
- if char in ps_special:
- if char in b'{}[]':
- tokentype = 'do_special'
- token = char
- elif char == b'%':
- tokentype = 'do_comment'
- _, nextpos = commentmatch(buf, pos).span()
- token = buf[pos:nextpos]
- elif char == b'(':
- tokentype = 'do_string'
- m = stringmatch(buf, pos)
- if m is None:
- raise PSTokenError('bad string at character %d' % pos)
- _, nextpos = m.span()
- token = buf[pos:nextpos]
- elif char == b'<':
- tokentype = 'do_hexstring'
- m = hexstringmatch(buf, pos)
- if m is None:
- raise PSTokenError('bad hexstring at character %d' % pos)
- _, nextpos = m.span()
- token = buf[pos:nextpos]
- else:
- raise PSTokenError('bad token at character %d' % pos)
- else:
- if char == b'/':
- tokentype = 'do_literal'
- m = endmatch(buf, pos+1)
- else:
- tokentype = ''
- m = endmatch(buf, pos)
- if m is None:
- raise PSTokenError('bad token at character %d' % pos)
- _, nextpos = m.span()
- token = buf[pos:nextpos]
- self.pos = pos + len(token)
- token = tostr(token, encoding=self.encoding)
- return tokentype, token
-
- def skipwhite(self, whitematch=skipwhiteRE.match):
- _, nextpos = whitematch(self.buf, self.pos).span()
- self.pos = nextpos
-
- def starteexec(self):
- self.pos = self.pos + 1
- self.dirtybuf = self.buf[self.pos:]
- self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
- self.len = len(self.buf)
- self.pos = 4
-
- def stopeexec(self):
- if not hasattr(self, 'dirtybuf'):
- return
- self.buf = self.dirtybuf
- del self.dirtybuf
+class PSError(Exception):
+ pass
-class PSInterpreter(PSOperators):
+class PSTokenizer(object):
+ def __init__(self, buf=b"", encoding="ascii"):
+ # Force self.buf to be a byte string
+ buf = tobytes(buf)
+ self.buf = buf
+ self.len = len(buf)
+ self.pos = 0
+ self.closed = False
+ self.encoding = encoding
+
+ def read(self, n=-1):
+ """Read at most 'n' bytes from the buffer, or less if the read
+ hits EOF before obtaining 'n' bytes.
+ If 'n' is negative or omitted, read all data until EOF is reached.
+ """
+ if self.closed:
+ raise ValueError("I/O operation on closed file")
+ if n is None or n < 0:
+ newpos = self.len
+ else:
+ newpos = min(self.pos + n, self.len)
+ r = self.buf[self.pos : newpos]
+ self.pos = newpos
+ return r
+
+ def close(self):
+ if not self.closed:
+ self.closed = True
+ del self.buf, self.pos
+
+ def getnexttoken(
+ self,
+ # localize some stuff, for performance
+ len=len,
+ ps_special=ps_special,
+ stringmatch=stringRE.match,
+ hexstringmatch=hexstringRE.match,
+ commentmatch=commentRE.match,
+ endmatch=endofthingRE.match,
+ ):
+ self.skipwhite()
+ if self.pos >= self.len:
+ return None, None
+ pos = self.pos
+ buf = self.buf
+ char = bytechr(byteord(buf[pos]))
+ if char in ps_special:
+ if char in b"{}[]":
+ tokentype = "do_special"
+ token = char
+ elif char == b"%":
+ tokentype = "do_comment"
+ _, nextpos = commentmatch(buf, pos).span()
+ token = buf[pos:nextpos]
+ elif char == b"(":
+ tokentype = "do_string"
+ m = stringmatch(buf, pos)
+ if m is None:
+ raise PSTokenError("bad string at character %d" % pos)
+ _, nextpos = m.span()
+ token = buf[pos:nextpos]
+ elif char == b"<":
+ tokentype = "do_hexstring"
+ m = hexstringmatch(buf, pos)
+ if m is None:
+ raise PSTokenError("bad hexstring at character %d" % pos)
+ _, nextpos = m.span()
+ token = buf[pos:nextpos]
+ else:
+ raise PSTokenError("bad token at character %d" % pos)
+ else:
+ if char == b"/":
+ tokentype = "do_literal"
+ m = endmatch(buf, pos + 1)
+ else:
+ tokentype = ""
+ m = endmatch(buf, pos)
+ if m is None:
+ raise PSTokenError("bad token at character %d" % pos)
+ _, nextpos = m.span()
+ token = buf[pos:nextpos]
+ self.pos = pos + len(token)
+ token = tostr(token, encoding=self.encoding)
+ return tokentype, token
+
+ def skipwhite(self, whitematch=skipwhiteRE.match):
+ _, nextpos = whitematch(self.buf, self.pos).span()
+ self.pos = nextpos
+
+ def starteexec(self):
+ self.pos = self.pos + 1
+ self.dirtybuf = self.buf[self.pos :]
+ self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
+ self.len = len(self.buf)
+ self.pos = 4
+
+ def stopeexec(self):
+ if not hasattr(self, "dirtybuf"):
+ return
+ self.buf = self.dirtybuf
+ del self.dirtybuf
- def __init__(self, encoding="ascii"):
- systemdict = {}
- userdict = {}
- self.encoding = encoding
- self.dictstack = [systemdict, userdict]
- self.stack = []
- self.proclevel = 0
- self.procmark = ps_procmark()
- self.fillsystemdict()
-
- def fillsystemdict(self):
- systemdict = self.dictstack[0]
- systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
- systemdict[']'] = ps_operator(']', self.do_makearray)
- systemdict['true'] = ps_boolean(1)
- systemdict['false'] = ps_boolean(0)
- systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
- systemdict['FontDirectory'] = ps_dict({})
- self.suckoperators(systemdict, self.__class__)
-
- def suckoperators(self, systemdict, klass):
- for name in dir(klass):
- attr = getattr(self, name)
- if isinstance(attr, Callable) and name[:3] == 'ps_':
- name = name[3:]
- systemdict[name] = ps_operator(name, attr)
- for baseclass in klass.__bases__:
- self.suckoperators(systemdict, baseclass)
-
- def interpret(self, data, getattr=getattr):
- tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
- getnexttoken = tokenizer.getnexttoken
- do_token = self.do_token
- handle_object = self.handle_object
- try:
- while 1:
- tokentype, token = getnexttoken()
- if not token:
- break
- if tokentype:
- handler = getattr(self, tokentype)
- object = handler(token)
- else:
- object = do_token(token)
- if object is not None:
- handle_object(object)
- tokenizer.close()
- self.tokenizer = None
- except:
- if self.tokenizer is not None:
- log.debug(
- 'ps error:\n'
- '- - - - - - -\n'
- '%s\n'
- '>>>\n'
- '%s\n'
- '- - - - - - -',
- self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos],
- self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
- raise
-
- def handle_object(self, object):
- if not (self.proclevel or object.literal or object.type == 'proceduretype'):
- if object.type != 'operatortype':
- object = self.resolve_name(object.value)
- if object.literal:
- self.push(object)
- else:
- if object.type == 'proceduretype':
- self.call_procedure(object)
- else:
- object.function()
- else:
- self.push(object)
-
- def call_procedure(self, proc):
- handle_object = self.handle_object
- for item in proc.value:
- handle_object(item)
-
- def resolve_name(self, name):
- dictstack = self.dictstack
- for i in range(len(dictstack)-1, -1, -1):
- if name in dictstack[i]:
- return dictstack[i][name]
- raise PSError('name error: ' + str(name))
-
- def do_token(self, token,
- int=int,
- float=float,
- ps_name=ps_name,
- ps_integer=ps_integer,
- ps_real=ps_real):
- try:
- num = int(token)
- except (ValueError, OverflowError):
- try:
- num = float(token)
- except (ValueError, OverflowError):
- if '#' in token:
- hashpos = token.find('#')
- try:
- base = int(token[:hashpos])
- num = int(token[hashpos+1:], base)
- except (ValueError, OverflowError):
- return ps_name(token)
- else:
- return ps_integer(num)
- else:
- return ps_name(token)
- else:
- return ps_real(num)
- else:
- return ps_integer(num)
-
- def do_comment(self, token):
- pass
-
- def do_literal(self, token):
- return ps_literal(token[1:])
-
- def do_string(self, token):
- return ps_string(token[1:-1])
-
- def do_hexstring(self, token):
- hexStr = "".join(token[1:-1].split())
- if len(hexStr) % 2:
- hexStr = hexStr + '0'
- cleanstr = []
- for i in range(0, len(hexStr), 2):
- cleanstr.append(chr(int(hexStr[i:i+2], 16)))
- cleanstr = "".join(cleanstr)
- return ps_string(cleanstr)
-
- def do_special(self, token):
- if token == '{':
- self.proclevel = self.proclevel + 1
- return self.procmark
- elif token == '}':
- proc = []
- while 1:
- topobject = self.pop()
- if topobject == self.procmark:
- break
- proc.append(topobject)
- self.proclevel = self.proclevel - 1
- proc.reverse()
- return ps_procedure(proc)
- elif token == '[':
- return self.mark
- elif token == ']':
- return ps_name(']')
- else:
- raise PSTokenError('huh?')
-
- def push(self, object):
- self.stack.append(object)
-
- def pop(self, *types):
- stack = self.stack
- if not stack:
- raise PSError('stack underflow')
- object = stack[-1]
- if types:
- if object.type not in types:
- raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type))
- del stack[-1]
- return object
-
- def do_makearray(self):
- array = []
- while 1:
- topobject = self.pop()
- if topobject == self.mark:
- break
- array.append(topobject)
- array.reverse()
- self.push(ps_array(array))
-
- def close(self):
- """Remove circular references."""
- del self.stack
- del self.dictstack
+
+class PSInterpreter(PSOperators):
+ def __init__(self, encoding="ascii"):
+ systemdict = {}
+ userdict = {}
+ self.encoding = encoding
+ self.dictstack = [systemdict, userdict]
+ self.stack = []
+ self.proclevel = 0
+ self.procmark = ps_procmark()
+ self.fillsystemdict()
+
+ def fillsystemdict(self):
+ systemdict = self.dictstack[0]
+ systemdict["["] = systemdict["mark"] = self.mark = ps_mark()
+ systemdict["]"] = ps_operator("]", self.do_makearray)
+ systemdict["true"] = ps_boolean(1)
+ systemdict["false"] = ps_boolean(0)
+ systemdict["StandardEncoding"] = ps_array(ps_StandardEncoding)
+ systemdict["FontDirectory"] = ps_dict({})
+ self.suckoperators(systemdict, self.__class__)
+
+ def suckoperators(self, systemdict, klass):
+ for name in dir(klass):
+ attr = getattr(self, name)
+ if isinstance(attr, Callable) and name[:3] == "ps_":
+ name = name[3:]
+ systemdict[name] = ps_operator(name, attr)
+ for baseclass in klass.__bases__:
+ self.suckoperators(systemdict, baseclass)
+
+ def interpret(self, data, getattr=getattr):
+ tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
+ getnexttoken = tokenizer.getnexttoken
+ do_token = self.do_token
+ handle_object = self.handle_object
+ try:
+ while 1:
+ tokentype, token = getnexttoken()
+ if not token:
+ break
+ if tokentype:
+ handler = getattr(self, tokentype)
+ object = handler(token)
+ else:
+ object = do_token(token)
+ if object is not None:
+ handle_object(object)
+ tokenizer.close()
+ self.tokenizer = None
+ except:
+ if self.tokenizer is not None:
+ log.debug(
+ "ps error:\n"
+ "- - - - - - -\n"
+ "%s\n"
+ ">>>\n"
+ "%s\n"
+ "- - - - - - -",
+ self.tokenizer.buf[self.tokenizer.pos - 50 : self.tokenizer.pos],
+ self.tokenizer.buf[self.tokenizer.pos : self.tokenizer.pos + 50],
+ )
+ raise
+
+ def handle_object(self, object):
+ if not (self.proclevel or object.literal or object.type == "proceduretype"):
+ if object.type != "operatortype":
+ object = self.resolve_name(object.value)
+ if object.literal:
+ self.push(object)
+ else:
+ if object.type == "proceduretype":
+ self.call_procedure(object)
+ else:
+ object.function()
+ else:
+ self.push(object)
+
+ def call_procedure(self, proc):
+ handle_object = self.handle_object
+ for item in proc.value:
+ handle_object(item)
+
+ def resolve_name(self, name):
+ dictstack = self.dictstack
+ for i in range(len(dictstack) - 1, -1, -1):
+ if name in dictstack[i]:
+ return dictstack[i][name]
+ raise PSError("name error: " + str(name))
+
+ def do_token(
+ self,
+ token,
+ int=int,
+ float=float,
+ ps_name=ps_name,
+ ps_integer=ps_integer,
+ ps_real=ps_real,
+ ):
+ try:
+ num = int(token)
+ except (ValueError, OverflowError):
+ try:
+ num = float(token)
+ except (ValueError, OverflowError):
+ if "#" in token:
+ hashpos = token.find("#")
+ try:
+ base = int(token[:hashpos])
+ num = int(token[hashpos + 1 :], base)
+ except (ValueError, OverflowError):
+ return ps_name(token)
+ else:
+ return ps_integer(num)
+ else:
+ return ps_name(token)
+ else:
+ return ps_real(num)
+ else:
+ return ps_integer(num)
+
+ def do_comment(self, token):
+ pass
+
+ def do_literal(self, token):
+ return ps_literal(token[1:])
+
+ def do_string(self, token):
+ return ps_string(token[1:-1])
+
+ def do_hexstring(self, token):
+ hexStr = "".join(token[1:-1].split())
+ if len(hexStr) % 2:
+ hexStr = hexStr + "0"
+ cleanstr = []
+ for i in range(0, len(hexStr), 2):
+ cleanstr.append(chr(int(hexStr[i : i + 2], 16)))
+ cleanstr = "".join(cleanstr)
+ return ps_string(cleanstr)
+
+ def do_special(self, token):
+ if token == "{":
+ self.proclevel = self.proclevel + 1
+ return self.procmark
+ elif token == "}":
+ proc = []
+ while 1:
+ topobject = self.pop()
+ if topobject == self.procmark:
+ break
+ proc.append(topobject)
+ self.proclevel = self.proclevel - 1
+ proc.reverse()
+ return ps_procedure(proc)
+ elif token == "[":
+ return self.mark
+ elif token == "]":
+ return ps_name("]")
+ else:
+ raise PSTokenError("huh?")
+
+ def push(self, object):
+ self.stack.append(object)
+
+ def pop(self, *types):
+ stack = self.stack
+ if not stack:
+ raise PSError("stack underflow")
+ object = stack[-1]
+ if types:
+ if object.type not in types:
+ raise PSError(
+ "typecheck, expected %s, found %s" % (repr(types), object.type)
+ )
+ del stack[-1]
+ return object
+
+ def do_makearray(self):
+ array = []
+ while 1:
+ topobject = self.pop()
+ if topobject == self.mark:
+ break
+ array.append(topobject)
+ array.reverse()
+ self.push(ps_array(array))
+
+ def close(self):
+ """Remove circular references."""
+ del self.stack
+ del self.dictstack
def unpack_item(item):
- tp = type(item.value)
- if tp == dict:
- newitem = {}
- for key, value in item.value.items():
- newitem[key] = unpack_item(value)
- elif tp == list:
- newitem = [None] * len(item.value)
- for i in range(len(item.value)):
- newitem[i] = unpack_item(item.value[i])
- if item.type == 'proceduretype':
- newitem = tuple(newitem)
- else:
- newitem = item.value
- return newitem
+ tp = type(item.value)
+ if tp == dict:
+ newitem = {}
+ for key, value in item.value.items():
+ newitem[key] = unpack_item(value)
+ elif tp == list:
+ newitem = [None] * len(item.value)
+ for i in range(len(item.value)):
+ newitem[i] = unpack_item(item.value[i])
+ if item.type == "proceduretype":
+ newitem = tuple(newitem)
+ else:
+ newitem = item.value
+ return newitem
+
def suckfont(data, encoding="ascii"):
- m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data)
- if m:
- fontName = m.group(1)
- fontName = fontName.decode()
- else:
- fontName = None
- interpreter = PSInterpreter(encoding=encoding)
- interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop")
- interpreter.interpret(data)
- fontdir = interpreter.dictstack[0]['FontDirectory'].value
- if fontName in fontdir:
- rawfont = fontdir[fontName]
- else:
- # fall back, in case fontName wasn't found
- fontNames = list(fontdir.keys())
- if len(fontNames) > 1:
- fontNames.remove("Helvetica")
- fontNames.sort()
- rawfont = fontdir[fontNames[0]]
- interpreter.close()
- return unpack_item(rawfont)
+ m = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data)
+ if m:
+ fontName = m.group(1)
+ fontName = fontName.decode()
+ else:
+ fontName = None
+ interpreter = PSInterpreter(encoding=encoding)
+ interpreter.interpret(
+ b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop"
+ )
+ interpreter.interpret(data)
+ fontdir = interpreter.dictstack[0]["FontDirectory"].value
+ if fontName in fontdir:
+ rawfont = fontdir[fontName]
+ else:
+ # fall back, in case fontName wasn't found
+ fontNames = list(fontdir.keys())
+ if len(fontNames) > 1:
+ fontNames.remove("Helvetica")
+ fontNames.sort()
+ rawfont = fontdir[fontNames[0]]
+ interpreter.close()
+ return unpack_item(rawfont)
diff --git a/Lib/fontTools/misc/psOperators.py b/Lib/fontTools/misc/psOperators.py
index 3b378f59..d0b975ea 100644
--- a/Lib/fontTools/misc/psOperators.py
+++ b/Lib/fontTools/misc/psOperators.py
@@ -2,536 +2,571 @@ _accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"}
class ps_object(object):
+ literal = 1
+ access = 0
+ value = None
- literal = 1
- access = 0
- value = None
+ def __init__(self, value):
+ self.value = value
+ self.type = self.__class__.__name__[3:] + "type"
- def __init__(self, value):
- self.value = value
- self.type = self.__class__.__name__[3:] + "type"
-
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value))
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value))
class ps_operator(ps_object):
+ literal = 0
+
+ def __init__(self, name, function):
+ self.name = name
+ self.function = function
+ self.type = self.__class__.__name__[3:] + "type"
- literal = 0
+ def __repr__(self):
+ return "<operator %s>" % self.name
- def __init__(self, name, function):
- self.name = name
- self.function = function
- self.type = self.__class__.__name__[3:] + "type"
- def __repr__(self):
- return "<operator %s>" % self.name
class ps_procedure(ps_object):
- literal = 0
- def __repr__(self):
- return "<procedure>"
- def __str__(self):
- psstring = '{'
- for i in range(len(self.value)):
- if i:
- psstring = psstring + ' ' + str(self.value[i])
- else:
- psstring = psstring + str(self.value[i])
- return psstring + '}'
+ literal = 0
+
+ def __repr__(self):
+ return "<procedure>"
+
+ def __str__(self):
+ psstring = "{"
+ for i in range(len(self.value)):
+ if i:
+ psstring = psstring + " " + str(self.value[i])
+ else:
+ psstring = psstring + str(self.value[i])
+ return psstring + "}"
+
class ps_name(ps_object):
- literal = 0
- def __str__(self):
- if self.literal:
- return '/' + self.value
- else:
- return self.value
+ literal = 0
+
+ def __str__(self):
+ if self.literal:
+ return "/" + self.value
+ else:
+ return self.value
+
class ps_literal(ps_object):
- def __str__(self):
- return '/' + self.value
+ def __str__(self):
+ return "/" + self.value
+
class ps_array(ps_object):
- def __str__(self):
- psstring = '['
- for i in range(len(self.value)):
- item = self.value[i]
- access = _accessstrings[item.access]
- if access:
- access = ' ' + access
- if i:
- psstring = psstring + ' ' + str(item) + access
- else:
- psstring = psstring + str(item) + access
- return psstring + ']'
- def __repr__(self):
- return "<array>"
+ def __str__(self):
+ psstring = "["
+ for i in range(len(self.value)):
+ item = self.value[i]
+ access = _accessstrings[item.access]
+ if access:
+ access = " " + access
+ if i:
+ psstring = psstring + " " + str(item) + access
+ else:
+ psstring = psstring + str(item) + access
+ return psstring + "]"
+
+ def __repr__(self):
+ return "<array>"
+
_type1_pre_eexec_order = [
- "FontInfo",
- "FontName",
- "Encoding",
- "PaintType",
- "FontType",
- "FontMatrix",
- "FontBBox",
- "UniqueID",
- "Metrics",
- "StrokeWidth"
- ]
+ "FontInfo",
+ "FontName",
+ "Encoding",
+ "PaintType",
+ "FontType",
+ "FontMatrix",
+ "FontBBox",
+ "UniqueID",
+ "Metrics",
+ "StrokeWidth",
+]
_type1_fontinfo_order = [
- "version",
- "Notice",
- "FullName",
- "FamilyName",
- "Weight",
- "ItalicAngle",
- "isFixedPitch",
- "UnderlinePosition",
- "UnderlineThickness"
- ]
-
-_type1_post_eexec_order = [
- "Private",
- "CharStrings",
- "FID"
- ]
+ "version",
+ "Notice",
+ "FullName",
+ "FamilyName",
+ "Weight",
+ "ItalicAngle",
+ "isFixedPitch",
+ "UnderlinePosition",
+ "UnderlineThickness",
+]
+
+_type1_post_eexec_order = ["Private", "CharStrings", "FID"]
+
def _type1_item_repr(key, value):
- psstring = ""
- access = _accessstrings[value.access]
- if access:
- access = access + ' '
- if key == 'CharStrings':
- psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
- elif key == 'Encoding':
- psstring = psstring + _type1_Encoding_repr(value, access)
- else:
- psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
- return psstring
+ psstring = ""
+ access = _accessstrings[value.access]
+ if access:
+ access = access + " "
+ if key == "CharStrings":
+ psstring = psstring + "/%s %s def\n" % (
+ key,
+ _type1_CharString_repr(value.value),
+ )
+ elif key == "Encoding":
+ psstring = psstring + _type1_Encoding_repr(value, access)
+ else:
+ psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
+ return psstring
+
def _type1_Encoding_repr(encoding, access):
- encoding = encoding.value
- psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
- for i in range(256):
- name = encoding[i].value
- if name != '.notdef':
- psstring = psstring + "dup %d /%s put\n" % (i, name)
- return psstring + access + "def\n"
+ encoding = encoding.value
+ psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
+ for i in range(256):
+ name = encoding[i].value
+ if name != ".notdef":
+ psstring = psstring + "dup %d /%s put\n" % (i, name)
+ return psstring + access + "def\n"
+
def _type1_CharString_repr(charstrings):
- items = sorted(charstrings.items())
- return 'xxx'
+ items = sorted(charstrings.items())
+ return "xxx"
+
class ps_font(ps_object):
- def __str__(self):
- psstring = "%d dict dup begin\n" % len(self.value)
- for key in _type1_pre_eexec_order:
- try:
- value = self.value[key]
- except KeyError:
- pass
- else:
- psstring = psstring + _type1_item_repr(key, value)
- items = sorted(self.value.items())
- for key, value in items:
- if key not in _type1_pre_eexec_order + _type1_post_eexec_order:
- psstring = psstring + _type1_item_repr(key, value)
- psstring = psstring + "currentdict end\ncurrentfile eexec\ndup "
- for key in _type1_post_eexec_order:
- try:
- value = self.value[key]
- except KeyError:
- pass
- else:
- psstring = psstring + _type1_item_repr(key, value)
- return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \
- 8 * (64 * '0' + '\n') + 'cleartomark' + '\n'
- def __repr__(self):
- return '<font>'
+ def __str__(self):
+ psstring = "%d dict dup begin\n" % len(self.value)
+ for key in _type1_pre_eexec_order:
+ try:
+ value = self.value[key]
+ except KeyError:
+ pass
+ else:
+ psstring = psstring + _type1_item_repr(key, value)
+ items = sorted(self.value.items())
+ for key, value in items:
+ if key not in _type1_pre_eexec_order + _type1_post_eexec_order:
+ psstring = psstring + _type1_item_repr(key, value)
+ psstring = psstring + "currentdict end\ncurrentfile eexec\ndup "
+ for key in _type1_post_eexec_order:
+ try:
+ value = self.value[key]
+ except KeyError:
+ pass
+ else:
+ psstring = psstring + _type1_item_repr(key, value)
+ return (
+ psstring
+ + "dup/FontName get exch definefont pop\nmark currentfile closefile\n"
+ + 8 * (64 * "0" + "\n")
+ + "cleartomark"
+ + "\n"
+ )
+
+ def __repr__(self):
+ return "<font>"
+
class ps_file(ps_object):
- pass
+ pass
+
class ps_dict(ps_object):
- def __str__(self):
- psstring = "%d dict dup begin\n" % len(self.value)
- items = sorted(self.value.items())
- for key, value in items:
- access = _accessstrings[value.access]
- if access:
- access = access + ' '
- psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
- return psstring + 'end '
- def __repr__(self):
- return "<dict>"
+ def __str__(self):
+ psstring = "%d dict dup begin\n" % len(self.value)
+ items = sorted(self.value.items())
+ for key, value in items:
+ access = _accessstrings[value.access]
+ if access:
+ access = access + " "
+ psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
+ return psstring + "end "
+
+ def __repr__(self):
+ return "<dict>"
+
class ps_mark(ps_object):
- def __init__(self):
- self.value = 'mark'
- self.type = self.__class__.__name__[3:] + "type"
+ def __init__(self):
+ self.value = "mark"
+ self.type = self.__class__.__name__[3:] + "type"
+
class ps_procmark(ps_object):
- def __init__(self):
- self.value = 'procmark'
- self.type = self.__class__.__name__[3:] + "type"
+ def __init__(self):
+ self.value = "procmark"
+ self.type = self.__class__.__name__[3:] + "type"
+
class ps_null(ps_object):
- def __init__(self):
- self.type = self.__class__.__name__[3:] + "type"
+ def __init__(self):
+ self.type = self.__class__.__name__[3:] + "type"
+
class ps_boolean(ps_object):
- def __str__(self):
- if self.value:
- return 'true'
- else:
- return 'false'
+ def __str__(self):
+ if self.value:
+ return "true"
+ else:
+ return "false"
+
class ps_string(ps_object):
- def __str__(self):
- return "(%s)" % repr(self.value)[1:-1]
+ def __str__(self):
+ return "(%s)" % repr(self.value)[1:-1]
+
class ps_integer(ps_object):
- def __str__(self):
- return repr(self.value)
+ def __str__(self):
+ return repr(self.value)
+
class ps_real(ps_object):
- def __str__(self):
- return repr(self.value)
+ def __str__(self):
+ return repr(self.value)
class PSOperators(object):
-
- def ps_def(self):
- obj = self.pop()
- name = self.pop()
- self.dictstack[-1][name.value] = obj
-
- def ps_bind(self):
- proc = self.pop('proceduretype')
- self.proc_bind(proc)
- self.push(proc)
-
- def proc_bind(self, proc):
- for i in range(len(proc.value)):
- item = proc.value[i]
- if item.type == 'proceduretype':
- self.proc_bind(item)
- else:
- if not item.literal:
- try:
- obj = self.resolve_name(item.value)
- except:
- pass
- else:
- if obj.type == 'operatortype':
- proc.value[i] = obj
-
- def ps_exch(self):
- if len(self.stack) < 2:
- raise RuntimeError('stack underflow')
- obj1 = self.pop()
- obj2 = self.pop()
- self.push(obj1)
- self.push(obj2)
-
- def ps_dup(self):
- if not self.stack:
- raise RuntimeError('stack underflow')
- self.push(self.stack[-1])
-
- def ps_exec(self):
- obj = self.pop()
- if obj.type == 'proceduretype':
- self.call_procedure(obj)
- else:
- self.handle_object(obj)
-
- def ps_count(self):
- self.push(ps_integer(len(self.stack)))
-
- def ps_eq(self):
- any1 = self.pop()
- any2 = self.pop()
- self.push(ps_boolean(any1.value == any2.value))
-
- def ps_ne(self):
- any1 = self.pop()
- any2 = self.pop()
- self.push(ps_boolean(any1.value != any2.value))
-
- def ps_cvx(self):
- obj = self.pop()
- obj.literal = 0
- self.push(obj)
-
- def ps_matrix(self):
- matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)]
- self.push(ps_array(matrix))
-
- def ps_string(self):
- num = self.pop('integertype').value
- self.push(ps_string('\0' * num))
-
- def ps_type(self):
- obj = self.pop()
- self.push(ps_string(obj.type))
-
- def ps_store(self):
- value = self.pop()
- key = self.pop()
- name = key.value
- for i in range(len(self.dictstack)-1, -1, -1):
- if name in self.dictstack[i]:
- self.dictstack[i][name] = value
- break
- self.dictstack[-1][name] = value
-
- def ps_where(self):
- name = self.pop()
- # XXX
- self.push(ps_boolean(0))
-
- def ps_systemdict(self):
- self.push(ps_dict(self.dictstack[0]))
-
- def ps_userdict(self):
- self.push(ps_dict(self.dictstack[1]))
-
- def ps_currentdict(self):
- self.push(ps_dict(self.dictstack[-1]))
-
- def ps_currentfile(self):
- self.push(ps_file(self.tokenizer))
-
- def ps_eexec(self):
- f = self.pop('filetype').value
- f.starteexec()
-
- def ps_closefile(self):
- f = self.pop('filetype').value
- f.skipwhite()
- f.stopeexec()
-
- def ps_cleartomark(self):
- obj = self.pop()
- while obj != self.mark:
- obj = self.pop()
-
- def ps_readstring(self,
- ps_boolean=ps_boolean,
- len=len):
- s = self.pop('stringtype')
- oldstr = s.value
- f = self.pop('filetype')
- #pad = file.value.read(1)
- # for StringIO, this is faster
- f.value.pos = f.value.pos + 1
- newstr = f.value.read(len(oldstr))
- s.value = newstr
- self.push(s)
- self.push(ps_boolean(len(oldstr) == len(newstr)))
-
- def ps_known(self):
- key = self.pop()
- d = self.pop('dicttype', 'fonttype')
- self.push(ps_boolean(key.value in d.value))
-
- def ps_if(self):
- proc = self.pop('proceduretype')
- if self.pop('booleantype').value:
- self.call_procedure(proc)
-
- def ps_ifelse(self):
- proc2 = self.pop('proceduretype')
- proc1 = self.pop('proceduretype')
- if self.pop('booleantype').value:
- self.call_procedure(proc1)
- else:
- self.call_procedure(proc2)
-
- def ps_readonly(self):
- obj = self.pop()
- if obj.access < 1:
- obj.access = 1
- self.push(obj)
-
- def ps_executeonly(self):
- obj = self.pop()
- if obj.access < 2:
- obj.access = 2
- self.push(obj)
-
- def ps_noaccess(self):
- obj = self.pop()
- if obj.access < 3:
- obj.access = 3
- self.push(obj)
-
- def ps_not(self):
- obj = self.pop('booleantype', 'integertype')
- if obj.type == 'booleantype':
- self.push(ps_boolean(not obj.value))
- else:
- self.push(ps_integer(~obj.value))
-
- def ps_print(self):
- str = self.pop('stringtype')
- print('PS output --->', str.value)
-
- def ps_anchorsearch(self):
- seek = self.pop('stringtype')
- s = self.pop('stringtype')
- seeklen = len(seek.value)
- if s.value[:seeklen] == seek.value:
- self.push(ps_string(s.value[seeklen:]))
- self.push(seek)
- self.push(ps_boolean(1))
- else:
- self.push(s)
- self.push(ps_boolean(0))
-
- def ps_array(self):
- num = self.pop('integertype')
- array = ps_array([None] * num.value)
- self.push(array)
-
- def ps_astore(self):
- array = self.pop('arraytype')
- for i in range(len(array.value)-1, -1, -1):
- array.value[i] = self.pop()
- self.push(array)
-
- def ps_load(self):
- name = self.pop()
- self.push(self.resolve_name(name.value))
-
- def ps_put(self):
- obj1 = self.pop()
- obj2 = self.pop()
- obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype')
- tp = obj3.type
- if tp == 'arraytype' or tp == 'proceduretype':
- obj3.value[obj2.value] = obj1
- elif tp == 'dicttype':
- obj3.value[obj2.value] = obj1
- elif tp == 'stringtype':
- index = obj2.value
- obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index+1:]
-
- def ps_get(self):
- obj1 = self.pop()
- if obj1.value == "Encoding":
- pass
- obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype')
- tp = obj2.type
- if tp in ('arraytype', 'proceduretype'):
- self.push(obj2.value[obj1.value])
- elif tp in ('dicttype', 'fonttype'):
- self.push(obj2.value[obj1.value])
- elif tp == 'stringtype':
- self.push(ps_integer(ord(obj2.value[obj1.value])))
- else:
- assert False, "shouldn't get here"
-
- def ps_getinterval(self):
- obj1 = self.pop('integertype')
- obj2 = self.pop('integertype')
- obj3 = self.pop('arraytype', 'stringtype')
- tp = obj3.type
- if tp == 'arraytype':
- self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value]))
- elif tp == 'stringtype':
- self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value]))
-
- def ps_putinterval(self):
- obj1 = self.pop('arraytype', 'stringtype')
- obj2 = self.pop('integertype')
- obj3 = self.pop('arraytype', 'stringtype')
- tp = obj3.type
- if tp == 'arraytype':
- obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value
- elif tp == 'stringtype':
- newstr = obj3.value[:obj2.value]
- newstr = newstr + obj1.value
- newstr = newstr + obj3.value[obj2.value + len(obj1.value):]
- obj3.value = newstr
-
- def ps_cvn(self):
- self.push(ps_name(self.pop('stringtype').value))
-
- def ps_index(self):
- n = self.pop('integertype').value
- if n < 0:
- raise RuntimeError('index may not be negative')
- self.push(self.stack[-1-n])
-
- def ps_for(self):
- proc = self.pop('proceduretype')
- limit = self.pop('integertype', 'realtype').value
- increment = self.pop('integertype', 'realtype').value
- i = self.pop('integertype', 'realtype').value
- while 1:
- if increment > 0:
- if i > limit:
- break
- else:
- if i < limit:
- break
- if type(i) == type(0.0):
- self.push(ps_real(i))
- else:
- self.push(ps_integer(i))
- self.call_procedure(proc)
- i = i + increment
-
- def ps_forall(self):
- proc = self.pop('proceduretype')
- obj = self.pop('arraytype', 'stringtype', 'dicttype')
- tp = obj.type
- if tp == 'arraytype':
- for item in obj.value:
- self.push(item)
- self.call_procedure(proc)
- elif tp == 'stringtype':
- for item in obj.value:
- self.push(ps_integer(ord(item)))
- self.call_procedure(proc)
- elif tp == 'dicttype':
- for key, value in obj.value.items():
- self.push(ps_name(key))
- self.push(value)
- self.call_procedure(proc)
-
- def ps_definefont(self):
- font = self.pop('dicttype')
- name = self.pop()
- font = ps_font(font.value)
- self.dictstack[0]['FontDirectory'].value[name.value] = font
- self.push(font)
-
- def ps_findfont(self):
- name = self.pop()
- font = self.dictstack[0]['FontDirectory'].value[name.value]
- self.push(font)
-
- def ps_pop(self):
- self.pop()
-
- def ps_dict(self):
- self.pop('integertype')
- self.push(ps_dict({}))
-
- def ps_begin(self):
- self.dictstack.append(self.pop('dicttype').value)
-
- def ps_end(self):
- if len(self.dictstack) > 2:
- del self.dictstack[-1]
- else:
- raise RuntimeError('dictstack underflow')
-
-notdef = '.notdef'
+ def ps_def(self):
+ obj = self.pop()
+ name = self.pop()
+ self.dictstack[-1][name.value] = obj
+
+ def ps_bind(self):
+ proc = self.pop("proceduretype")
+ self.proc_bind(proc)
+ self.push(proc)
+
+ def proc_bind(self, proc):
+ for i in range(len(proc.value)):
+ item = proc.value[i]
+ if item.type == "proceduretype":
+ self.proc_bind(item)
+ else:
+ if not item.literal:
+ try:
+ obj = self.resolve_name(item.value)
+ except:
+ pass
+ else:
+ if obj.type == "operatortype":
+ proc.value[i] = obj
+
+ def ps_exch(self):
+ if len(self.stack) < 2:
+ raise RuntimeError("stack underflow")
+ obj1 = self.pop()
+ obj2 = self.pop()
+ self.push(obj1)
+ self.push(obj2)
+
+ def ps_dup(self):
+ if not self.stack:
+ raise RuntimeError("stack underflow")
+ self.push(self.stack[-1])
+
+ def ps_exec(self):
+ obj = self.pop()
+ if obj.type == "proceduretype":
+ self.call_procedure(obj)
+ else:
+ self.handle_object(obj)
+
+ def ps_count(self):
+ self.push(ps_integer(len(self.stack)))
+
+ def ps_eq(self):
+ any1 = self.pop()
+ any2 = self.pop()
+ self.push(ps_boolean(any1.value == any2.value))
+
+ def ps_ne(self):
+ any1 = self.pop()
+ any2 = self.pop()
+ self.push(ps_boolean(any1.value != any2.value))
+
+ def ps_cvx(self):
+ obj = self.pop()
+ obj.literal = 0
+ self.push(obj)
+
+ def ps_matrix(self):
+ matrix = [
+ ps_real(1.0),
+ ps_integer(0),
+ ps_integer(0),
+ ps_real(1.0),
+ ps_integer(0),
+ ps_integer(0),
+ ]
+ self.push(ps_array(matrix))
+
+ def ps_string(self):
+ num = self.pop("integertype").value
+ self.push(ps_string("\0" * num))
+
+ def ps_type(self):
+ obj = self.pop()
+ self.push(ps_string(obj.type))
+
+ def ps_store(self):
+ value = self.pop()
+ key = self.pop()
+ name = key.value
+ for i in range(len(self.dictstack) - 1, -1, -1):
+ if name in self.dictstack[i]:
+ self.dictstack[i][name] = value
+ break
+ self.dictstack[-1][name] = value
+
+ def ps_where(self):
+ name = self.pop()
+ # XXX
+ self.push(ps_boolean(0))
+
+ def ps_systemdict(self):
+ self.push(ps_dict(self.dictstack[0]))
+
+ def ps_userdict(self):
+ self.push(ps_dict(self.dictstack[1]))
+
+ def ps_currentdict(self):
+ self.push(ps_dict(self.dictstack[-1]))
+
+ def ps_currentfile(self):
+ self.push(ps_file(self.tokenizer))
+
+ def ps_eexec(self):
+ f = self.pop("filetype").value
+ f.starteexec()
+
+ def ps_closefile(self):
+ f = self.pop("filetype").value
+ f.skipwhite()
+ f.stopeexec()
+
+ def ps_cleartomark(self):
+ obj = self.pop()
+ while obj != self.mark:
+ obj = self.pop()
+
+ def ps_readstring(self, ps_boolean=ps_boolean, len=len):
+ s = self.pop("stringtype")
+ oldstr = s.value
+ f = self.pop("filetype")
+ # pad = file.value.read(1)
+ # for StringIO, this is faster
+ f.value.pos = f.value.pos + 1
+ newstr = f.value.read(len(oldstr))
+ s.value = newstr
+ self.push(s)
+ self.push(ps_boolean(len(oldstr) == len(newstr)))
+
+ def ps_known(self):
+ key = self.pop()
+ d = self.pop("dicttype", "fonttype")
+ self.push(ps_boolean(key.value in d.value))
+
+ def ps_if(self):
+ proc = self.pop("proceduretype")
+ if self.pop("booleantype").value:
+ self.call_procedure(proc)
+
+ def ps_ifelse(self):
+ proc2 = self.pop("proceduretype")
+ proc1 = self.pop("proceduretype")
+ if self.pop("booleantype").value:
+ self.call_procedure(proc1)
+ else:
+ self.call_procedure(proc2)
+
+ def ps_readonly(self):
+ obj = self.pop()
+ if obj.access < 1:
+ obj.access = 1
+ self.push(obj)
+
+ def ps_executeonly(self):
+ obj = self.pop()
+ if obj.access < 2:
+ obj.access = 2
+ self.push(obj)
+
+ def ps_noaccess(self):
+ obj = self.pop()
+ if obj.access < 3:
+ obj.access = 3
+ self.push(obj)
+
+ def ps_not(self):
+ obj = self.pop("booleantype", "integertype")
+ if obj.type == "booleantype":
+ self.push(ps_boolean(not obj.value))
+ else:
+ self.push(ps_integer(~obj.value))
+
+ def ps_print(self):
+ str = self.pop("stringtype")
+ print("PS output --->", str.value)
+
+ def ps_anchorsearch(self):
+ seek = self.pop("stringtype")
+ s = self.pop("stringtype")
+ seeklen = len(seek.value)
+ if s.value[:seeklen] == seek.value:
+ self.push(ps_string(s.value[seeklen:]))
+ self.push(seek)
+ self.push(ps_boolean(1))
+ else:
+ self.push(s)
+ self.push(ps_boolean(0))
+
+ def ps_array(self):
+ num = self.pop("integertype")
+ array = ps_array([None] * num.value)
+ self.push(array)
+
+ def ps_astore(self):
+ array = self.pop("arraytype")
+ for i in range(len(array.value) - 1, -1, -1):
+ array.value[i] = self.pop()
+ self.push(array)
+
+ def ps_load(self):
+ name = self.pop()
+ self.push(self.resolve_name(name.value))
+
+ def ps_put(self):
+ obj1 = self.pop()
+ obj2 = self.pop()
+ obj3 = self.pop("arraytype", "dicttype", "stringtype", "proceduretype")
+ tp = obj3.type
+ if tp == "arraytype" or tp == "proceduretype":
+ obj3.value[obj2.value] = obj1
+ elif tp == "dicttype":
+ obj3.value[obj2.value] = obj1
+ elif tp == "stringtype":
+ index = obj2.value
+ obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index + 1 :]
+
+ def ps_get(self):
+ obj1 = self.pop()
+ if obj1.value == "Encoding":
+ pass
+ obj2 = self.pop(
+ "arraytype", "dicttype", "stringtype", "proceduretype", "fonttype"
+ )
+ tp = obj2.type
+ if tp in ("arraytype", "proceduretype"):
+ self.push(obj2.value[obj1.value])
+ elif tp in ("dicttype", "fonttype"):
+ self.push(obj2.value[obj1.value])
+ elif tp == "stringtype":
+ self.push(ps_integer(ord(obj2.value[obj1.value])))
+ else:
+ assert False, "shouldn't get here"
+
+ def ps_getinterval(self):
+ obj1 = self.pop("integertype")
+ obj2 = self.pop("integertype")
+ obj3 = self.pop("arraytype", "stringtype")
+ tp = obj3.type
+ if tp == "arraytype":
+ self.push(ps_array(obj3.value[obj2.value : obj2.value + obj1.value]))
+ elif tp == "stringtype":
+ self.push(ps_string(obj3.value[obj2.value : obj2.value + obj1.value]))
+
+ def ps_putinterval(self):
+ obj1 = self.pop("arraytype", "stringtype")
+ obj2 = self.pop("integertype")
+ obj3 = self.pop("arraytype", "stringtype")
+ tp = obj3.type
+ if tp == "arraytype":
+ obj3.value[obj2.value : obj2.value + len(obj1.value)] = obj1.value
+ elif tp == "stringtype":
+ newstr = obj3.value[: obj2.value]
+ newstr = newstr + obj1.value
+ newstr = newstr + obj3.value[obj2.value + len(obj1.value) :]
+ obj3.value = newstr
+
+ def ps_cvn(self):
+ self.push(ps_name(self.pop("stringtype").value))
+
+ def ps_index(self):
+ n = self.pop("integertype").value
+ if n < 0:
+ raise RuntimeError("index may not be negative")
+ self.push(self.stack[-1 - n])
+
+ def ps_for(self):
+ proc = self.pop("proceduretype")
+ limit = self.pop("integertype", "realtype").value
+ increment = self.pop("integertype", "realtype").value
+ i = self.pop("integertype", "realtype").value
+ while 1:
+ if increment > 0:
+ if i > limit:
+ break
+ else:
+ if i < limit:
+ break
+ if type(i) == type(0.0):
+ self.push(ps_real(i))
+ else:
+ self.push(ps_integer(i))
+ self.call_procedure(proc)
+ i = i + increment
+
+ def ps_forall(self):
+ proc = self.pop("proceduretype")
+ obj = self.pop("arraytype", "stringtype", "dicttype")
+ tp = obj.type
+ if tp == "arraytype":
+ for item in obj.value:
+ self.push(item)
+ self.call_procedure(proc)
+ elif tp == "stringtype":
+ for item in obj.value:
+ self.push(ps_integer(ord(item)))
+ self.call_procedure(proc)
+ elif tp == "dicttype":
+ for key, value in obj.value.items():
+ self.push(ps_name(key))
+ self.push(value)
+ self.call_procedure(proc)
+
+ def ps_definefont(self):
+ font = self.pop("dicttype")
+ name = self.pop()
+ font = ps_font(font.value)
+ self.dictstack[0]["FontDirectory"].value[name.value] = font
+ self.push(font)
+
+ def ps_findfont(self):
+ name = self.pop()
+ font = self.dictstack[0]["FontDirectory"].value[name.value]
+ self.push(font)
+
+ def ps_pop(self):
+ self.pop()
+
+ def ps_dict(self):
+ self.pop("integertype")
+ self.push(ps_dict({}))
+
+ def ps_begin(self):
+ self.dictstack.append(self.pop("dicttype").value)
+
+ def ps_end(self):
+ if len(self.dictstack) > 2:
+ del self.dictstack[-1]
+ else:
+ raise RuntimeError("dictstack underflow")
+
+
+notdef = ".notdef"
from fontTools.encodings.StandardEncoding import StandardEncoding
+
ps_StandardEncoding = list(map(ps_name, StandardEncoding))
diff --git a/Lib/fontTools/misc/roundTools.py b/Lib/fontTools/misc/roundTools.py
index 6f4aa634..a4d45c31 100644
--- a/Lib/fontTools/misc/roundTools.py
+++ b/Lib/fontTools/misc/roundTools.py
@@ -9,41 +9,46 @@ import logging
log = logging.getLogger(__name__)
__all__ = [
- "noRound",
- "otRound",
- "maybeRound",
- "roundFunc",
+ "noRound",
+ "otRound",
+ "maybeRound",
+ "roundFunc",
+ "nearestMultipleShortestRepr",
]
+
def noRound(value):
- return value
+ return value
+
def otRound(value):
- """Round float value to nearest integer towards ``+Infinity``.
+ """Round float value to nearest integer towards ``+Infinity``.
+
+ The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
+ defines the required method for converting floating point values to
+ fixed-point. In particular it specifies the following rounding strategy:
- The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
- defines the required method for converting floating point values to
- fixed-point. In particular it specifies the following rounding strategy:
+ for fractional values of 0.5 and higher, take the next higher integer;
+ for other fractional values, truncate.
- for fractional values of 0.5 and higher, take the next higher integer;
- for other fractional values, truncate.
+ This function rounds the floating-point value according to this strategy
+ in preparation for conversion to fixed-point.
- This function rounds the floating-point value according to this strategy
- in preparation for conversion to fixed-point.
+ Args:
+ value (float): The input floating-point value.
- Args:
- value (float): The input floating-point value.
+ Returns
+ float: The rounded value.
+ """
+ # See this thread for how we ended up with this implementation:
+ # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
+ return int(math.floor(value + 0.5))
- Returns
- float: The rounded value.
- """
- # See this thread for how we ended up with this implementation:
- # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
- return int(math.floor(value + 0.5))
def maybeRound(v, tolerance, round=otRound):
- rounded = round(v)
- return rounded if abs(rounded - v) <= tolerance else v
+ rounded = round(v)
+ return rounded if abs(rounded - v) <= tolerance else v
+
def roundFunc(tolerance, round=otRound):
if tolerance < 0:
@@ -52,7 +57,7 @@ def roundFunc(tolerance, round=otRound):
if tolerance == 0:
return noRound
- if tolerance >= .5:
+ if tolerance >= 0.5:
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
@@ -85,7 +90,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
return "0.0"
value = otRound(value / factor) * factor
- eps = .5 * factor
+ eps = 0.5 * factor
lo = value - eps
hi = value + eps
# If the range of valid choices spans an integer, return the integer.
@@ -99,7 +104,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
for i in range(len(lo)):
if lo[i] != hi[i]:
break
- period = lo.find('.')
+ period = lo.find(".")
assert period < i
fmt = "%%.%df" % (i - period)
return fmt % value
diff --git a/Lib/fontTools/misc/sstruct.py b/Lib/fontTools/misc/sstruct.py
index 6db8b515..d35bc9a5 100644
--- a/Lib/fontTools/misc/sstruct.py
+++ b/Lib/fontTools/misc/sstruct.py
@@ -56,68 +56,72 @@ __copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
class Error(Exception):
- pass
+ pass
+
def pack(fmt, obj):
- formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
- elements = []
- if not isinstance(obj, dict):
- obj = obj.__dict__
- for name in names:
- value = obj[name]
- if name in fixes:
- # fixed point conversion
- value = fl2fi(value, fixes[name])
- elif isinstance(value, str):
- value = tobytes(value)
- elements.append(value)
- data = struct.pack(*(formatstring,) + tuple(elements))
- return data
+ formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
+ elements = []
+ if not isinstance(obj, dict):
+ obj = obj.__dict__
+ for name in names:
+ value = obj[name]
+ if name in fixes:
+ # fixed point conversion
+ value = fl2fi(value, fixes[name])
+ elif isinstance(value, str):
+ value = tobytes(value)
+ elements.append(value)
+ data = struct.pack(*(formatstring,) + tuple(elements))
+ return data
+
def unpack(fmt, data, obj=None):
- if obj is None:
- obj = {}
- data = tobytes(data)
- formatstring, names, fixes = getformat(fmt)
- if isinstance(obj, dict):
- d = obj
- else:
- d = obj.__dict__
- elements = struct.unpack(formatstring, data)
- for i in range(len(names)):
- name = names[i]
- value = elements[i]
- if name in fixes:
- # fixed point conversion
- value = fi2fl(value, fixes[name])
- elif isinstance(value, bytes):
- try:
- value = tostr(value)
- except UnicodeDecodeError:
- pass
- d[name] = value
- return obj
+ if obj is None:
+ obj = {}
+ data = tobytes(data)
+ formatstring, names, fixes = getformat(fmt)
+ if isinstance(obj, dict):
+ d = obj
+ else:
+ d = obj.__dict__
+ elements = struct.unpack(formatstring, data)
+ for i in range(len(names)):
+ name = names[i]
+ value = elements[i]
+ if name in fixes:
+ # fixed point conversion
+ value = fi2fl(value, fixes[name])
+ elif isinstance(value, bytes):
+ try:
+ value = tostr(value)
+ except UnicodeDecodeError:
+ pass
+ d[name] = value
+ return obj
+
def unpack2(fmt, data, obj=None):
- length = calcsize(fmt)
- return unpack(fmt, data[:length], obj), data[length:]
+ length = calcsize(fmt)
+ return unpack(fmt, data[:length], obj), data[length:]
+
def calcsize(fmt):
- formatstring, names, fixes = getformat(fmt)
- return struct.calcsize(formatstring)
+ formatstring, names, fixes = getformat(fmt)
+ return struct.calcsize(formatstring)
# matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile(
- r"\s*" # whitespace
- r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
- r"\s*:\s*" # whitespace : whitespace
- r"([xcbB?hHiIlLqQfd]|" # formatchar...
- r"[0-9]+[ps]|" # ...formatchar...
- r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
- r"\s*" # whitespace
- r"(#.*)?$" # [comment] + end of string
- )
+ r"\s*" # whitespace
+ r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
+ r"\s*:\s*" # whitespace : whitespace
+ r"([xcbB?hHiIlLqQfd]|" # formatchar...
+ r"[0-9]+[ps]|" # ...formatchar...
+ r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
+ r"\s*" # whitespace
+ r"(#.*)?$" # [comment] + end of string
+)
# matches the special struct fmt chars and 'x' (pad byte)
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
@@ -125,54 +129,53 @@ _extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
# matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$")
-_fixedpointmappings = {
- 8: "b",
- 16: "h",
- 32: "l"}
+_fixedpointmappings = {8: "b", 16: "h", 32: "l"}
_formatcache = {}
+
def getformat(fmt, keep_pad_byte=False):
- fmt = tostr(fmt, encoding="ascii")
- try:
- formatstring, names, fixes = _formatcache[fmt]
- except KeyError:
- lines = re.split("[\n;]", fmt)
- formatstring = ""
- names = []
- fixes = {}
- for line in lines:
- if _emptyRE.match(line):
- continue
- m = _extraRE.match(line)
- if m:
- formatchar = m.group(1)
- if formatchar != 'x' and formatstring:
- raise Error("a special fmt char must be first")
- else:
- m = _elementRE.match(line)
- if not m:
- raise Error("syntax error in fmt: '%s'" % line)
- name = m.group(1)
- formatchar = m.group(2)
- if keep_pad_byte or formatchar != "x":
- names.append(name)
- if m.group(3):
- # fixed point
- before = int(m.group(3))
- after = int(m.group(4))
- bits = before + after
- if bits not in [8, 16, 32]:
- raise Error("fixed point must be 8, 16 or 32 bits long")
- formatchar = _fixedpointmappings[bits]
- assert m.group(5) == "F"
- fixes[name] = after
- formatstring = formatstring + formatchar
- _formatcache[fmt] = formatstring, names, fixes
- return formatstring, names, fixes
+ fmt = tostr(fmt, encoding="ascii")
+ try:
+ formatstring, names, fixes = _formatcache[fmt]
+ except KeyError:
+ lines = re.split("[\n;]", fmt)
+ formatstring = ""
+ names = []
+ fixes = {}
+ for line in lines:
+ if _emptyRE.match(line):
+ continue
+ m = _extraRE.match(line)
+ if m:
+ formatchar = m.group(1)
+ if formatchar != "x" and formatstring:
+ raise Error("a special fmt char must be first")
+ else:
+ m = _elementRE.match(line)
+ if not m:
+ raise Error("syntax error in fmt: '%s'" % line)
+ name = m.group(1)
+ formatchar = m.group(2)
+ if keep_pad_byte or formatchar != "x":
+ names.append(name)
+ if m.group(3):
+ # fixed point
+ before = int(m.group(3))
+ after = int(m.group(4))
+ bits = before + after
+ if bits not in [8, 16, 32]:
+ raise Error("fixed point must be 8, 16 or 32 bits long")
+ formatchar = _fixedpointmappings[bits]
+ assert m.group(5) == "F"
+ fixes[name] = after
+ formatstring = formatstring + formatchar
+ _formatcache[fmt] = formatstring, names, fixes
+ return formatstring, names, fixes
+
def _test():
- fmt = """
+ fmt = """
# comments are allowed
> # big endian (see documentation for struct)
# empty lines are allowed:
@@ -188,29 +191,30 @@ def _test():
apad: x
"""
- print('size:', calcsize(fmt))
+ print("size:", calcsize(fmt))
+
+ class foo(object):
+ pass
- class foo(object):
- pass
+ i = foo()
- i = foo()
+ i.ashort = 0x7FFF
+ i.along = 0x7FFFFFFF
+ i.abyte = 0x7F
+ i.achar = "a"
+ i.astr = "12345"
+ i.afloat = 0.5
+ i.adouble = 0.5
+ i.afixed = 1.5
+ i.abool = True
- i.ashort = 0x7fff
- i.along = 0x7fffffff
- i.abyte = 0x7f
- i.achar = "a"
- i.astr = "12345"
- i.afloat = 0.5
- i.adouble = 0.5
- i.afixed = 1.5
- i.abool = True
+ data = pack(fmt, i)
+ print("data:", repr(data))
+ print(unpack(fmt, data))
+ i2 = foo()
+ unpack(fmt, data, i2)
+ print(vars(i2))
- data = pack(fmt, i)
- print('data:', repr(data))
- print(unpack(fmt, data))
- i2 = foo()
- unpack(fmt, data, i2)
- print(vars(i2))
if __name__ == "__main__":
- _test()
+ _test()
diff --git a/Lib/fontTools/misc/symfont.py b/Lib/fontTools/misc/symfont.py
index 3ff2b5df..fb9e20a4 100644
--- a/Lib/fontTools/misc/symfont.py
+++ b/Lib/fontTools/misc/symfont.py
@@ -4,98 +4,103 @@ from itertools import count
import sympy as sp
import sys
-n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
+n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
-t, x, y = sp.symbols('t x y', real=True)
-c = sp.symbols('c', real=False) # Complex representation instead of x/y
+t, x, y = sp.symbols("t x y", real=True)
+c = sp.symbols("c", real=False) # Complex representation instead of x/y
-X = tuple(sp.symbols('x:%d'%(n+1), real=True))
-Y = tuple(sp.symbols('y:%d'%(n+1), real=True))
-P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01')))
-C = tuple(sp.symbols('c:%d'%(n+1), real=False))
+X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
+Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
+P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
+C = tuple(sp.symbols("c:%d" % (n + 1), real=False))
# Cubic Bernstein basis functions
BinomialCoefficient = [(1, 0)]
-for i in range(1, n+1):
- last = BinomialCoefficient[-1]
- this = tuple(last[j-1]+last[j] for j in range(len(last)))+(0,)
- BinomialCoefficient.append(this)
+for i in range(1, n + 1):
+ last = BinomialCoefficient[-1]
+ this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,)
+ BinomialCoefficient.append(this)
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
del last, this
BernsteinPolynomial = tuple(
- tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs))
- for n,coeffs in enumerate(BinomialCoefficient))
+ tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
+ for n, coeffs in enumerate(BinomialCoefficient)
+)
BezierCurve = tuple(
- tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins))
- for j in range(2))
- for n,bernsteins in enumerate(BernsteinPolynomial))
+ tuple(
+ sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
+ for j in range(2)
+ )
+ for n, bernsteins in enumerate(BernsteinPolynomial)
+)
BezierCurveC = tuple(
- sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins))
- for n,bernsteins in enumerate(BernsteinPolynomial))
+ sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
+ for n, bernsteins in enumerate(BernsteinPolynomial)
+)
def green(f, curveXY):
- f = -sp.integrate(sp.sympify(f), y)
- f = f.subs({x:curveXY[0], y:curveXY[1]})
- f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
- return f
+ f = -sp.integrate(sp.sympify(f), y)
+ f = f.subs({x: curveXY[0], y: curveXY[1]})
+ f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
+ return f
class _BezierFuncsLazy(dict):
+ def __init__(self, symfunc):
+ self._symfunc = symfunc
+ self._bezfuncs = {}
- def __init__(self, symfunc):
- self._symfunc = symfunc
- self._bezfuncs = {}
+ def __missing__(self, i):
+ args = ["p%d" % d for d in range(i + 1)]
+ f = green(self._symfunc, BezierCurve[i])
+ f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
+ return sp.lambdify(args, f)
- def __missing__(self, i):
- args = ['p%d'%d for d in range(i+1)]
- f = green(self._symfunc, BezierCurve[i])
- f = sp.gcd_terms(f.collect(sum(P,()))) # Optimize
- return sp.lambdify(args, f)
class GreenPen(BasePen):
+ _BezierFuncs = {}
- _BezierFuncs = {}
+ @classmethod
+ def _getGreenBezierFuncs(celf, func):
+ funcstr = str(func)
+ if not funcstr in celf._BezierFuncs:
+ celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
+ return celf._BezierFuncs[funcstr]
- @classmethod
- def _getGreenBezierFuncs(celf, func):
- funcstr = str(func)
- if not funcstr in celf._BezierFuncs:
- celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
- return celf._BezierFuncs[funcstr]
+ def __init__(self, func, glyphset=None):
+ BasePen.__init__(self, glyphset)
+ self._funcs = self._getGreenBezierFuncs(func)
+ self.value = 0
- def __init__(self, func, glyphset=None):
- BasePen.__init__(self, glyphset)
- self._funcs = self._getGreenBezierFuncs(func)
- self.value = 0
+ def _moveTo(self, p0):
+ self.__startPoint = p0
- def _moveTo(self, p0):
- self.__startPoint = p0
+ def _closePath(self):
+ p0 = self._getCurrentPoint()
+ if p0 != self.__startPoint:
+ self._lineTo(self.__startPoint)
- def _closePath(self):
- p0 = self._getCurrentPoint()
- if p0 != self.__startPoint:
- self._lineTo(self.__startPoint)
+ def _endPath(self):
+ p0 = self._getCurrentPoint()
+ if p0 != self.__startPoint:
+ # Green theorem is not defined on open contours.
+ raise NotImplementedError
- def _endPath(self):
- p0 = self._getCurrentPoint()
- if p0 != self.__startPoint:
- # Green theorem is not defined on open contours.
- raise NotImplementedError
+ def _lineTo(self, p1):
+ p0 = self._getCurrentPoint()
+ self.value += self._funcs[1](p0, p1)
- def _lineTo(self, p1):
- p0 = self._getCurrentPoint()
- self.value += self._funcs[1](p0, p1)
+ def _qCurveToOne(self, p1, p2):
+ p0 = self._getCurrentPoint()
+ self.value += self._funcs[2](p0, p1, p2)
- def _qCurveToOne(self, p1, p2):
- p0 = self._getCurrentPoint()
- self.value += self._funcs[2](p0, p1, p2)
+ def _curveToOne(self, p1, p2, p3):
+ p0 = self._getCurrentPoint()
+ self.value += self._funcs[3](p0, p1, p2, p3)
- def _curveToOne(self, p1, p2, p3):
- p0 = self._getCurrentPoint()
- self.value += self._funcs[3](p0, p1, p2, p3)
# Sample pens.
# Do not use this in real code.
@@ -103,29 +108,25 @@ class GreenPen(BasePen):
AreaPen = partial(GreenPen, func=1)
MomentXPen = partial(GreenPen, func=x)
MomentYPen = partial(GreenPen, func=y)
-MomentXXPen = partial(GreenPen, func=x*x)
-MomentYYPen = partial(GreenPen, func=y*y)
-MomentXYPen = partial(GreenPen, func=x*y)
+MomentXXPen = partial(GreenPen, func=x * x)
+MomentYYPen = partial(GreenPen, func=y * y)
+MomentXYPen = partial(GreenPen, func=x * y)
def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
+ if docstring is not None:
+ print('"""%s"""' % docstring)
- if docstring is not None:
- print('"""%s"""' % docstring)
-
- print(
-'''from fontTools.pens.basePen import BasePen, OpenContourError
+ print(
+ """from fontTools.pens.basePen import BasePen, OpenContourError
try:
import cython
-except ImportError:
+
+ COMPILED = cython.compiled
+except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
-if cython.compiled:
- # Yep, I'm compiled.
- COMPILED = True
-else:
- # Just a lowly interpreted script.
COMPILED = False
@@ -135,10 +136,14 @@ class %s(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
-'''% (penName, penName), file=file)
- for name,f in funcs:
- print(' self.%s = 0' % name, file=file)
- print('''
+"""
+ % (penName, penName),
+ file=file,
+ )
+ for name, f in funcs:
+ print(" self.%s = 0" % name, file=file)
+ print(
+ """
def _moveTo(self, p0):
self.__startPoint = p0
@@ -154,32 +159,39 @@ class %s(BasePen):
raise OpenContourError(
"Green theorem is not defined on open contours."
)
-''', end='', file=file)
-
- for n in (1, 2, 3):
-
-
- subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)}
- greens = [green(f, BezierCurve[n]) for name,f in funcs]
- greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens] # Optimize
- greens = [f.subs(subs) for f in greens] # Convert to p to x/y
- defs, exprs = sp.cse(greens,
- optimizations='basic',
- symbols=(sp.Symbol('r%d'%i) for i in count()))
-
- print()
- for name,value in defs:
- print(' @cython.locals(%s=cython.double)' % name, file=file)
- if n == 1:
- print('''\
+""",
+ end="",
+ file=file,
+ )
+
+ for n in (1, 2, 3):
+ subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
+ greens = [green(f, BezierCurve[n]) for name, f in funcs]
+ greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
+ greens = [f.subs(subs) for f in greens] # Convert to p to x/y
+ defs, exprs = sp.cse(
+ greens,
+ optimizations="basic",
+ symbols=(sp.Symbol("r%d" % i) for i in count()),
+ )
+
+ print()
+ for name, value in defs:
+ print(" @cython.locals(%s=cython.double)" % name, file=file)
+ if n == 1:
+ print(
+ """\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
def _lineTo(self, p1):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
-''', file=file)
- elif n == 2:
- print('''\
+""",
+ file=file,
+ )
+ elif n == 2:
+ print(
+ """\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@@ -187,9 +199,12 @@ class %s(BasePen):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
x2,y2 = p2
-''', file=file)
- elif n == 3:
- print('''\
+""",
+ file=file,
+ )
+ elif n == 3:
+ print(
+ """\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@@ -199,29 +214,35 @@ class %s(BasePen):
x1,y1 = p1
x2,y2 = p2
x3,y3 = p3
-''', file=file)
- for name,value in defs:
- print(' %s = %s' % (name, value), file=file)
-
- print(file=file)
- for name,value in zip([f[0] for f in funcs], exprs):
- print(' self.%s += %s' % (name, value), file=file)
-
- print('''
+""",
+ file=file,
+ )
+ for name, value in defs:
+ print(" %s = %s" % (name, value), file=file)
+
+ print(file=file)
+ for name, value in zip([f[0] for f in funcs], exprs):
+ print(" self.%s += %s" % (name, value), file=file)
+
+ print(
+ """
if __name__ == '__main__':
from fontTools.misc.symfont import x, y, printGreenPen
- printGreenPen('%s', ['''%penName, file=file)
- for name,f in funcs:
- print(" ('%s', %s)," % (name, str(f)), file=file)
- print(' ])', file=file)
-
-
-if __name__ == '__main__':
- pen = AreaPen()
- pen.moveTo((100,100))
- pen.lineTo((100,200))
- pen.lineTo((200,200))
- pen.curveTo((200,250),(300,300),(250,350))
- pen.lineTo((200,100))
- pen.closePath()
- print(pen.value)
+ printGreenPen('%s', ["""
+ % penName,
+ file=file,
+ )
+ for name, f in funcs:
+ print(" ('%s', %s)," % (name, str(f)), file=file)
+ print(" ])", file=file)
+
+
+if __name__ == "__main__":
+ pen = AreaPen()
+ pen.moveTo((100, 100))
+ pen.lineTo((100, 200))
+ pen.lineTo((200, 200))
+ pen.curveTo((200, 250), (300, 300), (250, 350))
+ pen.lineTo((200, 100))
+ pen.closePath()
+ print(pen.value)
diff --git a/Lib/fontTools/misc/testTools.py b/Lib/fontTools/misc/testTools.py
index 871a9951..be611613 100644
--- a/Lib/fontTools/misc/testTools.py
+++ b/Lib/fontTools/misc/testTools.py
@@ -29,12 +29,14 @@ def parseXML(xmlSnippet):
if isinstance(xmlSnippet, bytes):
xml += xmlSnippet
elif isinstance(xmlSnippet, str):
- xml += tobytes(xmlSnippet, 'utf-8')
+ xml += tobytes(xmlSnippet, "utf-8")
elif isinstance(xmlSnippet, Iterable):
- xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet)
+ xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
else:
- raise TypeError("expected string or sequence of strings; found %r"
- % type(xmlSnippet).__name__)
+ raise TypeError(
+ "expected string or sequence of strings; found %r"
+ % type(xmlSnippet).__name__
+ )
xml += b"</root>"
reader.parser.Parse(xml, 0)
return reader.root[2]
@@ -76,6 +78,7 @@ class FakeFont:
return self.glyphOrder_[glyphID]
else:
return "glyph%.5d" % glyphID
+
def getGlyphNameMany(self, lst):
return [self.getGlyphName(gid) for gid in lst]
@@ -92,6 +95,7 @@ class FakeFont:
class TestXMLReader_(object):
def __init__(self):
from xml.parsers.expat import ParserCreate
+
self.parser = ParserCreate()
self.parser.StartElementHandler = self.startElement_
self.parser.EndElementHandler = self.endElement_
@@ -114,7 +118,7 @@ class TestXMLReader_(object):
self.stack[-1][2].append(data)
-def makeXMLWriter(newlinestr='\n'):
+def makeXMLWriter(newlinestr="\n"):
# don't write OS-specific new lines
writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
# erase XML declaration
@@ -166,7 +170,7 @@ class MockFont(object):
to its glyphOrder."""
def __init__(self):
- self._glyphOrder = ['.notdef']
+ self._glyphOrder = [".notdef"]
class AllocatingDict(dict):
def __missing__(reverseDict, key):
@@ -174,7 +178,8 @@ class MockFont(object):
gid = len(reverseDict)
reverseDict[key] = gid
return gid
- self._reverseGlyphOrder = AllocatingDict({'.notdef': 0})
+
+ self._reverseGlyphOrder = AllocatingDict({".notdef": 0})
self.lazy = False
def getGlyphID(self, glyph):
@@ -192,7 +197,6 @@ class MockFont(object):
class TestCase(_TestCase):
-
def __init__(self, methodName):
_TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@@ -202,7 +206,6 @@ class TestCase(_TestCase):
class DataFilesHandler(TestCase):
-
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0
diff --git a/Lib/fontTools/misc/textTools.py b/Lib/fontTools/misc/textTools.py
index bf75bcbd..f7ca1acc 100644
--- a/Lib/fontTools/misc/textTools.py
+++ b/Lib/fontTools/misc/textTools.py
@@ -33,90 +33,90 @@ class Tag(str):
def readHex(content):
- """Convert a list of hex strings to binary data."""
- return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))
+ """Convert a list of hex strings to binary data."""
+ return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))
def deHexStr(hexdata):
- """Convert a hex string to binary data."""
- hexdata = strjoin(hexdata.split())
- if len(hexdata) % 2:
- hexdata = hexdata + "0"
- data = []
- for i in range(0, len(hexdata), 2):
- data.append(bytechr(int(hexdata[i:i+2], 16)))
- return bytesjoin(data)
+ """Convert a hex string to binary data."""
+ hexdata = strjoin(hexdata.split())
+ if len(hexdata) % 2:
+ hexdata = hexdata + "0"
+ data = []
+ for i in range(0, len(hexdata), 2):
+ data.append(bytechr(int(hexdata[i : i + 2], 16)))
+ return bytesjoin(data)
def hexStr(data):
- """Convert binary data to a hex string."""
- h = string.hexdigits
- r = ''
- for c in data:
- i = byteord(c)
- r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
- return r
+ """Convert binary data to a hex string."""
+ h = string.hexdigits
+ r = ""
+ for c in data:
+ i = byteord(c)
+ r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
+ return r
def num2binary(l, bits=32):
- items = []
- binary = ""
- for i in range(bits):
- if l & 0x1:
- binary = "1" + binary
- else:
- binary = "0" + binary
- l = l >> 1
- if not ((i+1) % 8):
- items.append(binary)
- binary = ""
- if binary:
- items.append(binary)
- items.reverse()
- assert l in (0, -1), "number doesn't fit in number of bits"
- return ' '.join(items)
+ items = []
+ binary = ""
+ for i in range(bits):
+ if l & 0x1:
+ binary = "1" + binary
+ else:
+ binary = "0" + binary
+ l = l >> 1
+ if not ((i + 1) % 8):
+ items.append(binary)
+ binary = ""
+ if binary:
+ items.append(binary)
+ items.reverse()
+ assert l in (0, -1), "number doesn't fit in number of bits"
+ return " ".join(items)
def binary2num(bin):
- bin = strjoin(bin.split())
- l = 0
- for digit in bin:
- l = l << 1
- if digit != "0":
- l = l | 0x1
- return l
+ bin = strjoin(bin.split())
+ l = 0
+ for digit in bin:
+ l = l << 1
+ if digit != "0":
+ l = l | 0x1
+ return l
def caselessSort(alist):
- """Return a sorted copy of a list. If there are only strings
- in the list, it will not consider case.
- """
+ """Return a sorted copy of a list. If there are only strings
+ in the list, it will not consider case.
+ """
- try:
- return sorted(alist, key=lambda a: (a.lower(), a))
- except TypeError:
- return sorted(alist)
+ try:
+ return sorted(alist, key=lambda a: (a.lower(), a))
+ except TypeError:
+ return sorted(alist)
def pad(data, size):
- r""" Pad byte string 'data' with null bytes until its length is a
- multiple of 'size'.
-
- >>> len(pad(b'abcd', 4))
- 4
- >>> len(pad(b'abcde', 2))
- 6
- >>> len(pad(b'abcde', 4))
- 8
- >>> pad(b'abcdef', 4) == b'abcdef\x00\x00'
- True
- """
- data = tobytes(data)
- if size > 1:
- remainder = len(data) % size
- if remainder:
- data += b"\0" * (size - remainder)
- return data
+ r"""Pad byte string 'data' with null bytes until its length is a
+ multiple of 'size'.
+
+ >>> len(pad(b'abcd', 4))
+ 4
+ >>> len(pad(b'abcde', 2))
+ 6
+ >>> len(pad(b'abcde', 4))
+ 8
+ >>> pad(b'abcdef', 4) == b'abcdef\x00\x00'
+ True
+ """
+ data = tobytes(data)
+ if size > 1:
+ remainder = len(data) % size
+ if remainder:
+ data += b"\0" * (size - remainder)
+ return data
def tostr(s, encoding="ascii", errors="strict"):
@@ -150,5 +150,6 @@ def bytesjoin(iterable, joiner=b""):
if __name__ == "__main__":
- import doctest, sys
- sys.exit(doctest.testmod().failed)
+ import doctest, sys
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/misc/timeTools.py b/Lib/fontTools/misc/timeTools.py
index f4b84f6e..175ce815 100644
--- a/Lib/fontTools/misc/timeTools.py
+++ b/Lib/fontTools/misc/timeTools.py
@@ -10,59 +10,79 @@ import calendar
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
-MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+MONTHNAMES = [
+ None,
+ "Jan",
+ "Feb",
+ "Mar",
+ "Apr",
+ "May",
+ "Jun",
+ "Jul",
+ "Aug",
+ "Sep",
+ "Oct",
+ "Nov",
+ "Dec",
+]
def asctime(t=None):
- """
- Convert a tuple or struct_time representing a time as returned by gmtime()
- or localtime() to a 24-character string of the following form:
-
- >>> asctime(time.gmtime(0))
- 'Thu Jan 1 00:00:00 1970'
-
- If t is not provided, the current time as returned by localtime() is used.
- Locale information is not used by asctime().
-
- This is meant to normalise the output of the built-in time.asctime() across
- different platforms and Python versions.
- In Python 3.x, the day of the month is right-justified, whereas on Windows
- Python 2.7 it is padded with zeros.
-
- See https://github.com/fonttools/fonttools/issues/455
- """
- if t is None:
- t = time.localtime()
- s = "%s %s %2s %s" % (
- DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,
- time.strftime("%H:%M:%S %Y", t))
- return s
+ """
+ Convert a tuple or struct_time representing a time as returned by gmtime()
+ or localtime() to a 24-character string of the following form:
+
+ >>> asctime(time.gmtime(0))
+ 'Thu Jan 1 00:00:00 1970'
+
+ If t is not provided, the current time as returned by localtime() is used.
+ Locale information is not used by asctime().
+
+ This is meant to normalise the output of the built-in time.asctime() across
+ different platforms and Python versions.
+ In Python 3.x, the day of the month is right-justified, whereas on Windows
+ Python 2.7 it is padded with zeros.
+
+ See https://github.com/fonttools/fonttools/issues/455
+ """
+ if t is None:
+ t = time.localtime()
+ s = "%s %s %2s %s" % (
+ DAYNAMES[t.tm_wday],
+ MONTHNAMES[t.tm_mon],
+ t.tm_mday,
+ time.strftime("%H:%M:%S %Y", t),
+ )
+ return s
def timestampToString(value):
- return asctime(time.gmtime(max(0, value + epoch_diff)))
+ return asctime(time.gmtime(max(0, value + epoch_diff)))
+
def timestampFromString(value):
- wkday, mnth = value[:7].split()
- t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')
- t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
- wkday_idx = DAYNAMES.index(wkday)
- assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
- return int(t.timestamp()) - epoch_diff
+ wkday, mnth = value[:7].split()
+ t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
+ t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
+ wkday_idx = DAYNAMES.index(wkday)
+ assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
+ return int(t.timestamp()) - epoch_diff
+
def timestampNow():
- # https://reproducible-builds.org/specs/source-date-epoch/
- source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
- if source_date_epoch is not None:
- return int(source_date_epoch) - epoch_diff
- return int(time.time() - epoch_diff)
+ # https://reproducible-builds.org/specs/source-date-epoch/
+ source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
+ if source_date_epoch is not None:
+ return int(source_date_epoch) - epoch_diff
+ return int(time.time() - epoch_diff)
+
def timestampSinceEpoch(value):
- return int(value - epoch_diff)
+ return int(value - epoch_diff)
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/misc/transform.py b/Lib/fontTools/misc/transform.py
index 94e1f622..f85b54b7 100644
--- a/Lib/fontTools/misc/transform.py
+++ b/Lib/fontTools/misc/transform.py
@@ -19,6 +19,9 @@ Offset
Scale
Convenience function that returns a scaling transformation
+The DecomposedTransform class implements a transformation with separate
+translate, rotation, scale, skew, and transformation-center components.
+
:Example:
>>> t = Transform(2, 0, 0, 3, 0, 0)
@@ -49,10 +52,12 @@ Scale
>>>
"""
+import math
from typing import NamedTuple
+from dataclasses import dataclass
-__all__ = ["Transform", "Identity", "Offset", "Scale"]
+__all__ = ["Transform", "Identity", "Offset", "Scale", "DecomposedTransform"]
_EPSILON = 1e-15
@@ -61,338 +66,430 @@ _MINUS_ONE_EPSILON = -1 + _EPSILON
def _normSinCos(v):
- if abs(v) < _EPSILON:
- v = 0
- elif v > _ONE_EPSILON:
- v = 1
- elif v < _MINUS_ONE_EPSILON:
- v = -1
- return v
+ if abs(v) < _EPSILON:
+ v = 0
+ elif v > _ONE_EPSILON:
+ v = 1
+ elif v < _MINUS_ONE_EPSILON:
+ v = -1
+ return v
class Transform(NamedTuple):
- """2x2 transformation matrix plus offset, a.k.a. Affine transform.
- Transform instances are immutable: all transforming methods, eg.
- rotate(), return a new Transform instance.
-
- :Example:
-
- >>> t = Transform()
- >>> t
- <Transform [1 0 0 1 0 0]>
- >>> t.scale(2)
- <Transform [2 0 0 2 0 0]>
- >>> t.scale(2.5, 5.5)
- <Transform [2.5 0 0 5.5 0 0]>
- >>>
- >>> t.scale(2, 3).transformPoint((100, 100))
- (200, 300)
-
- Transform's constructor takes six arguments, all of which are
- optional, and can be used as keyword arguments::
-
- >>> Transform(12)
- <Transform [12 0 0 1 0 0]>
- >>> Transform(dx=12)
- <Transform [1 0 0 1 12 0]>
- >>> Transform(yx=12)
- <Transform [1 0 12 1 0 0]>
-
- Transform instances also behave like sequences of length 6::
-
- >>> len(Identity)
- 6
- >>> list(Identity)
- [1, 0, 0, 1, 0, 0]
- >>> tuple(Identity)
- (1, 0, 0, 1, 0, 0)
-
- Transform instances are comparable::
-
- >>> t1 = Identity.scale(2, 3).translate(4, 6)
- >>> t2 = Identity.translate(8, 18).scale(2, 3)
- >>> t1 == t2
- 1
-
- But beware of floating point rounding errors::
-
- >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
- >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
- >>> t1
- <Transform [0.2 0 0 0.3 0.08 0.18]>
- >>> t2
- <Transform [0.2 0 0 0.3 0.08 0.18]>
- >>> t1 == t2
- 0
-
- Transform instances are hashable, meaning you can use them as
- keys in dictionaries::
-
- >>> d = {Scale(12, 13): None}
- >>> d
- {<Transform [12 0 0 13 0 0]>: None}
-
- But again, beware of floating point rounding errors::
-
- >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
- >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
- >>> t1
- <Transform [0.2 0 0 0.3 0.08 0.18]>
- >>> t2
- <Transform [0.2 0 0 0.3 0.08 0.18]>
- >>> d = {t1: None}
- >>> d
- {<Transform [0.2 0 0 0.3 0.08 0.18]>: None}
- >>> d[t2]
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]>
- """
-
- xx: float = 1
- xy: float = 0
- yx: float = 0
- yy: float = 1
- dx: float = 0
- dy: float = 0
-
- def transformPoint(self, p):
- """Transform a point.
-
- :Example:
-
- >>> t = Transform()
- >>> t = t.scale(2.5, 5.5)
- >>> t.transformPoint((100, 100))
- (250.0, 550.0)
- """
- (x, y) = p
- xx, xy, yx, yy, dx, dy = self
- return (xx*x + yx*y + dx, xy*x + yy*y + dy)
-
- def transformPoints(self, points):
- """Transform a list of points.
-
- :Example:
-
- >>> t = Scale(2, 3)
- >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
- [(0, 0), (0, 300), (200, 300), (200, 0)]
- >>>
- """
- xx, xy, yx, yy, dx, dy = self
- return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points]
-
- def transformVector(self, v):
- """Transform an (dx, dy) vector, treating translation as zero.
-
- :Example:
-
- >>> t = Transform(2, 0, 0, 2, 10, 20)
- >>> t.transformVector((3, -4))
- (6, -8)
- >>>
- """
- (dx, dy) = v
- xx, xy, yx, yy = self[:4]
- return (xx*dx + yx*dy, xy*dx + yy*dy)
-
- def transformVectors(self, vectors):
- """Transform a list of (dx, dy) vector, treating translation as zero.
-
- :Example:
- >>> t = Transform(2, 0, 0, 2, 10, 20)
- >>> t.transformVectors([(3, -4), (5, -6)])
- [(6, -8), (10, -12)]
- >>>
- """
- xx, xy, yx, yy = self[:4]
- return [(xx*dx + yx*dy, xy*dx + yy*dy) for dx, dy in vectors]
-
- def translate(self, x=0, y=0):
- """Return a new transformation, translated (offset) by x, y.
-
- :Example:
- >>> t = Transform()
- >>> t.translate(20, 30)
- <Transform [1 0 0 1 20 30]>
- >>>
- """
- return self.transform((1, 0, 0, 1, x, y))
-
- def scale(self, x=1, y=None):
- """Return a new transformation, scaled by x, y. The 'y' argument
- may be None, which implies to use the x value for y as well.
-
- :Example:
- >>> t = Transform()
- >>> t.scale(5)
- <Transform [5 0 0 5 0 0]>
- >>> t.scale(5, 6)
- <Transform [5 0 0 6 0 0]>
- >>>
- """
- if y is None:
- y = x
- return self.transform((x, 0, 0, y, 0, 0))
-
- def rotate(self, angle):
- """Return a new transformation, rotated by 'angle' (radians).
-
- :Example:
- >>> import math
- >>> t = Transform()
- >>> t.rotate(math.pi / 2)
- <Transform [0 1 -1 0 0 0]>
- >>>
- """
- import math
- c = _normSinCos(math.cos(angle))
- s = _normSinCos(math.sin(angle))
- return self.transform((c, s, -s, c, 0, 0))
-
- def skew(self, x=0, y=0):
- """Return a new transformation, skewed by x and y.
-
- :Example:
- >>> import math
- >>> t = Transform()
- >>> t.skew(math.pi / 4)
- <Transform [1 0 1 1 0 0]>
- >>>
- """
- import math
- return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
-
- def transform(self, other):
- """Return a new transformation, transformed by another
- transformation.
-
- :Example:
- >>> t = Transform(2, 0, 0, 3, 1, 6)
- >>> t.transform((4, 3, 2, 1, 5, 6))
- <Transform [8 9 4 3 11 24]>
- >>>
- """
- xx1, xy1, yx1, yy1, dx1, dy1 = other
- xx2, xy2, yx2, yy2, dx2, dy2 = self
- return self.__class__(
- xx1*xx2 + xy1*yx2,
- xx1*xy2 + xy1*yy2,
- yx1*xx2 + yy1*yx2,
- yx1*xy2 + yy1*yy2,
- xx2*dx1 + yx2*dy1 + dx2,
- xy2*dx1 + yy2*dy1 + dy2)
-
- def reverseTransform(self, other):
- """Return a new transformation, which is the other transformation
- transformed by self. self.reverseTransform(other) is equivalent to
- other.transform(self).
-
- :Example:
- >>> t = Transform(2, 0, 0, 3, 1, 6)
- >>> t.reverseTransform((4, 3, 2, 1, 5, 6))
- <Transform [8 6 6 3 21 15]>
- >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
- <Transform [8 6 6 3 21 15]>
- >>>
- """
- xx1, xy1, yx1, yy1, dx1, dy1 = self
- xx2, xy2, yx2, yy2, dx2, dy2 = other
- return self.__class__(
- xx1*xx2 + xy1*yx2,
- xx1*xy2 + xy1*yy2,
- yx1*xx2 + yy1*yx2,
- yx1*xy2 + yy1*yy2,
- xx2*dx1 + yx2*dy1 + dx2,
- xy2*dx1 + yy2*dy1 + dy2)
-
- def inverse(self):
- """Return the inverse transformation.
-
- :Example:
- >>> t = Identity.translate(2, 3).scale(4, 5)
- >>> t.transformPoint((10, 20))
- (42, 103)
- >>> it = t.inverse()
- >>> it.transformPoint((42, 103))
- (10.0, 20.0)
- >>>
- """
- if self == Identity:
- return self
- xx, xy, yx, yy, dx, dy = self
- det = xx*yy - yx*xy
- xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det
- dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy
- return self.__class__(xx, xy, yx, yy, dx, dy)
-
- def toPS(self):
- """Return a PostScript representation
-
- :Example:
-
- >>> t = Identity.scale(2, 3).translate(4, 5)
- >>> t.toPS()
- '[2 0 0 3 8 15]'
- >>>
- """
- return "[%s %s %s %s %s %s]" % self
-
- def __bool__(self):
- """Returns True if transform is not identity, False otherwise.
-
- :Example:
-
- >>> bool(Identity)
- False
- >>> bool(Transform())
- False
- >>> bool(Scale(1.))
- False
- >>> bool(Scale(2))
- True
- >>> bool(Offset())
- False
- >>> bool(Offset(0))
- False
- >>> bool(Offset(2))
- True
- """
- return self != Identity
-
- def __repr__(self):
- return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self)
+ """2x2 transformation matrix plus offset, a.k.a. Affine transform.
+ Transform instances are immutable: all transforming methods, eg.
+ rotate(), return a new Transform instance.
+
+ :Example:
+
+ >>> t = Transform()
+ >>> t
+ <Transform [1 0 0 1 0 0]>
+ >>> t.scale(2)
+ <Transform [2 0 0 2 0 0]>
+ >>> t.scale(2.5, 5.5)
+ <Transform [2.5 0 0 5.5 0 0]>
+ >>>
+ >>> t.scale(2, 3).transformPoint((100, 100))
+ (200, 300)
+
+ Transform's constructor takes six arguments, all of which are
+ optional, and can be used as keyword arguments::
+
+ >>> Transform(12)
+ <Transform [12 0 0 1 0 0]>
+ >>> Transform(dx=12)
+ <Transform [1 0 0 1 12 0]>
+ >>> Transform(yx=12)
+ <Transform [1 0 12 1 0 0]>
+
+ Transform instances also behave like sequences of length 6::
+
+ >>> len(Identity)
+ 6
+ >>> list(Identity)
+ [1, 0, 0, 1, 0, 0]
+ >>> tuple(Identity)
+ (1, 0, 0, 1, 0, 0)
+
+ Transform instances are comparable::
+
+ >>> t1 = Identity.scale(2, 3).translate(4, 6)
+ >>> t2 = Identity.translate(8, 18).scale(2, 3)
+ >>> t1 == t2
+ 1
+
+ But beware of floating point rounding errors::
+
+ >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
+ >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
+ >>> t1
+ <Transform [0.2 0 0 0.3 0.08 0.18]>
+ >>> t2
+ <Transform [0.2 0 0 0.3 0.08 0.18]>
+ >>> t1 == t2
+ 0
+
+ Transform instances are hashable, meaning you can use them as
+ keys in dictionaries::
+
+ >>> d = {Scale(12, 13): None}
+ >>> d
+ {<Transform [12 0 0 13 0 0]>: None}
+
+ But again, beware of floating point rounding errors::
+
+ >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
+ >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
+ >>> t1
+ <Transform [0.2 0 0 0.3 0.08 0.18]>
+ >>> t2
+ <Transform [0.2 0 0 0.3 0.08 0.18]>
+ >>> d = {t1: None}
+ >>> d
+ {<Transform [0.2 0 0 0.3 0.08 0.18]>: None}
+ >>> d[t2]
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]>
+ """
+
+ xx: float = 1
+ xy: float = 0
+ yx: float = 0
+ yy: float = 1
+ dx: float = 0
+ dy: float = 0
+
+ def transformPoint(self, p):
+ """Transform a point.
+
+ :Example:
+
+ >>> t = Transform()
+ >>> t = t.scale(2.5, 5.5)
+ >>> t.transformPoint((100, 100))
+ (250.0, 550.0)
+ """
+ (x, y) = p
+ xx, xy, yx, yy, dx, dy = self
+ return (xx * x + yx * y + dx, xy * x + yy * y + dy)
+
+ def transformPoints(self, points):
+ """Transform a list of points.
+
+ :Example:
+
+ >>> t = Scale(2, 3)
+ >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
+ [(0, 0), (0, 300), (200, 300), (200, 0)]
+ >>>
+ """
+ xx, xy, yx, yy, dx, dy = self
+ return [(xx * x + yx * y + dx, xy * x + yy * y + dy) for x, y in points]
+
+ def transformVector(self, v):
+ """Transform an (dx, dy) vector, treating translation as zero.
+
+ :Example:
+
+ >>> t = Transform(2, 0, 0, 2, 10, 20)
+ >>> t.transformVector((3, -4))
+ (6, -8)
+ >>>
+ """
+ (dx, dy) = v
+ xx, xy, yx, yy = self[:4]
+ return (xx * dx + yx * dy, xy * dx + yy * dy)
+
+ def transformVectors(self, vectors):
+ """Transform a list of (dx, dy) vector, treating translation as zero.
+
+ :Example:
+ >>> t = Transform(2, 0, 0, 2, 10, 20)
+ >>> t.transformVectors([(3, -4), (5, -6)])
+ [(6, -8), (10, -12)]
+ >>>
+ """
+ xx, xy, yx, yy = self[:4]
+ return [(xx * dx + yx * dy, xy * dx + yy * dy) for dx, dy in vectors]
+
+ def translate(self, x=0, y=0):
+ """Return a new transformation, translated (offset) by x, y.
+
+ :Example:
+ >>> t = Transform()
+ >>> t.translate(20, 30)
+ <Transform [1 0 0 1 20 30]>
+ >>>
+ """
+ return self.transform((1, 0, 0, 1, x, y))
+
+ def scale(self, x=1, y=None):
+ """Return a new transformation, scaled by x, y. The 'y' argument
+ may be None, which implies to use the x value for y as well.
+
+ :Example:
+ >>> t = Transform()
+ >>> t.scale(5)
+ <Transform [5 0 0 5 0 0]>
+ >>> t.scale(5, 6)
+ <Transform [5 0 0 6 0 0]>
+ >>>
+ """
+ if y is None:
+ y = x
+ return self.transform((x, 0, 0, y, 0, 0))
+
+ def rotate(self, angle):
+ """Return a new transformation, rotated by 'angle' (radians).
+
+ :Example:
+ >>> import math
+ >>> t = Transform()
+ >>> t.rotate(math.pi / 2)
+ <Transform [0 1 -1 0 0 0]>
+ >>>
+ """
+ import math
+
+ c = _normSinCos(math.cos(angle))
+ s = _normSinCos(math.sin(angle))
+ return self.transform((c, s, -s, c, 0, 0))
+
+ def skew(self, x=0, y=0):
+ """Return a new transformation, skewed by x and y.
+
+ :Example:
+ >>> import math
+ >>> t = Transform()
+ >>> t.skew(math.pi / 4)
+ <Transform [1 0 1 1 0 0]>
+ >>>
+ """
+ import math
+
+ return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
+
+ def transform(self, other):
+ """Return a new transformation, transformed by another
+ transformation.
+
+ :Example:
+ >>> t = Transform(2, 0, 0, 3, 1, 6)
+ >>> t.transform((4, 3, 2, 1, 5, 6))
+ <Transform [8 9 4 3 11 24]>
+ >>>
+ """
+ xx1, xy1, yx1, yy1, dx1, dy1 = other
+ xx2, xy2, yx2, yy2, dx2, dy2 = self
+ return self.__class__(
+ xx1 * xx2 + xy1 * yx2,
+ xx1 * xy2 + xy1 * yy2,
+ yx1 * xx2 + yy1 * yx2,
+ yx1 * xy2 + yy1 * yy2,
+ xx2 * dx1 + yx2 * dy1 + dx2,
+ xy2 * dx1 + yy2 * dy1 + dy2,
+ )
+
+ def reverseTransform(self, other):
+ """Return a new transformation, which is the other transformation
+ transformed by self. self.reverseTransform(other) is equivalent to
+ other.transform(self).
+
+ :Example:
+ >>> t = Transform(2, 0, 0, 3, 1, 6)
+ >>> t.reverseTransform((4, 3, 2, 1, 5, 6))
+ <Transform [8 6 6 3 21 15]>
+ >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
+ <Transform [8 6 6 3 21 15]>
+ >>>
+ """
+ xx1, xy1, yx1, yy1, dx1, dy1 = self
+ xx2, xy2, yx2, yy2, dx2, dy2 = other
+ return self.__class__(
+ xx1 * xx2 + xy1 * yx2,
+ xx1 * xy2 + xy1 * yy2,
+ yx1 * xx2 + yy1 * yx2,
+ yx1 * xy2 + yy1 * yy2,
+ xx2 * dx1 + yx2 * dy1 + dx2,
+ xy2 * dx1 + yy2 * dy1 + dy2,
+ )
+
+ def inverse(self):
+ """Return the inverse transformation.
+
+ :Example:
+ >>> t = Identity.translate(2, 3).scale(4, 5)
+ >>> t.transformPoint((10, 20))
+ (42, 103)
+ >>> it = t.inverse()
+ >>> it.transformPoint((42, 103))
+ (10.0, 20.0)
+ >>>
+ """
+ if self == Identity:
+ return self
+ xx, xy, yx, yy, dx, dy = self
+ det = xx * yy - yx * xy
+ xx, xy, yx, yy = yy / det, -xy / det, -yx / det, xx / det
+ dx, dy = -xx * dx - yx * dy, -xy * dx - yy * dy
+ return self.__class__(xx, xy, yx, yy, dx, dy)
+
+ def toPS(self):
+ """Return a PostScript representation
+
+ :Example:
+
+ >>> t = Identity.scale(2, 3).translate(4, 5)
+ >>> t.toPS()
+ '[2 0 0 3 8 15]'
+ >>>
+ """
+ return "[%s %s %s %s %s %s]" % self
+
+ def toDecomposed(self) -> "DecomposedTransform":
+ """Decompose into a DecomposedTransform."""
+ return DecomposedTransform.fromTransform(self)
+
+ def __bool__(self):
+ """Returns True if transform is not identity, False otherwise.
+
+ :Example:
+
+ >>> bool(Identity)
+ False
+ >>> bool(Transform())
+ False
+ >>> bool(Scale(1.))
+ False
+ >>> bool(Scale(2))
+ True
+ >>> bool(Offset())
+ False
+ >>> bool(Offset(0))
+ False
+ >>> bool(Offset(2))
+ True
+ """
+ return self != Identity
+
+ def __repr__(self):
+ return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self)
Identity = Transform()
+
def Offset(x=0, y=0):
- """Return the identity transformation offset by x, y.
+ """Return the identity transformation offset by x, y.
- :Example:
- >>> Offset(2, 3)
- <Transform [1 0 0 1 2 3]>
- >>>
- """
- return Transform(1, 0, 0, 1, x, y)
+ :Example:
+ >>> Offset(2, 3)
+ <Transform [1 0 0 1 2 3]>
+ >>>
+ """
+ return Transform(1, 0, 0, 1, x, y)
-def Scale(x, y=None):
- """Return the identity transformation scaled by x, y. The 'y' argument
- may be None, which implies to use the x value for y as well.
- :Example:
- >>> Scale(2, 3)
- <Transform [2 0 0 3 0 0]>
- >>>
- """
- if y is None:
- y = x
- return Transform(x, 0, 0, y, 0, 0)
+def Scale(x, y=None):
+ """Return the identity transformation scaled by x, y. The 'y' argument
+ may be None, which implies to use the x value for y as well.
+
+ :Example:
+ >>> Scale(2, 3)
+ <Transform [2 0 0 3 0 0]>
+ >>>
+ """
+ if y is None:
+ y = x
+ return Transform(x, 0, 0, y, 0, 0)
+
+
+@dataclass
+class DecomposedTransform:
+ """The DecomposedTransform class implements a transformation with separate
+ translate, rotation, scale, skew, and transformation-center components.
+ """
+
+ translateX: float = 0
+ translateY: float = 0
+ rotation: float = 0 # in degrees, counter-clockwise
+ scaleX: float = 1
+ scaleY: float = 1
+ skewX: float = 0 # in degrees, clockwise
+ skewY: float = 0 # in degrees, counter-clockwise
+ tCenterX: float = 0
+ tCenterY: float = 0
+
+ @classmethod
+ def fromTransform(self, transform):
+ # Adapted from an answer on
+ # https://math.stackexchange.com/questions/13150/extracting-rotation-scale-values-from-2d-transformation-matrix
+ a, b, c, d, x, y = transform
+
+ sx = math.copysign(1, a)
+ if sx < 0:
+ a *= sx
+ b *= sx
+
+ delta = a * d - b * c
+
+ rotation = 0
+ scaleX = scaleY = 0
+ skewX = skewY = 0
+
+ # Apply the QR-like decomposition.
+ if a != 0 or b != 0:
+ r = math.sqrt(a * a + b * b)
+ rotation = math.acos(a / r) if b >= 0 else -math.acos(a / r)
+ scaleX, scaleY = (r, delta / r)
+ skewX, skewY = (math.atan((a * c + b * d) / (r * r)), 0)
+ elif c != 0 or d != 0:
+ s = math.sqrt(c * c + d * d)
+ rotation = math.pi / 2 - (
+ math.acos(-c / s) if d >= 0 else -math.acos(c / s)
+ )
+ scaleX, scaleY = (delta / s, s)
+ skewX, skewY = (0, math.atan((a * c + b * d) / (s * s)))
+ else:
+ # a = b = c = d = 0
+ pass
+
+ return DecomposedTransform(
+ x,
+ y,
+ math.degrees(rotation),
+ scaleX * sx,
+ scaleY,
+ math.degrees(skewX) * sx,
+ math.degrees(skewY),
+ 0,
+ 0,
+ )
+
+ def toTransform(self):
+ """Return the Transform() equivalent of this transformation.
+
+ :Example:
+ >>> DecomposedTransform(scaleX=2, scaleY=2).toTransform()
+ <Transform [2 0 0 2 0 0]>
+ >>>
+ """
+ t = Transform()
+ t = t.translate(
+ self.translateX + self.tCenterX, self.translateY + self.tCenterY
+ )
+ t = t.rotate(math.radians(self.rotation))
+ t = t.scale(self.scaleX, self.scaleY)
+ t = t.skew(math.radians(self.skewX), math.radians(self.skewY))
+ t = t.translate(-self.tCenterX, -self.tCenterY)
+ return t
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/misc/vector.py b/Lib/fontTools/misc/vector.py
index 81c14841..666ff15c 100644
--- a/Lib/fontTools/misc/vector.py
+++ b/Lib/fontTools/misc/vector.py
@@ -134,6 +134,11 @@ class Vector(tuple):
"can't set attribute, the 'values' attribute has been deprecated",
)
+ def isclose(self, other: "Vector", **kwargs) -> bool:
+ """Return True if the vector is close to another Vector."""
+ assert len(self) == len(other)
+ return all(math.isclose(a, b, **kwargs) for a, b in zip(self, other))
+
def _operator_rsub(a, b):
return operator.sub(b, a)
diff --git a/Lib/fontTools/misc/visitor.py b/Lib/fontTools/misc/visitor.py
index 3d28135f..d2898954 100644
--- a/Lib/fontTools/misc/visitor.py
+++ b/Lib/fontTools/misc/visitor.py
@@ -4,7 +4,6 @@ import enum
class Visitor(object):
-
defaultStop = False
@classmethod
@@ -58,7 +57,6 @@ class Visitor(object):
typ = type(thing)
for celf in celf.mro():
-
_visitors = getattr(celf, "_visitors", None)
if _visitors is None:
break
diff --git a/Lib/fontTools/misc/xmlReader.py b/Lib/fontTools/misc/xmlReader.py
index 6ec50de4..d8e502f1 100644
--- a/Lib/fontTools/misc/xmlReader.py
+++ b/Lib/fontTools/misc/xmlReader.py
@@ -8,164 +8,181 @@ import logging
log = logging.getLogger(__name__)
-class TTXParseError(Exception): pass
+
+class TTXParseError(Exception):
+ pass
+
BUFSIZE = 0x4000
class XMLReader(object):
-
- def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
- if fileOrPath == '-':
- fileOrPath = sys.stdin
- if not hasattr(fileOrPath, "read"):
- self.file = open(fileOrPath, "rb")
- self._closeStream = True
- else:
- # assume readable file object
- self.file = fileOrPath
- self._closeStream = False
- self.ttFont = ttFont
- self.progress = progress
- if quiet is not None:
- from fontTools.misc.loggingTools import deprecateArgument
- deprecateArgument("quiet", "configure logging instead")
- self.quiet = quiet
- self.root = None
- self.contentStack = []
- self.contentOnly = contentOnly
- self.stackSize = 0
-
- def read(self, rootless=False):
- if rootless:
- self.stackSize += 1
- if self.progress:
- self.file.seek(0, 2)
- fileSize = self.file.tell()
- self.progress.set(0, fileSize // 100 or 1)
- self.file.seek(0)
- self._parseFile(self.file)
- if self._closeStream:
- self.close()
- if rootless:
- self.stackSize -= 1
-
- def close(self):
- self.file.close()
-
- def _parseFile(self, file):
- from xml.parsers.expat import ParserCreate
- parser = ParserCreate()
- parser.StartElementHandler = self._startElementHandler
- parser.EndElementHandler = self._endElementHandler
- parser.CharacterDataHandler = self._characterDataHandler
-
- pos = 0
- while True:
- chunk = file.read(BUFSIZE)
- if not chunk:
- parser.Parse(chunk, 1)
- break
- pos = pos + len(chunk)
- if self.progress:
- self.progress.set(pos // 100)
- parser.Parse(chunk, 0)
-
- def _startElementHandler(self, name, attrs):
- if self.stackSize == 1 and self.contentOnly:
- # We already know the table we're parsing, skip
- # parsing the table tag and continue to
- # stack '2' which begins parsing content
- self.contentStack.append([])
- self.stackSize = 2
- return
- stackSize = self.stackSize
- self.stackSize = stackSize + 1
- subFile = attrs.get("src")
- if subFile is not None:
- if hasattr(self.file, 'name'):
- # if file has a name, get its parent directory
- dirname = os.path.dirname(self.file.name)
- else:
- # else fall back to using the current working directory
- dirname = os.getcwd()
- subFile = os.path.join(dirname, subFile)
- if not stackSize:
- if name != "ttFont":
- raise TTXParseError("illegal root tag: %s" % name)
- if self.ttFont.reader is None and not self.ttFont.tables:
- sfntVersion = attrs.get("sfntVersion")
- if sfntVersion is not None:
- if len(sfntVersion) != 4:
- sfntVersion = safeEval('"' + sfntVersion + '"')
- self.ttFont.sfntVersion = sfntVersion
- self.contentStack.append([])
- elif stackSize == 1:
- if subFile is not None:
- subReader = XMLReader(subFile, self.ttFont, self.progress)
- subReader.read()
- self.contentStack.append([])
- return
- tag = ttLib.xmlToTag(name)
- msg = "Parsing '%s' table..." % tag
- if self.progress:
- self.progress.setLabel(msg)
- log.info(msg)
- if tag == "GlyphOrder":
- tableClass = ttLib.GlyphOrder
- elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
- tableClass = DefaultTable
- else:
- tableClass = ttLib.getTableClass(tag)
- if tableClass is None:
- tableClass = DefaultTable
- if tag == 'loca' and tag in self.ttFont:
- # Special-case the 'loca' table as we need the
- # original if the 'glyf' table isn't recompiled.
- self.currentTable = self.ttFont[tag]
- else:
- self.currentTable = tableClass(tag)
- self.ttFont[tag] = self.currentTable
- self.contentStack.append([])
- elif stackSize == 2 and subFile is not None:
- subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
- subReader.read()
- self.contentStack.append([])
- self.root = subReader.root
- elif stackSize == 2:
- self.contentStack.append([])
- self.root = (name, attrs, self.contentStack[-1])
- else:
- l = []
- self.contentStack[-1].append((name, attrs, l))
- self.contentStack.append(l)
-
- def _characterDataHandler(self, data):
- if self.stackSize > 1:
- self.contentStack[-1].append(data)
-
- def _endElementHandler(self, name):
- self.stackSize = self.stackSize - 1
- del self.contentStack[-1]
- if not self.contentOnly:
- if self.stackSize == 1:
- self.root = None
- elif self.stackSize == 2:
- name, attrs, content = self.root
- self.currentTable.fromXML(name, attrs, content, self.ttFont)
- self.root = None
+ def __init__(
+ self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
+ ):
+ if fileOrPath == "-":
+ fileOrPath = sys.stdin
+ if not hasattr(fileOrPath, "read"):
+ self.file = open(fileOrPath, "rb")
+ self._closeStream = True
+ else:
+ # assume readable file object
+ self.file = fileOrPath
+ self._closeStream = False
+ self.ttFont = ttFont
+ self.progress = progress
+ if quiet is not None:
+ from fontTools.misc.loggingTools import deprecateArgument
+
+ deprecateArgument("quiet", "configure logging instead")
+ self.quiet = quiet
+ self.root = None
+ self.contentStack = []
+ self.contentOnly = contentOnly
+ self.stackSize = 0
+
+ def read(self, rootless=False):
+ if rootless:
+ self.stackSize += 1
+ if self.progress:
+ self.file.seek(0, 2)
+ fileSize = self.file.tell()
+ self.progress.set(0, fileSize // 100 or 1)
+ self.file.seek(0)
+ self._parseFile(self.file)
+ if self._closeStream:
+ self.close()
+ if rootless:
+ self.stackSize -= 1
+
+ def close(self):
+ self.file.close()
+
+ def _parseFile(self, file):
+ from xml.parsers.expat import ParserCreate
+
+ parser = ParserCreate()
+ parser.StartElementHandler = self._startElementHandler
+ parser.EndElementHandler = self._endElementHandler
+ parser.CharacterDataHandler = self._characterDataHandler
+
+ pos = 0
+ while True:
+ chunk = file.read(BUFSIZE)
+ if not chunk:
+ parser.Parse(chunk, 1)
+ break
+ pos = pos + len(chunk)
+ if self.progress:
+ self.progress.set(pos // 100)
+ parser.Parse(chunk, 0)
+
+ def _startElementHandler(self, name, attrs):
+ if self.stackSize == 1 and self.contentOnly:
+ # We already know the table we're parsing, skip
+ # parsing the table tag and continue to
+ # stack '2' which begins parsing content
+ self.contentStack.append([])
+ self.stackSize = 2
+ return
+ stackSize = self.stackSize
+ self.stackSize = stackSize + 1
+ subFile = attrs.get("src")
+ if subFile is not None:
+ if hasattr(self.file, "name"):
+ # if file has a name, get its parent directory
+ dirname = os.path.dirname(self.file.name)
+ else:
+ # else fall back to using the current working directory
+ dirname = os.getcwd()
+ subFile = os.path.join(dirname, subFile)
+ if not stackSize:
+ if name != "ttFont":
+ raise TTXParseError("illegal root tag: %s" % name)
+ if self.ttFont.reader is None and not self.ttFont.tables:
+ sfntVersion = attrs.get("sfntVersion")
+ if sfntVersion is not None:
+ if len(sfntVersion) != 4:
+ sfntVersion = safeEval('"' + sfntVersion + '"')
+ self.ttFont.sfntVersion = sfntVersion
+ self.contentStack.append([])
+ elif stackSize == 1:
+ if subFile is not None:
+ subReader = XMLReader(subFile, self.ttFont, self.progress)
+ subReader.read()
+ self.contentStack.append([])
+ return
+ tag = ttLib.xmlToTag(name)
+ msg = "Parsing '%s' table..." % tag
+ if self.progress:
+ self.progress.setLabel(msg)
+ log.info(msg)
+ if tag == "GlyphOrder":
+ tableClass = ttLib.GlyphOrder
+ elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])):
+ tableClass = DefaultTable
+ else:
+ tableClass = ttLib.getTableClass(tag)
+ if tableClass is None:
+ tableClass = DefaultTable
+ if tag == "loca" and tag in self.ttFont:
+ # Special-case the 'loca' table as we need the
+ # original if the 'glyf' table isn't recompiled.
+ self.currentTable = self.ttFont[tag]
+ else:
+ self.currentTable = tableClass(tag)
+ self.ttFont[tag] = self.currentTable
+ self.contentStack.append([])
+ elif stackSize == 2 and subFile is not None:
+ subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
+ subReader.read()
+ self.contentStack.append([])
+ self.root = subReader.root
+ elif stackSize == 2:
+ self.contentStack.append([])
+ self.root = (name, attrs, self.contentStack[-1])
+ else:
+ l = []
+ self.contentStack[-1].append((name, attrs, l))
+ self.contentStack.append(l)
+
+ def _characterDataHandler(self, data):
+ if self.stackSize > 1:
+ # parser parses in chunks, so we may get multiple calls
+ # for the same text node; thus we need to append the data
+ # to the last item in the content stack:
+ # https://github.com/fonttools/fonttools/issues/2614
+ if (
+ data != "\n"
+ and self.contentStack[-1]
+ and isinstance(self.contentStack[-1][-1], str)
+ and self.contentStack[-1][-1] != "\n"
+ ):
+ self.contentStack[-1][-1] += data
+ else:
+ self.contentStack[-1].append(data)
+
+ def _endElementHandler(self, name):
+ self.stackSize = self.stackSize - 1
+ del self.contentStack[-1]
+ if not self.contentOnly:
+ if self.stackSize == 1:
+ self.root = None
+ elif self.stackSize == 2:
+ name, attrs, content = self.root
+ self.currentTable.fromXML(name, attrs, content, self.ttFont)
+ self.root = None
class ProgressPrinter(object):
+ def __init__(self, title, maxval=100):
+ print(title)
- def __init__(self, title, maxval=100):
- print(title)
-
- def set(self, val, maxval=None):
- pass
+ def set(self, val, maxval=None):
+ pass
- def increment(self, val=1):
- pass
+ def increment(self, val=1):
+ pass
- def setLabel(self, text):
- print(text)
+ def setLabel(self, text):
+ print(text)
diff --git a/Lib/fontTools/misc/xmlWriter.py b/Lib/fontTools/misc/xmlWriter.py
index 9e30fa33..9a8dc3e3 100644
--- a/Lib/fontTools/misc/xmlWriter.py
+++ b/Lib/fontTools/misc/xmlWriter.py
@@ -9,186 +9,196 @@ INDENT = " "
class XMLWriter(object):
-
- def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8",
- newlinestr="\n"):
- if encoding.lower().replace('-','').replace('_','') != 'utf8':
- raise Exception('Only UTF-8 encoding is supported.')
- if fileOrPath == '-':
- fileOrPath = sys.stdout
- if not hasattr(fileOrPath, "write"):
- self.filename = fileOrPath
- self.file = open(fileOrPath, "wb")
- self._closeStream = True
- else:
- self.filename = None
- # assume writable file object
- self.file = fileOrPath
- self._closeStream = False
-
- # Figure out if writer expects bytes or unicodes
- try:
- # The bytes check should be first. See:
- # https://github.com/fonttools/fonttools/pull/233
- self.file.write(b'')
- self.totype = tobytes
- except TypeError:
- # This better not fail.
- self.file.write('')
- self.totype = tostr
- self.indentwhite = self.totype(indentwhite)
- if newlinestr is None:
- self.newlinestr = self.totype(os.linesep)
- else:
- self.newlinestr = self.totype(newlinestr)
- self.indentlevel = 0
- self.stack = []
- self.needindent = 1
- self.idlefunc = idlefunc
- self.idlecounter = 0
- self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
- self.newline()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exception_type, exception_value, traceback):
- self.close()
-
- def close(self):
- if self._closeStream:
- self.file.close()
-
- def write(self, string, indent=True):
- """Writes text."""
- self._writeraw(escape(string), indent=indent)
-
- def writecdata(self, string):
- """Writes text in a CDATA section."""
- self._writeraw("<![CDATA[" + string + "]]>")
-
- def write8bit(self, data, strip=False):
- """Writes a bytes() sequence into the XML, escaping
- non-ASCII bytes. When this is read in xmlReader,
- the original bytes can be recovered by encoding to
- 'latin-1'."""
- self._writeraw(escape8bit(data), strip=strip)
-
- def write_noindent(self, string):
- """Writes text without indentation."""
- self._writeraw(escape(string), indent=False)
-
- def _writeraw(self, data, indent=True, strip=False):
- """Writes bytes, possibly indented."""
- if indent and self.needindent:
- self.file.write(self.indentlevel * self.indentwhite)
- self.needindent = 0
- s = self.totype(data, encoding="utf_8")
- if (strip):
- s = s.strip()
- self.file.write(s)
-
- def newline(self):
- self.file.write(self.newlinestr)
- self.needindent = 1
- idlecounter = self.idlecounter
- if not idlecounter % 100 and self.idlefunc is not None:
- self.idlefunc()
- self.idlecounter = idlecounter + 1
-
- def comment(self, data):
- data = escape(data)
- lines = data.split("\n")
- self._writeraw("<!-- " + lines[0])
- for line in lines[1:]:
- self.newline()
- self._writeraw(" " + line)
- self._writeraw(" -->")
-
- def simpletag(self, _TAG_, *args, **kwargs):
- attrdata = self.stringifyattrs(*args, **kwargs)
- data = "<%s%s/>" % (_TAG_, attrdata)
- self._writeraw(data)
-
- def begintag(self, _TAG_, *args, **kwargs):
- attrdata = self.stringifyattrs(*args, **kwargs)
- data = "<%s%s>" % (_TAG_, attrdata)
- self._writeraw(data)
- self.stack.append(_TAG_)
- self.indent()
-
- def endtag(self, _TAG_):
- assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
- del self.stack[-1]
- self.dedent()
- data = "</%s>" % _TAG_
- self._writeraw(data)
-
- def dumphex(self, data):
- linelength = 16
- hexlinelength = linelength * 2
- chunksize = 8
- for i in range(0, len(data), linelength):
- hexline = hexStr(data[i:i+linelength])
- line = ""
- white = ""
- for j in range(0, hexlinelength, chunksize):
- line = line + white + hexline[j:j+chunksize]
- white = " "
- self._writeraw(line)
- self.newline()
-
- def indent(self):
- self.indentlevel = self.indentlevel + 1
-
- def dedent(self):
- assert self.indentlevel > 0
- self.indentlevel = self.indentlevel - 1
-
- def stringifyattrs(self, *args, **kwargs):
- if kwargs:
- assert not args
- attributes = sorted(kwargs.items())
- elif args:
- assert len(args) == 1
- attributes = args[0]
- else:
- return ""
- data = ""
- for attr, value in attributes:
- if not isinstance(value, (bytes, str)):
- value = str(value)
- data = data + ' %s="%s"' % (attr, escapeattr(value))
- return data
+ def __init__(
+ self,
+ fileOrPath,
+ indentwhite=INDENT,
+ idlefunc=None,
+ encoding="utf_8",
+ newlinestr="\n",
+ ):
+ if encoding.lower().replace("-", "").replace("_", "") != "utf8":
+ raise Exception("Only UTF-8 encoding is supported.")
+ if fileOrPath == "-":
+ fileOrPath = sys.stdout
+ if not hasattr(fileOrPath, "write"):
+ self.filename = fileOrPath
+ self.file = open(fileOrPath, "wb")
+ self._closeStream = True
+ else:
+ self.filename = None
+ # assume writable file object
+ self.file = fileOrPath
+ self._closeStream = False
+
+ # Figure out if writer expects bytes or unicodes
+ try:
+ # The bytes check should be first. See:
+ # https://github.com/fonttools/fonttools/pull/233
+ self.file.write(b"")
+ self.totype = tobytes
+ except TypeError:
+ # This better not fail.
+ self.file.write("")
+ self.totype = tostr
+ self.indentwhite = self.totype(indentwhite)
+ if newlinestr is None:
+ self.newlinestr = self.totype(os.linesep)
+ else:
+ self.newlinestr = self.totype(newlinestr)
+ self.indentlevel = 0
+ self.stack = []
+ self.needindent = 1
+ self.idlefunc = idlefunc
+ self.idlecounter = 0
+ self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
+ self.newline()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exception_type, exception_value, traceback):
+ self.close()
+
+ def close(self):
+ if self._closeStream:
+ self.file.close()
+
+ def write(self, string, indent=True):
+ """Writes text."""
+ self._writeraw(escape(string), indent=indent)
+
+ def writecdata(self, string):
+ """Writes text in a CDATA section."""
+ self._writeraw("<![CDATA[" + string + "]]>")
+
+ def write8bit(self, data, strip=False):
+ """Writes a bytes() sequence into the XML, escaping
+ non-ASCII bytes. When this is read in xmlReader,
+ the original bytes can be recovered by encoding to
+ 'latin-1'."""
+ self._writeraw(escape8bit(data), strip=strip)
+
+ def write_noindent(self, string):
+ """Writes text without indentation."""
+ self._writeraw(escape(string), indent=False)
+
+ def _writeraw(self, data, indent=True, strip=False):
+ """Writes bytes, possibly indented."""
+ if indent and self.needindent:
+ self.file.write(self.indentlevel * self.indentwhite)
+ self.needindent = 0
+ s = self.totype(data, encoding="utf_8")
+ if strip:
+ s = s.strip()
+ self.file.write(s)
+
+ def newline(self):
+ self.file.write(self.newlinestr)
+ self.needindent = 1
+ idlecounter = self.idlecounter
+ if not idlecounter % 100 and self.idlefunc is not None:
+ self.idlefunc()
+ self.idlecounter = idlecounter + 1
+
+ def comment(self, data):
+ data = escape(data)
+ lines = data.split("\n")
+ self._writeraw("<!-- " + lines[0])
+ for line in lines[1:]:
+ self.newline()
+ self._writeraw(" " + line)
+ self._writeraw(" -->")
+
+ def simpletag(self, _TAG_, *args, **kwargs):
+ attrdata = self.stringifyattrs(*args, **kwargs)
+ data = "<%s%s/>" % (_TAG_, attrdata)
+ self._writeraw(data)
+
+ def begintag(self, _TAG_, *args, **kwargs):
+ attrdata = self.stringifyattrs(*args, **kwargs)
+ data = "<%s%s>" % (_TAG_, attrdata)
+ self._writeraw(data)
+ self.stack.append(_TAG_)
+ self.indent()
+
+ def endtag(self, _TAG_):
+ assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
+ del self.stack[-1]
+ self.dedent()
+ data = "</%s>" % _TAG_
+ self._writeraw(data)
+
+ def dumphex(self, data):
+ linelength = 16
+ hexlinelength = linelength * 2
+ chunksize = 8
+ for i in range(0, len(data), linelength):
+ hexline = hexStr(data[i : i + linelength])
+ line = ""
+ white = ""
+ for j in range(0, hexlinelength, chunksize):
+ line = line + white + hexline[j : j + chunksize]
+ white = " "
+ self._writeraw(line)
+ self.newline()
+
+ def indent(self):
+ self.indentlevel = self.indentlevel + 1
+
+ def dedent(self):
+ assert self.indentlevel > 0
+ self.indentlevel = self.indentlevel - 1
+
+ def stringifyattrs(self, *args, **kwargs):
+ if kwargs:
+ assert not args
+ attributes = sorted(kwargs.items())
+ elif args:
+ assert len(args) == 1
+ attributes = args[0]
+ else:
+ return ""
+ data = ""
+ for attr, value in attributes:
+ if not isinstance(value, (bytes, str)):
+ value = str(value)
+ data = data + ' %s="%s"' % (attr, escapeattr(value))
+ return data
def escape(data):
- data = tostr(data, 'utf_8')
- data = data.replace("&", "&amp;")
- data = data.replace("<", "&lt;")
- data = data.replace(">", "&gt;")
- data = data.replace("\r", "&#13;")
- return data
+ data = tostr(data, "utf_8")
+ data = data.replace("&", "&amp;")
+ data = data.replace("<", "&lt;")
+ data = data.replace(">", "&gt;")
+ data = data.replace("\r", "&#13;")
+ return data
+
def escapeattr(data):
- data = escape(data)
- data = data.replace('"', "&quot;")
- return data
+ data = escape(data)
+ data = data.replace('"', "&quot;")
+ return data
+
def escape8bit(data):
- """Input is Unicode string."""
- def escapechar(c):
- n = ord(c)
- if 32 <= n <= 127 and c not in "<&>":
- return c
- else:
- return "&#" + repr(n) + ";"
- return strjoin(map(escapechar, data.decode('latin-1')))
+ """Input is Unicode string."""
+
+ def escapechar(c):
+ n = ord(c)
+ if 32 <= n <= 127 and c not in "<&>":
+ return c
+ else:
+ return "&#" + repr(n) + ";"
+
+ return strjoin(map(escapechar, data.decode("latin-1")))
+
def hexStr(s):
- h = string.hexdigits
- r = ''
- for c in s:
- i = byteord(c)
- r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
- return r
+ h = string.hexdigits
+ r = ""
+ for c in s:
+ i = byteord(c)
+ r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
+ return r
diff --git a/Lib/fontTools/mtiLib/__init__.py b/Lib/fontTools/mtiLib/__init__.py
index f117a742..dbedf275 100644
--- a/Lib/fontTools/mtiLib/__init__.py
+++ b/Lib/fontTools/mtiLib/__init__.py
@@ -12,1219 +12,1391 @@ from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict
from fontTools.otlLib import builder as otl
from contextlib import contextmanager
+from fontTools.ttLib import newTable
+from fontTools.feaLib.lookupDebugInfo import LOOKUP_DEBUG_ENV_VAR, LOOKUP_DEBUG_INFO_KEY
from operator import setitem
+import os
import logging
-class MtiLibError(Exception): pass
-class ReferenceNotFoundError(MtiLibError): pass
-class FeatureNotFoundError(ReferenceNotFoundError): pass
-class LookupNotFoundError(ReferenceNotFoundError): pass
+
+class MtiLibError(Exception):
+ pass
+
+
+class ReferenceNotFoundError(MtiLibError):
+ pass
+
+
+class FeatureNotFoundError(ReferenceNotFoundError):
+ pass
+
+
+class LookupNotFoundError(ReferenceNotFoundError):
+ pass
log = logging.getLogger("fontTools.mtiLib")
def makeGlyph(s):
- if s[:2] in ['U ', 'u ']:
- return ttLib.TTFont._makeGlyphName(int(s[2:], 16))
- elif s[:2] == '# ':
- return "glyph%.5d" % int(s[2:])
- assert s.find(' ') < 0, "Space found in glyph name: %s" % s
- assert s, "Glyph name is empty"
- return s
+ if s[:2] in ["U ", "u "]:
+ return ttLib.TTFont._makeGlyphName(int(s[2:], 16))
+ elif s[:2] == "# ":
+ return "glyph%.5d" % int(s[2:])
+ assert s.find(" ") < 0, "Space found in glyph name: %s" % s
+ assert s, "Glyph name is empty"
+ return s
+
def makeGlyphs(l):
- return [makeGlyph(g) for g in l]
+ return [makeGlyph(g) for g in l]
+
def mapLookup(sym, mapping):
- # Lookups are addressed by name. So resolved them using a map if available.
- # Fallback to parsing as lookup index if a map isn't provided.
- if mapping is not None:
- try:
- idx = mapping[sym]
- except KeyError:
- raise LookupNotFoundError(sym)
- else:
- idx = int(sym)
- return idx
+ # Lookups are addressed by name. So resolved them using a map if available.
+ # Fallback to parsing as lookup index if a map isn't provided.
+ if mapping is not None:
+ try:
+ idx = mapping[sym]
+ except KeyError:
+ raise LookupNotFoundError(sym)
+ else:
+ idx = int(sym)
+ return idx
-def mapFeature(sym, mapping):
- # Features are referenced by index according the spec. So, if symbol is an
- # integer, use it directly. Otherwise look up in the map if provided.
- try:
- idx = int(sym)
- except ValueError:
- try:
- idx = mapping[sym]
- except KeyError:
- raise FeatureNotFoundError(sym)
- return idx
-def setReference(mapper, mapping, sym, setter, collection, key):
- try:
- mapped = mapper(sym, mapping)
- except ReferenceNotFoundError as e:
- try:
- if mapping is not None:
- mapping.addDeferredMapping(lambda ref: setter(collection, key, ref), sym, e)
- return
- except AttributeError:
- pass
- raise
- setter(collection, key, mapped)
+def mapFeature(sym, mapping):
+ # Features are referenced by index according the spec. So, if symbol is an
+ # integer, use it directly. Otherwise look up in the map if provided.
+ try:
+ idx = int(sym)
+ except ValueError:
+ try:
+ idx = mapping[sym]
+ except KeyError:
+ raise FeatureNotFoundError(sym)
+ return idx
-class DeferredMapping(dict):
- def __init__(self):
- self._deferredMappings = []
+def setReference(mapper, mapping, sym, setter, collection, key):
+ try:
+ mapped = mapper(sym, mapping)
+ except ReferenceNotFoundError as e:
+ try:
+ if mapping is not None:
+ mapping.addDeferredMapping(
+ lambda ref: setter(collection, key, ref), sym, e
+ )
+ return
+ except AttributeError:
+ pass
+ raise
+ setter(collection, key, mapped)
- def addDeferredMapping(self, setter, sym, e):
- log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__)
- self._deferredMappings.append((setter,sym, e))
- def applyDeferredMappings(self):
- for setter,sym,e in self._deferredMappings:
- log.debug("Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__)
- try:
- mapped = self[sym]
- except KeyError:
- raise e
- setter(mapped)
- log.debug("Set to %s", mapped)
- self._deferredMappings = []
+class DeferredMapping(dict):
+ def __init__(self):
+ self._deferredMappings = []
+
+ def addDeferredMapping(self, setter, sym, e):
+ log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__)
+ self._deferredMappings.append((setter, sym, e))
+
+ def applyDeferredMappings(self):
+ for setter, sym, e in self._deferredMappings:
+ log.debug(
+ "Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__
+ )
+ try:
+ mapped = self[sym]
+ except KeyError:
+ raise e
+ setter(mapped)
+ log.debug("Set to %s", mapped)
+ self._deferredMappings = []
def parseScriptList(lines, featureMap=None):
- self = ot.ScriptList()
- records = []
- with lines.between('script table'):
- for line in lines:
- while len(line) < 4:
- line.append('')
- scriptTag, langSysTag, defaultFeature, features = line
- log.debug("Adding script %s language-system %s", scriptTag, langSysTag)
-
- langSys = ot.LangSys()
- langSys.LookupOrder = None
- if defaultFeature:
- setReference(mapFeature, featureMap, defaultFeature, setattr, langSys, 'ReqFeatureIndex')
- else:
- langSys.ReqFeatureIndex = 0xFFFF
- syms = stripSplitComma(features)
- langSys.FeatureIndex = theList = [3] * len(syms)
- for i,sym in enumerate(syms):
- setReference(mapFeature, featureMap, sym, setitem, theList, i)
- langSys.FeatureCount = len(langSys.FeatureIndex)
-
- script = [s for s in records if s.ScriptTag == scriptTag]
- if script:
- script = script[0].Script
- else:
- scriptRec = ot.ScriptRecord()
- scriptRec.ScriptTag = scriptTag + ' '*(4 - len(scriptTag))
- scriptRec.Script = ot.Script()
- records.append(scriptRec)
- script = scriptRec.Script
- script.DefaultLangSys = None
- script.LangSysRecord = []
- script.LangSysCount = 0
-
- if langSysTag == 'default':
- script.DefaultLangSys = langSys
- else:
- langSysRec = ot.LangSysRecord()
- langSysRec.LangSysTag = langSysTag + ' '*(4 - len(langSysTag))
- langSysRec.LangSys = langSys
- script.LangSysRecord.append(langSysRec)
- script.LangSysCount = len(script.LangSysRecord)
-
- for script in records:
- script.Script.LangSysRecord = sorted(script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag)
- self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag)
- self.ScriptCount = len(self.ScriptRecord)
- return self
+ self = ot.ScriptList()
+ records = []
+ with lines.between("script table"):
+ for line in lines:
+ while len(line) < 4:
+ line.append("")
+ scriptTag, langSysTag, defaultFeature, features = line
+ log.debug("Adding script %s language-system %s", scriptTag, langSysTag)
+
+ langSys = ot.LangSys()
+ langSys.LookupOrder = None
+ if defaultFeature:
+ setReference(
+ mapFeature,
+ featureMap,
+ defaultFeature,
+ setattr,
+ langSys,
+ "ReqFeatureIndex",
+ )
+ else:
+ langSys.ReqFeatureIndex = 0xFFFF
+ syms = stripSplitComma(features)
+ langSys.FeatureIndex = theList = [3] * len(syms)
+ for i, sym in enumerate(syms):
+ setReference(mapFeature, featureMap, sym, setitem, theList, i)
+ langSys.FeatureCount = len(langSys.FeatureIndex)
+
+ script = [s for s in records if s.ScriptTag == scriptTag]
+ if script:
+ script = script[0].Script
+ else:
+ scriptRec = ot.ScriptRecord()
+ scriptRec.ScriptTag = scriptTag + " " * (4 - len(scriptTag))
+ scriptRec.Script = ot.Script()
+ records.append(scriptRec)
+ script = scriptRec.Script
+ script.DefaultLangSys = None
+ script.LangSysRecord = []
+ script.LangSysCount = 0
+
+ if langSysTag == "default":
+ script.DefaultLangSys = langSys
+ else:
+ langSysRec = ot.LangSysRecord()
+ langSysRec.LangSysTag = langSysTag + " " * (4 - len(langSysTag))
+ langSysRec.LangSys = langSys
+ script.LangSysRecord.append(langSysRec)
+ script.LangSysCount = len(script.LangSysRecord)
+
+ for script in records:
+ script.Script.LangSysRecord = sorted(
+ script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag
+ )
+ self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag)
+ self.ScriptCount = len(self.ScriptRecord)
+ return self
+
def parseFeatureList(lines, lookupMap=None, featureMap=None):
- self = ot.FeatureList()
- self.FeatureRecord = []
- with lines.between('feature table'):
- for line in lines:
- name, featureTag, lookups = line
- if featureMap is not None:
- assert name not in featureMap, "Duplicate feature name: %s" % name
- featureMap[name] = len(self.FeatureRecord)
- # If feature name is integer, make sure it matches its index.
- try:
- assert int(name) == len(self.FeatureRecord), "%d %d" % (name, len(self.FeatureRecord))
- except ValueError:
- pass
- featureRec = ot.FeatureRecord()
- featureRec.FeatureTag = featureTag
- featureRec.Feature = ot.Feature()
- self.FeatureRecord.append(featureRec)
- feature = featureRec.Feature
- feature.FeatureParams = None
- syms = stripSplitComma(lookups)
- feature.LookupListIndex = theList = [None] * len(syms)
- for i,sym in enumerate(syms):
- setReference(mapLookup, lookupMap, sym, setitem, theList, i)
- feature.LookupCount = len(feature.LookupListIndex)
-
- self.FeatureCount = len(self.FeatureRecord)
- return self
+ self = ot.FeatureList()
+ self.FeatureRecord = []
+ with lines.between("feature table"):
+ for line in lines:
+ name, featureTag, lookups = line
+ if featureMap is not None:
+ assert name not in featureMap, "Duplicate feature name: %s" % name
+ featureMap[name] = len(self.FeatureRecord)
+ # If feature name is integer, make sure it matches its index.
+ try:
+ assert int(name) == len(self.FeatureRecord), "%d %d" % (
+ name,
+ len(self.FeatureRecord),
+ )
+ except ValueError:
+ pass
+ featureRec = ot.FeatureRecord()
+ featureRec.FeatureTag = featureTag
+ featureRec.Feature = ot.Feature()
+ self.FeatureRecord.append(featureRec)
+ feature = featureRec.Feature
+ feature.FeatureParams = None
+ syms = stripSplitComma(lookups)
+ feature.LookupListIndex = theList = [None] * len(syms)
+ for i, sym in enumerate(syms):
+ setReference(mapLookup, lookupMap, sym, setitem, theList, i)
+ feature.LookupCount = len(feature.LookupListIndex)
+
+ self.FeatureCount = len(self.FeatureRecord)
+ return self
+
def parseLookupFlags(lines):
- flags = 0
- filterset = None
- allFlags = [
- 'righttoleft',
- 'ignorebaseglyphs',
- 'ignoreligatures',
- 'ignoremarks',
- 'markattachmenttype',
- 'markfiltertype',
- ]
- while lines.peeks()[0].lower() in allFlags:
- line = next(lines)
- flag = {
- 'righttoleft': 0x0001,
- 'ignorebaseglyphs': 0x0002,
- 'ignoreligatures': 0x0004,
- 'ignoremarks': 0x0008,
- }.get(line[0].lower())
- if flag:
- assert line[1].lower() in ['yes', 'no'], line[1]
- if line[1].lower() == 'yes':
- flags |= flag
- continue
- if line[0].lower() == 'markattachmenttype':
- flags |= int(line[1]) << 8
- continue
- if line[0].lower() == 'markfiltertype':
- flags |= 0x10
- filterset = int(line[1])
- return flags, filterset
+ flags = 0
+ filterset = None
+ allFlags = [
+ "righttoleft",
+ "ignorebaseglyphs",
+ "ignoreligatures",
+ "ignoremarks",
+ "markattachmenttype",
+ "markfiltertype",
+ ]
+ while lines.peeks()[0].lower() in allFlags:
+ line = next(lines)
+ flag = {
+ "righttoleft": 0x0001,
+ "ignorebaseglyphs": 0x0002,
+ "ignoreligatures": 0x0004,
+ "ignoremarks": 0x0008,
+ }.get(line[0].lower())
+ if flag:
+ assert line[1].lower() in ["yes", "no"], line[1]
+ if line[1].lower() == "yes":
+ flags |= flag
+ continue
+ if line[0].lower() == "markattachmenttype":
+ flags |= int(line[1]) << 8
+ continue
+ if line[0].lower() == "markfiltertype":
+ flags |= 0x10
+ filterset = int(line[1])
+ return flags, filterset
+
def parseSingleSubst(lines, font, _lookupMap=None):
- mapping = {}
- for line in lines:
- assert len(line) == 2, line
- line = makeGlyphs(line)
- mapping[line[0]] = line[1]
- return otl.buildSingleSubstSubtable(mapping)
+ mapping = {}
+ for line in lines:
+ assert len(line) == 2, line
+ line = makeGlyphs(line)
+ mapping[line[0]] = line[1]
+ return otl.buildSingleSubstSubtable(mapping)
+
def parseMultiple(lines, font, _lookupMap=None):
- mapping = {}
- for line in lines:
- line = makeGlyphs(line)
- mapping[line[0]] = line[1:]
- return otl.buildMultipleSubstSubtable(mapping)
+ mapping = {}
+ for line in lines:
+ line = makeGlyphs(line)
+ mapping[line[0]] = line[1:]
+ return otl.buildMultipleSubstSubtable(mapping)
+
def parseAlternate(lines, font, _lookupMap=None):
- mapping = {}
- for line in lines:
- line = makeGlyphs(line)
- mapping[line[0]] = line[1:]
- return otl.buildAlternateSubstSubtable(mapping)
+ mapping = {}
+ for line in lines:
+ line = makeGlyphs(line)
+ mapping[line[0]] = line[1:]
+ return otl.buildAlternateSubstSubtable(mapping)
+
def parseLigature(lines, font, _lookupMap=None):
- mapping = {}
- for line in lines:
- assert len(line) >= 2, line
- line = makeGlyphs(line)
- mapping[tuple(line[1:])] = line[0]
- return otl.buildLigatureSubstSubtable(mapping)
+ mapping = {}
+ for line in lines:
+ assert len(line) >= 2, line
+ line = makeGlyphs(line)
+ mapping[tuple(line[1:])] = line[0]
+ return otl.buildLigatureSubstSubtable(mapping)
+
def parseSinglePos(lines, font, _lookupMap=None):
- values = {}
- for line in lines:
- assert len(line) == 3, line
- w = line[0].title().replace(' ', '')
- assert w in valueRecordFormatDict
- g = makeGlyph(line[1])
- v = int(line[2])
- if g not in values:
- values[g] = ValueRecord()
- assert not hasattr(values[g], w), (g, w)
- setattr(values[g], w, v)
- return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap())
+ values = {}
+ for line in lines:
+ assert len(line) == 3, line
+ w = line[0].title().replace(" ", "")
+ assert w in valueRecordFormatDict
+ g = makeGlyph(line[1])
+ v = int(line[2])
+ if g not in values:
+ values[g] = ValueRecord()
+ assert not hasattr(values[g], w), (g, w)
+ setattr(values[g], w, v)
+ return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap())
+
def parsePair(lines, font, _lookupMap=None):
- self = ot.PairPos()
- self.ValueFormat1 = self.ValueFormat2 = 0
- typ = lines.peeks()[0].split()[0].lower()
- if typ in ('left', 'right'):
- self.Format = 1
- values = {}
- for line in lines:
- assert len(line) == 4, line
- side = line[0].split()[0].lower()
- assert side in ('left', 'right'), side
- what = line[0][len(side):].title().replace(' ', '')
- mask = valueRecordFormatDict[what][0]
- glyph1, glyph2 = makeGlyphs(line[1:3])
- value = int(line[3])
- if not glyph1 in values: values[glyph1] = {}
- if not glyph2 in values[glyph1]: values[glyph1][glyph2] = (ValueRecord(),ValueRecord())
- rec2 = values[glyph1][glyph2]
- if side == 'left':
- self.ValueFormat1 |= mask
- vr = rec2[0]
- else:
- self.ValueFormat2 |= mask
- vr = rec2[1]
- assert not hasattr(vr, what), (vr, what)
- setattr(vr, what, value)
- self.Coverage = makeCoverage(set(values.keys()), font)
- self.PairSet = []
- for glyph1 in self.Coverage.glyphs:
- values1 = values[glyph1]
- pairset = ot.PairSet()
- records = pairset.PairValueRecord = []
- for glyph2 in sorted(values1.keys(), key=font.getGlyphID):
- values2 = values1[glyph2]
- pair = ot.PairValueRecord()
- pair.SecondGlyph = glyph2
- pair.Value1 = values2[0]
- pair.Value2 = values2[1] if self.ValueFormat2 else None
- records.append(pair)
- pairset.PairValueCount = len(pairset.PairValueRecord)
- self.PairSet.append(pairset)
- self.PairSetCount = len(self.PairSet)
- elif typ.endswith('class'):
- self.Format = 2
- classDefs = [None, None]
- while lines.peeks()[0].endswith("class definition begin"):
- typ = lines.peek()[0][:-len("class definition begin")].lower()
- idx,klass = {
- 'first': (0,ot.ClassDef1),
- 'second': (1,ot.ClassDef2),
- }[typ]
- assert classDefs[idx] is None
- classDefs[idx] = parseClassDef(lines, font, klass=klass)
- self.ClassDef1, self.ClassDef2 = classDefs
- self.Class1Count, self.Class2Count = (1+max(c.classDefs.values()) for c in classDefs)
- self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)]
- for rec1 in self.Class1Record:
- rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)]
- for rec2 in rec1.Class2Record:
- rec2.Value1 = ValueRecord()
- rec2.Value2 = ValueRecord()
- for line in lines:
- assert len(line) == 4, line
- side = line[0].split()[0].lower()
- assert side in ('left', 'right'), side
- what = line[0][len(side):].title().replace(' ', '')
- mask = valueRecordFormatDict[what][0]
- class1, class2, value = (int(x) for x in line[1:4])
- rec2 = self.Class1Record[class1].Class2Record[class2]
- if side == 'left':
- self.ValueFormat1 |= mask
- vr = rec2.Value1
- else:
- self.ValueFormat2 |= mask
- vr = rec2.Value2
- assert not hasattr(vr, what), (vr, what)
- setattr(vr, what, value)
- for rec1 in self.Class1Record:
- for rec2 in rec1.Class2Record:
- rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1)
- rec2.Value2 = ValueRecord(self.ValueFormat2, rec2.Value2) \
- if self.ValueFormat2 else None
-
- self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font)
- else:
- assert 0, typ
- return self
+ self = ot.PairPos()
+ self.ValueFormat1 = self.ValueFormat2 = 0
+ typ = lines.peeks()[0].split()[0].lower()
+ if typ in ("left", "right"):
+ self.Format = 1
+ values = {}
+ for line in lines:
+ assert len(line) == 4, line
+ side = line[0].split()[0].lower()
+ assert side in ("left", "right"), side
+ what = line[0][len(side) :].title().replace(" ", "")
+ mask = valueRecordFormatDict[what][0]
+ glyph1, glyph2 = makeGlyphs(line[1:3])
+ value = int(line[3])
+ if not glyph1 in values:
+ values[glyph1] = {}
+ if not glyph2 in values[glyph1]:
+ values[glyph1][glyph2] = (ValueRecord(), ValueRecord())
+ rec2 = values[glyph1][glyph2]
+ if side == "left":
+ self.ValueFormat1 |= mask
+ vr = rec2[0]
+ else:
+ self.ValueFormat2 |= mask
+ vr = rec2[1]
+ assert not hasattr(vr, what), (vr, what)
+ setattr(vr, what, value)
+ self.Coverage = makeCoverage(set(values.keys()), font)
+ self.PairSet = []
+ for glyph1 in self.Coverage.glyphs:
+ values1 = values[glyph1]
+ pairset = ot.PairSet()
+ records = pairset.PairValueRecord = []
+ for glyph2 in sorted(values1.keys(), key=font.getGlyphID):
+ values2 = values1[glyph2]
+ pair = ot.PairValueRecord()
+ pair.SecondGlyph = glyph2
+ pair.Value1 = values2[0]
+ pair.Value2 = values2[1] if self.ValueFormat2 else None
+ records.append(pair)
+ pairset.PairValueCount = len(pairset.PairValueRecord)
+ self.PairSet.append(pairset)
+ self.PairSetCount = len(self.PairSet)
+ elif typ.endswith("class"):
+ self.Format = 2
+ classDefs = [None, None]
+ while lines.peeks()[0].endswith("class definition begin"):
+ typ = lines.peek()[0][: -len("class definition begin")].lower()
+ idx, klass = {
+ "first": (0, ot.ClassDef1),
+ "second": (1, ot.ClassDef2),
+ }[typ]
+ assert classDefs[idx] is None
+ classDefs[idx] = parseClassDef(lines, font, klass=klass)
+ self.ClassDef1, self.ClassDef2 = classDefs
+ self.Class1Count, self.Class2Count = (
+ 1 + max(c.classDefs.values()) for c in classDefs
+ )
+ self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)]
+ for rec1 in self.Class1Record:
+ rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)]
+ for rec2 in rec1.Class2Record:
+ rec2.Value1 = ValueRecord()
+ rec2.Value2 = ValueRecord()
+ for line in lines:
+ assert len(line) == 4, line
+ side = line[0].split()[0].lower()
+ assert side in ("left", "right"), side
+ what = line[0][len(side) :].title().replace(" ", "")
+ mask = valueRecordFormatDict[what][0]
+ class1, class2, value = (int(x) for x in line[1:4])
+ rec2 = self.Class1Record[class1].Class2Record[class2]
+ if side == "left":
+ self.ValueFormat1 |= mask
+ vr = rec2.Value1
+ else:
+ self.ValueFormat2 |= mask
+ vr = rec2.Value2
+ assert not hasattr(vr, what), (vr, what)
+ setattr(vr, what, value)
+ for rec1 in self.Class1Record:
+ for rec2 in rec1.Class2Record:
+ rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1)
+ rec2.Value2 = (
+ ValueRecord(self.ValueFormat2, rec2.Value2)
+ if self.ValueFormat2
+ else None
+ )
+
+ self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font)
+ else:
+ assert 0, typ
+ return self
+
def parseKernset(lines, font, _lookupMap=None):
- typ = lines.peeks()[0].split()[0].lower()
- if typ in ('left', 'right'):
- with lines.until(("firstclass definition begin", "secondclass definition begin")):
- return parsePair(lines, font)
- return parsePair(lines, font)
+ typ = lines.peeks()[0].split()[0].lower()
+ if typ in ("left", "right"):
+ with lines.until(
+ ("firstclass definition begin", "secondclass definition begin")
+ ):
+ return parsePair(lines, font)
+ return parsePair(lines, font)
+
def makeAnchor(data, klass=ot.Anchor):
- assert len(data) <= 2
- anchor = klass()
- anchor.Format = 1
- anchor.XCoordinate,anchor.YCoordinate = intSplitComma(data[0])
- if len(data) > 1 and data[1] != '':
- anchor.Format = 2
- anchor.AnchorPoint = int(data[1])
- return anchor
+ assert len(data) <= 2
+ anchor = klass()
+ anchor.Format = 1
+ anchor.XCoordinate, anchor.YCoordinate = intSplitComma(data[0])
+ if len(data) > 1 and data[1] != "":
+ anchor.Format = 2
+ anchor.AnchorPoint = int(data[1])
+ return anchor
+
def parseCursive(lines, font, _lookupMap=None):
- records = {}
- for line in lines:
- assert len(line) in [3,4], line
- idx,klass = {
- 'entry': (0,ot.EntryAnchor),
- 'exit': (1,ot.ExitAnchor),
- }[line[0]]
- glyph = makeGlyph(line[1])
- if glyph not in records:
- records[glyph] = [None,None]
- assert records[glyph][idx] is None, (glyph, idx)
- records[glyph][idx] = makeAnchor(line[2:], klass)
- return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap())
+ records = {}
+ for line in lines:
+ assert len(line) in [3, 4], line
+ idx, klass = {
+ "entry": (0, ot.EntryAnchor),
+ "exit": (1, ot.ExitAnchor),
+ }[line[0]]
+ glyph = makeGlyph(line[1])
+ if glyph not in records:
+ records[glyph] = [None, None]
+ assert records[glyph][idx] is None, (glyph, idx)
+ records[glyph][idx] = makeAnchor(line[2:], klass)
+ return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap())
+
def makeMarkRecords(data, coverage, c):
- records = []
- for glyph in coverage.glyphs:
- klass, anchor = data[glyph]
- record = c.MarkRecordClass()
- record.Class = klass
- setattr(record, c.MarkAnchor, anchor)
- records.append(record)
- return records
+ records = []
+ for glyph in coverage.glyphs:
+ klass, anchor = data[glyph]
+ record = c.MarkRecordClass()
+ record.Class = klass
+ setattr(record, c.MarkAnchor, anchor)
+ records.append(record)
+ return records
+
def makeBaseRecords(data, coverage, c, classCount):
- records = []
- idx = {}
- for glyph in coverage.glyphs:
- idx[glyph] = len(records)
- record = c.BaseRecordClass()
- anchors = [None] * classCount
- setattr(record, c.BaseAnchor, anchors)
- records.append(record)
- for (glyph,klass),anchor in data.items():
- record = records[idx[glyph]]
- anchors = getattr(record, c.BaseAnchor)
- assert anchors[klass] is None, (glyph, klass)
- anchors[klass] = anchor
- return records
+ records = []
+ idx = {}
+ for glyph in coverage.glyphs:
+ idx[glyph] = len(records)
+ record = c.BaseRecordClass()
+ anchors = [None] * classCount
+ setattr(record, c.BaseAnchor, anchors)
+ records.append(record)
+ for (glyph, klass), anchor in data.items():
+ record = records[idx[glyph]]
+ anchors = getattr(record, c.BaseAnchor)
+ assert anchors[klass] is None, (glyph, klass)
+ anchors[klass] = anchor
+ return records
+
def makeLigatureRecords(data, coverage, c, classCount):
- records = [None] * len(coverage.glyphs)
- idx = {g:i for i,g in enumerate(coverage.glyphs)}
-
- for (glyph,klass,compIdx,compCount),anchor in data.items():
- record = records[idx[glyph]]
- if record is None:
- record = records[idx[glyph]] = ot.LigatureAttach()
- record.ComponentCount = compCount
- record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)]
- for compRec in record.ComponentRecord:
- compRec.LigatureAnchor = [None] * classCount
- assert record.ComponentCount == compCount, (glyph, record.ComponentCount, compCount)
-
- anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor
- assert anchors[klass] is None, (glyph, compIdx, klass)
- anchors[klass] = anchor
- return records
+ records = [None] * len(coverage.glyphs)
+ idx = {g: i for i, g in enumerate(coverage.glyphs)}
+
+ for (glyph, klass, compIdx, compCount), anchor in data.items():
+ record = records[idx[glyph]]
+ if record is None:
+ record = records[idx[glyph]] = ot.LigatureAttach()
+ record.ComponentCount = compCount
+ record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)]
+ for compRec in record.ComponentRecord:
+ compRec.LigatureAnchor = [None] * classCount
+ assert record.ComponentCount == compCount, (
+ glyph,
+ record.ComponentCount,
+ compCount,
+ )
+
+ anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor
+ assert anchors[klass] is None, (glyph, compIdx, klass)
+ anchors[klass] = anchor
+ return records
+
def parseMarkToSomething(lines, font, c):
- self = c.Type()
- self.Format = 1
- markData = {}
- baseData = {}
- Data = {
- 'mark': (markData, c.MarkAnchorClass),
- 'base': (baseData, c.BaseAnchorClass),
- 'ligature': (baseData, c.BaseAnchorClass),
- }
- maxKlass = 0
- for line in lines:
- typ = line[0]
- assert typ in ('mark', 'base', 'ligature')
- glyph = makeGlyph(line[1])
- data, anchorClass = Data[typ]
- extraItems = 2 if typ == 'ligature' else 0
- extras = tuple(int(i) for i in line[2:2+extraItems])
- klass = int(line[2+extraItems])
- anchor = makeAnchor(line[3+extraItems:], anchorClass)
- if typ == 'mark':
- key,value = glyph,(klass,anchor)
- else:
- key,value = ((glyph,klass)+extras),anchor
- assert key not in data, key
- data[key] = value
- maxKlass = max(maxKlass, klass)
-
- # Mark
- markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass)
- markArray = c.MarkArrayClass()
- markRecords = makeMarkRecords(markData, markCoverage, c)
- setattr(markArray, c.MarkRecord, markRecords)
- setattr(markArray, c.MarkCount, len(markRecords))
- setattr(self, c.MarkCoverage, markCoverage)
- setattr(self, c.MarkArray, markArray)
- self.ClassCount = maxKlass + 1
-
- # Base
- self.classCount = 0 if not baseData else 1+max(k[1] for k,v in baseData.items())
- baseCoverage = makeCoverage(set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass)
- baseArray = c.BaseArrayClass()
- if c.Base == 'Ligature':
- baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount)
- else:
- baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount)
- setattr(baseArray, c.BaseRecord, baseRecords)
- setattr(baseArray, c.BaseCount, len(baseRecords))
- setattr(self, c.BaseCoverage, baseCoverage)
- setattr(self, c.BaseArray, baseArray)
-
- return self
+ self = c.Type()
+ self.Format = 1
+ markData = {}
+ baseData = {}
+ Data = {
+ "mark": (markData, c.MarkAnchorClass),
+ "base": (baseData, c.BaseAnchorClass),
+ "ligature": (baseData, c.BaseAnchorClass),
+ }
+ maxKlass = 0
+ for line in lines:
+ typ = line[0]
+ assert typ in ("mark", "base", "ligature")
+ glyph = makeGlyph(line[1])
+ data, anchorClass = Data[typ]
+ extraItems = 2 if typ == "ligature" else 0
+ extras = tuple(int(i) for i in line[2 : 2 + extraItems])
+ klass = int(line[2 + extraItems])
+ anchor = makeAnchor(line[3 + extraItems :], anchorClass)
+ if typ == "mark":
+ key, value = glyph, (klass, anchor)
+ else:
+ key, value = ((glyph, klass) + extras), anchor
+ assert key not in data, key
+ data[key] = value
+ maxKlass = max(maxKlass, klass)
+
+ # Mark
+ markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass)
+ markArray = c.MarkArrayClass()
+ markRecords = makeMarkRecords(markData, markCoverage, c)
+ setattr(markArray, c.MarkRecord, markRecords)
+ setattr(markArray, c.MarkCount, len(markRecords))
+ setattr(self, c.MarkCoverage, markCoverage)
+ setattr(self, c.MarkArray, markArray)
+ self.ClassCount = maxKlass + 1
+
+ # Base
+ self.classCount = 0 if not baseData else 1 + max(k[1] for k, v in baseData.items())
+ baseCoverage = makeCoverage(
+ set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass
+ )
+ baseArray = c.BaseArrayClass()
+ if c.Base == "Ligature":
+ baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount)
+ else:
+ baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount)
+ setattr(baseArray, c.BaseRecord, baseRecords)
+ setattr(baseArray, c.BaseCount, len(baseRecords))
+ setattr(self, c.BaseCoverage, baseCoverage)
+ setattr(self, c.BaseArray, baseArray)
+
+ return self
+
class MarkHelper(object):
- def __init__(self):
- for Which in ('Mark', 'Base'):
- for What in ('Coverage', 'Array', 'Count', 'Record', 'Anchor'):
- key = Which + What
- if Which == 'Mark' and What in ('Count', 'Record', 'Anchor'):
- value = key
- else:
- value = getattr(self, Which) + What
- if value == 'LigatureRecord':
- value = 'LigatureAttach'
- setattr(self, key, value)
- if What != 'Count':
- klass = getattr(ot, value)
- setattr(self, key+'Class', klass)
+ def __init__(self):
+ for Which in ("Mark", "Base"):
+ for What in ("Coverage", "Array", "Count", "Record", "Anchor"):
+ key = Which + What
+ if Which == "Mark" and What in ("Count", "Record", "Anchor"):
+ value = key
+ else:
+ value = getattr(self, Which) + What
+ if value == "LigatureRecord":
+ value = "LigatureAttach"
+ setattr(self, key, value)
+ if What != "Count":
+ klass = getattr(ot, value)
+ setattr(self, key + "Class", klass)
+
class MarkToBaseHelper(MarkHelper):
- Mark = 'Mark'
- Base = 'Base'
- Type = ot.MarkBasePos
+ Mark = "Mark"
+ Base = "Base"
+ Type = ot.MarkBasePos
+
+
class MarkToMarkHelper(MarkHelper):
- Mark = 'Mark1'
- Base = 'Mark2'
- Type = ot.MarkMarkPos
+ Mark = "Mark1"
+ Base = "Mark2"
+ Type = ot.MarkMarkPos
+
+
class MarkToLigatureHelper(MarkHelper):
- Mark = 'Mark'
- Base = 'Ligature'
- Type = ot.MarkLigPos
+ Mark = "Mark"
+ Base = "Ligature"
+ Type = ot.MarkLigPos
+
def parseMarkToBase(lines, font, _lookupMap=None):
- return parseMarkToSomething(lines, font, MarkToBaseHelper())
+ return parseMarkToSomething(lines, font, MarkToBaseHelper())
+
+
def parseMarkToMark(lines, font, _lookupMap=None):
- return parseMarkToSomething(lines, font, MarkToMarkHelper())
+ return parseMarkToSomething(lines, font, MarkToMarkHelper())
+
+
def parseMarkToLigature(lines, font, _lookupMap=None):
- return parseMarkToSomething(lines, font, MarkToLigatureHelper())
+ return parseMarkToSomething(lines, font, MarkToLigatureHelper())
+
def stripSplitComma(line):
- return [s.strip() for s in line.split(',')] if line else []
+ return [s.strip() for s in line.split(",")] if line else []
+
def intSplitComma(line):
- return [int(i) for i in line.split(',')] if line else []
+ return [int(i) for i in line.split(",")] if line else []
+
# Copied from fontTools.subset
class ContextHelper(object):
- def __init__(self, klassName, Format):
- if klassName.endswith('Subst'):
- Typ = 'Sub'
- Type = 'Subst'
- else:
- Typ = 'Pos'
- Type = 'Pos'
- if klassName.startswith('Chain'):
- Chain = 'Chain'
- InputIdx = 1
- DataLen = 3
- else:
- Chain = ''
- InputIdx = 0
- DataLen = 1
- ChainTyp = Chain+Typ
-
- self.Typ = Typ
- self.Type = Type
- self.Chain = Chain
- self.ChainTyp = ChainTyp
- self.InputIdx = InputIdx
- self.DataLen = DataLen
-
- self.LookupRecord = Type+'LookupRecord'
-
- if Format == 1:
- Coverage = lambda r: r.Coverage
- ChainCoverage = lambda r: r.Coverage
- ContextData = lambda r:(None,)
- ChainContextData = lambda r:(None, None, None)
- SetContextData = None
- SetChainContextData = None
- RuleData = lambda r:(r.Input,)
- ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
- def SetRuleData(r, d):
- (r.Input,) = d
- (r.GlyphCount,) = (len(x)+1 for x in d)
- def ChainSetRuleData(r, d):
- (r.Backtrack, r.Input, r.LookAhead) = d
- (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
- elif Format == 2:
- Coverage = lambda r: r.Coverage
- ChainCoverage = lambda r: r.Coverage
- ContextData = lambda r:(r.ClassDef,)
- ChainContextData = lambda r:(r.BacktrackClassDef,
- r.InputClassDef,
- r.LookAheadClassDef)
- def SetContextData(r, d):
- (r.ClassDef,) = d
- def SetChainContextData(r, d):
- (r.BacktrackClassDef,
- r.InputClassDef,
- r.LookAheadClassDef) = d
- RuleData = lambda r:(r.Class,)
- ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
- def SetRuleData(r, d):
- (r.Class,) = d
- (r.GlyphCount,) = (len(x)+1 for x in d)
- def ChainSetRuleData(r, d):
- (r.Backtrack, r.Input, r.LookAhead) = d
- (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
- elif Format == 3:
- Coverage = lambda r: r.Coverage[0]
- ChainCoverage = lambda r: r.InputCoverage[0]
- ContextData = None
- ChainContextData = None
- SetContextData = None
- SetChainContextData = None
- RuleData = lambda r: r.Coverage
- ChainRuleData = lambda r:(r.BacktrackCoverage +
- r.InputCoverage +
- r.LookAheadCoverage)
- def SetRuleData(r, d):
- (r.Coverage,) = d
- (r.GlyphCount,) = (len(x) for x in d)
- def ChainSetRuleData(r, d):
- (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
- (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d)
- else:
- assert 0, "unknown format: %s" % Format
-
- if Chain:
- self.Coverage = ChainCoverage
- self.ContextData = ChainContextData
- self.SetContextData = SetChainContextData
- self.RuleData = ChainRuleData
- self.SetRuleData = ChainSetRuleData
- else:
- self.Coverage = Coverage
- self.ContextData = ContextData
- self.SetContextData = SetContextData
- self.RuleData = RuleData
- self.SetRuleData = SetRuleData
-
- if Format == 1:
- self.Rule = ChainTyp+'Rule'
- self.RuleCount = ChainTyp+'RuleCount'
- self.RuleSet = ChainTyp+'RuleSet'
- self.RuleSetCount = ChainTyp+'RuleSetCount'
- self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
- elif Format == 2:
- self.Rule = ChainTyp+'ClassRule'
- self.RuleCount = ChainTyp+'ClassRuleCount'
- self.RuleSet = ChainTyp+'ClassSet'
- self.RuleSetCount = ChainTyp+'ClassSetCount'
- self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c
- else (set(glyphs) if r == 0 else set()))
-
- self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
- self.ClassDefIndex = 1 if Chain else 0
- self.Input = 'Input' if Chain else 'Class'
+ def __init__(self, klassName, Format):
+ if klassName.endswith("Subst"):
+ Typ = "Sub"
+ Type = "Subst"
+ else:
+ Typ = "Pos"
+ Type = "Pos"
+ if klassName.startswith("Chain"):
+ Chain = "Chain"
+ InputIdx = 1
+ DataLen = 3
+ else:
+ Chain = ""
+ InputIdx = 0
+ DataLen = 1
+ ChainTyp = Chain + Typ
+
+ self.Typ = Typ
+ self.Type = Type
+ self.Chain = Chain
+ self.ChainTyp = ChainTyp
+ self.InputIdx = InputIdx
+ self.DataLen = DataLen
+
+ self.LookupRecord = Type + "LookupRecord"
+
+ if Format == 1:
+ Coverage = lambda r: r.Coverage
+ ChainCoverage = lambda r: r.Coverage
+ ContextData = lambda r: (None,)
+ ChainContextData = lambda r: (None, None, None)
+ SetContextData = None
+ SetChainContextData = None
+ RuleData = lambda r: (r.Input,)
+ ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)
+
+ def SetRuleData(r, d):
+ (r.Input,) = d
+ (r.GlyphCount,) = (len(x) + 1 for x in d)
+
+ def ChainSetRuleData(r, d):
+ (r.Backtrack, r.Input, r.LookAhead) = d
+ (
+ r.BacktrackGlyphCount,
+ r.InputGlyphCount,
+ r.LookAheadGlyphCount,
+ ) = (len(d[0]), len(d[1]) + 1, len(d[2]))
+
+ elif Format == 2:
+ Coverage = lambda r: r.Coverage
+ ChainCoverage = lambda r: r.Coverage
+ ContextData = lambda r: (r.ClassDef,)
+ ChainContextData = lambda r: (
+ r.BacktrackClassDef,
+ r.InputClassDef,
+ r.LookAheadClassDef,
+ )
+
+ def SetContextData(r, d):
+ (r.ClassDef,) = d
+
+ def SetChainContextData(r, d):
+ (r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef) = d
+
+ RuleData = lambda r: (r.Class,)
+ ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)
+
+ def SetRuleData(r, d):
+ (r.Class,) = d
+ (r.GlyphCount,) = (len(x) + 1 for x in d)
+
+ def ChainSetRuleData(r, d):
+ (r.Backtrack, r.Input, r.LookAhead) = d
+ (
+ r.BacktrackGlyphCount,
+ r.InputGlyphCount,
+ r.LookAheadGlyphCount,
+ ) = (len(d[0]), len(d[1]) + 1, len(d[2]))
+
+ elif Format == 3:
+ Coverage = lambda r: r.Coverage[0]
+ ChainCoverage = lambda r: r.InputCoverage[0]
+ ContextData = None
+ ChainContextData = None
+ SetContextData = None
+ SetChainContextData = None
+ RuleData = lambda r: r.Coverage
+ ChainRuleData = lambda r: (
+ r.BacktrackCoverage + r.InputCoverage + r.LookAheadCoverage
+ )
+
+ def SetRuleData(r, d):
+ (r.Coverage,) = d
+ (r.GlyphCount,) = (len(x) for x in d)
+
+ def ChainSetRuleData(r, d):
+ (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
+ (
+ r.BacktrackGlyphCount,
+ r.InputGlyphCount,
+ r.LookAheadGlyphCount,
+ ) = (len(x) for x in d)
+
+ else:
+ assert 0, "unknown format: %s" % Format
+
+ if Chain:
+ self.Coverage = ChainCoverage
+ self.ContextData = ChainContextData
+ self.SetContextData = SetChainContextData
+ self.RuleData = ChainRuleData
+ self.SetRuleData = ChainSetRuleData
+ else:
+ self.Coverage = Coverage
+ self.ContextData = ContextData
+ self.SetContextData = SetContextData
+ self.RuleData = RuleData
+ self.SetRuleData = SetRuleData
+
+ if Format == 1:
+ self.Rule = ChainTyp + "Rule"
+ self.RuleCount = ChainTyp + "RuleCount"
+ self.RuleSet = ChainTyp + "RuleSet"
+ self.RuleSetCount = ChainTyp + "RuleSetCount"
+ self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
+ elif Format == 2:
+ self.Rule = ChainTyp + "ClassRule"
+ self.RuleCount = ChainTyp + "ClassRuleCount"
+ self.RuleSet = ChainTyp + "ClassSet"
+ self.RuleSetCount = ChainTyp + "ClassSetCount"
+ self.Intersect = lambda glyphs, c, r: (
+ c.intersect_class(glyphs, r)
+ if c
+ else (set(glyphs) if r == 0 else set())
+ )
+
+ self.ClassDef = "InputClassDef" if Chain else "ClassDef"
+ self.ClassDefIndex = 1 if Chain else 0
+ self.Input = "Input" if Chain else "Class"
+
def parseLookupRecords(items, klassName, lookupMap=None):
- klass = getattr(ot, klassName)
- lst = []
- for item in items:
- rec = klass()
- item = stripSplitComma(item)
- assert len(item) == 2, item
- idx = int(item[0])
- assert idx > 0, idx
- rec.SequenceIndex = idx - 1
- setReference(mapLookup, lookupMap, item[1], setattr, rec, 'LookupListIndex')
- lst.append(rec)
- return lst
+ klass = getattr(ot, klassName)
+ lst = []
+ for item in items:
+ rec = klass()
+ item = stripSplitComma(item)
+ assert len(item) == 2, item
+ idx = int(item[0])
+ assert idx > 0, idx
+ rec.SequenceIndex = idx - 1
+ setReference(mapLookup, lookupMap, item[1], setattr, rec, "LookupListIndex")
+ lst.append(rec)
+ return lst
+
def makeClassDef(classDefs, font, klass=ot.Coverage):
- if not classDefs: return None
- self = klass()
- self.classDefs = dict(classDefs)
- return self
+ if not classDefs:
+ return None
+ self = klass()
+ self.classDefs = dict(classDefs)
+ return self
+
def parseClassDef(lines, font, klass=ot.ClassDef):
- classDefs = {}
- with lines.between('class definition'):
- for line in lines:
- glyph = makeGlyph(line[0])
- assert glyph not in classDefs, glyph
- classDefs[glyph] = int(line[1])
- return makeClassDef(classDefs, font, klass)
+ classDefs = {}
+ with lines.between("class definition"):
+ for line in lines:
+ glyph = makeGlyph(line[0])
+ assert glyph not in classDefs, glyph
+ classDefs[glyph] = int(line[1])
+ return makeClassDef(classDefs, font, klass)
+
def makeCoverage(glyphs, font, klass=ot.Coverage):
- if not glyphs: return None
- if isinstance(glyphs, set):
- glyphs = sorted(glyphs)
- coverage = klass()
- coverage.glyphs = sorted(set(glyphs), key=font.getGlyphID)
- return coverage
+ if not glyphs:
+ return None
+ if isinstance(glyphs, set):
+ glyphs = sorted(glyphs)
+ coverage = klass()
+ coverage.glyphs = sorted(set(glyphs), key=font.getGlyphID)
+ return coverage
+
def parseCoverage(lines, font, klass=ot.Coverage):
- glyphs = []
- with lines.between('coverage definition'):
- for line in lines:
- glyphs.append(makeGlyph(line[0]))
- return makeCoverage(glyphs, font, klass)
+ glyphs = []
+ with lines.between("coverage definition"):
+ for line in lines:
+ glyphs.append(makeGlyph(line[0]))
+ return makeCoverage(glyphs, font, klass)
+
def bucketizeRules(self, c, rules, bucketKeys):
- buckets = {}
- for seq,recs in rules:
- buckets.setdefault(seq[c.InputIdx][0], []).append((tuple(s[1 if i==c.InputIdx else 0:] for i,s in enumerate(seq)), recs))
-
- rulesets = []
- for firstGlyph in bucketKeys:
- if firstGlyph not in buckets:
- rulesets.append(None)
- continue
- thisRules = []
- for seq,recs in buckets[firstGlyph]:
- rule = getattr(ot, c.Rule)()
- c.SetRuleData(rule, seq)
- setattr(rule, c.Type+'Count', len(recs))
- setattr(rule, c.LookupRecord, recs)
- thisRules.append(rule)
-
- ruleset = getattr(ot, c.RuleSet)()
- setattr(ruleset, c.Rule, thisRules)
- setattr(ruleset, c.RuleCount, len(thisRules))
- rulesets.append(ruleset)
-
- setattr(self, c.RuleSet, rulesets)
- setattr(self, c.RuleSetCount, len(rulesets))
+ buckets = {}
+ for seq, recs in rules:
+ buckets.setdefault(seq[c.InputIdx][0], []).append(
+ (tuple(s[1 if i == c.InputIdx else 0 :] for i, s in enumerate(seq)), recs)
+ )
+
+ rulesets = []
+ for firstGlyph in bucketKeys:
+ if firstGlyph not in buckets:
+ rulesets.append(None)
+ continue
+ thisRules = []
+ for seq, recs in buckets[firstGlyph]:
+ rule = getattr(ot, c.Rule)()
+ c.SetRuleData(rule, seq)
+ setattr(rule, c.Type + "Count", len(recs))
+ setattr(rule, c.LookupRecord, recs)
+ thisRules.append(rule)
+
+ ruleset = getattr(ot, c.RuleSet)()
+ setattr(ruleset, c.Rule, thisRules)
+ setattr(ruleset, c.RuleCount, len(thisRules))
+ rulesets.append(ruleset)
+
+ setattr(self, c.RuleSet, rulesets)
+ setattr(self, c.RuleSetCount, len(rulesets))
+
def parseContext(lines, font, Type, lookupMap=None):
- self = getattr(ot, Type)()
- typ = lines.peeks()[0].split()[0].lower()
- if typ == 'glyph':
- self.Format = 1
- log.debug("Parsing %s format %s", Type, self.Format)
- c = ContextHelper(Type, self.Format)
- rules = []
- for line in lines:
- assert line[0].lower() == 'glyph', line[0]
- while len(line) < 1+c.DataLen: line.append('')
- seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1:1+c.DataLen])
- recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap)
- rules.append((seq, recs))
-
- firstGlyphs = set(seq[c.InputIdx][0] for seq,recs in rules)
- self.Coverage = makeCoverage(firstGlyphs, font)
- bucketizeRules(self, c, rules, self.Coverage.glyphs)
- elif typ.endswith('class'):
- self.Format = 2
- log.debug("Parsing %s format %s", Type, self.Format)
- c = ContextHelper(Type, self.Format)
- classDefs = [None] * c.DataLen
- while lines.peeks()[0].endswith("class definition begin"):
- typ = lines.peek()[0][:-len("class definition begin")].lower()
- idx,klass = {
- 1: {
- '': (0,ot.ClassDef),
- },
- 3: {
- 'backtrack': (0,ot.BacktrackClassDef),
- '': (1,ot.InputClassDef),
- 'lookahead': (2,ot.LookAheadClassDef),
- },
- }[c.DataLen][typ]
- assert classDefs[idx] is None, idx
- classDefs[idx] = parseClassDef(lines, font, klass=klass)
- c.SetContextData(self, classDefs)
- rules = []
- for line in lines:
- assert line[0].lower().startswith('class'), line[0]
- while len(line) < 1+c.DataLen: line.append('')
- seq = tuple(intSplitComma(i) for i in line[1:1+c.DataLen])
- recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap)
- rules.append((seq, recs))
- firstClasses = set(seq[c.InputIdx][0] for seq,recs in rules)
- firstGlyphs = set(g for g,c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses)
- self.Coverage = makeCoverage(firstGlyphs, font)
- bucketizeRules(self, c, rules, range(max(firstClasses) + 1))
- elif typ.endswith('coverage'):
- self.Format = 3
- log.debug("Parsing %s format %s", Type, self.Format)
- c = ContextHelper(Type, self.Format)
- coverages = tuple([] for i in range(c.DataLen))
- while lines.peeks()[0].endswith("coverage definition begin"):
- typ = lines.peek()[0][:-len("coverage definition begin")].lower()
- idx,klass = {
- 1: {
- '': (0,ot.Coverage),
- },
- 3: {
- 'backtrack': (0,ot.BacktrackCoverage),
- 'input': (1,ot.InputCoverage),
- 'lookahead': (2,ot.LookAheadCoverage),
- },
- }[c.DataLen][typ]
- coverages[idx].append(parseCoverage(lines, font, klass=klass))
- c.SetRuleData(self, coverages)
- lines = list(lines)
- assert len(lines) == 1
- line = lines[0]
- assert line[0].lower() == 'coverage', line[0]
- recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap)
- setattr(self, c.Type+'Count', len(recs))
- setattr(self, c.LookupRecord, recs)
- else:
- assert 0, typ
- return self
+ self = getattr(ot, Type)()
+ typ = lines.peeks()[0].split()[0].lower()
+ if typ == "glyph":
+ self.Format = 1
+ log.debug("Parsing %s format %s", Type, self.Format)
+ c = ContextHelper(Type, self.Format)
+ rules = []
+ for line in lines:
+ assert line[0].lower() == "glyph", line[0]
+ while len(line) < 1 + c.DataLen:
+ line.append("")
+ seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1 : 1 + c.DataLen])
+ recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap)
+ rules.append((seq, recs))
+
+ firstGlyphs = set(seq[c.InputIdx][0] for seq, recs in rules)
+ self.Coverage = makeCoverage(firstGlyphs, font)
+ bucketizeRules(self, c, rules, self.Coverage.glyphs)
+ elif typ.endswith("class"):
+ self.Format = 2
+ log.debug("Parsing %s format %s", Type, self.Format)
+ c = ContextHelper(Type, self.Format)
+ classDefs = [None] * c.DataLen
+ while lines.peeks()[0].endswith("class definition begin"):
+ typ = lines.peek()[0][: -len("class definition begin")].lower()
+ idx, klass = {
+ 1: {
+ "": (0, ot.ClassDef),
+ },
+ 3: {
+ "backtrack": (0, ot.BacktrackClassDef),
+ "": (1, ot.InputClassDef),
+ "lookahead": (2, ot.LookAheadClassDef),
+ },
+ }[c.DataLen][typ]
+ assert classDefs[idx] is None, idx
+ classDefs[idx] = parseClassDef(lines, font, klass=klass)
+ c.SetContextData(self, classDefs)
+ rules = []
+ for line in lines:
+ assert line[0].lower().startswith("class"), line[0]
+ while len(line) < 1 + c.DataLen:
+ line.append("")
+ seq = tuple(intSplitComma(i) for i in line[1 : 1 + c.DataLen])
+ recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap)
+ rules.append((seq, recs))
+ firstClasses = set(seq[c.InputIdx][0] for seq, recs in rules)
+ firstGlyphs = set(
+ g for g, c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses
+ )
+ self.Coverage = makeCoverage(firstGlyphs, font)
+ bucketizeRules(self, c, rules, range(max(firstClasses) + 1))
+ elif typ.endswith("coverage"):
+ self.Format = 3
+ log.debug("Parsing %s format %s", Type, self.Format)
+ c = ContextHelper(Type, self.Format)
+ coverages = tuple([] for i in range(c.DataLen))
+ while lines.peeks()[0].endswith("coverage definition begin"):
+ typ = lines.peek()[0][: -len("coverage definition begin")].lower()
+ idx, klass = {
+ 1: {
+ "": (0, ot.Coverage),
+ },
+ 3: {
+ "backtrack": (0, ot.BacktrackCoverage),
+ "input": (1, ot.InputCoverage),
+ "lookahead": (2, ot.LookAheadCoverage),
+ },
+ }[c.DataLen][typ]
+ coverages[idx].append(parseCoverage(lines, font, klass=klass))
+ c.SetRuleData(self, coverages)
+ lines = list(lines)
+ assert len(lines) == 1
+ line = lines[0]
+ assert line[0].lower() == "coverage", line[0]
+ recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap)
+ setattr(self, c.Type + "Count", len(recs))
+ setattr(self, c.LookupRecord, recs)
+ else:
+ assert 0, typ
+ return self
+
def parseContextSubst(lines, font, lookupMap=None):
- return parseContext(lines, font, "ContextSubst", lookupMap=lookupMap)
+ return parseContext(lines, font, "ContextSubst", lookupMap=lookupMap)
+
+
def parseContextPos(lines, font, lookupMap=None):
- return parseContext(lines, font, "ContextPos", lookupMap=lookupMap)
+ return parseContext(lines, font, "ContextPos", lookupMap=lookupMap)
+
+
def parseChainedSubst(lines, font, lookupMap=None):
- return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap)
+ return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap)
+
+
def parseChainedPos(lines, font, lookupMap=None):
- return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap)
+ return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap)
+
def parseReverseChainedSubst(lines, font, _lookupMap=None):
- self = ot.ReverseChainSingleSubst()
- self.Format = 1
- coverages = ([], [])
- while lines.peeks()[0].endswith("coverage definition begin"):
- typ = lines.peek()[0][:-len("coverage definition begin")].lower()
- idx,klass = {
- 'backtrack': (0,ot.BacktrackCoverage),
- 'lookahead': (1,ot.LookAheadCoverage),
- }[typ]
- coverages[idx].append(parseCoverage(lines, font, klass=klass))
- self.BacktrackCoverage = coverages[0]
- self.BacktrackGlyphCount = len(self.BacktrackCoverage)
- self.LookAheadCoverage = coverages[1]
- self.LookAheadGlyphCount = len(self.LookAheadCoverage)
- mapping = {}
- for line in lines:
- assert len(line) == 2, line
- line = makeGlyphs(line)
- mapping[line[0]] = line[1]
- self.Coverage = makeCoverage(set(mapping.keys()), font)
- self.Substitute = [mapping[k] for k in self.Coverage.glyphs]
- self.GlyphCount = len(self.Substitute)
- return self
+ self = ot.ReverseChainSingleSubst()
+ self.Format = 1
+ coverages = ([], [])
+ while lines.peeks()[0].endswith("coverage definition begin"):
+ typ = lines.peek()[0][: -len("coverage definition begin")].lower()
+ idx, klass = {
+ "backtrack": (0, ot.BacktrackCoverage),
+ "lookahead": (1, ot.LookAheadCoverage),
+ }[typ]
+ coverages[idx].append(parseCoverage(lines, font, klass=klass))
+ self.BacktrackCoverage = coverages[0]
+ self.BacktrackGlyphCount = len(self.BacktrackCoverage)
+ self.LookAheadCoverage = coverages[1]
+ self.LookAheadGlyphCount = len(self.LookAheadCoverage)
+ mapping = {}
+ for line in lines:
+ assert len(line) == 2, line
+ line = makeGlyphs(line)
+ mapping[line[0]] = line[1]
+ self.Coverage = makeCoverage(set(mapping.keys()), font)
+ self.Substitute = [mapping[k] for k in self.Coverage.glyphs]
+ self.GlyphCount = len(self.Substitute)
+ return self
+
def parseLookup(lines, tableTag, font, lookupMap=None):
- line = lines.expect('lookup')
- _, name, typ = line
- log.debug("Parsing lookup type %s %s", typ, name)
- lookup = ot.Lookup()
- lookup.LookupFlag,filterset = parseLookupFlags(lines)
- if filterset is not None:
- lookup.MarkFilteringSet = filterset
- lookup.LookupType, parseLookupSubTable = {
- 'GSUB': {
- 'single': (1, parseSingleSubst),
- 'multiple': (2, parseMultiple),
- 'alternate': (3, parseAlternate),
- 'ligature': (4, parseLigature),
- 'context': (5, parseContextSubst),
- 'chained': (6, parseChainedSubst),
- 'reversechained':(8, parseReverseChainedSubst),
- },
- 'GPOS': {
- 'single': (1, parseSinglePos),
- 'pair': (2, parsePair),
- 'kernset': (2, parseKernset),
- 'cursive': (3, parseCursive),
- 'mark to base': (4, parseMarkToBase),
- 'mark to ligature':(5, parseMarkToLigature),
- 'mark to mark': (6, parseMarkToMark),
- 'context': (7, parseContextPos),
- 'chained': (8, parseChainedPos),
- },
- }[tableTag][typ]
-
- with lines.until('lookup end'):
- subtables = []
-
- while lines.peek():
- with lines.until(('% subtable', 'subtable end')):
- while lines.peek():
- subtable = parseLookupSubTable(lines, font, lookupMap)
- assert lookup.LookupType == subtable.LookupType
- subtables.append(subtable)
- if lines.peeks()[0] in ('% subtable', 'subtable end'):
- next(lines)
- lines.expect('lookup end')
-
- lookup.SubTable = subtables
- lookup.SubTableCount = len(lookup.SubTable)
- if lookup.SubTableCount == 0:
- # Remove this return when following is fixed:
- # https://github.com/fonttools/fonttools/issues/789
- return None
- return lookup
+ line = lines.expect("lookup")
+ _, name, typ = line
+ log.debug("Parsing lookup type %s %s", typ, name)
+ lookup = ot.Lookup()
+ lookup.LookupFlag, filterset = parseLookupFlags(lines)
+ if filterset is not None:
+ lookup.MarkFilteringSet = filterset
+ lookup.LookupType, parseLookupSubTable = {
+ "GSUB": {
+ "single": (1, parseSingleSubst),
+ "multiple": (2, parseMultiple),
+ "alternate": (3, parseAlternate),
+ "ligature": (4, parseLigature),
+ "context": (5, parseContextSubst),
+ "chained": (6, parseChainedSubst),
+ "reversechained": (8, parseReverseChainedSubst),
+ },
+ "GPOS": {
+ "single": (1, parseSinglePos),
+ "pair": (2, parsePair),
+ "kernset": (2, parseKernset),
+ "cursive": (3, parseCursive),
+ "mark to base": (4, parseMarkToBase),
+ "mark to ligature": (5, parseMarkToLigature),
+ "mark to mark": (6, parseMarkToMark),
+ "context": (7, parseContextPos),
+ "chained": (8, parseChainedPos),
+ },
+ }[tableTag][typ]
+
+ with lines.until("lookup end"):
+ subtables = []
+
+ while lines.peek():
+ with lines.until(("% subtable", "subtable end")):
+ while lines.peek():
+ subtable = parseLookupSubTable(lines, font, lookupMap)
+ assert lookup.LookupType == subtable.LookupType
+ subtables.append(subtable)
+ if lines.peeks()[0] in ("% subtable", "subtable end"):
+ next(lines)
+ lines.expect("lookup end")
+
+ lookup.SubTable = subtables
+ lookup.SubTableCount = len(lookup.SubTable)
+ if lookup.SubTableCount == 0:
+ # Remove this return when following is fixed:
+ # https://github.com/fonttools/fonttools/issues/789
+ return None
+ return lookup
+
def parseGSUBGPOS(lines, font, tableTag):
- container = ttLib.getTableClass(tableTag)()
- lookupMap = DeferredMapping()
- featureMap = DeferredMapping()
- assert tableTag in ('GSUB', 'GPOS')
- log.debug("Parsing %s", tableTag)
- self = getattr(ot, tableTag)()
- self.Version = 0x00010000
- fields = {
- 'script table begin':
- ('ScriptList',
- lambda lines: parseScriptList (lines, featureMap)),
- 'feature table begin':
- ('FeatureList',
- lambda lines: parseFeatureList (lines, lookupMap, featureMap)),
- 'lookup':
- ('LookupList',
- None),
- }
- for attr,parser in fields.values():
- setattr(self, attr, None)
- while lines.peek() is not None:
- typ = lines.peek()[0].lower()
- if typ not in fields:
- log.debug('Skipping %s', lines.peek())
- next(lines)
- continue
- attr,parser = fields[typ]
- if typ == 'lookup':
- if self.LookupList is None:
- self.LookupList = ot.LookupList()
- self.LookupList.Lookup = []
- _, name, _ = lines.peek()
- lookup = parseLookup(lines, tableTag, font, lookupMap)
- if lookupMap is not None:
- assert name not in lookupMap, "Duplicate lookup name: %s" % name
- lookupMap[name] = len(self.LookupList.Lookup)
- else:
- assert int(name) == len(self.LookupList.Lookup), "%d %d" % (name, len(self.Lookup))
- self.LookupList.Lookup.append(lookup)
- else:
- assert getattr(self, attr) is None, attr
- setattr(self, attr, parser(lines))
- if self.LookupList:
- self.LookupList.LookupCount = len(self.LookupList.Lookup)
- if lookupMap is not None:
- lookupMap.applyDeferredMappings()
- if featureMap is not None:
- featureMap.applyDeferredMappings()
- container.table = self
- return container
+ container = ttLib.getTableClass(tableTag)()
+ lookupMap = DeferredMapping()
+ featureMap = DeferredMapping()
+ assert tableTag in ("GSUB", "GPOS")
+ log.debug("Parsing %s", tableTag)
+ self = getattr(ot, tableTag)()
+ self.Version = 0x00010000
+ fields = {
+ "script table begin": (
+ "ScriptList",
+ lambda lines: parseScriptList(lines, featureMap),
+ ),
+ "feature table begin": (
+ "FeatureList",
+ lambda lines: parseFeatureList(lines, lookupMap, featureMap),
+ ),
+ "lookup": ("LookupList", None),
+ }
+ for attr, parser in fields.values():
+ setattr(self, attr, None)
+ while lines.peek() is not None:
+ typ = lines.peek()[0].lower()
+ if typ not in fields:
+ log.debug("Skipping %s", lines.peek())
+ next(lines)
+ continue
+ attr, parser = fields[typ]
+ if typ == "lookup":
+ if self.LookupList is None:
+ self.LookupList = ot.LookupList()
+ self.LookupList.Lookup = []
+ _, name, _ = lines.peek()
+ lookup = parseLookup(lines, tableTag, font, lookupMap)
+ if lookupMap is not None:
+ assert name not in lookupMap, "Duplicate lookup name: %s" % name
+ lookupMap[name] = len(self.LookupList.Lookup)
+ else:
+ assert int(name) == len(self.LookupList.Lookup), "%d %d" % (
+ name,
+ len(self.Lookup),
+ )
+ self.LookupList.Lookup.append(lookup)
+ else:
+ assert getattr(self, attr) is None, attr
+ setattr(self, attr, parser(lines))
+ if self.LookupList:
+ self.LookupList.LookupCount = len(self.LookupList.Lookup)
+ if lookupMap is not None:
+ lookupMap.applyDeferredMappings()
+ if os.environ.get(LOOKUP_DEBUG_ENV_VAR):
+ if "Debg" not in font:
+ font["Debg"] = newTable("Debg")
+ font["Debg"].data = {}
+ debug = (
+ font["Debg"]
+ .data.setdefault(LOOKUP_DEBUG_INFO_KEY, {})
+ .setdefault(tableTag, {})
+ )
+ for name, lookup in lookupMap.items():
+ debug[str(lookup)] = ["", name, ""]
+
+ featureMap.applyDeferredMappings()
+ container.table = self
+ return container
+
def parseGSUB(lines, font):
- return parseGSUBGPOS(lines, font, 'GSUB')
+ return parseGSUBGPOS(lines, font, "GSUB")
+
+
def parseGPOS(lines, font):
- return parseGSUBGPOS(lines, font, 'GPOS')
+ return parseGSUBGPOS(lines, font, "GPOS")
+
def parseAttachList(lines, font):
- points = {}
- with lines.between('attachment list'):
- for line in lines:
- glyph = makeGlyph(line[0])
- assert glyph not in points, glyph
- points[glyph] = [int(i) for i in line[1:]]
- return otl.buildAttachList(points, font.getReverseGlyphMap())
+ points = {}
+ with lines.between("attachment list"):
+ for line in lines:
+ glyph = makeGlyph(line[0])
+ assert glyph not in points, glyph
+ points[glyph] = [int(i) for i in line[1:]]
+ return otl.buildAttachList(points, font.getReverseGlyphMap())
+
def parseCaretList(lines, font):
- carets = {}
- with lines.between('carets'):
- for line in lines:
- glyph = makeGlyph(line[0])
- assert glyph not in carets, glyph
- num = int(line[1])
- thisCarets = [int(i) for i in line[2:]]
- assert num == len(thisCarets), line
- carets[glyph] = thisCarets
- return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap())
+ carets = {}
+ with lines.between("carets"):
+ for line in lines:
+ glyph = makeGlyph(line[0])
+ assert glyph not in carets, glyph
+ num = int(line[1])
+ thisCarets = [int(i) for i in line[2:]]
+ assert num == len(thisCarets), line
+ carets[glyph] = thisCarets
+ return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap())
+
def makeMarkFilteringSets(sets, font):
- self = ot.MarkGlyphSetsDef()
- self.MarkSetTableFormat = 1
- self.MarkSetCount = 1 + max(sets.keys())
- self.Coverage = [None] * self.MarkSetCount
- for k,v in sorted(sets.items()):
- self.Coverage[k] = makeCoverage(set(v), font)
- return self
+ self = ot.MarkGlyphSetsDef()
+ self.MarkSetTableFormat = 1
+ self.MarkSetCount = 1 + max(sets.keys())
+ self.Coverage = [None] * self.MarkSetCount
+ for k, v in sorted(sets.items()):
+ self.Coverage[k] = makeCoverage(set(v), font)
+ return self
+
def parseMarkFilteringSets(lines, font):
- sets = {}
- with lines.between('set definition'):
- for line in lines:
- assert len(line) == 2, line
- glyph = makeGlyph(line[0])
- # TODO accept set names
- st = int(line[1])
- if st not in sets:
- sets[st] = []
- sets[st].append(glyph)
- return makeMarkFilteringSets(sets, font)
+ sets = {}
+ with lines.between("set definition"):
+ for line in lines:
+ assert len(line) == 2, line
+ glyph = makeGlyph(line[0])
+ # TODO accept set names
+ st = int(line[1])
+ if st not in sets:
+ sets[st] = []
+ sets[st].append(glyph)
+ return makeMarkFilteringSets(sets, font)
+
def parseGDEF(lines, font):
- container = ttLib.getTableClass('GDEF')()
- log.debug("Parsing GDEF")
- self = ot.GDEF()
- fields = {
- 'class definition begin':
- ('GlyphClassDef',
- lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef)),
- 'attachment list begin':
- ('AttachList', parseAttachList),
- 'carets begin':
- ('LigCaretList', parseCaretList),
- 'mark attachment class definition begin':
- ('MarkAttachClassDef',
- lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef)),
- 'markfilter set definition begin':
- ('MarkGlyphSetsDef', parseMarkFilteringSets),
- }
- for attr,parser in fields.values():
- setattr(self, attr, None)
- while lines.peek() is not None:
- typ = lines.peek()[0].lower()
- if typ not in fields:
- log.debug('Skipping %s', typ)
- next(lines)
- continue
- attr,parser = fields[typ]
- assert getattr(self, attr) is None, attr
- setattr(self, attr, parser(lines, font))
- self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002
- container.table = self
- return container
+ container = ttLib.getTableClass("GDEF")()
+ log.debug("Parsing GDEF")
+ self = ot.GDEF()
+ fields = {
+ "class definition begin": (
+ "GlyphClassDef",
+ lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef),
+ ),
+ "attachment list begin": ("AttachList", parseAttachList),
+ "carets begin": ("LigCaretList", parseCaretList),
+ "mark attachment class definition begin": (
+ "MarkAttachClassDef",
+ lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef),
+ ),
+ "markfilter set definition begin": ("MarkGlyphSetsDef", parseMarkFilteringSets),
+ }
+ for attr, parser in fields.values():
+ setattr(self, attr, None)
+ while lines.peek() is not None:
+ typ = lines.peek()[0].lower()
+ if typ not in fields:
+ log.debug("Skipping %s", typ)
+ next(lines)
+ continue
+ attr, parser = fields[typ]
+ assert getattr(self, attr) is None, attr
+ setattr(self, attr, parser(lines, font))
+ self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002
+ container.table = self
+ return container
+
def parseCmap(lines, font):
- container = ttLib.getTableClass('cmap')()
- log.debug("Parsing cmap")
- tables = []
- while lines.peek() is not None:
- lines.expect('cmap subtable %d' % len(tables))
- platId, encId, fmt, lang = [
- parseCmapId(lines, field)
- for field in ('platformID', 'encodingID', 'format', 'language')]
- table = cmap_classes[fmt](fmt)
- table.platformID = platId
- table.platEncID = encId
- table.language = lang
- table.cmap = {}
- line = next(lines)
- while line[0] != 'end subtable':
- table.cmap[int(line[0], 16)] = line[1]
- line = next(lines)
- tables.append(table)
- container.tableVersion = 0
- container.tables = tables
- return container
+ container = ttLib.getTableClass("cmap")()
+ log.debug("Parsing cmap")
+ tables = []
+ while lines.peek() is not None:
+ lines.expect("cmap subtable %d" % len(tables))
+ platId, encId, fmt, lang = [
+ parseCmapId(lines, field)
+ for field in ("platformID", "encodingID", "format", "language")
+ ]
+ table = cmap_classes[fmt](fmt)
+ table.platformID = platId
+ table.platEncID = encId
+ table.language = lang
+ table.cmap = {}
+ line = next(lines)
+ while line[0] != "end subtable":
+ table.cmap[int(line[0], 16)] = line[1]
+ line = next(lines)
+ tables.append(table)
+ container.tableVersion = 0
+ container.tables = tables
+ return container
+
def parseCmapId(lines, field):
- line = next(lines)
- assert field == line[0]
- return int(line[1])
+ line = next(lines)
+ assert field == line[0]
+ return int(line[1])
+
def parseTable(lines, font, tableTag=None):
- log.debug("Parsing table")
- line = lines.peeks()
- tag = None
- if line[0].split()[0] == 'FontDame':
- tag = line[0].split()[1]
- elif ' '.join(line[0].split()[:3]) == 'Font Chef Table':
- tag = line[0].split()[3]
- if tag is not None:
- next(lines)
- tag = tag.ljust(4)
- if tableTag is None:
- tableTag = tag
- else:
- assert tableTag == tag, (tableTag, tag)
-
- assert tableTag is not None, "Don't know what table to parse and data doesn't specify"
-
- return {
- 'GSUB': parseGSUB,
- 'GPOS': parseGPOS,
- 'GDEF': parseGDEF,
- 'cmap': parseCmap,
- }[tableTag](lines, font)
+ log.debug("Parsing table")
+ line = lines.peeks()
+ tag = None
+ if line[0].split()[0] == "FontDame":
+ tag = line[0].split()[1]
+ elif " ".join(line[0].split()[:3]) == "Font Chef Table":
+ tag = line[0].split()[3]
+ if tag is not None:
+ next(lines)
+ tag = tag.ljust(4)
+ if tableTag is None:
+ tableTag = tag
+ else:
+ assert tableTag == tag, (tableTag, tag)
+
+ assert (
+ tableTag is not None
+ ), "Don't know what table to parse and data doesn't specify"
+
+ return {
+ "GSUB": parseGSUB,
+ "GPOS": parseGPOS,
+ "GDEF": parseGDEF,
+ "cmap": parseCmap,
+ }[tableTag](lines, font)
+
class Tokenizer(object):
+ def __init__(self, f):
+ # TODO BytesIO / StringIO as needed? also, figure out whether we work on bytes or unicode
+ lines = iter(f)
+ try:
+ self.filename = f.name
+ except:
+ self.filename = None
+ self.lines = iter(lines)
+ self.line = ""
+ self.lineno = 0
+ self.stoppers = []
+ self.buffer = None
+
+ def __iter__(self):
+ return self
+
+ def _next_line(self):
+ self.lineno += 1
+ line = self.line = next(self.lines)
+ line = [s.strip() for s in line.split("\t")]
+ if len(line) == 1 and not line[0]:
+ del line[0]
+ if line and not line[-1]:
+ log.warning("trailing tab found on line %d: %s" % (self.lineno, self.line))
+ while line and not line[-1]:
+ del line[-1]
+ return line
+
+ def _next_nonempty(self):
+ while True:
+ line = self._next_line()
+ # Skip comments and empty lines
+ if line and line[0] and (line[0][0] != "%" or line[0] == "% subtable"):
+ return line
+
+ def _next_buffered(self):
+ if self.buffer:
+ ret = self.buffer
+ self.buffer = None
+ return ret
+ else:
+ return self._next_nonempty()
+
+ def __next__(self):
+ line = self._next_buffered()
+ if line[0].lower() in self.stoppers:
+ self.buffer = line
+ raise StopIteration
+ return line
+
+ def next(self):
+ return self.__next__()
+
+ def peek(self):
+ if not self.buffer:
+ try:
+ self.buffer = self._next_nonempty()
+ except StopIteration:
+ return None
+ if self.buffer[0].lower() in self.stoppers:
+ return None
+ return self.buffer
+
+ def peeks(self):
+ ret = self.peek()
+ return ret if ret is not None else ("",)
+
+ @contextmanager
+ def between(self, tag):
+ start = tag + " begin"
+ end = tag + " end"
+ self.expectendswith(start)
+ self.stoppers.append(end)
+ yield
+ del self.stoppers[-1]
+ self.expect(tag + " end")
+
+ @contextmanager
+ def until(self, tags):
+ if type(tags) is not tuple:
+ tags = (tags,)
+ self.stoppers.extend(tags)
+ yield
+ del self.stoppers[-len(tags) :]
+
+ def expect(self, s):
+ line = next(self)
+ tag = line[0].lower()
+ assert tag == s, "Expected '%s', got '%s'" % (s, tag)
+ return line
+
+ def expectendswith(self, s):
+ line = next(self)
+ tag = line[0].lower()
+ assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag)
+ return line
- def __init__(self, f):
- # TODO BytesIO / StringIO as needed? also, figure out whether we work on bytes or unicode
- lines = iter(f)
- try:
- self.filename = f.name
- except:
- self.filename = None
- self.lines = iter(lines)
- self.line = ''
- self.lineno = 0
- self.stoppers = []
- self.buffer = None
-
- def __iter__(self):
- return self
-
- def _next_line(self):
- self.lineno += 1
- line = self.line = next(self.lines)
- line = [s.strip() for s in line.split('\t')]
- if len(line) == 1 and not line[0]:
- del line[0]
- if line and not line[-1]:
- log.warning('trailing tab found on line %d: %s' % (self.lineno, self.line))
- while line and not line[-1]:
- del line[-1]
- return line
-
- def _next_nonempty(self):
- while True:
- line = self._next_line()
- # Skip comments and empty lines
- if line and line[0] and (line[0][0] != '%' or line[0] == '% subtable'):
- return line
-
- def _next_buffered(self):
- if self.buffer:
- ret = self.buffer
- self.buffer = None
- return ret
- else:
- return self._next_nonempty()
-
- def __next__(self):
- line = self._next_buffered()
- if line[0].lower() in self.stoppers:
- self.buffer = line
- raise StopIteration
- return line
-
- def next(self):
- return self.__next__()
-
- def peek(self):
- if not self.buffer:
- try:
- self.buffer = self._next_nonempty()
- except StopIteration:
- return None
- if self.buffer[0].lower() in self.stoppers:
- return None
- return self.buffer
-
- def peeks(self):
- ret = self.peek()
- return ret if ret is not None else ('',)
-
- @contextmanager
- def between(self, tag):
- start = tag + ' begin'
- end = tag + ' end'
- self.expectendswith(start)
- self.stoppers.append(end)
- yield
- del self.stoppers[-1]
- self.expect(tag + ' end')
-
- @contextmanager
- def until(self, tags):
- if type(tags) is not tuple:
- tags = (tags,)
- self.stoppers.extend(tags)
- yield
- del self.stoppers[-len(tags):]
-
- def expect(self, s):
- line = next(self)
- tag = line[0].lower()
- assert tag == s, "Expected '%s', got '%s'" % (s, tag)
- return line
-
- def expectendswith(self, s):
- line = next(self)
- tag = line[0].lower()
- assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag)
- return line
def build(f, font, tableTag=None):
- """Convert a Monotype font layout file to an OpenType layout object
+ """Convert a Monotype font layout file to an OpenType layout object
- A font object must be passed, but this may be a "dummy" font; it is only
- used for sorting glyph sets when making coverage tables and to hold the
- OpenType layout table while it is being built.
+ A font object must be passed, but this may be a "dummy" font; it is only
+ used for sorting glyph sets when making coverage tables and to hold the
+ OpenType layout table while it is being built.
- Args:
- f: A file object.
- font (TTFont): A font object.
- tableTag (string): If provided, asserts that the file contains data for the
- given OpenType table.
+ Args:
+ f: A file object.
+ font (TTFont): A font object.
+ tableTag (string): If provided, asserts that the file contains data for the
+ given OpenType table.
- Returns:
- An object representing the table. (e.g. ``table_G_S_U_B_``)
- """
- lines = Tokenizer(f)
- return parseTable(lines, font, tableTag=tableTag)
+ Returns:
+ An object representing the table. (e.g. ``table_G_S_U_B_``)
+ """
+ lines = Tokenizer(f)
+ return parseTable(lines, font, tableTag=tableTag)
def main(args=None, font=None):
- """Convert a FontDame OTL file to TTX XML
-
- Writes XML output to stdout.
-
- Args:
- args: Command line arguments (``--font``, ``--table``, input files).
- """
- import sys
- from fontTools import configLogger
- from fontTools.misc.testTools import MockFont
-
- if args is None:
- args = sys.argv[1:]
-
- # configure the library logger (for >= WARNING)
- configLogger()
- # comment this out to enable debug messages from mtiLib's logger
- # log.setLevel(logging.DEBUG)
-
- import argparse
- parser = argparse.ArgumentParser(
- "fonttools mtiLib",
- description=main.__doc__,
- )
-
- parser.add_argument('--font', '-f', metavar='FILE', dest="font",
- help="Input TTF files (used for glyph classes and sorting coverage tables)")
- parser.add_argument('--table', '-t', metavar='TABLE', dest="tableTag",
- help="Table to fill (sniffed from input file if not provided)")
- parser.add_argument('inputs', metavar='FILE', type=str, nargs='+',
- help="Input FontDame .txt files")
-
- args = parser.parse_args(args)
-
- if font is None:
- if args.font:
- font = ttLib.TTFont(args.font)
- else:
- font = MockFont()
-
- for f in args.inputs:
- log.debug("Processing %s", f)
- with open(f, 'rt', encoding="utf-8") as f:
- table = build(f, font, tableTag=args.tableTag)
- blob = table.compile(font) # Make sure it compiles
- decompiled = table.__class__()
- decompiled.decompile(blob, font) # Make sure it decompiles!
-
- #continue
- from fontTools.misc import xmlWriter
- tag = table.tableTag
- writer = xmlWriter.XMLWriter(sys.stdout)
- writer.begintag(tag)
- writer.newline()
- #table.toXML(writer, font)
- decompiled.toXML(writer, font)
- writer.endtag(tag)
- writer.newline()
-
-
-if __name__ == '__main__':
- import sys
- sys.exit(main())
+ """Convert a FontDame OTL file to TTX XML
+
+ Writes XML output to stdout.
+
+ Args:
+ args: Command line arguments (``--font``, ``--table``, input files).
+ """
+ import sys
+ from fontTools import configLogger
+ from fontTools.misc.testTools import MockFont
+
+ if args is None:
+ args = sys.argv[1:]
+
+ # configure the library logger (for >= WARNING)
+ configLogger()
+ # comment this out to enable debug messages from mtiLib's logger
+ # log.setLevel(logging.DEBUG)
+
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools mtiLib",
+ description=main.__doc__,
+ )
+
+ parser.add_argument(
+ "--font",
+ "-f",
+ metavar="FILE",
+ dest="font",
+ help="Input TTF files (used for glyph classes and sorting coverage tables)",
+ )
+ parser.add_argument(
+ "--table",
+ "-t",
+ metavar="TABLE",
+ dest="tableTag",
+ help="Table to fill (sniffed from input file if not provided)",
+ )
+ parser.add_argument(
+ "inputs", metavar="FILE", type=str, nargs="+", help="Input FontDame .txt files"
+ )
+
+ args = parser.parse_args(args)
+
+ if font is None:
+ if args.font:
+ font = ttLib.TTFont(args.font)
+ else:
+ font = MockFont()
+
+ for f in args.inputs:
+ log.debug("Processing %s", f)
+ with open(f, "rt", encoding="utf-8") as f:
+ table = build(f, font, tableTag=args.tableTag)
+ blob = table.compile(font) # Make sure it compiles
+ decompiled = table.__class__()
+ decompiled.decompile(blob, font) # Make sure it decompiles!
+
+ # continue
+ from fontTools.misc import xmlWriter
+
+ tag = table.tableTag
+ writer = xmlWriter.XMLWriter(sys.stdout)
+ writer.begintag(tag)
+ writer.newline()
+ # table.toXML(writer, font)
+ decompiled.toXML(writer, font)
+ writer.endtag(tag)
+ writer.newline()
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(main())
diff --git a/Lib/fontTools/mtiLib/__main__.py b/Lib/fontTools/mtiLib/__main__.py
index fe6b638b..29c802bc 100644
--- a/Lib/fontTools/mtiLib/__main__.py
+++ b/Lib/fontTools/mtiLib/__main__.py
@@ -1,5 +1,5 @@
import sys
from fontTools.mtiLib import main
-if __name__ == '__main__':
- sys.exit(main())
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/otlLib/builder.py b/Lib/fontTools/otlLib/builder.py
index 233edec2..3508a7e2 100644
--- a/Lib/fontTools/otlLib/builder.py
+++ b/Lib/fontTools/otlLib/builder.py
@@ -55,7 +55,11 @@ def buildCoverage(glyphs, glyphMap):
if not glyphs:
return None
self = ot.Coverage()
- self.glyphs = sorted(set(glyphs), key=glyphMap.__getitem__)
+ try:
+ self.glyphs = sorted(set(glyphs), key=glyphMap.__getitem__)
+ except KeyError as e:
+ raise ValueError(f"Could not find glyph {e} in font") from e
+
return self
@@ -369,10 +373,19 @@ class ChainContextualBuilder(LookupBuilder):
rulesets = self.rulesets()
chaining = any(ruleset.hasPrefixOrSuffix for ruleset in rulesets)
+
+ # https://github.com/fonttools/fonttools/issues/2539
+ #
# Unfortunately, as of 2022-03-07, Apple's CoreText renderer does not
# correctly process GPOS7 lookups, so for now we force contextual
# positioning lookups to be chaining (GPOS8).
- if self.subtable_type == "Pos": # horrible separation of concerns breach
+ #
+ # This seems to be fixed as of macOS 13.2, but we keep disabling this
+ # for now until we are no longer concerned about old macOS versions.
+ # But we allow people to opt-out of this with the config key below.
+ write_gpos7 = self.font.cfg.get("fontTools.otlLib.builder:WRITE_GPOS7")
+ # horrible separation of concerns breach
+ if not write_gpos7 and self.subtable_type == "Pos":
chaining = True
for ruleset in rulesets:
@@ -764,7 +777,7 @@ class ChainContextSubstBuilder(ChainContextualBuilder):
result.setdefault(glyph, set()).update(replacements)
return result
- def find_chainable_single_subst(self, glyphs):
+ def find_chainable_single_subst(self, mapping):
"""Helper for add_single_subst_chained_()"""
res = None
for rule in self.rules[::-1]:
@@ -772,7 +785,7 @@ class ChainContextSubstBuilder(ChainContextualBuilder):
return res
for sub in rule.lookups:
if isinstance(sub, SingleSubstBuilder) and not any(
- g in glyphs for g in sub.mapping.keys()
+ g in mapping and mapping[g] != sub.mapping[g] for g in sub.mapping
):
res = sub
return res
@@ -981,7 +994,7 @@ class MarkBasePosBuilder(LookupBuilder):
for mc, anchor in anchors.items():
if mc not in markClasses:
raise ValueError(
- "Mark class %s not found for base glyph %s" % (mc, mark)
+ "Mark class %s not found for base glyph %s" % (mc, glyph)
)
bases[glyph][markClasses[mc]] = anchor
subtables = buildMarkBasePos(marks, bases, self.glyphMap)
@@ -1387,27 +1400,16 @@ class PairPosBuilder(LookupBuilder):
lookup.
"""
builders = {}
- builder = None
+ builder = ClassPairPosSubtableBuilder(self)
for glyphclass1, value1, glyphclass2, value2 in self.pairs:
if glyphclass1 is self.SUBTABLE_BREAK_:
- if builder is not None:
- builder.addSubtableBreak()
+ builder.addSubtableBreak()
continue
- valFormat1, valFormat2 = 0, 0
- if value1:
- valFormat1 = value1.getFormat()
- if value2:
- valFormat2 = value2.getFormat()
- builder = builders.get((valFormat1, valFormat2))
- if builder is None:
- builder = ClassPairPosSubtableBuilder(self)
- builders[(valFormat1, valFormat2)] = builder
builder.addPair(glyphclass1, value1, glyphclass2, value2)
subtables = []
if self.glyphPairs:
subtables.extend(buildPairPosGlyphs(self.glyphPairs, self.glyphMap))
- for key in sorted(builders.keys()):
- subtables.extend(builders[key].subtables())
+ subtables.extend(builder.subtables())
lookup = self.buildLookup_(subtables)
# Compact the lookup
@@ -2511,9 +2513,14 @@ def buildAttachPoint(points):
def buildCaretValueForCoord(coord):
# 500 --> otTables.CaretValue, format 1
+ # (500, DeviceTable) --> otTables.CaretValue, format 3
self = ot.CaretValue()
- self.Format = 1
- self.Coordinate = coord
+ if isinstance(coord, tuple):
+ self.Format = 3
+ self.Coordinate, self.DeviceTable = coord
+ else:
+ self.Format = 1
+ self.Coordinate = coord
return self
@@ -2575,7 +2582,8 @@ def buildLigGlyph(coords, points):
# ([500], [4]) --> otTables.LigGlyph; None for empty coords/points
carets = []
if coords:
- carets.extend([buildCaretValueForCoord(c) for c in sorted(coords)])
+ coords = sorted(coords, key=lambda c: c[0] if isinstance(c, tuple) else c)
+ carets.extend([buildCaretValueForCoord(c) for c in coords])
if points:
carets.extend([buildCaretValueForPoint(p) for p in sorted(points)])
if not carets:
@@ -2666,7 +2674,7 @@ class ClassDefBuilder(object):
# class form a contiguous range, the encoding is actually quite
# compact, whereas a non-contiguous set might need a lot of bytes
# in the output file. We don't get this right with the key below.
- result = sorted(self.classes_, key=lambda s: (len(s), s), reverse=True)
+ result = sorted(self.classes_, key=lambda s: (-len(s), s))
if not self.useClass0_:
result.insert(0, frozenset())
return result
@@ -2792,6 +2800,7 @@ def buildStatTable(
locations, axes, nameTable, windowsNames=windowsNames, macNames=macNames
)
axisValues = multiAxisValues + axisValues
+ nameTable.names.sort()
# Store AxisRecords
axisRecordArray = ot.AxisRecordArray()
@@ -2801,6 +2810,8 @@ def buildStatTable(
statTable.DesignAxisRecord = axisRecordArray
statTable.DesignAxisCount = len(axisRecords)
+ statTable.AxisValueCount = 0
+ statTable.AxisValueArray = None
if axisValues:
# Store AxisValueRecords
axisValueArray = ot.AxisValueArray()
diff --git a/Lib/fontTools/otlLib/optimize/__main__.py b/Lib/fontTools/otlLib/optimize/__main__.py
index 03027ecd..b0ae9081 100644
--- a/Lib/fontTools/otlLib/optimize/__main__.py
+++ b/Lib/fontTools/otlLib/optimize/__main__.py
@@ -2,5 +2,5 @@ import sys
from fontTools.otlLib.optimize import main
-if __name__ == '__main__':
- sys.exit(main())
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/otlLib/optimize/gpos.py b/Lib/fontTools/otlLib/optimize/gpos.py
index 0acd9ed0..01c2257c 100644
--- a/Lib/fontTools/otlLib/optimize/gpos.py
+++ b/Lib/fontTools/otlLib/optimize/gpos.py
@@ -135,6 +135,7 @@ Pairs = Dict[
Tuple[otBase.ValueRecord, otBase.ValueRecord],
]
+
# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958
def _getClassRanges(glyphIDs: Iterable[int]):
glyphIDs = sorted(glyphIDs)
@@ -274,7 +275,7 @@ class Cluster:
)
merged_range_count = 0
last = None
- for (start, end) in ranges:
+ for start, end in ranges:
if last is not None and start != last + 1:
merged_range_count += 1
last = end
diff --git a/Lib/fontTools/pens/areaPen.py b/Lib/fontTools/pens/areaPen.py
index 403afe7b..004bb06b 100644
--- a/Lib/fontTools/pens/areaPen.py
+++ b/Lib/fontTools/pens/areaPen.py
@@ -7,51 +7,46 @@ __all__ = ["AreaPen"]
class AreaPen(BasePen):
-
- def __init__(self, glyphset=None):
- BasePen.__init__(self, glyphset)
- self.value = 0
-
- def _moveTo(self, p0):
- self._p0 = self._startPoint = p0
-
- def _lineTo(self, p1):
- x0, y0 = self._p0
- x1, y1 = p1
- self.value -= (x1 - x0) * (y1 + y0) * .5
- self._p0 = p1
-
- def _qCurveToOne(self, p1, p2):
- # https://github.com/Pomax/bezierinfo/issues/44
- p0 = self._p0
- x0, y0 = p0[0], p0[1]
- x1, y1 = p1[0] - x0, p1[1] - y0
- x2, y2 = p2[0] - x0, p2[1] - y0
- self.value -= (x2 * y1 - x1 * y2) / 3
- self._lineTo(p2)
- self._p0 = p2
-
- def _curveToOne(self, p1, p2, p3):
- # https://github.com/Pomax/bezierinfo/issues/44
- p0 = self._p0
- x0, y0 = p0[0], p0[1]
- x1, y1 = p1[0] - x0, p1[1] - y0
- x2, y2 = p2[0] - x0, p2[1] - y0
- x3, y3 = p3[0] - x0, p3[1] - y0
- self.value -= (
- x1 * ( - y2 - y3) +
- x2 * (y1 - 2*y3) +
- x3 * (y1 + 2*y2 )
- ) * 0.15
- self._lineTo(p3)
- self._p0 = p3
-
- def _closePath(self):
- self._lineTo(self._startPoint)
- del self._p0, self._startPoint
-
- def _endPath(self):
- if self._p0 != self._startPoint:
- # Area is not defined for open contours.
- raise NotImplementedError
- del self._p0, self._startPoint
+ def __init__(self, glyphset=None):
+ BasePen.__init__(self, glyphset)
+ self.value = 0
+
+ def _moveTo(self, p0):
+ self._p0 = self._startPoint = p0
+
+ def _lineTo(self, p1):
+ x0, y0 = self._p0
+ x1, y1 = p1
+ self.value -= (x1 - x0) * (y1 + y0) * 0.5
+ self._p0 = p1
+
+ def _qCurveToOne(self, p1, p2):
+ # https://github.com/Pomax/bezierinfo/issues/44
+ p0 = self._p0
+ x0, y0 = p0[0], p0[1]
+ x1, y1 = p1[0] - x0, p1[1] - y0
+ x2, y2 = p2[0] - x0, p2[1] - y0
+ self.value -= (x2 * y1 - x1 * y2) / 3
+ self._lineTo(p2)
+ self._p0 = p2
+
+ def _curveToOne(self, p1, p2, p3):
+ # https://github.com/Pomax/bezierinfo/issues/44
+ p0 = self._p0
+ x0, y0 = p0[0], p0[1]
+ x1, y1 = p1[0] - x0, p1[1] - y0
+ x2, y2 = p2[0] - x0, p2[1] - y0
+ x3, y3 = p3[0] - x0, p3[1] - y0
+ self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15
+ self._lineTo(p3)
+ self._p0 = p3
+
+ def _closePath(self):
+ self._lineTo(self._startPoint)
+ del self._p0, self._startPoint
+
+ def _endPath(self):
+ if self._p0 != self._startPoint:
+ # Area is not defined for open contours.
+ raise NotImplementedError
+ del self._p0, self._startPoint
diff --git a/Lib/fontTools/pens/basePen.py b/Lib/fontTools/pens/basePen.py
index f981f806..ac8abd40 100644
--- a/Lib/fontTools/pens/basePen.py
+++ b/Lib/fontTools/pens/basePen.py
@@ -36,376 +36,409 @@ Coordinates are usually expressed as (x, y) tuples, but generally any
sequence of length 2 will do.
"""
-from typing import Tuple
+from typing import Tuple, Dict
from fontTools.misc.loggingTools import LogMixin
+from fontTools.misc.transform import DecomposedTransform
-__all__ = ["AbstractPen", "NullPen", "BasePen", "PenError",
- "decomposeSuperBezierSegment", "decomposeQuadraticSegment"]
+__all__ = [
+ "AbstractPen",
+ "NullPen",
+ "BasePen",
+ "PenError",
+ "decomposeSuperBezierSegment",
+ "decomposeQuadraticSegment",
+]
class PenError(Exception):
- """Represents an error during penning."""
+ """Represents an error during penning."""
+
class OpenContourError(PenError):
- pass
+ pass
class AbstractPen:
-
- def moveTo(self, pt: Tuple[float, float]) -> None:
- """Begin a new sub path, set the current point to 'pt'. You must
- end each sub path with a call to pen.closePath() or pen.endPath().
- """
- raise NotImplementedError
-
- def lineTo(self, pt: Tuple[float, float]) -> None:
- """Draw a straight line from the current point to 'pt'."""
- raise NotImplementedError
-
- def curveTo(self, *points: Tuple[float, float]) -> None:
- """Draw a cubic bezier with an arbitrary number of control points.
-
- The last point specified is on-curve, all others are off-curve
- (control) points. If the number of control points is > 2, the
- segment is split into multiple bezier segments. This works
- like this:
-
- Let n be the number of control points (which is the number of
- arguments to this call minus 1). If n==2, a plain vanilla cubic
- bezier is drawn. If n==1, we fall back to a quadratic segment and
- if n==0 we draw a straight line. It gets interesting when n>2:
- n-1 PostScript-style cubic segments will be drawn as if it were
- one curve. See decomposeSuperBezierSegment().
-
- The conversion algorithm used for n>2 is inspired by NURB
- splines, and is conceptually equivalent to the TrueType "implied
- points" principle. See also decomposeQuadraticSegment().
- """
- raise NotImplementedError
-
- def qCurveTo(self, *points: Tuple[float, float]) -> None:
- """Draw a whole string of quadratic curve segments.
-
- The last point specified is on-curve, all others are off-curve
- points.
-
- This method implements TrueType-style curves, breaking up curves
- using 'implied points': between each two consequtive off-curve points,
- there is one implied point exactly in the middle between them. See
- also decomposeQuadraticSegment().
-
- The last argument (normally the on-curve point) may be None.
- This is to support contours that have NO on-curve points (a rarely
- seen feature of TrueType outlines).
- """
- raise NotImplementedError
-
- def closePath(self) -> None:
- """Close the current sub path. You must call either pen.closePath()
- or pen.endPath() after each sub path.
- """
- pass
-
- def endPath(self) -> None:
- """End the current sub path, but don't close it. You must call
- either pen.closePath() or pen.endPath() after each sub path.
- """
- pass
-
- def addComponent(
- self,
- glyphName: str,
- transformation: Tuple[float, float, float, float, float, float]
- ) -> None:
- """Add a sub glyph. The 'transformation' argument must be a 6-tuple
- containing an affine transformation, or a Transform object from the
- fontTools.misc.transform module. More precisely: it should be a
- sequence containing 6 numbers.
- """
- raise NotImplementedError
+ def moveTo(self, pt: Tuple[float, float]) -> None:
+ """Begin a new sub path, set the current point to 'pt'. You must
+ end each sub path with a call to pen.closePath() or pen.endPath().
+ """
+ raise NotImplementedError
+
+ def lineTo(self, pt: Tuple[float, float]) -> None:
+ """Draw a straight line from the current point to 'pt'."""
+ raise NotImplementedError
+
+ def curveTo(self, *points: Tuple[float, float]) -> None:
+ """Draw a cubic bezier with an arbitrary number of control points.
+
+ The last point specified is on-curve, all others are off-curve
+ (control) points. If the number of control points is > 2, the
+ segment is split into multiple bezier segments. This works
+ like this:
+
+ Let n be the number of control points (which is the number of
+ arguments to this call minus 1). If n==2, a plain vanilla cubic
+ bezier is drawn. If n==1, we fall back to a quadratic segment and
+ if n==0 we draw a straight line. It gets interesting when n>2:
+ n-1 PostScript-style cubic segments will be drawn as if it were
+ one curve. See decomposeSuperBezierSegment().
+
+ The conversion algorithm used for n>2 is inspired by NURB
+ splines, and is conceptually equivalent to the TrueType "implied
+ points" principle. See also decomposeQuadraticSegment().
+ """
+ raise NotImplementedError
+
+ def qCurveTo(self, *points: Tuple[float, float]) -> None:
+ """Draw a whole string of quadratic curve segments.
+
+ The last point specified is on-curve, all others are off-curve
+ points.
+
+ This method implements TrueType-style curves, breaking up curves
+ using 'implied points': between each two consequtive off-curve points,
+ there is one implied point exactly in the middle between them. See
+ also decomposeQuadraticSegment().
+
+ The last argument (normally the on-curve point) may be None.
+ This is to support contours that have NO on-curve points (a rarely
+ seen feature of TrueType outlines).
+ """
+ raise NotImplementedError
+
+ def closePath(self) -> None:
+ """Close the current sub path. You must call either pen.closePath()
+ or pen.endPath() after each sub path.
+ """
+ pass
+
+ def endPath(self) -> None:
+ """End the current sub path, but don't close it. You must call
+ either pen.closePath() or pen.endPath() after each sub path.
+ """
+ pass
+
+ def addComponent(
+ self,
+ glyphName: str,
+ transformation: Tuple[float, float, float, float, float, float],
+ ) -> None:
+ """Add a sub glyph. The 'transformation' argument must be a 6-tuple
+ containing an affine transformation, or a Transform object from the
+ fontTools.misc.transform module. More precisely: it should be a
+ sequence containing 6 numbers.
+ """
+ raise NotImplementedError
+
+ def addVarComponent(
+ self,
+ glyphName: str,
+ transformation: DecomposedTransform,
+ location: Dict[str, float],
+ ) -> None:
+ """Add a VarComponent sub glyph. The 'transformation' argument
+ must be a DecomposedTransform from the fontTools.misc.transform module,
+ and the 'location' argument must be a dictionary mapping axis tags
+ to their locations.
+ """
+ # GlyphSet decomposes for us
+ raise AttributeError
class NullPen(AbstractPen):
- """A pen that does nothing.
- """
+ """A pen that does nothing."""
- def moveTo(self, pt):
- pass
+ def moveTo(self, pt):
+ pass
- def lineTo(self, pt):
- pass
+ def lineTo(self, pt):
+ pass
- def curveTo(self, *points):
- pass
+ def curveTo(self, *points):
+ pass
- def qCurveTo(self, *points):
- pass
+ def qCurveTo(self, *points):
+ pass
- def closePath(self):
- pass
+ def closePath(self):
+ pass
- def endPath(self):
- pass
+ def endPath(self):
+ pass
- def addComponent(self, glyphName, transformation):
- pass
+ def addComponent(self, glyphName, transformation):
+ pass
+
+ def addVarComponent(self, glyphName, transformation, location):
+ pass
class LoggingPen(LogMixin, AbstractPen):
- """A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)
- """
- pass
+ """A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)"""
+
+ pass
class MissingComponentError(KeyError):
- """Indicates a component pointing to a non-existent glyph in the glyphset."""
+ """Indicates a component pointing to a non-existent glyph in the glyphset."""
class DecomposingPen(LoggingPen):
- """ Implements a 'addComponent' method that decomposes components
- (i.e. draws them onto self as simple contours).
- It can also be used as a mixin class (e.g. see ContourRecordingPen).
-
- You must override moveTo, lineTo, curveTo and qCurveTo. You may
- additionally override closePath, endPath and addComponent.
-
- By default a warning message is logged when a base glyph is missing;
- set the class variable ``skipMissingComponents`` to False if you want
- to raise a :class:`MissingComponentError` exception.
- """
-
- skipMissingComponents = True
-
- def __init__(self, glyphSet):
- """ Takes a single 'glyphSet' argument (dict), in which the glyphs
- that are referenced as components are looked up by their name.
- """
- super(DecomposingPen, self).__init__()
- self.glyphSet = glyphSet
-
- def addComponent(self, glyphName, transformation):
- """ Transform the points of the base glyph and draw it onto self.
- """
- from fontTools.pens.transformPen import TransformPen
- try:
- glyph = self.glyphSet[glyphName]
- except KeyError:
- if not self.skipMissingComponents:
- raise MissingComponentError(glyphName)
- self.log.warning(
- "glyph '%s' is missing from glyphSet; skipped" % glyphName)
- else:
- tPen = TransformPen(self, transformation)
- glyph.draw(tPen)
+ """Implements a 'addComponent' method that decomposes components
+ (i.e. draws them onto self as simple contours).
+ It can also be used as a mixin class (e.g. see ContourRecordingPen).
+
+ You must override moveTo, lineTo, curveTo and qCurveTo. You may
+ additionally override closePath, endPath and addComponent.
+
+ By default a warning message is logged when a base glyph is missing;
+ set the class variable ``skipMissingComponents`` to False if you want
+ to raise a :class:`MissingComponentError` exception.
+ """
+
+ skipMissingComponents = True
+
+ def __init__(self, glyphSet):
+ """Takes a single 'glyphSet' argument (dict), in which the glyphs
+ that are referenced as components are looked up by their name.
+ """
+ super(DecomposingPen, self).__init__()
+ self.glyphSet = glyphSet
+
+ def addComponent(self, glyphName, transformation):
+ """Transform the points of the base glyph and draw it onto self."""
+ from fontTools.pens.transformPen import TransformPen
+
+ try:
+ glyph = self.glyphSet[glyphName]
+ except KeyError:
+ if not self.skipMissingComponents:
+ raise MissingComponentError(glyphName)
+ self.log.warning("glyph '%s' is missing from glyphSet; skipped" % glyphName)
+ else:
+ tPen = TransformPen(self, transformation)
+ glyph.draw(tPen)
+
+ def addVarComponent(self, glyphName, transformation, location):
+ # GlyphSet decomposes for us
+ raise AttributeError
class BasePen(DecomposingPen):
- """Base class for drawing pens. You must override _moveTo, _lineTo and
- _curveToOne. You may additionally override _closePath, _endPath,
- addComponent and/or _qCurveToOne. You should not override any other
- methods.
- """
-
- def __init__(self, glyphSet=None):
- super(BasePen, self).__init__(glyphSet)
- self.__currentPoint = None
-
- # must override
-
- def _moveTo(self, pt):
- raise NotImplementedError
-
- def _lineTo(self, pt):
- raise NotImplementedError
-
- def _curveToOne(self, pt1, pt2, pt3):
- raise NotImplementedError
-
- # may override
-
- def _closePath(self):
- pass
-
- def _endPath(self):
- pass
-
- def _qCurveToOne(self, pt1, pt2):
- """This method implements the basic quadratic curve type. The
- default implementation delegates the work to the cubic curve
- function. Optionally override with a native implementation.
- """
- pt0x, pt0y = self.__currentPoint
- pt1x, pt1y = pt1
- pt2x, pt2y = pt2
- mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
- mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
- mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
- mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
- self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)
-
- # don't override
-
- def _getCurrentPoint(self):
- """Return the current point. This is not part of the public
- interface, yet is useful for subclasses.
- """
- return self.__currentPoint
-
- def closePath(self):
- self._closePath()
- self.__currentPoint = None
-
- def endPath(self):
- self._endPath()
- self.__currentPoint = None
-
- def moveTo(self, pt):
- self._moveTo(pt)
- self.__currentPoint = pt
-
- def lineTo(self, pt):
- self._lineTo(pt)
- self.__currentPoint = pt
-
- def curveTo(self, *points):
- n = len(points) - 1 # 'n' is the number of control points
- assert n >= 0
- if n == 2:
- # The common case, we have exactly two BCP's, so this is a standard
- # cubic bezier. Even though decomposeSuperBezierSegment() handles
- # this case just fine, we special-case it anyway since it's so
- # common.
- self._curveToOne(*points)
- self.__currentPoint = points[-1]
- elif n > 2:
- # n is the number of control points; split curve into n-1 cubic
- # bezier segments. The algorithm used here is inspired by NURB
- # splines and the TrueType "implied point" principle, and ensures
- # the smoothest possible connection between two curve segments,
- # with no disruption in the curvature. It is practical since it
- # allows one to construct multiple bezier segments with a much
- # smaller amount of points.
- _curveToOne = self._curveToOne
- for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
- _curveToOne(pt1, pt2, pt3)
- self.__currentPoint = pt3
- elif n == 1:
- self.qCurveTo(*points)
- elif n == 0:
- self.lineTo(points[0])
- else:
- raise AssertionError("can't get there from here")
-
- def qCurveTo(self, *points):
- n = len(points) - 1 # 'n' is the number of control points
- assert n >= 0
- if points[-1] is None:
- # Special case for TrueType quadratics: it is possible to
- # define a contour with NO on-curve points. BasePen supports
- # this by allowing the final argument (the expected on-curve
- # point) to be None. We simulate the feature by making the implied
- # on-curve point between the last and the first off-curve points
- # explicit.
- x, y = points[-2] # last off-curve point
- nx, ny = points[0] # first off-curve point
- impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
- self.__currentPoint = impliedStartPoint
- self._moveTo(impliedStartPoint)
- points = points[:-1] + (impliedStartPoint,)
- if n > 0:
- # Split the string of points into discrete quadratic curve
- # segments. Between any two consecutive off-curve points
- # there's an implied on-curve point exactly in the middle.
- # This is where the segment splits.
- _qCurveToOne = self._qCurveToOne
- for pt1, pt2 in decomposeQuadraticSegment(points):
- _qCurveToOne(pt1, pt2)
- self.__currentPoint = pt2
- else:
- self.lineTo(points[0])
+ """Base class for drawing pens. You must override _moveTo, _lineTo and
+ _curveToOne. You may additionally override _closePath, _endPath,
+ addComponent, addVarComponent, and/or _qCurveToOne. You should not
+ override any other methods.
+ """
+
+ def __init__(self, glyphSet=None):
+ super(BasePen, self).__init__(glyphSet)
+ self.__currentPoint = None
+
+ # must override
+
+ def _moveTo(self, pt):
+ raise NotImplementedError
+
+ def _lineTo(self, pt):
+ raise NotImplementedError
+
+ def _curveToOne(self, pt1, pt2, pt3):
+ raise NotImplementedError
+
+ # may override
+
+ def _closePath(self):
+ pass
+
+ def _endPath(self):
+ pass
+
+ def _qCurveToOne(self, pt1, pt2):
+ """This method implements the basic quadratic curve type. The
+ default implementation delegates the work to the cubic curve
+ function. Optionally override with a native implementation.
+ """
+ pt0x, pt0y = self.__currentPoint
+ pt1x, pt1y = pt1
+ pt2x, pt2y = pt2
+ mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
+ mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
+ mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
+ mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
+ self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)
+
+ # don't override
+
+ def _getCurrentPoint(self):
+ """Return the current point. This is not part of the public
+ interface, yet is useful for subclasses.
+ """
+ return self.__currentPoint
+
+ def closePath(self):
+ self._closePath()
+ self.__currentPoint = None
+
+ def endPath(self):
+ self._endPath()
+ self.__currentPoint = None
+
+ def moveTo(self, pt):
+ self._moveTo(pt)
+ self.__currentPoint = pt
+
+ def lineTo(self, pt):
+ self._lineTo(pt)
+ self.__currentPoint = pt
+
+ def curveTo(self, *points):
+ n = len(points) - 1 # 'n' is the number of control points
+ assert n >= 0
+ if n == 2:
+ # The common case, we have exactly two BCP's, so this is a standard
+ # cubic bezier. Even though decomposeSuperBezierSegment() handles
+ # this case just fine, we special-case it anyway since it's so
+ # common.
+ self._curveToOne(*points)
+ self.__currentPoint = points[-1]
+ elif n > 2:
+ # n is the number of control points; split curve into n-1 cubic
+ # bezier segments. The algorithm used here is inspired by NURB
+ # splines and the TrueType "implied point" principle, and ensures
+ # the smoothest possible connection between two curve segments,
+ # with no disruption in the curvature. It is practical since it
+ # allows one to construct multiple bezier segments with a much
+ # smaller amount of points.
+ _curveToOne = self._curveToOne
+ for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
+ _curveToOne(pt1, pt2, pt3)
+ self.__currentPoint = pt3
+ elif n == 1:
+ self.qCurveTo(*points)
+ elif n == 0:
+ self.lineTo(points[0])
+ else:
+ raise AssertionError("can't get there from here")
+
+ def qCurveTo(self, *points):
+ n = len(points) - 1 # 'n' is the number of control points
+ assert n >= 0
+ if points[-1] is None:
+ # Special case for TrueType quadratics: it is possible to
+ # define a contour with NO on-curve points. BasePen supports
+ # this by allowing the final argument (the expected on-curve
+ # point) to be None. We simulate the feature by making the implied
+ # on-curve point between the last and the first off-curve points
+ # explicit.
+ x, y = points[-2] # last off-curve point
+ nx, ny = points[0] # first off-curve point
+ impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
+ self.__currentPoint = impliedStartPoint
+ self._moveTo(impliedStartPoint)
+ points = points[:-1] + (impliedStartPoint,)
+ if n > 0:
+ # Split the string of points into discrete quadratic curve
+ # segments. Between any two consecutive off-curve points
+ # there's an implied on-curve point exactly in the middle.
+ # This is where the segment splits.
+ _qCurveToOne = self._qCurveToOne
+ for pt1, pt2 in decomposeQuadraticSegment(points):
+ _qCurveToOne(pt1, pt2)
+ self.__currentPoint = pt2
+ else:
+ self.lineTo(points[0])
def decomposeSuperBezierSegment(points):
- """Split the SuperBezier described by 'points' into a list of regular
- bezier segments. The 'points' argument must be a sequence with length
- 3 or greater, containing (x, y) coordinates. The last point is the
- destination on-curve point, the rest of the points are off-curve points.
- The start point should not be supplied.
-
- This function returns a list of (pt1, pt2, pt3) tuples, which each
- specify a regular curveto-style bezier segment.
- """
- n = len(points) - 1
- assert n > 1
- bezierSegments = []
- pt1, pt2, pt3 = points[0], None, None
- for i in range(2, n+1):
- # calculate points in between control points.
- nDivisions = min(i, 3, n-i+2)
- for j in range(1, nDivisions):
- factor = j / nDivisions
- temp1 = points[i-1]
- temp2 = points[i-2]
- temp = (temp2[0] + factor * (temp1[0] - temp2[0]),
- temp2[1] + factor * (temp1[1] - temp2[1]))
- if pt2 is None:
- pt2 = temp
- else:
- pt3 = (0.5 * (pt2[0] + temp[0]),
- 0.5 * (pt2[1] + temp[1]))
- bezierSegments.append((pt1, pt2, pt3))
- pt1, pt2, pt3 = temp, None, None
- bezierSegments.append((pt1, points[-2], points[-1]))
- return bezierSegments
+ """Split the SuperBezier described by 'points' into a list of regular
+ bezier segments. The 'points' argument must be a sequence with length
+ 3 or greater, containing (x, y) coordinates. The last point is the
+ destination on-curve point, the rest of the points are off-curve points.
+ The start point should not be supplied.
+
+ This function returns a list of (pt1, pt2, pt3) tuples, which each
+ specify a regular curveto-style bezier segment.
+ """
+ n = len(points) - 1
+ assert n > 1
+ bezierSegments = []
+ pt1, pt2, pt3 = points[0], None, None
+ for i in range(2, n + 1):
+ # calculate points in between control points.
+ nDivisions = min(i, 3, n - i + 2)
+ for j in range(1, nDivisions):
+ factor = j / nDivisions
+ temp1 = points[i - 1]
+ temp2 = points[i - 2]
+ temp = (
+ temp2[0] + factor * (temp1[0] - temp2[0]),
+ temp2[1] + factor * (temp1[1] - temp2[1]),
+ )
+ if pt2 is None:
+ pt2 = temp
+ else:
+ pt3 = (0.5 * (pt2[0] + temp[0]), 0.5 * (pt2[1] + temp[1]))
+ bezierSegments.append((pt1, pt2, pt3))
+ pt1, pt2, pt3 = temp, None, None
+ bezierSegments.append((pt1, points[-2], points[-1]))
+ return bezierSegments
def decomposeQuadraticSegment(points):
- """Split the quadratic curve segment described by 'points' into a list
- of "atomic" quadratic segments. The 'points' argument must be a sequence
- with length 2 or greater, containing (x, y) coordinates. The last point
- is the destination on-curve point, the rest of the points are off-curve
- points. The start point should not be supplied.
-
- This function returns a list of (pt1, pt2) tuples, which each specify a
- plain quadratic bezier segment.
- """
- n = len(points) - 1
- assert n > 0
- quadSegments = []
- for i in range(n - 1):
- x, y = points[i]
- nx, ny = points[i+1]
- impliedPt = (0.5 * (x + nx), 0.5 * (y + ny))
- quadSegments.append((points[i], impliedPt))
- quadSegments.append((points[-2], points[-1]))
- return quadSegments
+ """Split the quadratic curve segment described by 'points' into a list
+ of "atomic" quadratic segments. The 'points' argument must be a sequence
+ with length 2 or greater, containing (x, y) coordinates. The last point
+ is the destination on-curve point, the rest of the points are off-curve
+ points. The start point should not be supplied.
+
+ This function returns a list of (pt1, pt2) tuples, which each specify a
+ plain quadratic bezier segment.
+ """
+ n = len(points) - 1
+ assert n > 0
+ quadSegments = []
+ for i in range(n - 1):
+ x, y = points[i]
+ nx, ny = points[i + 1]
+ impliedPt = (0.5 * (x + nx), 0.5 * (y + ny))
+ quadSegments.append((points[i], impliedPt))
+ quadSegments.append((points[-2], points[-1]))
+ return quadSegments
class _TestPen(BasePen):
- """Test class that prints PostScript to stdout."""
- def _moveTo(self, pt):
- print("%s %s moveto" % (pt[0], pt[1]))
- def _lineTo(self, pt):
- print("%s %s lineto" % (pt[0], pt[1]))
- def _curveToOne(self, bcp1, bcp2, pt):
- print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1],
- bcp2[0], bcp2[1], pt[0], pt[1]))
- def _closePath(self):
- print("closepath")
+ """Test class that prints PostScript to stdout."""
+
+ def _moveTo(self, pt):
+ print("%s %s moveto" % (pt[0], pt[1]))
+
+ def _lineTo(self, pt):
+ print("%s %s lineto" % (pt[0], pt[1]))
+
+ def _curveToOne(self, bcp1, bcp2, pt):
+ print(
+ "%s %s %s %s %s %s curveto"
+ % (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
+ )
+
+ def _closePath(self):
+ print("closepath")
if __name__ == "__main__":
- pen = _TestPen(None)
- pen.moveTo((0, 0))
- pen.lineTo((0, 100))
- pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
- pen.closePath()
-
- pen = _TestPen(None)
- # testing the "no on-curve point" scenario
- pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None)
- pen.closePath()
+ pen = _TestPen(None)
+ pen.moveTo((0, 0))
+ pen.lineTo((0, 100))
+ pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
+ pen.closePath()
+
+ pen = _TestPen(None)
+ # testing the "no on-curve point" scenario
+ pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None)
+ pen.closePath()
diff --git a/Lib/fontTools/pens/boundsPen.py b/Lib/fontTools/pens/boundsPen.py
index 227c22f5..d833cc89 100644
--- a/Lib/fontTools/pens/boundsPen.py
+++ b/Lib/fontTools/pens/boundsPen.py
@@ -8,91 +8,93 @@ __all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen):
- """Pen to calculate the "control bounds" of a shape. This is the
- bounding box of all control points, so may be larger than the
- actual bounding box if there are curves that don't have points
- on their extremes.
-
- When the shape has been drawn, the bounds are available as the
- ``bounds`` attribute of the pen object. It's a 4-tuple::
-
- (xMin, yMin, xMax, yMax).
-
- If ``ignoreSinglePoints`` is True, single points are ignored.
- """
-
- def __init__(self, glyphSet, ignoreSinglePoints=False):
- BasePen.__init__(self, glyphSet)
- self.ignoreSinglePoints = ignoreSinglePoints
- self.init()
-
- def init(self):
- self.bounds = None
- self._start = None
-
- def _moveTo(self, pt):
- self._start = pt
- if not self.ignoreSinglePoints:
- self._addMoveTo()
-
- def _addMoveTo(self):
- if self._start is None:
- return
- bounds = self.bounds
- if bounds:
- self.bounds = updateBounds(bounds, self._start)
- else:
- x, y = self._start
- self.bounds = (x, y, x, y)
- self._start = None
-
- def _lineTo(self, pt):
- self._addMoveTo()
- self.bounds = updateBounds(self.bounds, pt)
-
- def _curveToOne(self, bcp1, bcp2, pt):
- self._addMoveTo()
- bounds = self.bounds
- bounds = updateBounds(bounds, bcp1)
- bounds = updateBounds(bounds, bcp2)
- bounds = updateBounds(bounds, pt)
- self.bounds = bounds
-
- def _qCurveToOne(self, bcp, pt):
- self._addMoveTo()
- bounds = self.bounds
- bounds = updateBounds(bounds, bcp)
- bounds = updateBounds(bounds, pt)
- self.bounds = bounds
+ """Pen to calculate the "control bounds" of a shape. This is the
+ bounding box of all control points, so may be larger than the
+ actual bounding box if there are curves that don't have points
+ on their extremes.
+
+ When the shape has been drawn, the bounds are available as the
+ ``bounds`` attribute of the pen object. It's a 4-tuple::
+
+ (xMin, yMin, xMax, yMax).
+
+ If ``ignoreSinglePoints`` is True, single points are ignored.
+ """
+
+ def __init__(self, glyphSet, ignoreSinglePoints=False):
+ BasePen.__init__(self, glyphSet)
+ self.ignoreSinglePoints = ignoreSinglePoints
+ self.init()
+
+ def init(self):
+ self.bounds = None
+ self._start = None
+
+ def _moveTo(self, pt):
+ self._start = pt
+ if not self.ignoreSinglePoints:
+ self._addMoveTo()
+
+ def _addMoveTo(self):
+ if self._start is None:
+ return
+ bounds = self.bounds
+ if bounds:
+ self.bounds = updateBounds(bounds, self._start)
+ else:
+ x, y = self._start
+ self.bounds = (x, y, x, y)
+ self._start = None
+
+ def _lineTo(self, pt):
+ self._addMoveTo()
+ self.bounds = updateBounds(self.bounds, pt)
+
+ def _curveToOne(self, bcp1, bcp2, pt):
+ self._addMoveTo()
+ bounds = self.bounds
+ bounds = updateBounds(bounds, bcp1)
+ bounds = updateBounds(bounds, bcp2)
+ bounds = updateBounds(bounds, pt)
+ self.bounds = bounds
+
+ def _qCurveToOne(self, bcp, pt):
+ self._addMoveTo()
+ bounds = self.bounds
+ bounds = updateBounds(bounds, bcp)
+ bounds = updateBounds(bounds, pt)
+ self.bounds = bounds
class BoundsPen(ControlBoundsPen):
- """Pen to calculate the bounds of a shape. It calculates the
- correct bounds even when the shape contains curves that don't
- have points on their extremes. This is somewhat slower to compute
- than the "control bounds".
-
- When the shape has been drawn, the bounds are available as the
- ``bounds`` attribute of the pen object. It's a 4-tuple::
-
- (xMin, yMin, xMax, yMax)
- """
-
- def _curveToOne(self, bcp1, bcp2, pt):
- self._addMoveTo()
- bounds = self.bounds
- bounds = updateBounds(bounds, pt)
- if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
- bounds = unionRect(bounds, calcCubicBounds(
- self._getCurrentPoint(), bcp1, bcp2, pt))
- self.bounds = bounds
-
- def _qCurveToOne(self, bcp, pt):
- self._addMoveTo()
- bounds = self.bounds
- bounds = updateBounds(bounds, pt)
- if not pointInRect(bcp, bounds):
- bounds = unionRect(bounds, calcQuadraticBounds(
- self._getCurrentPoint(), bcp, pt))
- self.bounds = bounds
+ """Pen to calculate the bounds of a shape. It calculates the
+ correct bounds even when the shape contains curves that don't
+ have points on their extremes. This is somewhat slower to compute
+ than the "control bounds".
+
+ When the shape has been drawn, the bounds are available as the
+ ``bounds`` attribute of the pen object. It's a 4-tuple::
+
+ (xMin, yMin, xMax, yMax)
+ """
+
+ def _curveToOne(self, bcp1, bcp2, pt):
+ self._addMoveTo()
+ bounds = self.bounds
+ bounds = updateBounds(bounds, pt)
+ if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
+ bounds = unionRect(
+ bounds, calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt)
+ )
+ self.bounds = bounds
+
+ def _qCurveToOne(self, bcp, pt):
+ self._addMoveTo()
+ bounds = self.bounds
+ bounds = updateBounds(bounds, pt)
+ if not pointInRect(bcp, bounds):
+ bounds = unionRect(
+ bounds, calcQuadraticBounds(self._getCurrentPoint(), bcp, pt)
+ )
+ self.bounds = bounds
diff --git a/Lib/fontTools/pens/cocoaPen.py b/Lib/fontTools/pens/cocoaPen.py
index 67482b4d..5369c309 100644
--- a/Lib/fontTools/pens/cocoaPen.py
+++ b/Lib/fontTools/pens/cocoaPen.py
@@ -5,22 +5,22 @@ __all__ = ["CocoaPen"]
class CocoaPen(BasePen):
+ def __init__(self, glyphSet, path=None):
+ BasePen.__init__(self, glyphSet)
+ if path is None:
+ from AppKit import NSBezierPath
- def __init__(self, glyphSet, path=None):
- BasePen.__init__(self, glyphSet)
- if path is None:
- from AppKit import NSBezierPath
- path = NSBezierPath.bezierPath()
- self.path = path
+ path = NSBezierPath.bezierPath()
+ self.path = path
- def _moveTo(self, p):
- self.path.moveToPoint_(p)
+ def _moveTo(self, p):
+ self.path.moveToPoint_(p)
- def _lineTo(self, p):
- self.path.lineToPoint_(p)
+ def _lineTo(self, p):
+ self.path.lineToPoint_(p)
- def _curveToOne(self, p1, p2, p3):
- self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)
+ def _curveToOne(self, p1, p2, p3):
+ self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)
- def _closePath(self):
- self.path.closePath()
+ def _closePath(self):
+ self.path.closePath()
diff --git a/Lib/fontTools/pens/cu2quPen.py b/Lib/fontTools/pens/cu2quPen.py
index 3c4ceae9..5730b325 100644
--- a/Lib/fontTools/pens/cu2quPen.py
+++ b/Lib/fontTools/pens/cu2quPen.py
@@ -12,15 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from fontTools.cu2qu import curve_to_quadratic
-from fontTools.pens.basePen import AbstractPen, decomposeSuperBezierSegment
+import operator
+from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic
+from fontTools.pens.basePen import decomposeSuperBezierSegment
+from fontTools.pens.filterPen import FilterPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.pointPen import BasePointToSegmentPen
from fontTools.pens.pointPen import ReverseContourPointPen
-class Cu2QuPen(AbstractPen):
- """ A filter pen to convert cubic bezier curves to quadratic b-splines
+class Cu2QuPen(FilterPen):
+ """A filter pen to convert cubic bezier curves to quadratic b-splines
using the FontTools SegmentPen protocol.
Args:
@@ -31,114 +33,56 @@ class Cu2QuPen(AbstractPen):
value equal, or close to UPEM / 1000.
reverse_direction: flip the contours' direction but keep starting point.
stats: a dictionary counting the point numbers of quadratic segments.
- ignore_single_points: don't emit contours containing only a single point
-
- NOTE: The "ignore_single_points" argument is deprecated since v1.3.0,
- which dropped Robofab subpport. It's no longer needed to special-case
- UFO2-style anchors (aka "named points") when using ufoLib >= 2.0,
- as these are no longer drawn onto pens as single-point contours,
- but are handled separately as anchors.
+ all_quadratic: if True (default), only quadratic b-splines are generated.
+ if False, quadratic curves or cubic curves are generated depending
+ on which one is more economical.
"""
- def __init__(self, other_pen, max_err, reverse_direction=False,
- stats=None, ignore_single_points=False):
+ def __init__(
+ self,
+ other_pen,
+ max_err,
+ reverse_direction=False,
+ stats=None,
+ all_quadratic=True,
+ ):
if reverse_direction:
- self.pen = ReverseContourPen(other_pen)
- else:
- self.pen = other_pen
+ other_pen = ReverseContourPen(other_pen)
+ super().__init__(other_pen)
self.max_err = max_err
self.stats = stats
- if ignore_single_points:
- import warnings
- warnings.warn("ignore_single_points is deprecated and "
- "will be removed in future versions",
- UserWarning, stacklevel=2)
- self.ignore_single_points = ignore_single_points
- self.start_pt = None
- self.current_pt = None
-
- def _check_contour_is_open(self):
- if self.current_pt is None:
- raise AssertionError("moveTo is required")
-
- def _check_contour_is_closed(self):
- if self.current_pt is not None:
- raise AssertionError("closePath or endPath is required")
-
- def _add_moveTo(self):
- if self.start_pt is not None:
- self.pen.moveTo(self.start_pt)
- self.start_pt = None
+ self.all_quadratic = all_quadratic
- def moveTo(self, pt):
- self._check_contour_is_closed()
- self.start_pt = self.current_pt = pt
- if not self.ignore_single_points:
- self._add_moveTo()
-
- def lineTo(self, pt):
- self._check_contour_is_open()
- self._add_moveTo()
- self.pen.lineTo(pt)
- self.current_pt = pt
-
- def qCurveTo(self, *points):
- self._check_contour_is_open()
- n = len(points)
- if n == 1:
- self.lineTo(points[0])
- elif n > 1:
- self._add_moveTo()
- self.pen.qCurveTo(*points)
- self.current_pt = points[-1]
- else:
- raise AssertionError("illegal qcurve segment point count: %d" % n)
-
- def _curve_to_quadratic(self, pt1, pt2, pt3):
+ def _convert_curve(self, pt1, pt2, pt3):
curve = (self.current_pt, pt1, pt2, pt3)
- quadratic = curve_to_quadratic(curve, self.max_err)
+ result = curve_to_quadratic(curve, self.max_err, self.all_quadratic)
if self.stats is not None:
- n = str(len(quadratic) - 2)
+ n = str(len(result) - 2)
self.stats[n] = self.stats.get(n, 0) + 1
- self.qCurveTo(*quadratic[1:])
+ if self.all_quadratic:
+ self.qCurveTo(*result[1:])
+ else:
+ if len(result) == 3:
+ self.qCurveTo(*result[1:])
+ else:
+ assert len(result) == 4
+ super().curveTo(*result[1:])
def curveTo(self, *points):
- self._check_contour_is_open()
n = len(points)
if n == 3:
# this is the most common case, so we special-case it
- self._curve_to_quadratic(*points)
+ self._convert_curve(*points)
elif n > 3:
for segment in decomposeSuperBezierSegment(points):
- self._curve_to_quadratic(*segment)
- elif n == 2:
- self.qCurveTo(*points)
- elif n == 1:
- self.lineTo(points[0])
+ self._convert_curve(*segment)
else:
- raise AssertionError("illegal curve segment point count: %d" % n)
-
- def closePath(self):
- self._check_contour_is_open()
- if self.start_pt is None:
- # if 'start_pt' is _not_ None, we are ignoring single-point paths
- self.pen.closePath()
- self.current_pt = self.start_pt = None
-
- def endPath(self):
- self._check_contour_is_open()
- if self.start_pt is None:
- self.pen.endPath()
- self.current_pt = self.start_pt = None
-
- def addComponent(self, glyphName, transformation):
- self._check_contour_is_closed()
- self.pen.addComponent(glyphName, transformation)
+ self.qCurveTo(*points)
class Cu2QuPointPen(BasePointToSegmentPen):
- """ A filter pen to convert cubic bezier curves to quadratic b-splines
- using the RoboFab PointPen protocol.
+ """A filter pen to convert cubic bezier curves to quadratic b-splines
+ using the FontTools PointPen protocol.
Args:
other_point_pen: another PointPen used to draw the transformed outline.
@@ -147,10 +91,26 @@ class Cu2QuPointPen(BasePointToSegmentPen):
value equal, or close to UPEM / 1000.
reverse_direction: reverse the winding direction of all contours.
stats: a dictionary counting the point numbers of quadratic segments.
+ all_quadratic: if True (default), only quadratic b-splines are generated.
+ if False, quadratic curves or cubic curves are generated depending
+ on which one is more economical.
"""
- def __init__(self, other_point_pen, max_err, reverse_direction=False,
- stats=None):
+ __points_required = {
+ "move": (1, operator.eq),
+ "line": (1, operator.eq),
+ "qcurve": (2, operator.ge),
+ "curve": (3, operator.eq),
+ }
+
+ def __init__(
+ self,
+ other_point_pen,
+ max_err,
+ reverse_direction=False,
+ stats=None,
+ all_quadratic=True,
+ ):
BasePointToSegmentPen.__init__(self)
if reverse_direction:
self.pen = ReverseContourPointPen(other_point_pen)
@@ -158,6 +118,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
self.pen = other_point_pen
self.max_err = max_err
self.stats = stats
+ self.all_quadratic = all_quadratic
def _flushContour(self, segments):
assert len(segments) >= 1
@@ -166,18 +127,21 @@ class Cu2QuPointPen(BasePointToSegmentPen):
prev_points = segments[-1][1]
prev_on_curve = prev_points[-1][0]
for segment_type, points in segments:
- if segment_type == 'curve':
+ if segment_type == "curve":
for sub_points in self._split_super_bezier_segments(points):
on_curve, smooth, name, kwargs = sub_points[-1]
bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
cubic = [prev_on_curve, bcp1, bcp2, on_curve]
- quad = curve_to_quadratic(cubic, self.max_err)
+ quad = curve_to_quadratic(cubic, self.max_err, self.all_quadratic)
if self.stats is not None:
n = str(len(quad) - 2)
self.stats[n] = self.stats.get(n, 0) + 1
new_points = [(pt, False, None, {}) for pt in quad[1:-1]]
new_points.append((on_curve, smooth, name, kwargs))
- new_segments.append(["qcurve", new_points])
+ if self.all_quadratic or len(new_points) == 2:
+ new_segments.append(["qcurve", new_points])
+ else:
+ new_segments.append(["curve", new_points])
prev_on_curve = sub_points[-1][0]
else:
new_segments.append([segment_type, points])
@@ -200,8 +164,9 @@ class Cu2QuPointPen(BasePointToSegmentPen):
# a "super" bezier; decompose it
on_curve, smooth, name, kwargs = points[-1]
num_sub_segments = n - 1
- for i, sub_points in enumerate(decomposeSuperBezierSegment([
- pt for pt, _, _, _ in points])):
+ for i, sub_points in enumerate(
+ decomposeSuperBezierSegment([pt for pt, _, _, _ in points])
+ ):
new_segment = []
for point in sub_points[:-1]:
new_segment.append((point, False, None, {}))
@@ -213,34 +178,32 @@ class Cu2QuPointPen(BasePointToSegmentPen):
new_segment.append((sub_points[-1], True, None, {}))
sub_segments.append(new_segment)
else:
- raise AssertionError(
- "expected 2 control points, found: %d" % n)
+ raise AssertionError("expected 2 control points, found: %d" % n)
return sub_segments
def _drawPoints(self, segments):
pen = self.pen
pen.beginPath()
last_offcurves = []
+ points_required = self.__points_required
for i, (segment_type, points) in enumerate(segments):
- if segment_type in ("move", "line"):
- assert len(points) == 1, (
- "illegal line segment point count: %d" % len(points))
- pt, smooth, name, kwargs = points[0]
- pen.addPoint(pt, segment_type, smooth, name, **kwargs)
- elif segment_type == "qcurve":
- assert len(points) >= 2, (
- "illegal qcurve segment point count: %d" % len(points))
+ if segment_type in points_required:
+ n, op = points_required[segment_type]
+ assert op(len(points), n), (
+ f"illegal {segment_type!r} segment point count: "
+ f"expected {n}, got {len(points)}"
+ )
offcurves = points[:-1]
- if offcurves:
- if i == 0:
- # any off-curve points preceding the first on-curve
- # will be appended at the end of the contour
- last_offcurves = offcurves
- else:
- for (pt, smooth, name, kwargs) in offcurves:
- pen.addPoint(pt, None, smooth, name, **kwargs)
+ if i == 0:
+ # any off-curve points preceding the first on-curve
+ # will be appended at the end of the contour
+ last_offcurves = offcurves
+ else:
+ for pt, smooth, name, kwargs in offcurves:
+ pen.addPoint(pt, None, smooth, name, **kwargs)
pt, smooth, name, kwargs = points[-1]
if pt is None:
+ assert segment_type == "qcurve"
# special quadratic contour with no on-curve points:
# we need to skip the "None" point. See also the Pen
# protocol's qCurveTo() method and fontTools.pens.basePen
@@ -248,13 +211,115 @@ class Cu2QuPointPen(BasePointToSegmentPen):
else:
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
else:
- # 'curve' segments must have been converted to 'qcurve' by now
- raise AssertionError(
- "unexpected segment type: %r" % segment_type)
- for (pt, smooth, name, kwargs) in last_offcurves:
+ raise AssertionError("unexpected segment type: %r" % segment_type)
+ for pt, smooth, name, kwargs in last_offcurves:
pen.addPoint(pt, None, smooth, name, **kwargs)
pen.endPath()
def addComponent(self, baseGlyphName, transformation):
assert self.currentPath is None
self.pen.addComponent(baseGlyphName, transformation)
+
+
+class Cu2QuMultiPen:
+ """A filter multi-pen to convert cubic bezier curves to quadratic b-splines
+ in a interpolation-compatible manner, using the FontTools SegmentPen protocol.
+
+ Args:
+
+ other_pens: list of SegmentPens used to draw the transformed outlines.
+ max_err: maximum approximation error in font units. For optimal results,
+ if you know the UPEM of the font, we recommend setting this to a
+ value equal, or close to UPEM / 1000.
+ reverse_direction: flip the contours' direction but keep starting point.
+
+ This pen does not follow the normal SegmentPen protocol. Instead, its
+ moveTo/lineTo/qCurveTo/curveTo methods take a list of tuples that are
+ arguments that would normally be passed to a SegmentPen, one item for
+ each of the pens in other_pens.
+ """
+
+ # TODO Simplify like 3e8ebcdce592fe8a59ca4c3a294cc9724351e1ce
+ # Remove start_pts and _add_moveTO
+
+ def __init__(self, other_pens, max_err, reverse_direction=False):
+ if reverse_direction:
+ other_pens = [
+ ReverseContourPen(pen, outputImpliedClosingLine=True)
+ for pen in other_pens
+ ]
+ self.pens = other_pens
+ self.max_err = max_err
+ self.start_pts = None
+ self.current_pts = None
+
+ def _check_contour_is_open(self):
+ if self.current_pts is None:
+ raise AssertionError("moveTo is required")
+
+ def _check_contour_is_closed(self):
+ if self.current_pts is not None:
+ raise AssertionError("closePath or endPath is required")
+
+ def _add_moveTo(self):
+ if self.start_pts is not None:
+ for pt, pen in zip(self.start_pts, self.pens):
+ pen.moveTo(*pt)
+ self.start_pts = None
+
+ def moveTo(self, pts):
+ self._check_contour_is_closed()
+ self.start_pts = self.current_pts = pts
+ self._add_moveTo()
+
+ def lineTo(self, pts):
+ self._check_contour_is_open()
+ self._add_moveTo()
+ for pt, pen in zip(pts, self.pens):
+ pen.lineTo(*pt)
+ self.current_pts = pts
+
+ def qCurveTo(self, pointsList):
+ self._check_contour_is_open()
+ if len(pointsList[0]) == 1:
+ self.lineTo([(points[0],) for points in pointsList])
+ return
+ self._add_moveTo()
+ current_pts = []
+ for points, pen in zip(pointsList, self.pens):
+ pen.qCurveTo(*points)
+ current_pts.append((points[-1],))
+ self.current_pts = current_pts
+
+ def _curves_to_quadratic(self, pointsList):
+ curves = []
+ for current_pt, points in zip(self.current_pts, pointsList):
+ curves.append(current_pt + points)
+ quadratics = curves_to_quadratic(curves, [self.max_err] * len(curves))
+ pointsList = []
+ for quadratic in quadratics:
+ pointsList.append(quadratic[1:])
+ self.qCurveTo(pointsList)
+
+ def curveTo(self, pointsList):
+ self._check_contour_is_open()
+ self._curves_to_quadratic(pointsList)
+
+ def closePath(self):
+ self._check_contour_is_open()
+ if self.start_pts is None:
+ for pen in self.pens:
+ pen.closePath()
+ self.current_pts = self.start_pts = None
+
+ def endPath(self):
+ self._check_contour_is_open()
+ if self.start_pts is None:
+ for pen in self.pens:
+ pen.endPath()
+ self.current_pts = self.start_pts = None
+
+ def addComponent(self, glyphName, transformations):
+ self._check_contour_is_closed()
+ for trans, pen in zip(transformations, self.pens):
+ pen.addComponent(glyphName, trans)
diff --git a/Lib/fontTools/pens/explicitClosingLinePen.py b/Lib/fontTools/pens/explicitClosingLinePen.py
new file mode 100644
index 00000000..e3c9c943
--- /dev/null
+++ b/Lib/fontTools/pens/explicitClosingLinePen.py
@@ -0,0 +1,101 @@
+from fontTools.pens.filterPen import ContourFilterPen
+
+
+class ExplicitClosingLinePen(ContourFilterPen):
+ """A filter pen that adds an explicit lineTo to the first point of each closed
+ contour if the end point of the last segment is not already the same as the first point.
+ Otherwise, it passes the contour through unchanged.
+
+ >>> from pprint import pprint
+ >>> from fontTools.pens.recordingPen import RecordingPen
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.moveTo((0, 0))
+ >>> pen.lineTo((100, 0))
+ >>> pen.lineTo((100, 100))
+ >>> pen.closePath()
+ >>> pprint(rec.value)
+ [('moveTo', ((0, 0),)),
+ ('lineTo', ((100, 0),)),
+ ('lineTo', ((100, 100),)),
+ ('lineTo', ((0, 0),)),
+ ('closePath', ())]
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.moveTo((0, 0))
+ >>> pen.lineTo((100, 0))
+ >>> pen.lineTo((100, 100))
+ >>> pen.lineTo((0, 0))
+ >>> pen.closePath()
+ >>> pprint(rec.value)
+ [('moveTo', ((0, 0),)),
+ ('lineTo', ((100, 0),)),
+ ('lineTo', ((100, 100),)),
+ ('lineTo', ((0, 0),)),
+ ('closePath', ())]
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.moveTo((0, 0))
+ >>> pen.curveTo((100, 0), (0, 100), (100, 100))
+ >>> pen.closePath()
+ >>> pprint(rec.value)
+ [('moveTo', ((0, 0),)),
+ ('curveTo', ((100, 0), (0, 100), (100, 100))),
+ ('lineTo', ((0, 0),)),
+ ('closePath', ())]
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.moveTo((0, 0))
+ >>> pen.curveTo((100, 0), (0, 100), (100, 100))
+ >>> pen.lineTo((0, 0))
+ >>> pen.closePath()
+ >>> pprint(rec.value)
+ [('moveTo', ((0, 0),)),
+ ('curveTo', ((100, 0), (0, 100), (100, 100))),
+ ('lineTo', ((0, 0),)),
+ ('closePath', ())]
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.moveTo((0, 0))
+ >>> pen.curveTo((100, 0), (0, 100), (0, 0))
+ >>> pen.closePath()
+ >>> pprint(rec.value)
+ [('moveTo', ((0, 0),)),
+ ('curveTo', ((100, 0), (0, 100), (0, 0))),
+ ('closePath', ())]
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.moveTo((0, 0))
+ >>> pen.closePath()
+ >>> pprint(rec.value)
+ [('moveTo', ((0, 0),)), ('closePath', ())]
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.closePath()
+ >>> pprint(rec.value)
+ [('closePath', ())]
+ >>> rec = RecordingPen()
+ >>> pen = ExplicitClosingLinePen(rec)
+ >>> pen.moveTo((0, 0))
+ >>> pen.lineTo((100, 0))
+ >>> pen.lineTo((100, 100))
+ >>> pen.endPath()
+ >>> pprint(rec.value)
+ [('moveTo', ((0, 0),)),
+ ('lineTo', ((100, 0),)),
+ ('lineTo', ((100, 100),)),
+ ('endPath', ())]
+ """
+
+ def filterContour(self, contour):
+ if (
+ not contour
+ or contour[0][0] != "moveTo"
+ or contour[-1][0] != "closePath"
+ or len(contour) < 3
+ ):
+ return
+ movePt = contour[0][1][0]
+ lastSeg = contour[-2][1]
+ if lastSeg and movePt != lastSeg[-1]:
+ contour[-1:] = [("lineTo", (movePt,)), ("closePath", ())]
diff --git a/Lib/fontTools/pens/filterPen.py b/Lib/fontTools/pens/filterPen.py
index 4355ba41..81423109 100644
--- a/Lib/fontTools/pens/filterPen.py
+++ b/Lib/fontTools/pens/filterPen.py
@@ -4,14 +4,13 @@ from fontTools.pens.recordingPen import RecordingPen
class _PassThruComponentsMixin(object):
-
def addComponent(self, glyphName, transformation, **kwargs):
self._outPen.addComponent(glyphName, transformation, **kwargs)
class FilterPen(_PassThruComponentsMixin, AbstractPen):
- """ Base class for pens that apply some transformation to the coordinates
+ """Base class for pens that apply some transformation to the coordinates
they receive and pass them to another pen.
You can override any of its methods. The default implementation does
@@ -57,24 +56,31 @@ class FilterPen(_PassThruComponentsMixin, AbstractPen):
def __init__(self, outPen):
self._outPen = outPen
+ self.current_pt = None
def moveTo(self, pt):
self._outPen.moveTo(pt)
+ self.current_pt = pt
def lineTo(self, pt):
self._outPen.lineTo(pt)
+ self.current_pt = pt
def curveTo(self, *points):
self._outPen.curveTo(*points)
+ self.current_pt = points[-1]
def qCurveTo(self, *points):
self._outPen.qCurveTo(*points)
+ self.current_pt = points[-1]
def closePath(self):
self._outPen.closePath()
+ self.current_pt = None
def endPath(self):
self._outPen.endPath()
+ self.current_pt = None
class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
@@ -121,7 +127,7 @@ class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen):
- """ Baseclass for point pens that apply some transformation to the
+ """Baseclass for point pens that apply some transformation to the
coordinates they receive and pass them to another point pen.
You can override any of its methods. The default implementation does
diff --git a/Lib/fontTools/pens/hashPointPen.py b/Lib/fontTools/pens/hashPointPen.py
index 9aef5d87..b82468ec 100644
--- a/Lib/fontTools/pens/hashPointPen.py
+++ b/Lib/fontTools/pens/hashPointPen.py
@@ -65,9 +65,7 @@ class HashPointPen(AbstractPointPen):
pt_type = segmentType[0]
self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}")
- def addComponent(
- self, baseGlyphName, transformation, identifier=None, **kwargs
- ):
+ def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
tr = "".join([f"{t:+}" for t in transformation])
self.data.append("[")
try:
diff --git a/Lib/fontTools/pens/momentsPen.py b/Lib/fontTools/pens/momentsPen.py
index 7cd87919..dab0d10e 100644
--- a/Lib/fontTools/pens/momentsPen.py
+++ b/Lib/fontTools/pens/momentsPen.py
@@ -1,503 +1,882 @@
from fontTools.pens.basePen import BasePen, OpenContourError
+
try:
- import cython
-except ImportError:
- # if cython not installed, use mock module with no-op decorators and types
- from fontTools.misc import cython
+ import cython
+
+ COMPILED = cython.compiled
+except (AttributeError, ImportError):
+ # if cython not installed, use mock module with no-op decorators and types
+ from fontTools.misc import cython
-if cython.compiled:
- # Yep, I'm compiled.
- COMPILED = True
-else:
- # Just a lowly interpreted script.
- COMPILED = False
+ COMPILED = False
__all__ = ["MomentsPen"]
+
class MomentsPen(BasePen):
+ def __init__(self, glyphset=None):
+ BasePen.__init__(self, glyphset)
+
+ self.area = 0
+ self.momentX = 0
+ self.momentY = 0
+ self.momentXX = 0
+ self.momentXY = 0
+ self.momentYY = 0
- def __init__(self, glyphset=None):
- BasePen.__init__(self, glyphset)
+ def _moveTo(self, p0):
+ self.__startPoint = p0
- self.area = 0
- self.momentX = 0
- self.momentY = 0
- self.momentXX = 0
- self.momentXY = 0
- self.momentYY = 0
+ def _closePath(self):
+ p0 = self._getCurrentPoint()
+ if p0 != self.__startPoint:
+ self._lineTo(self.__startPoint)
- def _moveTo(self, p0):
- self.__startPoint = p0
+ def _endPath(self):
+ p0 = self._getCurrentPoint()
+ if p0 != self.__startPoint:
+ # Green theorem is not defined on open contours.
+ raise OpenContourError("Green theorem is not defined on open contours.")
- def _closePath(self):
- p0 = self._getCurrentPoint()
- if p0 != self.__startPoint:
- self._lineTo(self.__startPoint)
+ @cython.locals(r0=cython.double)
+ @cython.locals(r1=cython.double)
+ @cython.locals(r2=cython.double)
+ @cython.locals(r3=cython.double)
+ @cython.locals(r4=cython.double)
+ @cython.locals(r5=cython.double)
+ @cython.locals(r6=cython.double)
+ @cython.locals(r7=cython.double)
+ @cython.locals(r8=cython.double)
+ @cython.locals(r9=cython.double)
+ @cython.locals(r10=cython.double)
+ @cython.locals(r11=cython.double)
+ @cython.locals(r12=cython.double)
+ @cython.locals(x0=cython.double, y0=cython.double)
+ @cython.locals(x1=cython.double, y1=cython.double)
+ def _lineTo(self, p1):
+ x0, y0 = self._getCurrentPoint()
+ x1, y1 = p1
- def _endPath(self):
- p0 = self._getCurrentPoint()
- if p0 != self.__startPoint:
- # Green theorem is not defined on open contours.
- raise OpenContourError(
- "Green theorem is not defined on open contours."
- )
+ r0 = x1 * y0
+ r1 = x1 * y1
+ r2 = x1**2
+ r3 = r2 * y1
+ r4 = y0 - y1
+ r5 = r4 * x0
+ r6 = x0**2
+ r7 = 2 * y0
+ r8 = y0**2
+ r9 = y1**2
+ r10 = x1**3
+ r11 = y0**3
+ r12 = y1**3
- @cython.locals(r0=cython.double)
- @cython.locals(r1=cython.double)
- @cython.locals(r2=cython.double)
- @cython.locals(r3=cython.double)
- @cython.locals(r4=cython.double)
- @cython.locals(r5=cython.double)
- @cython.locals(r6=cython.double)
- @cython.locals(r7=cython.double)
- @cython.locals(r8=cython.double)
- @cython.locals(r9=cython.double)
- @cython.locals(r10=cython.double)
- @cython.locals(r11=cython.double)
- @cython.locals(r12=cython.double)
- @cython.locals(x0=cython.double, y0=cython.double)
- @cython.locals(x1=cython.double, y1=cython.double)
- def _lineTo(self, p1):
- x0,y0 = self._getCurrentPoint()
- x1,y1 = p1
+ self.area += -r0 / 2 - r1 / 2 + x0 * (y0 + y1) / 2
+ self.momentX += -r2 * y0 / 6 - r3 / 3 - r5 * x1 / 6 + r6 * (r7 + y1) / 6
+ self.momentY += (
+ -r0 * y1 / 6 - r8 * x1 / 6 - r9 * x1 / 6 + x0 * (r8 + r9 + y0 * y1) / 6
+ )
+ self.momentXX += (
+ -r10 * y0 / 12
+ - r10 * y1 / 4
+ - r2 * r5 / 12
+ - r4 * r6 * x1 / 12
+ + x0**3 * (3 * y0 + y1) / 12
+ )
+ self.momentXY += (
+ -r2 * r8 / 24
+ - r2 * r9 / 8
+ - r3 * r7 / 24
+ + r6 * (r7 * y1 + 3 * r8 + r9) / 24
+ - x0 * x1 * (r8 - r9) / 12
+ )
+ self.momentYY += (
+ -r0 * r9 / 12
+ - r1 * r8 / 12
+ - r11 * x1 / 12
+ - r12 * x1 / 12
+ + x0 * (r11 + r12 + r8 * y1 + r9 * y0) / 12
+ )
- r0 = x1*y0
- r1 = x1*y1
- r2 = x1**2
- r3 = r2*y1
- r4 = y0 - y1
- r5 = r4*x0
- r6 = x0**2
- r7 = 2*y0
- r8 = y0**2
- r9 = y1**2
- r10 = x1**3
- r11 = y0**3
- r12 = y1**3
+ @cython.locals(r0=cython.double)
+ @cython.locals(r1=cython.double)
+ @cython.locals(r2=cython.double)
+ @cython.locals(r3=cython.double)
+ @cython.locals(r4=cython.double)
+ @cython.locals(r5=cython.double)
+ @cython.locals(r6=cython.double)
+ @cython.locals(r7=cython.double)
+ @cython.locals(r8=cython.double)
+ @cython.locals(r9=cython.double)
+ @cython.locals(r10=cython.double)
+ @cython.locals(r11=cython.double)
+ @cython.locals(r12=cython.double)
+ @cython.locals(r13=cython.double)
+ @cython.locals(r14=cython.double)
+ @cython.locals(r15=cython.double)
+ @cython.locals(r16=cython.double)
+ @cython.locals(r17=cython.double)
+ @cython.locals(r18=cython.double)
+ @cython.locals(r19=cython.double)
+ @cython.locals(r20=cython.double)
+ @cython.locals(r21=cython.double)
+ @cython.locals(r22=cython.double)
+ @cython.locals(r23=cython.double)
+ @cython.locals(r24=cython.double)
+ @cython.locals(r25=cython.double)
+ @cython.locals(r26=cython.double)
+ @cython.locals(r27=cython.double)
+ @cython.locals(r28=cython.double)
+ @cython.locals(r29=cython.double)
+ @cython.locals(r30=cython.double)
+ @cython.locals(r31=cython.double)
+ @cython.locals(r32=cython.double)
+ @cython.locals(r33=cython.double)
+ @cython.locals(r34=cython.double)
+ @cython.locals(r35=cython.double)
+ @cython.locals(r36=cython.double)
+ @cython.locals(r37=cython.double)
+ @cython.locals(r38=cython.double)
+ @cython.locals(r39=cython.double)
+ @cython.locals(r40=cython.double)
+ @cython.locals(r41=cython.double)
+ @cython.locals(r42=cython.double)
+ @cython.locals(r43=cython.double)
+ @cython.locals(r44=cython.double)
+ @cython.locals(r45=cython.double)
+ @cython.locals(r46=cython.double)
+ @cython.locals(r47=cython.double)
+ @cython.locals(r48=cython.double)
+ @cython.locals(r49=cython.double)
+ @cython.locals(r50=cython.double)
+ @cython.locals(r51=cython.double)
+ @cython.locals(r52=cython.double)
+ @cython.locals(r53=cython.double)
+ @cython.locals(x0=cython.double, y0=cython.double)
+ @cython.locals(x1=cython.double, y1=cython.double)
+ @cython.locals(x2=cython.double, y2=cython.double)
+ def _qCurveToOne(self, p1, p2):
+ x0, y0 = self._getCurrentPoint()
+ x1, y1 = p1
+ x2, y2 = p2
- self.area += -r0/2 - r1/2 + x0*(y0 + y1)/2
- self.momentX += -r2*y0/6 - r3/3 - r5*x1/6 + r6*(r7 + y1)/6
- self.momentY += -r0*y1/6 - r8*x1/6 - r9*x1/6 + x0*(r8 + r9 + y0*y1)/6
- self.momentXX += -r10*y0/12 - r10*y1/4 - r2*r5/12 - r4*r6*x1/12 + x0**3*(3*y0 + y1)/12
- self.momentXY += -r2*r8/24 - r2*r9/8 - r3*r7/24 + r6*(r7*y1 + 3*r8 + r9)/24 - x0*x1*(r8 - r9)/12
- self.momentYY += -r0*r9/12 - r1*r8/12 - r11*x1/12 - r12*x1/12 + x0*(r11 + r12 + r8*y1 + r9*y0)/12
+ r0 = 2 * y1
+ r1 = r0 * x2
+ r2 = x2 * y2
+ r3 = 3 * r2
+ r4 = 2 * x1
+ r5 = 3 * y0
+ r6 = x1**2
+ r7 = x2**2
+ r8 = 4 * y1
+ r9 = 10 * y2
+ r10 = 2 * y2
+ r11 = r4 * x2
+ r12 = x0**2
+ r13 = 10 * y0
+ r14 = r4 * y2
+ r15 = x2 * y0
+ r16 = 4 * x1
+ r17 = r0 * x1 + r2
+ r18 = r2 * r8
+ r19 = y1**2
+ r20 = 2 * r19
+ r21 = y2**2
+ r22 = r21 * x2
+ r23 = 5 * r22
+ r24 = y0**2
+ r25 = y0 * y2
+ r26 = 5 * r24
+ r27 = x1**3
+ r28 = x2**3
+ r29 = 30 * y1
+ r30 = 6 * y1
+ r31 = 10 * r7 * x1
+ r32 = 5 * y2
+ r33 = 12 * r6
+ r34 = 30 * x1
+ r35 = x1 * y1
+ r36 = r3 + 20 * r35
+ r37 = 12 * x1
+ r38 = 20 * r6
+ r39 = 8 * r6 * y1
+ r40 = r32 * r7
+ r41 = 60 * y1
+ r42 = 20 * r19
+ r43 = 4 * r19
+ r44 = 15 * r21
+ r45 = 12 * x2
+ r46 = 12 * y2
+ r47 = 6 * x1
+ r48 = 8 * r19 * x1 + r23
+ r49 = 8 * y1**3
+ r50 = y2**3
+ r51 = y0**3
+ r52 = 10 * y1
+ r53 = 12 * y1
- @cython.locals(r0=cython.double)
- @cython.locals(r1=cython.double)
- @cython.locals(r2=cython.double)
- @cython.locals(r3=cython.double)
- @cython.locals(r4=cython.double)
- @cython.locals(r5=cython.double)
- @cython.locals(r6=cython.double)
- @cython.locals(r7=cython.double)
- @cython.locals(r8=cython.double)
- @cython.locals(r9=cython.double)
- @cython.locals(r10=cython.double)
- @cython.locals(r11=cython.double)
- @cython.locals(r12=cython.double)
- @cython.locals(r13=cython.double)
- @cython.locals(r14=cython.double)
- @cython.locals(r15=cython.double)
- @cython.locals(r16=cython.double)
- @cython.locals(r17=cython.double)
- @cython.locals(r18=cython.double)
- @cython.locals(r19=cython.double)
- @cython.locals(r20=cython.double)
- @cython.locals(r21=cython.double)
- @cython.locals(r22=cython.double)
- @cython.locals(r23=cython.double)
- @cython.locals(r24=cython.double)
- @cython.locals(r25=cython.double)
- @cython.locals(r26=cython.double)
- @cython.locals(r27=cython.double)
- @cython.locals(r28=cython.double)
- @cython.locals(r29=cython.double)
- @cython.locals(r30=cython.double)
- @cython.locals(r31=cython.double)
- @cython.locals(r32=cython.double)
- @cython.locals(r33=cython.double)
- @cython.locals(r34=cython.double)
- @cython.locals(r35=cython.double)
- @cython.locals(r36=cython.double)
- @cython.locals(r37=cython.double)
- @cython.locals(r38=cython.double)
- @cython.locals(r39=cython.double)
- @cython.locals(r40=cython.double)
- @cython.locals(r41=cython.double)
- @cython.locals(r42=cython.double)
- @cython.locals(r43=cython.double)
- @cython.locals(r44=cython.double)
- @cython.locals(r45=cython.double)
- @cython.locals(r46=cython.double)
- @cython.locals(r47=cython.double)
- @cython.locals(r48=cython.double)
- @cython.locals(r49=cython.double)
- @cython.locals(r50=cython.double)
- @cython.locals(r51=cython.double)
- @cython.locals(r52=cython.double)
- @cython.locals(r53=cython.double)
- @cython.locals(x0=cython.double, y0=cython.double)
- @cython.locals(x1=cython.double, y1=cython.double)
- @cython.locals(x2=cython.double, y2=cython.double)
- def _qCurveToOne(self, p1, p2):
- x0,y0 = self._getCurrentPoint()
- x1,y1 = p1
- x2,y2 = p2
+ self.area += (
+ -r1 / 6
+ - r3 / 6
+ + x0 * (r0 + r5 + y2) / 6
+ + x1 * y2 / 3
+ - y0 * (r4 + x2) / 6
+ )
+ self.momentX += (
+ -r11 * (-r10 + y1) / 30
+ + r12 * (r13 + r8 + y2) / 30
+ + r6 * y2 / 15
+ - r7 * r8 / 30
+ - r7 * r9 / 30
+ + x0 * (r14 - r15 - r16 * y0 + r17) / 30
+ - y0 * (r11 + 2 * r6 + r7) / 30
+ )
+ self.momentY += (
+ -r18 / 30
+ - r20 * x2 / 30
+ - r23 / 30
+ - r24 * (r16 + x2) / 30
+ + x0 * (r0 * y2 + r20 + r21 + r25 + r26 + r8 * y0) / 30
+ + x1 * y2 * (r10 + y1) / 15
+ - y0 * (r1 + r17) / 30
+ )
+ self.momentXX += (
+ r12 * (r1 - 5 * r15 - r34 * y0 + r36 + r9 * x1) / 420
+ + 2 * r27 * y2 / 105
+ - r28 * r29 / 420
+ - r28 * y2 / 4
+ - r31 * (r0 - 3 * y2) / 420
+ - r6 * x2 * (r0 - r32) / 105
+ + x0**3 * (r30 + 21 * y0 + y2) / 84
+ - x0
+ * (
+ r0 * r7
+ + r15 * r37
+ - r2 * r37
+ - r33 * y2
+ + r38 * y0
+ - r39
+ - r40
+ + r5 * r7
+ )
+ / 420
+ - y0 * (8 * r27 + 5 * r28 + r31 + r33 * x2) / 420
+ )
+ self.momentXY += (
+ r12 * (r13 * y2 + 3 * r21 + 105 * r24 + r41 * y0 + r42 + r46 * y1) / 840
+ - r16 * x2 * (r43 - r44) / 840
+ - r21 * r7 / 8
+ - r24 * (r38 + r45 * x1 + 3 * r7) / 840
+ - r41 * r7 * y2 / 840
+ - r42 * r7 / 840
+ + r6 * y2 * (r32 + r8) / 210
+ + x0
+ * (
+ -r15 * r8
+ + r16 * r25
+ + r18
+ + r21 * r47
+ - r24 * r34
+ - r26 * x2
+ + r35 * r46
+ + r48
+ )
+ / 420
+ - y0 * (r16 * r2 + r30 * r7 + r35 * r45 + r39 + r40) / 420
+ )
+ self.momentYY += (
+ -r2 * r42 / 420
+ - r22 * r29 / 420
+ - r24 * (r14 + r36 + r52 * x2) / 420
+ - r49 * x2 / 420
+ - r50 * x2 / 12
+ - r51 * (r47 + x2) / 84
+ + x0
+ * (
+ r19 * r46
+ + r21 * r5
+ + r21 * r52
+ + r24 * r29
+ + r25 * r53
+ + r26 * y2
+ + r42 * y0
+ + r49
+ + 5 * r50
+ + 35 * r51
+ )
+ / 420
+ + x1 * y2 * (r43 + r44 + r9 * y1) / 210
+ - y0 * (r19 * r45 + r2 * r53 - r21 * r4 + r48) / 420
+ )
- r0 = 2*y1
- r1 = r0*x2
- r2 = x2*y2
- r3 = 3*r2
- r4 = 2*x1
- r5 = 3*y0
- r6 = x1**2
- r7 = x2**2
- r8 = 4*y1
- r9 = 10*y2
- r10 = 2*y2
- r11 = r4*x2
- r12 = x0**2
- r13 = 10*y0
- r14 = r4*y2
- r15 = x2*y0
- r16 = 4*x1
- r17 = r0*x1 + r2
- r18 = r2*r8
- r19 = y1**2
- r20 = 2*r19
- r21 = y2**2
- r22 = r21*x2
- r23 = 5*r22
- r24 = y0**2
- r25 = y0*y2
- r26 = 5*r24
- r27 = x1**3
- r28 = x2**3
- r29 = 30*y1
- r30 = 6*y1
- r31 = 10*r7*x1
- r32 = 5*y2
- r33 = 12*r6
- r34 = 30*x1
- r35 = x1*y1
- r36 = r3 + 20*r35
- r37 = 12*x1
- r38 = 20*r6
- r39 = 8*r6*y1
- r40 = r32*r7
- r41 = 60*y1
- r42 = 20*r19
- r43 = 4*r19
- r44 = 15*r21
- r45 = 12*x2
- r46 = 12*y2
- r47 = 6*x1
- r48 = 8*r19*x1 + r23
- r49 = 8*y1**3
- r50 = y2**3
- r51 = y0**3
- r52 = 10*y1
- r53 = 12*y1
+ @cython.locals(r0=cython.double)
+ @cython.locals(r1=cython.double)
+ @cython.locals(r2=cython.double)
+ @cython.locals(r3=cython.double)
+ @cython.locals(r4=cython.double)
+ @cython.locals(r5=cython.double)
+ @cython.locals(r6=cython.double)
+ @cython.locals(r7=cython.double)
+ @cython.locals(r8=cython.double)
+ @cython.locals(r9=cython.double)
+ @cython.locals(r10=cython.double)
+ @cython.locals(r11=cython.double)
+ @cython.locals(r12=cython.double)
+ @cython.locals(r13=cython.double)
+ @cython.locals(r14=cython.double)
+ @cython.locals(r15=cython.double)
+ @cython.locals(r16=cython.double)
+ @cython.locals(r17=cython.double)
+ @cython.locals(r18=cython.double)
+ @cython.locals(r19=cython.double)
+ @cython.locals(r20=cython.double)
+ @cython.locals(r21=cython.double)
+ @cython.locals(r22=cython.double)
+ @cython.locals(r23=cython.double)
+ @cython.locals(r24=cython.double)
+ @cython.locals(r25=cython.double)
+ @cython.locals(r26=cython.double)
+ @cython.locals(r27=cython.double)
+ @cython.locals(r28=cython.double)
+ @cython.locals(r29=cython.double)
+ @cython.locals(r30=cython.double)
+ @cython.locals(r31=cython.double)
+ @cython.locals(r32=cython.double)
+ @cython.locals(r33=cython.double)
+ @cython.locals(r34=cython.double)
+ @cython.locals(r35=cython.double)
+ @cython.locals(r36=cython.double)
+ @cython.locals(r37=cython.double)
+ @cython.locals(r38=cython.double)
+ @cython.locals(r39=cython.double)
+ @cython.locals(r40=cython.double)
+ @cython.locals(r41=cython.double)
+ @cython.locals(r42=cython.double)
+ @cython.locals(r43=cython.double)
+ @cython.locals(r44=cython.double)
+ @cython.locals(r45=cython.double)
+ @cython.locals(r46=cython.double)
+ @cython.locals(r47=cython.double)
+ @cython.locals(r48=cython.double)
+ @cython.locals(r49=cython.double)
+ @cython.locals(r50=cython.double)
+ @cython.locals(r51=cython.double)
+ @cython.locals(r52=cython.double)
+ @cython.locals(r53=cython.double)
+ @cython.locals(r54=cython.double)
+ @cython.locals(r55=cython.double)
+ @cython.locals(r56=cython.double)
+ @cython.locals(r57=cython.double)
+ @cython.locals(r58=cython.double)
+ @cython.locals(r59=cython.double)
+ @cython.locals(r60=cython.double)
+ @cython.locals(r61=cython.double)
+ @cython.locals(r62=cython.double)
+ @cython.locals(r63=cython.double)
+ @cython.locals(r64=cython.double)
+ @cython.locals(r65=cython.double)
+ @cython.locals(r66=cython.double)
+ @cython.locals(r67=cython.double)
+ @cython.locals(r68=cython.double)
+ @cython.locals(r69=cython.double)
+ @cython.locals(r70=cython.double)
+ @cython.locals(r71=cython.double)
+ @cython.locals(r72=cython.double)
+ @cython.locals(r73=cython.double)
+ @cython.locals(r74=cython.double)
+ @cython.locals(r75=cython.double)
+ @cython.locals(r76=cython.double)
+ @cython.locals(r77=cython.double)
+ @cython.locals(r78=cython.double)
+ @cython.locals(r79=cython.double)
+ @cython.locals(r80=cython.double)
+ @cython.locals(r81=cython.double)
+ @cython.locals(r82=cython.double)
+ @cython.locals(r83=cython.double)
+ @cython.locals(r84=cython.double)
+ @cython.locals(r85=cython.double)
+ @cython.locals(r86=cython.double)
+ @cython.locals(r87=cython.double)
+ @cython.locals(r88=cython.double)
+ @cython.locals(r89=cython.double)
+ @cython.locals(r90=cython.double)
+ @cython.locals(r91=cython.double)
+ @cython.locals(r92=cython.double)
+ @cython.locals(r93=cython.double)
+ @cython.locals(r94=cython.double)
+ @cython.locals(r95=cython.double)
+ @cython.locals(r96=cython.double)
+ @cython.locals(r97=cython.double)
+ @cython.locals(r98=cython.double)
+ @cython.locals(r99=cython.double)
+ @cython.locals(r100=cython.double)
+ @cython.locals(r101=cython.double)
+ @cython.locals(r102=cython.double)
+ @cython.locals(r103=cython.double)
+ @cython.locals(r104=cython.double)
+ @cython.locals(r105=cython.double)
+ @cython.locals(r106=cython.double)
+ @cython.locals(r107=cython.double)
+ @cython.locals(r108=cython.double)
+ @cython.locals(r109=cython.double)
+ @cython.locals(r110=cython.double)
+ @cython.locals(r111=cython.double)
+ @cython.locals(r112=cython.double)
+ @cython.locals(r113=cython.double)
+ @cython.locals(r114=cython.double)
+ @cython.locals(r115=cython.double)
+ @cython.locals(r116=cython.double)
+ @cython.locals(r117=cython.double)
+ @cython.locals(r118=cython.double)
+ @cython.locals(r119=cython.double)
+ @cython.locals(r120=cython.double)
+ @cython.locals(r121=cython.double)
+ @cython.locals(r122=cython.double)
+ @cython.locals(r123=cython.double)
+ @cython.locals(r124=cython.double)
+ @cython.locals(r125=cython.double)
+ @cython.locals(r126=cython.double)
+ @cython.locals(r127=cython.double)
+ @cython.locals(r128=cython.double)
+ @cython.locals(r129=cython.double)
+ @cython.locals(r130=cython.double)
+ @cython.locals(r131=cython.double)
+ @cython.locals(r132=cython.double)
+ @cython.locals(x0=cython.double, y0=cython.double)
+ @cython.locals(x1=cython.double, y1=cython.double)
+ @cython.locals(x2=cython.double, y2=cython.double)
+ @cython.locals(x3=cython.double, y3=cython.double)
+ def _curveToOne(self, p1, p2, p3):
+ x0, y0 = self._getCurrentPoint()
+ x1, y1 = p1
+ x2, y2 = p2
+ x3, y3 = p3
- self.area += -r1/6 - r3/6 + x0*(r0 + r5 + y2)/6 + x1*y2/3 - y0*(r4 + x2)/6
- self.momentX += -r11*(-r10 + y1)/30 + r12*(r13 + r8 + y2)/30 + r6*y2/15 - r7*r8/30 - r7*r9/30 + x0*(r14 - r15 - r16*y0 + r17)/30 - y0*(r11 + 2*r6 + r7)/30
- self.momentY += -r18/30 - r20*x2/30 - r23/30 - r24*(r16 + x2)/30 + x0*(r0*y2 + r20 + r21 + r25 + r26 + r8*y0)/30 + x1*y2*(r10 + y1)/15 - y0*(r1 + r17)/30
- self.momentXX += r12*(r1 - 5*r15 - r34*y0 + r36 + r9*x1)/420 + 2*r27*y2/105 - r28*r29/420 - r28*y2/4 - r31*(r0 - 3*y2)/420 - r6*x2*(r0 - r32)/105 + x0**3*(r30 + 21*y0 + y2)/84 - x0*(r0*r7 + r15*r37 - r2*r37 - r33*y2 + r38*y0 - r39 - r40 + r5*r7)/420 - y0*(8*r27 + 5*r28 + r31 + r33*x2)/420
- self.momentXY += r12*(r13*y2 + 3*r21 + 105*r24 + r41*y0 + r42 + r46*y1)/840 - r16*x2*(r43 - r44)/840 - r21*r7/8 - r24*(r38 + r45*x1 + 3*r7)/840 - r41*r7*y2/840 - r42*r7/840 + r6*y2*(r32 + r8)/210 + x0*(-r15*r8 + r16*r25 + r18 + r21*r47 - r24*r34 - r26*x2 + r35*r46 + r48)/420 - y0*(r16*r2 + r30*r7 + r35*r45 + r39 + r40)/420
- self.momentYY += -r2*r42/420 - r22*r29/420 - r24*(r14 + r36 + r52*x2)/420 - r49*x2/420 - r50*x2/12 - r51*(r47 + x2)/84 + x0*(r19*r46 + r21*r5 + r21*r52 + r24*r29 + r25*r53 + r26*y2 + r42*y0 + r49 + 5*r50 + 35*r51)/420 + x1*y2*(r43 + r44 + r9*y1)/210 - y0*(r19*r45 + r2*r53 - r21*r4 + r48)/420
+ r0 = 6 * y2
+ r1 = r0 * x3
+ r2 = 10 * y3
+ r3 = r2 * x3
+ r4 = 3 * y1
+ r5 = 6 * x1
+ r6 = 3 * x2
+ r7 = 6 * y1
+ r8 = 3 * y2
+ r9 = x2**2
+ r10 = 45 * r9
+ r11 = r10 * y3
+ r12 = x3**2
+ r13 = r12 * y2
+ r14 = r12 * y3
+ r15 = 7 * y3
+ r16 = 15 * x3
+ r17 = r16 * x2
+ r18 = x1**2
+ r19 = 9 * r18
+ r20 = x0**2
+ r21 = 21 * y1
+ r22 = 9 * r9
+ r23 = r7 * x3
+ r24 = 9 * y2
+ r25 = r24 * x2 + r3
+ r26 = 9 * x2
+ r27 = x2 * y3
+ r28 = -r26 * y1 + 15 * r27
+ r29 = 3 * x1
+ r30 = 45 * x1
+ r31 = 12 * x3
+ r32 = 45 * r18
+ r33 = 5 * r12
+ r34 = r8 * x3
+ r35 = 105 * y0
+ r36 = 30 * y0
+ r37 = r36 * x2
+ r38 = 5 * x3
+ r39 = 15 * y3
+ r40 = 5 * y3
+ r41 = r40 * x3
+ r42 = x2 * y2
+ r43 = 18 * r42
+ r44 = 45 * y1
+ r45 = r41 + r43 + r44 * x1
+ r46 = y2 * y3
+ r47 = r46 * x3
+ r48 = y2**2
+ r49 = 45 * r48
+ r50 = r49 * x3
+ r51 = y3**2
+ r52 = r51 * x3
+ r53 = y1**2
+ r54 = 9 * r53
+ r55 = y0**2
+ r56 = 21 * x1
+ r57 = 6 * x2
+ r58 = r16 * y2
+ r59 = r39 * y2
+ r60 = 9 * r48
+ r61 = r6 * y3
+ r62 = 3 * y3
+ r63 = r36 * y2
+ r64 = y1 * y3
+ r65 = 45 * r53
+ r66 = 5 * r51
+ r67 = x2**3
+ r68 = x3**3
+ r69 = 630 * y2
+ r70 = 126 * x3
+ r71 = x1**3
+ r72 = 126 * x2
+ r73 = 63 * r9
+ r74 = r73 * x3
+ r75 = r15 * x3 + 15 * r42
+ r76 = 630 * x1
+ r77 = 14 * x3
+ r78 = 21 * r27
+ r79 = 42 * x1
+ r80 = 42 * x2
+ r81 = x1 * y2
+ r82 = 63 * r42
+ r83 = x1 * y1
+ r84 = r41 + r82 + 378 * r83
+ r85 = x2 * x3
+ r86 = r85 * y1
+ r87 = r27 * x3
+ r88 = 27 * r9
+ r89 = r88 * y2
+ r90 = 42 * r14
+ r91 = 90 * x1
+ r92 = 189 * r18
+ r93 = 378 * r18
+ r94 = r12 * y1
+ r95 = 252 * x1 * x2
+ r96 = r79 * x3
+ r97 = 30 * r85
+ r98 = r83 * x3
+ r99 = 30 * x3
+ r100 = 42 * x3
+ r101 = r42 * x1
+ r102 = r10 * y2 + 14 * r14 + 126 * r18 * y1 + r81 * r99
+ r103 = 378 * r48
+ r104 = 18 * y1
+ r105 = r104 * y2
+ r106 = y0 * y1
+ r107 = 252 * y2
+ r108 = r107 * y0
+ r109 = y0 * y3
+ r110 = 42 * r64
+ r111 = 378 * r53
+ r112 = 63 * r48
+ r113 = 27 * x2
+ r114 = r27 * y2
+ r115 = r113 * r48 + 42 * r52
+ r116 = x3 * y3
+ r117 = 54 * r42
+ r118 = r51 * x1
+ r119 = r51 * x2
+ r120 = r48 * x1
+ r121 = 21 * x3
+ r122 = r64 * x1
+ r123 = r81 * y3
+ r124 = 30 * r27 * y1 + r49 * x2 + 14 * r52 + 126 * r53 * x1
+ r125 = y2**3
+ r126 = y3**3
+ r127 = y1**3
+ r128 = y0**3
+ r129 = r51 * y2
+ r130 = r112 * y3 + r21 * r51
+ r131 = 189 * r53
+ r132 = 90 * y2
- @cython.locals(r0=cython.double)
- @cython.locals(r1=cython.double)
- @cython.locals(r2=cython.double)
- @cython.locals(r3=cython.double)
- @cython.locals(r4=cython.double)
- @cython.locals(r5=cython.double)
- @cython.locals(r6=cython.double)
- @cython.locals(r7=cython.double)
- @cython.locals(r8=cython.double)
- @cython.locals(r9=cython.double)
- @cython.locals(r10=cython.double)
- @cython.locals(r11=cython.double)
- @cython.locals(r12=cython.double)
- @cython.locals(r13=cython.double)
- @cython.locals(r14=cython.double)
- @cython.locals(r15=cython.double)
- @cython.locals(r16=cython.double)
- @cython.locals(r17=cython.double)
- @cython.locals(r18=cython.double)
- @cython.locals(r19=cython.double)
- @cython.locals(r20=cython.double)
- @cython.locals(r21=cython.double)
- @cython.locals(r22=cython.double)
- @cython.locals(r23=cython.double)
- @cython.locals(r24=cython.double)
- @cython.locals(r25=cython.double)
- @cython.locals(r26=cython.double)
- @cython.locals(r27=cython.double)
- @cython.locals(r28=cython.double)
- @cython.locals(r29=cython.double)
- @cython.locals(r30=cython.double)
- @cython.locals(r31=cython.double)
- @cython.locals(r32=cython.double)
- @cython.locals(r33=cython.double)
- @cython.locals(r34=cython.double)
- @cython.locals(r35=cython.double)
- @cython.locals(r36=cython.double)
- @cython.locals(r37=cython.double)
- @cython.locals(r38=cython.double)
- @cython.locals(r39=cython.double)
- @cython.locals(r40=cython.double)
- @cython.locals(r41=cython.double)
- @cython.locals(r42=cython.double)
- @cython.locals(r43=cython.double)
- @cython.locals(r44=cython.double)
- @cython.locals(r45=cython.double)
- @cython.locals(r46=cython.double)
- @cython.locals(r47=cython.double)
- @cython.locals(r48=cython.double)
- @cython.locals(r49=cython.double)
- @cython.locals(r50=cython.double)
- @cython.locals(r51=cython.double)
- @cython.locals(r52=cython.double)
- @cython.locals(r53=cython.double)
- @cython.locals(r54=cython.double)
- @cython.locals(r55=cython.double)
- @cython.locals(r56=cython.double)
- @cython.locals(r57=cython.double)
- @cython.locals(r58=cython.double)
- @cython.locals(r59=cython.double)
- @cython.locals(r60=cython.double)
- @cython.locals(r61=cython.double)
- @cython.locals(r62=cython.double)
- @cython.locals(r63=cython.double)
- @cython.locals(r64=cython.double)
- @cython.locals(r65=cython.double)
- @cython.locals(r66=cython.double)
- @cython.locals(r67=cython.double)
- @cython.locals(r68=cython.double)
- @cython.locals(r69=cython.double)
- @cython.locals(r70=cython.double)
- @cython.locals(r71=cython.double)
- @cython.locals(r72=cython.double)
- @cython.locals(r73=cython.double)
- @cython.locals(r74=cython.double)
- @cython.locals(r75=cython.double)
- @cython.locals(r76=cython.double)
- @cython.locals(r77=cython.double)
- @cython.locals(r78=cython.double)
- @cython.locals(r79=cython.double)
- @cython.locals(r80=cython.double)
- @cython.locals(r81=cython.double)
- @cython.locals(r82=cython.double)
- @cython.locals(r83=cython.double)
- @cython.locals(r84=cython.double)
- @cython.locals(r85=cython.double)
- @cython.locals(r86=cython.double)
- @cython.locals(r87=cython.double)
- @cython.locals(r88=cython.double)
- @cython.locals(r89=cython.double)
- @cython.locals(r90=cython.double)
- @cython.locals(r91=cython.double)
- @cython.locals(r92=cython.double)
- @cython.locals(r93=cython.double)
- @cython.locals(r94=cython.double)
- @cython.locals(r95=cython.double)
- @cython.locals(r96=cython.double)
- @cython.locals(r97=cython.double)
- @cython.locals(r98=cython.double)
- @cython.locals(r99=cython.double)
- @cython.locals(r100=cython.double)
- @cython.locals(r101=cython.double)
- @cython.locals(r102=cython.double)
- @cython.locals(r103=cython.double)
- @cython.locals(r104=cython.double)
- @cython.locals(r105=cython.double)
- @cython.locals(r106=cython.double)
- @cython.locals(r107=cython.double)
- @cython.locals(r108=cython.double)
- @cython.locals(r109=cython.double)
- @cython.locals(r110=cython.double)
- @cython.locals(r111=cython.double)
- @cython.locals(r112=cython.double)
- @cython.locals(r113=cython.double)
- @cython.locals(r114=cython.double)
- @cython.locals(r115=cython.double)
- @cython.locals(r116=cython.double)
- @cython.locals(r117=cython.double)
- @cython.locals(r118=cython.double)
- @cython.locals(r119=cython.double)
- @cython.locals(r120=cython.double)
- @cython.locals(r121=cython.double)
- @cython.locals(r122=cython.double)
- @cython.locals(r123=cython.double)
- @cython.locals(r124=cython.double)
- @cython.locals(r125=cython.double)
- @cython.locals(r126=cython.double)
- @cython.locals(r127=cython.double)
- @cython.locals(r128=cython.double)
- @cython.locals(r129=cython.double)
- @cython.locals(r130=cython.double)
- @cython.locals(r131=cython.double)
- @cython.locals(r132=cython.double)
- @cython.locals(x0=cython.double, y0=cython.double)
- @cython.locals(x1=cython.double, y1=cython.double)
- @cython.locals(x2=cython.double, y2=cython.double)
- @cython.locals(x3=cython.double, y3=cython.double)
- def _curveToOne(self, p1, p2, p3):
- x0,y0 = self._getCurrentPoint()
- x1,y1 = p1
- x2,y2 = p2
- x3,y3 = p3
+ self.area += (
+ -r1 / 20
+ - r3 / 20
+ - r4 * (x2 + x3) / 20
+ + x0 * (r7 + r8 + 10 * y0 + y3) / 20
+ + 3 * x1 * (y2 + y3) / 20
+ + 3 * x2 * y3 / 10
+ - y0 * (r5 + r6 + x3) / 20
+ )
+ self.momentX += (
+ r11 / 840
+ - r13 / 8
+ - r14 / 3
+ - r17 * (-r15 + r8) / 840
+ + r19 * (r8 + 2 * y3) / 840
+ + r20 * (r0 + r21 + 56 * y0 + y3) / 168
+ + r29 * (-r23 + r25 + r28) / 840
+ - r4 * (10 * r12 + r17 + r22) / 840
+ + x0
+ * (
+ 12 * r27
+ + r30 * y2
+ + r34
+ - r35 * x1
+ - r37
+ - r38 * y0
+ + r39 * x1
+ - r4 * x3
+ + r45
+ )
+ / 840
+ - y0 * (r17 + r30 * x2 + r31 * x1 + r32 + r33 + 18 * r9) / 840
+ )
+ self.momentY += (
+ -r4 * (r25 + r58) / 840
+ - r47 / 8
+ - r50 / 840
+ - r52 / 6
+ - r54 * (r6 + 2 * x3) / 840
+ - r55 * (r56 + r57 + x3) / 168
+ + x0
+ * (
+ r35 * y1
+ + r40 * y0
+ + r44 * y2
+ + 18 * r48
+ + 140 * r55
+ + r59
+ + r63
+ + 12 * r64
+ + r65
+ + r66
+ )
+ / 840
+ + x1 * (r24 * y1 + 10 * r51 + r59 + r60 + r7 * y3) / 280
+ + x2 * y3 * (r15 + r8) / 56
+ - y0 * (r16 * y1 + r31 * y2 + r44 * x2 + r45 + r61 - r62 * x1) / 840
+ )
+ self.momentXX += (
+ -r12 * r72 * (-r40 + r8) / 9240
+ + 3 * r18 * (r28 + r34 - r38 * y1 + r75) / 3080
+ + r20
+ * (
+ r24 * x3
+ - r72 * y0
+ - r76 * y0
+ - r77 * y0
+ + r78
+ + r79 * y3
+ + r80 * y1
+ + 210 * r81
+ + r84
+ )
+ / 9240
+ - r29
+ * (
+ r12 * r21
+ + 14 * r13
+ + r44 * r9
+ - r73 * y3
+ + 54 * r86
+ - 84 * r87
+ - r89
+ - r90
+ )
+ / 9240
+ - r4 * (70 * r12 * x2 + 27 * r67 + 42 * r68 + r74) / 9240
+ + 3 * r67 * y3 / 220
+ - r68 * r69 / 9240
+ - r68 * y3 / 4
+ - r70 * r9 * (-r62 + y2) / 9240
+ + 3 * r71 * (r24 + r40) / 3080
+ + x0**3 * (r24 + r44 + 165 * y0 + y3) / 660
+ + x0
+ * (
+ r100 * r27
+ + 162 * r101
+ + r102
+ + r11
+ + 63 * r18 * y3
+ + r27 * r91
+ - r33 * y0
+ - r37 * x3
+ + r43 * x3
+ - r73 * y0
+ - r88 * y1
+ + r92 * y2
+ - r93 * y0
+ - 9 * r94
+ - r95 * y0
+ - r96 * y0
+ - r97 * y1
+ - 18 * r98
+ + r99 * x1 * y3
+ )
+ / 9240
+ - y0
+ * (
+ r12 * r56
+ + r12 * r80
+ + r32 * x3
+ + 45 * r67
+ + 14 * r68
+ + 126 * r71
+ + r74
+ + r85 * r91
+ + 135 * r9 * x1
+ + r92 * x2
+ )
+ / 9240
+ )
+ self.momentXY += (
+ -r103 * r12 / 18480
+ - r12 * r51 / 8
+ - 3 * r14 * y2 / 44
+ + 3 * r18 * (r105 + r2 * y1 + 18 * r46 + 15 * r48 + 7 * r51) / 6160
+ + r20
+ * (
+ 1260 * r106
+ + r107 * y1
+ + r108
+ + 28 * r109
+ + r110
+ + r111
+ + r112
+ + 30 * r46
+ + 2310 * r55
+ + r66
+ )
+ / 18480
+ - r54 * (7 * r12 + 18 * r85 + 15 * r9) / 18480
+ - r55 * (r33 + r73 + r93 + r95 + r96 + r97) / 18480
+ - r7 * (42 * r13 + r82 * x3 + 28 * r87 + r89 + r90) / 18480
+ - 3 * r85 * (r48 - r66) / 220
+ + 3 * r9 * y3 * (r62 + 2 * y2) / 440
+ + x0
+ * (
+ -r1 * y0
+ - 84 * r106 * x2
+ + r109 * r56
+ + 54 * r114
+ + r117 * y1
+ + 15 * r118
+ + 21 * r119
+ + 81 * r120
+ + r121 * r46
+ + 54 * r122
+ + 60 * r123
+ + r124
+ - r21 * x3 * y0
+ + r23 * y3
+ - r54 * x3
+ - r55 * r72
+ - r55 * r76
+ - r55 * r77
+ + r57 * y0 * y3
+ + r60 * x3
+ + 84 * r81 * y0
+ + 189 * r81 * y1
+ )
+ / 9240
+ + x1
+ * (
+ r104 * r27
+ - r105 * x3
+ - r113 * r53
+ + 63 * r114
+ + r115
+ - r16 * r53
+ + 28 * r47
+ + r51 * r80
+ )
+ / 3080
+ - y0
+ * (
+ 54 * r101
+ + r102
+ + r116 * r5
+ + r117 * x3
+ + 21 * r13
+ - r19 * y3
+ + r22 * y3
+ + r78 * x3
+ + 189 * r83 * x2
+ + 60 * r86
+ + 81 * r9 * y1
+ + 15 * r94
+ + 54 * r98
+ )
+ / 9240
+ )
+ self.momentYY += (
+ -r103 * r116 / 9240
+ - r125 * r70 / 9240
+ - r126 * x3 / 12
+ - 3 * r127 * (r26 + r38) / 3080
+ - r128 * (r26 + r30 + x3) / 660
+ - r4 * (r112 * x3 + r115 - 14 * r119 + 84 * r47) / 9240
+ - r52 * r69 / 9240
+ - r54 * (r58 + r61 + r75) / 9240
+ - r55
+ * (r100 * y1 + r121 * y2 + r26 * y3 + r79 * y2 + r84 + 210 * x2 * y1)
+ / 9240
+ + x0
+ * (
+ r108 * y1
+ + r110 * y0
+ + r111 * y0
+ + r112 * y0
+ + 45 * r125
+ + 14 * r126
+ + 126 * r127
+ + 770 * r128
+ + 42 * r129
+ + r130
+ + r131 * y2
+ + r132 * r64
+ + 135 * r48 * y1
+ + 630 * r55 * y1
+ + 126 * r55 * y2
+ + 14 * r55 * y3
+ + r63 * y3
+ + r65 * y3
+ + r66 * y0
+ )
+ / 9240
+ + x1
+ * (
+ 27 * r125
+ + 42 * r126
+ + 70 * r129
+ + r130
+ + r39 * r53
+ + r44 * r48
+ + 27 * r53 * y2
+ + 54 * r64 * y2
+ )
+ / 3080
+ + 3 * x2 * y3 * (r48 + r66 + r8 * y3) / 220
+ - y0
+ * (
+ r100 * r46
+ + 18 * r114
+ - 9 * r118
+ - 27 * r120
+ - 18 * r122
+ - 30 * r123
+ + r124
+ + r131 * x2
+ + r132 * x3 * y1
+ + 162 * r42 * y1
+ + r50
+ + 63 * r53 * x3
+ + r64 * r99
+ )
+ / 9240
+ )
- r0 = 6*y2
- r1 = r0*x3
- r2 = 10*y3
- r3 = r2*x3
- r4 = 3*y1
- r5 = 6*x1
- r6 = 3*x2
- r7 = 6*y1
- r8 = 3*y2
- r9 = x2**2
- r10 = 45*r9
- r11 = r10*y3
- r12 = x3**2
- r13 = r12*y2
- r14 = r12*y3
- r15 = 7*y3
- r16 = 15*x3
- r17 = r16*x2
- r18 = x1**2
- r19 = 9*r18
- r20 = x0**2
- r21 = 21*y1
- r22 = 9*r9
- r23 = r7*x3
- r24 = 9*y2
- r25 = r24*x2 + r3
- r26 = 9*x2
- r27 = x2*y3
- r28 = -r26*y1 + 15*r27
- r29 = 3*x1
- r30 = 45*x1
- r31 = 12*x3
- r32 = 45*r18
- r33 = 5*r12
- r34 = r8*x3
- r35 = 105*y0
- r36 = 30*y0
- r37 = r36*x2
- r38 = 5*x3
- r39 = 15*y3
- r40 = 5*y3
- r41 = r40*x3
- r42 = x2*y2
- r43 = 18*r42
- r44 = 45*y1
- r45 = r41 + r43 + r44*x1
- r46 = y2*y3
- r47 = r46*x3
- r48 = y2**2
- r49 = 45*r48
- r50 = r49*x3
- r51 = y3**2
- r52 = r51*x3
- r53 = y1**2
- r54 = 9*r53
- r55 = y0**2
- r56 = 21*x1
- r57 = 6*x2
- r58 = r16*y2
- r59 = r39*y2
- r60 = 9*r48
- r61 = r6*y3
- r62 = 3*y3
- r63 = r36*y2
- r64 = y1*y3
- r65 = 45*r53
- r66 = 5*r51
- r67 = x2**3
- r68 = x3**3
- r69 = 630*y2
- r70 = 126*x3
- r71 = x1**3
- r72 = 126*x2
- r73 = 63*r9
- r74 = r73*x3
- r75 = r15*x3 + 15*r42
- r76 = 630*x1
- r77 = 14*x3
- r78 = 21*r27
- r79 = 42*x1
- r80 = 42*x2
- r81 = x1*y2
- r82 = 63*r42
- r83 = x1*y1
- r84 = r41 + r82 + 378*r83
- r85 = x2*x3
- r86 = r85*y1
- r87 = r27*x3
- r88 = 27*r9
- r89 = r88*y2
- r90 = 42*r14
- r91 = 90*x1
- r92 = 189*r18
- r93 = 378*r18
- r94 = r12*y1
- r95 = 252*x1*x2
- r96 = r79*x3
- r97 = 30*r85
- r98 = r83*x3
- r99 = 30*x3
- r100 = 42*x3
- r101 = r42*x1
- r102 = r10*y2 + 14*r14 + 126*r18*y1 + r81*r99
- r103 = 378*r48
- r104 = 18*y1
- r105 = r104*y2
- r106 = y0*y1
- r107 = 252*y2
- r108 = r107*y0
- r109 = y0*y3
- r110 = 42*r64
- r111 = 378*r53
- r112 = 63*r48
- r113 = 27*x2
- r114 = r27*y2
- r115 = r113*r48 + 42*r52
- r116 = x3*y3
- r117 = 54*r42
- r118 = r51*x1
- r119 = r51*x2
- r120 = r48*x1
- r121 = 21*x3
- r122 = r64*x1
- r123 = r81*y3
- r124 = 30*r27*y1 + r49*x2 + 14*r52 + 126*r53*x1
- r125 = y2**3
- r126 = y3**3
- r127 = y1**3
- r128 = y0**3
- r129 = r51*y2
- r130 = r112*y3 + r21*r51
- r131 = 189*r53
- r132 = 90*y2
- self.area += -r1/20 - r3/20 - r4*(x2 + x3)/20 + x0*(r7 + r8 + 10*y0 + y3)/20 + 3*x1*(y2 + y3)/20 + 3*x2*y3/10 - y0*(r5 + r6 + x3)/20
- self.momentX += r11/840 - r13/8 - r14/3 - r17*(-r15 + r8)/840 + r19*(r8 + 2*y3)/840 + r20*(r0 + r21 + 56*y0 + y3)/168 + r29*(-r23 + r25 + r28)/840 - r4*(10*r12 + r17 + r22)/840 + x0*(12*r27 + r30*y2 + r34 - r35*x1 - r37 - r38*y0 + r39*x1 - r4*x3 + r45)/840 - y0*(r17 + r30*x2 + r31*x1 + r32 + r33 + 18*r9)/840
- self.momentY += -r4*(r25 + r58)/840 - r47/8 - r50/840 - r52/6 - r54*(r6 + 2*x3)/840 - r55*(r56 + r57 + x3)/168 + x0*(r35*y1 + r40*y0 + r44*y2 + 18*r48 + 140*r55 + r59 + r63 + 12*r64 + r65 + r66)/840 + x1*(r24*y1 + 10*r51 + r59 + r60 + r7*y3)/280 + x2*y3*(r15 + r8)/56 - y0*(r16*y1 + r31*y2 + r44*x2 + r45 + r61 - r62*x1)/840
- self.momentXX += -r12*r72*(-r40 + r8)/9240 + 3*r18*(r28 + r34 - r38*y1 + r75)/3080 + r20*(r24*x3 - r72*y0 - r76*y0 - r77*y0 + r78 + r79*y3 + r80*y1 + 210*r81 + r84)/9240 - r29*(r12*r21 + 14*r13 + r44*r9 - r73*y3 + 54*r86 - 84*r87 - r89 - r90)/9240 - r4*(70*r12*x2 + 27*r67 + 42*r68 + r74)/9240 + 3*r67*y3/220 - r68*r69/9240 - r68*y3/4 - r70*r9*(-r62 + y2)/9240 + 3*r71*(r24 + r40)/3080 + x0**3*(r24 + r44 + 165*y0 + y3)/660 + x0*(r100*r27 + 162*r101 + r102 + r11 + 63*r18*y3 + r27*r91 - r33*y0 - r37*x3 + r43*x3 - r73*y0 - r88*y1 + r92*y2 - r93*y0 - 9*r94 - r95*y0 - r96*y0 - r97*y1 - 18*r98 + r99*x1*y3)/9240 - y0*(r12*r56 + r12*r80 + r32*x3 + 45*r67 + 14*r68 + 126*r71 + r74 + r85*r91 + 135*r9*x1 + r92*x2)/9240
- self.momentXY += -r103*r12/18480 - r12*r51/8 - 3*r14*y2/44 + 3*r18*(r105 + r2*y1 + 18*r46 + 15*r48 + 7*r51)/6160 + r20*(1260*r106 + r107*y1 + r108 + 28*r109 + r110 + r111 + r112 + 30*r46 + 2310*r55 + r66)/18480 - r54*(7*r12 + 18*r85 + 15*r9)/18480 - r55*(r33 + r73 + r93 + r95 + r96 + r97)/18480 - r7*(42*r13 + r82*x3 + 28*r87 + r89 + r90)/18480 - 3*r85*(r48 - r66)/220 + 3*r9*y3*(r62 + 2*y2)/440 + x0*(-r1*y0 - 84*r106*x2 + r109*r56 + 54*r114 + r117*y1 + 15*r118 + 21*r119 + 81*r120 + r121*r46 + 54*r122 + 60*r123 + r124 - r21*x3*y0 + r23*y3 - r54*x3 - r55*r72 - r55*r76 - r55*r77 + r57*y0*y3 + r60*x3 + 84*r81*y0 + 189*r81*y1)/9240 + x1*(r104*r27 - r105*x3 - r113*r53 + 63*r114 + r115 - r16*r53 + 28*r47 + r51*r80)/3080 - y0*(54*r101 + r102 + r116*r5 + r117*x3 + 21*r13 - r19*y3 + r22*y3 + r78*x3 + 189*r83*x2 + 60*r86 + 81*r9*y1 + 15*r94 + 54*r98)/9240
- self.momentYY += -r103*r116/9240 - r125*r70/9240 - r126*x3/12 - 3*r127*(r26 + r38)/3080 - r128*(r26 + r30 + x3)/660 - r4*(r112*x3 + r115 - 14*r119 + 84*r47)/9240 - r52*r69/9240 - r54*(r58 + r61 + r75)/9240 - r55*(r100*y1 + r121*y2 + r26*y3 + r79*y2 + r84 + 210*x2*y1)/9240 + x0*(r108*y1 + r110*y0 + r111*y0 + r112*y0 + 45*r125 + 14*r126 + 126*r127 + 770*r128 + 42*r129 + r130 + r131*y2 + r132*r64 + 135*r48*y1 + 630*r55*y1 + 126*r55*y2 + 14*r55*y3 + r63*y3 + r65*y3 + r66*y0)/9240 + x1*(27*r125 + 42*r126 + 70*r129 + r130 + r39*r53 + r44*r48 + 27*r53*y2 + 54*r64*y2)/3080 + 3*x2*y3*(r48 + r66 + r8*y3)/220 - y0*(r100*r46 + 18*r114 - 9*r118 - 27*r120 - 18*r122 - 30*r123 + r124 + r131*x2 + r132*x3*y1 + 162*r42*y1 + r50 + 63*r53*x3 + r64*r99)/9240
+if __name__ == "__main__":
+ from fontTools.misc.symfont import x, y, printGreenPen
-if __name__ == '__main__':
- from fontTools.misc.symfont import x, y, printGreenPen
- printGreenPen('MomentsPen', [
- ('area', 1),
- ('momentX', x),
- ('momentY', y),
- ('momentXX', x**2),
- ('momentXY', x*y),
- ('momentYY', y**2),
- ])
+ printGreenPen(
+ "MomentsPen",
+ [
+ ("area", 1),
+ ("momentX", x),
+ ("momentY", y),
+ ("momentXX", x**2),
+ ("momentXY", x * y),
+ ("momentYY", y**2),
+ ],
+ )
diff --git a/Lib/fontTools/pens/perimeterPen.py b/Lib/fontTools/pens/perimeterPen.py
index 9a09cb8f..efb2b2d1 100644
--- a/Lib/fontTools/pens/perimeterPen.py
+++ b/Lib/fontTools/pens/perimeterPen.py
@@ -2,7 +2,12 @@
"""Calculate the perimeter of a glyph."""
from fontTools.pens.basePen import BasePen
-from fontTools.misc.bezierTools import approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC, calcCubicArcLengthC
+from fontTools.misc.bezierTools import (
+ approximateQuadraticArcLengthC,
+ calcQuadraticArcLengthC,
+ approximateCubicArcLengthC,
+ calcCubicArcLengthC,
+)
import math
@@ -10,49 +15,55 @@ __all__ = ["PerimeterPen"]
def _distance(p0, p1):
- return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
+ return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
-class PerimeterPen(BasePen):
-
- def __init__(self, glyphset=None, tolerance=0.005):
- BasePen.__init__(self, glyphset)
- self.value = 0
- self.tolerance = tolerance
-
- # Choose which algorithm to use for quadratic and for cubic.
- # Quadrature is faster but has fixed error characteristic with no strong
- # error bound. The cutoff points are derived empirically.
- self._addCubic = self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
- self._addQuadratic = self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact
-
- def _moveTo(self, p0):
- self.__startPoint = p0
-
- def _closePath(self):
- p0 = self._getCurrentPoint()
- if p0 != self.__startPoint:
- self._lineTo(self.__startPoint)
-
- def _lineTo(self, p1):
- p0 = self._getCurrentPoint()
- self.value += _distance(p0, p1)
- def _addQuadraticExact(self, c0, c1, c2):
- self.value += calcQuadraticArcLengthC(c0, c1, c2)
-
- def _addQuadraticQuadrature(self, c0, c1, c2):
- self.value += approximateQuadraticArcLengthC(c0, c1, c2)
-
- def _qCurveToOne(self, p1, p2):
- p0 = self._getCurrentPoint()
- self._addQuadratic(complex(*p0), complex(*p1), complex(*p2))
-
- def _addCubicRecursive(self, c0, c1, c2, c3):
- self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance)
-
- def _addCubicQuadrature(self, c0, c1, c2, c3):
- self.value += approximateCubicArcLengthC(c0, c1, c2, c3)
-
- def _curveToOne(self, p1, p2, p3):
- p0 = self._getCurrentPoint()
- self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3))
+class PerimeterPen(BasePen):
+ def __init__(self, glyphset=None, tolerance=0.005):
+ BasePen.__init__(self, glyphset)
+ self.value = 0
+ self.tolerance = tolerance
+
+ # Choose which algorithm to use for quadratic and for cubic.
+ # Quadrature is faster but has fixed error characteristic with no strong
+ # error bound. The cutoff points are derived empirically.
+ self._addCubic = (
+ self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
+ )
+ self._addQuadratic = (
+ self._addQuadraticQuadrature
+ if tolerance >= 0.00075
+ else self._addQuadraticExact
+ )
+
+ def _moveTo(self, p0):
+ self.__startPoint = p0
+
+ def _closePath(self):
+ p0 = self._getCurrentPoint()
+ if p0 != self.__startPoint:
+ self._lineTo(self.__startPoint)
+
+ def _lineTo(self, p1):
+ p0 = self._getCurrentPoint()
+ self.value += _distance(p0, p1)
+
+ def _addQuadraticExact(self, c0, c1, c2):
+ self.value += calcQuadraticArcLengthC(c0, c1, c2)
+
+ def _addQuadraticQuadrature(self, c0, c1, c2):
+ self.value += approximateQuadraticArcLengthC(c0, c1, c2)
+
+ def _qCurveToOne(self, p1, p2):
+ p0 = self._getCurrentPoint()
+ self._addQuadratic(complex(*p0), complex(*p1), complex(*p2))
+
+ def _addCubicRecursive(self, c0, c1, c2, c3):
+ self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance)
+
+ def _addCubicQuadrature(self, c0, c1, c2, c3):
+ self.value += approximateCubicArcLengthC(c0, c1, c2, c3)
+
+ def _curveToOne(self, p1, p2, p3):
+ p0 = self._getCurrentPoint()
+ self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3))
diff --git a/Lib/fontTools/pens/pointInsidePen.py b/Lib/fontTools/pens/pointInsidePen.py
index 34597f40..8a579ae4 100644
--- a/Lib/fontTools/pens/pointInsidePen.py
+++ b/Lib/fontTools/pens/pointInsidePen.py
@@ -11,180 +11,182 @@ __all__ = ["PointInsidePen"]
class PointInsidePen(BasePen):
- """This pen implements "point inside" testing: to test whether
- a given point lies inside the shape (black) or outside (white).
- Instances of this class can be recycled, as long as the
- setTestPoint() method is used to set the new point to test.
-
- Typical usage:
-
- pen = PointInsidePen(glyphSet, (100, 200))
- outline.draw(pen)
- isInside = pen.getResult()
-
- Both the even-odd algorithm and the non-zero-winding-rule
- algorithm are implemented. The latter is the default, specify
- True for the evenOdd argument of __init__ or setTestPoint
- to use the even-odd algorithm.
- """
-
- # This class implements the classical "shoot a ray from the test point
- # to infinity and count how many times it intersects the outline" (as well
- # as the non-zero variant, where the counter is incremented if the outline
- # intersects the ray in one direction and decremented if it intersects in
- # the other direction).
- # I found an amazingly clear explanation of the subtleties involved in
- # implementing this correctly for polygons here:
- # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
- # I extended the principles outlined on that page to curves.
-
- def __init__(self, glyphSet, testPoint, evenOdd=False):
- BasePen.__init__(self, glyphSet)
- self.setTestPoint(testPoint, evenOdd)
-
- def setTestPoint(self, testPoint, evenOdd=False):
- """Set the point to test. Call this _before_ the outline gets drawn."""
- self.testPoint = testPoint
- self.evenOdd = evenOdd
- self.firstPoint = None
- self.intersectionCount = 0
-
- def getWinding(self):
- if self.firstPoint is not None:
- # always make sure the sub paths are closed; the algorithm only works
- # for closed paths.
- self.closePath()
- return self.intersectionCount
-
- def getResult(self):
- """After the shape has been drawn, getResult() returns True if the test
- point lies within the (black) shape, and False if it doesn't.
- """
- winding = self.getWinding()
- if self.evenOdd:
- result = winding % 2
- else: # non-zero
- result = self.intersectionCount != 0
- return not not result
-
- def _addIntersection(self, goingUp):
- if self.evenOdd or goingUp:
- self.intersectionCount += 1
- else:
- self.intersectionCount -= 1
-
- def _moveTo(self, point):
- if self.firstPoint is not None:
- # always make sure the sub paths are closed; the algorithm only works
- # for closed paths.
- self.closePath()
- self.firstPoint = point
-
- def _lineTo(self, point):
- x, y = self.testPoint
- x1, y1 = self._getCurrentPoint()
- x2, y2 = point
-
- if x1 < x and x2 < x:
- return
- if y1 < y and y2 < y:
- return
- if y1 >= y and y2 >= y:
- return
-
- dx = x2 - x1
- dy = y2 - y1
- t = (y - y1) / dy
- ix = dx * t + x1
- if ix < x:
- return
- self._addIntersection(y2 > y1)
-
- def _curveToOne(self, bcp1, bcp2, point):
- x, y = self.testPoint
- x1, y1 = self._getCurrentPoint()
- x2, y2 = bcp1
- x3, y3 = bcp2
- x4, y4 = point
-
- if x1 < x and x2 < x and x3 < x and x4 < x:
- return
- if y1 < y and y2 < y and y3 < y and y4 < y:
- return
- if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
- return
-
- dy = y1
- cy = (y2 - dy) * 3.0
- by = (y3 - y2) * 3.0 - cy
- ay = y4 - dy - cy - by
- solutions = sorted(solveCubic(ay, by, cy, dy - y))
- solutions = [t for t in solutions if -0. <= t <= 1.]
- if not solutions:
- return
-
- dx = x1
- cx = (x2 - dx) * 3.0
- bx = (x3 - x2) * 3.0 - cx
- ax = x4 - dx - cx - bx
-
- above = y1 >= y
- lastT = None
- for t in solutions:
- if t == lastT:
- continue
- lastT = t
- t2 = t * t
- t3 = t2 * t
-
- direction = 3*ay*t2 + 2*by*t + cy
- incomingGoingUp = outgoingGoingUp = direction > 0.0
- if direction == 0.0:
- direction = 6*ay*t + 2*by
- outgoingGoingUp = direction > 0.0
- incomingGoingUp = not outgoingGoingUp
- if direction == 0.0:
- direction = ay
- incomingGoingUp = outgoingGoingUp = direction > 0.0
-
- xt = ax*t3 + bx*t2 + cx*t + dx
- if xt < x:
- continue
-
- if t in (0.0, -0.0):
- if not outgoingGoingUp:
- self._addIntersection(outgoingGoingUp)
- elif t == 1.0:
- if incomingGoingUp:
- self._addIntersection(incomingGoingUp)
- else:
- if incomingGoingUp == outgoingGoingUp:
- self._addIntersection(outgoingGoingUp)
- #else:
- # we're not really intersecting, merely touching
-
- def _qCurveToOne_unfinished(self, bcp, point):
- # XXX need to finish this, for now doing it through a cubic
- # (BasePen implements _qCurveTo in terms of a cubic) will
- # have to do.
- x, y = self.testPoint
- x1, y1 = self._getCurrentPoint()
- x2, y2 = bcp
- x3, y3 = point
- c = y1
- b = (y2 - c) * 2.0
- a = y3 - c - b
- solutions = sorted(solveQuadratic(a, b, c - y))
- solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON]
- if not solutions:
- return
- # XXX
-
- def _closePath(self):
- if self._getCurrentPoint() != self.firstPoint:
- self.lineTo(self.firstPoint)
- self.firstPoint = None
-
- def _endPath(self):
- """Insideness is not defined for open contours."""
- raise NotImplementedError
+ """This pen implements "point inside" testing: to test whether
+ a given point lies inside the shape (black) or outside (white).
+ Instances of this class can be recycled, as long as the
+ setTestPoint() method is used to set the new point to test.
+
+ Typical usage:
+
+ pen = PointInsidePen(glyphSet, (100, 200))
+ outline.draw(pen)
+ isInside = pen.getResult()
+
+ Both the even-odd algorithm and the non-zero-winding-rule
+ algorithm are implemented. The latter is the default, specify
+ True for the evenOdd argument of __init__ or setTestPoint
+ to use the even-odd algorithm.
+ """
+
+ # This class implements the classical "shoot a ray from the test point
+ # to infinity and count how many times it intersects the outline" (as well
+ # as the non-zero variant, where the counter is incremented if the outline
+ # intersects the ray in one direction and decremented if it intersects in
+ # the other direction).
+ # I found an amazingly clear explanation of the subtleties involved in
+ # implementing this correctly for polygons here:
+ # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
+ # I extended the principles outlined on that page to curves.
+
+ def __init__(self, glyphSet, testPoint, evenOdd=False):
+ BasePen.__init__(self, glyphSet)
+ self.setTestPoint(testPoint, evenOdd)
+
+ def setTestPoint(self, testPoint, evenOdd=False):
+ """Set the point to test. Call this _before_ the outline gets drawn."""
+ self.testPoint = testPoint
+ self.evenOdd = evenOdd
+ self.firstPoint = None
+ self.intersectionCount = 0
+
+ def getWinding(self):
+ if self.firstPoint is not None:
+ # always make sure the sub paths are closed; the algorithm only works
+ # for closed paths.
+ self.closePath()
+ return self.intersectionCount
+
+ def getResult(self):
+ """After the shape has been drawn, getResult() returns True if the test
+ point lies within the (black) shape, and False if it doesn't.
+ """
+ winding = self.getWinding()
+ if self.evenOdd:
+ result = winding % 2
+ else: # non-zero
+ result = self.intersectionCount != 0
+ return not not result
+
+ def _addIntersection(self, goingUp):
+ if self.evenOdd or goingUp:
+ self.intersectionCount += 1
+ else:
+ self.intersectionCount -= 1
+
+ def _moveTo(self, point):
+ if self.firstPoint is not None:
+ # always make sure the sub paths are closed; the algorithm only works
+ # for closed paths.
+ self.closePath()
+ self.firstPoint = point
+
+ def _lineTo(self, point):
+ x, y = self.testPoint
+ x1, y1 = self._getCurrentPoint()
+ x2, y2 = point
+
+ if x1 < x and x2 < x:
+ return
+ if y1 < y and y2 < y:
+ return
+ if y1 >= y and y2 >= y:
+ return
+
+ dx = x2 - x1
+ dy = y2 - y1
+ t = (y - y1) / dy
+ ix = dx * t + x1
+ if ix < x:
+ return
+ self._addIntersection(y2 > y1)
+
+ def _curveToOne(self, bcp1, bcp2, point):
+ x, y = self.testPoint
+ x1, y1 = self._getCurrentPoint()
+ x2, y2 = bcp1
+ x3, y3 = bcp2
+ x4, y4 = point
+
+ if x1 < x and x2 < x and x3 < x and x4 < x:
+ return
+ if y1 < y and y2 < y and y3 < y and y4 < y:
+ return
+ if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
+ return
+
+ dy = y1
+ cy = (y2 - dy) * 3.0
+ by = (y3 - y2) * 3.0 - cy
+ ay = y4 - dy - cy - by
+ solutions = sorted(solveCubic(ay, by, cy, dy - y))
+ solutions = [t for t in solutions if -0.0 <= t <= 1.0]
+ if not solutions:
+ return
+
+ dx = x1
+ cx = (x2 - dx) * 3.0
+ bx = (x3 - x2) * 3.0 - cx
+ ax = x4 - dx - cx - bx
+
+ above = y1 >= y
+ lastT = None
+ for t in solutions:
+ if t == lastT:
+ continue
+ lastT = t
+ t2 = t * t
+ t3 = t2 * t
+
+ direction = 3 * ay * t2 + 2 * by * t + cy
+ incomingGoingUp = outgoingGoingUp = direction > 0.0
+ if direction == 0.0:
+ direction = 6 * ay * t + 2 * by
+ outgoingGoingUp = direction > 0.0
+ incomingGoingUp = not outgoingGoingUp
+ if direction == 0.0:
+ direction = ay
+ incomingGoingUp = outgoingGoingUp = direction > 0.0
+
+ xt = ax * t3 + bx * t2 + cx * t + dx
+ if xt < x:
+ continue
+
+ if t in (0.0, -0.0):
+ if not outgoingGoingUp:
+ self._addIntersection(outgoingGoingUp)
+ elif t == 1.0:
+ if incomingGoingUp:
+ self._addIntersection(incomingGoingUp)
+ else:
+ if incomingGoingUp == outgoingGoingUp:
+ self._addIntersection(outgoingGoingUp)
+ # else:
+ # we're not really intersecting, merely touching
+
+ def _qCurveToOne_unfinished(self, bcp, point):
+ # XXX need to finish this, for now doing it through a cubic
+ # (BasePen implements _qCurveTo in terms of a cubic) will
+ # have to do.
+ x, y = self.testPoint
+ x1, y1 = self._getCurrentPoint()
+ x2, y2 = bcp
+ x3, y3 = point
+ c = y1
+ b = (y2 - c) * 2.0
+ a = y3 - c - b
+ solutions = sorted(solveQuadratic(a, b, c - y))
+ solutions = [
+ t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON
+ ]
+ if not solutions:
+ return
+ # XXX
+
+ def _closePath(self):
+ if self._getCurrentPoint() != self.firstPoint:
+ self.lineTo(self.firstPoint)
+ self.firstPoint = None
+
+ def _endPath(self):
+ """Insideness is not defined for open contours."""
+ raise NotImplementedError
diff --git a/Lib/fontTools/pens/pointPen.py b/Lib/fontTools/pens/pointPen.py
index 4c3148bf..eb1ebc20 100644
--- a/Lib/fontTools/pens/pointPen.py
+++ b/Lib/fontTools/pens/pointPen.py
@@ -13,481 +13,513 @@ For instance, whether or not a point is smooth, and its name.
"""
import math
-from typing import Any, Optional, Tuple
+from typing import Any, Optional, Tuple, Dict
from fontTools.pens.basePen import AbstractPen, PenError
+from fontTools.misc.transform import DecomposedTransform
__all__ = [
- "AbstractPointPen",
- "BasePointToSegmentPen",
- "PointToSegmentPen",
- "SegmentToPointPen",
- "GuessSmoothPointPen",
- "ReverseContourPointPen",
+ "AbstractPointPen",
+ "BasePointToSegmentPen",
+ "PointToSegmentPen",
+ "SegmentToPointPen",
+ "GuessSmoothPointPen",
+ "ReverseContourPointPen",
]
class AbstractPointPen:
- """Baseclass for all PointPens."""
-
- def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
- """Start a new sub path."""
- raise NotImplementedError
-
- def endPath(self) -> None:
- """End the current sub path."""
- raise NotImplementedError
-
- def addPoint(
- self,
- pt: Tuple[float, float],
- segmentType: Optional[str] = None,
- smooth: bool = False,
- name: Optional[str] = None,
- identifier: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """Add a point to the current sub path."""
- raise NotImplementedError
-
- def addComponent(
- self,
- baseGlyphName: str,
- transformation: Tuple[float, float, float, float, float, float],
- identifier: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """Add a sub glyph."""
- raise NotImplementedError
+ """Baseclass for all PointPens."""
+
+ def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
+ """Start a new sub path."""
+ raise NotImplementedError
+
+ def endPath(self) -> None:
+ """End the current sub path."""
+ raise NotImplementedError
+
+ def addPoint(
+ self,
+ pt: Tuple[float, float],
+ segmentType: Optional[str] = None,
+ smooth: bool = False,
+ name: Optional[str] = None,
+ identifier: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Add a point to the current sub path."""
+ raise NotImplementedError
+
+ def addComponent(
+ self,
+ baseGlyphName: str,
+ transformation: Tuple[float, float, float, float, float, float],
+ identifier: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Add a sub glyph."""
+ raise NotImplementedError
+
+ def addVarComponent(
+ self,
+ glyphName: str,
+ transformation: DecomposedTransform,
+ location: Dict[str, float],
+ identifier: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Add a VarComponent sub glyph. The 'transformation' argument
+ must be a DecomposedTransform from the fontTools.misc.transform module,
+ and the 'location' argument must be a dictionary mapping axis tags
+ to their locations.
+ """
+ # ttGlyphSet decomposes for us
+ raise AttributeError
class BasePointToSegmentPen(AbstractPointPen):
- """
- Base class for retrieving the outline in a segment-oriented
- way. The PointPen protocol is simple yet also a little tricky,
- so when you need an outline presented as segments but you have
- as points, do use this base implementation as it properly takes
- care of all the edge cases.
- """
-
- def __init__(self):
- self.currentPath = None
-
- def beginPath(self, identifier=None, **kwargs):
- if self.currentPath is not None:
- raise PenError("Path already begun.")
- self.currentPath = []
-
- def _flushContour(self, segments):
- """Override this method.
-
- It will be called for each non-empty sub path with a list
- of segments: the 'segments' argument.
-
- The segments list contains tuples of length 2:
- (segmentType, points)
-
- segmentType is one of "move", "line", "curve" or "qcurve".
- "move" may only occur as the first segment, and it signifies
- an OPEN path. A CLOSED path does NOT start with a "move", in
- fact it will not contain a "move" at ALL.
-
- The 'points' field in the 2-tuple is a list of point info
- tuples. The list has 1 or more items, a point tuple has
- four items:
- (point, smooth, name, kwargs)
- 'point' is an (x, y) coordinate pair.
-
- For a closed path, the initial moveTo point is defined as
- the last point of the last segment.
-
- The 'points' list of "move" and "line" segments always contains
- exactly one point tuple.
- """
- raise NotImplementedError
-
- def endPath(self):
- if self.currentPath is None:
- raise PenError("Path not begun.")
- points = self.currentPath
- self.currentPath = None
- if not points:
- return
- if len(points) == 1:
- # Not much more we can do than output a single move segment.
- pt, segmentType, smooth, name, kwargs = points[0]
- segments = [("move", [(pt, smooth, name, kwargs)])]
- self._flushContour(segments)
- return
- segments = []
- if points[0][1] == "move":
- # It's an open contour, insert a "move" segment for the first
- # point and remove that first point from the point list.
- pt, segmentType, smooth, name, kwargs = points[0]
- segments.append(("move", [(pt, smooth, name, kwargs)]))
- points.pop(0)
- else:
- # It's a closed contour. Locate the first on-curve point, and
- # rotate the point list so that it _ends_ with an on-curve
- # point.
- firstOnCurve = None
- for i in range(len(points)):
- segmentType = points[i][1]
- if segmentType is not None:
- firstOnCurve = i
- break
- if firstOnCurve is None:
- # Special case for quadratics: a contour with no on-curve
- # points. Add a "None" point. (See also the Pen protocol's
- # qCurveTo() method and fontTools.pens.basePen.py.)
- points.append((None, "qcurve", None, None, None))
- else:
- points = points[firstOnCurve+1:] + points[:firstOnCurve+1]
-
- currentSegment = []
- for pt, segmentType, smooth, name, kwargs in points:
- currentSegment.append((pt, smooth, name, kwargs))
- if segmentType is None:
- continue
- segments.append((segmentType, currentSegment))
- currentSegment = []
-
- self._flushContour(segments)
-
- def addPoint(self, pt, segmentType=None, smooth=False, name=None,
- identifier=None, **kwargs):
- if self.currentPath is None:
- raise PenError("Path not begun")
- self.currentPath.append((pt, segmentType, smooth, name, kwargs))
+ """
+ Base class for retrieving the outline in a segment-oriented
+ way. The PointPen protocol is simple yet also a little tricky,
+ so when you need an outline presented as segments but you have
+ as points, do use this base implementation as it properly takes
+ care of all the edge cases.
+ """
+
+ def __init__(self):
+ self.currentPath = None
+
+ def beginPath(self, identifier=None, **kwargs):
+ if self.currentPath is not None:
+ raise PenError("Path already begun.")
+ self.currentPath = []
+
+ def _flushContour(self, segments):
+ """Override this method.
+
+ It will be called for each non-empty sub path with a list
+ of segments: the 'segments' argument.
+
+ The segments list contains tuples of length 2:
+ (segmentType, points)
+
+ segmentType is one of "move", "line", "curve" or "qcurve".
+ "move" may only occur as the first segment, and it signifies
+ an OPEN path. A CLOSED path does NOT start with a "move", in
+ fact it will not contain a "move" at ALL.
+
+ The 'points' field in the 2-tuple is a list of point info
+ tuples. The list has 1 or more items, a point tuple has
+ four items:
+ (point, smooth, name, kwargs)
+ 'point' is an (x, y) coordinate pair.
+
+ For a closed path, the initial moveTo point is defined as
+ the last point of the last segment.
+
+ The 'points' list of "move" and "line" segments always contains
+ exactly one point tuple.
+ """
+ raise NotImplementedError
+
+ def endPath(self):
+ if self.currentPath is None:
+ raise PenError("Path not begun.")
+ points = self.currentPath
+ self.currentPath = None
+ if not points:
+ return
+ if len(points) == 1:
+ # Not much more we can do than output a single move segment.
+ pt, segmentType, smooth, name, kwargs = points[0]
+ segments = [("move", [(pt, smooth, name, kwargs)])]
+ self._flushContour(segments)
+ return
+ segments = []
+ if points[0][1] == "move":
+ # It's an open contour, insert a "move" segment for the first
+ # point and remove that first point from the point list.
+ pt, segmentType, smooth, name, kwargs = points[0]
+ segments.append(("move", [(pt, smooth, name, kwargs)]))
+ points.pop(0)
+ else:
+ # It's a closed contour. Locate the first on-curve point, and
+ # rotate the point list so that it _ends_ with an on-curve
+ # point.
+ firstOnCurve = None
+ for i in range(len(points)):
+ segmentType = points[i][1]
+ if segmentType is not None:
+ firstOnCurve = i
+ break
+ if firstOnCurve is None:
+ # Special case for quadratics: a contour with no on-curve
+ # points. Add a "None" point. (See also the Pen protocol's
+ # qCurveTo() method and fontTools.pens.basePen.py.)
+ points.append((None, "qcurve", None, None, None))
+ else:
+ points = points[firstOnCurve + 1 :] + points[: firstOnCurve + 1]
+
+ currentSegment = []
+ for pt, segmentType, smooth, name, kwargs in points:
+ currentSegment.append((pt, smooth, name, kwargs))
+ if segmentType is None:
+ continue
+ segments.append((segmentType, currentSegment))
+ currentSegment = []
+
+ self._flushContour(segments)
+
+ def addPoint(
+ self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
+ ):
+ if self.currentPath is None:
+ raise PenError("Path not begun")
+ self.currentPath.append((pt, segmentType, smooth, name, kwargs))
class PointToSegmentPen(BasePointToSegmentPen):
- """
- Adapter class that converts the PointPen protocol to the
- (Segment)Pen protocol.
-
- NOTE: The segment pen does not support and will drop point names, identifiers
- and kwargs.
- """
-
- def __init__(self, segmentPen, outputImpliedClosingLine=False):
- BasePointToSegmentPen.__init__(self)
- self.pen = segmentPen
- self.outputImpliedClosingLine = outputImpliedClosingLine
-
- def _flushContour(self, segments):
- if not segments:
- raise PenError("Must have at least one segment.")
- pen = self.pen
- if segments[0][0] == "move":
- # It's an open path.
- closed = False
- points = segments[0][1]
- if len(points) != 1:
- raise PenError(f"Illegal move segment point count: {len(points)}")
- movePt, _, _ , _ = points[0]
- del segments[0]
- else:
- # It's a closed path, do a moveTo to the last
- # point of the last segment.
- closed = True
- segmentType, points = segments[-1]
- movePt, _, _ , _ = points[-1]
- if movePt is None:
- # quad special case: a contour with no on-curve points contains
- # one "qcurve" segment that ends with a point that's None. We
- # must not output a moveTo() in that case.
- pass
- else:
- pen.moveTo(movePt)
- outputImpliedClosingLine = self.outputImpliedClosingLine
- nSegments = len(segments)
- lastPt = movePt
- for i in range(nSegments):
- segmentType, points = segments[i]
- points = [pt for pt, _, _ , _ in points]
- if segmentType == "line":
- if len(points) != 1:
- raise PenError(f"Illegal line segment point count: {len(points)}")
- pt = points[0]
- # For closed contours, a 'lineTo' is always implied from the last oncurve
- # point to the starting point, thus we can omit it when the last and
- # starting point don't overlap.
- # However, when the last oncurve point is a "line" segment and has same
- # coordinates as the starting point of a closed contour, we need to output
- # the closing 'lineTo' explicitly (regardless of the value of the
- # 'outputImpliedClosingLine' option) in order to disambiguate this case from
- # the implied closing 'lineTo', otherwise the duplicate point would be lost.
- # See https://github.com/googlefonts/fontmake/issues/572.
- if (
- i + 1 != nSegments
- or outputImpliedClosingLine
- or not closed
- or pt == lastPt
- ):
- pen.lineTo(pt)
- lastPt = pt
- elif segmentType == "curve":
- pen.curveTo(*points)
- lastPt = points[-1]
- elif segmentType == "qcurve":
- pen.qCurveTo(*points)
- lastPt = points[-1]
- else:
- raise PenError(f"Illegal segmentType: {segmentType}")
- if closed:
- pen.closePath()
- else:
- pen.endPath()
-
- def addComponent(self, glyphName, transform, identifier=None, **kwargs):
- del identifier # unused
- del kwargs # unused
- self.pen.addComponent(glyphName, transform)
+ """
+ Adapter class that converts the PointPen protocol to the
+ (Segment)Pen protocol.
+
+ NOTE: The segment pen does not support and will drop point names, identifiers
+ and kwargs.
+ """
+
+ def __init__(self, segmentPen, outputImpliedClosingLine=False):
+ BasePointToSegmentPen.__init__(self)
+ self.pen = segmentPen
+ self.outputImpliedClosingLine = outputImpliedClosingLine
+
+ def _flushContour(self, segments):
+ if not segments:
+ raise PenError("Must have at least one segment.")
+ pen = self.pen
+ if segments[0][0] == "move":
+ # It's an open path.
+ closed = False
+ points = segments[0][1]
+ if len(points) != 1:
+ raise PenError(f"Illegal move segment point count: {len(points)}")
+ movePt, _, _, _ = points[0]
+ del segments[0]
+ else:
+ # It's a closed path, do a moveTo to the last
+ # point of the last segment.
+ closed = True
+ segmentType, points = segments[-1]
+ movePt, _, _, _ = points[-1]
+ if movePt is None:
+ # quad special case: a contour with no on-curve points contains
+ # one "qcurve" segment that ends with a point that's None. We
+ # must not output a moveTo() in that case.
+ pass
+ else:
+ pen.moveTo(movePt)
+ outputImpliedClosingLine = self.outputImpliedClosingLine
+ nSegments = len(segments)
+ lastPt = movePt
+ for i in range(nSegments):
+ segmentType, points = segments[i]
+ points = [pt for pt, _, _, _ in points]
+ if segmentType == "line":
+ if len(points) != 1:
+ raise PenError(f"Illegal line segment point count: {len(points)}")
+ pt = points[0]
+ # For closed contours, a 'lineTo' is always implied from the last oncurve
+ # point to the starting point, thus we can omit it when the last and
+ # starting point don't overlap.
+ # However, when the last oncurve point is a "line" segment and has same
+ # coordinates as the starting point of a closed contour, we need to output
+ # the closing 'lineTo' explicitly (regardless of the value of the
+ # 'outputImpliedClosingLine' option) in order to disambiguate this case from
+ # the implied closing 'lineTo', otherwise the duplicate point would be lost.
+ # See https://github.com/googlefonts/fontmake/issues/572.
+ if (
+ i + 1 != nSegments
+ or outputImpliedClosingLine
+ or not closed
+ or pt == lastPt
+ ):
+ pen.lineTo(pt)
+ lastPt = pt
+ elif segmentType == "curve":
+ pen.curveTo(*points)
+ lastPt = points[-1]
+ elif segmentType == "qcurve":
+ pen.qCurveTo(*points)
+ lastPt = points[-1]
+ else:
+ raise PenError(f"Illegal segmentType: {segmentType}")
+ if closed:
+ pen.closePath()
+ else:
+ pen.endPath()
+
+ def addComponent(self, glyphName, transform, identifier=None, **kwargs):
+ del identifier # unused
+ del kwargs # unused
+ self.pen.addComponent(glyphName, transform)
class SegmentToPointPen(AbstractPen):
- """
- Adapter class that converts the (Segment)Pen protocol to the
- PointPen protocol.
- """
-
- def __init__(self, pointPen, guessSmooth=True):
- if guessSmooth:
- self.pen = GuessSmoothPointPen(pointPen)
- else:
- self.pen = pointPen
- self.contour = None
-
- def _flushContour(self):
- pen = self.pen
- pen.beginPath()
- for pt, segmentType in self.contour:
- pen.addPoint(pt, segmentType=segmentType)
- pen.endPath()
-
- def moveTo(self, pt):
- self.contour = []
- self.contour.append((pt, "move"))
-
- def lineTo(self, pt):
- if self.contour is None:
- raise PenError("Contour missing required initial moveTo")
- self.contour.append((pt, "line"))
-
- def curveTo(self, *pts):
- if not pts:
- raise TypeError("Must pass in at least one point")
- if self.contour is None:
- raise PenError("Contour missing required initial moveTo")
- for pt in pts[:-1]:
- self.contour.append((pt, None))
- self.contour.append((pts[-1], "curve"))
-
- def qCurveTo(self, *pts):
- if not pts:
- raise TypeError("Must pass in at least one point")
- if pts[-1] is None:
- self.contour = []
- else:
- if self.contour is None:
- raise PenError("Contour missing required initial moveTo")
- for pt in pts[:-1]:
- self.contour.append((pt, None))
- if pts[-1] is not None:
- self.contour.append((pts[-1], "qcurve"))
-
- def closePath(self):
- if self.contour is None:
- raise PenError("Contour missing required initial moveTo")
- if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
- self.contour[0] = self.contour[-1]
- del self.contour[-1]
- else:
- # There's an implied line at the end, replace "move" with "line"
- # for the first point
- pt, tp = self.contour[0]
- if tp == "move":
- self.contour[0] = pt, "line"
- self._flushContour()
- self.contour = None
-
- def endPath(self):
- if self.contour is None:
- raise PenError("Contour missing required initial moveTo")
- self._flushContour()
- self.contour = None
-
- def addComponent(self, glyphName, transform):
- if self.contour is not None:
- raise PenError("Components must be added before or after contours")
- self.pen.addComponent(glyphName, transform)
+ """
+ Adapter class that converts the (Segment)Pen protocol to the
+ PointPen protocol.
+ """
+
+ def __init__(self, pointPen, guessSmooth=True):
+ if guessSmooth:
+ self.pen = GuessSmoothPointPen(pointPen)
+ else:
+ self.pen = pointPen
+ self.contour = None
+
+ def _flushContour(self):
+ pen = self.pen
+ pen.beginPath()
+ for pt, segmentType in self.contour:
+ pen.addPoint(pt, segmentType=segmentType)
+ pen.endPath()
+
+ def moveTo(self, pt):
+ self.contour = []
+ self.contour.append((pt, "move"))
+
+ def lineTo(self, pt):
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
+ self.contour.append((pt, "line"))
+
+ def curveTo(self, *pts):
+ if not pts:
+ raise TypeError("Must pass in at least one point")
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
+ for pt in pts[:-1]:
+ self.contour.append((pt, None))
+ self.contour.append((pts[-1], "curve"))
+
+ def qCurveTo(self, *pts):
+ if not pts:
+ raise TypeError("Must pass in at least one point")
+ if pts[-1] is None:
+ self.contour = []
+ else:
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
+ for pt in pts[:-1]:
+ self.contour.append((pt, None))
+ if pts[-1] is not None:
+ self.contour.append((pts[-1], "qcurve"))
+
+ def closePath(self):
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
+ if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
+ self.contour[0] = self.contour[-1]
+ del self.contour[-1]
+ else:
+ # There's an implied line at the end, replace "move" with "line"
+ # for the first point
+ pt, tp = self.contour[0]
+ if tp == "move":
+ self.contour[0] = pt, "line"
+ self._flushContour()
+ self.contour = None
+
+ def endPath(self):
+ if self.contour is None:
+ raise PenError("Contour missing required initial moveTo")
+ self._flushContour()
+ self.contour = None
+
+ def addComponent(self, glyphName, transform):
+ if self.contour is not None:
+ raise PenError("Components must be added before or after contours")
+ self.pen.addComponent(glyphName, transform)
class GuessSmoothPointPen(AbstractPointPen):
- """
- Filtering PointPen that tries to determine whether an on-curve point
- should be "smooth", ie. that it's a "tangent" point or a "curve" point.
- """
-
- def __init__(self, outPen, error=0.05):
- self._outPen = outPen
- self._error = error
- self._points = None
-
- def _flushContour(self):
- if self._points is None:
- raise PenError("Path not begun")
- points = self._points
- nPoints = len(points)
- if not nPoints:
- return
- if points[0][1] == "move":
- # Open path.
- indices = range(1, nPoints - 1)
- elif nPoints > 1:
- # Closed path. To avoid having to mod the contour index, we
- # simply abuse Python's negative index feature, and start at -1
- indices = range(-1, nPoints - 1)
- else:
- # closed path containing 1 point (!), ignore.
- indices = []
- for i in indices:
- pt, segmentType, _, name, kwargs = points[i]
- if segmentType is None:
- continue
- prev = i - 1
- next = i + 1
- if points[prev][1] is not None and points[next][1] is not None:
- continue
- # At least one of our neighbors is an off-curve point
- pt = points[i][0]
- prevPt = points[prev][0]
- nextPt = points[next][0]
- if pt != prevPt and pt != nextPt:
- dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
- dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
- a1 = math.atan2(dy1, dx1)
- a2 = math.atan2(dy2, dx2)
- if abs(a1 - a2) < self._error:
- points[i] = pt, segmentType, True, name, kwargs
-
- for pt, segmentType, smooth, name, kwargs in points:
- self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)
-
- def beginPath(self, identifier=None, **kwargs):
- if self._points is not None:
- raise PenError("Path already begun")
- self._points = []
- if identifier is not None:
- kwargs["identifier"] = identifier
- self._outPen.beginPath(**kwargs)
-
- def endPath(self):
- self._flushContour()
- self._outPen.endPath()
- self._points = None
-
- def addPoint(self, pt, segmentType=None, smooth=False, name=None,
- identifier=None, **kwargs):
- if self._points is None:
- raise PenError("Path not begun")
- if identifier is not None:
- kwargs["identifier"] = identifier
- self._points.append((pt, segmentType, False, name, kwargs))
-
- def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
- if self._points is not None:
- raise PenError("Components must be added before or after contours")
- if identifier is not None:
- kwargs["identifier"] = identifier
- self._outPen.addComponent(glyphName, transformation, **kwargs)
+ """
+ Filtering PointPen that tries to determine whether an on-curve point
+ should be "smooth", ie. that it's a "tangent" point or a "curve" point.
+ """
+
+ def __init__(self, outPen, error=0.05):
+ self._outPen = outPen
+ self._error = error
+ self._points = None
+
+ def _flushContour(self):
+ if self._points is None:
+ raise PenError("Path not begun")
+ points = self._points
+ nPoints = len(points)
+ if not nPoints:
+ return
+ if points[0][1] == "move":
+ # Open path.
+ indices = range(1, nPoints - 1)
+ elif nPoints > 1:
+ # Closed path. To avoid having to mod the contour index, we
+ # simply abuse Python's negative index feature, and start at -1
+ indices = range(-1, nPoints - 1)
+ else:
+ # closed path containing 1 point (!), ignore.
+ indices = []
+ for i in indices:
+ pt, segmentType, _, name, kwargs = points[i]
+ if segmentType is None:
+ continue
+ prev = i - 1
+ next = i + 1
+ if points[prev][1] is not None and points[next][1] is not None:
+ continue
+ # At least one of our neighbors is an off-curve point
+ pt = points[i][0]
+ prevPt = points[prev][0]
+ nextPt = points[next][0]
+ if pt != prevPt and pt != nextPt:
+ dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
+ dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
+ a1 = math.atan2(dy1, dx1)
+ a2 = math.atan2(dy2, dx2)
+ if abs(a1 - a2) < self._error:
+ points[i] = pt, segmentType, True, name, kwargs
+
+ for pt, segmentType, smooth, name, kwargs in points:
+ self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)
+
+ def beginPath(self, identifier=None, **kwargs):
+ if self._points is not None:
+ raise PenError("Path already begun")
+ self._points = []
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self._outPen.beginPath(**kwargs)
+
+ def endPath(self):
+ self._flushContour()
+ self._outPen.endPath()
+ self._points = None
+
+ def addPoint(
+ self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
+ ):
+ if self._points is None:
+ raise PenError("Path not begun")
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self._points.append((pt, segmentType, False, name, kwargs))
+
+ def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
+ if self._points is not None:
+ raise PenError("Components must be added before or after contours")
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self._outPen.addComponent(glyphName, transformation, **kwargs)
+
+ def addVarComponent(
+ self, glyphName, transformation, location, identifier=None, **kwargs
+ ):
+ if self._points is not None:
+ raise PenError("VarComponents must be added before or after contours")
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self._outPen.addVarComponent(glyphName, transformation, location, **kwargs)
class ReverseContourPointPen(AbstractPointPen):
- """
- This is a PointPen that passes outline data to another PointPen, but
- reversing the winding direction of all contours. Components are simply
- passed through unchanged.
-
- Closed contours are reversed in such a way that the first point remains
- the first point.
- """
-
- def __init__(self, outputPointPen):
- self.pen = outputPointPen
- # a place to store the points for the current sub path
- self.currentContour = None
-
- def _flushContour(self):
- pen = self.pen
- contour = self.currentContour
- if not contour:
- pen.beginPath(identifier=self.currentContourIdentifier)
- pen.endPath()
- return
-
- closed = contour[0][1] != "move"
- if not closed:
- lastSegmentType = "move"
- else:
- # Remove the first point and insert it at the end. When
- # the list of points gets reversed, this point will then
- # again be at the start. In other words, the following
- # will hold:
- # for N in range(len(originalContour)):
- # originalContour[N] == reversedContour[-N]
- contour.append(contour.pop(0))
- # Find the first on-curve point.
- firstOnCurve = None
- for i in range(len(contour)):
- if contour[i][1] is not None:
- firstOnCurve = i
- break
- if firstOnCurve is None:
- # There are no on-curve points, be basically have to
- # do nothing but contour.reverse().
- lastSegmentType = None
- else:
- lastSegmentType = contour[firstOnCurve][1]
-
- contour.reverse()
- if not closed:
- # Open paths must start with a move, so we simply dump
- # all off-curve points leading up to the first on-curve.
- while contour[0][1] is None:
- contour.pop(0)
- pen.beginPath(identifier=self.currentContourIdentifier)
- for pt, nextSegmentType, smooth, name, kwargs in contour:
- if nextSegmentType is not None:
- segmentType = lastSegmentType
- lastSegmentType = nextSegmentType
- else:
- segmentType = None
- pen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs)
- pen.endPath()
-
- def beginPath(self, identifier=None, **kwargs):
- if self.currentContour is not None:
- raise PenError("Path already begun")
- self.currentContour = []
- self.currentContourIdentifier = identifier
- self.onCurve = []
-
- def endPath(self):
- if self.currentContour is None:
- raise PenError("Path not begun")
- self._flushContour()
- self.currentContour = None
-
- def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
- if self.currentContour is None:
- raise PenError("Path not begun")
- if identifier is not None:
- kwargs["identifier"] = identifier
- self.currentContour.append((pt, segmentType, smooth, name, kwargs))
-
- def addComponent(self, glyphName, transform, identifier=None, **kwargs):
- if self.currentContour is not None:
- raise PenError("Components must be added before or after contours")
- self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)
+ """
+ This is a PointPen that passes outline data to another PointPen, but
+ reversing the winding direction of all contours. Components are simply
+ passed through unchanged.
+
+ Closed contours are reversed in such a way that the first point remains
+ the first point.
+ """
+
+ def __init__(self, outputPointPen):
+ self.pen = outputPointPen
+ # a place to store the points for the current sub path
+ self.currentContour = None
+
+ def _flushContour(self):
+ pen = self.pen
+ contour = self.currentContour
+ if not contour:
+ pen.beginPath(identifier=self.currentContourIdentifier)
+ pen.endPath()
+ return
+
+ closed = contour[0][1] != "move"
+ if not closed:
+ lastSegmentType = "move"
+ else:
+ # Remove the first point and insert it at the end. When
+ # the list of points gets reversed, this point will then
+ # again be at the start. In other words, the following
+ # will hold:
+ # for N in range(len(originalContour)):
+ # originalContour[N] == reversedContour[-N]
+ contour.append(contour.pop(0))
+ # Find the first on-curve point.
+ firstOnCurve = None
+ for i in range(len(contour)):
+ if contour[i][1] is not None:
+ firstOnCurve = i
+ break
+ if firstOnCurve is None:
+ # There are no on-curve points, be basically have to
+ # do nothing but contour.reverse().
+ lastSegmentType = None
+ else:
+ lastSegmentType = contour[firstOnCurve][1]
+
+ contour.reverse()
+ if not closed:
+ # Open paths must start with a move, so we simply dump
+ # all off-curve points leading up to the first on-curve.
+ while contour[0][1] is None:
+ contour.pop(0)
+ pen.beginPath(identifier=self.currentContourIdentifier)
+ for pt, nextSegmentType, smooth, name, kwargs in contour:
+ if nextSegmentType is not None:
+ segmentType = lastSegmentType
+ lastSegmentType = nextSegmentType
+ else:
+ segmentType = None
+ pen.addPoint(
+ pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs
+ )
+ pen.endPath()
+
+ def beginPath(self, identifier=None, **kwargs):
+ if self.currentContour is not None:
+ raise PenError("Path already begun")
+ self.currentContour = []
+ self.currentContourIdentifier = identifier
+ self.onCurve = []
+
+ def endPath(self):
+ if self.currentContour is None:
+ raise PenError("Path not begun")
+ self._flushContour()
+ self.currentContour = None
+
+ def addPoint(
+ self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
+ ):
+ if self.currentContour is None:
+ raise PenError("Path not begun")
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self.currentContour.append((pt, segmentType, smooth, name, kwargs))
+
+ def addComponent(self, glyphName, transform, identifier=None, **kwargs):
+ if self.currentContour is not None:
+ raise PenError("Components must be added before or after contours")
+ self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)
diff --git a/Lib/fontTools/pens/qtPen.py b/Lib/fontTools/pens/qtPen.py
index d08a344f..eb13d03d 100644
--- a/Lib/fontTools/pens/qtPen.py
+++ b/Lib/fontTools/pens/qtPen.py
@@ -5,25 +5,25 @@ __all__ = ["QtPen"]
class QtPen(BasePen):
+ def __init__(self, glyphSet, path=None):
+ BasePen.__init__(self, glyphSet)
+ if path is None:
+ from PyQt5.QtGui import QPainterPath
- def __init__(self, glyphSet, path=None):
- BasePen.__init__(self, glyphSet)
- if path is None:
- from PyQt5.QtGui import QPainterPath
- path = QPainterPath()
- self.path = path
+ path = QPainterPath()
+ self.path = path
- def _moveTo(self, p):
- self.path.moveTo(*p)
+ def _moveTo(self, p):
+ self.path.moveTo(*p)
- def _lineTo(self, p):
- self.path.lineTo(*p)
+ def _lineTo(self, p):
+ self.path.lineTo(*p)
- def _curveToOne(self, p1, p2, p3):
- self.path.cubicTo(*p1, *p2, *p3)
+ def _curveToOne(self, p1, p2, p3):
+ self.path.cubicTo(*p1, *p2, *p3)
- def _qCurveToOne(self, p1, p2):
- self.path.quadTo(*p1, *p2)
+ def _qCurveToOne(self, p1, p2):
+ self.path.quadTo(*p1, *p2)
- def _closePath(self):
- self.path.closeSubpath()
+ def _closePath(self):
+ self.path.closeSubpath()
diff --git a/Lib/fontTools/pens/qu2cuPen.py b/Lib/fontTools/pens/qu2cuPen.py
new file mode 100644
index 00000000..7e400f98
--- /dev/null
+++ b/Lib/fontTools/pens/qu2cuPen.py
@@ -0,0 +1,105 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from fontTools.qu2cu import quadratic_to_curves
+from fontTools.pens.filterPen import ContourFilterPen
+from fontTools.pens.reverseContourPen import ReverseContourPen
+import math
+
+
+class Qu2CuPen(ContourFilterPen):
+ """A filter pen to convert quadratic bezier splines to cubic curves
+ using the FontTools SegmentPen protocol.
+
+ Args:
+
+ other_pen: another SegmentPen used to draw the transformed outline.
+ max_err: maximum approximation error in font units. For optimal results,
+ if you know the UPEM of the font, we recommend setting this to a
+ value equal, or close to UPEM / 1000.
+ reverse_direction: flip the contours' direction but keep starting point.
+ stats: a dictionary counting the point numbers of cubic segments.
+ """
+
+ def __init__(
+ self,
+ other_pen,
+ max_err,
+ all_cubic=False,
+ reverse_direction=False,
+ stats=None,
+ ):
+ if reverse_direction:
+ other_pen = ReverseContourPen(other_pen)
+ super().__init__(other_pen)
+ self.all_cubic = all_cubic
+ self.max_err = max_err
+ self.stats = stats
+
+ def _quadratics_to_curve(self, q):
+ curves = quadratic_to_curves(q, self.max_err, all_cubic=self.all_cubic)
+ if self.stats is not None:
+ for curve in curves:
+ n = str(len(curve) - 2)
+ self.stats[n] = self.stats.get(n, 0) + 1
+ for curve in curves:
+ if len(curve) == 4:
+ yield ("curveTo", curve[1:])
+ else:
+ yield ("qCurveTo", curve[1:])
+
+ def filterContour(self, contour):
+ quadratics = []
+ currentPt = None
+ newContour = []
+ for op, args in contour:
+ if op == "qCurveTo" and (
+ self.all_cubic or (len(args) > 2 and args[-1] is not None)
+ ):
+ if args[-1] is None:
+ raise NotImplementedError(
+ "oncurve-less contours with all_cubic not implemented"
+ )
+ quadratics.append((currentPt,) + args)
+ else:
+ if quadratics:
+ newContour.extend(self._quadratics_to_curve(quadratics))
+ quadratics = []
+ newContour.append((op, args))
+ currentPt = args[-1] if args else None
+ if quadratics:
+ newContour.extend(self._quadratics_to_curve(quadratics))
+
+ if not self.all_cubic:
+ # Add back implicit oncurve points
+ contour = newContour
+ newContour = []
+ for op, args in contour:
+ if op == "qCurveTo" and newContour and newContour[-1][0] == "qCurveTo":
+ pt0 = newContour[-1][1][-2]
+ pt1 = newContour[-1][1][-1]
+ pt2 = args[0]
+ if (
+ pt1 is not None
+ and math.isclose(pt2[0] - pt1[0], pt1[0] - pt0[0])
+ and math.isclose(pt2[1] - pt1[1], pt1[1] - pt0[1])
+ ):
+ newArgs = newContour[-1][1][:-1] + args
+ newContour[-1] = (op, newArgs)
+ continue
+
+ newContour.append((op, args))
+
+ return newContour
diff --git a/Lib/fontTools/pens/quartzPen.py b/Lib/fontTools/pens/quartzPen.py
index 16b9c2d8..6e1228d6 100644
--- a/Lib/fontTools/pens/quartzPen.py
+++ b/Lib/fontTools/pens/quartzPen.py
@@ -3,43 +3,42 @@ from fontTools.pens.basePen import BasePen
from Quartz.CoreGraphics import CGPathCreateMutable, CGPathMoveToPoint
from Quartz.CoreGraphics import CGPathAddLineToPoint, CGPathAddCurveToPoint
from Quartz.CoreGraphics import CGPathAddQuadCurveToPoint, CGPathCloseSubpath
-
+
__all__ = ["QuartzPen"]
class QuartzPen(BasePen):
-
- """A pen that creates a CGPath
-
- Parameters
- - path: an optional CGPath to add to
- - xform: an optional CGAffineTransform to apply to the path
- """
-
- def __init__(self, glyphSet, path=None, xform=None):
- BasePen.__init__(self, glyphSet)
- if path is None:
- path = CGPathCreateMutable()
- self.path = path
- self.xform = xform
-
- def _moveTo(self, pt):
- x, y = pt
- CGPathMoveToPoint(self.path, self.xform, x, y)
-
- def _lineTo(self, pt):
- x, y = pt
- CGPathAddLineToPoint(self.path, self.xform, x, y)
-
- def _curveToOne(self, p1, p2, p3):
- (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
- CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3)
-
- def _qCurveToOne(self, p1, p2):
- (x1, y1), (x2, y2) = p1, p2
- CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2)
-
- def _closePath(self):
- CGPathCloseSubpath(self.path)
+ """A pen that creates a CGPath
+
+ Parameters
+ - path: an optional CGPath to add to
+ - xform: an optional CGAffineTransform to apply to the path
+ """
+
+ def __init__(self, glyphSet, path=None, xform=None):
+ BasePen.__init__(self, glyphSet)
+ if path is None:
+ path = CGPathCreateMutable()
+ self.path = path
+ self.xform = xform
+
+ def _moveTo(self, pt):
+ x, y = pt
+ CGPathMoveToPoint(self.path, self.xform, x, y)
+
+ def _lineTo(self, pt):
+ x, y = pt
+ CGPathAddLineToPoint(self.path, self.xform, x, y)
+
+ def _curveToOne(self, p1, p2, p3):
+ (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
+ CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3)
+
+ def _qCurveToOne(self, p1, p2):
+ (x1, y1), (x2, y2) = p1, p2
+ CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2)
+
+ def _closePath(self):
+ CGPathCloseSubpath(self.path)
diff --git a/Lib/fontTools/pens/recordingPen.py b/Lib/fontTools/pens/recordingPen.py
index 70f05e83..6c3b6613 100644
--- a/Lib/fontTools/pens/recordingPen.py
+++ b/Lib/fontTools/pens/recordingPen.py
@@ -4,152 +4,176 @@ from fontTools.pens.pointPen import AbstractPointPen
__all__ = [
- "replayRecording",
- "RecordingPen",
- "DecomposingRecordingPen",
- "RecordingPointPen",
+ "replayRecording",
+ "RecordingPen",
+ "DecomposingRecordingPen",
+ "RecordingPointPen",
]
def replayRecording(recording, pen):
- """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen,
- to a pen.
+ """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen,
+ to a pen.
- Note that recording does not have to be produced by those pens.
- It can be any iterable of tuples of method name and tuple-of-arguments.
- Likewise, pen can be any objects receiving those method calls.
- """
- for operator,operands in recording:
- getattr(pen, operator)(*operands)
+ Note that recording does not have to be produced by those pens.
+ It can be any iterable of tuples of method name and tuple-of-arguments.
+ Likewise, pen can be any objects receiving those method calls.
+ """
+ for operator, operands in recording:
+ getattr(pen, operator)(*operands)
class RecordingPen(AbstractPen):
- """Pen recording operations that can be accessed or replayed.
-
- The recording can be accessed as pen.value; or replayed using
- pen.replay(otherPen).
-
- :Example:
-
- from fontTools.ttLib import TTFont
- from fontTools.pens.recordingPen import RecordingPen
-
- glyph_name = 'dollar'
- font_path = 'MyFont.otf'
-
- font = TTFont(font_path)
- glyphset = font.getGlyphSet()
- glyph = glyphset[glyph_name]
-
- pen = RecordingPen()
- glyph.draw(pen)
- print(pen.value)
- """
-
- def __init__(self):
- self.value = []
- def moveTo(self, p0):
- self.value.append(('moveTo', (p0,)))
- def lineTo(self, p1):
- self.value.append(('lineTo', (p1,)))
- def qCurveTo(self, *points):
- self.value.append(('qCurveTo', points))
- def curveTo(self, *points):
- self.value.append(('curveTo', points))
- def closePath(self):
- self.value.append(('closePath', ()))
- def endPath(self):
- self.value.append(('endPath', ()))
- def addComponent(self, glyphName, transformation):
- self.value.append(('addComponent', (glyphName, transformation)))
- def replay(self, pen):
- replayRecording(self.value, pen)
+ """Pen recording operations that can be accessed or replayed.
+
+ The recording can be accessed as pen.value; or replayed using
+ pen.replay(otherPen).
+
+ :Example:
+
+ from fontTools.ttLib import TTFont
+ from fontTools.pens.recordingPen import RecordingPen
+
+ glyph_name = 'dollar'
+ font_path = 'MyFont.otf'
+
+ font = TTFont(font_path)
+ glyphset = font.getGlyphSet()
+ glyph = glyphset[glyph_name]
+
+ pen = RecordingPen()
+ glyph.draw(pen)
+ print(pen.value)
+ """
+
+ def __init__(self):
+ self.value = []
+
+ def moveTo(self, p0):
+ self.value.append(("moveTo", (p0,)))
+
+ def lineTo(self, p1):
+ self.value.append(("lineTo", (p1,)))
+
+ def qCurveTo(self, *points):
+ self.value.append(("qCurveTo", points))
+
+ def curveTo(self, *points):
+ self.value.append(("curveTo", points))
+
+ def closePath(self):
+ self.value.append(("closePath", ()))
+
+ def endPath(self):
+ self.value.append(("endPath", ()))
+
+ def addComponent(self, glyphName, transformation):
+ self.value.append(("addComponent", (glyphName, transformation)))
+
+ def addVarComponent(self, glyphName, transformation, location):
+ self.value.append(("addVarComponent", (glyphName, transformation, location)))
+
+ def replay(self, pen):
+ replayRecording(self.value, pen)
class DecomposingRecordingPen(DecomposingPen, RecordingPen):
- """ Same as RecordingPen, except that it doesn't keep components
- as references, but draws them decomposed as regular contours.
-
- The constructor takes a single 'glyphSet' positional argument,
- a dictionary of glyph objects (i.e. with a 'draw' method) keyed
- by thir name::
-
- >>> class SimpleGlyph(object):
- ... def draw(self, pen):
- ... pen.moveTo((0, 0))
- ... pen.curveTo((1, 1), (2, 2), (3, 3))
- ... pen.closePath()
- >>> class CompositeGlyph(object):
- ... def draw(self, pen):
- ... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
- >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
- >>> for name, glyph in sorted(glyphSet.items()):
- ... pen = DecomposingRecordingPen(glyphSet)
- ... glyph.draw(pen)
- ... print("{}: {}".format(name, pen.value))
- a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
- b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
- """
- # raises KeyError if base glyph is not found in glyphSet
- skipMissingComponents = False
+ """Same as RecordingPen, except that it doesn't keep components
+ as references, but draws them decomposed as regular contours.
+
+ The constructor takes a single 'glyphSet' positional argument,
+ a dictionary of glyph objects (i.e. with a 'draw' method) keyed
+ by thir name::
+
+ >>> class SimpleGlyph(object):
+ ... def draw(self, pen):
+ ... pen.moveTo((0, 0))
+ ... pen.curveTo((1, 1), (2, 2), (3, 3))
+ ... pen.closePath()
+ >>> class CompositeGlyph(object):
+ ... def draw(self, pen):
+ ... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
+ >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
+ >>> for name, glyph in sorted(glyphSet.items()):
+ ... pen = DecomposingRecordingPen(glyphSet)
+ ... glyph.draw(pen)
+ ... print("{}: {}".format(name, pen.value))
+ a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
+ b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
+ """
+
+ # raises KeyError if base glyph is not found in glyphSet
+ skipMissingComponents = False
class RecordingPointPen(AbstractPointPen):
- """PointPen recording operations that can be accessed or replayed.
+ """PointPen recording operations that can be accessed or replayed.
+
+ The recording can be accessed as pen.value; or replayed using
+ pointPen.replay(otherPointPen).
- The recording can be accessed as pen.value; or replayed using
- pointPen.replay(otherPointPen).
+ :Example:
- :Example:
+ from defcon import Font
+ from fontTools.pens.recordingPen import RecordingPointPen
- from defcon import Font
- from fontTools.pens.recordingPen import RecordingPointPen
+ glyph_name = 'a'
+ font_path = 'MyFont.ufo'
- glyph_name = 'a'
- font_path = 'MyFont.ufo'
+ font = Font(font_path)
+ glyph = font[glyph_name]
- font = Font(font_path)
- glyph = font[glyph_name]
+ pen = RecordingPointPen()
+ glyph.drawPoints(pen)
+ print(pen.value)
- pen = RecordingPointPen()
- glyph.drawPoints(pen)
- print(pen.value)
+ new_glyph = font.newGlyph('b')
+ pen.replay(new_glyph.getPointPen())
+ """
- new_glyph = font.newGlyph('b')
- pen.replay(new_glyph.getPointPen())
- """
+ def __init__(self):
+ self.value = []
- def __init__(self):
- self.value = []
+ def beginPath(self, identifier=None, **kwargs):
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self.value.append(("beginPath", (), kwargs))
- def beginPath(self, identifier=None, **kwargs):
- if identifier is not None:
- kwargs["identifier"] = identifier
- self.value.append(("beginPath", (), kwargs))
+ def endPath(self):
+ self.value.append(("endPath", (), {}))
- def endPath(self):
- self.value.append(("endPath", (), {}))
+ def addPoint(
+ self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
+ ):
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
- def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
- if identifier is not None:
- kwargs["identifier"] = identifier
- self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
+ def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
- def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
- if identifier is not None:
- kwargs["identifier"] = identifier
- self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
+ def addVarComponent(
+ self, baseGlyphName, transformation, location, identifier=None, **kwargs
+ ):
+ if identifier is not None:
+ kwargs["identifier"] = identifier
+ self.value.append(
+ ("addVarComponent", (baseGlyphName, transformation, location), kwargs)
+ )
- def replay(self, pointPen):
- for operator, args, kwargs in self.value:
- getattr(pointPen, operator)(*args, **kwargs)
+ def replay(self, pointPen):
+ for operator, args, kwargs in self.value:
+ getattr(pointPen, operator)(*args, **kwargs)
if __name__ == "__main__":
- pen = RecordingPen()
- pen.moveTo((0, 0))
- pen.lineTo((0, 100))
- pen.curveTo((50, 75), (60, 50), (50, 25))
- pen.closePath()
- from pprint import pprint
- pprint(pen.value)
+ pen = RecordingPen()
+ pen.moveTo((0, 0))
+ pen.lineTo((0, 100))
+ pen.curveTo((50, 75), (60, 50), (50, 25))
+ pen.closePath()
+ from pprint import pprint
+
+ pprint(pen.value)
diff --git a/Lib/fontTools/pens/reportLabPen.py b/Lib/fontTools/pens/reportLabPen.py
index 43217d42..2cb89c8b 100644
--- a/Lib/fontTools/pens/reportLabPen.py
+++ b/Lib/fontTools/pens/reportLabPen.py
@@ -7,67 +7,74 @@ __all__ = ["ReportLabPen"]
class ReportLabPen(BasePen):
- """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""
-
- def __init__(self, glyphSet, path=None):
- BasePen.__init__(self, glyphSet)
- if path is None:
- path = Path()
- self.path = path
-
- def _moveTo(self, p):
- (x,y) = p
- self.path.moveTo(x,y)
-
- def _lineTo(self, p):
- (x,y) = p
- self.path.lineTo(x,y)
-
- def _curveToOne(self, p1, p2, p3):
- (x1,y1) = p1
- (x2,y2) = p2
- (x3,y3) = p3
- self.path.curveTo(x1, y1, x2, y2, x3, y3)
-
- def _closePath(self):
- self.path.closePath()
-
-
-if __name__=="__main__":
- import sys
- if len(sys.argv) < 3:
- print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
- print(" If no image file name is created, by default <glyphname>.png is created.")
- print(" example: reportLabPen.py Arial.TTF R test.png")
- print(" (The file format will be PNG, regardless of the image file name supplied)")
- sys.exit(0)
-
- from fontTools.ttLib import TTFont
- from reportlab.lib import colors
-
- path = sys.argv[1]
- glyphName = sys.argv[2]
- if (len(sys.argv) > 3):
- imageFile = sys.argv[3]
- else:
- imageFile = "%s.png" % glyphName
-
- font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font
- gs = font.getGlyphSet()
- pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
- g = gs[glyphName]
- g.draw(pen)
-
- w, h = g.width, 1000
- from reportlab.graphics import renderPM
- from reportlab.graphics.shapes import Group, Drawing, scale
-
- # Everything is wrapped in a group to allow transformations.
- g = Group(pen.path)
- g.translate(0, 200)
- g.scale(0.3, 0.3)
-
- d = Drawing(w, h)
- d.add(g)
-
- renderPM.drawToFile(d, imageFile, fmt="PNG")
+ """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""
+
+ def __init__(self, glyphSet, path=None):
+ BasePen.__init__(self, glyphSet)
+ if path is None:
+ path = Path()
+ self.path = path
+
+ def _moveTo(self, p):
+ (x, y) = p
+ self.path.moveTo(x, y)
+
+ def _lineTo(self, p):
+ (x, y) = p
+ self.path.lineTo(x, y)
+
+ def _curveToOne(self, p1, p2, p3):
+ (x1, y1) = p1
+ (x2, y2) = p2
+ (x3, y3) = p3
+ self.path.curveTo(x1, y1, x2, y2, x3, y3)
+
+ def _closePath(self):
+ self.path.closePath()
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) < 3:
+ print(
+ "Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]"
+ )
+ print(
+ " If no image file name is created, by default <glyphname>.png is created."
+ )
+ print(" example: reportLabPen.py Arial.TTF R test.png")
+ print(
+ " (The file format will be PNG, regardless of the image file name supplied)"
+ )
+ sys.exit(0)
+
+ from fontTools.ttLib import TTFont
+ from reportlab.lib import colors
+
+ path = sys.argv[1]
+ glyphName = sys.argv[2]
+ if len(sys.argv) > 3:
+ imageFile = sys.argv[3]
+ else:
+ imageFile = "%s.png" % glyphName
+
+ font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font
+ gs = font.getGlyphSet()
+ pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
+ g = gs[glyphName]
+ g.draw(pen)
+
+ w, h = g.width, 1000
+ from reportlab.graphics import renderPM
+ from reportlab.graphics.shapes import Group, Drawing, scale
+
+ # Everything is wrapped in a group to allow transformations.
+ g = Group(pen.path)
+ g.translate(0, 200)
+ g.scale(0.3, 0.3)
+
+ d = Drawing(w, h)
+ d.add(g)
+
+ renderPM.drawToFile(d, imageFile, fmt="PNG")
diff --git a/Lib/fontTools/pens/reverseContourPen.py b/Lib/fontTools/pens/reverseContourPen.py
index 9b3241b6..a3756ab1 100644
--- a/Lib/fontTools/pens/reverseContourPen.py
+++ b/Lib/fontTools/pens/reverseContourPen.py
@@ -14,12 +14,16 @@ class ReverseContourPen(ContourFilterPen):
the first point.
"""
+ def __init__(self, outPen, outputImpliedClosingLine=False):
+ super().__init__(outPen)
+ self.outputImpliedClosingLine = outputImpliedClosingLine
+
def filterContour(self, contour):
- return reversedContour(contour)
+ return reversedContour(contour, self.outputImpliedClosingLine)
-def reversedContour(contour):
- """ Generator that takes a list of pen's (operator, operands) tuples,
+def reversedContour(contour, outputImpliedClosingLine=False):
+ """Generator that takes a list of pen's (operator, operands) tuples,
and yields them with the winding direction reversed.
"""
if not contour:
@@ -36,16 +40,14 @@ def reversedContour(contour):
firstType, firstPts = contour.pop(0)
assert firstType in ("moveTo", "qCurveTo"), (
- "invalid initial segment type: %r" % firstType)
+ "invalid initial segment type: %r" % firstType
+ )
firstOnCurve = firstPts[-1]
if firstType == "qCurveTo":
# special case for TrueType paths contaning only off-curve points
- assert firstOnCurve is None, (
- "off-curve only paths must end with 'None'")
- assert not contour, (
- "only one qCurveTo allowed per off-curve path")
- firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
- (None,))
+ assert firstOnCurve is None, "off-curve only paths must end with 'None'"
+ assert not contour, "only one qCurveTo allowed per off-curve path"
+ firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,)
if not contour:
# contour contains only one segment, nothing to reverse
@@ -63,23 +65,23 @@ def reversedContour(contour):
if firstOnCurve != lastOnCurve:
# emit an implied line between the last and first points
yield "lineTo", (lastOnCurve,)
- contour[-1] = (lastType,
- tuple(lastPts[:-1]) + (firstOnCurve,))
+ contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
if len(contour) > 1:
secondType, secondPts = contour[0]
else:
# contour has only two points, the second and last are the same
secondType, secondPts = lastType, lastPts
- # if a lineTo follows the initial moveTo, after reversing it
- # will be implied by the closePath, so we don't emit one;
- # unless the lineTo and moveTo overlap, in which case we keep the
- # duplicate points
- if secondType == "lineTo" and firstPts != secondPts:
- del contour[0]
- if contour:
- contour[-1] = (lastType,
- tuple(lastPts[:-1]) + secondPts)
+
+ if not outputImpliedClosingLine:
+ # if a lineTo follows the initial moveTo, after reversing it
+ # will be implied by the closePath, so we don't emit one;
+ # unless the lineTo and moveTo overlap, in which case we keep the
+ # duplicate points
+ if secondType == "lineTo" and firstPts != secondPts:
+ del contour[0]
+ if contour:
+ contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts)
else:
# for open paths, the last point will become the first
yield firstType, (lastOnCurve,)
@@ -88,8 +90,7 @@ def reversedContour(contour):
# we iterate over all segment pairs in reverse order, and yield
# each one with the off-curve points reversed (if any), and
# with the on-curve point of the following segment
- for (curType, curPts), (_, nextPts) in pairwise(
- contour, reverse=True):
+ for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True):
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
yield "closePath" if closed else "endPath", ()
diff --git a/Lib/fontTools/pens/statisticsPen.py b/Lib/fontTools/pens/statisticsPen.py
index 15830672..39f319e0 100644
--- a/Lib/fontTools/pens/statisticsPen.py
+++ b/Lib/fontTools/pens/statisticsPen.py
@@ -8,95 +8,190 @@ __all__ = ["StatisticsPen"]
class StatisticsPen(MomentsPen):
- """Pen calculating area, center of mass, variance and
- standard-deviation, covariance and correlation, and slant,
- of glyph shapes.
-
- Note that all the calculated values are 'signed'. Ie. if the
- glyph shape is self-intersecting, the values are not correct
- (but well-defined). As such, area will be negative if contour
- directions are clockwise. Moreover, variance might be negative
- if the shapes are self-intersecting in certain ways."""
-
- def __init__(self, glyphset=None):
- MomentsPen.__init__(self, glyphset=glyphset)
- self.__zero()
-
- def _closePath(self):
- MomentsPen._closePath(self)
- self.__update()
-
- def __zero(self):
- self.meanX = 0
- self.meanY = 0
- self.varianceX = 0
- self.varianceY = 0
- self.stddevX = 0
- self.stddevY = 0
- self.covariance = 0
- self.correlation = 0
- self.slant = 0
-
- def __update(self):
-
- area = self.area
- if not area:
- self.__zero()
- return
-
- # Center of mass
- # https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume
- self.meanX = meanX = self.momentX / area
- self.meanY = meanY = self.momentY / area
-
- # Var(X) = E[X^2] - E[X]^2
- self.varianceX = varianceX = self.momentXX / area - meanX**2
- self.varianceY = varianceY = self.momentYY / area - meanY**2
-
- self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX)
- self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY)
-
- # Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
- self.covariance = covariance = self.momentXY / area - meanX*meanY
-
- # Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) )
- # https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
- if stddevX * stddevY == 0:
- correlation = float("NaN")
- else:
- correlation = covariance / (stddevX * stddevY)
- self.correlation = correlation if abs(correlation) > 1e-3 else 0
-
- slant = covariance / varianceY if varianceY != 0 else float("NaN")
- self.slant = slant if abs(slant) > 1e-3 else 0
-
-
-def _test(glyphset, upem, glyphs):
- from fontTools.pens.transformPen import TransformPen
- from fontTools.misc.transform import Scale
-
- print('upem', upem)
-
- for glyph_name in glyphs:
- print()
- print("glyph:", glyph_name)
- glyph = glyphset[glyph_name]
- pen = StatisticsPen(glyphset=glyphset)
- transformer = TransformPen(pen, Scale(1./upem))
- glyph.draw(transformer)
- for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']:
- print ("%s: %g" % (item, getattr(pen, item)))
+ """Pen calculating area, center of mass, variance and
+ standard-deviation, covariance and correlation, and slant,
+ of glyph shapes.
+
+ Note that all the calculated values are 'signed'. Ie. if the
+ glyph shape is self-intersecting, the values are not correct
+ (but well-defined). As such, area will be negative if contour
+ directions are clockwise. Moreover, variance might be negative
+ if the shapes are self-intersecting in certain ways."""
+
+ def __init__(self, glyphset=None):
+ MomentsPen.__init__(self, glyphset=glyphset)
+ self.__zero()
+
+ def _closePath(self):
+ MomentsPen._closePath(self)
+ self.__update()
+
+ def __zero(self):
+ self.meanX = 0
+ self.meanY = 0
+ self.varianceX = 0
+ self.varianceY = 0
+ self.stddevX = 0
+ self.stddevY = 0
+ self.covariance = 0
+ self.correlation = 0
+ self.slant = 0
+
+ def __update(self):
+ area = self.area
+ if not area:
+ self.__zero()
+ return
+
+ # Center of mass
+ # https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume
+ self.meanX = meanX = self.momentX / area
+ self.meanY = meanY = self.momentY / area
+
+ # Var(X) = E[X^2] - E[X]^2
+ self.varianceX = varianceX = self.momentXX / area - meanX**2
+ self.varianceY = varianceY = self.momentYY / area - meanY**2
+
+ self.stddevX = stddevX = math.copysign(abs(varianceX) ** 0.5, varianceX)
+ self.stddevY = stddevY = math.copysign(abs(varianceY) ** 0.5, varianceY)
+
+ # Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
+ self.covariance = covariance = self.momentXY / area - meanX * meanY
+
+ # Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) )
+ # https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
+ if stddevX * stddevY == 0:
+ correlation = float("NaN")
+ else:
+ correlation = covariance / (stddevX * stddevY)
+ self.correlation = correlation if abs(correlation) > 1e-3 else 0
+
+ slant = covariance / varianceY if varianceY != 0 else float("NaN")
+ self.slant = slant if abs(slant) > 1e-3 else 0
+
+
+def _test(glyphset, upem, glyphs, quiet=False):
+ from fontTools.pens.transformPen import TransformPen
+ from fontTools.misc.transform import Scale
+
+ wght_sum = 0
+ wght_sum_perceptual = 0
+ wdth_sum = 0
+ slnt_sum = 0
+ slnt_sum_perceptual = 0
+ for glyph_name in glyphs:
+ glyph = glyphset[glyph_name]
+ pen = StatisticsPen(glyphset=glyphset)
+ transformer = TransformPen(pen, Scale(1.0 / upem))
+ glyph.draw(transformer)
+
+ area = abs(pen.area)
+ width = glyph.width
+ wght_sum += area
+ wght_sum_perceptual += pen.area * width
+ wdth_sum += width
+ slnt_sum += pen.slant
+ slnt_sum_perceptual += pen.slant * width
+
+ if quiet:
+ continue
+
+ print()
+ print("glyph:", glyph_name)
+
+ for item in [
+ "area",
+ "momentX",
+ "momentY",
+ "momentXX",
+ "momentYY",
+ "momentXY",
+ "meanX",
+ "meanY",
+ "varianceX",
+ "varianceY",
+ "stddevX",
+ "stddevY",
+ "covariance",
+ "correlation",
+ "slant",
+ ]:
+ print("%s: %g" % (item, getattr(pen, item)))
+
+ if not quiet:
+ print()
+ print("font:")
+
+ print("weight: %g" % (wght_sum * upem / wdth_sum))
+ print("weight (perceptual): %g" % (wght_sum_perceptual / wdth_sum))
+ print("width: %g" % (wdth_sum / upem / len(glyphs)))
+ slant = slnt_sum / len(glyphs)
+ print("slant: %g" % slant)
+ print("slant angle: %g" % -math.degrees(math.atan(slant)))
+ slant_perceptual = slnt_sum_perceptual / wdth_sum
+ print("slant (perceptual): %g" % slant_perceptual)
+ print("slant (perceptual) angle: %g" % -math.degrees(math.atan(slant_perceptual)))
+
def main(args):
- if not args:
- return
- filename, glyphs = args[0], args[1:]
- from fontTools.ttLib import TTFont
- font = TTFont(filename)
- if not glyphs:
- glyphs = font.getGlyphOrder()
- _test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs)
-
-if __name__ == '__main__':
- import sys
- main(sys.argv[1:])
+ """Report font glyph shape geometricsl statistics"""
+
+ if args is None:
+ import sys
+
+ args = sys.argv[1:]
+
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools pens.statisticsPen",
+ description="Report font glyph shape geometricsl statistics",
+ )
+ parser.add_argument("font", metavar="font.ttf", help="Font file.")
+ parser.add_argument("glyphs", metavar="glyph-name", help="Glyph names.", nargs="*")
+ parser.add_argument(
+ "-y",
+ metavar="<number>",
+ help="Face index into a collection to open. Zero based.",
+ )
+ parser.add_argument(
+ "-q", "--quiet", action="store_true", help="Only report font-wide statistics."
+ )
+ parser.add_argument(
+ "--variations",
+ metavar="AXIS=LOC",
+ default="",
+ help="List of space separated locations. A location consist in "
+ "the name of a variation axis, followed by '=' and a number. E.g.: "
+ "wght=700 wdth=80. The default is the location of the base master.",
+ )
+
+ options = parser.parse_args(args)
+
+ glyphs = options.glyphs
+ fontNumber = int(options.y) if options.y is not None else 0
+
+ location = {}
+ for tag_v in options.variations.split():
+ fields = tag_v.split("=")
+ tag = fields[0].strip()
+ v = int(fields[1])
+ location[tag] = v
+
+ from fontTools.ttLib import TTFont
+
+ font = TTFont(options.font, fontNumber=fontNumber)
+ if not glyphs:
+ glyphs = font.getGlyphOrder()
+ _test(
+ font.getGlyphSet(location=location),
+ font["head"].unitsPerEm,
+ glyphs,
+ quiet=options.quiet,
+ )
+
+
+if __name__ == "__main__":
+ import sys
+
+ main(sys.argv[1:])
diff --git a/Lib/fontTools/pens/svgPathPen.py b/Lib/fontTools/pens/svgPathPen.py
index 106e33b7..ae6ebfbd 100644
--- a/Lib/fontTools/pens/svgPathPen.py
+++ b/Lib/fontTools/pens/svgPathPen.py
@@ -7,7 +7,7 @@ def pointToString(pt, ntos=str):
class SVGPathPen(BasePen):
- """ Pen to draw SVG path d commands.
+ """Pen to draw SVG path d commands.
Example::
>>> pen = SVGPathPen(None)
@@ -36,6 +36,7 @@ class SVGPathPen(BasePen):
glyphset[glyphname].draw(pen)
print(tpen.getCommands())
"""
+
def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
BasePen.__init__(self, glyphSet)
self._commands = []
@@ -195,9 +196,8 @@ class SVGPathPen(BasePen):
>>> pen = SVGPathPen(None)
>>> pen.endPath()
>>> pen._commands
- ['Z']
+ []
"""
- self._closePath()
self._lastCommand = None
self._lastX = self._lastY = None
@@ -210,42 +210,52 @@ def main(args=None):
if args is None:
import sys
+
args = sys.argv[1:]
from fontTools.ttLib import TTFont
import argparse
parser = argparse.ArgumentParser(
- "fonttools pens.svgPathPen", description="Generate SVG from text")
- parser.add_argument(
- "font", metavar="font.ttf", help="Font file.")
+ "fonttools pens.svgPathPen", description="Generate SVG from text"
+ )
+ parser.add_argument("font", metavar="font.ttf", help="Font file.")
+ parser.add_argument("text", metavar="text", help="Text string.")
parser.add_argument(
- "text", metavar="text", help="Text string.")
+ "-y",
+ metavar="<number>",
+ help="Face index into a collection to open. Zero based.",
+ )
parser.add_argument(
- "--variations", metavar="AXIS=LOC", default='',
+ "--variations",
+ metavar="AXIS=LOC",
+ default="",
help="List of space separated locations. A location consist in "
"the name of a variation axis, followed by '=' and a number. E.g.: "
- "wght=700 wdth=80. The default is the location of the base master.")
+ "wght=700 wdth=80. The default is the location of the base master.",
+ )
options = parser.parse_args(args)
- font = TTFont(options.font)
+ fontNumber = int(options.y) if options.y is not None else 0
+
+ font = TTFont(options.font, fontNumber=fontNumber)
text = options.text
location = {}
for tag_v in options.variations.split():
- fields = tag_v.split('=')
+ fields = tag_v.split("=")
tag = fields[0].strip()
v = int(fields[1])
location[tag] = v
- hhea = font['hhea']
+ hhea = font["hhea"]
ascent, descent = hhea.ascent, hhea.descent
glyphset = font.getGlyphSet(location=location)
- cmap = font['cmap'].getBestCmap()
+ cmap = font["cmap"].getBestCmap()
- s = ''
+ s = ""
width = 0
for u in text:
g = cmap[ord(u)]
@@ -255,20 +265,29 @@ def main(args=None):
glyph.draw(pen)
commands = pen.getCommands()
- s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (width, ascent, commands)
+ s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (
+ width,
+ ascent,
+ commands,
+ )
width += glyph.width
print('<?xml version="1.0" encoding="UTF-8"?>')
- print('<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' % (width, ascent-descent))
- print(s, end='')
- print('</svg>')
+ print(
+ '<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">'
+ % (width, ascent - descent)
+ )
+ print(s, end="")
+ print("</svg>")
if __name__ == "__main__":
import sys
+
if len(sys.argv) == 1:
import doctest
+
sys.exit(doctest.testmod().failed)
sys.exit(main())
diff --git a/Lib/fontTools/pens/t2CharStringPen.py b/Lib/fontTools/pens/t2CharStringPen.py
index 0fddec1a..41ab0f92 100644
--- a/Lib/fontTools/pens/t2CharStringPen.py
+++ b/Lib/fontTools/pens/t2CharStringPen.py
@@ -24,22 +24,22 @@ class T2CharStringPen(BasePen):
self._CFF2 = CFF2
self._width = width
self._commands = []
- self._p0 = (0,0)
+ self._p0 = (0, 0)
def _p(self, pt):
p0 = self._p0
pt = self._p0 = (self.round(pt[0]), self.round(pt[1]))
- return [pt[0]-p0[0], pt[1]-p0[1]]
+ return [pt[0] - p0[0], pt[1] - p0[1]]
def _moveTo(self, pt):
- self._commands.append(('rmoveto', self._p(pt)))
+ self._commands.append(("rmoveto", self._p(pt)))
def _lineTo(self, pt):
- self._commands.append(('rlineto', self._p(pt)))
+ self._commands.append(("rlineto", self._p(pt)))
def _curveToOne(self, pt1, pt2, pt3):
_p = self._p
- self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3)))
+ self._commands.append(("rrcurveto", _p(pt1) + _p(pt2) + _p(pt3)))
def _closePath(self):
pass
@@ -51,15 +51,18 @@ class T2CharStringPen(BasePen):
commands = self._commands
if optimize:
maxstack = 48 if not self._CFF2 else 513
- commands = specializeCommands(commands,
- generalizeFirst=False,
- maxstack=maxstack)
+ commands = specializeCommands(
+ commands, generalizeFirst=False, maxstack=maxstack
+ )
program = commandsToProgram(commands)
if self._width is not None:
- assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString."
+ assert (
+ not self._CFF2
+ ), "CFF2 does not allow encoding glyph width in CharString."
program.insert(0, otRound(self._width))
if not self._CFF2:
- program.append('endchar')
+ program.append("endchar")
charString = T2CharString(
- program=program, private=private, globalSubrs=globalSubrs)
+ program=program, private=private, globalSubrs=globalSubrs
+ )
return charString
diff --git a/Lib/fontTools/pens/teePen.py b/Lib/fontTools/pens/teePen.py
index 2f30e922..2828175a 100644
--- a/Lib/fontTools/pens/teePen.py
+++ b/Lib/fontTools/pens/teePen.py
@@ -6,41 +6,49 @@ __all__ = ["TeePen"]
class TeePen(AbstractPen):
- """Pen multiplexing drawing to one or more pens.
-
- Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens)."""
-
- def __init__(self, *pens):
- if len(pens) == 1:
- pens = pens[0]
- self.pens = pens
- def moveTo(self, p0):
- for pen in self.pens:
- pen.moveTo(p0)
- def lineTo(self, p1):
- for pen in self.pens:
- pen.lineTo(p1)
- def qCurveTo(self, *points):
- for pen in self.pens:
- pen.qCurveTo(*points)
- def curveTo(self, *points):
- for pen in self.pens:
- pen.curveTo(*points)
- def closePath(self):
- for pen in self.pens:
- pen.closePath()
- def endPath(self):
- for pen in self.pens:
- pen.endPath()
- def addComponent(self, glyphName, transformation):
- for pen in self.pens:
- pen.addComponent(glyphName, transformation)
+ """Pen multiplexing drawing to one or more pens.
+
+ Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens)."""
+
+ def __init__(self, *pens):
+ if len(pens) == 1:
+ pens = pens[0]
+ self.pens = pens
+
+ def moveTo(self, p0):
+ for pen in self.pens:
+ pen.moveTo(p0)
+
+ def lineTo(self, p1):
+ for pen in self.pens:
+ pen.lineTo(p1)
+
+ def qCurveTo(self, *points):
+ for pen in self.pens:
+ pen.qCurveTo(*points)
+
+ def curveTo(self, *points):
+ for pen in self.pens:
+ pen.curveTo(*points)
+
+ def closePath(self):
+ for pen in self.pens:
+ pen.closePath()
+
+ def endPath(self):
+ for pen in self.pens:
+ pen.endPath()
+
+ def addComponent(self, glyphName, transformation):
+ for pen in self.pens:
+ pen.addComponent(glyphName, transformation)
if __name__ == "__main__":
- from fontTools.pens.basePen import _TestPen
- pen = TeePen(_TestPen(), _TestPen())
- pen.moveTo((0, 0))
- pen.lineTo((0, 100))
- pen.curveTo((50, 75), (60, 50), (50, 25))
- pen.closePath()
+ from fontTools.pens.basePen import _TestPen
+
+ pen = TeePen(_TestPen(), _TestPen())
+ pen.moveTo((0, 0))
+ pen.lineTo((0, 100))
+ pen.curveTo((50, 75), (60, 50), (50, 25))
+ pen.closePath()
diff --git a/Lib/fontTools/pens/transformPen.py b/Lib/fontTools/pens/transformPen.py
index 93d19191..2e572f61 100644
--- a/Lib/fontTools/pens/transformPen.py
+++ b/Lib/fontTools/pens/transformPen.py
@@ -6,103 +6,106 @@ __all__ = ["TransformPen", "TransformPointPen"]
class TransformPen(FilterPen):
- """Pen that transforms all coordinates using a Affine transformation,
- and passes them to another pen.
- """
-
- def __init__(self, outPen, transformation):
- """The 'outPen' argument is another pen object. It will receive the
- transformed coordinates. The 'transformation' argument can either
- be a six-tuple, or a fontTools.misc.transform.Transform object.
- """
- super(TransformPen, self).__init__(outPen)
- if not hasattr(transformation, "transformPoint"):
- from fontTools.misc.transform import Transform
- transformation = Transform(*transformation)
- self._transformation = transformation
- self._transformPoint = transformation.transformPoint
- self._stack = []
-
- def moveTo(self, pt):
- self._outPen.moveTo(self._transformPoint(pt))
-
- def lineTo(self, pt):
- self._outPen.lineTo(self._transformPoint(pt))
-
- def curveTo(self, *points):
- self._outPen.curveTo(*self._transformPoints(points))
-
- def qCurveTo(self, *points):
- if points[-1] is None:
- points = self._transformPoints(points[:-1]) + [None]
- else:
- points = self._transformPoints(points)
- self._outPen.qCurveTo(*points)
-
- def _transformPoints(self, points):
- transformPoint = self._transformPoint
- return [transformPoint(pt) for pt in points]
-
- def closePath(self):
- self._outPen.closePath()
-
- def endPath(self):
- self._outPen.endPath()
-
- def addComponent(self, glyphName, transformation):
- transformation = self._transformation.transform(transformation)
- self._outPen.addComponent(glyphName, transformation)
+ """Pen that transforms all coordinates using a Affine transformation,
+ and passes them to another pen.
+ """
+
+ def __init__(self, outPen, transformation):
+ """The 'outPen' argument is another pen object. It will receive the
+ transformed coordinates. The 'transformation' argument can either
+ be a six-tuple, or a fontTools.misc.transform.Transform object.
+ """
+ super(TransformPen, self).__init__(outPen)
+ if not hasattr(transformation, "transformPoint"):
+ from fontTools.misc.transform import Transform
+
+ transformation = Transform(*transformation)
+ self._transformation = transformation
+ self._transformPoint = transformation.transformPoint
+ self._stack = []
+
+ def moveTo(self, pt):
+ self._outPen.moveTo(self._transformPoint(pt))
+
+ def lineTo(self, pt):
+ self._outPen.lineTo(self._transformPoint(pt))
+
+ def curveTo(self, *points):
+ self._outPen.curveTo(*self._transformPoints(points))
+
+ def qCurveTo(self, *points):
+ if points[-1] is None:
+ points = self._transformPoints(points[:-1]) + [None]
+ else:
+ points = self._transformPoints(points)
+ self._outPen.qCurveTo(*points)
+
+ def _transformPoints(self, points):
+ transformPoint = self._transformPoint
+ return [transformPoint(pt) for pt in points]
+
+ def closePath(self):
+ self._outPen.closePath()
+
+ def endPath(self):
+ self._outPen.endPath()
+
+ def addComponent(self, glyphName, transformation):
+ transformation = self._transformation.transform(transformation)
+ self._outPen.addComponent(glyphName, transformation)
class TransformPointPen(FilterPointPen):
- """PointPen that transforms all coordinates using a Affine transformation,
- and passes them to another PointPen.
-
- >>> from fontTools.pens.recordingPen import RecordingPointPen
- >>> rec = RecordingPointPen()
- >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
- >>> v = iter(rec.value)
- >>> pen.beginPath(identifier="contour-0")
- >>> next(v)
- ('beginPath', (), {'identifier': 'contour-0'})
- >>> pen.addPoint((100, 100), "line")
- >>> next(v)
- ('addPoint', ((190, 205), 'line', False, None), {})
- >>> pen.endPath()
- >>> next(v)
- ('endPath', (), {})
- >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
- >>> next(v)
- ('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
- """
-
- def __init__(self, outPointPen, transformation):
- """The 'outPointPen' argument is another point pen object.
- It will receive the transformed coordinates.
- The 'transformation' argument can either be a six-tuple, or a
- fontTools.misc.transform.Transform object.
- """
- super().__init__(outPointPen)
- if not hasattr(transformation, "transformPoint"):
- from fontTools.misc.transform import Transform
- transformation = Transform(*transformation)
- self._transformation = transformation
- self._transformPoint = transformation.transformPoint
-
- def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
- self._outPen.addPoint(
- self._transformPoint(pt), segmentType, smooth, name, **kwargs
- )
-
- def addComponent(self, baseGlyphName, transformation, **kwargs):
- transformation = self._transformation.transform(transformation)
- self._outPen.addComponent(baseGlyphName, transformation, **kwargs)
+    """PointPen that transforms all coordinates using an Affine transformation,
+ and passes them to another PointPen.
+
+ >>> from fontTools.pens.recordingPen import RecordingPointPen
+ >>> rec = RecordingPointPen()
+ >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
+ >>> v = iter(rec.value)
+ >>> pen.beginPath(identifier="contour-0")
+ >>> next(v)
+ ('beginPath', (), {'identifier': 'contour-0'})
+ >>> pen.addPoint((100, 100), "line")
+ >>> next(v)
+ ('addPoint', ((190, 205), 'line', False, None), {})
+ >>> pen.endPath()
+ >>> next(v)
+ ('endPath', (), {})
+ >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
+ >>> next(v)
+ ('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
+ """
+
+ def __init__(self, outPointPen, transformation):
+ """The 'outPointPen' argument is another point pen object.
+ It will receive the transformed coordinates.
+ The 'transformation' argument can either be a six-tuple, or a
+ fontTools.misc.transform.Transform object.
+ """
+ super().__init__(outPointPen)
+ if not hasattr(transformation, "transformPoint"):
+ from fontTools.misc.transform import Transform
+
+ transformation = Transform(*transformation)
+ self._transformation = transformation
+ self._transformPoint = transformation.transformPoint
+
+ def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
+ self._outPen.addPoint(
+ self._transformPoint(pt), segmentType, smooth, name, **kwargs
+ )
+
+ def addComponent(self, baseGlyphName, transformation, **kwargs):
+ transformation = self._transformation.transform(transformation)
+ self._outPen.addComponent(baseGlyphName, transformation, **kwargs)
if __name__ == "__main__":
- from fontTools.pens.basePen import _TestPen
- pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
- pen.moveTo((0, 0))
- pen.lineTo((0, 100))
- pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
- pen.closePath()
+ from fontTools.pens.basePen import _TestPen
+
+ pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
+ pen.moveTo((0, 0))
+ pen.lineTo((0, 100))
+ pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
+ pen.closePath()
diff --git a/Lib/fontTools/pens/ttGlyphPen.py b/Lib/fontTools/pens/ttGlyphPen.py
index 5087e158..de2ccaee 100644
--- a/Lib/fontTools/pens/ttGlyphPen.py
+++ b/Lib/fontTools/pens/ttGlyphPen.py
@@ -1,5 +1,5 @@
from array import array
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Callable, Dict, Optional, Tuple
from fontTools.misc.fixedTools import MAX_F2DOT14, floatToFixedToFloat
from fontTools.misc.loggingTools import LogMixin
from fontTools.pens.pointPen import AbstractPointPen
@@ -7,9 +7,12 @@ from fontTools.misc.roundTools import otRound
from fontTools.pens.basePen import LoggingPen, PenError
from fontTools.pens.transformPen import TransformPen, TransformPointPen
from fontTools.ttLib.tables import ttProgram
+from fontTools.ttLib.tables._g_l_y_f import flagOnCurve, flagCubic
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
+from fontTools.ttLib.tables._g_l_y_f import dropImpliedOnCurvePoints
+import math
__all__ = ["TTGlyphPen", "TTGlyphPointPen"]
@@ -124,9 +127,20 @@ class _TTGlyphBasePen:
components.append(component)
return components
- def glyph(self, componentFlags: int = 0x4) -> Glyph:
+ def glyph(
+ self,
+ componentFlags: int = 0x04,
+ dropImpliedOnCurves: bool = False,
+ *,
+ round: Callable[[float], int] = otRound,
+ ) -> Glyph:
"""
Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
+
+ Args:
+ componentFlags: Flags to use for component glyphs. (default: 0x04)
+
+ dropImpliedOnCurves: Whether to remove implied-oncurve points. (default: False)
"""
if not self._isClosed():
raise PenError("Didn't close last contour.")
@@ -134,7 +148,6 @@ class _TTGlyphBasePen:
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
- glyph.coordinates.toInt()
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
self.init()
@@ -148,6 +161,9 @@ class _TTGlyphBasePen:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode(b"")
+ if dropImpliedOnCurves:
+ dropImpliedOnCurvePoints(glyph)
+ glyph.coordinates.toInt(round=round)
return glyph
@@ -164,9 +180,18 @@ class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
drawMethod = "draw"
transformPen = TransformPen
- def _addPoint(self, pt: Tuple[float, float], onCurve: int) -> None:
+ def __init__(
+ self,
+ glyphSet: Optional[Dict[str, Any]] = None,
+ handleOverflowingTransforms: bool = True,
+ outputImpliedClosingLine: bool = False,
+ ) -> None:
+ super().__init__(glyphSet, handleOverflowingTransforms)
+ self.outputImpliedClosingLine = outputImpliedClosingLine
+
+ def _addPoint(self, pt: Tuple[float, float], tp: int) -> None:
self.points.append(pt)
- self.types.append(onCurve)
+ self.types.append(tp)
def _popPoint(self) -> None:
self.points.pop()
@@ -178,15 +203,21 @@ class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
)
def lineTo(self, pt: Tuple[float, float]) -> None:
- self._addPoint(pt, 1)
+ self._addPoint(pt, flagOnCurve)
def moveTo(self, pt: Tuple[float, float]) -> None:
if not self._isClosed():
raise PenError('"move"-type point must begin a new contour.')
- self._addPoint(pt, 1)
+ self._addPoint(pt, flagOnCurve)
def curveTo(self, *points) -> None:
- raise NotImplementedError
+ assert len(points) % 2 == 1
+ for pt in points[:-1]:
+ self._addPoint(pt, flagCubic)
+
+ # last point is None if there are no on-curve points
+ if points[-1] is not None:
+ self._addPoint(points[-1], 1)
def qCurveTo(self, *points) -> None:
assert len(points) >= 1
@@ -205,13 +236,14 @@ class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
self._popPoint()
return
- # if first and last point on this path are the same, remove last
- startPt = 0
- if self.endPts:
- startPt = self.endPts[-1] + 1
- if self.points[startPt] == self.points[endPt]:
- self._popPoint()
- endPt -= 1
+ if not self.outputImpliedClosingLine:
+ # if first and last point on this path are the same, remove last
+ startPt = 0
+ if self.endPts:
+ startPt = self.endPts[-1] + 1
+ if self.points[startPt] == self.points[endPt]:
+ self._popPoint()
+ endPt -= 1
self.endPts.append(endPt)
@@ -255,10 +287,26 @@ class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
if self._isClosed():
raise PenError("Contour is already closed.")
if self._currentContourStartIndex == len(self.points):
- raise PenError("Tried to end an empty contour.")
+ # ignore empty contours
+ self._currentContourStartIndex = None
+ return
+
+ contourStart = self.endPts[-1] + 1 if self.endPts else 0
self.endPts.append(len(self.points) - 1)
self._currentContourStartIndex = None
+ # Resolve types for any cubic segments
+ flags = self.types
+ for i in range(contourStart, len(flags)):
+ if flags[i] == "curve":
+ j = i - 1
+ if j < contourStart:
+ j = len(flags) - 1
+ while flags[j] == 0:
+ flags[j] = flagCubic
+ j -= 1
+ flags[i] = flagOnCurve
+
def addPoint(
self,
pt: Tuple[float, float],
@@ -274,11 +322,13 @@ class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
if self._isClosed():
raise PenError("Can't add a point to a closed contour.")
if segmentType is None:
- self.types.append(0) # offcurve
- elif segmentType in ("qcurve", "line", "move"):
- self.types.append(1) # oncurve
+ self.types.append(0)
+ elif segmentType in ("line", "move"):
+ self.types.append(flagOnCurve)
+ elif segmentType == "qcurve":
+ self.types.append(flagOnCurve)
elif segmentType == "curve":
- raise NotImplementedError("cubic curves are not supported")
+ self.types.append("curve")
else:
raise AssertionError(segmentType)
diff --git a/Lib/fontTools/pens/wxPen.py b/Lib/fontTools/pens/wxPen.py
index 1504f089..c790641a 100644
--- a/Lib/fontTools/pens/wxPen.py
+++ b/Lib/fontTools/pens/wxPen.py
@@ -5,25 +5,25 @@ __all__ = ["WxPen"]
class WxPen(BasePen):
+ def __init__(self, glyphSet, path=None):
+ BasePen.__init__(self, glyphSet)
+ if path is None:
+ import wx
- def __init__(self, glyphSet, path=None):
- BasePen.__init__(self, glyphSet)
- if path is None:
- import wx
- path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
- self.path = path
+ path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
+ self.path = path
- def _moveTo(self, p):
- self.path.MoveToPoint(*p)
+ def _moveTo(self, p):
+ self.path.MoveToPoint(*p)
- def _lineTo(self, p):
- self.path.AddLineToPoint(*p)
+ def _lineTo(self, p):
+ self.path.AddLineToPoint(*p)
- def _curveToOne(self, p1, p2, p3):
- self.path.AddCurveToPoint(*p1+p2+p3)
+ def _curveToOne(self, p1, p2, p3):
+ self.path.AddCurveToPoint(*p1 + p2 + p3)
- def _qCurveToOne(self, p1, p2):
- self.path.AddQuadCurveToPoint(*p1+p2)
+ def _qCurveToOne(self, p1, p2):
+ self.path.AddQuadCurveToPoint(*p1 + p2)
- def _closePath(self):
- self.path.CloseSubpath()
+ def _closePath(self):
+ self.path.CloseSubpath()
diff --git a/Lib/fontTools/qu2cu/__init__.py b/Lib/fontTools/qu2cu/__init__.py
new file mode 100644
index 00000000..ce357417
--- /dev/null
+++ b/Lib/fontTools/qu2cu/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .qu2cu import *
diff --git a/Lib/fontTools/qu2cu/__main__.py b/Lib/fontTools/qu2cu/__main__.py
new file mode 100644
index 00000000..27728cc7
--- /dev/null
+++ b/Lib/fontTools/qu2cu/__main__.py
@@ -0,0 +1,7 @@
+import sys
+
+from .cli import main
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/qu2cu/benchmark.py b/Lib/fontTools/qu2cu/benchmark.py
new file mode 100644
index 00000000..cee55f5e
--- /dev/null
+++ b/Lib/fontTools/qu2cu/benchmark.py
@@ -0,0 +1,57 @@
+"""Benchmark the qu2cu algorithm performance."""
+
+from .qu2cu import *
+from fontTools.cu2qu import curve_to_quadratic
+import random
+import timeit
+
+MAX_ERR = 0.5
+NUM_CURVES = 5
+
+
+def generate_curves(n):
+ points = [
+ tuple(float(random.randint(0, 2048)) for coord in range(2))
+ for point in range(1 + 3 * n)
+ ]
+ curves = []
+ for i in range(n):
+ curves.append(tuple(points[i * 3 : i * 3 + 4]))
+ return curves
+
+
+def setup_quadratic_to_curves():
+ curves = generate_curves(NUM_CURVES)
+ quadratics = [curve_to_quadratic(curve, MAX_ERR) for curve in curves]
+ return quadratics, MAX_ERR
+
+
+def run_benchmark(module, function, setup_suffix="", repeat=25, number=1):
+ setup_func = "setup_" + function
+ if setup_suffix:
+ print("%s with %s:" % (function, setup_suffix), end="")
+ setup_func += "_" + setup_suffix
+ else:
+ print("%s:" % function, end="")
+
+ def wrapper(function, setup_func):
+ function = globals()[function]
+ setup_func = globals()[setup_func]
+
+ def wrapped():
+ return function(*setup_func())
+
+ return wrapped
+
+ results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
+ print("\t%5.1fus" % (min(results) * 1000000.0 / number))
+
+
+def main():
+ """Benchmark the qu2cu algorithm performance."""
+ run_benchmark("qu2cu", "quadratic_to_curves")
+
+
+if __name__ == "__main__":
+ random.seed(1)
+ main()
diff --git a/Lib/fontTools/qu2cu/cli.py b/Lib/fontTools/qu2cu/cli.py
new file mode 100644
index 00000000..a07fd6dc
--- /dev/null
+++ b/Lib/fontTools/qu2cu/cli.py
@@ -0,0 +1,125 @@
+import os
+import argparse
+import logging
+from fontTools.misc.cliTools import makeOutputFileName
+from fontTools.ttLib import TTFont
+from fontTools.pens.qu2cuPen import Qu2CuPen
+from fontTools.pens.ttGlyphPen import TTGlyphPen
+import fontTools
+
+
+logger = logging.getLogger("fontTools.qu2cu")
+
+
+def _font_to_cubic(input_path, output_path=None, **kwargs):
+ font = TTFont(input_path)
+ logger.info("Converting curves for %s", input_path)
+
+ stats = {} if kwargs["dump_stats"] else None
+ qu2cu_kwargs = {
+ "stats": stats,
+ "max_err": kwargs["max_err_em"] * font["head"].unitsPerEm,
+ "all_cubic": kwargs["all_cubic"],
+ }
+
+ assert "gvar" not in font, "Cannot convert variable font"
+ glyphSet = font.getGlyphSet()
+ glyphOrder = font.getGlyphOrder()
+ glyf = font["glyf"]
+ for glyphName in glyphOrder:
+ glyph = glyphSet[glyphName]
+ ttpen = TTGlyphPen(glyphSet)
+ pen = Qu2CuPen(ttpen, **qu2cu_kwargs)
+ glyph.draw(pen)
+ glyf[glyphName] = ttpen.glyph(dropImpliedOnCurves=True)
+
+ font["head"].glyphDataFormat = 1
+
+ if kwargs["dump_stats"]:
+ logger.info("Stats: %s", stats)
+
+ logger.info("Saving %s", output_path)
+ font.save(output_path)
+
+
+def main(args=None):
+ """Convert an OpenType font from quadratic to cubic curves"""
+ parser = argparse.ArgumentParser(prog="qu2cu")
+ parser.add_argument("--version", action="version", version=fontTools.__version__)
+ parser.add_argument(
+ "infiles",
+ nargs="+",
+ metavar="INPUT",
+ help="one or more input TTF source file(s).",
+ )
+ parser.add_argument("-v", "--verbose", action="count", default=0)
+ parser.add_argument(
+ "-e",
+ "--conversion-error",
+ type=float,
+ metavar="ERROR",
+ default=0.001,
+        help="maximum approximation error measured in EM (default: 0.001)",
+ )
+ parser.add_argument(
+ "-c",
+ "--all-cubic",
+ default=False,
+ action="store_true",
+ help="whether to only use cubic curves",
+ )
+
+ output_parser = parser.add_mutually_exclusive_group()
+ output_parser.add_argument(
+ "-o",
+ "--output-file",
+ default=None,
+ metavar="OUTPUT",
+ help=("output filename for the converted TTF."),
+ )
+ output_parser.add_argument(
+ "-d",
+ "--output-dir",
+ default=None,
+ metavar="DIRECTORY",
+ help="output directory where to save converted TTFs",
+ )
+
+ options = parser.parse_args(args)
+
+ if not options.verbose:
+ level = "WARNING"
+ elif options.verbose == 1:
+ level = "INFO"
+ else:
+ level = "DEBUG"
+ logging.basicConfig(level=level)
+
+ if len(options.infiles) > 1 and options.output_file:
+        parser.error("-o/--output-file can't be used with multiple inputs")
+
+ if options.output_dir:
+ output_dir = options.output_dir
+ if not os.path.exists(output_dir):
+ os.mkdir(output_dir)
+ elif not os.path.isdir(output_dir):
+ parser.error("'%s' is not a directory" % output_dir)
+ output_paths = [
+ os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
+ ]
+ elif options.output_file:
+ output_paths = [options.output_file]
+ else:
+ output_paths = [
+ makeOutputFileName(p, overWrite=True, suffix=".cubic")
+ for p in options.infiles
+ ]
+
+ kwargs = dict(
+ dump_stats=options.verbose > 0,
+ max_err_em=options.conversion_error,
+ all_cubic=options.all_cubic,
+ )
+
+ for input_path, output_path in zip(options.infiles, output_paths):
+ _font_to_cubic(input_path, output_path, **kwargs)
diff --git a/Lib/fontTools/qu2cu/qu2cu.py b/Lib/fontTools/qu2cu/qu2cu.py
new file mode 100644
index 00000000..97a665f6
--- /dev/null
+++ b/Lib/fontTools/qu2cu/qu2cu.py
@@ -0,0 +1,408 @@
+# cython: language_level=3
+# distutils: define_macros=CYTHON_TRACE_NOGIL=1
+
+# Copyright 2023 Google Inc. All Rights Reserved.
+# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+ import cython
+
+ COMPILED = cython.compiled
+except (AttributeError, ImportError):
+ # if cython not installed, use mock module with no-op decorators and types
+ from fontTools.misc import cython
+
+ COMPILED = False
+
+from fontTools.misc.bezierTools import splitCubicAtTC
+from collections import namedtuple
+import math
+from typing import (
+ List,
+ Tuple,
+ Union,
+)
+
+
+__all__ = ["quadratic_to_curves"]
+
+
+# Copied from cu2qu
+@cython.cfunc
+@cython.returns(cython.int)
+@cython.locals(
+ tolerance=cython.double,
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+)
+@cython.locals(mid=cython.complex, deriv3=cython.complex)
+def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
+ """Check if a cubic Bezier lies within a given distance of the origin.
+
+ "Origin" means *the* origin (0,0), not the start of the curve. Note that no
+ checks are made on the start and end positions of the curve; this function
+ only checks the inside of the curve.
+
+ Args:
+ p0 (complex): Start point of curve.
+ p1 (complex): First handle of curve.
+ p2 (complex): Second handle of curve.
+ p3 (complex): End point of curve.
+ tolerance (double): Distance from origin.
+
+ Returns:
+ bool: True if the cubic Bezier ``p`` entirely lies within a distance
+ ``tolerance`` of the origin, False otherwise.
+ """
+ # First check p2 then p1, as p2 has higher error early on.
+ if abs(p2) <= tolerance and abs(p1) <= tolerance:
+ return True
+
+ # Split.
+ mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
+ if abs(mid) > tolerance:
+ return False
+ deriv3 = (p3 + p2 - p1 - p0) * 0.125
+ return cubic_farthest_fit_inside(
+ p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
+ ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
+
+
+@cython.locals(
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p1_2_3=cython.complex,
+)
+def elevate_quadratic(p0, p1, p2):
+ """Given a quadratic bezier curve, return its degree-elevated cubic."""
+
+ # https://pomax.github.io/bezierinfo/#reordering
+ p1_2_3 = p1 * (2 / 3)
+ return (
+ p0,
+ (p0 * (1 / 3) + p1_2_3),
+ (p2 * (1 / 3) + p1_2_3),
+ p2,
+ )
+
+
+@cython.cfunc
+@cython.locals(
+ start=cython.int,
+ n=cython.int,
+ k=cython.int,
+ prod_ratio=cython.double,
+ sum_ratio=cython.double,
+ ratio=cython.double,
+ t=cython.double,
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+)
+def merge_curves(curves, start, n):
+ """Give a cubic-Bezier spline, reconstruct one cubic-Bezier
+ that has the same endpoints and tangents and approxmates
+ the spline."""
+
+ # Reconstruct the t values of the cut segments
+ prod_ratio = 1.0
+ sum_ratio = 1.0
+ ts = [1]
+ for k in range(1, n):
+ ck = curves[start + k]
+ c_before = curves[start + k - 1]
+
+ # |t_(k+1) - t_k| / |t_k - t_(k - 1)| = ratio
+ assert ck[0] == c_before[3]
+ ratio = abs(ck[1] - ck[0]) / abs(c_before[3] - c_before[2])
+
+ prod_ratio *= ratio
+ sum_ratio += prod_ratio
+ ts.append(sum_ratio)
+
+ # (t(n) - t(n - 1)) / (t_(1) - t(0)) = prod_ratio
+
+ ts = [t / sum_ratio for t in ts[:-1]]
+
+ p0 = curves[start][0]
+ p1 = curves[start][1]
+ p2 = curves[start + n - 1][2]
+ p3 = curves[start + n - 1][3]
+
+ # Build the curve by scaling the control-points.
+ p1 = p0 + (p1 - p0) / (ts[0] if ts else 1)
+ p2 = p3 + (p2 - p3) / ((1 - ts[-1]) if ts else 1)
+
+ curve = (p0, p1, p2, p3)
+
+ return curve, ts
+
+
+@cython.locals(
+ count=cython.int,
+ num_offcurves=cython.int,
+ i=cython.int,
+ off1=cython.complex,
+ off2=cython.complex,
+ on=cython.complex,
+)
+def add_implicit_on_curves(p):
+ q = list(p)
+ count = 0
+ num_offcurves = len(p) - 2
+ for i in range(1, num_offcurves):
+ off1 = p[i]
+ off2 = p[i + 1]
+ on = off1 + (off2 - off1) * 0.5
+ q.insert(i + 1 + count, on)
+ count += 1
+ return q
+
+
+Point = Union[Tuple[float, float], complex]
+
+
+@cython.locals(
+ cost=cython.int,
+ is_complex=cython.int,
+)
+def quadratic_to_curves(
+ quads: List[List[Point]],
+ max_err: float = 0.5,
+ all_cubic: bool = False,
+) -> List[Tuple[Point, ...]]:
+ """Converts a connecting list of quadratic splines to a list of quadratic
+ and cubic curves.
+
+ A quadratic spline is specified as a list of points. Either each point is
+ a 2-tuple of X,Y coordinates, or each point is a complex number with
+ real/imaginary components representing X,Y coordinates.
+
+ The first and last points are on-curve points and the rest are off-curve
+ points, with an implied on-curve point in the middle between every two
+    consecutive off-curve points.
+
+ Returns:
+ The output is a list of tuples of points. Points are represented
+ in the same format as the input, either as 2-tuples or complex numbers.
+
+ Each tuple is either of length three, for a quadratic curve, or four,
+ for a cubic curve. Each curve's last point is the same as the next
+ curve's first point.
+
+ Args:
+ quads: quadratic splines
+
+ max_err: absolute error tolerance; defaults to 0.5
+
+ all_cubic: if True, only cubic curves are generated; defaults to False
+ """
+ is_complex = type(quads[0][0]) is complex
+ if not is_complex:
+ quads = [[complex(x, y) for (x, y) in p] for p in quads]
+
+ q = [quads[0][0]]
+ costs = [1]
+ cost = 1
+ for p in quads:
+ assert q[-1] == p[0]
+ for i in range(len(p) - 2):
+ cost += 1
+ costs.append(cost)
+ costs.append(cost)
+ qq = add_implicit_on_curves(p)[1:]
+ costs.pop()
+ q.extend(qq)
+ cost += 1
+ costs.append(cost)
+
+ curves = spline_to_curves(q, costs, max_err, all_cubic)
+
+ if not is_complex:
+ curves = [tuple((c.real, c.imag) for c in curve) for curve in curves]
+ return curves
+
+
+Solution = namedtuple("Solution", ["num_points", "error", "start_index", "is_cubic"])
+
+
+@cython.locals(
+ i=cython.int,
+ j=cython.int,
+ k=cython.int,
+ start=cython.int,
+ i_sol_count=cython.int,
+ j_sol_count=cython.int,
+ this_sol_count=cython.int,
+ tolerance=cython.double,
+ err=cython.double,
+ error=cython.double,
+ i_sol_error=cython.double,
+ j_sol_error=cython.double,
+ all_cubic=cython.int,
+ is_cubic=cython.int,
+ count=cython.int,
+ p0=cython.complex,
+ p1=cython.complex,
+ p2=cython.complex,
+ p3=cython.complex,
+ v=cython.complex,
+ u=cython.complex,
+)
+def spline_to_curves(q, costs, tolerance=0.5, all_cubic=False):
+ """
+ q: quadratic spline with alternating on-curve / off-curve points.
+
+ costs: cumulative list of encoding cost of q in terms of number of
+ points that need to be encoded. Implied on-curve points do not
+ contribute to the cost. If all points need to be encoded, then
+ costs will be range(1, len(q)+1).
+ """
+
+ assert len(q) >= 3, "quadratic spline requires at least 3 points"
+
+ # Elevate quadratic segments to cubic
+ elevated_quadratics = [
+ elevate_quadratic(*q[i : i + 3]) for i in range(0, len(q) - 2, 2)
+ ]
+
+ # Find sharp corners; they have to be oncurves for sure.
+ forced = set()
+ for i in range(1, len(elevated_quadratics)):
+ p0 = elevated_quadratics[i - 1][2]
+ p1 = elevated_quadratics[i][0]
+ p2 = elevated_quadratics[i][1]
+ if abs(p1 - p0) + abs(p2 - p1) > tolerance + abs(p2 - p0):
+ forced.add(i)
+
+ # Dynamic-Programming to find the solution with fewest number of
+ # cubic curves, and within those the one with smallest error.
+ sols = [Solution(0, 0, 0, False)]
+ impossible = Solution(len(elevated_quadratics) * 3 + 1, 0, 1, False)
+ start = 0
+ for i in range(1, len(elevated_quadratics) + 1):
+ best_sol = impossible
+ for j in range(start, i):
+ j_sol_count, j_sol_error = sols[j].num_points, sols[j].error
+
+ if not all_cubic:
+ # Solution with quadratics between j:i
+ this_count = costs[2 * i - 1] - costs[2 * j] + 1
+ i_sol_count = j_sol_count + this_count
+ i_sol_error = j_sol_error
+ i_sol = Solution(i_sol_count, i_sol_error, i - j, False)
+ if i_sol < best_sol:
+ best_sol = i_sol
+
+ if this_count <= 3:
+ # Can't get any better than this in the path below
+ continue
+
+ # Fit elevated_quadratics[j:i] into one cubic
+ try:
+ curve, ts = merge_curves(elevated_quadratics, j, i - j)
+ except ZeroDivisionError:
+ continue
+
+ # Now reconstruct the segments from the fitted curve
+ reconstructed_iter = splitCubicAtTC(*curve, *ts)
+ reconstructed = []
+
+ # Knot errors
+ error = 0
+ for k, reconst in enumerate(reconstructed_iter):
+ orig = elevated_quadratics[j + k]
+ err = abs(reconst[3] - orig[3])
+ error = max(error, err)
+ if error > tolerance:
+ break
+ reconstructed.append(reconst)
+ if error > tolerance:
+ # Not feasible
+ continue
+
+ # Interior errors
+ for k, reconst in enumerate(reconstructed):
+ orig = elevated_quadratics[j + k]
+ p0, p1, p2, p3 = tuple(v - u for v, u in zip(reconst, orig))
+
+ if not cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
+ error = tolerance + 1
+ break
+ if error > tolerance:
+ # Not feasible
+ continue
+
+ # Save best solution
+ i_sol_count = j_sol_count + 3
+ i_sol_error = max(j_sol_error, error)
+ i_sol = Solution(i_sol_count, i_sol_error, i - j, True)
+ if i_sol < best_sol:
+ best_sol = i_sol
+
+ if i_sol_count == 3:
+ # Can't get any better than this
+ break
+
+ sols.append(best_sol)
+ if i in forced:
+ start = i
+
+ # Reconstruct solution
+ splits = []
+ cubic = []
+ i = len(sols) - 1
+ while i:
+ count, is_cubic = sols[i].start_index, sols[i].is_cubic
+ splits.append(i)
+ cubic.append(is_cubic)
+ i -= count
+ curves = []
+ j = 0
+ for i, is_cubic in reversed(list(zip(splits, cubic))):
+ if is_cubic:
+ curves.append(merge_curves(elevated_quadratics, j, i - j)[0])
+ else:
+ for k in range(j, i):
+ curves.append(q[k * 2 : k * 2 + 3])
+ j = i
+
+ return curves
+
+
+def main():
+ from fontTools.cu2qu.benchmark import generate_curve
+ from fontTools.cu2qu import curve_to_quadratic
+
+ tolerance = 0.05
+ reconstruct_tolerance = tolerance * 1
+ curve = generate_curve()
+ quadratics = curve_to_quadratic(curve, tolerance)
+ print(
+ "cu2qu tolerance %g. qu2cu tolerance %g." % (tolerance, reconstruct_tolerance)
+ )
+ print("One random cubic turned into %d quadratics." % len(quadratics))
+ curves = quadratic_to_curves([quadratics], reconstruct_tolerance)
+ print("Those quadratics turned back into %d cubics. " % len(curves))
+ print("Original curve:", curve)
+ print("Reconstructed curve(s):", curves)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/Lib/fontTools/subset/__init__.py b/Lib/fontTools/subset/__init__.py
index b58e6162..bd826ed2 100644
--- a/Lib/fontTools/subset/__init__.py
+++ b/Lib/fontTools/subset/__init__.py
@@ -15,6 +15,7 @@ from fontTools.subset.util import _add_method, _uniq_sort
from fontTools.subset.cff import *
from fontTools.subset.svg import *
from fontTools.varLib import varStore # for subset_varidxes
+from fontTools.ttLib.tables._n_a_m_e import NameRecordVisitor
import sys
import struct
import array
@@ -25,7 +26,8 @@ from types import MethodType
__usage__ = "pyftsubset font-file [glyph...] [--option=value]..."
-__doc__="""\
+__doc__ = (
+ """\
pyftsubset -- OpenType font subsetter and optimizer
pyftsubset is an OpenType font subsetter and optimizer, based on fontTools.
@@ -37,7 +39,9 @@ The tool also performs some size-reducing optimizations, aimed for using
subset fonts as webfonts. Individual optimizations can be enabled or
disabled, and are enabled by default when they are safe.
-Usage: """+__usage__+"""
+Usage: """
+ + __usage__
+ + """
At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file,
--text, --text-file, --unicodes, or --unicodes-file, must be specified.
@@ -270,7 +274,7 @@ Font table options
Examples:
- --drop-tables-='BASE'
+ --drop-tables-=BASE
* Drop the default set of tables but keep 'BASE'.
--drop-tables+=GSUB
@@ -310,9 +314,9 @@ Font table options
Examples:
- --hinting-tables-='VDMX'
+ --hinting-tables-=VDMX
* Drop font-wide hinting tables except 'VDMX'.
- --hinting-tables=''
+ --hinting-tables=
* Keep all font-wide hinting tables (but strip hints from glyphs).
--legacy-kern
@@ -336,9 +340,9 @@ codes, see: http://www.microsoft.com/typography/otspec/name.htm
--name-IDs+=7,8,9
* Also keep Trademark, Manufacturer and Designer name entries.
- --name-IDs=''
+ --name-IDs=
* Drop all 'name' table entries.
- --name-IDs='*'
+ --name-IDs=*
* keep all 'name' table entries
--name-legacy
@@ -434,19 +438,22 @@ Produce a subset containing the characters ' !"#$%' without performing
size-reducing optimizations::
$ pyftsubset font.ttf --unicodes="U+0020-0025" \\
- --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\
+ --layout-features=* --glyph-names --symbol-cmap --legacy-cmap \\
--notdef-glyph --notdef-outline --recommended-glyphs \\
- --name-IDs='*' --name-legacy --name-languages='*'
+ --name-IDs=* --name-legacy --name-languages=*
"""
+)
log = logging.getLogger("fontTools.subset")
+
def _log_glyphs(self, glyphs, font=None):
- self.info("Glyph names: %s", sorted(glyphs))
- if font:
- reverseGlyphMap = font.getReverseGlyphMap()
- self.info("Glyph IDs: %s", sorted(reverseGlyphMap[g] for g in glyphs))
+ self.info("Glyph names: %s", sorted(glyphs))
+ if font:
+ reverseGlyphMap = font.getReverseGlyphMap()
+ self.info("Glyph IDs: %s", sorted(reverseGlyphMap[g] for g in glyphs))
+
# bind "glyphs" function to 'log' object
log.glyphs = MethodType(_log_glyphs, log)
@@ -457,2125 +464,2481 @@ timer = Timer(logger=logging.getLogger("fontTools.subset.timer"))
def _dict_subset(d, glyphs):
- return {g:d[g] for g in glyphs}
+ return {g: d[g] for g in glyphs}
+
def _list_subset(l, indices):
- count = len(l)
- return [l[i] for i in indices if i < count]
+ count = len(l)
+ return [l[i] for i in indices if i < count]
+
@_add_method(otTables.Coverage)
def intersect(self, glyphs):
- """Returns ascending list of matching coverage values."""
- return [i for i,g in enumerate(self.glyphs) if g in glyphs]
+ """Returns ascending list of matching coverage values."""
+ return [i for i, g in enumerate(self.glyphs) if g in glyphs]
+
@_add_method(otTables.Coverage)
def intersect_glyphs(self, glyphs):
- """Returns set of intersecting glyphs."""
- return set(g for g in self.glyphs if g in glyphs)
+ """Returns set of intersecting glyphs."""
+ return set(g for g in self.glyphs if g in glyphs)
+
@_add_method(otTables.Coverage)
def subset(self, glyphs):
- """Returns ascending list of remaining coverage values."""
- indices = self.intersect(glyphs)
- self.glyphs = [g for g in self.glyphs if g in glyphs]
- return indices
+ """Returns ascending list of remaining coverage values."""
+ indices = self.intersect(glyphs)
+ self.glyphs = [g for g in self.glyphs if g in glyphs]
+ return indices
+
@_add_method(otTables.Coverage)
def remap(self, coverage_map):
- """Remaps coverage."""
- self.glyphs = [self.glyphs[i] for i in coverage_map]
+ """Remaps coverage."""
+ self.glyphs = [self.glyphs[i] for i in coverage_map]
+
@_add_method(otTables.ClassDef)
def intersect(self, glyphs):
- """Returns ascending list of matching class values."""
- return _uniq_sort(
- ([0] if any(g not in self.classDefs for g in glyphs) else []) +
- [v for g,v in self.classDefs.items() if g in glyphs])
+ """Returns ascending list of matching class values."""
+ return _uniq_sort(
+ ([0] if any(g not in self.classDefs for g in glyphs) else [])
+ + [v for g, v in self.classDefs.items() if g in glyphs]
+ )
+
@_add_method(otTables.ClassDef)
def intersect_class(self, glyphs, klass):
- """Returns set of glyphs matching class."""
- if klass == 0:
- return set(g for g in glyphs if g not in self.classDefs)
- return set(g for g,v in self.classDefs.items()
- if v == klass and g in glyphs)
+ """Returns set of glyphs matching class."""
+ if klass == 0:
+ return set(g for g in glyphs if g not in self.classDefs)
+ return set(g for g, v in self.classDefs.items() if v == klass and g in glyphs)
+
@_add_method(otTables.ClassDef)
def subset(self, glyphs, remap=False, useClass0=True):
- """Returns ascending list of remaining classes."""
- self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs}
- # Note: while class 0 has the special meaning of "not matched",
- # if no glyph will ever /not match/, we can optimize class 0 out too.
- # Only do this if allowed.
- indices = _uniq_sort(
- ([0] if ((not useClass0) or any(g not in self.classDefs for g in glyphs)) else []) +
- list(self.classDefs.values()))
- if remap:
- self.remap(indices)
- return indices
+ """Returns ascending list of remaining classes."""
+ self.classDefs = {g: v for g, v in self.classDefs.items() if g in glyphs}
+ # Note: while class 0 has the special meaning of "not matched",
+ # if no glyph will ever /not match/, we can optimize class 0 out too.
+ # Only do this if allowed.
+ indices = _uniq_sort(
+ (
+ [0]
+ if ((not useClass0) or any(g not in self.classDefs for g in glyphs))
+ else []
+ )
+ + list(self.classDefs.values())
+ )
+ if remap:
+ self.remap(indices)
+ return indices
+
@_add_method(otTables.ClassDef)
def remap(self, class_map):
- """Remaps classes."""
- self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()}
+ """Remaps classes."""
+ self.classDefs = {g: class_map.index(v) for g, v in self.classDefs.items()}
+
@_add_method(otTables.SingleSubst)
def closure_glyphs(self, s, cur_glyphs):
- s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs)
+ s.glyphs.update(v for g, v in self.mapping.items() if g in cur_glyphs)
+
@_add_method(otTables.SingleSubst)
def subset_glyphs(self, s):
- self.mapping = {g:v for g,v in self.mapping.items()
- if g in s.glyphs and v in s.glyphs}
- return bool(self.mapping)
+ self.mapping = {
+ g: v for g, v in self.mapping.items() if g in s.glyphs and v in s.glyphs
+ }
+ return bool(self.mapping)
+
@_add_method(otTables.MultipleSubst)
def closure_glyphs(self, s, cur_glyphs):
- for glyph, subst in self.mapping.items():
- if glyph in cur_glyphs:
- s.glyphs.update(subst)
+ for glyph, subst in self.mapping.items():
+ if glyph in cur_glyphs:
+ s.glyphs.update(subst)
+
@_add_method(otTables.MultipleSubst)
def subset_glyphs(self, s):
- self.mapping = {g:v for g,v in self.mapping.items()
- if g in s.glyphs and all(sub in s.glyphs for sub in v)}
- return bool(self.mapping)
+ self.mapping = {
+ g: v
+ for g, v in self.mapping.items()
+ if g in s.glyphs and all(sub in s.glyphs for sub in v)
+ }
+ return bool(self.mapping)
+
@_add_method(otTables.AlternateSubst)
def closure_glyphs(self, s, cur_glyphs):
- s.glyphs.update(*(vlist for g,vlist in self.alternates.items()
- if g in cur_glyphs))
+ s.glyphs.update(*(vlist for g, vlist in self.alternates.items() if g in cur_glyphs))
+
@_add_method(otTables.AlternateSubst)
def subset_glyphs(self, s):
- self.alternates = {g:[v for v in vlist if v in s.glyphs]
- for g,vlist in self.alternates.items()
- if g in s.glyphs and
- any(v in s.glyphs for v in vlist)}
- return bool(self.alternates)
+ self.alternates = {
+ g: [v for v in vlist if v in s.glyphs]
+ for g, vlist in self.alternates.items()
+ if g in s.glyphs and any(v in s.glyphs for v in vlist)
+ }
+ return bool(self.alternates)
+
@_add_method(otTables.LigatureSubst)
def closure_glyphs(self, s, cur_glyphs):
- s.glyphs.update(*([seq.LigGlyph for seq in seqs
- if all(c in s.glyphs for c in seq.Component)]
- for g,seqs in self.ligatures.items()
- if g in cur_glyphs))
+ s.glyphs.update(
+ *(
+ [seq.LigGlyph for seq in seqs if all(c in s.glyphs for c in seq.Component)]
+ for g, seqs in self.ligatures.items()
+ if g in cur_glyphs
+ )
+ )
+
@_add_method(otTables.LigatureSubst)
def subset_glyphs(self, s):
- self.ligatures = {g:v for g,v in self.ligatures.items()
- if g in s.glyphs}
- self.ligatures = {g:[seq for seq in seqs
- if seq.LigGlyph in s.glyphs and
- all(c in s.glyphs for c in seq.Component)]
- for g,seqs in self.ligatures.items()}
- self.ligatures = {g:v for g,v in self.ligatures.items() if v}
- return bool(self.ligatures)
+ self.ligatures = {g: v for g, v in self.ligatures.items() if g in s.glyphs}
+ self.ligatures = {
+ g: [
+ seq
+ for seq in seqs
+ if seq.LigGlyph in s.glyphs and all(c in s.glyphs for c in seq.Component)
+ ]
+ for g, seqs in self.ligatures.items()
+ }
+ self.ligatures = {g: v for g, v in self.ligatures.items() if v}
+ return bool(self.ligatures)
+
@_add_method(otTables.ReverseChainSingleSubst)
def closure_glyphs(self, s, cur_glyphs):
- if self.Format == 1:
- indices = self.Coverage.intersect(cur_glyphs)
- if(not indices or
- not all(c.intersect(s.glyphs)
- for c in self.LookAheadCoverage + self.BacktrackCoverage)):
- return
- s.glyphs.update(self.Substitute[i] for i in indices)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ indices = self.Coverage.intersect(cur_glyphs)
+ if not indices or not all(
+ c.intersect(s.glyphs)
+ for c in self.LookAheadCoverage + self.BacktrackCoverage
+ ):
+ return
+ s.glyphs.update(self.Substitute[i] for i in indices)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.ReverseChainSingleSubst)
def subset_glyphs(self, s):
- if self.Format == 1:
- indices = self.Coverage.subset(s.glyphs)
- self.Substitute = _list_subset(self.Substitute, indices)
- # Now drop rules generating glyphs we don't want
- indices = [i for i,sub in enumerate(self.Substitute)
- if sub in s.glyphs]
- self.Substitute = _list_subset(self.Substitute, indices)
- self.Coverage.remap(indices)
- self.GlyphCount = len(self.Substitute)
- return bool(self.GlyphCount and
- all(c.subset(s.glyphs)
- for c in self.LookAheadCoverage+self.BacktrackCoverage))
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ indices = self.Coverage.subset(s.glyphs)
+ self.Substitute = _list_subset(self.Substitute, indices)
+ # Now drop rules generating glyphs we don't want
+ indices = [i for i, sub in enumerate(self.Substitute) if sub in s.glyphs]
+ self.Substitute = _list_subset(self.Substitute, indices)
+ self.Coverage.remap(indices)
+ self.GlyphCount = len(self.Substitute)
+ return bool(
+ self.GlyphCount
+ and all(
+ c.subset(s.glyphs)
+ for c in self.LookAheadCoverage + self.BacktrackCoverage
+ )
+ )
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.Device)
def is_hinting(self):
- return self.DeltaFormat in (1,2,3)
+ return self.DeltaFormat in (1, 2, 3)
+
@_add_method(otTables.ValueRecord)
def prune_hints(self):
- for name in ['XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice']:
- v = getattr(self, name, None)
- if v is not None and v.is_hinting():
- delattr(self, name)
+ for name in ["XPlaDevice", "YPlaDevice", "XAdvDevice", "YAdvDevice"]:
+ v = getattr(self, name, None)
+ if v is not None and v.is_hinting():
+ delattr(self, name)
+
@_add_method(otTables.SinglePos)
def subset_glyphs(self, s):
- if self.Format == 1:
- return len(self.Coverage.subset(s.glyphs))
- elif self.Format == 2:
- indices = self.Coverage.subset(s.glyphs)
- values = self.Value
- count = len(values)
- self.Value = [values[i] for i in indices if i < count]
- self.ValueCount = len(self.Value)
- return bool(self.ValueCount)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ return len(self.Coverage.subset(s.glyphs))
+ elif self.Format == 2:
+ indices = self.Coverage.subset(s.glyphs)
+ values = self.Value
+ count = len(values)
+ self.Value = [values[i] for i in indices if i < count]
+ self.ValueCount = len(self.Value)
+ return bool(self.ValueCount)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.SinglePos)
def prune_post_subset(self, font, options):
- if self.Value is None:
- assert self.ValueFormat == 0
- return True
-
- # Shrink ValueFormat
- if self.Format == 1:
- if not options.hinting:
- self.Value.prune_hints()
- self.ValueFormat = self.Value.getEffectiveFormat()
- elif self.Format == 2:
- if None in self.Value:
- assert self.ValueFormat == 0
- assert all(v is None for v in self.Value)
- else:
- if not options.hinting:
- for v in self.Value:
- v.prune_hints()
- self.ValueFormat = reduce(
- int.__or__, [v.getEffectiveFormat() for v in self.Value], 0
- )
-
- # Downgrade to Format 1 if all ValueRecords are the same
- if self.Format == 2 and all(v == self.Value[0] for v in self.Value):
- self.Format = 1
- self.Value = self.Value[0] if self.ValueFormat != 0 else None
- del self.ValueCount
-
- return True
+ if self.Value is None:
+ assert self.ValueFormat == 0
+ return True
+
+ # Shrink ValueFormat
+ if self.Format == 1:
+ if not options.hinting:
+ self.Value.prune_hints()
+ self.ValueFormat = self.Value.getEffectiveFormat()
+ elif self.Format == 2:
+ if None in self.Value:
+ assert self.ValueFormat == 0
+ assert all(v is None for v in self.Value)
+ else:
+ if not options.hinting:
+ for v in self.Value:
+ v.prune_hints()
+ self.ValueFormat = reduce(
+ int.__or__, [v.getEffectiveFormat() for v in self.Value], 0
+ )
+
+ # Downgrade to Format 1 if all ValueRecords are the same
+ if self.Format == 2 and all(v == self.Value[0] for v in self.Value):
+ self.Format = 1
+ self.Value = self.Value[0] if self.ValueFormat != 0 else None
+ del self.ValueCount
+
+ return True
+
@_add_method(otTables.PairPos)
def subset_glyphs(self, s):
- if self.Format == 1:
- indices = self.Coverage.subset(s.glyphs)
- pairs = self.PairSet
- count = len(pairs)
- self.PairSet = [pairs[i] for i in indices if i < count]
- for p in self.PairSet:
- p.PairValueRecord = [r for r in p.PairValueRecord if r.SecondGlyph in s.glyphs]
- p.PairValueCount = len(p.PairValueRecord)
- # Remove empty pairsets
- indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount]
- self.Coverage.remap(indices)
- self.PairSet = _list_subset(self.PairSet, indices)
- self.PairSetCount = len(self.PairSet)
- return bool(self.PairSetCount)
- elif self.Format == 2:
- class1_map = [c for c in self.ClassDef1.subset(s.glyphs.intersection(self.Coverage.glyphs), remap=True) if c < self.Class1Count]
- class2_map = [c for c in self.ClassDef2.subset(s.glyphs, remap=True, useClass0=False) if c < self.Class2Count]
- self.Class1Record = [self.Class1Record[i] for i in class1_map]
- for c in self.Class1Record:
- c.Class2Record = [c.Class2Record[i] for i in class2_map]
- self.Class1Count = len(class1_map)
- self.Class2Count = len(class2_map)
- # If only Class2 0 left, no need to keep anything.
- return bool(self.Class1Count and
- (self.Class2Count > 1) and
- self.Coverage.subset(s.glyphs))
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ indices = self.Coverage.subset(s.glyphs)
+ pairs = self.PairSet
+ count = len(pairs)
+ self.PairSet = [pairs[i] for i in indices if i < count]
+ for p in self.PairSet:
+ p.PairValueRecord = [
+ r for r in p.PairValueRecord if r.SecondGlyph in s.glyphs
+ ]
+ p.PairValueCount = len(p.PairValueRecord)
+ # Remove empty pairsets
+ indices = [i for i, p in enumerate(self.PairSet) if p.PairValueCount]
+ self.Coverage.remap(indices)
+ self.PairSet = _list_subset(self.PairSet, indices)
+ self.PairSetCount = len(self.PairSet)
+ return bool(self.PairSetCount)
+ elif self.Format == 2:
+ class1_map = [
+ c
+ for c in self.ClassDef1.subset(
+ s.glyphs.intersection(self.Coverage.glyphs), remap=True
+ )
+ if c < self.Class1Count
+ ]
+ class2_map = [
+ c
+ for c in self.ClassDef2.subset(s.glyphs, remap=True, useClass0=False)
+ if c < self.Class2Count
+ ]
+ self.Class1Record = [self.Class1Record[i] for i in class1_map]
+ for c in self.Class1Record:
+ c.Class2Record = [c.Class2Record[i] for i in class2_map]
+ self.Class1Count = len(class1_map)
+ self.Class2Count = len(class2_map)
+ # If only Class2 0 left, no need to keep anything.
+ return bool(
+ self.Class1Count
+ and (self.Class2Count > 1)
+ and self.Coverage.subset(s.glyphs)
+ )
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.PairPos)
def prune_post_subset(self, font, options):
- if not options.hinting:
- attr1, attr2 = {
- 1: ('PairSet', 'PairValueRecord'),
- 2: ('Class1Record', 'Class2Record'),
- }[self.Format]
-
- self.ValueFormat1 = self.ValueFormat2 = 0
- for row in getattr(self, attr1):
- for r in getattr(row, attr2):
- if r.Value1:
- r.Value1.prune_hints()
- self.ValueFormat1 |= r.Value1.getEffectiveFormat()
- if r.Value2:
- r.Value2.prune_hints()
- self.ValueFormat2 |= r.Value2.getEffectiveFormat()
-
- return bool(self.ValueFormat1 | self.ValueFormat2)
+ if not options.hinting:
+ attr1, attr2 = {
+ 1: ("PairSet", "PairValueRecord"),
+ 2: ("Class1Record", "Class2Record"),
+ }[self.Format]
+
+ self.ValueFormat1 = self.ValueFormat2 = 0
+ for row in getattr(self, attr1):
+ for r in getattr(row, attr2):
+ if r.Value1:
+ r.Value1.prune_hints()
+ self.ValueFormat1 |= r.Value1.getEffectiveFormat()
+ if r.Value2:
+ r.Value2.prune_hints()
+ self.ValueFormat2 |= r.Value2.getEffectiveFormat()
+
+ return bool(self.ValueFormat1 | self.ValueFormat2)
+
@_add_method(otTables.CursivePos)
def subset_glyphs(self, s):
- if self.Format == 1:
- indices = self.Coverage.subset(s.glyphs)
- records = self.EntryExitRecord
- count = len(records)
- self.EntryExitRecord = [records[i] for i in indices if i < count]
- self.EntryExitCount = len(self.EntryExitRecord)
- return bool(self.EntryExitCount)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ indices = self.Coverage.subset(s.glyphs)
+ records = self.EntryExitRecord
+ count = len(records)
+ self.EntryExitRecord = [records[i] for i in indices if i < count]
+ self.EntryExitCount = len(self.EntryExitRecord)
+ return bool(self.EntryExitCount)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.Anchor)
def prune_hints(self):
- if self.Format == 2:
- self.Format = 1
- elif self.Format == 3:
- for name in ('XDeviceTable', 'YDeviceTable'):
- v = getattr(self, name, None)
- if v is not None and v.is_hinting():
- setattr(self, name, None)
- if self.XDeviceTable is None and self.YDeviceTable is None:
- self.Format = 1
+ if self.Format == 2:
+ self.Format = 1
+ elif self.Format == 3:
+ for name in ("XDeviceTable", "YDeviceTable"):
+ v = getattr(self, name, None)
+ if v is not None and v.is_hinting():
+ setattr(self, name, None)
+ if self.XDeviceTable is None and self.YDeviceTable is None:
+ self.Format = 1
+
@_add_method(otTables.CursivePos)
def prune_post_subset(self, font, options):
- if not options.hinting:
- for rec in self.EntryExitRecord:
- if rec.EntryAnchor: rec.EntryAnchor.prune_hints()
- if rec.ExitAnchor: rec.ExitAnchor.prune_hints()
- return True
+ if not options.hinting:
+ for rec in self.EntryExitRecord:
+ if rec.EntryAnchor:
+ rec.EntryAnchor.prune_hints()
+ if rec.ExitAnchor:
+ rec.ExitAnchor.prune_hints()
+ return True
+
@_add_method(otTables.MarkBasePos)
def subset_glyphs(self, s):
- if self.Format == 1:
- mark_indices = self.MarkCoverage.subset(s.glyphs)
- self.MarkArray.MarkRecord = _list_subset(self.MarkArray.MarkRecord, mark_indices)
- self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
- base_indices = self.BaseCoverage.subset(s.glyphs)
- self.BaseArray.BaseRecord = _list_subset(self.BaseArray.BaseRecord, base_indices)
- self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord)
- # Prune empty classes
- class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
- self.ClassCount = len(class_indices)
- for m in self.MarkArray.MarkRecord:
- m.Class = class_indices.index(m.Class)
- for b in self.BaseArray.BaseRecord:
- b.BaseAnchor = _list_subset(b.BaseAnchor, class_indices)
- return bool(self.ClassCount and
- self.MarkArray.MarkCount and
- self.BaseArray.BaseCount)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ mark_indices = self.MarkCoverage.subset(s.glyphs)
+ self.MarkArray.MarkRecord = _list_subset(
+ self.MarkArray.MarkRecord, mark_indices
+ )
+ self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
+ base_indices = self.BaseCoverage.subset(s.glyphs)
+ self.BaseArray.BaseRecord = _list_subset(
+ self.BaseArray.BaseRecord, base_indices
+ )
+ self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord)
+ # Prune empty classes
+ class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
+ self.ClassCount = len(class_indices)
+ for m in self.MarkArray.MarkRecord:
+ m.Class = class_indices.index(m.Class)
+ for b in self.BaseArray.BaseRecord:
+ b.BaseAnchor = _list_subset(b.BaseAnchor, class_indices)
+ return bool(
+ self.ClassCount and self.MarkArray.MarkCount and self.BaseArray.BaseCount
+ )
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.MarkBasePos)
def prune_post_subset(self, font, options):
- if not options.hinting:
- for m in self.MarkArray.MarkRecord:
- if m.MarkAnchor:
- m.MarkAnchor.prune_hints()
- for b in self.BaseArray.BaseRecord:
- for a in b.BaseAnchor:
- if a:
- a.prune_hints()
- return True
+ if not options.hinting:
+ for m in self.MarkArray.MarkRecord:
+ if m.MarkAnchor:
+ m.MarkAnchor.prune_hints()
+ for b in self.BaseArray.BaseRecord:
+ for a in b.BaseAnchor:
+ if a:
+ a.prune_hints()
+ return True
+
@_add_method(otTables.MarkLigPos)
def subset_glyphs(self, s):
- if self.Format == 1:
- mark_indices = self.MarkCoverage.subset(s.glyphs)
- self.MarkArray.MarkRecord = _list_subset(self.MarkArray.MarkRecord, mark_indices)
- self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
- ligature_indices = self.LigatureCoverage.subset(s.glyphs)
- self.LigatureArray.LigatureAttach = _list_subset(self.LigatureArray.LigatureAttach, ligature_indices)
- self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach)
- # Prune empty classes
- class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
- self.ClassCount = len(class_indices)
- for m in self.MarkArray.MarkRecord:
- m.Class = class_indices.index(m.Class)
- for l in self.LigatureArray.LigatureAttach:
- for c in l.ComponentRecord:
- c.LigatureAnchor = _list_subset(c.LigatureAnchor, class_indices)
- return bool(self.ClassCount and
- self.MarkArray.MarkCount and
- self.LigatureArray.LigatureCount)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ mark_indices = self.MarkCoverage.subset(s.glyphs)
+ self.MarkArray.MarkRecord = _list_subset(
+ self.MarkArray.MarkRecord, mark_indices
+ )
+ self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
+ ligature_indices = self.LigatureCoverage.subset(s.glyphs)
+ self.LigatureArray.LigatureAttach = _list_subset(
+ self.LigatureArray.LigatureAttach, ligature_indices
+ )
+ self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach)
+ # Prune empty classes
+ class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
+ self.ClassCount = len(class_indices)
+ for m in self.MarkArray.MarkRecord:
+ m.Class = class_indices.index(m.Class)
+ for l in self.LigatureArray.LigatureAttach:
+ if l is None:
+ continue
+ for c in l.ComponentRecord:
+ c.LigatureAnchor = _list_subset(c.LigatureAnchor, class_indices)
+ return bool(
+ self.ClassCount
+ and self.MarkArray.MarkCount
+ and self.LigatureArray.LigatureCount
+ )
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.MarkLigPos)
def prune_post_subset(self, font, options):
- if not options.hinting:
- for m in self.MarkArray.MarkRecord:
- if m.MarkAnchor:
- m.MarkAnchor.prune_hints()
- for l in self.LigatureArray.LigatureAttach:
- for c in l.ComponentRecord:
- for a in c.LigatureAnchor:
- if a:
- a.prune_hints()
- return True
+ if not options.hinting:
+ for m in self.MarkArray.MarkRecord:
+ if m.MarkAnchor:
+ m.MarkAnchor.prune_hints()
+ for l in self.LigatureArray.LigatureAttach:
+ if l is None:
+ continue
+ for c in l.ComponentRecord:
+ for a in c.LigatureAnchor:
+ if a:
+ a.prune_hints()
+ return True
+
@_add_method(otTables.MarkMarkPos)
def subset_glyphs(self, s):
- if self.Format == 1:
- mark1_indices = self.Mark1Coverage.subset(s.glyphs)
- self.Mark1Array.MarkRecord = _list_subset(self.Mark1Array.MarkRecord, mark1_indices)
- self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord)
- mark2_indices = self.Mark2Coverage.subset(s.glyphs)
- self.Mark2Array.Mark2Record = _list_subset(self.Mark2Array.Mark2Record, mark2_indices)
- self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record)
- # Prune empty classes
- class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord)
- self.ClassCount = len(class_indices)
- for m in self.Mark1Array.MarkRecord:
- m.Class = class_indices.index(m.Class)
- for b in self.Mark2Array.Mark2Record:
- b.Mark2Anchor = _list_subset(b.Mark2Anchor, class_indices)
- return bool(self.ClassCount and
- self.Mark1Array.MarkCount and
- self.Mark2Array.MarkCount)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ mark1_indices = self.Mark1Coverage.subset(s.glyphs)
+ self.Mark1Array.MarkRecord = _list_subset(
+ self.Mark1Array.MarkRecord, mark1_indices
+ )
+ self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord)
+ mark2_indices = self.Mark2Coverage.subset(s.glyphs)
+ self.Mark2Array.Mark2Record = _list_subset(
+ self.Mark2Array.Mark2Record, mark2_indices
+ )
+ self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record)
+ # Prune empty classes
+ class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord)
+ self.ClassCount = len(class_indices)
+ for m in self.Mark1Array.MarkRecord:
+ m.Class = class_indices.index(m.Class)
+ for b in self.Mark2Array.Mark2Record:
+ b.Mark2Anchor = _list_subset(b.Mark2Anchor, class_indices)
+ return bool(
+ self.ClassCount and self.Mark1Array.MarkCount and self.Mark2Array.MarkCount
+ )
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.MarkMarkPos)
def prune_post_subset(self, font, options):
- if not options.hinting:
- for m in self.Mark1Array.MarkRecord:
- if m.MarkAnchor:
- m.MarkAnchor.prune_hints()
- for b in self.Mark2Array.Mark2Record:
- for m in b.Mark2Anchor:
- if m:
- m.prune_hints()
- return True
-
-@_add_method(otTables.SingleSubst,
- otTables.MultipleSubst,
- otTables.AlternateSubst,
- otTables.LigatureSubst,
- otTables.ReverseChainSingleSubst,
- otTables.SinglePos,
- otTables.PairPos,
- otTables.CursivePos,
- otTables.MarkBasePos,
- otTables.MarkLigPos,
- otTables.MarkMarkPos)
+ if not options.hinting:
+ for m in self.Mark1Array.MarkRecord:
+ if m.MarkAnchor:
+ m.MarkAnchor.prune_hints()
+ for b in self.Mark2Array.Mark2Record:
+ for m in b.Mark2Anchor:
+ if m:
+ m.prune_hints()
+ return True
+
+
+@_add_method(
+ otTables.SingleSubst,
+ otTables.MultipleSubst,
+ otTables.AlternateSubst,
+ otTables.LigatureSubst,
+ otTables.ReverseChainSingleSubst,
+ otTables.SinglePos,
+ otTables.PairPos,
+ otTables.CursivePos,
+ otTables.MarkBasePos,
+ otTables.MarkLigPos,
+ otTables.MarkMarkPos,
+)
def subset_lookups(self, lookup_indices):
- pass
-
-@_add_method(otTables.SingleSubst,
- otTables.MultipleSubst,
- otTables.AlternateSubst,
- otTables.LigatureSubst,
- otTables.ReverseChainSingleSubst,
- otTables.SinglePos,
- otTables.PairPos,
- otTables.CursivePos,
- otTables.MarkBasePos,
- otTables.MarkLigPos,
- otTables.MarkMarkPos)
+ pass
+
+
+@_add_method(
+ otTables.SingleSubst,
+ otTables.MultipleSubst,
+ otTables.AlternateSubst,
+ otTables.LigatureSubst,
+ otTables.ReverseChainSingleSubst,
+ otTables.SinglePos,
+ otTables.PairPos,
+ otTables.CursivePos,
+ otTables.MarkBasePos,
+ otTables.MarkLigPos,
+ otTables.MarkMarkPos,
+)
def collect_lookups(self):
- return []
-
-@_add_method(otTables.SingleSubst,
- otTables.MultipleSubst,
- otTables.AlternateSubst,
- otTables.LigatureSubst,
- otTables.ReverseChainSingleSubst,
- otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
+ return []
+
+
+@_add_method(
+ otTables.SingleSubst,
+ otTables.MultipleSubst,
+ otTables.AlternateSubst,
+ otTables.LigatureSubst,
+ otTables.ReverseChainSingleSubst,
+ otTables.ContextSubst,
+ otTables.ChainContextSubst,
+ otTables.ContextPos,
+ otTables.ChainContextPos,
+)
def prune_post_subset(self, font, options):
- return True
+ return True
+
-@_add_method(otTables.SingleSubst,
- otTables.AlternateSubst,
- otTables.ReverseChainSingleSubst)
+@_add_method(
+ otTables.SingleSubst, otTables.AlternateSubst, otTables.ReverseChainSingleSubst
+)
def may_have_non_1to1(self):
- return False
+ return False
-@_add_method(otTables.MultipleSubst,
- otTables.LigatureSubst,
- otTables.ContextSubst,
- otTables.ChainContextSubst)
+
+@_add_method(
+ otTables.MultipleSubst,
+ otTables.LigatureSubst,
+ otTables.ContextSubst,
+ otTables.ChainContextSubst,
+)
def may_have_non_1to1(self):
- return True
+ return True
-@_add_method(otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
-def __subset_classify_context(self):
- class ContextHelper(object):
- def __init__(self, klass, Format):
- if klass.__name__.endswith('Subst'):
- Typ = 'Sub'
- Type = 'Subst'
- else:
- Typ = 'Pos'
- Type = 'Pos'
- if klass.__name__.startswith('Chain'):
- Chain = 'Chain'
- InputIdx = 1
- DataLen = 3
- else:
- Chain = ''
- InputIdx = 0
- DataLen = 1
- ChainTyp = Chain+Typ
-
- self.Typ = Typ
- self.Type = Type
- self.Chain = Chain
- self.ChainTyp = ChainTyp
- self.InputIdx = InputIdx
- self.DataLen = DataLen
-
- self.LookupRecord = Type+'LookupRecord'
-
- if Format == 1:
- Coverage = lambda r: r.Coverage
- ChainCoverage = lambda r: r.Coverage
- ContextData = lambda r:(None,)
- ChainContextData = lambda r:(None, None, None)
- SetContextData = None
- SetChainContextData = None
- RuleData = lambda r:(r.Input,)
- ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
- def SetRuleData(r, d):
- (r.Input,) = d
- (r.GlyphCount,) = (len(x)+1 for x in d)
- def ChainSetRuleData(r, d):
- (r.Backtrack, r.Input, r.LookAhead) = d
- (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
- elif Format == 2:
- Coverage = lambda r: r.Coverage
- ChainCoverage = lambda r: r.Coverage
- ContextData = lambda r:(r.ClassDef,)
- ChainContextData = lambda r:(r.BacktrackClassDef,
- r.InputClassDef,
- r.LookAheadClassDef)
- def SetContextData(r, d):
- (r.ClassDef,) = d
- def SetChainContextData(r, d):
- (r.BacktrackClassDef,
- r.InputClassDef,
- r.LookAheadClassDef) = d
- RuleData = lambda r:(r.Class,)
- ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
- def SetRuleData(r, d):
- (r.Class,) = d
- (r.GlyphCount,) = (len(x)+1 for x in d)
- def ChainSetRuleData(r, d):
- (r.Backtrack, r.Input, r.LookAhead) = d
- (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
- elif Format == 3:
- Coverage = lambda r: r.Coverage[0]
- ChainCoverage = lambda r: r.InputCoverage[0]
- ContextData = None
- ChainContextData = None
- SetContextData = None
- SetChainContextData = None
- RuleData = lambda r: r.Coverage
- ChainRuleData = lambda r:(r.BacktrackCoverage +
- r.InputCoverage +
- r.LookAheadCoverage)
- def SetRuleData(r, d):
- (r.Coverage,) = d
- (r.GlyphCount,) = (len(x) for x in d)
- def ChainSetRuleData(r, d):
- (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
- (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d)
- else:
- assert 0, "unknown format: %s" % Format
-
- if Chain:
- self.Coverage = ChainCoverage
- self.ContextData = ChainContextData
- self.SetContextData = SetChainContextData
- self.RuleData = ChainRuleData
- self.SetRuleData = ChainSetRuleData
- else:
- self.Coverage = Coverage
- self.ContextData = ContextData
- self.SetContextData = SetContextData
- self.RuleData = RuleData
- self.SetRuleData = SetRuleData
-
- if Format == 1:
- self.Rule = ChainTyp+'Rule'
- self.RuleCount = ChainTyp+'RuleCount'
- self.RuleSet = ChainTyp+'RuleSet'
- self.RuleSetCount = ChainTyp+'RuleSetCount'
- self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
- elif Format == 2:
- self.Rule = ChainTyp+'ClassRule'
- self.RuleCount = ChainTyp+'ClassRuleCount'
- self.RuleSet = ChainTyp+'ClassSet'
- self.RuleSetCount = ChainTyp+'ClassSetCount'
- self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c
- else (set(glyphs) if r == 0 else set()))
-
- self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
- self.ClassDefIndex = 1 if Chain else 0
- self.Input = 'Input' if Chain else 'Class'
- elif Format == 3:
- self.Input = 'InputCoverage' if Chain else 'Coverage'
-
- if self.Format not in [1, 2, 3]:
- return None # Don't shoot the messenger; let it go
- if not hasattr(self.__class__, "_subset__ContextHelpers"):
- self.__class__._subset__ContextHelpers = {}
- if self.Format not in self.__class__._subset__ContextHelpers:
- helper = ContextHelper(self.__class__, self.Format)
- self.__class__._subset__ContextHelpers[self.Format] = helper
- return self.__class__._subset__ContextHelpers[self.Format]
-
-@_add_method(otTables.ContextSubst,
- otTables.ChainContextSubst)
+@_add_method(
+ otTables.ContextSubst,
+ otTables.ChainContextSubst,
+ otTables.ContextPos,
+ otTables.ChainContextPos,
+)
+def __subset_classify_context(self):
+ class ContextHelper(object):
+ def __init__(self, klass, Format):
+ if klass.__name__.endswith("Subst"):
+ Typ = "Sub"
+ Type = "Subst"
+ else:
+ Typ = "Pos"
+ Type = "Pos"
+ if klass.__name__.startswith("Chain"):
+ Chain = "Chain"
+ InputIdx = 1
+ DataLen = 3
+ else:
+ Chain = ""
+ InputIdx = 0
+ DataLen = 1
+ ChainTyp = Chain + Typ
+
+ self.Typ = Typ
+ self.Type = Type
+ self.Chain = Chain
+ self.ChainTyp = ChainTyp
+ self.InputIdx = InputIdx
+ self.DataLen = DataLen
+
+ self.LookupRecord = Type + "LookupRecord"
+
+ if Format == 1:
+ Coverage = lambda r: r.Coverage
+ ChainCoverage = lambda r: r.Coverage
+ ContextData = lambda r: (None,)
+ ChainContextData = lambda r: (None, None, None)
+ SetContextData = None
+ SetChainContextData = None
+ RuleData = lambda r: (r.Input,)
+ ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)
+
+ def SetRuleData(r, d):
+ (r.Input,) = d
+ (r.GlyphCount,) = (len(x) + 1 for x in d)
+
+ def ChainSetRuleData(r, d):
+ (r.Backtrack, r.Input, r.LookAhead) = d
+ (
+ r.BacktrackGlyphCount,
+ r.InputGlyphCount,
+ r.LookAheadGlyphCount,
+ ) = (len(d[0]), len(d[1]) + 1, len(d[2]))
+
+ elif Format == 2:
+ Coverage = lambda r: r.Coverage
+ ChainCoverage = lambda r: r.Coverage
+ ContextData = lambda r: (r.ClassDef,)
+ ChainContextData = lambda r: (
+ r.BacktrackClassDef,
+ r.InputClassDef,
+ r.LookAheadClassDef,
+ )
+
+ def SetContextData(r, d):
+ (r.ClassDef,) = d
+
+ def SetChainContextData(r, d):
+ (r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef) = d
+
+ RuleData = lambda r: (r.Class,)
+ ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)
+
+ def SetRuleData(r, d):
+ (r.Class,) = d
+ (r.GlyphCount,) = (len(x) + 1 for x in d)
+
+ def ChainSetRuleData(r, d):
+ (r.Backtrack, r.Input, r.LookAhead) = d
+ (
+ r.BacktrackGlyphCount,
+ r.InputGlyphCount,
+ r.LookAheadGlyphCount,
+ ) = (len(d[0]), len(d[1]) + 1, len(d[2]))
+
+ elif Format == 3:
+ Coverage = lambda r: r.Coverage[0]
+ ChainCoverage = lambda r: r.InputCoverage[0]
+ ContextData = None
+ ChainContextData = None
+ SetContextData = None
+ SetChainContextData = None
+ RuleData = lambda r: r.Coverage
+ ChainRuleData = lambda r: (
+ r.BacktrackCoverage + r.InputCoverage + r.LookAheadCoverage
+ )
+
+ def SetRuleData(r, d):
+ (r.Coverage,) = d
+ (r.GlyphCount,) = (len(x) for x in d)
+
+ def ChainSetRuleData(r, d):
+ (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
+ (
+ r.BacktrackGlyphCount,
+ r.InputGlyphCount,
+ r.LookAheadGlyphCount,
+ ) = (len(x) for x in d)
+
+ else:
+ assert 0, "unknown format: %s" % Format
+
+ if Chain:
+ self.Coverage = ChainCoverage
+ self.ContextData = ChainContextData
+ self.SetContextData = SetChainContextData
+ self.RuleData = ChainRuleData
+ self.SetRuleData = ChainSetRuleData
+ else:
+ self.Coverage = Coverage
+ self.ContextData = ContextData
+ self.SetContextData = SetContextData
+ self.RuleData = RuleData
+ self.SetRuleData = SetRuleData
+
+ if Format == 1:
+ self.Rule = ChainTyp + "Rule"
+ self.RuleCount = ChainTyp + "RuleCount"
+ self.RuleSet = ChainTyp + "RuleSet"
+ self.RuleSetCount = ChainTyp + "RuleSetCount"
+ self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
+ elif Format == 2:
+ self.Rule = ChainTyp + "ClassRule"
+ self.RuleCount = ChainTyp + "ClassRuleCount"
+ self.RuleSet = ChainTyp + "ClassSet"
+ self.RuleSetCount = ChainTyp + "ClassSetCount"
+ self.Intersect = lambda glyphs, c, r: (
+ c.intersect_class(glyphs, r)
+ if c
+ else (set(glyphs) if r == 0 else set())
+ )
+
+ self.ClassDef = "InputClassDef" if Chain else "ClassDef"
+ self.ClassDefIndex = 1 if Chain else 0
+ self.Input = "Input" if Chain else "Class"
+ elif Format == 3:
+ self.Input = "InputCoverage" if Chain else "Coverage"
+
+ if self.Format not in [1, 2, 3]:
+ return None # Don't shoot the messenger; let it go
+ if not hasattr(self.__class__, "_subset__ContextHelpers"):
+ self.__class__._subset__ContextHelpers = {}
+ if self.Format not in self.__class__._subset__ContextHelpers:
+ helper = ContextHelper(self.__class__, self.Format)
+ self.__class__._subset__ContextHelpers[self.Format] = helper
+ return self.__class__._subset__ContextHelpers[self.Format]
+
+
+@_add_method(otTables.ContextSubst, otTables.ChainContextSubst)
def closure_glyphs(self, s, cur_glyphs):
- c = self.__subset_classify_context()
-
- indices = c.Coverage(self).intersect(cur_glyphs)
- if not indices:
- return []
- cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs)
-
- if self.Format == 1:
- ContextData = c.ContextData(self)
- rss = getattr(self, c.RuleSet)
- rssCount = getattr(self, c.RuleSetCount)
- for i in indices:
- if i >= rssCount or not rss[i]: continue
- for r in getattr(rss[i], c.Rule):
- if not r: continue
- if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
- for cd,klist in zip(ContextData, c.RuleData(r))):
- continue
- chaos = set()
- for ll in getattr(r, c.LookupRecord):
- if not ll: continue
- seqi = ll.SequenceIndex
- if seqi in chaos:
- # TODO Can we improve this?
- pos_glyphs = None
- else:
- if seqi == 0:
- pos_glyphs = frozenset([c.Coverage(self).glyphs[i]])
- else:
- pos_glyphs = frozenset([r.Input[seqi - 1]])
- lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
- chaos.add(seqi)
- if lookup.may_have_non_1to1():
- chaos.update(range(seqi, len(r.Input)+2))
- lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
- elif self.Format == 2:
- ClassDef = getattr(self, c.ClassDef)
- indices = ClassDef.intersect(cur_glyphs)
- ContextData = c.ContextData(self)
- rss = getattr(self, c.RuleSet)
- rssCount = getattr(self, c.RuleSetCount)
- for i in indices:
- if i >= rssCount or not rss[i]: continue
- for r in getattr(rss[i], c.Rule):
- if not r: continue
- if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
- for cd,klist in zip(ContextData, c.RuleData(r))):
- continue
- chaos = set()
- for ll in getattr(r, c.LookupRecord):
- if not ll: continue
- seqi = ll.SequenceIndex
- if seqi in chaos:
- # TODO Can we improve this?
- pos_glyphs = None
- else:
- if seqi == 0:
- pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i))
- else:
- pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1]))
- lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
- chaos.add(seqi)
- if lookup.may_have_non_1to1():
- chaos.update(range(seqi, len(getattr(r, c.Input))+2))
- lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
- elif self.Format == 3:
- if not all(x is not None and x.intersect(s.glyphs) for x in c.RuleData(self)):
- return []
- r = self
- input_coverages = getattr(r, c.Input)
- chaos = set()
- for ll in getattr(r, c.LookupRecord):
- if not ll: continue
- seqi = ll.SequenceIndex
- if seqi in chaos:
- # TODO Can we improve this?
- pos_glyphs = None
- else:
- if seqi == 0:
- pos_glyphs = frozenset(cur_glyphs)
- else:
- pos_glyphs = frozenset(input_coverages[seqi].intersect_glyphs(s.glyphs))
- lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
- chaos.add(seqi)
- if lookup.may_have_non_1to1():
- chaos.update(range(seqi, len(input_coverages)+1))
- lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
- else:
- assert 0, "unknown format: %s" % self.Format
-
-@_add_method(otTables.ContextSubst,
- otTables.ContextPos,
- otTables.ChainContextSubst,
- otTables.ChainContextPos)
+ c = self.__subset_classify_context()
+
+ indices = c.Coverage(self).intersect(cur_glyphs)
+ if not indices:
+ return []
+ cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs)
+
+ if self.Format == 1:
+ ContextData = c.ContextData(self)
+ rss = getattr(self, c.RuleSet)
+ rssCount = getattr(self, c.RuleSetCount)
+ for i in indices:
+ if i >= rssCount or not rss[i]:
+ continue
+ for r in getattr(rss[i], c.Rule):
+ if not r:
+ continue
+ if not all(
+ all(c.Intersect(s.glyphs, cd, k) for k in klist)
+ for cd, klist in zip(ContextData, c.RuleData(r))
+ ):
+ continue
+ chaos = set()
+ for ll in getattr(r, c.LookupRecord):
+ if not ll:
+ continue
+ seqi = ll.SequenceIndex
+ if seqi in chaos:
+ # TODO Can we improve this?
+ pos_glyphs = None
+ else:
+ if seqi == 0:
+ pos_glyphs = frozenset([c.Coverage(self).glyphs[i]])
+ else:
+ pos_glyphs = frozenset([r.Input[seqi - 1]])
+ lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
+ chaos.add(seqi)
+ if lookup.may_have_non_1to1():
+ chaos.update(range(seqi, len(r.Input) + 2))
+ lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
+ elif self.Format == 2:
+ ClassDef = getattr(self, c.ClassDef)
+ indices = ClassDef.intersect(cur_glyphs)
+ ContextData = c.ContextData(self)
+ rss = getattr(self, c.RuleSet)
+ rssCount = getattr(self, c.RuleSetCount)
+ for i in indices:
+ if i >= rssCount or not rss[i]:
+ continue
+ for r in getattr(rss[i], c.Rule):
+ if not r:
+ continue
+ if not all(
+ all(c.Intersect(s.glyphs, cd, k) for k in klist)
+ for cd, klist in zip(ContextData, c.RuleData(r))
+ ):
+ continue
+ chaos = set()
+ for ll in getattr(r, c.LookupRecord):
+ if not ll:
+ continue
+ seqi = ll.SequenceIndex
+ if seqi in chaos:
+ # TODO Can we improve this?
+ pos_glyphs = None
+ else:
+ if seqi == 0:
+ pos_glyphs = frozenset(
+ ClassDef.intersect_class(cur_glyphs, i)
+ )
+ else:
+ pos_glyphs = frozenset(
+ ClassDef.intersect_class(
+ s.glyphs, getattr(r, c.Input)[seqi - 1]
+ )
+ )
+ lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
+ chaos.add(seqi)
+ if lookup.may_have_non_1to1():
+ chaos.update(range(seqi, len(getattr(r, c.Input)) + 2))
+ lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
+ elif self.Format == 3:
+ if not all(x is not None and x.intersect(s.glyphs) for x in c.RuleData(self)):
+ return []
+ r = self
+ input_coverages = getattr(r, c.Input)
+ chaos = set()
+ for ll in getattr(r, c.LookupRecord):
+ if not ll:
+ continue
+ seqi = ll.SequenceIndex
+ if seqi in chaos:
+ # TODO Can we improve this?
+ pos_glyphs = None
+ else:
+ if seqi == 0:
+ pos_glyphs = frozenset(cur_glyphs)
+ else:
+ pos_glyphs = frozenset(
+ input_coverages[seqi].intersect_glyphs(s.glyphs)
+ )
+ lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
+ chaos.add(seqi)
+ if lookup.may_have_non_1to1():
+ chaos.update(range(seqi, len(input_coverages) + 1))
+ lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
+
+@_add_method(
+ otTables.ContextSubst,
+ otTables.ContextPos,
+ otTables.ChainContextSubst,
+ otTables.ChainContextPos,
+)
def subset_glyphs(self, s):
- c = self.__subset_classify_context()
-
- if self.Format == 1:
- indices = self.Coverage.subset(s.glyphs)
- rss = getattr(self, c.RuleSet)
- rssCount = getattr(self, c.RuleSetCount)
- rss = [rss[i] for i in indices if i < rssCount]
- for rs in rss:
- if not rs: continue
- ss = getattr(rs, c.Rule)
- ss = [r for r in ss
- if r and all(all(g in s.glyphs for g in glist)
- for glist in c.RuleData(r))]
- setattr(rs, c.Rule, ss)
- setattr(rs, c.RuleCount, len(ss))
- # Prune empty rulesets
- indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)]
- self.Coverage.remap(indices)
- rss = _list_subset(rss, indices)
- setattr(self, c.RuleSet, rss)
- setattr(self, c.RuleSetCount, len(rss))
- return bool(rss)
- elif self.Format == 2:
- if not self.Coverage.subset(s.glyphs):
- return False
- ContextData = c.ContextData(self)
- klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData]
-
- # Keep rulesets for class numbers that survived.
- indices = klass_maps[c.ClassDefIndex]
- rss = getattr(self, c.RuleSet)
- rssCount = getattr(self, c.RuleSetCount)
- rss = [rss[i] for i in indices if i < rssCount]
- del rssCount
- # Delete, but not renumber, unreachable rulesets.
- indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs)
- rss = [rss if i in indices else None for i,rss in enumerate(rss)]
-
- for rs in rss:
- if not rs: continue
- ss = getattr(rs, c.Rule)
- ss = [r for r in ss
- if r and all(all(k in klass_map for k in klist)
- for klass_map,klist in zip(klass_maps, c.RuleData(r)))]
- setattr(rs, c.Rule, ss)
- setattr(rs, c.RuleCount, len(ss))
-
- # Remap rule classes
- for r in ss:
- c.SetRuleData(r, [[klass_map.index(k) for k in klist]
- for klass_map,klist in zip(klass_maps, c.RuleData(r))])
-
- # Prune empty rulesets
- rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss]
- while rss and rss[-1] is None:
- del rss[-1]
- setattr(self, c.RuleSet, rss)
- setattr(self, c.RuleSetCount, len(rss))
-
- # TODO: We can do a second round of remapping class values based
- # on classes that are actually used in at least one rule. Right
- # now we subset classes to c.glyphs only. Or better, rewrite
- # the above to do that.
-
- return bool(rss)
- elif self.Format == 3:
- return all(x is not None and x.subset(s.glyphs) for x in c.RuleData(self))
- else:
- assert 0, "unknown format: %s" % self.Format
-
-@_add_method(otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
+ c = self.__subset_classify_context()
+
+ if self.Format == 1:
+ indices = self.Coverage.subset(s.glyphs)
+ rss = getattr(self, c.RuleSet)
+ rssCount = getattr(self, c.RuleSetCount)
+ rss = [rss[i] for i in indices if i < rssCount]
+ for rs in rss:
+ if not rs:
+ continue
+ ss = getattr(rs, c.Rule)
+ ss = [
+ r
+ for r in ss
+ if r
+ and all(all(g in s.glyphs for g in glist) for glist in c.RuleData(r))
+ ]
+ setattr(rs, c.Rule, ss)
+ setattr(rs, c.RuleCount, len(ss))
+ # Prune empty rulesets
+ indices = [i for i, rs in enumerate(rss) if rs and getattr(rs, c.Rule)]
+ self.Coverage.remap(indices)
+ rss = _list_subset(rss, indices)
+ setattr(self, c.RuleSet, rss)
+ setattr(self, c.RuleSetCount, len(rss))
+ return bool(rss)
+ elif self.Format == 2:
+ if not self.Coverage.subset(s.glyphs):
+ return False
+ ContextData = c.ContextData(self)
+ klass_maps = [
+ x.subset(s.glyphs, remap=True) if x else None for x in ContextData
+ ]
+
+ # Keep rulesets for class numbers that survived.
+ indices = klass_maps[c.ClassDefIndex]
+ rss = getattr(self, c.RuleSet)
+ rssCount = getattr(self, c.RuleSetCount)
+ rss = [rss[i] for i in indices if i < rssCount]
+ del rssCount
+ # Delete, but not renumber, unreachable rulesets.
+ indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs)
+ rss = [rss if i in indices else None for i, rss in enumerate(rss)]
+
+ for rs in rss:
+ if not rs:
+ continue
+ ss = getattr(rs, c.Rule)
+ ss = [
+ r
+ for r in ss
+ if r
+ and all(
+ all(k in klass_map for k in klist)
+ for klass_map, klist in zip(klass_maps, c.RuleData(r))
+ )
+ ]
+ setattr(rs, c.Rule, ss)
+ setattr(rs, c.RuleCount, len(ss))
+
+ # Remap rule classes
+ for r in ss:
+ c.SetRuleData(
+ r,
+ [
+ [klass_map.index(k) for k in klist]
+ for klass_map, klist in zip(klass_maps, c.RuleData(r))
+ ],
+ )
+
+ # Prune empty rulesets
+ rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss]
+ while rss and rss[-1] is None:
+ del rss[-1]
+ setattr(self, c.RuleSet, rss)
+ setattr(self, c.RuleSetCount, len(rss))
+
+ # TODO: We can do a second round of remapping class values based
+ # on classes that are actually used in at least one rule. Right
+ # now we subset classes to c.glyphs only. Or better, rewrite
+ # the above to do that.
+
+ return bool(rss)
+ elif self.Format == 3:
+ return all(x is not None and x.subset(s.glyphs) for x in c.RuleData(self))
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
+
+@_add_method(
+ otTables.ContextSubst,
+ otTables.ChainContextSubst,
+ otTables.ContextPos,
+ otTables.ChainContextPos,
+)
def subset_lookups(self, lookup_indices):
- c = self.__subset_classify_context()
-
- if self.Format in [1, 2]:
- for rs in getattr(self, c.RuleSet):
- if not rs: continue
- for r in getattr(rs, c.Rule):
- if not r: continue
- setattr(r, c.LookupRecord,
- [ll for ll in getattr(r, c.LookupRecord)
- if ll and ll.LookupListIndex in lookup_indices])
- for ll in getattr(r, c.LookupRecord):
- if not ll: continue
- ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
- elif self.Format == 3:
- setattr(self, c.LookupRecord,
- [ll for ll in getattr(self, c.LookupRecord)
- if ll and ll.LookupListIndex in lookup_indices])
- for ll in getattr(self, c.LookupRecord):
- if not ll: continue
- ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
- else:
- assert 0, "unknown format: %s" % self.Format
-
-@_add_method(otTables.ContextSubst,
- otTables.ChainContextSubst,
- otTables.ContextPos,
- otTables.ChainContextPos)
+ c = self.__subset_classify_context()
+
+ if self.Format in [1, 2]:
+ for rs in getattr(self, c.RuleSet):
+ if not rs:
+ continue
+ for r in getattr(rs, c.Rule):
+ if not r:
+ continue
+ setattr(
+ r,
+ c.LookupRecord,
+ [
+ ll
+ for ll in getattr(r, c.LookupRecord)
+ if ll and ll.LookupListIndex in lookup_indices
+ ],
+ )
+ for ll in getattr(r, c.LookupRecord):
+ if not ll:
+ continue
+ ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
+ elif self.Format == 3:
+ setattr(
+ self,
+ c.LookupRecord,
+ [
+ ll
+ for ll in getattr(self, c.LookupRecord)
+ if ll and ll.LookupListIndex in lookup_indices
+ ],
+ )
+ for ll in getattr(self, c.LookupRecord):
+ if not ll:
+ continue
+ ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
+
+@_add_method(
+ otTables.ContextSubst,
+ otTables.ChainContextSubst,
+ otTables.ContextPos,
+ otTables.ChainContextPos,
+)
def collect_lookups(self):
- c = self.__subset_classify_context()
-
- if self.Format in [1, 2]:
- return [ll.LookupListIndex
- for rs in getattr(self, c.RuleSet) if rs
- for r in getattr(rs, c.Rule) if r
- for ll in getattr(r, c.LookupRecord) if ll]
- elif self.Format == 3:
- return [ll.LookupListIndex
- for ll in getattr(self, c.LookupRecord) if ll]
- else:
- assert 0, "unknown format: %s" % self.Format
+ c = self.__subset_classify_context()
+
+ if self.Format in [1, 2]:
+ return [
+ ll.LookupListIndex
+ for rs in getattr(self, c.RuleSet)
+ if rs
+ for r in getattr(rs, c.Rule)
+ if r
+ for ll in getattr(r, c.LookupRecord)
+ if ll
+ ]
+ elif self.Format == 3:
+ return [ll.LookupListIndex for ll in getattr(self, c.LookupRecord) if ll]
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.ExtensionSubst)
def closure_glyphs(self, s, cur_glyphs):
- if self.Format == 1:
- self.ExtSubTable.closure_glyphs(s, cur_glyphs)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ self.ExtSubTable.closure_glyphs(s, cur_glyphs)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.ExtensionSubst)
def may_have_non_1to1(self):
- if self.Format == 1:
- return self.ExtSubTable.may_have_non_1to1()
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ return self.ExtSubTable.may_have_non_1to1()
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
-@_add_method(otTables.ExtensionSubst,
- otTables.ExtensionPos)
+@_add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def subset_glyphs(self, s):
- if self.Format == 1:
- return self.ExtSubTable.subset_glyphs(s)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ return self.ExtSubTable.subset_glyphs(s)
+ else:
+ assert 0, "unknown format: %s" % self.Format
-@_add_method(otTables.ExtensionSubst,
- otTables.ExtensionPos)
+
+@_add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def prune_post_subset(self, font, options):
- if self.Format == 1:
- return self.ExtSubTable.prune_post_subset(font, options)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ return self.ExtSubTable.prune_post_subset(font, options)
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
-@_add_method(otTables.ExtensionSubst,
- otTables.ExtensionPos)
+@_add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def subset_lookups(self, lookup_indices):
- if self.Format == 1:
- return self.ExtSubTable.subset_lookups(lookup_indices)
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ return self.ExtSubTable.subset_lookups(lookup_indices)
+ else:
+ assert 0, "unknown format: %s" % self.Format
-@_add_method(otTables.ExtensionSubst,
- otTables.ExtensionPos)
+
+@_add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def collect_lookups(self):
- if self.Format == 1:
- return self.ExtSubTable.collect_lookups()
- else:
- assert 0, "unknown format: %s" % self.Format
+ if self.Format == 1:
+ return self.ExtSubTable.collect_lookups()
+ else:
+ assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.Lookup)
def closure_glyphs(self, s, cur_glyphs=None):
- if cur_glyphs is None:
- cur_glyphs = frozenset(s.glyphs)
-
- # Memoize
- key = id(self)
- doneLookups = s._doneLookups
- count,covered = doneLookups.get(key, (0, None))
- if count != len(s.glyphs):
- count,covered = doneLookups[key] = (len(s.glyphs), set())
- if cur_glyphs.issubset(covered):
- return
- covered.update(cur_glyphs)
-
- for st in self.SubTable:
- if not st: continue
- st.closure_glyphs(s, cur_glyphs)
+ if cur_glyphs is None:
+ cur_glyphs = frozenset(s.glyphs)
+
+ # Memoize
+ key = id(self)
+ doneLookups = s._doneLookups
+ count, covered = doneLookups.get(key, (0, None))
+ if count != len(s.glyphs):
+ count, covered = doneLookups[key] = (len(s.glyphs), set())
+ if cur_glyphs.issubset(covered):
+ return
+ covered.update(cur_glyphs)
+
+ for st in self.SubTable:
+ if not st:
+ continue
+ st.closure_glyphs(s, cur_glyphs)
+
@_add_method(otTables.Lookup)
def subset_glyphs(self, s):
- self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)]
- self.SubTableCount = len(self.SubTable)
- return bool(self.SubTableCount)
+ self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)]
+ self.SubTableCount = len(self.SubTable)
+ if hasattr(self, "MarkFilteringSet") and self.MarkFilteringSet is not None:
+ if self.MarkFilteringSet not in s.used_mark_sets:
+ self.MarkFilteringSet = None
+ self.LookupFlag &= ~0x10
+ else:
+ self.MarkFilteringSet = s.used_mark_sets.index(self.MarkFilteringSet)
+ return bool(self.SubTableCount)
+
@_add_method(otTables.Lookup)
def prune_post_subset(self, font, options):
- ret = False
- for st in self.SubTable:
- if not st: continue
- if st.prune_post_subset(font, options): ret = True
- return ret
+ ret = False
+ for st in self.SubTable:
+ if not st:
+ continue
+ if st.prune_post_subset(font, options):
+ ret = True
+ return ret
+
@_add_method(otTables.Lookup)
def subset_lookups(self, lookup_indices):
- for s in self.SubTable:
- s.subset_lookups(lookup_indices)
+ for s in self.SubTable:
+ s.subset_lookups(lookup_indices)
+
@_add_method(otTables.Lookup)
def collect_lookups(self):
- return sum((st.collect_lookups() for st in self.SubTable if st), [])
+ return sum((st.collect_lookups() for st in self.SubTable if st), [])
+
@_add_method(otTables.Lookup)
def may_have_non_1to1(self):
- return any(st.may_have_non_1to1() for st in self.SubTable if st)
+ return any(st.may_have_non_1to1() for st in self.SubTable if st)
+
@_add_method(otTables.LookupList)
def subset_glyphs(self, s):
- """Returns the indices of nonempty lookups."""
- return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)]
+ """Returns the indices of nonempty lookups."""
+ return [i for i, l in enumerate(self.Lookup) if l and l.subset_glyphs(s)]
+
@_add_method(otTables.LookupList)
def prune_post_subset(self, font, options):
- ret = False
- for l in self.Lookup:
- if not l: continue
- if l.prune_post_subset(font, options): ret = True
- return ret
+ ret = False
+ for l in self.Lookup:
+ if not l:
+ continue
+ if l.prune_post_subset(font, options):
+ ret = True
+ return ret
+
@_add_method(otTables.LookupList)
def subset_lookups(self, lookup_indices):
- self.ensureDecompiled()
- self.Lookup = [self.Lookup[i] for i in lookup_indices
- if i < self.LookupCount]
- self.LookupCount = len(self.Lookup)
- for l in self.Lookup:
- l.subset_lookups(lookup_indices)
+ self.ensureDecompiled()
+ self.Lookup = [self.Lookup[i] for i in lookup_indices if i < self.LookupCount]
+ self.LookupCount = len(self.Lookup)
+ for l in self.Lookup:
+ l.subset_lookups(lookup_indices)
+
@_add_method(otTables.LookupList)
def neuter_lookups(self, lookup_indices):
- """Sets lookups not in lookup_indices to None."""
- self.ensureDecompiled()
- self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)]
+ """Sets lookups not in lookup_indices to None."""
+ self.ensureDecompiled()
+ self.Lookup = [
+ l if i in lookup_indices else None for i, l in enumerate(self.Lookup)
+ ]
+
@_add_method(otTables.LookupList)
def closure_lookups(self, lookup_indices):
- """Returns sorted index of all lookups reachable from lookup_indices."""
- lookup_indices = _uniq_sort(lookup_indices)
- recurse = lookup_indices
- while True:
- recurse_lookups = sum((self.Lookup[i].collect_lookups()
- for i in recurse if i < self.LookupCount), [])
- recurse_lookups = [l for l in recurse_lookups
- if l not in lookup_indices and l < self.LookupCount]
- if not recurse_lookups:
- return _uniq_sort(lookup_indices)
- recurse_lookups = _uniq_sort(recurse_lookups)
- lookup_indices.extend(recurse_lookups)
- recurse = recurse_lookups
+ """Returns sorted index of all lookups reachable from lookup_indices."""
+ lookup_indices = _uniq_sort(lookup_indices)
+ recurse = lookup_indices
+ while True:
+ recurse_lookups = sum(
+ (self.Lookup[i].collect_lookups() for i in recurse if i < self.LookupCount),
+ [],
+ )
+ recurse_lookups = [
+ l
+ for l in recurse_lookups
+ if l not in lookup_indices and l < self.LookupCount
+ ]
+ if not recurse_lookups:
+ return _uniq_sort(lookup_indices)
+ recurse_lookups = _uniq_sort(recurse_lookups)
+ lookup_indices.extend(recurse_lookups)
+ recurse = recurse_lookups
+
@_add_method(otTables.Feature)
def subset_lookups(self, lookup_indices):
- """"Returns True if feature is non-empty afterwards."""
- self.LookupListIndex = [l for l in self.LookupListIndex
- if l in lookup_indices]
- # Now map them.
- self.LookupListIndex = [lookup_indices.index(l)
- for l in self.LookupListIndex]
- self.LookupCount = len(self.LookupListIndex)
- # keep 'size' feature even if it contains no lookups; but drop any other
- # empty feature (e.g. FeatureParams for stylistic set names)
- # https://github.com/fonttools/fonttools/issues/2324
- return (
- self.LookupCount or
- isinstance(self.FeatureParams, otTables.FeatureParamsSize)
- )
+ """ "Returns True if feature is non-empty afterwards."""
+ self.LookupListIndex = [l for l in self.LookupListIndex if l in lookup_indices]
+ # Now map them.
+ self.LookupListIndex = [lookup_indices.index(l) for l in self.LookupListIndex]
+ self.LookupCount = len(self.LookupListIndex)
+ # keep 'size' feature even if it contains no lookups; but drop any other
+ # empty feature (e.g. FeatureParams for stylistic set names)
+ # https://github.com/fonttools/fonttools/issues/2324
+ return self.LookupCount or isinstance(
+ self.FeatureParams, otTables.FeatureParamsSize
+ )
+
@_add_method(otTables.FeatureList)
def subset_lookups(self, lookup_indices):
- """Returns the indices of nonempty features."""
- # Note: Never ever drop feature 'pref', even if it's empty.
- # HarfBuzz chooses shaper for Khmer based on presence of this
- # feature. See thread at:
- # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html
- return [i for i,f in enumerate(self.FeatureRecord)
- if (f.Feature.subset_lookups(lookup_indices) or
- f.FeatureTag == 'pref')]
+ """Returns the indices of nonempty features."""
+ # Note: Never ever drop feature 'pref', even if it's empty.
+ # HarfBuzz chooses shaper for Khmer based on presence of this
+ # feature. See thread at:
+ # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html
+ return [
+ i
+ for i, f in enumerate(self.FeatureRecord)
+ if (f.Feature.subset_lookups(lookup_indices) or f.FeatureTag == "pref")
+ ]
+
@_add_method(otTables.FeatureList)
def collect_lookups(self, feature_indices):
- return sum((self.FeatureRecord[i].Feature.LookupListIndex
- for i in feature_indices
- if i < self.FeatureCount), [])
+ return sum(
+ (
+ self.FeatureRecord[i].Feature.LookupListIndex
+ for i in feature_indices
+ if i < self.FeatureCount
+ ),
+ [],
+ )
+
@_add_method(otTables.FeatureList)
def subset_features(self, feature_indices):
- self.ensureDecompiled()
- self.FeatureRecord = _list_subset(self.FeatureRecord, feature_indices)
- self.FeatureCount = len(self.FeatureRecord)
- return bool(self.FeatureCount)
+ self.ensureDecompiled()
+ self.FeatureRecord = _list_subset(self.FeatureRecord, feature_indices)
+ self.FeatureCount = len(self.FeatureRecord)
+ return bool(self.FeatureCount)
+
@_add_method(otTables.FeatureTableSubstitution)
def subset_lookups(self, lookup_indices):
- """Returns the indices of nonempty features."""
- return [r.FeatureIndex for r in self.SubstitutionRecord
- if r.Feature.subset_lookups(lookup_indices)]
+ """Returns the indices of nonempty features."""
+ return [
+ r.FeatureIndex
+ for r in self.SubstitutionRecord
+ if r.Feature.subset_lookups(lookup_indices)
+ ]
+
@_add_method(otTables.FeatureVariations)
def subset_lookups(self, lookup_indices):
- """Returns the indices of nonempty features."""
- return sum((f.FeatureTableSubstitution.subset_lookups(lookup_indices)
- for f in self.FeatureVariationRecord), [])
+ """Returns the indices of nonempty features."""
+ return sum(
+ (
+ f.FeatureTableSubstitution.subset_lookups(lookup_indices)
+ for f in self.FeatureVariationRecord
+ ),
+ [],
+ )
+
@_add_method(otTables.FeatureVariations)
def collect_lookups(self, feature_indices):
- return sum((r.Feature.LookupListIndex
- for vr in self.FeatureVariationRecord
- for r in vr.FeatureTableSubstitution.SubstitutionRecord
- if r.FeatureIndex in feature_indices), [])
+ return sum(
+ (
+ r.Feature.LookupListIndex
+ for vr in self.FeatureVariationRecord
+ for r in vr.FeatureTableSubstitution.SubstitutionRecord
+ if r.FeatureIndex in feature_indices
+ ),
+ [],
+ )
+
@_add_method(otTables.FeatureTableSubstitution)
def subset_features(self, feature_indices):
- self.ensureDecompiled()
- self.SubstitutionRecord = [r for r in self.SubstitutionRecord
- if r.FeatureIndex in feature_indices]
- # remap feature indices
- for r in self.SubstitutionRecord:
- r.FeatureIndex = feature_indices.index(r.FeatureIndex)
- self.SubstitutionCount = len(self.SubstitutionRecord)
- return bool(self.SubstitutionCount)
+ self.ensureDecompiled()
+ self.SubstitutionRecord = [
+ r for r in self.SubstitutionRecord if r.FeatureIndex in feature_indices
+ ]
+ # remap feature indices
+ for r in self.SubstitutionRecord:
+ r.FeatureIndex = feature_indices.index(r.FeatureIndex)
+ self.SubstitutionCount = len(self.SubstitutionRecord)
+ return bool(self.SubstitutionCount)
+
@_add_method(otTables.FeatureVariations)
def subset_features(self, feature_indices):
- self.ensureDecompiled()
- for r in self.FeatureVariationRecord:
- r.FeatureTableSubstitution.subset_features(feature_indices)
- # Prune empty records at the end only
- # https://github.com/fonttools/fonttools/issues/1881
- while (self.FeatureVariationRecord and
- not self.FeatureVariationRecord[-1]
- .FeatureTableSubstitution.SubstitutionCount):
- self.FeatureVariationRecord.pop()
- self.FeatureVariationCount = len(self.FeatureVariationRecord)
- return bool(self.FeatureVariationCount)
-
-@_add_method(otTables.DefaultLangSys,
- otTables.LangSys)
+ self.ensureDecompiled()
+ for r in self.FeatureVariationRecord:
+ r.FeatureTableSubstitution.subset_features(feature_indices)
+ # Prune empty records at the end only
+ # https://github.com/fonttools/fonttools/issues/1881
+ while (
+ self.FeatureVariationRecord
+ and not self.FeatureVariationRecord[
+ -1
+ ].FeatureTableSubstitution.SubstitutionCount
+ ):
+ self.FeatureVariationRecord.pop()
+ self.FeatureVariationCount = len(self.FeatureVariationRecord)
+ return bool(self.FeatureVariationCount)
+
+
+@_add_method(otTables.DefaultLangSys, otTables.LangSys)
def subset_features(self, feature_indices):
- if self.ReqFeatureIndex in feature_indices:
- self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex)
- else:
- self.ReqFeatureIndex = 65535
- self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices]
- # Now map them.
- self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex
- if f in feature_indices]
- self.FeatureCount = len(self.FeatureIndex)
- return bool(self.FeatureCount or self.ReqFeatureIndex != 65535)
-
-@_add_method(otTables.DefaultLangSys,
- otTables.LangSys)
+ if self.ReqFeatureIndex in feature_indices:
+ self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex)
+ else:
+ self.ReqFeatureIndex = 65535
+ self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices]
+ # Now map them.
+ self.FeatureIndex = [
+ feature_indices.index(f) for f in self.FeatureIndex if f in feature_indices
+ ]
+ self.FeatureCount = len(self.FeatureIndex)
+ return bool(self.FeatureCount or self.ReqFeatureIndex != 65535)
+
+
+@_add_method(otTables.DefaultLangSys, otTables.LangSys)
def collect_features(self):
- feature_indices = self.FeatureIndex[:]
- if self.ReqFeatureIndex != 65535:
- feature_indices.append(self.ReqFeatureIndex)
- return _uniq_sort(feature_indices)
+ feature_indices = self.FeatureIndex[:]
+ if self.ReqFeatureIndex != 65535:
+ feature_indices.append(self.ReqFeatureIndex)
+ return _uniq_sort(feature_indices)
+
@_add_method(otTables.Script)
def subset_features(self, feature_indices, keepEmptyDefaultLangSys=False):
- if(self.DefaultLangSys and
- not self.DefaultLangSys.subset_features(feature_indices) and
- not keepEmptyDefaultLangSys):
- self.DefaultLangSys = None
- self.LangSysRecord = [l for l in self.LangSysRecord
- if l.LangSys.subset_features(feature_indices)]
- self.LangSysCount = len(self.LangSysRecord)
- return bool(self.LangSysCount or self.DefaultLangSys)
+ if (
+ self.DefaultLangSys
+ and not self.DefaultLangSys.subset_features(feature_indices)
+ and not keepEmptyDefaultLangSys
+ ):
+ self.DefaultLangSys = None
+ self.LangSysRecord = [
+ l for l in self.LangSysRecord if l.LangSys.subset_features(feature_indices)
+ ]
+ self.LangSysCount = len(self.LangSysRecord)
+ return bool(self.LangSysCount or self.DefaultLangSys)
+
@_add_method(otTables.Script)
def collect_features(self):
- feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord]
- if self.DefaultLangSys:
- feature_indices.append(self.DefaultLangSys.collect_features())
- return _uniq_sort(sum(feature_indices, []))
+ feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord]
+ if self.DefaultLangSys:
+ feature_indices.append(self.DefaultLangSys.collect_features())
+ return _uniq_sort(sum(feature_indices, []))
+
@_add_method(otTables.ScriptList)
def subset_features(self, feature_indices, retain_empty):
- # https://bugzilla.mozilla.org/show_bug.cgi?id=1331737#c32
- self.ScriptRecord = [s for s in self.ScriptRecord
- if s.Script.subset_features(feature_indices, s.ScriptTag=='DFLT') or
- retain_empty]
- self.ScriptCount = len(self.ScriptRecord)
- return bool(self.ScriptCount)
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1331737#c32
+ self.ScriptRecord = [
+ s
+ for s in self.ScriptRecord
+ if s.Script.subset_features(feature_indices, s.ScriptTag == "DFLT")
+ or retain_empty
+ ]
+ self.ScriptCount = len(self.ScriptRecord)
+ return bool(self.ScriptCount)
+
@_add_method(otTables.ScriptList)
def collect_features(self):
- return _uniq_sort(sum((s.Script.collect_features()
- for s in self.ScriptRecord), []))
+ return _uniq_sort(sum((s.Script.collect_features() for s in self.ScriptRecord), []))
+
# CBLC will inherit it
-@_add_method(ttLib.getTableClass('EBLC'))
+@_add_method(ttLib.getTableClass("EBLC"))
def subset_glyphs(self, s):
- for strike in self.strikes:
- for indexSubTable in strike.indexSubTables:
- indexSubTable.names = [n for n in indexSubTable.names if n in s.glyphs]
- strike.indexSubTables = [i for i in strike.indexSubTables if i.names]
- self.strikes = [s for s in self.strikes if s.indexSubTables]
+ for strike in self.strikes:
+ for indexSubTable in strike.indexSubTables:
+ indexSubTable.names = [n for n in indexSubTable.names if n in s.glyphs]
+ strike.indexSubTables = [i for i in strike.indexSubTables if i.names]
+ self.strikes = [s for s in self.strikes if s.indexSubTables]
+
+ return True
- return True
# CBDT will inherit it
-@_add_method(ttLib.getTableClass('EBDT'))
+@_add_method(ttLib.getTableClass("EBDT"))
def subset_glyphs(self, s):
- strikeData = [
- {g: strike[g] for g in s.glyphs if g in strike}
- for strike in self.strikeData
- ]
- # Prune empty strikes
- # https://github.com/fonttools/fonttools/issues/1633
- self.strikeData = [strike for strike in strikeData if strike]
- return True
-
-@_add_method(ttLib.getTableClass('sbix'))
+ strikeData = [
+ {g: strike[g] for g in s.glyphs if g in strike} for strike in self.strikeData
+ ]
+ # Prune empty strikes
+ # https://github.com/fonttools/fonttools/issues/1633
+ self.strikeData = [strike for strike in strikeData if strike]
+ return True
+
+
+@_add_method(ttLib.getTableClass("sbix"))
def subset_glyphs(self, s):
- for strike in self.strikes.values():
- strike.glyphs = {g: strike.glyphs[g] for g in s.glyphs if g in strike.glyphs}
+ for strike in self.strikes.values():
+ strike.glyphs = {g: strike.glyphs[g] for g in s.glyphs if g in strike.glyphs}
+
+ return True
- return True
-@_add_method(ttLib.getTableClass('GSUB'))
+@_add_method(ttLib.getTableClass("GSUB"))
def closure_glyphs(self, s):
- s.table = self.table
- if self.table.ScriptList:
- feature_indices = self.table.ScriptList.collect_features()
- else:
- feature_indices = []
- if self.table.FeatureList:
- lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
- else:
- lookup_indices = []
- if getattr(self.table, 'FeatureVariations', None):
- lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices)
- lookup_indices = _uniq_sort(lookup_indices)
- if self.table.LookupList:
- s._doneLookups = {}
- while True:
- orig_glyphs = frozenset(s.glyphs)
- for i in lookup_indices:
- if i >= self.table.LookupList.LookupCount: continue
- if not self.table.LookupList.Lookup[i]: continue
- self.table.LookupList.Lookup[i].closure_glyphs(s)
- if orig_glyphs == s.glyphs:
- break
- del s._doneLookups
- del s.table
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ s.table = self.table
+ if self.table.ScriptList:
+ feature_indices = self.table.ScriptList.collect_features()
+ else:
+ feature_indices = []
+ if self.table.FeatureList:
+ lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
+ else:
+ lookup_indices = []
+ if getattr(self.table, "FeatureVariations", None):
+ lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices)
+ lookup_indices = _uniq_sort(lookup_indices)
+ if self.table.LookupList:
+ s._doneLookups = {}
+ while True:
+ orig_glyphs = frozenset(s.glyphs)
+ for i in lookup_indices:
+ if i >= self.table.LookupList.LookupCount:
+ continue
+ if not self.table.LookupList.Lookup[i]:
+ continue
+ self.table.LookupList.Lookup[i].closure_glyphs(s)
+ if orig_glyphs == s.glyphs:
+ break
+ del s._doneLookups
+ del s.table
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def subset_glyphs(self, s):
- s.glyphs = s.glyphs_gsubed
- if self.table.LookupList:
- lookup_indices = self.table.LookupList.subset_glyphs(s)
- else:
- lookup_indices = []
- self.subset_lookups(lookup_indices)
- return True
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ s.glyphs = s.glyphs_gsubed
+ if self.table.LookupList:
+ lookup_indices = self.table.LookupList.subset_glyphs(s)
+ else:
+ lookup_indices = []
+ self.subset_lookups(lookup_indices)
+ return True
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def retain_empty_scripts(self):
- # https://github.com/fonttools/fonttools/issues/518
- # https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15
- return self.__class__ == ttLib.getTableClass('GSUB')
+ # https://github.com/fonttools/fonttools/issues/518
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15
+ return self.__class__ == ttLib.getTableClass("GSUB")
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def subset_lookups(self, lookup_indices):
- """Retains specified lookups, then removes empty features, language
- systems, and scripts."""
- if self.table.LookupList:
- self.table.LookupList.subset_lookups(lookup_indices)
- if self.table.FeatureList:
- feature_indices = self.table.FeatureList.subset_lookups(lookup_indices)
- else:
- feature_indices = []
- if getattr(self.table, 'FeatureVariations', None):
- feature_indices += self.table.FeatureVariations.subset_lookups(lookup_indices)
- feature_indices = _uniq_sort(feature_indices)
- if self.table.FeatureList:
- self.table.FeatureList.subset_features(feature_indices)
- if getattr(self.table, 'FeatureVariations', None):
- self.table.FeatureVariations.subset_features(feature_indices)
- if self.table.ScriptList:
- self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts())
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ """Retains specified lookups, then removes empty features, language
+ systems, and scripts."""
+ if self.table.LookupList:
+ self.table.LookupList.subset_lookups(lookup_indices)
+ if self.table.FeatureList:
+ feature_indices = self.table.FeatureList.subset_lookups(lookup_indices)
+ else:
+ feature_indices = []
+ if getattr(self.table, "FeatureVariations", None):
+ feature_indices += self.table.FeatureVariations.subset_lookups(lookup_indices)
+ feature_indices = _uniq_sort(feature_indices)
+ if self.table.FeatureList:
+ self.table.FeatureList.subset_features(feature_indices)
+ if getattr(self.table, "FeatureVariations", None):
+ self.table.FeatureVariations.subset_features(feature_indices)
+ if self.table.ScriptList:
+ self.table.ScriptList.subset_features(
+ feature_indices, self.retain_empty_scripts()
+ )
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def neuter_lookups(self, lookup_indices):
- """Sets lookups not in lookup_indices to None."""
- if self.table.LookupList:
- self.table.LookupList.neuter_lookups(lookup_indices)
+ """Sets lookups not in lookup_indices to None."""
+ if self.table.LookupList:
+ self.table.LookupList.neuter_lookups(lookup_indices)
+
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def prune_lookups(self, remap=True):
- """Remove (default) or neuter unreferenced lookups"""
- if self.table.ScriptList:
- feature_indices = self.table.ScriptList.collect_features()
- else:
- feature_indices = []
- if self.table.FeatureList:
- lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
- else:
- lookup_indices = []
- if getattr(self.table, 'FeatureVariations', None):
- lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices)
- lookup_indices = _uniq_sort(lookup_indices)
- if self.table.LookupList:
- lookup_indices = self.table.LookupList.closure_lookups(lookup_indices)
- else:
- lookup_indices = []
- if remap:
- self.subset_lookups(lookup_indices)
- else:
- self.neuter_lookups(lookup_indices)
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ """Remove (default) or neuter unreferenced lookups"""
+ if self.table.ScriptList:
+ feature_indices = self.table.ScriptList.collect_features()
+ else:
+ feature_indices = []
+ if self.table.FeatureList:
+ lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
+ else:
+ lookup_indices = []
+ if getattr(self.table, "FeatureVariations", None):
+ lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices)
+ lookup_indices = _uniq_sort(lookup_indices)
+ if self.table.LookupList:
+ lookup_indices = self.table.LookupList.closure_lookups(lookup_indices)
+ else:
+ lookup_indices = []
+ if remap:
+ self.subset_lookups(lookup_indices)
+ else:
+ self.neuter_lookups(lookup_indices)
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def subset_feature_tags(self, feature_tags):
- if self.table.FeatureList:
- feature_indices = \
- [i for i,f in enumerate(self.table.FeatureList.FeatureRecord)
- if f.FeatureTag in feature_tags]
- self.table.FeatureList.subset_features(feature_indices)
- if getattr(self.table, 'FeatureVariations', None):
- self.table.FeatureVariations.subset_features(feature_indices)
- else:
- feature_indices = []
- if self.table.ScriptList:
- self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts())
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ if self.table.FeatureList:
+ feature_indices = [
+ i
+ for i, f in enumerate(self.table.FeatureList.FeatureRecord)
+ if f.FeatureTag in feature_tags
+ ]
+ self.table.FeatureList.subset_features(feature_indices)
+ if getattr(self.table, "FeatureVariations", None):
+ self.table.FeatureVariations.subset_features(feature_indices)
+ else:
+ feature_indices = []
+ if self.table.ScriptList:
+ self.table.ScriptList.subset_features(
+ feature_indices, self.retain_empty_scripts()
+ )
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def subset_script_tags(self, tags):
- langsys = {}
- script_tags = set()
- for tag in tags:
- script_tag, lang_tag = tag.split(".") if "." in tag else (tag, '*')
- script_tags.add(script_tag.ljust(4))
- langsys.setdefault(script_tag, set()).add(lang_tag.ljust(4))
-
- if self.table.ScriptList:
- self.table.ScriptList.ScriptRecord = \
- [s for s in self.table.ScriptList.ScriptRecord
- if s.ScriptTag in script_tags]
- self.table.ScriptList.ScriptCount = len(self.table.ScriptList.ScriptRecord)
-
- for record in self.table.ScriptList.ScriptRecord:
- if record.ScriptTag in langsys and '* ' not in langsys[record.ScriptTag]:
- record.Script.LangSysRecord = \
- [l for l in record.Script.LangSysRecord
- if l.LangSysTag in langsys[record.ScriptTag]]
- record.Script.LangSysCount = len(record.Script.LangSysRecord)
- if "dflt" not in langsys[record.ScriptTag]:
- record.Script.DefaultLangSys = None
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ langsys = {}
+ script_tags = set()
+ for tag in tags:
+ script_tag, lang_tag = tag.split(".") if "." in tag else (tag, "*")
+ script_tags.add(script_tag.ljust(4))
+ langsys.setdefault(script_tag, set()).add(lang_tag.ljust(4))
+
+ if self.table.ScriptList:
+ self.table.ScriptList.ScriptRecord = [
+ s for s in self.table.ScriptList.ScriptRecord if s.ScriptTag in script_tags
+ ]
+ self.table.ScriptList.ScriptCount = len(self.table.ScriptList.ScriptRecord)
+
+ for record in self.table.ScriptList.ScriptRecord:
+ if record.ScriptTag in langsys and "* " not in langsys[record.ScriptTag]:
+ record.Script.LangSysRecord = [
+ l
+ for l in record.Script.LangSysRecord
+ if l.LangSysTag in langsys[record.ScriptTag]
+ ]
+ record.Script.LangSysCount = len(record.Script.LangSysRecord)
+ if "dflt" not in langsys[record.ScriptTag]:
+ record.Script.DefaultLangSys = None
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def prune_features(self):
- """Remove unreferenced features"""
- if self.table.ScriptList:
- feature_indices = self.table.ScriptList.collect_features()
- else:
- feature_indices = []
- if self.table.FeatureList:
- self.table.FeatureList.subset_features(feature_indices)
- if getattr(self.table, 'FeatureVariations', None):
- self.table.FeatureVariations.subset_features(feature_indices)
- if self.table.ScriptList:
- self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts())
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ """Remove unreferenced features"""
+ if self.table.ScriptList:
+ feature_indices = self.table.ScriptList.collect_features()
+ else:
+ feature_indices = []
+ if self.table.FeatureList:
+ self.table.FeatureList.subset_features(feature_indices)
+ if getattr(self.table, "FeatureVariations", None):
+ self.table.FeatureVariations.subset_features(feature_indices)
+ if self.table.ScriptList:
+ self.table.ScriptList.subset_features(
+ feature_indices, self.retain_empty_scripts()
+ )
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def prune_pre_subset(self, font, options):
- # Drop undesired features
- if '*' not in options.layout_scripts:
- self.subset_script_tags(options.layout_scripts)
- if '*' not in options.layout_features:
- self.subset_feature_tags(options.layout_features)
- # Neuter unreferenced lookups
- self.prune_lookups(remap=False)
- return True
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ # Drop undesired features
+ if "*" not in options.layout_scripts:
+ self.subset_script_tags(options.layout_scripts)
+ if "*" not in options.layout_features:
+ self.subset_feature_tags(options.layout_features)
+ # Neuter unreferenced lookups
+ self.prune_lookups(remap=False)
+ return True
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def remove_redundant_langsys(self):
- table = self.table
- if not table.ScriptList or not table.FeatureList:
- return
-
- features = table.FeatureList.FeatureRecord
-
- for s in table.ScriptList.ScriptRecord:
- d = s.Script.DefaultLangSys
- if not d:
- continue
- for lr in s.Script.LangSysRecord[:]:
- l = lr.LangSys
- # Compare d and l
- if len(d.FeatureIndex) != len(l.FeatureIndex):
- continue
- if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535):
- continue
-
- if d.ReqFeatureIndex != 65535:
- if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]:
- continue
-
- for i in range(len(d.FeatureIndex)):
- if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]:
- break
- else:
- # LangSys and default are equal; delete LangSys
- s.Script.LangSysRecord.remove(lr)
-
-@_add_method(ttLib.getTableClass('GSUB'),
- ttLib.getTableClass('GPOS'))
+ table = self.table
+ if not table.ScriptList or not table.FeatureList:
+ return
+
+ features = table.FeatureList.FeatureRecord
+
+ for s in table.ScriptList.ScriptRecord:
+ d = s.Script.DefaultLangSys
+ if not d:
+ continue
+ for lr in s.Script.LangSysRecord[:]:
+ l = lr.LangSys
+ # Compare d and l
+ if len(d.FeatureIndex) != len(l.FeatureIndex):
+ continue
+ if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535):
+ continue
+
+ if d.ReqFeatureIndex != 65535:
+ if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]:
+ continue
+
+ for i in range(len(d.FeatureIndex)):
+ if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]:
+ break
+ else:
+ # LangSys and default are equal; delete LangSys
+ s.Script.LangSysRecord.remove(lr)
+
+
+@_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS"))
def prune_post_subset(self, font, options):
- table = self.table
+ table = self.table
- self.prune_lookups() # XXX Is this actually needed?!
+ self.prune_lookups() # XXX Is this actually needed?!
- if table.LookupList:
- table.LookupList.prune_post_subset(font, options)
- # XXX Next two lines disabled because OTS is stupid and
- # doesn't like NULL offsets here.
- #if not table.LookupList.Lookup:
- # table.LookupList = None
+ if table.LookupList:
+ table.LookupList.prune_post_subset(font, options)
+ # XXX Next two lines disabled because OTS is stupid and
+ # doesn't like NULL offsets here.
+ # if not table.LookupList.Lookup:
+ # table.LookupList = None
- if not table.LookupList:
- table.FeatureList = None
+ if not table.LookupList:
+ table.FeatureList = None
+ if table.FeatureList:
+ self.remove_redundant_langsys()
+ # Remove unreferenced features
+ self.prune_features()
- if table.FeatureList:
- self.remove_redundant_langsys()
- # Remove unreferenced features
- self.prune_features()
+ # XXX Next two lines disabled because OTS is stupid and
+ # doesn't like NULL offsets here.
+ # if table.FeatureList and not table.FeatureList.FeatureRecord:
+ # table.FeatureList = None
- # XXX Next two lines disabled because OTS is stupid and
- # doesn't like NULL offsets here.
- #if table.FeatureList and not table.FeatureList.FeatureRecord:
- # table.FeatureList = None
+ # Never drop scripts themselves as them just being available
+ # holds semantic significance.
+ # XXX Next two lines disabled because OTS is stupid and
+ # doesn't like NULL offsets here.
+ # if table.ScriptList and not table.ScriptList.ScriptRecord:
+ # table.ScriptList = None
- # Never drop scripts themselves as them just being available
- # holds semantic significance.
- # XXX Next two lines disabled because OTS is stupid and
- # doesn't like NULL offsets here.
- #if table.ScriptList and not table.ScriptList.ScriptRecord:
- # table.ScriptList = None
+ if hasattr(table, "FeatureVariations"):
+ # drop FeatureVariations if there are no features to substitute
+ if table.FeatureVariations and not (
+ table.FeatureList and table.FeatureVariations.FeatureVariationRecord
+ ):
+ table.FeatureVariations = None
- if hasattr(table, 'FeatureVariations'):
- # drop FeatureVariations if there are no features to substitute
- if table.FeatureVariations and not (
- table.FeatureList and table.FeatureVariations.FeatureVariationRecord
- ):
- table.FeatureVariations = None
+ # downgrade table version if there are no FeatureVariations
+ if not table.FeatureVariations and table.Version == 0x00010001:
+ table.Version = 0x00010000
- # downgrade table version if there are no FeatureVariations
- if not table.FeatureVariations and table.Version == 0x00010001:
- table.Version = 0x00010000
+ return True
- return True
-@_add_method(ttLib.getTableClass('GDEF'))
+@_add_method(ttLib.getTableClass("GDEF"))
def subset_glyphs(self, s):
- glyphs = s.glyphs_gsubed
- table = self.table
- if table.LigCaretList:
- indices = table.LigCaretList.Coverage.subset(glyphs)
- table.LigCaretList.LigGlyph = _list_subset(table.LigCaretList.LigGlyph, indices)
- table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph)
- if table.MarkAttachClassDef:
- table.MarkAttachClassDef.classDefs = \
- {g:v for g,v in table.MarkAttachClassDef.classDefs.items()
- if g in glyphs}
- if table.GlyphClassDef:
- table.GlyphClassDef.classDefs = \
- {g:v for g,v in table.GlyphClassDef.classDefs.items()
- if g in glyphs}
- if table.AttachList:
- indices = table.AttachList.Coverage.subset(glyphs)
- GlyphCount = table.AttachList.GlyphCount
- table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i]
- for i in indices if i < GlyphCount]
- table.AttachList.GlyphCount = len(table.AttachList.AttachPoint)
- if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef:
- for coverage in table.MarkGlyphSetsDef.Coverage:
- if coverage:
- coverage.subset(glyphs)
-
- # TODO: The following is disabled. If enabling, we need to go fixup all
- # lookups that use MarkFilteringSet and map their set.
- # indices = table.MarkGlyphSetsDef.Coverage = \
- # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs]
- # TODO: The following is disabled, as ots doesn't like it. Phew...
- # https://github.com/khaledhosny/ots/issues/172
- # table.MarkGlyphSetsDef.Coverage = [c if c.glyphs else None for c in table.MarkGlyphSetsDef.Coverage]
- return True
+ glyphs = s.glyphs_gsubed
+ table = self.table
+ if table.LigCaretList:
+ indices = table.LigCaretList.Coverage.subset(glyphs)
+ table.LigCaretList.LigGlyph = _list_subset(table.LigCaretList.LigGlyph, indices)
+ table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph)
+ if table.MarkAttachClassDef:
+ table.MarkAttachClassDef.classDefs = {
+ g: v for g, v in table.MarkAttachClassDef.classDefs.items() if g in glyphs
+ }
+ if table.GlyphClassDef:
+ table.GlyphClassDef.classDefs = {
+ g: v for g, v in table.GlyphClassDef.classDefs.items() if g in glyphs
+ }
+ if table.AttachList:
+ indices = table.AttachList.Coverage.subset(glyphs)
+ GlyphCount = table.AttachList.GlyphCount
+ table.AttachList.AttachPoint = [
+ table.AttachList.AttachPoint[i] for i in indices if i < GlyphCount
+ ]
+ table.AttachList.GlyphCount = len(table.AttachList.AttachPoint)
+ if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef:
+ markGlyphSets = table.MarkGlyphSetsDef
+ for coverage in markGlyphSets.Coverage:
+ if coverage:
+ coverage.subset(glyphs)
+
+ s.used_mark_sets = [i for i, c in enumerate(markGlyphSets.Coverage) if c.glyphs]
+ markGlyphSets.Coverage = [c for c in markGlyphSets.Coverage if c.glyphs]
+
+ return True
def _pruneGDEF(font):
- if 'GDEF' not in font: return
- gdef = font['GDEF']
- table = gdef.table
- if not hasattr(table, 'VarStore'): return
+ if "GDEF" not in font:
+ return
+ gdef = font["GDEF"]
+ table = gdef.table
+ if not hasattr(table, "VarStore"):
+ return
- store = table.VarStore
+ store = table.VarStore
- usedVarIdxes = set()
+ usedVarIdxes = set()
- # Collect.
- table.collect_device_varidxes(usedVarIdxes)
- if 'GPOS' in font:
- font['GPOS'].table.collect_device_varidxes(usedVarIdxes)
+ # Collect.
+ table.collect_device_varidxes(usedVarIdxes)
+ if "GPOS" in font:
+ font["GPOS"].table.collect_device_varidxes(usedVarIdxes)
- # Subset.
- varidx_map = store.subset_varidxes(usedVarIdxes)
+ # Subset.
+ varidx_map = store.subset_varidxes(usedVarIdxes)
- # Map.
- table.remap_device_varidxes(varidx_map)
- if 'GPOS' in font:
- font['GPOS'].table.remap_device_varidxes(varidx_map)
+ # Map.
+ table.remap_device_varidxes(varidx_map)
+ if "GPOS" in font:
+ font["GPOS"].table.remap_device_varidxes(varidx_map)
-@_add_method(ttLib.getTableClass('GDEF'))
+
+@_add_method(ttLib.getTableClass("GDEF"))
def prune_post_subset(self, font, options):
- table = self.table
- # XXX check these against OTS
- if table.LigCaretList and not table.LigCaretList.LigGlyphCount:
- table.LigCaretList = None
- if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs:
- table.MarkAttachClassDef = None
- if table.GlyphClassDef and not table.GlyphClassDef.classDefs:
- table.GlyphClassDef = None
- if table.AttachList and not table.AttachList.GlyphCount:
- table.AttachList = None
- if hasattr(table, "VarStore"):
- _pruneGDEF(font)
- if table.VarStore.VarDataCount == 0:
- if table.Version == 0x00010003:
- table.Version = 0x00010002
- if (not hasattr(table, "MarkGlyphSetsDef") or
- not table.MarkGlyphSetsDef or
- not table.MarkGlyphSetsDef.Coverage):
- table.MarkGlyphSetsDef = None
- if table.Version == 0x00010002:
- table.Version = 0x00010000
- return bool(table.LigCaretList or
- table.MarkAttachClassDef or
- table.GlyphClassDef or
- table.AttachList or
- (table.Version >= 0x00010002 and table.MarkGlyphSetsDef) or
- (table.Version >= 0x00010003 and table.VarStore))
-
-@_add_method(ttLib.getTableClass('kern'))
+ table = self.table
+ # XXX check these against OTS
+ if table.LigCaretList and not table.LigCaretList.LigGlyphCount:
+ table.LigCaretList = None
+ if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs:
+ table.MarkAttachClassDef = None
+ if table.GlyphClassDef and not table.GlyphClassDef.classDefs:
+ table.GlyphClassDef = None
+ if table.AttachList and not table.AttachList.GlyphCount:
+ table.AttachList = None
+ if hasattr(table, "VarStore"):
+ _pruneGDEF(font)
+ if table.VarStore.VarDataCount == 0:
+ if table.Version == 0x00010003:
+ table.Version = 0x00010002
+ if (
+ not hasattr(table, "MarkGlyphSetsDef")
+ or not table.MarkGlyphSetsDef
+ or not table.MarkGlyphSetsDef.Coverage
+ ):
+ table.MarkGlyphSetsDef = None
+ if table.Version == 0x00010002:
+ table.Version = 0x00010000
+ return bool(
+ table.LigCaretList
+ or table.MarkAttachClassDef
+ or table.GlyphClassDef
+ or table.AttachList
+ or (table.Version >= 0x00010002 and table.MarkGlyphSetsDef)
+ or (table.Version >= 0x00010003 and table.VarStore)
+ )
+
+
+@_add_method(ttLib.getTableClass("kern"))
def prune_pre_subset(self, font, options):
- # Prune unknown kern table types
- self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')]
- return bool(self.kernTables)
+ # Prune unknown kern table types
+ self.kernTables = [t for t in self.kernTables if hasattr(t, "kernTable")]
+ return bool(self.kernTables)
+
-@_add_method(ttLib.getTableClass('kern'))
+@_add_method(ttLib.getTableClass("kern"))
def subset_glyphs(self, s):
- glyphs = s.glyphs_gsubed
- for t in self.kernTables:
- t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items()
- if a in glyphs and b in glyphs}
- self.kernTables = [t for t in self.kernTables if t.kernTable]
- return bool(self.kernTables)
-
-@_add_method(ttLib.getTableClass('vmtx'))
+ glyphs = s.glyphs_gsubed
+ for t in self.kernTables:
+ t.kernTable = {
+ (a, b): v
+ for (a, b), v in t.kernTable.items()
+ if a in glyphs and b in glyphs
+ }
+ self.kernTables = [t for t in self.kernTables if t.kernTable]
+ return bool(self.kernTables)
+
+
+@_add_method(ttLib.getTableClass("vmtx"))
def subset_glyphs(self, s):
- self.metrics = _dict_subset(self.metrics, s.glyphs)
- for g in s.glyphs_emptied:
- self.metrics[g] = (0,0)
- return bool(self.metrics)
+ self.metrics = _dict_subset(self.metrics, s.glyphs)
+ for g in s.glyphs_emptied:
+ self.metrics[g] = (0, 0)
+ return bool(self.metrics)
-@_add_method(ttLib.getTableClass('hmtx'))
+
+@_add_method(ttLib.getTableClass("hmtx"))
def subset_glyphs(self, s):
- self.metrics = _dict_subset(self.metrics, s.glyphs)
- for g in s.glyphs_emptied:
- self.metrics[g] = (0,0)
- return True # Required table
+ self.metrics = _dict_subset(self.metrics, s.glyphs)
+ for g in s.glyphs_emptied:
+ self.metrics[g] = (0, 0)
+ return True # Required table
+
-@_add_method(ttLib.getTableClass('hdmx'))
+@_add_method(ttLib.getTableClass("hdmx"))
def subset_glyphs(self, s):
- self.hdmx = {sz:_dict_subset(l, s.glyphs) for sz,l in self.hdmx.items()}
- for sz in self.hdmx:
- for g in s.glyphs_emptied:
- self.hdmx[sz][g] = 0
- return bool(self.hdmx)
+ self.hdmx = {sz: _dict_subset(l, s.glyphs) for sz, l in self.hdmx.items()}
+ for sz in self.hdmx:
+ for g in s.glyphs_emptied:
+ self.hdmx[sz][g] = 0
+ return bool(self.hdmx)
-@_add_method(ttLib.getTableClass('ankr'))
+
+@_add_method(ttLib.getTableClass("ankr"))
def subset_glyphs(self, s):
- table = self.table.AnchorPoints
- assert table.Format == 0, "unknown 'ankr' format %s" % table.Format
- table.Anchors = {glyph: table.Anchors[glyph] for glyph in s.glyphs
- if glyph in table.Anchors}
- return len(table.Anchors) > 0
+ table = self.table.AnchorPoints
+ assert table.Format == 0, "unknown 'ankr' format %s" % table.Format
+ table.Anchors = {
+ glyph: table.Anchors[glyph] for glyph in s.glyphs if glyph in table.Anchors
+ }
+ return len(table.Anchors) > 0
+
-@_add_method(ttLib.getTableClass('bsln'))
+@_add_method(ttLib.getTableClass("bsln"))
def closure_glyphs(self, s):
- table = self.table.Baseline
- if table.Format in (2, 3):
- s.glyphs.add(table.StandardGlyph)
+ table = self.table.Baseline
+ if table.Format in (2, 3):
+ s.glyphs.add(table.StandardGlyph)
-@_add_method(ttLib.getTableClass('bsln'))
+
+@_add_method(ttLib.getTableClass("bsln"))
def subset_glyphs(self, s):
- table = self.table.Baseline
- if table.Format in (1, 3):
- baselines = {glyph: table.BaselineValues.get(glyph, table.DefaultBaseline)
- for glyph in s.glyphs}
- if len(baselines) > 0:
- mostCommon, _cnt = Counter(baselines.values()).most_common(1)[0]
- table.DefaultBaseline = mostCommon
- baselines = {glyph: b for glyph, b in baselines.items()
- if b != mostCommon}
- if len(baselines) > 0:
- table.BaselineValues = baselines
- else:
- table.Format = {1: 0, 3: 2}[table.Format]
- del table.BaselineValues
- return True
-
-@_add_method(ttLib.getTableClass('lcar'))
+ table = self.table.Baseline
+ if table.Format in (1, 3):
+ baselines = {
+ glyph: table.BaselineValues.get(glyph, table.DefaultBaseline)
+ for glyph in s.glyphs
+ }
+ if len(baselines) > 0:
+ mostCommon, _cnt = Counter(baselines.values()).most_common(1)[0]
+ table.DefaultBaseline = mostCommon
+ baselines = {glyph: b for glyph, b in baselines.items() if b != mostCommon}
+ if len(baselines) > 0:
+ table.BaselineValues = baselines
+ else:
+ table.Format = {1: 0, 3: 2}[table.Format]
+ del table.BaselineValues
+ return True
+
+
+@_add_method(ttLib.getTableClass("lcar"))
def subset_glyphs(self, s):
- table = self.table.LigatureCarets
- if table.Format in (0, 1):
- table.Carets = {glyph: table.Carets[glyph] for glyph in s.glyphs
- if glyph in table.Carets}
- return len(table.Carets) > 0
- else:
- assert False, "unknown 'lcar' format %s" % table.Format
-
-@_add_method(ttLib.getTableClass('gvar'))
+ table = self.table.LigatureCarets
+ if table.Format in (0, 1):
+ table.Carets = {
+ glyph: table.Carets[glyph] for glyph in s.glyphs if glyph in table.Carets
+ }
+ return len(table.Carets) > 0
+ else:
+ assert False, "unknown 'lcar' format %s" % table.Format
+
+
+@_add_method(ttLib.getTableClass("gvar"))
def prune_pre_subset(self, font, options):
- if options.notdef_glyph and not options.notdef_outline:
- self.variations[font.glyphOrder[0]] = []
- return True
+ if options.notdef_glyph and not options.notdef_outline:
+ self.variations[font.glyphOrder[0]] = []
+ return True
+
-@_add_method(ttLib.getTableClass('gvar'))
+@_add_method(ttLib.getTableClass("gvar"))
def subset_glyphs(self, s):
- self.variations = _dict_subset(self.variations, s.glyphs)
- self.glyphCount = len(self.variations)
- return bool(self.variations)
+ self.variations = _dict_subset(self.variations, s.glyphs)
+ self.glyphCount = len(self.variations)
+ return bool(self.variations)
+
def _remap_index_map(s, varidx_map, table_map):
- map_ = {k:varidx_map[v] for k,v in table_map.mapping.items()}
- # Emptied glyphs are remapped to:
- # if GID <= last retained GID, 0/0: delta set for 0/0 is expected to exist & zeros compress well
- # if GID > last retained GID, major/minor of the last retained glyph: will be optimized out by table compiler
- last_idx = varidx_map[table_map.mapping[s.last_retained_glyph]]
- for g,i in s.reverseEmptiedGlyphMap.items():
- map_[g] = last_idx if i > s.last_retained_order else 0
- return map_
-
-@_add_method(ttLib.getTableClass('HVAR'))
+ map_ = {k: varidx_map[v] for k, v in table_map.mapping.items()}
+ # Emptied glyphs are remapped to:
+ # if GID <= last retained GID, 0/0: delta set for 0/0 is expected to exist & zeros compress well
+ # if GID > last retained GID, major/minor of the last retained glyph: will be optimized out by table compiler
+ last_idx = varidx_map[table_map.mapping[s.last_retained_glyph]]
+ for g, i in s.reverseEmptiedGlyphMap.items():
+ map_[g] = last_idx if i > s.last_retained_order else 0
+ return map_
+
+
+@_add_method(ttLib.getTableClass("HVAR"))
def subset_glyphs(self, s):
- table = self.table
-
- used = set()
- advIdxes_ = set()
- retainAdvMap = False
-
- if table.AdvWidthMap:
- table.AdvWidthMap.mapping = _dict_subset(table.AdvWidthMap.mapping, s.glyphs)
- used.update(table.AdvWidthMap.mapping.values())
- else:
- used.update(s.reverseOrigGlyphMap.values())
- advIdxes_ = used.copy()
- retainAdvMap = s.options.retain_gids
-
- if table.LsbMap:
- table.LsbMap.mapping = _dict_subset(table.LsbMap.mapping, s.glyphs)
- used.update(table.LsbMap.mapping.values())
- if table.RsbMap:
- table.RsbMap.mapping = _dict_subset(table.RsbMap.mapping, s.glyphs)
- used.update(table.RsbMap.mapping.values())
-
- varidx_map = table.VarStore.subset_varidxes(used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_)
-
- if table.AdvWidthMap:
- table.AdvWidthMap.mapping = _remap_index_map(s, varidx_map, table.AdvWidthMap)
- if table.LsbMap:
- table.LsbMap.mapping = _remap_index_map(s, varidx_map, table.LsbMap)
- if table.RsbMap:
- table.RsbMap.mapping = _remap_index_map(s, varidx_map, table.RsbMap)
-
- # TODO Return emptiness...
- return True
-
-@_add_method(ttLib.getTableClass('VVAR'))
+ table = self.table
+
+ used = set()
+ advIdxes_ = set()
+ retainAdvMap = False
+
+ if table.AdvWidthMap:
+ table.AdvWidthMap.mapping = _dict_subset(table.AdvWidthMap.mapping, s.glyphs)
+ used.update(table.AdvWidthMap.mapping.values())
+ else:
+ used.update(s.reverseOrigGlyphMap.values())
+ advIdxes_ = used.copy()
+ retainAdvMap = s.options.retain_gids
+
+ if table.LsbMap:
+ table.LsbMap.mapping = _dict_subset(table.LsbMap.mapping, s.glyphs)
+ used.update(table.LsbMap.mapping.values())
+ if table.RsbMap:
+ table.RsbMap.mapping = _dict_subset(table.RsbMap.mapping, s.glyphs)
+ used.update(table.RsbMap.mapping.values())
+
+ varidx_map = table.VarStore.subset_varidxes(
+ used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_
+ )
+
+ if table.AdvWidthMap:
+ table.AdvWidthMap.mapping = _remap_index_map(s, varidx_map, table.AdvWidthMap)
+ if table.LsbMap:
+ table.LsbMap.mapping = _remap_index_map(s, varidx_map, table.LsbMap)
+ if table.RsbMap:
+ table.RsbMap.mapping = _remap_index_map(s, varidx_map, table.RsbMap)
+
+ # TODO Return emptiness...
+ return True
+
+
+@_add_method(ttLib.getTableClass("VVAR"))
def subset_glyphs(self, s):
- table = self.table
-
- used = set()
- advIdxes_ = set()
- retainAdvMap = False
-
- if table.AdvHeightMap:
- table.AdvHeightMap.mapping = _dict_subset(table.AdvHeightMap.mapping, s.glyphs)
- used.update(table.AdvHeightMap.mapping.values())
- else:
- used.update(s.reverseOrigGlyphMap.values())
- advIdxes_ = used.copy()
- retainAdvMap = s.options.retain_gids
-
- if table.TsbMap:
- table.TsbMap.mapping = _dict_subset(table.TsbMap.mapping, s.glyphs)
- used.update(table.TsbMap.mapping.values())
- if table.BsbMap:
- table.BsbMap.mapping = _dict_subset(table.BsbMap.mapping, s.glyphs)
- used.update(table.BsbMap.mapping.values())
- if table.VOrgMap:
- table.VOrgMap.mapping = _dict_subset(table.VOrgMap.mapping, s.glyphs)
- used.update(table.VOrgMap.mapping.values())
-
- varidx_map = table.VarStore.subset_varidxes(used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_)
-
- if table.AdvHeightMap:
- table.AdvHeightMap.mapping = _remap_index_map(s, varidx_map, table.AdvHeightMap)
- if table.TsbMap:
- table.TsbMap.mapping = _remap_index_map(s, varidx_map, table.TsbMap)
- if table.BsbMap:
- table.BsbMap.mapping = _remap_index_map(s, varidx_map, table.BsbMap)
- if table.VOrgMap:
- table.VOrgMap.mapping = _remap_index_map(s, varidx_map, table.VOrgMap)
-
- # TODO Return emptiness...
- return True
-
-@_add_method(ttLib.getTableClass('VORG'))
+ table = self.table
+
+ used = set()
+ advIdxes_ = set()
+ retainAdvMap = False
+
+ if table.AdvHeightMap:
+ table.AdvHeightMap.mapping = _dict_subset(table.AdvHeightMap.mapping, s.glyphs)
+ used.update(table.AdvHeightMap.mapping.values())
+ else:
+ used.update(s.reverseOrigGlyphMap.values())
+ advIdxes_ = used.copy()
+ retainAdvMap = s.options.retain_gids
+
+ if table.TsbMap:
+ table.TsbMap.mapping = _dict_subset(table.TsbMap.mapping, s.glyphs)
+ used.update(table.TsbMap.mapping.values())
+ if table.BsbMap:
+ table.BsbMap.mapping = _dict_subset(table.BsbMap.mapping, s.glyphs)
+ used.update(table.BsbMap.mapping.values())
+ if table.VOrgMap:
+ table.VOrgMap.mapping = _dict_subset(table.VOrgMap.mapping, s.glyphs)
+ used.update(table.VOrgMap.mapping.values())
+
+ varidx_map = table.VarStore.subset_varidxes(
+ used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_
+ )
+
+ if table.AdvHeightMap:
+ table.AdvHeightMap.mapping = _remap_index_map(s, varidx_map, table.AdvHeightMap)
+ if table.TsbMap:
+ table.TsbMap.mapping = _remap_index_map(s, varidx_map, table.TsbMap)
+ if table.BsbMap:
+ table.BsbMap.mapping = _remap_index_map(s, varidx_map, table.BsbMap)
+ if table.VOrgMap:
+ table.VOrgMap.mapping = _remap_index_map(s, varidx_map, table.VOrgMap)
+
+ # TODO Return emptiness...
+ return True
+
+
+@_add_method(ttLib.getTableClass("VORG"))
def subset_glyphs(self, s):
- self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items()
- if g in s.glyphs}
- self.numVertOriginYMetrics = len(self.VOriginRecords)
- return True # Never drop; has default metrics
+ self.VOriginRecords = {
+ g: v for g, v in self.VOriginRecords.items() if g in s.glyphs
+ }
+ self.numVertOriginYMetrics = len(self.VOriginRecords)
+ return True # Never drop; has default metrics
+
-@_add_method(ttLib.getTableClass('opbd'))
+@_add_method(ttLib.getTableClass("opbd"))
def subset_glyphs(self, s):
- table = self.table.OpticalBounds
- if table.Format == 0:
- table.OpticalBoundsDeltas = {glyph: table.OpticalBoundsDeltas[glyph]
- for glyph in s.glyphs
- if glyph in table.OpticalBoundsDeltas}
- return len(table.OpticalBoundsDeltas) > 0
- elif table.Format == 1:
- table.OpticalBoundsPoints = {glyph: table.OpticalBoundsPoints[glyph]
- for glyph in s.glyphs
- if glyph in table.OpticalBoundsPoints}
- return len(table.OpticalBoundsPoints) > 0
- else:
- assert False, "unknown 'opbd' format %s" % table.Format
-
-@_add_method(ttLib.getTableClass('post'))
+ table = self.table.OpticalBounds
+ if table.Format == 0:
+ table.OpticalBoundsDeltas = {
+ glyph: table.OpticalBoundsDeltas[glyph]
+ for glyph in s.glyphs
+ if glyph in table.OpticalBoundsDeltas
+ }
+ return len(table.OpticalBoundsDeltas) > 0
+ elif table.Format == 1:
+ table.OpticalBoundsPoints = {
+ glyph: table.OpticalBoundsPoints[glyph]
+ for glyph in s.glyphs
+ if glyph in table.OpticalBoundsPoints
+ }
+ return len(table.OpticalBoundsPoints) > 0
+ else:
+ assert False, "unknown 'opbd' format %s" % table.Format
+
+
+@_add_method(ttLib.getTableClass("post"))
def prune_pre_subset(self, font, options):
- if not options.glyph_names:
- self.formatType = 3.0
- return True # Required table
+ if not options.glyph_names:
+ self.formatType = 3.0
+ return True # Required table
-@_add_method(ttLib.getTableClass('post'))
+
+@_add_method(ttLib.getTableClass("post"))
def subset_glyphs(self, s):
- self.extraNames = [] # This seems to do it
- return True # Required table
+ self.extraNames = [] # This seems to do it
+ return True # Required table
+
-@_add_method(ttLib.getTableClass('prop'))
+@_add_method(ttLib.getTableClass("prop"))
def subset_glyphs(self, s):
- prop = self.table.GlyphProperties
- if prop.Format == 0:
- return prop.DefaultProperties != 0
- elif prop.Format == 1:
- prop.Properties = {g: prop.Properties.get(g, prop.DefaultProperties)
- for g in s.glyphs}
- mostCommon, _cnt = Counter(prop.Properties.values()).most_common(1)[0]
- prop.DefaultProperties = mostCommon
- prop.Properties = {g: prop for g, prop in prop.Properties.items()
- if prop != mostCommon}
- if len(prop.Properties) == 0:
- del prop.Properties
- prop.Format = 0
- return prop.DefaultProperties != 0
- return True
- else:
- assert False, "unknown 'prop' format %s" % prop.Format
+ prop = self.table.GlyphProperties
+ if prop.Format == 0:
+ return prop.DefaultProperties != 0
+ elif prop.Format == 1:
+ prop.Properties = {
+ g: prop.Properties.get(g, prop.DefaultProperties) for g in s.glyphs
+ }
+ mostCommon, _cnt = Counter(prop.Properties.values()).most_common(1)[0]
+ prop.DefaultProperties = mostCommon
+ prop.Properties = {
+ g: prop for g, prop in prop.Properties.items() if prop != mostCommon
+ }
+ if len(prop.Properties) == 0:
+ del prop.Properties
+ prop.Format = 0
+ return prop.DefaultProperties != 0
+ return True
+ else:
+ assert False, "unknown 'prop' format %s" % prop.Format
+
def _paint_glyph_names(paint, colr):
- result = set()
+ result = set()
+
+ def callback(paint):
+ if paint.Format in {
+ otTables.PaintFormat.PaintGlyph,
+ otTables.PaintFormat.PaintColrGlyph,
+ }:
+ result.add(paint.Glyph)
- def callback(paint):
- if paint.Format in {
- otTables.PaintFormat.PaintGlyph,
- otTables.PaintFormat.PaintColrGlyph,
- }:
- result.add(paint.Glyph)
+ paint.traverse(colr, callback)
+ return result
- paint.traverse(colr, callback)
- return result
-@_add_method(ttLib.getTableClass('COLR'))
+@_add_method(ttLib.getTableClass("COLR"))
def closure_glyphs(self, s):
- if self.version > 0:
- # on decompiling COLRv1, we only keep around the raw otTables
- # but for subsetting we need dicts with fully decompiled layers;
- # we store them temporarily in the C_O_L_R_ instance and delete
- # them after we have finished subsetting.
- self.ColorLayers = self._decompileColorLayersV0(self.table)
- self.ColorLayersV1 = {
- rec.BaseGlyph: rec.Paint
- for rec in self.table.BaseGlyphList.BaseGlyphPaintRecord
- }
-
- decompose = s.glyphs
- while decompose:
- layers = set()
- for g in decompose:
- for layer in self.ColorLayers.get(g, []):
- layers.add(layer.name)
-
- if self.version > 0:
- paint = self.ColorLayersV1.get(g)
- if paint is not None:
- layers.update(_paint_glyph_names(paint, self.table))
-
- layers -= s.glyphs
- s.glyphs.update(layers)
- decompose = layers
-
-@_add_method(ttLib.getTableClass('COLR'))
+ if self.version > 0:
+ # on decompiling COLRv1, we only keep around the raw otTables
+ # but for subsetting we need dicts with fully decompiled layers;
+ # we store them temporarily in the C_O_L_R_ instance and delete
+ # them after we have finished subsetting.
+ self.ColorLayers = self._decompileColorLayersV0(self.table)
+ self.ColorLayersV1 = {
+ rec.BaseGlyph: rec.Paint
+ for rec in self.table.BaseGlyphList.BaseGlyphPaintRecord
+ }
+
+ decompose = s.glyphs
+ while decompose:
+ layers = set()
+ for g in decompose:
+ for layer in self.ColorLayers.get(g, []):
+ layers.add(layer.name)
+
+ if self.version > 0:
+ paint = self.ColorLayersV1.get(g)
+ if paint is not None:
+ layers.update(_paint_glyph_names(paint, self.table))
+
+ layers -= s.glyphs
+ s.glyphs.update(layers)
+ decompose = layers
+
+
+@_add_method(ttLib.getTableClass("COLR"))
def subset_glyphs(self, s):
- from fontTools.colorLib.unbuilder import unbuildColrV1
- from fontTools.colorLib.builder import buildColrV1, populateCOLRv0
-
- # only include glyphs after COLR closure, which in turn comes after cmap and GSUB
- # closure, but importantly before glyf/CFF closures. COLR layers can refer to
- # composite glyphs, and that's ok, since glyf/CFF closures happen after COLR closure
- # and take care of those. If we also included glyphs resulting from glyf/CFF closures
- # when deciding which COLR base glyphs to retain, then we may end up with a situation
- # whereby a COLR base glyph is kept, not because directly requested (cmap)
- # or substituted (GSUB) or referenced by another COLRv1 PaintColrGlyph, but because
- # it corresponds to (has same GID as) a non-COLR glyph that happens to be used as a
- # component in glyf or CFF table. Best case scenario we retain more glyphs than
- # required; worst case we retain incomplete COLR records that try to reference
- # glyphs that are no longer in the final subset font.
- # https://github.com/fonttools/fonttools/issues/2461
- s.glyphs = s.glyphs_colred
-
- self.ColorLayers = {g: self.ColorLayers[g] for g in s.glyphs if g in self.ColorLayers}
- if self.version == 0:
- return bool(self.ColorLayers)
-
- colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)
- self.table.LayerList, self.table.BaseGlyphList = buildColrV1(
- {g: colorGlyphsV1[g] for g in colorGlyphsV1 if g in s.glyphs}
- )
- del self.ColorLayersV1
-
- if self.table.ClipList is not None:
- clips = self.table.ClipList.clips
- self.table.ClipList.clips = {g: clips[g] for g in clips if g in s.glyphs}
-
- layersV0 = self.ColorLayers
- if not self.table.BaseGlyphList.BaseGlyphPaintRecord:
- # no more COLRv1 glyphs: downgrade to version 0
- self.version = 0
- del self.table
- return bool(layersV0)
-
- populateCOLRv0(
- self.table,
- {
- g: [(layer.name, layer.colorID) for layer in layersV0[g]]
- for g in layersV0
- },
- )
- del self.ColorLayers
-
- # TODO: also prune ununsed varIndices in COLR.VarStore
- return True
-
-@_add_method(ttLib.getTableClass('CPAL'))
+ from fontTools.colorLib.unbuilder import unbuildColrV1
+ from fontTools.colorLib.builder import buildColrV1, populateCOLRv0
+
+ # only include glyphs after COLR closure, which in turn comes after cmap and GSUB
+ # closure, but importantly before glyf/CFF closures. COLR layers can refer to
+ # composite glyphs, and that's ok, since glyf/CFF closures happen after COLR closure
+ # and take care of those. If we also included glyphs resulting from glyf/CFF closures
+ # when deciding which COLR base glyphs to retain, then we may end up with a situation
+ # whereby a COLR base glyph is kept, not because directly requested (cmap)
+ # or substituted (GSUB) or referenced by another COLRv1 PaintColrGlyph, but because
+ # it corresponds to (has same GID as) a non-COLR glyph that happens to be used as a
+ # component in glyf or CFF table. Best case scenario we retain more glyphs than
+ # required; worst case we retain incomplete COLR records that try to reference
+ # glyphs that are no longer in the final subset font.
+ # https://github.com/fonttools/fonttools/issues/2461
+ s.glyphs = s.glyphs_colred
+
+ self.ColorLayers = {
+ g: self.ColorLayers[g] for g in s.glyphs if g in self.ColorLayers
+ }
+ if self.version == 0:
+ return bool(self.ColorLayers)
+
+ colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)
+ self.table.LayerList, self.table.BaseGlyphList = buildColrV1(
+ {g: colorGlyphsV1[g] for g in colorGlyphsV1 if g in s.glyphs}
+ )
+ del self.ColorLayersV1
+
+ if self.table.ClipList is not None:
+ clips = self.table.ClipList.clips
+ self.table.ClipList.clips = {g: clips[g] for g in clips if g in s.glyphs}
+
+ layersV0 = self.ColorLayers
+ if not self.table.BaseGlyphList.BaseGlyphPaintRecord:
+ # no more COLRv1 glyphs: downgrade to version 0
+ self.version = 0
+ del self.table
+ return bool(layersV0)
+
+ populateCOLRv0(
+ self.table,
+ {g: [(layer.name, layer.colorID) for layer in layersV0[g]] for g in layersV0},
+ )
+ del self.ColorLayers
+
+ # TODO: also prune ununsed varIndices in COLR.VarStore
+ return True
+
+
+@_add_method(ttLib.getTableClass("CPAL"))
def prune_post_subset(self, font, options):
- colr = font.get("COLR")
- if not colr: # drop CPAL if COLR was subsetted to empty
- return False
-
- colors_by_index = defaultdict(list)
-
- def collect_colors_by_index(paint):
- if hasattr(paint, "PaletteIndex"): # either solid colors...
- colors_by_index[paint.PaletteIndex].append(paint)
- elif hasattr(paint, "ColorLine"): # ... or gradient color stops
- for stop in paint.ColorLine.ColorStop:
- colors_by_index[stop.PaletteIndex].append(stop)
-
- if colr.version == 0:
- for layers in colr.ColorLayers.values():
- for layer in layers:
- colors_by_index[layer.colorID].append(layer)
- else:
- if colr.table.LayerRecordArray:
- for layer in colr.table.LayerRecordArray.LayerRecord:
- colors_by_index[layer.PaletteIndex].append(layer)
- for record in colr.table.BaseGlyphList.BaseGlyphPaintRecord:
- record.Paint.traverse(colr.table, collect_colors_by_index)
-
- # don't remap palette entry index 0xFFFF, this is always the foreground color
- # https://github.com/fonttools/fonttools/issues/2257
- retained_palette_indices = set(colors_by_index.keys()) - {0xFFFF}
- for palette in self.palettes:
- palette[:] = [c for i, c in enumerate(palette) if i in retained_palette_indices]
- assert len(palette) == len(retained_palette_indices)
-
- for new_index, old_index in enumerate(sorted(retained_palette_indices)):
- for record in colors_by_index[old_index]:
- if hasattr(record, "colorID"): # v0
- record.colorID = new_index
- elif hasattr(record, "PaletteIndex"): # v1
- record.PaletteIndex = new_index
- else:
- raise AssertionError(record)
-
- self.numPaletteEntries = len(self.palettes[0])
-
- if self.version == 1:
- self.paletteEntryLabels = [
- label for i, label in self.paletteEntryLabels if i in retained_palette_indices
- ]
- return bool(self.numPaletteEntries)
+ # Keep whole "CPAL" if "SVG " is present as it may be referenced by the latter
+ # via 'var(--color{palette_entry_index}, ...)' CSS color variables.
+ # For now we just assume this is the case by the mere presence of "SVG " table,
+ # for parsing SVG to collect all the used indices is too much work...
+ # TODO(anthrotype): Do The Right Thing (TM).
+ if "SVG " in font:
+ return True
+
+ colr = font.get("COLR")
+ if not colr: # drop CPAL if COLR was subsetted to empty
+ return False
+
+ colors_by_index = defaultdict(list)
+
+ def collect_colors_by_index(paint):
+ if hasattr(paint, "PaletteIndex"): # either solid colors...
+ colors_by_index[paint.PaletteIndex].append(paint)
+ elif hasattr(paint, "ColorLine"): # ... or gradient color stops
+ for stop in paint.ColorLine.ColorStop:
+ colors_by_index[stop.PaletteIndex].append(stop)
+
+ if colr.version == 0:
+ for layers in colr.ColorLayers.values():
+ for layer in layers:
+ colors_by_index[layer.colorID].append(layer)
+ else:
+ if colr.table.LayerRecordArray:
+ for layer in colr.table.LayerRecordArray.LayerRecord:
+ colors_by_index[layer.PaletteIndex].append(layer)
+ for record in colr.table.BaseGlyphList.BaseGlyphPaintRecord:
+ record.Paint.traverse(colr.table, collect_colors_by_index)
+
+ # don't remap palette entry index 0xFFFF, this is always the foreground color
+ # https://github.com/fonttools/fonttools/issues/2257
+ retained_palette_indices = set(colors_by_index.keys()) - {0xFFFF}
+ for palette in self.palettes:
+ palette[:] = [c for i, c in enumerate(palette) if i in retained_palette_indices]
+ assert len(palette) == len(retained_palette_indices)
+
+ for new_index, old_index in enumerate(sorted(retained_palette_indices)):
+ for record in colors_by_index[old_index]:
+ if hasattr(record, "colorID"): # v0
+ record.colorID = new_index
+ elif hasattr(record, "PaletteIndex"): # v1
+ record.PaletteIndex = new_index
+ else:
+ raise AssertionError(record)
+
+ self.numPaletteEntries = len(self.palettes[0])
+
+ if self.version == 1:
+ kept_labels = []
+ for i, label in enumerate(self.paletteEntryLabels):
+ if i in retained_palette_indices:
+ kept_labels.append(label)
+ self.paletteEntryLabels = kept_labels
+ return bool(self.numPaletteEntries)
+
@_add_method(otTables.MathGlyphConstruction)
def closure_glyphs(self, glyphs):
- variants = set()
- for v in self.MathGlyphVariantRecord:
- variants.add(v.VariantGlyph)
- if self.GlyphAssembly:
- for p in self.GlyphAssembly.PartRecords:
- variants.add(p.glyph)
- return variants
+ variants = set()
+ for v in self.MathGlyphVariantRecord:
+ variants.add(v.VariantGlyph)
+ if self.GlyphAssembly:
+ for p in self.GlyphAssembly.PartRecords:
+ variants.add(p.glyph)
+ return variants
+
@_add_method(otTables.MathVariants)
def closure_glyphs(self, s):
- glyphs = frozenset(s.glyphs)
- variants = set()
+ glyphs = frozenset(s.glyphs)
+ variants = set()
- if self.VertGlyphCoverage:
- indices = self.VertGlyphCoverage.intersect(glyphs)
- for i in indices:
- variants.update(self.VertGlyphConstruction[i].closure_glyphs(glyphs))
+ if self.VertGlyphCoverage:
+ indices = self.VertGlyphCoverage.intersect(glyphs)
+ for i in indices:
+ variants.update(self.VertGlyphConstruction[i].closure_glyphs(glyphs))
- if self.HorizGlyphCoverage:
- indices = self.HorizGlyphCoverage.intersect(glyphs)
- for i in indices:
- variants.update(self.HorizGlyphConstruction[i].closure_glyphs(glyphs))
+ if self.HorizGlyphCoverage:
+ indices = self.HorizGlyphCoverage.intersect(glyphs)
+ for i in indices:
+ variants.update(self.HorizGlyphConstruction[i].closure_glyphs(glyphs))
- s.glyphs.update(variants)
+ s.glyphs.update(variants)
-@_add_method(ttLib.getTableClass('MATH'))
+
+@_add_method(ttLib.getTableClass("MATH"))
def closure_glyphs(self, s):
- if self.table.MathVariants:
- self.table.MathVariants.closure_glyphs(s)
+ if self.table.MathVariants:
+ self.table.MathVariants.closure_glyphs(s)
+
@_add_method(otTables.MathItalicsCorrectionInfo)
def subset_glyphs(self, s):
- indices = self.Coverage.subset(s.glyphs)
- self.ItalicsCorrection = _list_subset(self.ItalicsCorrection, indices)
- self.ItalicsCorrectionCount = len(self.ItalicsCorrection)
- return bool(self.ItalicsCorrectionCount)
+ indices = self.Coverage.subset(s.glyphs)
+ self.ItalicsCorrection = _list_subset(self.ItalicsCorrection, indices)
+ self.ItalicsCorrectionCount = len(self.ItalicsCorrection)
+ return bool(self.ItalicsCorrectionCount)
+
@_add_method(otTables.MathTopAccentAttachment)
def subset_glyphs(self, s):
- indices = self.TopAccentCoverage.subset(s.glyphs)
- self.TopAccentAttachment = _list_subset(self.TopAccentAttachment, indices)
- self.TopAccentAttachmentCount = len(self.TopAccentAttachment)
- return bool(self.TopAccentAttachmentCount)
+ indices = self.TopAccentCoverage.subset(s.glyphs)
+ self.TopAccentAttachment = _list_subset(self.TopAccentAttachment, indices)
+ self.TopAccentAttachmentCount = len(self.TopAccentAttachment)
+ return bool(self.TopAccentAttachmentCount)
+
@_add_method(otTables.MathKernInfo)
def subset_glyphs(self, s):
- indices = self.MathKernCoverage.subset(s.glyphs)
- self.MathKernInfoRecords = _list_subset(self.MathKernInfoRecords, indices)
- self.MathKernCount = len(self.MathKernInfoRecords)
- return bool(self.MathKernCount)
+ indices = self.MathKernCoverage.subset(s.glyphs)
+ self.MathKernInfoRecords = _list_subset(self.MathKernInfoRecords, indices)
+ self.MathKernCount = len(self.MathKernInfoRecords)
+ return bool(self.MathKernCount)
+
@_add_method(otTables.MathGlyphInfo)
def subset_glyphs(self, s):
- if self.MathItalicsCorrectionInfo:
- self.MathItalicsCorrectionInfo.subset_glyphs(s)
- if self.MathTopAccentAttachment:
- self.MathTopAccentAttachment.subset_glyphs(s)
- if self.MathKernInfo:
- self.MathKernInfo.subset_glyphs(s)
- if self.ExtendedShapeCoverage:
- self.ExtendedShapeCoverage.subset(s.glyphs)
- return True
+ if self.MathItalicsCorrectionInfo:
+ self.MathItalicsCorrectionInfo.subset_glyphs(s)
+ if self.MathTopAccentAttachment:
+ self.MathTopAccentAttachment.subset_glyphs(s)
+ if self.MathKernInfo:
+ self.MathKernInfo.subset_glyphs(s)
+ if self.ExtendedShapeCoverage:
+ self.ExtendedShapeCoverage.subset(s.glyphs)
+ return True
+
@_add_method(otTables.MathVariants)
def subset_glyphs(self, s):
- if self.VertGlyphCoverage:
- indices = self.VertGlyphCoverage.subset(s.glyphs)
- self.VertGlyphConstruction = _list_subset(self.VertGlyphConstruction, indices)
- self.VertGlyphCount = len(self.VertGlyphConstruction)
+ if self.VertGlyphCoverage:
+ indices = self.VertGlyphCoverage.subset(s.glyphs)
+ self.VertGlyphConstruction = _list_subset(self.VertGlyphConstruction, indices)
+ self.VertGlyphCount = len(self.VertGlyphConstruction)
- if self.HorizGlyphCoverage:
- indices = self.HorizGlyphCoverage.subset(s.glyphs)
- self.HorizGlyphConstruction = _list_subset(self.HorizGlyphConstruction, indices)
- self.HorizGlyphCount = len(self.HorizGlyphConstruction)
+ if self.HorizGlyphCoverage:
+ indices = self.HorizGlyphCoverage.subset(s.glyphs)
+ self.HorizGlyphConstruction = _list_subset(self.HorizGlyphConstruction, indices)
+ self.HorizGlyphCount = len(self.HorizGlyphConstruction)
- return True
+ return True
-@_add_method(ttLib.getTableClass('MATH'))
+
+@_add_method(ttLib.getTableClass("MATH"))
def subset_glyphs(self, s):
- s.glyphs = s.glyphs_mathed
- if self.table.MathGlyphInfo:
- self.table.MathGlyphInfo.subset_glyphs(s)
- if self.table.MathVariants:
- self.table.MathVariants.subset_glyphs(s)
- return True
-
-@_add_method(ttLib.getTableModule('glyf').Glyph)
+ s.glyphs = s.glyphs_mathed
+ if self.table.MathGlyphInfo:
+ self.table.MathGlyphInfo.subset_glyphs(s)
+ if self.table.MathVariants:
+ self.table.MathVariants.subset_glyphs(s)
+ return True
+
+
+@_add_method(ttLib.getTableModule("glyf").Glyph)
def remapComponentsFast(self, glyphidmap):
- if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
- return # Not composite
- data = self.data = bytearray(self.data)
- i = 10
- more = 1
- while more:
- flags =(data[i] << 8) | data[i+1]
- glyphID =(data[i+2] << 8) | data[i+3]
- # Remap
- glyphID = glyphidmap[glyphID]
- data[i+2] = glyphID >> 8
- data[i+3] = glyphID & 0xFF
- i += 4
- flags = int(flags)
-
- if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS
- else: i += 2
- if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE
- elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE
- elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO
- more = flags & 0x0020 # MORE_COMPONENTS
-
-@_add_method(ttLib.getTableClass('glyf'))
+ if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
+ return # Not composite
+ data = self.data = bytearray(self.data)
+ i = 10
+ more = 1
+ while more:
+ flags = (data[i] << 8) | data[i + 1]
+ glyphID = (data[i + 2] << 8) | data[i + 3]
+ # Remap
+ glyphID = glyphidmap[glyphID]
+ data[i + 2] = glyphID >> 8
+ data[i + 3] = glyphID & 0xFF
+ i += 4
+ flags = int(flags)
+
+ if flags & 0x0001:
+ i += 4 # ARG_1_AND_2_ARE_WORDS
+ else:
+ i += 2
+ if flags & 0x0008:
+ i += 2 # WE_HAVE_A_SCALE
+ elif flags & 0x0040:
+ i += 4 # WE_HAVE_AN_X_AND_Y_SCALE
+ elif flags & 0x0080:
+ i += 8 # WE_HAVE_A_TWO_BY_TWO
+ more = flags & 0x0020 # MORE_COMPONENTS
+
+
+@_add_method(ttLib.getTableClass("glyf"))
def closure_glyphs(self, s):
- glyphSet = self.glyphs
- decompose = s.glyphs
- while decompose:
- components = set()
- for g in decompose:
- if g not in glyphSet:
- continue
- gl = glyphSet[g]
- for c in gl.getComponentNames(self):
- components.add(c)
- components -= s.glyphs
- s.glyphs.update(components)
- decompose = components
-
-@_add_method(ttLib.getTableClass('glyf'))
+ glyphSet = self.glyphs
+ decompose = s.glyphs
+ while decompose:
+ components = set()
+ for g in decompose:
+ if g not in glyphSet:
+ continue
+ gl = glyphSet[g]
+ for c in gl.getComponentNames(self):
+ components.add(c)
+ components -= s.glyphs
+ s.glyphs.update(components)
+ decompose = components
+
+
+@_add_method(ttLib.getTableClass("glyf"))
def prune_pre_subset(self, font, options):
- if options.notdef_glyph and not options.notdef_outline:
- g = self[self.glyphOrder[0]]
- # Yay, easy!
- g.__dict__.clear()
- g.data = b''
- return True
-
-@_add_method(ttLib.getTableClass('glyf'))
+ if options.notdef_glyph and not options.notdef_outline:
+ g = self[self.glyphOrder[0]]
+ # Yay, easy!
+ g.__dict__.clear()
+ g.data = b""
+ return True
+
+
+@_add_method(ttLib.getTableClass("glyf"))
def subset_glyphs(self, s):
- self.glyphs = _dict_subset(self.glyphs, s.glyphs)
- if not s.options.retain_gids:
- indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs]
- glyphmap = {o:n for n,o in enumerate(indices)}
- for v in self.glyphs.values():
- if hasattr(v, "data"):
- v.remapComponentsFast(glyphmap)
- Glyph = ttLib.getTableModule('glyf').Glyph
- for g in s.glyphs_emptied:
- self.glyphs[g] = Glyph()
- self.glyphs[g].data = b''
- self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs or g in s.glyphs_emptied]
- # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
- return True
-
-@_add_method(ttLib.getTableClass('glyf'))
+ self.glyphs = _dict_subset(self.glyphs, s.glyphs)
+ if not s.options.retain_gids:
+ indices = [i for i, g in enumerate(self.glyphOrder) if g in s.glyphs]
+ glyphmap = {o: n for n, o in enumerate(indices)}
+ for v in self.glyphs.values():
+ if hasattr(v, "data"):
+ v.remapComponentsFast(glyphmap)
+ Glyph = ttLib.getTableModule("glyf").Glyph
+ for g in s.glyphs_emptied:
+ self.glyphs[g] = Glyph()
+ self.glyphs[g].data = b""
+ self.glyphOrder = [
+ g for g in self.glyphOrder if g in s.glyphs or g in s.glyphs_emptied
+ ]
+ # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
+ return True
+
+
+@_add_method(ttLib.getTableClass("glyf"))
def prune_post_subset(self, font, options):
- remove_hinting = not options.hinting
- for v in self.glyphs.values():
- v.trim(remove_hinting=remove_hinting)
- return True
+ remove_hinting = not options.hinting
+ for v in self.glyphs.values():
+ v.trim(remove_hinting=remove_hinting)
+ return True
-@_add_method(ttLib.getTableClass('cmap'))
+@_add_method(ttLib.getTableClass("cmap"))
def closure_glyphs(self, s):
- tables = [t for t in self.tables if t.isUnicode()]
-
- # Close glyphs
- for table in tables:
- if table.format == 14:
- for cmap in table.uvsDict.values():
- glyphs = {g for u,g in cmap if u in s.unicodes_requested}
- if None in glyphs:
- glyphs.remove(None)
- s.glyphs.update(glyphs)
- else:
- cmap = table.cmap
- intersection = s.unicodes_requested.intersection(cmap.keys())
- s.glyphs.update(cmap[u] for u in intersection)
-
- # Calculate unicodes_missing
- s.unicodes_missing = s.unicodes_requested.copy()
- for table in tables:
- s.unicodes_missing.difference_update(table.cmap)
-
-@_add_method(ttLib.getTableClass('cmap'))
+ tables = [t for t in self.tables if t.isUnicode()]
+
+ # Close glyphs
+ for table in tables:
+ if table.format == 14:
+ for cmap in table.uvsDict.values():
+ glyphs = {g for u, g in cmap if u in s.unicodes_requested}
+ if None in glyphs:
+ glyphs.remove(None)
+ s.glyphs.update(glyphs)
+ else:
+ cmap = table.cmap
+ intersection = s.unicodes_requested.intersection(cmap.keys())
+ s.glyphs.update(cmap[u] for u in intersection)
+
+ # Calculate unicodes_missing
+ s.unicodes_missing = s.unicodes_requested.copy()
+ for table in tables:
+ s.unicodes_missing.difference_update(table.cmap)
+
+
+@_add_method(ttLib.getTableClass("cmap"))
def prune_pre_subset(self, font, options):
- if not options.legacy_cmap:
- # Drop non-Unicode / non-Symbol cmaps
- self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()]
- if not options.symbol_cmap:
- self.tables = [t for t in self.tables if not t.isSymbol()]
- # TODO(behdad) Only keep one subtable?
- # For now, drop format=0 which can't be subset_glyphs easily?
- self.tables = [t for t in self.tables if t.format != 0]
- self.numSubTables = len(self.tables)
- return True # Required table
-
-@_add_method(ttLib.getTableClass('cmap'))
+ if not options.legacy_cmap:
+ # Drop non-Unicode / non-Symbol cmaps
+ self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()]
+ if not options.symbol_cmap:
+ self.tables = [t for t in self.tables if not t.isSymbol()]
+ # TODO(behdad) Only keep one subtable?
+ # For now, drop format=0 which can't be subset_glyphs easily?
+ self.tables = [t for t in self.tables if t.format != 0]
+ self.numSubTables = len(self.tables)
+ return True # Required table
+
+
+@_add_method(ttLib.getTableClass("cmap"))
def subset_glyphs(self, s):
- s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only
-
- tables_format12_bmp = []
- table_plat0_enc3 = {} # Unicode platform, Unicode BMP only, keyed by language
- table_plat3_enc1 = {} # Windows platform, Unicode BMP, keyed by language
-
- for t in self.tables:
- if t.platformID == 0 and t.platEncID == 3:
- table_plat0_enc3[t.language] = t
- if t.platformID == 3 and t.platEncID == 1:
- table_plat3_enc1[t.language] = t
-
- if t.format == 14:
- # TODO(behdad) We drop all the default-UVS mappings
- # for glyphs_requested. So it's the caller's responsibility to make
- # sure those are included.
- t.uvsDict = {v:[(u,g) for u,g in l
- if g in s.glyphs_requested or u in s.unicodes_requested]
- for v,l in t.uvsDict.items()}
- t.uvsDict = {v:l for v,l in t.uvsDict.items() if l}
- elif t.isUnicode():
- t.cmap = {u:g for u,g in t.cmap.items()
- if g in s.glyphs_requested or u in s.unicodes_requested}
- # Collect format 12 tables that hold only basic multilingual plane
- # codepoints.
- if t.format == 12 and t.cmap and max(t.cmap.keys()) < 0x10000:
- tables_format12_bmp.append(t)
- else:
- t.cmap = {u:g for u,g in t.cmap.items()
- if g in s.glyphs_requested}
-
- # Fomat 12 tables are redundant if they contain just the same BMP codepoints
- # their little BMP-only encoding siblings contain.
- for t in tables_format12_bmp:
- if (
- t.platformID == 0 # Unicode platform
- and t.platEncID == 4 # Unicode full repertoire
- and t.language in table_plat0_enc3 # Have a BMP-only sibling?
- and table_plat0_enc3[t.language].cmap == t.cmap
- ):
- t.cmap.clear()
- elif (
- t.platformID == 3 # Windows platform
- and t.platEncID == 10 # Unicode full repertoire
- and t.language in table_plat3_enc1 # Have a BMP-only sibling?
- and table_plat3_enc1[t.language].cmap == t.cmap
- ):
- t.cmap.clear()
-
- self.tables = [t for t in self.tables
- if (t.cmap if t.format != 14 else t.uvsDict)]
- self.numSubTables = len(self.tables)
- # TODO(behdad) Convert formats when needed.
- # In particular, if we have a format=12 without non-BMP
- # characters, convert it to format=4 if there's not one.
- return True # Required table
-
-@_add_method(ttLib.getTableClass('DSIG'))
+ s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only
+
+ tables_format12_bmp = []
+ table_plat0_enc3 = {} # Unicode platform, Unicode BMP only, keyed by language
+ table_plat3_enc1 = {} # Windows platform, Unicode BMP, keyed by language
+
+ for t in self.tables:
+ if t.platformID == 0 and t.platEncID == 3:
+ table_plat0_enc3[t.language] = t
+ if t.platformID == 3 and t.platEncID == 1:
+ table_plat3_enc1[t.language] = t
+
+ if t.format == 14:
+ # TODO(behdad) We drop all the default-UVS mappings
+ # for glyphs_requested. So it's the caller's responsibility to make
+ # sure those are included.
+ t.uvsDict = {
+ v: [
+ (u, g)
+ for u, g in l
+ if g in s.glyphs_requested or u in s.unicodes_requested
+ ]
+ for v, l in t.uvsDict.items()
+ }
+ t.uvsDict = {v: l for v, l in t.uvsDict.items() if l}
+ elif t.isUnicode():
+ t.cmap = {
+ u: g
+ for u, g in t.cmap.items()
+ if g in s.glyphs_requested or u in s.unicodes_requested
+ }
+ # Collect format 12 tables that hold only basic multilingual plane
+ # codepoints.
+ if t.format == 12 and t.cmap and max(t.cmap.keys()) < 0x10000:
+ tables_format12_bmp.append(t)
+ else:
+ t.cmap = {u: g for u, g in t.cmap.items() if g in s.glyphs_requested}
+
+ # Fomat 12 tables are redundant if they contain just the same BMP codepoints
+ # their little BMP-only encoding siblings contain.
+ for t in tables_format12_bmp:
+ if (
+ t.platformID == 0 # Unicode platform
+ and t.platEncID == 4 # Unicode full repertoire
+ and t.language in table_plat0_enc3 # Have a BMP-only sibling?
+ and table_plat0_enc3[t.language].cmap == t.cmap
+ ):
+ t.cmap.clear()
+ elif (
+ t.platformID == 3 # Windows platform
+ and t.platEncID == 10 # Unicode full repertoire
+ and t.language in table_plat3_enc1 # Have a BMP-only sibling?
+ and table_plat3_enc1[t.language].cmap == t.cmap
+ ):
+ t.cmap.clear()
+
+ self.tables = [t for t in self.tables if (t.cmap if t.format != 14 else t.uvsDict)]
+ self.numSubTables = len(self.tables)
+ # TODO(behdad) Convert formats when needed.
+ # In particular, if we have a format=12 without non-BMP
+ # characters, convert it to format=4 if there's not one.
+ return True # Required table
+
+
+@_add_method(ttLib.getTableClass("DSIG"))
def prune_pre_subset(self, font, options):
- # Drop all signatures since they will be invalid
- self.usNumSigs = 0
- self.signatureRecords = []
- return True
+ # Drop all signatures since they will be invalid
+ self.usNumSigs = 0
+ self.signatureRecords = []
+ return True
-@_add_method(ttLib.getTableClass('maxp'))
-def prune_pre_subset(self, font, options):
- if not options.hinting:
- if self.tableVersion == 0x00010000:
- self.maxZones = 1
- self.maxTwilightPoints = 0
- self.maxStorage = 0
- self.maxFunctionDefs = 0
- self.maxInstructionDefs = 0
- self.maxStackElements = 0
- self.maxSizeOfInstructions = 0
- return True
-
-@_add_method(ttLib.getTableClass('name'))
+
+@_add_method(ttLib.getTableClass("maxp"))
def prune_pre_subset(self, font, options):
- nameIDs = set(options.name_IDs)
- fvar = font.get('fvar')
- if fvar:
- nameIDs.update([axis.axisNameID for axis in fvar.axes])
- nameIDs.update([inst.subfamilyNameID for inst in fvar.instances])
- nameIDs.update([inst.postscriptNameID for inst in fvar.instances
- if inst.postscriptNameID != 0xFFFF])
- stat = font.get('STAT')
- if stat:
- if stat.table.AxisValueArray:
- nameIDs.update([val_rec.ValueNameID for val_rec in stat.table.AxisValueArray.AxisValue])
- nameIDs.update([axis_rec.AxisNameID for axis_rec in stat.table.DesignAxisRecord.Axis])
- if '*' not in options.name_IDs:
- self.names = [n for n in self.names if n.nameID in nameIDs]
- if not options.name_legacy:
- # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman
- # entry for Latin and no Unicode names.
- self.names = [n for n in self.names if n.isUnicode()]
- # TODO(behdad) Option to keep only one platform's
- if '*' not in options.name_languages:
- # TODO(behdad) This is Windows-platform specific!
- self.names = [n for n in self.names
- if n.langID in options.name_languages]
- if options.obfuscate_names:
- namerecs = []
- for n in self.names:
- if n.nameID in [1, 4]:
- n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f"
- elif n.nameID in [2, 6]:
- n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f"
- elif n.nameID == 3:
- n.string = ""
- elif n.nameID in [16, 17, 18]:
- continue
- namerecs.append(n)
- self.names = namerecs
- return True # Required table
-
-
-@_add_method(ttLib.getTableClass('head'))
+ if not options.hinting:
+ if self.tableVersion == 0x00010000:
+ self.maxZones = 1
+ self.maxTwilightPoints = 0
+ self.maxStorage = 0
+ self.maxFunctionDefs = 0
+ self.maxInstructionDefs = 0
+ self.maxStackElements = 0
+ self.maxSizeOfInstructions = 0
+ return True
+
+
+@_add_method(ttLib.getTableClass("name"))
+def prune_post_subset(self, font, options):
+ visitor = NameRecordVisitor()
+ visitor.visit(font)
+ nameIDs = set(options.name_IDs) | visitor.seen
+ if "*" not in options.name_IDs:
+ self.names = [n for n in self.names if n.nameID in nameIDs]
+ if not options.name_legacy:
+ # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman
+ # entry for Latin and no Unicode names.
+ self.names = [n for n in self.names if n.isUnicode()]
+ # TODO(behdad) Option to keep only one platform's
+ if "*" not in options.name_languages:
+ # TODO(behdad) This is Windows-platform specific!
+ self.names = [n for n in self.names if n.langID in options.name_languages]
+ if options.obfuscate_names:
+ namerecs = []
+ for n in self.names:
+ if n.nameID in [1, 4]:
+ n.string = ".\x7f".encode("utf_16_be") if n.isUnicode() else ".\x7f"
+ elif n.nameID in [2, 6]:
+ n.string = "\x7f".encode("utf_16_be") if n.isUnicode() else "\x7f"
+ elif n.nameID == 3:
+ n.string = ""
+ elif n.nameID in [16, 17, 18]:
+ continue
+ namerecs.append(n)
+ self.names = namerecs
+ return True # Required table
+
+
+@_add_method(ttLib.getTableClass("head"))
def prune_post_subset(self, font, options):
- # Force re-compiling head table, to update any recalculated values.
- return True
+ # Force re-compiling head table, to update any recalculated values.
+ return True
# TODO(behdad) OS/2 ulCodePageRange?
@@ -2592,647 +2955,768 @@ def prune_post_subset(self, font, options):
class Options(object):
-
- class OptionError(Exception): pass
- class UnknownOptionError(OptionError): pass
-
- # spaces in tag names (e.g. "SVG ", "cvt ") are stripped by the argument parser
- _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC',
- 'EBSC', 'PCLT', 'LTSH']
- _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite
- _no_subset_tables_default = ['avar', 'fvar',
- 'gasp', 'head', 'hhea', 'maxp',
- 'vhea', 'OS/2', 'loca', 'name', 'cvt',
- 'fpgm', 'prep', 'VDMX', 'DSIG', 'CPAL',
- 'MVAR', 'cvar', 'STAT']
- _hinting_tables_default = ['cvt', 'cvar', 'fpgm', 'prep', 'hdmx', 'VDMX']
-
- # Based on HarfBuzz shapers
- _layout_features_groups = {
- # Default shaper
- 'common': ['rvrn', 'ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'],
- 'fractions': ['frac', 'numr', 'dnom'],
- 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'],
- 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'],
- 'ltr': ['ltra', 'ltrm'],
- 'rtl': ['rtla', 'rtlm'],
- 'rand': ['rand'],
- 'justify': ['jalt'],
- 'private': ['Harf', 'HARF', 'Buzz', 'BUZZ'],
- # Complex shapers
- 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3',
- 'cswh', 'mset', 'stch'],
- 'hangul': ['ljmo', 'vjmo', 'tjmo'],
- 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'],
- 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half',
- 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres',
- 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'],
- }
- _layout_features_default = _uniq_sort(sum(
- iter(_layout_features_groups.values()), []))
-
- def __init__(self, **kwargs):
-
- self.drop_tables = self._drop_tables_default[:]
- self.no_subset_tables = self._no_subset_tables_default[:]
- self.passthrough_tables = False # keep/drop tables we can't subset
- self.hinting_tables = self._hinting_tables_default[:]
- self.legacy_kern = False # drop 'kern' table if GPOS available
- self.layout_closure = True
- self.layout_features = self._layout_features_default[:]
- self.layout_scripts = ['*']
- self.ignore_missing_glyphs = False
- self.ignore_missing_unicodes = True
- self.hinting = True
- self.glyph_names = False
- self.legacy_cmap = False
- self.symbol_cmap = False
- self.name_IDs = [0, 1, 2, 3, 4, 5, 6] # https://github.com/fonttools/fonttools/issues/1170#issuecomment-364631225
- self.name_legacy = False
- self.name_languages = [0x0409] # English
- self.obfuscate_names = False # to make webfont unusable as a system font
- self.retain_gids = False
- self.notdef_glyph = True # gid0 for TrueType / .notdef for CFF
- self.notdef_outline = False # No need for notdef to have an outline really
- self.recommended_glyphs = False # gid1, gid2, gid3 for TrueType
- self.recalc_bounds = False # Recalculate font bounding boxes
- self.recalc_timestamp = False # Recalculate font modified timestamp
- self.prune_unicode_ranges = True # Clear unused 'ulUnicodeRange' bits
- self.recalc_average_width = False # update 'xAvgCharWidth'
- self.recalc_max_context = False # update 'usMaxContext'
- self.canonical_order = None # Order tables as recommended
- self.flavor = None # May be 'woff' or 'woff2'
- self.with_zopfli = False # use zopfli instead of zlib for WOFF 1.0
- self.desubroutinize = False # Desubroutinize CFF CharStrings
- self.harfbuzz_repacker = USE_HARFBUZZ_REPACKER.default
- self.verbose = False
- self.timing = False
- self.xml = False
- self.font_number = -1
- self.pretty_svg = False
-
- self.set(**kwargs)
-
- def set(self, **kwargs):
- for k,v in kwargs.items():
- if not hasattr(self, k):
- raise self.UnknownOptionError("Unknown option '%s'" % k)
- setattr(self, k, v)
-
- def parse_opts(self, argv, ignore_unknown=[]):
- posargs = []
- passthru_options = []
- for a in argv:
- orig_a = a
- if not a.startswith('--'):
- posargs.append(a)
- continue
- a = a[2:]
- i = a.find('=')
- op = '='
- if i == -1:
- if a.startswith("no-"):
- k = a[3:]
- if k == "canonical-order":
- # reorderTables=None is faster than False (the latter
- # still reorders to "keep" the original table order)
- v = None
- else:
- v = False
- else:
- k = a
- v = True
- if k.endswith("?"):
- k = k[:-1]
- v = '?'
- else:
- k = a[:i]
- if k[-1] in "-+":
- op = k[-1]+'=' # Op is '-=' or '+=' now.
- k = k[:-1]
- v = a[i+1:]
- ok = k
- k = k.replace('-', '_')
- if not hasattr(self, k):
- if ignore_unknown is True or ok in ignore_unknown:
- passthru_options.append(orig_a)
- continue
- else:
- raise self.UnknownOptionError("Unknown option '%s'" % a)
-
- ov = getattr(self, k)
- if v == '?':
- print("Current setting for '%s' is: %s" % (ok, ov))
- continue
- if isinstance(ov, bool):
- v = bool(v)
- elif isinstance(ov, int):
- v = int(v)
- elif isinstance(ov, str):
- v = str(v) # redundant
- elif isinstance(ov, list):
- if isinstance(v, bool):
- raise self.OptionError("Option '%s' requires values to be specified using '='" % a)
- vv = v.replace(',', ' ').split()
- if vv == ['']:
- vv = []
- vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
- if op == '=':
- v = vv
- elif op == '+=':
- v = ov
- v.extend(vv)
- elif op == '-=':
- v = ov
- for x in vv:
- if x in v:
- v.remove(x)
- else:
- assert False
-
- setattr(self, k, v)
-
- return posargs + passthru_options
+ class OptionError(Exception):
+ pass
+
+ class UnknownOptionError(OptionError):
+ pass
+
+ # spaces in tag names (e.g. "SVG ", "cvt ") are stripped by the argument parser
+ _drop_tables_default = [
+ "BASE",
+ "JSTF",
+ "DSIG",
+ "EBDT",
+ "EBLC",
+ "EBSC",
+ "PCLT",
+ "LTSH",
+ ]
+ _drop_tables_default += ["Feat", "Glat", "Gloc", "Silf", "Sill"] # Graphite
+ _no_subset_tables_default = [
+ "avar",
+ "fvar",
+ "gasp",
+ "head",
+ "hhea",
+ "maxp",
+ "vhea",
+ "OS/2",
+ "loca",
+ "name",
+ "cvt",
+ "fpgm",
+ "prep",
+ "VDMX",
+ "DSIG",
+ "CPAL",
+ "MVAR",
+ "cvar",
+ "STAT",
+ ]
+ _hinting_tables_default = ["cvt", "cvar", "fpgm", "prep", "hdmx", "VDMX"]
+
+ # Based on HarfBuzz shapers
+ _layout_features_groups = {
+ # Default shaper
+ "common": ["rvrn", "ccmp", "liga", "locl", "mark", "mkmk", "rlig"],
+ "fractions": ["frac", "numr", "dnom"],
+ "horizontal": ["calt", "clig", "curs", "kern", "rclt"],
+ "vertical": ["valt", "vert", "vkrn", "vpal", "vrt2"],
+ "ltr": ["ltra", "ltrm"],
+ "rtl": ["rtla", "rtlm"],
+ "rand": ["rand"],
+ "justify": ["jalt"],
+ "private": ["Harf", "HARF", "Buzz", "BUZZ"],
+ "east_asian_spacing": ["chws", "vchw", "halt", "vhal"],
+ # Complex shapers
+ "arabic": [
+ "init",
+ "medi",
+ "fina",
+ "isol",
+ "med2",
+ "fin2",
+ "fin3",
+ "cswh",
+ "mset",
+ "stch",
+ ],
+ "hangul": ["ljmo", "vjmo", "tjmo"],
+ "tibetan": ["abvs", "blws", "abvm", "blwm"],
+ "indic": [
+ "nukt",
+ "akhn",
+ "rphf",
+ "rkrf",
+ "pref",
+ "blwf",
+ "half",
+ "abvf",
+ "pstf",
+ "cfar",
+ "vatu",
+ "cjct",
+ "init",
+ "pres",
+ "abvs",
+ "blws",
+ "psts",
+ "haln",
+ "dist",
+ "abvm",
+ "blwm",
+ ],
+ }
+ _layout_features_default = _uniq_sort(
+ sum(iter(_layout_features_groups.values()), [])
+ )
+
+ def __init__(self, **kwargs):
+ self.drop_tables = self._drop_tables_default[:]
+ self.no_subset_tables = self._no_subset_tables_default[:]
+ self.passthrough_tables = False # keep/drop tables we can't subset
+ self.hinting_tables = self._hinting_tables_default[:]
+ self.legacy_kern = False # drop 'kern' table if GPOS available
+ self.layout_closure = True
+ self.layout_features = self._layout_features_default[:]
+ self.layout_scripts = ["*"]
+ self.ignore_missing_glyphs = False
+ self.ignore_missing_unicodes = True
+ self.hinting = True
+ self.glyph_names = False
+ self.legacy_cmap = False
+ self.symbol_cmap = False
+ self.name_IDs = [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ ] # https://github.com/fonttools/fonttools/issues/1170#issuecomment-364631225
+ self.name_legacy = False
+ self.name_languages = [0x0409] # English
+ self.obfuscate_names = False # to make webfont unusable as a system font
+ self.retain_gids = False
+ self.notdef_glyph = True # gid0 for TrueType / .notdef for CFF
+ self.notdef_outline = False # No need for notdef to have an outline really
+ self.recommended_glyphs = False # gid1, gid2, gid3 for TrueType
+ self.recalc_bounds = False # Recalculate font bounding boxes
+ self.recalc_timestamp = False # Recalculate font modified timestamp
+ self.prune_unicode_ranges = True # Clear unused 'ulUnicodeRange' bits
+ self.recalc_average_width = False # update 'xAvgCharWidth'
+ self.recalc_max_context = False # update 'usMaxContext'
+ self.canonical_order = None # Order tables as recommended
+ self.flavor = None # May be 'woff' or 'woff2'
+ self.with_zopfli = False # use zopfli instead of zlib for WOFF 1.0
+ self.desubroutinize = False # Desubroutinize CFF CharStrings
+ self.harfbuzz_repacker = USE_HARFBUZZ_REPACKER.default
+ self.verbose = False
+ self.timing = False
+ self.xml = False
+ self.font_number = -1
+ self.pretty_svg = False
+ self.lazy = True
+
+ self.set(**kwargs)
+
+ def set(self, **kwargs):
+ for k, v in kwargs.items():
+ if not hasattr(self, k):
+ raise self.UnknownOptionError("Unknown option '%s'" % k)
+ setattr(self, k, v)
+
+ def parse_opts(self, argv, ignore_unknown=[]):
+ posargs = []
+ passthru_options = []
+ for a in argv:
+ orig_a = a
+ if not a.startswith("--"):
+ posargs.append(a)
+ continue
+ a = a[2:]
+ i = a.find("=")
+ op = "="
+ if i == -1:
+ if a.startswith("no-"):
+ k = a[3:]
+ if k == "canonical-order":
+ # reorderTables=None is faster than False (the latter
+ # still reorders to "keep" the original table order)
+ v = None
+ else:
+ v = False
+ else:
+ k = a
+ v = True
+ if k.endswith("?"):
+ k = k[:-1]
+ v = "?"
+ else:
+ k = a[:i]
+ if k[-1] in "-+":
+ op = k[-1] + "=" # Op is '-=' or '+=' now.
+ k = k[:-1]
+ v = a[i + 1 :]
+ ok = k
+ k = k.replace("-", "_")
+ if not hasattr(self, k):
+ if ignore_unknown is True or ok in ignore_unknown:
+ passthru_options.append(orig_a)
+ continue
+ else:
+ raise self.UnknownOptionError("Unknown option '%s'" % a)
+
+ ov = getattr(self, k)
+ if v == "?":
+ print("Current setting for '%s' is: %s" % (ok, ov))
+ continue
+ if isinstance(ov, bool):
+ v = bool(v)
+ elif isinstance(ov, int):
+ v = int(v)
+ elif isinstance(ov, str):
+ v = str(v) # redundant
+ elif isinstance(ov, list):
+ if isinstance(v, bool):
+ raise self.OptionError(
+ "Option '%s' requires values to be specified using '='" % a
+ )
+ vv = v.replace(",", " ").split()
+ if vv == [""]:
+ vv = []
+ vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
+ if op == "=":
+ v = vv
+ elif op == "+=":
+ v = ov
+ v.extend(vv)
+ elif op == "-=":
+ v = ov
+ for x in vv:
+ if x in v:
+ v.remove(x)
+ else:
+ assert False
+
+ setattr(self, k, v)
+
+ return posargs + passthru_options
class Subsetter(object):
-
- class SubsettingError(Exception): pass
- class MissingGlyphsSubsettingError(SubsettingError): pass
- class MissingUnicodesSubsettingError(SubsettingError): pass
-
- def __init__(self, options=None):
-
- if not options:
- options = Options()
-
- self.options = options
- self.unicodes_requested = set()
- self.glyph_names_requested = set()
- self.glyph_ids_requested = set()
-
- def populate(self, glyphs=[], gids=[], unicodes=[], text=""):
- self.unicodes_requested.update(unicodes)
- if isinstance(text, bytes):
- text = text.decode("utf_8")
- text_utf32 = text.encode("utf-32-be")
- nchars = len(text_utf32)//4
- for u in struct.unpack('>%dL' % nchars, text_utf32):
- self.unicodes_requested.add(u)
- self.glyph_names_requested.update(glyphs)
- self.glyph_ids_requested.update(gids)
-
- def _prune_pre_subset(self, font):
- for tag in self._sort_tables(font):
- if (tag.strip() in self.options.drop_tables or
- (tag.strip() in self.options.hinting_tables and not self.options.hinting) or
- (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))):
- log.info("%s dropped", tag)
- del font[tag]
- continue
-
- clazz = ttLib.getTableClass(tag)
-
- if hasattr(clazz, 'prune_pre_subset'):
- with timer("load '%s'" % tag):
- table = font[tag]
- with timer("prune '%s'" % tag):
- retain = table.prune_pre_subset(font, self.options)
- if not retain:
- log.info("%s pruned to empty; dropped", tag)
- del font[tag]
- continue
- else:
- log.info("%s pruned", tag)
-
- def _closure_glyphs(self, font):
-
- realGlyphs = set(font.getGlyphOrder())
- self.orig_glyph_order = glyph_order = font.getGlyphOrder()
-
- self.glyphs_requested = set()
- self.glyphs_requested.update(self.glyph_names_requested)
- self.glyphs_requested.update(glyph_order[i]
- for i in self.glyph_ids_requested
- if i < len(glyph_order))
-
- self.glyphs_missing = set()
- self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs))
- self.glyphs_missing.update(i for i in self.glyph_ids_requested
- if i >= len(glyph_order))
- if self.glyphs_missing:
- log.info("Missing requested glyphs: %s", self.glyphs_missing)
- if not self.options.ignore_missing_glyphs:
- raise self.MissingGlyphsSubsettingError(self.glyphs_missing)
-
- self.glyphs = self.glyphs_requested.copy()
-
- self.unicodes_missing = set()
- if 'cmap' in font:
- with timer("close glyph list over 'cmap'"):
- font['cmap'].closure_glyphs(self)
- self.glyphs.intersection_update(realGlyphs)
- self.glyphs_cmaped = frozenset(self.glyphs)
- if self.unicodes_missing:
- missing = ["U+%04X" % u for u in self.unicodes_missing]
- log.info("Missing glyphs for requested Unicodes: %s", missing)
- if not self.options.ignore_missing_unicodes:
- raise self.MissingUnicodesSubsettingError(missing)
- del missing
-
- if self.options.notdef_glyph:
- if 'glyf' in font:
- self.glyphs.add(font.getGlyphName(0))
- log.info("Added gid0 to subset")
- else:
- self.glyphs.add('.notdef')
- log.info("Added .notdef to subset")
- if self.options.recommended_glyphs:
- if 'glyf' in font:
- for i in range(min(4, len(font.getGlyphOrder()))):
- self.glyphs.add(font.getGlyphName(i))
- log.info("Added first four glyphs to subset")
-
- if self.options.layout_closure and 'GSUB' in font:
- with timer("close glyph list over 'GSUB'"):
- log.info("Closing glyph list over 'GSUB': %d glyphs before",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- font['GSUB'].closure_glyphs(self)
- self.glyphs.intersection_update(realGlyphs)
- log.info("Closed glyph list over 'GSUB': %d glyphs after",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- self.glyphs_gsubed = frozenset(self.glyphs)
-
- if 'MATH' in font:
- with timer("close glyph list over 'MATH'"):
- log.info("Closing glyph list over 'MATH': %d glyphs before",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- font['MATH'].closure_glyphs(self)
- self.glyphs.intersection_update(realGlyphs)
- log.info("Closed glyph list over 'MATH': %d glyphs after",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- self.glyphs_mathed = frozenset(self.glyphs)
-
- for table in ('COLR', 'bsln'):
- if table in font:
- with timer("close glyph list over '%s'" % table):
- log.info("Closing glyph list over '%s': %d glyphs before",
- table, len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- font[table].closure_glyphs(self)
- self.glyphs.intersection_update(realGlyphs)
- log.info("Closed glyph list over '%s': %d glyphs after",
- table, len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- setattr(self, f"glyphs_{table.lower()}ed", frozenset(self.glyphs))
-
- if 'glyf' in font:
- with timer("close glyph list over 'glyf'"):
- log.info("Closing glyph list over 'glyf': %d glyphs before",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- font['glyf'].closure_glyphs(self)
- self.glyphs.intersection_update(realGlyphs)
- log.info("Closed glyph list over 'glyf': %d glyphs after",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- self.glyphs_glyfed = frozenset(self.glyphs)
-
- if 'CFF ' in font:
- with timer("close glyph list over 'CFF '"):
- log.info("Closing glyph list over 'CFF ': %d glyphs before",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- font['CFF '].closure_glyphs(self)
- self.glyphs.intersection_update(realGlyphs)
- log.info("Closed glyph list over 'CFF ': %d glyphs after",
- len(self.glyphs))
- log.glyphs(self.glyphs, font=font)
- self.glyphs_cffed = frozenset(self.glyphs)
-
- self.glyphs_retained = frozenset(self.glyphs)
-
- order = font.getReverseGlyphMap()
- self.reverseOrigGlyphMap = {g:order[g] for g in self.glyphs_retained}
-
- self.last_retained_order = max(self.reverseOrigGlyphMap.values())
- self.last_retained_glyph = font.getGlyphOrder()[self.last_retained_order]
-
- self.glyphs_emptied = frozenset()
- if self.options.retain_gids:
- self.glyphs_emptied = {g for g in realGlyphs - self.glyphs_retained if order[g] <= self.last_retained_order}
-
- self.reverseEmptiedGlyphMap = {g:order[g] for g in self.glyphs_emptied}
-
- if not self.options.retain_gids:
- new_glyph_order = [
- g for g in glyph_order if g in self.glyphs_retained
- ]
- else:
- new_glyph_order = [
- g for g in glyph_order
- if font.getGlyphID(g) <= self.last_retained_order
- ]
- # We'll call font.setGlyphOrder() at the end of _subset_glyphs when all
- # tables have been subsetted. Below, we use the new glyph order to get
- # a map from old to new glyph indices, which can be useful when
- # subsetting individual tables (e.g. SVG) that refer to GIDs.
- self.new_glyph_order = new_glyph_order
- self.glyph_index_map = {
- order[new_glyph_order[i]]: i
- for i in range(len(new_glyph_order))
- }
-
- log.info("Retaining %d glyphs", len(self.glyphs_retained))
-
- del self.glyphs
-
- def _subset_glyphs(self, font):
- for tag in self._sort_tables(font):
- clazz = ttLib.getTableClass(tag)
-
- if tag.strip() in self.options.no_subset_tables:
- log.info("%s subsetting not needed", tag)
- elif hasattr(clazz, 'subset_glyphs'):
- with timer("subset '%s'" % tag):
- table = font[tag]
- self.glyphs = self.glyphs_retained
- retain = table.subset_glyphs(self)
- del self.glyphs
- if not retain:
- log.info("%s subsetted to empty; dropped", tag)
- del font[tag]
- else:
- log.info("%s subsetted", tag)
- elif self.options.passthrough_tables:
- log.info("%s NOT subset; don't know how to subset", tag)
- else:
- log.warning("%s NOT subset; don't know how to subset; dropped", tag)
- del font[tag]
-
- with timer("subset GlyphOrder"):
- font.setGlyphOrder(self.new_glyph_order)
-
-
- def _prune_post_subset(self, font):
- for tag in font.keys():
- if tag == 'GlyphOrder': continue
- if tag == 'OS/2' and self.options.prune_unicode_ranges:
- old_uniranges = font[tag].getUnicodeRanges()
- new_uniranges = font[tag].recalcUnicodeRanges(font, pruneOnly=True)
- if old_uniranges != new_uniranges:
- log.info("%s Unicode ranges pruned: %s", tag, sorted(new_uniranges))
- if self.options.recalc_average_width:
- old_avg_width = font[tag].xAvgCharWidth
- new_avg_width = font[tag].recalcAvgCharWidth(font)
- if old_avg_width != new_avg_width:
- log.info("%s xAvgCharWidth updated: %d", tag, new_avg_width)
- if self.options.recalc_max_context:
- max_context = maxCtxFont(font)
- if max_context != font[tag].usMaxContext:
- font[tag].usMaxContext = max_context
- log.info("%s usMaxContext updated: %d", tag, max_context)
- clazz = ttLib.getTableClass(tag)
- if hasattr(clazz, 'prune_post_subset'):
- with timer("prune '%s'" % tag):
- table = font[tag]
- retain = table.prune_post_subset(font, self.options)
- if not retain:
- log.info("%s pruned to empty; dropped", tag)
- del font[tag]
- else:
- log.info("%s pruned", tag)
-
- def _sort_tables(self, font):
- tagOrder = ['fvar', 'avar', 'gvar', 'name', 'glyf']
- tagOrder = {t: i + 1 for i, t in enumerate(tagOrder)}
- tags = sorted(font.keys(), key=lambda tag: tagOrder.get(tag, 0))
- return [t for t in tags if t != 'GlyphOrder']
-
- def subset(self, font):
- self._prune_pre_subset(font)
- self._closure_glyphs(font)
- self._subset_glyphs(font)
- self._prune_post_subset(font)
+ class SubsettingError(Exception):
+ pass
+
+ class MissingGlyphsSubsettingError(SubsettingError):
+ pass
+
+ class MissingUnicodesSubsettingError(SubsettingError):
+ pass
+
+ def __init__(self, options=None):
+ if not options:
+ options = Options()
+
+ self.options = options
+ self.unicodes_requested = set()
+ self.glyph_names_requested = set()
+ self.glyph_ids_requested = set()
+
+ def populate(self, glyphs=[], gids=[], unicodes=[], text=""):
+ self.unicodes_requested.update(unicodes)
+ if isinstance(text, bytes):
+ text = text.decode("utf_8")
+ text_utf32 = text.encode("utf-32-be")
+ nchars = len(text_utf32) // 4
+ for u in struct.unpack(">%dL" % nchars, text_utf32):
+ self.unicodes_requested.add(u)
+ self.glyph_names_requested.update(glyphs)
+ self.glyph_ids_requested.update(gids)
+
+ def _prune_pre_subset(self, font):
+ for tag in self._sort_tables(font):
+ if (
+ tag.strip() in self.options.drop_tables
+ or (
+ tag.strip() in self.options.hinting_tables
+ and not self.options.hinting
+ )
+ or (tag == "kern" and (not self.options.legacy_kern and "GPOS" in font))
+ ):
+ log.info("%s dropped", tag)
+ del font[tag]
+ continue
+
+ clazz = ttLib.getTableClass(tag)
+
+ if hasattr(clazz, "prune_pre_subset"):
+ with timer("load '%s'" % tag):
+ table = font[tag]
+ with timer("prune '%s'" % tag):
+ retain = table.prune_pre_subset(font, self.options)
+ if not retain:
+ log.info("%s pruned to empty; dropped", tag)
+ del font[tag]
+ continue
+ else:
+ log.info("%s pruned", tag)
+
+ def _closure_glyphs(self, font):
+ realGlyphs = set(font.getGlyphOrder())
+ self.orig_glyph_order = glyph_order = font.getGlyphOrder()
+
+ self.glyphs_requested = set()
+ self.glyphs_requested.update(self.glyph_names_requested)
+ self.glyphs_requested.update(
+ glyph_order[i] for i in self.glyph_ids_requested if i < len(glyph_order)
+ )
+
+ self.glyphs_missing = set()
+ self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs))
+ self.glyphs_missing.update(
+ i for i in self.glyph_ids_requested if i >= len(glyph_order)
+ )
+ if self.glyphs_missing:
+ log.info("Missing requested glyphs: %s", self.glyphs_missing)
+ if not self.options.ignore_missing_glyphs:
+ raise self.MissingGlyphsSubsettingError(self.glyphs_missing)
+
+ self.glyphs = self.glyphs_requested.copy()
+
+ self.unicodes_missing = set()
+ if "cmap" in font:
+ with timer("close glyph list over 'cmap'"):
+ font["cmap"].closure_glyphs(self)
+ self.glyphs.intersection_update(realGlyphs)
+ self.glyphs_cmaped = frozenset(self.glyphs)
+ if self.unicodes_missing:
+ missing = ["U+%04X" % u for u in self.unicodes_missing]
+ log.info("Missing glyphs for requested Unicodes: %s", missing)
+ if not self.options.ignore_missing_unicodes:
+ raise self.MissingUnicodesSubsettingError(missing)
+ del missing
+
+ if self.options.notdef_glyph:
+ if "glyf" in font:
+ self.glyphs.add(font.getGlyphName(0))
+ log.info("Added gid0 to subset")
+ else:
+ self.glyphs.add(".notdef")
+ log.info("Added .notdef to subset")
+ if self.options.recommended_glyphs:
+ if "glyf" in font:
+ for i in range(min(4, len(font.getGlyphOrder()))):
+ self.glyphs.add(font.getGlyphName(i))
+ log.info("Added first four glyphs to subset")
+
+ if self.options.layout_closure and "GSUB" in font:
+ with timer("close glyph list over 'GSUB'"):
+ log.info(
+ "Closing glyph list over 'GSUB': %d glyphs before", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ font["GSUB"].closure_glyphs(self)
+ self.glyphs.intersection_update(realGlyphs)
+ log.info(
+ "Closed glyph list over 'GSUB': %d glyphs after", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ self.glyphs_gsubed = frozenset(self.glyphs)
+
+ if "MATH" in font:
+ with timer("close glyph list over 'MATH'"):
+ log.info(
+ "Closing glyph list over 'MATH': %d glyphs before", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ font["MATH"].closure_glyphs(self)
+ self.glyphs.intersection_update(realGlyphs)
+ log.info(
+ "Closed glyph list over 'MATH': %d glyphs after", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ self.glyphs_mathed = frozenset(self.glyphs)
+
+ for table in ("COLR", "bsln"):
+ if table in font:
+ with timer("close glyph list over '%s'" % table):
+ log.info(
+ "Closing glyph list over '%s': %d glyphs before",
+ table,
+ len(self.glyphs),
+ )
+ log.glyphs(self.glyphs, font=font)
+ font[table].closure_glyphs(self)
+ self.glyphs.intersection_update(realGlyphs)
+ log.info(
+ "Closed glyph list over '%s': %d glyphs after",
+ table,
+ len(self.glyphs),
+ )
+ log.glyphs(self.glyphs, font=font)
+ setattr(self, f"glyphs_{table.lower()}ed", frozenset(self.glyphs))
+
+ if "glyf" in font:
+ with timer("close glyph list over 'glyf'"):
+ log.info(
+ "Closing glyph list over 'glyf': %d glyphs before", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ font["glyf"].closure_glyphs(self)
+ self.glyphs.intersection_update(realGlyphs)
+ log.info(
+ "Closed glyph list over 'glyf': %d glyphs after", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ self.glyphs_glyfed = frozenset(self.glyphs)
+
+ if "CFF " in font:
+ with timer("close glyph list over 'CFF '"):
+ log.info(
+ "Closing glyph list over 'CFF ': %d glyphs before", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ font["CFF "].closure_glyphs(self)
+ self.glyphs.intersection_update(realGlyphs)
+ log.info(
+ "Closed glyph list over 'CFF ': %d glyphs after", len(self.glyphs)
+ )
+ log.glyphs(self.glyphs, font=font)
+ self.glyphs_cffed = frozenset(self.glyphs)
+
+ self.glyphs_retained = frozenset(self.glyphs)
+
+ order = font.getReverseGlyphMap()
+ self.reverseOrigGlyphMap = {g: order[g] for g in self.glyphs_retained}
+
+ self.last_retained_order = max(self.reverseOrigGlyphMap.values())
+ self.last_retained_glyph = font.getGlyphOrder()[self.last_retained_order]
+
+ self.glyphs_emptied = frozenset()
+ if self.options.retain_gids:
+ self.glyphs_emptied = {
+ g
+ for g in realGlyphs - self.glyphs_retained
+ if order[g] <= self.last_retained_order
+ }
+
+ self.reverseEmptiedGlyphMap = {g: order[g] for g in self.glyphs_emptied}
+
+ if not self.options.retain_gids:
+ new_glyph_order = [g for g in glyph_order if g in self.glyphs_retained]
+ else:
+ new_glyph_order = [
+ g for g in glyph_order if font.getGlyphID(g) <= self.last_retained_order
+ ]
+ # We'll call font.setGlyphOrder() at the end of _subset_glyphs when all
+ # tables have been subsetted. Below, we use the new glyph order to get
+ # a map from old to new glyph indices, which can be useful when
+ # subsetting individual tables (e.g. SVG) that refer to GIDs.
+ self.new_glyph_order = new_glyph_order
+ self.glyph_index_map = {
+ order[new_glyph_order[i]]: i for i in range(len(new_glyph_order))
+ }
+
+ log.info("Retaining %d glyphs", len(self.glyphs_retained))
+
+ del self.glyphs
+
+ def _subset_glyphs(self, font):
+ self.used_mark_sets = []
+ for tag in self._sort_tables(font):
+ clazz = ttLib.getTableClass(tag)
+
+ if tag.strip() in self.options.no_subset_tables:
+ log.info("%s subsetting not needed", tag)
+ elif hasattr(clazz, "subset_glyphs"):
+ with timer("subset '%s'" % tag):
+ table = font[tag]
+ self.glyphs = self.glyphs_retained
+ retain = table.subset_glyphs(self)
+ del self.glyphs
+ if not retain:
+ log.info("%s subsetted to empty; dropped", tag)
+ del font[tag]
+ else:
+ log.info("%s subsetted", tag)
+ elif self.options.passthrough_tables:
+ log.info("%s NOT subset; don't know how to subset", tag)
+ else:
+ log.warning("%s NOT subset; don't know how to subset; dropped", tag)
+ del font[tag]
+
+ with timer("subset GlyphOrder"):
+ font.setGlyphOrder(self.new_glyph_order)
+
+ def _prune_post_subset(self, font):
+ tableTags = font.keys()
+ # Prune the name table last because when we're pruning the name table,
+ # we visit each table in the font to see what name table records are
+ # still in use.
+ if "name" in tableTags:
+ tableTags.remove("name")
+ tableTags.append("name")
+ for tag in tableTags:
+ if tag == "GlyphOrder":
+ continue
+ if tag == "OS/2":
+ if self.options.prune_unicode_ranges:
+ old_uniranges = font[tag].getUnicodeRanges()
+ new_uniranges = font[tag].recalcUnicodeRanges(font, pruneOnly=True)
+ if old_uniranges != new_uniranges:
+ log.info(
+ "%s Unicode ranges pruned: %s", tag, sorted(new_uniranges)
+ )
+ if self.options.recalc_average_width:
+ old_avg_width = font[tag].xAvgCharWidth
+ new_avg_width = font[tag].recalcAvgCharWidth(font)
+ if old_avg_width != new_avg_width:
+ log.info("%s xAvgCharWidth updated: %d", tag, new_avg_width)
+ if self.options.recalc_max_context:
+ max_context = maxCtxFont(font)
+ if max_context != font[tag].usMaxContext:
+ font[tag].usMaxContext = max_context
+ log.info("%s usMaxContext updated: %d", tag, max_context)
+ clazz = ttLib.getTableClass(tag)
+ if hasattr(clazz, "prune_post_subset"):
+ with timer("prune '%s'" % tag):
+ table = font[tag]
+ retain = table.prune_post_subset(font, self.options)
+ if not retain:
+ log.info("%s pruned to empty; dropped", tag)
+ del font[tag]
+ else:
+ log.info("%s pruned", tag)
+
+ def _sort_tables(self, font):
+ tagOrder = ["GDEF", "GPOS", "GSUB", "fvar", "avar", "gvar", "name", "glyf"]
+ tagOrder = {t: i + 1 for i, t in enumerate(tagOrder)}
+ tags = sorted(font.keys(), key=lambda tag: tagOrder.get(tag, 0))
+ return [t for t in tags if t != "GlyphOrder"]
+
+ def subset(self, font):
+ self._prune_pre_subset(font)
+ self._closure_glyphs(font)
+ self._subset_glyphs(font)
+ self._prune_post_subset(font)
@timer("load font")
-def load_font(fontFile,
- options,
- checkChecksums=0,
- dontLoadGlyphNames=False,
- lazy=True):
-
- font = ttLib.TTFont(fontFile,
- checkChecksums=checkChecksums,
- recalcBBoxes=options.recalc_bounds,
- recalcTimestamp=options.recalc_timestamp,
- lazy=lazy,
- fontNumber=options.font_number)
-
- # Hack:
- #
- # If we don't need glyph names, change 'post' class to not try to
- # load them. It avoid lots of headache with broken fonts as well
- # as loading time.
- #
- # Ideally ttLib should provide a way to ask it to skip loading
- # glyph names. But it currently doesn't provide such a thing.
- #
- if dontLoadGlyphNames:
- post = ttLib.getTableClass('post')
- saved = post.decode_format_2_0
- post.decode_format_2_0 = post.decode_format_3_0
- f = font['post']
- if f.formatType == 2.0:
- f.formatType = 3.0
- post.decode_format_2_0 = saved
-
- return font
+def load_font(fontFile, options, checkChecksums=0, dontLoadGlyphNames=False, lazy=True):
+ font = ttLib.TTFont(
+ fontFile,
+ checkChecksums=checkChecksums,
+ recalcBBoxes=options.recalc_bounds,
+ recalcTimestamp=options.recalc_timestamp,
+ lazy=lazy,
+ fontNumber=options.font_number,
+ )
+
+ # Hack:
+ #
+ # If we don't need glyph names, change 'post' class to not try to
+ # load them. It avoid lots of headache with broken fonts as well
+ # as loading time.
+ #
+ # Ideally ttLib should provide a way to ask it to skip loading
+ # glyph names. But it currently doesn't provide such a thing.
+ #
+ if dontLoadGlyphNames:
+ post = ttLib.getTableClass("post")
+ saved = post.decode_format_2_0
+ post.decode_format_2_0 = post.decode_format_3_0
+ f = font["post"]
+ if f.formatType == 2.0:
+ f.formatType = 3.0
+ post.decode_format_2_0 = saved
+
+ return font
+
@timer("compile and save font")
def save_font(font, outfile, options):
- if options.with_zopfli and options.flavor == "woff":
- from fontTools.ttLib import sfnt
- sfnt.USE_ZOPFLI = True
- font.flavor = options.flavor
- font.cfg[USE_HARFBUZZ_REPACKER] = options.harfbuzz_repacker
- font.save(outfile, reorderTables=options.canonical_order)
+ if options.with_zopfli and options.flavor == "woff":
+ from fontTools.ttLib import sfnt
+
+ sfnt.USE_ZOPFLI = True
+ font.flavor = options.flavor
+ font.cfg[USE_HARFBUZZ_REPACKER] = options.harfbuzz_repacker
+ font.save(outfile, reorderTables=options.canonical_order)
+
def parse_unicodes(s):
- import re
- s = re.sub (r"0[xX]", " ", s)
- s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s)
- l = []
- for item in s.split():
- fields = item.split('-')
- if len(fields) == 1:
- l.append(int(item, 16))
- else:
- start,end = fields
- l.extend(range(int(start, 16), int(end, 16)+1))
- return l
+ import re
+
+ s = re.sub(r"0[xX]", " ", s)
+ s = re.sub(r"[<+>,;&#\\xXuU\n ]", " ", s)
+ l = []
+ for item in s.split():
+ fields = item.split("-")
+ if len(fields) == 1:
+ l.append(int(item, 16))
+ else:
+ start, end = fields
+ l.extend(range(int(start, 16), int(end, 16) + 1))
+ return l
+
def parse_gids(s):
- l = []
- for item in s.replace(',', ' ').split():
- fields = item.split('-')
- if len(fields) == 1:
- l.append(int(fields[0]))
- else:
- l.extend(range(int(fields[0]), int(fields[1])+1))
- return l
+ l = []
+ for item in s.replace(",", " ").split():
+ fields = item.split("-")
+ if len(fields) == 1:
+ l.append(int(fields[0]))
+ else:
+ l.extend(range(int(fields[0]), int(fields[1]) + 1))
+ return l
+
def parse_glyphs(s):
- return s.replace(',', ' ').split()
+ return s.replace(",", " ").split()
+
def usage():
- print("usage:", __usage__, file=sys.stderr)
- print("Try pyftsubset --help for more information.\n", file=sys.stderr)
+ print("usage:", __usage__, file=sys.stderr)
+ print("Try pyftsubset --help for more information.\n", file=sys.stderr)
+
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
- """OpenType font subsetter and optimizer"""
- from os.path import splitext
- from fontTools import configLogger
-
- if args is None:
- args = sys.argv[1:]
-
- if '--help' in args:
- print(__doc__)
- return 0
-
- options = Options()
- try:
- args = options.parse_opts(args,
- ignore_unknown=['gids', 'gids-file',
- 'glyphs', 'glyphs-file',
- 'text', 'text-file',
- 'unicodes', 'unicodes-file',
- 'output-file'])
- except options.OptionError as e:
- usage()
- print("ERROR:", e, file=sys.stderr)
- return 2
-
- if len(args) < 2:
- usage()
- return 1
-
- configLogger(level=logging.INFO if options.verbose else logging.WARNING)
- if options.timing:
- timer.logger.setLevel(logging.DEBUG)
- else:
- timer.logger.disabled = True
-
- fontfile = args[0]
- args = args[1:]
-
- subsetter = Subsetter(options=options)
- outfile = None
- glyphs = []
- gids = []
- unicodes = []
- wildcard_glyphs = False
- wildcard_unicodes = False
- text = ""
- for g in args:
- if g == '*':
- wildcard_glyphs = True
- continue
- if g.startswith('--output-file='):
- outfile = g[14:]
- continue
- if g.startswith('--text='):
- text += g[7:]
- continue
- if g.startswith('--text-file='):
- with open(g[12:], encoding='utf-8') as f:
- text += f.read().replace('\n', '')
- continue
- if g.startswith('--unicodes='):
- if g[11:] == '*':
- wildcard_unicodes = True
- else:
- unicodes.extend(parse_unicodes(g[11:]))
- continue
- if g.startswith('--unicodes-file='):
- with open(g[16:]) as f:
- for line in f.readlines():
- unicodes.extend(parse_unicodes(line.split('#')[0]))
- continue
- if g.startswith('--gids='):
- gids.extend(parse_gids(g[7:]))
- continue
- if g.startswith('--gids-file='):
- with open(g[12:]) as f:
- for line in f.readlines():
- gids.extend(parse_gids(line.split('#')[0]))
- continue
- if g.startswith('--glyphs='):
- if g[9:] == '*':
- wildcard_glyphs = True
- else:
- glyphs.extend(parse_glyphs(g[9:]))
- continue
- if g.startswith('--glyphs-file='):
- with open(g[14:]) as f:
- for line in f.readlines():
- glyphs.extend(parse_glyphs(line.split('#')[0]))
- continue
- glyphs.append(g)
-
- dontLoadGlyphNames = not options.glyph_names and not glyphs
- font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames)
-
- if outfile is None:
- outfile = makeOutputFileName(fontfile, overWrite=True, suffix=".subset")
-
- with timer("compile glyph list"):
- if wildcard_glyphs:
- glyphs.extend(font.getGlyphOrder())
- if wildcard_unicodes:
- for t in font['cmap'].tables:
- if t.isUnicode():
- unicodes.extend(t.cmap.keys())
- assert '' not in glyphs
-
- log.info("Text: '%s'" % text)
- log.info("Unicodes: %s", unicodes)
- log.info("Glyphs: %s", glyphs)
- log.info("Gids: %s", gids)
-
- subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text)
- subsetter.subset(font)
-
- save_font(font, outfile, options)
-
- if options.verbose:
- import os
- log.info("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile))
- log.info("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile))
-
- if options.xml:
- font.saveXML(sys.stdout)
-
- font.close()
+ """OpenType font subsetter and optimizer"""
+ from os.path import splitext
+ from fontTools import configLogger
+
+ if args is None:
+ args = sys.argv[1:]
+
+ if "--help" in args:
+ print(__doc__)
+ return 0
+
+ options = Options()
+ try:
+ args = options.parse_opts(
+ args,
+ ignore_unknown=[
+ "gids",
+ "gids-file",
+ "glyphs",
+ "glyphs-file",
+ "text",
+ "text-file",
+ "unicodes",
+ "unicodes-file",
+ "output-file",
+ ],
+ )
+ except options.OptionError as e:
+ usage()
+ print("ERROR:", e, file=sys.stderr)
+ return 2
+
+ if len(args) < 2:
+ usage()
+ return 1
+
+ configLogger(level=logging.INFO if options.verbose else logging.WARNING)
+ if options.timing:
+ timer.logger.setLevel(logging.DEBUG)
+ else:
+ timer.logger.disabled = True
+
+ fontfile = args[0]
+ args = args[1:]
+
+ subsetter = Subsetter(options=options)
+ outfile = None
+ glyphs = []
+ gids = []
+ unicodes = []
+ wildcard_glyphs = False
+ wildcard_unicodes = False
+ text = ""
+ for g in args:
+ if g == "*":
+ wildcard_glyphs = True
+ continue
+ if g.startswith("--output-file="):
+ outfile = g[14:]
+ continue
+ if g.startswith("--text="):
+ text += g[7:]
+ continue
+ if g.startswith("--text-file="):
+ with open(g[12:], encoding="utf-8") as f:
+ text += f.read().replace("\n", "")
+ continue
+ if g.startswith("--unicodes="):
+ if g[11:] == "*":
+ wildcard_unicodes = True
+ else:
+ unicodes.extend(parse_unicodes(g[11:]))
+ continue
+ if g.startswith("--unicodes-file="):
+ with open(g[16:]) as f:
+ for line in f.readlines():
+ unicodes.extend(parse_unicodes(line.split("#")[0]))
+ continue
+ if g.startswith("--gids="):
+ gids.extend(parse_gids(g[7:]))
+ continue
+ if g.startswith("--gids-file="):
+ with open(g[12:]) as f:
+ for line in f.readlines():
+ gids.extend(parse_gids(line.split("#")[0]))
+ continue
+ if g.startswith("--glyphs="):
+ if g[9:] == "*":
+ wildcard_glyphs = True
+ else:
+ glyphs.extend(parse_glyphs(g[9:]))
+ continue
+ if g.startswith("--glyphs-file="):
+ with open(g[14:]) as f:
+ for line in f.readlines():
+ glyphs.extend(parse_glyphs(line.split("#")[0]))
+ continue
+ glyphs.append(g)
+
+ dontLoadGlyphNames = not options.glyph_names and not glyphs
+ lazy = options.lazy
+ font = load_font(
+ fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames, lazy=lazy
+ )
+
+ if outfile is None:
+ ext = "." + options.flavor.lower() if options.flavor is not None else None
+ outfile = makeOutputFileName(
+ fontfile, extension=ext, overWrite=True, suffix=".subset"
+ )
+
+ with timer("compile glyph list"):
+ if wildcard_glyphs:
+ glyphs.extend(font.getGlyphOrder())
+ if wildcard_unicodes:
+ for t in font["cmap"].tables:
+ if t.isUnicode():
+ unicodes.extend(t.cmap.keys())
+ assert "" not in glyphs
+
+ log.info("Text: '%s'" % text)
+ log.info("Unicodes: %s", unicodes)
+ log.info("Glyphs: %s", glyphs)
+ log.info("Gids: %s", gids)
+
+ subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text)
+ subsetter.subset(font)
+
+ save_font(font, outfile, options)
+
+ if options.verbose:
+ import os
+
+ log.info("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile))
+ log.info("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile))
+
+ if options.xml:
+ font.saveXML(sys.stdout)
+
+ font.close()
__all__ = [
- 'Options',
- 'Subsetter',
- 'load_font',
- 'save_font',
- 'parse_gids',
- 'parse_glyphs',
- 'parse_unicodes',
- 'main'
+ "Options",
+ "Subsetter",
+ "load_font",
+ "save_font",
+ "parse_gids",
+ "parse_glyphs",
+ "parse_unicodes",
+ "main",
]
-if __name__ == '__main__':
- sys.exit(main())
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/subset/__main__.py b/Lib/fontTools/subset/__main__.py
index 22038473..decf9ee6 100644
--- a/Lib/fontTools/subset/__main__.py
+++ b/Lib/fontTools/subset/__main__.py
@@ -2,5 +2,5 @@ import sys
from fontTools.subset import main
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/Lib/fontTools/subset/cff.py b/Lib/fontTools/subset/cff.py
index d6872f39..dd79f6db 100644
--- a/Lib/fontTools/subset/cff.py
+++ b/Lib/fontTools/subset/cff.py
@@ -7,496 +7,530 @@ from fontTools.subset.util import _add_method, _uniq_sort
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
+ def __init__(self, components, localSubrs, globalSubrs):
+ psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
+ self.components = components
- def __init__(self, components, localSubrs, globalSubrs):
- psCharStrings.SimpleT2Decompiler.__init__(self,
- localSubrs,
- globalSubrs)
- self.components = components
-
- def op_endchar(self, index):
- args = self.popall()
- if len(args) >= 4:
- from fontTools.encodings.StandardEncoding import StandardEncoding
- # endchar can do seac accent bulding; The T2 spec says it's deprecated,
- # but recent software that shall remain nameless does output it.
- adx, ady, bchar, achar = args[-4:]
- baseGlyph = StandardEncoding[bchar]
- accentGlyph = StandardEncoding[achar]
- self.components.add(baseGlyph)
- self.components.add(accentGlyph)
-
-@_add_method(ttLib.getTableClass('CFF '))
+ def op_endchar(self, index):
+ args = self.popall()
+ if len(args) >= 4:
+ from fontTools.encodings.StandardEncoding import StandardEncoding
+
+ # endchar can do seac accent bulding; The T2 spec says it's deprecated,
+ # but recent software that shall remain nameless does output it.
+ adx, ady, bchar, achar = args[-4:]
+ baseGlyph = StandardEncoding[bchar]
+ accentGlyph = StandardEncoding[achar]
+ self.components.add(baseGlyph)
+ self.components.add(accentGlyph)
+
+
+@_add_method(ttLib.getTableClass("CFF "))
def closure_glyphs(self, s):
- cff = self.cff
- assert len(cff) == 1
- font = cff[cff.keys()[0]]
- glyphSet = font.CharStrings
-
- decompose = s.glyphs
- while decompose:
- components = set()
- for g in decompose:
- if g not in glyphSet:
- continue
- gl = glyphSet[g]
-
- subrs = getattr(gl.private, "Subrs", [])
- decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
- decompiler.execute(gl)
- components -= s.glyphs
- s.glyphs.update(components)
- decompose = components
+ cff = self.cff
+ assert len(cff) == 1
+ font = cff[cff.keys()[0]]
+ glyphSet = font.CharStrings
+
+ decompose = s.glyphs
+ while decompose:
+ components = set()
+ for g in decompose:
+ if g not in glyphSet:
+ continue
+ gl = glyphSet[g]
+
+ subrs = getattr(gl.private, "Subrs", [])
+ decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
+ decompiler.execute(gl)
+ components -= s.glyphs
+ s.glyphs.update(components)
+ decompose = components
+
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
- c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
- if isCFF2 or ignoreWidth:
- # CFF2 charstrings have no widths nor 'endchar' operators
- c.setProgram([] if isCFF2 else ['endchar'])
- else:
- if hasattr(font, 'FDArray') and font.FDArray is not None:
- private = font.FDArray[fdSelectIndex].Private
- else:
- private = font.Private
- dfltWdX = private.defaultWidthX
- nmnlWdX = private.nominalWidthX
- pen = NullPen()
- c.draw(pen) # this will set the charstring's width
- if c.width != dfltWdX:
- c.program = [c.width - nmnlWdX, 'endchar']
- else:
- c.program = ['endchar']
-
-@_add_method(ttLib.getTableClass('CFF '))
+ c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
+ if isCFF2 or ignoreWidth:
+ # CFF2 charstrings have no widths nor 'endchar' operators
+ c.setProgram([] if isCFF2 else ["endchar"])
+ else:
+ if hasattr(font, "FDArray") and font.FDArray is not None:
+ private = font.FDArray[fdSelectIndex].Private
+ else:
+ private = font.Private
+ dfltWdX = private.defaultWidthX
+ nmnlWdX = private.nominalWidthX
+ pen = NullPen()
+ c.draw(pen) # this will set the charstring's width
+ if c.width != dfltWdX:
+ c.program = [c.width - nmnlWdX, "endchar"]
+ else:
+ c.program = ["endchar"]
+
+
+@_add_method(ttLib.getTableClass("CFF "))
def prune_pre_subset(self, font, options):
- cff = self.cff
- # CFF table must have one font only
- cff.fontNames = cff.fontNames[:1]
+ cff = self.cff
+ # CFF table must have one font only
+ cff.fontNames = cff.fontNames[:1]
+
+ if options.notdef_glyph and not options.notdef_outline:
+ isCFF2 = cff.major > 1
+ for fontname in cff.keys():
+ font = cff[fontname]
+ _empty_charstring(font, ".notdef", isCFF2=isCFF2)
- if options.notdef_glyph and not options.notdef_outline:
- isCFF2 = cff.major > 1
- for fontname in cff.keys():
- font = cff[fontname]
- _empty_charstring(font, ".notdef", isCFF2=isCFF2)
+ # Clear useless Encoding
+ for fontname in cff.keys():
+ font = cff[fontname]
+ # https://github.com/fonttools/fonttools/issues/620
+ font.Encoding = "StandardEncoding"
- # Clear useless Encoding
- for fontname in cff.keys():
- font = cff[fontname]
- # https://github.com/fonttools/fonttools/issues/620
- font.Encoding = "StandardEncoding"
+ return True # bool(cff.fontNames)
- return True # bool(cff.fontNames)
-@_add_method(ttLib.getTableClass('CFF '))
+@_add_method(ttLib.getTableClass("CFF "))
def subset_glyphs(self, s):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
-
- glyphs = s.glyphs.union(s.glyphs_emptied)
-
- # Load all glyphs
- for g in font.charset:
- if g not in glyphs: continue
- c, _ = cs.getItemAndSelector(g)
-
- if cs.charStringsAreIndexed:
- indices = [i for i,g in enumerate(font.charset) if g in glyphs]
- csi = cs.charStringsIndex
- csi.items = [csi.items[i] for i in indices]
- del csi.file, csi.offsets
- if hasattr(font, "FDSelect"):
- sel = font.FDSelect
- sel.format = None
- sel.gidArray = [sel.gidArray[i] for i in indices]
- newCharStrings = {}
- for indicesIdx, charsetIdx in enumerate(indices):
- g = font.charset[charsetIdx]
- if g in cs.charStrings:
- newCharStrings[g] = indicesIdx
- cs.charStrings = newCharStrings
- else:
- cs.charStrings = {g:v
- for g,v in cs.charStrings.items()
- if g in glyphs}
- font.charset = [g for g in font.charset if g in glyphs]
- font.numGlyphs = len(font.charset)
-
-
- if s.options.retain_gids:
- isCFF2 = cff.major > 1
- for g in s.glyphs_emptied:
- _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
-
-
- return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
+ cff = self.cff
+ for fontname in cff.keys():
+ font = cff[fontname]
+ cs = font.CharStrings
+
+ glyphs = s.glyphs.union(s.glyphs_emptied)
+
+ # Load all glyphs
+ for g in font.charset:
+ if g not in glyphs:
+ continue
+ c, _ = cs.getItemAndSelector(g)
+
+ if cs.charStringsAreIndexed:
+ indices = [i for i, g in enumerate(font.charset) if g in glyphs]
+ csi = cs.charStringsIndex
+ csi.items = [csi.items[i] for i in indices]
+ del csi.file, csi.offsets
+ if hasattr(font, "FDSelect"):
+ sel = font.FDSelect
+ sel.format = None
+ sel.gidArray = [sel.gidArray[i] for i in indices]
+ newCharStrings = {}
+ for indicesIdx, charsetIdx in enumerate(indices):
+ g = font.charset[charsetIdx]
+ if g in cs.charStrings:
+ newCharStrings[g] = indicesIdx
+ cs.charStrings = newCharStrings
+ else:
+ cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
+ font.charset = [g for g in font.charset if g in glyphs]
+ font.numGlyphs = len(font.charset)
+
+ if s.options.retain_gids:
+ isCFF2 = cff.major > 1
+ for g in s.glyphs_emptied:
+ _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
+
+ return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
+
@_add_method(psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
- p = self.program
- for i in range(1, len(p)):
- if p[i] == 'callsubr':
- assert isinstance(p[i-1], int)
- p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias
- elif p[i] == 'callgsubr':
- assert isinstance(p[i-1], int)
- p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias
+ p = self.program
+ for i in range(1, len(p)):
+ if p[i] == "callsubr":
+ assert isinstance(p[i - 1], int)
+ p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
+ elif p[i] == "callgsubr":
+ assert isinstance(p[i - 1], int)
+ p[i - 1] = (
+ gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
+ )
+
@_add_method(psCharStrings.T2CharString)
def drop_hints(self):
- hints = self._hints
-
- if hints.deletions:
- p = self.program
- for idx in reversed(hints.deletions):
- del p[idx-2:idx]
-
- if hints.has_hint:
- assert not hints.deletions or hints.last_hint <= hints.deletions[0]
- self.program = self.program[hints.last_hint:]
- if not self.program:
- # TODO CFF2 no need for endchar.
- self.program.append('endchar')
- if hasattr(self, 'width'):
- # Insert width back if needed
- if self.width != self.private.defaultWidthX:
- # For CFF2 charstrings, this should never happen
- assert self.private.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
- self.program.insert(0, self.width - self.private.nominalWidthX)
-
- if hints.has_hintmask:
- i = 0
- p = self.program
- while i < len(p):
- if p[i] in ['hintmask', 'cntrmask']:
- assert i + 1 <= len(p)
- del p[i:i+2]
- continue
- i += 1
-
- assert len(self.program)
-
- del self._hints
+ hints = self._hints
+
+ if hints.deletions:
+ p = self.program
+ for idx in reversed(hints.deletions):
+ del p[idx - 2 : idx]
+
+ if hints.has_hint:
+ assert not hints.deletions or hints.last_hint <= hints.deletions[0]
+ self.program = self.program[hints.last_hint :]
+ if not self.program:
+ # TODO CFF2 no need for endchar.
+ self.program.append("endchar")
+ if hasattr(self, "width"):
+ # Insert width back if needed
+ if self.width != self.private.defaultWidthX:
+ # For CFF2 charstrings, this should never happen
+ assert (
+ self.private.defaultWidthX is not None
+ ), "CFF2 CharStrings must not have an initial width value"
+ self.program.insert(0, self.width - self.private.nominalWidthX)
+
+ if hints.has_hintmask:
+ i = 0
+ p = self.program
+ while i < len(p):
+ if p[i] in ["hintmask", "cntrmask"]:
+ assert i + 1 <= len(p)
+ del p[i : i + 2]
+ continue
+ i += 1
+
+ assert len(self.program)
+
+ del self._hints
+
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
+ def __init__(self, localSubrs, globalSubrs, private):
+ psCharStrings.SimpleT2Decompiler.__init__(
+ self, localSubrs, globalSubrs, private
+ )
+ for subrs in [localSubrs, globalSubrs]:
+ if subrs and not hasattr(subrs, "_used"):
+ subrs._used = set()
- def __init__(self, localSubrs, globalSubrs, private):
- psCharStrings.SimpleT2Decompiler.__init__(self,
- localSubrs,
- globalSubrs,
- private)
- for subrs in [localSubrs, globalSubrs]:
- if subrs and not hasattr(subrs, "_used"):
- subrs._used = set()
+ def op_callsubr(self, index):
+ self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
+ psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
- def op_callsubr(self, index):
- self.localSubrs._used.add(self.operandStack[-1]+self.localBias)
- psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
+ def op_callgsubr(self, index):
+ self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
+ psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
- def op_callgsubr(self, index):
- self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias)
- psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
-
- class Hints(object):
- def __init__(self):
- # Whether calling this charstring produces any hint stems
- # Note that if a charstring starts with hintmask, it will
- # have has_hint set to True, because it *might* produce an
- # implicit vstem if called under certain conditions.
- self.has_hint = False
- # Index to start at to drop all hints
- self.last_hint = 0
- # Index up to which we know more hints are possible.
- # Only relevant if status is 0 or 1.
- self.last_checked = 0
- # The status means:
- # 0: after dropping hints, this charstring is empty
- # 1: after dropping hints, there may be more hints
- # continuing after this, or there might be
- # other things. Not clear yet.
- # 2: no more hints possible after this charstring
- self.status = 0
- # Has hintmask instructions; not recursive
- self.has_hintmask = False
- # List of indices of calls to empty subroutines to remove.
- self.deletions = []
- pass
-
- def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None):
- self._css = css
- psCharStrings.T2WidthExtractor.__init__(
- self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX)
- self.private = private
-
- def execute(self, charString):
- old_hints = charString._hints if hasattr(charString, '_hints') else None
- charString._hints = self.Hints()
-
- psCharStrings.T2WidthExtractor.execute(self, charString)
-
- hints = charString._hints
-
- if hints.has_hint or hints.has_hintmask:
- self._css.add(charString)
-
- if hints.status != 2:
- # Check from last_check, make sure we didn't have any operators.
- for i in range(hints.last_checked, len(charString.program) - 1):
- if isinstance(charString.program[i], str):
- hints.status = 2
- break
- else:
- hints.status = 1 # There's *something* here
- hints.last_checked = len(charString.program)
-
- if old_hints:
- assert hints.__dict__ == old_hints.__dict__
-
- def op_callsubr(self, index):
- subr = self.localSubrs[self.operandStack[-1]+self.localBias]
- psCharStrings.T2WidthExtractor.op_callsubr(self, index)
- self.processSubr(index, subr)
-
- def op_callgsubr(self, index):
- subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
- psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
- self.processSubr(index, subr)
-
- def op_hstem(self, index):
- psCharStrings.T2WidthExtractor.op_hstem(self, index)
- self.processHint(index)
- def op_vstem(self, index):
- psCharStrings.T2WidthExtractor.op_vstem(self, index)
- self.processHint(index)
- def op_hstemhm(self, index):
- psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
- self.processHint(index)
- def op_vstemhm(self, index):
- psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
- self.processHint(index)
- def op_hintmask(self, index):
- rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
- self.processHintmask(index)
- return rv
- def op_cntrmask(self, index):
- rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
- self.processHintmask(index)
- return rv
-
- def processHintmask(self, index):
- cs = self.callingStack[-1]
- hints = cs._hints
- hints.has_hintmask = True
- if hints.status != 2:
- # Check from last_check, see if we may be an implicit vstem
- for i in range(hints.last_checked, index - 1):
- if isinstance(cs.program[i], str):
- hints.status = 2
- break
- else:
- # We are an implicit vstem
- hints.has_hint = True
- hints.last_hint = index + 1
- hints.status = 0
- hints.last_checked = index + 1
-
- def processHint(self, index):
- cs = self.callingStack[-1]
- hints = cs._hints
- hints.has_hint = True
- hints.last_hint = index
- hints.last_checked = index
-
- def processSubr(self, index, subr):
- cs = self.callingStack[-1]
- hints = cs._hints
- subr_hints = subr._hints
-
- # Check from last_check, make sure we didn't have
- # any operators.
- if hints.status != 2:
- for i in range(hints.last_checked, index - 1):
- if isinstance(cs.program[i], str):
- hints.status = 2
- break
- hints.last_checked = index
-
- if hints.status != 2:
- if subr_hints.has_hint:
- hints.has_hint = True
-
- # Decide where to chop off from
- if subr_hints.status == 0:
- hints.last_hint = index
- else:
- hints.last_hint = index - 2 # Leave the subr call in
-
- elif subr_hints.status == 0:
- hints.deletions.append(index)
-
- hints.status = max(hints.status, subr_hints.status)
-
-
-@_add_method(ttLib.getTableClass('CFF '))
+ class Hints(object):
+ def __init__(self):
+ # Whether calling this charstring produces any hint stems
+ # Note that if a charstring starts with hintmask, it will
+ # have has_hint set to True, because it *might* produce an
+ # implicit vstem if called under certain conditions.
+ self.has_hint = False
+ # Index to start at to drop all hints
+ self.last_hint = 0
+ # Index up to which we know more hints are possible.
+ # Only relevant if status is 0 or 1.
+ self.last_checked = 0
+ # The status means:
+ # 0: after dropping hints, this charstring is empty
+ # 1: after dropping hints, there may be more hints
+ # continuing after this, or there might be
+ # other things. Not clear yet.
+ # 2: no more hints possible after this charstring
+ self.status = 0
+ # Has hintmask instructions; not recursive
+ self.has_hintmask = False
+ # List of indices of calls to empty subroutines to remove.
+ self.deletions = []
+
+ pass
+
+ def __init__(
+ self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
+ ):
+ self._css = css
+ psCharStrings.T2WidthExtractor.__init__(
+ self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
+ )
+ self.private = private
+
+ def execute(self, charString):
+ old_hints = charString._hints if hasattr(charString, "_hints") else None
+ charString._hints = self.Hints()
+
+ psCharStrings.T2WidthExtractor.execute(self, charString)
+
+ hints = charString._hints
+
+ if hints.has_hint or hints.has_hintmask:
+ self._css.add(charString)
+
+ if hints.status != 2:
+ # Check from last_check, make sure we didn't have any operators.
+ for i in range(hints.last_checked, len(charString.program) - 1):
+ if isinstance(charString.program[i], str):
+ hints.status = 2
+ break
+ else:
+ hints.status = 1 # There's *something* here
+ hints.last_checked = len(charString.program)
+
+ if old_hints:
+ assert hints.__dict__ == old_hints.__dict__
+
+ def op_callsubr(self, index):
+ subr = self.localSubrs[self.operandStack[-1] + self.localBias]
+ psCharStrings.T2WidthExtractor.op_callsubr(self, index)
+ self.processSubr(index, subr)
+
+ def op_callgsubr(self, index):
+ subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
+ psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
+ self.processSubr(index, subr)
+
+ def op_hstem(self, index):
+ psCharStrings.T2WidthExtractor.op_hstem(self, index)
+ self.processHint(index)
+
+ def op_vstem(self, index):
+ psCharStrings.T2WidthExtractor.op_vstem(self, index)
+ self.processHint(index)
+
+ def op_hstemhm(self, index):
+ psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
+ self.processHint(index)
+
+ def op_vstemhm(self, index):
+ psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
+ self.processHint(index)
+
+ def op_hintmask(self, index):
+ rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
+ self.processHintmask(index)
+ return rv
+
+ def op_cntrmask(self, index):
+ rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
+ self.processHintmask(index)
+ return rv
+
+ def processHintmask(self, index):
+ cs = self.callingStack[-1]
+ hints = cs._hints
+ hints.has_hintmask = True
+ if hints.status != 2:
+ # Check from last_check, see if we may be an implicit vstem
+ for i in range(hints.last_checked, index - 1):
+ if isinstance(cs.program[i], str):
+ hints.status = 2
+ break
+ else:
+ # We are an implicit vstem
+ hints.has_hint = True
+ hints.last_hint = index + 1
+ hints.status = 0
+ hints.last_checked = index + 1
+
+ def processHint(self, index):
+ cs = self.callingStack[-1]
+ hints = cs._hints
+ hints.has_hint = True
+ hints.last_hint = index
+ hints.last_checked = index
+
+ def processSubr(self, index, subr):
+ cs = self.callingStack[-1]
+ hints = cs._hints
+ subr_hints = subr._hints
+
+ # Check from last_check, make sure we didn't have
+ # any operators.
+ if hints.status != 2:
+ for i in range(hints.last_checked, index - 1):
+ if isinstance(cs.program[i], str):
+ hints.status = 2
+ break
+ hints.last_checked = index
+
+ if hints.status != 2:
+ if subr_hints.has_hint:
+ hints.has_hint = True
+
+ # Decide where to chop off from
+ if subr_hints.status == 0:
+ hints.last_hint = index
+ else:
+ hints.last_hint = index - 2 # Leave the subr call in
+
+ elif subr_hints.status == 0:
+ hints.deletions.append(index)
+
+ hints.status = max(hints.status, subr_hints.status)
+
+
+@_add_method(ttLib.getTableClass("CFF "))
def prune_post_subset(self, ttfFont, options):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
-
- # Drop unused FontDictionaries
- if hasattr(font, "FDSelect"):
- sel = font.FDSelect
- indices = _uniq_sort(sel.gidArray)
- sel.gidArray = [indices.index (ss) for ss in sel.gidArray]
- arr = font.FDArray
- arr.items = [arr[i] for i in indices]
- del arr.file, arr.offsets
-
- # Desubroutinize if asked for
- if options.desubroutinize:
- cff.desubroutinize()
-
- # Drop hints if not needed
- if not options.hinting:
- self.remove_hints()
- elif not options.desubroutinize:
- self.remove_unused_subroutines()
- return True
+ cff = self.cff
+ for fontname in cff.keys():
+ font = cff[fontname]
+ cs = font.CharStrings
+
+ # Drop unused FontDictionaries
+ if hasattr(font, "FDSelect"):
+ sel = font.FDSelect
+ indices = _uniq_sort(sel.gidArray)
+ sel.gidArray = [indices.index(ss) for ss in sel.gidArray]
+ arr = font.FDArray
+ arr.items = [arr[i] for i in indices]
+ del arr.file, arr.offsets
+
+ # Desubroutinize if asked for
+ if options.desubroutinize:
+ cff.desubroutinize()
+
+ # Drop hints if not needed
+ if not options.hinting:
+ self.remove_hints()
+ elif not options.desubroutinize:
+ self.remove_unused_subroutines()
+ return True
def _delete_empty_subrs(private_dict):
- if hasattr(private_dict, 'Subrs') and not private_dict.Subrs:
- if 'Subrs' in private_dict.rawDict:
- del private_dict.rawDict['Subrs']
- del private_dict.Subrs
+ if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
+ if "Subrs" in private_dict.rawDict:
+ del private_dict.rawDict["Subrs"]
+ del private_dict.Subrs
-@deprecateFunction("use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning)
-@_add_method(ttLib.getTableClass('CFF '))
+@deprecateFunction(
+ "use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning
+)
+@_add_method(ttLib.getTableClass("CFF "))
def desubroutinize(self):
- self.cff.desubroutinize()
+ self.cff.desubroutinize()
-@_add_method(ttLib.getTableClass('CFF '))
+@_add_method(ttLib.getTableClass("CFF "))
def remove_hints(self):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
- # This can be tricky, but doesn't have to. What we do is:
- #
- # - Run all used glyph charstrings and recurse into subroutines,
- # - For each charstring (including subroutines), if it has any
- # of the hint stem operators, we mark it as such.
- # Upon returning, for each charstring we note all the
- # subroutine calls it makes that (recursively) contain a stem,
- # - Dropping hinting then consists of the following two ops:
- # * Drop the piece of the program in each charstring before the
- # last call to a stem op or a stem-calling subroutine,
- # * Drop all hintmask operations.
- # - It's trickier... A hintmask right after hints and a few numbers
- # will act as an implicit vstemhm. As such, we track whether
- # we have seen any non-hint operators so far and do the right
- # thing, recursively... Good luck understanding that :(
- css = set()
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- c.decompile()
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs,
- c.private.nominalWidthX,
- c.private.defaultWidthX,
- c.private)
- decompiler.execute(c)
- c.width = decompiler.width
- for charstring in css:
- charstring.drop_hints()
- del css
-
- # Drop font-wide hinting values
- all_privs = []
- if hasattr(font, 'FDArray'):
- all_privs.extend(fd.Private for fd in font.FDArray)
- else:
- all_privs.append(font.Private)
- for priv in all_privs:
- for k in ['BlueValues', 'OtherBlues',
- 'FamilyBlues', 'FamilyOtherBlues',
- 'BlueScale', 'BlueShift', 'BlueFuzz',
- 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW',
- 'ForceBold', 'LanguageGroup', 'ExpansionFactor']:
- if hasattr(priv, k):
- setattr(priv, k, None)
- self.remove_unused_subroutines()
-
-
-@_add_method(ttLib.getTableClass('CFF '))
+ cff = self.cff
+ for fontname in cff.keys():
+ font = cff[fontname]
+ cs = font.CharStrings
+ # This can be tricky, but doesn't have to. What we do is:
+ #
+ # - Run all used glyph charstrings and recurse into subroutines,
+ # - For each charstring (including subroutines), if it has any
+ # of the hint stem operators, we mark it as such.
+ # Upon returning, for each charstring we note all the
+ # subroutine calls it makes that (recursively) contain a stem,
+ # - Dropping hinting then consists of the following two ops:
+ # * Drop the piece of the program in each charstring before the
+ # last call to a stem op or a stem-calling subroutine,
+ # * Drop all hintmask operations.
+ # - It's trickier... A hintmask right after hints and a few numbers
+ # will act as an implicit vstemhm. As such, we track whether
+ # we have seen any non-hint operators so far and do the right
+ # thing, recursively... Good luck understanding that :(
+ css = set()
+ for g in font.charset:
+ c, _ = cs.getItemAndSelector(g)
+ c.decompile()
+ subrs = getattr(c.private, "Subrs", [])
+ decompiler = _DehintingT2Decompiler(
+ css,
+ subrs,
+ c.globalSubrs,
+ c.private.nominalWidthX,
+ c.private.defaultWidthX,
+ c.private,
+ )
+ decompiler.execute(c)
+ c.width = decompiler.width
+ for charstring in css:
+ charstring.drop_hints()
+ del css
+
+ # Drop font-wide hinting values
+ all_privs = []
+ if hasattr(font, "FDArray"):
+ all_privs.extend(fd.Private for fd in font.FDArray)
+ else:
+ all_privs.append(font.Private)
+ for priv in all_privs:
+ for k in [
+ "BlueValues",
+ "OtherBlues",
+ "FamilyBlues",
+ "FamilyOtherBlues",
+ "BlueScale",
+ "BlueShift",
+ "BlueFuzz",
+ "StemSnapH",
+ "StemSnapV",
+ "StdHW",
+ "StdVW",
+ "ForceBold",
+ "LanguageGroup",
+ "ExpansionFactor",
+ ]:
+ if hasattr(priv, k):
+ setattr(priv, k, None)
+ self.remove_unused_subroutines()
+
+
+@_add_method(ttLib.getTableClass("CFF "))
def remove_unused_subroutines(self):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
- # Renumber subroutines to remove unused ones
-
- # Mark all used subroutines
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
- decompiler.execute(c)
-
- all_subrs = [font.GlobalSubrs]
- if hasattr(font, 'FDArray'):
- all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
- elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
- all_subrs.append(font.Private.Subrs)
-
- subrs = set(subrs) # Remove duplicates
-
- # Prepare
- for subrs in all_subrs:
- if not hasattr(subrs, '_used'):
- subrs._used = set()
- subrs._used = _uniq_sort(subrs._used)
- subrs._old_bias = psCharStrings.calcSubrBias(subrs)
- subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)
-
- # Renumber glyph charstrings
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- subrs = getattr(c.private, "Subrs", [])
- c.subset_subroutines (subrs, font.GlobalSubrs)
-
- # Renumber subroutines themselves
- for subrs in all_subrs:
- if subrs == font.GlobalSubrs:
- if not hasattr(font, 'FDArray') and hasattr(font.Private, 'Subrs'):
- local_subrs = font.Private.Subrs
- else:
- local_subrs = []
- else:
- local_subrs = subrs
-
- subrs.items = [subrs.items[i] for i in subrs._used]
- if hasattr(subrs, 'file'):
- del subrs.file
- if hasattr(subrs, 'offsets'):
- del subrs.offsets
-
- for subr in subrs.items:
- subr.subset_subroutines (local_subrs, font.GlobalSubrs)
-
- # Delete local SubrsIndex if empty
- if hasattr(font, 'FDArray'):
- for fd in font.FDArray:
- _delete_empty_subrs(fd.Private)
- else:
- _delete_empty_subrs(font.Private)
-
- # Cleanup
- for subrs in all_subrs:
- del subrs._used, subrs._old_bias, subrs._new_bias
+ cff = self.cff
+ for fontname in cff.keys():
+ font = cff[fontname]
+ cs = font.CharStrings
+ # Renumber subroutines to remove unused ones
+
+ # Mark all used subroutines
+ for g in font.charset:
+ c, _ = cs.getItemAndSelector(g)
+ subrs = getattr(c.private, "Subrs", [])
+ decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
+ decompiler.execute(c)
+
+ all_subrs = [font.GlobalSubrs]
+ if hasattr(font, "FDArray"):
+ all_subrs.extend(
+ fd.Private.Subrs
+ for fd in font.FDArray
+ if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
+ )
+ elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
+ all_subrs.append(font.Private.Subrs)
+
+ subrs = set(subrs) # Remove duplicates
+
+ # Prepare
+ for subrs in all_subrs:
+ if not hasattr(subrs, "_used"):
+ subrs._used = set()
+ subrs._used = _uniq_sort(subrs._used)
+ subrs._old_bias = psCharStrings.calcSubrBias(subrs)
+ subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)
+
+ # Renumber glyph charstrings
+ for g in font.charset:
+ c, _ = cs.getItemAndSelector(g)
+ subrs = getattr(c.private, "Subrs", [])
+ c.subset_subroutines(subrs, font.GlobalSubrs)
+
+ # Renumber subroutines themselves
+ for subrs in all_subrs:
+ if subrs == font.GlobalSubrs:
+ if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
+ local_subrs = font.Private.Subrs
+ else:
+ local_subrs = []
+ else:
+ local_subrs = subrs
+
+ subrs.items = [subrs.items[i] for i in subrs._used]
+ if hasattr(subrs, "file"):
+ del subrs.file
+ if hasattr(subrs, "offsets"):
+ del subrs.offsets
+
+ for subr in subrs.items:
+ subr.subset_subroutines(local_subrs, font.GlobalSubrs)
+
+ # Delete local SubrsIndex if empty
+ if hasattr(font, "FDArray"):
+ for fd in font.FDArray:
+ _delete_empty_subrs(fd.Private)
+ else:
+ _delete_empty_subrs(font.Private)
+
+ # Cleanup
+ for subrs in all_subrs:
+ del subrs._used, subrs._old_bias, subrs._new_bias
diff --git a/Lib/fontTools/subset/svg.py b/Lib/fontTools/subset/svg.py
index 4ed2cbd2..329c68fb 100644
--- a/Lib/fontTools/subset/svg.py
+++ b/Lib/fontTools/subset/svg.py
@@ -77,7 +77,7 @@ def iter_referenced_ids(tree: etree.Element) -> Iterator[str]:
attrs = el.attrib
if "style" in attrs:
- attrs = {**attrs, **parse_css_declarations(el.attrib["style"])}
+ attrs = {**dict(attrs), **parse_css_declarations(el.attrib["style"])}
for attr in ("fill", "clip-path"):
if attr in attrs:
value = attrs[attr]
@@ -204,7 +204,6 @@ def subset_glyphs(self, s) -> bool:
new_docs: List[SVGDocument] = []
for doc in self.docList:
-
glyphs = {
glyph_order[i] for i in range(doc.startGlyphID, doc.endGlyphID + 1)
}.intersection(s.glyphs)
@@ -225,6 +224,9 @@ def subset_glyphs(self, s) -> bool:
# ignore blank text as it's not meaningful in OT-SVG; it also prevents
# dangling tail text after removing an element when pretty_print=True
remove_blank_text=True,
+ # don't replace entities; we don't expect any in OT-SVG and they may
+ # be abused for XXE attacks
+ resolve_entities=False,
),
)
diff --git a/Lib/fontTools/svgLib/path/__init__.py b/Lib/fontTools/svgLib/path/__init__.py
index fbddeeab..742bc64c 100644
--- a/Lib/fontTools/svgLib/path/__init__.py
+++ b/Lib/fontTools/svgLib/path/__init__.py
@@ -9,7 +9,7 @@ __all__ = [tostr(s) for s in ("SVGPath", "parse_path")]
class SVGPath(object):
- """ Parse SVG ``path`` elements from a file or string, and draw them
+ """Parse SVG ``path`` elements from a file or string, and draw them
onto a glyph object that supports the FontTools Pen protocol.
For example, reading from an SVG file and drawing to a Defcon Glyph:
diff --git a/Lib/fontTools/svgLib/path/arc.py b/Lib/fontTools/svgLib/path/arc.py
index 31810712..3e0a211e 100644
--- a/Lib/fontTools/svgLib/path/arc.py
+++ b/Lib/fontTools/svgLib/path/arc.py
@@ -19,7 +19,6 @@ def _map_point(matrix, pt):
class EllipticalArc(object):
-
def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point):
self.current_point = current_point
self.rx = rx
diff --git a/Lib/fontTools/svgLib/path/parser.py b/Lib/fontTools/svgLib/path/parser.py
index e594b2b8..fa534745 100644
--- a/Lib/fontTools/svgLib/path/parser.py
+++ b/Lib/fontTools/svgLib/path/parser.py
@@ -11,9 +11,9 @@ from .arc import EllipticalArc
import re
-COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
+COMMANDS = set("MmZzLlHhVvCcSsQqTtAa")
ARC_COMMANDS = set("Aa")
-UPPERCASE = set('MZLHVCSQTA')
+UPPERCASE = set("MZLHVCSQTA")
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
@@ -93,7 +93,7 @@ def _tokenize_arc_arguments(arcdef):
def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
- """ Parse SVG path definition (i.e. "d" attribute of <path> elements)
+ """Parse SVG path definition (i.e. "d" attribute of <path> elements)
and call a 'pen' object's moveTo, lineTo, curveTo, qCurveTo and closePath
methods.
@@ -125,7 +125,6 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
have_arcTo = hasattr(pen, "arcTo")
while elements:
-
if elements[-1] in COMMANDS:
# New command.
last_command = command # Used by S and T
@@ -136,11 +135,13 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
# If this element starts with numbers, it is an implicit command
# and we don't change the command. Check that it's allowed:
if command is None:
- raise ValueError("Unallowed implicit command in %s, position %s" % (
- pathdef, len(pathdef.split()) - len(elements)))
+ raise ValueError(
+ "Unallowed implicit command in %s, position %s"
+ % (pathdef, len(pathdef.split()) - len(elements))
+ )
last_command = command # Used by S and T
- if command == 'M':
+ if command == "M":
# Moveto command.
x = elements.pop()
y = elements.pop()
@@ -164,9 +165,9 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
# Implicit moveto commands are treated as lineto commands.
# So we set command to lineto here, in case there are
# further implicit commands after this moveto.
- command = 'L'
+ command = "L"
- elif command == 'Z':
+ elif command == "Z":
# Close path
if current_pos != start_pos:
pen.lineTo((start_pos.real, start_pos.imag))
@@ -175,7 +176,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
start_pos = None
command = None # You can't have implicit commands after closing.
- elif command == 'L':
+ elif command == "L":
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
@@ -184,7 +185,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
- elif command == 'H':
+ elif command == "H":
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
@@ -192,7 +193,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
- elif command == 'V':
+ elif command == "V":
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
@@ -200,7 +201,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
- elif command == 'C':
+ elif command == "C":
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
@@ -210,17 +211,19 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
control2 += current_pos
end += current_pos
- pen.curveTo((control1.real, control1.imag),
- (control2.real, control2.imag),
- (end.real, end.imag))
+ pen.curveTo(
+ (control1.real, control1.imag),
+ (control2.real, control2.imag),
+ (end.real, end.imag),
+ )
current_pos = end
last_control = control2
- elif command == 'S':
+ elif command == "S":
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
- if last_command not in 'CS':
+ if last_command not in "CS":
# If there is no previous command or if the previous command
# was not an C, c, S or s, assume the first control point is
# coincident with the current point.
@@ -238,13 +241,15 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
control2 += current_pos
end += current_pos
- pen.curveTo((control1.real, control1.imag),
- (control2.real, control2.imag),
- (end.real, end.imag))
+ pen.curveTo(
+ (control1.real, control1.imag),
+ (control2.real, control2.imag),
+ (end.real, end.imag),
+ )
current_pos = end
last_control = control2
- elif command == 'Q':
+ elif command == "Q":
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
@@ -256,11 +261,11 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
current_pos = end
last_control = control
- elif command == 'T':
+ elif command == "T":
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
- if last_command not in 'QT':
+ if last_command not in "QT":
# If there is no previous command or if the previous command
# was not an Q, q, T or t, assume the first control point is
# coincident with the current point.
@@ -280,7 +285,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
current_pos = end
last_control = control
- elif command == 'A':
+ elif command == "A":
rx = abs(float(elements.pop()))
ry = abs(float(elements.pop()))
rotation = float(elements.pop())
diff --git a/Lib/fontTools/svgLib/path/shapes.py b/Lib/fontTools/svgLib/path/shapes.py
index 4cc633ad..3f22e6c6 100644
--- a/Lib/fontTools/svgLib/path/shapes.py
+++ b/Lib/fontTools/svgLib/path/shapes.py
@@ -5,18 +5,18 @@ def _prefer_non_zero(*args):
for arg in args:
if arg != 0:
return arg
- return 0.
+ return 0.0
def _ntos(n):
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
- return ('%.3f' % n).rstrip('0').rstrip('.')
+ return ("%.3f" % n).rstrip("0").rstrip(".")
def _strip_xml_ns(tag):
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
- return tag.split('}', 1)[1] if '}' in tag else tag
+ return tag.split("}", 1)[1] if "}" in tag else tag
def _transform(raw_value):
@@ -24,12 +24,12 @@ def _transform(raw_value):
# No other transform functions are supported at the moment.
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
# start simple: if you aren't exactly matrix(...) then no love
- match = re.match(r'matrix\((.*)\)', raw_value)
+ match = re.match(r"matrix\((.*)\)", raw_value)
if not match:
raise NotImplementedError
- matrix = tuple(float(p) for p in re.split(r'\s+|,', match.group(1)))
+ matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1)))
if len(matrix) != 6:
- raise ValueError('wrong # of terms in %s' % raw_value)
+ raise ValueError("wrong # of terms in %s" % raw_value)
return matrix
@@ -38,81 +38,83 @@ class PathBuilder(object):
self.paths = []
self.transforms = []
- def _start_path(self, initial_path=''):
+ def _start_path(self, initial_path=""):
self.paths.append(initial_path)
self.transforms.append(None)
def _end_path(self):
- self._add('z')
+ self._add("z")
def _add(self, path_snippet):
path = self.paths[-1]
if path:
- path += ' ' + path_snippet
+ path += " " + path_snippet
else:
path = path_snippet
self.paths[-1] = path
def _move(self, c, x, y):
- self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
+ self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def M(self, x, y):
- self._move('M', x, y)
+ self._move("M", x, y)
def m(self, x, y):
- self._move('m', x, y)
+ self._move("m", x, y)
def _arc(self, c, rx, ry, x, y, large_arc):
- self._add('%s%s,%s 0 %d 1 %s,%s' % (c, _ntos(rx), _ntos(ry), large_arc,
- _ntos(x), _ntos(y)))
+ self._add(
+ "%s%s,%s 0 %d 1 %s,%s"
+ % (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y))
+ )
def A(self, rx, ry, x, y, large_arc=0):
- self._arc('A', rx, ry, x, y, large_arc)
+ self._arc("A", rx, ry, x, y, large_arc)
def a(self, rx, ry, x, y, large_arc=0):
- self._arc('a', rx, ry, x, y, large_arc)
+ self._arc("a", rx, ry, x, y, large_arc)
def _vhline(self, c, x):
- self._add('%s%s' % (c, _ntos(x)))
+ self._add("%s%s" % (c, _ntos(x)))
def H(self, x):
- self._vhline('H', x)
+ self._vhline("H", x)
def h(self, x):
- self._vhline('h', x)
+ self._vhline("h", x)
def V(self, y):
- self._vhline('V', y)
+ self._vhline("V", y)
def v(self, y):
- self._vhline('v', y)
+ self._vhline("v", y)
def _line(self, c, x, y):
- self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
+ self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def L(self, x, y):
- self._line('L', x, y)
+ self._line("L", x, y)
def l(self, x, y):
- self._line('l', x, y)
+ self._line("l", x, y)
def _parse_line(self, line):
- x1 = float(line.attrib.get('x1', 0))
- y1 = float(line.attrib.get('y1', 0))
- x2 = float(line.attrib.get('x2', 0))
- y2 = float(line.attrib.get('y2', 0))
+ x1 = float(line.attrib.get("x1", 0))
+ y1 = float(line.attrib.get("y1", 0))
+ x2 = float(line.attrib.get("x2", 0))
+ y2 = float(line.attrib.get("y2", 0))
self._start_path()
self.M(x1, y1)
self.L(x2, y2)
def _parse_rect(self, rect):
- x = float(rect.attrib.get('x', 0))
- y = float(rect.attrib.get('y', 0))
- w = float(rect.attrib.get('width'))
- h = float(rect.attrib.get('height'))
- rx = float(rect.attrib.get('rx', 0))
- ry = float(rect.attrib.get('ry', 0))
+ x = float(rect.attrib.get("x", 0))
+ y = float(rect.attrib.get("y", 0))
+ w = float(rect.attrib.get("width"))
+ h = float(rect.attrib.get("height"))
+ rx = float(rect.attrib.get("rx", 0))
+ ry = float(rect.attrib.get("ry", 0))
rx = _prefer_non_zero(rx, ry)
ry = _prefer_non_zero(ry, rx)
@@ -135,22 +137,22 @@ class PathBuilder(object):
self._end_path()
def _parse_path(self, path):
- if 'd' in path.attrib:
- self._start_path(initial_path=path.attrib['d'])
+ if "d" in path.attrib:
+ self._start_path(initial_path=path.attrib["d"])
def _parse_polygon(self, poly):
- if 'points' in poly.attrib:
- self._start_path('M' + poly.attrib['points'])
+ if "points" in poly.attrib:
+ self._start_path("M" + poly.attrib["points"])
self._end_path()
def _parse_polyline(self, poly):
- if 'points' in poly.attrib:
- self._start_path('M' + poly.attrib['points'])
+ if "points" in poly.attrib:
+ self._start_path("M" + poly.attrib["points"])
def _parse_circle(self, circle):
- cx = float(circle.attrib.get('cx', 0))
- cy = float(circle.attrib.get('cy', 0))
- r = float(circle.attrib.get('r'))
+ cx = float(circle.attrib.get("cx", 0))
+ cy = float(circle.attrib.get("cy", 0))
+ r = float(circle.attrib.get("r"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
@@ -159,10 +161,10 @@ class PathBuilder(object):
self.A(r, r, cx - r, cy, large_arc=1)
def _parse_ellipse(self, ellipse):
- cx = float(ellipse.attrib.get('cx', 0))
- cy = float(ellipse.attrib.get('cy', 0))
- rx = float(ellipse.attrib.get('rx'))
- ry = float(ellipse.attrib.get('ry'))
+ cx = float(ellipse.attrib.get("cx", 0))
+ cy = float(ellipse.attrib.get("cy", 0))
+ rx = float(ellipse.attrib.get("rx"))
+ ry = float(ellipse.attrib.get("ry"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
@@ -172,10 +174,10 @@ class PathBuilder(object):
def add_path_from_element(self, el):
tag = _strip_xml_ns(el.tag)
- parse_fn = getattr(self, '_parse_%s' % tag.lower(), None)
+ parse_fn = getattr(self, "_parse_%s" % tag.lower(), None)
if not callable(parse_fn):
return False
parse_fn(el)
- if 'transform' in el.attrib:
- self.transforms[-1] = _transform(el.attrib['transform'])
+ if "transform" in el.attrib:
+ self.transforms[-1] = _transform(el.attrib["transform"])
return True
diff --git a/Lib/fontTools/t1Lib/__init__.py b/Lib/fontTools/t1Lib/__init__.py
index a74f9a47..a64f7809 100644
--- a/Lib/fontTools/t1Lib/__init__.py
+++ b/Lib/fontTools/t1Lib/__init__.py
@@ -1,4 +1,4 @@
-"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts (Python2 only)
+"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts.
Functions for reading and writing raw Type 1 data:
@@ -19,7 +19,11 @@ import fontTools
from fontTools.misc import eexec
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes
-from fontTools.misc.psOperators import _type1_pre_eexec_order, _type1_fontinfo_order, _type1_post_eexec_order
+from fontTools.misc.psOperators import (
+ _type1_pre_eexec_order,
+ _type1_fontinfo_order,
+ _type1_post_eexec_order,
+)
from fontTools.encodings.StandardEncoding import StandardEncoding
import os
import re
@@ -30,260 +34,307 @@ DEBUG = 0
try:
- try:
- from Carbon import Res
- except ImportError:
- import Res # MacPython < 2.2
+ try:
+ from Carbon import Res
+ except ImportError:
+ import Res # MacPython < 2.2
except ImportError:
- haveMacSupport = 0
+ haveMacSupport = 0
else:
- haveMacSupport = 1
+ haveMacSupport = 1
-class T1Error(Exception): pass
+class T1Error(Exception):
+ pass
class T1Font(object):
- """Type 1 font class.
-
- Uses a minimal interpeter that supports just about enough PS to parse
- Type 1 fonts.
- """
-
- def __init__(self, path, encoding="ascii", kind=None):
- if kind is None:
- self.data, _ = read(path)
- elif kind == "LWFN":
- self.data = readLWFN(path)
- elif kind == "PFB":
- self.data = readPFB(path)
- elif kind == "OTHER":
- self.data = readOther(path)
- else:
- raise ValueError(kind)
- self.encoding = encoding
-
- def saveAs(self, path, type, dohex=False):
- write(path, self.getData(), type, dohex)
-
- def getData(self):
- if not hasattr(self, "data"):
- self.data = self.createData()
- return self.data
-
- def getGlyphSet(self):
- """Return a generic GlyphSet, which is a dict-like object
- mapping glyph names to glyph objects. The returned glyph objects
- have a .draw() method that supports the Pen protocol, and will
- have an attribute named 'width', but only *after* the .draw() method
- has been called.
-
- In the case of Type 1, the GlyphSet is simply the CharStrings dict.
- """
- return self["CharStrings"]
-
- def __getitem__(self, key):
- if not hasattr(self, "font"):
- self.parse()
- return self.font[key]
-
- def parse(self):
- from fontTools.misc import psLib
- from fontTools.misc import psCharStrings
- self.font = psLib.suckfont(self.data, self.encoding)
- charStrings = self.font["CharStrings"]
- lenIV = self.font["Private"].get("lenIV", 4)
- assert lenIV >= 0
- subrs = self.font["Private"]["Subrs"]
- for glyphName, charString in charStrings.items():
- charString, R = eexec.decrypt(charString, 4330)
- charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:],
- subrs=subrs)
- for i in range(len(subrs)):
- charString, R = eexec.decrypt(subrs[i], 4330)
- subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
- del self.data
-
- def createData(self):
- sf = self.font
-
- eexec_began = False
- eexec_dict = {}
- lines = []
- lines.extend([self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
- self._tobytes(f"%t1Font: ({fontTools.version})"),
- self._tobytes(f"%%BeginResource: font {sf['FontName']}")])
- # follow t1write.c:writeRegNameKeyedFont
- size = 3 # Headroom for new key addition
- size += 1 # FontMatrix is always counted
- size += 1 + 1 # Private, CharStings
- for key in font_dictionary_keys:
- size += int(key in sf)
- lines.append(self._tobytes(f"{size} dict dup begin"))
-
- for key, value in sf.items():
- if eexec_began:
- eexec_dict[key] = value
- continue
-
- if key == "FontInfo":
- fi = sf["FontInfo"]
- # follow t1write.c:writeFontInfoDict
- size = 3 # Headroom for new key addition
- for subkey in FontInfo_dictionary_keys:
- size += int(subkey in fi)
- lines.append(self._tobytes(f"/FontInfo {size} dict dup begin"))
-
- for subkey, subvalue in fi.items():
- lines.extend(self._make_lines(subkey, subvalue))
- lines.append(b"end def")
- elif key in _type1_post_eexec_order: # usually 'Private'
- eexec_dict[key] = value
- eexec_began = True
- else:
- lines.extend(self._make_lines(key, value))
- lines.append(b"end")
- eexec_portion = self.encode_eexec(eexec_dict)
- lines.append(bytesjoin([b"currentfile eexec ", eexec_portion]))
-
- for _ in range(8):
- lines.append(self._tobytes("0"*64))
- lines.extend([b"cleartomark",
- b"%%EndResource",
- b"%%EOF"])
-
- data = bytesjoin(lines, "\n")
- return data
-
- def encode_eexec(self, eexec_dict):
- lines = []
-
- # '-|', '|-', '|'
- RD_key, ND_key, NP_key = None, None, None
-
- for key, value in eexec_dict.items():
- if key == "Private":
- pr = eexec_dict["Private"]
- # follow t1write.c:writePrivateDict
- size = 3 # for RD, ND, NP
- for subkey in Private_dictionary_keys:
- size += int(subkey in pr)
- lines.append(b"dup /Private")
- lines.append(self._tobytes(f"{size} dict dup begin"))
- for subkey, subvalue in pr.items():
- if not RD_key and subvalue == RD_value:
- RD_key = subkey
- elif not ND_key and subvalue == ND_value:
- ND_key = subkey
- elif not NP_key and subvalue == PD_value:
- NP_key = subkey
-
- if subkey == 'OtherSubrs':
- # XXX: assert that no flex hint is used
- lines.append(self._tobytes(hintothers))
- elif subkey == "Subrs":
- # XXX: standard Subrs only
- lines.append(b"/Subrs 5 array")
- for i, subr_bin in enumerate(std_subrs):
- encrypted_subr, R = eexec.encrypt(bytesjoin([char_IV, subr_bin]), 4330)
- lines.append(bytesjoin([self._tobytes(f"dup {i} {len(encrypted_subr)} {RD_key} "), encrypted_subr, self._tobytes(f" {NP_key}")]))
- lines.append(b'def')
-
- lines.append(b"put")
- else:
- lines.extend(self._make_lines(subkey, subvalue))
- elif key == "CharStrings":
- lines.append(b"dup /CharStrings")
- lines.append(self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin"))
- for glyph_name, char_bin in eexec_dict["CharStrings"].items():
- char_bin.compile()
- encrypted_char, R = eexec.encrypt(bytesjoin([char_IV, char_bin.bytecode]), 4330)
- lines.append(bytesjoin([self._tobytes(f"/{glyph_name} {len(encrypted_char)} {RD_key} "), encrypted_char, self._tobytes(f" {ND_key}")]))
- lines.append(b"end put")
- else:
- lines.extend(self._make_lines(key, value))
-
- lines.extend([b"end",
- b"dup /FontName get exch definefont pop",
- b"mark",
- b"currentfile closefile\n"])
-
- eexec_portion = bytesjoin(lines, "\n")
- encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
-
- return encrypted_eexec
-
- def _make_lines(self, key, value):
- if key == "FontName":
- return [self._tobytes(f"/{key} /{value} def")]
- if key in ["isFixedPitch", "ForceBold", "RndStemUp"]:
- return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
- elif key == "Encoding":
- if value == StandardEncoding:
- return [self._tobytes(f"/{key} StandardEncoding def")]
- else:
- # follow fontTools.misc.psOperators._type1_Encoding_repr
- lines = []
- lines.append(b"/Encoding 256 array")
- lines.append(b"0 1 255 {1 index exch /.notdef put} for")
- for i in range(256):
- name = value[i]
- if name != ".notdef":
- lines.append(self._tobytes(f"dup {i} /{name} put"))
- lines.append(b"def")
- return lines
- if isinstance(value, str):
- return [self._tobytes(f"/{key} ({value}) def")]
- elif isinstance(value, bool):
- return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
- elif isinstance(value, list):
- return [self._tobytes(f"/{key} [{' '.join(str(v) for v in value)}] def")]
- elif isinstance(value, tuple):
- return [self._tobytes(f"/{key} {{{' '.join(str(v) for v in value)}}} def")]
- else:
- return [self._tobytes(f"/{key} {value} def")]
-
- def _tobytes(self, s, errors="strict"):
- return tobytes(s, self.encoding, errors)
+ """Type 1 font class.
+
+ Uses a minimal interpeter that supports just about enough PS to parse
+ Type 1 fonts.
+ """
+
+ def __init__(self, path, encoding="ascii", kind=None):
+ if kind is None:
+ self.data, _ = read(path)
+ elif kind == "LWFN":
+ self.data = readLWFN(path)
+ elif kind == "PFB":
+ self.data = readPFB(path)
+ elif kind == "OTHER":
+ self.data = readOther(path)
+ else:
+ raise ValueError(kind)
+ self.encoding = encoding
+
+ def saveAs(self, path, type, dohex=False):
+ write(path, self.getData(), type, dohex)
+
+ def getData(self):
+ if not hasattr(self, "data"):
+ self.data = self.createData()
+ return self.data
+
+ def getGlyphSet(self):
+ """Return a generic GlyphSet, which is a dict-like object
+ mapping glyph names to glyph objects. The returned glyph objects
+ have a .draw() method that supports the Pen protocol, and will
+ have an attribute named 'width', but only *after* the .draw() method
+ has been called.
+
+ In the case of Type 1, the GlyphSet is simply the CharStrings dict.
+ """
+ return self["CharStrings"]
+
+ def __getitem__(self, key):
+ if not hasattr(self, "font"):
+ self.parse()
+ return self.font[key]
+
+ def parse(self):
+ from fontTools.misc import psLib
+ from fontTools.misc import psCharStrings
+
+ self.font = psLib.suckfont(self.data, self.encoding)
+ charStrings = self.font["CharStrings"]
+ lenIV = self.font["Private"].get("lenIV", 4)
+ assert lenIV >= 0
+ subrs = self.font["Private"]["Subrs"]
+ for glyphName, charString in charStrings.items():
+ charString, R = eexec.decrypt(charString, 4330)
+ charStrings[glyphName] = psCharStrings.T1CharString(
+ charString[lenIV:], subrs=subrs
+ )
+ for i in range(len(subrs)):
+ charString, R = eexec.decrypt(subrs[i], 4330)
+ subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
+ del self.data
+
+ def createData(self):
+ sf = self.font
+
+ eexec_began = False
+ eexec_dict = {}
+ lines = []
+ lines.extend(
+ [
+ self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
+ self._tobytes(f"%t1Font: ({fontTools.version})"),
+ self._tobytes(f"%%BeginResource: font {sf['FontName']}"),
+ ]
+ )
+ # follow t1write.c:writeRegNameKeyedFont
+ size = 3 # Headroom for new key addition
+ size += 1 # FontMatrix is always counted
+        size += 1 + 1  # Private, CharStrings
+ for key in font_dictionary_keys:
+ size += int(key in sf)
+ lines.append(self._tobytes(f"{size} dict dup begin"))
+
+ for key, value in sf.items():
+ if eexec_began:
+ eexec_dict[key] = value
+ continue
+
+ if key == "FontInfo":
+ fi = sf["FontInfo"]
+ # follow t1write.c:writeFontInfoDict
+ size = 3 # Headroom for new key addition
+ for subkey in FontInfo_dictionary_keys:
+ size += int(subkey in fi)
+ lines.append(self._tobytes(f"/FontInfo {size} dict dup begin"))
+
+ for subkey, subvalue in fi.items():
+ lines.extend(self._make_lines(subkey, subvalue))
+ lines.append(b"end def")
+ elif key in _type1_post_eexec_order: # usually 'Private'
+ eexec_dict[key] = value
+ eexec_began = True
+ else:
+ lines.extend(self._make_lines(key, value))
+ lines.append(b"end")
+ eexec_portion = self.encode_eexec(eexec_dict)
+ lines.append(bytesjoin([b"currentfile eexec ", eexec_portion]))
+
+ for _ in range(8):
+ lines.append(self._tobytes("0" * 64))
+ lines.extend([b"cleartomark", b"%%EndResource", b"%%EOF"])
+
+ data = bytesjoin(lines, "\n")
+ return data
+
+ def encode_eexec(self, eexec_dict):
+ lines = []
+
+ # '-|', '|-', '|'
+ RD_key, ND_key, NP_key = None, None, None
+ lenIV = 4
+ subrs = std_subrs
+
+ # Ensure we look at Private first, because we need RD_key, ND_key, NP_key and lenIV
+ sortedItems = sorted(eexec_dict.items(), key=lambda item: item[0] != "Private")
+
+ for key, value in sortedItems:
+ if key == "Private":
+ pr = eexec_dict["Private"]
+ # follow t1write.c:writePrivateDict
+ size = 3 # for RD, ND, NP
+ for subkey in Private_dictionary_keys:
+ size += int(subkey in pr)
+ lines.append(b"dup /Private")
+ lines.append(self._tobytes(f"{size} dict dup begin"))
+ for subkey, subvalue in pr.items():
+ if not RD_key and subvalue == RD_value:
+ RD_key = subkey
+ elif not ND_key and subvalue in ND_values:
+ ND_key = subkey
+ elif not NP_key and subvalue in PD_values:
+ NP_key = subkey
+
+ if subkey == "lenIV":
+ lenIV = subvalue
+
+ if subkey == "OtherSubrs":
+ # XXX: assert that no flex hint is used
+ lines.append(self._tobytes(hintothers))
+ elif subkey == "Subrs":
+ for subr_bin in subvalue:
+ subr_bin.compile()
+ subrs = [subr_bin.bytecode for subr_bin in subvalue]
+ lines.append(f"/Subrs {len(subrs)} array".encode("ascii"))
+ for i, subr_bin in enumerate(subrs):
+ encrypted_subr, R = eexec.encrypt(
+ bytesjoin([char_IV[:lenIV], subr_bin]), 4330
+ )
+ lines.append(
+ bytesjoin(
+ [
+ self._tobytes(
+ f"dup {i} {len(encrypted_subr)} {RD_key} "
+ ),
+ encrypted_subr,
+ self._tobytes(f" {NP_key}"),
+ ]
+ )
+ )
+ lines.append(b"def")
+
+ lines.append(b"put")
+ else:
+ lines.extend(self._make_lines(subkey, subvalue))
+ elif key == "CharStrings":
+ lines.append(b"dup /CharStrings")
+ lines.append(
+ self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin")
+ )
+ for glyph_name, char_bin in eexec_dict["CharStrings"].items():
+ char_bin.compile()
+ encrypted_char, R = eexec.encrypt(
+ bytesjoin([char_IV[:lenIV], char_bin.bytecode]), 4330
+ )
+ lines.append(
+ bytesjoin(
+ [
+ self._tobytes(
+ f"/{glyph_name} {len(encrypted_char)} {RD_key} "
+ ),
+ encrypted_char,
+ self._tobytes(f" {ND_key}"),
+ ]
+ )
+ )
+ lines.append(b"end put")
+ else:
+ lines.extend(self._make_lines(key, value))
+
+ lines.extend(
+ [
+ b"end",
+ b"dup /FontName get exch definefont pop",
+ b"mark",
+ b"currentfile closefile\n",
+ ]
+ )
+
+ eexec_portion = bytesjoin(lines, "\n")
+ encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
+
+ return encrypted_eexec
+
+ def _make_lines(self, key, value):
+ if key == "FontName":
+ return [self._tobytes(f"/{key} /{value} def")]
+ if key in ["isFixedPitch", "ForceBold", "RndStemUp"]:
+ return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
+ elif key == "Encoding":
+ if value == StandardEncoding:
+ return [self._tobytes(f"/{key} StandardEncoding def")]
+ else:
+ # follow fontTools.misc.psOperators._type1_Encoding_repr
+ lines = []
+ lines.append(b"/Encoding 256 array")
+ lines.append(b"0 1 255 {1 index exch /.notdef put} for")
+ for i in range(256):
+ name = value[i]
+ if name != ".notdef":
+ lines.append(self._tobytes(f"dup {i} /{name} put"))
+ lines.append(b"def")
+ return lines
+ if isinstance(value, str):
+ return [self._tobytes(f"/{key} ({value}) def")]
+ elif isinstance(value, bool):
+ return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
+ elif isinstance(value, list):
+ return [self._tobytes(f"/{key} [{' '.join(str(v) for v in value)}] def")]
+ elif isinstance(value, tuple):
+ return [self._tobytes(f"/{key} {{{' '.join(str(v) for v in value)}}} def")]
+ else:
+ return [self._tobytes(f"/{key} {value} def")]
+
+ def _tobytes(self, s, errors="strict"):
+ return tobytes(s, self.encoding, errors)
# low level T1 data read and write functions
+
def read(path, onlyHeader=False):
- """reads any Type 1 font file, returns raw data"""
- _, ext = os.path.splitext(path)
- ext = ext.lower()
- creator, typ = getMacCreatorAndType(path)
- if typ == 'LWFN':
- return readLWFN(path, onlyHeader), 'LWFN'
- if ext == '.pfb':
- return readPFB(path, onlyHeader), 'PFB'
- else:
- return readOther(path), 'OTHER'
-
-def write(path, data, kind='OTHER', dohex=False):
- assertType1(data)
- kind = kind.upper()
- try:
- os.remove(path)
- except os.error:
- pass
- err = 1
- try:
- if kind == 'LWFN':
- writeLWFN(path, data)
- elif kind == 'PFB':
- writePFB(path, data)
- else:
- writeOther(path, data, dohex)
- err = 0
- finally:
- if err and not DEBUG:
- try:
- os.remove(path)
- except os.error:
- pass
+ """reads any Type 1 font file, returns raw data"""
+ _, ext = os.path.splitext(path)
+ ext = ext.lower()
+ creator, typ = getMacCreatorAndType(path)
+ if typ == "LWFN":
+ return readLWFN(path, onlyHeader), "LWFN"
+ if ext == ".pfb":
+ return readPFB(path, onlyHeader), "PFB"
+ else:
+ return readOther(path), "OTHER"
+
+
+def write(path, data, kind="OTHER", dohex=False):
+ assertType1(data)
+ kind = kind.upper()
+ try:
+ os.remove(path)
+ except os.error:
+ pass
+ err = 1
+ try:
+ if kind == "LWFN":
+ writeLWFN(path, data)
+ elif kind == "PFB":
+ writePFB(path, data)
+ else:
+ writeOther(path, data, dohex)
+ err = 0
+ finally:
+ if err and not DEBUG:
+ try:
+ os.remove(path)
+ except os.error:
+ pass
# -- internal --
@@ -293,125 +344,132 @@ HEXLINELENGTH = 80
def readLWFN(path, onlyHeader=False):
- """reads an LWFN font file, returns raw data"""
- from fontTools.misc.macRes import ResourceReader
- reader = ResourceReader(path)
- try:
- data = []
- for res in reader.get('POST', []):
- code = byteord(res.data[0])
- if byteord(res.data[1]) != 0:
- raise T1Error('corrupt LWFN file')
- if code in [1, 2]:
- if onlyHeader and code == 2:
- break
- data.append(res.data[2:])
- elif code in [3, 5]:
- break
- elif code == 4:
- with open(path, "rb") as f:
- data.append(f.read())
- elif code == 0:
- pass # comment, ignore
- else:
- raise T1Error('bad chunk code: ' + repr(code))
- finally:
- reader.close()
- data = bytesjoin(data)
- assertType1(data)
- return data
+ """reads an LWFN font file, returns raw data"""
+ from fontTools.misc.macRes import ResourceReader
+
+ reader = ResourceReader(path)
+ try:
+ data = []
+ for res in reader.get("POST", []):
+ code = byteord(res.data[0])
+ if byteord(res.data[1]) != 0:
+ raise T1Error("corrupt LWFN file")
+ if code in [1, 2]:
+ if onlyHeader and code == 2:
+ break
+ data.append(res.data[2:])
+ elif code in [3, 5]:
+ break
+ elif code == 4:
+ with open(path, "rb") as f:
+ data.append(f.read())
+ elif code == 0:
+ pass # comment, ignore
+ else:
+ raise T1Error("bad chunk code: " + repr(code))
+ finally:
+ reader.close()
+ data = bytesjoin(data)
+ assertType1(data)
+ return data
+
def readPFB(path, onlyHeader=False):
- """reads a PFB font file, returns raw data"""
- data = []
- with open(path, "rb") as f:
- while True:
- if f.read(1) != bytechr(128):
- raise T1Error('corrupt PFB file')
- code = byteord(f.read(1))
- if code in [1, 2]:
- chunklen = stringToLong(f.read(4))
- chunk = f.read(chunklen)
- assert len(chunk) == chunklen
- data.append(chunk)
- elif code == 3:
- break
- else:
- raise T1Error('bad chunk code: ' + repr(code))
- if onlyHeader:
- break
- data = bytesjoin(data)
- assertType1(data)
- return data
+ """reads a PFB font file, returns raw data"""
+ data = []
+ with open(path, "rb") as f:
+ while True:
+ if f.read(1) != bytechr(128):
+ raise T1Error("corrupt PFB file")
+ code = byteord(f.read(1))
+ if code in [1, 2]:
+ chunklen = stringToLong(f.read(4))
+ chunk = f.read(chunklen)
+ assert len(chunk) == chunklen
+ data.append(chunk)
+ elif code == 3:
+ break
+ else:
+ raise T1Error("bad chunk code: " + repr(code))
+ if onlyHeader:
+ break
+ data = bytesjoin(data)
+ assertType1(data)
+ return data
+
def readOther(path):
- """reads any (font) file, returns raw data"""
- with open(path, "rb") as f:
- data = f.read()
- assertType1(data)
- chunks = findEncryptedChunks(data)
- data = []
- for isEncrypted, chunk in chunks:
- if isEncrypted and isHex(chunk[:4]):
- data.append(deHexString(chunk))
- else:
- data.append(chunk)
- return bytesjoin(data)
+ """reads any (font) file, returns raw data"""
+ with open(path, "rb") as f:
+ data = f.read()
+ assertType1(data)
+ chunks = findEncryptedChunks(data)
+ data = []
+ for isEncrypted, chunk in chunks:
+ if isEncrypted and isHex(chunk[:4]):
+ data.append(deHexString(chunk))
+ else:
+ data.append(chunk)
+ return bytesjoin(data)
+
# file writing tools
+
def writeLWFN(path, data):
- # Res.FSpCreateResFile was deprecated in OS X 10.5
- Res.FSpCreateResFile(path, "just", "LWFN", 0)
- resRef = Res.FSOpenResFile(path, 2) # write-only
- try:
- Res.UseResFile(resRef)
- resID = 501
- chunks = findEncryptedChunks(data)
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- code = 2
- else:
- code = 1
- while chunk:
- res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2])
- res.AddResource('POST', resID, '')
- chunk = chunk[LWFNCHUNKSIZE - 2:]
- resID = resID + 1
- res = Res.Resource(bytechr(5) + '\0')
- res.AddResource('POST', resID, '')
- finally:
- Res.CloseResFile(resRef)
+ # Res.FSpCreateResFile was deprecated in OS X 10.5
+ Res.FSpCreateResFile(path, "just", "LWFN", 0)
+ resRef = Res.FSOpenResFile(path, 2) # write-only
+ try:
+ Res.UseResFile(resRef)
+ resID = 501
+ chunks = findEncryptedChunks(data)
+ for isEncrypted, chunk in chunks:
+ if isEncrypted:
+ code = 2
+ else:
+ code = 1
+ while chunk:
+ res = Res.Resource(bytechr(code) + "\0" + chunk[: LWFNCHUNKSIZE - 2])
+ res.AddResource("POST", resID, "")
+ chunk = chunk[LWFNCHUNKSIZE - 2 :]
+ resID = resID + 1
+ res = Res.Resource(bytechr(5) + "\0")
+ res.AddResource("POST", resID, "")
+ finally:
+ Res.CloseResFile(resRef)
+
def writePFB(path, data):
- chunks = findEncryptedChunks(data)
- with open(path, "wb") as f:
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- code = 2
- else:
- code = 1
- f.write(bytechr(128) + bytechr(code))
- f.write(longToString(len(chunk)))
- f.write(chunk)
- f.write(bytechr(128) + bytechr(3))
+ chunks = findEncryptedChunks(data)
+ with open(path, "wb") as f:
+ for isEncrypted, chunk in chunks:
+ if isEncrypted:
+ code = 2
+ else:
+ code = 1
+ f.write(bytechr(128) + bytechr(code))
+ f.write(longToString(len(chunk)))
+ f.write(chunk)
+ f.write(bytechr(128) + bytechr(3))
+
def writeOther(path, data, dohex=False):
- chunks = findEncryptedChunks(data)
- with open(path, "wb") as f:
- hexlinelen = HEXLINELENGTH // 2
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- code = 2
- else:
- code = 1
- if code == 2 and dohex:
- while chunk:
- f.write(eexec.hexString(chunk[:hexlinelen]))
- f.write(b'\r')
- chunk = chunk[hexlinelen:]
- else:
- f.write(chunk)
+ chunks = findEncryptedChunks(data)
+ with open(path, "wb") as f:
+ hexlinelen = HEXLINELENGTH // 2
+ for isEncrypted, chunk in chunks:
+ if isEncrypted:
+ code = 2
+ else:
+ code = 1
+ if code == 2 and dohex:
+ while chunk:
+ f.write(eexec.hexString(chunk[:hexlinelen]))
+ f.write(b"\r")
+ chunk = chunk[hexlinelen:]
+ else:
+ f.write(chunk)
# decryption tools
@@ -419,99 +477,107 @@ def writeOther(path, data, dohex=False):
EEXECBEGIN = b"currentfile eexec"
# The spec allows for 512 ASCII zeros interrupted by arbitrary whitespace to
# follow eexec
-EEXECEND = re.compile(b'(0[ \t\r\n]*){512}', flags=re.M)
+EEXECEND = re.compile(b"(0[ \t\r\n]*){512}", flags=re.M)
EEXECINTERNALEND = b"currentfile closefile"
EEXECBEGINMARKER = b"%-- eexec start\r"
EEXECENDMARKER = b"%-- eexec end\r"
-_ishexRE = re.compile(b'[0-9A-Fa-f]*$')
+_ishexRE = re.compile(b"[0-9A-Fa-f]*$")
+
def isHex(text):
- return _ishexRE.match(text) is not None
+ return _ishexRE.match(text) is not None
def decryptType1(data):
- chunks = findEncryptedChunks(data)
- data = []
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- if isHex(chunk[:4]):
- chunk = deHexString(chunk)
- decrypted, R = eexec.decrypt(chunk, 55665)
- decrypted = decrypted[4:]
- if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \
- and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND:
- raise T1Error("invalid end of eexec part")
- decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r'
- data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
- else:
- if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN:
- data.append(chunk[:-len(EEXECBEGIN)-1])
- else:
- data.append(chunk)
- return bytesjoin(data)
+ chunks = findEncryptedChunks(data)
+ data = []
+ for isEncrypted, chunk in chunks:
+ if isEncrypted:
+ if isHex(chunk[:4]):
+ chunk = deHexString(chunk)
+ decrypted, R = eexec.decrypt(chunk, 55665)
+ decrypted = decrypted[4:]
+ if (
+ decrypted[-len(EEXECINTERNALEND) - 1 : -1] != EEXECINTERNALEND
+ and decrypted[-len(EEXECINTERNALEND) - 2 : -2] != EEXECINTERNALEND
+ ):
+ raise T1Error("invalid end of eexec part")
+ decrypted = decrypted[: -len(EEXECINTERNALEND) - 2] + b"\r"
+ data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
+ else:
+ if chunk[-len(EEXECBEGIN) - 1 : -1] == EEXECBEGIN:
+ data.append(chunk[: -len(EEXECBEGIN) - 1])
+ else:
+ data.append(chunk)
+ return bytesjoin(data)
+
def findEncryptedChunks(data):
- chunks = []
- while True:
- eBegin = data.find(EEXECBEGIN)
- if eBegin < 0:
- break
- eBegin = eBegin + len(EEXECBEGIN) + 1
- endMatch = EEXECEND.search(data, eBegin)
- if endMatch is None:
- raise T1Error("can't find end of eexec part")
- eEnd = endMatch.start()
- cypherText = data[eBegin:eEnd + 2]
- if isHex(cypherText[:4]):
- cypherText = deHexString(cypherText)
- plainText, R = eexec.decrypt(cypherText, 55665)
- eEndLocal = plainText.find(EEXECINTERNALEND)
- if eEndLocal < 0:
- raise T1Error("can't find end of eexec part")
- chunks.append((0, data[:eBegin]))
- chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1]))
- data = data[eEnd:]
- chunks.append((0, data))
- return chunks
+ chunks = []
+ while True:
+ eBegin = data.find(EEXECBEGIN)
+ if eBegin < 0:
+ break
+ eBegin = eBegin + len(EEXECBEGIN) + 1
+ endMatch = EEXECEND.search(data, eBegin)
+ if endMatch is None:
+ raise T1Error("can't find end of eexec part")
+ eEnd = endMatch.start()
+ cypherText = data[eBegin : eEnd + 2]
+ if isHex(cypherText[:4]):
+ cypherText = deHexString(cypherText)
+ plainText, R = eexec.decrypt(cypherText, 55665)
+ eEndLocal = plainText.find(EEXECINTERNALEND)
+ if eEndLocal < 0:
+ raise T1Error("can't find end of eexec part")
+ chunks.append((0, data[:eBegin]))
+ chunks.append((1, cypherText[: eEndLocal + len(EEXECINTERNALEND) + 1]))
+ data = data[eEnd:]
+ chunks.append((0, data))
+ return chunks
+
def deHexString(hexstring):
- return eexec.deHexString(bytesjoin(hexstring.split()))
+ return eexec.deHexString(bytesjoin(hexstring.split()))
# Type 1 assertion
-_fontType1RE = re.compile(br"/FontType\s+1\s+def")
+_fontType1RE = re.compile(rb"/FontType\s+1\s+def")
+
def assertType1(data):
- for head in [b'%!PS-AdobeFont', b'%!FontType1']:
- if data[:len(head)] == head:
- break
- else:
- raise T1Error("not a PostScript font")
- if not _fontType1RE.search(data):
- raise T1Error("not a Type 1 font")
- if data.find(b"currentfile eexec") < 0:
- raise T1Error("not an encrypted Type 1 font")
- # XXX what else?
- return data
+ for head in [b"%!PS-AdobeFont", b"%!FontType1"]:
+ if data[: len(head)] == head:
+ break
+ else:
+ raise T1Error("not a PostScript font")
+ if not _fontType1RE.search(data):
+ raise T1Error("not a Type 1 font")
+ if data.find(b"currentfile eexec") < 0:
+ raise T1Error("not an encrypted Type 1 font")
+ # XXX what else?
+ return data
# pfb helpers
+
def longToString(long):
- s = b""
- for i in range(4):
- s += bytechr((long & (0xff << (i * 8))) >> i * 8)
- return s
+ s = b""
+ for i in range(4):
+ s += bytechr((long & (0xFF << (i * 8))) >> i * 8)
+ return s
+
def stringToLong(s):
- if len(s) != 4:
- raise ValueError('string must be 4 bytes long')
- l = 0
- for i in range(4):
- l += byteord(s[i]) << (i * 8)
- return l
+ if len(s) != 4:
+ raise ValueError("string must be 4 bytes long")
+ l = 0
+ for i in range(4):
+ l += byteord(s[i]) << (i * 8)
+ return l
# PS stream helpers
@@ -523,36 +589,38 @@ font_dictionary_keys.remove("FontMatrix")
FontInfo_dictionary_keys = list(_type1_fontinfo_order)
# extend because AFDKO tx may use following keys
-FontInfo_dictionary_keys.extend([
- "FSType",
- "Copyright",
-])
+FontInfo_dictionary_keys.extend(
+ [
+ "FSType",
+ "Copyright",
+ ]
+)
Private_dictionary_keys = [
- # We don't know what names will be actually used.
- # "RD",
- # "ND",
- # "NP",
- "Subrs",
- "OtherSubrs",
- "UniqueID",
- "BlueValues",
- "OtherBlues",
- "FamilyBlues",
- "FamilyOtherBlues",
- "BlueScale",
- "BlueShift",
- "BlueFuzz",
- "StdHW",
- "StdVW",
- "StemSnapH",
- "StemSnapV",
- "ForceBold",
- "LanguageGroup",
- "password",
- "lenIV",
- "MinFeature",
- "RndStemUp",
+ # We don't know what names will be actually used.
+ # "RD",
+ # "ND",
+ # "NP",
+ "Subrs",
+ "OtherSubrs",
+ "UniqueID",
+ "BlueValues",
+ "OtherBlues",
+ "FamilyBlues",
+ "FamilyOtherBlues",
+ "BlueScale",
+ "BlueShift",
+ "BlueFuzz",
+ "StdHW",
+ "StdVW",
+ "StemSnapH",
+ "StemSnapV",
+ "ForceBold",
+ "LanguageGroup",
+ "password",
+ "lenIV",
+ "MinFeature",
+ "RndStemUp",
]
# t1write_hintothers.h
@@ -561,20 +629,20 @@ systemdict/internaldict get exec dup/startlock known{/startlock get exec}{dup
/strtlck known{/strtlck get exec}{pop 3}ifelse}ifelse}ifelse}executeonly]def"""
# t1write.c:saveStdSubrs
std_subrs = [
- # 3 0 callother pop pop setcurrentpoint return
- b"\x8e\x8b\x0c\x10\x0c\x11\x0c\x11\x0c\x21\x0b",
- # 0 1 callother return
- b"\x8b\x8c\x0c\x10\x0b",
- # 0 2 callother return
- b"\x8b\x8d\x0c\x10\x0b",
- # return
- b"\x0b",
- # 3 1 3 callother pop callsubr return
- b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b"
+ # 3 0 callother pop pop setcurrentpoint return
+ b"\x8e\x8b\x0c\x10\x0c\x11\x0c\x11\x0c\x21\x0b",
+ # 0 1 callother return
+ b"\x8b\x8c\x0c\x10\x0b",
+ # 0 2 callother return
+ b"\x8b\x8d\x0c\x10\x0b",
+ # return
+ b"\x0b",
+ # 3 1 3 callother pop callsubr return
+ b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b",
]
# follow t1write.c:writeRegNameKeyedFont
eexec_IV = b"cccc"
char_IV = b"\x0c\x0c\x0c\x0c"
RD_value = ("string", "currentfile", "exch", "readstring", "pop")
-ND_value = ("def",)
-PD_value = ("put",)
+ND_values = [("def",), ("noaccess", "def")]
+PD_values = [("put",), ("noaccess", "put")]
diff --git a/Lib/fontTools/ttLib/__init__.py b/Lib/fontTools/ttLib/__init__.py
index dadd7f20..ed00764f 100644
--- a/Lib/fontTools/ttLib/__init__.py
+++ b/Lib/fontTools/ttLib/__init__.py
@@ -6,12 +6,21 @@ import logging
log = logging.getLogger(__name__)
-class TTLibError(Exception): pass
+
+class TTLibError(Exception):
+ pass
+
+
+class TTLibFileIsCollectionError(TTLibError):
+ pass
+
@deprecateFunction("use logging instead", category=DeprecationWarning)
def debugmsg(msg):
- import time
- print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
+ import time
+
+ print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
+
from fontTools.ttLib.ttFont import *
from fontTools.ttLib.ttCollection import TTCollection
diff --git a/Lib/fontTools/ttLib/__main__.py b/Lib/fontTools/ttLib/__main__.py
new file mode 100644
index 00000000..2733444d
--- /dev/null
+++ b/Lib/fontTools/ttLib/__main__.py
@@ -0,0 +1,108 @@
+import sys
+from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
+from fontTools.ttLib.ttFont import *
+from fontTools.ttLib.ttCollection import TTCollection
+
+
+def main(args=None):
+ """Open/save fonts with TTFont() or TTCollection()
+
+ ./fonttools ttLib [-oFILE] [-yNUMBER] files...
+
+ If multiple files are given on the command-line,
+ they are each opened (as a font or collection),
+ and added to the font list.
+
+ If -o (output-file) argument is given, the font
+ list is then saved to the output file, either as
+ a single font, if there is only one font, or as
+ a collection otherwise.
+
+ If -y (font-number) argument is given, only the
+ specified font from collections is opened.
+
+ The above allow extracting a single font from a
+ collection, or combining multiple fonts into a
+ collection.
+
+    If --lazy or --no-lazy are given, those are passed
+ to the TTFont() or TTCollection() constructors.
+ """
+ from fontTools import configLogger
+
+ if args is None:
+ args = sys.argv[1:]
+
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools ttLib",
+ description="Open/save fonts with TTFont() or TTCollection()",
+ epilog="""
+ If multiple files are given on the command-line,
+ they are each opened (as a font or collection),
+ and added to the font list.
+
+ The above, when combined with -o / --output,
+ allows for extracting a single font from a
+ collection, or combining multiple fonts into a
+ collection.
+ """,
+ )
+ parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
+ parser.add_argument(
+ "-t", "--table", metavar="table", nargs="*", help="Tables to decompile."
+ )
+ parser.add_argument(
+ "-o", "--output", metavar="FILE", default=None, help="Output file."
+ )
+ parser.add_argument(
+ "-y", metavar="NUMBER", default=-1, help="Font number to load from collections."
+ )
+ parser.add_argument(
+ "--lazy", action="store_true", default=None, help="Load fonts lazily."
+ )
+ parser.add_argument(
+ "--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately."
+ )
+ parser.add_argument(
+ "--flavor",
+ dest="flavor",
+ default=None,
+ help="Flavor of output font. 'woff' or 'woff2'.",
+ )
+ options = parser.parse_args(args)
+
+ fontNumber = int(options.y) if options.y is not None else None
+ outFile = options.output
+ lazy = options.lazy
+ flavor = options.flavor
+ tables = options.table if options.table is not None else []
+
+ fonts = []
+ for f in options.font:
+ try:
+ font = TTFont(f, fontNumber=fontNumber, lazy=lazy)
+ fonts.append(font)
+ except TTLibFileIsCollectionError:
+ collection = TTCollection(f, lazy=lazy)
+ fonts.extend(collection.fonts)
+
+ for font in fonts:
+ for table in tables if "*" not in tables else font.keys():
+ font[table] # Decompiles
+
+ if outFile is not None:
+ if len(fonts) == 1:
+ fonts[0].flavor = flavor
+ fonts[0].save(outFile)
+ else:
+ if flavor is not None:
+ raise TTLibError("Cannot set flavor for collections.")
+ collection = TTCollection()
+ collection.fonts = fonts
+ collection.save(outFile)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/ttLib/macUtils.py b/Lib/fontTools/ttLib/macUtils.py
index 496fb672..468a75ad 100644
--- a/Lib/fontTools/ttLib/macUtils.py
+++ b/Lib/fontTools/ttLib/macUtils.py
@@ -4,49 +4,51 @@ from fontTools.misc.macRes import ResourceReader, ResourceError
def getSFNTResIndices(path):
- """Determine whether a file has a 'sfnt' resource fork or not."""
- try:
- reader = ResourceReader(path)
- indices = reader.getIndices('sfnt')
- reader.close()
- return indices
- except ResourceError:
- return []
+ """Determine whether a file has a 'sfnt' resource fork or not."""
+ try:
+ reader = ResourceReader(path)
+ indices = reader.getIndices("sfnt")
+ reader.close()
+ return indices
+ except ResourceError:
+ return []
def openTTFonts(path):
- """Given a pathname, return a list of TTFont objects. In the case
- of a flat TTF/OTF file, the list will contain just one font object;
- but in the case of a Mac font suitcase it will contain as many
- font objects as there are sfnt resources in the file.
- """
- from fontTools import ttLib
- fonts = []
- sfnts = getSFNTResIndices(path)
- if not sfnts:
- fonts.append(ttLib.TTFont(path))
- else:
- for index in sfnts:
- fonts.append(ttLib.TTFont(path, index))
- if not fonts:
- raise ttLib.TTLibError("no fonts found in file '%s'" % path)
- return fonts
+ """Given a pathname, return a list of TTFont objects. In the case
+ of a flat TTF/OTF file, the list will contain just one font object;
+ but in the case of a Mac font suitcase it will contain as many
+ font objects as there are sfnt resources in the file.
+ """
+ from fontTools import ttLib
+
+ fonts = []
+ sfnts = getSFNTResIndices(path)
+ if not sfnts:
+ fonts.append(ttLib.TTFont(path))
+ else:
+ for index in sfnts:
+ fonts.append(ttLib.TTFont(path, index))
+ if not fonts:
+ raise ttLib.TTLibError("no fonts found in file '%s'" % path)
+ return fonts
class SFNTResourceReader(BytesIO):
- """Simple read-only file wrapper for 'sfnt' resources."""
-
- def __init__(self, path, res_name_or_index):
- from fontTools import ttLib
- reader = ResourceReader(path)
- if isinstance(res_name_or_index, str):
- rsrc = reader.getNamedResource('sfnt', res_name_or_index)
- else:
- rsrc = reader.getIndResource('sfnt', res_name_or_index)
- if rsrc is None:
- raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
- reader.close()
- self.rsrc = rsrc
- super(SFNTResourceReader, self).__init__(rsrc.data)
- self.name = path
+ """Simple read-only file wrapper for 'sfnt' resources."""
+
+ def __init__(self, path, res_name_or_index):
+ from fontTools import ttLib
+
+ reader = ResourceReader(path)
+ if isinstance(res_name_or_index, str):
+ rsrc = reader.getNamedResource("sfnt", res_name_or_index)
+ else:
+ rsrc = reader.getIndResource("sfnt", res_name_or_index)
+ if rsrc is None:
+ raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
+ reader.close()
+ self.rsrc = rsrc
+ super(SFNTResourceReader, self).__init__(rsrc.data)
+ self.name = path
diff --git a/Lib/fontTools/ttLib/scaleUpem.py b/Lib/fontTools/ttLib/scaleUpem.py
index 9e0e0ade..3f9b22af 100644
--- a/Lib/fontTools/ttLib/scaleUpem.py
+++ b/Lib/fontTools/ttLib/scaleUpem.py
@@ -10,7 +10,9 @@ import fontTools.ttLib.tables.otBase as otBase
import fontTools.ttLib.tables.otTables as otTables
from fontTools.cffLib import VarStoreData
import fontTools.cffLib.specializer as cffSpecializer
+from fontTools.varLib import builder # for VarData.calculateNumShorts
from fontTools.misc.fixedTools import otRound
+from fontTools.ttLib.tables._g_l_y_f import VarComponentFlags
__all__ = ["scale_upem", "ScalerVisitor"]
@@ -111,30 +113,82 @@ def visit(visitor, obj, attr, VOriginRecords):
@ScalerVisitor.register_attr(ttLib.getTableClass("glyf"), "glyphs")
def visit(visitor, obj, attr, glyphs):
for g in glyphs.values():
+ for attr in ("xMin", "xMax", "yMin", "yMax"):
+ v = getattr(g, attr, None)
+ if v is not None:
+ setattr(g, attr, visitor.scale(v))
+
if g.isComposite():
for component in g.components:
component.x = visitor.scale(component.x)
component.y = visitor.scale(component.y)
- else:
- for attr in ("xMin", "xMax", "yMin", "yMax"):
- v = getattr(g, attr, None)
- if v is not None:
- setattr(g, attr, visitor.scale(v))
+ continue
- glyf = visitor.font["glyf"]
- coordinates = g.getCoordinates(glyf)[0]
- for i, (x, y) in enumerate(coordinates):
- coordinates[i] = visitor.scale(x), visitor.scale(y)
+ if g.isVarComposite():
+ for component in g.components:
+ for attr in ("translateX", "translateY", "tCenterX", "tCenterY"):
+ v = getattr(component.transform, attr)
+ setattr(component.transform, attr, visitor.scale(v))
+ continue
+
+ if hasattr(g, "coordinates"):
+ coordinates = g.coordinates
+ for i, (x, y) in enumerate(coordinates):
+ coordinates[i] = visitor.scale(x), visitor.scale(y)
@ScalerVisitor.register_attr(ttLib.getTableClass("gvar"), "variations")
def visit(visitor, obj, attr, variations):
- for varlist in variations.values():
+ # VarComposites are a pain to handle :-(
+ glyfTable = visitor.font["glyf"]
+
+ for glyphName, varlist in variations.items():
+ glyph = glyfTable[glyphName]
+ isVarComposite = glyph.isVarComposite()
for var in varlist:
coordinates = var.coordinates
- for i, xy in enumerate(coordinates):
- if xy is None:
- continue
+
+ if not isVarComposite:
+ for i, xy in enumerate(coordinates):
+ if xy is None:
+ continue
+ coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
+ continue
+
+ # VarComposite glyph
+
+ i = 0
+ for component in glyph.components:
+ if component.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ i += len(component.location)
+ if component.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X
+ | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ xy = coordinates[i]
+ coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
+ i += 1
+ if component.flags & VarComponentFlags.HAVE_ROTATION:
+ i += 1
+ if component.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ i += 1
+ if component.flags & (
+ VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y
+ ):
+ i += 1
+ if component.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ xy = coordinates[i]
+ coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
+ i += 1
+
+ # Phantom points
+ assert i + 4 == len(coordinates)
+ for i in range(i, len(coordinates)):
+ xy = coordinates[i]
coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
@@ -149,7 +203,8 @@ def visit(visitor, obj, attr, kernTables):
def _cff_scale(visitor, args):
for i, arg in enumerate(args):
if not isinstance(arg, list):
- args[i] = visitor.scale(arg)
+ if not isinstance(arg, bytes):
+ args[i] = visitor.scale(arg)
else:
num_blends = arg[-1]
_cff_scale(visitor, arg)
@@ -176,6 +231,8 @@ def visit(visitor, obj, attr, cff):
c.program, getNumRegions=getNumRegions
)
for op, args in commands:
+ if op == "vsindex":
+ continue
_cff_scale(visitor, args)
c.program[:] = cffSpecializer.commandsToProgram(commands)
@@ -231,6 +288,7 @@ def visit(visitor, varData):
for item in varData.Item:
for i, v in enumerate(item):
item[i] = visitor.scale(v)
+ varData.calculateNumShorts()
# COLRv1
diff --git a/Lib/fontTools/ttLib/sfnt.py b/Lib/fontTools/ttLib/sfnt.py
index e7c06337..b1569423 100644
--- a/Lib/fontTools/ttLib/sfnt.py
+++ b/Lib/fontTools/ttLib/sfnt.py
@@ -16,7 +16,7 @@ from io import BytesIO
from types import SimpleNamespace
from fontTools.misc.textTools import Tag
from fontTools.misc import sstruct
-from fontTools.ttLib import TTLibError
+from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
import struct
from collections import OrderedDict
import logging
@@ -26,127 +26,130 @@ log = logging.getLogger(__name__)
class SFNTReader(object):
-
- def __new__(cls, *args, **kwargs):
- """ Return an instance of the SFNTReader sub-class which is compatible
- with the input file type.
- """
- if args and cls is SFNTReader:
- infile = args[0]
- infile.seek(0)
- sfntVersion = Tag(infile.read(4))
- infile.seek(0)
- if sfntVersion == "wOF2":
- # return new WOFF2Reader object
- from fontTools.ttLib.woff2 import WOFF2Reader
- return object.__new__(WOFF2Reader)
- # return default object
- return object.__new__(cls)
-
- def __init__(self, file, checkChecksums=0, fontNumber=-1):
- self.file = file
- self.checkChecksums = checkChecksums
-
- self.flavor = None
- self.flavorData = None
- self.DirectoryEntry = SFNTDirectoryEntry
- self.file.seek(0)
- self.sfntVersion = self.file.read(4)
- self.file.seek(0)
- if self.sfntVersion == b"ttcf":
- header = readTTCHeader(self.file)
- numFonts = header.numFonts
- if not 0 <= fontNumber < numFonts:
- raise TTLibError("specify a font number between 0 and %d (inclusive)" % (numFonts - 1))
- self.numFonts = numFonts
- self.file.seek(header.offsetTable[fontNumber])
- data = self.file.read(sfntDirectorySize)
- if len(data) != sfntDirectorySize:
- raise TTLibError("Not a Font Collection (not enough data)")
- sstruct.unpack(sfntDirectoryFormat, data, self)
- elif self.sfntVersion == b"wOFF":
- self.flavor = "woff"
- self.DirectoryEntry = WOFFDirectoryEntry
- data = self.file.read(woffDirectorySize)
- if len(data) != woffDirectorySize:
- raise TTLibError("Not a WOFF font (not enough data)")
- sstruct.unpack(woffDirectoryFormat, data, self)
- else:
- data = self.file.read(sfntDirectorySize)
- if len(data) != sfntDirectorySize:
- raise TTLibError("Not a TrueType or OpenType font (not enough data)")
- sstruct.unpack(sfntDirectoryFormat, data, self)
- self.sfntVersion = Tag(self.sfntVersion)
-
- if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"):
- raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
- tables = {}
- for i in range(self.numTables):
- entry = self.DirectoryEntry()
- entry.fromFile(self.file)
- tag = Tag(entry.tag)
- tables[tag] = entry
- self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset))
-
- # Load flavor data if any
- if self.flavor == "woff":
- self.flavorData = WOFFFlavorData(self)
-
- def has_key(self, tag):
- return tag in self.tables
-
- __contains__ = has_key
-
- def keys(self):
- return self.tables.keys()
-
- def __getitem__(self, tag):
- """Fetch the raw table data."""
- entry = self.tables[Tag(tag)]
- data = entry.loadData (self.file)
- if self.checkChecksums:
- if tag == 'head':
- # Beh: we have to special-case the 'head' table.
- checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
- else:
- checksum = calcChecksum(data)
- if self.checkChecksums > 1:
- # Be obnoxious, and barf when it's wrong
- assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag
- elif checksum != entry.checkSum:
- # Be friendly, and just log a warning.
- log.warning("bad checksum for '%s' table", tag)
- return data
-
- def __delitem__(self, tag):
- del self.tables[Tag(tag)]
-
- def close(self):
- self.file.close()
-
- # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able
- # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a
- # reference to an external file object which is not pickleable. So in __getstate__
- # we store the file name and current position, and in __setstate__ we reopen the
- # same named file after unpickling.
-
- def __getstate__(self):
- if isinstance(self.file, BytesIO):
- # BytesIO is already pickleable, return the state unmodified
- return self.__dict__
-
- # remove unpickleable file attribute, and only store its name and pos
- state = self.__dict__.copy()
- del state["file"]
- state["_filename"] = self.file.name
- state["_filepos"] = self.file.tell()
- return state
-
- def __setstate__(self, state):
- if "file" not in state:
- self.file = open(state.pop("_filename"), "rb")
- self.file.seek(state.pop("_filepos"))
- self.__dict__.update(state)
+ def __new__(cls, *args, **kwargs):
+ """Return an instance of the SFNTReader sub-class which is compatible
+ with the input file type.
+ """
+ if args and cls is SFNTReader:
+ infile = args[0]
+ infile.seek(0)
+ sfntVersion = Tag(infile.read(4))
+ infile.seek(0)
+ if sfntVersion == "wOF2":
+ # return new WOFF2Reader object
+ from fontTools.ttLib.woff2 import WOFF2Reader
+
+ return object.__new__(WOFF2Reader)
+ # return default object
+ return object.__new__(cls)
+
+ def __init__(self, file, checkChecksums=0, fontNumber=-1):
+ self.file = file
+ self.checkChecksums = checkChecksums
+
+ self.flavor = None
+ self.flavorData = None
+ self.DirectoryEntry = SFNTDirectoryEntry
+ self.file.seek(0)
+ self.sfntVersion = self.file.read(4)
+ self.file.seek(0)
+ if self.sfntVersion == b"ttcf":
+ header = readTTCHeader(self.file)
+ numFonts = header.numFonts
+ if not 0 <= fontNumber < numFonts:
+ raise TTLibFileIsCollectionError(
+ "specify a font number between 0 and %d (inclusive)"
+ % (numFonts - 1)
+ )
+ self.numFonts = numFonts
+ self.file.seek(header.offsetTable[fontNumber])
+ data = self.file.read(sfntDirectorySize)
+ if len(data) != sfntDirectorySize:
+ raise TTLibError("Not a Font Collection (not enough data)")
+ sstruct.unpack(sfntDirectoryFormat, data, self)
+ elif self.sfntVersion == b"wOFF":
+ self.flavor = "woff"
+ self.DirectoryEntry = WOFFDirectoryEntry
+ data = self.file.read(woffDirectorySize)
+ if len(data) != woffDirectorySize:
+ raise TTLibError("Not a WOFF font (not enough data)")
+ sstruct.unpack(woffDirectoryFormat, data, self)
+ else:
+ data = self.file.read(sfntDirectorySize)
+ if len(data) != sfntDirectorySize:
+ raise TTLibError("Not a TrueType or OpenType font (not enough data)")
+ sstruct.unpack(sfntDirectoryFormat, data, self)
+ self.sfntVersion = Tag(self.sfntVersion)
+
+ if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"):
+ raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
+ tables = {}
+ for i in range(self.numTables):
+ entry = self.DirectoryEntry()
+ entry.fromFile(self.file)
+ tag = Tag(entry.tag)
+ tables[tag] = entry
+ self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset))
+
+ # Load flavor data if any
+ if self.flavor == "woff":
+ self.flavorData = WOFFFlavorData(self)
+
+ def has_key(self, tag):
+ return tag in self.tables
+
+ __contains__ = has_key
+
+ def keys(self):
+ return self.tables.keys()
+
+ def __getitem__(self, tag):
+ """Fetch the raw table data."""
+ entry = self.tables[Tag(tag)]
+ data = entry.loadData(self.file)
+ if self.checkChecksums:
+ if tag == "head":
+ # Beh: we have to special-case the 'head' table.
+ checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
+ else:
+ checksum = calcChecksum(data)
+ if self.checkChecksums > 1:
+ # Be obnoxious, and barf when it's wrong
+ assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag
+ elif checksum != entry.checkSum:
+ # Be friendly, and just log a warning.
+ log.warning("bad checksum for '%s' table", tag)
+ return data
+
+ def __delitem__(self, tag):
+ del self.tables[Tag(tag)]
+
+ def close(self):
+ self.file.close()
+
+ # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able
+ # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a
+ # reference to an external file object which is not pickleable. So in __getstate__
+ # we store the file name and current position, and in __setstate__ we reopen the
+ # same named file after unpickling.
+
+ def __getstate__(self):
+ if isinstance(self.file, BytesIO):
+ # BytesIO is already pickleable, return the state unmodified
+ return self.__dict__
+
+ # remove unpickleable file attribute, and only store its name and pos
+ state = self.__dict__.copy()
+ del state["file"]
+ state["_filename"] = self.file.name
+ state["_filepos"] = self.file.tell()
+ return state
+
+ def __setstate__(self, state):
+ if "file" not in state:
+ self.file = open(state.pop("_filename"), "rb")
+ self.file.seek(state.pop("_filepos"))
+ self.__dict__.update(state)
# default compression level for WOFF 1.0 tables and metadata
@@ -159,232 +162,257 @@ USE_ZOPFLI = False
# mapping between zlib's compression levels and zopfli's 'numiterations'.
# Use lower values for files over several MB in size or it will be too slow
ZOPFLI_LEVELS = {
- # 0: 0, # can't do 0 iterations...
- 1: 1,
- 2: 3,
- 3: 5,
- 4: 8,
- 5: 10,
- 6: 15,
- 7: 25,
- 8: 50,
- 9: 100,
+ # 0: 0, # can't do 0 iterations...
+ 1: 1,
+ 2: 3,
+ 3: 5,
+ 4: 8,
+ 5: 10,
+ 6: 15,
+ 7: 25,
+ 8: 50,
+ 9: 100,
}
def compress(data, level=ZLIB_COMPRESSION_LEVEL):
- """ Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True,
- zopfli is used instead of the zlib module.
- The compression 'level' must be between 0 and 9. 1 gives best speed,
- 9 gives best compression (0 gives no compression at all).
- The default value is a compromise between speed and compression (6).
- """
- if not (0 <= level <= 9):
- raise ValueError('Bad compression level: %s' % level)
- if not USE_ZOPFLI or level == 0:
- from zlib import compress
- return compress(data, level)
- else:
- from zopfli.zlib import compress
- return compress(data, numiterations=ZOPFLI_LEVELS[level])
+ """Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True,
+ zopfli is used instead of the zlib module.
+ The compression 'level' must be between 0 and 9. 1 gives best speed,
+ 9 gives best compression (0 gives no compression at all).
+ The default value is a compromise between speed and compression (6).
+ """
+ if not (0 <= level <= 9):
+ raise ValueError("Bad compression level: %s" % level)
+ if not USE_ZOPFLI or level == 0:
+ from zlib import compress
+ return compress(data, level)
+ else:
+ from zopfli.zlib import compress
-class SFNTWriter(object):
+ return compress(data, numiterations=ZOPFLI_LEVELS[level])
- def __new__(cls, *args, **kwargs):
- """ Return an instance of the SFNTWriter sub-class which is compatible
- with the specified 'flavor'.
- """
- flavor = None
- if kwargs and 'flavor' in kwargs:
- flavor = kwargs['flavor']
- elif args and len(args) > 3:
- flavor = args[3]
- if cls is SFNTWriter:
- if flavor == "woff2":
- # return new WOFF2Writer object
- from fontTools.ttLib.woff2 import WOFF2Writer
- return object.__new__(WOFF2Writer)
- # return default object
- return object.__new__(cls)
-
- def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
- flavor=None, flavorData=None):
- self.file = file
- self.numTables = numTables
- self.sfntVersion = Tag(sfntVersion)
- self.flavor = flavor
- self.flavorData = flavorData
-
- if self.flavor == "woff":
- self.directoryFormat = woffDirectoryFormat
- self.directorySize = woffDirectorySize
- self.DirectoryEntry = WOFFDirectoryEntry
-
- self.signature = "wOFF"
-
- # to calculate WOFF checksum adjustment, we also need the original SFNT offsets
- self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize
- else:
- assert not self.flavor, "Unknown flavor '%s'" % self.flavor
- self.directoryFormat = sfntDirectoryFormat
- self.directorySize = sfntDirectorySize
- self.DirectoryEntry = SFNTDirectoryEntry
-
- from fontTools.ttLib import getSearchRange
- self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16)
-
- self.directoryOffset = self.file.tell()
- self.nextTableOffset = self.directoryOffset + self.directorySize + numTables * self.DirectoryEntry.formatSize
- # clear out directory area
- self.file.seek(self.nextTableOffset)
- # make sure we're actually where we want to be. (old cStringIO bug)
- self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
- self.tables = OrderedDict()
-
- def setEntry(self, tag, entry):
- if tag in self.tables:
- raise TTLibError("cannot rewrite '%s' table" % tag)
-
- self.tables[tag] = entry
-
- def __setitem__(self, tag, data):
- """Write raw table data to disk."""
- if tag in self.tables:
- raise TTLibError("cannot rewrite '%s' table" % tag)
-
- entry = self.DirectoryEntry()
- entry.tag = tag
- entry.offset = self.nextTableOffset
- if tag == 'head':
- entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
- self.headTable = data
- entry.uncompressed = True
- else:
- entry.checkSum = calcChecksum(data)
- entry.saveData(self.file, data)
-
- if self.flavor == "woff":
- entry.origOffset = self.origNextTableOffset
- self.origNextTableOffset += (entry.origLength + 3) & ~3
-
- self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3)
- # Add NUL bytes to pad the table data to a 4-byte boundary.
- # Don't depend on f.seek() as we need to add the padding even if no
- # subsequent write follows (seek is lazy), ie. after the final table
- # in the font.
- self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
- assert self.nextTableOffset == self.file.tell()
-
- self.setEntry(tag, entry)
-
- def __getitem__(self, tag):
- return self.tables[tag]
-
- def close(self):
- """All tables must have been written to disk. Now write the
- directory.
- """
- tables = sorted(self.tables.items())
- if len(tables) != self.numTables:
- raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables)))
-
- if self.flavor == "woff":
- self.signature = b"wOFF"
- self.reserved = 0
-
- self.totalSfntSize = 12
- self.totalSfntSize += 16 * len(tables)
- for tag, entry in tables:
- self.totalSfntSize += (entry.origLength + 3) & ~3
-
- data = self.flavorData if self.flavorData else WOFFFlavorData()
- if data.majorVersion is not None and data.minorVersion is not None:
- self.majorVersion = data.majorVersion
- self.minorVersion = data.minorVersion
- else:
- if hasattr(self, 'headTable'):
- self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8])
- else:
- self.majorVersion = self.minorVersion = 0
- if data.metaData:
- self.metaOrigLength = len(data.metaData)
- self.file.seek(0,2)
- self.metaOffset = self.file.tell()
- compressedMetaData = compress(data.metaData)
- self.metaLength = len(compressedMetaData)
- self.file.write(compressedMetaData)
- else:
- self.metaOffset = self.metaLength = self.metaOrigLength = 0
- if data.privData:
- self.file.seek(0,2)
- off = self.file.tell()
- paddedOff = (off + 3) & ~3
- self.file.write('\0' * (paddedOff - off))
- self.privOffset = self.file.tell()
- self.privLength = len(data.privData)
- self.file.write(data.privData)
- else:
- self.privOffset = self.privLength = 0
-
- self.file.seek(0,2)
- self.length = self.file.tell()
-
- else:
- assert not self.flavor, "Unknown flavor '%s'" % self.flavor
- pass
-
- directory = sstruct.pack(self.directoryFormat, self)
-
- self.file.seek(self.directoryOffset + self.directorySize)
- seenHead = 0
- for tag, entry in tables:
- if tag == "head":
- seenHead = 1
- directory = directory + entry.toString()
- if seenHead:
- self.writeMasterChecksum(directory)
- self.file.seek(self.directoryOffset)
- self.file.write(directory)
-
- def _calcMasterChecksum(self, directory):
- # calculate checkSumAdjustment
- tags = list(self.tables.keys())
- checksums = []
- for i in range(len(tags)):
- checksums.append(self.tables[tags[i]].checkSum)
-
- if self.DirectoryEntry != SFNTDirectoryEntry:
- # Create a SFNT directory for checksum calculation purposes
- from fontTools.ttLib import getSearchRange
- self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16)
- directory = sstruct.pack(sfntDirectoryFormat, self)
- tables = sorted(self.tables.items())
- for tag, entry in tables:
- sfntEntry = SFNTDirectoryEntry()
- sfntEntry.tag = entry.tag
- sfntEntry.checkSum = entry.checkSum
- sfntEntry.offset = entry.origOffset
- sfntEntry.length = entry.origLength
- directory = directory + sfntEntry.toString()
-
- directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
- assert directory_end == len(directory)
-
- checksums.append(calcChecksum(directory))
- checksum = sum(checksums) & 0xffffffff
- # BiboAfba!
- checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
- return checksumadjustment
-
- def writeMasterChecksum(self, directory):
- checksumadjustment = self._calcMasterChecksum(directory)
- # write the checksum to the file
- self.file.seek(self.tables['head'].offset + 8)
- self.file.write(struct.pack(">L", checksumadjustment))
-
- def reordersTables(self):
- return False
+
+class SFNTWriter(object):
+ def __new__(cls, *args, **kwargs):
+ """Return an instance of the SFNTWriter sub-class which is compatible
+ with the specified 'flavor'.
+ """
+ flavor = None
+ if kwargs and "flavor" in kwargs:
+ flavor = kwargs["flavor"]
+ elif args and len(args) > 3:
+ flavor = args[3]
+ if cls is SFNTWriter:
+ if flavor == "woff2":
+ # return new WOFF2Writer object
+ from fontTools.ttLib.woff2 import WOFF2Writer
+
+ return object.__new__(WOFF2Writer)
+ # return default object
+ return object.__new__(cls)
+
+ def __init__(
+ self,
+ file,
+ numTables,
+ sfntVersion="\000\001\000\000",
+ flavor=None,
+ flavorData=None,
+ ):
+ self.file = file
+ self.numTables = numTables
+ self.sfntVersion = Tag(sfntVersion)
+ self.flavor = flavor
+ self.flavorData = flavorData
+
+ if self.flavor == "woff":
+ self.directoryFormat = woffDirectoryFormat
+ self.directorySize = woffDirectorySize
+ self.DirectoryEntry = WOFFDirectoryEntry
+
+ self.signature = "wOFF"
+
+ # to calculate WOFF checksum adjustment, we also need the original SFNT offsets
+ self.origNextTableOffset = (
+ sfntDirectorySize + numTables * sfntDirectoryEntrySize
+ )
+ else:
+ assert not self.flavor, "Unknown flavor '%s'" % self.flavor
+ self.directoryFormat = sfntDirectoryFormat
+ self.directorySize = sfntDirectorySize
+ self.DirectoryEntry = SFNTDirectoryEntry
+
+ from fontTools.ttLib import getSearchRange
+
+ self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
+ numTables, 16
+ )
+
+ self.directoryOffset = self.file.tell()
+ self.nextTableOffset = (
+ self.directoryOffset
+ + self.directorySize
+ + numTables * self.DirectoryEntry.formatSize
+ )
+ # clear out directory area
+ self.file.seek(self.nextTableOffset)
+ # make sure we're actually where we want to be. (old cStringIO bug)
+ self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
+ self.tables = OrderedDict()
+
+ def setEntry(self, tag, entry):
+ if tag in self.tables:
+ raise TTLibError("cannot rewrite '%s' table" % tag)
+
+ self.tables[tag] = entry
+
+ def __setitem__(self, tag, data):
+ """Write raw table data to disk."""
+ if tag in self.tables:
+ raise TTLibError("cannot rewrite '%s' table" % tag)
+
+ entry = self.DirectoryEntry()
+ entry.tag = tag
+ entry.offset = self.nextTableOffset
+ if tag == "head":
+ entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
+ self.headTable = data
+ entry.uncompressed = True
+ else:
+ entry.checkSum = calcChecksum(data)
+ entry.saveData(self.file, data)
+
+ if self.flavor == "woff":
+ entry.origOffset = self.origNextTableOffset
+ self.origNextTableOffset += (entry.origLength + 3) & ~3
+
+ self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3)
+ # Add NUL bytes to pad the table data to a 4-byte boundary.
+ # Don't depend on f.seek() as we need to add the padding even if no
+ # subsequent write follows (seek is lazy), ie. after the final table
+ # in the font.
+ self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
+ assert self.nextTableOffset == self.file.tell()
+
+ self.setEntry(tag, entry)
+
+ def __getitem__(self, tag):
+ return self.tables[tag]
+
+ def close(self):
+ """All tables must have been written to disk. Now write the
+ directory.
+ """
+ tables = sorted(self.tables.items())
+ if len(tables) != self.numTables:
+ raise TTLibError(
+ "wrong number of tables; expected %d, found %d"
+ % (self.numTables, len(tables))
+ )
+
+ if self.flavor == "woff":
+ self.signature = b"wOFF"
+ self.reserved = 0
+
+ self.totalSfntSize = 12
+ self.totalSfntSize += 16 * len(tables)
+ for tag, entry in tables:
+ self.totalSfntSize += (entry.origLength + 3) & ~3
+
+ data = self.flavorData if self.flavorData else WOFFFlavorData()
+ if data.majorVersion is not None and data.minorVersion is not None:
+ self.majorVersion = data.majorVersion
+ self.minorVersion = data.minorVersion
+ else:
+ if hasattr(self, "headTable"):
+ self.majorVersion, self.minorVersion = struct.unpack(
+ ">HH", self.headTable[4:8]
+ )
+ else:
+ self.majorVersion = self.minorVersion = 0
+ if data.metaData:
+ self.metaOrigLength = len(data.metaData)
+ self.file.seek(0, 2)
+ self.metaOffset = self.file.tell()
+ compressedMetaData = compress(data.metaData)
+ self.metaLength = len(compressedMetaData)
+ self.file.write(compressedMetaData)
+ else:
+ self.metaOffset = self.metaLength = self.metaOrigLength = 0
+ if data.privData:
+ self.file.seek(0, 2)
+ off = self.file.tell()
+ paddedOff = (off + 3) & ~3
+ self.file.write(b"\0" * (paddedOff - off))
+ self.privOffset = self.file.tell()
+ self.privLength = len(data.privData)
+ self.file.write(data.privData)
+ else:
+ self.privOffset = self.privLength = 0
+
+ self.file.seek(0, 2)
+ self.length = self.file.tell()
+
+ else:
+ assert not self.flavor, "Unknown flavor '%s'" % self.flavor
+ pass
+
+ directory = sstruct.pack(self.directoryFormat, self)
+
+ self.file.seek(self.directoryOffset + self.directorySize)
+ seenHead = 0
+ for tag, entry in tables:
+ if tag == "head":
+ seenHead = 1
+ directory = directory + entry.toString()
+ if seenHead:
+ self.writeMasterChecksum(directory)
+ self.file.seek(self.directoryOffset)
+ self.file.write(directory)
+
+ def _calcMasterChecksum(self, directory):
+ # calculate checkSumAdjustment
+ tags = list(self.tables.keys())
+ checksums = []
+ for i in range(len(tags)):
+ checksums.append(self.tables[tags[i]].checkSum)
+
+ if self.DirectoryEntry != SFNTDirectoryEntry:
+ # Create a SFNT directory for checksum calculation purposes
+ from fontTools.ttLib import getSearchRange
+
+ self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
+ self.numTables, 16
+ )
+ directory = sstruct.pack(sfntDirectoryFormat, self)
+ tables = sorted(self.tables.items())
+ for tag, entry in tables:
+ sfntEntry = SFNTDirectoryEntry()
+ sfntEntry.tag = entry.tag
+ sfntEntry.checkSum = entry.checkSum
+ sfntEntry.offset = entry.origOffset
+ sfntEntry.length = entry.origLength
+ directory = directory + sfntEntry.toString()
+
+ directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
+ assert directory_end == len(directory)
+
+ checksums.append(calcChecksum(directory))
+ checksum = sum(checksums) & 0xFFFFFFFF
+ # BiboAfba!
+ checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
+ return checksumadjustment
+
+ def writeMasterChecksum(self, directory):
+ checksumadjustment = self._calcMasterChecksum(directory)
+ # write the checksum to the file
+ self.file.seek(self.tables["head"].offset + 8)
+ self.file.write(struct.pack(">L", checksumadjustment))
+
+ def reordersTables(self):
+ return False
# -- sfnt directory helpers and cruft
@@ -455,170 +483,179 @@ woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
class DirectoryEntry(object):
+ def __init__(self):
+ self.uncompressed = False # if True, always embed entry raw
- def __init__(self):
- self.uncompressed = False # if True, always embed entry raw
+ def fromFile(self, file):
+ sstruct.unpack(self.format, file.read(self.formatSize), self)
- def fromFile(self, file):
- sstruct.unpack(self.format, file.read(self.formatSize), self)
+ def fromString(self, str):
+ sstruct.unpack(self.format, str, self)
- def fromString(self, str):
- sstruct.unpack(self.format, str, self)
+ def toString(self):
+ return sstruct.pack(self.format, self)
- def toString(self):
- return sstruct.pack(self.format, self)
+ def __repr__(self):
+ if hasattr(self, "tag"):
+ return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self))
+ else:
+ return "<%s at %x>" % (self.__class__.__name__, id(self))
- def __repr__(self):
- if hasattr(self, "tag"):
- return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self))
- else:
- return "<%s at %x>" % (self.__class__.__name__, id(self))
+ def loadData(self, file):
+ file.seek(self.offset)
+ data = file.read(self.length)
+ assert len(data) == self.length
+ if hasattr(self.__class__, "decodeData"):
+ data = self.decodeData(data)
+ return data
- def loadData(self, file):
- file.seek(self.offset)
- data = file.read(self.length)
- assert len(data) == self.length
- if hasattr(self.__class__, 'decodeData'):
- data = self.decodeData(data)
- return data
+ def saveData(self, file, data):
+ if hasattr(self.__class__, "encodeData"):
+ data = self.encodeData(data)
+ self.length = len(data)
+ file.seek(self.offset)
+ file.write(data)
- def saveData(self, file, data):
- if hasattr(self.__class__, 'encodeData'):
- data = self.encodeData(data)
- self.length = len(data)
- file.seek(self.offset)
- file.write(data)
+ def decodeData(self, rawData):
+ return rawData
- def decodeData(self, rawData):
- return rawData
+ def encodeData(self, data):
+ return data
- def encodeData(self, data):
- return data
class SFNTDirectoryEntry(DirectoryEntry):
+ format = sfntDirectoryEntryFormat
+ formatSize = sfntDirectoryEntrySize
- format = sfntDirectoryEntryFormat
- formatSize = sfntDirectoryEntrySize
class WOFFDirectoryEntry(DirectoryEntry):
-
- format = woffDirectoryEntryFormat
- formatSize = woffDirectoryEntrySize
-
- def __init__(self):
- super(WOFFDirectoryEntry, self).__init__()
- # With fonttools<=3.1.2, the only way to set a different zlib
- # compression level for WOFF directory entries was to set the class
- # attribute 'zlibCompressionLevel'. This is now replaced by a globally
- # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
- # compressing the metadata. For backward compatibility, we still
- # use the class attribute if it was already set.
- if not hasattr(WOFFDirectoryEntry, 'zlibCompressionLevel'):
- self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL
-
- def decodeData(self, rawData):
- import zlib
- if self.length == self.origLength:
- data = rawData
- else:
- assert self.length < self.origLength
- data = zlib.decompress(rawData)
- assert len(data) == self.origLength
- return data
-
- def encodeData(self, data):
- self.origLength = len(data)
- if not self.uncompressed:
- compressedData = compress(data, self.zlibCompressionLevel)
- if self.uncompressed or len(compressedData) >= self.origLength:
- # Encode uncompressed
- rawData = data
- self.length = self.origLength
- else:
- rawData = compressedData
- self.length = len(rawData)
- return rawData
-
-class WOFFFlavorData():
-
- Flavor = 'woff'
-
- def __init__(self, reader=None):
- self.majorVersion = None
- self.minorVersion = None
- self.metaData = None
- self.privData = None
- if reader:
- self.majorVersion = reader.majorVersion
- self.minorVersion = reader.minorVersion
- if reader.metaLength:
- reader.file.seek(reader.metaOffset)
- rawData = reader.file.read(reader.metaLength)
- assert len(rawData) == reader.metaLength
- data = self._decompress(rawData)
- assert len(data) == reader.metaOrigLength
- self.metaData = data
- if reader.privLength:
- reader.file.seek(reader.privOffset)
- data = reader.file.read(reader.privLength)
- assert len(data) == reader.privLength
- self.privData = data
-
- def _decompress(self, rawData):
- import zlib
- return zlib.decompress(rawData)
+ format = woffDirectoryEntryFormat
+ formatSize = woffDirectoryEntrySize
+
+ def __init__(self):
+ super(WOFFDirectoryEntry, self).__init__()
+ # With fonttools<=3.1.2, the only way to set a different zlib
+ # compression level for WOFF directory entries was to set the class
+ # attribute 'zlibCompressionLevel'. This is now replaced by a globally
+ # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
+ # compressing the metadata. For backward compatibility, we still
+ # use the class attribute if it was already set.
+ if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"):
+ self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL
+
+ def decodeData(self, rawData):
+ import zlib
+
+ if self.length == self.origLength:
+ data = rawData
+ else:
+ assert self.length < self.origLength
+ data = zlib.decompress(rawData)
+ assert len(data) == self.origLength
+ return data
+
+ def encodeData(self, data):
+ self.origLength = len(data)
+ if not self.uncompressed:
+ compressedData = compress(data, self.zlibCompressionLevel)
+ if self.uncompressed or len(compressedData) >= self.origLength:
+ # Encode uncompressed
+ rawData = data
+ self.length = self.origLength
+ else:
+ rawData = compressedData
+ self.length = len(rawData)
+ return rawData
+
+
+class WOFFFlavorData:
+ Flavor = "woff"
+
+ def __init__(self, reader=None):
+ self.majorVersion = None
+ self.minorVersion = None
+ self.metaData = None
+ self.privData = None
+ if reader:
+ self.majorVersion = reader.majorVersion
+ self.minorVersion = reader.minorVersion
+ if reader.metaLength:
+ reader.file.seek(reader.metaOffset)
+ rawData = reader.file.read(reader.metaLength)
+ assert len(rawData) == reader.metaLength
+ data = self._decompress(rawData)
+ assert len(data) == reader.metaOrigLength
+ self.metaData = data
+ if reader.privLength:
+ reader.file.seek(reader.privOffset)
+ data = reader.file.read(reader.privLength)
+ assert len(data) == reader.privLength
+ self.privData = data
+
+ def _decompress(self, rawData):
+ import zlib
+
+ return zlib.decompress(rawData)
def calcChecksum(data):
- """Calculate the checksum for an arbitrary block of data.
-
- If the data length is not a multiple of four, it assumes
- it is to be padded with null byte.
-
- >>> print(calcChecksum(b"abcd"))
- 1633837924
- >>> print(calcChecksum(b"abcdxyz"))
- 3655064932
- """
- remainder = len(data) % 4
- if remainder:
- data += b"\0" * (4 - remainder)
- value = 0
- blockSize = 4096
- assert blockSize % 4 == 0
- for i in range(0, len(data), blockSize):
- block = data[i:i+blockSize]
- longs = struct.unpack(">%dL" % (len(block) // 4), block)
- value = (value + sum(longs)) & 0xffffffff
- return value
+ """Calculate the checksum for an arbitrary block of data.
+
+ If the data length is not a multiple of four, it assumes
+ it is to be padded with null byte.
+
+ >>> print(calcChecksum(b"abcd"))
+ 1633837924
+ >>> print(calcChecksum(b"abcdxyz"))
+ 3655064932
+ """
+ remainder = len(data) % 4
+ if remainder:
+ data += b"\0" * (4 - remainder)
+ value = 0
+ blockSize = 4096
+ assert blockSize % 4 == 0
+ for i in range(0, len(data), blockSize):
+ block = data[i : i + blockSize]
+ longs = struct.unpack(">%dL" % (len(block) // 4), block)
+ value = (value + sum(longs)) & 0xFFFFFFFF
+ return value
+
def readTTCHeader(file):
- file.seek(0)
- data = file.read(ttcHeaderSize)
- if len(data) != ttcHeaderSize:
- raise TTLibError("Not a Font Collection (not enough data)")
- self = SimpleNamespace()
- sstruct.unpack(ttcHeaderFormat, data, self)
- if self.TTCTag != "ttcf":
- raise TTLibError("Not a Font Collection")
- assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version
- self.offsetTable = struct.unpack(">%dL" % self.numFonts, file.read(self.numFonts * 4))
- if self.Version == 0x00020000:
- pass # ignoring version 2.0 signatures
- return self
+ file.seek(0)
+ data = file.read(ttcHeaderSize)
+ if len(data) != ttcHeaderSize:
+ raise TTLibError("Not a Font Collection (not enough data)")
+ self = SimpleNamespace()
+ sstruct.unpack(ttcHeaderFormat, data, self)
+ if self.TTCTag != "ttcf":
+ raise TTLibError("Not a Font Collection")
+ assert self.Version == 0x00010000 or self.Version == 0x00020000, (
+ "unrecognized TTC version 0x%08x" % self.Version
+ )
+ self.offsetTable = struct.unpack(
+ ">%dL" % self.numFonts, file.read(self.numFonts * 4)
+ )
+ if self.Version == 0x00020000:
+ pass # ignoring version 2.0 signatures
+ return self
+
def writeTTCHeader(file, numFonts):
- self = SimpleNamespace()
- self.TTCTag = 'ttcf'
- self.Version = 0x00010000
- self.numFonts = numFonts
- file.seek(0)
- file.write(sstruct.pack(ttcHeaderFormat, self))
- offset = file.tell()
- file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
- return offset
+ self = SimpleNamespace()
+ self.TTCTag = "ttcf"
+ self.Version = 0x00010000
+ self.numFonts = numFonts
+ file.seek(0)
+ file.write(sstruct.pack(ttcHeaderFormat, self))
+ offset = file.tell()
+ file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
+ return offset
+
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/standardGlyphOrder.py b/Lib/fontTools/ttLib/standardGlyphOrder.py
index 1f980e45..40623852 100644
--- a/Lib/fontTools/ttLib/standardGlyphOrder.py
+++ b/Lib/fontTools/ttLib/standardGlyphOrder.py
@@ -2,270 +2,270 @@
# 'post' table formats 1.0 and 2.0 rely on this list of "standard"
# glyphs.
#
-# My list is correct according to the Apple documentation for the 'post'
-# table: http://developer.apple.com/fonts/TTRefMan/RM06/Chap6post.html
+# My list is correct according to the Apple documentation for the 'post' table:
+# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html
# (However, it seems that TTFdump (from MS) and FontLab disagree, at
# least with respect to the last glyph, which they list as 'dslash'
# instead of 'dcroat'.)
#
standardGlyphOrder = [
- ".notdef", # 0
- ".null", # 1
- "nonmarkingreturn", # 2
- "space", # 3
- "exclam", # 4
- "quotedbl", # 5
- "numbersign", # 6
- "dollar", # 7
- "percent", # 8
- "ampersand", # 9
- "quotesingle", # 10
- "parenleft", # 11
- "parenright", # 12
- "asterisk", # 13
- "plus", # 14
- "comma", # 15
- "hyphen", # 16
- "period", # 17
- "slash", # 18
- "zero", # 19
- "one", # 20
- "two", # 21
- "three", # 22
- "four", # 23
- "five", # 24
- "six", # 25
- "seven", # 26
- "eight", # 27
- "nine", # 28
- "colon", # 29
- "semicolon", # 30
- "less", # 31
- "equal", # 32
- "greater", # 33
- "question", # 34
- "at", # 35
- "A", # 36
- "B", # 37
- "C", # 38
- "D", # 39
- "E", # 40
- "F", # 41
- "G", # 42
- "H", # 43
- "I", # 44
- "J", # 45
- "K", # 46
- "L", # 47
- "M", # 48
- "N", # 49
- "O", # 50
- "P", # 51
- "Q", # 52
- "R", # 53
- "S", # 54
- "T", # 55
- "U", # 56
- "V", # 57
- "W", # 58
- "X", # 59
- "Y", # 60
- "Z", # 61
- "bracketleft", # 62
- "backslash", # 63
- "bracketright", # 64
- "asciicircum", # 65
- "underscore", # 66
- "grave", # 67
- "a", # 68
- "b", # 69
- "c", # 70
- "d", # 71
- "e", # 72
- "f", # 73
- "g", # 74
- "h", # 75
- "i", # 76
- "j", # 77
- "k", # 78
- "l", # 79
- "m", # 80
- "n", # 81
- "o", # 82
- "p", # 83
- "q", # 84
- "r", # 85
- "s", # 86
- "t", # 87
- "u", # 88
- "v", # 89
- "w", # 90
- "x", # 91
- "y", # 92
- "z", # 93
- "braceleft", # 94
- "bar", # 95
- "braceright", # 96
- "asciitilde", # 97
- "Adieresis", # 98
- "Aring", # 99
- "Ccedilla", # 100
- "Eacute", # 101
- "Ntilde", # 102
- "Odieresis", # 103
- "Udieresis", # 104
- "aacute", # 105
- "agrave", # 106
- "acircumflex", # 107
- "adieresis", # 108
- "atilde", # 109
- "aring", # 110
- "ccedilla", # 111
- "eacute", # 112
- "egrave", # 113
- "ecircumflex", # 114
- "edieresis", # 115
- "iacute", # 116
- "igrave", # 117
- "icircumflex", # 118
- "idieresis", # 119
- "ntilde", # 120
- "oacute", # 121
- "ograve", # 122
- "ocircumflex", # 123
- "odieresis", # 124
- "otilde", # 125
- "uacute", # 126
- "ugrave", # 127
- "ucircumflex", # 128
- "udieresis", # 129
- "dagger", # 130
- "degree", # 131
- "cent", # 132
- "sterling", # 133
- "section", # 134
- "bullet", # 135
- "paragraph", # 136
- "germandbls", # 137
- "registered", # 138
- "copyright", # 139
- "trademark", # 140
- "acute", # 141
- "dieresis", # 142
- "notequal", # 143
- "AE", # 144
- "Oslash", # 145
- "infinity", # 146
- "plusminus", # 147
- "lessequal", # 148
- "greaterequal", # 149
- "yen", # 150
- "mu", # 151
- "partialdiff", # 152
- "summation", # 153
- "product", # 154
- "pi", # 155
- "integral", # 156
- "ordfeminine", # 157
- "ordmasculine", # 158
- "Omega", # 159
- "ae", # 160
- "oslash", # 161
- "questiondown", # 162
- "exclamdown", # 163
- "logicalnot", # 164
- "radical", # 165
- "florin", # 166
- "approxequal", # 167
- "Delta", # 168
- "guillemotleft", # 169
- "guillemotright", # 170
- "ellipsis", # 171
- "nonbreakingspace", # 172
- "Agrave", # 173
- "Atilde", # 174
- "Otilde", # 175
- "OE", # 176
- "oe", # 177
- "endash", # 178
- "emdash", # 179
- "quotedblleft", # 180
- "quotedblright", # 181
- "quoteleft", # 182
- "quoteright", # 183
- "divide", # 184
- "lozenge", # 185
- "ydieresis", # 186
- "Ydieresis", # 187
- "fraction", # 188
- "currency", # 189
- "guilsinglleft", # 190
- "guilsinglright", # 191
- "fi", # 192
- "fl", # 193
- "daggerdbl", # 194
- "periodcentered", # 195
- "quotesinglbase", # 196
- "quotedblbase", # 197
- "perthousand", # 198
- "Acircumflex", # 199
- "Ecircumflex", # 200
- "Aacute", # 201
- "Edieresis", # 202
- "Egrave", # 203
- "Iacute", # 204
- "Icircumflex", # 205
- "Idieresis", # 206
- "Igrave", # 207
- "Oacute", # 208
- "Ocircumflex", # 209
- "apple", # 210
- "Ograve", # 211
- "Uacute", # 212
- "Ucircumflex", # 213
- "Ugrave", # 214
- "dotlessi", # 215
- "circumflex", # 216
- "tilde", # 217
- "macron", # 218
- "breve", # 219
- "dotaccent", # 220
- "ring", # 221
- "cedilla", # 222
- "hungarumlaut", # 223
- "ogonek", # 224
- "caron", # 225
- "Lslash", # 226
- "lslash", # 227
- "Scaron", # 228
- "scaron", # 229
- "Zcaron", # 230
- "zcaron", # 231
- "brokenbar", # 232
- "Eth", # 233
- "eth", # 234
- "Yacute", # 235
- "yacute", # 236
- "Thorn", # 237
- "thorn", # 238
- "minus", # 239
- "multiply", # 240
- "onesuperior", # 241
- "twosuperior", # 242
- "threesuperior", # 243
- "onehalf", # 244
- "onequarter", # 245
- "threequarters", # 246
- "franc", # 247
- "Gbreve", # 248
- "gbreve", # 249
- "Idotaccent", # 250
- "Scedilla", # 251
- "scedilla", # 252
- "Cacute", # 253
- "cacute", # 254
- "Ccaron", # 255
- "ccaron", # 256
- "dcroat" # 257
+ ".notdef", # 0
+ ".null", # 1
+ "nonmarkingreturn", # 2
+ "space", # 3
+ "exclam", # 4
+ "quotedbl", # 5
+ "numbersign", # 6
+ "dollar", # 7
+ "percent", # 8
+ "ampersand", # 9
+ "quotesingle", # 10
+ "parenleft", # 11
+ "parenright", # 12
+ "asterisk", # 13
+ "plus", # 14
+ "comma", # 15
+ "hyphen", # 16
+ "period", # 17
+ "slash", # 18
+ "zero", # 19
+ "one", # 20
+ "two", # 21
+ "three", # 22
+ "four", # 23
+ "five", # 24
+ "six", # 25
+ "seven", # 26
+ "eight", # 27
+ "nine", # 28
+ "colon", # 29
+ "semicolon", # 30
+ "less", # 31
+ "equal", # 32
+ "greater", # 33
+ "question", # 34
+ "at", # 35
+ "A", # 36
+ "B", # 37
+ "C", # 38
+ "D", # 39
+ "E", # 40
+ "F", # 41
+ "G", # 42
+ "H", # 43
+ "I", # 44
+ "J", # 45
+ "K", # 46
+ "L", # 47
+ "M", # 48
+ "N", # 49
+ "O", # 50
+ "P", # 51
+ "Q", # 52
+ "R", # 53
+ "S", # 54
+ "T", # 55
+ "U", # 56
+ "V", # 57
+ "W", # 58
+ "X", # 59
+ "Y", # 60
+ "Z", # 61
+ "bracketleft", # 62
+ "backslash", # 63
+ "bracketright", # 64
+ "asciicircum", # 65
+ "underscore", # 66
+ "grave", # 67
+ "a", # 68
+ "b", # 69
+ "c", # 70
+ "d", # 71
+ "e", # 72
+ "f", # 73
+ "g", # 74
+ "h", # 75
+ "i", # 76
+ "j", # 77
+ "k", # 78
+ "l", # 79
+ "m", # 80
+ "n", # 81
+ "o", # 82
+ "p", # 83
+ "q", # 84
+ "r", # 85
+ "s", # 86
+ "t", # 87
+ "u", # 88
+ "v", # 89
+ "w", # 90
+ "x", # 91
+ "y", # 92
+ "z", # 93
+ "braceleft", # 94
+ "bar", # 95
+ "braceright", # 96
+ "asciitilde", # 97
+ "Adieresis", # 98
+ "Aring", # 99
+ "Ccedilla", # 100
+ "Eacute", # 101
+ "Ntilde", # 102
+ "Odieresis", # 103
+ "Udieresis", # 104
+ "aacute", # 105
+ "agrave", # 106
+ "acircumflex", # 107
+ "adieresis", # 108
+ "atilde", # 109
+ "aring", # 110
+ "ccedilla", # 111
+ "eacute", # 112
+ "egrave", # 113
+ "ecircumflex", # 114
+ "edieresis", # 115
+ "iacute", # 116
+ "igrave", # 117
+ "icircumflex", # 118
+ "idieresis", # 119
+ "ntilde", # 120
+ "oacute", # 121
+ "ograve", # 122
+ "ocircumflex", # 123
+ "odieresis", # 124
+ "otilde", # 125
+ "uacute", # 126
+ "ugrave", # 127
+ "ucircumflex", # 128
+ "udieresis", # 129
+ "dagger", # 130
+ "degree", # 131
+ "cent", # 132
+ "sterling", # 133
+ "section", # 134
+ "bullet", # 135
+ "paragraph", # 136
+ "germandbls", # 137
+ "registered", # 138
+ "copyright", # 139
+ "trademark", # 140
+ "acute", # 141
+ "dieresis", # 142
+ "notequal", # 143
+ "AE", # 144
+ "Oslash", # 145
+ "infinity", # 146
+ "plusminus", # 147
+ "lessequal", # 148
+ "greaterequal", # 149
+ "yen", # 150
+ "mu", # 151
+ "partialdiff", # 152
+ "summation", # 153
+ "product", # 154
+ "pi", # 155
+ "integral", # 156
+ "ordfeminine", # 157
+ "ordmasculine", # 158
+ "Omega", # 159
+ "ae", # 160
+ "oslash", # 161
+ "questiondown", # 162
+ "exclamdown", # 163
+ "logicalnot", # 164
+ "radical", # 165
+ "florin", # 166
+ "approxequal", # 167
+ "Delta", # 168
+ "guillemotleft", # 169
+ "guillemotright", # 170
+ "ellipsis", # 171
+ "nonbreakingspace", # 172
+ "Agrave", # 173
+ "Atilde", # 174
+ "Otilde", # 175
+ "OE", # 176
+ "oe", # 177
+ "endash", # 178
+ "emdash", # 179
+ "quotedblleft", # 180
+ "quotedblright", # 181
+ "quoteleft", # 182
+ "quoteright", # 183
+ "divide", # 184
+ "lozenge", # 185
+ "ydieresis", # 186
+ "Ydieresis", # 187
+ "fraction", # 188
+ "currency", # 189
+ "guilsinglleft", # 190
+ "guilsinglright", # 191
+ "fi", # 192
+ "fl", # 193
+ "daggerdbl", # 194
+ "periodcentered", # 195
+ "quotesinglbase", # 196
+ "quotedblbase", # 197
+ "perthousand", # 198
+ "Acircumflex", # 199
+ "Ecircumflex", # 200
+ "Aacute", # 201
+ "Edieresis", # 202
+ "Egrave", # 203
+ "Iacute", # 204
+ "Icircumflex", # 205
+ "Idieresis", # 206
+ "Igrave", # 207
+ "Oacute", # 208
+ "Ocircumflex", # 209
+ "apple", # 210
+ "Ograve", # 211
+ "Uacute", # 212
+ "Ucircumflex", # 213
+ "Ugrave", # 214
+ "dotlessi", # 215
+ "circumflex", # 216
+ "tilde", # 217
+ "macron", # 218
+ "breve", # 219
+ "dotaccent", # 220
+ "ring", # 221
+ "cedilla", # 222
+ "hungarumlaut", # 223
+ "ogonek", # 224
+ "caron", # 225
+ "Lslash", # 226
+ "lslash", # 227
+ "Scaron", # 228
+ "scaron", # 229
+ "Zcaron", # 230
+ "zcaron", # 231
+ "brokenbar", # 232
+ "Eth", # 233
+ "eth", # 234
+ "Yacute", # 235
+ "yacute", # 236
+ "Thorn", # 237
+ "thorn", # 238
+ "minus", # 239
+ "multiply", # 240
+ "onesuperior", # 241
+ "twosuperior", # 242
+ "threesuperior", # 243
+ "onehalf", # 244
+ "onequarter", # 245
+ "threequarters", # 246
+ "franc", # 247
+ "Gbreve", # 248
+ "gbreve", # 249
+ "Idotaccent", # 250
+ "Scedilla", # 251
+ "scedilla", # 252
+ "Cacute", # 253
+ "cacute", # 254
+ "Ccaron", # 255
+ "ccaron", # 256
+ "dcroat", # 257
]
diff --git a/Lib/fontTools/ttLib/tables/B_A_S_E_.py b/Lib/fontTools/ttLib/tables/B_A_S_E_.py
index 9551e2c6..f468a963 100644
--- a/Lib/fontTools/ttLib/tables/B_A_S_E_.py
+++ b/Lib/fontTools/ttLib/tables/B_A_S_E_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_B_A_S_E_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py b/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
index 9197923d..10b4f828 100644
--- a/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
+++ b/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
@@ -28,32 +28,37 @@ smallGlyphMetricsFormat = """
Advance: B
"""
+
class BitmapGlyphMetrics(object):
+ def toXML(self, writer, ttFont):
+ writer.begintag(self.__class__.__name__)
+ writer.newline()
+ for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
+ writer.simpletag(metricName, value=getattr(self, metricName))
+ writer.newline()
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__)
- writer.newline()
- for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
- writer.simpletag(metricName, value=getattr(self, metricName))
- writer.newline()
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- # Make sure this is a metric that is needed by GlyphMetrics.
- if name in metricNames:
- vars(self)[name] = safeEval(attrs['value'])
- else:
- log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__)
+ def fromXML(self, name, attrs, content, ttFont):
+ metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ # Make sure this is a metric that is needed by GlyphMetrics.
+ if name in metricNames:
+ vars(self)[name] = safeEval(attrs["value"])
+ else:
+ log.warning(
+ "unknown name '%s' being ignored in %s.",
+ name,
+ self.__class__.__name__,
+ )
class BigGlyphMetrics(BitmapGlyphMetrics):
- binaryFormat = bigGlyphMetricsFormat
+ binaryFormat = bigGlyphMetricsFormat
+
class SmallGlyphMetrics(BitmapGlyphMetrics):
- binaryFormat = smallGlyphMetricsFormat
+ binaryFormat = smallGlyphMetricsFormat
diff --git a/Lib/fontTools/ttLib/tables/C_B_D_T_.py b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
index adf5447f..2b87ac86 100644
--- a/Lib/fontTools/ttLib/tables/C_B_D_T_.py
+++ b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
@@ -6,87 +6,98 @@
from fontTools.misc.textTools import bytesjoin
from fontTools.misc import sstruct
from . import E_B_D_T_
-from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
-from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin
+from .BitmapGlyphMetrics import (
+ BigGlyphMetrics,
+ bigGlyphMetricsFormat,
+ SmallGlyphMetrics,
+ smallGlyphMetricsFormat,
+)
+from .E_B_D_T_ import (
+ BitmapGlyph,
+ BitmapPlusSmallMetricsMixin,
+ BitmapPlusBigMetricsMixin,
+)
import struct
+
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
+ # Change the data locator table being referenced.
+ locatorName = "CBLC"
- # Change the data locator table being referenced.
- locatorName = 'CBLC'
+ # Modify the format class accessor for color bitmap use.
+ def getImageFormatClass(self, imageFormat):
+ try:
+ return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
+ except KeyError:
+ return cbdt_bitmap_classes[imageFormat]
- # Modify the format class accessor for color bitmap use.
- def getImageFormatClass(self, imageFormat):
- try:
- return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
- except KeyError:
- return cbdt_bitmap_classes[imageFormat]
# Helper method for removing export features not supported by color bitmaps.
# Write data in the parent class will default to raw if an option is unsupported.
def _removeUnsupportedForColor(dataFunctions):
- dataFunctions = dict(dataFunctions)
- del dataFunctions['row']
- return dataFunctions
+ dataFunctions = dict(dataFunctions)
+ del dataFunctions["row"]
+ return dataFunctions
+
class ColorBitmapGlyph(BitmapGlyph):
+ fileExtension = ".png"
+ xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
- fileExtension = '.png'
- xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ (dataLen,) = struct.unpack(">L", data[:4])
+ data = data[4:]
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- (dataLen,) = struct.unpack(">L", data[:4])
- data = data[4:]
+ # For the image data cut it to the size specified by dataLen.
+ assert dataLen <= len(data), "Data overun in format 17"
+ self.imageData = data[:dataLen]
- # For the image data cut it to the size specified by dataLen.
- assert dataLen <= len(data), "Data overun in format 17"
- self.imageData = data[:dataLen]
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
+ dataList.append(struct.pack(">L", len(self.imageData)))
+ dataList.append(self.imageData)
+ return bytesjoin(dataList)
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
- dataList.append(struct.pack(">L", len(self.imageData)))
- dataList.append(self.imageData)
- return bytesjoin(dataList)
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ (dataLen,) = struct.unpack(">L", data[:4])
+ data = data[4:]
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- (dataLen,) = struct.unpack(">L", data[:4])
- data = data[4:]
+ # For the image data cut it to the size specified by dataLen.
+ assert dataLen <= len(data), "Data overun in format 18"
+ self.imageData = data[:dataLen]
- # For the image data cut it to the size specified by dataLen.
- assert dataLen <= len(data), "Data overun in format 18"
- self.imageData = data[:dataLen]
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ dataList.append(struct.pack(">L", len(self.imageData)))
+ dataList.append(self.imageData)
+ return bytesjoin(dataList)
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- dataList.append(struct.pack(">L", len(self.imageData)))
- dataList.append(self.imageData)
- return bytesjoin(dataList)
class cbdt_bitmap_format_19(ColorBitmapGlyph):
+ def decompile(self):
+ (dataLen,) = struct.unpack(">L", self.data[:4])
+ data = self.data[4:]
- def decompile(self):
- (dataLen,) = struct.unpack(">L", self.data[:4])
- data = self.data[4:]
+ assert dataLen <= len(data), "Data overun in format 19"
+ self.imageData = data[:dataLen]
- assert dataLen <= len(data), "Data overun in format 19"
- self.imageData = data[:dataLen]
+ def compile(self, ttFont):
+ return struct.pack(">L", len(self.imageData)) + self.imageData
- def compile(self, ttFont):
- return struct.pack(">L", len(self.imageData)) + self.imageData
# Dict for CBDT extended formats.
cbdt_bitmap_classes = {
- 17: cbdt_bitmap_format_17,
- 18: cbdt_bitmap_format_18,
- 19: cbdt_bitmap_format_19,
+ 17: cbdt_bitmap_format_17,
+ 18: cbdt_bitmap_format_18,
+ 19: cbdt_bitmap_format_19,
}
diff --git a/Lib/fontTools/ttLib/tables/C_B_L_C_.py b/Lib/fontTools/ttLib/tables/C_B_L_C_.py
index 2f785710..fc3974ec 100644
--- a/Lib/fontTools/ttLib/tables/C_B_L_C_.py
+++ b/Lib/fontTools/ttLib/tables/C_B_L_C_.py
@@ -4,6 +4,6 @@
from . import E_B_L_C_
-class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
- dependencies = ['CBDT']
+class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
+ dependencies = ["CBDT"]
diff --git a/Lib/fontTools/ttLib/tables/C_F_F_.py b/Lib/fontTools/ttLib/tables/C_F_F_.py
index d12b89d2..c231599e 100644
--- a/Lib/fontTools/ttLib/tables/C_F_F_.py
+++ b/Lib/fontTools/ttLib/tables/C_F_F_.py
@@ -4,43 +4,43 @@ from . import DefaultTable
class table_C_F_F_(DefaultTable.DefaultTable):
-
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.cff = cffLib.CFFFontSet()
- self._gaveGlyphOrder = False
-
- def decompile(self, data, otFont):
- self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
- assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
-
- def compile(self, otFont):
- f = BytesIO()
- self.cff.compile(f, otFont, isCFF2=False)
- return f.getvalue()
-
- def haveGlyphNames(self):
- if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
- return False # CID-keyed font
- else:
- return True
-
- def getGlyphOrder(self):
- if self._gaveGlyphOrder:
- from fontTools import ttLib
- raise ttLib.TTLibError("illegal use of getGlyphOrder()")
- self._gaveGlyphOrder = True
- return self.cff[self.cff.fontNames[0]].getGlyphOrder()
-
- def setGlyphOrder(self, glyphOrder):
- pass
- # XXX
- #self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)
-
- def toXML(self, writer, otFont):
- self.cff.toXML(writer)
-
- def fromXML(self, name, attrs, content, otFont):
- if not hasattr(self, "cff"):
- self.cff = cffLib.CFFFontSet()
- self.cff.fromXML(name, attrs, content, otFont)
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.cff = cffLib.CFFFontSet()
+ self._gaveGlyphOrder = False
+
+ def decompile(self, data, otFont):
+ self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
+ assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
+
+ def compile(self, otFont):
+ f = BytesIO()
+ self.cff.compile(f, otFont, isCFF2=False)
+ return f.getvalue()
+
+ def haveGlyphNames(self):
+ if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
+ return False # CID-keyed font
+ else:
+ return True
+
+ def getGlyphOrder(self):
+ if self._gaveGlyphOrder:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("illegal use of getGlyphOrder()")
+ self._gaveGlyphOrder = True
+ return self.cff[self.cff.fontNames[0]].getGlyphOrder()
+
+ def setGlyphOrder(self, glyphOrder):
+ pass
+ # XXX
+ # self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)
+
+ def toXML(self, writer, otFont):
+ self.cff.toXML(writer)
+
+ def fromXML(self, name, attrs, content, otFont):
+ if not hasattr(self, "cff"):
+ self.cff = cffLib.CFFFontSet()
+ self.cff.fromXML(name, attrs, content, otFont)
diff --git a/Lib/fontTools/ttLib/tables/C_F_F__2.py b/Lib/fontTools/ttLib/tables/C_F_F__2.py
index 6217ebba..edbb0b92 100644
--- a/Lib/fontTools/ttLib/tables/C_F_F__2.py
+++ b/Lib/fontTools/ttLib/tables/C_F_F__2.py
@@ -3,7 +3,6 @@ from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_
class table_C_F_F__2(table_C_F_F_):
-
def decompile(self, data, otFont):
self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
diff --git a/Lib/fontTools/ttLib/tables/C_O_L_R_.py b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
index 3528bf5b..2f03ec05 100644
--- a/Lib/fontTools/ttLib/tables/C_O_L_R_.py
+++ b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
@@ -8,153 +8,151 @@ from . import DefaultTable
class table_C_O_L_R_(DefaultTable.DefaultTable):
- """ This table is structured so that you can treat it like a dictionary keyed by glyph name.
-
- ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
-
- ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
- """
-
- @staticmethod
- def _decompileColorLayersV0(table):
- if not table.LayerRecordArray:
- return {}
- colorLayerLists = {}
- layerRecords = table.LayerRecordArray.LayerRecord
- numLayerRecords = len(layerRecords)
- for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
- baseGlyph = baseRec.BaseGlyph
- firstLayerIndex = baseRec.FirstLayerIndex
- numLayers = baseRec.NumLayers
- assert (firstLayerIndex + numLayers <= numLayerRecords)
- layers = []
- for i in range(firstLayerIndex, firstLayerIndex+numLayers):
- layerRec = layerRecords[i]
- layers.append(
- LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex)
- )
- colorLayerLists[baseGlyph] = layers
- return colorLayerLists
-
- def _toOTTable(self, ttFont):
- from . import otTables
- from fontTools.colorLib.builder import populateCOLRv0
-
- tableClass = getattr(otTables, self.tableTag)
- table = tableClass()
- table.Version = self.version
-
- populateCOLRv0(
- table,
- {
- baseGlyph: [(layer.name, layer.colorID) for layer in layers]
- for baseGlyph, layers in self.ColorLayers.items()
- },
- glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
- )
- return table
-
- def decompile(self, data, ttFont):
- from .otBase import OTTableReader
- from . import otTables
-
- # We use otData to decompile, but we adapt the decompiled otTables to the
- # existing COLR v0 API for backward compatibility.
- reader = OTTableReader(data, tableTag=self.tableTag)
- tableClass = getattr(otTables, self.tableTag)
- table = tableClass()
- table.decompile(reader, ttFont)
-
- self.version = table.Version
- if self.version == 0:
- self.ColorLayers = self._decompileColorLayersV0(table)
- else:
- # for new versions, keep the raw otTables around
- self.table = table
-
- def compile(self, ttFont):
- from .otBase import OTTableWriter
-
- if hasattr(self, "table"):
- table = self.table
- else:
- table = self._toOTTable(ttFont)
-
- writer = OTTableWriter(tableTag=self.tableTag)
- table.compile(writer, ttFont)
- return writer.getAllData()
-
- def toXML(self, writer, ttFont):
- if hasattr(self, "table"):
- self.table.toXML2(writer, ttFont)
- else:
- writer.simpletag("version", value=self.version)
- writer.newline()
- for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
- writer.begintag("ColorGlyph", name=baseGlyph)
- writer.newline()
- for layer in self.ColorLayers[baseGlyph]:
- layer.toXML(writer, ttFont)
- writer.endtag("ColorGlyph")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version": # old COLR v0 API
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "ColorGlyph":
- if not hasattr(self, "ColorLayers"):
- self.ColorLayers = {}
- glyphName = attrs["name"]
- for element in content:
- if isinstance(element, str):
- continue
- layers = []
- for element in content:
- if isinstance(element, str):
- continue
- layer = LayerRecord()
- layer.fromXML(element[0], element[1], element[2], ttFont)
- layers.append (layer)
- self.ColorLayers[glyphName] = layers
- else: # new COLR v1 API
- from . import otTables
-
- if not hasattr(self, "table"):
- tableClass = getattr(otTables, self.tableTag)
- self.table = tableClass()
- self.table.fromXML(name, attrs, content, ttFont)
- self.table.populateDefaults()
- self.version = self.table.Version
-
- def __getitem__(self, glyphName):
- if not isinstance(glyphName, str):
- raise TypeError(f"expected str, found {type(glyphName).__name__}")
- return self.ColorLayers[glyphName]
-
- def __setitem__(self, glyphName, value):
- if not isinstance(glyphName, str):
- raise TypeError(f"expected str, found {type(glyphName).__name__}")
- if value is not None:
- self.ColorLayers[glyphName] = value
- elif glyphName in self.ColorLayers:
- del self.ColorLayers[glyphName]
-
- def __delitem__(self, glyphName):
- del self.ColorLayers[glyphName]
+ """This table is structured so that you can treat it like a dictionary keyed by glyph name.
+
+ ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
+
+ ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
+ """
+
+ @staticmethod
+ def _decompileColorLayersV0(table):
+ if not table.LayerRecordArray:
+ return {}
+ colorLayerLists = {}
+ layerRecords = table.LayerRecordArray.LayerRecord
+ numLayerRecords = len(layerRecords)
+ for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
+ baseGlyph = baseRec.BaseGlyph
+ firstLayerIndex = baseRec.FirstLayerIndex
+ numLayers = baseRec.NumLayers
+ assert firstLayerIndex + numLayers <= numLayerRecords
+ layers = []
+ for i in range(firstLayerIndex, firstLayerIndex + numLayers):
+ layerRec = layerRecords[i]
+ layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
+ colorLayerLists[baseGlyph] = layers
+ return colorLayerLists
+
+ def _toOTTable(self, ttFont):
+ from . import otTables
+ from fontTools.colorLib.builder import populateCOLRv0
+
+ tableClass = getattr(otTables, self.tableTag)
+ table = tableClass()
+ table.Version = self.version
+
+ populateCOLRv0(
+ table,
+ {
+ baseGlyph: [(layer.name, layer.colorID) for layer in layers]
+ for baseGlyph, layers in self.ColorLayers.items()
+ },
+ glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
+ )
+ return table
+
+ def decompile(self, data, ttFont):
+ from .otBase import OTTableReader
+ from . import otTables
+
+ # We use otData to decompile, but we adapt the decompiled otTables to the
+ # existing COLR v0 API for backward compatibility.
+ reader = OTTableReader(data, tableTag=self.tableTag)
+ tableClass = getattr(otTables, self.tableTag)
+ table = tableClass()
+ table.decompile(reader, ttFont)
+
+ self.version = table.Version
+ if self.version == 0:
+ self.ColorLayers = self._decompileColorLayersV0(table)
+ else:
+ # for new versions, keep the raw otTables around
+ self.table = table
+
+ def compile(self, ttFont):
+ from .otBase import OTTableWriter
+
+ if hasattr(self, "table"):
+ table = self.table
+ else:
+ table = self._toOTTable(ttFont)
+
+ writer = OTTableWriter(tableTag=self.tableTag)
+ table.compile(writer, ttFont)
+ return writer.getAllData()
+
+ def toXML(self, writer, ttFont):
+ if hasattr(self, "table"):
+ self.table.toXML2(writer, ttFont)
+ else:
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
+ writer.begintag("ColorGlyph", name=baseGlyph)
+ writer.newline()
+ for layer in self.ColorLayers[baseGlyph]:
+ layer.toXML(writer, ttFont)
+ writer.endtag("ColorGlyph")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version": # old COLR v0 API
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "ColorGlyph":
+ if not hasattr(self, "ColorLayers"):
+ self.ColorLayers = {}
+ glyphName = attrs["name"]
+ for element in content:
+ if isinstance(element, str):
+ continue
+ layers = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ layer = LayerRecord()
+ layer.fromXML(element[0], element[1], element[2], ttFont)
+ layers.append(layer)
+ self.ColorLayers[glyphName] = layers
+ else: # new COLR v1 API
+ from . import otTables
+
+ if not hasattr(self, "table"):
+ tableClass = getattr(otTables, self.tableTag)
+ self.table = tableClass()
+ self.table.fromXML(name, attrs, content, ttFont)
+ self.table.populateDefaults()
+ self.version = self.table.Version
+
+ def __getitem__(self, glyphName):
+ if not isinstance(glyphName, str):
+ raise TypeError(f"expected str, found {type(glyphName).__name__}")
+ return self.ColorLayers[glyphName]
+
+ def __setitem__(self, glyphName, value):
+ if not isinstance(glyphName, str):
+ raise TypeError(f"expected str, found {type(glyphName).__name__}")
+ if value is not None:
+ self.ColorLayers[glyphName] = value
+ elif glyphName in self.ColorLayers:
+ del self.ColorLayers[glyphName]
+
+ def __delitem__(self, glyphName):
+ del self.ColorLayers[glyphName]
-class LayerRecord(object):
-
- def __init__(self, name=None, colorID=None):
- self.name = name
- self.colorID = colorID
- def toXML(self, writer, ttFont):
- writer.simpletag("layer", name=self.name, colorID=self.colorID)
- writer.newline()
-
- def fromXML(self, eltname, attrs, content, ttFont):
- for (name, value) in attrs.items():
- if name == "name":
- setattr(self, name, value)
- else:
- setattr(self, name, safeEval(value))
+class LayerRecord(object):
+ def __init__(self, name=None, colorID=None):
+ self.name = name
+ self.colorID = colorID
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("layer", name=self.name, colorID=self.colorID)
+ writer.newline()
+
+ def fromXML(self, eltname, attrs, content, ttFont):
+ for name, value in attrs.items():
+ if name == "name":
+ setattr(self, name, value)
+ else:
+ setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/C_P_A_L_.py b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
index 1ad342f1..9fb2074a 100644
--- a/Lib/fontTools/ttLib/tables/C_P_A_L_.py
+++ b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
@@ -11,250 +11,286 @@ import sys
class table_C_P_A_L_(DefaultTable.DefaultTable):
+ NO_NAME_ID = 0xFFFF
+ DEFAULT_PALETTE_TYPE = 0
- NO_NAME_ID = 0xFFFF
- DEFAULT_PALETTE_TYPE = 0
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.palettes = []
+ self.paletteTypes = []
+ self.paletteLabels = []
+ self.paletteEntryLabels = []
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.palettes = []
- self.paletteTypes = []
- self.paletteLabels = []
- self.paletteEntryLabels = []
+ def decompile(self, data, ttFont):
+ (
+ self.version,
+ self.numPaletteEntries,
+ numPalettes,
+ numColorRecords,
+ goffsetFirstColorRecord,
+ ) = struct.unpack(">HHHHL", data[:12])
+ assert (
+ self.version <= 1
+ ), "Version of CPAL table is higher than I know how to handle"
+ self.palettes = []
+ pos = 12
+ for i in range(numPalettes):
+ startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
+ assert startIndex + self.numPaletteEntries <= numColorRecords
+ pos += 2
+ palette = []
+ ppos = goffsetFirstColorRecord + startIndex * 4
+ for j in range(self.numPaletteEntries):
+ palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4])))
+ ppos += 4
+ self.palettes.append(palette)
+ if self.version == 0:
+ offsetToPaletteTypeArray = 0
+ offsetToPaletteLabelArray = 0
+ offsetToPaletteEntryLabelArray = 0
+ else:
+ pos = 12 + numPalettes * 2
+ (
+ offsetToPaletteTypeArray,
+ offsetToPaletteLabelArray,
+ offsetToPaletteEntryLabelArray,
+ ) = struct.unpack(">LLL", data[pos : pos + 12])
+ self.paletteTypes = self._decompileUInt32Array(
+ data,
+ offsetToPaletteTypeArray,
+ numPalettes,
+ default=self.DEFAULT_PALETTE_TYPE,
+ )
+ self.paletteLabels = self._decompileUInt16Array(
+ data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
+ )
+ self.paletteEntryLabels = self._decompileUInt16Array(
+ data,
+ offsetToPaletteEntryLabelArray,
+ self.numPaletteEntries,
+ default=self.NO_NAME_ID,
+ )
- def decompile(self, data, ttFont):
- self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12])
- assert (self.version <= 1), "Version of CPAL table is higher than I know how to handle"
- self.palettes = []
- pos = 12
- for i in range(numPalettes):
- startIndex = struct.unpack(">H", data[pos:pos+2])[0]
- assert (startIndex + self.numPaletteEntries <= numColorRecords)
- pos += 2
- palette = []
- ppos = goffsetFirstColorRecord + startIndex * 4
- for j in range(self.numPaletteEntries):
- palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) )
- ppos += 4
- self.palettes.append(palette)
- if self.version == 0:
- offsetToPaletteTypeArray = 0
- offsetToPaletteLabelArray = 0
- offsetToPaletteEntryLabelArray = 0
- else:
- pos = 12 + numPalettes * 2
- (offsetToPaletteTypeArray, offsetToPaletteLabelArray,
- offsetToPaletteEntryLabelArray) = (
- struct.unpack(">LLL", data[pos:pos+12]))
- self.paletteTypes = self._decompileUInt32Array(
- data, offsetToPaletteTypeArray, numPalettes,
- default=self.DEFAULT_PALETTE_TYPE)
- self.paletteLabels = self._decompileUInt16Array(
- data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID)
- self.paletteEntryLabels = self._decompileUInt16Array(
- data, offsetToPaletteEntryLabelArray,
- self.numPaletteEntries, default=self.NO_NAME_ID)
+ def _decompileUInt16Array(self, data, offset, numElements, default=0):
+ if offset == 0:
+ return [default] * numElements
+ result = array.array("H", data[offset : offset + 2 * numElements])
+ if sys.byteorder != "big":
+ result.byteswap()
+ assert len(result) == numElements, result
+ return result.tolist()
- def _decompileUInt16Array(self, data, offset, numElements, default=0):
- if offset == 0:
- return [default] * numElements
- result = array.array("H", data[offset : offset + 2 * numElements])
- if sys.byteorder != "big": result.byteswap()
- assert len(result) == numElements, result
- return result.tolist()
+ def _decompileUInt32Array(self, data, offset, numElements, default=0):
+ if offset == 0:
+ return [default] * numElements
+ result = array.array("I", data[offset : offset + 4 * numElements])
+ if sys.byteorder != "big":
+ result.byteswap()
+ assert len(result) == numElements, result
+ return result.tolist()
- def _decompileUInt32Array(self, data, offset, numElements, default=0):
- if offset == 0:
- return [default] * numElements
- result = array.array("I", data[offset : offset + 4 * numElements])
- if sys.byteorder != "big": result.byteswap()
- assert len(result) == numElements, result
- return result.tolist()
+ def compile(self, ttFont):
+ colorRecordIndices, colorRecords = self._compileColorRecords()
+ paletteTypes = self._compilePaletteTypes()
+ paletteLabels = self._compilePaletteLabels()
+ paletteEntryLabels = self._compilePaletteEntryLabels()
+ numColorRecords = len(colorRecords) // 4
+ offsetToFirstColorRecord = 12 + len(colorRecordIndices)
+ if self.version >= 1:
+ offsetToFirstColorRecord += 12
+ header = struct.pack(
+ ">HHHHL",
+ self.version,
+ self.numPaletteEntries,
+ len(self.palettes),
+ numColorRecords,
+ offsetToFirstColorRecord,
+ )
+ if self.version == 0:
+ dataList = [header, colorRecordIndices, colorRecords]
+ else:
+ pos = offsetToFirstColorRecord + len(colorRecords)
+ if len(paletteTypes) == 0:
+ offsetToPaletteTypeArray = 0
+ else:
+ offsetToPaletteTypeArray = pos
+ pos += len(paletteTypes)
+ if len(paletteLabels) == 0:
+ offsetToPaletteLabelArray = 0
+ else:
+ offsetToPaletteLabelArray = pos
+ pos += len(paletteLabels)
+ if len(paletteEntryLabels) == 0:
+ offsetToPaletteEntryLabelArray = 0
+ else:
+ offsetToPaletteEntryLabelArray = pos
+ pos += len(paletteLabels)
+ header1 = struct.pack(
+ ">LLL",
+ offsetToPaletteTypeArray,
+ offsetToPaletteLabelArray,
+ offsetToPaletteEntryLabelArray,
+ )
+ dataList = [
+ header,
+ colorRecordIndices,
+ header1,
+ colorRecords,
+ paletteTypes,
+ paletteLabels,
+ paletteEntryLabels,
+ ]
+ return bytesjoin(dataList)
- def compile(self, ttFont):
- colorRecordIndices, colorRecords = self._compileColorRecords()
- paletteTypes = self._compilePaletteTypes()
- paletteLabels = self._compilePaletteLabels()
- paletteEntryLabels = self._compilePaletteEntryLabels()
- numColorRecords = len(colorRecords) // 4
- offsetToFirstColorRecord = 12 + len(colorRecordIndices)
- if self.version >= 1:
- offsetToFirstColorRecord += 12
- header = struct.pack(">HHHHL", self.version,
- self.numPaletteEntries, len(self.palettes),
- numColorRecords, offsetToFirstColorRecord)
- if self.version == 0:
- dataList = [header, colorRecordIndices, colorRecords]
- else:
- pos = offsetToFirstColorRecord + len(colorRecords)
- if len(paletteTypes) == 0:
- offsetToPaletteTypeArray = 0
- else:
- offsetToPaletteTypeArray = pos
- pos += len(paletteTypes)
- if len(paletteLabels) == 0:
- offsetToPaletteLabelArray = 0
- else:
- offsetToPaletteLabelArray = pos
- pos += len(paletteLabels)
- if len(paletteEntryLabels) == 0:
- offsetToPaletteEntryLabelArray = 0
- else:
- offsetToPaletteEntryLabelArray = pos
- pos += len(paletteLabels)
- header1 = struct.pack(">LLL",
- offsetToPaletteTypeArray,
- offsetToPaletteLabelArray,
- offsetToPaletteEntryLabelArray)
- dataList = [header, colorRecordIndices, header1,
- colorRecords, paletteTypes, paletteLabels,
- paletteEntryLabels]
- return bytesjoin(dataList)
+ def _compilePalette(self, palette):
+ assert len(palette) == self.numPaletteEntries
+ pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
+ return bytesjoin([pack(color) for color in palette])
- def _compilePalette(self, palette):
- assert(len(palette) == self.numPaletteEntries)
- pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
- return bytesjoin([pack(color) for color in palette])
+ def _compileColorRecords(self):
+ colorRecords, colorRecordIndices, pool = [], [], {}
+ for palette in self.palettes:
+ packedPalette = self._compilePalette(palette)
+ if packedPalette in pool:
+ index = pool[packedPalette]
+ else:
+ index = len(colorRecords)
+ colorRecords.append(packedPalette)
+ pool[packedPalette] = index
+ colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
+ return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)
- def _compileColorRecords(self):
- colorRecords, colorRecordIndices, pool = [], [], {}
- for palette in self.palettes:
- packedPalette = self._compilePalette(palette)
- if packedPalette in pool:
- index = pool[packedPalette]
- else:
- index = len(colorRecords)
- colorRecords.append(packedPalette)
- pool[packedPalette] = index
- colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
- return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)
+ def _compilePaletteTypes(self):
+ if self.version == 0 or not any(self.paletteTypes):
+ return b""
+ assert len(self.paletteTypes) == len(self.palettes)
+ result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
+ assert len(result) == 4 * len(self.palettes)
+ return result
- def _compilePaletteTypes(self):
- if self.version == 0 or not any(self.paletteTypes):
- return b''
- assert len(self.paletteTypes) == len(self.palettes)
- result = bytesjoin([struct.pack(">I", ptype)
- for ptype in self.paletteTypes])
- assert len(result) == 4 * len(self.palettes)
- return result
+ def _compilePaletteLabels(self):
+ if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
+ return b""
+ assert len(self.paletteLabels) == len(self.palettes)
+ result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
+ assert len(result) == 2 * len(self.palettes)
+ return result
- def _compilePaletteLabels(self):
- if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
- return b''
- assert len(self.paletteLabels) == len(self.palettes)
- result = bytesjoin([struct.pack(">H", label)
- for label in self.paletteLabels])
- assert len(result) == 2 * len(self.palettes)
- return result
+ def _compilePaletteEntryLabels(self):
+ if self.version == 0 or all(
+ l == self.NO_NAME_ID for l in self.paletteEntryLabels
+ ):
+ return b""
+ assert len(self.paletteEntryLabels) == self.numPaletteEntries
+ result = bytesjoin(
+ [struct.pack(">H", label) for label in self.paletteEntryLabels]
+ )
+ assert len(result) == 2 * self.numPaletteEntries
+ return result
- def _compilePaletteEntryLabels(self):
- if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
- return b''
- assert len(self.paletteEntryLabels) == self.numPaletteEntries
- result = bytesjoin([struct.pack(">H", label)
- for label in self.paletteEntryLabels])
- assert len(result) == 2 * self.numPaletteEntries
- return result
+ def toXML(self, writer, ttFont):
+ numPalettes = len(self.palettes)
+ paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
+ paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
+ writer.newline()
+ for index, palette in enumerate(self.palettes):
+ attrs = {"index": index}
+ paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
+ paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
+ if self.version > 0 and paletteLabel != self.NO_NAME_ID:
+ attrs["label"] = paletteLabel
+ if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
+ attrs["type"] = paletteType
+ writer.begintag("palette", **attrs)
+ writer.newline()
+ if (
+ self.version > 0
+ and paletteLabel != self.NO_NAME_ID
+ and ttFont
+ and "name" in ttFont
+ ):
+ name = ttFont["name"].getDebugName(paletteLabel)
+ if name is not None:
+ writer.comment(name)
+ writer.newline()
+ assert len(palette) == self.numPaletteEntries
+ for cindex, color in enumerate(palette):
+ color.toXML(writer, ttFont, cindex)
+ writer.endtag("palette")
+ writer.newline()
+ if self.version > 0 and not all(
+ l == self.NO_NAME_ID for l in self.paletteEntryLabels
+ ):
+ writer.begintag("paletteEntryLabels")
+ writer.newline()
+ for index, label in enumerate(self.paletteEntryLabels):
+ if label != self.NO_NAME_ID:
+ writer.simpletag("label", index=index, value=label)
+ if self.version > 0 and label and ttFont and "name" in ttFont:
+ name = ttFont["name"].getDebugName(label)
+ if name is not None:
+ writer.comment(name)
+ writer.newline()
+ writer.endtag("paletteEntryLabels")
+ writer.newline()
- def toXML(self, writer, ttFont):
- numPalettes = len(self.palettes)
- paletteLabels = {i: nameID
- for (i, nameID) in enumerate(self.paletteLabels)}
- paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.simpletag("numPaletteEntries",
- value=self.numPaletteEntries)
- writer.newline()
- for index, palette in enumerate(self.palettes):
- attrs = {"index": index}
- paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
- paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
- if self.version > 0 and paletteLabel != self.NO_NAME_ID:
- attrs["label"] = paletteLabel
- if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
- attrs["type"] = paletteType
- writer.begintag("palette", **attrs)
- writer.newline()
- if (self.version > 0 and paletteLabel != self.NO_NAME_ID and
- ttFont and "name" in ttFont):
- name = ttFont["name"].getDebugName(paletteLabel)
- if name is not None:
- writer.comment(name)
- writer.newline()
- assert(len(palette) == self.numPaletteEntries)
- for cindex, color in enumerate(palette):
- color.toXML(writer, ttFont, cindex)
- writer.endtag("palette")
- writer.newline()
- if self.version > 0 and not all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
- writer.begintag("paletteEntryLabels")
- writer.newline()
- for index, label in enumerate(self.paletteEntryLabels):
- if label != self.NO_NAME_ID:
- writer.simpletag("label", index=index, value=label)
- if (self.version > 0 and label and ttFont and "name" in ttFont):
- name = ttFont["name"].getDebugName(label)
- if name is not None:
- writer.comment(name)
- writer.newline()
- writer.endtag("paletteEntryLabels")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "palette":
- self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
- self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
- palette = []
- for element in content:
- if isinstance(element, str):
- continue
- attrs = element[1]
- color = Color.fromHex(attrs["value"])
- palette.append(color)
- self.palettes.append(palette)
- elif name == "paletteEntryLabels":
- colorLabels = {}
- for element in content:
- if isinstance(element, str):
- continue
- elementName, elementAttr, _ = element
- if elementName == "label":
- labelIndex = safeEval(elementAttr["index"])
- nameID = safeEval(elementAttr["value"])
- colorLabels[labelIndex] = nameID
- self.paletteEntryLabels = [
- colorLabels.get(i, self.NO_NAME_ID)
- for i in range(self.numPaletteEntries)]
- elif "value" in attrs:
- value = safeEval(attrs["value"])
- setattr(self, name, value)
- if name == "numPaletteEntries":
- self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "palette":
+ self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
+ self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
+ palette = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ attrs = element[1]
+ color = Color.fromHex(attrs["value"])
+ palette.append(color)
+ self.palettes.append(palette)
+ elif name == "paletteEntryLabels":
+ colorLabels = {}
+ for element in content:
+ if isinstance(element, str):
+ continue
+ elementName, elementAttr, _ = element
+ if elementName == "label":
+ labelIndex = safeEval(elementAttr["index"])
+ nameID = safeEval(elementAttr["value"])
+ colorLabels[labelIndex] = nameID
+ self.paletteEntryLabels = [
+ colorLabels.get(i, self.NO_NAME_ID)
+ for i in range(self.numPaletteEntries)
+ ]
+ elif "value" in attrs:
+ value = safeEval(attrs["value"])
+ setattr(self, name, value)
+ if name == "numPaletteEntries":
+ self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
class Color(namedtuple("Color", "blue green red alpha")):
+ def hex(self):
+ return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
- def hex(self):
- return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
-
- def __repr__(self):
- return self.hex()
+ def __repr__(self):
+ return self.hex()
- def toXML(self, writer, ttFont, index=None):
- writer.simpletag("color", value=self.hex(), index=index)
- writer.newline()
+ def toXML(self, writer, ttFont, index=None):
+ writer.simpletag("color", value=self.hex(), index=index)
+ writer.newline()
- @classmethod
- def fromHex(cls, value):
- if value[0] == '#':
- value = value[1:]
- red = int(value[0:2], 16)
- green = int(value[2:4], 16)
- blue = int(value[4:6], 16)
- alpha = int(value[6:8], 16) if len (value) >= 8 else 0xFF
- return cls(red=red, green=green, blue=blue, alpha=alpha)
+ @classmethod
+ def fromHex(cls, value):
+ if value[0] == "#":
+ value = value[1:]
+ red = int(value[0:2], 16)
+ green = int(value[2:4], 16)
+ blue = int(value[4:6], 16)
+ alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF
+ return cls(red=red, green=green, blue=blue, alpha=alpha)
- @classmethod
- def fromRGBA(cls, red, green, blue, alpha):
- return cls(red=red, green=green, blue=blue, alpha=alpha)
+ @classmethod
+ def fromRGBA(cls, red, green, blue, alpha):
+ return cls(red=red, green=green, blue=blue, alpha=alpha)
diff --git a/Lib/fontTools/ttLib/tables/D_S_I_G_.py b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
index 02fddee6..d902a290 100644
--- a/Lib/fontTools/ttLib/tables/D_S_I_G_.py
+++ b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
@@ -37,93 +37,115 @@ DSIG_SignatureBlockFormat = """
# on compilation with no padding whatsoever.
#
+
class table_D_S_I_G_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
+ assert self.ulVersion == 1, "DSIG ulVersion must be 1"
+ assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
+ self.signatureRecords = sigrecs = []
+ for n in range(self.usNumSigs):
+ sigrec, newData = sstruct.unpack2(
+ DSIG_SignatureFormat, newData, SignatureRecord()
+ )
+ assert sigrec.ulFormat == 1, (
+ "DSIG signature record #%d ulFormat must be 1" % n
+ )
+ sigrecs.append(sigrec)
+ for sigrec in sigrecs:
+ dummy, newData = sstruct.unpack2(
+ DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
+ )
+ assert sigrec.usReserved1 == 0, (
+ "DSIG signature record #%d usReserverd1 must be 0" % n
+ )
+ assert sigrec.usReserved2 == 0, (
+ "DSIG signature record #%d usReserverd2 must be 0" % n
+ )
+ sigrec.pkcs7 = newData[: sigrec.cbSignature]
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
- assert self.ulVersion == 1, "DSIG ulVersion must be 1"
- assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
- self.signatureRecords = sigrecs = []
- for n in range(self.usNumSigs):
- sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
- assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
- sigrecs.append(sigrec)
- for sigrec in sigrecs:
- dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
- assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n
- assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n
- sigrec.pkcs7 = newData[:sigrec.cbSignature]
+ def compile(self, ttFont):
+ packed = sstruct.pack(DSIG_HeaderFormat, self)
+ headers = [packed]
+ offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
+ data = []
+ for sigrec in self.signatureRecords:
+ # first pack signature block
+ sigrec.cbSignature = len(sigrec.pkcs7)
+ packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
+ data.append(packed)
+ # update redundant length field
+ sigrec.ulLength = len(packed)
+ # update running table offset
+ sigrec.ulOffset = offset
+ headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
+ offset += sigrec.ulLength
+ if offset % 2:
+ # Pad to even bytes
+ data.append(b"\0")
+ return bytesjoin(headers + data)
- def compile(self, ttFont):
- packed = sstruct.pack(DSIG_HeaderFormat, self)
- headers = [packed]
- offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
- data = []
- for sigrec in self.signatureRecords:
- # first pack signature block
- sigrec.cbSignature = len(sigrec.pkcs7)
- packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
- data.append(packed)
- # update redundant length field
- sigrec.ulLength = len(packed)
- # update running table offset
- sigrec.ulOffset = offset
- headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
- offset += sigrec.ulLength
- if offset % 2:
- # Pad to even bytes
- data.append(b'\0')
- return bytesjoin(headers+data)
+ def toXML(self, xmlWriter, ttFont):
+ xmlWriter.comment(
+ "note that the Digital Signature will be invalid after recompilation!"
+ )
+ xmlWriter.newline()
+ xmlWriter.simpletag(
+ "tableHeader",
+ version=self.ulVersion,
+ numSigs=self.usNumSigs,
+ flag="0x%X" % self.usFlag,
+ )
+ for sigrec in self.signatureRecords:
+ xmlWriter.newline()
+ sigrec.toXML(xmlWriter, ttFont)
+ xmlWriter.newline()
- def toXML(self, xmlWriter, ttFont):
- xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
- xmlWriter.newline()
- xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
- for sigrec in self.signatureRecords:
- xmlWriter.newline()
- sigrec.toXML(xmlWriter, ttFont)
- xmlWriter.newline()
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableHeader":
+ self.signatureRecords = []
+ self.ulVersion = safeEval(attrs["version"])
+ self.usNumSigs = safeEval(attrs["numSigs"])
+ self.usFlag = safeEval(attrs["flag"])
+ return
+ if name == "SignatureRecord":
+ sigrec = SignatureRecord()
+ sigrec.fromXML(name, attrs, content, ttFont)
+ self.signatureRecords.append(sigrec)
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableHeader":
- self.signatureRecords = []
- self.ulVersion = safeEval(attrs["version"])
- self.usNumSigs = safeEval(attrs["numSigs"])
- self.usFlag = safeEval(attrs["flag"])
- return
- if name == "SignatureRecord":
- sigrec = SignatureRecord()
- sigrec.fromXML(name, attrs, content, ttFont)
- self.signatureRecords.append(sigrec)
-pem_spam = lambda l, spam = {
- "-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True
+pem_spam = lambda l, spam={
+ "-----BEGIN PKCS7-----": True,
+ "-----END PKCS7-----": True,
+ "": True,
}: not spam.get(l.strip())
+
def b64encode(b):
- s = base64.b64encode(b)
- # Line-break at 76 chars.
- items = []
- while s:
- items.append(tostr(s[:76]))
- items.append('\n')
- s = s[76:]
- return strjoin(items)
+ s = base64.b64encode(b)
+ # Line-break at 76 chars.
+ items = []
+ while s:
+ items.append(tostr(s[:76]))
+ items.append("\n")
+ s = s[76:]
+ return strjoin(items)
+
class SignatureRecord(object):
- def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, format=self.ulFormat)
- writer.newline()
- writer.write_noindent("-----BEGIN PKCS7-----\n")
- writer.write_noindent(b64encode(self.pkcs7))
- writer.write_noindent("-----END PKCS7-----\n")
- writer.endtag(self.__class__.__name__)
+ def toXML(self, writer, ttFont):
+ writer.begintag(self.__class__.__name__, format=self.ulFormat)
+ writer.newline()
+ writer.write_noindent("-----BEGIN PKCS7-----\n")
+ writer.write_noindent(b64encode(self.pkcs7))
+ writer.write_noindent("-----END PKCS7-----\n")
+ writer.endtag(self.__class__.__name__)
- def fromXML(self, name, attrs, content, ttFont):
- self.ulFormat = safeEval(attrs["format"])
- self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
- self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
- self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
+ def fromXML(self, name, attrs, content, ttFont):
+ self.ulFormat = safeEval(attrs["format"])
+ self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
+ self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
+ self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
diff --git a/Lib/fontTools/ttLib/tables/D__e_b_g.py b/Lib/fontTools/ttLib/tables/D__e_b_g.py
index ff64a9b5..54449a5f 100644
--- a/Lib/fontTools/ttLib/tables/D__e_b_g.py
+++ b/Lib/fontTools/ttLib/tables/D__e_b_g.py
@@ -11,7 +11,7 @@ class table_D__e_b_g(DefaultTable.DefaultTable):
return json.dumps(self.data).encode("utf-8")
def toXML(self, writer, ttFont):
- writer.writecdata(json.dumps(self.data))
+ writer.writecdata(json.dumps(self.data, indent=2))
def fromXML(self, name, attrs, content, ttFont):
self.data = json.loads(content)
diff --git a/Lib/fontTools/ttLib/tables/DefaultTable.py b/Lib/fontTools/ttLib/tables/DefaultTable.py
index dae83183..92f2aa65 100644
--- a/Lib/fontTools/ttLib/tables/DefaultTable.py
+++ b/Lib/fontTools/ttLib/tables/DefaultTable.py
@@ -1,48 +1,49 @@
from fontTools.misc.textTools import Tag
from fontTools.ttLib import getClassTag
-class DefaultTable(object):
- dependencies = []
-
- def __init__(self, tag=None):
- if tag is None:
- tag = getClassTag(self.__class__)
- self.tableTag = Tag(tag)
-
- def decompile(self, data, ttFont):
- self.data = data
-
- def compile(self, ttFont):
- return self.data
-
- def toXML(self, writer, ttFont, **kwargs):
- if hasattr(self, "ERROR"):
- writer.comment("An error occurred during the decompilation of this table")
- writer.newline()
- writer.comment(self.ERROR)
- writer.newline()
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(self.compile(ttFont))
- writer.endtag("hexdata")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- from fontTools.misc.textTools import readHex
- from fontTools import ttLib
- if name != "hexdata":
- raise ttLib.TTLibError("can't handle '%s' element" % name)
- self.decompile(readHex(content), ttFont)
-
- def __repr__(self):
- return "<'%s' table at %x>" % (self.tableTag, id(self))
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+class DefaultTable(object):
+ dependencies = []
+
+ def __init__(self, tag=None):
+ if tag is None:
+ tag = getClassTag(self.__class__)
+ self.tableTag = Tag(tag)
+
+ def decompile(self, data, ttFont):
+ self.data = data
+
+ def compile(self, ttFont):
+ return self.data
+
+ def toXML(self, writer, ttFont, **kwargs):
+ if hasattr(self, "ERROR"):
+ writer.comment("An error occurred during the decompilation of this table")
+ writer.newline()
+ writer.comment(self.ERROR)
+ writer.newline()
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(self.compile(ttFont))
+ writer.endtag("hexdata")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ from fontTools.misc.textTools import readHex
+ from fontTools import ttLib
+
+ if name != "hexdata":
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
+ self.decompile(readHex(content), ttFont)
+
+ def __repr__(self):
+ return "<'%s' table at %x>" % (self.tableTag, id(self))
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
diff --git a/Lib/fontTools/ttLib/tables/E_B_D_T_.py b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
index ae716512..9f7f82ef 100644
--- a/Lib/fontTools/ttLib/tables/E_B_D_T_.py
+++ b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
@@ -1,6 +1,20 @@
from fontTools.misc import sstruct
-from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin, safeEval, readHex, hexStr, deHexStr
-from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
+from fontTools.misc.textTools import (
+ bytechr,
+ byteord,
+ bytesjoin,
+ strjoin,
+ safeEval,
+ readHex,
+ hexStr,
+ deHexStr,
+)
+from .BitmapGlyphMetrics import (
+ BigGlyphMetrics,
+ bigGlyphMetricsFormat,
+ SmallGlyphMetrics,
+ smallGlyphMetricsFormat,
+)
from . import DefaultTable
import itertools
import os
@@ -22,220 +36,232 @@ ebdtComponentFormat = """
yOffset: b
"""
+
class table_E_B_D_T_(DefaultTable.DefaultTable):
+ # Keep a reference to the name of the data locator table.
+ locatorName = "EBLC"
+
+ # This method can be overridden in subclasses to support new formats
+ # without changing the other implementation. Also can be used as a
+ # convenience method for coverting a font file to an alternative format.
+ def getImageFormatClass(self, imageFormat):
+ return ebdt_bitmap_classes[imageFormat]
+
+ def decompile(self, data, ttFont):
+ # Get the version but don't advance the slice.
+ # Most of the lookup for this table is done relative
+ # to the begining so slice by the offsets provided
+ # in the EBLC table.
+ sstruct.unpack2(ebdtTableVersionFormat, data, self)
+
+ # Keep a dict of glyphs that have been seen so they aren't remade.
+ # This dict maps intervals of data to the BitmapGlyph.
+ glyphDict = {}
+
+ # Pull out the EBLC table and loop through glyphs.
+ # A strike is a concept that spans both tables.
+ # The actual bitmap data is stored in the EBDT.
+ locator = ttFont[self.__class__.locatorName]
+ self.strikeData = []
+ for curStrike in locator.strikes:
+ bitmapGlyphDict = {}
+ self.strikeData.append(bitmapGlyphDict)
+ for indexSubTable in curStrike.indexSubTables:
+ dataIter = zip(indexSubTable.names, indexSubTable.locations)
+ for curName, curLoc in dataIter:
+ # Don't create duplicate data entries for the same glyphs.
+ # Instead just use the structures that already exist if they exist.
+ if curLoc in glyphDict:
+ curGlyph = glyphDict[curLoc]
+ else:
+ curGlyphData = data[slice(*curLoc)]
+ imageFormatClass = self.getImageFormatClass(
+ indexSubTable.imageFormat
+ )
+ curGlyph = imageFormatClass(curGlyphData, ttFont)
+ glyphDict[curLoc] = curGlyph
+ bitmapGlyphDict[curName] = curGlyph
+
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
+ dataSize = len(dataList[0])
+
+ # Keep a dict of glyphs that have been seen so they aren't remade.
+ # This dict maps the id of the BitmapGlyph to the interval
+ # in the data.
+ glyphDict = {}
+
+ # Go through the bitmap glyph data. Just in case the data for a glyph
+ # changed the size metrics should be recalculated. There are a variety
+ # of formats and they get stored in the EBLC table. That is why
+ # recalculation is defered to the EblcIndexSubTable class and just
+ # pass what is known about bitmap glyphs from this particular table.
+ locator = ttFont[self.__class__.locatorName]
+ for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
+ for curIndexSubTable in curStrike.indexSubTables:
+ dataLocations = []
+ for curName in curIndexSubTable.names:
+ # Handle the data placement based on seeing the glyph or not.
+ # Just save a reference to the location if the glyph has already
+ # been saved in compile. This code assumes that glyphs will only
+ # be referenced multiple times from indexFormat5. By luck the
+ # code may still work when referencing poorly ordered fonts with
+ # duplicate references. If there is a font that is unlucky the
+ # respective compile methods for the indexSubTables will fail
+ # their assertions. All fonts seem to follow this assumption.
+ # More complicated packing may be needed if a counter-font exists.
+ glyph = curGlyphDict[curName]
+ objectId = id(glyph)
+ if objectId not in glyphDict:
+ data = glyph.compile(ttFont)
+ data = curIndexSubTable.padBitmapData(data)
+ startByte = dataSize
+ dataSize += len(data)
+ endByte = dataSize
+ dataList.append(data)
+ dataLoc = (startByte, endByte)
+ glyphDict[objectId] = dataLoc
+ else:
+ dataLoc = glyphDict[objectId]
+ dataLocations.append(dataLoc)
+ # Just use the new data locations in the indexSubTable.
+ # The respective compile implementations will take care
+ # of any of the problems in the convertion that may arise.
+ curIndexSubTable.locations = dataLocations
+
+ return bytesjoin(dataList)
+
+ def toXML(self, writer, ttFont):
+ # When exporting to XML if one of the data export formats
+ # requires metrics then those metrics may be in the locator.
+ # In this case populate the bitmaps with "export metrics".
+ if ttFont.bitmapGlyphDataFormat in ("row", "bitwise"):
+ locator = ttFont[self.__class__.locatorName]
+ for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
+ for curIndexSubTable in curStrike.indexSubTables:
+ for curName in curIndexSubTable.names:
+ glyph = curGlyphDict[curName]
+ # I'm not sure which metrics have priority here.
+ # For now if both metrics exist go with glyph metrics.
+ if hasattr(glyph, "metrics"):
+ glyph.exportMetrics = glyph.metrics
+ else:
+ glyph.exportMetrics = curIndexSubTable.metrics
+ glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth
+
+ writer.simpletag("header", [("version", self.version)])
+ writer.newline()
+ locator = ttFont[self.__class__.locatorName]
+ for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
+ writer.begintag("strikedata", [("index", strikeIndex)])
+ writer.newline()
+ for curName, curBitmap in bitmapGlyphDict.items():
+ curBitmap.toXML(strikeIndex, curName, writer, ttFont)
+ writer.endtag("strikedata")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "header":
+ self.version = safeEval(attrs["version"])
+ elif name == "strikedata":
+ if not hasattr(self, "strikeData"):
+ self.strikeData = []
+ strikeIndex = safeEval(attrs["index"])
+
+ bitmapGlyphDict = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
+ imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix) :])
+ glyphName = attrs["name"]
+ imageFormatClass = self.getImageFormatClass(imageFormat)
+ curGlyph = imageFormatClass(None, None)
+ curGlyph.fromXML(name, attrs, content, ttFont)
+ assert glyphName not in bitmapGlyphDict, (
+ "Duplicate glyphs with the same name '%s' in the same strike."
+ % glyphName
+ )
+ bitmapGlyphDict[glyphName] = curGlyph
+ else:
+ log.warning("%s being ignored by %s", name, self.__class__.__name__)
+
+ # Grow the strike data array to the appropriate size. The XML
+ # format allows the strike index value to be out of order.
+ if strikeIndex >= len(self.strikeData):
+ self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
+ assert (
+ self.strikeData[strikeIndex] is None
+ ), "Duplicate strike EBDT indices."
+ self.strikeData[strikeIndex] = bitmapGlyphDict
- # Keep a reference to the name of the data locator table.
- locatorName = 'EBLC'
-
- # This method can be overridden in subclasses to support new formats
- # without changing the other implementation. Also can be used as a
- # convenience method for coverting a font file to an alternative format.
- def getImageFormatClass(self, imageFormat):
- return ebdt_bitmap_classes[imageFormat]
-
- def decompile(self, data, ttFont):
- # Get the version but don't advance the slice.
- # Most of the lookup for this table is done relative
- # to the begining so slice by the offsets provided
- # in the EBLC table.
- sstruct.unpack2(ebdtTableVersionFormat, data, self)
-
- # Keep a dict of glyphs that have been seen so they aren't remade.
- # This dict maps intervals of data to the BitmapGlyph.
- glyphDict = {}
-
- # Pull out the EBLC table and loop through glyphs.
- # A strike is a concept that spans both tables.
- # The actual bitmap data is stored in the EBDT.
- locator = ttFont[self.__class__.locatorName]
- self.strikeData = []
- for curStrike in locator.strikes:
- bitmapGlyphDict = {}
- self.strikeData.append(bitmapGlyphDict)
- for indexSubTable in curStrike.indexSubTables:
- dataIter = zip(indexSubTable.names, indexSubTable.locations)
- for curName, curLoc in dataIter:
- # Don't create duplicate data entries for the same glyphs.
- # Instead just use the structures that already exist if they exist.
- if curLoc in glyphDict:
- curGlyph = glyphDict[curLoc]
- else:
- curGlyphData = data[slice(*curLoc)]
- imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat)
- curGlyph = imageFormatClass(curGlyphData, ttFont)
- glyphDict[curLoc] = curGlyph
- bitmapGlyphDict[curName] = curGlyph
-
- def compile(self, ttFont):
-
- dataList = []
- dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
- dataSize = len(dataList[0])
-
- # Keep a dict of glyphs that have been seen so they aren't remade.
- # This dict maps the id of the BitmapGlyph to the interval
- # in the data.
- glyphDict = {}
-
- # Go through the bitmap glyph data. Just in case the data for a glyph
- # changed the size metrics should be recalculated. There are a variety
- # of formats and they get stored in the EBLC table. That is why
- # recalculation is defered to the EblcIndexSubTable class and just
- # pass what is known about bitmap glyphs from this particular table.
- locator = ttFont[self.__class__.locatorName]
- for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
- for curIndexSubTable in curStrike.indexSubTables:
- dataLocations = []
- for curName in curIndexSubTable.names:
- # Handle the data placement based on seeing the glyph or not.
- # Just save a reference to the location if the glyph has already
- # been saved in compile. This code assumes that glyphs will only
- # be referenced multiple times from indexFormat5. By luck the
- # code may still work when referencing poorly ordered fonts with
- # duplicate references. If there is a font that is unlucky the
- # respective compile methods for the indexSubTables will fail
- # their assertions. All fonts seem to follow this assumption.
- # More complicated packing may be needed if a counter-font exists.
- glyph = curGlyphDict[curName]
- objectId = id(glyph)
- if objectId not in glyphDict:
- data = glyph.compile(ttFont)
- data = curIndexSubTable.padBitmapData(data)
- startByte = dataSize
- dataSize += len(data)
- endByte = dataSize
- dataList.append(data)
- dataLoc = (startByte, endByte)
- glyphDict[objectId] = dataLoc
- else:
- dataLoc = glyphDict[objectId]
- dataLocations.append(dataLoc)
- # Just use the new data locations in the indexSubTable.
- # The respective compile implementations will take care
- # of any of the problems in the convertion that may arise.
- curIndexSubTable.locations = dataLocations
-
- return bytesjoin(dataList)
-
- def toXML(self, writer, ttFont):
- # When exporting to XML if one of the data export formats
- # requires metrics then those metrics may be in the locator.
- # In this case populate the bitmaps with "export metrics".
- if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'):
- locator = ttFont[self.__class__.locatorName]
- for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
- for curIndexSubTable in curStrike.indexSubTables:
- for curName in curIndexSubTable.names:
- glyph = curGlyphDict[curName]
- # I'm not sure which metrics have priority here.
- # For now if both metrics exist go with glyph metrics.
- if hasattr(glyph, 'metrics'):
- glyph.exportMetrics = glyph.metrics
- else:
- glyph.exportMetrics = curIndexSubTable.metrics
- glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth
-
- writer.simpletag("header", [('version', self.version)])
- writer.newline()
- locator = ttFont[self.__class__.locatorName]
- for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
- writer.begintag('strikedata', [('index', strikeIndex)])
- writer.newline()
- for curName, curBitmap in bitmapGlyphDict.items():
- curBitmap.toXML(strikeIndex, curName, writer, ttFont)
- writer.endtag('strikedata')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == 'header':
- self.version = safeEval(attrs['version'])
- elif name == 'strikedata':
- if not hasattr(self, 'strikeData'):
- self.strikeData = []
- strikeIndex = safeEval(attrs['index'])
-
- bitmapGlyphDict = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
- imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix):])
- glyphName = attrs['name']
- imageFormatClass = self.getImageFormatClass(imageFormat)
- curGlyph = imageFormatClass(None, None)
- curGlyph.fromXML(name, attrs, content, ttFont)
- assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName
- bitmapGlyphDict[glyphName] = curGlyph
- else:
- log.warning("%s being ignored by %s", name, self.__class__.__name__)
-
- # Grow the strike data array to the appropriate size. The XML
- # format allows the strike index value to be out of order.
- if strikeIndex >= len(self.strikeData):
- self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
- assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices."
- self.strikeData[strikeIndex] = bitmapGlyphDict
class EbdtComponent(object):
+ def toXML(self, writer, ttFont):
+ writer.begintag("ebdtComponent", [("name", self.name)])
+ writer.newline()
+ for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
+ writer.simpletag(componentName, value=getattr(self, componentName))
+ writer.newline()
+ writer.endtag("ebdtComponent")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.name = attrs["name"]
+ componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name in componentNames:
+ vars(self)[name] = safeEval(attrs["value"])
+ else:
+ log.warning("unknown name '%s' being ignored by EbdtComponent.", name)
- def toXML(self, writer, ttFont):
- writer.begintag('ebdtComponent', [('name', self.name)])
- writer.newline()
- for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
- writer.simpletag(componentName, value=getattr(self, componentName))
- writer.newline()
- writer.endtag('ebdtComponent')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.name = attrs['name']
- componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name in componentNames:
- vars(self)[name] = safeEval(attrs['value'])
- else:
- log.warning("unknown name '%s' being ignored by EbdtComponent.", name)
# Helper functions for dealing with binary.
+
def _data2binary(data, numBits):
- binaryList = []
- for curByte in data:
- value = byteord(curByte)
- numBitsCut = min(8, numBits)
- for i in range(numBitsCut):
- if value & 0x1:
- binaryList.append('1')
- else:
- binaryList.append('0')
- value = value >> 1
- numBits -= numBitsCut
- return strjoin(binaryList)
+ binaryList = []
+ for curByte in data:
+ value = byteord(curByte)
+ numBitsCut = min(8, numBits)
+ for i in range(numBitsCut):
+ if value & 0x1:
+ binaryList.append("1")
+ else:
+ binaryList.append("0")
+ value = value >> 1
+ numBits -= numBitsCut
+ return strjoin(binaryList)
+
def _binary2data(binary):
- byteList = []
- for bitLoc in range(0, len(binary), 8):
- byteString = binary[bitLoc:bitLoc+8]
- curByte = 0
- for curBit in reversed(byteString):
- curByte = curByte << 1
- if curBit == '1':
- curByte |= 1
- byteList.append(bytechr(curByte))
- return bytesjoin(byteList)
+ byteList = []
+ for bitLoc in range(0, len(binary), 8):
+ byteString = binary[bitLoc : bitLoc + 8]
+ curByte = 0
+ for curBit in reversed(byteString):
+ curByte = curByte << 1
+ if curBit == "1":
+ curByte |= 1
+ byteList.append(bytechr(curByte))
+ return bytesjoin(byteList)
+
def _memoize(f):
- class memodict(dict):
- def __missing__(self, key):
- ret = f(key)
- if len(key) == 1:
- self[key] = ret
- return ret
- return memodict().__getitem__
+ class memodict(dict):
+ def __missing__(self, key):
+ ret = f(key)
+ if isinstance(key, int) or len(key) == 1:
+ self[key] = ret
+ return ret
+
+ return memodict().__getitem__
+
# 00100111 -> 11100100 per byte, not to be confused with little/big endian.
# Bitmap data per byte is in the order that binary is written on the page
@@ -243,524 +269,559 @@ def _memoize(f):
# opposite of what makes sense algorithmically and hence this function.
@_memoize
def _reverseBytes(data):
- if len(data) != 1:
- return bytesjoin(map(_reverseBytes, data))
- byte = byteord(data)
- result = 0
- for i in range(8):
- result = result << 1
- result |= byte & 1
- byte = byte >> 1
- return bytechr(result)
+ r"""
+ >>> bin(ord(_reverseBytes(0b00100111)))
+ '0b11100100'
+ >>> _reverseBytes(b'\x00\xf0')
+ b'\x00\x0f'
+ """
+ if isinstance(data, bytes) and len(data) != 1:
+ return bytesjoin(map(_reverseBytes, data))
+ byte = byteord(data)
+ result = 0
+ for i in range(8):
+ result = result << 1
+ result |= byte & 1
+ byte = byte >> 1
+ return bytechr(result)
+
# This section of code is for reading and writing image data to/from XML.
+
def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- writer.begintag('rawimagedata')
- writer.newline()
- writer.dumphex(bitmapObject.imageData)
- writer.endtag('rawimagedata')
- writer.newline()
+ writer.begintag("rawimagedata")
+ writer.newline()
+ writer.dumphex(bitmapObject.imageData)
+ writer.endtag("rawimagedata")
+ writer.newline()
+
def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
- bitmapObject.imageData = readHex(content)
+ bitmapObject.imageData = readHex(content)
+
def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- metrics = bitmapObject.exportMetrics
- del bitmapObject.exportMetrics
- bitDepth = bitmapObject.exportBitDepth
- del bitmapObject.exportBitDepth
-
- writer.begintag('rowimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height)
- writer.newline()
- for curRow in range(metrics.height):
- rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
- writer.simpletag('row', value=hexStr(rowData))
- writer.newline()
- writer.endtag('rowimagedata')
- writer.newline()
+ metrics = bitmapObject.exportMetrics
+ del bitmapObject.exportMetrics
+ bitDepth = bitmapObject.exportBitDepth
+ del bitmapObject.exportBitDepth
+
+ writer.begintag(
+ "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height
+ )
+ writer.newline()
+ for curRow in range(metrics.height):
+ rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
+ writer.simpletag("row", value=hexStr(rowData))
+ writer.newline()
+ writer.endtag("rowimagedata")
+ writer.newline()
+
def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
- bitDepth = safeEval(attrs['bitDepth'])
- metrics = SmallGlyphMetrics()
- metrics.width = safeEval(attrs['width'])
- metrics.height = safeEval(attrs['height'])
-
- dataRows = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- # Chop off 'imagedata' from the tag to get just the option.
- if name == 'row':
- dataRows.append(deHexStr(attr['value']))
- bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
+ bitDepth = safeEval(attrs["bitDepth"])
+ metrics = SmallGlyphMetrics()
+ metrics.width = safeEval(attrs["width"])
+ metrics.height = safeEval(attrs["height"])
+
+ dataRows = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ # Chop off 'imagedata' from the tag to get just the option.
+ if name == "row":
+ dataRows.append(deHexStr(attr["value"]))
+ bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
+
def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- metrics = bitmapObject.exportMetrics
- del bitmapObject.exportMetrics
- bitDepth = bitmapObject.exportBitDepth
- del bitmapObject.exportBitDepth
-
- # A dict for mapping binary to more readable/artistic ASCII characters.
- binaryConv = {'0':'.', '1':'@'}
-
- writer.begintag('bitwiseimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height)
- writer.newline()
- for curRow in range(metrics.height):
- rowData = bitmapObject.getRow(curRow, bitDepth=1, metrics=metrics, reverseBytes=True)
- rowData = _data2binary(rowData, metrics.width)
- # Make the output a readable ASCII art form.
- rowData = strjoin(map(binaryConv.get, rowData))
- writer.simpletag('row', value=rowData)
- writer.newline()
- writer.endtag('bitwiseimagedata')
- writer.newline()
+ metrics = bitmapObject.exportMetrics
+ del bitmapObject.exportMetrics
+ bitDepth = bitmapObject.exportBitDepth
+ del bitmapObject.exportBitDepth
+
+ # A dict for mapping binary to more readable/artistic ASCII characters.
+ binaryConv = {"0": ".", "1": "@"}
+
+ writer.begintag(
+ "bitwiseimagedata",
+ bitDepth=bitDepth,
+ width=metrics.width,
+ height=metrics.height,
+ )
+ writer.newline()
+ for curRow in range(metrics.height):
+ rowData = bitmapObject.getRow(
+ curRow, bitDepth=1, metrics=metrics, reverseBytes=True
+ )
+ rowData = _data2binary(rowData, metrics.width)
+ # Make the output a readable ASCII art form.
+ rowData = strjoin(map(binaryConv.get, rowData))
+ writer.simpletag("row", value=rowData)
+ writer.newline()
+ writer.endtag("bitwiseimagedata")
+ writer.newline()
+
def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
- bitDepth = safeEval(attrs['bitDepth'])
- metrics = SmallGlyphMetrics()
- metrics.width = safeEval(attrs['width'])
- metrics.height = safeEval(attrs['height'])
-
- # A dict for mapping from ASCII to binary. All characters are considered
- # a '1' except space, period and '0' which maps to '0'.
- binaryConv = {' ':'0', '.':'0', '0':'0'}
-
- dataRows = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- if name == 'row':
- mapParams = zip(attr['value'], itertools.repeat('1'))
- rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
- dataRows.append(_binary2data(rowData))
-
- bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True)
+ bitDepth = safeEval(attrs["bitDepth"])
+ metrics = SmallGlyphMetrics()
+ metrics.width = safeEval(attrs["width"])
+ metrics.height = safeEval(attrs["height"])
+
+ # A dict for mapping from ASCII to binary. All characters are considered
+ # a '1' except space, period and '0' which maps to '0'.
+ binaryConv = {" ": "0", ".": "0", "0": "0"}
+
+ dataRows = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ if name == "row":
+ mapParams = zip(attr["value"], itertools.repeat("1"))
+ rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
+ dataRows.append(_binary2data(rowData))
+
+ bitmapObject.setRows(
+ dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True
+ )
+
def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
- try:
- folder = os.path.dirname(writer.file.name)
- except AttributeError:
- # fall back to current directory if output file's directory isn't found
- folder = '.'
- folder = os.path.join(folder, 'bitmaps')
- filename = glyphName + bitmapObject.fileExtension
- if not os.path.isdir(folder):
- os.makedirs(folder)
- folder = os.path.join(folder, 'strike%d' % strikeIndex)
- if not os.path.isdir(folder):
- os.makedirs(folder)
-
- fullPath = os.path.join(folder, filename)
- writer.simpletag('extfileimagedata', value=fullPath)
- writer.newline()
-
- with open(fullPath, "wb") as file:
- file.write(bitmapObject.imageData)
+ try:
+ folder = os.path.dirname(writer.file.name)
+ except AttributeError:
+ # fall back to current directory if output file's directory isn't found
+ folder = "."
+ folder = os.path.join(folder, "bitmaps")
+ filename = glyphName + bitmapObject.fileExtension
+ if not os.path.isdir(folder):
+ os.makedirs(folder)
+ folder = os.path.join(folder, "strike%d" % strikeIndex)
+ if not os.path.isdir(folder):
+ os.makedirs(folder)
+
+ fullPath = os.path.join(folder, filename)
+ writer.simpletag("extfileimagedata", value=fullPath)
+ writer.newline()
+
+ with open(fullPath, "wb") as file:
+ file.write(bitmapObject.imageData)
+
def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
- fullPath = attrs['value']
- with open(fullPath, "rb") as file:
- bitmapObject.imageData = file.read()
+ fullPath = attrs["value"]
+ with open(fullPath, "rb") as file:
+ bitmapObject.imageData = file.read()
+
# End of XML writing code.
# Important information about the naming scheme. Used for identifying formats
# in XML.
-_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_'
+_bitmapGlyphSubclassPrefix = "ebdt_bitmap_format_"
-class BitmapGlyph(object):
- # For the external file format. This can be changed in subclasses. This way
- # when the extfile option is turned on files have the form: glyphName.ext
- # The default is just a flat binary file with no meaning.
- fileExtension = '.bin'
-
- # Keep track of reading and writing of various forms.
- xmlDataFunctions = {
- 'raw': (_writeRawImageData, _readRawImageData),
- 'row': (_writeRowImageData, _readRowImageData),
- 'bitwise': (_writeBitwiseImageData, _readBitwiseImageData),
- 'extfile': (_writeExtFileImageData, _readExtFileImageData),
- }
-
- def __init__(self, data, ttFont):
- self.data = data
- self.ttFont = ttFont
- # TODO Currently non-lazy decompilation is untested here...
- #if not ttFont.lazy:
- # self.decompile()
- # del self.data
-
- def __getattr__(self, attr):
- # Allow lazy decompile.
- if attr[:2] == '__':
- raise AttributeError(attr)
- if attr == "data":
- raise AttributeError(attr)
- self.decompile()
- del self.data
- return getattr(self, attr)
-
- def ensureDecompiled(self, recurse=False):
- if hasattr(self, "data"):
- self.decompile()
- del self.data
-
- # Not a fan of this but it is needed for safer safety checking.
- def getFormat(self):
- return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):])
-
- def toXML(self, strikeIndex, glyphName, writer, ttFont):
- writer.begintag(self.__class__.__name__, [('name', glyphName)])
- writer.newline()
-
- self.writeMetrics(writer, ttFont)
- # Use the internal write method to write using the correct output format.
- self.writeData(strikeIndex, glyphName, writer, ttFont)
-
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.readMetrics(name, attrs, content, ttFont)
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- if not name.endswith('imagedata'):
- continue
- # Chop off 'imagedata' from the tag to get just the option.
- option = name[:-len('imagedata')]
- assert option in self.__class__.xmlDataFunctions
- self.readData(name, attr, content, ttFont)
-
- # Some of the glyphs have the metrics. This allows for metrics to be
- # added if the glyph format has them. Default behavior is to do nothing.
- def writeMetrics(self, writer, ttFont):
- pass
-
- # The opposite of write metrics.
- def readMetrics(self, name, attrs, content, ttFont):
- pass
-
- def writeData(self, strikeIndex, glyphName, writer, ttFont):
- try:
- writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat]
- except KeyError:
- writeFunc = _writeRawImageData
- writeFunc(strikeIndex, glyphName, self, writer, ttFont)
-
- def readData(self, name, attrs, content, ttFont):
- # Chop off 'imagedata' from the tag to get just the option.
- option = name[:-len('imagedata')]
- writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
- readFunc(self, name, attrs, content, ttFont)
+class BitmapGlyph(object):
+ # For the external file format. This can be changed in subclasses. This way
+ # when the extfile option is turned on files have the form: glyphName.ext
+ # The default is just a flat binary file with no meaning.
+ fileExtension = ".bin"
+
+ # Keep track of reading and writing of various forms.
+ xmlDataFunctions = {
+ "raw": (_writeRawImageData, _readRawImageData),
+ "row": (_writeRowImageData, _readRowImageData),
+ "bitwise": (_writeBitwiseImageData, _readBitwiseImageData),
+ "extfile": (_writeExtFileImageData, _readExtFileImageData),
+ }
+
+ def __init__(self, data, ttFont):
+ self.data = data
+ self.ttFont = ttFont
+ # TODO Currently non-lazy decompilation is untested here...
+ # if not ttFont.lazy:
+ # self.decompile()
+ # del self.data
+
+ def __getattr__(self, attr):
+ # Allow lazy decompile.
+ if attr[:2] == "__":
+ raise AttributeError(attr)
+ if attr == "data":
+ raise AttributeError(attr)
+ self.decompile()
+ del self.data
+ return getattr(self, attr)
+
+ def ensureDecompiled(self, recurse=False):
+ if hasattr(self, "data"):
+ self.decompile()
+ del self.data
+
+ # Not a fan of this but it is needed for safer safety checking.
+ def getFormat(self):
+ return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix) :])
+
+ def toXML(self, strikeIndex, glyphName, writer, ttFont):
+ writer.begintag(self.__class__.__name__, [("name", glyphName)])
+ writer.newline()
+
+ self.writeMetrics(writer, ttFont)
+ # Use the internal write method to write using the correct output format.
+ self.writeData(strikeIndex, glyphName, writer, ttFont)
+
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.readMetrics(name, attrs, content, ttFont)
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ if not name.endswith("imagedata"):
+ continue
+ # Chop off 'imagedata' from the tag to get just the option.
+ option = name[: -len("imagedata")]
+ assert option in self.__class__.xmlDataFunctions
+ self.readData(name, attr, content, ttFont)
+
+ # Some of the glyphs have the metrics. This allows for metrics to be
+ # added if the glyph format has them. Default behavior is to do nothing.
+ def writeMetrics(self, writer, ttFont):
+ pass
+
+ # The opposite of write metrics.
+ def readMetrics(self, name, attrs, content, ttFont):
+ pass
+
+ def writeData(self, strikeIndex, glyphName, writer, ttFont):
+ try:
+ writeFunc, readFunc = self.__class__.xmlDataFunctions[
+ ttFont.bitmapGlyphDataFormat
+ ]
+ except KeyError:
+ writeFunc = _writeRawImageData
+ writeFunc(strikeIndex, glyphName, self, writer, ttFont)
+
+ def readData(self, name, attrs, content, ttFont):
+ # Chop off 'imagedata' from the tag to get just the option.
+ option = name[: -len("imagedata")]
+ writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
+ readFunc(self, name, attrs, content, ttFont)
# A closure for creating a mixin for the two types of metrics handling.
# Most of the code is very similar so its easier to deal with here.
# Everything works just by passing the class that the mixin is for.
def _createBitmapPlusMetricsMixin(metricsClass):
- # Both metrics names are listed here to make meaningful error messages.
- metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
- curMetricsName = metricsClass.__name__
- # Find which metrics this is for and determine the opposite name.
- metricsId = metricStrings.index(curMetricsName)
- oppositeMetricsName = metricStrings[1-metricsId]
-
- class BitmapPlusMetricsMixin(object):
-
- def writeMetrics(self, writer, ttFont):
- self.metrics.toXML(writer, ttFont)
-
- def readMetrics(self, name, attrs, content, ttFont):
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == curMetricsName:
- self.metrics = metricsClass()
- self.metrics.fromXML(name, attrs, content, ttFont)
- elif name == oppositeMetricsName:
- log.warning("Warning: %s being ignored in format %d.", oppositeMetricsName, self.getFormat())
-
- return BitmapPlusMetricsMixin
+ # Both metrics names are listed here to make meaningful error messages.
+ metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
+ curMetricsName = metricsClass.__name__
+ # Find which metrics this is for and determine the opposite name.
+ metricsId = metricStrings.index(curMetricsName)
+ oppositeMetricsName = metricStrings[1 - metricsId]
+
+ class BitmapPlusMetricsMixin(object):
+ def writeMetrics(self, writer, ttFont):
+ self.metrics.toXML(writer, ttFont)
+
+ def readMetrics(self, name, attrs, content, ttFont):
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == curMetricsName:
+ self.metrics = metricsClass()
+ self.metrics.fromXML(name, attrs, content, ttFont)
+ elif name == oppositeMetricsName:
+ log.warning(
+ "Warning: %s being ignored in format %d.",
+ oppositeMetricsName,
+ self.getFormat(),
+ )
+
+ return BitmapPlusMetricsMixin
+
# Since there are only two types of mixin's just create them here.
BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)
+
# Data that is bit aligned can be tricky to deal with. These classes implement
# helper functionality for dealing with the data and getting a particular row
# of bitwise data. Also helps implement fancy data export/import in XML.
class BitAlignedBitmapMixin(object):
+ def _getBitRange(self, row, bitDepth, metrics):
+ rowBits = bitDepth * metrics.width
+ bitOffset = row * rowBits
+ return (bitOffset, bitOffset + rowBits)
+
+ def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
+
+ # Loop through each byte. This can cover two bytes in the original data or
+ # a single byte if things happen to be aligned. The very last entry might
+ # not be aligned so take care to trim the binary data to size and pad with
+ # zeros in the row data. Bit aligned data is somewhat tricky.
+ #
+ # Example of data cut. Data cut represented in x's.
+ # '|' represents byte boundary.
+ # data = ...0XX|XXXXXX00|000... => XXXXXXXX
+ # or
+ # data = ...0XX|XXXX0000|000... => XXXXXX00
+ # or
+ # data = ...000|XXXXXXXX|000... => XXXXXXXX
+ # or
+ # data = ...000|00XXXX00|000... => XXXX0000
+ #
+ dataList = []
+ bitRange = self._getBitRange(row, bitDepth, metrics)
+ stepRange = bitRange + (8,)
+ for curBit in range(*stepRange):
+ endBit = min(curBit + 8, bitRange[1])
+ numBits = endBit - curBit
+ cutPoint = curBit % 8
+ firstByteLoc = curBit // 8
+ secondByteLoc = endBit // 8
+ if firstByteLoc < secondByteLoc:
+ numBitsCut = 8 - cutPoint
+ else:
+ numBitsCut = endBit - curBit
+ curByte = _reverseBytes(self.imageData[firstByteLoc])
+ firstHalf = byteord(curByte) >> cutPoint
+ firstHalf = ((1 << numBitsCut) - 1) & firstHalf
+ newByte = firstHalf
+ if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
+ curByte = _reverseBytes(self.imageData[secondByteLoc])
+ secondHalf = byteord(curByte) << numBitsCut
+ newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
+ dataList.append(bytechr(newByte))
+
+ # The way the data is kept is opposite the algorithm used.
+ data = bytesjoin(dataList)
+ if not reverseBytes:
+ data = _reverseBytes(data)
+ return data
+
+ def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ if not reverseBytes:
+ dataRows = list(map(_reverseBytes, dataRows))
+
+ # Keep track of a list of ordinal values as they are easier to modify
+ # than a list of strings. Map to actual strings later.
+ numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
+ ordDataList = [0] * numBytes
+ for row, data in enumerate(dataRows):
+ bitRange = self._getBitRange(row, bitDepth, metrics)
+ stepRange = bitRange + (8,)
+ for curBit, curByte in zip(range(*stepRange), data):
+ endBit = min(curBit + 8, bitRange[1])
+ cutPoint = curBit % 8
+ firstByteLoc = curBit // 8
+ secondByteLoc = endBit // 8
+ if firstByteLoc < secondByteLoc:
+ numBitsCut = 8 - cutPoint
+ else:
+ numBitsCut = endBit - curBit
+ curByte = byteord(curByte)
+ firstByte = curByte & ((1 << numBitsCut) - 1)
+ ordDataList[firstByteLoc] |= firstByte << cutPoint
+ if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
+ secondByte = (curByte >> numBitsCut) & ((1 << 8 - numBitsCut) - 1)
+ ordDataList[secondByteLoc] |= secondByte
+
+ # Save the image data with the bits going the correct way.
+ self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
- def _getBitRange(self, row, bitDepth, metrics):
- rowBits = (bitDepth * metrics.width)
- bitOffset = row * rowBits
- return (bitOffset, bitOffset+rowBits)
-
- def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
-
- # Loop through each byte. This can cover two bytes in the original data or
- # a single byte if things happen to be aligned. The very last entry might
- # not be aligned so take care to trim the binary data to size and pad with
- # zeros in the row data. Bit aligned data is somewhat tricky.
- #
- # Example of data cut. Data cut represented in x's.
- # '|' represents byte boundary.
- # data = ...0XX|XXXXXX00|000... => XXXXXXXX
- # or
- # data = ...0XX|XXXX0000|000... => XXXXXX00
- # or
- # data = ...000|XXXXXXXX|000... => XXXXXXXX
- # or
- # data = ...000|00XXXX00|000... => XXXX0000
- #
- dataList = []
- bitRange = self._getBitRange(row, bitDepth, metrics)
- stepRange = bitRange + (8,)
- for curBit in range(*stepRange):
- endBit = min(curBit+8, bitRange[1])
- numBits = endBit - curBit
- cutPoint = curBit % 8
- firstByteLoc = curBit // 8
- secondByteLoc = endBit // 8
- if firstByteLoc < secondByteLoc:
- numBitsCut = 8 - cutPoint
- else:
- numBitsCut = endBit - curBit
- curByte = _reverseBytes(self.imageData[firstByteLoc])
- firstHalf = byteord(curByte) >> cutPoint
- firstHalf = ((1<<numBitsCut)-1) & firstHalf
- newByte = firstHalf
- if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
- curByte = _reverseBytes(self.imageData[secondByteLoc])
- secondHalf = byteord(curByte) << numBitsCut
- newByte = (firstHalf | secondHalf) & ((1<<numBits)-1)
- dataList.append(bytechr(newByte))
-
- # The way the data is kept is opposite the algorithm used.
- data = bytesjoin(dataList)
- if not reverseBytes:
- data = _reverseBytes(data)
- return data
-
- def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- if not reverseBytes:
- dataRows = list(map(_reverseBytes, dataRows))
-
- # Keep track of a list of ordinal values as they are easier to modify
- # than a list of strings. Map to actual strings later.
- numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
- ordDataList = [0] * numBytes
- for row, data in enumerate(dataRows):
- bitRange = self._getBitRange(row, bitDepth, metrics)
- stepRange = bitRange + (8,)
- for curBit, curByte in zip(range(*stepRange), data):
- endBit = min(curBit+8, bitRange[1])
- cutPoint = curBit % 8
- firstByteLoc = curBit // 8
- secondByteLoc = endBit // 8
- if firstByteLoc < secondByteLoc:
- numBitsCut = 8 - cutPoint
- else:
- numBitsCut = endBit - curBit
- curByte = byteord(curByte)
- firstByte = curByte & ((1<<numBitsCut)-1)
- ordDataList[firstByteLoc] |= (firstByte << cutPoint)
- if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
- secondByte = (curByte >> numBitsCut) & ((1<<8-numBitsCut)-1)
- ordDataList[secondByteLoc] |= secondByte
-
- # Save the image data with the bits going the correct way.
- self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
class ByteAlignedBitmapMixin(object):
-
- def _getByteRange(self, row, bitDepth, metrics):
- rowBytes = (bitDepth * metrics.width + 7) // 8
- byteOffset = row * rowBytes
- return (byteOffset, byteOffset+rowBytes)
-
- def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
- byteRange = self._getByteRange(row, bitDepth, metrics)
- data = self.imageData[slice(*byteRange)]
- if reverseBytes:
- data = _reverseBytes(data)
- return data
-
- def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
- if metrics is None:
- metrics = self.metrics
- if reverseBytes:
- dataRows = map(_reverseBytes, dataRows)
- self.imageData = bytesjoin(dataRows)
-
-class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
-
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
-
- def compile(self, ttFont):
- data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
- return data + self.imageData
-
-
-class ebdt_bitmap_format_2(BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
-
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
-
- def compile(self, ttFont):
- data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
- return data + self.imageData
+ def _getByteRange(self, row, bitDepth, metrics):
+ rowBytes = (bitDepth * metrics.width + 7) // 8
+ byteOffset = row * rowBytes
+ return (byteOffset, byteOffset + rowBytes)
+
+ def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
+ byteRange = self._getByteRange(row, bitDepth, metrics)
+ data = self.imageData[slice(*byteRange)]
+ if reverseBytes:
+ data = _reverseBytes(data)
+ return data
+
+ def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
+ if metrics is None:
+ metrics = self.metrics
+ if reverseBytes:
+ dataRows = map(_reverseBytes, dataRows)
+ self.imageData = bytesjoin(dataRows)
+
+
+class ebdt_bitmap_format_1(
+ ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
+
+ def compile(self, ttFont):
+ data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
+
+
+class ebdt_bitmap_format_2(
+ BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
+
+ def compile(self, ttFont):
+ data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
+ def decompile(self):
+ self.imageData = self.data
- def decompile(self):
- self.imageData = self.data
-
- def compile(self, ttFont):
- return self.imageData
+ def compile(self, ttFont):
+ return self.imageData
-class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
+class ebdt_bitmap_format_6(
+ ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
- def compile(self, ttFont):
- data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
- return data + self.imageData
+ def compile(self, ttFont):
+ data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
-class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
+class ebdt_bitmap_format_7(
+ BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
+):
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ self.imageData = data
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- self.imageData = data
-
- def compile(self, ttFont):
- data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
- return data + self.imageData
+ def compile(self, ttFont):
+ data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
+ return data + self.imageData
class ComponentBitmapGlyph(BitmapGlyph):
-
- def toXML(self, strikeIndex, glyphName, writer, ttFont):
- writer.begintag(self.__class__.__name__, [('name', glyphName)])
- writer.newline()
-
- self.writeMetrics(writer, ttFont)
-
- writer.begintag('components')
- writer.newline()
- for curComponent in self.componentArray:
- curComponent.toXML(writer, ttFont)
- writer.endtag('components')
- writer.newline()
-
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.readMetrics(name, attrs, content, ttFont)
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attr, content = element
- if name == 'components':
- self.componentArray = []
- for compElement in content:
- if not isinstance(compElement, tuple):
- continue
- name, attrs, content = compElement
- if name == 'ebdtComponent':
- curComponent = EbdtComponent()
- curComponent.fromXML(name, attrs, content, ttFont)
- self.componentArray.append(curComponent)
- else:
- log.warning("'%s' being ignored in component array.", name)
+ def toXML(self, strikeIndex, glyphName, writer, ttFont):
+ writer.begintag(self.__class__.__name__, [("name", glyphName)])
+ writer.newline()
+
+ self.writeMetrics(writer, ttFont)
+
+ writer.begintag("components")
+ writer.newline()
+ for curComponent in self.componentArray:
+ curComponent.toXML(writer, ttFont)
+ writer.endtag("components")
+ writer.newline()
+
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.readMetrics(name, attrs, content, ttFont)
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attr, content = element
+ if name == "components":
+ self.componentArray = []
+ for compElement in content:
+ if not isinstance(compElement, tuple):
+ continue
+ name, attrs, content = compElement
+ if name == "ebdtComponent":
+ curComponent = EbdtComponent()
+ curComponent.fromXML(name, attrs, content, ttFont)
+ self.componentArray.append(curComponent)
+ else:
+ log.warning("'%s' being ignored in component array.", name)
class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
-
- def decompile(self):
- self.metrics = SmallGlyphMetrics()
- dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
- data = data[1:]
-
- (numComponents,) = struct.unpack(">H", data[:2])
- data = data[2:]
- self.componentArray = []
- for i in range(numComponents):
- curComponent = EbdtComponent()
- dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
- curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
- self.componentArray.append(curComponent)
-
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
- dataList.append(b'\0')
- dataList.append(struct.pack(">H", len(self.componentArray)))
- for curComponent in self.componentArray:
- curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
- dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
- return bytesjoin(dataList)
+ def decompile(self):
+ self.metrics = SmallGlyphMetrics()
+ dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+ data = data[1:]
+
+ (numComponents,) = struct.unpack(">H", data[:2])
+ data = data[2:]
+ self.componentArray = []
+ for i in range(numComponents):
+ curComponent = EbdtComponent()
+ dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
+ curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
+ self.componentArray.append(curComponent)
+
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
+ dataList.append(b"\0")
+ dataList.append(struct.pack(">H", len(self.componentArray)))
+ for curComponent in self.componentArray:
+ curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
+ dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
+ return bytesjoin(dataList)
class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
-
- def decompile(self):
- self.metrics = BigGlyphMetrics()
- dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
- (numComponents,) = struct.unpack(">H", data[:2])
- data = data[2:]
- self.componentArray = []
- for i in range(numComponents):
- curComponent = EbdtComponent()
- dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
- curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
- self.componentArray.append(curComponent)
-
- def compile(self, ttFont):
- dataList = []
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- dataList.append(struct.pack(">H", len(self.componentArray)))
- for curComponent in self.componentArray:
- curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
- dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
- return bytesjoin(dataList)
+ def decompile(self):
+ self.metrics = BigGlyphMetrics()
+ dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+ (numComponents,) = struct.unpack(">H", data[:2])
+ data = data[2:]
+ self.componentArray = []
+ for i in range(numComponents):
+ curComponent = EbdtComponent()
+ dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
+ curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
+ self.componentArray.append(curComponent)
+
+ def compile(self, ttFont):
+ dataList = []
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ dataList.append(struct.pack(">H", len(self.componentArray)))
+ for curComponent in self.componentArray:
+ curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
+ dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
+ return bytesjoin(dataList)
# Dictionary of bitmap formats to the class representing that format
# currently only the ones listed in this map are the ones supported.
ebdt_bitmap_classes = {
- 1: ebdt_bitmap_format_1,
- 2: ebdt_bitmap_format_2,
- 5: ebdt_bitmap_format_5,
- 6: ebdt_bitmap_format_6,
- 7: ebdt_bitmap_format_7,
- 8: ebdt_bitmap_format_8,
- 9: ebdt_bitmap_format_9,
- }
+ 1: ebdt_bitmap_format_1,
+ 2: ebdt_bitmap_format_2,
+ 5: ebdt_bitmap_format_5,
+ 6: ebdt_bitmap_format_6,
+ 7: ebdt_bitmap_format_7,
+ 8: ebdt_bitmap_format_8,
+ 9: ebdt_bitmap_format_9,
+}
diff --git a/Lib/fontTools/ttLib/tables/E_B_L_C_.py b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
index bb3d2140..6046d910 100644
--- a/Lib/fontTools/ttLib/tables/E_B_L_C_.py
+++ b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
@@ -1,7 +1,12 @@
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import bytesjoin, safeEval
-from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
+from .BitmapGlyphMetrics import (
+ BigGlyphMetrics,
+ bigGlyphMetricsFormat,
+ SmallGlyphMetrics,
+ smallGlyphMetricsFormat,
+)
import struct
import itertools
from collections import deque
@@ -59,571 +64,647 @@ indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
+
class table_E_B_L_C_(DefaultTable.DefaultTable):
+ dependencies = ["EBDT"]
+
+ # This method can be overridden in subclasses to support new formats
+ # without changing the other implementation. Also can be used as a
+ # convenience method for converting a font file to an alternative format.
+ def getIndexFormatClass(self, indexFormat):
+ return eblc_sub_table_classes[indexFormat]
+
+ def decompile(self, data, ttFont):
+ # Save the original data because offsets are from the start of the table.
+ origData = data
+ i = 0
+
+ dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
+ i += 8
+
+ self.strikes = []
+ for curStrikeIndex in range(self.numSizes):
+ curStrike = Strike()
+ self.strikes.append(curStrike)
+ curTable = curStrike.bitmapSizeTable
+ dummy = sstruct.unpack2(
+ bitmapSizeTableFormatPart1, data[i : i + 16], curTable
+ )
+ i += 16
+ for metric in ("hori", "vert"):
+ metricObj = SbitLineMetrics()
+ vars(curTable)[metric] = metricObj
+ dummy = sstruct.unpack2(
+ sbitLineMetricsFormat, data[i : i + 12], metricObj
+ )
+ i += 12
+ dummy = sstruct.unpack(
+ bitmapSizeTableFormatPart2, data[i : i + 8], curTable
+ )
+ i += 8
+
+ for curStrike in self.strikes:
+ curTable = curStrike.bitmapSizeTable
+ for subtableIndex in range(curTable.numberOfIndexSubTables):
+ i = (
+ curTable.indexSubTableArrayOffset
+ + subtableIndex * indexSubTableArraySize
+ )
+
+ tup = struct.unpack(
+ indexSubTableArrayFormat, data[i : i + indexSubTableArraySize]
+ )
+ (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
+ i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
+
+ tup = struct.unpack(
+ indexSubHeaderFormat, data[i : i + indexSubHeaderSize]
+ )
+ (indexFormat, imageFormat, imageDataOffset) = tup
+
+ indexFormatClass = self.getIndexFormatClass(indexFormat)
+ indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont)
+ indexSubTable.firstGlyphIndex = firstGlyphIndex
+ indexSubTable.lastGlyphIndex = lastGlyphIndex
+ indexSubTable.additionalOffsetToIndexSubtable = (
+ additionalOffsetToIndexSubtable
+ )
+ indexSubTable.indexFormat = indexFormat
+ indexSubTable.imageFormat = imageFormat
+ indexSubTable.imageDataOffset = imageDataOffset
+ indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
+ curStrike.indexSubTables.append(indexSubTable)
+
+ def compile(self, ttFont):
+ dataList = []
+ self.numSizes = len(self.strikes)
+ dataList.append(sstruct.pack(eblcHeaderFormat, self))
+
+ # Data size of the header + bitmapSizeTable needs to be calculated
+ # in order to form offsets. This value will hold the size of the data
+ # in dataList after all the data is consolidated in dataList.
+ dataSize = len(dataList[0])
+
+ # The table will be structured in the following order:
+ # (0) header
+ # (1) Each bitmapSizeTable [1 ... self.numSizes]
+ # (2) Alternate between indexSubTableArray and indexSubTable
+ # for each bitmapSizeTable present.
+ #
+ # The issue is maintaining the proper offsets when table information
+ # gets moved around. All offsets and size information must be recalculated
+ # when building the table to allow editing within ttLib and also allow easy
+ # import/export to and from XML. All of this offset information is lost
+ # when exporting to XML so everything must be calculated fresh so importing
+ # from XML will work cleanly. Only byte offset and size information is
+ # calculated fresh. Count information like numberOfIndexSubTables is
+ # checked through assertions. If the information in this table was not
+ # touched or was changed properly then these types of values should match.
+ #
+ # The table will be rebuilt the following way:
+ # (0) Precompute the size of all the bitmapSizeTables. This is needed to
+ # compute the offsets properly.
+ # (1) For each bitmapSizeTable compute the indexSubTable and
+ # indexSubTableArray pair. The indexSubTable must be computed first
+ # so that the offset information in indexSubTableArray can be
+ # calculated. Update the data size after each pairing.
+ # (2) Build each bitmapSizeTable.
+ # (3) Consolidate all the data into the main dataList in the correct order.
+
+ for _ in self.strikes:
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
+ dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat)
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
+
+ indexSubTablePairDataList = []
+ for curStrike in self.strikes:
+ curTable = curStrike.bitmapSizeTable
+ curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
+ curTable.indexSubTableArrayOffset = dataSize
+
+ # Precompute the size of the indexSubTableArray. This information
+ # is important for correctly calculating the new value for
+ # additionalOffsetToIndexSubtable.
+ sizeOfSubTableArray = (
+ curTable.numberOfIndexSubTables * indexSubTableArraySize
+ )
+ lowerBound = dataSize
+ dataSize += sizeOfSubTableArray
+ upperBound = dataSize
+
+ indexSubTableDataList = []
+ for indexSubTable in curStrike.indexSubTables:
+ indexSubTable.additionalOffsetToIndexSubtable = (
+ dataSize - curTable.indexSubTableArrayOffset
+ )
+ glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
+ indexSubTable.firstGlyphIndex = min(glyphIds)
+ indexSubTable.lastGlyphIndex = max(glyphIds)
+ data = indexSubTable.compile(ttFont)
+ indexSubTableDataList.append(data)
+ dataSize += len(data)
+ curTable.startGlyphIndex = min(
+ ist.firstGlyphIndex for ist in curStrike.indexSubTables
+ )
+ curTable.endGlyphIndex = max(
+ ist.lastGlyphIndex for ist in curStrike.indexSubTables
+ )
+
+ for i in curStrike.indexSubTables:
+ data = struct.pack(
+ indexSubHeaderFormat,
+ i.firstGlyphIndex,
+ i.lastGlyphIndex,
+ i.additionalOffsetToIndexSubtable,
+ )
+ indexSubTablePairDataList.append(data)
+ indexSubTablePairDataList.extend(indexSubTableDataList)
+ curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
+
+ for curStrike in self.strikes:
+ curTable = curStrike.bitmapSizeTable
+ data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
+ dataList.append(data)
+ for metric in ("hori", "vert"):
+ metricObj = vars(curTable)[metric]
+ data = sstruct.pack(sbitLineMetricsFormat, metricObj)
+ dataList.append(data)
+ data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
+ dataList.append(data)
+ dataList.extend(indexSubTablePairDataList)
+
+ return bytesjoin(dataList)
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("header", [("version", self.version)])
+ writer.newline()
+ for curIndex, curStrike in enumerate(self.strikes):
+ curStrike.toXML(curIndex, writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "header":
+ self.version = safeEval(attrs["version"])
+ elif name == "strike":
+ if not hasattr(self, "strikes"):
+ self.strikes = []
+ strikeIndex = safeEval(attrs["index"])
+ curStrike = Strike()
+ curStrike.fromXML(name, attrs, content, ttFont, self)
+
+ # Grow the strike array to the appropriate size. The XML format
+ # allows for the strike index value to be out of order.
+ if strikeIndex >= len(self.strikes):
+ self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
+ assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
+ self.strikes[strikeIndex] = curStrike
- dependencies = ['EBDT']
-
- # This method can be overridden in subclasses to support new formats
- # without changing the other implementation. Also can be used as a
- # convenience method for coverting a font file to an alternative format.
- def getIndexFormatClass(self, indexFormat):
- return eblc_sub_table_classes[indexFormat]
-
- def decompile(self, data, ttFont):
-
- # Save the original data because offsets are from the start of the table.
- origData = data
- i = 0;
-
- dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
- i += 8;
-
- self.strikes = []
- for curStrikeIndex in range(self.numSizes):
- curStrike = Strike()
- self.strikes.append(curStrike)
- curTable = curStrike.bitmapSizeTable
- dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable)
- i += 16
- for metric in ('hori', 'vert'):
- metricObj = SbitLineMetrics()
- vars(curTable)[metric] = metricObj
- dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj)
- i += 12
- dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable)
- i += 8
-
- for curStrike in self.strikes:
- curTable = curStrike.bitmapSizeTable
- for subtableIndex in range(curTable.numberOfIndexSubTables):
- i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
-
- tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize])
- (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
- i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
-
- tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize])
- (indexFormat, imageFormat, imageDataOffset) = tup
-
- indexFormatClass = self.getIndexFormatClass(indexFormat)
- indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont)
- indexSubTable.firstGlyphIndex = firstGlyphIndex
- indexSubTable.lastGlyphIndex = lastGlyphIndex
- indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
- indexSubTable.indexFormat = indexFormat
- indexSubTable.imageFormat = imageFormat
- indexSubTable.imageDataOffset = imageDataOffset
- indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
- curStrike.indexSubTables.append(indexSubTable)
-
- def compile(self, ttFont):
-
- dataList = []
- self.numSizes = len(self.strikes)
- dataList.append(sstruct.pack(eblcHeaderFormat, self))
-
- # Data size of the header + bitmapSizeTable needs to be calculated
- # in order to form offsets. This value will hold the size of the data
- # in dataList after all the data is consolidated in dataList.
- dataSize = len(dataList[0])
-
- # The table will be structured in the following order:
- # (0) header
- # (1) Each bitmapSizeTable [1 ... self.numSizes]
- # (2) Alternate between indexSubTableArray and indexSubTable
- # for each bitmapSizeTable present.
- #
- # The issue is maintaining the proper offsets when table information
- # gets moved around. All offsets and size information must be recalculated
- # when building the table to allow editing within ttLib and also allow easy
- # import/export to and from XML. All of this offset information is lost
- # when exporting to XML so everything must be calculated fresh so importing
- # from XML will work cleanly. Only byte offset and size information is
- # calculated fresh. Count information like numberOfIndexSubTables is
- # checked through assertions. If the information in this table was not
- # touched or was changed properly then these types of values should match.
- #
- # The table will be rebuilt the following way:
- # (0) Precompute the size of all the bitmapSizeTables. This is needed to
- # compute the offsets properly.
- # (1) For each bitmapSizeTable compute the indexSubTable and
- # indexSubTableArray pair. The indexSubTable must be computed first
- # so that the offset information in indexSubTableArray can be
- # calculated. Update the data size after each pairing.
- # (2) Build each bitmapSizeTable.
- # (3) Consolidate all the data into the main dataList in the correct order.
-
- for _ in self.strikes:
- dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
- dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
- dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
-
- indexSubTablePairDataList = []
- for curStrike in self.strikes:
- curTable = curStrike.bitmapSizeTable
- curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
- curTable.indexSubTableArrayOffset = dataSize
-
- # Precompute the size of the indexSubTableArray. This information
- # is important for correctly calculating the new value for
- # additionalOffsetToIndexSubtable.
- sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
- lowerBound = dataSize
- dataSize += sizeOfSubTableArray
- upperBound = dataSize
-
- indexSubTableDataList = []
- for indexSubTable in curStrike.indexSubTables:
- indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
- glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
- indexSubTable.firstGlyphIndex = min(glyphIds)
- indexSubTable.lastGlyphIndex = max(glyphIds)
- data = indexSubTable.compile(ttFont)
- indexSubTableDataList.append(data)
- dataSize += len(data)
- curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
- curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)
-
- for i in curStrike.indexSubTables:
- data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
- indexSubTablePairDataList.append(data)
- indexSubTablePairDataList.extend(indexSubTableDataList)
- curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
-
- for curStrike in self.strikes:
- curTable = curStrike.bitmapSizeTable
- data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
- dataList.append(data)
- for metric in ('hori', 'vert'):
- metricObj = vars(curTable)[metric]
- data = sstruct.pack(sbitLineMetricsFormat, metricObj)
- dataList.append(data)
- data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
- dataList.append(data)
- dataList.extend(indexSubTablePairDataList)
-
- return bytesjoin(dataList)
-
- def toXML(self, writer, ttFont):
- writer.simpletag('header', [('version', self.version)])
- writer.newline()
- for curIndex, curStrike in enumerate(self.strikes):
- curStrike.toXML(curIndex, writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == 'header':
- self.version = safeEval(attrs['version'])
- elif name == 'strike':
- if not hasattr(self, 'strikes'):
- self.strikes = []
- strikeIndex = safeEval(attrs['index'])
- curStrike = Strike()
- curStrike.fromXML(name, attrs, content, ttFont, self)
-
- # Grow the strike array to the appropriate size. The XML format
- # allows for the strike index value to be out of order.
- if strikeIndex >= len(self.strikes):
- self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
- assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
- self.strikes[strikeIndex] = curStrike
class Strike(object):
-
- def __init__(self):
- self.bitmapSizeTable = BitmapSizeTable()
- self.indexSubTables = []
-
- def toXML(self, strikeIndex, writer, ttFont):
- writer.begintag('strike', [('index', strikeIndex)])
- writer.newline()
- self.bitmapSizeTable.toXML(writer, ttFont)
- writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
- writer.newline()
- for indexSubTable in self.indexSubTables:
- indexSubTable.toXML(writer, ttFont)
- writer.endtag('strike')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont, locator):
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'bitmapSizeTable':
- self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
- elif name.startswith(_indexSubTableSubclassPrefix):
- indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
- indexFormatClass = locator.getIndexFormatClass(indexFormat)
- indexSubTable = indexFormatClass(None, None)
- indexSubTable.indexFormat = indexFormat
- indexSubTable.fromXML(name, attrs, content, ttFont)
- self.indexSubTables.append(indexSubTable)
+ def __init__(self):
+ self.bitmapSizeTable = BitmapSizeTable()
+ self.indexSubTables = []
+
+ def toXML(self, strikeIndex, writer, ttFont):
+ writer.begintag("strike", [("index", strikeIndex)])
+ writer.newline()
+ self.bitmapSizeTable.toXML(writer, ttFont)
+ writer.comment(
+ "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler."
+ )
+ writer.newline()
+ for indexSubTable in self.indexSubTables:
+ indexSubTable.toXML(writer, ttFont)
+ writer.endtag("strike")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont, locator):
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "bitmapSizeTable":
+ self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
+ elif name.startswith(_indexSubTableSubclassPrefix):
+ indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix) :])
+ indexFormatClass = locator.getIndexFormatClass(indexFormat)
+ indexSubTable = indexFormatClass(None, None)
+ indexSubTable.indexFormat = indexFormat
+ indexSubTable.fromXML(name, attrs, content, ttFont)
+ self.indexSubTables.append(indexSubTable)
class BitmapSizeTable(object):
-
- # Returns all the simple metric names that bitmap size table
- # cares about in terms of XML creation.
- def _getXMLMetricNames(self):
- dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
- dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
- # Skip the first 3 data names because they are byte offsets and counts.
- return dataNames[3:]
-
- def toXML(self, writer, ttFont):
- writer.begintag('bitmapSizeTable')
- writer.newline()
- for metric in ('hori', 'vert'):
- getattr(self, metric).toXML(metric, writer, ttFont)
- for metricName in self._getXMLMetricNames():
- writer.simpletag(metricName, value=getattr(self, metricName))
- writer.newline()
- writer.endtag('bitmapSizeTable')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- # Create a lookup for all the simple names that make sense to
- # bitmap size table. Only read the information from these names.
- dataNames = set(self._getXMLMetricNames())
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'sbitLineMetrics':
- direction = attrs['direction']
- assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
- metricObj = SbitLineMetrics()
- metricObj.fromXML(name, attrs, content, ttFont)
- vars(self)[direction] = metricObj
- elif name in dataNames:
- vars(self)[name] = safeEval(attrs['value'])
- else:
- log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
+ # Returns all the simple metric names that bitmap size table
+ # cares about in terms of XML creation.
+ def _getXMLMetricNames(self):
+ dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
+ dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
+ # Skip the first 3 data names because they are byte offsets and counts.
+ return dataNames[3:]
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("bitmapSizeTable")
+ writer.newline()
+ for metric in ("hori", "vert"):
+ getattr(self, metric).toXML(metric, writer, ttFont)
+ for metricName in self._getXMLMetricNames():
+ writer.simpletag(metricName, value=getattr(self, metricName))
+ writer.newline()
+ writer.endtag("bitmapSizeTable")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ # Create a lookup for all the simple names that make sense to
+ # bitmap size table. Only read the information from these names.
+ dataNames = set(self._getXMLMetricNames())
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "sbitLineMetrics":
+ direction = attrs["direction"]
+ assert direction in (
+ "hori",
+ "vert",
+ ), "SbitLineMetrics direction specified invalid."
+ metricObj = SbitLineMetrics()
+ metricObj.fromXML(name, attrs, content, ttFont)
+ vars(self)[direction] = metricObj
+ elif name in dataNames:
+ vars(self)[name] = safeEval(attrs["value"])
+ else:
+ log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
class SbitLineMetrics(object):
+ def toXML(self, name, writer, ttFont):
+ writer.begintag("sbitLineMetrics", [("direction", name)])
+ writer.newline()
+ for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
+ writer.simpletag(metricName, value=getattr(self, metricName))
+ writer.newline()
+ writer.endtag("sbitLineMetrics")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name in metricNames:
+ vars(self)[name] = safeEval(attrs["value"])
- def toXML(self, name, writer, ttFont):
- writer.begintag('sbitLineMetrics', [('direction', name)])
- writer.newline()
- for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
- writer.simpletag(metricName, value=getattr(self, metricName))
- writer.newline()
- writer.endtag('sbitLineMetrics')
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name in metricNames:
- vars(self)[name] = safeEval(attrs['value'])
# Important information about the naming scheme. Used for identifying subtables.
-_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
+_indexSubTableSubclassPrefix = "eblc_index_sub_table_"
+
class EblcIndexSubTable(object):
+ def __init__(self, data, ttFont):
+ self.data = data
+ self.ttFont = ttFont
+ # TODO Currently non-lazy decompiling doesn't work for this class...
+ # if not ttFont.lazy:
+ # self.decompile()
+ # del self.data, self.ttFont
+
+ def __getattr__(self, attr):
+ # Allow lazy decompile.
+ if attr[:2] == "__":
+ raise AttributeError(attr)
+ if attr == "data":
+ raise AttributeError(attr)
+ self.decompile()
+ return getattr(self, attr)
+
+ def ensureDecompiled(self, recurse=False):
+ if hasattr(self, "data"):
+ self.decompile()
+
+ # This method just takes care of the indexSubHeader. Implementing subclasses
+ # should call it to compile the indexSubHeader and then continue compiling
+ # the remainder of their unique format.
+ def compile(self, ttFont):
+ return struct.pack(
+ indexSubHeaderFormat,
+ self.indexFormat,
+ self.imageFormat,
+ self.imageDataOffset,
+ )
+
+ # Creates the XML for bitmap glyphs. Each index sub table basically makes
+ # the same XML except for specific metric information that is written
+ # out via a method call that a subclass implements optionally.
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("imageFormat", self.imageFormat),
+ ("firstGlyphIndex", self.firstGlyphIndex),
+ ("lastGlyphIndex", self.lastGlyphIndex),
+ ],
+ )
+ writer.newline()
+ self.writeMetrics(writer, ttFont)
+ # Write out the names as thats all thats needed to rebuild etc.
+ # For font debugging of consecutive formats the ids are also written.
+ # The ids are not read when moving from the XML format.
+ glyphIds = map(ttFont.getGlyphID, self.names)
+ for glyphName, glyphId in zip(self.names, glyphIds):
+ writer.simpletag("glyphLoc", name=glyphName, id=glyphId)
+ writer.newline()
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ # Read all the attributes. Even though the glyph indices are
+ # recalculated, they are still read in case there needs to
+ # be an immediate export of the data.
+ self.imageFormat = safeEval(attrs["imageFormat"])
+ self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
+ self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])
+
+ self.readMetrics(name, attrs, content, ttFont)
+
+ self.names = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "glyphLoc":
+ self.names.append(attrs["name"])
+
+ # A helper method that writes the metrics for the index sub table. It also
+ # is responsible for writing the image size for fixed size data since fixed
+ # size is not recalculated on compile. Default behavior is to do nothing.
+ def writeMetrics(self, writer, ttFont):
+ pass
+
+ # A helper method that is the inverse of writeMetrics.
+ def readMetrics(self, name, attrs, content, ttFont):
+ pass
+
+ # This method is for fixed glyph data sizes. There are formats where
+ # the glyph data is fixed but are actually composite glyphs. To handle
+ # this the font spec in indexSubTable makes the data the size of the
+ # fixed size by padding the component arrays. This function abstracts
+ # out this padding process. Input is data unpadded. Output is data
+ # padded only in fixed formats. Default behavior is to return the data.
+ def padBitmapData(self, data):
+ return data
+
+ # Remove any of the glyph locations and names that are flagged as skipped.
+ # This only occurs in formats {1,3}.
+ def removeSkipGlyphs(self):
+ # Determines if a name, location pair is a valid data location.
+ # Skip glyphs are marked when the size is equal to zero.
+ def isValidLocation(args):
+ (name, (startByte, endByte)) = args
+ return startByte < endByte
+
+ # Remove all skip glyphs.
+ dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
+ self.names, self.locations = list(map(list, zip(*dataPairs)))
- def __init__(self, data, ttFont):
- self.data = data
- self.ttFont = ttFont
- # TODO Currently non-lazy decompiling doesn't work for this class...
- #if not ttFont.lazy:
- # self.decompile()
- # del self.data, self.ttFont
-
- def __getattr__(self, attr):
- # Allow lazy decompile.
- if attr[:2] == '__':
- raise AttributeError(attr)
- if attr == "data":
- raise AttributeError(attr)
- self.decompile()
- return getattr(self, attr)
-
- def ensureDecompiled(self, recurse=False):
- if hasattr(self, "data"):
- self.decompile()
-
- # This method just takes care of the indexSubHeader. Implementing subclasses
- # should call it to compile the indexSubHeader and then continue compiling
- # the remainder of their unique format.
- def compile(self, ttFont):
- return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
-
- # Creates the XML for bitmap glyphs. Each index sub table basically makes
- # the same XML except for specific metric information that is written
- # out via a method call that a subclass implements optionally.
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ('imageFormat', self.imageFormat),
- ('firstGlyphIndex', self.firstGlyphIndex),
- ('lastGlyphIndex', self.lastGlyphIndex),
- ])
- writer.newline()
- self.writeMetrics(writer, ttFont)
- # Write out the names as thats all thats needed to rebuild etc.
- # For font debugging of consecutive formats the ids are also written.
- # The ids are not read when moving from the XML format.
- glyphIds = map(ttFont.getGlyphID, self.names)
- for glyphName, glyphId in zip(self.names, glyphIds):
- writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
- writer.newline()
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- # Read all the attributes. Even though the glyph indices are
- # recalculated, they are still read in case there needs to
- # be an immediate export of the data.
- self.imageFormat = safeEval(attrs['imageFormat'])
- self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
- self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
-
- self.readMetrics(name, attrs, content, ttFont)
-
- self.names = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'glyphLoc':
- self.names.append(attrs['name'])
-
- # A helper method that writes the metrics for the index sub table. It also
- # is responsible for writing the image size for fixed size data since fixed
- # size is not recalculated on compile. Default behavior is to do nothing.
- def writeMetrics(self, writer, ttFont):
- pass
-
- # A helper method that is the inverse of writeMetrics.
- def readMetrics(self, name, attrs, content, ttFont):
- pass
-
- # This method is for fixed glyph data sizes. There are formats where
- # the glyph data is fixed but are actually composite glyphs. To handle
- # this the font spec in indexSubTable makes the data the size of the
- # fixed size by padding the component arrays. This function abstracts
- # out this padding process. Input is data unpadded. Output is data
- # padded only in fixed formats. Default behavior is to return the data.
- def padBitmapData(self, data):
- return data
-
- # Remove any of the glyph locations and names that are flagged as skipped.
- # This only occurs in formats {1,3}.
- def removeSkipGlyphs(self):
- # Determines if a name, location pair is a valid data location.
- # Skip glyphs are marked when the size is equal to zero.
- def isValidLocation(args):
- (name, (startByte, endByte)) = args
- return startByte < endByte
- # Remove all skip glyphs.
- dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
- self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
+ # Prep the data size for the offset array data format.
+ dataFormat = ">" + formatStringForDataType
+ offsetDataSize = struct.calcsize(dataFormat)
+
+ class OffsetArrayIndexSubTableMixin(object):
+ def decompile(self):
+ numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
+ indexingOffsets = [
+ glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
+ ]
+ indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
+ offsetArray = [
+ struct.unpack(dataFormat, self.data[slice(*loc)])[0]
+ for loc in indexingLocations
+ ]
+
+ glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
+ modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
+ self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
+
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ self.removeSkipGlyphs()
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ # First make sure that all the data lines up properly. Formats 1 and 3
+ # must have all its data lined up consecutively. If not this will fail.
+ for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
+ assert (
+ curLoc[1] == nxtLoc[0]
+ ), "Data must be consecutive in indexSubTable offset formats"
+
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ # Make sure that all ids are sorted strictly increasing.
+ assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))
+
+ # Run a simple algorithm to add skip glyphs to the data locations at
+ # the places where an id is not present.
+ idQueue = deque(glyphIds)
+ locQueue = deque(self.locations)
+ allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
+ allLocations = []
+ for curId in allGlyphIds:
+ if curId != idQueue[0]:
+ allLocations.append((locQueue[0][0], locQueue[0][0]))
+ else:
+ idQueue.popleft()
+ allLocations.append(locQueue.popleft())
+
+ # Now that all the locations are collected, pack them appropriately into
+ # offsets. This is the form where offset[i] is the location and
+ # offset[i+1]-offset[i] is the size of the data location.
+ offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
+ # Image data offset must be less than or equal to the minimum of locations.
+ # This offset may change the value for round tripping but is safer and
+ # allows imageDataOffset to not be required to be in the XML version.
+ self.imageDataOffset = min(offsets)
+ offsetArray = [offset - self.imageDataOffset for offset in offsets]
+
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList += [
+ struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
+ ]
+ # Take care of any padding issues. Only occurs in format 3.
+ if offsetDataSize * len(offsetArray) % 4 != 0:
+ dataList.append(struct.pack(dataFormat, 0))
+ return bytesjoin(dataList)
+
+ return OffsetArrayIndexSubTableMixin
- # Prep the data size for the offset array data format.
- dataFormat = '>'+formatStringForDataType
- offsetDataSize = struct.calcsize(dataFormat)
-
- class OffsetArrayIndexSubTableMixin(object):
-
- def decompile(self):
-
- numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
- indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
- indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
- offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
-
- glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
- modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
- self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
-
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- self.removeSkipGlyphs()
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- # First make sure that all the data lines up properly. Formats 1 and 3
- # must have all its data lined up consecutively. If not this will fail.
- for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
- assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
-
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- # Make sure that all ids are sorted strictly increasing.
- assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
-
- # Run a simple algorithm to add skip glyphs to the data locations at
- # the places where an id is not present.
- idQueue = deque(glyphIds)
- locQueue = deque(self.locations)
- allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
- allLocations = []
- for curId in allGlyphIds:
- if curId != idQueue[0]:
- allLocations.append((locQueue[0][0], locQueue[0][0]))
- else:
- idQueue.popleft()
- allLocations.append(locQueue.popleft())
-
- # Now that all the locations are collected, pack them appropriately into
- # offsets. This is the form where offset[i] is the location and
- # offset[i+1]-offset[i] is the size of the data location.
- offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
- # Image data offset must be less than or equal to the minimum of locations.
- # This offset may change the value for round tripping but is safer and
- # allows imageDataOffset to not be required to be in the XML version.
- self.imageDataOffset = min(offsets)
- offsetArray = [offset - self.imageDataOffset for offset in offsets]
-
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
- # Take care of any padding issues. Only occurs in format 3.
- if offsetDataSize * len(offsetArray) % 4 != 0:
- dataList.append(struct.pack(dataFormat, 0))
- return bytesjoin(dataList)
-
- return OffsetArrayIndexSubTableMixin
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
+ def writeMetrics(self, writer, ttFont):
+ writer.simpletag("imageSize", value=self.imageSize)
+ writer.newline()
+ self.metrics.toXML(writer, ttFont)
+
+ def readMetrics(self, name, attrs, content, ttFont):
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "imageSize":
+ self.imageSize = safeEval(attrs["value"])
+ elif name == BigGlyphMetrics.__name__:
+ self.metrics = BigGlyphMetrics()
+ self.metrics.fromXML(name, attrs, content, ttFont)
+ elif name == SmallGlyphMetrics.__name__:
+ log.warning(
+ "SmallGlyphMetrics being ignored in format %d.", self.indexFormat
+ )
+
+ def padBitmapData(self, data):
+ # Make sure that the data isn't bigger than the fixed size.
+ assert len(data) <= self.imageSize, (
+ "Data in indexSubTable format %d must be less than the fixed size."
+ % self.indexFormat
+ )
+ # Pad the data so that it matches the fixed size.
+ pad = (self.imageSize - len(data)) * b"\0"
+ return data + pad
+
+
+class eblc_index_sub_table_1(
+ _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable
+):
+ pass
- def writeMetrics(self, writer, ttFont):
- writer.simpletag('imageSize', value=self.imageSize)
- writer.newline()
- self.metrics.toXML(writer, ttFont)
-
- def readMetrics(self, name, attrs, content, ttFont):
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == 'imageSize':
- self.imageSize = safeEval(attrs['value'])
- elif name == BigGlyphMetrics.__name__:
- self.metrics = BigGlyphMetrics()
- self.metrics.fromXML(name, attrs, content, ttFont)
- elif name == SmallGlyphMetrics.__name__:
- log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat)
-
- def padBitmapData(self, data):
- # Make sure that the data isn't bigger than the fixed size.
- assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
- # Pad the data so that it matches the fixed size.
- pad = (self.imageSize - len(data)) * b'\0'
- return data + pad
-
-class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
- pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
+ def decompile(self):
+ (self.imageSize,) = struct.unpack(">L", self.data[:4])
+ self.metrics = BigGlyphMetrics()
+ sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
+ glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
+ offsets = [
+ self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
+ ]
+ self.locations = list(zip(offsets, offsets[1:]))
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ # Make sure all the ids are consecutive. This is required by Format 2.
+ assert glyphIds == list(
+ range(self.firstGlyphIndex, self.lastGlyphIndex + 1)
+ ), "Format 2 ids must be consecutive."
+ self.imageDataOffset = min(next(iter(zip(*self.locations))))
+
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList.append(struct.pack(">L", self.imageSize))
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ return bytesjoin(dataList)
+
+
+class eblc_index_sub_table_3(
+ _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable
+):
+ pass
- def decompile(self):
- (self.imageSize,) = struct.unpack(">L", self.data[:4])
- self.metrics = BigGlyphMetrics()
- sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
- glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
- offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
- self.locations = list(zip(offsets, offsets[1:]))
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- # Make sure all the ids are consecutive. This is required by Format 2.
- assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
- self.imageDataOffset = min(next(iter(zip(*self.locations))))
-
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList.append(struct.pack(">L", self.imageSize))
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- return bytesjoin(dataList)
-
-class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
- pass
class eblc_index_sub_table_4(EblcIndexSubTable):
+ def decompile(self):
+ (numGlyphs,) = struct.unpack(">L", self.data[:4])
+ data = self.data[4:]
+ indexingOffsets = [
+ glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs + 2)
+ ]
+ indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
+ glyphArray = [
+ struct.unpack(codeOffsetPairFormat, data[slice(*loc)])
+ for loc in indexingLocations
+ ]
+ glyphIds, offsets = list(map(list, zip(*glyphArray)))
+ # There are one too many glyph ids. Get rid of the last one.
+ glyphIds.pop()
+
+ offsets = [offset + self.imageDataOffset for offset in offsets]
+ self.locations = list(zip(offsets, offsets[1:]))
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ # First make sure that all the data lines up properly. Format 4
+ # must have all its data lined up consecutively. If not this will fail.
+ for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
+ assert (
+ curLoc[1] == nxtLoc[0]
+ ), "Data must be consecutive in indexSubTable format 4"
+
+ offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
+ # Image data offset must be less than or equal to the minimum of locations.
+ # Resetting this offset may change the value for round tripping but is safer
+ # and allows imageDataOffset to not be required to be in the XML version.
+ self.imageDataOffset = min(offsets)
+ offsets = [offset - self.imageDataOffset for offset in offsets]
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ # Create an iterator over the ids plus a padding value.
+ idsPlusPad = list(itertools.chain(glyphIds, [0]))
+
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList.append(struct.pack(">L", len(glyphIds)))
+ tmp = [
+ struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)
+ ]
+ dataList += tmp
+ data = bytesjoin(dataList)
+ return data
- def decompile(self):
-
- (numGlyphs,) = struct.unpack(">L", self.data[:4])
- data = self.data[4:]
- indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
- indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
- glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
- glyphIds, offsets = list(map(list, zip(*glyphArray)))
- # There are one too many glyph ids. Get rid of the last one.
- glyphIds.pop()
-
- offsets = [offset + self.imageDataOffset for offset in offsets]
- self.locations = list(zip(offsets, offsets[1:]))
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- # First make sure that all the data lines up properly. Format 4
- # must have all its data lined up consecutively. If not this will fail.
- for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
- assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"
-
- offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
- # Image data offset must be less than or equal to the minimum of locations.
- # Resetting this offset may change the value for round tripping but is safer
- # and allows imageDataOffset to not be required to be in the XML version.
- self.imageDataOffset = min(offsets)
- offsets = [offset - self.imageDataOffset for offset in offsets]
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- # Create an iterator over the ids plus a padding value.
- idsPlusPad = list(itertools.chain(glyphIds, [0]))
-
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList.append(struct.pack(">L", len(glyphIds)))
- tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
- dataList += tmp
- data = bytesjoin(dataList)
- return data
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
+ def decompile(self):
+ self.origDataLen = 0
+ (self.imageSize,) = struct.unpack(">L", self.data[:4])
+ data = self.data[4:]
+ self.metrics, data = sstruct.unpack2(
+ bigGlyphMetricsFormat, data, BigGlyphMetrics()
+ )
+ (numGlyphs,) = struct.unpack(">L", data[:4])
+ data = data[4:]
+ glyphIds = [
+ struct.unpack(">H", data[2 * i : 2 * (i + 1)])[0] for i in range(numGlyphs)
+ ]
+
+ offsets = [
+ self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
+ ]
+ self.locations = list(zip(offsets, offsets[1:]))
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
+ del self.data, self.ttFont
+
+ def compile(self, ttFont):
+ self.imageDataOffset = min(next(iter(zip(*self.locations))))
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
+ dataList.append(struct.pack(">L", self.imageSize))
+ dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
+ dataList.append(struct.pack(">L", len(glyphIds)))
+ dataList += [struct.pack(">H", curId) for curId in glyphIds]
+ if len(glyphIds) % 2 == 1:
+ dataList.append(struct.pack(">H", 0))
+ return bytesjoin(dataList)
- def decompile(self):
- self.origDataLen = 0
- (self.imageSize,) = struct.unpack(">L", self.data[:4])
- data = self.data[4:]
- self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
- (numGlyphs,) = struct.unpack(">L", data[:4])
- data = data[4:]
- glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
-
- offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
- self.locations = list(zip(offsets, offsets[1:]))
- self.names = list(map(self.ttFont.getGlyphName, glyphIds))
- del self.data, self.ttFont
-
- def compile(self, ttFont):
- self.imageDataOffset = min(next(iter(zip(*self.locations))))
- dataList = [EblcIndexSubTable.compile(self, ttFont)]
- dataList.append(struct.pack(">L", self.imageSize))
- dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
- glyphIds = list(map(ttFont.getGlyphID, self.names))
- dataList.append(struct.pack(">L", len(glyphIds)))
- dataList += [struct.pack(">H", curId) for curId in glyphIds]
- if len(glyphIds) % 2 == 1:
- dataList.append(struct.pack(">H", 0))
- return bytesjoin(dataList)
# Dictionary of indexFormat to the class representing that format.
eblc_sub_table_classes = {
- 1: eblc_index_sub_table_1,
- 2: eblc_index_sub_table_2,
- 3: eblc_index_sub_table_3,
- 4: eblc_index_sub_table_4,
- 5: eblc_index_sub_table_5,
- }
+ 1: eblc_index_sub_table_1,
+ 2: eblc_index_sub_table_2,
+ 3: eblc_index_sub_table_3,
+ 4: eblc_index_sub_table_4,
+ 5: eblc_index_sub_table_5,
+}
diff --git a/Lib/fontTools/ttLib/tables/F_F_T_M_.py b/Lib/fontTools/ttLib/tables/F_F_T_M_.py
index 2376f2db..823ced1b 100644
--- a/Lib/fontTools/ttLib/tables/F_F_T_M_.py
+++ b/Lib/fontTools/ttLib/tables/F_F_T_M_.py
@@ -11,30 +11,32 @@ FFTMFormat = """
sourceModified: Q
"""
-class table_F_F_T_M_(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- dummy, rest = sstruct.unpack2(FFTMFormat, data, self)
+class table_F_F_T_M_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ dummy, rest = sstruct.unpack2(FFTMFormat, data, self)
- def compile(self, ttFont):
- data = sstruct.pack(FFTMFormat, self)
- return data
+ def compile(self, ttFont):
+ data = sstruct.pack(FFTMFormat, self)
+ return data
- def toXML(self, writer, ttFont):
- writer.comment("FontForge's timestamp, font source creation and modification dates")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(FFTMFormat)
- for name in names:
- value = getattr(self, name)
- if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
- value = timestampToString(value)
- writer.simpletag(name, value=value)
- writer.newline()
+ def toXML(self, writer, ttFont):
+ writer.comment(
+ "FontForge's timestamp, font source creation and modification dates"
+ )
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(FFTMFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
+ value = timestampToString(value)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
- value = timestampFromString(value)
- else:
- value = safeEval(value)
- setattr(self, name, value)
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
+ value = timestampFromString(value)
+ else:
+ value = safeEval(value)
+ setattr(self, name, value)
diff --git a/Lib/fontTools/ttLib/tables/F__e_a_t.py b/Lib/fontTools/ttLib/tables/F__e_a_t.py
index a444c11d..fbcd6ca6 100644
--- a/Lib/fontTools/ttLib/tables/F__e_a_t.py
+++ b/Lib/fontTools/ttLib/tables/F__e_a_t.py
@@ -5,10 +5,11 @@ from . import DefaultTable
from . import grUtils
import struct
-Feat_hdr_format='''
+Feat_hdr_format = """
>
version: 16.16F
-'''
+"""
+
class table_F__e_a_t(DefaultTable.DefaultTable):
"""The ``Feat`` table is used exclusively by the Graphite shaping engine
@@ -25,28 +26,30 @@ class table_F__e_a_t(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
(_, data) = sstruct.unpack2(Feat_hdr_format, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
- numFeats, = struct.unpack('>H', data[:2])
+ (numFeats,) = struct.unpack(">H", data[:2])
data = data[8:]
allfeats = []
maxsetting = 0
for i in range(numFeats):
if self.version >= 2.0:
- (fid, nums, _, offset, flags, lid) = struct.unpack(">LHHLHH",
- data[16*i:16*(i+1)])
+ (fid, nums, _, offset, flags, lid) = struct.unpack(
+ ">LHHLHH", data[16 * i : 16 * (i + 1)]
+ )
offset = int((offset - 12 - 16 * numFeats) / 4)
else:
- (fid, nums, offset, flags, lid) = struct.unpack(">HHLHH",
- data[12*i:12*(i+1)])
+ (fid, nums, offset, flags, lid) = struct.unpack(
+ ">HHLHH", data[12 * i : 12 * (i + 1)]
+ )
offset = int((offset - 12 - 12 * numFeats) / 4)
allfeats.append((fid, nums, offset, flags, lid))
maxsetting = max(maxsetting, offset + nums)
- data = data[16*numFeats:]
+ data = data[16 * numFeats :]
allsettings = []
for i in range(maxsetting):
if len(data) >= 4 * (i + 1):
- (val, lid) = struct.unpack(">HH", data[4*i:4*(i+1)])
+ (val, lid) = struct.unpack(">HH", data[4 * i : 4 * (i + 1)])
allsettings.append((val, lid))
- for i,f in enumerate(allfeats):
+ for i, f in enumerate(allfeats):
(fid, nums, offset, flags, lid) = f
fobj = Feature()
fobj.flags = flags
@@ -56,7 +59,8 @@ class table_F__e_a_t(DefaultTable.DefaultTable):
fobj.default = None
fobj.index = i
for i in range(offset, offset + nums):
- if i >= len(allsettings): continue
+ if i >= len(allsettings):
+ continue
(vid, vlid) = allsettings[i]
fobj.settings[vid] = vlid
if fobj.default is None:
@@ -66,54 +70,75 @@ class table_F__e_a_t(DefaultTable.DefaultTable):
fdat = b""
vdat = b""
offset = 0
- for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
+ for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
fnum = grUtils.tag2num(f)
if self.version >= 2.0:
- fdat += struct.pack(">LHHLHH", grUtils.tag2num(f), len(v.settings),
- 0, offset * 4 + 12 + 16 * len(self.features), v.flags, v.label)
- elif fnum > 65535: # self healing for alphabetic ids
+ fdat += struct.pack(
+ ">LHHLHH",
+ grUtils.tag2num(f),
+ len(v.settings),
+ 0,
+ offset * 4 + 12 + 16 * len(self.features),
+ v.flags,
+ v.label,
+ )
+ elif fnum > 65535: # self healing for alphabetic ids
self.version = 2.0
return self.compile(ttFont)
else:
- fdat += struct.pack(">HHLHH", grUtils.tag2num(f), len(v.settings),
- offset * 4 + 12 + 12 * len(self.features), v.flags, v.label)
- for s, l in sorted(v.settings.items(), key=lambda x:(-1, x[1]) if x[0] == v.default else x):
+ fdat += struct.pack(
+ ">HHLHH",
+ grUtils.tag2num(f),
+ len(v.settings),
+ offset * 4 + 12 + 12 * len(self.features),
+ v.flags,
+ v.label,
+ )
+ for s, l in sorted(
+ v.settings.items(), key=lambda x: (-1, x[1]) if x[0] == v.default else x
+ ):
vdat += struct.pack(">HH", s, l)
offset += len(v.settings)
hdr = sstruct.pack(Feat_hdr_format, self)
- return hdr + struct.pack('>HHL', len(self.features), 0, 0) + fdat + vdat
+ return hdr + struct.pack(">HHL", len(self.features), 0, 0) + fdat + vdat
def toXML(self, writer, ttFont):
- writer.simpletag('version', version=self.version)
+ writer.simpletag("version", version=self.version)
writer.newline()
- for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
- writer.begintag('feature', fid=f, label=v.label, flags=v.flags,
- default=(v.default if v.default else 0))
+ for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
+ writer.begintag(
+ "feature",
+ fid=f,
+ label=v.label,
+ flags=v.flags,
+ default=(v.default if v.default else 0),
+ )
writer.newline()
for s, l in sorted(v.settings.items()):
- writer.simpletag('setting', value=s, label=l)
+ writer.simpletag("setting", value=s, label=l)
writer.newline()
- writer.endtag('feature')
+ writer.endtag("feature")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.version = float(safeEval(attrs['version']))
- elif name == 'feature':
- fid = attrs['fid']
+ if name == "version":
+ self.version = float(safeEval(attrs["version"]))
+ elif name == "feature":
+ fid = attrs["fid"]
fobj = Feature()
- fobj.flags = int(safeEval(attrs['flags']))
- fobj.label = int(safeEval(attrs['label']))
- fobj.default = int(safeEval(attrs.get('default','0')))
+ fobj.flags = int(safeEval(attrs["flags"]))
+ fobj.label = int(safeEval(attrs["label"]))
+ fobj.default = int(safeEval(attrs.get("default", "0")))
fobj.index = len(self.features)
self.features[fid] = fobj
fobj.settings = {}
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, c = element
- if tag == 'setting':
- fobj.settings[int(safeEval(a['value']))] = int(safeEval(a['label']))
+ if tag == "setting":
+ fobj.settings[int(safeEval(a["value"]))] = int(safeEval(a["label"]))
+
class Feature(object):
pass
-
diff --git a/Lib/fontTools/ttLib/tables/G_D_E_F_.py b/Lib/fontTools/ttLib/tables/G_D_E_F_.py
index d4a57414..d8ae8b23 100644
--- a/Lib/fontTools/ttLib/tables/G_D_E_F_.py
+++ b/Lib/fontTools/ttLib/tables/G_D_E_F_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_G_D_E_F_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/G_M_A_P_.py b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
index 833890da..949ef842 100644
--- a/Lib/fontTools/ttLib/tables/G_M_A_P_.py
+++ b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
@@ -25,102 +25,117 @@ GMAPRecordFormat1 = """
class GMAPRecord(object):
- def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
- self.UV = uv
- self.cid = cid
- self.gid = gid
- self.ggid = ggid
- self.name = name
+ def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
+ self.UV = uv
+ self.cid = cid
+ self.gid = gid
+ self.ggid = ggid
+ self.name = name
- def toXML(self, writer, ttFont):
- writer.begintag("GMAPRecord")
- writer.newline()
- writer.simpletag("UV", value=self.UV)
- writer.newline()
- writer.simpletag("cid", value=self.cid)
- writer.newline()
- writer.simpletag("gid", value=self.gid)
- writer.newline()
- writer.simpletag("glyphletGid", value=self.gid)
- writer.newline()
- writer.simpletag("GlyphletName", value=self.name)
- writer.newline()
- writer.endtag("GMAPRecord")
- writer.newline()
+ def toXML(self, writer, ttFont):
+ writer.begintag("GMAPRecord")
+ writer.newline()
+ writer.simpletag("UV", value=self.UV)
+ writer.newline()
+ writer.simpletag("cid", value=self.cid)
+ writer.newline()
+ writer.simpletag("gid", value=self.gid)
+ writer.newline()
+ writer.simpletag("glyphletGid", value=self.gid)
+ writer.newline()
+ writer.simpletag("GlyphletName", value=self.name)
+ writer.newline()
+ writer.endtag("GMAPRecord")
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name == "GlyphletName":
- self.name = value
- else:
- setattr(self, name, safeEval(value))
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name == "GlyphletName":
+ self.name = value
+ else:
+ setattr(self, name, safeEval(value))
- def compile(self, ttFont):
- if self.UV is None:
- self.UV = 0
- nameLen = len(self.name)
- if nameLen < 32:
- self.name = self.name + "\0"*(32 - nameLen)
- data = sstruct.pack(GMAPRecordFormat1, self)
- return data
+ def compile(self, ttFont):
+ if self.UV is None:
+ self.UV = 0
+ nameLen = len(self.name)
+ if nameLen < 32:
+ self.name = self.name + "\0" * (32 - nameLen)
+ data = sstruct.pack(GMAPRecordFormat1, self)
+ return data
- def __repr__(self):
- return "GMAPRecord[ UV: " + str(self.UV) + ", cid: " + str(self.cid) + ", gid: " + str(self.gid) + ", ggid: " + str(self.ggid) + ", Glyphlet Name: " + str(self.name) + " ]"
+ def __repr__(self):
+ return (
+ "GMAPRecord[ UV: "
+ + str(self.UV)
+ + ", cid: "
+ + str(self.cid)
+ + ", gid: "
+ + str(self.gid)
+ + ", ggid: "
+ + str(self.ggid)
+ + ", Glyphlet Name: "
+ + str(self.name)
+ + " ]"
+ )
class table_G_M_A_P_(DefaultTable.DefaultTable):
+ dependencies = []
- dependencies = []
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
+ self.psFontName = tostr(newData[: self.fontNameLength])
+ assert (
+ self.recordsOffset % 4
+ ) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
+ newData = data[self.recordsOffset :]
+ self.gmapRecords = []
+ for i in range(self.recordsCount):
+ gmapRecord, newData = sstruct.unpack2(
+ GMAPRecordFormat1, newData, GMAPRecord()
+ )
+ gmapRecord.name = gmapRecord.name.strip("\0")
+ self.gmapRecords.append(gmapRecord)
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
- self.psFontName = tostr(newData[:self.fontNameLength])
- assert (self.recordsOffset % 4) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
- newData = data[self.recordsOffset:]
- self.gmapRecords = []
- for i in range (self.recordsCount):
- gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData, GMAPRecord())
- gmapRecord.name = gmapRecord.name.strip('\0')
- self.gmapRecords.append(gmapRecord)
+ def compile(self, ttFont):
+ self.recordsCount = len(self.gmapRecords)
+ self.fontNameLength = len(self.psFontName)
+ self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
+ data = sstruct.pack(GMAPFormat, self)
+ data = data + tobytes(self.psFontName)
+ data = data + b"\0" * (self.recordsOffset - len(data))
+ for record in self.gmapRecords:
+ data = data + record.compile(ttFont)
+ return data
- def compile(self, ttFont):
- self.recordsCount = len(self.gmapRecords)
- self.fontNameLength = len(self.psFontName)
- self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
- data = sstruct.pack(GMAPFormat, self)
- data = data + tobytes(self.psFontName)
- data = data + b"\0" * (self.recordsOffset - len(data))
- for record in self.gmapRecords:
- data = data + record.compile(ttFont)
- return data
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(GMAPFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ writer.simpletag("PSFontName", value=self.psFontName)
+ writer.newline()
+ for gmapRecord in self.gmapRecords:
+ gmapRecord.toXML(writer, ttFont)
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(GMAPFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- writer.simpletag("PSFontName", value=self.psFontName)
- writer.newline()
- for gmapRecord in self.gmapRecords:
- gmapRecord.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "GMAPRecord":
- if not hasattr(self, "gmapRecords"):
- self.gmapRecords = []
- gmapRecord = GMAPRecord()
- self.gmapRecords.append(gmapRecord)
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- gmapRecord.fromXML(name, attrs, content, ttFont)
- else:
- value = attrs["value"]
- if name == "PSFontName":
- self.psFontName = value
- else:
- setattr(self, name, safeEval(value))
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "GMAPRecord":
+ if not hasattr(self, "gmapRecords"):
+ self.gmapRecords = []
+ gmapRecord = GMAPRecord()
+ self.gmapRecords.append(gmapRecord)
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ gmapRecord.fromXML(name, attrs, content, ttFont)
+ else:
+ value = attrs["value"]
+ if name == "PSFontName":
+ self.psFontName = value
+ else:
+ setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/G_P_K_G_.py b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
index 4f469c02..eed34d92 100644
--- a/Lib/fontTools/ttLib/tables/G_P_K_G_.py
+++ b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
@@ -16,108 +16,111 @@ GPKGFormat = """
class table_G_P_K_G_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(GPKGFormat, data, self)
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(GPKGFormat, data, self)
+ GMAPoffsets = array.array("I")
+ endPos = (self.numGMAPs + 1) * 4
+ GMAPoffsets.frombytes(newData[:endPos])
+ if sys.byteorder != "big":
+ GMAPoffsets.byteswap()
+ self.GMAPs = []
+ for i in range(self.numGMAPs):
+ start = GMAPoffsets[i]
+ end = GMAPoffsets[i + 1]
+ self.GMAPs.append(data[start:end])
+ pos = endPos
+ endPos = pos + (self.numGlyplets + 1) * 4
+ glyphletOffsets = array.array("I")
+ glyphletOffsets.frombytes(newData[pos:endPos])
+ if sys.byteorder != "big":
+ glyphletOffsets.byteswap()
+ self.glyphlets = []
+ for i in range(self.numGlyplets):
+ start = glyphletOffsets[i]
+ end = glyphletOffsets[i + 1]
+ self.glyphlets.append(data[start:end])
- GMAPoffsets = array.array("I")
- endPos = (self.numGMAPs+1) * 4
- GMAPoffsets.frombytes(newData[:endPos])
- if sys.byteorder != "big": GMAPoffsets.byteswap()
- self.GMAPs = []
- for i in range(self.numGMAPs):
- start = GMAPoffsets[i]
- end = GMAPoffsets[i+1]
- self.GMAPs.append(data[start:end])
- pos = endPos
- endPos = pos + (self.numGlyplets + 1)*4
- glyphletOffsets = array.array("I")
- glyphletOffsets.frombytes(newData[pos:endPos])
- if sys.byteorder != "big": glyphletOffsets.byteswap()
- self.glyphlets = []
- for i in range(self.numGlyplets):
- start = glyphletOffsets[i]
- end = glyphletOffsets[i+1]
- self.glyphlets.append(data[start:end])
+ def compile(self, ttFont):
+ self.numGMAPs = len(self.GMAPs)
+ self.numGlyplets = len(self.glyphlets)
+ GMAPoffsets = [0] * (self.numGMAPs + 1)
+ glyphletOffsets = [0] * (self.numGlyplets + 1)
- def compile(self, ttFont):
- self.numGMAPs = len(self.GMAPs)
- self.numGlyplets = len(self.glyphlets)
- GMAPoffsets = [0]*(self.numGMAPs + 1)
- glyphletOffsets = [0]*(self.numGlyplets + 1)
+ dataList = [sstruct.pack(GPKGFormat, self)]
- dataList =[ sstruct.pack(GPKGFormat, self)]
+ pos = len(dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4
+ GMAPoffsets[0] = pos
+ for i in range(1, self.numGMAPs + 1):
+ pos += len(self.GMAPs[i - 1])
+ GMAPoffsets[i] = pos
+ gmapArray = array.array("I", GMAPoffsets)
+ if sys.byteorder != "big":
+ gmapArray.byteswap()
+ dataList.append(gmapArray.tobytes())
- pos = len(dataList[0]) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4
- GMAPoffsets[0] = pos
- for i in range(1, self.numGMAPs +1):
- pos += len(self.GMAPs[i-1])
- GMAPoffsets[i] = pos
- gmapArray = array.array("I", GMAPoffsets)
- if sys.byteorder != "big": gmapArray.byteswap()
- dataList.append(gmapArray.tobytes())
+ glyphletOffsets[0] = pos
+ for i in range(1, self.numGlyplets + 1):
+ pos += len(self.glyphlets[i - 1])
+ glyphletOffsets[i] = pos
+ glyphletArray = array.array("I", glyphletOffsets)
+ if sys.byteorder != "big":
+ glyphletArray.byteswap()
+ dataList.append(glyphletArray.tobytes())
+ dataList += self.GMAPs
+ dataList += self.glyphlets
+ data = bytesjoin(dataList)
+ return data
- glyphletOffsets[0] = pos
- for i in range(1, self.numGlyplets +1):
- pos += len(self.glyphlets[i-1])
- glyphletOffsets[i] = pos
- glyphletArray = array.array("I", glyphletOffsets)
- if sys.byteorder != "big": glyphletArray.byteswap()
- dataList.append(glyphletArray.tobytes())
- dataList += self.GMAPs
- dataList += self.glyphlets
- data = bytesjoin(dataList)
- return data
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(GPKGFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(GPKGFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
+ writer.begintag("GMAPs")
+ writer.newline()
+ for gmapData in self.GMAPs:
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(gmapData)
+ writer.endtag("hexdata")
+ writer.newline()
+ writer.endtag("GMAPs")
+ writer.newline()
- writer.begintag("GMAPs")
- writer.newline()
- for gmapData in self.GMAPs:
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(gmapData)
- writer.endtag("hexdata")
- writer.newline()
- writer.endtag("GMAPs")
- writer.newline()
+ writer.begintag("glyphlets")
+ writer.newline()
+ for glyphletData in self.glyphlets:
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(glyphletData)
+ writer.endtag("hexdata")
+ writer.newline()
+ writer.endtag("glyphlets")
+ writer.newline()
- writer.begintag("glyphlets")
- writer.newline()
- for glyphletData in self.glyphlets:
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(glyphletData)
- writer.endtag("hexdata")
- writer.newline()
- writer.endtag("glyphlets")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "GMAPs":
- if not hasattr(self, "GMAPs"):
- self.GMAPs = []
- for element in content:
- if isinstance(element, str):
- continue
- itemName, itemAttrs, itemContent = element
- if itemName == "hexdata":
- self.GMAPs.append(readHex(itemContent))
- elif name == "glyphlets":
- if not hasattr(self, "glyphlets"):
- self.glyphlets = []
- for element in content:
- if isinstance(element, str):
- continue
- itemName, itemAttrs, itemContent = element
- if itemName == "hexdata":
- self.glyphlets.append(readHex(itemContent))
- else:
- setattr(self, name, safeEval(attrs["value"]))
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "GMAPs":
+ if not hasattr(self, "GMAPs"):
+ self.GMAPs = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ itemName, itemAttrs, itemContent = element
+ if itemName == "hexdata":
+ self.GMAPs.append(readHex(itemContent))
+ elif name == "glyphlets":
+ if not hasattr(self, "glyphlets"):
+ self.glyphlets = []
+ for element in content:
+ if isinstance(element, str):
+ continue
+ itemName, itemAttrs, itemContent = element
+ if itemName == "hexdata":
+ self.glyphlets.append(readHex(itemContent))
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
diff --git a/Lib/fontTools/ttLib/tables/G_P_O_S_.py b/Lib/fontTools/ttLib/tables/G_P_O_S_.py
index 013c8209..ca8290ba 100644
--- a/Lib/fontTools/ttLib/tables/G_P_O_S_.py
+++ b/Lib/fontTools/ttLib/tables/G_P_O_S_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_G_P_O_S_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/G_S_U_B_.py b/Lib/fontTools/ttLib/tables/G_S_U_B_.py
index 44036490..bb8375a5 100644
--- a/Lib/fontTools/ttLib/tables/G_S_U_B_.py
+++ b/Lib/fontTools/ttLib/tables/G_S_U_B_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_G_S_U_B_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/G__l_a_t.py b/Lib/fontTools/ttLib/tables/G__l_a_t.py
index a4e8e38f..f1dfdaa0 100644
--- a/Lib/fontTools/ttLib/tables/G__l_a_t.py
+++ b/Lib/fontTools/ttLib/tables/G__l_a_t.py
@@ -1,6 +1,7 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
+
# from itertools import *
from functools import partial
from . import DefaultTable
@@ -51,16 +52,19 @@ Glat_format_3_subbox_entry = """
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""
-class _Object() :
+
+class _Object:
pass
-class _Dict(dict) :
+
+class _Dict(dict):
pass
+
class table_G__l_a_t(DefaultTable.DefaultTable):
- '''
+ """
Support Graphite Glat tables
- '''
+ """
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
@@ -70,31 +74,31 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
sstruct.unpack2(Glat_format_0, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
if self.version <= 1.9:
- decoder = partial(self.decompileAttributes12,fmt=Glat_format_1_entry)
- elif self.version <= 2.9:
- decoder = partial(self.decompileAttributes12,fmt=Glat_format_23_entry)
+ decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
+ elif self.version <= 2.9:
+ decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
elif self.version >= 3.0:
(data, self.scheme) = grUtils.decompress(data)
sstruct.unpack2(Glat_format_3, data, self)
self.hasOctaboxes = (self.compression & 1) == 1
decoder = self.decompileAttributes3
-
- gloc = ttFont['Gloc']
+
+ gloc = ttFont["Gloc"]
self.attributes = {}
count = 0
- for s,e in zip(gloc,gloc[1:]):
+ for s, e in zip(gloc, gloc[1:]):
self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
count += 1
-
+
def decompileAttributes12(self, data, fmt):
attributes = _Dict()
while len(data) > 3:
e, data = sstruct.unpack2(fmt, data, _Object())
- keys = range(e.attNum, e.attNum+e.num)
- if len(data) >= 2 * e.num :
- vals = struct.unpack_from(('>%dh' % e.num), data)
- attributes.update(zip(keys,vals))
- data = data[2*e.num:]
+ keys = range(e.attNum, e.attNum + e.num)
+ if len(data) >= 2 * e.num:
+ vals = struct.unpack_from((">%dh" % e.num), data)
+ attributes.update(zip(keys, vals))
+ data = data[2 * e.num :]
return attributes
def decompileAttributes3(self, data):
@@ -103,9 +107,10 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
numsub = bin(o.subboxBitmap).count("1")
o.subboxes = []
for b in range(numsub):
- if len(data) >= 8 :
- subbox, data = sstruct.unpack2(Glat_format_3_subbox_entry,
- data, _Object())
+ if len(data) >= 8:
+ subbox, data = sstruct.unpack2(
+ Glat_format_3_subbox_entry, data, _Object()
+ )
o.subboxes.append(subbox)
attrs = self.decompileAttributes12(data, Glat_format_23_entry)
if self.hasOctaboxes:
@@ -128,7 +133,7 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
glocs.append(len(data))
data += encoder(self.attributes[ttFont.getGlyphName(n)])
glocs.append(len(data))
- ttFont['Gloc'].set(glocs)
+ ttFont["Gloc"].set(glocs)
if self.version >= 3.0:
data = grUtils.compress(self.scheme, data)
@@ -137,82 +142,93 @@ class table_G__l_a_t(DefaultTable.DefaultTable):
def compileAttributes12(self, attrs, fmt):
data = b""
for e in grUtils.entries(attrs):
- data += sstruct.pack(fmt, {'attNum' : e[0], 'num' : e[1]}) + \
- struct.pack(('>%dh' % len(e[2])), *e[2])
+ data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack(
+ (">%dh" % len(e[2])), *e[2]
+ )
return data
-
+
def compileAttributes3(self, attrs):
if self.hasOctaboxes:
o = attrs.octabox
data = sstruct.pack(Glat_format_3_octabox_metrics, o)
numsub = bin(o.subboxBitmap).count("1")
- for b in range(numsub) :
+ for b in range(numsub):
data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
else:
data = ""
return data + self.compileAttributes12(attrs, Glat_format_23_entry)
def toXML(self, writer, ttFont):
- writer.simpletag('version', version=self.version, compressionScheme=self.scheme)
+ writer.simpletag("version", version=self.version, compressionScheme=self.scheme)
writer.newline()
- for n, a in sorted(self.attributes.items(), key=lambda x:ttFont.getGlyphID(x[0])):
- writer.begintag('glyph', name=n)
+ for n, a in sorted(
+ self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])
+ ):
+ writer.begintag("glyph", name=n)
writer.newline()
- if hasattr(a, 'octabox'):
+ if hasattr(a, "octabox"):
o = a.octabox
- formatstring, names, fixes = sstruct.getformat(Glat_format_3_octabox_metrics)
+ formatstring, names, fixes = sstruct.getformat(
+ Glat_format_3_octabox_metrics
+ )
vals = {}
for k in names:
- if k == 'subboxBitmap': continue
- vals[k] = "{:.3f}%".format(getattr(o, k) * 100. / 255)
- vals['bitmap'] = "{:0X}".format(o.subboxBitmap)
- writer.begintag('octaboxes', **vals)
+ if k == "subboxBitmap":
+ continue
+ vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255)
+ vals["bitmap"] = "{:0X}".format(o.subboxBitmap)
+ writer.begintag("octaboxes", **vals)
writer.newline()
- formatstring, names, fixes = sstruct.getformat(Glat_format_3_subbox_entry)
+ formatstring, names, fixes = sstruct.getformat(
+ Glat_format_3_subbox_entry
+ )
for s in o.subboxes:
vals = {}
for k in names:
- vals[k] = "{:.3f}%".format(getattr(s, k) * 100. / 255)
- writer.simpletag('octabox', **vals)
+ vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255)
+ writer.simpletag("octabox", **vals)
writer.newline()
- writer.endtag('octaboxes')
+ writer.endtag("octaboxes")
writer.newline()
for k, v in sorted(a.items()):
- writer.simpletag('attribute', index=k, value=v)
+ writer.simpletag("attribute", index=k, value=v)
writer.newline()
- writer.endtag('glyph')
+ writer.endtag("glyph")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version' :
- self.version = float(safeEval(attrs['version']))
- self.scheme = int(safeEval(attrs['compressionScheme']))
- if name != 'glyph' : return
- if not hasattr(self, 'attributes'):
+ if name == "version":
+ self.version = float(safeEval(attrs["version"]))
+ self.scheme = int(safeEval(attrs["compressionScheme"]))
+ if name != "glyph":
+ return
+ if not hasattr(self, "attributes"):
self.attributes = {}
- gname = attrs['name']
+ gname = attrs["name"]
attributes = _Dict()
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag == 'attribute' :
- k = int(safeEval(attrs['index']))
- v = int(safeEval(attrs['value']))
- attributes[k]=v
- elif tag == 'octaboxes':
+ if tag == "attribute":
+ k = int(safeEval(attrs["index"]))
+ v = int(safeEval(attrs["value"]))
+ attributes[k] = v
+ elif tag == "octaboxes":
self.hasOctaboxes = True
o = _Object()
- o.subboxBitmap = int(attrs['bitmap'], 16)
+ o.subboxBitmap = int(attrs["bitmap"], 16)
o.subboxes = []
- del attrs['bitmap']
+ del attrs["bitmap"]
for k, v in attrs.items():
- setattr(o, k, int(float(v[:-1]) * 255. / 100. + 0.5))
+ setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
for element in subcontent:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
(tag, attrs, subcontent) = element
so = _Object()
for k, v in attrs.items():
- setattr(so, k, int(float(v[:-1]) * 255. / 100. + 0.5))
+ setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
o.subboxes.append(so)
attributes.octabox = o
self.attributes[gname] = attributes
diff --git a/Lib/fontTools/ttLib/tables/G__l_o_c.py b/Lib/fontTools/ttLib/tables/G__l_o_c.py
index fa114a31..7973b9be 100644
--- a/Lib/fontTools/ttLib/tables/G__l_o_c.py
+++ b/Lib/fontTools/ttLib/tables/G__l_o_c.py
@@ -5,19 +5,21 @@ import array
import sys
-Gloc_header = '''
+Gloc_header = """
> # big endian
version: 16.16F # Table version
flags: H # bit 0: 1=long format, 0=short format
# bit 1: 1=attribute names, 0=no names
numAttribs: H # NUmber of attributes
-'''
+"""
+
class table_G__l_o_c(DefaultTable.DefaultTable):
"""
Support Graphite Gloc tables
"""
- dependencies = ['Glat']
+
+ dependencies = ["Glat"]
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
@@ -28,38 +30,49 @@ class table_G__l_o_c(DefaultTable.DefaultTable):
_, data = sstruct.unpack2(Gloc_header, data, self)
flags = self.flags
del self.flags
- self.locations = array.array('I' if flags & 1 else 'H')
- self.locations.frombytes(data[:len(data) - self.numAttribs * (flags & 2)])
- if sys.byteorder != "big": self.locations.byteswap()
- self.attribIds = array.array('H')
+ self.locations = array.array("I" if flags & 1 else "H")
+ self.locations.frombytes(data[: len(data) - self.numAttribs * (flags & 2)])
+ if sys.byteorder != "big":
+ self.locations.byteswap()
+ self.attribIds = array.array("H")
if flags & 2:
- self.attribIds.frombytes(data[-self.numAttribs * 2:])
- if sys.byteorder != "big": self.attribIds.byteswap()
+ self.attribIds.frombytes(data[-self.numAttribs * 2 :])
+ if sys.byteorder != "big":
+ self.attribIds.byteswap()
def compile(self, ttFont):
- data = sstruct.pack(Gloc_header, dict(version=1.0,
- flags=(bool(self.attribIds) << 1) + (self.locations.typecode == 'I'),
- numAttribs=self.numAttribs))
- if sys.byteorder != "big": self.locations.byteswap()
+ data = sstruct.pack(
+ Gloc_header,
+ dict(
+ version=1.0,
+ flags=(bool(self.attribIds) << 1) + (self.locations.typecode == "I"),
+ numAttribs=self.numAttribs,
+ ),
+ )
+ if sys.byteorder != "big":
+ self.locations.byteswap()
data += self.locations.tobytes()
- if sys.byteorder != "big": self.locations.byteswap()
+ if sys.byteorder != "big":
+ self.locations.byteswap()
if self.attribIds:
- if sys.byteorder != "big": self.attribIds.byteswap()
+ if sys.byteorder != "big":
+ self.attribIds.byteswap()
data += self.attribIds.tobytes()
- if sys.byteorder != "big": self.attribIds.byteswap()
+ if sys.byteorder != "big":
+ self.attribIds.byteswap()
return data
def set(self, locations):
long_format = max(locations) >= 65536
- self.locations = array.array('I' if long_format else 'H', locations)
+ self.locations = array.array("I" if long_format else "H", locations)
def toXML(self, writer, ttFont):
writer.simpletag("attributes", number=self.numAttribs)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'attributes':
- self.numAttribs = int(safeEval(attrs['number']))
+ if name == "attributes":
+ self.numAttribs = int(safeEval(attrs["number"]))
def __getitem__(self, index):
return self.locations[index]
diff --git a/Lib/fontTools/ttLib/tables/H_V_A_R_.py b/Lib/fontTools/ttLib/tables/H_V_A_R_.py
index 56992ad0..094aedae 100644
--- a/Lib/fontTools/ttLib/tables/H_V_A_R_.py
+++ b/Lib/fontTools/ttLib/tables/H_V_A_R_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_H_V_A_R_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/J_S_T_F_.py b/Lib/fontTools/ttLib/tables/J_S_T_F_.py
index ddf54055..111c7007 100644
--- a/Lib/fontTools/ttLib/tables/J_S_T_F_.py
+++ b/Lib/fontTools/ttLib/tables/J_S_T_F_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_J_S_T_F_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/L_T_S_H_.py b/Lib/fontTools/ttLib/tables/L_T_S_H_.py
index 94c2c22a..e0ab0d02 100644
--- a/Lib/fontTools/ttLib/tables/L_T_S_H_.py
+++ b/Lib/fontTools/ttLib/tables/L_T_S_H_.py
@@ -7,42 +7,42 @@ import array
# XXX gets through. They're looking into it, I hope to raise the standards
# XXX back to normal eventually.
-class table_L_T_S_H_(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- version, numGlyphs = struct.unpack(">HH", data[:4])
- data = data[4:]
- assert version == 0, "unknown version: %s" % version
- assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
- # ouch: the assertion is not true in Chicago!
- #assert numGlyphs == ttFont['maxp'].numGlyphs
- yPels = array.array("B")
- yPels.frombytes(data)
- self.yPels = {}
- for i in range(numGlyphs):
- self.yPels[ttFont.getGlyphName(i)] = yPels[i]
+class table_L_T_S_H_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ version, numGlyphs = struct.unpack(">HH", data[:4])
+ data = data[4:]
+ assert version == 0, "unknown version: %s" % version
+ assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
+ # ouch: the assertion is not true in Chicago!
+ # assert numGlyphs == ttFont['maxp'].numGlyphs
+ yPels = array.array("B")
+ yPels.frombytes(data)
+ self.yPels = {}
+ for i in range(numGlyphs):
+ self.yPels[ttFont.getGlyphName(i)] = yPels[i]
- def compile(self, ttFont):
- version = 0
- names = list(self.yPels.keys())
- numGlyphs = len(names)
- yPels = [0] * numGlyphs
- # ouch: the assertion is not true in Chicago!
- #assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
- for name in names:
- yPels[ttFont.getGlyphID(name)] = self.yPels[name]
- yPels = array.array("B", yPels)
- return struct.pack(">HH", version, numGlyphs) + yPels.tobytes()
+ def compile(self, ttFont):
+ version = 0
+ names = list(self.yPels.keys())
+ numGlyphs = len(names)
+ yPels = [0] * numGlyphs
+ # ouch: the assertion is not true in Chicago!
+ # assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
+ for name in names:
+ yPels[ttFont.getGlyphID(name)] = self.yPels[name]
+ yPels = array.array("B", yPels)
+ return struct.pack(">HH", version, numGlyphs) + yPels.tobytes()
- def toXML(self, writer, ttFont):
- names = sorted(self.yPels.keys())
- for name in names:
- writer.simpletag("yPel", name=name, value=self.yPels[name])
- writer.newline()
+ def toXML(self, writer, ttFont):
+ names = sorted(self.yPels.keys())
+ for name in names:
+ writer.simpletag("yPel", name=name, value=self.yPels[name])
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "yPels"):
- self.yPels = {}
- if name != "yPel":
- return # ignore unknown tags
- self.yPels[attrs["name"]] = safeEval(attrs["value"])
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "yPels"):
+ self.yPels = {}
+ if name != "yPel":
+ return # ignore unknown tags
+ self.yPels[attrs["name"]] = safeEval(attrs["value"])
diff --git a/Lib/fontTools/ttLib/tables/M_A_T_H_.py b/Lib/fontTools/ttLib/tables/M_A_T_H_.py
index d894c082..011426b5 100644
--- a/Lib/fontTools/ttLib/tables/M_A_T_H_.py
+++ b/Lib/fontTools/ttLib/tables/M_A_T_H_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_M_A_T_H_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/M_E_T_A_.py b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
index 990bfd2d..445aeb4d 100644
--- a/Lib/fontTools/ttLib/tables/M_E_T_A_.py
+++ b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
@@ -45,259 +45,301 @@ METAStringRecordFormat = """
# Strings shall be Unicode UTF-8 encoded, and null-terminated.
METALabelDict = {
- 0: "MojikumiX4051", # An integer in the range 1-20
- 1: "UNIUnifiedBaseChars",
- 2: "BaseFontName",
- 3: "Language",
- 4: "CreationDate",
- 5: "FoundryName",
- 6: "FoundryCopyright",
- 7: "OwnerURI",
- 8: "WritingScript",
- 10: "StrokeCount",
- 11: "IndexingRadical",
+ 0: "MojikumiX4051", # An integer in the range 1-20
+ 1: "UNIUnifiedBaseChars",
+ 2: "BaseFontName",
+ 3: "Language",
+ 4: "CreationDate",
+ 5: "FoundryName",
+ 6: "FoundryCopyright",
+ 7: "OwnerURI",
+ 8: "WritingScript",
+ 10: "StrokeCount",
+ 11: "IndexingRadical",
}
def getLabelString(labelID):
- try:
- label = METALabelDict[labelID]
- except KeyError:
- label = "Unknown label"
- return str(label)
+ try:
+ label = METALabelDict[labelID]
+ except KeyError:
+ label = "Unknown label"
+ return str(label)
class table_M_E_T_A_(DefaultTable.DefaultTable):
-
- dependencies = []
-
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
- self.glyphRecords = []
- for i in range(self.nMetaRecs):
- glyphRecord, newData = sstruct.unpack2(METAGlyphRecordFormat, newData, GlyphRecord())
- if self.metaFlags == 0:
- [glyphRecord.offset] = struct.unpack(">H", newData[:2])
- newData = newData[2:]
- elif self.metaFlags == 1:
- [glyphRecord.offset] = struct.unpack(">H", newData[:4])
- newData = newData[4:]
- else:
- assert 0, "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags)
- glyphRecord.stringRecs = []
- newData = data[glyphRecord.offset:]
- for j in range(glyphRecord.nMetaEntry):
- stringRec, newData = sstruct.unpack2(METAStringRecordFormat, newData, StringRecord())
- if self.metaFlags == 0:
- [stringRec.offset] = struct.unpack(">H", newData[:2])
- newData = newData[2:]
- else:
- [stringRec.offset] = struct.unpack(">H", newData[:4])
- newData = newData[4:]
- stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen]
- glyphRecord.stringRecs.append(stringRec)
- self.glyphRecords.append(glyphRecord)
-
- def compile(self, ttFont):
- offsetOK = 0
- self.nMetaRecs = len(self.glyphRecords)
- count = 0
- while (offsetOK != 1):
- count = count + 1
- if count > 4:
- pdb.set_trace()
- metaData = sstruct.pack(METAHeaderFormat, self)
- stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1))
- stringRecSize = (6 + 2*(self.metaFlags & 1))
- for glyphRec in self.glyphRecords:
- glyphRec.offset = stringRecsOffset
- if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
- self.metaFlags = self.metaFlags + 1
- offsetOK = -1
- break
- metaData = metaData + glyphRec.compile(self)
- stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize)
- # this will be the String Record offset for the next GlyphRecord.
- if offsetOK == -1:
- offsetOK = 0
- continue
-
- # metaData now contains the header and all of the GlyphRecords. Its length should bw
- # the offset to the first StringRecord.
- stringOffset = stringRecsOffset
- for glyphRec in self.glyphRecords:
- assert (glyphRec.offset == len(metaData)), "Glyph record offset did not compile correctly! for rec:" + str(glyphRec)
- for stringRec in glyphRec.stringRecs:
- stringRec.offset = stringOffset
- if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
- self.metaFlags = self.metaFlags + 1
- offsetOK = -1
- break
- metaData = metaData + stringRec.compile(self)
- stringOffset = stringOffset + stringRec.stringLen
- if offsetOK == -1:
- offsetOK = 0
- continue
-
- if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
- self.metaFlags = self.metaFlags - 1
- continue
- else:
- offsetOK = 1
-
- # metaData now contains the header and all of the GlyphRecords and all of the String Records.
- # Its length should be the offset to the first string datum.
- for glyphRec in self.glyphRecords:
- for stringRec in glyphRec.stringRecs:
- assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! for string:" + str(stringRec.string)
- metaData = metaData + stringRec.string
-
- return metaData
-
- def toXML(self, writer, ttFont):
- writer.comment("Lengths and number of entries in this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- for glyphRec in self.glyphRecords:
- glyphRec.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "GlyphRecord":
- if not hasattr(self, "glyphRecords"):
- self.glyphRecords = []
- glyphRec = GlyphRecord()
- self.glyphRecords.append(glyphRec)
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- glyphRec.fromXML(name, attrs, content, ttFont)
- glyphRec.offset = -1
- glyphRec.nMetaEntry = len(glyphRec.stringRecs)
- else:
- setattr(self, name, safeEval(attrs["value"]))
+ dependencies = []
+
+ def decompile(self, data, ttFont):
+ dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
+ self.glyphRecords = []
+ for i in range(self.nMetaRecs):
+ glyphRecord, newData = sstruct.unpack2(
+ METAGlyphRecordFormat, newData, GlyphRecord()
+ )
+ if self.metaFlags == 0:
+ [glyphRecord.offset] = struct.unpack(">H", newData[:2])
+ newData = newData[2:]
+ elif self.metaFlags == 1:
+ [glyphRecord.offset] = struct.unpack(">H", newData[:4])
+ newData = newData[4:]
+ else:
+ assert 0, (
+ "The metaFlags field in the META table header has a value other than 0 or 1 :"
+ + str(self.metaFlags)
+ )
+ glyphRecord.stringRecs = []
+ newData = data[glyphRecord.offset :]
+ for j in range(glyphRecord.nMetaEntry):
+ stringRec, newData = sstruct.unpack2(
+ METAStringRecordFormat, newData, StringRecord()
+ )
+ if self.metaFlags == 0:
+ [stringRec.offset] = struct.unpack(">H", newData[:2])
+ newData = newData[2:]
+ else:
+ [stringRec.offset] = struct.unpack(">H", newData[:4])
+ newData = newData[4:]
+ stringRec.string = data[
+ stringRec.offset : stringRec.offset + stringRec.stringLen
+ ]
+ glyphRecord.stringRecs.append(stringRec)
+ self.glyphRecords.append(glyphRecord)
+
+ def compile(self, ttFont):
+ offsetOK = 0
+ self.nMetaRecs = len(self.glyphRecords)
+ count = 0
+ while offsetOK != 1:
+ count = count + 1
+ if count > 4:
+ pdb.set_trace()
+ metaData = sstruct.pack(METAHeaderFormat, self)
+ stringRecsOffset = len(metaData) + self.nMetaRecs * (
+ 6 + 2 * (self.metaFlags & 1)
+ )
+ stringRecSize = 6 + 2 * (self.metaFlags & 1)
+ for glyphRec in self.glyphRecords:
+ glyphRec.offset = stringRecsOffset
+ if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
+ self.metaFlags = self.metaFlags + 1
+ offsetOK = -1
+ break
+ metaData = metaData + glyphRec.compile(self)
+ stringRecsOffset = stringRecsOffset + (
+ glyphRec.nMetaEntry * stringRecSize
+ )
+ # this will be the String Record offset for the next GlyphRecord.
+ if offsetOK == -1:
+ offsetOK = 0
+ continue
+
+ # metaData now contains the header and all of the GlyphRecords. Its length should bw
+ # the offset to the first StringRecord.
+ stringOffset = stringRecsOffset
+ for glyphRec in self.glyphRecords:
+ assert glyphRec.offset == len(
+ metaData
+ ), "Glyph record offset did not compile correctly! for rec:" + str(
+ glyphRec
+ )
+ for stringRec in glyphRec.stringRecs:
+ stringRec.offset = stringOffset
+ if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
+ self.metaFlags = self.metaFlags + 1
+ offsetOK = -1
+ break
+ metaData = metaData + stringRec.compile(self)
+ stringOffset = stringOffset + stringRec.stringLen
+ if offsetOK == -1:
+ offsetOK = 0
+ continue
+
+ if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
+ self.metaFlags = self.metaFlags - 1
+ continue
+ else:
+ offsetOK = 1
+
+ # metaData now contains the header and all of the GlyphRecords and all of the String Records.
+ # Its length should be the offset to the first string datum.
+ for glyphRec in self.glyphRecords:
+ for stringRec in glyphRec.stringRecs:
+ assert stringRec.offset == len(
+ metaData
+ ), "String offset did not compile correctly! for string:" + str(
+ stringRec.string
+ )
+ metaData = metaData + stringRec.string
+
+ return metaData
+
+ def toXML(self, writer, ttFont):
+ writer.comment(
+ "Lengths and number of entries in this table will be recalculated by the compiler"
+ )
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ for glyphRec in self.glyphRecords:
+ glyphRec.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "GlyphRecord":
+ if not hasattr(self, "glyphRecords"):
+ self.glyphRecords = []
+ glyphRec = GlyphRecord()
+ self.glyphRecords.append(glyphRec)
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ glyphRec.fromXML(name, attrs, content, ttFont)
+ glyphRec.offset = -1
+ glyphRec.nMetaEntry = len(glyphRec.stringRecs)
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
class GlyphRecord(object):
- def __init__(self):
- self.glyphID = -1
- self.nMetaEntry = -1
- self.offset = -1
- self.stringRecs = []
-
- def toXML(self, writer, ttFont):
- writer.begintag("GlyphRecord")
- writer.newline()
- writer.simpletag("glyphID", value=self.glyphID)
- writer.newline()
- writer.simpletag("nMetaEntry", value=self.nMetaEntry)
- writer.newline()
- for stringRec in self.stringRecs:
- stringRec.toXML(writer, ttFont)
- writer.endtag("GlyphRecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "StringRecord":
- stringRec = StringRecord()
- self.stringRecs.append(stringRec)
- for element in content:
- if isinstance(element, str):
- continue
- stringRec.fromXML(name, attrs, content, ttFont)
- stringRec.stringLen = len(stringRec.string)
- else:
- setattr(self, name, safeEval(attrs["value"]))
-
- def compile(self, parentTable):
- data = sstruct.pack(METAGlyphRecordFormat, self)
- if parentTable.metaFlags == 0:
- datum = struct.pack(">H", self.offset)
- elif parentTable.metaFlags == 1:
- datum = struct.pack(">L", self.offset)
- data = data + datum
- return data
-
- def __repr__(self):
- return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]"
+ def __init__(self):
+ self.glyphID = -1
+ self.nMetaEntry = -1
+ self.offset = -1
+ self.stringRecs = []
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("GlyphRecord")
+ writer.newline()
+ writer.simpletag("glyphID", value=self.glyphID)
+ writer.newline()
+ writer.simpletag("nMetaEntry", value=self.nMetaEntry)
+ writer.newline()
+ for stringRec in self.stringRecs:
+ stringRec.toXML(writer, ttFont)
+ writer.endtag("GlyphRecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "StringRecord":
+ stringRec = StringRecord()
+ self.stringRecs.append(stringRec)
+ for element in content:
+ if isinstance(element, str):
+ continue
+ stringRec.fromXML(name, attrs, content, ttFont)
+ stringRec.stringLen = len(stringRec.string)
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
+
+ def compile(self, parentTable):
+ data = sstruct.pack(METAGlyphRecordFormat, self)
+ if parentTable.metaFlags == 0:
+ datum = struct.pack(">H", self.offset)
+ elif parentTable.metaFlags == 1:
+ datum = struct.pack(">L", self.offset)
+ data = data + datum
+ return data
+
+ def __repr__(self):
+ return (
+ "GlyphRecord[ glyphID: "
+ + str(self.glyphID)
+ + ", nMetaEntry: "
+ + str(self.nMetaEntry)
+ + ", offset: "
+ + str(self.offset)
+ + " ]"
+ )
+
# XXX The following two functions are really broken around UTF-8 vs Unicode
+
def mapXMLToUTF8(string):
- uString = str()
- strLen = len(string)
- i = 0
- while i < strLen:
- prefixLen = 0
- if (string[i:i+3] == "&#x"):
- prefixLen = 3
- elif (string[i:i+7] == "&amp;#x"):
- prefixLen = 7
- if prefixLen:
- i = i+prefixLen
- j= i
- while string[i] != ";":
- i = i+1
- valStr = string[j:i]
-
- uString = uString + chr(eval('0x' + valStr))
- else:
- uString = uString + chr(byteord(string[i]))
- i = i +1
-
- return uString.encode('utf_8')
+ uString = str()
+ strLen = len(string)
+ i = 0
+ while i < strLen:
+ prefixLen = 0
+ if string[i : i + 3] == "&#x":
+ prefixLen = 3
+ elif string[i : i + 7] == "&amp;#x":
+ prefixLen = 7
+ if prefixLen:
+ i = i + prefixLen
+ j = i
+ while string[i] != ";":
+ i = i + 1
+ valStr = string[j:i]
+
+ uString = uString + chr(eval("0x" + valStr))
+ else:
+ uString = uString + chr(byteord(string[i]))
+ i = i + 1
+
+ return uString.encode("utf_8")
def mapUTF8toXML(string):
- uString = string.decode('utf_8')
- string = ""
- for uChar in uString:
- i = ord(uChar)
- if (i < 0x80) and (i > 0x1F):
- string = string + uChar
- else:
- string = string + "&#x" + hex(i)[2:] + ";"
- return string
+ uString = string.decode("utf_8")
+ string = ""
+ for uChar in uString:
+ i = ord(uChar)
+ if (i < 0x80) and (i > 0x1F):
+ string = string + uChar
+ else:
+ string = string + "&#x" + hex(i)[2:] + ";"
+ return string
class StringRecord(object):
-
- def toXML(self, writer, ttFont):
- writer.begintag("StringRecord")
- writer.newline()
- writer.simpletag("labelID", value=self.labelID)
- writer.comment(getLabelString(self.labelID))
- writer.newline()
- writer.newline()
- writer.simpletag("string", value=mapUTF8toXML(self.string))
- writer.newline()
- writer.endtag("StringRecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- value = attrs["value"]
- if name == "string":
- self.string = mapXMLToUTF8(value)
- else:
- setattr(self, name, safeEval(value))
-
- def compile(self, parentTable):
- data = sstruct.pack(METAStringRecordFormat, self)
- if parentTable.metaFlags == 0:
- datum = struct.pack(">H", self.offset)
- elif parentTable.metaFlags == 1:
- datum = struct.pack(">L", self.offset)
- data = data + datum
- return data
-
- def __repr__(self):
- return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \
- + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " +self.string + " ]"
+ def toXML(self, writer, ttFont):
+ writer.begintag("StringRecord")
+ writer.newline()
+ writer.simpletag("labelID", value=self.labelID)
+ writer.comment(getLabelString(self.labelID))
+ writer.newline()
+ writer.newline()
+ writer.simpletag("string", value=mapUTF8toXML(self.string))
+ writer.newline()
+ writer.endtag("StringRecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ value = attrs["value"]
+ if name == "string":
+ self.string = mapXMLToUTF8(value)
+ else:
+ setattr(self, name, safeEval(value))
+
+ def compile(self, parentTable):
+ data = sstruct.pack(METAStringRecordFormat, self)
+ if parentTable.metaFlags == 0:
+ datum = struct.pack(">H", self.offset)
+ elif parentTable.metaFlags == 1:
+ datum = struct.pack(">L", self.offset)
+ data = data + datum
+ return data
+
+ def __repr__(self):
+ return (
+ "StringRecord [ labelID: "
+ + str(self.labelID)
+ + " aka "
+ + getLabelString(self.labelID)
+ + ", offset: "
+ + str(self.offset)
+ + ", length: "
+ + str(self.stringLen)
+ + ", string: "
+ + self.string
+ + " ]"
+ )
diff --git a/Lib/fontTools/ttLib/tables/M_V_A_R_.py b/Lib/fontTools/ttLib/tables/M_V_A_R_.py
index 34ab20f7..8371795e 100644
--- a/Lib/fontTools/ttLib/tables/M_V_A_R_.py
+++ b/Lib/fontTools/ttLib/tables/M_V_A_R_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_M_V_A_R_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/O_S_2f_2.py b/Lib/fontTools/ttLib/tables/O_S_2f_2.py
index ba2e3961..7b403026 100644
--- a/Lib/fontTools/ttLib/tables/O_S_2f_2.py
+++ b/Lib/fontTools/ttLib/tables/O_S_2f_2.py
@@ -23,16 +23,23 @@ panoseFormat = """
bXHeight: B
"""
+
class Panose(object):
+ def __init__(self, **kwargs):
+ _, names, _ = sstruct.getformat(panoseFormat)
+ for name in names:
+ setattr(self, name, kwargs.pop(name, 0))
+ for k in kwargs:
+ raise TypeError(f"Panose() got an unexpected keyword argument {k!r}")
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(panoseFormat)
- for name in names:
- writer.simpletag(name, value=getattr(self, name))
- writer.newline()
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(panoseFormat)
+ for name in names:
+ writer.simpletag(name, value=getattr(self, name))
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- setattr(self, name, safeEval(attrs["value"]))
+ def fromXML(self, name, attrs, content, ttFont):
+ setattr(self, name, safeEval(attrs["value"]))
# 'sfnt' OS/2 and Windows Metrics table - 'OS/2'
@@ -71,23 +78,29 @@ OS2_format_0 = """
usWinDescent: H # Windows descender
"""
-OS2_format_1_addition = """
+OS2_format_1_addition = """
ulCodePageRange1: L
ulCodePageRange2: L
"""
-OS2_format_2_addition = OS2_format_1_addition + """
+OS2_format_2_addition = (
+ OS2_format_1_addition
+ + """
sxHeight: h
sCapHeight: h
usDefaultChar: H
usBreakChar: H
usMaxContext: H
"""
+)
-OS2_format_5_addition = OS2_format_2_addition + """
+OS2_format_5_addition = (
+ OS2_format_2_addition
+ + """
usLowerOpticalPointSize: H
usUpperOpticalPointSize: H
"""
+)
bigendian = " > # big endian\n"
@@ -101,438 +114,504 @@ OS2_format_5_addition = bigendian + OS2_format_5_addition
class table_O_S_2f_2(DefaultTable.DefaultTable):
- """the OS/2 table"""
-
- dependencies = ["head"]
-
- def decompile(self, data, ttFont):
- dummy, data = sstruct.unpack2(OS2_format_0, data, self)
-
- if self.version == 1:
- dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
- elif self.version in (2, 3, 4):
- dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
- elif self.version == 5:
- dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
- self.usLowerOpticalPointSize /= 20
- self.usUpperOpticalPointSize /= 20
- elif self.version != 0:
- from fontTools import ttLib
- raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
- if len(data):
- log.warning("too much 'OS/2' table data")
-
- self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())
-
- def compile(self, ttFont):
- self.updateFirstAndLastCharIndex(ttFont)
- panose = self.panose
- head = ttFont["head"]
- if (self.fsSelection & 1) and not (head.macStyle & 1<<1):
- log.warning("fsSelection bit 0 (italic) and "
- "head table macStyle bit 1 (italic) should match")
- if (self.fsSelection & 1<<5) and not (head.macStyle & 1):
- log.warning("fsSelection bit 5 (bold) and "
- "head table macStyle bit 0 (bold) should match")
- if (self.fsSelection & 1<<6) and (self.fsSelection & 1 + (1<<5)):
- log.warning("fsSelection bit 6 (regular) is set, "
- "bits 0 (italic) and 5 (bold) must be clear")
- if self.version < 4 and self.fsSelection & 0b1110000000:
- log.warning("fsSelection bits 7, 8 and 9 are only defined in "
- "OS/2 table version 4 and up: version %s", self.version)
- self.panose = sstruct.pack(panoseFormat, self.panose)
- if self.version == 0:
- data = sstruct.pack(OS2_format_0, self)
- elif self.version == 1:
- data = sstruct.pack(OS2_format_1, self)
- elif self.version in (2, 3, 4):
- data = sstruct.pack(OS2_format_2, self)
- elif self.version == 5:
- d = self.__dict__.copy()
- d['usLowerOpticalPointSize'] = round(self.usLowerOpticalPointSize * 20)
- d['usUpperOpticalPointSize'] = round(self.usUpperOpticalPointSize * 20)
- data = sstruct.pack(OS2_format_5, d)
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
- self.panose = panose
- return data
-
- def toXML(self, writer, ttFont):
- writer.comment(
- "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n"
- "will be recalculated by the compiler")
- writer.newline()
- if self.version == 1:
- format = OS2_format_1
- elif self.version in (2, 3, 4):
- format = OS2_format_2
- elif self.version == 5:
- format = OS2_format_5
- else:
- format = OS2_format_0
- formatstring, names, fixes = sstruct.getformat(format)
- for name in names:
- value = getattr(self, name)
- if name=="panose":
- writer.begintag("panose")
- writer.newline()
- value.toXML(writer, ttFont)
- writer.endtag("panose")
- elif name in ("ulUnicodeRange1", "ulUnicodeRange2",
- "ulUnicodeRange3", "ulUnicodeRange4",
- "ulCodePageRange1", "ulCodePageRange2"):
- writer.simpletag(name, value=num2binary(value))
- elif name in ("fsType", "fsSelection"):
- writer.simpletag(name, value=num2binary(value, 16))
- elif name == "achVendID":
- writer.simpletag(name, value=repr(value)[1:-1])
- else:
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "panose":
- self.panose = panose = Panose()
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- panose.fromXML(name, attrs, content, ttFont)
- elif name in ("ulUnicodeRange1", "ulUnicodeRange2",
- "ulUnicodeRange3", "ulUnicodeRange4",
- "ulCodePageRange1", "ulCodePageRange2",
- "fsType", "fsSelection"):
- setattr(self, name, binary2num(attrs["value"]))
- elif name == "achVendID":
- setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
- else:
- setattr(self, name, safeEval(attrs["value"]))
-
- def updateFirstAndLastCharIndex(self, ttFont):
- if 'cmap' not in ttFont:
- return
- codes = set()
- for table in getattr(ttFont['cmap'], 'tables', []):
- if table.isUnicode():
- codes.update(table.cmap.keys())
- if codes:
- minCode = min(codes)
- maxCode = max(codes)
- # USHORT cannot hold codepoints greater than 0xFFFF
- self.usFirstCharIndex = min(0xFFFF, minCode)
- self.usLastCharIndex = min(0xFFFF, maxCode)
-
- # misspelled attributes kept for legacy reasons
-
- @property
- def usMaxContex(self):
- return self.usMaxContext
-
- @usMaxContex.setter
- def usMaxContex(self, value):
- self.usMaxContext = value
-
- @property
- def fsFirstCharIndex(self):
- return self.usFirstCharIndex
-
- @fsFirstCharIndex.setter
- def fsFirstCharIndex(self, value):
- self.usFirstCharIndex = value
-
- @property
- def fsLastCharIndex(self):
- return self.usLastCharIndex
-
- @fsLastCharIndex.setter
- def fsLastCharIndex(self, value):
- self.usLastCharIndex = value
-
- def getUnicodeRanges(self):
- """ Return the set of 'ulUnicodeRange*' bits currently enabled. """
- bits = set()
- ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2
- ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4
- for i in range(32):
- if ul1 & (1 << i):
- bits.add(i)
- if ul2 & (1 << i):
- bits.add(i + 32)
- if ul3 & (1 << i):
- bits.add(i + 64)
- if ul4 & (1 << i):
- bits.add(i + 96)
- return bits
-
- def setUnicodeRanges(self, bits):
- """ Set the 'ulUnicodeRange*' fields to the specified 'bits'. """
- ul1, ul2, ul3, ul4 = 0, 0, 0, 0
- for bit in bits:
- if 0 <= bit < 32:
- ul1 |= (1 << bit)
- elif 32 <= bit < 64:
- ul2 |= (1 << (bit - 32))
- elif 64 <= bit < 96:
- ul3 |= (1 << (bit - 64))
- elif 96 <= bit < 123:
- ul4 |= (1 << (bit - 96))
- else:
- raise ValueError('expected 0 <= int <= 122, found: %r' % bit)
- self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2
- self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4
-
- def recalcUnicodeRanges(self, ttFont, pruneOnly=False):
- """ Intersect the codepoints in the font's Unicode cmap subtables with
- the Unicode block ranges defined in the OpenType specification (v1.7),
- and set the respective 'ulUnicodeRange*' bits if there is at least ONE
- intersection.
- If 'pruneOnly' is True, only clear unused bits with NO intersection.
- """
- unicodes = set()
- for table in ttFont['cmap'].tables:
- if table.isUnicode():
- unicodes.update(table.cmap.keys())
- if pruneOnly:
- empty = intersectUnicodeRanges(unicodes, inverse=True)
- bits = self.getUnicodeRanges() - empty
- else:
- bits = intersectUnicodeRanges(unicodes)
- self.setUnicodeRanges(bits)
- return bits
-
- def recalcAvgCharWidth(self, ttFont):
- """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table.
-
- Set it to 0 if the unlikely event 'hmtx' table is not found.
- """
- avg_width = 0
- hmtx = ttFont.get("hmtx")
- if hmtx:
- widths = [m[0] for m in hmtx.metrics.values() if m[0] > 0]
- avg_width = otRound(sum(widths) / len(widths))
- self.xAvgCharWidth = avg_width
- return avg_width
+ """the OS/2 table"""
+
+ dependencies = ["head"]
+
+ def decompile(self, data, ttFont):
+ dummy, data = sstruct.unpack2(OS2_format_0, data, self)
+
+ if self.version == 1:
+ dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
+ elif self.version in (2, 3, 4):
+ dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
+ elif self.version == 5:
+ dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
+ self.usLowerOpticalPointSize /= 20
+ self.usUpperOpticalPointSize /= 20
+ elif self.version != 0:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError(
+ "unknown format for OS/2 table: version %s" % self.version
+ )
+ if len(data):
+ log.warning("too much 'OS/2' table data")
+
+ self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())
+
+ def compile(self, ttFont):
+ self.updateFirstAndLastCharIndex(ttFont)
+ panose = self.panose
+ head = ttFont["head"]
+ if (self.fsSelection & 1) and not (head.macStyle & 1 << 1):
+ log.warning(
+ "fsSelection bit 0 (italic) and "
+ "head table macStyle bit 1 (italic) should match"
+ )
+ if (self.fsSelection & 1 << 5) and not (head.macStyle & 1):
+ log.warning(
+ "fsSelection bit 5 (bold) and "
+ "head table macStyle bit 0 (bold) should match"
+ )
+ if (self.fsSelection & 1 << 6) and (self.fsSelection & 1 + (1 << 5)):
+ log.warning(
+ "fsSelection bit 6 (regular) is set, "
+ "bits 0 (italic) and 5 (bold) must be clear"
+ )
+ if self.version < 4 and self.fsSelection & 0b1110000000:
+ log.warning(
+ "fsSelection bits 7, 8 and 9 are only defined in "
+ "OS/2 table version 4 and up: version %s",
+ self.version,
+ )
+ self.panose = sstruct.pack(panoseFormat, self.panose)
+ if self.version == 0:
+ data = sstruct.pack(OS2_format_0, self)
+ elif self.version == 1:
+ data = sstruct.pack(OS2_format_1, self)
+ elif self.version in (2, 3, 4):
+ data = sstruct.pack(OS2_format_2, self)
+ elif self.version == 5:
+ d = self.__dict__.copy()
+ d["usLowerOpticalPointSize"] = round(self.usLowerOpticalPointSize * 20)
+ d["usUpperOpticalPointSize"] = round(self.usUpperOpticalPointSize * 20)
+ data = sstruct.pack(OS2_format_5, d)
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError(
+ "unknown format for OS/2 table: version %s" % self.version
+ )
+ self.panose = panose
+ return data
+
+ def toXML(self, writer, ttFont):
+ writer.comment(
+ "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n"
+ "will be recalculated by the compiler"
+ )
+ writer.newline()
+ if self.version == 1:
+ format = OS2_format_1
+ elif self.version in (2, 3, 4):
+ format = OS2_format_2
+ elif self.version == 5:
+ format = OS2_format_5
+ else:
+ format = OS2_format_0
+ formatstring, names, fixes = sstruct.getformat(format)
+ for name in names:
+ value = getattr(self, name)
+ if name == "panose":
+ writer.begintag("panose")
+ writer.newline()
+ value.toXML(writer, ttFont)
+ writer.endtag("panose")
+ elif name in (
+ "ulUnicodeRange1",
+ "ulUnicodeRange2",
+ "ulUnicodeRange3",
+ "ulUnicodeRange4",
+ "ulCodePageRange1",
+ "ulCodePageRange2",
+ ):
+ writer.simpletag(name, value=num2binary(value))
+ elif name in ("fsType", "fsSelection"):
+ writer.simpletag(name, value=num2binary(value, 16))
+ elif name == "achVendID":
+ writer.simpletag(name, value=repr(value)[1:-1])
+ else:
+ writer.simpletag(name, value=value)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "panose":
+ self.panose = panose = Panose()
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ panose.fromXML(name, attrs, content, ttFont)
+ elif name in (
+ "ulUnicodeRange1",
+ "ulUnicodeRange2",
+ "ulUnicodeRange3",
+ "ulUnicodeRange4",
+ "ulCodePageRange1",
+ "ulCodePageRange2",
+ "fsType",
+ "fsSelection",
+ ):
+ setattr(self, name, binary2num(attrs["value"]))
+ elif name == "achVendID":
+ setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
+ else:
+ setattr(self, name, safeEval(attrs["value"]))
+
+ def updateFirstAndLastCharIndex(self, ttFont):
+ if "cmap" not in ttFont:
+ return
+ codes = set()
+ for table in getattr(ttFont["cmap"], "tables", []):
+ if table.isUnicode():
+ codes.update(table.cmap.keys())
+ if codes:
+ minCode = min(codes)
+ maxCode = max(codes)
+ # USHORT cannot hold codepoints greater than 0xFFFF
+ self.usFirstCharIndex = min(0xFFFF, minCode)
+ self.usLastCharIndex = min(0xFFFF, maxCode)
+
+ # misspelled attributes kept for legacy reasons
+
+ @property
+ def usMaxContex(self):
+ return self.usMaxContext
+
+ @usMaxContex.setter
+ def usMaxContex(self, value):
+ self.usMaxContext = value
+
+ @property
+ def fsFirstCharIndex(self):
+ return self.usFirstCharIndex
+
+ @fsFirstCharIndex.setter
+ def fsFirstCharIndex(self, value):
+ self.usFirstCharIndex = value
+
+ @property
+ def fsLastCharIndex(self):
+ return self.usLastCharIndex
+
+ @fsLastCharIndex.setter
+ def fsLastCharIndex(self, value):
+ self.usLastCharIndex = value
+
+ def getUnicodeRanges(self):
+ """Return the set of 'ulUnicodeRange*' bits currently enabled."""
+ bits = set()
+ ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2
+ ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4
+ for i in range(32):
+ if ul1 & (1 << i):
+ bits.add(i)
+ if ul2 & (1 << i):
+ bits.add(i + 32)
+ if ul3 & (1 << i):
+ bits.add(i + 64)
+ if ul4 & (1 << i):
+ bits.add(i + 96)
+ return bits
+
+ def setUnicodeRanges(self, bits):
+ """Set the 'ulUnicodeRange*' fields to the specified 'bits'."""
+ ul1, ul2, ul3, ul4 = 0, 0, 0, 0
+ for bit in bits:
+ if 0 <= bit < 32:
+ ul1 |= 1 << bit
+ elif 32 <= bit < 64:
+ ul2 |= 1 << (bit - 32)
+ elif 64 <= bit < 96:
+ ul3 |= 1 << (bit - 64)
+ elif 96 <= bit < 123:
+ ul4 |= 1 << (bit - 96)
+ else:
+ raise ValueError("expected 0 <= int <= 122, found: %r" % bit)
+ self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2
+ self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4
+
+ def recalcUnicodeRanges(self, ttFont, pruneOnly=False):
+ """Intersect the codepoints in the font's Unicode cmap subtables with
+ the Unicode block ranges defined in the OpenType specification (v1.7),
+ and set the respective 'ulUnicodeRange*' bits if there is at least ONE
+ intersection.
+ If 'pruneOnly' is True, only clear unused bits with NO intersection.
+ """
+ unicodes = set()
+ for table in ttFont["cmap"].tables:
+ if table.isUnicode():
+ unicodes.update(table.cmap.keys())
+ if pruneOnly:
+ empty = intersectUnicodeRanges(unicodes, inverse=True)
+ bits = self.getUnicodeRanges() - empty
+ else:
+ bits = intersectUnicodeRanges(unicodes)
+ self.setUnicodeRanges(bits)
+ return bits
+
+ def recalcAvgCharWidth(self, ttFont):
+ """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table.
+
+ Set it to 0 if the unlikely event 'hmtx' table is not found.
+ """
+ avg_width = 0
+ hmtx = ttFont.get("hmtx")
+ if hmtx is not None:
+ widths = [width for width, _ in hmtx.metrics.values() if width > 0]
+ if widths:
+ avg_width = otRound(sum(widths) / len(widths))
+ self.xAvgCharWidth = avg_width
+ return avg_width
# Unicode ranges data from the OpenType OS/2 table specification v1.7
OS2_UNICODE_RANGES = (
- (('Basic Latin', (0x0000, 0x007F)),),
- (('Latin-1 Supplement', (0x0080, 0x00FF)),),
- (('Latin Extended-A', (0x0100, 0x017F)),),
- (('Latin Extended-B', (0x0180, 0x024F)),),
- (('IPA Extensions', (0x0250, 0x02AF)),
- ('Phonetic Extensions', (0x1D00, 0x1D7F)),
- ('Phonetic Extensions Supplement', (0x1D80, 0x1DBF))),
- (('Spacing Modifier Letters', (0x02B0, 0x02FF)),
- ('Modifier Tone Letters', (0xA700, 0xA71F))),
- (('Combining Diacritical Marks', (0x0300, 0x036F)),
- ('Combining Diacritical Marks Supplement', (0x1DC0, 0x1DFF))),
- (('Greek and Coptic', (0x0370, 0x03FF)),),
- (('Coptic', (0x2C80, 0x2CFF)),),
- (('Cyrillic', (0x0400, 0x04FF)),
- ('Cyrillic Supplement', (0x0500, 0x052F)),
- ('Cyrillic Extended-A', (0x2DE0, 0x2DFF)),
- ('Cyrillic Extended-B', (0xA640, 0xA69F))),
- (('Armenian', (0x0530, 0x058F)),),
- (('Hebrew', (0x0590, 0x05FF)),),
- (('Vai', (0xA500, 0xA63F)),),
- (('Arabic', (0x0600, 0x06FF)),
- ('Arabic Supplement', (0x0750, 0x077F))),
- (('NKo', (0x07C0, 0x07FF)),),
- (('Devanagari', (0x0900, 0x097F)),),
- (('Bengali', (0x0980, 0x09FF)),),
- (('Gurmukhi', (0x0A00, 0x0A7F)),),
- (('Gujarati', (0x0A80, 0x0AFF)),),
- (('Oriya', (0x0B00, 0x0B7F)),),
- (('Tamil', (0x0B80, 0x0BFF)),),
- (('Telugu', (0x0C00, 0x0C7F)),),
- (('Kannada', (0x0C80, 0x0CFF)),),
- (('Malayalam', (0x0D00, 0x0D7F)),),
- (('Thai', (0x0E00, 0x0E7F)),),
- (('Lao', (0x0E80, 0x0EFF)),),
- (('Georgian', (0x10A0, 0x10FF)),
- ('Georgian Supplement', (0x2D00, 0x2D2F))),
- (('Balinese', (0x1B00, 0x1B7F)),),
- (('Hangul Jamo', (0x1100, 0x11FF)),),
- (('Latin Extended Additional', (0x1E00, 0x1EFF)),
- ('Latin Extended-C', (0x2C60, 0x2C7F)),
- ('Latin Extended-D', (0xA720, 0xA7FF))),
- (('Greek Extended', (0x1F00, 0x1FFF)),),
- (('General Punctuation', (0x2000, 0x206F)),
- ('Supplemental Punctuation', (0x2E00, 0x2E7F))),
- (('Superscripts And Subscripts', (0x2070, 0x209F)),),
- (('Currency Symbols', (0x20A0, 0x20CF)),),
- (('Combining Diacritical Marks For Symbols', (0x20D0, 0x20FF)),),
- (('Letterlike Symbols', (0x2100, 0x214F)),),
- (('Number Forms', (0x2150, 0x218F)),),
- (('Arrows', (0x2190, 0x21FF)),
- ('Supplemental Arrows-A', (0x27F0, 0x27FF)),
- ('Supplemental Arrows-B', (0x2900, 0x297F)),
- ('Miscellaneous Symbols and Arrows', (0x2B00, 0x2BFF))),
- (('Mathematical Operators', (0x2200, 0x22FF)),
- ('Supplemental Mathematical Operators', (0x2A00, 0x2AFF)),
- ('Miscellaneous Mathematical Symbols-A', (0x27C0, 0x27EF)),
- ('Miscellaneous Mathematical Symbols-B', (0x2980, 0x29FF))),
- (('Miscellaneous Technical', (0x2300, 0x23FF)),),
- (('Control Pictures', (0x2400, 0x243F)),),
- (('Optical Character Recognition', (0x2440, 0x245F)),),
- (('Enclosed Alphanumerics', (0x2460, 0x24FF)),),
- (('Box Drawing', (0x2500, 0x257F)),),
- (('Block Elements', (0x2580, 0x259F)),),
- (('Geometric Shapes', (0x25A0, 0x25FF)),),
- (('Miscellaneous Symbols', (0x2600, 0x26FF)),),
- (('Dingbats', (0x2700, 0x27BF)),),
- (('CJK Symbols And Punctuation', (0x3000, 0x303F)),),
- (('Hiragana', (0x3040, 0x309F)),),
- (('Katakana', (0x30A0, 0x30FF)),
- ('Katakana Phonetic Extensions', (0x31F0, 0x31FF))),
- (('Bopomofo', (0x3100, 0x312F)),
- ('Bopomofo Extended', (0x31A0, 0x31BF))),
- (('Hangul Compatibility Jamo', (0x3130, 0x318F)),),
- (('Phags-pa', (0xA840, 0xA87F)),),
- (('Enclosed CJK Letters And Months', (0x3200, 0x32FF)),),
- (('CJK Compatibility', (0x3300, 0x33FF)),),
- (('Hangul Syllables', (0xAC00, 0xD7AF)),),
- (('Non-Plane 0 *', (0xD800, 0xDFFF)),),
- (('Phoenician', (0x10900, 0x1091F)),),
- (('CJK Unified Ideographs', (0x4E00, 0x9FFF)),
- ('CJK Radicals Supplement', (0x2E80, 0x2EFF)),
- ('Kangxi Radicals', (0x2F00, 0x2FDF)),
- ('Ideographic Description Characters', (0x2FF0, 0x2FFF)),
- ('CJK Unified Ideographs Extension A', (0x3400, 0x4DBF)),
- ('CJK Unified Ideographs Extension B', (0x20000, 0x2A6DF)),
- ('Kanbun', (0x3190, 0x319F))),
- (('Private Use Area (plane 0)', (0xE000, 0xF8FF)),),
- (('CJK Strokes', (0x31C0, 0x31EF)),
- ('CJK Compatibility Ideographs', (0xF900, 0xFAFF)),
- ('CJK Compatibility Ideographs Supplement', (0x2F800, 0x2FA1F))),
- (('Alphabetic Presentation Forms', (0xFB00, 0xFB4F)),),
- (('Arabic Presentation Forms-A', (0xFB50, 0xFDFF)),),
- (('Combining Half Marks', (0xFE20, 0xFE2F)),),
- (('Vertical Forms', (0xFE10, 0xFE1F)),
- ('CJK Compatibility Forms', (0xFE30, 0xFE4F))),
- (('Small Form Variants', (0xFE50, 0xFE6F)),),
- (('Arabic Presentation Forms-B', (0xFE70, 0xFEFF)),),
- (('Halfwidth And Fullwidth Forms', (0xFF00, 0xFFEF)),),
- (('Specials', (0xFFF0, 0xFFFF)),),
- (('Tibetan', (0x0F00, 0x0FFF)),),
- (('Syriac', (0x0700, 0x074F)),),
- (('Thaana', (0x0780, 0x07BF)),),
- (('Sinhala', (0x0D80, 0x0DFF)),),
- (('Myanmar', (0x1000, 0x109F)),),
- (('Ethiopic', (0x1200, 0x137F)),
- ('Ethiopic Supplement', (0x1380, 0x139F)),
- ('Ethiopic Extended', (0x2D80, 0x2DDF))),
- (('Cherokee', (0x13A0, 0x13FF)),),
- (('Unified Canadian Aboriginal Syllabics', (0x1400, 0x167F)),),
- (('Ogham', (0x1680, 0x169F)),),
- (('Runic', (0x16A0, 0x16FF)),),
- (('Khmer', (0x1780, 0x17FF)),
- ('Khmer Symbols', (0x19E0, 0x19FF))),
- (('Mongolian', (0x1800, 0x18AF)),),
- (('Braille Patterns', (0x2800, 0x28FF)),),
- (('Yi Syllables', (0xA000, 0xA48F)),
- ('Yi Radicals', (0xA490, 0xA4CF))),
- (('Tagalog', (0x1700, 0x171F)),
- ('Hanunoo', (0x1720, 0x173F)),
- ('Buhid', (0x1740, 0x175F)),
- ('Tagbanwa', (0x1760, 0x177F))),
- (('Old Italic', (0x10300, 0x1032F)),),
- (('Gothic', (0x10330, 0x1034F)),),
- (('Deseret', (0x10400, 0x1044F)),),
- (('Byzantine Musical Symbols', (0x1D000, 0x1D0FF)),
- ('Musical Symbols', (0x1D100, 0x1D1FF)),
- ('Ancient Greek Musical Notation', (0x1D200, 0x1D24F))),
- (('Mathematical Alphanumeric Symbols', (0x1D400, 0x1D7FF)),),
- (('Private Use (plane 15)', (0xF0000, 0xFFFFD)),
- ('Private Use (plane 16)', (0x100000, 0x10FFFD))),
- (('Variation Selectors', (0xFE00, 0xFE0F)),
- ('Variation Selectors Supplement', (0xE0100, 0xE01EF))),
- (('Tags', (0xE0000, 0xE007F)),),
- (('Limbu', (0x1900, 0x194F)),),
- (('Tai Le', (0x1950, 0x197F)),),
- (('New Tai Lue', (0x1980, 0x19DF)),),
- (('Buginese', (0x1A00, 0x1A1F)),),
- (('Glagolitic', (0x2C00, 0x2C5F)),),
- (('Tifinagh', (0x2D30, 0x2D7F)),),
- (('Yijing Hexagram Symbols', (0x4DC0, 0x4DFF)),),
- (('Syloti Nagri', (0xA800, 0xA82F)),),
- (('Linear B Syllabary', (0x10000, 0x1007F)),
- ('Linear B Ideograms', (0x10080, 0x100FF)),
- ('Aegean Numbers', (0x10100, 0x1013F))),
- (('Ancient Greek Numbers', (0x10140, 0x1018F)),),
- (('Ugaritic', (0x10380, 0x1039F)),),
- (('Old Persian', (0x103A0, 0x103DF)),),
- (('Shavian', (0x10450, 0x1047F)),),
- (('Osmanya', (0x10480, 0x104AF)),),
- (('Cypriot Syllabary', (0x10800, 0x1083F)),),
- (('Kharoshthi', (0x10A00, 0x10A5F)),),
- (('Tai Xuan Jing Symbols', (0x1D300, 0x1D35F)),),
- (('Cuneiform', (0x12000, 0x123FF)),
- ('Cuneiform Numbers and Punctuation', (0x12400, 0x1247F))),
- (('Counting Rod Numerals', (0x1D360, 0x1D37F)),),
- (('Sundanese', (0x1B80, 0x1BBF)),),
- (('Lepcha', (0x1C00, 0x1C4F)),),
- (('Ol Chiki', (0x1C50, 0x1C7F)),),
- (('Saurashtra', (0xA880, 0xA8DF)),),
- (('Kayah Li', (0xA900, 0xA92F)),),
- (('Rejang', (0xA930, 0xA95F)),),
- (('Cham', (0xAA00, 0xAA5F)),),
- (('Ancient Symbols', (0x10190, 0x101CF)),),
- (('Phaistos Disc', (0x101D0, 0x101FF)),),
- (('Carian', (0x102A0, 0x102DF)),
- ('Lycian', (0x10280, 0x1029F)),
- ('Lydian', (0x10920, 0x1093F))),
- (('Domino Tiles', (0x1F030, 0x1F09F)),
- ('Mahjong Tiles', (0x1F000, 0x1F02F))),
+ (("Basic Latin", (0x0000, 0x007F)),),
+ (("Latin-1 Supplement", (0x0080, 0x00FF)),),
+ (("Latin Extended-A", (0x0100, 0x017F)),),
+ (("Latin Extended-B", (0x0180, 0x024F)),),
+ (
+ ("IPA Extensions", (0x0250, 0x02AF)),
+ ("Phonetic Extensions", (0x1D00, 0x1D7F)),
+ ("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)),
+ ),
+ (
+ ("Spacing Modifier Letters", (0x02B0, 0x02FF)),
+ ("Modifier Tone Letters", (0xA700, 0xA71F)),
+ ),
+ (
+ ("Combining Diacritical Marks", (0x0300, 0x036F)),
+ ("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)),
+ ),
+ (("Greek and Coptic", (0x0370, 0x03FF)),),
+ (("Coptic", (0x2C80, 0x2CFF)),),
+ (
+ ("Cyrillic", (0x0400, 0x04FF)),
+ ("Cyrillic Supplement", (0x0500, 0x052F)),
+ ("Cyrillic Extended-A", (0x2DE0, 0x2DFF)),
+ ("Cyrillic Extended-B", (0xA640, 0xA69F)),
+ ),
+ (("Armenian", (0x0530, 0x058F)),),
+ (("Hebrew", (0x0590, 0x05FF)),),
+ (("Vai", (0xA500, 0xA63F)),),
+ (("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))),
+ (("NKo", (0x07C0, 0x07FF)),),
+ (("Devanagari", (0x0900, 0x097F)),),
+ (("Bengali", (0x0980, 0x09FF)),),
+ (("Gurmukhi", (0x0A00, 0x0A7F)),),
+ (("Gujarati", (0x0A80, 0x0AFF)),),
+ (("Oriya", (0x0B00, 0x0B7F)),),
+ (("Tamil", (0x0B80, 0x0BFF)),),
+ (("Telugu", (0x0C00, 0x0C7F)),),
+ (("Kannada", (0x0C80, 0x0CFF)),),
+ (("Malayalam", (0x0D00, 0x0D7F)),),
+ (("Thai", (0x0E00, 0x0E7F)),),
+ (("Lao", (0x0E80, 0x0EFF)),),
+ (("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))),
+ (("Balinese", (0x1B00, 0x1B7F)),),
+ (("Hangul Jamo", (0x1100, 0x11FF)),),
+ (
+ ("Latin Extended Additional", (0x1E00, 0x1EFF)),
+ ("Latin Extended-C", (0x2C60, 0x2C7F)),
+ ("Latin Extended-D", (0xA720, 0xA7FF)),
+ ),
+ (("Greek Extended", (0x1F00, 0x1FFF)),),
+ (
+ ("General Punctuation", (0x2000, 0x206F)),
+ ("Supplemental Punctuation", (0x2E00, 0x2E7F)),
+ ),
+ (("Superscripts And Subscripts", (0x2070, 0x209F)),),
+ (("Currency Symbols", (0x20A0, 0x20CF)),),
+ (("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),),
+ (("Letterlike Symbols", (0x2100, 0x214F)),),
+ (("Number Forms", (0x2150, 0x218F)),),
+ (
+ ("Arrows", (0x2190, 0x21FF)),
+ ("Supplemental Arrows-A", (0x27F0, 0x27FF)),
+ ("Supplemental Arrows-B", (0x2900, 0x297F)),
+ ("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)),
+ ),
+ (
+ ("Mathematical Operators", (0x2200, 0x22FF)),
+ ("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)),
+ ("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)),
+ ("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)),
+ ),
+ (("Miscellaneous Technical", (0x2300, 0x23FF)),),
+ (("Control Pictures", (0x2400, 0x243F)),),
+ (("Optical Character Recognition", (0x2440, 0x245F)),),
+ (("Enclosed Alphanumerics", (0x2460, 0x24FF)),),
+ (("Box Drawing", (0x2500, 0x257F)),),
+ (("Block Elements", (0x2580, 0x259F)),),
+ (("Geometric Shapes", (0x25A0, 0x25FF)),),
+ (("Miscellaneous Symbols", (0x2600, 0x26FF)),),
+ (("Dingbats", (0x2700, 0x27BF)),),
+ (("CJK Symbols And Punctuation", (0x3000, 0x303F)),),
+ (("Hiragana", (0x3040, 0x309F)),),
+ (
+ ("Katakana", (0x30A0, 0x30FF)),
+ ("Katakana Phonetic Extensions", (0x31F0, 0x31FF)),
+ ),
+ (("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))),
+ (("Hangul Compatibility Jamo", (0x3130, 0x318F)),),
+ (("Phags-pa", (0xA840, 0xA87F)),),
+ (("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),),
+ (("CJK Compatibility", (0x3300, 0x33FF)),),
+ (("Hangul Syllables", (0xAC00, 0xD7AF)),),
+ (("Non-Plane 0 *", (0xD800, 0xDFFF)),),
+ (("Phoenician", (0x10900, 0x1091F)),),
+ (
+ ("CJK Unified Ideographs", (0x4E00, 0x9FFF)),
+ ("CJK Radicals Supplement", (0x2E80, 0x2EFF)),
+ ("Kangxi Radicals", (0x2F00, 0x2FDF)),
+ ("Ideographic Description Characters", (0x2FF0, 0x2FFF)),
+ ("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)),
+ ("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)),
+ ("Kanbun", (0x3190, 0x319F)),
+ ),
+ (("Private Use Area (plane 0)", (0xE000, 0xF8FF)),),
+ (
+ ("CJK Strokes", (0x31C0, 0x31EF)),
+ ("CJK Compatibility Ideographs", (0xF900, 0xFAFF)),
+ ("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)),
+ ),
+ (("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),),
+ (("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),),
+ (("Combining Half Marks", (0xFE20, 0xFE2F)),),
+ (
+ ("Vertical Forms", (0xFE10, 0xFE1F)),
+ ("CJK Compatibility Forms", (0xFE30, 0xFE4F)),
+ ),
+ (("Small Form Variants", (0xFE50, 0xFE6F)),),
+ (("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),),
+ (("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),),
+ (("Specials", (0xFFF0, 0xFFFF)),),
+ (("Tibetan", (0x0F00, 0x0FFF)),),
+ (("Syriac", (0x0700, 0x074F)),),
+ (("Thaana", (0x0780, 0x07BF)),),
+ (("Sinhala", (0x0D80, 0x0DFF)),),
+ (("Myanmar", (0x1000, 0x109F)),),
+ (
+ ("Ethiopic", (0x1200, 0x137F)),
+ ("Ethiopic Supplement", (0x1380, 0x139F)),
+ ("Ethiopic Extended", (0x2D80, 0x2DDF)),
+ ),
+ (("Cherokee", (0x13A0, 0x13FF)),),
+ (("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),),
+ (("Ogham", (0x1680, 0x169F)),),
+ (("Runic", (0x16A0, 0x16FF)),),
+ (("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))),
+ (("Mongolian", (0x1800, 0x18AF)),),
+ (("Braille Patterns", (0x2800, 0x28FF)),),
+ (("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))),
+ (
+ ("Tagalog", (0x1700, 0x171F)),
+ ("Hanunoo", (0x1720, 0x173F)),
+ ("Buhid", (0x1740, 0x175F)),
+ ("Tagbanwa", (0x1760, 0x177F)),
+ ),
+ (("Old Italic", (0x10300, 0x1032F)),),
+ (("Gothic", (0x10330, 0x1034F)),),
+ (("Deseret", (0x10400, 0x1044F)),),
+ (
+ ("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)),
+ ("Musical Symbols", (0x1D100, 0x1D1FF)),
+ ("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)),
+ ),
+ (("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),),
+ (
+ ("Private Use (plane 15)", (0xF0000, 0xFFFFD)),
+ ("Private Use (plane 16)", (0x100000, 0x10FFFD)),
+ ),
+ (
+ ("Variation Selectors", (0xFE00, 0xFE0F)),
+ ("Variation Selectors Supplement", (0xE0100, 0xE01EF)),
+ ),
+ (("Tags", (0xE0000, 0xE007F)),),
+ (("Limbu", (0x1900, 0x194F)),),
+ (("Tai Le", (0x1950, 0x197F)),),
+ (("New Tai Lue", (0x1980, 0x19DF)),),
+ (("Buginese", (0x1A00, 0x1A1F)),),
+ (("Glagolitic", (0x2C00, 0x2C5F)),),
+ (("Tifinagh", (0x2D30, 0x2D7F)),),
+ (("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),),
+ (("Syloti Nagri", (0xA800, 0xA82F)),),
+ (
+ ("Linear B Syllabary", (0x10000, 0x1007F)),
+ ("Linear B Ideograms", (0x10080, 0x100FF)),
+ ("Aegean Numbers", (0x10100, 0x1013F)),
+ ),
+ (("Ancient Greek Numbers", (0x10140, 0x1018F)),),
+ (("Ugaritic", (0x10380, 0x1039F)),),
+ (("Old Persian", (0x103A0, 0x103DF)),),
+ (("Shavian", (0x10450, 0x1047F)),),
+ (("Osmanya", (0x10480, 0x104AF)),),
+ (("Cypriot Syllabary", (0x10800, 0x1083F)),),
+ (("Kharoshthi", (0x10A00, 0x10A5F)),),
+ (("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),),
+ (
+ ("Cuneiform", (0x12000, 0x123FF)),
+ ("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)),
+ ),
+ (("Counting Rod Numerals", (0x1D360, 0x1D37F)),),
+ (("Sundanese", (0x1B80, 0x1BBF)),),
+ (("Lepcha", (0x1C00, 0x1C4F)),),
+ (("Ol Chiki", (0x1C50, 0x1C7F)),),
+ (("Saurashtra", (0xA880, 0xA8DF)),),
+ (("Kayah Li", (0xA900, 0xA92F)),),
+ (("Rejang", (0xA930, 0xA95F)),),
+ (("Cham", (0xAA00, 0xAA5F)),),
+ (("Ancient Symbols", (0x10190, 0x101CF)),),
+ (("Phaistos Disc", (0x101D0, 0x101FF)),),
+ (
+ ("Carian", (0x102A0, 0x102DF)),
+ ("Lycian", (0x10280, 0x1029F)),
+ ("Lydian", (0x10920, 0x1093F)),
+ ),
+ (("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))),
)
_unicodeStarts = []
_unicodeValues = [None]
+
def _getUnicodeRanges():
- # build the ranges of codepoints for each unicode range bit, and cache result
- if not _unicodeStarts:
- unicodeRanges = [
- (start, (stop, bit)) for bit, blocks in enumerate(OS2_UNICODE_RANGES)
- for _, (start, stop) in blocks]
- for start, (stop, bit) in sorted(unicodeRanges):
- _unicodeStarts.append(start)
- _unicodeValues.append((stop, bit))
- return _unicodeStarts, _unicodeValues
+ # build the ranges of codepoints for each unicode range bit, and cache result
+ if not _unicodeStarts:
+ unicodeRanges = [
+ (start, (stop, bit))
+ for bit, blocks in enumerate(OS2_UNICODE_RANGES)
+ for _, (start, stop) in blocks
+ ]
+ for start, (stop, bit) in sorted(unicodeRanges):
+ _unicodeStarts.append(start)
+ _unicodeValues.append((stop, bit))
+ return _unicodeStarts, _unicodeValues
def intersectUnicodeRanges(unicodes, inverse=False):
- """ Intersect a sequence of (int) Unicode codepoints with the Unicode block
- ranges defined in the OpenType specification v1.7, and return the set of
- 'ulUnicodeRanges' bits for which there is at least ONE intersection.
- If 'inverse' is True, return the the bits for which there is NO intersection.
-
- >>> intersectUnicodeRanges([0x0410]) == {9}
- True
- >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
- True
- >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
- ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
- True
- """
- unicodes = set(unicodes)
- unicodestarts, unicodevalues = _getUnicodeRanges()
- bits = set()
- for code in unicodes:
- stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)]
- if code <= stop:
- bits.add(bit)
- # The spec says that bit 57 ("Non Plane 0") implies that there's
- # at least one codepoint beyond the BMP; so I also include all
- # the non-BMP codepoints here
- if any(0x10000 <= code < 0x110000 for code in unicodes):
- bits.add(57)
- return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits
+ """Intersect a sequence of (int) Unicode codepoints with the Unicode block
+ ranges defined in the OpenType specification v1.7, and return the set of
+ 'ulUnicodeRanges' bits for which there is at least ONE intersection.
+ If 'inverse' is True, return the the bits for which there is NO intersection.
+
+ >>> intersectUnicodeRanges([0x0410]) == {9}
+ True
+ >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
+ True
+ >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
+ ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
+ True
+ """
+ unicodes = set(unicodes)
+ unicodestarts, unicodevalues = _getUnicodeRanges()
+ bits = set()
+ for code in unicodes:
+ stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)]
+ if code <= stop:
+ bits.add(bit)
+ # The spec says that bit 57 ("Non Plane 0") implies that there's
+ # at least one codepoint beyond the BMP; so I also include all
+ # the non-BMP codepoints here
+ if any(0x10000 <= code < 0x110000 for code in unicodes):
+ bits.add(57)
+ return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits
if __name__ == "__main__":
- import doctest, sys
- sys.exit(doctest.testmod().failed)
+ import doctest, sys
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/tables/S_I_N_G_.py b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
index 73246df4..4522c06c 100644
--- a/Lib/fontTools/ttLib/tables/S_I_N_G_.py
+++ b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
@@ -20,74 +20,73 @@ SINGFormat = """
class table_S_I_N_G_(DefaultTable.DefaultTable):
+ dependencies = []
- dependencies = []
+ def decompile(self, data, ttFont):
+ dummy, rest = sstruct.unpack2(SINGFormat, data, self)
+ self.uniqueName = self.decompileUniqueName(self.uniqueName)
+ self.nameLength = byteord(self.nameLength)
+ assert len(rest) == self.nameLength
+ self.baseGlyphName = tostr(rest)
- def decompile(self, data, ttFont):
- dummy, rest = sstruct.unpack2(SINGFormat, data, self)
- self.uniqueName = self.decompileUniqueName(self.uniqueName)
- self.nameLength = byteord(self.nameLength)
- assert len(rest) == self.nameLength
- self.baseGlyphName = tostr(rest)
+ rawMETAMD5 = self.METAMD5
+ self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
+ for char in rawMETAMD5[1:]:
+ self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
+ self.METAMD5 = self.METAMD5 + "]"
- rawMETAMD5 = self.METAMD5
- self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
- for char in rawMETAMD5[1:]:
- self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
- self.METAMD5 = self.METAMD5 + "]"
+ def decompileUniqueName(self, data):
+ name = ""
+ for char in data:
+ val = byteord(char)
+ if val == 0:
+ break
+ if (val > 31) or (val < 128):
+ name += chr(val)
+ else:
+ octString = oct(val)
+ if len(octString) > 3:
+ octString = octString[1:] # chop off that leading zero.
+ elif len(octString) < 3:
+ octString.zfill(3)
+ name += "\\" + octString
+ return name
- def decompileUniqueName(self, data):
- name = ""
- for char in data:
- val = byteord(char)
- if val == 0:
- break
- if (val > 31) or (val < 128):
- name += chr(val)
- else:
- octString = oct(val)
- if len(octString) > 3:
- octString = octString[1:] # chop off that leading zero.
- elif len(octString) < 3:
- octString.zfill(3)
- name += "\\" + octString
- return name
+ def compile(self, ttFont):
+ d = self.__dict__.copy()
+ d["nameLength"] = bytechr(len(self.baseGlyphName))
+ d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
+ METAMD5List = eval(self.METAMD5)
+ d["METAMD5"] = b""
+ for val in METAMD5List:
+ d["METAMD5"] += bytechr(val)
+ assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table"
+ data = sstruct.pack(SINGFormat, d)
+ data = data + tobytes(self.baseGlyphName)
+ return data
- def compile(self, ttFont):
- d = self.__dict__.copy()
- d["nameLength"] = bytechr(len(self.baseGlyphName))
- d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
- METAMD5List = eval(self.METAMD5)
- d["METAMD5"] = b""
- for val in METAMD5List:
- d["METAMD5"] += bytechr(val)
- assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table"
- data = sstruct.pack(SINGFormat, d)
- data = data + tobytes(self.baseGlyphName)
- return data
+ def compilecompileUniqueName(self, name, length):
+ nameLen = len(name)
+ if length <= nameLen:
+ name = name[: length - 1] + "\000"
+ else:
+ name += (nameLen - length) * "\000"
+ return name
- def compilecompileUniqueName(self, name, length):
- nameLen = len(name)
- if length <= nameLen:
- name = name[:length-1] + "\000"
- else:
- name += (nameLen - length) * "\000"
- return name
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(SINGFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ writer.simpletag("baseGlyphName", value=self.baseGlyphName)
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(SINGFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- writer.simpletag("baseGlyphName", value=self.baseGlyphName)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
- setattr(self, name, value)
- else:
- setattr(self, name, safeEval(value))
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
+ setattr(self, name, value)
+ else:
+ setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/S_V_G_.py b/Lib/fontTools/ttLib/tables/S_V_G_.py
index 49e98d03..ebc2befd 100644
--- a/Lib/fontTools/ttLib/tables/S_V_G_.py
+++ b/Lib/fontTools/ttLib/tables/S_V_G_.py
@@ -50,148 +50,166 @@ doc_index_entry_format_0 = """
doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
-
class table_S_V_G_(DefaultTable.DefaultTable):
-
- def decompile(self, data, ttFont):
- self.docList = []
- # Version 0 is the standardized version of the table; and current.
- # https://www.microsoft.com/typography/otspec/svg.htm
- sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
- if self.version != 0:
- log.warning(
- "Unknown SVG table version '%s'. Decompiling as version 0.", self.version)
- # read in SVG Documents Index
- # data starts with the first entry of the entry list.
- pos = subTableStart = self.offsetToSVGDocIndex
- self.numEntries = struct.unpack(">H", data[pos:pos+2])[0]
- pos += 2
- if self.numEntries > 0:
- data2 = data[pos:]
- entries = []
- for i in range(self.numEntries):
- docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry())
- entries.append(docIndexEntry)
-
- for entry in entries:
- start = entry.svgDocOffset + subTableStart
- end = start + entry.svgDocLength
- doc = data[start:end]
- compressed = False
- if doc.startswith(b"\x1f\x8b"):
- import gzip
- bytesIO = BytesIO(doc)
- with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
- doc = gunzipper.read()
- del bytesIO
- compressed = True
- doc = tostr(doc, "utf_8")
- self.docList.append(
- SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed)
- )
-
- def compile(self, ttFont):
- version = 0
- offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header.
- # get SGVDoc info.
- docList = []
- entryList = []
- numEntries = len(self.docList)
- datum = struct.pack(">H",numEntries)
- entryList.append(datum)
- curOffset = len(datum) + doc_index_entry_format_0Size*numEntries
- seenDocs = {}
- allCompressed = getattr(self, "compressed", False)
- for i, doc in enumerate(self.docList):
- if isinstance(doc, (list, tuple)):
- doc = SVGDocument(*doc)
- self.docList[i] = doc
- docBytes = tobytes(doc.data, encoding="utf_8")
- if (allCompressed or doc.compressed) and not docBytes.startswith(b"\x1f\x8b"):
- import gzip
- bytesIO = BytesIO()
- # mtime=0 strips the useless timestamp and makes gzip output reproducible;
- # equivalent to `gzip -n`
- with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper:
- gzipper.write(docBytes)
- gzipped = bytesIO.getvalue()
- if len(gzipped) < len(docBytes):
- docBytes = gzipped
- del gzipped, bytesIO
- docLength = len(docBytes)
- if docBytes in seenDocs:
- docOffset = seenDocs[docBytes]
- else:
- docOffset = curOffset
- curOffset += docLength
- seenDocs[docBytes] = docOffset
- docList.append(docBytes)
- entry = struct.pack(">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength)
- entryList.append(entry)
- entryList.extend(docList)
- svgDocData = bytesjoin(entryList)
-
- reserved = 0
- header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
- data = [header, svgDocData]
- data = bytesjoin(data)
- return data
-
- def toXML(self, writer, ttFont):
- for i, doc in enumerate(self.docList):
- if isinstance(doc, (list, tuple)):
- doc = SVGDocument(*doc)
- self.docList[i] = doc
- attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID}
- if doc.compressed:
- attrs["compressed"] = 1
- writer.begintag("svgDoc", **attrs)
- writer.newline()
- writer.writecdata(doc.data)
- writer.newline()
- writer.endtag("svgDoc")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "svgDoc":
- if not hasattr(self, "docList"):
- self.docList = []
- doc = strjoin(content)
- doc = doc.strip()
- startGID = int(attrs["startGlyphID"])
- endGID = int(attrs["endGlyphID"])
- compressed = bool(safeEval(attrs.get("compressed", "0")))
- self.docList.append(SVGDocument(doc, startGID, endGID, compressed))
- else:
- log.warning("Unknown %s %s", name, content)
+ def decompile(self, data, ttFont):
+ self.docList = []
+ # Version 0 is the standardized version of the table; and current.
+ # https://www.microsoft.com/typography/otspec/svg.htm
+ sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
+ if self.version != 0:
+ log.warning(
+ "Unknown SVG table version '%s'. Decompiling as version 0.",
+ self.version,
+ )
+ # read in SVG Documents Index
+ # data starts with the first entry of the entry list.
+ pos = subTableStart = self.offsetToSVGDocIndex
+ self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0]
+ pos += 2
+ if self.numEntries > 0:
+ data2 = data[pos:]
+ entries = []
+ for i in range(self.numEntries):
+ record_data = data2[
+ i
+ * doc_index_entry_format_0Size : (i + 1)
+ * doc_index_entry_format_0Size
+ ]
+ docIndexEntry = sstruct.unpack(
+ doc_index_entry_format_0, record_data, DocumentIndexEntry()
+ )
+ entries.append(docIndexEntry)
+
+ for entry in entries:
+ start = entry.svgDocOffset + subTableStart
+ end = start + entry.svgDocLength
+ doc = data[start:end]
+ compressed = False
+ if doc.startswith(b"\x1f\x8b"):
+ import gzip
+
+ bytesIO = BytesIO(doc)
+ with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
+ doc = gunzipper.read()
+ del bytesIO
+ compressed = True
+ doc = tostr(doc, "utf_8")
+ self.docList.append(
+ SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed)
+ )
+
+ def compile(self, ttFont):
+ version = 0
+ offsetToSVGDocIndex = (
+ SVG_format_0Size # I start the SVGDocIndex right after the header.
+ )
+ # get SGVDoc info.
+ docList = []
+ entryList = []
+ numEntries = len(self.docList)
+ datum = struct.pack(">H", numEntries)
+ entryList.append(datum)
+ curOffset = len(datum) + doc_index_entry_format_0Size * numEntries
+ seenDocs = {}
+ allCompressed = getattr(self, "compressed", False)
+ for i, doc in enumerate(self.docList):
+ if isinstance(doc, (list, tuple)):
+ doc = SVGDocument(*doc)
+ self.docList[i] = doc
+ docBytes = tobytes(doc.data, encoding="utf_8")
+ if (allCompressed or doc.compressed) and not docBytes.startswith(
+ b"\x1f\x8b"
+ ):
+ import gzip
+
+ bytesIO = BytesIO()
+ # mtime=0 strips the useless timestamp and makes gzip output reproducible;
+ # equivalent to `gzip -n`
+ with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper:
+ gzipper.write(docBytes)
+ gzipped = bytesIO.getvalue()
+ if len(gzipped) < len(docBytes):
+ docBytes = gzipped
+ del gzipped, bytesIO
+ docLength = len(docBytes)
+ if docBytes in seenDocs:
+ docOffset = seenDocs[docBytes]
+ else:
+ docOffset = curOffset
+ curOffset += docLength
+ seenDocs[docBytes] = docOffset
+ docList.append(docBytes)
+ entry = struct.pack(
+ ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength
+ )
+ entryList.append(entry)
+ entryList.extend(docList)
+ svgDocData = bytesjoin(entryList)
+
+ reserved = 0
+ header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
+ data = [header, svgDocData]
+ data = bytesjoin(data)
+ return data
+
+ def toXML(self, writer, ttFont):
+ for i, doc in enumerate(self.docList):
+ if isinstance(doc, (list, tuple)):
+ doc = SVGDocument(*doc)
+ self.docList[i] = doc
+ attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID}
+ if doc.compressed:
+ attrs["compressed"] = 1
+ writer.begintag("svgDoc", **attrs)
+ writer.newline()
+ writer.writecdata(doc.data)
+ writer.newline()
+ writer.endtag("svgDoc")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "svgDoc":
+ if not hasattr(self, "docList"):
+ self.docList = []
+ doc = strjoin(content)
+ doc = doc.strip()
+ startGID = int(attrs["startGlyphID"])
+ endGID = int(attrs["endGlyphID"])
+ compressed = bool(safeEval(attrs.get("compressed", "0")))
+ self.docList.append(SVGDocument(doc, startGID, endGID, compressed))
+ else:
+ log.warning("Unknown %s %s", name, content)
class DocumentIndexEntry(object):
- def __init__(self):
- self.startGlyphID = None # USHORT
- self.endGlyphID = None # USHORT
- self.svgDocOffset = None # ULONG
- self.svgDocLength = None # ULONG
+ def __init__(self):
+ self.startGlyphID = None # USHORT
+ self.endGlyphID = None # USHORT
+ self.svgDocOffset = None # ULONG
+ self.svgDocLength = None # ULONG
- def __repr__(self):
- return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength)
+ def __repr__(self):
+ return (
+ "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s"
+ % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength)
+ )
@dataclass
class SVGDocument(Sequence):
- data: str
- startGlyphID: int
- endGlyphID: int
- compressed: bool = False
-
- # Previously, the SVG table's docList attribute contained a lists of 3 items:
- # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute.
- # For backward compatibility with code that depends of them being sequences of
- # fixed length=3, we subclass the Sequence abstract base class and pretend only
- # the first three items are present. 'compressed' is only accessible via named
- # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]`
- def __getitem__(self, index):
- return astuple(self)[:3][index]
-
- def __len__(self):
- return 3
+ data: str
+ startGlyphID: int
+ endGlyphID: int
+ compressed: bool = False
+
+ # Previously, the SVG table's docList attribute contained a lists of 3 items:
+ # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute.
+ # For backward compatibility with code that depends of them being sequences of
+ # fixed length=3, we subclass the Sequence abstract base class and pretend only
+ # the first three items are present. 'compressed' is only accessible via named
+ # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]`
+ def __getitem__(self, index):
+ return astuple(self)[:3][index]
+
+ def __len__(self):
+ return 3
diff --git a/Lib/fontTools/ttLib/tables/S__i_l_f.py b/Lib/fontTools/ttLib/tables/S__i_l_f.py
index f326c386..324ffd01 100644
--- a/Lib/fontTools/ttLib/tables/S__i_l_f.py
+++ b/Lib/fontTools/ttLib/tables/S__i_l_f.py
@@ -1,6 +1,7 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
+
# from itertools import *
from . import DefaultTable
from . import grUtils
@@ -8,28 +9,28 @@ from array import array
from functools import reduce
import struct, re, sys
-Silf_hdr_format = '''
+Silf_hdr_format = """
>
version: 16.16F
-'''
+"""
-Silf_hdr_format_3 = '''
+Silf_hdr_format_3 = """
>
version: 16.16F
compilerVersion: L
numSilf: H
x
x
-'''
+"""
-Silf_part1_format_v3 = '''
+Silf_part1_format_v3 = """
>
ruleVersion: 16.16F
passOffset: H
pseudosOffset: H
-'''
+"""
-Silf_part1_format = '''
+Silf_part1_format = """
>
maxGlyphID: H
extraAscent: h
@@ -48,9 +49,9 @@ Silf_part1_format = '''
attrMirroring: B
attrSkipPasses: B
numJLevels: B
-'''
+"""
-Silf_justify_format = '''
+Silf_justify_format = """
>
attrStretch: B
attrShrink: B
@@ -60,9 +61,9 @@ Silf_justify_format = '''
x
x
x
-'''
+"""
-Silf_part2_format = '''
+Silf_part2_format = """
>
numLigComp: H
numUserDefn: B
@@ -73,41 +74,41 @@ Silf_part2_format = '''
x
x
numCritFeatures: B
-'''
+"""
-Silf_pseudomap_format = '''
+Silf_pseudomap_format = """
>
unicode: L
nPseudo: H
-'''
+"""
-Silf_pseudomap_format_h = '''
+Silf_pseudomap_format_h = """
>
unicode: H
nPseudo: H
-'''
+"""
-Silf_classmap_format = '''
+Silf_classmap_format = """
>
numClass: H
numLinear: H
-'''
+"""
-Silf_lookupclass_format = '''
+Silf_lookupclass_format = """
>
numIDs: H
searchRange: H
entrySelector: H
rangeShift: H
-'''
+"""
-Silf_lookuppair_format = '''
+Silf_lookuppair_format = """
>
glyphId: H
index: H
-'''
+"""
-Silf_pass_format = '''
+Silf_pass_format = """
>
flags: B
maxRuleLoop: B
@@ -123,7 +124,7 @@ Silf_pass_format = '''
numTransitional: H
numSuccess: H
numColumns: H
-'''
+"""
aCode_info = (
("NOP", 0),
@@ -142,7 +143,7 @@ aCode_info = (
("TRUNC8", 0),
("TRUNC16", 0),
("COND", 0),
- ("AND", 0), # x10
+ ("AND", 0), # x10
("OR", 0),
("NOT", 0),
("EQUAL", 0),
@@ -158,7 +159,7 @@ aCode_info = (
("PUT_SUBS_8BIT_OBS", "bBB"),
("PUT_COPY", "b"),
("INSERT", 0),
- ("DELETE", 0), # x20
+ ("DELETE", 0), # x20
("ASSOC", -1),
("CNTXT_ITEM", "bB"),
("ATTR_SET", "B"),
@@ -174,7 +175,7 @@ aCode_info = (
("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"),
("PUSH_ISLOT_ATTR", "Bbb"),
("PUSH_IGLYPH_ATTR", "Bbb"),
- ("POP_RET", 0), # x30
+ ("POP_RET", 0), # x30
("RET_ZERO", 0),
("RET_TRUE", 0),
("IATTR_SET", "BB"),
@@ -190,31 +191,33 @@ aCode_info = (
("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"),
("BITOR", 0),
("BITAND", 0),
- ("BITNOT", 0), # x40
+ ("BITNOT", 0), # x40
("BITSET", ">HH"),
- ("SET_FEAT", "Bb")
+ ("SET_FEAT", "Bb"),
)
-aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)])
+aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)])
+
def disassemble(aCode):
codelen = len(aCode)
pc = 0
res = []
while pc < codelen:
- opcode = byteord(aCode[pc:pc+1])
+ opcode = byteord(aCode[pc : pc + 1])
if opcode > len(aCode_info):
instr = aCode_info[0]
else:
instr = aCode_info[opcode]
pc += 1
- if instr[1] != 0 and pc >= codelen : return res
+ if instr[1] != 0 and pc >= codelen:
+ return res
if instr[1] == -1:
count = byteord(aCode[pc])
fmt = "%dB" % count
pc += 1
elif instr[1] == 0:
fmt = ""
- else :
+ else:
fmt = instr[1]
if fmt == "":
res.append(instr[0])
@@ -224,7 +227,10 @@ def disassemble(aCode):
pc += struct.calcsize(fmt)
return res
+
instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
+
+
def assemble(instrs):
res = b""
for inst in instrs:
@@ -239,11 +245,12 @@ def assemble(instrs):
parms = [int(x) for x in re.split(r",\s*", m.group(2))]
if parmfmt == -1:
l = len(parms)
- res += struct.pack(("%dB" % (l+1)), l, *parms)
+ res += struct.pack(("%dB" % (l + 1)), l, *parms)
else:
res += struct.pack(parmfmt, *parms)
return res
+
def writecode(tag, writer, instrs):
writer.begintag(tag)
writer.newline()
@@ -253,41 +260,71 @@ def writecode(tag, writer, instrs):
writer.endtag(tag)
writer.newline()
+
def readcode(content):
res = []
- for e in content_string(content).split('\n'):
+ for e in content_string(content).split("\n"):
e = e.strip()
- if not len(e): continue
+ if not len(e):
+ continue
res.append(e)
return assemble(res)
-
-attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID',
- 'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID')
-attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi')
-attrs_contexts = ('maxPreContext', 'maxPostContext')
-attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality',
- 'attrMirroring', 'attrSkipPasses', 'attCollisions')
-pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup',
- 'minRulePreContext', 'maxRulePreContext', 'collisionThreshold')
-pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns')
+
+
+attrs_info = (
+ "flags",
+ "extraAscent",
+ "extraDescent",
+ "maxGlyphID",
+ "numLigComp",
+ "numUserDefn",
+ "maxCompPerLig",
+ "direction",
+ "lbGID",
+)
+attrs_passindexes = ("iSubst", "iPos", "iJust", "iBidi")
+attrs_contexts = ("maxPreContext", "maxPostContext")
+attrs_attributes = (
+ "attrPseudo",
+ "attrBreakWeight",
+ "attrDirectionality",
+ "attrMirroring",
+ "attrSkipPasses",
+ "attCollisions",
+)
+pass_attrs_info = (
+ "flags",
+ "maxRuleLoop",
+ "maxRuleContext",
+ "maxBackup",
+ "minRulePreContext",
+ "maxRulePreContext",
+ "collisionThreshold",
+)
+pass_attrs_fsm = ("numRows", "numTransitional", "numSuccess", "numColumns")
+
def writesimple(tag, self, writer, *attrkeys):
attrs = dict([(k, getattr(self, k)) for k in attrkeys])
writer.simpletag(tag, **attrs)
writer.newline()
+
def getSimple(self, attrs, *attr_list):
for k in attr_list:
if k in attrs:
setattr(self, k, int(safeEval(attrs[k])))
+
def content_string(contents):
res = ""
for element in contents:
- if isinstance(element, tuple): continue
+ if isinstance(element, tuple):
+ continue
res += element
return res.strip()
+
def wrapline(writer, dat, length=80):
currline = ""
for d in dat:
@@ -300,11 +337,13 @@ def wrapline(writer, dat, length=80):
writer.write(currline[:-1])
writer.newline()
-class _Object() :
+
+class _Object:
pass
+
class table_S__i_l_f(DefaultTable.DefaultTable):
- '''Silf table support'''
+ """Silf table support"""
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
@@ -318,7 +357,7 @@ class table_S__i_l_f(DefaultTable.DefaultTable):
sstruct.unpack2(Silf_hdr_format_3, data, self)
base = sstruct.calcsize(Silf_hdr_format_3)
elif self.version < 3.0:
- self.numSilf = struct.unpack('>H', data[4:6])
+ self.numSilf = struct.unpack(">H", data[4:6])
self.scheme = 0
self.compilerVersion = 0
base = 8
@@ -327,7 +366,7 @@ class table_S__i_l_f(DefaultTable.DefaultTable):
sstruct.unpack2(Silf_hdr_format_3, data, self)
base = sstruct.calcsize(Silf_hdr_format_3)
- silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:])
+ silfoffsets = struct.unpack_from((">%dL" % self.numSilf), data[base:])
for offset in silfoffsets:
s = Silf()
self.silfs.append(s)
@@ -348,38 +387,44 @@ class table_S__i_l_f(DefaultTable.DefaultTable):
offset += len(subdata)
data += subdata
if self.version >= 5.0:
- return grUtils.compress(self.scheme, hdr+data)
- return hdr+data
+ return grUtils.compress(self.scheme, hdr + data)
+ return hdr + data
def toXML(self, writer, ttFont):
- writer.comment('Attributes starting with _ are informative only')
+ writer.comment("Attributes starting with _ are informative only")
writer.newline()
- writer.simpletag('version', version=self.version,
- compilerVersion=self.compilerVersion, compressionScheme=self.scheme)
+ writer.simpletag(
+ "version",
+ version=self.version,
+ compilerVersion=self.compilerVersion,
+ compressionScheme=self.scheme,
+ )
writer.newline()
for s in self.silfs:
- writer.begintag('silf')
+ writer.begintag("silf")
writer.newline()
s.toXML(writer, ttFont, self.version)
- writer.endtag('silf')
+ writer.endtag("silf")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.scheme=int(safeEval(attrs['compressionScheme']))
- self.version = float(safeEval(attrs['version']))
- self.compilerVersion = int(safeEval(attrs['compilerVersion']))
+ if name == "version":
+ self.scheme = int(safeEval(attrs["compressionScheme"]))
+ self.version = float(safeEval(attrs["version"]))
+ self.compilerVersion = int(safeEval(attrs["compilerVersion"]))
return
- if name == 'silf':
+ if name == "silf":
s = Silf()
self.silfs.append(s)
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
s.fromXML(tag, attrs, subcontent, ttFont, self.version)
+
class Silf(object):
- '''A particular Silf subtable'''
+ """A particular Silf subtable"""
def __init__(self):
self.passes = []
@@ -389,37 +434,59 @@ class Silf(object):
self.pMap = {}
def decompile(self, data, ttFont, version=2.0):
- if version >= 3.0 :
+ if version >= 3.0:
_, data = sstruct.unpack2(Silf_part1_format_v3, data, self)
- self.ruleVersion = float(floatToFixedToStr(self.ruleVersion, precisionBits=16))
+ self.ruleVersion = float(
+ floatToFixedToStr(self.ruleVersion, precisionBits=16)
+ )
_, data = sstruct.unpack2(Silf_part1_format, data, self)
for jlevel in range(self.numJLevels):
j, data = sstruct.unpack2(Silf_justify_format, data, _Object())
self.jLevels.append(j)
_, data = sstruct.unpack2(Silf_part2_format, data, self)
if self.numCritFeatures:
- self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data)
- data = data[self.numCritFeatures * 2 + 1:]
- (numScriptTag,) = struct.unpack_from('B', data)
+ self.critFeatures = struct.unpack_from(
+ (">%dH" % self.numCritFeatures), data
+ )
+ data = data[self.numCritFeatures * 2 + 1 :]
+ (numScriptTag,) = struct.unpack_from("B", data)
if numScriptTag:
- self.scriptTags = [struct.unpack("4s", data[x:x+4])[0].decode("ascii") for x in range(1, 1 + 4 * numScriptTag, 4)]
- data = data[1 + 4 * numScriptTag:]
- (self.lbGID,) = struct.unpack('>H', data[:2])
+ self.scriptTags = [
+ struct.unpack("4s", data[x : x + 4])[0].decode("ascii")
+ for x in range(1, 1 + 4 * numScriptTag, 4)
+ ]
+ data = data[1 + 4 * numScriptTag :]
+ (self.lbGID,) = struct.unpack(">H", data[:2])
if self.numPasses:
- self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)), data[2:6+4*self.numPasses])
- data = data[6 + 4 * self.numPasses:]
+ self.oPasses = struct.unpack(
+ (">%dL" % (self.numPasses + 1)), data[2 : 6 + 4 * self.numPasses]
+ )
+ data = data[6 + 4 * self.numPasses :]
(numPseudo,) = struct.unpack(">H", data[:2])
for i in range(numPseudo):
if version >= 3.0:
- pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object())
+ pseudo = sstruct.unpack(
+ Silf_pseudomap_format, data[8 + 6 * i : 14 + 6 * i], _Object()
+ )
else:
- pseudo = sstruct.unpack(Silf_pseudomap_format_h, data[8+4*i:12+4*i], _Object())
+ pseudo = sstruct.unpack(
+ Silf_pseudomap_format_h, data[8 + 4 * i : 12 + 4 * i], _Object()
+ )
self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
- data = data[8 + 6 * numPseudo:]
- currpos = (sstruct.calcsize(Silf_part1_format)
- + sstruct.calcsize(Silf_justify_format) * self.numJLevels
- + sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures
- + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo)
+ data = data[8 + 6 * numPseudo :]
+ currpos = (
+ sstruct.calcsize(Silf_part1_format)
+ + sstruct.calcsize(Silf_justify_format) * self.numJLevels
+ + sstruct.calcsize(Silf_part2_format)
+ + 2 * self.numCritFeatures
+ + 1
+ + 1
+ + 4 * numScriptTag
+ + 6
+ + 4 * self.numPasses
+ + 8
+ + 6 * numPseudo
+ )
if version >= 3.0:
currpos += sstruct.calcsize(Silf_part1_format_v3)
self.classes = Classes()
@@ -427,8 +494,11 @@ class Silf(object):
for i in range(self.numPasses):
p = Pass()
self.passes.append(p)
- p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos],
- ttFont, version)
+ p.decompile(
+ data[self.oPasses[i] - currpos : self.oPasses[i + 1] - currpos],
+ ttFont,
+ version,
+ )
def compile(self, ttFont, version=2.0):
self.numPasses = len(self.passes)
@@ -457,8 +527,9 @@ class Silf(object):
currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
self.pseudosOffset = currpos + len(data1)
for u, p in sorted(self.pMap.items()):
- data1 += struct.pack((">LH" if version >= 3.0 else ">HH"),
- u, ttFont.getGlyphID(p))
+ data1 += struct.pack(
+ (">LH" if version >= 3.0 else ">HH"), u, ttFont.getGlyphID(p)
+ )
data1 += self.classes.compile(ttFont, version)
currpos += len(data1)
data2 = b""
@@ -475,136 +546,147 @@ class Silf(object):
data3 = b""
return data3 + data + datao + data1 + data2
-
def toXML(self, writer, ttFont, version=2.0):
if version >= 3.0:
- writer.simpletag('version', ruleVersion=self.ruleVersion)
+ writer.simpletag("version", ruleVersion=self.ruleVersion)
writer.newline()
- writesimple('info', self, writer, *attrs_info)
- writesimple('passindexes', self, writer, *attrs_passindexes)
- writesimple('contexts', self, writer, *attrs_contexts)
- writesimple('attributes', self, writer, *attrs_attributes)
+ writesimple("info", self, writer, *attrs_info)
+ writesimple("passindexes", self, writer, *attrs_passindexes)
+ writesimple("contexts", self, writer, *attrs_contexts)
+ writesimple("attributes", self, writer, *attrs_attributes)
if len(self.jLevels):
- writer.begintag('justifications')
+ writer.begintag("justifications")
writer.newline()
jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
for i, j in enumerate(self.jLevels):
attrs = dict([(k, getattr(j, k)) for k in jnames])
- writer.simpletag('justify', **attrs)
+ writer.simpletag("justify", **attrs)
writer.newline()
- writer.endtag('justifications')
+ writer.endtag("justifications")
writer.newline()
if len(self.critFeatures):
- writer.begintag('critFeatures')
+ writer.begintag("critFeatures")
writer.newline()
writer.write(" ".join(map(str, self.critFeatures)))
writer.newline()
- writer.endtag('critFeatures')
+ writer.endtag("critFeatures")
writer.newline()
if len(self.scriptTags):
- writer.begintag('scriptTags')
+ writer.begintag("scriptTags")
writer.newline()
writer.write(" ".join(self.scriptTags))
writer.newline()
- writer.endtag('scriptTags')
+ writer.endtag("scriptTags")
writer.newline()
if self.pMap:
- writer.begintag('pseudoMap')
+ writer.begintag("pseudoMap")
writer.newline()
for k, v in sorted(self.pMap.items()):
- writer.simpletag('pseudo', unicode=hex(k), pseudo=v)
+ writer.simpletag("pseudo", unicode=hex(k), pseudo=v)
writer.newline()
- writer.endtag('pseudoMap')
+ writer.endtag("pseudoMap")
writer.newline()
self.classes.toXML(writer, ttFont, version)
if len(self.passes):
- writer.begintag('passes')
+ writer.begintag("passes")
writer.newline()
for i, p in enumerate(self.passes):
- writer.begintag('pass', _index=i)
+ writer.begintag("pass", _index=i)
writer.newline()
p.toXML(writer, ttFont, version)
- writer.endtag('pass')
+ writer.endtag("pass")
writer.newline()
- writer.endtag('passes')
+ writer.endtag("passes")
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
- if name == 'version':
- self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0")))
- if name == 'info':
+ if name == "version":
+ self.ruleVersion = float(safeEval(attrs.get("ruleVersion", "0")))
+ if name == "info":
getSimple(self, attrs, *attrs_info)
- elif name == 'passindexes':
+ elif name == "passindexes":
getSimple(self, attrs, *attrs_passindexes)
- elif name == 'contexts':
+ elif name == "contexts":
getSimple(self, attrs, *attrs_contexts)
- elif name == 'attributes':
+ elif name == "attributes":
getSimple(self, attrs, *attrs_attributes)
- elif name == 'justifications':
+ elif name == "justifications":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
(tag, attrs, subcontent) = element
- if tag == 'justify':
+ if tag == "justify":
j = _Object()
for k, v in attrs.items():
setattr(j, k, int(v))
self.jLevels.append(j)
- elif name == 'critFeatures':
+ elif name == "critFeatures":
self.critFeatures = []
element = content_string(content)
self.critFeatures.extend(map(int, element.split()))
- elif name == 'scriptTags':
+ elif name == "scriptTags":
self.scriptTags = []
element = content_string(content)
for n in element.split():
self.scriptTags.append(n)
- elif name == 'pseudoMap':
+ elif name == "pseudoMap":
self.pMap = {}
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
(tag, attrs, subcontent) = element
- if tag == 'pseudo':
- k = int(attrs['unicode'], 16)
- v = attrs['pseudo']
+ if tag == "pseudo":
+ k = int(attrs["unicode"], 16)
+ v = attrs["pseudo"]
self.pMap[k] = v
- elif name == 'classes':
+ elif name == "classes":
self.classes = Classes()
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
- elif name == 'passes':
+ elif name == "passes":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag == 'pass':
+ if tag == "pass":
p = Pass()
for e in subcontent:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
p.fromXML(e[0], e[1], e[2], ttFont, version)
self.passes.append(p)
class Classes(object):
-
def __init__(self):
self.linear = []
self.nonLinear = []
def decompile(self, data, ttFont, version=2.0):
sstruct.unpack2(Silf_classmap_format, data, self)
- if version >= 4.0 :
- oClasses = struct.unpack((">%dL" % (self.numClass+1)),
- data[4:8+4*self.numClass])
+ if version >= 4.0:
+ oClasses = struct.unpack(
+ (">%dL" % (self.numClass + 1)), data[4 : 8 + 4 * self.numClass]
+ )
else:
- oClasses = struct.unpack((">%dH" % (self.numClass+1)),
- data[4:6+2*self.numClass])
- for s,e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]):
- self.linear.append(ttFont.getGlyphName(x) for x in
- struct.unpack((">%dH" % ((e-s)/2)), data[s:e]))
- for s,e in zip(oClasses[self.numLinear:self.numClass],
- oClasses[self.numLinear+1:self.numClass+1]):
- nonLinids = [struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)]
+ oClasses = struct.unpack(
+ (">%dH" % (self.numClass + 1)), data[4 : 6 + 2 * self.numClass]
+ )
+ for s, e in zip(oClasses[: self.numLinear], oClasses[1 : self.numLinear + 1]):
+ self.linear.append(
+ ttFont.getGlyphName(x)
+ for x in struct.unpack((">%dH" % ((e - s) / 2)), data[s:e])
+ )
+ for s, e in zip(
+ oClasses[self.numLinear : self.numClass],
+ oClasses[self.numLinear + 1 : self.numClass + 1],
+ ):
+ nonLinids = [
+ struct.unpack(">HH", data[x : x + 4]) for x in range(s + 8, e, 4)
+ ]
nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids])
self.nonLinear.append(nonLin)
@@ -627,61 +709,68 @@ class Classes(object):
oClasses.append(len(data) + offset)
self.numClass = len(oClasses) - 1
self.numLinear = len(self.linear)
- return sstruct.pack(Silf_classmap_format, self) + \
- struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)),
- *oClasses) + data
+ return (
+ sstruct.pack(Silf_classmap_format, self)
+ + struct.pack(
+ ((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), *oClasses
+ )
+ + data
+ )
def toXML(self, writer, ttFont, version=2.0):
- writer.begintag('classes')
+ writer.begintag("classes")
writer.newline()
- writer.begintag('linearClasses')
+ writer.begintag("linearClasses")
writer.newline()
- for i,l in enumerate(self.linear):
- writer.begintag('linear', _index=i)
+ for i, l in enumerate(self.linear):
+ writer.begintag("linear", _index=i)
writer.newline()
wrapline(writer, l)
- writer.endtag('linear')
+ writer.endtag("linear")
writer.newline()
- writer.endtag('linearClasses')
+ writer.endtag("linearClasses")
writer.newline()
- writer.begintag('nonLinearClasses')
+ writer.begintag("nonLinearClasses")
writer.newline()
for i, l in enumerate(self.nonLinear):
- writer.begintag('nonLinear', _index=i + self.numLinear)
+ writer.begintag("nonLinear", _index=i + self.numLinear)
writer.newline()
for inp, ind in l.items():
- writer.simpletag('map', glyph=inp, index=ind)
+ writer.simpletag("map", glyph=inp, index=ind)
writer.newline()
- writer.endtag('nonLinear')
+ writer.endtag("nonLinear")
writer.newline()
- writer.endtag('nonLinearClasses')
+ writer.endtag("nonLinearClasses")
writer.newline()
- writer.endtag('classes')
+ writer.endtag("classes")
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
- if name == 'linearClasses':
+ if name == "linearClasses":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag == 'linear':
+ if tag == "linear":
l = content_string(subcontent).split()
self.linear.append(l)
- elif name == 'nonLinearClasses':
+ elif name == "nonLinearClasses":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, attrs, subcontent = element
- if tag =='nonLinear':
+ if tag == "nonLinear":
l = {}
for e in subcontent:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
tag, attrs, subsubcontent = e
- if tag == 'map':
- l[attrs['glyph']] = int(safeEval(attrs['index']))
+ if tag == "map":
+ l[attrs["glyph"]] = int(safeEval(attrs["index"]))
self.nonLinear.append(l)
-class Pass(object):
+class Pass(object):
def __init__(self):
self.colMap = {}
self.rules = []
@@ -698,71 +787,109 @@ class Pass(object):
(numRange, _, _, _) = struct.unpack(">4H", data[:8])
data = data[8:]
for i in range(numRange):
- (first, last, col) = struct.unpack(">3H", data[6*i:6*i+6])
- for g in range(first, last+1):
+ (first, last, col) = struct.unpack(">3H", data[6 * i : 6 * i + 6])
+ for g in range(first, last + 1):
self.colMap[ttFont.getGlyphName(g)] = col
- data = data[6*numRange:]
+ data = data[6 * numRange :]
oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data)
- data = data[2+2*self.numSuccess:]
+ data = data[2 + 2 * self.numSuccess :]
rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data)
- self.rules = [rules[s:e] for (s,e) in zip(oRuleMap, oRuleMap[1:])]
- data = data[2*oRuleMap[-1]:]
- (self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2])
+ self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])]
+ data = data[2 * oRuleMap[-1] :]
+ (self.minRulePreContext, self.maxRulePreContext) = struct.unpack("BB", data[:2])
numStartStates = self.maxRulePreContext - self.minRulePreContext + 1
- self.startStates = struct.unpack((">%dH" % numStartStates),
- data[2:2 + numStartStates * 2])
- data = data[2+numStartStates*2:]
- self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules])
- data = data[2*self.numRules:]
- self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules])
- data = data[self.numRules:]
+ self.startStates = struct.unpack(
+ (">%dH" % numStartStates), data[2 : 2 + numStartStates * 2]
+ )
+ data = data[2 + numStartStates * 2 :]
+ self.ruleSortKeys = struct.unpack(
+ (">%dH" % self.numRules), data[: 2 * self.numRules]
+ )
+ data = data[2 * self.numRules :]
+ self.rulePreContexts = struct.unpack(
+ ("%dB" % self.numRules), data[: self.numRules]
+ )
+ data = data[self.numRules :]
(self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3])
- oConstraints = list(struct.unpack((">%dH" % (self.numRules + 1)),
- data[3:5 + self.numRules * 2]))
- data = data[5 + self.numRules * 2:]
- oActions = list(struct.unpack((">%dH" % (self.numRules + 1)),
- data[:2 + self.numRules * 2]))
- data = data[2 * self.numRules + 2:]
+ oConstraints = list(
+ struct.unpack(
+ (">%dH" % (self.numRules + 1)), data[3 : 5 + self.numRules * 2]
+ )
+ )
+ data = data[5 + self.numRules * 2 :]
+ oActions = list(
+ struct.unpack((">%dH" % (self.numRules + 1)), data[: 2 + self.numRules * 2])
+ )
+ data = data[2 * self.numRules + 2 :]
for i in range(self.numTransitional):
- a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2])
- if sys.byteorder != "big": a.byteswap()
+ a = array(
+ "H", data[i * self.numColumns * 2 : (i + 1) * self.numColumns * 2]
+ )
+ if sys.byteorder != "big":
+ a.byteswap()
self.stateTrans.append(a)
- data = data[self.numTransitional * self.numColumns * 2 + 1:]
+ data = data[self.numTransitional * self.numColumns * 2 + 1 :]
self.passConstraints = data[:pConstraint]
data = data[pConstraint:]
- for i in range(len(oConstraints)-2,-1,-1):
- if oConstraints[i] == 0 :
- oConstraints[i] = oConstraints[i+1]
- self.ruleConstraints = [(data[s:e] if (e-s > 1) else b"") for (s,e) in zip(oConstraints, oConstraints[1:])]
- data = data[oConstraints[-1]:]
- self.actions = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oActions, oActions[1:])]
- data = data[oActions[-1]:]
+ for i in range(len(oConstraints) - 2, -1, -1):
+ if oConstraints[i] == 0:
+ oConstraints[i] = oConstraints[i + 1]
+ self.ruleConstraints = [
+ (data[s:e] if (e - s > 1) else b"")
+ for (s, e) in zip(oConstraints, oConstraints[1:])
+ ]
+ data = data[oConstraints[-1] :]
+ self.actions = [
+ (data[s:e] if (e - s > 1) else "") for (s, e) in zip(oActions, oActions[1:])
+ ]
+ data = data[oActions[-1] :]
# not using debug
def compile(self, ttFont, base, version=2.0):
# build it all up backwards
- oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [b""], (0, []))[1]
- oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [b""], (1, []))[1]
+ oActions = reduce(
+ lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.actions + [b""], (0, [])
+ )[1]
+ oConstraints = reduce(
+ lambda a, x: (a[0] + len(x), a[1] + [a[0]]),
+ self.ruleConstraints + [b""],
+ (1, []),
+ )[1]
constraintCode = b"\000" + b"".join(self.ruleConstraints)
transes = []
for t in self.stateTrans:
- if sys.byteorder != "big": t.byteswap()
+ if sys.byteorder != "big":
+ t.byteswap()
transes.append(t.tobytes())
- if sys.byteorder != "big": t.byteswap()
+ if sys.byteorder != "big":
+ t.byteswap()
if not len(transes):
self.startStates = [0]
- oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1]
+ oRuleMap = reduce(
+ lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.rules + [[]], (0, [])
+ )[1]
passRanges = []
gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()])
- for e in grUtils.entries(gidcolmap, sameval = True):
+ for e in grUtils.entries(gidcolmap, sameval=True):
if e[1]:
- passRanges.append((e[0], e[0]+e[1]-1, e[2][0]))
+ passRanges.append((e[0], e[0] + e[1] - 1, e[2][0]))
self.numRules = len(self.actions)
- self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6
- + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2
- + 2 * len(self.startStates) + 3 * self.numRules + 3
- + 4 * self.numRules + 4)
- self.pcCode = self.fsmOffset + 2*self.numTransitional*self.numColumns + 1 + base
+ self.fsmOffset = (
+ sstruct.calcsize(Silf_pass_format)
+ + 8
+ + len(passRanges) * 6
+ + len(oRuleMap) * 2
+ + 2 * oRuleMap[-1]
+ + 2
+ + 2 * len(self.startStates)
+ + 3 * self.numRules
+ + 3
+ + 4 * self.numRules
+ + 4
+ )
+ self.pcCode = (
+ self.fsmOffset + 2 * self.numTransitional * self.numColumns + 1 + base
+ )
self.rcCode = self.pcCode + len(self.passConstraints)
self.aCode = self.rcCode + len(constraintCode)
self.oDebug = 0
@@ -771,115 +898,140 @@ class Pass(object):
data += grUtils.bininfo(len(passRanges), 6)
data += b"".join(struct.pack(">3H", *p) for p in passRanges)
data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap)
- flatrules = reduce(lambda a,x: a+x, self.rules, [])
+ flatrules = reduce(lambda a, x: a + x, self.rules, [])
data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules)
data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext)
data += struct.pack((">%dH" % len(self.startStates)), *self.startStates)
data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys)
data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts)
data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints))
- data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints)
- data += struct.pack((">%dH" % (self.numRules+1)), *oActions)
- return data + b"".join(transes) + struct.pack("B", 0) + \
- self.passConstraints + constraintCode + b"".join(self.actions)
+ data += struct.pack((">%dH" % (self.numRules + 1)), *oConstraints)
+ data += struct.pack((">%dH" % (self.numRules + 1)), *oActions)
+ return (
+ data
+ + b"".join(transes)
+ + struct.pack("B", 0)
+ + self.passConstraints
+ + constraintCode
+ + b"".join(self.actions)
+ )
def toXML(self, writer, ttFont, version=2.0):
- writesimple('info', self, writer, *pass_attrs_info)
- writesimple('fsminfo', self, writer, *pass_attrs_fsm)
- writer.begintag('colmap')
+ writesimple("info", self, writer, *pass_attrs_info)
+ writesimple("fsminfo", self, writer, *pass_attrs_fsm)
+ writer.begintag("colmap")
writer.newline()
- wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(),
- key=lambda x:ttFont.getGlyphID(x[0]))])
- writer.endtag('colmap')
+ wrapline(
+ writer,
+ [
+ "{}={}".format(*x)
+ for x in sorted(
+ self.colMap.items(), key=lambda x: ttFont.getGlyphID(x[0])
+ )
+ ],
+ )
+ writer.endtag("colmap")
writer.newline()
- writer.begintag('staterulemap')
+ writer.begintag("staterulemap")
writer.newline()
for i, r in enumerate(self.rules):
- writer.simpletag('state', number = self.numRows - self.numSuccess + i,
- rules = " ".join(map(str, r)))
+ writer.simpletag(
+ "state",
+ number=self.numRows - self.numSuccess + i,
+ rules=" ".join(map(str, r)),
+ )
writer.newline()
- writer.endtag('staterulemap')
+ writer.endtag("staterulemap")
writer.newline()
- writer.begintag('rules')
+ writer.begintag("rules")
writer.newline()
for i in range(len(self.actions)):
- writer.begintag('rule', index=i, precontext=self.rulePreContexts[i],
- sortkey=self.ruleSortKeys[i])
+ writer.begintag(
+ "rule",
+ index=i,
+ precontext=self.rulePreContexts[i],
+ sortkey=self.ruleSortKeys[i],
+ )
writer.newline()
if len(self.ruleConstraints[i]):
- writecode('constraint', writer, self.ruleConstraints[i])
- writecode('action', writer, self.actions[i])
- writer.endtag('rule')
+ writecode("constraint", writer, self.ruleConstraints[i])
+ writecode("action", writer, self.actions[i])
+ writer.endtag("rule")
writer.newline()
- writer.endtag('rules')
+ writer.endtag("rules")
writer.newline()
if len(self.passConstraints):
- writecode('passConstraint', writer, self.passConstraints)
+ writecode("passConstraint", writer, self.passConstraints)
if len(self.stateTrans):
- writer.begintag('fsm')
+ writer.begintag("fsm")
writer.newline()
- writer.begintag('starts')
+ writer.begintag("starts")
writer.write(" ".join(map(str, self.startStates)))
- writer.endtag('starts')
+ writer.endtag("starts")
writer.newline()
for i, s in enumerate(self.stateTrans):
- writer.begintag('row', _i=i)
+ writer.begintag("row", _i=i)
# no newlines here
writer.write(" ".join(map(str, s)))
- writer.endtag('row')
+ writer.endtag("row")
writer.newline()
- writer.endtag('fsm')
+ writer.endtag("fsm")
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
- if name == 'info':
+ if name == "info":
getSimple(self, attrs, *pass_attrs_info)
- elif name == 'fsminfo':
+ elif name == "fsminfo":
getSimple(self, attrs, *pass_attrs_fsm)
- elif name == 'colmap':
+ elif name == "colmap":
e = content_string(content)
for w in e.split():
- x = w.split('=')
- if len(x) != 2 or x[0] == '' or x[1] == '': continue
+ x = w.split("=")
+ if len(x) != 2 or x[0] == "" or x[1] == "":
+ continue
self.colMap[x[0]] = int(x[1])
- elif name == 'staterulemap':
+ elif name == "staterulemap":
for e in content:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
tag, a, c = e
- if tag == 'state':
- self.rules.append([int(x) for x in a['rules'].split(" ")])
- elif name == 'rules':
+ if tag == "state":
+ self.rules.append([int(x) for x in a["rules"].split(" ")])
+ elif name == "rules":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, c = element
- if tag != 'rule': continue
- self.rulePreContexts.append(int(a['precontext']))
- self.ruleSortKeys.append(int(a['sortkey']))
+ if tag != "rule":
+ continue
+ self.rulePreContexts.append(int(a["precontext"]))
+ self.ruleSortKeys.append(int(a["sortkey"]))
con = b""
act = b""
for e in c:
- if not isinstance(e, tuple): continue
+ if not isinstance(e, tuple):
+ continue
tag, a, subc = e
- if tag == 'constraint':
+ if tag == "constraint":
con = readcode(subc)
- elif tag == 'action':
+ elif tag == "action":
act = readcode(subc)
self.actions.append(act)
self.ruleConstraints.append(con)
- elif name == 'passConstraint':
+ elif name == "passConstraint":
self.passConstraints = readcode(content)
- elif name == 'fsm':
+ elif name == "fsm":
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, c = element
- if tag == 'row':
- s = array('H')
+ if tag == "row":
+ s = array("H")
e = content_string(c)
s.extend(map(int, e.split()))
self.stateTrans.append(s)
- elif tag == 'starts':
+ elif tag == "starts":
s = []
e = content_string(c)
s.extend(map(int, e.split()))
self.startStates = s
-
diff --git a/Lib/fontTools/ttLib/tables/S__i_l_l.py b/Lib/fontTools/ttLib/tables/S__i_l_l.py
index 5ab9ee34..12b0b8f6 100644
--- a/Lib/fontTools/ttLib/tables/S__i_l_l.py
+++ b/Lib/fontTools/ttLib/tables/S__i_l_l.py
@@ -5,13 +5,13 @@ from . import DefaultTable
from . import grUtils
import struct
-Sill_hdr = '''
+Sill_hdr = """
>
version: 16.16F
-'''
+"""
-class table_S__i_l_l(DefaultTable.DefaultTable):
+class table_S__i_l_l(DefaultTable.DefaultTable):
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.langs = {}
@@ -19,26 +19,27 @@ class table_S__i_l_l(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
(_, data) = sstruct.unpack2(Sill_hdr, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
- numLangs, = struct.unpack('>H', data[:2])
+ (numLangs,) = struct.unpack(">H", data[:2])
data = data[8:]
maxsetting = 0
langinfo = []
for i in range(numLangs):
- (langcode, numsettings, offset) = struct.unpack(">4sHH",
- data[i * 8:(i+1) * 8])
+ (langcode, numsettings, offset) = struct.unpack(
+ ">4sHH", data[i * 8 : (i + 1) * 8]
+ )
offset = int(offset / 8) - (numLangs + 1)
- langcode = langcode.replace(b'\000', b'')
+ langcode = langcode.replace(b"\000", b"")
langinfo.append((langcode.decode("utf-8"), numsettings, offset))
maxsetting = max(maxsetting, offset + numsettings)
- data = data[numLangs * 8:]
+ data = data[numLangs * 8 :]
finfo = []
for i in range(maxsetting):
- (fid, val, _) = struct.unpack(">LHH", data[i * 8:(i+1) * 8])
+ (fid, val, _) = struct.unpack(">LHH", data[i * 8 : (i + 1) * 8])
finfo.append((fid, val))
self.langs = {}
for c, n, o in langinfo:
self.langs[c] = []
- for i in range(o, o+n):
+ for i in range(o, o + n):
self.langs[c].append(finfo[i])
def compile(self, ttFont):
@@ -46,35 +47,41 @@ class table_S__i_l_l(DefaultTable.DefaultTable):
fdat = b""
offset = len(self.langs)
for c, inf in sorted(self.langs.items()):
- ldat += struct.pack(">4sHH", c.encode('utf8'), len(inf), 8 * offset + 20)
+ ldat += struct.pack(">4sHH", c.encode("utf8"), len(inf), 8 * offset + 20)
for fid, val in inf:
fdat += struct.pack(">LHH", fid, val, 0)
offset += len(inf)
ldat += struct.pack(">LHH", 0x80808080, 0, 8 * offset + 20)
- return sstruct.pack(Sill_hdr, self) + grUtils.bininfo(len(self.langs)) + \
- ldat + fdat
+ return (
+ sstruct.pack(Sill_hdr, self)
+ + grUtils.bininfo(len(self.langs))
+ + ldat
+ + fdat
+ )
def toXML(self, writer, ttFont):
- writer.simpletag('version', version=self.version)
+ writer.simpletag("version", version=self.version)
writer.newline()
for c, inf in sorted(self.langs.items()):
- writer.begintag('lang', name=c)
+ writer.begintag("lang", name=c)
writer.newline()
for fid, val in inf:
- writer.simpletag('feature', fid=grUtils.num2tag(fid), val=val)
+ writer.simpletag("feature", fid=grUtils.num2tag(fid), val=val)
writer.newline()
- writer.endtag('lang')
+ writer.endtag("lang")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.version = float(safeEval(attrs['version']))
- elif name == 'lang':
- c = attrs['name']
+ if name == "version":
+ self.version = float(safeEval(attrs["version"]))
+ elif name == "lang":
+ c = attrs["name"]
self.langs[c] = []
for element in content:
- if not isinstance(element, tuple): continue
+ if not isinstance(element, tuple):
+ continue
tag, a, subcontent = element
- if tag == 'feature':
- self.langs[c].append((grUtils.tag2num(a['fid']),
- int(safeEval(a['val']))))
+ if tag == "feature":
+ self.langs[c].append(
+ (grUtils.tag2num(a["fid"]), int(safeEval(a["val"])))
+ )
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_B_.py b/Lib/fontTools/ttLib/tables/T_S_I_B_.py
index 25d43104..8a6c14c4 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_B_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_B_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_B_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_D_.py b/Lib/fontTools/ttLib/tables/T_S_I_D_.py
index 310eb174..536ff2f9 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_D_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_D_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_D_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_J_.py b/Lib/fontTools/ttLib/tables/T_S_I_J_.py
index c1a46ba6..bc8fe92a 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_J_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_J_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_J_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_P_.py b/Lib/fontTools/ttLib/tables/T_S_I_P_.py
index 778974c8..1abc0259 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_P_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_P_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_P_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_S_.py b/Lib/fontTools/ttLib/tables/T_S_I_S_.py
index 61c9f76f..667eb0e5 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_S_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_S_.py
@@ -1,4 +1,5 @@
from .T_S_I_V_ import table_T_S_I_V_
+
class table_T_S_I_S_(table_T_S_I_V_):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_V_.py b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
index c1e244c6..d7aec458 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I_V_.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
@@ -1,20 +1,20 @@
from fontTools.misc.textTools import strjoin, tobytes, tostr
from . import asciiTable
-class table_T_S_I_V_(asciiTable.asciiTable):
- def toXML(self, writer, ttFont):
- data = tostr(self.data)
- # removing null bytes. XXX needed??
- data = data.split('\0')
- data = strjoin(data)
- writer.begintag("source")
- writer.newline()
- writer.write_noindent(data.replace("\r", "\n"))
- writer.newline()
- writer.endtag("source")
- writer.newline()
+class table_T_S_I_V_(asciiTable.asciiTable):
+ def toXML(self, writer, ttFont):
+ data = tostr(self.data)
+ # removing null bytes. XXX needed??
+ data = data.split("\0")
+ data = strjoin(data)
+ writer.begintag("source")
+ writer.newline()
+ writer.write_noindent(data.replace("\r", "\n"))
+ writer.newline()
+ writer.endtag("source")
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- lines = strjoin(content).split("\n")
- self.data = tobytes("\r".join(lines[1:-1]))
+ def fromXML(self, name, attrs, content, ttFont):
+ lines = strjoin(content).split("\n")
+ self.data = tobytes("\r".join(lines[1:-1]))
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__0.py b/Lib/fontTools/ttLib/tables/T_S_I__0.py
index b187f425..f15fc67b 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__0.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__0.py
@@ -8,47 +8,49 @@ in the TSI1 table.
from . import DefaultTable
import struct
-tsi0Format = '>HHL'
+tsi0Format = ">HHL"
+
def fixlongs(glyphID, textLength, textOffset):
- return int(glyphID), int(textLength), textOffset
+ return int(glyphID), int(textLength), textOffset
class table_T_S_I__0(DefaultTable.DefaultTable):
-
- dependencies = ["TSI1"]
-
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- indices = []
- size = struct.calcsize(tsi0Format)
- for i in range(numGlyphs + 5):
- glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size]))
- indices.append((glyphID, textLength, textOffset))
- data = data[size:]
- assert len(data) == 0
- assert indices[-5] == (0XFFFE, 0, 0xABFC1F34), "bad magic number"
- self.indices = indices[:-5]
- self.extra_indices = indices[-4:]
-
- def compile(self, ttFont):
- if not hasattr(self, "indices"):
- # We have no corresponding table (TSI1 or TSI3); let's return
- # no data, which effectively means "ignore us".
- return b""
- data = b""
- for index, textLength, textOffset in self.indices:
- data = data + struct.pack(tsi0Format, index, textLength, textOffset)
- data = data + struct.pack(tsi0Format, 0XFFFE, 0, 0xABFC1F34)
- for index, textLength, textOffset in self.extra_indices:
- data = data + struct.pack(tsi0Format, index, textLength, textOffset)
- return data
-
- def set(self, indices, extra_indices):
- # gets called by 'TSI1' or 'TSI3'
- self.indices = indices
- self.extra_indices = extra_indices
-
- def toXML(self, writer, ttFont):
- writer.comment("This table will be calculated by the compiler")
- writer.newline()
+ dependencies = ["TSI1"]
+
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ indices = []
+ size = struct.calcsize(tsi0Format)
+ for i in range(numGlyphs + 5):
+ glyphID, textLength, textOffset = fixlongs(
+ *struct.unpack(tsi0Format, data[:size])
+ )
+ indices.append((glyphID, textLength, textOffset))
+ data = data[size:]
+ assert len(data) == 0
+ assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number"
+ self.indices = indices[:-5]
+ self.extra_indices = indices[-4:]
+
+ def compile(self, ttFont):
+ if not hasattr(self, "indices"):
+ # We have no corresponding table (TSI1 or TSI3); let's return
+ # no data, which effectively means "ignore us".
+ return b""
+ data = b""
+ for index, textLength, textOffset in self.indices:
+ data = data + struct.pack(tsi0Format, index, textLength, textOffset)
+ data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34)
+ for index, textLength, textOffset in self.extra_indices:
+ data = data + struct.pack(tsi0Format, index, textLength, textOffset)
+ return data
+
+ def set(self, indices, extra_indices):
+ # gets called by 'TSI1' or 'TSI3'
+ self.indices = indices
+ self.extra_indices = extra_indices
+
+ def toXML(self, writer, ttFont):
+ writer.comment("This table will be calculated by the compiler")
+ writer.newline()
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__1.py b/Lib/fontTools/ttLib/tables/T_S_I__1.py
index 7f7608b2..55aca339 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__1.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__1.py
@@ -10,147 +10,154 @@ from fontTools.misc.textTools import strjoin, tobytes, tostr
class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
+ extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"}
- extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"}
+ indextable = "TSI0"
- indextable = "TSI0"
+ def decompile(self, data, ttFont):
+ totalLength = len(data)
+ indextable = ttFont[self.indextable]
+ for indices, isExtra in zip(
+ (indextable.indices, indextable.extra_indices), (False, True)
+ ):
+ programs = {}
+ for i, (glyphID, textLength, textOffset) in enumerate(indices):
+ if isExtra:
+ name = self.extras[glyphID]
+ else:
+ name = ttFont.getGlyphName(glyphID)
+ if textOffset > totalLength:
+ self.log.warning("textOffset > totalLength; %r skipped" % name)
+ continue
+ if textLength < 0x8000:
+ # If the length stored in the record is less than 32768, then use
+ # that as the length of the record.
+ pass
+ elif textLength == 0x8000:
+ # If the length is 32768, compute the actual length as follows:
+ isLast = i == (len(indices) - 1)
+ if isLast:
+ if isExtra:
+ # For the last "extra" record (the very last record of the
+ # table), the length is the difference between the total
+ # length of the TSI1 table and the textOffset of the final
+ # record.
+ nextTextOffset = totalLength
+ else:
+ # For the last "normal" record (the last record just prior
+ # to the record containing the "magic number"), the length
+ # is the difference between the textOffset of the record
+ # following the "magic number" (0xFFFE) record (i.e. the
+ # first "extra" record), and the textOffset of the last
+ # "normal" record.
+ nextTextOffset = indextable.extra_indices[0][2]
+ else:
+ # For all other records with a length of 0x8000, the length is
+ # the difference between the textOffset of the record in
+ # question and the textOffset of the next record.
+ nextTextOffset = indices[i + 1][2]
+ assert nextTextOffset >= textOffset, "entries not sorted by offset"
+ if nextTextOffset > totalLength:
+ self.log.warning(
+ "nextTextOffset > totalLength; %r truncated" % name
+ )
+ nextTextOffset = totalLength
+ textLength = nextTextOffset - textOffset
+ else:
+ from fontTools import ttLib
- def decompile(self, data, ttFont):
- totalLength = len(data)
- indextable = ttFont[self.indextable]
- for indices, isExtra in zip(
- (indextable.indices, indextable.extra_indices), (False, True)):
- programs = {}
- for i, (glyphID, textLength, textOffset) in enumerate(indices):
- if isExtra:
- name = self.extras[glyphID]
- else:
- name = ttFont.getGlyphName(glyphID)
- if textOffset > totalLength:
- self.log.warning("textOffset > totalLength; %r skipped" % name)
- continue
- if textLength < 0x8000:
- # If the length stored in the record is less than 32768, then use
- # that as the length of the record.
- pass
- elif textLength == 0x8000:
- # If the length is 32768, compute the actual length as follows:
- isLast = i == (len(indices)-1)
- if isLast:
- if isExtra:
- # For the last "extra" record (the very last record of the
- # table), the length is the difference between the total
- # length of the TSI1 table and the textOffset of the final
- # record.
- nextTextOffset = totalLength
- else:
- # For the last "normal" record (the last record just prior
- # to the record containing the "magic number"), the length
- # is the difference between the textOffset of the record
- # following the "magic number" (0xFFFE) record (i.e. the
- # first "extra" record), and the textOffset of the last
- # "normal" record.
- nextTextOffset = indextable.extra_indices[0][2]
- else:
- # For all other records with a length of 0x8000, the length is
- # the difference between the textOffset of the record in
- # question and the textOffset of the next record.
- nextTextOffset = indices[i+1][2]
- assert nextTextOffset >= textOffset, "entries not sorted by offset"
- if nextTextOffset > totalLength:
- self.log.warning(
- "nextTextOffset > totalLength; %r truncated" % name)
- nextTextOffset = totalLength
- textLength = nextTextOffset - textOffset
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError(
- "%r textLength (%d) must not be > 32768" % (name, textLength))
- text = data[textOffset:textOffset+textLength]
- assert len(text) == textLength
- text = tostr(text, encoding='utf-8')
- if text:
- programs[name] = text
- if isExtra:
- self.extraPrograms = programs
- else:
- self.glyphPrograms = programs
+ raise ttLib.TTLibError(
+ "%r textLength (%d) must not be > 32768" % (name, textLength)
+ )
+ text = data[textOffset : textOffset + textLength]
+ assert len(text) == textLength
+ text = tostr(text, encoding="utf-8")
+ if text:
+ programs[name] = text
+ if isExtra:
+ self.extraPrograms = programs
+ else:
+ self.glyphPrograms = programs
- def compile(self, ttFont):
- if not hasattr(self, "glyphPrograms"):
- self.glyphPrograms = {}
- self.extraPrograms = {}
- data = b''
- indextable = ttFont[self.indextable]
- glyphNames = ttFont.getGlyphOrder()
+ def compile(self, ttFont):
+ if not hasattr(self, "glyphPrograms"):
+ self.glyphPrograms = {}
+ self.extraPrograms = {}
+ data = b""
+ indextable = ttFont[self.indextable]
+ glyphNames = ttFont.getGlyphOrder()
- indices = []
- for i in range(len(glyphNames)):
- if len(data) % 2:
- data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum.
- name = glyphNames[i]
- if name in self.glyphPrograms:
- text = tobytes(self.glyphPrograms[name], encoding="utf-8")
- else:
- text = b""
- textLength = len(text)
- if textLength >= 0x8000:
- textLength = 0x8000
- indices.append((i, textLength, len(data)))
- data = data + text
+ indices = []
+ for i in range(len(glyphNames)):
+ if len(data) % 2:
+ data = (
+ data + b"\015"
+ ) # align on 2-byte boundaries, fill with return chars. Yum.
+ name = glyphNames[i]
+ if name in self.glyphPrograms:
+ text = tobytes(self.glyphPrograms[name], encoding="utf-8")
+ else:
+ text = b""
+ textLength = len(text)
+ if textLength >= 0x8000:
+ textLength = 0x8000
+ indices.append((i, textLength, len(data)))
+ data = data + text
- extra_indices = []
- codes = sorted(self.extras.items())
- for i in range(len(codes)):
- if len(data) % 2:
- data = data + b"\015" # align on 2-byte boundaries, fill with return chars.
- code, name = codes[i]
- if name in self.extraPrograms:
- text = tobytes(self.extraPrograms[name], encoding="utf-8")
- else:
- text = b""
- textLength = len(text)
- if textLength >= 0x8000:
- textLength = 0x8000
- extra_indices.append((code, textLength, len(data)))
- data = data + text
- indextable.set(indices, extra_indices)
- return data
+ extra_indices = []
+ codes = sorted(self.extras.items())
+ for i in range(len(codes)):
+ if len(data) % 2:
+ data = (
+ data + b"\015"
+ ) # align on 2-byte boundaries, fill with return chars.
+ code, name = codes[i]
+ if name in self.extraPrograms:
+ text = tobytes(self.extraPrograms[name], encoding="utf-8")
+ else:
+ text = b""
+ textLength = len(text)
+ if textLength >= 0x8000:
+ textLength = 0x8000
+ extra_indices.append((code, textLength, len(data)))
+ data = data + text
+ indextable.set(indices, extra_indices)
+ return data
- def toXML(self, writer, ttFont):
- names = sorted(self.glyphPrograms.keys())
- writer.newline()
- for name in names:
- text = self.glyphPrograms[name]
- if not text:
- continue
- writer.begintag("glyphProgram", name=name)
- writer.newline()
- writer.write_noindent(text.replace("\r", "\n"))
- writer.newline()
- writer.endtag("glyphProgram")
- writer.newline()
- writer.newline()
- extra_names = sorted(self.extraPrograms.keys())
- for name in extra_names:
- text = self.extraPrograms[name]
- if not text:
- continue
- writer.begintag("extraProgram", name=name)
- writer.newline()
- writer.write_noindent(text.replace("\r", "\n"))
- writer.newline()
- writer.endtag("extraProgram")
- writer.newline()
- writer.newline()
+ def toXML(self, writer, ttFont):
+ names = sorted(self.glyphPrograms.keys())
+ writer.newline()
+ for name in names:
+ text = self.glyphPrograms[name]
+ if not text:
+ continue
+ writer.begintag("glyphProgram", name=name)
+ writer.newline()
+ writer.write_noindent(text.replace("\r", "\n"))
+ writer.newline()
+ writer.endtag("glyphProgram")
+ writer.newline()
+ writer.newline()
+ extra_names = sorted(self.extraPrograms.keys())
+ for name in extra_names:
+ text = self.extraPrograms[name]
+ if not text:
+ continue
+ writer.begintag("extraProgram", name=name)
+ writer.newline()
+ writer.write_noindent(text.replace("\r", "\n"))
+ writer.newline()
+ writer.endtag("extraProgram")
+ writer.newline()
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "glyphPrograms"):
- self.glyphPrograms = {}
- self.extraPrograms = {}
- lines = strjoin(content).replace("\r", "\n").split("\n")
- text = '\r'.join(lines[1:-1])
- if name == "glyphProgram":
- self.glyphPrograms[attrs["name"]] = text
- elif name == "extraProgram":
- self.extraPrograms[attrs["name"]] = text
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "glyphPrograms"):
+ self.glyphPrograms = {}
+ self.extraPrograms = {}
+ lines = strjoin(content).replace("\r", "\n").split("\n")
+ text = "\r".join(lines[1:-1])
+ if name == "glyphProgram":
+ self.glyphPrograms[attrs["name"]] = text
+ elif name == "extraProgram":
+ self.extraPrograms[attrs["name"]] = text
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__2.py b/Lib/fontTools/ttLib/tables/T_S_I__2.py
index 036c9815..4278be15 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__2.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__2.py
@@ -9,6 +9,6 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("TSI0")
-class table_T_S_I__2(superclass):
- dependencies = ["TSI3"]
+class table_T_S_I__2(superclass):
+ dependencies = ["TSI3"]
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__3.py b/Lib/fontTools/ttLib/tables/T_S_I__3.py
index a2490142..785ca231 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__3.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__3.py
@@ -7,8 +7,13 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("TSI1")
-class table_T_S_I__3(superclass):
- extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"}
+class table_T_S_I__3(superclass):
+ extras = {
+ 0xFFFA: "reserved0",
+ 0xFFFB: "reserved1",
+ 0xFFFC: "reserved2",
+ 0xFFFD: "reserved3",
+ }
- indextable = "TSI2"
+ indextable = "TSI2"
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__5.py b/Lib/fontTools/ttLib/tables/T_S_I__5.py
index 7be09f9a..5edc86a9 100644
--- a/Lib/fontTools/ttLib/tables/T_S_I__5.py
+++ b/Lib/fontTools/ttLib/tables/T_S_I__5.py
@@ -10,34 +10,37 @@ import array
class table_T_S_I__5(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ assert len(data) == 2 * numGlyphs
+ a = array.array("H")
+ a.frombytes(data)
+ if sys.byteorder != "big":
+ a.byteswap()
+ self.glyphGrouping = {}
+ for i in range(numGlyphs):
+ self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- assert len(data) == 2 * numGlyphs
- a = array.array("H")
- a.frombytes(data)
- if sys.byteorder != "big": a.byteswap()
- self.glyphGrouping = {}
- for i in range(numGlyphs):
- self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]
+ def compile(self, ttFont):
+ glyphNames = ttFont.getGlyphOrder()
+ a = array.array("H")
+ for i in range(len(glyphNames)):
+ a.append(self.glyphGrouping.get(glyphNames[i], 0))
+ if sys.byteorder != "big":
+ a.byteswap()
+ return a.tobytes()
- def compile(self, ttFont):
- glyphNames = ttFont.getGlyphOrder()
- a = array.array("H")
- for i in range(len(glyphNames)):
- a.append(self.glyphGrouping.get(glyphNames[i], 0))
- if sys.byteorder != "big": a.byteswap()
- return a.tobytes()
+ def toXML(self, writer, ttFont):
+ names = sorted(self.glyphGrouping.keys())
+ for glyphName in names:
+ writer.simpletag(
+ "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]
+ )
+ writer.newline()
- def toXML(self, writer, ttFont):
- names = sorted(self.glyphGrouping.keys())
- for glyphName in names:
- writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName])
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "glyphGrouping"):
- self.glyphGrouping = {}
- if name != "glyphgroup":
- return
- self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "glyphGrouping"):
+ self.glyphGrouping = {}
+ if name != "glyphgroup":
+ return
+ self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
diff --git a/Lib/fontTools/ttLib/tables/T_T_F_A_.py b/Lib/fontTools/ttLib/tables/T_T_F_A_.py
index 8446dfc5..e3cf2db2 100644
--- a/Lib/fontTools/ttLib/tables/T_T_F_A_.py
+++ b/Lib/fontTools/ttLib/tables/T_T_F_A_.py
@@ -1,4 +1,5 @@
from . import asciiTable
+
class table_T_T_F_A_(asciiTable.asciiTable):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/TupleVariation.py b/Lib/fontTools/ttLib/tables/TupleVariation.py
index 9c2895e4..30d00990 100644
--- a/Lib/fontTools/ttLib/tables/TupleVariation.py
+++ b/Lib/fontTools/ttLib/tables/TupleVariation.py
@@ -22,751 +22,787 @@ PRIVATE_POINT_NUMBERS = 0x2000
DELTAS_ARE_ZERO = 0x80
DELTAS_ARE_WORDS = 0x40
-DELTA_RUN_COUNT_MASK = 0x3f
+DELTA_RUN_COUNT_MASK = 0x3F
POINTS_ARE_WORDS = 0x80
-POINT_RUN_COUNT_MASK = 0x7f
+POINT_RUN_COUNT_MASK = 0x7F
TUPLES_SHARE_POINT_NUMBERS = 0x8000
-TUPLE_COUNT_MASK = 0x0fff
-TUPLE_INDEX_MASK = 0x0fff
+TUPLE_COUNT_MASK = 0x0FFF
+TUPLE_INDEX_MASK = 0x0FFF
log = logging.getLogger(__name__)
class TupleVariation(object):
-
- def __init__(self, axes, coordinates):
- self.axes = axes.copy()
- self.coordinates = list(coordinates)
-
- def __repr__(self):
- axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()]))
- return "<TupleVariation %s %s>" % (axes, self.coordinates)
-
- def __eq__(self, other):
- return self.coordinates == other.coordinates and self.axes == other.axes
-
- def getUsedPoints(self):
- # Empty set means "all points used".
- if None not in self.coordinates:
- return frozenset()
- used = frozenset([i for i,p in enumerate(self.coordinates) if p is not None])
- # Return None if no points used.
- return used if used else None
-
- def hasImpact(self):
- """Returns True if this TupleVariation has any visible impact.
-
- If the result is False, the TupleVariation can be omitted from the font
- without making any visible difference.
- """
- return any(c is not None for c in self.coordinates)
-
- def toXML(self, writer, axisTags):
- writer.begintag("tuple")
- writer.newline()
- for axis in axisTags:
- value = self.axes.get(axis)
- if value is not None:
- minValue, value, maxValue = value
- defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- if minValue == defaultMinValue and maxValue == defaultMaxValue:
- writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
- else:
- attrs = [
- ("axis", axis),
- ("min", fl2str(minValue, 14)),
- ("value", fl2str(value, 14)),
- ("max", fl2str(maxValue, 14)),
- ]
- writer.simpletag("coord", attrs)
- writer.newline()
- wrote_any_deltas = False
- for i, delta in enumerate(self.coordinates):
- if type(delta) == tuple and len(delta) == 2:
- writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
- writer.newline()
- wrote_any_deltas = True
- elif type(delta) == int:
- writer.simpletag("delta", cvt=i, value=delta)
- writer.newline()
- wrote_any_deltas = True
- elif delta is not None:
- log.error("bad delta format")
- writer.comment("bad delta #%d" % i)
- writer.newline()
- wrote_any_deltas = True
- if not wrote_any_deltas:
- writer.comment("no deltas")
- writer.newline()
- writer.endtag("tuple")
- writer.newline()
-
- def fromXML(self, name, attrs, _content):
- if name == "coord":
- axis = attrs["axis"]
- value = str2fl(attrs["value"], 14)
- defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- minValue = str2fl(attrs.get("min", defaultMinValue), 14)
- maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
- self.axes[axis] = (minValue, value, maxValue)
- elif name == "delta":
- if "pt" in attrs:
- point = safeEval(attrs["pt"])
- x = safeEval(attrs["x"])
- y = safeEval(attrs["y"])
- self.coordinates[point] = (x, y)
- elif "cvt" in attrs:
- cvt = safeEval(attrs["cvt"])
- value = safeEval(attrs["value"])
- self.coordinates[cvt] = value
- else:
- log.warning("bad delta format: %s" %
- ", ".join(sorted(attrs.keys())))
-
- def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
- assert set(self.axes.keys()) <= set(axisTags), ("Unknown axis tag found.", self.axes.keys(), axisTags)
-
- tupleData = []
- auxData = []
-
- if pointData is None:
- usedPoints = self.getUsedPoints()
- if usedPoints is None: # Nothing to encode
- return b'', b''
- pointData = self.compilePoints(usedPoints)
-
- coord = self.compileCoord(axisTags)
- flags = sharedCoordIndices.get(coord)
- if flags is None:
- flags = EMBEDDED_PEAK_TUPLE
- tupleData.append(coord)
-
- intermediateCoord = self.compileIntermediateCoord(axisTags)
- if intermediateCoord is not None:
- flags |= INTERMEDIATE_REGION
- tupleData.append(intermediateCoord)
-
- # pointData of b'' implies "use shared points".
- if pointData:
- flags |= PRIVATE_POINT_NUMBERS
- auxData.append(pointData)
-
- auxData.append(self.compileDeltas())
- auxData = b''.join(auxData)
-
- tupleData.insert(0, struct.pack('>HH', len(auxData), flags))
- return b''.join(tupleData), auxData
-
- def compileCoord(self, axisTags):
- result = bytearray()
- axes = self.axes
- for axis in axisTags:
- triple = axes.get(axis)
- if triple is None:
- result.extend(b'\0\0')
- else:
- result.extend(struct.pack(">h", fl2fi(triple[1], 14)))
- return bytes(result)
-
- def compileIntermediateCoord(self, axisTags):
- needed = False
- for axis in axisTags:
- minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
- defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
- needed = True
- break
- if not needed:
- return None
- minCoords = bytearray()
- maxCoords = bytearray()
- for axis in axisTags:
- minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
- minCoords.extend(struct.pack(">h", fl2fi(minValue, 14)))
- maxCoords.extend(struct.pack(">h", fl2fi(maxValue, 14)))
- return minCoords + maxCoords
-
- @staticmethod
- def decompileCoord_(axisTags, data, offset):
- coord = {}
- pos = offset
- for axis in axisTags:
- coord[axis] = fi2fl(struct.unpack(">h", data[pos:pos+2])[0], 14)
- pos += 2
- return coord, pos
-
- @staticmethod
- def compilePoints(points):
- # If the set consists of all points in the glyph, it gets encoded with
- # a special encoding: a single zero byte.
- #
- # To use this optimization, points passed in must be empty set.
- # The following two lines are not strictly necessary as the main code
- # below would emit the same. But this is most common and faster.
- if not points:
- return b'\0'
-
- # In the 'gvar' table, the packing of point numbers is a little surprising.
- # It consists of multiple runs, each being a delta-encoded list of integers.
- # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
- # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
- # There are two types of runs, with values being either 8 or 16 bit unsigned
- # integers.
- points = list(points)
- points.sort()
- numPoints = len(points)
-
- result = bytearray()
- # The binary representation starts with the total number of points in the set,
- # encoded into one or two bytes depending on the value.
- if numPoints < 0x80:
- result.append(numPoints)
- else:
- result.append((numPoints >> 8) | 0x80)
- result.append(numPoints & 0xff)
-
- MAX_RUN_LENGTH = 127
- pos = 0
- lastValue = 0
- while pos < numPoints:
- runLength = 0
-
- headerPos = len(result)
- result.append(0)
-
- useByteEncoding = None
- while pos < numPoints and runLength <= MAX_RUN_LENGTH:
- curValue = points[pos]
- delta = curValue - lastValue
- if useByteEncoding is None:
- useByteEncoding = 0 <= delta <= 0xff
- if useByteEncoding and (delta > 0xff or delta < 0):
- # we need to start a new run (which will not use byte encoding)
- break
- # TODO This never switches back to a byte-encoding from a short-encoding.
- # That's suboptimal.
- if useByteEncoding:
- result.append(delta)
- else:
- result.append(delta >> 8)
- result.append(delta & 0xff)
- lastValue = curValue
- pos += 1
- runLength += 1
- if useByteEncoding:
- result[headerPos] = runLength - 1
- else:
- result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS
-
- return result
-
- @staticmethod
- def decompilePoints_(numPoints, data, offset, tableTag):
- """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
- assert tableTag in ('cvar', 'gvar')
- pos = offset
- numPointsInData = data[pos]
- pos += 1
- if (numPointsInData & POINTS_ARE_WORDS) != 0:
- numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
- pos += 1
- if numPointsInData == 0:
- return (range(numPoints), pos)
-
- result = []
- while len(result) < numPointsInData:
- runHeader = data[pos]
- pos += 1
- numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
- point = 0
- if (runHeader & POINTS_ARE_WORDS) != 0:
- points = array.array("H")
- pointsSize = numPointsInRun * 2
- else:
- points = array.array("B")
- pointsSize = numPointsInRun
- points.frombytes(data[pos:pos+pointsSize])
- if sys.byteorder != "big": points.byteswap()
-
- assert len(points) == numPointsInRun
- pos += pointsSize
-
- result.extend(points)
-
- # Convert relative to absolute
- absolute = []
- current = 0
- for delta in result:
- current += delta
- absolute.append(current)
- result = absolute
- del absolute
-
- badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
- if badPoints:
- log.warning("point %s out of range in '%s' table" %
- (",".join(sorted(badPoints)), tableTag))
- return (result, pos)
-
- def compileDeltas(self):
- deltaX = []
- deltaY = []
- if self.getCoordWidth() == 2:
- for c in self.coordinates:
- if c is None:
- continue
- deltaX.append(c[0])
- deltaY.append(c[1])
- else:
- for c in self.coordinates:
- if c is None:
- continue
- deltaX.append(c)
- bytearr = bytearray()
- self.compileDeltaValues_(deltaX, bytearr)
- self.compileDeltaValues_(deltaY, bytearr)
- return bytearr
-
- @staticmethod
- def compileDeltaValues_(deltas, bytearr=None):
- """[value1, value2, value3, ...] --> bytearray
-
- Emits a sequence of runs. Each run starts with a
- byte-sized header whose 6 least significant bits
- (header & 0x3F) indicate how many values are encoded
- in this run. The stored length is the actual length
- minus one; run lengths are thus in the range [1..64].
- If the header byte has its most significant bit (0x80)
- set, all values in this run are zero, and no data
- follows. Otherwise, the header byte is followed by
- ((header & 0x3F) + 1) signed values. If (header &
- 0x40) is clear, the delta values are stored as signed
- bytes; if (header & 0x40) is set, the delta values are
- signed 16-bit integers.
- """ # Explaining the format because the 'gvar' spec is hard to understand.
- if bytearr is None:
- bytearr = bytearray()
- pos = 0
- numDeltas = len(deltas)
- while pos < numDeltas:
- value = deltas[pos]
- if value == 0:
- pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
- elif -128 <= value <= 127:
- pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
- else:
- pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
- return bytearr
-
- @staticmethod
- def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
- pos = offset
- numDeltas = len(deltas)
- while pos < numDeltas and deltas[pos] == 0:
- pos += 1
- runLength = pos - offset
- while runLength >= 64:
- bytearr.append(DELTAS_ARE_ZERO | 63)
- runLength -= 64
- if runLength:
- bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
- return pos
-
- @staticmethod
- def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
- pos = offset
- numDeltas = len(deltas)
- while pos < numDeltas:
- value = deltas[pos]
- if not (-128 <= value <= 127):
- break
- # Within a byte-encoded run of deltas, a single zero
- # is best stored literally as 0x00 value. However,
- # if are two or more zeroes in a sequence, it is
- # better to start a new run. For example, the sequence
- # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
- # (04 0F 0F 00 0F 0F) when storing the zero value
- # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
- # when starting a new run.
- if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0:
- break
- pos += 1
- runLength = pos - offset
- while runLength >= 64:
- bytearr.append(63)
- bytearr.extend(array.array('b', deltas[offset:offset+64]))
- offset += 64
- runLength -= 64
- if runLength:
- bytearr.append(runLength - 1)
- bytearr.extend(array.array('b', deltas[offset:pos]))
- return pos
-
- @staticmethod
- def encodeDeltaRunAsWords_(deltas, offset, bytearr):
- pos = offset
- numDeltas = len(deltas)
- while pos < numDeltas:
- value = deltas[pos]
- # Within a word-encoded run of deltas, it is easiest
- # to start a new run (with a different encoding)
- # whenever we encounter a zero value. For example,
- # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
- # storing the zero literally (42 66 66 00 00 77 77),
- # and equally 7 bytes when starting a new run
- # (40 66 66 80 40 77 77).
- if value == 0:
- break
-
- # Within a word-encoded run of deltas, a single value
- # in the range (-128..127) should be encoded literally
- # because it is more compact. For example, the sequence
- # [0x6666, 2, 0x7777] becomes 7 bytes when storing
- # the value literally (42 66 66 00 02 77 77), but 8 bytes
- # when starting a new run (40 66 66 00 02 40 77 77).
- if (-128 <= value <= 127) and pos+1 < numDeltas and (-128 <= deltas[pos+1] <= 127):
- break
- pos += 1
- runLength = pos - offset
- while runLength >= 64:
- bytearr.append(DELTAS_ARE_WORDS | 63)
- a = array.array('h', deltas[offset:offset+64])
- if sys.byteorder != "big": a.byteswap()
- bytearr.extend(a)
- offset += 64
- runLength -= 64
- if runLength:
- bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
- a = array.array('h', deltas[offset:pos])
- if sys.byteorder != "big": a.byteswap()
- bytearr.extend(a)
- return pos
-
- @staticmethod
- def decompileDeltas_(numDeltas, data, offset):
- """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
- result = []
- pos = offset
- while len(result) < numDeltas:
- runHeader = data[pos]
- pos += 1
- numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
- if (runHeader & DELTAS_ARE_ZERO) != 0:
- result.extend([0] * numDeltasInRun)
- else:
- if (runHeader & DELTAS_ARE_WORDS) != 0:
- deltas = array.array("h")
- deltasSize = numDeltasInRun * 2
- else:
- deltas = array.array("b")
- deltasSize = numDeltasInRun
- deltas.frombytes(data[pos:pos+deltasSize])
- if sys.byteorder != "big": deltas.byteswap()
- assert len(deltas) == numDeltasInRun
- pos += deltasSize
- result.extend(deltas)
- assert len(result) == numDeltas
- return (result, pos)
-
- @staticmethod
- def getTupleSize_(flags, axisCount):
- size = 4
- if (flags & EMBEDDED_PEAK_TUPLE) != 0:
- size += axisCount * 2
- if (flags & INTERMEDIATE_REGION) != 0:
- size += axisCount * 4
- return size
-
- def getCoordWidth(self):
- """ Return 2 if coordinates are (x, y) as in gvar, 1 if single values
- as in cvar, or 0 if empty.
- """
- firstDelta = next((c for c in self.coordinates if c is not None), None)
- if firstDelta is None:
- return 0 # empty or has no impact
- if type(firstDelta) in (int, float):
- return 1
- if type(firstDelta) is tuple and len(firstDelta) == 2:
- return 2
- raise TypeError(
- "invalid type of delta; expected (int or float) number, or "
- "Tuple[number, number]: %r" % firstDelta
- )
-
- def scaleDeltas(self, scalar):
- if scalar == 1.0:
- return # no change
- coordWidth = self.getCoordWidth()
- self.coordinates = [
- None
- if d is None
- else d * scalar
- if coordWidth == 1
- else (d[0] * scalar, d[1] * scalar)
- for d in self.coordinates
- ]
-
- def roundDeltas(self):
- coordWidth = self.getCoordWidth()
- self.coordinates = [
- None
- if d is None
- else otRound(d)
- if coordWidth == 1
- else (otRound(d[0]), otRound(d[1]))
- for d in self.coordinates
- ]
-
- def calcInferredDeltas(self, origCoords, endPts):
- from fontTools.varLib.iup import iup_delta
-
- if self.getCoordWidth() == 1:
- raise TypeError(
- "Only 'gvar' TupleVariation can have inferred deltas"
- )
- if None in self.coordinates:
- if len(self.coordinates) != len(origCoords):
- raise ValueError(
- "Expected len(origCoords) == %d; found %d"
- % (len(self.coordinates), len(origCoords))
- )
- self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
-
- def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
- from fontTools.varLib.iup import iup_delta_optimize
-
- if None in self.coordinates:
- return # already optimized
-
- deltaOpt = iup_delta_optimize(
- self.coordinates, origCoords, endPts, tolerance=tolerance
- )
- if None in deltaOpt:
- if isComposite and all(d is None for d in deltaOpt):
- # Fix for macOS composites
- # https://github.com/fonttools/fonttools/issues/1381
- deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
- # Use "optimized" version only if smaller...
- varOpt = TupleVariation(self.axes, deltaOpt)
-
- # Shouldn't matter that this is different from fvar...?
- axisTags = sorted(self.axes.keys())
- tupleData, auxData = self.compile(axisTags)
- unoptimizedLength = len(tupleData) + len(auxData)
- tupleData, auxData = varOpt.compile(axisTags)
- optimizedLength = len(tupleData) + len(auxData)
-
- if optimizedLength < unoptimizedLength:
- self.coordinates = varOpt.coordinates
-
- def __iadd__(self, other):
- if not isinstance(other, TupleVariation):
- return NotImplemented
- deltas1 = self.coordinates
- length = len(deltas1)
- deltas2 = other.coordinates
- if len(deltas2) != length:
- raise ValueError(
- "cannot sum TupleVariation deltas with different lengths"
- )
- # 'None' values have different meanings in gvar vs cvar TupleVariations:
- # within the gvar, when deltas are not provided explicitly for some points,
- # they need to be inferred; whereas for the 'cvar' table, if deltas are not
- # provided for some CVT values, then no adjustments are made (i.e. None == 0).
- # Thus, we cannot sum deltas for gvar TupleVariations if they contain
- # inferred inferred deltas (the latter need to be computed first using
- # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
- # deltas as if they are zeros.
- if self.getCoordWidth() == 2:
- for i, d2 in zip(range(length), deltas2):
- d1 = deltas1[i]
- try:
- deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
- except TypeError:
- raise ValueError(
- "cannot sum gvar deltas with inferred points"
- )
- else:
- for i, d2 in zip(range(length), deltas2):
- d1 = deltas1[i]
- if d1 is not None and d2 is not None:
- deltas1[i] = d1 + d2
- elif d1 is None and d2 is not None:
- deltas1[i] = d2
- # elif d2 is None do nothing
- return self
+ def __init__(self, axes, coordinates):
+ self.axes = axes.copy()
+ self.coordinates = list(coordinates)
+
+ def __repr__(self):
+ axes = ",".join(
+ sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])
+ )
+ return "<TupleVariation %s %s>" % (axes, self.coordinates)
+
+ def __eq__(self, other):
+ return self.coordinates == other.coordinates and self.axes == other.axes
+
+ def getUsedPoints(self):
+ # Empty set means "all points used".
+ if None not in self.coordinates:
+ return frozenset()
+ used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None])
+ # Return None if no points used.
+ return used if used else None
+
+ def hasImpact(self):
+ """Returns True if this TupleVariation has any visible impact.
+
+ If the result is False, the TupleVariation can be omitted from the font
+ without making any visible difference.
+ """
+ return any(c is not None for c in self.coordinates)
+
+ def toXML(self, writer, axisTags):
+ writer.begintag("tuple")
+ writer.newline()
+ for axis in axisTags:
+ value = self.axes.get(axis)
+ if value is not None:
+ minValue, value, maxValue = value
+ defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ if minValue == defaultMinValue and maxValue == defaultMaxValue:
+ writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
+ else:
+ attrs = [
+ ("axis", axis),
+ ("min", fl2str(minValue, 14)),
+ ("value", fl2str(value, 14)),
+ ("max", fl2str(maxValue, 14)),
+ ]
+ writer.simpletag("coord", attrs)
+ writer.newline()
+ wrote_any_deltas = False
+ for i, delta in enumerate(self.coordinates):
+ if type(delta) == tuple and len(delta) == 2:
+ writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
+ writer.newline()
+ wrote_any_deltas = True
+ elif type(delta) == int:
+ writer.simpletag("delta", cvt=i, value=delta)
+ writer.newline()
+ wrote_any_deltas = True
+ elif delta is not None:
+ log.error("bad delta format")
+ writer.comment("bad delta #%d" % i)
+ writer.newline()
+ wrote_any_deltas = True
+ if not wrote_any_deltas:
+ writer.comment("no deltas")
+ writer.newline()
+ writer.endtag("tuple")
+ writer.newline()
+
+ def fromXML(self, name, attrs, _content):
+ if name == "coord":
+ axis = attrs["axis"]
+ value = str2fl(attrs["value"], 14)
+ defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ minValue = str2fl(attrs.get("min", defaultMinValue), 14)
+ maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
+ self.axes[axis] = (minValue, value, maxValue)
+ elif name == "delta":
+ if "pt" in attrs:
+ point = safeEval(attrs["pt"])
+ x = safeEval(attrs["x"])
+ y = safeEval(attrs["y"])
+ self.coordinates[point] = (x, y)
+ elif "cvt" in attrs:
+ cvt = safeEval(attrs["cvt"])
+ value = safeEval(attrs["value"])
+ self.coordinates[cvt] = value
+ else:
+ log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys())))
+
+ def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
+ assert set(self.axes.keys()) <= set(axisTags), (
+ "Unknown axis tag found.",
+ self.axes.keys(),
+ axisTags,
+ )
+
+ tupleData = []
+ auxData = []
+
+ if pointData is None:
+ usedPoints = self.getUsedPoints()
+ if usedPoints is None: # Nothing to encode
+ return b"", b""
+ pointData = self.compilePoints(usedPoints)
+
+ coord = self.compileCoord(axisTags)
+ flags = sharedCoordIndices.get(coord)
+ if flags is None:
+ flags = EMBEDDED_PEAK_TUPLE
+ tupleData.append(coord)
+
+ intermediateCoord = self.compileIntermediateCoord(axisTags)
+ if intermediateCoord is not None:
+ flags |= INTERMEDIATE_REGION
+ tupleData.append(intermediateCoord)
+
+ # pointData of b'' implies "use shared points".
+ if pointData:
+ flags |= PRIVATE_POINT_NUMBERS
+ auxData.append(pointData)
+
+ auxData.append(self.compileDeltas())
+ auxData = b"".join(auxData)
+
+ tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
+ return b"".join(tupleData), auxData
+
+ def compileCoord(self, axisTags):
+ result = []
+ axes = self.axes
+ for axis in axisTags:
+ triple = axes.get(axis)
+ if triple is None:
+ result.append(b"\0\0")
+ else:
+ result.append(struct.pack(">h", fl2fi(triple[1], 14)))
+ return b"".join(result)
+
+ def compileIntermediateCoord(self, axisTags):
+ needed = False
+ for axis in axisTags:
+ minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
+ defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
+ needed = True
+ break
+ if not needed:
+ return None
+ minCoords = []
+ maxCoords = []
+ for axis in axisTags:
+ minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
+ minCoords.append(struct.pack(">h", fl2fi(minValue, 14)))
+ maxCoords.append(struct.pack(">h", fl2fi(maxValue, 14)))
+ return b"".join(minCoords + maxCoords)
+
+ @staticmethod
+ def decompileCoord_(axisTags, data, offset):
+ coord = {}
+ pos = offset
+ for axis in axisTags:
+ coord[axis] = fi2fl(struct.unpack(">h", data[pos : pos + 2])[0], 14)
+ pos += 2
+ return coord, pos
+
+ @staticmethod
+ def compilePoints(points):
+ # If the set consists of all points in the glyph, it gets encoded with
+ # a special encoding: a single zero byte.
+ #
+ # To use this optimization, points passed in must be empty set.
+ # The following two lines are not strictly necessary as the main code
+ # below would emit the same. But this is most common and faster.
+ if not points:
+ return b"\0"
+
+ # In the 'gvar' table, the packing of point numbers is a little surprising.
+ # It consists of multiple runs, each being a delta-encoded list of integers.
+ # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
+ # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
+ # There are two types of runs, with values being either 8 or 16 bit unsigned
+ # integers.
+ points = list(points)
+ points.sort()
+ numPoints = len(points)
+
+ result = bytearray()
+ # The binary representation starts with the total number of points in the set,
+ # encoded into one or two bytes depending on the value.
+ if numPoints < 0x80:
+ result.append(numPoints)
+ else:
+ result.append((numPoints >> 8) | 0x80)
+ result.append(numPoints & 0xFF)
+
+ MAX_RUN_LENGTH = 127
+ pos = 0
+ lastValue = 0
+ while pos < numPoints:
+ runLength = 0
+
+ headerPos = len(result)
+ result.append(0)
+
+ useByteEncoding = None
+ while pos < numPoints and runLength <= MAX_RUN_LENGTH:
+ curValue = points[pos]
+ delta = curValue - lastValue
+ if useByteEncoding is None:
+ useByteEncoding = 0 <= delta <= 0xFF
+ if useByteEncoding and (delta > 0xFF or delta < 0):
+ # we need to start a new run (which will not use byte encoding)
+ break
+ # TODO This never switches back to a byte-encoding from a short-encoding.
+ # That's suboptimal.
+ if useByteEncoding:
+ result.append(delta)
+ else:
+ result.append(delta >> 8)
+ result.append(delta & 0xFF)
+ lastValue = curValue
+ pos += 1
+ runLength += 1
+ if useByteEncoding:
+ result[headerPos] = runLength - 1
+ else:
+ result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS
+
+ return result
+
+ @staticmethod
+ def decompilePoints_(numPoints, data, offset, tableTag):
+ """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
+ assert tableTag in ("cvar", "gvar")
+ pos = offset
+ numPointsInData = data[pos]
+ pos += 1
+ if (numPointsInData & POINTS_ARE_WORDS) != 0:
+ numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
+ pos += 1
+ if numPointsInData == 0:
+ return (range(numPoints), pos)
+
+ result = []
+ while len(result) < numPointsInData:
+ runHeader = data[pos]
+ pos += 1
+ numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
+ point = 0
+ if (runHeader & POINTS_ARE_WORDS) != 0:
+ points = array.array("H")
+ pointsSize = numPointsInRun * 2
+ else:
+ points = array.array("B")
+ pointsSize = numPointsInRun
+ points.frombytes(data[pos : pos + pointsSize])
+ if sys.byteorder != "big":
+ points.byteswap()
+
+ assert len(points) == numPointsInRun
+ pos += pointsSize
+
+ result.extend(points)
+
+ # Convert relative to absolute
+ absolute = []
+ current = 0
+ for delta in result:
+ current += delta
+ absolute.append(current)
+ result = absolute
+ del absolute
+
+ badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
+ if badPoints:
+ log.warning(
+ "point %s out of range in '%s' table"
+ % (",".join(sorted(badPoints)), tableTag)
+ )
+ return (result, pos)
+
+ def compileDeltas(self):
+ deltaX = []
+ deltaY = []
+ if self.getCoordWidth() == 2:
+ for c in self.coordinates:
+ if c is None:
+ continue
+ deltaX.append(c[0])
+ deltaY.append(c[1])
+ else:
+ for c in self.coordinates:
+ if c is None:
+ continue
+ deltaX.append(c)
+ bytearr = bytearray()
+ self.compileDeltaValues_(deltaX, bytearr)
+ self.compileDeltaValues_(deltaY, bytearr)
+ return bytearr
+
+ @staticmethod
+ def compileDeltaValues_(deltas, bytearr=None):
+ """[value1, value2, value3, ...] --> bytearray
+
+ Emits a sequence of runs. Each run starts with a
+ byte-sized header whose 6 least significant bits
+ (header & 0x3F) indicate how many values are encoded
+ in this run. The stored length is the actual length
+ minus one; run lengths are thus in the range [1..64].
+ If the header byte has its most significant bit (0x80)
+ set, all values in this run are zero, and no data
+ follows. Otherwise, the header byte is followed by
+ ((header & 0x3F) + 1) signed values. If (header &
+ 0x40) is clear, the delta values are stored as signed
+ bytes; if (header & 0x40) is set, the delta values are
+ signed 16-bit integers.
+ """ # Explaining the format because the 'gvar' spec is hard to understand.
+ if bytearr is None:
+ bytearr = bytearray()
+ pos = 0
+ numDeltas = len(deltas)
+ while pos < numDeltas:
+ value = deltas[pos]
+ if value == 0:
+ pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
+ elif -128 <= value <= 127:
+ pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
+ else:
+ pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
+ return bytearr
+
+ @staticmethod
+ def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
+ pos = offset
+ numDeltas = len(deltas)
+ while pos < numDeltas and deltas[pos] == 0:
+ pos += 1
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(DELTAS_ARE_ZERO | 63)
+ runLength -= 64
+ if runLength:
+ bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
+ return pos
+
+ @staticmethod
+ def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
+ pos = offset
+ numDeltas = len(deltas)
+ while pos < numDeltas:
+ value = deltas[pos]
+ if not (-128 <= value <= 127):
+ break
+ # Within a byte-encoded run of deltas, a single zero
+ # is best stored literally as 0x00 value. However,
+ # if are two or more zeroes in a sequence, it is
+ # better to start a new run. For example, the sequence
+ # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
+ # (04 0F 0F 00 0F 0F) when storing the zero value
+ # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
+ # when starting a new run.
+ if value == 0 and pos + 1 < numDeltas and deltas[pos + 1] == 0:
+ break
+ pos += 1
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(63)
+ bytearr.extend(array.array("b", deltas[offset : offset + 64]))
+ offset += 64
+ runLength -= 64
+ if runLength:
+ bytearr.append(runLength - 1)
+ bytearr.extend(array.array("b", deltas[offset:pos]))
+ return pos
+
+ @staticmethod
+ def encodeDeltaRunAsWords_(deltas, offset, bytearr):
+ pos = offset
+ numDeltas = len(deltas)
+ while pos < numDeltas:
+ value = deltas[pos]
+ # Within a word-encoded run of deltas, it is easiest
+ # to start a new run (with a different encoding)
+ # whenever we encounter a zero value. For example,
+ # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
+ # storing the zero literally (42 66 66 00 00 77 77),
+ # and equally 7 bytes when starting a new run
+ # (40 66 66 80 40 77 77).
+ if value == 0:
+ break
+
+ # Within a word-encoded run of deltas, a single value
+ # in the range (-128..127) should be encoded literally
+ # because it is more compact. For example, the sequence
+ # [0x6666, 2, 0x7777] becomes 7 bytes when storing
+ # the value literally (42 66 66 00 02 77 77), but 8 bytes
+ # when starting a new run (40 66 66 00 02 40 77 77).
+ if (
+ (-128 <= value <= 127)
+ and pos + 1 < numDeltas
+ and (-128 <= deltas[pos + 1] <= 127)
+ ):
+ break
+ pos += 1
+ runLength = pos - offset
+ while runLength >= 64:
+ bytearr.append(DELTAS_ARE_WORDS | 63)
+ a = array.array("h", deltas[offset : offset + 64])
+ if sys.byteorder != "big":
+ a.byteswap()
+ bytearr.extend(a)
+ offset += 64
+ runLength -= 64
+ if runLength:
+ bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
+ a = array.array("h", deltas[offset:pos])
+ if sys.byteorder != "big":
+ a.byteswap()
+ bytearr.extend(a)
+ return pos
+
+ @staticmethod
+ def decompileDeltas_(numDeltas, data, offset):
+ """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
+ result = []
+ pos = offset
+ while len(result) < numDeltas:
+ runHeader = data[pos]
+ pos += 1
+ numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
+ if (runHeader & DELTAS_ARE_ZERO) != 0:
+ result.extend([0] * numDeltasInRun)
+ else:
+ if (runHeader & DELTAS_ARE_WORDS) != 0:
+ deltas = array.array("h")
+ deltasSize = numDeltasInRun * 2
+ else:
+ deltas = array.array("b")
+ deltasSize = numDeltasInRun
+ deltas.frombytes(data[pos : pos + deltasSize])
+ if sys.byteorder != "big":
+ deltas.byteswap()
+ assert len(deltas) == numDeltasInRun
+ pos += deltasSize
+ result.extend(deltas)
+ assert len(result) == numDeltas
+ return (result, pos)
+
+ @staticmethod
+ def getTupleSize_(flags, axisCount):
+ size = 4
+ if (flags & EMBEDDED_PEAK_TUPLE) != 0:
+ size += axisCount * 2
+ if (flags & INTERMEDIATE_REGION) != 0:
+ size += axisCount * 4
+ return size
+
+ def getCoordWidth(self):
+ """Return 2 if coordinates are (x, y) as in gvar, 1 if single values
+ as in cvar, or 0 if empty.
+ """
+ firstDelta = next((c for c in self.coordinates if c is not None), None)
+ if firstDelta is None:
+ return 0 # empty or has no impact
+ if type(firstDelta) in (int, float):
+ return 1
+ if type(firstDelta) is tuple and len(firstDelta) == 2:
+ return 2
+ raise TypeError(
+ "invalid type of delta; expected (int or float) number, or "
+ "Tuple[number, number]: %r" % firstDelta
+ )
+
+ def scaleDeltas(self, scalar):
+ if scalar == 1.0:
+ return # no change
+ coordWidth = self.getCoordWidth()
+ self.coordinates = [
+ None
+ if d is None
+ else d * scalar
+ if coordWidth == 1
+ else (d[0] * scalar, d[1] * scalar)
+ for d in self.coordinates
+ ]
+
+ def roundDeltas(self):
+ coordWidth = self.getCoordWidth()
+ self.coordinates = [
+ None
+ if d is None
+ else otRound(d)
+ if coordWidth == 1
+ else (otRound(d[0]), otRound(d[1]))
+ for d in self.coordinates
+ ]
+
+ def calcInferredDeltas(self, origCoords, endPts):
+ from fontTools.varLib.iup import iup_delta
+
+ if self.getCoordWidth() == 1:
+ raise TypeError("Only 'gvar' TupleVariation can have inferred deltas")
+ if None in self.coordinates:
+ if len(self.coordinates) != len(origCoords):
+ raise ValueError(
+ "Expected len(origCoords) == %d; found %d"
+ % (len(self.coordinates), len(origCoords))
+ )
+ self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
+
+ def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
+ from fontTools.varLib.iup import iup_delta_optimize
+
+ if None in self.coordinates:
+ return # already optimized
+
+ deltaOpt = iup_delta_optimize(
+ self.coordinates, origCoords, endPts, tolerance=tolerance
+ )
+ if None in deltaOpt:
+ if isComposite and all(d is None for d in deltaOpt):
+ # Fix for macOS composites
+ # https://github.com/fonttools/fonttools/issues/1381
+ deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
+ # Use "optimized" version only if smaller...
+ varOpt = TupleVariation(self.axes, deltaOpt)
+
+ # Shouldn't matter that this is different from fvar...?
+ axisTags = sorted(self.axes.keys())
+ tupleData, auxData = self.compile(axisTags)
+ unoptimizedLength = len(tupleData) + len(auxData)
+ tupleData, auxData = varOpt.compile(axisTags)
+ optimizedLength = len(tupleData) + len(auxData)
+
+ if optimizedLength < unoptimizedLength:
+ self.coordinates = varOpt.coordinates
+
+ def __imul__(self, scalar):
+ self.scaleDeltas(scalar)
+ return self
+
+ def __iadd__(self, other):
+ if not isinstance(other, TupleVariation):
+ return NotImplemented
+ deltas1 = self.coordinates
+ length = len(deltas1)
+ deltas2 = other.coordinates
+ if len(deltas2) != length:
+ raise ValueError("cannot sum TupleVariation deltas with different lengths")
+ # 'None' values have different meanings in gvar vs cvar TupleVariations:
+ # within the gvar, when deltas are not provided explicitly for some points,
+ # they need to be inferred; whereas for the 'cvar' table, if deltas are not
+ # provided for some CVT values, then no adjustments are made (i.e. None == 0).
+ # Thus, we cannot sum deltas for gvar TupleVariations if they contain
+ # inferred inferred deltas (the latter need to be computed first using
+ # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
+ # deltas as if they are zeros.
+ if self.getCoordWidth() == 2:
+ for i, d2 in zip(range(length), deltas2):
+ d1 = deltas1[i]
+ try:
+ deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
+ except TypeError:
+ raise ValueError("cannot sum gvar deltas with inferred points")
+ else:
+ for i, d2 in zip(range(length), deltas2):
+ d1 = deltas1[i]
+ if d1 is not None and d2 is not None:
+ deltas1[i] = d1 + d2
+ elif d1 is None and d2 is not None:
+ deltas1[i] = d2
+ # elif d2 is None do nothing
+ return self
def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
- result = []
- for _ in range(sharedTupleCount):
- t, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
- result.append(t)
- return result
-
-
-def compileSharedTuples(axisTags, variations,
- MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1):
- coordCount = Counter()
- for var in variations:
- coord = var.compileCoord(axisTags)
- coordCount[coord] += 1
- # In python < 3.7, most_common() ordering is non-deterministic
- # so apply a sort to make sure the ordering is consistent.
- sharedCoords = sorted(
- coordCount.most_common(MAX_NUM_SHARED_COORDS),
- key=lambda item: (-item[1], item[0]),
- )
- return [c[0] for c in sharedCoords if c[1] > 1]
-
-
-def compileTupleVariationStore(variations, pointCount,
- axisTags, sharedTupleIndices,
- useSharedPoints=True):
- newVariations = []
- pointDatas = []
- # Compile all points and figure out sharing if desired
- sharedPoints = None
-
- # Collect, count, and compile point-sets for all variation sets
- pointSetCount = defaultdict(int)
- for v in variations:
- points = v.getUsedPoints()
- if points is None: # Empty variations
- continue
- pointSetCount[points] += 1
- newVariations.append(v)
- pointDatas.append(points)
- variations = newVariations
- del newVariations
-
- if not variations:
- return (0, b"", b"")
-
- n = len(variations[0].coordinates)
- assert all(len(v.coordinates) == n for v in variations), "Variation sets have different sizes"
-
- compiledPoints = {pointSet:TupleVariation.compilePoints(pointSet)
- for pointSet in pointSetCount}
-
- tupleVariationCount = len(variations)
- tuples = []
- data = []
-
- if useSharedPoints:
- # Find point-set which saves most bytes.
- def key(pn):
- pointSet = pn[0]
- count = pn[1]
- return len(compiledPoints[pointSet]) * (count - 1)
- sharedPoints = max(pointSetCount.items(), key=key)[0]
-
- data.append(compiledPoints[sharedPoints])
- tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS
-
- # b'' implies "use shared points"
- pointDatas = [compiledPoints[points] if points != sharedPoints else b''
- for points in pointDatas]
-
- for v,p in zip(variations, pointDatas):
- thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)
-
- tuples.append(thisTuple)
- data.append(thisData)
-
- tuples = b''.join(tuples)
- data = b''.join(data)
- return tupleVariationCount, tuples, data
-
-
-def decompileTupleVariationStore(tableTag, axisTags,
- tupleVariationCount, pointCount, sharedTuples,
- data, pos, dataPos):
- numAxes = len(axisTags)
- result = []
- if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
- sharedPoints, dataPos = TupleVariation.decompilePoints_(
- pointCount, data, dataPos, tableTag)
- else:
- sharedPoints = []
- for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
- dataSize, flags = struct.unpack(">HH", data[pos:pos+4])
- tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
- tupleData = data[pos : pos + tupleSize]
- pointDeltaData = data[dataPos : dataPos + dataSize]
- result.append(decompileTupleVariation_(
- pointCount, sharedTuples, sharedPoints,
- tableTag, axisTags, tupleData, pointDeltaData))
- pos += tupleSize
- dataPos += dataSize
- return result
-
-
-def decompileTupleVariation_(pointCount, sharedTuples, sharedPoints,
- tableTag, axisTags, data, tupleData):
- assert tableTag in ("cvar", "gvar"), tableTag
- flags = struct.unpack(">H", data[2:4])[0]
- pos = 4
- if (flags & EMBEDDED_PEAK_TUPLE) == 0:
- peak = sharedTuples[flags & TUPLE_INDEX_MASK]
- else:
- peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
- if (flags & INTERMEDIATE_REGION) != 0:
- start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
- end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
- else:
- start, end = inferRegion_(peak)
- axes = {}
- for axis in axisTags:
- region = start[axis], peak[axis], end[axis]
- if region != (0.0, 0.0, 0.0):
- axes[axis] = region
- pos = 0
- if (flags & PRIVATE_POINT_NUMBERS) != 0:
- points, pos = TupleVariation.decompilePoints_(
- pointCount, tupleData, pos, tableTag)
- else:
- points = sharedPoints
-
- deltas = [None] * pointCount
-
- if tableTag == "cvar":
- deltas_cvt, pos = TupleVariation.decompileDeltas_(
- len(points), tupleData, pos)
- for p, delta in zip(points, deltas_cvt):
- if 0 <= p < pointCount:
- deltas[p] = delta
-
- elif tableTag == "gvar":
- deltas_x, pos = TupleVariation.decompileDeltas_(
- len(points), tupleData, pos)
- deltas_y, pos = TupleVariation.decompileDeltas_(
- len(points), tupleData, pos)
- for p, x, y in zip(points, deltas_x, deltas_y):
- if 0 <= p < pointCount:
- deltas[p] = (x, y)
-
- return TupleVariation(axes, deltas)
+ result = []
+ for _ in range(sharedTupleCount):
+ t, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
+ result.append(t)
+ return result
+
+
+def compileSharedTuples(
+ axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
+):
+ coordCount = Counter()
+ for var in variations:
+ coord = var.compileCoord(axisTags)
+ coordCount[coord] += 1
+ # In python < 3.7, most_common() ordering is non-deterministic
+ # so apply a sort to make sure the ordering is consistent.
+ sharedCoords = sorted(
+ coordCount.most_common(MAX_NUM_SHARED_COORDS),
+ key=lambda item: (-item[1], item[0]),
+ )
+ return [c[0] for c in sharedCoords if c[1] > 1]
+
+
+def compileTupleVariationStore(
+ variations, pointCount, axisTags, sharedTupleIndices, useSharedPoints=True
+):
+ # pointCount is actually unused. Keeping for API compat.
+ del pointCount
+ newVariations = []
+ pointDatas = []
+ # Compile all points and figure out sharing if desired
+ sharedPoints = None
+
+ # Collect, count, and compile point-sets for all variation sets
+ pointSetCount = defaultdict(int)
+ for v in variations:
+ points = v.getUsedPoints()
+ if points is None: # Empty variations
+ continue
+ pointSetCount[points] += 1
+ newVariations.append(v)
+ pointDatas.append(points)
+ variations = newVariations
+ del newVariations
+
+ if not variations:
+ return (0, b"", b"")
+
+ n = len(variations[0].coordinates)
+ assert all(
+ len(v.coordinates) == n for v in variations
+ ), "Variation sets have different sizes"
+
+ compiledPoints = {
+ pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount
+ }
+
+ tupleVariationCount = len(variations)
+ tuples = []
+ data = []
+
+ if useSharedPoints:
+ # Find point-set which saves most bytes.
+ def key(pn):
+ pointSet = pn[0]
+ count = pn[1]
+ return len(compiledPoints[pointSet]) * (count - 1)
+
+ sharedPoints = max(pointSetCount.items(), key=key)[0]
+
+ data.append(compiledPoints[sharedPoints])
+ tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS
+
+ # b'' implies "use shared points"
+ pointDatas = [
+ compiledPoints[points] if points != sharedPoints else b""
+ for points in pointDatas
+ ]
+
+ for v, p in zip(variations, pointDatas):
+ thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)
+
+ tuples.append(thisTuple)
+ data.append(thisData)
+
+ tuples = b"".join(tuples)
+ data = b"".join(data)
+ return tupleVariationCount, tuples, data
+
+
+def decompileTupleVariationStore(
+ tableTag,
+ axisTags,
+ tupleVariationCount,
+ pointCount,
+ sharedTuples,
+ data,
+ pos,
+ dataPos,
+):
+ numAxes = len(axisTags)
+ result = []
+ if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
+ sharedPoints, dataPos = TupleVariation.decompilePoints_(
+ pointCount, data, dataPos, tableTag
+ )
+ else:
+ sharedPoints = []
+ for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
+ dataSize, flags = struct.unpack(">HH", data[pos : pos + 4])
+ tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
+ tupleData = data[pos : pos + tupleSize]
+ pointDeltaData = data[dataPos : dataPos + dataSize]
+ result.append(
+ decompileTupleVariation_(
+ pointCount,
+ sharedTuples,
+ sharedPoints,
+ tableTag,
+ axisTags,
+ tupleData,
+ pointDeltaData,
+ )
+ )
+ pos += tupleSize
+ dataPos += dataSize
+ return result
+
+
+def decompileTupleVariation_(
+ pointCount, sharedTuples, sharedPoints, tableTag, axisTags, data, tupleData
+):
+ assert tableTag in ("cvar", "gvar"), tableTag
+ flags = struct.unpack(">H", data[2:4])[0]
+ pos = 4
+ if (flags & EMBEDDED_PEAK_TUPLE) == 0:
+ peak = sharedTuples[flags & TUPLE_INDEX_MASK]
+ else:
+ peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
+ if (flags & INTERMEDIATE_REGION) != 0:
+ start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
+ end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
+ else:
+ start, end = inferRegion_(peak)
+ axes = {}
+ for axis in axisTags:
+ region = start[axis], peak[axis], end[axis]
+ if region != (0.0, 0.0, 0.0):
+ axes[axis] = region
+ pos = 0
+ if (flags & PRIVATE_POINT_NUMBERS) != 0:
+ points, pos = TupleVariation.decompilePoints_(
+ pointCount, tupleData, pos, tableTag
+ )
+ else:
+ points = sharedPoints
+
+ deltas = [None] * pointCount
+
+ if tableTag == "cvar":
+ deltas_cvt, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
+ for p, delta in zip(points, deltas_cvt):
+ if 0 <= p < pointCount:
+ deltas[p] = delta
+
+ elif tableTag == "gvar":
+ deltas_x, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
+ deltas_y, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
+ for p, x, y in zip(points, deltas_x, deltas_y):
+ if 0 <= p < pointCount:
+ deltas[p] = (x, y)
+
+ return TupleVariation(axes, deltas)
def inferRegion_(peak):
- """Infer start and end for a (non-intermediate) region
-
- This helper function computes the applicability region for
- variation tuples whose INTERMEDIATE_REGION flag is not set in the
- TupleVariationHeader structure. Variation tuples apply only to
- certain regions of the variation space; outside that region, the
- tuple has no effect. To make the binary encoding more compact,
- TupleVariationHeaders can omit the intermediateStartTuple and
- intermediateEndTuple fields.
+ """Infer start and end for a (non-intermediate) region
+
+ This helper function computes the applicability region for
+ variation tuples whose INTERMEDIATE_REGION flag is not set in the
+ TupleVariationHeader structure. Variation tuples apply only to
+ certain regions of the variation space; outside that region, the
+ tuple has no effect. To make the binary encoding more compact,
+ TupleVariationHeaders can omit the intermediateStartTuple and
+ intermediateEndTuple fields.
"""
- start, end = {}, {}
- for (axis, value) in peak.items():
- start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
- end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
- return (start, end)
+ start, end = {}, {}
+ for axis, value in peak.items():
+ start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
+ end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
+ return (start, end)
diff --git a/Lib/fontTools/ttLib/tables/V_D_M_X_.py b/Lib/fontTools/ttLib/tables/V_D_M_X_.py
index ba8593f1..0632173c 100644
--- a/Lib/fontTools/ttLib/tables/V_D_M_X_.py
+++ b/Lib/fontTools/ttLib/tables/V_D_M_X_.py
@@ -37,196 +37,205 @@ VDMX_vTableFmt = """
class table_V_D_M_X_(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ pos = 0 # track current position from to start of VDMX table
+ dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
+ pos += sstruct.calcsize(VDMX_HeaderFmt)
+ self.ratRanges = []
+ for i in range(self.numRatios):
+ ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
+ pos += sstruct.calcsize(VDMX_RatRangeFmt)
+ # the mapping between a ratio and a group is defined further below
+ ratio["groupIndex"] = None
+ self.ratRanges.append(ratio)
+ lenOffset = struct.calcsize(">H")
+ _offsets = [] # temporarily store offsets to groups
+ for i in range(self.numRatios):
+ offset = struct.unpack(">H", data[0:lenOffset])[0]
+ data = data[lenOffset:]
+ pos += lenOffset
+ _offsets.append(offset)
+ self.groups = []
+ for groupIndex in range(self.numRecs):
+ # the offset to this group from beginning of the VDMX table
+ currOffset = pos
+ group, data = sstruct.unpack2(VDMX_GroupFmt, data)
+ # the group lenght and bounding sizes are re-calculated on compile
+ recs = group.pop("recs")
+ startsz = group.pop("startsz")
+ endsz = group.pop("endsz")
+ pos += sstruct.calcsize(VDMX_GroupFmt)
+ for j in range(recs):
+ vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
+ vTableLength = sstruct.calcsize(VDMX_vTableFmt)
+ pos += vTableLength
+ # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
+ group[vTable["yPelHeight"]] = (vTable["yMax"], vTable["yMin"])
+ # make sure startsz and endsz match the calculated values
+ minSize = min(group.keys())
+ maxSize = max(group.keys())
+ assert (
+ startsz == minSize
+ ), "startsz (%s) must equal min yPelHeight (%s): group %d" % (
+ group.startsz,
+ minSize,
+ groupIndex,
+ )
+ assert (
+ endsz == maxSize
+ ), "endsz (%s) must equal max yPelHeight (%s): group %d" % (
+ group.endsz,
+ maxSize,
+ groupIndex,
+ )
+ self.groups.append(group)
+ # match the defined offsets with the current group's offset
+ for offsetIndex, offsetValue in enumerate(_offsets):
+ # when numRecs < numRatios there can more than one ratio range
+ # sharing the same VDMX group
+ if currOffset == offsetValue:
+ # map the group with the ratio range thas has the same
+ # index as the offset to that group (it took me a while..)
+ self.ratRanges[offsetIndex]["groupIndex"] = groupIndex
+ # check that all ratio ranges have a group
+ for i in range(self.numRatios):
+ ratio = self.ratRanges[i]
+ if ratio["groupIndex"] is None:
+ from fontTools import ttLib
- def decompile(self, data, ttFont):
- pos = 0 # track current position from to start of VDMX table
- dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
- pos += sstruct.calcsize(VDMX_HeaderFmt)
- self.ratRanges = []
- for i in range(self.numRatios):
- ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
- pos += sstruct.calcsize(VDMX_RatRangeFmt)
- # the mapping between a ratio and a group is defined further below
- ratio['groupIndex'] = None
- self.ratRanges.append(ratio)
- lenOffset = struct.calcsize('>H')
- _offsets = [] # temporarily store offsets to groups
- for i in range(self.numRatios):
- offset = struct.unpack('>H', data[0:lenOffset])[0]
- data = data[lenOffset:]
- pos += lenOffset
- _offsets.append(offset)
- self.groups = []
- for groupIndex in range(self.numRecs):
- # the offset to this group from beginning of the VDMX table
- currOffset = pos
- group, data = sstruct.unpack2(VDMX_GroupFmt, data)
- # the group lenght and bounding sizes are re-calculated on compile
- recs = group.pop('recs')
- startsz = group.pop('startsz')
- endsz = group.pop('endsz')
- pos += sstruct.calcsize(VDMX_GroupFmt)
- for j in range(recs):
- vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
- vTableLength = sstruct.calcsize(VDMX_vTableFmt)
- pos += vTableLength
- # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
- group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin'])
- # make sure startsz and endsz match the calculated values
- minSize = min(group.keys())
- maxSize = max(group.keys())
- assert startsz == minSize, \
- "startsz (%s) must equal min yPelHeight (%s): group %d" % \
- (group.startsz, minSize, groupIndex)
- assert endsz == maxSize, \
- "endsz (%s) must equal max yPelHeight (%s): group %d" % \
- (group.endsz, maxSize, groupIndex)
- self.groups.append(group)
- # match the defined offsets with the current group's offset
- for offsetIndex, offsetValue in enumerate(_offsets):
- # when numRecs < numRatios there can more than one ratio range
- # sharing the same VDMX group
- if currOffset == offsetValue:
- # map the group with the ratio range thas has the same
- # index as the offset to that group (it took me a while..)
- self.ratRanges[offsetIndex]['groupIndex'] = groupIndex
- # check that all ratio ranges have a group
- for i in range(self.numRatios):
- ratio = self.ratRanges[i]
- if ratio['groupIndex'] is None:
- from fontTools import ttLib
- raise ttLib.TTLibError(
- "no group defined for ratRange %d" % i)
+ raise ttLib.TTLibError("no group defined for ratRange %d" % i)
- def _getOffsets(self):
- """
- Calculate offsets to VDMX_Group records.
- For each ratRange return a list of offset values from the beginning of
- the VDMX table to a VDMX_Group.
- """
- lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
- lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
- lenOffset = struct.calcsize('>H')
- lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
- lenVTable = sstruct.calcsize(VDMX_vTableFmt)
- # offset to the first group
- pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset
- groupOffsets = []
- for group in self.groups:
- groupOffsets.append(pos)
- lenGroup = lenGroupHeader + len(group) * lenVTable
- pos += lenGroup # offset to next group
- offsets = []
- for ratio in self.ratRanges:
- groupIndex = ratio['groupIndex']
- offsets.append(groupOffsets[groupIndex])
- return offsets
+ def _getOffsets(self):
+ """
+ Calculate offsets to VDMX_Group records.
+ For each ratRange return a list of offset values from the beginning of
+ the VDMX table to a VDMX_Group.
+ """
+ lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
+ lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
+ lenOffset = struct.calcsize(">H")
+ lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
+ lenVTable = sstruct.calcsize(VDMX_vTableFmt)
+ # offset to the first group
+ pos = lenHeader + self.numRatios * lenRatRange + self.numRatios * lenOffset
+ groupOffsets = []
+ for group in self.groups:
+ groupOffsets.append(pos)
+ lenGroup = lenGroupHeader + len(group) * lenVTable
+ pos += lenGroup # offset to next group
+ offsets = []
+ for ratio in self.ratRanges:
+ groupIndex = ratio["groupIndex"]
+ offsets.append(groupOffsets[groupIndex])
+ return offsets
- def compile(self, ttFont):
- if not(self.version == 0 or self.version == 1):
- from fontTools import ttLib
- raise ttLib.TTLibError(
- "unknown format for VDMX table: version %s" % self.version)
- data = sstruct.pack(VDMX_HeaderFmt, self)
- for ratio in self.ratRanges:
- data += sstruct.pack(VDMX_RatRangeFmt, ratio)
- # recalculate offsets to VDMX groups
- for offset in self._getOffsets():
- data += struct.pack('>H', offset)
- for group in self.groups:
- recs = len(group)
- startsz = min(group.keys())
- endsz = max(group.keys())
- gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz}
- data += sstruct.pack(VDMX_GroupFmt, gHeader)
- for yPelHeight, (yMax, yMin) in sorted(group.items()):
- vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin}
- data += sstruct.pack(VDMX_vTableFmt, vTable)
- return data
+ def compile(self, ttFont):
+ if not (self.version == 0 or self.version == 1):
+ from fontTools import ttLib
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.begintag("ratRanges")
- writer.newline()
- for ratio in self.ratRanges:
- groupIndex = ratio['groupIndex']
- writer.simpletag(
- "ratRange",
- bCharSet=ratio['bCharSet'],
- xRatio=ratio['xRatio'],
- yStartRatio=ratio['yStartRatio'],
- yEndRatio=ratio['yEndRatio'],
- groupIndex=groupIndex
- )
- writer.newline()
- writer.endtag("ratRanges")
- writer.newline()
- writer.begintag("groups")
- writer.newline()
- for groupIndex in range(self.numRecs):
- group = self.groups[groupIndex]
- recs = len(group)
- startsz = min(group.keys())
- endsz = max(group.keys())
- writer.begintag("group", index=groupIndex)
- writer.newline()
- writer.comment("recs=%d, startsz=%d, endsz=%d" %
- (recs, startsz, endsz))
- writer.newline()
- for yPelHeight, (yMax, yMin) in sorted(group.items()):
- writer.simpletag(
- "record",
- [('yPelHeight', yPelHeight), ('yMax', yMax), ('yMin', yMin)])
- writer.newline()
- writer.endtag("group")
- writer.newline()
- writer.endtag("groups")
- writer.newline()
+ raise ttLib.TTLibError(
+ "unknown format for VDMX table: version %s" % self.version
+ )
+ data = sstruct.pack(VDMX_HeaderFmt, self)
+ for ratio in self.ratRanges:
+ data += sstruct.pack(VDMX_RatRangeFmt, ratio)
+ # recalculate offsets to VDMX groups
+ for offset in self._getOffsets():
+ data += struct.pack(">H", offset)
+ for group in self.groups:
+ recs = len(group)
+ startsz = min(group.keys())
+ endsz = max(group.keys())
+ gHeader = {"recs": recs, "startsz": startsz, "endsz": endsz}
+ data += sstruct.pack(VDMX_GroupFmt, gHeader)
+ for yPelHeight, (yMax, yMin) in sorted(group.items()):
+ vTable = {"yPelHeight": yPelHeight, "yMax": yMax, "yMin": yMin}
+ data += sstruct.pack(VDMX_vTableFmt, vTable)
+ return data
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version":
- self.version = safeEval(attrs["value"])
- elif name == "ratRanges":
- if not hasattr(self, "ratRanges"):
- self.ratRanges = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "ratRange":
- if not hasattr(self, "numRatios"):
- self.numRatios = 1
- else:
- self.numRatios += 1
- ratio = {
- "bCharSet": safeEval(attrs["bCharSet"]),
- "xRatio": safeEval(attrs["xRatio"]),
- "yStartRatio": safeEval(attrs["yStartRatio"]),
- "yEndRatio": safeEval(attrs["yEndRatio"]),
- "groupIndex": safeEval(attrs["groupIndex"])
- }
- self.ratRanges.append(ratio)
- elif name == "groups":
- if not hasattr(self, "groups"):
- self.groups = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "group":
- if not hasattr(self, "numRecs"):
- self.numRecs = 1
- else:
- self.numRecs += 1
- group = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "record":
- yPelHeight = safeEval(attrs["yPelHeight"])
- yMax = safeEval(attrs["yMax"])
- yMin = safeEval(attrs["yMin"])
- group[yPelHeight] = (yMax, yMin)
- self.groups.append(group)
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.begintag("ratRanges")
+ writer.newline()
+ for ratio in self.ratRanges:
+ groupIndex = ratio["groupIndex"]
+ writer.simpletag(
+ "ratRange",
+ bCharSet=ratio["bCharSet"],
+ xRatio=ratio["xRatio"],
+ yStartRatio=ratio["yStartRatio"],
+ yEndRatio=ratio["yEndRatio"],
+ groupIndex=groupIndex,
+ )
+ writer.newline()
+ writer.endtag("ratRanges")
+ writer.newline()
+ writer.begintag("groups")
+ writer.newline()
+ for groupIndex in range(self.numRecs):
+ group = self.groups[groupIndex]
+ recs = len(group)
+ startsz = min(group.keys())
+ endsz = max(group.keys())
+ writer.begintag("group", index=groupIndex)
+ writer.newline()
+ writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz))
+ writer.newline()
+ for yPelHeight, (yMax, yMin) in sorted(group.items()):
+ writer.simpletag(
+ "record",
+ [("yPelHeight", yPelHeight), ("yMax", yMax), ("yMin", yMin)],
+ )
+ writer.newline()
+ writer.endtag("group")
+ writer.newline()
+ writer.endtag("groups")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ elif name == "ratRanges":
+ if not hasattr(self, "ratRanges"):
+ self.ratRanges = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "ratRange":
+ if not hasattr(self, "numRatios"):
+ self.numRatios = 1
+ else:
+ self.numRatios += 1
+ ratio = {
+ "bCharSet": safeEval(attrs["bCharSet"]),
+ "xRatio": safeEval(attrs["xRatio"]),
+ "yStartRatio": safeEval(attrs["yStartRatio"]),
+ "yEndRatio": safeEval(attrs["yEndRatio"]),
+ "groupIndex": safeEval(attrs["groupIndex"]),
+ }
+ self.ratRanges.append(ratio)
+ elif name == "groups":
+ if not hasattr(self, "groups"):
+ self.groups = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "group":
+ if not hasattr(self, "numRecs"):
+ self.numRecs = 1
+ else:
+ self.numRecs += 1
+ group = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "record":
+ yPelHeight = safeEval(attrs["yPelHeight"])
+ yMax = safeEval(attrs["yMax"])
+ yMin = safeEval(attrs["yMin"])
+ group[yPelHeight] = (yMax, yMin)
+ self.groups.append(group)
diff --git a/Lib/fontTools/ttLib/tables/V_O_R_G_.py b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
index e03e164b..4508c137 100644
--- a/Lib/fontTools/ttLib/tables/V_O_R_G_.py
+++ b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
@@ -5,135 +5,155 @@ import struct
class table_V_O_R_G_(DefaultTable.DefaultTable):
- """This table is structured so that you can treat it like a dictionary keyed by glyph name.
-
- ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.
-
- ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph.
- """
-
- def decompile(self, data, ttFont):
- self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
- self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics = struct.unpack(">HHhH", data[:8])
- assert (self.majorVersion <= 1), "Major version of VORG table is higher than I know how to handle"
- data = data[8:]
- vids = []
- gids = []
- pos = 0
- for i in range(self.numVertOriginYMetrics):
- gid, vOrigin = struct.unpack(">Hh", data[pos:pos+4])
- pos += 4
- gids.append(gid)
- vids.append(vOrigin)
-
- self.VOriginRecords = vOrig = {}
- glyphOrder = ttFont.getGlyphOrder()
- try:
- names = [glyphOrder[gid] for gid in gids]
- except IndexError:
- getGlyphName = self.getGlyphName
- names = map(getGlyphName, gids)
-
- for name, vid in zip(names, vids):
- vOrig[name] = vid
-
- def compile(self, ttFont):
- vorgs = list(self.VOriginRecords.values())
- names = list(self.VOriginRecords.keys())
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- gids = [nameMap[name] for name in names]
- vOriginTable = list(zip(gids, vorgs))
- self.numVertOriginYMetrics = len(vorgs)
- vOriginTable.sort() # must be in ascending GID order
- dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
- header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics)
- dataList.insert(0, header)
- data = bytesjoin(dataList)
- return data
-
- def toXML(self, writer, ttFont):
- writer.simpletag("majorVersion", value=self.majorVersion)
- writer.newline()
- writer.simpletag("minorVersion", value=self.minorVersion)
- writer.newline()
- writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
- writer.newline()
- writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
- writer.newline()
- vOriginTable = []
- glyphNames = self.VOriginRecords.keys()
- for glyphName in glyphNames:
- try:
- gid = ttFont.getGlyphID(glyphName)
- except:
- assert 0, "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
- vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
- vOriginTable.sort()
- for entry in vOriginTable:
- vOriginRec = VOriginRecord(entry[1], entry[2])
- vOriginRec.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "VOriginRecords"):
- self.VOriginRecords = {}
- self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
- if name == "VOriginRecord":
- vOriginRec = VOriginRecord()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- vOriginRec.fromXML(name, attrs, content, ttFont)
- self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
- elif "value" in attrs:
- setattr(self, name, safeEval(attrs["value"]))
-
- def __getitem__(self, glyphSelector):
- if isinstance(glyphSelector, int):
- # its a gid, convert to glyph name
- glyphSelector = self.getGlyphName(glyphSelector)
-
- if glyphSelector not in self.VOriginRecords:
- return self.defaultVertOriginY
-
- return self.VOriginRecords[glyphSelector]
-
- def __setitem__(self, glyphSelector, value):
- if isinstance(glyphSelector, int):
- # its a gid, convert to glyph name
- glyphSelector = self.getGlyphName(glyphSelector)
-
- if value != self.defaultVertOriginY:
- self.VOriginRecords[glyphSelector] = value
- elif glyphSelector in self.VOriginRecords:
- del self.VOriginRecords[glyphSelector]
-
- def __delitem__(self, glyphSelector):
- del self.VOriginRecords[glyphSelector]
+ """This table is structured so that you can treat it like a dictionary keyed by glyph name.
+
+ ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.
+
+ ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph.
+ """
+
+ def decompile(self, data, ttFont):
+ self.getGlyphName = (
+ ttFont.getGlyphName
+ ) # for use in get/set item functions, for access by GID
+ (
+ self.majorVersion,
+ self.minorVersion,
+ self.defaultVertOriginY,
+ self.numVertOriginYMetrics,
+ ) = struct.unpack(">HHhH", data[:8])
+ assert (
+ self.majorVersion <= 1
+ ), "Major version of VORG table is higher than I know how to handle"
+ data = data[8:]
+ vids = []
+ gids = []
+ pos = 0
+ for i in range(self.numVertOriginYMetrics):
+ gid, vOrigin = struct.unpack(">Hh", data[pos : pos + 4])
+ pos += 4
+ gids.append(gid)
+ vids.append(vOrigin)
+
+ self.VOriginRecords = vOrig = {}
+ glyphOrder = ttFont.getGlyphOrder()
+ try:
+ names = [glyphOrder[gid] for gid in gids]
+ except IndexError:
+ getGlyphName = self.getGlyphName
+ names = map(getGlyphName, gids)
+
+ for name, vid in zip(names, vids):
+ vOrig[name] = vid
+
+ def compile(self, ttFont):
+ vorgs = list(self.VOriginRecords.values())
+ names = list(self.VOriginRecords.keys())
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ gids = [nameMap[name] for name in names]
+ vOriginTable = list(zip(gids, vorgs))
+ self.numVertOriginYMetrics = len(vorgs)
+ vOriginTable.sort() # must be in ascending GID order
+ dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
+ header = struct.pack(
+ ">HHhH",
+ self.majorVersion,
+ self.minorVersion,
+ self.defaultVertOriginY,
+ self.numVertOriginYMetrics,
+ )
+ dataList.insert(0, header)
+ data = bytesjoin(dataList)
+ return data
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("majorVersion", value=self.majorVersion)
+ writer.newline()
+ writer.simpletag("minorVersion", value=self.minorVersion)
+ writer.newline()
+ writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
+ writer.newline()
+ writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
+ writer.newline()
+ vOriginTable = []
+ glyphNames = self.VOriginRecords.keys()
+ for glyphName in glyphNames:
+ try:
+ gid = ttFont.getGlyphID(glyphName)
+ except:
+ assert 0, (
+ "VORG table contains a glyph name not in ttFont.getGlyphNames(): "
+ + str(glyphName)
+ )
+ vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
+ vOriginTable.sort()
+ for entry in vOriginTable:
+ vOriginRec = VOriginRecord(entry[1], entry[2])
+ vOriginRec.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "VOriginRecords"):
+ self.VOriginRecords = {}
+ self.getGlyphName = (
+ ttFont.getGlyphName
+ ) # for use in get/set item functions, for access by GID
+ if name == "VOriginRecord":
+ vOriginRec = VOriginRecord()
+ for element in content:
+ if isinstance(element, str):
+ continue
+ name, attrs, content = element
+ vOriginRec.fromXML(name, attrs, content, ttFont)
+ self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
+ elif "value" in attrs:
+ setattr(self, name, safeEval(attrs["value"]))
+
+ def __getitem__(self, glyphSelector):
+ if isinstance(glyphSelector, int):
+ # its a gid, convert to glyph name
+ glyphSelector = self.getGlyphName(glyphSelector)
+
+ if glyphSelector not in self.VOriginRecords:
+ return self.defaultVertOriginY
+
+ return self.VOriginRecords[glyphSelector]
+
+ def __setitem__(self, glyphSelector, value):
+ if isinstance(glyphSelector, int):
+ # its a gid, convert to glyph name
+ glyphSelector = self.getGlyphName(glyphSelector)
+
+ if value != self.defaultVertOriginY:
+ self.VOriginRecords[glyphSelector] = value
+ elif glyphSelector in self.VOriginRecords:
+ del self.VOriginRecords[glyphSelector]
+
+ def __delitem__(self, glyphSelector):
+ del self.VOriginRecords[glyphSelector]
-class VOriginRecord(object):
- def __init__(self, name=None, vOrigin=None):
- self.glyphName = name
- self.vOrigin = vOrigin
-
- def toXML(self, writer, ttFont):
- writer.begintag("VOriginRecord")
- writer.newline()
- writer.simpletag("glyphName", value=self.glyphName)
- writer.newline()
- writer.simpletag("vOrigin", value=self.vOrigin)
- writer.newline()
- writer.endtag("VOriginRecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name == "glyphName":
- setattr(self, name, value)
- else:
- setattr(self, name, safeEval(value))
+class VOriginRecord(object):
+ def __init__(self, name=None, vOrigin=None):
+ self.glyphName = name
+ self.vOrigin = vOrigin
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("VOriginRecord")
+ writer.newline()
+ writer.simpletag("glyphName", value=self.glyphName)
+ writer.newline()
+ writer.simpletag("vOrigin", value=self.vOrigin)
+ writer.newline()
+ writer.endtag("VOriginRecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ if name == "glyphName":
+ setattr(self, name, value)
+ else:
+ setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/V_V_A_R_.py b/Lib/fontTools/ttLib/tables/V_V_A_R_.py
index 88f30552..a3665fea 100644
--- a/Lib/fontTools/ttLib/tables/V_V_A_R_.py
+++ b/Lib/fontTools/ttLib/tables/V_V_A_R_.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_V_V_A_R_(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/__init__.py b/Lib/fontTools/ttLib/tables/__init__.py
index bbfb8b70..f4cba26b 100644
--- a/Lib/fontTools/ttLib/tables/__init__.py
+++ b/Lib/fontTools/ttLib/tables/__init__.py
@@ -1,95 +1,96 @@
-
# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
def _moduleFinderHint():
- """Dummy function to let modulefinder know what tables may be
- dynamically imported. Generated by MetaTools/buildTableList.py.
+ """Dummy function to let modulefinder know what tables may be
+ dynamically imported. Generated by MetaTools/buildTableList.py.
+
+ >>> _moduleFinderHint()
+ """
+ from . import B_A_S_E_
+ from . import C_B_D_T_
+ from . import C_B_L_C_
+ from . import C_F_F_
+ from . import C_F_F__2
+ from . import C_O_L_R_
+ from . import C_P_A_L_
+ from . import D_S_I_G_
+ from . import D__e_b_g
+ from . import E_B_D_T_
+ from . import E_B_L_C_
+ from . import F_F_T_M_
+ from . import F__e_a_t
+ from . import G_D_E_F_
+ from . import G_M_A_P_
+ from . import G_P_K_G_
+ from . import G_P_O_S_
+ from . import G_S_U_B_
+ from . import G__l_a_t
+ from . import G__l_o_c
+ from . import H_V_A_R_
+ from . import J_S_T_F_
+ from . import L_T_S_H_
+ from . import M_A_T_H_
+ from . import M_E_T_A_
+ from . import M_V_A_R_
+ from . import O_S_2f_2
+ from . import S_I_N_G_
+ from . import S_T_A_T_
+ from . import S_V_G_
+ from . import S__i_l_f
+ from . import S__i_l_l
+ from . import T_S_I_B_
+ from . import T_S_I_C_
+ from . import T_S_I_D_
+ from . import T_S_I_J_
+ from . import T_S_I_P_
+ from . import T_S_I_S_
+ from . import T_S_I_V_
+ from . import T_S_I__0
+ from . import T_S_I__1
+ from . import T_S_I__2
+ from . import T_S_I__3
+ from . import T_S_I__5
+ from . import T_T_F_A_
+ from . import V_D_M_X_
+ from . import V_O_R_G_
+ from . import V_V_A_R_
+ from . import _a_n_k_r
+ from . import _a_v_a_r
+ from . import _b_s_l_n
+ from . import _c_i_d_g
+ from . import _c_m_a_p
+ from . import _c_v_a_r
+ from . import _c_v_t
+ from . import _f_e_a_t
+ from . import _f_p_g_m
+ from . import _f_v_a_r
+ from . import _g_a_s_p
+ from . import _g_c_i_d
+ from . import _g_l_y_f
+ from . import _g_v_a_r
+ from . import _h_d_m_x
+ from . import _h_e_a_d
+ from . import _h_h_e_a
+ from . import _h_m_t_x
+ from . import _k_e_r_n
+ from . import _l_c_a_r
+ from . import _l_o_c_a
+ from . import _l_t_a_g
+ from . import _m_a_x_p
+ from . import _m_e_t_a
+ from . import _m_o_r_t
+ from . import _m_o_r_x
+ from . import _n_a_m_e
+ from . import _o_p_b_d
+ from . import _p_o_s_t
+ from . import _p_r_e_p
+ from . import _p_r_o_p
+ from . import _s_b_i_x
+ from . import _t_r_a_k
+ from . import _v_h_e_a
+ from . import _v_m_t_x
- >>> _moduleFinderHint()
- """
- from . import B_A_S_E_
- from . import C_B_D_T_
- from . import C_B_L_C_
- from . import C_F_F_
- from . import C_F_F__2
- from . import C_O_L_R_
- from . import C_P_A_L_
- from . import D_S_I_G_
- from . import D__e_b_g
- from . import E_B_D_T_
- from . import E_B_L_C_
- from . import F_F_T_M_
- from . import F__e_a_t
- from . import G_D_E_F_
- from . import G_M_A_P_
- from . import G_P_K_G_
- from . import G_P_O_S_
- from . import G_S_U_B_
- from . import G__l_a_t
- from . import G__l_o_c
- from . import H_V_A_R_
- from . import J_S_T_F_
- from . import L_T_S_H_
- from . import M_A_T_H_
- from . import M_E_T_A_
- from . import M_V_A_R_
- from . import O_S_2f_2
- from . import S_I_N_G_
- from . import S_T_A_T_
- from . import S_V_G_
- from . import S__i_l_f
- from . import S__i_l_l
- from . import T_S_I_B_
- from . import T_S_I_C_
- from . import T_S_I_D_
- from . import T_S_I_J_
- from . import T_S_I_P_
- from . import T_S_I_S_
- from . import T_S_I_V_
- from . import T_S_I__0
- from . import T_S_I__1
- from . import T_S_I__2
- from . import T_S_I__3
- from . import T_S_I__5
- from . import T_T_F_A_
- from . import V_D_M_X_
- from . import V_O_R_G_
- from . import V_V_A_R_
- from . import _a_n_k_r
- from . import _a_v_a_r
- from . import _b_s_l_n
- from . import _c_i_d_g
- from . import _c_m_a_p
- from . import _c_v_a_r
- from . import _c_v_t
- from . import _f_e_a_t
- from . import _f_p_g_m
- from . import _f_v_a_r
- from . import _g_a_s_p
- from . import _g_c_i_d
- from . import _g_l_y_f
- from . import _g_v_a_r
- from . import _h_d_m_x
- from . import _h_e_a_d
- from . import _h_h_e_a
- from . import _h_m_t_x
- from . import _k_e_r_n
- from . import _l_c_a_r
- from . import _l_o_c_a
- from . import _l_t_a_g
- from . import _m_a_x_p
- from . import _m_e_t_a
- from . import _m_o_r_t
- from . import _m_o_r_x
- from . import _n_a_m_e
- from . import _o_p_b_d
- from . import _p_o_s_t
- from . import _p_r_e_p
- from . import _p_r_o_p
- from . import _s_b_i_x
- from . import _t_r_a_k
- from . import _v_h_e_a
- from . import _v_m_t_x
if __name__ == "__main__":
- import doctest, sys
- sys.exit(doctest.testmod().failed)
+ import doctest, sys
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/tables/_a_n_k_r.py b/Lib/fontTools/ttLib/tables/_a_n_k_r.py
index 16f5c184..d1062ecc 100644
--- a/Lib/fontTools/ttLib/tables/_a_n_k_r.py
+++ b/Lib/fontTools/ttLib/tables/_a_n_k_r.py
@@ -1,5 +1,6 @@
from .otBase import BaseTTXConverter
+
class table__a_n_k_r(BaseTTXConverter):
"""
The anchor point table provides a way to define anchor points.
@@ -9,4 +10,5 @@ class table__a_n_k_r(BaseTTXConverter):
See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
"""
+
pass
diff --git a/Lib/fontTools/ttLib/tables/_a_v_a_r.py b/Lib/fontTools/ttLib/tables/_a_v_a_r.py
index 16f2a219..39039cf7 100644
--- a/Lib/fontTools/ttLib/tables/_a_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_a_v_a_r.py
@@ -5,29 +5,20 @@ from fontTools.misc.fixedTools import (
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
-from fontTools.misc.textTools import bytesjoin
+from fontTools.misc.textTools import bytesjoin, safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
+from . import otTables
import struct
import logging
log = logging.getLogger(__name__)
-# Apple's documentation of 'avar':
-# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html
+from .otBase import BaseTTXConverter
-AVAR_HEADER_FORMAT = """
- > # big endian
- majorVersion: H
- minorVersion: H
- reserved: H
- axisCount: H
-"""
-assert sstruct.calcsize(AVAR_HEADER_FORMAT) == 8, sstruct.calcsize(AVAR_HEADER_FORMAT)
-
-class table__a_v_a_r(DefaultTable.DefaultTable):
+class table__a_v_a_r(BaseTTXConverter):
"""Axis Variations Table
This class represents the ``avar`` table of a variable font. The object has one
@@ -54,46 +45,53 @@ class table__a_v_a_r(DefaultTable.DefaultTable):
dependencies = ["fvar"]
def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
+ super().__init__(tag)
self.segments = {}
def compile(self, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- header = {
- "majorVersion": 1,
- "minorVersion": 0,
- "reserved": 0,
- "axisCount": len(axisTags)
- }
- result = [sstruct.pack(AVAR_HEADER_FORMAT, header)]
+ if not hasattr(self, "table"):
+ self.table = otTables.avar()
+ if not hasattr(self.table, "Reserved"):
+ self.table.Reserved = 0
+ self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
+ self, "minorVersion", 0
+ )
+ self.table.AxisCount = len(axisTags)
+ self.table.AxisSegmentMap = []
for axis in axisTags:
- mappings = sorted(self.segments[axis].items())
- result.append(struct.pack(">H", len(mappings)))
- for key, value in mappings:
- fixedKey = fl2fi(key, 14)
- fixedValue = fl2fi(value, 14)
- result.append(struct.pack(">hh", fixedKey, fixedValue))
- return bytesjoin(result)
+ mappings = self.segments[axis]
+ segmentMap = otTables.AxisSegmentMap()
+ segmentMap.PositionMapCount = len(mappings)
+ segmentMap.AxisValueMap = []
+ for key, value in sorted(mappings.items()):
+ valueMap = otTables.AxisValueMap()
+ valueMap.FromCoordinate = key
+ valueMap.ToCoordinate = value
+ segmentMap.AxisValueMap.append(valueMap)
+ self.table.AxisSegmentMap.append(segmentMap)
+ return super().compile(ttFont)
def decompile(self, data, ttFont):
+ super().decompile(data, ttFont)
+ assert self.table.Version >= 0x00010000
+ self.majorVersion = self.table.Version >> 16
+ self.minorVersion = self.table.Version & 0xFFFF
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- header = {}
- headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT)
- header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize])
- majorVersion = header["majorVersion"]
- if majorVersion != 1:
- raise TTLibError("unsupported 'avar' version %d" % majorVersion)
- pos = headerSize
for axis in axisTags:
+ self.segments[axis] = {}
+ for axis, segmentMap in zip(axisTags, self.table.AxisSegmentMap):
segments = self.segments[axis] = {}
- numPairs = struct.unpack(">H", data[pos:pos+2])[0]
- pos = pos + 2
- for _ in range(numPairs):
- fromValue, toValue = struct.unpack(">hh", data[pos:pos+4])
- segments[fi2fl(fromValue, 14)] = fi2fl(toValue, 14)
- pos = pos + 4
+ for segment in segmentMap.AxisValueMap:
+ segments[segment.FromCoordinate] = segment.ToCoordinate
def toXML(self, writer, ttFont):
+ writer.simpletag(
+ "version",
+ major=getattr(self, "majorVersion", 1),
+ minor=getattr(self, "minorVersion", 0),
+ )
+ writer.newline()
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
for axis in axisTags:
writer.begintag("segment", axis=axis)
@@ -105,9 +103,24 @@ class table__a_v_a_r(DefaultTable.DefaultTable):
writer.newline()
writer.endtag("segment")
writer.newline()
+ if getattr(self, "majorVersion", 1) >= 2:
+ if self.table.VarIdxMap:
+ self.table.VarIdxMap.toXML(writer, ttFont, name="VarIdxMap")
+ if self.table.VarStore:
+ self.table.VarStore.toXML(writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
- if name == "segment":
+ if not hasattr(self, "table"):
+ self.table = otTables.avar()
+ if not hasattr(self.table, "Reserved"):
+ self.table.Reserved = 0
+ if name == "version":
+ self.majorVersion = safeEval(attrs["major"])
+ self.minorVersion = safeEval(attrs["minor"])
+ self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
+ self, "minorVersion", 0
+ )
+ elif name == "segment":
axis = attrs["axis"]
segment = self.segments[axis] = {}
for element in content:
@@ -117,6 +130,9 @@ class table__a_v_a_r(DefaultTable.DefaultTable):
fromValue = str2fl(elementAttrs["from"], 14)
toValue = str2fl(elementAttrs["to"], 14)
if fromValue in segment:
- log.warning("duplicate entry for %s in axis '%s'",
- fromValue, axis)
+ log.warning(
+ "duplicate entry for %s in axis '%s'", fromValue, axis
+ )
segment[fromValue] = toValue
+ else:
+ super().fromXML(name, attrs, content, ttFont)
diff --git a/Lib/fontTools/ttLib/tables/_c_i_d_g.py b/Lib/fontTools/ttLib/tables/_c_i_d_g.py
index 2517e785..f11901ba 100644
--- a/Lib/fontTools/ttLib/tables/_c_i_d_g.py
+++ b/Lib/fontTools/ttLib/tables/_c_i_d_g.py
@@ -4,16 +4,16 @@ from .otBase import BaseTTXConverter
class table__c_i_d_g(BaseTTXConverter):
"""The AAT ``cidg`` table has almost the same structure as ``gidc``,
-just mapping CIDs to GlyphIDs instead of the reverse direction.
+ just mapping CIDs to GlyphIDs instead of the reverse direction.
-It is useful for fonts that may be used by a PDF renderer in lieu of
-a font reference with a known glyph collection but no subsetted
-glyphs. For instance, a PDF can say “please use a font conforming
-to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
-say, a TrueType font. ``gidc`` is lossy for this purpose and is
-obsoleted by ``cidg``.
+ It is useful for fonts that may be used by a PDF renderer in lieu of
+ a font reference with a known glyph collection but no subsetted
+ glyphs. For instance, a PDF can say “please use a font conforming
+ to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
+ say, a TrueType font. ``gidc`` is lossy for this purpose and is
+ obsoleted by ``cidg``.
+
+ For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
+ (which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table."""
-For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
-(which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table.
-"""
pass
diff --git a/Lib/fontTools/ttLib/tables/_c_m_a_p.py b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
index ef2b5758..484c331c 100644
--- a/Lib/fontTools/ttLib/tables/_c_m_a_p.py
+++ b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
@@ -13,1379 +13,1564 @@ log = logging.getLogger(__name__)
def _make_map(font, chars, gids):
- assert len(chars) == len(gids)
- glyphNames = font.getGlyphNameMany(gids)
- cmap = {}
- for char,gid,name in zip(chars,gids,glyphNames):
- if gid == 0:
- continue
- cmap[char] = name
- return cmap
+ assert len(chars) == len(gids)
+ glyphNames = font.getGlyphNameMany(gids)
+ cmap = {}
+ for char, gid, name in zip(chars, gids, glyphNames):
+ if gid == 0:
+ continue
+ cmap[char] = name
+ return cmap
+
class table__c_m_a_p(DefaultTable.DefaultTable):
- """Character to Glyph Index Mapping Table
-
- This class represents the `cmap <https://docs.microsoft.com/en-us/typography/opentype/spec/cmap>`_
- table, which maps between input characters (in Unicode or other system encodings)
- and glyphs within the font. The ``cmap`` table contains one or more subtables
- which determine the mapping of of characters to glyphs across different platforms
- and encoding systems.
-
- ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access
- to the subtables, although it is normally easier to retrieve individual subtables
- through the utility methods described below. To add new subtables to a font,
- first determine the subtable format (if in doubt use format 4 for glyphs within
- the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation
- Sequences) construct subtable objects with ``CmapSubtable.newSubtable(format)``,
- and append them to the ``.tables`` list.
-
- Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap``
- attribute.
-
- Example::
-
- cmap4_0_3 = CmapSubtable.newSubtable(4)
- cmap4_0_3.platformID = 0
- cmap4_0_3.platEncID = 3
- cmap4_0_3.language = 0
- cmap4_0_3.cmap = { 0xC1: "Aacute" }
-
- cmap = newTable("cmap")
- cmap.tableVersion = 0
- cmap.tables = [cmap4_0_3]
- """
-
- def getcmap(self, platformID, platEncID):
- """Returns the first subtable which matches the given platform and encoding.
-
- Args:
- platformID (int): The platform ID. Use 0 for Unicode, 1 for Macintosh
- (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows.
- encodingID (int): Encoding ID. Interpretation depends on the platform ID.
- See the OpenType specification for details.
-
- Returns:
- An object which is a subclass of :py:class:`CmapSubtable` if a matching
- subtable is found within the font, or ``None`` otherwise.
- """
-
- for subtable in self.tables:
- if (subtable.platformID == platformID and
- subtable.platEncID == platEncID):
- return subtable
- return None # not found
-
- def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
- """Returns the 'best' Unicode cmap dictionary available in the font
- or ``None``, if no Unicode cmap subtable is available.
-
- By default it will search for the following (platformID, platEncID)
- pairs in order::
-
- (3, 10), # Windows Unicode full repertoire
- (0, 6), # Unicode full repertoire (format 13 subtable)
- (0, 4), # Unicode 2.0 full repertoire
- (3, 1), # Windows Unicode BMP
- (0, 3), # Unicode 2.0 BMP
- (0, 2), # Unicode ISO/IEC 10646
- (0, 1), # Unicode 1.1
- (0, 0) # Unicode 1.0
-
- This particular order matches what HarfBuzz uses to choose what
- subtable to use by default. This order prefers the largest-repertoire
- subtable, and among those, prefers the Windows-platform over the
- Unicode-platform as the former has wider support.
-
- This order can be customized via the ``cmapPreferences`` argument.
- """
- for platformID, platEncID in cmapPreferences:
- cmapSubtable = self.getcmap(platformID, platEncID)
- if cmapSubtable is not None:
- return cmapSubtable.cmap
- return None # None of the requested cmap subtables were found
-
- def buildReversed(self):
- """Builds a reverse mapping dictionary
-
- Iterates over all Unicode cmap tables and returns a dictionary mapping
- glyphs to sets of codepoints, such as::
-
- {
- 'one': {0x31}
- 'A': {0x41,0x391}
- }
-
- The values are sets of Unicode codepoints because
- some fonts map different codepoints to the same glyph.
- For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391
- GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph.
- """
- result = {}
- for subtable in self.tables:
- if subtable.isUnicode():
- for codepoint, name in subtable.cmap.items():
- result.setdefault(name, set()).add(codepoint)
- return result
-
- def decompile(self, data, ttFont):
- tableVersion, numSubTables = struct.unpack(">HH", data[:4])
- self.tableVersion = int(tableVersion)
- self.tables = tables = []
- seenOffsets = {}
- for i in range(numSubTables):
- platformID, platEncID, offset = struct.unpack(
- ">HHl", data[4+i*8:4+(i+1)*8])
- platformID, platEncID = int(platformID), int(platEncID)
- format, length = struct.unpack(">HH", data[offset:offset+4])
- if format in [8,10,12,13]:
- format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
- elif format in [14]:
- format, length = struct.unpack(">HL", data[offset:offset+6])
-
- if not length:
- log.error(
- "cmap subtable is reported as having zero length: platformID %s, "
- "platEncID %s, format %s offset %s. Skipping table.",
- platformID, platEncID, format, offset)
- continue
- table = CmapSubtable.newSubtable(format)
- table.platformID = platformID
- table.platEncID = platEncID
- # Note that by default we decompile only the subtable header info;
- # any other data gets decompiled only when an attribute of the
- # subtable is referenced.
- table.decompileHeader(data[offset:offset+int(length)], ttFont)
- if offset in seenOffsets:
- table.data = None # Mark as decompiled
- table.cmap = tables[seenOffsets[offset]].cmap
- else:
- seenOffsets[offset] = i
- tables.append(table)
- if ttFont.lazy is False: # Be lazy for None and True
- self.ensureDecompiled()
-
- def ensureDecompiled(self, recurse=False):
- # The recurse argument is unused, but part of the signature of
- # ensureDecompiled across the library.
- for st in self.tables:
- st.ensureDecompiled()
-
- def compile(self, ttFont):
- self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
- numSubTables = len(self.tables)
- totalOffset = 4 + 8 * numSubTables
- data = struct.pack(">HH", self.tableVersion, numSubTables)
- tableData = b""
- seen = {} # Some tables are the same object reference. Don't compile them twice.
- done = {} # Some tables are different objects, but compile to the same data chunk
- for table in self.tables:
- offset = seen.get(id(table.cmap))
- if offset is None:
- chunk = table.compile(ttFont)
- offset = done.get(chunk)
- if offset is None:
- offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
- tableData = tableData + chunk
- data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
- return data + tableData
-
- def toXML(self, writer, ttFont):
- writer.simpletag("tableVersion", version=self.tableVersion)
- writer.newline()
- for table in self.tables:
- table.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableVersion":
- self.tableVersion = safeEval(attrs["version"])
- return
- if name[:12] != "cmap_format_":
- return
- if not hasattr(self, "tables"):
- self.tables = []
- format = safeEval(name[12:])
- table = CmapSubtable.newSubtable(format)
- table.platformID = safeEval(attrs["platformID"])
- table.platEncID = safeEval(attrs["platEncID"])
- table.fromXML(name, attrs, content, ttFont)
- self.tables.append(table)
+ """Character to Glyph Index Mapping Table
+
+ This class represents the `cmap <https://docs.microsoft.com/en-us/typography/opentype/spec/cmap>`_
+ table, which maps between input characters (in Unicode or other system encodings)
+ and glyphs within the font. The ``cmap`` table contains one or more subtables
+ which determine the mapping of of characters to glyphs across different platforms
+ and encoding systems.
+
+ ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access
+ to the subtables, although it is normally easier to retrieve individual subtables
+ through the utility methods described below. To add new subtables to a font,
+ first determine the subtable format (if in doubt use format 4 for glyphs within
+ the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation
+ Sequences) construct subtable objects with ``CmapSubtable.newSubtable(format)``,
+ and append them to the ``.tables`` list.
+
+ Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap``
+ attribute.
+
+ Example::
+
+ cmap4_0_3 = CmapSubtable.newSubtable(4)
+ cmap4_0_3.platformID = 0
+ cmap4_0_3.platEncID = 3
+ cmap4_0_3.language = 0
+ cmap4_0_3.cmap = { 0xC1: "Aacute" }
+
+ cmap = newTable("cmap")
+ cmap.tableVersion = 0
+ cmap.tables = [cmap4_0_3]
+ """
+
+ def getcmap(self, platformID, platEncID):
+ """Returns the first subtable which matches the given platform and encoding.
+
+ Args:
+ platformID (int): The platform ID. Use 0 for Unicode, 1 for Macintosh
+ (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows.
+ encodingID (int): Encoding ID. Interpretation depends on the platform ID.
+ See the OpenType specification for details.
+
+ Returns:
+ An object which is a subclass of :py:class:`CmapSubtable` if a matching
+ subtable is found within the font, or ``None`` otherwise.
+ """
+
+ for subtable in self.tables:
+ if subtable.platformID == platformID and subtable.platEncID == platEncID:
+ return subtable
+ return None # not found
+
+ def getBestCmap(
+ self,
+ cmapPreferences=(
+ (3, 10),
+ (0, 6),
+ (0, 4),
+ (3, 1),
+ (0, 3),
+ (0, 2),
+ (0, 1),
+ (0, 0),
+ ),
+ ):
+ """Returns the 'best' Unicode cmap dictionary available in the font
+ or ``None``, if no Unicode cmap subtable is available.
+
+ By default it will search for the following (platformID, platEncID)
+ pairs in order::
+
+ (3, 10), # Windows Unicode full repertoire
+ (0, 6), # Unicode full repertoire (format 13 subtable)
+ (0, 4), # Unicode 2.0 full repertoire
+ (3, 1), # Windows Unicode BMP
+ (0, 3), # Unicode 2.0 BMP
+ (0, 2), # Unicode ISO/IEC 10646
+ (0, 1), # Unicode 1.1
+ (0, 0) # Unicode 1.0
+
+ This particular order matches what HarfBuzz uses to choose what
+ subtable to use by default. This order prefers the largest-repertoire
+ subtable, and among those, prefers the Windows-platform over the
+ Unicode-platform as the former has wider support.
+
+ This order can be customized via the ``cmapPreferences`` argument.
+ """
+ for platformID, platEncID in cmapPreferences:
+ cmapSubtable = self.getcmap(platformID, platEncID)
+ if cmapSubtable is not None:
+ return cmapSubtable.cmap
+ return None # None of the requested cmap subtables were found
+
+ def buildReversed(self):
+ """Builds a reverse mapping dictionary
+
+ Iterates over all Unicode cmap tables and returns a dictionary mapping
+ glyphs to sets of codepoints, such as::
+
+ {
+ 'one': {0x31}
+ 'A': {0x41,0x391}
+ }
+
+ The values are sets of Unicode codepoints because
+ some fonts map different codepoints to the same glyph.
+ For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391
+ GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph.
+ """
+ result = {}
+ for subtable in self.tables:
+ if subtable.isUnicode():
+ for codepoint, name in subtable.cmap.items():
+ result.setdefault(name, set()).add(codepoint)
+ return result
+
+ def decompile(self, data, ttFont):
+ tableVersion, numSubTables = struct.unpack(">HH", data[:4])
+ self.tableVersion = int(tableVersion)
+ self.tables = tables = []
+ seenOffsets = {}
+ for i in range(numSubTables):
+ platformID, platEncID, offset = struct.unpack(
+ ">HHl", data[4 + i * 8 : 4 + (i + 1) * 8]
+ )
+ platformID, platEncID = int(platformID), int(platEncID)
+ format, length = struct.unpack(">HH", data[offset : offset + 4])
+ if format in [8, 10, 12, 13]:
+ format, reserved, length = struct.unpack(
+ ">HHL", data[offset : offset + 8]
+ )
+ elif format in [14]:
+ format, length = struct.unpack(">HL", data[offset : offset + 6])
+
+ if not length:
+ log.error(
+ "cmap subtable is reported as having zero length: platformID %s, "
+ "platEncID %s, format %s offset %s. Skipping table.",
+ platformID,
+ platEncID,
+ format,
+ offset,
+ )
+ continue
+ table = CmapSubtable.newSubtable(format)
+ table.platformID = platformID
+ table.platEncID = platEncID
+ # Note that by default we decompile only the subtable header info;
+ # any other data gets decompiled only when an attribute of the
+ # subtable is referenced.
+ table.decompileHeader(data[offset : offset + int(length)], ttFont)
+ if offset in seenOffsets:
+ table.data = None # Mark as decompiled
+ table.cmap = tables[seenOffsets[offset]].cmap
+ else:
+ seenOffsets[offset] = i
+ tables.append(table)
+ if ttFont.lazy is False: # Be lazy for None and True
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ for st in self.tables:
+ st.ensureDecompiled()
+
+ def compile(self, ttFont):
+ self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
+ numSubTables = len(self.tables)
+ totalOffset = 4 + 8 * numSubTables
+ data = struct.pack(">HH", self.tableVersion, numSubTables)
+ tableData = b""
+ seen = (
+ {}
+ ) # Some tables are the same object reference. Don't compile them twice.
+ done = (
+ {}
+ ) # Some tables are different objects, but compile to the same data chunk
+ for table in self.tables:
+ offset = seen.get(id(table.cmap))
+ if offset is None:
+ chunk = table.compile(ttFont)
+ offset = done.get(chunk)
+ if offset is None:
+ offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(
+ tableData
+ )
+ tableData = tableData + chunk
+ data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
+ return data + tableData
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("tableVersion", version=self.tableVersion)
+ writer.newline()
+ for table in self.tables:
+ table.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableVersion":
+ self.tableVersion = safeEval(attrs["version"])
+ return
+ if name[:12] != "cmap_format_":
+ return
+ if not hasattr(self, "tables"):
+ self.tables = []
+ format = safeEval(name[12:])
+ table = CmapSubtable.newSubtable(format)
+ table.platformID = safeEval(attrs["platformID"])
+ table.platEncID = safeEval(attrs["platEncID"])
+ table.fromXML(name, attrs, content, ttFont)
+ self.tables.append(table)
class CmapSubtable(object):
- """Base class for all cmap subtable formats.
-
- Subclasses which handle the individual subtable formats are named
- ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass`
- to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a
- new subtable object for a given format.
-
- The object exposes a ``.cmap`` attribute, which contains a dictionary mapping
- character codepoints to glyph names.
- """
-
- @staticmethod
- def getSubtableClass(format):
- """Return the subtable class for a format."""
- return cmap_classes.get(format, cmap_format_unknown)
-
- @staticmethod
- def newSubtable(format):
- """Return a new instance of a subtable for the given format
- ."""
- subtableClass = CmapSubtable.getSubtableClass(format)
- return subtableClass(format)
-
- def __init__(self, format):
- self.format = format
- self.data = None
- self.ttFont = None
- self.platformID = None #: The platform ID of this subtable
- self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``)
- self.language = None #: The language ID of this subtable (Macintosh platform only)
-
- def ensureDecompiled(self, recurse=False):
- # The recurse argument is unused, but part of the signature of
- # ensureDecompiled across the library.
- if self.data is None:
- return
- self.decompile(None, None) # use saved data.
- self.data = None # Once this table has been decompiled, make sure we don't
- # just return the original data. Also avoids recursion when
- # called with an attribute that the cmap subtable doesn't have.
-
- def __getattr__(self, attr):
- # allow lazy decompilation of subtables.
- if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
- raise AttributeError(attr)
- if self.data is None:
- raise AttributeError(attr)
- self.ensureDecompiled()
- return getattr(self, attr)
-
- def decompileHeader(self, data, ttFont):
- format, length, language = struct.unpack(">HHH", data[:6])
- assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
- self.format = int(format)
- self.length = int(length)
- self.language = int(language)
- self.data = data[6:]
- self.ttFont = ttFont
-
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ("language", self.language),
- ])
- writer.newline()
- codes = sorted(self.cmap.items())
- self._writeCodes(codes, writer)
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def getEncoding(self, default=None):
- """Returns the Python encoding name for this cmap subtable based on its platformID,
- platEncID, and language. If encoding for these values is not known, by default
- ``None`` is returned. That can be overridden by passing a value to the ``default``
- argument.
-
- Note that if you want to choose a "preferred" cmap subtable, most of the time
- ``self.isUnicode()`` is what you want as that one only returns true for the modern,
- commonly used, Unicode-compatible triplets, not the legacy ones.
- """
- return getEncoding(self.platformID, self.platEncID, self.language, default)
-
- def isUnicode(self):
- """Returns true if the characters are interpreted as Unicode codepoints."""
- return (self.platformID == 0 or
- (self.platformID == 3 and self.platEncID in [0, 1, 10]))
-
- def isSymbol(self):
- """Returns true if the subtable is for the Symbol encoding (3,0)"""
- return self.platformID == 3 and self.platEncID == 0
-
- def _writeCodes(self, codes, writer):
- isUnicode = self.isUnicode()
- for code, name in codes:
- writer.simpletag("map", code=hex(code), name=name)
- if isUnicode:
- writer.comment(Unicode[code])
- writer.newline()
-
- def __lt__(self, other):
- if not isinstance(other, CmapSubtable):
- return NotImplemented
-
- # implemented so that list.sort() sorts according to the spec.
- selfTuple = (
- getattr(self, "platformID", None),
- getattr(self, "platEncID", None),
- getattr(self, "language", None),
- self.__dict__)
- otherTuple = (
- getattr(other, "platformID", None),
- getattr(other, "platEncID", None),
- getattr(other, "language", None),
- other.__dict__)
- return selfTuple < otherTuple
+ """Base class for all cmap subtable formats.
+
+ Subclasses which handle the individual subtable formats are named
+ ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass`
+ to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a
+ new subtable object for a given format.
+
+ The object exposes a ``.cmap`` attribute, which contains a dictionary mapping
+ character codepoints to glyph names.
+ """
+
+ @staticmethod
+ def getSubtableClass(format):
+ """Return the subtable class for a format."""
+ return cmap_classes.get(format, cmap_format_unknown)
+
+ @staticmethod
+ def newSubtable(format):
+ """Return a new instance of a subtable for the given format
+ ."""
+ subtableClass = CmapSubtable.getSubtableClass(format)
+ return subtableClass(format)
+
+ def __init__(self, format):
+ self.format = format
+ self.data = None
+ self.ttFont = None
+ self.platformID = None #: The platform ID of this subtable
+ self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``)
+ self.language = (
+ None #: The language ID of this subtable (Macintosh platform only)
+ )
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ if self.data is None:
+ return
+ self.decompile(None, None) # use saved data.
+ self.data = None # Once this table has been decompiled, make sure we don't
+ # just return the original data. Also avoids recursion when
+ # called with an attribute that the cmap subtable doesn't have.
+
+ def __getattr__(self, attr):
+ # allow lazy decompilation of subtables.
+ if attr[:2] == "__": # don't handle requests for member functions like '__lt__'
+ raise AttributeError(attr)
+ if self.data is None:
+ raise AttributeError(attr)
+ self.ensureDecompiled()
+ return getattr(self, attr)
+
+ def decompileHeader(self, data, ttFont):
+ format, length, language = struct.unpack(">HHH", data[:6])
+ assert (
+ len(data) == length
+ ), "corrupt cmap table format %d (data length: %d, header length: %d)" % (
+ format,
+ len(data),
+ length,
+ )
+ self.format = int(format)
+ self.length = int(length)
+ self.language = int(language)
+ self.data = data[6:]
+ self.ttFont = ttFont
+
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ("language", self.language),
+ ],
+ )
+ writer.newline()
+ codes = sorted(self.cmap.items())
+ self._writeCodes(codes, writer)
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def getEncoding(self, default=None):
+ """Returns the Python encoding name for this cmap subtable based on its platformID,
+ platEncID, and language. If encoding for these values is not known, by default
+ ``None`` is returned. That can be overridden by passing a value to the ``default``
+ argument.
+
+ Note that if you want to choose a "preferred" cmap subtable, most of the time
+ ``self.isUnicode()`` is what you want as that one only returns true for the modern,
+ commonly used, Unicode-compatible triplets, not the legacy ones.
+ """
+ return getEncoding(self.platformID, self.platEncID, self.language, default)
+
+ def isUnicode(self):
+ """Returns true if the characters are interpreted as Unicode codepoints."""
+ return self.platformID == 0 or (
+ self.platformID == 3 and self.platEncID in [0, 1, 10]
+ )
+
+ def isSymbol(self):
+ """Returns true if the subtable is for the Symbol encoding (3,0)"""
+ return self.platformID == 3 and self.platEncID == 0
+
+ def _writeCodes(self, codes, writer):
+ isUnicode = self.isUnicode()
+ for code, name in codes:
+ writer.simpletag("map", code=hex(code), name=name)
+ if isUnicode:
+ writer.comment(Unicode[code])
+ writer.newline()
+
+ def __lt__(self, other):
+ if not isinstance(other, CmapSubtable):
+ return NotImplemented
+
+ # implemented so that list.sort() sorts according to the spec.
+ selfTuple = (
+ getattr(self, "platformID", None),
+ getattr(self, "platEncID", None),
+ getattr(self, "language", None),
+ self.__dict__,
+ )
+ otherTuple = (
+ getattr(other, "platformID", None),
+ getattr(other, "platEncID", None),
+ getattr(other, "language", None),
+ other.__dict__,
+ )
+ return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
- data = self.data # decompileHeader assigns the data after the header to self.data
- assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
- gids = array.array("B")
- gids.frombytes(self.data)
- charCodes = list(range(len(gids)))
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", 0, 262, self.language) + self.data
-
- cmap = self.cmap
- assert set(cmap.keys()).issubset(range(256))
- getGlyphID = ttFont.getGlyphID
- valueList = [getGlyphID(cmap[i]) if i in cmap else 0 for i in range(256)]
-
- gids = array.array("B", valueList)
- data = struct.pack(">HHH", 0, 262, self.language) + gids.tobytes()
- assert len(data) == 262
- return data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
+ gids = array.array("B")
+ gids.frombytes(self.data)
+ charCodes = list(range(len(gids)))
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return struct.pack(">HHH", 0, 262, self.language) + self.data
+
+ cmap = self.cmap
+ assert set(cmap.keys()).issubset(range(256))
+ getGlyphID = ttFont.getGlyphID
+ valueList = [getGlyphID(cmap[i]) if i in cmap else 0 for i in range(256)]
+
+ gids = array.array("B", valueList)
+ data = struct.pack(">HHH", 0, 262, self.language) + gids.tobytes()
+ assert len(data) == 262
+ return data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
subHeaderFormat = ">HHhH"
+
+
class SubHeader(object):
- def __init__(self):
- self.firstCode = None
- self.entryCount = None
- self.idDelta = None
- self.idRangeOffset = None
- self.glyphIndexArray = []
+ def __init__(self):
+ self.firstCode = None
+ self.entryCount = None
+ self.idDelta = None
+ self.idRangeOffset = None
+ self.glyphIndexArray = []
-class cmap_format_2(CmapSubtable):
- def setIDDelta(self, subHeader):
- subHeader.idDelta = 0
- # find the minGI which is not zero.
- minGI = subHeader.glyphIndexArray[0]
- for gid in subHeader.glyphIndexArray:
- if (gid != 0) and (gid < minGI):
- minGI = gid
- # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
- # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
- # We would like to pick an idDelta such that the first glyphArray GID is 1,
- # so that we are more likely to be able to combine glypharray GID subranges.
- # This means that we have a problem when minGI is > 32K
- # Since the final gi is reconstructed from the glyphArray GID by:
- # (short)finalGID = (gid + idDelta) % 0x10000),
- # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
- # negative number to an unsigned short.
-
- if (minGI > 1):
- if minGI > 0x7FFF:
- subHeader.idDelta = -(0x10000 - minGI) -1
- else:
- subHeader.idDelta = minGI -1
- idDelta = subHeader.idDelta
- for i in range(subHeader.entryCount):
- gid = subHeader.glyphIndexArray[i]
- if gid > 0:
- subHeader.glyphIndexArray[i] = gid - idDelta
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- subHeaderKeys = []
- maxSubHeaderindex = 0
- # get the key array, and determine the number of subHeaders.
- allKeys = array.array("H")
- allKeys.frombytes(data[:512])
- data = data[512:]
- if sys.byteorder != "big": allKeys.byteswap()
- subHeaderKeys = [ key//8 for key in allKeys]
- maxSubHeaderindex = max(subHeaderKeys)
-
- #Load subHeaders
- subHeaderList = []
- pos = 0
- for i in range(maxSubHeaderindex + 1):
- subHeader = SubHeader()
- (subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
- subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
- pos += 8
- giDataPos = pos + subHeader.idRangeOffset-2
- giList = array.array("H")
- giList.frombytes(data[giDataPos:giDataPos + subHeader.entryCount*2])
- if sys.byteorder != "big": giList.byteswap()
- subHeader.glyphIndexArray = giList
- subHeaderList.append(subHeader)
- # How this gets processed.
- # Charcodes may be one or two bytes.
- # The first byte of a charcode is mapped through the subHeaderKeys, to select
- # a subHeader. For any subheader but 0, the next byte is then mapped through the
- # selected subheader. If subheader Index 0 is selected, then the byte itself is
- # mapped through the subheader, and there is no second byte.
- # Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
- #
- # Each subheader references a range in the glyphIndexArray whose length is entryCount.
- # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray
- # referenced by another subheader.
- # The only subheader that will be referenced by more than one first-byte value is the subheader
- # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
- # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
- # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex.
- # A subheader specifies a subrange within (0...256) by the
- # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
- # (e.g. glyph not in font).
- # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
- # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
- # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
- # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
- # Example for Logocut-Medium
- # first byte of charcode = 129; selects subheader 1.
- # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
- # second byte of charCode = 66
- # the index offset = 66-64 = 2.
- # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
- # [glyphIndexArray index], [subrange array index] = glyphIndex
- # [256], [0]=1 from charcode [129, 64]
- # [257], [1]=2 from charcode [129, 65]
- # [258], [2]=3 from charcode [129, 66]
- # [259], [3]=4 from charcode [129, 67]
- # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
- # add it to the glyphID to get the final glyphIndex
- # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
-
- self.data = b""
- cmap = {}
- notdefGI = 0
- for firstByte in range(256):
- subHeadindex = subHeaderKeys[firstByte]
- subHeader = subHeaderList[subHeadindex]
- if subHeadindex == 0:
- if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
- continue # gi is notdef.
- else:
- charCode = firstByte
- offsetIndex = firstByte - subHeader.firstCode
- gi = subHeader.glyphIndexArray[offsetIndex]
- if gi != 0:
- gi = (gi + subHeader.idDelta) % 0x10000
- else:
- continue # gi is notdef.
- cmap[charCode] = gi
- else:
- if subHeader.entryCount:
- charCodeOffset = firstByte * 256 + subHeader.firstCode
- for offsetIndex in range(subHeader.entryCount):
- charCode = charCodeOffset + offsetIndex
- gi = subHeader.glyphIndexArray[offsetIndex]
- if gi != 0:
- gi = (gi + subHeader.idDelta) % 0x10000
- else:
- continue
- cmap[charCode] = gi
- # If not subHeader.entryCount, then all char codes with this first byte are
- # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
- # same as mapping it to .notdef.
-
- gids = list(cmap.values())
- charCodes = list(cmap.keys())
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", self.format, self.length, self.language) + self.data
- kEmptyTwoCharCodeRange = -1
- notdefGI = 0
-
- items = sorted(self.cmap.items())
- charCodes = [item[0] for item in items]
- names = [item[1] for item in items]
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- # allow virtual GIDs in format 2 tables
- gids = []
- for name in names:
- try:
- gid = nameMap[name]
- except KeyError:
- try:
- if (name[:3] == 'gid'):
- gid = int(name[3:])
- else:
- gid = ttFont.getGlyphID(name)
- except:
- raise KeyError(name)
-
- gids.append(gid)
-
- # Process the (char code to gid) item list in char code order.
- # By definition, all one byte char codes map to subheader 0.
- # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0,
- # which defines all char codes in its range to map to notdef) unless proven otherwise.
- # Note that since the char code items are processed in char code order, all the char codes with the
- # same first byte are in sequential order.
-
- subHeaderKeys = [kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
- subHeaderList = []
-
- # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up
- # with a cmap where all the one byte char codes map to notdef,
- # with the result that the subhead 0 would not get created just by processing the item list.
- charCode = charCodes[0]
- if charCode > 255:
- subHeader = SubHeader()
- subHeader.firstCode = 0
- subHeader.entryCount = 0
- subHeader.idDelta = 0
- subHeader.idRangeOffset = 0
- subHeaderList.append(subHeader)
-
- lastFirstByte = -1
- items = zip(charCodes, gids)
- for charCode, gid in items:
- if gid == 0:
- continue
- firstbyte = charCode >> 8
- secondByte = charCode & 0x00FF
-
- if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
- if lastFirstByte > -1:
- # fix GI's and iDelta of current subheader.
- self.setIDDelta(subHeader)
-
- # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
- # for the indices matching the char codes.
- if lastFirstByte == 0:
- for index in range(subHeader.entryCount):
- charCode = subHeader.firstCode + index
- subHeaderKeys[charCode] = 0
-
- assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
- # init new subheader
- subHeader = SubHeader()
- subHeader.firstCode = secondByte
- subHeader.entryCount = 1
- subHeader.glyphIndexArray.append(gid)
- subHeaderList.append(subHeader)
- subHeaderKeys[firstbyte] = len(subHeaderList) -1
- lastFirstByte = firstbyte
- else:
- # need to fill in with notdefs all the code points between the last charCode and the current charCode.
- codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
- for i in range(codeDiff):
- subHeader.glyphIndexArray.append(notdefGI)
- subHeader.glyphIndexArray.append(gid)
- subHeader.entryCount = subHeader.entryCount + codeDiff + 1
-
- # fix GI's and iDelta of last subheader that we we added to the subheader array.
- self.setIDDelta(subHeader)
-
- # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
- subHeader = SubHeader()
- subHeader.firstCode = 0
- subHeader.entryCount = 0
- subHeader.idDelta = 0
- subHeader.idRangeOffset = 2
- subHeaderList.append(subHeader)
- emptySubheadIndex = len(subHeaderList) - 1
- for index in range(256):
- if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
- subHeaderKeys[index] = emptySubheadIndex
- # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
- # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
- # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
- # charcode 0 and GID 0.
-
- idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
- subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2.
- for index in range(subheadRangeLen):
- subHeader = subHeaderList[index]
- subHeader.idRangeOffset = 0
- for j in range(index):
- prevSubhead = subHeaderList[j]
- if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
- subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
- subHeader.glyphIndexArray = []
- break
- if subHeader.idRangeOffset == 0: # didn't find one.
- subHeader.idRangeOffset = idRangeOffset
- idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
- else:
- idRangeOffset = idRangeOffset - 8 # one less subheader
-
- # Now we can write out the data!
- length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
- for subhead in subHeaderList[:-1]:
- length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays.
- dataList = [struct.pack(">HHH", 2, length, self.language)]
- for index in subHeaderKeys:
- dataList.append(struct.pack(">H", index*8))
- for subhead in subHeaderList:
- dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
- for subhead in subHeaderList[:-1]:
- for gi in subhead.glyphIndexArray:
- dataList.append(struct.pack(">H", gi))
- data = bytesjoin(dataList)
- assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
- return data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+class cmap_format_2(CmapSubtable):
+ def setIDDelta(self, subHeader):
+ subHeader.idDelta = 0
+ # find the minGI which is not zero.
+ minGI = subHeader.glyphIndexArray[0]
+ for gid in subHeader.glyphIndexArray:
+ if (gid != 0) and (gid < minGI):
+ minGI = gid
+ # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
+ # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
+ # We would like to pick an idDelta such that the first glyphArray GID is 1,
+ # so that we are more likely to be able to combine glypharray GID subranges.
+ # This means that we have a problem when minGI is > 32K
+ # Since the final gi is reconstructed from the glyphArray GID by:
+ # (short)finalGID = (gid + idDelta) % 0x10000),
+ # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
+ # negative number to an unsigned short.
+
+ if minGI > 1:
+ if minGI > 0x7FFF:
+ subHeader.idDelta = -(0x10000 - minGI) - 1
+ else:
+ subHeader.idDelta = minGI - 1
+ idDelta = subHeader.idDelta
+ for i in range(subHeader.entryCount):
+ gid = subHeader.glyphIndexArray[i]
+ if gid > 0:
+ subHeader.glyphIndexArray[i] = gid - idDelta
+
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ subHeaderKeys = []
+ maxSubHeaderindex = 0
+ # get the key array, and determine the number of subHeaders.
+ allKeys = array.array("H")
+ allKeys.frombytes(data[:512])
+ data = data[512:]
+ if sys.byteorder != "big":
+ allKeys.byteswap()
+ subHeaderKeys = [key // 8 for key in allKeys]
+ maxSubHeaderindex = max(subHeaderKeys)
+
+ # Load subHeaders
+ subHeaderList = []
+ pos = 0
+ for i in range(maxSubHeaderindex + 1):
+ subHeader = SubHeader()
+ (
+ subHeader.firstCode,
+ subHeader.entryCount,
+ subHeader.idDelta,
+ subHeader.idRangeOffset,
+ ) = struct.unpack(subHeaderFormat, data[pos : pos + 8])
+ pos += 8
+ giDataPos = pos + subHeader.idRangeOffset - 2
+ giList = array.array("H")
+ giList.frombytes(data[giDataPos : giDataPos + subHeader.entryCount * 2])
+ if sys.byteorder != "big":
+ giList.byteswap()
+ subHeader.glyphIndexArray = giList
+ subHeaderList.append(subHeader)
+ # How this gets processed.
+ # Charcodes may be one or two bytes.
+ # The first byte of a charcode is mapped through the subHeaderKeys, to select
+ # a subHeader. For any subheader but 0, the next byte is then mapped through the
+ # selected subheader. If subheader Index 0 is selected, then the byte itself is
+ # mapped through the subheader, and there is no second byte.
+ # Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
+ #
+ # Each subheader references a range in the glyphIndexArray whose length is entryCount.
+        # The range in glyphIndexArray referenced by a subheader may overlap with the range in glyphIndexArray
+ # referenced by another subheader.
+ # The only subheader that will be referenced by more than one first-byte value is the subheader
+ # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
+ # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
+        # A byte being mapped through a subheader is treated as an index into a mapping of array index to font glyphIndex.
+ # A subheader specifies a subrange within (0...256) by the
+ # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
+ # (e.g. glyph not in font).
+ # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
+ # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
+ # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
+ # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
+ # Example for Logocut-Medium
+ # first byte of charcode = 129; selects subheader 1.
+ # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
+ # second byte of charCode = 66
+ # the index offset = 66-64 = 2.
+ # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
+ # [glyphIndexArray index], [subrange array index] = glyphIndex
+ # [256], [0]=1 from charcode [129, 64]
+ # [257], [1]=2 from charcode [129, 65]
+ # [258], [2]=3 from charcode [129, 66]
+ # [259], [3]=4 from charcode [129, 67]
+ # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
+ # add it to the glyphID to get the final glyphIndex
+ # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
+
+ self.data = b""
+ cmap = {}
+ notdefGI = 0
+ for firstByte in range(256):
+ subHeadindex = subHeaderKeys[firstByte]
+ subHeader = subHeaderList[subHeadindex]
+ if subHeadindex == 0:
+ if (firstByte < subHeader.firstCode) or (
+ firstByte >= subHeader.firstCode + subHeader.entryCount
+ ):
+ continue # gi is notdef.
+ else:
+ charCode = firstByte
+ offsetIndex = firstByte - subHeader.firstCode
+ gi = subHeader.glyphIndexArray[offsetIndex]
+ if gi != 0:
+ gi = (gi + subHeader.idDelta) % 0x10000
+ else:
+ continue # gi is notdef.
+ cmap[charCode] = gi
+ else:
+ if subHeader.entryCount:
+ charCodeOffset = firstByte * 256 + subHeader.firstCode
+ for offsetIndex in range(subHeader.entryCount):
+ charCode = charCodeOffset + offsetIndex
+ gi = subHeader.glyphIndexArray[offsetIndex]
+ if gi != 0:
+ gi = (gi + subHeader.idDelta) % 0x10000
+ else:
+ continue
+ cmap[charCode] = gi
+ # If not subHeader.entryCount, then all char codes with this first byte are
+ # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
+ # same as mapping it to .notdef.
+
+ gids = list(cmap.values())
+ charCodes = list(cmap.keys())
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
+ )
+ kEmptyTwoCharCodeRange = -1
+ notdefGI = 0
+
+ items = sorted(self.cmap.items())
+ charCodes = [item[0] for item in items]
+ names = [item[1] for item in items]
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ # allow virtual GIDs in format 2 tables
+ gids = []
+ for name in names:
+ try:
+ gid = nameMap[name]
+ except KeyError:
+ try:
+ if name[:3] == "gid":
+ gid = int(name[3:])
+ else:
+ gid = ttFont.getGlyphID(name)
+ except:
+ raise KeyError(name)
+
+ gids.append(gid)
+
+ # Process the (char code to gid) item list in char code order.
+ # By definition, all one byte char codes map to subheader 0.
+        # For all the two byte char codes, we assume that the first byte maps to the empty subhead (with an entry count of 0,
+ # which defines all char codes in its range to map to notdef) unless proven otherwise.
+ # Note that since the char code items are processed in char code order, all the char codes with the
+ # same first byte are in sequential order.
+
+ subHeaderKeys = [
+ kEmptyTwoCharCodeRange for x in range(256)
+ ] # list of indices into subHeaderList.
+ subHeaderList = []
+
+ # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up
+ # with a cmap where all the one byte char codes map to notdef,
+ # with the result that the subhead 0 would not get created just by processing the item list.
+ charCode = charCodes[0]
+ if charCode > 255:
+ subHeader = SubHeader()
+ subHeader.firstCode = 0
+ subHeader.entryCount = 0
+ subHeader.idDelta = 0
+ subHeader.idRangeOffset = 0
+ subHeaderList.append(subHeader)
+
+ lastFirstByte = -1
+ items = zip(charCodes, gids)
+ for charCode, gid in items:
+ if gid == 0:
+ continue
+ firstbyte = charCode >> 8
+ secondByte = charCode & 0x00FF
+
+ if (
+ firstbyte != lastFirstByte
+ ): # Need to update the current subhead, and start a new one.
+ if lastFirstByte > -1:
+ # fix GI's and iDelta of current subheader.
+ self.setIDDelta(subHeader)
+
+                # If it was subheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
+ # for the indices matching the char codes.
+ if lastFirstByte == 0:
+ for index in range(subHeader.entryCount):
+ charCode = subHeader.firstCode + index
+ subHeaderKeys[charCode] = 0
+
+ assert subHeader.entryCount == len(
+ subHeader.glyphIndexArray
+ ), "Error - subhead entry count does not match len of glyphID subrange."
+ # init new subheader
+ subHeader = SubHeader()
+ subHeader.firstCode = secondByte
+ subHeader.entryCount = 1
+ subHeader.glyphIndexArray.append(gid)
+ subHeaderList.append(subHeader)
+ subHeaderKeys[firstbyte] = len(subHeaderList) - 1
+ lastFirstByte = firstbyte
+ else:
+ # need to fill in with notdefs all the code points between the last charCode and the current charCode.
+ codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
+ for i in range(codeDiff):
+ subHeader.glyphIndexArray.append(notdefGI)
+ subHeader.glyphIndexArray.append(gid)
+ subHeader.entryCount = subHeader.entryCount + codeDiff + 1
+
+        # fix GI's and iDelta of last subheader that we added to the subheader array.
+ self.setIDDelta(subHeader)
+
+ # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
+ subHeader = SubHeader()
+ subHeader.firstCode = 0
+ subHeader.entryCount = 0
+ subHeader.idDelta = 0
+ subHeader.idRangeOffset = 2
+ subHeaderList.append(subHeader)
+ emptySubheadIndex = len(subHeaderList) - 1
+ for index in range(256):
+ if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
+ subHeaderKeys[index] = emptySubheadIndex
+ # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
+ # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
+ # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
+ # charcode 0 and GID 0.
+
+ idRangeOffset = (
+ len(subHeaderList) - 1
+ ) * 8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
+ subheadRangeLen = (
+ len(subHeaderList) - 1
+        )  # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2.
+ for index in range(subheadRangeLen):
+ subHeader = subHeaderList[index]
+ subHeader.idRangeOffset = 0
+ for j in range(index):
+ prevSubhead = subHeaderList[j]
+ if (
+ prevSubhead.glyphIndexArray == subHeader.glyphIndexArray
+ ): # use the glyphIndexArray subarray
+ subHeader.idRangeOffset = (
+ prevSubhead.idRangeOffset - (index - j) * 8
+ )
+ subHeader.glyphIndexArray = []
+ break
+ if subHeader.idRangeOffset == 0: # didn't find one.
+ subHeader.idRangeOffset = idRangeOffset
+ idRangeOffset = (
+ idRangeOffset - 8
+ ) + subHeader.entryCount * 2 # one less subheader, one more subArray.
+ else:
+ idRangeOffset = idRangeOffset - 8 # one less subheader
+
+ # Now we can write out the data!
+ length = (
+ 6 + 512 + 8 * len(subHeaderList)
+ ) # header, 256 subHeaderKeys, and subheader array.
+ for subhead in subHeaderList[:-1]:
+ length = (
+ length + len(subhead.glyphIndexArray) * 2
+ ) # We can't use subhead.entryCount, as some of the subhead may share subArrays.
+ dataList = [struct.pack(">HHH", 2, length, self.language)]
+ for index in subHeaderKeys:
+ dataList.append(struct.pack(">H", index * 8))
+ for subhead in subHeaderList:
+ dataList.append(
+ struct.pack(
+ subHeaderFormat,
+ subhead.firstCode,
+ subhead.entryCount,
+ subhead.idDelta,
+ subhead.idRangeOffset,
+ )
+ )
+ for subhead in subHeaderList[:-1]:
+ for gi in subhead.glyphIndexArray:
+ dataList.append(struct.pack(">H", gi))
+ data = bytesjoin(dataList)
+ assert len(data) == length, (
+ "Error: cmap format 2 is not same length as calculated! actual: "
+ + str(len(data))
+ + " calc : "
+ + str(length)
+ )
+ return data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
cmap_format_4_format = ">7H"
-#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
-#uint16 reservedPad # This value should be zero
-#uint16 startCode[segCount] # Starting character code for each segment
-#uint16 idDelta[segCount] # Delta for all character codes in segment
-#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
-#uint16 glyphIndexArray[variable] # Glyph index array
+# uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
+# uint16 reservedPad # This value should be zero
+# uint16 startCode[segCount] # Starting character code for each segment
+# uint16 idDelta[segCount] # Delta for all character codes in segment
+# uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
+# uint16 glyphIndexArray[variable] # Glyph index array
+
def splitRange(startCode, endCode, cmap):
- # Try to split a range of character codes into subranges with consecutive
- # glyph IDs in such a way that the cmap4 subtable can be stored "most"
- # efficiently. I can't prove I've got the optimal solution, but it seems
- # to do well with the fonts I tested: none became bigger, many became smaller.
- if startCode == endCode:
- return [], [endCode]
-
- lastID = cmap[startCode]
- lastCode = startCode
- inOrder = None
- orderedBegin = None
- subRanges = []
-
- # Gather subranges in which the glyph IDs are consecutive.
- for code in range(startCode + 1, endCode + 1):
- glyphID = cmap[code]
-
- if glyphID - 1 == lastID:
- if inOrder is None or not inOrder:
- inOrder = 1
- orderedBegin = lastCode
- else:
- if inOrder:
- inOrder = 0
- subRanges.append((orderedBegin, lastCode))
- orderedBegin = None
-
- lastID = glyphID
- lastCode = code
-
- if inOrder:
- subRanges.append((orderedBegin, lastCode))
- assert lastCode == endCode
-
- # Now filter out those new subranges that would only make the data bigger.
- # A new segment cost 8 bytes, not using a new segment costs 2 bytes per
- # character.
- newRanges = []
- for b, e in subRanges:
- if b == startCode and e == endCode:
- break # the whole range, we're fine
- if b == startCode or e == endCode:
- threshold = 4 # split costs one more segment
- else:
- threshold = 8 # split costs two more segments
- if (e - b + 1) > threshold:
- newRanges.append((b, e))
- subRanges = newRanges
-
- if not subRanges:
- return [], [endCode]
-
- if subRanges[0][0] != startCode:
- subRanges.insert(0, (startCode, subRanges[0][0] - 1))
- if subRanges[-1][1] != endCode:
- subRanges.append((subRanges[-1][1] + 1, endCode))
-
- # Fill the "holes" in the segments list -- those are the segments in which
- # the glyph IDs are _not_ consecutive.
- i = 1
- while i < len(subRanges):
- if subRanges[i-1][1] + 1 != subRanges[i][0]:
- subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1))
- i = i + 1
- i = i + 1
-
- # Transform the ranges into startCode/endCode lists.
- start = []
- end = []
- for b, e in subRanges:
- start.append(b)
- end.append(e)
- start.pop(0)
-
- assert len(start) + 1 == len(end)
- return start, end
+ # Try to split a range of character codes into subranges with consecutive
+ # glyph IDs in such a way that the cmap4 subtable can be stored "most"
+ # efficiently. I can't prove I've got the optimal solution, but it seems
+ # to do well with the fonts I tested: none became bigger, many became smaller.
+ if startCode == endCode:
+ return [], [endCode]
+
+ lastID = cmap[startCode]
+ lastCode = startCode
+ inOrder = None
+ orderedBegin = None
+ subRanges = []
+
+ # Gather subranges in which the glyph IDs are consecutive.
+ for code in range(startCode + 1, endCode + 1):
+ glyphID = cmap[code]
+
+ if glyphID - 1 == lastID:
+ if inOrder is None or not inOrder:
+ inOrder = 1
+ orderedBegin = lastCode
+ else:
+ if inOrder:
+ inOrder = 0
+ subRanges.append((orderedBegin, lastCode))
+ orderedBegin = None
+
+ lastID = glyphID
+ lastCode = code
+
+ if inOrder:
+ subRanges.append((orderedBegin, lastCode))
+ assert lastCode == endCode
+
+ # Now filter out those new subranges that would only make the data bigger.
+ # A new segment cost 8 bytes, not using a new segment costs 2 bytes per
+ # character.
+ newRanges = []
+ for b, e in subRanges:
+ if b == startCode and e == endCode:
+ break # the whole range, we're fine
+ if b == startCode or e == endCode:
+ threshold = 4 # split costs one more segment
+ else:
+ threshold = 8 # split costs two more segments
+ if (e - b + 1) > threshold:
+ newRanges.append((b, e))
+ subRanges = newRanges
+
+ if not subRanges:
+ return [], [endCode]
+
+ if subRanges[0][0] != startCode:
+ subRanges.insert(0, (startCode, subRanges[0][0] - 1))
+ if subRanges[-1][1] != endCode:
+ subRanges.append((subRanges[-1][1] + 1, endCode))
+
+ # Fill the "holes" in the segments list -- those are the segments in which
+ # the glyph IDs are _not_ consecutive.
+ i = 1
+ while i < len(subRanges):
+ if subRanges[i - 1][1] + 1 != subRanges[i][0]:
+ subRanges.insert(i, (subRanges[i - 1][1] + 1, subRanges[i][0] - 1))
+ i = i + 1
+ i = i + 1
+
+ # Transform the ranges into startCode/endCode lists.
+ start = []
+ end = []
+ for b, e in subRanges:
+ start.append(b)
+ end.append(e)
+ start.pop(0)
+
+ assert len(start) + 1 == len(end)
+ return start, end
class cmap_format_4(CmapSubtable):
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- (segCountX2, searchRange, entrySelector, rangeShift) = \
- struct.unpack(">4H", data[:8])
- data = data[8:]
- segCount = segCountX2 // 2
-
- allCodes = array.array("H")
- allCodes.frombytes(data)
- self.data = data = None
-
- if sys.byteorder != "big": allCodes.byteswap()
-
- # divide the data
- endCode = allCodes[:segCount]
- allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
- startCode = allCodes[:segCount]
- allCodes = allCodes[segCount:]
- idDelta = allCodes[:segCount]
- allCodes = allCodes[segCount:]
- idRangeOffset = allCodes[:segCount]
- glyphIndexArray = allCodes[segCount:]
- lenGIArray = len(glyphIndexArray)
-
- # build 2-byte character mapping
- charCodes = []
- gids = []
- for i in range(len(startCode) - 1): # don't do 0xffff!
- start = startCode[i]
- delta = idDelta[i]
- rangeOffset = idRangeOffset[i]
- partial = rangeOffset // 2 - start + i - len(idRangeOffset)
-
- rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
- charCodes.extend(rangeCharCodes)
- if rangeOffset == 0:
- gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
- else:
- for charCode in rangeCharCodes:
- index = charCode + partial
- assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
- if glyphIndexArray[index] != 0: # if not missing glyph
- glyphID = glyphIndexArray[index] + delta
- else:
- glyphID = 0 # missing glyph
- gids.append(glyphID & 0xFFFF)
-
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", self.format, self.length, self.language) + self.data
-
- charCodes = list(self.cmap.keys())
- if not charCodes:
- startCode = [0xffff]
- endCode = [0xffff]
- else:
- charCodes.sort()
- names = [self.cmap[code] for code in charCodes]
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- # allow virtual GIDs in format 4 tables
- gids = []
- for name in names:
- try:
- gid = nameMap[name]
- except KeyError:
- try:
- if (name[:3] == 'gid'):
- gid = int(name[3:])
- else:
- gid = ttFont.getGlyphID(name)
- except:
- raise KeyError(name)
-
- gids.append(gid)
- cmap = {} # code:glyphID mapping
- for code, gid in zip(charCodes, gids):
- cmap[code] = gid
-
- # Build startCode and endCode lists.
- # Split the char codes in ranges of consecutive char codes, then split
- # each range in more ranges of consecutive/not consecutive glyph IDs.
- # See splitRange().
- lastCode = charCodes[0]
- endCode = []
- startCode = [lastCode]
- for charCode in charCodes[1:]: # skip the first code, it's the first start code
- if charCode == lastCode + 1:
- lastCode = charCode
- continue
- start, end = splitRange(startCode[-1], lastCode, cmap)
- startCode.extend(start)
- endCode.extend(end)
- startCode.append(charCode)
- lastCode = charCode
- start, end = splitRange(startCode[-1], lastCode, cmap)
- startCode.extend(start)
- endCode.extend(end)
- startCode.append(0xffff)
- endCode.append(0xffff)
-
- # build up rest of cruft
- idDelta = []
- idRangeOffset = []
- glyphIndexArray = []
- for i in range(len(endCode)-1): # skip the closing codes (0xffff)
- indices = []
- for charCode in range(startCode[i], endCode[i] + 1):
- indices.append(cmap[charCode])
- if (indices == list(range(indices[0], indices[0] + len(indices)))):
- idDelta.append((indices[0] - startCode[i]) % 0x10000)
- idRangeOffset.append(0)
- else:
- idDelta.append(0)
- idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
- glyphIndexArray.extend(indices)
- idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
- idRangeOffset.append(0)
-
- # Insane.
- segCount = len(endCode)
- segCountX2 = segCount * 2
- searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
-
- charCodeArray = array.array("H", endCode + [0] + startCode)
- idDeltaArray = array.array("H", idDelta)
- restArray = array.array("H", idRangeOffset + glyphIndexArray)
- if sys.byteorder != "big": charCodeArray.byteswap()
- if sys.byteorder != "big": idDeltaArray.byteswap()
- if sys.byteorder != "big": restArray.byteswap()
- data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes()
-
- length = struct.calcsize(cmap_format_4_format) + len(data)
- header = struct.pack(cmap_format_4_format, self.format, length, self.language,
- segCountX2, searchRange, entrySelector, rangeShift)
- return header + data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- nameMap, attrsMap, dummyContent = element
- if nameMap != "map":
- assert 0, "Unrecognized keyword in cmap subtable"
- cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ (segCountX2, searchRange, entrySelector, rangeShift) = struct.unpack(
+ ">4H", data[:8]
+ )
+ data = data[8:]
+ segCount = segCountX2 // 2
+
+ allCodes = array.array("H")
+ allCodes.frombytes(data)
+ self.data = data = None
+
+ if sys.byteorder != "big":
+ allCodes.byteswap()
+
+ # divide the data
+ endCode = allCodes[:segCount]
+ allCodes = allCodes[segCount + 1 :] # the +1 is skipping the reservedPad field
+ startCode = allCodes[:segCount]
+ allCodes = allCodes[segCount:]
+ idDelta = allCodes[:segCount]
+ allCodes = allCodes[segCount:]
+ idRangeOffset = allCodes[:segCount]
+ glyphIndexArray = allCodes[segCount:]
+ lenGIArray = len(glyphIndexArray)
+
+ # build 2-byte character mapping
+ charCodes = []
+ gids = []
+ for i in range(len(startCode) - 1): # don't do 0xffff!
+ start = startCode[i]
+ delta = idDelta[i]
+ rangeOffset = idRangeOffset[i]
+ partial = rangeOffset // 2 - start + i - len(idRangeOffset)
+
+ rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
+ charCodes.extend(rangeCharCodes)
+ if rangeOffset == 0:
+ gids.extend(
+ [(charCode + delta) & 0xFFFF for charCode in rangeCharCodes]
+ )
+ else:
+ for charCode in rangeCharCodes:
+ index = charCode + partial
+ assert index < lenGIArray, (
+ "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !"
+ % (i, index, lenGIArray)
+ )
+ if glyphIndexArray[index] != 0: # if not missing glyph
+ glyphID = glyphIndexArray[index] + delta
+ else:
+ glyphID = 0 # missing glyph
+ gids.append(glyphID & 0xFFFF)
+
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
+ )
+
+ charCodes = list(self.cmap.keys())
+ if not charCodes:
+ startCode = [0xFFFF]
+ endCode = [0xFFFF]
+ else:
+ charCodes.sort()
+ names = [self.cmap[code] for code in charCodes]
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ # allow virtual GIDs in format 4 tables
+ gids = []
+ for name in names:
+ try:
+ gid = nameMap[name]
+ except KeyError:
+ try:
+ if name[:3] == "gid":
+ gid = int(name[3:])
+ else:
+ gid = ttFont.getGlyphID(name)
+ except:
+ raise KeyError(name)
+
+ gids.append(gid)
+ cmap = {} # code:glyphID mapping
+ for code, gid in zip(charCodes, gids):
+ cmap[code] = gid
+
+ # Build startCode and endCode lists.
+ # Split the char codes in ranges of consecutive char codes, then split
+ # each range in more ranges of consecutive/not consecutive glyph IDs.
+ # See splitRange().
+ lastCode = charCodes[0]
+ endCode = []
+ startCode = [lastCode]
+ for charCode in charCodes[
+ 1:
+ ]: # skip the first code, it's the first start code
+ if charCode == lastCode + 1:
+ lastCode = charCode
+ continue
+ start, end = splitRange(startCode[-1], lastCode, cmap)
+ startCode.extend(start)
+ endCode.extend(end)
+ startCode.append(charCode)
+ lastCode = charCode
+ start, end = splitRange(startCode[-1], lastCode, cmap)
+ startCode.extend(start)
+ endCode.extend(end)
+ startCode.append(0xFFFF)
+ endCode.append(0xFFFF)
+
+ # build up rest of cruft
+ idDelta = []
+ idRangeOffset = []
+ glyphIndexArray = []
+ for i in range(len(endCode) - 1): # skip the closing codes (0xffff)
+ indices = []
+ for charCode in range(startCode[i], endCode[i] + 1):
+ indices.append(cmap[charCode])
+ if indices == list(range(indices[0], indices[0] + len(indices))):
+ idDelta.append((indices[0] - startCode[i]) % 0x10000)
+ idRangeOffset.append(0)
+ else:
+ idDelta.append(0)
+ idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
+ glyphIndexArray.extend(indices)
+ idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
+ idRangeOffset.append(0)
+
+ # Insane.
+ segCount = len(endCode)
+ segCountX2 = segCount * 2
+ searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
+
+ charCodeArray = array.array("H", endCode + [0] + startCode)
+ idDeltaArray = array.array("H", idDelta)
+ restArray = array.array("H", idRangeOffset + glyphIndexArray)
+ if sys.byteorder != "big":
+ charCodeArray.byteswap()
+ if sys.byteorder != "big":
+ idDeltaArray.byteswap()
+ if sys.byteorder != "big":
+ restArray.byteswap()
+ data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes()
+
+ length = struct.calcsize(cmap_format_4_format) + len(data)
+ header = struct.pack(
+ cmap_format_4_format,
+ self.format,
+ length,
+ self.language,
+ segCountX2,
+ searchRange,
+ entrySelector,
+ rangeShift,
+ )
+ return header + data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ nameMap, attrsMap, dummyContent = element
+ if nameMap != "map":
+ assert 0, "Unrecognized keyword in cmap subtable"
+ cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- firstCode, entryCount = struct.unpack(">HH", data[:4])
- firstCode = int(firstCode)
- data = data[4:]
- #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
- gids = array.array("H")
- gids.frombytes(data[:2 * int(entryCount)])
- if sys.byteorder != "big": gids.byteswap()
- self.data = data = None
-
- charCodes = list(range(firstCode, firstCode + len(gids)))
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHH", self.format, self.length, self.language) + self.data
- cmap = self.cmap
- codes = sorted(cmap.keys())
- if codes: # yes, there are empty cmap tables.
- codes = list(range(codes[0], codes[-1] + 1))
- firstCode = codes[0]
- valueList = [
- ttFont.getGlyphID(cmap[code]) if code in cmap else 0
- for code in codes
- ]
- gids = array.array("H", valueList)
- if sys.byteorder != "big": gids.byteswap()
- data = gids.tobytes()
- else:
- data = b""
- firstCode = 0
- header = struct.pack(">HHHHH",
- 6, len(data) + 10, self.language, firstCode, len(codes))
- return header + data
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = safeEval(attrs["language"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ firstCode, entryCount = struct.unpack(">HH", data[:4])
+ firstCode = int(firstCode)
+ data = data[4:]
+ # assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
+ gids = array.array("H")
+ gids.frombytes(data[: 2 * int(entryCount)])
+ if sys.byteorder != "big":
+ gids.byteswap()
+ self.data = data = None
+
+ charCodes = list(range(firstCode, firstCode + len(gids)))
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
+ )
+ cmap = self.cmap
+ codes = sorted(cmap.keys())
+ if codes: # yes, there are empty cmap tables.
+ codes = list(range(codes[0], codes[-1] + 1))
+ firstCode = codes[0]
+ valueList = [
+ ttFont.getGlyphID(cmap[code]) if code in cmap else 0 for code in codes
+ ]
+ gids = array.array("H", valueList)
+ if sys.byteorder != "big":
+ gids.byteswap()
+ data = gids.tobytes()
+ else:
+ data = b""
+ firstCode = 0
+ header = struct.pack(
+ ">HHHHH", 6, len(data) + 10, self.language, firstCode, len(codes)
+ )
+ return header + data
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = safeEval(attrs["language"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12_or_13(CmapSubtable):
-
- def __init__(self, format):
- self.format = format
- self.reserved = 0
- self.data = None
- self.ttFont = None
-
- def decompileHeader(self, data, ttFont):
- format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
- assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length)
- self.format = format
- self.reserved = reserved
- self.length = length
- self.language = language
- self.nGroups = nGroups
- self.data = data[16:]
- self.ttFont = ttFont
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- data = self.data # decompileHeader assigns the data after the header to self.data
- charCodes = []
- gids = []
- pos = 0
- for i in range(self.nGroups):
- startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] )
- pos += 12
- lenGroup = 1 + endCharCode - startCharCode
- charCodes.extend(list(range(startCharCode, endCharCode +1)))
- gids.extend(self._computeGIDs(glyphID, lenGroup))
- self.data = data = None
- self.cmap = _make_map(self.ttFont, charCodes, gids)
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
- charCodes = list(self.cmap.keys())
- names = list(self.cmap.values())
- nameMap = ttFont.getReverseGlyphMap()
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- nameMap = ttFont.getReverseGlyphMap(rebuild=True)
- try:
- gids = [nameMap[name] for name in names]
- except KeyError:
- # allow virtual GIDs in format 12 tables
- gids = []
- for name in names:
- try:
- gid = nameMap[name]
- except KeyError:
- try:
- if (name[:3] == 'gid'):
- gid = int(name[3:])
- else:
- gid = ttFont.getGlyphID(name)
- except:
- raise KeyError(name)
-
- gids.append(gid)
-
- cmap = {} # code:glyphID mapping
- for code, gid in zip(charCodes, gids):
- cmap[code] = gid
-
- charCodes.sort()
- index = 0
- startCharCode = charCodes[0]
- startGlyphID = cmap[startCharCode]
- lastGlyphID = startGlyphID - self._format_step
- lastCharCode = startCharCode - 1
- nGroups = 0
- dataList = []
- maxIndex = len(charCodes)
- for index in range(maxIndex):
- charCode = charCodes[index]
- glyphID = cmap[charCode]
- if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
- dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
- startCharCode = charCode
- startGlyphID = glyphID
- nGroups = nGroups + 1
- lastGlyphID = glyphID
- lastCharCode = charCode
- dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
- nGroups = nGroups + 1
- data = bytesjoin(dataList)
- lengthSubtable = len(data) +16
- assert len(data) == (nGroups*12) == (lengthSubtable-16)
- return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data
-
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ("format", self.format),
- ("reserved", self.reserved),
- ("length", self.length),
- ("language", self.language),
- ("nGroups", self.nGroups),
- ])
- writer.newline()
- codes = sorted(self.cmap.items())
- self._writeCodes(codes, writer)
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.format = safeEval(attrs["format"])
- self.reserved = safeEval(attrs["reserved"])
- self.length = safeEval(attrs["length"])
- self.language = safeEval(attrs["language"])
- self.nGroups = safeEval(attrs["nGroups"])
- if not hasattr(self, "cmap"):
- self.cmap = {}
- cmap = self.cmap
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- cmap[safeEval(attrs["code"])] = attrs["name"]
+ def __init__(self, format):
+ self.format = format
+ self.reserved = 0
+ self.data = None
+ self.ttFont = None
+
+ def decompileHeader(self, data, ttFont):
+ format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
+ assert (
+ len(data) == (16 + nGroups * 12) == (length)
+ ), "corrupt cmap table format %d (data length: %d, header length: %d)" % (
+ self.format,
+ len(data),
+ length,
+ )
+ self.format = format
+ self.reserved = reserved
+ self.length = length
+ self.language = language
+ self.nGroups = nGroups
+ self.data = data[16:]
+ self.ttFont = ttFont
+
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ data = (
+ self.data
+ ) # decompileHeader assigns the data after the header to self.data
+ charCodes = []
+ gids = []
+ pos = 0
+ for i in range(self.nGroups):
+ startCharCode, endCharCode, glyphID = struct.unpack(
+ ">LLL", data[pos : pos + 12]
+ )
+ pos += 12
+ lenGroup = 1 + endCharCode - startCharCode
+ charCodes.extend(list(range(startCharCode, endCharCode + 1)))
+ gids.extend(self._computeGIDs(glyphID, lenGroup))
+ self.data = data = None
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(
+ ">HHLLL",
+ self.format,
+ self.reserved,
+ self.length,
+ self.language,
+ self.nGroups,
+ )
+ + self.data
+ )
+ charCodes = list(self.cmap.keys())
+ names = list(self.cmap.values())
+ nameMap = ttFont.getReverseGlyphMap()
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+ try:
+ gids = [nameMap[name] for name in names]
+ except KeyError:
+ # allow virtual GIDs in format 12 tables
+ gids = []
+ for name in names:
+ try:
+ gid = nameMap[name]
+ except KeyError:
+ try:
+ if name[:3] == "gid":
+ gid = int(name[3:])
+ else:
+ gid = ttFont.getGlyphID(name)
+ except:
+ raise KeyError(name)
+
+ gids.append(gid)
+
+ cmap = {} # code:glyphID mapping
+ for code, gid in zip(charCodes, gids):
+ cmap[code] = gid
+
+ charCodes.sort()
+ index = 0
+ startCharCode = charCodes[0]
+ startGlyphID = cmap[startCharCode]
+ lastGlyphID = startGlyphID - self._format_step
+ lastCharCode = startCharCode - 1
+ nGroups = 0
+ dataList = []
+ maxIndex = len(charCodes)
+ for index in range(maxIndex):
+ charCode = charCodes[index]
+ glyphID = cmap[charCode]
+ if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
+ dataList.append(
+ struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)
+ )
+ startCharCode = charCode
+ startGlyphID = glyphID
+ nGroups = nGroups + 1
+ lastGlyphID = glyphID
+ lastCharCode = charCode
+ dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
+ nGroups = nGroups + 1
+ data = bytesjoin(dataList)
+ lengthSubtable = len(data) + 16
+ assert len(data) == (nGroups * 12) == (lengthSubtable - 16)
+ return (
+ struct.pack(
+ ">HHLLL",
+ self.format,
+ self.reserved,
+ lengthSubtable,
+ self.language,
+ nGroups,
+ )
+ + data
+ )
+
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ("format", self.format),
+ ("reserved", self.reserved),
+ ("length", self.length),
+ ("language", self.language),
+ ("nGroups", self.nGroups),
+ ],
+ )
+ writer.newline()
+ codes = sorted(self.cmap.items())
+ self._writeCodes(codes, writer)
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.format = safeEval(attrs["format"])
+ self.reserved = safeEval(attrs["reserved"])
+ self.length = safeEval(attrs["length"])
+ self.language = safeEval(attrs["language"])
+ self.nGroups = safeEval(attrs["nGroups"])
+ if not hasattr(self, "cmap"):
+ self.cmap = {}
+ cmap = self.cmap
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12(cmap_format_12_or_13):
+ _format_step = 1
- _format_step = 1
-
- def __init__(self, format=12):
- cmap_format_12_or_13.__init__(self, format)
+ def __init__(self, format=12):
+ cmap_format_12_or_13.__init__(self, format)
- def _computeGIDs(self, startingGlyph, numberOfGlyphs):
- return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
+ def _computeGIDs(self, startingGlyph, numberOfGlyphs):
+ return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
- def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
- return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
+ def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
+ return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
class cmap_format_13(cmap_format_12_or_13):
+ _format_step = 0
- _format_step = 0
+ def __init__(self, format=13):
+ cmap_format_12_or_13.__init__(self, format)
- def __init__(self, format=13):
- cmap_format_12_or_13.__init__(self, format)
+ def _computeGIDs(self, startingGlyph, numberOfGlyphs):
+ return [startingGlyph] * numberOfGlyphs
- def _computeGIDs(self, startingGlyph, numberOfGlyphs):
- return [startingGlyph] * numberOfGlyphs
-
- def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
- return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
+ def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
+ return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
def cvtToUVS(threeByteString):
- data = b"\0" + threeByteString
- val, = struct.unpack(">L", data)
- return val
+ data = b"\0" + threeByteString
+ (val,) = struct.unpack(">L", data)
+ return val
+
def cvtFromUVS(val):
- assert 0 <= val < 0x1000000
- fourByteString = struct.pack(">L", val)
- return fourByteString[1:]
+ assert 0 <= val < 0x1000000
+ fourByteString = struct.pack(">L", val)
+ return fourByteString[1:]
class cmap_format_14(CmapSubtable):
-
- def decompileHeader(self, data, ttFont):
- format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
- self.data = data[10:]
- self.length = length
- self.numVarSelectorRecords = numVarSelectorRecords
- self.ttFont = ttFont
- self.language = 0xFF # has no language.
-
- def decompile(self, data, ttFont):
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
- data = self.data
-
- self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
- uvsDict = {}
- recOffset = 0
- for n in range(self.numVarSelectorRecords):
- uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
- recOffset += 11
- varUVS = cvtToUVS(uvs)
- if defOVSOffset:
- startOffset = defOVSOffset - 10
- numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
- startOffset +=4
- for r in range(numValues):
- uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
- startOffset += 4
- firstBaseUV = cvtToUVS(uv)
- cnt = addtlCnt+1
- baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
- glyphList = [None]*cnt
- localUVList = zip(baseUVList, glyphList)
- try:
- uvsDict[varUVS].extend(localUVList)
- except KeyError:
- uvsDict[varUVS] = list(localUVList)
-
- if nonDefUVSOffset:
- startOffset = nonDefUVSOffset - 10
- numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
- startOffset +=4
- localUVList = []
- for r in range(numRecs):
- uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
- startOffset += 5
- uv = cvtToUVS(uv)
- glyphName = self.ttFont.getGlyphName(gid)
- localUVList.append((uv, glyphName))
- try:
- uvsDict[varUVS].extend(localUVList)
- except KeyError:
- uvsDict[varUVS] = localUVList
-
- self.uvsDict = uvsDict
-
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ])
- writer.newline()
- uvsDict = self.uvsDict
- uvsList = sorted(uvsDict.keys())
- for uvs in uvsList:
- uvList = uvsDict[uvs]
- uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
- for uv, gname in uvList:
- attrs = [("uv", hex(uv)), ("uvs", hex(uvs))]
- if gname is not None:
- attrs.append(("name", gname))
- writer.simpletag("map", attrs)
- writer.newline()
- writer.endtag(self.__class__.__name__)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
- if not hasattr(self, "cmap"):
- self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
- if not hasattr(self, "uvsDict"):
- self.uvsDict = {}
- uvsDict = self.uvsDict
-
- # For backwards compatibility reasons we accept "None" as an indicator
- # for "default mapping", unless the font actually has a glyph named
- # "None".
- _hasGlyphNamedNone = None
-
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "map":
- continue
- uvs = safeEval(attrs["uvs"])
- uv = safeEval(attrs["uv"])
- gname = attrs.get("name")
- if gname == "None":
- if _hasGlyphNamedNone is None:
- _hasGlyphNamedNone = "None" in ttFont.getGlyphOrder()
- if not _hasGlyphNamedNone:
- gname = None
- try:
- uvsDict[uvs].append((uv, gname))
- except KeyError:
- uvsDict[uvs] = [(uv, gname)]
-
- def compile(self, ttFont):
- if self.data:
- return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data
-
- uvsDict = self.uvsDict
- uvsList = sorted(uvsDict.keys())
- self.numVarSelectorRecords = len(uvsList)
- offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
- data = []
- varSelectorRecords =[]
- for uvs in uvsList:
- entryList = uvsDict[uvs]
-
- defList = [entry for entry in entryList if entry[1] is None]
- if defList:
- defList = [entry[0] for entry in defList]
- defOVSOffset = offset
- defList.sort()
-
- lastUV = defList[0]
- cnt = -1
- defRecs = []
- for defEntry in defList:
- cnt +=1
- if (lastUV+cnt) != defEntry:
- rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
- lastUV = defEntry
- defRecs.append(rec)
- cnt = 0
-
- rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
- defRecs.append(rec)
-
- numDefRecs = len(defRecs)
- data.append(struct.pack(">L", numDefRecs))
- data.extend(defRecs)
- offset += 4 + numDefRecs*4
- else:
- defOVSOffset = 0
-
- ndefList = [entry for entry in entryList if entry[1] is not None]
- if ndefList:
- nonDefUVSOffset = offset
- ndefList.sort()
- numNonDefRecs = len(ndefList)
- data.append(struct.pack(">L", numNonDefRecs))
- offset += 4 + numNonDefRecs*5
-
- for uv, gname in ndefList:
- gid = ttFont.getGlyphID(gname)
- ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
- data.append(ndrec)
- else:
- nonDefUVSOffset = 0
-
- vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
- varSelectorRecords.append(vrec)
-
- data = bytesjoin(varSelectorRecords) + bytesjoin(data)
- self.length = 10 + len(data)
- headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords)
-
- return headerdata + data
+ def decompileHeader(self, data, ttFont):
+ format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
+ self.data = data[10:]
+ self.length = length
+ self.numVarSelectorRecords = numVarSelectorRecords
+ self.ttFont = ttFont
+ self.language = 0xFF # has no language.
+
+ def decompile(self, data, ttFont):
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+ data = self.data
+
+ self.cmap = (
+ {}
+ ) # so that clients that expect this to exist in a cmap table won't fail.
+ uvsDict = {}
+ recOffset = 0
+ for n in range(self.numVarSelectorRecords):
+ uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(
+ ">3sLL", data[recOffset : recOffset + 11]
+ )
+ recOffset += 11
+ varUVS = cvtToUVS(uvs)
+ if defOVSOffset:
+ startOffset = defOVSOffset - 10
+ (numValues,) = struct.unpack(">L", data[startOffset : startOffset + 4])
+ startOffset += 4
+ for r in range(numValues):
+ uv, addtlCnt = struct.unpack(
+ ">3sB", data[startOffset : startOffset + 4]
+ )
+ startOffset += 4
+ firstBaseUV = cvtToUVS(uv)
+ cnt = addtlCnt + 1
+ baseUVList = list(range(firstBaseUV, firstBaseUV + cnt))
+ glyphList = [None] * cnt
+ localUVList = zip(baseUVList, glyphList)
+ try:
+ uvsDict[varUVS].extend(localUVList)
+ except KeyError:
+ uvsDict[varUVS] = list(localUVList)
+
+ if nonDefUVSOffset:
+ startOffset = nonDefUVSOffset - 10
+ (numRecs,) = struct.unpack(">L", data[startOffset : startOffset + 4])
+ startOffset += 4
+ localUVList = []
+ for r in range(numRecs):
+ uv, gid = struct.unpack(">3sH", data[startOffset : startOffset + 5])
+ startOffset += 5
+ uv = cvtToUVS(uv)
+ glyphName = self.ttFont.getGlyphName(gid)
+ localUVList.append((uv, glyphName))
+ try:
+ uvsDict[varUVS].extend(localUVList)
+ except KeyError:
+ uvsDict[varUVS] = localUVList
+
+ self.uvsDict = uvsDict
+
+ def toXML(self, writer, ttFont):
+ writer.begintag(
+ self.__class__.__name__,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ],
+ )
+ writer.newline()
+ uvsDict = self.uvsDict
+ uvsList = sorted(uvsDict.keys())
+ for uvs in uvsList:
+ uvList = uvsDict[uvs]
+ uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
+ for uv, gname in uvList:
+ attrs = [("uv", hex(uv)), ("uvs", hex(uvs))]
+ if gname is not None:
+ attrs.append(("name", gname))
+ writer.simpletag("map", attrs)
+ writer.newline()
+ writer.endtag(self.__class__.__name__)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
+ if not hasattr(self, "cmap"):
+ self.cmap = (
+ {}
+ ) # so that clients that expect this to exist in a cmap table won't fail.
+ if not hasattr(self, "uvsDict"):
+ self.uvsDict = {}
+ uvsDict = self.uvsDict
+
+ # For backwards compatibility reasons we accept "None" as an indicator
+ # for "default mapping", unless the font actually has a glyph named
+ # "None".
+ _hasGlyphNamedNone = None
+
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name != "map":
+ continue
+ uvs = safeEval(attrs["uvs"])
+ uv = safeEval(attrs["uv"])
+ gname = attrs.get("name")
+ if gname == "None":
+ if _hasGlyphNamedNone is None:
+ _hasGlyphNamedNone = "None" in ttFont.getGlyphOrder()
+ if not _hasGlyphNamedNone:
+ gname = None
+ try:
+ uvsDict[uvs].append((uv, gname))
+ except KeyError:
+ uvsDict[uvs] = [(uv, gname)]
+
+ def compile(self, ttFont):
+ if self.data:
+ return (
+ struct.pack(
+ ">HLL", self.format, self.length, self.numVarSelectorRecords
+ )
+ + self.data
+ )
+
+ uvsDict = self.uvsDict
+ uvsList = sorted(uvsDict.keys())
+ self.numVarSelectorRecords = len(uvsList)
+ offset = (
+ 10 + self.numVarSelectorRecords * 11
+ ) # current value is end of VarSelectorRecords block.
+ data = []
+ varSelectorRecords = []
+ for uvs in uvsList:
+ entryList = uvsDict[uvs]
+
+ defList = [entry for entry in entryList if entry[1] is None]
+ if defList:
+ defList = [entry[0] for entry in defList]
+ defOVSOffset = offset
+ defList.sort()
+
+ lastUV = defList[0]
+ cnt = -1
+ defRecs = []
+ for defEntry in defList:
+ cnt += 1
+ if (lastUV + cnt) != defEntry:
+ rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt - 1)
+ lastUV = defEntry
+ defRecs.append(rec)
+ cnt = 0
+
+ rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
+ defRecs.append(rec)
+
+ numDefRecs = len(defRecs)
+ data.append(struct.pack(">L", numDefRecs))
+ data.extend(defRecs)
+ offset += 4 + numDefRecs * 4
+ else:
+ defOVSOffset = 0
+
+ ndefList = [entry for entry in entryList if entry[1] is not None]
+ if ndefList:
+ nonDefUVSOffset = offset
+ ndefList.sort()
+ numNonDefRecs = len(ndefList)
+ data.append(struct.pack(">L", numNonDefRecs))
+ offset += 4 + numNonDefRecs * 5
+
+ for uv, gname in ndefList:
+ gid = ttFont.getGlyphID(gname)
+ ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
+ data.append(ndrec)
+ else:
+ nonDefUVSOffset = 0
+
+ vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
+ varSelectorRecords.append(vrec)
+
+ data = bytesjoin(varSelectorRecords) + bytesjoin(data)
+ self.length = 10 + len(data)
+ headerdata = struct.pack(
+ ">HLL", self.format, self.length, self.numVarSelectorRecords
+ )
+
+ return headerdata + data
class cmap_format_unknown(CmapSubtable):
+ def toXML(self, writer, ttFont):
+ cmapName = self.__class__.__name__[:12] + str(self.format)
+ writer.begintag(
+ cmapName,
+ [
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ],
+ )
+ writer.newline()
+ writer.dumphex(self.data)
+ writer.endtag(cmapName)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.data = readHex(content)
+ self.cmap = {}
+
+ def decompileHeader(self, data, ttFont):
+ self.language = 0 # dummy value
+ self.data = data
+
+ def decompile(self, data, ttFont):
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
+ if data is not None and ttFont is not None:
+ self.decompileHeader(data, ttFont)
+ else:
+ assert (
+ data is None and ttFont is None
+ ), "Need both data and ttFont arguments"
+
+ def compile(self, ttFont):
+ if self.data:
+ return self.data
+ else:
+ return None
- def toXML(self, writer, ttFont):
- cmapName = self.__class__.__name__[:12] + str(self.format)
- writer.begintag(cmapName, [
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ])
- writer.newline()
- writer.dumphex(self.data)
- writer.endtag(cmapName)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.data = readHex(content)
- self.cmap = {}
-
- def decompileHeader(self, data, ttFont):
- self.language = 0 # dummy value
- self.data = data
-
- def decompile(self, data, ttFont):
- # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
- # If not, someone is calling the subtable decompile() directly, and must provide both args.
- if data is not None and ttFont is not None:
- self.decompileHeader(data, ttFont)
- else:
- assert (data is None and ttFont is None), "Need both data and ttFont arguments"
-
- def compile(self, ttFont):
- if self.data:
- return self.data
- else:
- return None
cmap_classes = {
- 0: cmap_format_0,
- 2: cmap_format_2,
- 4: cmap_format_4,
- 6: cmap_format_6,
- 12: cmap_format_12,
- 13: cmap_format_13,
- 14: cmap_format_14,
+ 0: cmap_format_0,
+ 2: cmap_format_2,
+ 4: cmap_format_4,
+ 6: cmap_format_6,
+ 12: cmap_format_12,
+ 13: cmap_format_13,
+ 14: cmap_format_14,
}
diff --git a/Lib/fontTools/ttLib/tables/_c_v_a_r.py b/Lib/fontTools/ttLib/tables/_c_v_a_r.py
index a67efe02..6ea44dba 100644
--- a/Lib/fontTools/ttLib/tables/_c_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_c_v_a_r.py
@@ -1,8 +1,11 @@
from . import DefaultTable
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytesjoin
-from fontTools.ttLib.tables.TupleVariation import \
- compileTupleVariationStore, decompileTupleVariationStore, TupleVariation
+from fontTools.ttLib.tables.TupleVariation import (
+ compileTupleVariationStore,
+ decompileTupleVariationStore,
+ TupleVariation,
+)
# https://www.microsoft.com/typography/otspec/cvar.htm
@@ -34,18 +37,15 @@ class table__c_v_a_r(DefaultTable.DefaultTable):
pointCount=len(ttFont["cvt "].values),
axisTags=[axis.axisTag for axis in ttFont["fvar"].axes],
sharedTupleIndices={},
- useSharedPoints=useSharedPoints)
+ useSharedPoints=useSharedPoints,
+ )
header = {
"majorVersion": self.majorVersion,
"minorVersion": self.minorVersion,
"tupleVariationCount": tupleVariationCount,
"offsetToData": CVAR_HEADER_SIZE + len(tuples),
}
- return b''.join([
- sstruct.pack(CVAR_HEADER_FORMAT, header),
- tuples,
- data
- ])
+ return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data])
def decompile(self, data, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
@@ -55,10 +55,15 @@ class table__c_v_a_r(DefaultTable.DefaultTable):
self.minorVersion = header["minorVersion"]
assert self.majorVersion == 1, self.majorVersion
self.variations = decompileTupleVariationStore(
- tableTag=self.tableTag, axisTags=axisTags,
+ tableTag=self.tableTag,
+ axisTags=axisTags,
tupleVariationCount=header["tupleVariationCount"],
- pointCount=len(ttFont["cvt "].values), sharedTuples=None,
- data=data, pos=CVAR_HEADER_SIZE, dataPos=header["offsetToData"])
+ pointCount=len(ttFont["cvt "].values),
+ sharedTuples=None,
+ data=data,
+ pos=CVAR_HEADER_SIZE,
+ dataPos=header["offsetToData"],
+ )
def fromXML(self, name, attrs, content, ttFont):
if name == "version":
@@ -75,8 +80,7 @@ class table__c_v_a_r(DefaultTable.DefaultTable):
def toXML(self, writer, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- writer.simpletag("version",
- major=self.majorVersion, minor=self.minorVersion)
+ writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion)
writer.newline()
for var in self.variations:
var.toXML(writer, axisTags)
diff --git a/Lib/fontTools/ttLib/tables/_c_v_t.py b/Lib/fontTools/ttLib/tables/_c_v_t.py
index 26395c93..7f946775 100644
--- a/Lib/fontTools/ttLib/tables/_c_v_t.py
+++ b/Lib/fontTools/ttLib/tables/_c_v_t.py
@@ -3,43 +3,45 @@ from . import DefaultTable
import sys
import array
-class table__c_v_t(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- values = array.array("h")
- values.frombytes(data)
- if sys.byteorder != "big": values.byteswap()
- self.values = values
-
- def compile(self, ttFont):
- values = self.values[:]
- if sys.byteorder != "big": values.byteswap()
- return values.tobytes()
-
- def toXML(self, writer, ttFont):
- for i in range(len(self.values)):
- value = self.values[i]
- writer.simpletag("cv", value=value, index=i)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "values"):
- self.values = array.array("h")
- if name == "cv":
- index = safeEval(attrs["index"])
- value = safeEval(attrs["value"])
- for i in range(1 + index - len(self.values)):
- self.values.append(0)
- self.values[index] = value
-
- def __len__(self):
- return len(self.values)
-
- def __getitem__(self, index):
- return self.values[index]
-
- def __setitem__(self, index, value):
- self.values[index] = value
-
- def __delitem__(self, index):
- del self.values[index]
+class table__c_v_t(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ values = array.array("h")
+ values.frombytes(data)
+ if sys.byteorder != "big":
+ values.byteswap()
+ self.values = values
+
+ def compile(self, ttFont):
+ values = self.values[:]
+ if sys.byteorder != "big":
+ values.byteswap()
+ return values.tobytes()
+
+ def toXML(self, writer, ttFont):
+ for i in range(len(self.values)):
+ value = self.values[i]
+ writer.simpletag("cv", value=value, index=i)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "values"):
+ self.values = array.array("h")
+ if name == "cv":
+ index = safeEval(attrs["index"])
+ value = safeEval(attrs["value"])
+ for i in range(1 + index - len(self.values)):
+ self.values.append(0)
+ self.values[index] = value
+
+ def __len__(self):
+ return len(self.values)
+
+ def __getitem__(self, index):
+ return self.values[index]
+
+ def __setitem__(self, index, value):
+ self.values[index] = value
+
+ def __delitem__(self, index):
+ del self.values[index]
diff --git a/Lib/fontTools/ttLib/tables/_f_e_a_t.py b/Lib/fontTools/ttLib/tables/_f_e_a_t.py
index 079b514c..c9a48eff 100644
--- a/Lib/fontTools/ttLib/tables/_f_e_a_t.py
+++ b/Lib/fontTools/ttLib/tables/_f_e_a_t.py
@@ -2,10 +2,11 @@ from .otBase import BaseTTXConverter
class table__f_e_a_t(BaseTTXConverter):
- """The feature name table is an AAT (Apple Advanced Typography) table for
- storing font features, settings, and their human-readable names. It should
- not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS``
- tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_
- in the TrueType Reference Manual for more information on the structure and
- purpose of this table."""
- pass
+ """The feature name table is an AAT (Apple Advanced Typography) table for
+ storing font features, settings, and their human-readable names. It should
+ not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS``
+ tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_
+ in the TrueType Reference Manual for more information on the structure and
+ purpose of this table."""
+
+ pass
diff --git a/Lib/fontTools/ttLib/tables/_f_p_g_m.py b/Lib/fontTools/ttLib/tables/_f_p_g_m.py
index ec3576ce..df23041d 100644
--- a/Lib/fontTools/ttLib/tables/_f_p_g_m.py
+++ b/Lib/fontTools/ttLib/tables/_f_p_g_m.py
@@ -1,48 +1,49 @@
from . import DefaultTable
from . import ttProgram
-class table__f_p_g_m(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- program = ttProgram.Program()
- program.fromBytecode(data)
- self.program = program
-
- def compile(self, ttFont):
- return self.program.getBytecode()
-
- def toXML(self, writer, ttFont):
- self.program.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- program = ttProgram.Program()
- program.fromXML(name, attrs, content, ttFont)
- self.program = program
-
- def __bool__(self):
- """
- >>> fpgm = table__f_p_g_m()
- >>> bool(fpgm)
- False
- >>> p = ttProgram.Program()
- >>> fpgm.program = p
- >>> bool(fpgm)
- False
- >>> bc = bytearray([0])
- >>> p.fromBytecode(bc)
- >>> bool(fpgm)
- True
- >>> p.bytecode.pop()
- 0
- >>> bool(fpgm)
- False
- """
- return hasattr(self, 'program') and bool(self.program)
-
- __nonzero__ = __bool__
+class table__f_p_g_m(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ program = ttProgram.Program()
+ program.fromBytecode(data)
+ self.program = program
+
+ def compile(self, ttFont):
+ return self.program.getBytecode()
+
+ def toXML(self, writer, ttFont):
+ self.program.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ program = ttProgram.Program()
+ program.fromXML(name, attrs, content, ttFont)
+ self.program = program
+
+ def __bool__(self):
+ """
+ >>> fpgm = table__f_p_g_m()
+ >>> bool(fpgm)
+ False
+ >>> p = ttProgram.Program()
+ >>> fpgm.program = p
+ >>> bool(fpgm)
+ False
+ >>> bc = bytearray([0])
+ >>> p.fromBytecode(bc)
+ >>> bool(fpgm)
+ True
+ >>> p.bytecode.pop()
+ 0
+ >>> bool(fpgm)
+ False
+ """
+ return hasattr(self, "program") and bool(self.program)
+
+ __nonzero__ = __bool__
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/tables/_f_v_a_r.py b/Lib/fontTools/ttLib/tables/_f_v_a_r.py
index d7409195..062a9aa4 100644
--- a/Lib/fontTools/ttLib/tables/_f_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_f_v_a_r.py
@@ -41,6 +41,7 @@ FVAR_INSTANCE_FORMAT = """
flags: H
"""
+
class table__f_v_a_r(DefaultTable.DefaultTable):
dependencies = ["name"]
@@ -51,8 +52,9 @@ class table__f_v_a_r(DefaultTable.DefaultTable):
def compile(self, ttFont):
instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4)
- includePostScriptNames = any(instance.postscriptNameID != 0xFFFF
- for instance in self.instances)
+ includePostScriptNames = any(
+ instance.postscriptNameID != 0xFFFF for instance in self.instances
+ )
if includePostScriptNames:
instanceSize += 2
header = {
@@ -81,14 +83,14 @@ class table__f_v_a_r(DefaultTable.DefaultTable):
axisSize = header["axisSize"]
for _ in range(header["axisCount"]):
axis = Axis()
- axis.decompile(data[pos:pos+axisSize])
+ axis.decompile(data[pos : pos + axisSize])
self.axes.append(axis)
pos += axisSize
instanceSize = header["instanceSize"]
axisTags = [axis.axisTag for axis in self.axes]
for _ in range(header["instanceCount"]):
instance = NamedInstance()
- instance.decompile(data[pos:pos+instanceSize], axisTags)
+ instance.decompile(data[pos : pos + instanceSize], axisTags)
self.instances.append(instance)
pos += instanceSize
@@ -108,6 +110,7 @@ class table__f_v_a_r(DefaultTable.DefaultTable):
instance.fromXML(name, attrs, content, ttFont)
self.instances.append(instance)
+
class Axis(object):
def __init__(self):
self.axisTag = None
@@ -124,19 +127,23 @@ class Axis(object):
sstruct.unpack2(FVAR_AXIS_FORMAT, data, self)
def toXML(self, writer, ttFont):
- name = ttFont["name"].getDebugName(self.axisNameID)
+ name = (
+ ttFont["name"].getDebugName(self.axisNameID) if "name" in ttFont else None
+ )
if name is not None:
writer.newline()
writer.comment(name)
writer.newline()
writer.begintag("Axis")
writer.newline()
- for tag, value in [("AxisTag", self.axisTag),
- ("Flags", "0x%X" % self.flags),
- ("MinValue", fl2str(self.minValue, 16)),
- ("DefaultValue", fl2str(self.defaultValue, 16)),
- ("MaxValue", fl2str(self.maxValue, 16)),
- ("AxisNameID", str(self.axisNameID))]:
+ for tag, value in [
+ ("AxisTag", self.axisTag),
+ ("Flags", "0x%X" % self.flags),
+ ("MinValue", fl2str(self.minValue, 16)),
+ ("DefaultValue", fl2str(self.defaultValue, 16)),
+ ("MaxValue", fl2str(self.maxValue, 16)),
+ ("AxisNameID", str(self.axisNameID)),
+ ]:
writer.begintag(tag)
writer.write(value)
writer.endtag(tag)
@@ -145,17 +152,16 @@ class Axis(object):
writer.newline()
def fromXML(self, name, _attrs, content, ttFont):
- assert(name == "Axis")
+ assert name == "Axis"
for tag, _, value in filter(lambda t: type(t) is tuple, content):
- value = ''.join(value)
+ value = "".join(value)
if tag == "AxisTag":
self.axisTag = Tag(value)
- elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue",
- "AxisNameID"}:
+ elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", "AxisNameID"}:
setattr(
self,
tag[0].lower() + tag[1:],
- str2fl(value, 16) if tag.endswith("Value") else safeEval(value)
+ str2fl(value, 16) if tag.endswith("Value") else safeEval(value),
)
@@ -183,37 +189,54 @@ class NamedInstance(object):
self.coordinates[axis] = fi2fl(value, 16)
pos += 4
if pos + 2 <= len(data):
- self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
+ self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
else:
- self.postscriptNameID = 0xFFFF
+ self.postscriptNameID = 0xFFFF
def toXML(self, writer, ttFont):
- name = ttFont["name"].getDebugName(self.subfamilyNameID)
+ name = (
+ ttFont["name"].getDebugName(self.subfamilyNameID)
+ if "name" in ttFont
+ else None
+ )
if name is not None:
writer.newline()
writer.comment(name)
writer.newline()
- psname = ttFont["name"].getDebugName(self.postscriptNameID)
+ psname = (
+ ttFont["name"].getDebugName(self.postscriptNameID)
+ if "name" in ttFont
+ else None
+ )
if psname is not None:
- writer.comment(u"PostScript: " + psname)
+ writer.comment("PostScript: " + psname)
writer.newline()
- if self.postscriptNameID == 0xFFFF:
- writer.begintag("NamedInstance", flags=("0x%X" % self.flags),
- subfamilyNameID=self.subfamilyNameID)
+ if self.postscriptNameID == 0xFFFF:
+ writer.begintag(
+ "NamedInstance",
+ flags=("0x%X" % self.flags),
+ subfamilyNameID=self.subfamilyNameID,
+ )
else:
- writer.begintag("NamedInstance", flags=("0x%X" % self.flags),
- subfamilyNameID=self.subfamilyNameID,
- postscriptNameID=self.postscriptNameID, )
+ writer.begintag(
+ "NamedInstance",
+ flags=("0x%X" % self.flags),
+ subfamilyNameID=self.subfamilyNameID,
+ postscriptNameID=self.postscriptNameID,
+ )
writer.newline()
for axis in ttFont["fvar"].axes:
- writer.simpletag("coord", axis=axis.axisTag,
- value=fl2str(self.coordinates[axis.axisTag], 16))
+ writer.simpletag(
+ "coord",
+ axis=axis.axisTag,
+ value=fl2str(self.coordinates[axis.axisTag], 16),
+ )
writer.newline()
writer.endtag("NamedInstance")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
- assert(name == "NamedInstance")
+ assert name == "NamedInstance"
self.subfamilyNameID = safeEval(attrs["subfamilyNameID"])
self.flags = safeEval(attrs.get("flags", "0"))
if "postscriptNameID" in attrs:
diff --git a/Lib/fontTools/ttLib/tables/_g_a_s_p.py b/Lib/fontTools/ttLib/tables/_g_a_s_p.py
index 2c80913c..10c32a87 100644
--- a/Lib/fontTools/ttLib/tables/_g_a_s_p.py
+++ b/Lib/fontTools/ttLib/tables/_g_a_s_p.py
@@ -8,42 +8,48 @@ GASP_SYMMETRIC_SMOOTHING = 0x0008
GASP_DOGRAY = 0x0002
GASP_GRIDFIT = 0x0001
-class table__g_a_s_p(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- self.version, numRanges = struct.unpack(">HH", data[:4])
- assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
- data = data[4:]
- self.gaspRange = {}
- for i in range(numRanges):
- rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
- self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
- data = data[4:]
- assert not data, "too much data"
+class table__g_a_s_p(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ self.version, numRanges = struct.unpack(">HH", data[:4])
+ assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
+ data = data[4:]
+ self.gaspRange = {}
+ for i in range(numRanges):
+ rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
+ self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
+ data = data[4:]
+ assert not data, "too much data"
- def compile(self, ttFont):
- version = 0 # ignore self.version
- numRanges = len(self.gaspRange)
- data = b""
- items = sorted(self.gaspRange.items())
- for rangeMaxPPEM, rangeGaspBehavior in items:
- data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
- if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
- version = 1
- data = struct.pack(">HH", version, numRanges) + data
- return data
+ def compile(self, ttFont):
+ version = 0 # ignore self.version
+ numRanges = len(self.gaspRange)
+ data = b""
+ items = sorted(self.gaspRange.items())
+ for rangeMaxPPEM, rangeGaspBehavior in items:
+ data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
+ if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
+ version = 1
+ data = struct.pack(">HH", version, numRanges) + data
+ return data
- def toXML(self, writer, ttFont):
- items = sorted(self.gaspRange.items())
- for rangeMaxPPEM, rangeGaspBehavior in items:
- writer.simpletag("gaspRange", [
- ("rangeMaxPPEM", rangeMaxPPEM),
- ("rangeGaspBehavior", rangeGaspBehavior)])
- writer.newline()
+ def toXML(self, writer, ttFont):
+ items = sorted(self.gaspRange.items())
+ for rangeMaxPPEM, rangeGaspBehavior in items:
+ writer.simpletag(
+ "gaspRange",
+ [
+ ("rangeMaxPPEM", rangeMaxPPEM),
+ ("rangeGaspBehavior", rangeGaspBehavior),
+ ],
+ )
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if name != "gaspRange":
- return
- if not hasattr(self, "gaspRange"):
- self.gaspRange = {}
- self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"])
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "gaspRange":
+ return
+ if not hasattr(self, "gaspRange"):
+ self.gaspRange = {}
+ self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(
+ attrs["rangeGaspBehavior"]
+ )
diff --git a/Lib/fontTools/ttLib/tables/_g_l_y_f.py b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
index 745ef72b..bff0d92c 100644
--- a/Lib/fontTools/ttLib/tables/_g_l_y_f.py
+++ b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
@@ -4,16 +4,18 @@ from collections import namedtuple
from fontTools.misc import sstruct
from fontTools import ttLib
from fontTools import version
+from fontTools.misc.transform import DecomposedTransform
from fontTools.misc.textTools import tostr, safeEval, pad
-from fontTools.misc.arrayTools import calcIntBounds, pointInRect
+from fontTools.misc.arrayTools import updateBounds, pointInRect
from fontTools.misc.bezierTools import calcQuadraticBounds
from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi,
- floatToFixedToStr as fl2str,
- strToFixedToFloat as str2fl,
- otRound,
+ fixedToFloat as fi2fl,
+ floatToFixed as fl2fi,
+ floatToFixedToStr as fl2str,
+ strToFixedToFloat as str2fl,
)
+from fontTools.misc.roundTools import noRound, otRound
+from fontTools.misc.vector import Vector
from numbers import Number
from . import DefaultTable
from . import ttProgram
@@ -21,17 +23,22 @@ import sys
import struct
import array
import logging
+import math
import os
from fontTools.misc import xmlWriter
from fontTools.misc.filenames import userNameToFileName
from fontTools.misc.loggingTools import deprecateFunction
+from enum import IntFlag
+from functools import partial
+from types import SimpleNamespace
+from typing import Set
log = logging.getLogger(__name__)
# We compute the version the same as is computed in ttlib/__init__
# so that we can write 'ttLibVersion' attribute of the glyf TTX files
# when glyf is written to separate files.
-version = ".".join(version.split('.')[:2])
+version = ".".join(version.split(".")[:2])
#
# The Apple and MS rasterizers behave differently for
@@ -43,459 +50,526 @@ version = ".".join(version.split('.')[:2])
# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE
# (eg. Charcoal)...
#
-SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple
+SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple
class table__g_l_y_f(DefaultTable.DefaultTable):
- """Glyph Data Table
-
- This class represents the `glyf <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf>`_
- table, which contains outlines for glyphs in TrueType format. In many cases,
- it is easier to access and manipulate glyph outlines through the ``GlyphSet``
- object returned from :py:meth:`fontTools.ttLib.ttFont.getGlyphSet`::
-
- >> from fontTools.pens.boundsPen import BoundsPen
- >> glyphset = font.getGlyphSet()
- >> bp = BoundsPen(glyphset)
- >> glyphset["A"].draw(bp)
- >> bp.bounds
- (19, 0, 633, 716)
-
- However, this class can be used for low-level access to the ``glyf`` table data.
- Objects of this class support dictionary-like access, mapping glyph names to
- :py:class:`Glyph` objects::
-
- >> glyf = font["glyf"]
- >> len(glyf["Aacute"].components)
- 2
-
- Note that when adding glyphs to the font via low-level access to the ``glyf``
- table, the new glyphs must also be added to the ``hmtx``/``vmtx`` table::
-
- >> font["glyf"]["divisionslash"] = Glyph()
- >> font["hmtx"]["divisionslash"] = (640, 0)
-
- """
-
- # this attribute controls the amount of padding applied to glyph data upon compile.
- # Glyph lenghts are aligned to multiples of the specified value.
- # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means
- # no padding, except for when padding would allow to use short loca offsets.
- padding = 1
-
- def decompile(self, data, ttFont):
- loca = ttFont['loca']
- pos = int(loca[0])
- nextPos = 0
- noname = 0
- self.glyphs = {}
- self.glyphOrder = glyphOrder = ttFont.getGlyphOrder()
- for i in range(0, len(loca)-1):
- try:
- glyphName = glyphOrder[i]
- except IndexError:
- noname = noname + 1
- glyphName = 'ttxautoglyph%s' % i
- nextPos = int(loca[i+1])
- glyphdata = data[pos:nextPos]
- if len(glyphdata) != (nextPos - pos):
- raise ttLib.TTLibError("not enough 'glyf' table data")
- glyph = Glyph(glyphdata)
- self.glyphs[glyphName] = glyph
- pos = nextPos
- if len(data) - nextPos >= 4:
- log.warning(
- "too much 'glyf' table data: expected %d, received %d bytes",
- nextPos, len(data))
- if noname:
- log.warning('%s glyphs have no name', noname)
- if ttFont.lazy is False: # Be lazy for None and True
- self.ensureDecompiled()
-
- def ensureDecompiled(self, recurse=False):
- # The recurse argument is unused, but part of the signature of
- # ensureDecompiled across the library.
- for glyph in self.glyphs.values():
- glyph.expand(self)
-
- def compile(self, ttFont):
- if not hasattr(self, "glyphOrder"):
- self.glyphOrder = ttFont.getGlyphOrder()
- padding = self.padding
- assert padding in (0, 1, 2, 4)
- locations = []
- currentLocation = 0
- dataList = []
- recalcBBoxes = ttFont.recalcBBoxes
- for glyphName in self.glyphOrder:
- glyph = self.glyphs[glyphName]
- glyphData = glyph.compile(self, recalcBBoxes)
- if padding > 1:
- glyphData = pad(glyphData, size=padding)
- locations.append(currentLocation)
- currentLocation = currentLocation + len(glyphData)
- dataList.append(glyphData)
- locations.append(currentLocation)
-
- if padding == 1 and currentLocation < 0x20000:
- # See if we can pad any odd-lengthed glyphs to allow loca
- # table to use the short offsets.
- indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1]
- if indices and currentLocation + len(indices) < 0x20000:
- # It fits. Do it.
- for i in indices:
- dataList[i] += b'\0'
- currentLocation = 0
- for i,glyphData in enumerate(dataList):
- locations[i] = currentLocation
- currentLocation += len(glyphData)
- locations[len(dataList)] = currentLocation
-
- data = b''.join(dataList)
- if 'loca' in ttFont:
- ttFont['loca'].set(locations)
- if 'maxp' in ttFont:
- ttFont['maxp'].numGlyphs = len(self.glyphs)
- if not data:
- # As a special case when all glyph in the font are empty, add a zero byte
- # to the table, so that OTS doesn’t reject it, and to make the table work
- # on Windows as well.
- # See https://github.com/khaledhosny/ots/issues/52
- data = b"\0"
- return data
-
- def toXML(self, writer, ttFont, splitGlyphs=False):
- notice = (
- "The xMin, yMin, xMax and yMax values\n"
- "will be recalculated by the compiler.")
- glyphNames = ttFont.getGlyphNames()
- if not splitGlyphs:
- writer.newline()
- writer.comment(notice)
- writer.newline()
- writer.newline()
- numGlyphs = len(glyphNames)
- if splitGlyphs:
- path, ext = os.path.splitext(writer.file.name)
- existingGlyphFiles = set()
- for glyphName in glyphNames:
- glyph = self.get(glyphName)
- if glyph is None:
- log.warning("glyph '%s' does not exist in glyf table", glyphName)
- continue
- if glyph.numberOfContours:
- if splitGlyphs:
- glyphPath = userNameToFileName(
- tostr(glyphName, 'utf-8'),
- existingGlyphFiles,
- prefix=path + ".",
- suffix=ext)
- existingGlyphFiles.add(glyphPath.lower())
- glyphWriter = xmlWriter.XMLWriter(
- glyphPath, idlefunc=writer.idlefunc,
- newlinestr=writer.newlinestr)
- glyphWriter.begintag("ttFont", ttLibVersion=version)
- glyphWriter.newline()
- glyphWriter.begintag("glyf")
- glyphWriter.newline()
- glyphWriter.comment(notice)
- glyphWriter.newline()
- writer.simpletag("TTGlyph", src=os.path.basename(glyphPath))
- else:
- glyphWriter = writer
- glyphWriter.begintag('TTGlyph', [
- ("name", glyphName),
- ("xMin", glyph.xMin),
- ("yMin", glyph.yMin),
- ("xMax", glyph.xMax),
- ("yMax", glyph.yMax),
- ])
- glyphWriter.newline()
- glyph.toXML(glyphWriter, ttFont)
- glyphWriter.endtag('TTGlyph')
- glyphWriter.newline()
- if splitGlyphs:
- glyphWriter.endtag("glyf")
- glyphWriter.newline()
- glyphWriter.endtag("ttFont")
- glyphWriter.newline()
- glyphWriter.close()
- else:
- writer.simpletag('TTGlyph', name=glyphName)
- writer.comment("contains no outline data")
- if not splitGlyphs:
- writer.newline()
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name != "TTGlyph":
- return
- if not hasattr(self, "glyphs"):
- self.glyphs = {}
- if not hasattr(self, "glyphOrder"):
- self.glyphOrder = ttFont.getGlyphOrder()
- glyphName = attrs["name"]
- log.debug("unpacking glyph '%s'", glyphName)
- glyph = Glyph()
- for attr in ['xMin', 'yMin', 'xMax', 'yMax']:
- setattr(glyph, attr, safeEval(attrs.get(attr, '0')))
- self.glyphs[glyphName] = glyph
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- glyph.fromXML(name, attrs, content, ttFont)
- if not ttFont.recalcBBoxes:
- glyph.compact(self, 0)
-
- def setGlyphOrder(self, glyphOrder):
- """Sets the glyph order
-
- Args:
- glyphOrder ([str]): List of glyph names in order.
- """
- self.glyphOrder = glyphOrder
-
- def getGlyphName(self, glyphID):
- """Returns the name for the glyph with the given ID.
-
- Raises a ``KeyError`` if the glyph name is not found in the font.
- """
- return self.glyphOrder[glyphID]
-
- def getGlyphID(self, glyphName):
- """Returns the ID of the glyph with the given name.
-
- Raises a ``ValueError`` if the glyph is not found in the font.
- """
- # XXX optimize with reverse dict!!!
- return self.glyphOrder.index(glyphName)
-
- def removeHinting(self):
- """Removes TrueType hints from all glyphs in the glyphset.
-
- See :py:meth:`Glyph.removeHinting`.
- """
- for glyph in self.glyphs.values():
- glyph.removeHinting()
-
- def keys(self):
- return self.glyphs.keys()
-
- def has_key(self, glyphName):
- return glyphName in self.glyphs
-
- __contains__ = has_key
-
- def get(self, glyphName, default=None):
- glyph = self.glyphs.get(glyphName, default)
- if glyph is not None:
- glyph.expand(self)
- return glyph
-
- def __getitem__(self, glyphName):
- glyph = self.glyphs[glyphName]
- glyph.expand(self)
- return glyph
-
- def __setitem__(self, glyphName, glyph):
- self.glyphs[glyphName] = glyph
- if glyphName not in self.glyphOrder:
- self.glyphOrder.append(glyphName)
-
- def __delitem__(self, glyphName):
- del self.glyphs[glyphName]
- self.glyphOrder.remove(glyphName)
-
- def __len__(self):
- assert len(self.glyphOrder) == len(self.glyphs)
- return len(self.glyphs)
-
- def _getPhantomPoints(self, glyphName, hMetrics, vMetrics=None):
- """Compute the four "phantom points" for the given glyph from its bounding box
- and the horizontal and vertical advance widths and sidebearings stored in the
- ttFont's "hmtx" and "vmtx" tables.
-
- 'hMetrics' should be ttFont['hmtx'].metrics.
-
- 'vMetrics' should be ttFont['vmtx'].metrics if there is "vmtx" or None otherwise.
- If there is no vMetrics passed in, vertical phantom points are set to the zero coordinate.
-
- https://docs.microsoft.com/en-us/typography/opentype/spec/tt_instructing_glyphs#phantoms
- """
- glyph = self[glyphName]
- if not hasattr(glyph, 'xMin'):
- glyph.recalcBounds(self)
-
- horizontalAdvanceWidth, leftSideBearing = hMetrics[glyphName]
- leftSideX = glyph.xMin - leftSideBearing
- rightSideX = leftSideX + horizontalAdvanceWidth
-
- if vMetrics:
- verticalAdvanceWidth, topSideBearing = vMetrics[glyphName]
- topSideY = topSideBearing + glyph.yMax
- bottomSideY = topSideY - verticalAdvanceWidth
- else:
- bottomSideY = topSideY = 0
-
- return [
- (leftSideX, 0),
- (rightSideX, 0),
- (0, topSideY),
- (0, bottomSideY),
- ]
-
- def _getCoordinatesAndControls(self, glyphName, hMetrics, vMetrics=None):
- """Return glyph coordinates and controls as expected by "gvar" table.
-
- The coordinates includes four "phantom points" for the glyph metrics,
- as mandated by the "gvar" spec.
-
- The glyph controls is a namedtuple with the following attributes:
- - numberOfContours: -1 for composite glyphs.
- - endPts: list of indices of end points for each contour in simple
- glyphs, or component indices in composite glyphs (used for IUP
- optimization).
- - flags: array of contour point flags for simple glyphs (None for
- composite glyphs).
- - components: list of base glyph names (str) for each component in
- composite glyphs (None for simple glyphs).
-
- The "hMetrics" and vMetrics are used to compute the "phantom points" (see
- the "_getPhantomPoints" method).
-
- Return None if the requested glyphName is not present.
- """
- glyph = self.get(glyphName)
- if glyph is None:
- return None
- if glyph.isComposite():
- coords = GlyphCoordinates(
- [(getattr(c, 'x', 0), getattr(c, 'y', 0)) for c in glyph.components]
- )
- controls = _GlyphControls(
- numberOfContours=glyph.numberOfContours,
- endPts=list(range(len(glyph.components))),
- flags=None,
- components=[c.glyphName for c in glyph.components],
- )
- else:
- coords, endPts, flags = glyph.getCoordinates(self)
- coords = coords.copy()
- controls = _GlyphControls(
- numberOfContours=glyph.numberOfContours,
- endPts=endPts,
- flags=flags,
- components=None,
- )
- # Add phantom points for (left, right, top, bottom) positions.
- phantomPoints = self._getPhantomPoints(glyphName, hMetrics, vMetrics)
- coords.extend(phantomPoints)
- return coords, controls
-
- def _setCoordinates(self, glyphName, coord, hMetrics, vMetrics=None):
- """Set coordinates and metrics for the given glyph.
-
- "coord" is an array of GlyphCoordinates which must include the "phantom
- points" as the last four coordinates.
-
- Both the horizontal/vertical advances and left/top sidebearings in "hmtx"
- and "vmtx" tables (if any) are updated from four phantom points and
- the glyph's bounding boxes.
-
- The "hMetrics" and vMetrics are used to propagate "phantom points"
- into "hmtx" and "vmtx" tables if desired. (see the "_getPhantomPoints"
- method).
- """
- glyph = self[glyphName]
-
- # Handle phantom points for (left, right, top, bottom) positions.
- assert len(coord) >= 4
- leftSideX = coord[-4][0]
- rightSideX = coord[-3][0]
- topSideY = coord[-2][1]
- bottomSideY = coord[-1][1]
-
- coord = coord[:-4]
-
- if glyph.isComposite():
- assert len(coord) == len(glyph.components)
- for p, comp in zip(coord, glyph.components):
- if hasattr(comp, 'x'):
- comp.x, comp.y = p
- elif glyph.numberOfContours == 0:
- assert len(coord) == 0
- else:
- assert len(coord) == len(glyph.coordinates)
- glyph.coordinates = GlyphCoordinates(coord)
-
- glyph.recalcBounds(self)
-
- horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
- if horizontalAdvanceWidth < 0:
- # unlikely, but it can happen, see:
- # https://github.com/fonttools/fonttools/pull/1198
- horizontalAdvanceWidth = 0
- leftSideBearing = otRound(glyph.xMin - leftSideX)
- hMetrics[glyphName] = horizontalAdvanceWidth, leftSideBearing
-
- if vMetrics is not None:
- verticalAdvanceWidth = otRound(topSideY - bottomSideY)
- if verticalAdvanceWidth < 0: # unlikely but do the same as horizontal
- verticalAdvanceWidth = 0
- topSideBearing = otRound(topSideY - glyph.yMax)
- vMetrics[glyphName] = verticalAdvanceWidth, topSideBearing
-
-
- # Deprecated
-
- def _synthesizeVMetrics(self, glyphName, ttFont, defaultVerticalOrigin):
- """This method is wrong and deprecated.
- For rationale see:
- https://github.com/fonttools/fonttools/pull/2266/files#r613569473
- """
- vMetrics = getattr(ttFont.get('vmtx'), 'metrics', None)
- if vMetrics is None:
- verticalAdvanceWidth = ttFont["head"].unitsPerEm
- topSideY = getattr(ttFont.get('hhea'), 'ascent', None)
- if topSideY is None:
- if defaultVerticalOrigin is not None:
- topSideY = defaultVerticalOrigin
- else:
- topSideY = verticalAdvanceWidth
- glyph = self[glyphName]
- glyph.recalcBounds(self)
- topSideBearing = otRound(topSideY - glyph.yMax)
- vMetrics = {glyphName: (verticalAdvanceWidth, topSideBearing)}
- return vMetrics
-
- @deprecateFunction("use '_getPhantomPoints' instead", category=DeprecationWarning)
- def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None):
- """Old public name for self._getPhantomPoints().
- See: https://github.com/fonttools/fonttools/pull/2266"""
- hMetrics = ttFont['hmtx'].metrics
- vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
- return self._getPhantomPoints(glyphName, hMetrics, vMetrics)
-
- @deprecateFunction("use '_getCoordinatesAndControls' instead", category=DeprecationWarning)
- def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None):
- """Old public name for self._getCoordinatesAndControls().
- See: https://github.com/fonttools/fonttools/pull/2266"""
- hMetrics = ttFont['hmtx'].metrics
- vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
- return self._getCoordinatesAndControls(glyphName, hMetrics, vMetrics)
-
- @deprecateFunction("use '_setCoordinates' instead", category=DeprecationWarning)
- def setCoordinates(self, glyphName, ttFont):
- """Old public name for self._setCoordinates().
- See: https://github.com/fonttools/fonttools/pull/2266"""
- hMetrics = ttFont['hmtx'].metrics
- vMetrics = getattr(ttFont.get('vmtx'), 'metrics', None)
- self._setCoordinates(glyphName, hMetrics, vMetrics)
+ """Glyph Data Table
+
+ This class represents the `glyf <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf>`_
+ table, which contains outlines for glyphs in TrueType format. In many cases,
+ it is easier to access and manipulate glyph outlines through the ``GlyphSet``
+ object returned from :py:meth:`fontTools.ttLib.ttFont.getGlyphSet`::
+
+ >> from fontTools.pens.boundsPen import BoundsPen
+ >> glyphset = font.getGlyphSet()
+ >> bp = BoundsPen(glyphset)
+ >> glyphset["A"].draw(bp)
+ >> bp.bounds
+ (19, 0, 633, 716)
+
+ However, this class can be used for low-level access to the ``glyf`` table data.
+ Objects of this class support dictionary-like access, mapping glyph names to
+ :py:class:`Glyph` objects::
+
+ >> glyf = font["glyf"]
+ >> len(glyf["Aacute"].components)
+ 2
+
+ Note that when adding glyphs to the font via low-level access to the ``glyf``
+ table, the new glyphs must also be added to the ``hmtx``/``vmtx`` table::
+
+ >> font["glyf"]["divisionslash"] = Glyph()
+ >> font["hmtx"]["divisionslash"] = (640, 0)
+
+ """
+
+ dependencies = ["fvar"]
+
+ # this attribute controls the amount of padding applied to glyph data upon compile.
+ # Glyph lenghts are aligned to multiples of the specified value.
+ # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means
+ # no padding, except for when padding would allow to use short loca offsets.
+ padding = 1
+
+ def decompile(self, data, ttFont):
+ self.axisTags = (
+ [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else []
+ )
+ loca = ttFont["loca"]
+ pos = int(loca[0])
+ nextPos = 0
+ noname = 0
+ self.glyphs = {}
+ self.glyphOrder = glyphOrder = ttFont.getGlyphOrder()
+ self._reverseGlyphOrder = {}
+ for i in range(0, len(loca) - 1):
+ try:
+ glyphName = glyphOrder[i]
+ except IndexError:
+ noname = noname + 1
+ glyphName = "ttxautoglyph%s" % i
+ nextPos = int(loca[i + 1])
+ glyphdata = data[pos:nextPos]
+ if len(glyphdata) != (nextPos - pos):
+ raise ttLib.TTLibError("not enough 'glyf' table data")
+ glyph = Glyph(glyphdata)
+ self.glyphs[glyphName] = glyph
+ pos = nextPos
+ if len(data) - nextPos >= 4:
+ log.warning(
+ "too much 'glyf' table data: expected %d, received %d bytes",
+ nextPos,
+ len(data),
+ )
+ if noname:
+ log.warning("%s glyphs have no name", noname)
+ if ttFont.lazy is False: # Be lazy for None and True
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ for glyph in self.glyphs.values():
+ glyph.expand(self)
+
+ def compile(self, ttFont):
+ self.axisTags = (
+ [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else []
+ )
+ if not hasattr(self, "glyphOrder"):
+ self.glyphOrder = ttFont.getGlyphOrder()
+ padding = self.padding
+ assert padding in (0, 1, 2, 4)
+ locations = []
+ currentLocation = 0
+ dataList = []
+ recalcBBoxes = ttFont.recalcBBoxes
+ boundsDone = set()
+ for glyphName in self.glyphOrder:
+ glyph = self.glyphs[glyphName]
+ glyphData = glyph.compile(self, recalcBBoxes, boundsDone=boundsDone)
+ if padding > 1:
+ glyphData = pad(glyphData, size=padding)
+ locations.append(currentLocation)
+ currentLocation = currentLocation + len(glyphData)
+ dataList.append(glyphData)
+ locations.append(currentLocation)
+
+ if padding == 1 and currentLocation < 0x20000:
+ # See if we can pad any odd-lengthed glyphs to allow loca
+ # table to use the short offsets.
+ indices = [
+ i for i, glyphData in enumerate(dataList) if len(glyphData) % 2 == 1
+ ]
+ if indices and currentLocation + len(indices) < 0x20000:
+ # It fits. Do it.
+ for i in indices:
+ dataList[i] += b"\0"
+ currentLocation = 0
+ for i, glyphData in enumerate(dataList):
+ locations[i] = currentLocation
+ currentLocation += len(glyphData)
+ locations[len(dataList)] = currentLocation
+
+ data = b"".join(dataList)
+ if "loca" in ttFont:
+ ttFont["loca"].set(locations)
+ if "maxp" in ttFont:
+ ttFont["maxp"].numGlyphs = len(self.glyphs)
+ if not data:
+ # As a special case when all glyph in the font are empty, add a zero byte
+ # to the table, so that OTS doesn’t reject it, and to make the table work
+ # on Windows as well.
+ # See https://github.com/khaledhosny/ots/issues/52
+ data = b"\0"
+ return data
+
+ def toXML(self, writer, ttFont, splitGlyphs=False):
+ notice = (
+ "The xMin, yMin, xMax and yMax values\n"
+ "will be recalculated by the compiler."
+ )
+ glyphNames = ttFont.getGlyphNames()
+ if not splitGlyphs:
+ writer.newline()
+ writer.comment(notice)
+ writer.newline()
+ writer.newline()
+ numGlyphs = len(glyphNames)
+ if splitGlyphs:
+ path, ext = os.path.splitext(writer.file.name)
+ existingGlyphFiles = set()
+ for glyphName in glyphNames:
+ glyph = self.get(glyphName)
+ if glyph is None:
+ log.warning("glyph '%s' does not exist in glyf table", glyphName)
+ continue
+ if glyph.numberOfContours:
+ if splitGlyphs:
+ glyphPath = userNameToFileName(
+ tostr(glyphName, "utf-8"),
+ existingGlyphFiles,
+ prefix=path + ".",
+ suffix=ext,
+ )
+ existingGlyphFiles.add(glyphPath.lower())
+ glyphWriter = xmlWriter.XMLWriter(
+ glyphPath,
+ idlefunc=writer.idlefunc,
+ newlinestr=writer.newlinestr,
+ )
+ glyphWriter.begintag("ttFont", ttLibVersion=version)
+ glyphWriter.newline()
+ glyphWriter.begintag("glyf")
+ glyphWriter.newline()
+ glyphWriter.comment(notice)
+ glyphWriter.newline()
+ writer.simpletag("TTGlyph", src=os.path.basename(glyphPath))
+ else:
+ glyphWriter = writer
+ glyphWriter.begintag(
+ "TTGlyph",
+ [
+ ("name", glyphName),
+ ("xMin", glyph.xMin),
+ ("yMin", glyph.yMin),
+ ("xMax", glyph.xMax),
+ ("yMax", glyph.yMax),
+ ],
+ )
+ glyphWriter.newline()
+ glyph.toXML(glyphWriter, ttFont)
+ glyphWriter.endtag("TTGlyph")
+ glyphWriter.newline()
+ if splitGlyphs:
+ glyphWriter.endtag("glyf")
+ glyphWriter.newline()
+ glyphWriter.endtag("ttFont")
+ glyphWriter.newline()
+ glyphWriter.close()
+ else:
+ writer.simpletag("TTGlyph", name=glyphName)
+ writer.comment("contains no outline data")
+ if not splitGlyphs:
+ writer.newline()
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "TTGlyph":
+ return
+ if not hasattr(self, "glyphs"):
+ self.glyphs = {}
+ if not hasattr(self, "glyphOrder"):
+ self.glyphOrder = ttFont.getGlyphOrder()
+ glyphName = attrs["name"]
+ log.debug("unpacking glyph '%s'", glyphName)
+ glyph = Glyph()
+ for attr in ["xMin", "yMin", "xMax", "yMax"]:
+ setattr(glyph, attr, safeEval(attrs.get(attr, "0")))
+ self.glyphs[glyphName] = glyph
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ glyph.fromXML(name, attrs, content, ttFont)
+ if not ttFont.recalcBBoxes:
+ glyph.compact(self, 0)
+
+ def setGlyphOrder(self, glyphOrder):
+ """Sets the glyph order
+
+ Args:
+ glyphOrder ([str]): List of glyph names in order.
+ """
+ self.glyphOrder = glyphOrder
+ self._reverseGlyphOrder = {}
+
+ def getGlyphName(self, glyphID):
+ """Returns the name for the glyph with the given ID.
+
+ Raises a ``KeyError`` if the glyph name is not found in the font.
+ """
+ return self.glyphOrder[glyphID]
+
+ def _buildReverseGlyphOrderDict(self):
+ self._reverseGlyphOrder = d = {}
+ for glyphID, glyphName in enumerate(self.glyphOrder):
+ d[glyphName] = glyphID
+
+ def getGlyphID(self, glyphName):
+ """Returns the ID of the glyph with the given name.
+
+ Raises a ``ValueError`` if the glyph is not found in the font.
+ """
+ glyphOrder = self.glyphOrder
+ id = getattr(self, "_reverseGlyphOrder", {}).get(glyphName)
+ if id is None or id >= len(glyphOrder) or glyphOrder[id] != glyphName:
+ self._buildReverseGlyphOrderDict()
+ id = self._reverseGlyphOrder.get(glyphName)
+ if id is None:
+ raise ValueError(glyphName)
+ return id
+
+ def removeHinting(self):
+ """Removes TrueType hints from all glyphs in the glyphset.
+
+ See :py:meth:`Glyph.removeHinting`.
+ """
+ for glyph in self.glyphs.values():
+ glyph.removeHinting()
+
+ def keys(self):
+ return self.glyphs.keys()
+
+ def has_key(self, glyphName):
+ return glyphName in self.glyphs
+
+ __contains__ = has_key
+
+ def get(self, glyphName, default=None):
+ glyph = self.glyphs.get(glyphName, default)
+ if glyph is not None:
+ glyph.expand(self)
+ return glyph
+
+ def __getitem__(self, glyphName):
+ glyph = self.glyphs[glyphName]
+ glyph.expand(self)
+ return glyph
+
+ def __setitem__(self, glyphName, glyph):
+ self.glyphs[glyphName] = glyph
+ if glyphName not in self.glyphOrder:
+ self.glyphOrder.append(glyphName)
+
+ def __delitem__(self, glyphName):
+ del self.glyphs[glyphName]
+ self.glyphOrder.remove(glyphName)
+
+ def __len__(self):
+ assert len(self.glyphOrder) == len(self.glyphs)
+ return len(self.glyphs)
+
+ def _getPhantomPoints(self, glyphName, hMetrics, vMetrics=None):
+ """Compute the four "phantom points" for the given glyph from its bounding box
+ and the horizontal and vertical advance widths and sidebearings stored in the
+ ttFont's "hmtx" and "vmtx" tables.
+
+ 'hMetrics' should be ttFont['hmtx'].metrics.
+
+ 'vMetrics' should be ttFont['vmtx'].metrics if there is "vmtx" or None otherwise.
+ If there is no vMetrics passed in, vertical phantom points are set to the zero coordinate.
+
+ https://docs.microsoft.com/en-us/typography/opentype/spec/tt_instructing_glyphs#phantoms
+ """
+ glyph = self[glyphName]
+ if not hasattr(glyph, "xMin"):
+ glyph.recalcBounds(self)
+
+ horizontalAdvanceWidth, leftSideBearing = hMetrics[glyphName]
+ leftSideX = glyph.xMin - leftSideBearing
+ rightSideX = leftSideX + horizontalAdvanceWidth
+
+ if vMetrics:
+ verticalAdvanceWidth, topSideBearing = vMetrics[glyphName]
+ topSideY = topSideBearing + glyph.yMax
+ bottomSideY = topSideY - verticalAdvanceWidth
+ else:
+ bottomSideY = topSideY = 0
+
+ return [
+ (leftSideX, 0),
+ (rightSideX, 0),
+ (0, topSideY),
+ (0, bottomSideY),
+ ]
+
+ def _getCoordinatesAndControls(
+ self, glyphName, hMetrics, vMetrics=None, *, round=otRound
+ ):
+ """Return glyph coordinates and controls as expected by "gvar" table.
+
+ The coordinates includes four "phantom points" for the glyph metrics,
+ as mandated by the "gvar" spec.
+
+ The glyph controls is a namedtuple with the following attributes:
+ - numberOfContours: -1 for composite glyphs.
+ - endPts: list of indices of end points for each contour in simple
+ glyphs, or component indices in composite glyphs (used for IUP
+ optimization).
+ - flags: array of contour point flags for simple glyphs (None for
+ composite glyphs).
+ - components: list of base glyph names (str) for each component in
+ composite glyphs (None for simple glyphs).
+
+ The "hMetrics" and vMetrics are used to compute the "phantom points" (see
+ the "_getPhantomPoints" method).
+
+ Return None if the requested glyphName is not present.
+ """
+ glyph = self.get(glyphName)
+ if glyph is None:
+ return None
+ if glyph.isComposite():
+ coords = GlyphCoordinates(
+ [(getattr(c, "x", 0), getattr(c, "y", 0)) for c in glyph.components]
+ )
+ controls = _GlyphControls(
+ numberOfContours=glyph.numberOfContours,
+ endPts=list(range(len(glyph.components))),
+ flags=None,
+ components=[
+ (c.glyphName, getattr(c, "transform", None))
+ for c in glyph.components
+ ],
+ )
+ elif glyph.isVarComposite():
+ coords = []
+ controls = []
+
+ for component in glyph.components:
+ (
+ componentCoords,
+ componentControls,
+ ) = component.getCoordinatesAndControls()
+ coords.extend(componentCoords)
+ controls.extend(componentControls)
+
+ coords = GlyphCoordinates(coords)
+
+ controls = _GlyphControls(
+ numberOfContours=glyph.numberOfContours,
+ endPts=list(range(len(coords))),
+ flags=None,
+ components=[
+ (c.glyphName, getattr(c, "flags", None)) for c in glyph.components
+ ],
+ )
+
+ else:
+ coords, endPts, flags = glyph.getCoordinates(self)
+ coords = coords.copy()
+ controls = _GlyphControls(
+ numberOfContours=glyph.numberOfContours,
+ endPts=endPts,
+ flags=flags,
+ components=None,
+ )
+ # Add phantom points for (left, right, top, bottom) positions.
+ phantomPoints = self._getPhantomPoints(glyphName, hMetrics, vMetrics)
+ coords.extend(phantomPoints)
+ coords.toInt(round=round)
+ return coords, controls
+
+ def _setCoordinates(self, glyphName, coord, hMetrics, vMetrics=None):
+ """Set coordinates and metrics for the given glyph.
+
+ "coord" is an array of GlyphCoordinates which must include the "phantom
+ points" as the last four coordinates.
+
+ Both the horizontal/vertical advances and left/top sidebearings in "hmtx"
+ and "vmtx" tables (if any) are updated from four phantom points and
+ the glyph's bounding boxes.
+
+ The "hMetrics" and vMetrics are used to propagate "phantom points"
+ into "hmtx" and "vmtx" tables if desired. (see the "_getPhantomPoints"
+ method).
+ """
+ glyph = self[glyphName]
+
+ # Handle phantom points for (left, right, top, bottom) positions.
+ assert len(coord) >= 4
+ leftSideX = coord[-4][0]
+ rightSideX = coord[-3][0]
+ topSideY = coord[-2][1]
+ bottomSideY = coord[-1][1]
+
+ coord = coord[:-4]
+
+ if glyph.isComposite():
+ assert len(coord) == len(glyph.components)
+ for p, comp in zip(coord, glyph.components):
+ if hasattr(comp, "x"):
+ comp.x, comp.y = p
+ elif glyph.isVarComposite():
+ for comp in glyph.components:
+ coord = comp.setCoordinates(coord)
+ assert not coord
+ elif glyph.numberOfContours == 0:
+ assert len(coord) == 0
+ else:
+ assert len(coord) == len(glyph.coordinates)
+ glyph.coordinates = GlyphCoordinates(coord)
+
+ glyph.recalcBounds(self, boundsDone=set())
+
+ horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
+ if horizontalAdvanceWidth < 0:
+ # unlikely, but it can happen, see:
+ # https://github.com/fonttools/fonttools/pull/1198
+ horizontalAdvanceWidth = 0
+ leftSideBearing = otRound(glyph.xMin - leftSideX)
+ hMetrics[glyphName] = horizontalAdvanceWidth, leftSideBearing
+
+ if vMetrics is not None:
+ verticalAdvanceWidth = otRound(topSideY - bottomSideY)
+ if verticalAdvanceWidth < 0: # unlikely but do the same as horizontal
+ verticalAdvanceWidth = 0
+ topSideBearing = otRound(topSideY - glyph.yMax)
+ vMetrics[glyphName] = verticalAdvanceWidth, topSideBearing
+
+ # Deprecated
+
+ def _synthesizeVMetrics(self, glyphName, ttFont, defaultVerticalOrigin):
+ """This method is wrong and deprecated.
+ For rationale see:
+ https://github.com/fonttools/fonttools/pull/2266/files#r613569473
+ """
+ vMetrics = getattr(ttFont.get("vmtx"), "metrics", None)
+ if vMetrics is None:
+ verticalAdvanceWidth = ttFont["head"].unitsPerEm
+ topSideY = getattr(ttFont.get("hhea"), "ascent", None)
+ if topSideY is None:
+ if defaultVerticalOrigin is not None:
+ topSideY = defaultVerticalOrigin
+ else:
+ topSideY = verticalAdvanceWidth
+ glyph = self[glyphName]
+ glyph.recalcBounds(self)
+ topSideBearing = otRound(topSideY - glyph.yMax)
+ vMetrics = {glyphName: (verticalAdvanceWidth, topSideBearing)}
+ return vMetrics
+
+ @deprecateFunction("use '_getPhantomPoints' instead", category=DeprecationWarning)
+ def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ """Old public name for self._getPhantomPoints().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont["hmtx"].metrics
+ vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
+ return self._getPhantomPoints(glyphName, hMetrics, vMetrics)
+
+ @deprecateFunction(
+ "use '_getCoordinatesAndControls' instead", category=DeprecationWarning
+ )
+ def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None):
+ """Old public name for self._getCoordinatesAndControls().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont["hmtx"].metrics
+ vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin)
+ return self._getCoordinatesAndControls(glyphName, hMetrics, vMetrics)
+
+ @deprecateFunction("use '_setCoordinates' instead", category=DeprecationWarning)
+ def setCoordinates(self, glyphName, ttFont):
+ """Old public name for self._setCoordinates().
+ See: https://github.com/fonttools/fonttools/pull/2266"""
+ hMetrics = ttFont["hmtx"].metrics
+ vMetrics = getattr(ttFont.get("vmtx"), "metrics", None)
+ self._setCoordinates(glyphName, hMetrics, vMetrics)
_GlyphControls = namedtuple(
- "_GlyphControls", "numberOfContours endPts flags components"
+ "_GlyphControls", "numberOfContours endPts flags components"
)
@@ -513,1338 +587,2094 @@ flagOnCurve = 0x01
flagXShort = 0x02
flagYShort = 0x04
flagRepeat = 0x08
-flagXsame = 0x10
+flagXsame = 0x10
flagYsame = 0x20
flagOverlapSimple = 0x40
-flagReserved = 0x80
+flagCubic = 0x80
# These flags are kept for XML output after decompiling the coordinates
-keepFlags = flagOnCurve + flagOverlapSimple
+keepFlags = flagOnCurve + flagOverlapSimple + flagCubic
_flagSignBytes = {
- 0: 2,
- flagXsame: 0,
- flagXShort|flagXsame: +1,
- flagXShort: -1,
- flagYsame: 0,
- flagYShort|flagYsame: +1,
- flagYShort: -1,
+ 0: 2,
+ flagXsame: 0,
+ flagXShort | flagXsame: +1,
+ flagXShort: -1,
+ flagYsame: 0,
+ flagYShort | flagYsame: +1,
+ flagYShort: -1,
}
+
def flagBest(x, y, onCurve):
- """For a given x,y delta pair, returns the flag that packs this pair
- most efficiently, as well as the number of byte cost of such flag."""
-
- flag = flagOnCurve if onCurve else 0
- cost = 0
- # do x
- if x == 0:
- flag = flag | flagXsame
- elif -255 <= x <= 255:
- flag = flag | flagXShort
- if x > 0:
- flag = flag | flagXsame
- cost += 1
- else:
- cost += 2
- # do y
- if y == 0:
- flag = flag | flagYsame
- elif -255 <= y <= 255:
- flag = flag | flagYShort
- if y > 0:
- flag = flag | flagYsame
- cost += 1
- else:
- cost += 2
- return flag, cost
+ """For a given x,y delta pair, returns the flag that packs this pair
+ most efficiently, as well as the number of byte cost of such flag."""
+
+ flag = flagOnCurve if onCurve else 0
+ cost = 0
+ # do x
+ if x == 0:
+ flag = flag | flagXsame
+ elif -255 <= x <= 255:
+ flag = flag | flagXShort
+ if x > 0:
+ flag = flag | flagXsame
+ cost += 1
+ else:
+ cost += 2
+ # do y
+ if y == 0:
+ flag = flag | flagYsame
+ elif -255 <= y <= 255:
+ flag = flag | flagYShort
+ if y > 0:
+ flag = flag | flagYsame
+ cost += 1
+ else:
+ cost += 2
+ return flag, cost
+
def flagFits(newFlag, oldFlag, mask):
- newBytes = _flagSignBytes[newFlag & mask]
- oldBytes = _flagSignBytes[oldFlag & mask]
- return newBytes == oldBytes or abs(newBytes) > abs(oldBytes)
+ newBytes = _flagSignBytes[newFlag & mask]
+ oldBytes = _flagSignBytes[oldFlag & mask]
+ return newBytes == oldBytes or abs(newBytes) > abs(oldBytes)
+
def flagSupports(newFlag, oldFlag):
- return ((oldFlag & flagOnCurve) == (newFlag & flagOnCurve) and
- flagFits(newFlag, oldFlag, flagXsame|flagXShort) and
- flagFits(newFlag, oldFlag, flagYsame|flagYShort))
+ return (
+ (oldFlag & flagOnCurve) == (newFlag & flagOnCurve)
+ and flagFits(newFlag, oldFlag, flagXsame | flagXShort)
+ and flagFits(newFlag, oldFlag, flagYsame | flagYShort)
+ )
+
def flagEncodeCoord(flag, mask, coord, coordBytes):
- byteCount = _flagSignBytes[flag & mask]
- if byteCount == 1:
- coordBytes.append(coord)
- elif byteCount == -1:
- coordBytes.append(-coord)
- elif byteCount == 2:
- coordBytes.extend(struct.pack('>h', coord))
+ byteCount = _flagSignBytes[flag & mask]
+ if byteCount == 1:
+ coordBytes.append(coord)
+ elif byteCount == -1:
+ coordBytes.append(-coord)
+ elif byteCount == 2:
+ coordBytes.extend(struct.pack(">h", coord))
+
def flagEncodeCoords(flag, x, y, xBytes, yBytes):
    # Encode one (x, y) delta pair into the separate x and y byte streams,
    # each as dictated by the flag's short/same bits.
    flagEncodeCoord(flag, flagXsame | flagXShort, x, xBytes)
    flagEncodeCoord(flag, flagYsame | flagYShort, y, yBytes)
+
+
# Composite glyph component flags (the 16-bit flag word of each component
# record); see the OpenType 'glyf' specification, "Composite Glyph Flags".
ARG_1_AND_2_ARE_WORDS = 0x0001  # if set args are words otherwise they are bytes
ARGS_ARE_XY_VALUES = 0x0002  # if set args are xy values, otherwise they are points
ROUND_XY_TO_GRID = 0x0004  # for the xy values if above is true
WE_HAVE_A_SCALE = 0x0008  # Sx = Sy, otherwise scale == 1.0
NON_OVERLAPPING = 0x0010  # set to same value for all components (obsolete!)
MORE_COMPONENTS = 0x0020  # indicates at least one more glyph after this one
WE_HAVE_AN_X_AND_Y_SCALE = 0x0040  # Sx, Sy
WE_HAVE_A_TWO_BY_TWO = 0x0080  # t00, t01, t10, t11
WE_HAVE_INSTRUCTIONS = 0x0100  # instructions follow
USE_MY_METRICS = 0x0200  # apply these metrics to parent glyph
OVERLAP_COMPOUND = 0x0400  # used by Apple in GX fonts
SCALED_COMPONENT_OFFSET = 0x0800  # composite designed to have the component offset scaled (designed for Apple)
UNSCALED_COMPONENT_OFFSET = 0x1000  # composite designed not to have the component offset scaled (designed for MS)


# Aggregate (points, contours, nesting depth) totals returned by
# Glyph.getCompositeMaxpValues(), consumed when building the maxp table.
CompositeMaxpValues = namedtuple(
    "CompositeMaxpValues", ["nPoints", "nContours", "maxComponentDepth"]
)
class Glyph(object):
    """This class represents an individual TrueType glyph.

    TrueType glyph objects come in two flavours: simple and composite. Simple
    glyph objects contain contours, represented via the ``.coordinates``,
    ``.flags``, ``.numberOfContours``, and ``.endPtsOfContours`` attributes;
    composite glyphs contain components, available through the ``.components``
    attributes.

    Because the ``.coordinates`` attribute (and other simple glyph attributes
    mentioned above) is only set on simple glyphs and the ``.components``
    attribute is only set on composite glyphs, use :py:meth:`isComposite`
    to test which flavour you have before accessing the data.

    For a composite glyph, the components can also be accessed via array-like
    access::

        >> assert(font["glyf"]["Aacute"].isComposite())
        >> font["glyf"]["Aacute"][0]
        <fontTools.ttLib.tables._g_l_y_f.GlyphComponent at 0x1027b2ee0>

    """

    def __init__(self, data=b""):
        # Keep the raw bytes around lazily; expand() unpacks them on demand.
        if data:
            self.data = data
        else:
            # empty glyph: no contours, nothing to unpack
            self.numberOfContours = 0
+
+ def compact(self, glyfTable, recalcBBoxes=True):
+ data = self.compile(glyfTable, recalcBBoxes)
+ self.__dict__.clear()
+ self.data = data
+
    def expand(self, glyfTable):
        """Unpack ``self.data`` into header fields plus contours, components,
        or variable components; no-op if the glyph is already unpacked."""
        if not hasattr(self, "data"):
            # already unpacked
            return
        if not self.data:
            # empty char
            del self.data
            self.numberOfContours = 0
            return
        dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self)
        del self.data
        # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in
        # some glyphs; decompileCoordinates assumes that there's at least
        # one, so short-circuit here.
        if self.numberOfContours == 0:
            return
        if self.isComposite():
            self.decompileComponents(data, glyfTable)
        elif self.isVarComposite():
            self.decompileVarComponents(data, glyfTable)
        else:
            self.decompileCoordinates(data)
+
    def compile(self, glyfTable, recalcBBoxes=True, *, boundsDone=None):
        """Serialize the glyph back to binary glyf data.

        If the glyph is still packed and no bbox recalculation is requested,
        the stored bytes are returned unchanged. ``boundsDone`` is an optional
        set of glyph names whose bounds are already up to date (passed through
        to recalcBounds).
        """
        if hasattr(self, "data"):
            if recalcBBoxes:
                # must unpack glyph in order to recalculate bounding box
                self.expand(glyfTable)
            else:
                return self.data
        if self.numberOfContours == 0:
            return b""

        if recalcBBoxes:
            self.recalcBounds(glyfTable, boundsDone=boundsDone)

        data = sstruct.pack(glyphHeaderFormat, self)
        if self.isComposite():
            data = data + self.compileComponents(glyfTable)
        elif self.isVarComposite():
            data = data + self.compileVarComponents(glyfTable)
        else:
            data = data + self.compileCoordinates()
        return data
+
    def toXML(self, writer, ttFont):
        """Dump the glyph to TTX: component elements for (var)composite
        glyphs, <contour>/<pt> elements for simple glyphs, followed by an
        <instructions> element when a hinting program is present."""
        if self.isComposite():
            for compo in self.components:
                compo.toXML(writer, ttFont)
            haveInstructions = hasattr(self, "program")
        elif self.isVarComposite():
            for compo in self.components:
                compo.toXML(writer, ttFont)
            haveInstructions = False  # VarComposite glyphs carry no hinting
        else:
            last = 0
            for i in range(self.numberOfContours):
                writer.begintag("contour")
                writer.newline()
                for j in range(last, self.endPtsOfContours[i] + 1):
                    attrs = [
                        ("x", self.coordinates[j][0]),
                        ("y", self.coordinates[j][1]),
                        ("on", self.flags[j] & flagOnCurve),
                    ]
                    if self.flags[j] & flagOverlapSimple:
                        # Apple's rasterizer uses flagOverlapSimple in the first contour/first pt to flag glyphs that contain overlapping contours
                        attrs.append(("overlap", 1))
                    if self.flags[j] & flagCubic:
                        attrs.append(("cubic", 1))
                    writer.simpletag("pt", attrs)
                    writer.newline()
                last = self.endPtsOfContours[i] + 1
                writer.endtag("contour")
                writer.newline()
            haveInstructions = self.numberOfContours > 0
        if haveInstructions:
            if self.program:
                writer.begintag("instructions")
                writer.newline()
                self.program.toXML(writer, ttFont)
                writer.endtag("instructions")
            else:
                # empty program: emit a self-closing tag
                writer.simpletag("instructions")
            writer.newline()
+
    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild glyph data from TTX elements: <contour>, <component>,
        <varComponent> and <instructions>."""
        if name == "contour":
            if self.numberOfContours < 0:
                raise ttLib.TTLibError("can't mix composites and contours in glyph")
            self.numberOfContours = self.numberOfContours + 1
            coordinates = GlyphCoordinates()
            flags = bytearray()
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name != "pt":
                    continue  # ignore anything but "pt"
                coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"])))
                flag = bool(safeEval(attrs["on"]))
                if "overlap" in attrs and bool(safeEval(attrs["overlap"])):
                    flag |= flagOverlapSimple
                if "cubic" in attrs and bool(safeEval(attrs["cubic"])):
                    flag |= flagCubic
                flags.append(flag)
            if not hasattr(self, "coordinates"):
                # first contour seen for this glyph
                self.coordinates = coordinates
                self.flags = flags
                self.endPtsOfContours = [len(coordinates) - 1]
            else:
                self.coordinates.extend(coordinates)
                self.flags.extend(flags)
                self.endPtsOfContours.append(len(self.coordinates) - 1)
        elif name == "component":
            if self.numberOfContours > 0:
                raise ttLib.TTLibError("can't mix composites and contours in glyph")
            self.numberOfContours = -1  # marks a composite glyph (see isComposite)
            if not hasattr(self, "components"):
                self.components = []
            component = GlyphComponent()
            self.components.append(component)
            component.fromXML(name, attrs, content, ttFont)
        elif name == "varComponent":
            if self.numberOfContours > 0:
                raise ttLib.TTLibError("can't mix composites and contours in glyph")
            self.numberOfContours = -2  # marks a VarComposite (see isVarComposite)
            if not hasattr(self, "components"):
                self.components = []
            component = GlyphVarComponent()
            self.components.append(component)
            component.fromXML(name, attrs, content, ttFont)
        elif name == "instructions":
            self.program = ttProgram.Program()
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                self.program.fromXML(name, attrs, content, ttFont)
+
    def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1):
        """Recursively total point/contour counts and component nesting depth
        of a composite glyph; returns a CompositeMaxpValues namedtuple."""
        assert self.isComposite() or self.isVarComposite()
        nContours = 0
        nPoints = 0
        initialMaxComponentDepth = maxComponentDepth
        for compo in self.components:
            baseGlyph = glyfTable[compo.glyphName]
            if baseGlyph.numberOfContours == 0:
                continue
            elif baseGlyph.numberOfContours > 0:
                nP, nC = baseGlyph.getMaxpValues()
            else:
                # nested composite: recurse one level deeper
                nP, nC, componentDepth = baseGlyph.getCompositeMaxpValues(
                    glyfTable, initialMaxComponentDepth + 1
                )
                maxComponentDepth = max(maxComponentDepth, componentDepth)
            nPoints = nPoints + nP
            nContours = nContours + nC
        return CompositeMaxpValues(nPoints, nContours, maxComponentDepth)
+
    def getMaxpValues(self):
        """Return (numPoints, numContours) of a simple glyph, for maxp."""
        assert self.numberOfContours > 0
        return len(self.coordinates), len(self.endPtsOfContours)
+
+ def decompileComponents(self, data, glyfTable):
+ self.components = []
+ more = 1
+ haveInstructions = 0
+ while more:
+ component = GlyphComponent()
+ more, haveInstr, data = component.decompile(data, glyfTable)
+ haveInstructions = haveInstructions | haveInstr
+ self.components.append(component)
+ if haveInstructions:
+ (numInstructions,) = struct.unpack(">h", data[:2])
+ data = data[2:]
+ self.program = ttProgram.Program()
+ self.program.fromBytecode(data[:numInstructions])
+ data = data[numInstructions:]
+ if len(data) >= 4:
+ log.warning(
+ "too much glyph data at the end of composite glyph: %d excess bytes",
+ len(data),
+ )
+
+ def decompileVarComponents(self, data, glyfTable):
+ self.components = []
+ while len(data) >= GlyphVarComponent.MIN_SIZE:
+ component = GlyphVarComponent()
+ data = component.decompile(data, glyfTable)
+ self.components.append(component)
+
    def decompileCoordinates(self, data):
        """Parse a simple glyph body: endPtsOfContours, the instruction
        program, and the packed flag/coordinate arrays."""
        endPtsOfContours = array.array("H")
        endPtsOfContours.frombytes(data[: 2 * self.numberOfContours])
        if sys.byteorder != "big":
            endPtsOfContours.byteswap()
        self.endPtsOfContours = endPtsOfContours.tolist()

        pos = 2 * self.numberOfContours
        (instructionLength,) = struct.unpack(">h", data[pos : pos + 2])
        self.program = ttProgram.Program()
        self.program.fromBytecode(data[pos + 2 : pos + 2 + instructionLength])
        pos += 2 + instructionLength
        nCoordinates = self.endPtsOfContours[-1] + 1
        flags, xCoordinates, yCoordinates = self.decompileCoordinatesRaw(
            nCoordinates, data, pos
        )

        # fill in repetitions and apply signs
        self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates)
        xIndex = 0
        yIndex = 0
        for i in range(nCoordinates):
            flag = flags[i]
            # x coordinate
            if flag & flagXShort:
                if flag & flagXsame:
                    # one unsigned byte; flagXsame doubles as the positive sign
                    x = xCoordinates[xIndex]
                else:
                    x = -xCoordinates[xIndex]
                xIndex = xIndex + 1
            elif flag & flagXsame:
                x = 0  # delta omitted: same x as previous point
            else:
                x = xCoordinates[xIndex]
                xIndex = xIndex + 1
            # y coordinate
            if flag & flagYShort:
                if flag & flagYsame:
                    y = yCoordinates[yIndex]
                else:
                    y = -yCoordinates[yIndex]
                yIndex = yIndex + 1
            elif flag & flagYsame:
                y = 0  # delta omitted: same y as previous point
            else:
                y = yCoordinates[yIndex]
                yIndex = yIndex + 1
            coordinates[i] = (x, y)
        assert xIndex == len(xCoordinates)
        assert yIndex == len(yCoordinates)
        coordinates.relativeToAbsolute()
        # discard all flags except "keepFlags"
        for i in range(len(flags)):
            flags[i] &= keepFlags
        self.flags = flags
+
    def decompileCoordinatesRaw(self, nCoordinates, data, pos=0):
        """Unpack the flag array and the raw (still unsigned, un-repeated)
        x/y coordinate tuples of a simple glyph, starting at ``pos``."""
        # unpack flags and prepare unpacking of coordinates
        flags = bytearray(nCoordinates)
        # Warning: deep Python trickery going on. We use the struct module to unpack
        # the coordinates. We build a format string based on the flags, so we can
        # unpack the coordinates in one struct.unpack() call.
        xFormat = ">"  # big endian
        yFormat = ">"  # big endian
        j = 0
        while True:
            flag = data[pos]
            pos += 1
            repeat = 1
            if flag & flagRepeat:
                # the next byte is a repeat count for this flag
                repeat = data[pos] + 1
                pos += 1
            for k in range(repeat):
                if flag & flagXShort:
                    xFormat = xFormat + "B"
                elif not (flag & flagXsame):
                    xFormat = xFormat + "h"
                if flag & flagYShort:
                    yFormat = yFormat + "B"
                elif not (flag & flagYsame):
                    yFormat = yFormat + "h"
                flags[j] = flag
                j = j + 1
            if j >= nCoordinates:
                break
        assert j == nCoordinates, "bad glyph flags"
        # unpack raw coordinates, krrrrrr-tching!
        xDataLen = struct.calcsize(xFormat)
        yDataLen = struct.calcsize(yFormat)
        if len(data) - pos - (xDataLen + yDataLen) >= 4:
            log.warning(
                "too much glyph data: %d excess bytes",
                len(data) - pos - (xDataLen + yDataLen),
            )
        xCoordinates = struct.unpack(xFormat, data[pos : pos + xDataLen])
        yCoordinates = struct.unpack(
            yFormat, data[pos + xDataLen : pos + xDataLen + yDataLen]
        )
        return flags, xCoordinates, yCoordinates
+
+ def compileComponents(self, glyfTable):
+ data = b""
+ lastcomponent = len(self.components) - 1
+ more = 1
+ haveInstructions = 0
+ for i in range(len(self.components)):
+ if i == lastcomponent:
+ haveInstructions = hasattr(self, "program")
+ more = 0
+ compo = self.components[i]
+ data = data + compo.compile(more, haveInstructions, glyfTable)
+ if haveInstructions:
+ instructions = self.program.getBytecode()
+ data = data + struct.pack(">h", len(instructions)) + instructions
+ return data
+
+ def compileVarComponents(self, glyfTable):
+ return b"".join(c.compile(glyfTable) for c in self.components)
+
    def compileCoordinates(self):
        """Serialize a simple glyph's endPtsOfContours, instruction program
        and packed coordinate deltas."""
        assert len(self.coordinates) == len(self.flags)
        data = []
        endPtsOfContours = array.array("H", self.endPtsOfContours)
        if sys.byteorder != "big":
            endPtsOfContours.byteswap()
        data.append(endPtsOfContours.tobytes())
        instructions = self.program.getBytecode()
        data.append(struct.pack(">h", len(instructions)))
        data.append(instructions)

        # coordinates are stored as integer deltas from the previous point
        deltas = self.coordinates.copy()
        deltas.toInt()
        deltas.absoluteToRelative()

        # TODO(behdad): Add a configuration option for this?
        deltas = self.compileDeltasGreedy(self.flags, deltas)
        # deltas = self.compileDeltasOptimal(self.flags, deltas)

        data.extend(deltas)
        return b"".join(data)
+
    def compileDeltasGreedy(self, flags, deltas):
        # Implements greedy algorithm for packing coordinate deltas:
        # uses shortest representation one coordinate at a time.
        # Returns (flagBytes, xByteStream, yByteStream).
        compressedFlags = bytearray()
        compressedXs = bytearray()
        compressedYs = bytearray()
        lastflag = None
        repeat = 0
        for flag, (x, y) in zip(flags, deltas):
            # Oh, the horrors of TrueType
            # do x
            if x == 0:
                flag = flag | flagXsame  # x delta omitted
            elif -255 <= x <= 255:
                flag = flag | flagXShort
                if x > 0:
                    # flagXsame doubles as the positive-sign bit for shorts
                    flag = flag | flagXsame
                else:
                    x = -x
                compressedXs.append(x)
            else:
                compressedXs.extend(struct.pack(">h", x))
            # do y
            if y == 0:
                flag = flag | flagYsame  # y delta omitted
            elif -255 <= y <= 255:
                flag = flag | flagYShort
                if y > 0:
                    flag = flag | flagYsame
                else:
                    y = -y
                compressedYs.append(y)
            else:
                compressedYs.extend(struct.pack(">h", y))
            # handle repeating flags
            if flag == lastflag and repeat != 255:
                repeat = repeat + 1
                if repeat == 1:
                    compressedFlags.append(flag)
                else:
                    # rewrite the previous flag byte with the repeat bit set
                    compressedFlags[-2] = flag | flagRepeat
                    compressedFlags[-1] = repeat
            else:
                repeat = 0
                compressedFlags.append(flag)
            lastflag = flag
        return (compressedFlags, compressedXs, compressedYs)
+
+ def compileDeltasOptimal(self, flags, deltas):
+ # Implements optimal, dynaic-programming, algorithm for packing coordinate
+ # deltas. The savings are negligible :(.
+ candidates = []
+ bestTuple = None
+ bestCost = 0
+ repeat = 0
+ for flag, (x, y) in zip(flags, deltas):
+ # Oh, the horrors of TrueType
+ flag, coordBytes = flagBest(x, y, flag)
+ bestCost += 1 + coordBytes
+ newCandidates = [
+ (bestCost, bestTuple, flag, coordBytes),
+ (bestCost + 1, bestTuple, (flag | flagRepeat), coordBytes),
+ ]
+ for lastCost, lastTuple, lastFlag, coordBytes in candidates:
+ if (
+ lastCost + coordBytes <= bestCost + 1
+ and (lastFlag & flagRepeat)
+ and (lastFlag < 0xFF00)
+ and flagSupports(lastFlag, flag)
+ ):
+ if (lastFlag & 0xFF) == (
+ flag | flagRepeat
+ ) and lastCost == bestCost + 1:
+ continue
+ newCandidates.append(
+ (lastCost + coordBytes, lastTuple, lastFlag + 256, coordBytes)
+ )
+ candidates = newCandidates
+ bestTuple = min(candidates, key=lambda t: t[0])
+ bestCost = bestTuple[0]
+
+ flags = []
+ while bestTuple:
+ cost, bestTuple, flag, coordBytes = bestTuple
+ flags.append(flag)
+ flags.reverse()
+
+ compressedFlags = bytearray()
+ compressedXs = bytearray()
+ compressedYs = bytearray()
+ coords = iter(deltas)
+ ff = []
+ for flag in flags:
+ repeatCount, flag = flag >> 8, flag & 0xFF
+ compressedFlags.append(flag)
+ if flag & flagRepeat:
+ assert repeatCount > 0
+ compressedFlags.append(repeatCount)
+ else:
+ assert repeatCount == 0
+ for i in range(1 + repeatCount):
+ x, y = next(coords)
+ flagEncodeCoords(flag, x, y, compressedXs, compressedYs)
+ ff.append(flag)
+ try:
+ next(coords)
+ raise Exception("internal error")
+ except StopIteration:
+ pass
+
+ return (compressedFlags, compressedXs, compressedYs)
+
    def recalcBounds(self, glyfTable, *, boundsDone=None):
        """Recalculates the bounds of the glyph.

        Each glyph object stores its bounding box in the
        ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
        recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds
        must be provided to resolve component bounds.
        """
        # Fast path for composites made solely of untransformed,
        # integer-offset components (see tryRecalcBoundsComposite).
        if self.isComposite() and self.tryRecalcBoundsComposite(
            glyfTable, boundsDone=boundsDone
        ):
            return
        try:
            coords, endPts, flags = self.getCoordinates(glyfTable)
            self.xMin, self.yMin, self.xMax, self.yMax = coords.calcIntBounds()
        except NotImplementedError:
            # getCoordinates raises NotImplementedError for VarComposite
            # glyphs; leave the existing bounds untouched in that case.
            pass
+
    def tryRecalcBoundsComposite(self, glyfTable, *, boundsDone=None):
        """Try recalculating the bounds of a composite glyph that has
        certain constrained properties. Namely, none of the components
        have a transform other than an integer translate, and none
        uses the anchor points.

        Each glyph object stores its bounding box in the
        ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
        recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds
        must be provided to resolve component bounds.

        Return True if bounds were calculated, False otherwise.
        """
        for compo in self.components:
            # anchor-point attachment or any transform disqualifies the fast path
            if hasattr(compo, "firstPt") or hasattr(compo, "transform"):
                return False
            if not float(compo.x).is_integer() or not float(compo.y).is_integer():
                return False

        # All components are untransformed and have an integer x/y translate
        bounds = None
        for compo in self.components:
            glyphName = compo.glyphName
            g = glyfTable[glyphName]

            if boundsDone is None or glyphName not in boundsDone:
                # recurse first so the component's own bbox is current
                g.recalcBounds(glyfTable, boundsDone=boundsDone)
                if boundsDone is not None:
                    boundsDone.add(glyphName)

            x, y = compo.x, compo.y
            bounds = updateBounds(bounds, (g.xMin + x, g.yMin + y))
            bounds = updateBounds(bounds, (g.xMax + x, g.yMax + y))

        if bounds is None:
            # composite without components: degenerate empty bbox
            bounds = (0, 0, 0, 0)
        self.xMin, self.yMin, self.xMax, self.yMax = bounds
        return True
+
+ def isComposite(self):
+ """Test whether a glyph has components"""
+ if hasattr(self, "data"):
+ return struct.unpack(">h", self.data[:2])[0] == -1 if self.data else False
+ else:
+ return self.numberOfContours == -1
+
+ def isVarComposite(self):
+ """Test whether a glyph has variable components"""
+ if hasattr(self, "data"):
+ return struct.unpack(">h", self.data[:2])[0] == -2 if self.data else False
+ else:
+ return self.numberOfContours == -2
+
    def getCoordinates(self, glyfTable):
        """Return the coordinates, end points and flags

        This method returns three values: A :py:class:`GlyphCoordinates` object,
        a list of the indexes of the final points of each contour (allowing you
        to split up the coordinates list into contours) and a list of flags.

        On simple glyphs, this method returns information from the glyph's own
        contours; on composite glyphs, it "flattens" all components recursively
        to return a list of coordinates representing all the components involved
        in the glyph.

        To interpret the flags for each point, see the "Simple Glyph Flags"
        section of the `glyf table specification <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#simple-glyph-description>`.
        """

        if self.numberOfContours > 0:
            return self.coordinates, self.endPtsOfContours, self.flags
        elif self.isComposite():
            # it's a composite
            allCoords = GlyphCoordinates()
            allFlags = bytearray()
            allEndPts = []
            for compo in self.components:
                g = glyfTable[compo.glyphName]
                try:
                    coordinates, endPts, flags = g.getCoordinates(glyfTable)
                except RecursionError:
                    raise ttLib.TTLibError(
                        "glyph '%s' contains a recursive component reference"
                        % compo.glyphName
                    )
                coordinates = GlyphCoordinates(coordinates)
                if hasattr(compo, "firstPt"):
                    # component uses two reference points: we apply the transform _before_
                    # computing the offset between the points
                    if hasattr(compo, "transform"):
                        coordinates.transform(compo.transform)
                    x1, y1 = allCoords[compo.firstPt]
                    x2, y2 = coordinates[compo.secondPt]
                    move = x1 - x2, y1 - y2
                    coordinates.translate(move)
                else:
                    # component uses XY offsets
                    move = compo.x, compo.y
                    if not hasattr(compo, "transform"):
                        coordinates.translate(move)
                    else:
                        apple_way = compo.flags & SCALED_COMPONENT_OFFSET
                        ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET
                        assert not (apple_way and ms_way)
                        if not (apple_way or ms_way):
                            scale_component_offset = (
                                SCALE_COMPONENT_OFFSET_DEFAULT  # see top of this file
                            )
                        else:
                            scale_component_offset = apple_way
                        if scale_component_offset:
                            # the Apple way: first move, then scale (ie. scale the component offset)
                            coordinates.translate(move)
                            coordinates.transform(compo.transform)
                        else:
                            # the MS way: first scale, then move
                            coordinates.transform(compo.transform)
                            coordinates.translate(move)
                offset = len(allCoords)
                allEndPts.extend(e + offset for e in endPts)
                allCoords.extend(coordinates)
                allFlags.extend(flags)
            return allCoords, allEndPts, allFlags
        elif self.isVarComposite():
            raise NotImplementedError("use TTGlyphSet to draw VarComposite glyphs")
        else:
            # empty glyph (numberOfContours == 0)
            return GlyphCoordinates(), [], bytearray()
+
    def getComponentNames(self, glyfTable):
        """Returns a list of names of component glyphs used in this glyph

        This method can be used on simple glyphs (in which case it returns an
        empty list) or composite glyphs.
        """
        if hasattr(self, "data") and self.isVarComposite():
            # TODO(VarComposite) Add implementation without expanding glyph
            self.expand(glyfTable)

        if not hasattr(self, "data"):
            if self.isComposite() or self.isVarComposite():
                return [c.glyphName for c in self.components]
            else:
                return []

        # Extract components without expanding glyph

        if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
            return []  # Not composite

        data = self.data
        i = 10  # skip the 10-byte glyph header (numberOfContours + bbox)
        components = []
        more = 1
        while more:
            flags, glyphID = struct.unpack(">HH", data[i : i + 4])
            i += 4
            flags = int(flags)
            components.append(glyfTable.getGlyphName(int(glyphID)))

            # skip the argument and transform fields of this record
            if flags & ARG_1_AND_2_ARE_WORDS:
                i += 4
            else:
                i += 2
            if flags & WE_HAVE_A_SCALE:
                i += 2
            elif flags & WE_HAVE_AN_X_AND_Y_SCALE:
                i += 4
            elif flags & WE_HAVE_A_TWO_BY_TWO:
                i += 8
            more = flags & MORE_COMPONENTS

        return components
+
    def trim(self, remove_hinting=False):
        """Remove padding and, if requested, hinting, from a glyph.
        This works on both expanded and compacted glyphs, without
        expanding it."""
        if not hasattr(self, "data"):
            # glyph is expanded: nothing to trim, only hinting to drop
            if remove_hinting:
                if self.isComposite():
                    if hasattr(self, "program"):
                        del self.program
                elif self.isVarComposite():
                    pass  # Doesn't have hinting
                else:
                    # simple glyph: replace the program with an empty one
                    self.program = ttProgram.Program()
                    self.program.fromBytecode([])
            # No padding to trim.
            return
        if not self.data:
            return
        numContours = struct.unpack(">h", self.data[:2])[0]
        data = bytearray(self.data)
        i = 10  # skip the 10-byte glyph header
        if numContours >= 0:
            i += 2 * numContours  # endPtsOfContours
            nCoordinates = ((data[i - 2] << 8) | data[i - 1]) + 1
            instructionLen = (data[i] << 8) | data[i + 1]
            if remove_hinting:
                # Zero instruction length
                data[i] = data[i + 1] = 0
                i += 2
                if instructionLen:
                    # Splice it out
                    data = data[:i] + data[i + instructionLen :]
                    instructionLen = 0
            else:
                i += 2 + instructionLen

            # walk the flag array to find where the coordinate data ends
            coordBytes = 0
            j = 0
            while True:
                flag = data[i]
                i = i + 1
                repeat = 1
                if flag & flagRepeat:
                    repeat = data[i] + 1
                    i = i + 1
                xBytes = yBytes = 0
                if flag & flagXShort:
                    xBytes = 1
                elif not (flag & flagXsame):
                    xBytes = 2
                if flag & flagYShort:
                    yBytes = 1
                elif not (flag & flagYsame):
                    yBytes = 2
                coordBytes += (xBytes + yBytes) * repeat
                j += repeat
                if j >= nCoordinates:
                    break
            assert j == nCoordinates, "bad glyph flags"
            i += coordBytes
            # Remove padding
            data = data[:i]
        elif self.isComposite():
            more = 1
            we_have_instructions = False
            while more:
                flags = (data[i] << 8) | data[i + 1]
                if remove_hinting:
                    flags &= ~WE_HAVE_INSTRUCTIONS
                if flags & WE_HAVE_INSTRUCTIONS:
                    we_have_instructions = True
                data[i + 0] = flags >> 8
                data[i + 1] = flags & 0xFF
                i += 4
                flags = int(flags)

                # skip the argument and transform fields of this record
                if flags & ARG_1_AND_2_ARE_WORDS:
                    i += 4
                else:
                    i += 2
                if flags & WE_HAVE_A_SCALE:
                    i += 2
                elif flags & WE_HAVE_AN_X_AND_Y_SCALE:
                    i += 4
                elif flags & WE_HAVE_A_TWO_BY_TWO:
                    i += 8
                more = flags & MORE_COMPONENTS
            if we_have_instructions:
                instructionLen = (data[i] << 8) | data[i + 1]
                i += 2 + instructionLen
            # Remove padding
            data = data[:i]
        elif self.isVarComposite():
            i = 0
            MIN_SIZE = GlyphVarComponent.MIN_SIZE
            while len(data[i : i + MIN_SIZE]) >= MIN_SIZE:
                size = GlyphVarComponent.getSize(data[i : i + MIN_SIZE])
                i += size
            data = data[:i]

        self.data = data
+
    def removeHinting(self):
        """Removes TrueType hinting instructions from the glyph.

        Works on both packed and expanded glyphs; also trims any
        trailing padding (see :py:meth:`trim`).
        """
        self.trim(remove_hinting=True)
+
    def draw(self, pen, glyfTable, offset=0):
        """Draws the glyph using the supplied pen object.

        Arguments:
            pen: An object conforming to the pen protocol.
            glyfTable: A :py:class:`table__g_l_y_f` object, to resolve components.
            offset (int): A horizontal offset. If provided, all coordinates are
                translated by this offset.
        """

        if self.isComposite():
            for component in self.components:
                glyphName, transform = component.getComponentInfo()
                pen.addComponent(glyphName, transform)
            return

        coordinates, endPts, flags = self.getCoordinates(glyfTable)
        if offset:
            coordinates = coordinates.copy()
            coordinates.translate((offset, 0))
        start = 0
        # keep integral midpoints as ints for cleaner output
        maybeInt = lambda v: int(v) if v == int(v) else v
        for end in endPts:
            end = end + 1
            contour = coordinates[start:end]
            cFlags = [flagOnCurve & f for f in flags[start:end]]
            cuFlags = [flagCubic & f for f in flags[start:end]]
            start = end
            if 1 not in cFlags:
                # contour has no on-curve point at all
                assert all(cuFlags) or not any(cuFlags)
                cubic = all(cuFlags)
                if cubic:
                    count = len(contour)
                    assert count % 2 == 0, "Odd number of cubic off-curves undefined"
                    l = contour[-1]
                    f = contour[0]
                    # synthesize the start point midway between last and first
                    p0 = (maybeInt((l[0] + f[0]) * 0.5), maybeInt((l[1] + f[1]) * 0.5))
                    pen.moveTo(p0)
                    for i in range(0, count, 2):
                        p1 = contour[i]
                        p2 = contour[i + 1]
                        p4 = contour[i + 2 if i + 2 < count else 0]
                        p3 = (
                            maybeInt((p2[0] + p4[0]) * 0.5),
                            maybeInt((p2[1] + p4[1]) * 0.5),
                        )
                        pen.curveTo(p1, p2, p3)
                else:
                    # There is not a single on-curve point on the curve,
                    # use pen.qCurveTo's special case by specifying None
                    # as the on-curve point.
                    contour.append(None)
                    pen.qCurveTo(*contour)
            else:
                # Shuffle the points so that the contour is guaranteed
                # to *end* in an on-curve point, which we'll use for
                # the moveTo.
                firstOnCurve = cFlags.index(1) + 1
                contour = contour[firstOnCurve:] + contour[:firstOnCurve]
                cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve]
                cuFlags = cuFlags[firstOnCurve:] + cuFlags[:firstOnCurve]
                pen.moveTo(contour[-1])
                while contour:
                    nextOnCurve = cFlags.index(1) + 1
                    if nextOnCurve == 1:
                        # Skip a final lineTo(), as it is implied by
                        # pen.closePath()
                        if len(contour) > 1:
                            pen.lineTo(contour[0])
                    else:
                        cubicFlags = [f for f in cuFlags[: nextOnCurve - 1]]
                        assert all(cubicFlags) or not any(cubicFlags)
                        cubic = any(cubicFlags)
                        if cubic:
                            assert all(
                                cubicFlags
                            ), "Mixed cubic and quadratic segment undefined"

                            count = nextOnCurve
                            assert (
                                count >= 3
                            ), "At least two cubic off-curve points required"
                            assert (
                                count - 1
                            ) % 2 == 0, "Odd number of cubic off-curves undefined"
                            # emit implied on-curve midpoints between pairs
                            for i in range(0, count - 3, 2):
                                p1 = contour[i]
                                p2 = contour[i + 1]
                                p4 = contour[i + 2]
                                p3 = (
                                    maybeInt((p2[0] + p4[0]) * 0.5),
                                    maybeInt((p2[1] + p4[1]) * 0.5),
                                )
                                lastOnCurve = p3
                                pen.curveTo(p1, p2, p3)
                            pen.curveTo(*contour[count - 3 : count])
                        else:
                            pen.qCurveTo(*contour[:nextOnCurve])
                    contour = contour[nextOnCurve:]
                    cFlags = cFlags[nextOnCurve:]
                    cuFlags = cuFlags[nextOnCurve:]
                pen.closePath()
+
    def drawPoints(self, pen, glyfTable, offset=0):
        """Draw the glyph using the supplied pointPen. As opposed to Glyph.draw(),
        this will not change the point indices.
        """

        if self.isComposite():
            for component in self.components:
                glyphName, transform = component.getComponentInfo()
                pen.addComponent(glyphName, transform)
            return

        coordinates, endPts, flags = self.getCoordinates(glyfTable)
        if offset:
            coordinates = coordinates.copy()
            coordinates.translate((offset, 0))
        start = 0
        for end in endPts:
            end = end + 1
            contour = coordinates[start:end]
            cFlags = flags[start:end]
            start = end
            pen.beginPath()
            # Start with the appropriate segment type based on the final segment

            if cFlags[-1] & flagOnCurve:
                segmentType = "line"
            elif cFlags[-1] & flagCubic:
                segmentType = "curve"
            else:
                segmentType = "qcurve"
            for i, pt in enumerate(contour):
                if cFlags[i] & flagOnCurve:
                    pen.addPoint(pt, segmentType=segmentType)
                    segmentType = "line"
                else:
                    # off-curve point: the next on-curve point closes a
                    # curve/qcurve segment depending on the cubic flag
                    pen.addPoint(pt)
                    segmentType = "curve" if cFlags[i] & flagCubic else "qcurve"
            pen.endPath()
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+
# Vector.__round__ uses the built-in (Banker's) `round` but we want
# to use otRound below (used by _is_mid_point when comparing rounded points)
_roundv = partial(Vector.__round__, round=otRound)
+
+
def _is_mid_point(p0: tuple, p1: tuple, p2: tuple) -> bool:
    """Return True if p1 lies halfway between p0 and p2, either exactly
    (within float tolerance) or after rounding all three points."""
    v0, v1, v2 = Vector(p0), Vector(p1), Vector(p2)
    if ((v0 + v2) * 0.5).isclose(v1):
        return True
    return _roundv(v0) + _roundv(v2) == _roundv(v1) * 2
+
+
def dropImpliedOnCurvePoints(*interpolatable_glyphs: Glyph) -> Set[int]:
    """Drop impliable on-curve points from the (simple) glyph or glyphs.

    In TrueType glyf outlines, on-curve points can be implied when they are located at
    the midpoint of the line connecting two consecutive off-curve points.

    If more than one glyphs are passed, these are assumed to be interpolatable masters
    of the same glyph impliable, and thus only the on-curve points that are impliable
    for all of them will actually be implied.
    Composite glyphs or empty glyphs are skipped, only simple glyphs with 1 or more
    contours are considered.
    The input glyph(s) is/are modified in-place.

    Args:
        interpolatable_glyphs: The glyph or glyphs to modify in-place.

    Returns:
        The set of point indices that were dropped if any.

    Raises:
        ValueError if simple glyphs are not in fact interpolatable because they have
        different point flags or number of contours.

    Reference:
        https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
    """
    # flags/endPtsOfContours/numberOfContours must agree across all masters
    staticAttributes = SimpleNamespace(
        numberOfContours=None, flags=None, endPtsOfContours=None
    )
    drop = None
    simple_glyphs = []
    for i, glyph in enumerate(interpolatable_glyphs):
        if glyph.numberOfContours < 1:
            # ignore composite or empty glyphs
            continue

        for attr in staticAttributes.__dict__:
            expected = getattr(staticAttributes, attr)
            found = getattr(glyph, attr)
            if expected is None:
                setattr(staticAttributes, attr, found)
            elif expected != found:
                raise ValueError(
                    f"Incompatible {attr} for glyph at master index {i}: "
                    f"expected {expected}, found {found}"
                )

        may_drop = set()
        start = 0
        coords = glyph.coordinates
        flags = staticAttributes.flags
        endPtsOfContours = staticAttributes.endPtsOfContours
        for last in endPtsOfContours:
            for i in range(start, last + 1):
                if not (flags[i] & flagOnCurve):
                    continue
                # neighbours, wrapping around within the contour
                prv = i - 1 if i > start else last
                nxt = i + 1 if i < last else start
                if (flags[prv] & flagOnCurve) or flags[prv] != flags[nxt]:
                    continue
                # we may drop the ith on-curve if halfway between previous/next off-curves
                if not _is_mid_point(coords[prv], coords[i], coords[nxt]):
                    continue

                may_drop.add(i)
            start = last + 1
        # we only want to drop if ALL interpolatable glyphs have the same implied oncurves
        if drop is None:
            drop = may_drop
        else:
            drop.intersection_update(may_drop)

        simple_glyphs.append(glyph)

    if drop:
        # Do the actual dropping
        flags = staticAttributes.flags
        assert flags is not None
        newFlags = array.array(
            "B", (flags[i] for i in range(len(flags)) if i not in drop)
        )

        # shift contour end indices left by the number of dropped points
        # that precede them
        endPts = staticAttributes.endPtsOfContours
        assert endPts is not None
        newEndPts = []
        i = 0
        delta = 0
        for d in sorted(drop):
            while d > endPts[i]:
                newEndPts.append(endPts[i] - delta)
                i += 1
            delta += 1
        while i < len(endPts):
            newEndPts.append(endPts[i] - delta)
            i += 1

        for glyph in simple_glyphs:
            coords = glyph.coordinates
            glyph.coordinates = GlyphCoordinates(
                coords[i] for i in range(len(coords)) if i not in drop
            )
            glyph.flags = newFlags
            glyph.endPtsOfContours = newEndPts

    return drop if drop is not None else set()
-CompositeMaxpValues = namedtuple('CompositeMaxpValues', ['nPoints', 'nContours', 'maxComponentDepth'])
+class GlyphComponent(object):
+ """Represents a component within a composite glyph.
+
+ The component is represented internally with four attributes: ``glyphName``,
+ ``x``, ``y`` and ``transform``. If there is no "two-by-two" matrix (i.e
+ no scaling, reflection, or rotation; only translation), the ``transform``
+ attribute is not present.
+ """
+
+ # The above documentation is not *completely* true, but is *true enough* because
+ # the rare firstPt/lastPt attributes are not totally supported and nobody seems to
+ # mind - see below.
+
+ def __init__(self):
+ pass
+
+ def getComponentInfo(self):
+ """Return information about the component
+
+ This method returns a tuple of two values: the glyph name of the component's
+ base glyph, and a transformation matrix. As opposed to accessing the attributes
+ directly, ``getComponentInfo`` always returns a six-element tuple of the
+ component's transformation matrix, even when the two-by-two ``.transform``
+ matrix is not present.
+ """
+ # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement
+ # something equivalent in fontTools.objects.glyph (I'd rather not
+ # convert it to an absolute offset, since it is valuable information).
+ # This method will now raise "AttributeError: x" on glyphs that use
+ # this TT feature.
+ if hasattr(self, "transform"):
+ [[xx, xy], [yx, yy]] = self.transform
+ trans = (xx, xy, yx, yy, self.x, self.y)
+ else:
+ trans = (1, 0, 0, 1, self.x, self.y)
+ return self.glyphName, trans
+
+ def decompile(self, data, glyfTable):
+ flags, glyphID = struct.unpack(">HH", data[:4])
+ self.flags = int(flags)
+ glyphID = int(glyphID)
+ self.glyphName = glyfTable.getGlyphName(int(glyphID))
+ data = data[4:]
+
+ if self.flags & ARG_1_AND_2_ARE_WORDS:
+ if self.flags & ARGS_ARE_XY_VALUES:
+ self.x, self.y = struct.unpack(">hh", data[:4])
+ else:
+ x, y = struct.unpack(">HH", data[:4])
+ self.firstPt, self.secondPt = int(x), int(y)
+ data = data[4:]
+ else:
+ if self.flags & ARGS_ARE_XY_VALUES:
+ self.x, self.y = struct.unpack(">bb", data[:2])
+ else:
+ x, y = struct.unpack(">BB", data[:2])
+ self.firstPt, self.secondPt = int(x), int(y)
+ data = data[2:]
+
+ if self.flags & WE_HAVE_A_SCALE:
+ (scale,) = struct.unpack(">h", data[:2])
+ self.transform = [
+ [fi2fl(scale, 14), 0],
+ [0, fi2fl(scale, 14)],
+ ] # fixed 2.14
+ data = data[2:]
+ elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE:
+ xscale, yscale = struct.unpack(">hh", data[:4])
+ self.transform = [
+ [fi2fl(xscale, 14), 0],
+ [0, fi2fl(yscale, 14)],
+ ] # fixed 2.14
+ data = data[4:]
+ elif self.flags & WE_HAVE_A_TWO_BY_TWO:
+ (xscale, scale01, scale10, yscale) = struct.unpack(">hhhh", data[:8])
+ self.transform = [
+ [fi2fl(xscale, 14), fi2fl(scale01, 14)],
+ [fi2fl(scale10, 14), fi2fl(yscale, 14)],
+ ] # fixed 2.14
+ data = data[8:]
+ more = self.flags & MORE_COMPONENTS
+ haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS
+ self.flags = self.flags & (
+ ROUND_XY_TO_GRID
+ | USE_MY_METRICS
+ | SCALED_COMPONENT_OFFSET
+ | UNSCALED_COMPONENT_OFFSET
+ | NON_OVERLAPPING
+ | OVERLAP_COMPOUND
+ )
+ return more, haveInstructions, data
+
+ def compile(self, more, haveInstructions, glyfTable):
+ data = b""
+
+ # reset all flags we will calculate ourselves
+ flags = self.flags & (
+ ROUND_XY_TO_GRID
+ | USE_MY_METRICS
+ | SCALED_COMPONENT_OFFSET
+ | UNSCALED_COMPONENT_OFFSET
+ | NON_OVERLAPPING
+ | OVERLAP_COMPOUND
+ )
+ if more:
+ flags = flags | MORE_COMPONENTS
+ if haveInstructions:
+ flags = flags | WE_HAVE_INSTRUCTIONS
+
+ if hasattr(self, "firstPt"):
+ if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255):
+ data = data + struct.pack(">BB", self.firstPt, self.secondPt)
+ else:
+ data = data + struct.pack(">HH", self.firstPt, self.secondPt)
+ flags = flags | ARG_1_AND_2_ARE_WORDS
+ else:
+ x = otRound(self.x)
+ y = otRound(self.y)
+ flags = flags | ARGS_ARE_XY_VALUES
+ if (-128 <= x <= 127) and (-128 <= y <= 127):
+ data = data + struct.pack(">bb", x, y)
+ else:
+ data = data + struct.pack(">hh", x, y)
+ flags = flags | ARG_1_AND_2_ARE_WORDS
+
+ if hasattr(self, "transform"):
+ transform = [[fl2fi(x, 14) for x in row] for row in self.transform]
+ if transform[0][1] or transform[1][0]:
+ flags = flags | WE_HAVE_A_TWO_BY_TWO
+ data = data + struct.pack(
+ ">hhhh",
+ transform[0][0],
+ transform[0][1],
+ transform[1][0],
+ transform[1][1],
+ )
+ elif transform[0][0] != transform[1][1]:
+ flags = flags | WE_HAVE_AN_X_AND_Y_SCALE
+ data = data + struct.pack(">hh", transform[0][0], transform[1][1])
+ else:
+ flags = flags | WE_HAVE_A_SCALE
+ data = data + struct.pack(">h", transform[0][0])
+
+ glyphID = glyfTable.getGlyphID(self.glyphName)
+ return struct.pack(">HH", flags, glyphID) + data
+
+ def toXML(self, writer, ttFont):
+ attrs = [("glyphName", self.glyphName)]
+ if not hasattr(self, "firstPt"):
+ attrs = attrs + [("x", self.x), ("y", self.y)]
+ else:
+ attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)]
+
+ if hasattr(self, "transform"):
+ transform = self.transform
+ if transform[0][1] or transform[1][0]:
+ attrs = attrs + [
+ ("scalex", fl2str(transform[0][0], 14)),
+ ("scale01", fl2str(transform[0][1], 14)),
+ ("scale10", fl2str(transform[1][0], 14)),
+ ("scaley", fl2str(transform[1][1], 14)),
+ ]
+ elif transform[0][0] != transform[1][1]:
+ attrs = attrs + [
+ ("scalex", fl2str(transform[0][0], 14)),
+ ("scaley", fl2str(transform[1][1], 14)),
+ ]
+ else:
+ attrs = attrs + [("scale", fl2str(transform[0][0], 14))]
+ attrs = attrs + [("flags", hex(self.flags))]
+ writer.simpletag("component", attrs)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.glyphName = attrs["glyphName"]
+ if "firstPt" in attrs:
+ self.firstPt = safeEval(attrs["firstPt"])
+ self.secondPt = safeEval(attrs["secondPt"])
+ else:
+ self.x = safeEval(attrs["x"])
+ self.y = safeEval(attrs["y"])
+ if "scale01" in attrs:
+ scalex = str2fl(attrs["scalex"], 14)
+ scale01 = str2fl(attrs["scale01"], 14)
+ scale10 = str2fl(attrs["scale10"], 14)
+ scaley = str2fl(attrs["scaley"], 14)
+ self.transform = [[scalex, scale01], [scale10, scaley]]
+ elif "scalex" in attrs:
+ scalex = str2fl(attrs["scalex"], 14)
+ scaley = str2fl(attrs["scaley"], 14)
+ self.transform = [[scalex, 0], [0, scaley]]
+ elif "scale" in attrs:
+ scale = str2fl(attrs["scale"], 14)
+ self.transform = [[scale, 0], [0, scale]]
+ self.flags = safeEval(attrs["flags"])
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
-class Glyph(object):
- """This class represents an individual TrueType glyph.
-
- TrueType glyph objects come in two flavours: simple and composite. Simple
- glyph objects contain contours, represented via the ``.coordinates``,
- ``.flags``, ``.numberOfContours``, and ``.endPtsOfContours`` attributes;
- composite glyphs contain components, available through the ``.components``
- attributes.
-
- Because the ``.coordinates`` attribute (and other simple glyph attributes mentioned
- above) is only set on simple glyphs and the ``.components`` attribute is only
- set on composite glyphs, it is necessary to use the :py:meth:`isComposite`
- method to test whether a glyph is simple or composite before attempting to
- access its data.
-
- For a composite glyph, the components can also be accessed via array-like access::
-
- >> assert(font["glyf"]["Aacute"].isComposite())
- >> font["glyf"]["Aacute"][0]
- <fontTools.ttLib.tables._g_l_y_f.GlyphComponent at 0x1027b2ee0>
-
- """
-
- def __init__(self, data=b""):
- if not data:
- # empty char
- self.numberOfContours = 0
- return
- self.data = data
-
- def compact(self, glyfTable, recalcBBoxes=True):
- data = self.compile(glyfTable, recalcBBoxes)
- self.__dict__.clear()
- self.data = data
-
- def expand(self, glyfTable):
- if not hasattr(self, "data"):
- # already unpacked
- return
- if not self.data:
- # empty char
- del self.data
- self.numberOfContours = 0
- return
- dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self)
- del self.data
- # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in
- # some glyphs; decompileCoordinates assumes that there's at least
- # one, so short-circuit here.
- if self.numberOfContours == 0:
- return
- if self.isComposite():
- self.decompileComponents(data, glyfTable)
- else:
- self.decompileCoordinates(data)
-
- def compile(self, glyfTable, recalcBBoxes=True):
- if hasattr(self, "data"):
- if recalcBBoxes:
- # must unpack glyph in order to recalculate bounding box
- self.expand(glyfTable)
- else:
- return self.data
- if self.numberOfContours == 0:
- return b''
- if recalcBBoxes:
- self.recalcBounds(glyfTable)
- data = sstruct.pack(glyphHeaderFormat, self)
- if self.isComposite():
- data = data + self.compileComponents(glyfTable)
- else:
- data = data + self.compileCoordinates()
- return data
-
- def toXML(self, writer, ttFont):
- if self.isComposite():
- for compo in self.components:
- compo.toXML(writer, ttFont)
- haveInstructions = hasattr(self, "program")
- else:
- last = 0
- for i in range(self.numberOfContours):
- writer.begintag("contour")
- writer.newline()
- for j in range(last, self.endPtsOfContours[i] + 1):
- attrs = [
- ("x", self.coordinates[j][0]),
- ("y", self.coordinates[j][1]),
- ("on", self.flags[j] & flagOnCurve),
- ]
- if self.flags[j] & flagOverlapSimple:
- # Apple's rasterizer uses flagOverlapSimple in the first contour/first pt to flag glyphs that contain overlapping contours
- attrs.append(("overlap", 1))
- writer.simpletag("pt", attrs)
- writer.newline()
- last = self.endPtsOfContours[i] + 1
- writer.endtag("contour")
- writer.newline()
- haveInstructions = self.numberOfContours > 0
- if haveInstructions:
- if self.program:
- writer.begintag("instructions")
- writer.newline()
- self.program.toXML(writer, ttFont)
- writer.endtag("instructions")
- else:
- writer.simpletag("instructions")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "contour":
- if self.numberOfContours < 0:
- raise ttLib.TTLibError("can't mix composites and contours in glyph")
- self.numberOfContours = self.numberOfContours + 1
- coordinates = GlyphCoordinates()
- flags = bytearray()
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name != "pt":
- continue # ignore anything but "pt"
- coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"])))
- flag = bool(safeEval(attrs["on"]))
- if "overlap" in attrs and bool(safeEval(attrs["overlap"])):
- flag |= flagOverlapSimple
- flags.append(flag)
- if not hasattr(self, "coordinates"):
- self.coordinates = coordinates
- self.flags = flags
- self.endPtsOfContours = [len(coordinates)-1]
- else:
- self.coordinates.extend (coordinates)
- self.flags.extend(flags)
- self.endPtsOfContours.append(len(self.coordinates)-1)
- elif name == "component":
- if self.numberOfContours > 0:
- raise ttLib.TTLibError("can't mix composites and contours in glyph")
- self.numberOfContours = -1
- if not hasattr(self, "components"):
- self.components = []
- component = GlyphComponent()
- self.components.append(component)
- component.fromXML(name, attrs, content, ttFont)
- elif name == "instructions":
- self.program = ttProgram.Program()
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- self.program.fromXML(name, attrs, content, ttFont)
-
- def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1):
- assert self.isComposite()
- nContours = 0
- nPoints = 0
- initialMaxComponentDepth = maxComponentDepth
- for compo in self.components:
- baseGlyph = glyfTable[compo.glyphName]
- if baseGlyph.numberOfContours == 0:
- continue
- elif baseGlyph.numberOfContours > 0:
- nP, nC = baseGlyph.getMaxpValues()
- else:
- nP, nC, componentDepth = baseGlyph.getCompositeMaxpValues(
- glyfTable, initialMaxComponentDepth + 1)
- maxComponentDepth = max(maxComponentDepth, componentDepth)
- nPoints = nPoints + nP
- nContours = nContours + nC
- return CompositeMaxpValues(nPoints, nContours, maxComponentDepth)
-
- def getMaxpValues(self):
- assert self.numberOfContours > 0
- return len(self.coordinates), len(self.endPtsOfContours)
-
- def decompileComponents(self, data, glyfTable):
- self.components = []
- more = 1
- haveInstructions = 0
- while more:
- component = GlyphComponent()
- more, haveInstr, data = component.decompile(data, glyfTable)
- haveInstructions = haveInstructions | haveInstr
- self.components.append(component)
- if haveInstructions:
- numInstructions, = struct.unpack(">h", data[:2])
- data = data[2:]
- self.program = ttProgram.Program()
- self.program.fromBytecode(data[:numInstructions])
- data = data[numInstructions:]
- if len(data) >= 4:
- log.warning(
- "too much glyph data at the end of composite glyph: %d excess bytes",
- len(data))
-
- def decompileCoordinates(self, data):
- endPtsOfContours = array.array("h")
- endPtsOfContours.frombytes(data[:2*self.numberOfContours])
- if sys.byteorder != "big": endPtsOfContours.byteswap()
- self.endPtsOfContours = endPtsOfContours.tolist()
-
- pos = 2*self.numberOfContours
- instructionLength, = struct.unpack(">h", data[pos:pos+2])
- self.program = ttProgram.Program()
- self.program.fromBytecode(data[pos+2:pos+2+instructionLength])
- pos += 2 + instructionLength
- nCoordinates = self.endPtsOfContours[-1] + 1
- flags, xCoordinates, yCoordinates = \
- self.decompileCoordinatesRaw(nCoordinates, data, pos)
-
- # fill in repetitions and apply signs
- self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates)
- xIndex = 0
- yIndex = 0
- for i in range(nCoordinates):
- flag = flags[i]
- # x coordinate
- if flag & flagXShort:
- if flag & flagXsame:
- x = xCoordinates[xIndex]
- else:
- x = -xCoordinates[xIndex]
- xIndex = xIndex + 1
- elif flag & flagXsame:
- x = 0
- else:
- x = xCoordinates[xIndex]
- xIndex = xIndex + 1
- # y coordinate
- if flag & flagYShort:
- if flag & flagYsame:
- y = yCoordinates[yIndex]
- else:
- y = -yCoordinates[yIndex]
- yIndex = yIndex + 1
- elif flag & flagYsame:
- y = 0
- else:
- y = yCoordinates[yIndex]
- yIndex = yIndex + 1
- coordinates[i] = (x, y)
- assert xIndex == len(xCoordinates)
- assert yIndex == len(yCoordinates)
- coordinates.relativeToAbsolute()
- # discard all flags except "keepFlags"
- for i in range(len(flags)):
- flags[i] &= keepFlags
- self.flags = flags
-
- def decompileCoordinatesRaw(self, nCoordinates, data, pos=0):
- # unpack flags and prepare unpacking of coordinates
- flags = bytearray(nCoordinates)
- # Warning: deep Python trickery going on. We use the struct module to unpack
- # the coordinates. We build a format string based on the flags, so we can
- # unpack the coordinates in one struct.unpack() call.
- xFormat = ">" # big endian
- yFormat = ">" # big endian
- j = 0
- while True:
- flag = data[pos]
- pos += 1
- repeat = 1
- if flag & flagRepeat:
- repeat = data[pos] + 1
- pos += 1
- for k in range(repeat):
- if flag & flagXShort:
- xFormat = xFormat + 'B'
- elif not (flag & flagXsame):
- xFormat = xFormat + 'h'
- if flag & flagYShort:
- yFormat = yFormat + 'B'
- elif not (flag & flagYsame):
- yFormat = yFormat + 'h'
- flags[j] = flag
- j = j + 1
- if j >= nCoordinates:
- break
- assert j == nCoordinates, "bad glyph flags"
- # unpack raw coordinates, krrrrrr-tching!
- xDataLen = struct.calcsize(xFormat)
- yDataLen = struct.calcsize(yFormat)
- if len(data) - pos - (xDataLen + yDataLen) >= 4:
- log.warning(
- "too much glyph data: %d excess bytes", len(data) - pos - (xDataLen + yDataLen))
- xCoordinates = struct.unpack(xFormat, data[pos:pos+xDataLen])
- yCoordinates = struct.unpack(yFormat, data[pos+xDataLen:pos+xDataLen+yDataLen])
- return flags, xCoordinates, yCoordinates
-
- def compileComponents(self, glyfTable):
- data = b""
- lastcomponent = len(self.components) - 1
- more = 1
- haveInstructions = 0
- for i in range(len(self.components)):
- if i == lastcomponent:
- haveInstructions = hasattr(self, "program")
- more = 0
- compo = self.components[i]
- data = data + compo.compile(more, haveInstructions, glyfTable)
- if haveInstructions:
- instructions = self.program.getBytecode()
- data = data + struct.pack(">h", len(instructions)) + instructions
- return data
-
- def compileCoordinates(self):
- assert len(self.coordinates) == len(self.flags)
- data = []
- endPtsOfContours = array.array("h", self.endPtsOfContours)
- if sys.byteorder != "big": endPtsOfContours.byteswap()
- data.append(endPtsOfContours.tobytes())
- instructions = self.program.getBytecode()
- data.append(struct.pack(">h", len(instructions)))
- data.append(instructions)
-
- deltas = self.coordinates.copy()
- deltas.toInt()
- deltas.absoluteToRelative()
-
- # TODO(behdad): Add a configuration option for this?
- deltas = self.compileDeltasGreedy(self.flags, deltas)
- #deltas = self.compileDeltasOptimal(self.flags, deltas)
-
- data.extend(deltas)
- return b''.join(data)
-
- def compileDeltasGreedy(self, flags, deltas):
- # Implements greedy algorithm for packing coordinate deltas:
- # uses shortest representation one coordinate at a time.
- compressedFlags = bytearray()
- compressedXs = bytearray()
- compressedYs = bytearray()
- lastflag = None
- repeat = 0
- for flag,(x,y) in zip(flags, deltas):
- # Oh, the horrors of TrueType
- # do x
- if x == 0:
- flag = flag | flagXsame
- elif -255 <= x <= 255:
- flag = flag | flagXShort
- if x > 0:
- flag = flag | flagXsame
- else:
- x = -x
- compressedXs.append(x)
- else:
- compressedXs.extend(struct.pack('>h', x))
- # do y
- if y == 0:
- flag = flag | flagYsame
- elif -255 <= y <= 255:
- flag = flag | flagYShort
- if y > 0:
- flag = flag | flagYsame
- else:
- y = -y
- compressedYs.append(y)
- else:
- compressedYs.extend(struct.pack('>h', y))
- # handle repeating flags
- if flag == lastflag and repeat != 255:
- repeat = repeat + 1
- if repeat == 1:
- compressedFlags.append(flag)
- else:
- compressedFlags[-2] = flag | flagRepeat
- compressedFlags[-1] = repeat
- else:
- repeat = 0
- compressedFlags.append(flag)
- lastflag = flag
- return (compressedFlags, compressedXs, compressedYs)
-
- def compileDeltasOptimal(self, flags, deltas):
- # Implements optimal, dynaic-programming, algorithm for packing coordinate
- # deltas. The savings are negligible :(.
- candidates = []
- bestTuple = None
- bestCost = 0
- repeat = 0
- for flag,(x,y) in zip(flags, deltas):
- # Oh, the horrors of TrueType
- flag, coordBytes = flagBest(x, y, flag)
- bestCost += 1 + coordBytes
- newCandidates = [(bestCost, bestTuple, flag, coordBytes),
- (bestCost+1, bestTuple, (flag|flagRepeat), coordBytes)]
- for lastCost,lastTuple,lastFlag,coordBytes in candidates:
- if lastCost + coordBytes <= bestCost + 1 and (lastFlag & flagRepeat) and (lastFlag < 0xff00) and flagSupports(lastFlag, flag):
- if (lastFlag & 0xFF) == (flag|flagRepeat) and lastCost == bestCost + 1:
- continue
- newCandidates.append((lastCost + coordBytes, lastTuple, lastFlag+256, coordBytes))
- candidates = newCandidates
- bestTuple = min(candidates, key=lambda t:t[0])
- bestCost = bestTuple[0]
-
- flags = []
- while bestTuple:
- cost, bestTuple, flag, coordBytes = bestTuple
- flags.append(flag)
- flags.reverse()
-
- compressedFlags = bytearray()
- compressedXs = bytearray()
- compressedYs = bytearray()
- coords = iter(deltas)
- ff = []
- for flag in flags:
- repeatCount, flag = flag >> 8, flag & 0xFF
- compressedFlags.append(flag)
- if flag & flagRepeat:
- assert(repeatCount > 0)
- compressedFlags.append(repeatCount)
- else:
- assert(repeatCount == 0)
- for i in range(1 + repeatCount):
- x,y = next(coords)
- flagEncodeCoords(flag, x, y, compressedXs, compressedYs)
- ff.append(flag)
- try:
- next(coords)
- raise Exception("internal error")
- except StopIteration:
- pass
-
- return (compressedFlags, compressedXs, compressedYs)
-
- def recalcBounds(self, glyfTable):
- """Recalculates the bounds of the glyph.
-
- Each glyph object stores its bounding box in the
- ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
- recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds
- must be provided to resolve component bounds.
- """
- coords, endPts, flags = self.getCoordinates(glyfTable)
- self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords)
-
- def isComposite(self):
- """Test whether a glyph has components"""
- if hasattr(self, "data") and self.data:
- return struct.unpack(">h", self.data[:2])[0] == -1
- else:
- return self.numberOfContours == -1
-
- def __getitem__(self, componentIndex):
- if not self.isComposite():
- raise ttLib.TTLibError("can't use glyph as sequence")
- return self.components[componentIndex]
-
- def getCoordinates(self, glyfTable):
- """Return the coordinates, end points and flags
-
- This method returns three values: A :py:class:`GlyphCoordinates` object,
- a list of the indexes of the final points of each contour (allowing you
- to split up the coordinates list into contours) and a list of flags.
-
- On simple glyphs, this method returns information from the glyph's own
- contours; on composite glyphs, it "flattens" all components recursively
- to return a list of coordinates representing all the components involved
- in the glyph.
-
- To interpret the flags for each point, see the "Simple Glyph Flags"
- section of the `glyf table specification <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#simple-glyph-description>`.
- """
-
- if self.numberOfContours > 0:
- return self.coordinates, self.endPtsOfContours, self.flags
- elif self.isComposite():
- # it's a composite
- allCoords = GlyphCoordinates()
- allFlags = bytearray()
- allEndPts = []
- for compo in self.components:
- g = glyfTable[compo.glyphName]
- try:
- coordinates, endPts, flags = g.getCoordinates(glyfTable)
- except RecursionError:
- raise ttLib.TTLibError("glyph '%s' contains a recursive component reference" % compo.glyphName)
- coordinates = GlyphCoordinates(coordinates)
- if hasattr(compo, "firstPt"):
- # component uses two reference points: we apply the transform _before_
- # computing the offset between the points
- if hasattr(compo, "transform"):
- coordinates.transform(compo.transform)
- x1,y1 = allCoords[compo.firstPt]
- x2,y2 = coordinates[compo.secondPt]
- move = x1-x2, y1-y2
- coordinates.translate(move)
- else:
- # component uses XY offsets
- move = compo.x, compo.y
- if not hasattr(compo, "transform"):
- coordinates.translate(move)
- else:
- apple_way = compo.flags & SCALED_COMPONENT_OFFSET
- ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET
- assert not (apple_way and ms_way)
- if not (apple_way or ms_way):
- scale_component_offset = SCALE_COMPONENT_OFFSET_DEFAULT # see top of this file
- else:
- scale_component_offset = apple_way
- if scale_component_offset:
- # the Apple way: first move, then scale (ie. scale the component offset)
- coordinates.translate(move)
- coordinates.transform(compo.transform)
- else:
- # the MS way: first scale, then move
- coordinates.transform(compo.transform)
- coordinates.translate(move)
- offset = len(allCoords)
- allEndPts.extend(e + offset for e in endPts)
- allCoords.extend(coordinates)
- allFlags.extend(flags)
- return allCoords, allEndPts, allFlags
- else:
- return GlyphCoordinates(), [], bytearray()
-
- def getComponentNames(self, glyfTable):
- """Returns a list of names of component glyphs used in this glyph
-
- This method can be used on simple glyphs (in which case it returns an
- empty list) or composite glyphs.
- """
- if not hasattr(self, "data"):
- if self.isComposite():
- return [c.glyphName for c in self.components]
- else:
- return []
-
- # Extract components without expanding glyph
-
- if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
- return [] # Not composite
-
- data = self.data
- i = 10
- components = []
- more = 1
- while more:
- flags, glyphID = struct.unpack(">HH", data[i:i+4])
- i += 4
- flags = int(flags)
- components.append(glyfTable.getGlyphName(int(glyphID)))
-
- if flags & ARG_1_AND_2_ARE_WORDS: i += 4
- else: i += 2
- if flags & WE_HAVE_A_SCALE: i += 2
- elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4
- elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8
- more = flags & MORE_COMPONENTS
-
- return components
-
- def trim(self, remove_hinting=False):
- """ Remove padding and, if requested, hinting, from a glyph.
- This works on both expanded and compacted glyphs, without
- expanding it."""
- if not hasattr(self, "data"):
- if remove_hinting:
- if self.isComposite():
- if hasattr(self, "program"):
- del self.program
- else:
- self.program = ttProgram.Program()
- self.program.fromBytecode([])
- # No padding to trim.
- return
- if not self.data:
- return
- numContours = struct.unpack(">h", self.data[:2])[0]
- data = bytearray(self.data)
- i = 10
- if numContours >= 0:
- i += 2 * numContours # endPtsOfContours
- nCoordinates = ((data[i-2] << 8) | data[i-1]) + 1
- instructionLen = (data[i] << 8) | data[i+1]
- if remove_hinting:
- # Zero instruction length
- data[i] = data [i+1] = 0
- i += 2
- if instructionLen:
- # Splice it out
- data = data[:i] + data[i+instructionLen:]
- instructionLen = 0
- else:
- i += 2 + instructionLen
-
- coordBytes = 0
- j = 0
- while True:
- flag = data[i]
- i = i + 1
- repeat = 1
- if flag & flagRepeat:
- repeat = data[i] + 1
- i = i + 1
- xBytes = yBytes = 0
- if flag & flagXShort:
- xBytes = 1
- elif not (flag & flagXsame):
- xBytes = 2
- if flag & flagYShort:
- yBytes = 1
- elif not (flag & flagYsame):
- yBytes = 2
- coordBytes += (xBytes + yBytes) * repeat
- j += repeat
- if j >= nCoordinates:
- break
- assert j == nCoordinates, "bad glyph flags"
- i += coordBytes
- # Remove padding
- data = data[:i]
- else:
- more = 1
- we_have_instructions = False
- while more:
- flags =(data[i] << 8) | data[i+1]
- if remove_hinting:
- flags &= ~WE_HAVE_INSTRUCTIONS
- if flags & WE_HAVE_INSTRUCTIONS:
- we_have_instructions = True
- data[i+0] = flags >> 8
- data[i+1] = flags & 0xFF
- i += 4
- flags = int(flags)
-
- if flags & ARG_1_AND_2_ARE_WORDS: i += 4
- else: i += 2
- if flags & WE_HAVE_A_SCALE: i += 2
- elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4
- elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8
- more = flags & MORE_COMPONENTS
- if we_have_instructions:
- instructionLen = (data[i] << 8) | data[i+1]
- i += 2 + instructionLen
- # Remove padding
- data = data[:i]
-
- self.data = data
-
- def removeHinting(self):
- """Removes TrueType hinting instructions from the glyph."""
- self.trim (remove_hinting=True)
-
- def draw(self, pen, glyfTable, offset=0):
- """Draws the glyph using the supplied pen object.
-
- Arguments:
- pen: An object conforming to the pen protocol.
- glyfTable: A :py:class:`table__g_l_y_f` object, to resolve components.
- offset (int): A horizontal offset. If provided, all coordinates are
- translated by this offset.
- """
-
- if self.isComposite():
- for component in self.components:
- glyphName, transform = component.getComponentInfo()
- pen.addComponent(glyphName, transform)
- return
-
- coordinates, endPts, flags = self.getCoordinates(glyfTable)
- if offset:
- coordinates = coordinates.copy()
- coordinates.translate((offset, 0))
- start = 0
- for end in endPts:
- end = end + 1
- contour = coordinates[start:end]
- cFlags = [flagOnCurve & f for f in flags[start:end]]
- start = end
- if 1 not in cFlags:
- # There is not a single on-curve point on the curve,
- # use pen.qCurveTo's special case by specifying None
- # as the on-curve point.
- contour.append(None)
- pen.qCurveTo(*contour)
- else:
- # Shuffle the points so that contour the is guaranteed
- # to *end* in an on-curve point, which we'll use for
- # the moveTo.
- firstOnCurve = cFlags.index(1) + 1
- contour = contour[firstOnCurve:] + contour[:firstOnCurve]
- cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve]
- pen.moveTo(contour[-1])
- while contour:
- nextOnCurve = cFlags.index(1) + 1
- if nextOnCurve == 1:
- # Skip a final lineTo(), as it is implied by
- # pen.closePath()
- if len(contour) > 1:
- pen.lineTo(contour[0])
- else:
- pen.qCurveTo(*contour[:nextOnCurve])
- contour = contour[nextOnCurve:]
- cFlags = cFlags[nextOnCurve:]
- pen.closePath()
-
- def drawPoints(self, pen, glyfTable, offset=0):
- """Draw the glyph using the supplied pointPen. As opposed to Glyph.draw(),
- this will not change the point indices.
- """
-
- if self.isComposite():
- for component in self.components:
- glyphName, transform = component.getComponentInfo()
- pen.addComponent(glyphName, transform)
- return
-
- coordinates, endPts, flags = self.getCoordinates(glyfTable)
- if offset:
- coordinates = coordinates.copy()
- coordinates.translate((offset, 0))
- start = 0
- for end in endPts:
- end = end + 1
- contour = coordinates[start:end]
- cFlags = flags[start:end]
- start = end
- pen.beginPath()
- # Start with the appropriate segment type based on the final segment
- segmentType = "line" if cFlags[-1] == 1 else "qcurve"
- for i, pt in enumerate(contour):
- if cFlags[i] & flagOnCurve == 1:
- pen.addPoint(pt, segmentType=segmentType)
- segmentType = "line"
- else:
- pen.addPoint(pt)
- segmentType = "qcurve"
- pen.endPath()
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+#
+# Variable Composite glyphs
+# https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md
+#
+
+
+class VarComponentFlags(IntFlag):
+ USE_MY_METRICS = 0x0001
+ AXIS_INDICES_ARE_SHORT = 0x0002
+ UNIFORM_SCALE = 0x0004
+ HAVE_TRANSLATE_X = 0x0008
+ HAVE_TRANSLATE_Y = 0x0010
+ HAVE_ROTATION = 0x0020
+ HAVE_SCALE_X = 0x0040
+ HAVE_SCALE_Y = 0x0080
+ HAVE_SKEW_X = 0x0100
+ HAVE_SKEW_Y = 0x0200
+ HAVE_TCENTER_X = 0x0400
+ HAVE_TCENTER_Y = 0x0800
+ GID_IS_24BIT = 0x1000
+ AXES_HAVE_VARIATION = 0x2000
+ RESET_UNSPECIFIED_AXES = 0x4000
+
+
+VarComponentTransformMappingValues = namedtuple(
+ "VarComponentTransformMappingValues",
+ ["flag", "fractionalBits", "scale", "defaultValue"],
+)
+
+VAR_COMPONENT_TRANSFORM_MAPPING = {
+ "translateX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TRANSLATE_X, 0, 1, 0
+ ),
+ "translateY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TRANSLATE_Y, 0, 1, 0
+ ),
+ "rotation": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_ROTATION, 12, 180, 0
+ ),
+ "scaleX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SCALE_X, 10, 1, 1
+ ),
+ "scaleY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SCALE_Y, 10, 1, 1
+ ),
+ "skewX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SKEW_X, 12, -180, 0
+ ),
+ "skewY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_SKEW_Y, 12, 180, 0
+ ),
+ "tCenterX": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TCENTER_X, 0, 1, 0
+ ),
+ "tCenterY": VarComponentTransformMappingValues(
+ VarComponentFlags.HAVE_TCENTER_Y, 0, 1, 0
+ ),
+}
+
+
+class GlyphVarComponent(object):
+ MIN_SIZE = 5
+
+ def __init__(self):
+ self.location = {}
+ self.transform = DecomposedTransform()
+
+ @staticmethod
+ def getSize(data):
+ size = 5
+ flags = struct.unpack(">H", data[:2])[0]
+ numAxes = int(data[2])
+
+ if flags & VarComponentFlags.GID_IS_24BIT:
+ size += 1
+
+ size += numAxes
+ if flags & VarComponentFlags.AXIS_INDICES_ARE_SHORT:
+ size += 2 * numAxes
+ else:
+ axisIndices = array.array("B", data[:numAxes])
+ size += numAxes
+
+ for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ if flags & mapping_values.flag:
+ size += 2
+
+ return size
+
+ def decompile(self, data, glyfTable):
+ flags = struct.unpack(">H", data[:2])[0]
+ self.flags = int(flags)
+ data = data[2:]
+
+ numAxes = int(data[0])
+ data = data[1:]
+
+ if flags & VarComponentFlags.GID_IS_24BIT:
+ glyphID = int(struct.unpack(">L", b"\0" + data[:3])[0])
+ data = data[3:]
+ flags ^= VarComponentFlags.GID_IS_24BIT
+ else:
+ glyphID = int(struct.unpack(">H", data[:2])[0])
+ data = data[2:]
+ self.glyphName = glyfTable.getGlyphName(int(glyphID))
+
+ if flags & VarComponentFlags.AXIS_INDICES_ARE_SHORT:
+ axisIndices = array.array("H", data[: 2 * numAxes])
+ if sys.byteorder != "big":
+ axisIndices.byteswap()
+ data = data[2 * numAxes :]
+ flags ^= VarComponentFlags.AXIS_INDICES_ARE_SHORT
+ else:
+ axisIndices = array.array("B", data[:numAxes])
+ data = data[numAxes:]
+ assert len(axisIndices) == numAxes
+ axisIndices = list(axisIndices)
+
+ axisValues = array.array("h", data[: 2 * numAxes])
+ if sys.byteorder != "big":
+ axisValues.byteswap()
+ data = data[2 * numAxes :]
+ assert len(axisValues) == numAxes
+ axisValues = [fi2fl(v, 14) for v in axisValues]
+
+ self.location = {
+ glyfTable.axisTags[i]: v for i, v in zip(axisIndices, axisValues)
+ }
+
+ def read_transform_component(data, values):
+ if flags & values.flag:
+ return (
+ data[2:],
+ fi2fl(struct.unpack(">h", data[:2])[0], values.fractionalBits)
+ * values.scale,
+ )
+ else:
+ return data, values.defaultValue
+
+ for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ data, value = read_transform_component(data, mapping_values)
+ setattr(self.transform, attr_name, value)
+
+ if flags & VarComponentFlags.UNIFORM_SCALE:
+ if flags & VarComponentFlags.HAVE_SCALE_X and not (
+ flags & VarComponentFlags.HAVE_SCALE_Y
+ ):
+ self.transform.scaleY = self.transform.scaleX
+ flags |= VarComponentFlags.HAVE_SCALE_Y
+ flags ^= VarComponentFlags.UNIFORM_SCALE
+
+ return data
+
+ def compile(self, glyfTable):
+ data = b""
+
+ if not hasattr(self, "flags"):
+ flags = 0
+ # Calculate optimal transform component flags
+ for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ value = getattr(self.transform, attr_name)
+ if fl2fi(value / mapping.scale, mapping.fractionalBits) != fl2fi(
+ mapping.defaultValue / mapping.scale, mapping.fractionalBits
+ ):
+ flags |= mapping.flag
+ else:
+ flags = self.flags
+
+ if (
+ flags & VarComponentFlags.HAVE_SCALE_X
+ and flags & VarComponentFlags.HAVE_SCALE_Y
+ and fl2fi(self.transform.scaleX, 10) == fl2fi(self.transform.scaleY, 10)
+ ):
+ flags |= VarComponentFlags.UNIFORM_SCALE
+ flags ^= VarComponentFlags.HAVE_SCALE_Y
+
+ numAxes = len(self.location)
+
+ data = data + struct.pack(">B", numAxes)
+
+ glyphID = glyfTable.getGlyphID(self.glyphName)
+ if glyphID > 65535:
+ flags |= VarComponentFlags.GID_IS_24BIT
+ data = data + struct.pack(">L", glyphID)[1:]
+ else:
+ data = data + struct.pack(">H", glyphID)
+
+ axisIndices = [glyfTable.axisTags.index(tag) for tag in self.location.keys()]
+ if all(a <= 255 for a in axisIndices):
+ axisIndices = array.array("B", axisIndices)
+ else:
+ axisIndices = array.array("H", axisIndices)
+ if sys.byteorder != "big":
+ axisIndices.byteswap()
+ flags |= VarComponentFlags.AXIS_INDICES_ARE_SHORT
+ data = data + bytes(axisIndices)
+
+ axisValues = self.location.values()
+ axisValues = array.array("h", (fl2fi(v, 14) for v in axisValues))
+ if sys.byteorder != "big":
+ axisValues.byteswap()
+ data = data + bytes(axisValues)
+
+ def write_transform_component(data, value, values):
+ if flags & values.flag:
+ return data + struct.pack(
+ ">h", fl2fi(value / values.scale, values.fractionalBits)
+ )
+ else:
+ return data
+
+ for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ value = getattr(self.transform, attr_name)
+ data = write_transform_component(data, value, mapping_values)
+
+ return struct.pack(">H", flags) + data
+
+ def toXML(self, writer, ttFont):
+ attrs = [("glyphName", self.glyphName)]
+
+ if hasattr(self, "flags"):
+ attrs = attrs + [("flags", hex(self.flags))]
+
+ for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ v = getattr(self.transform, attr_name)
+ if v != mapping.defaultValue:
+ attrs.append((attr_name, fl2str(v, mapping.fractionalBits)))
+
+ writer.begintag("varComponent", attrs)
+ writer.newline()
+
+ writer.begintag("location")
+ writer.newline()
+ for tag, v in self.location.items():
+ writer.simpletag("axis", [("tag", tag), ("value", fl2str(v, 14))])
+ writer.newline()
+ writer.endtag("location")
+ writer.newline()
+
+ writer.endtag("varComponent")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.glyphName = attrs["glyphName"]
+
+ if "flags" in attrs:
+ self.flags = safeEval(attrs["flags"])
+
+ for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items():
+ if attr_name not in attrs:
+ continue
+ v = str2fl(safeEval(attrs[attr_name]), mapping.fractionalBits)
+ setattr(self.transform, attr_name, v)
+
+ for c in content:
+ if not isinstance(c, tuple):
+ continue
+ name, attrs, content = c
+ if name != "location":
+ continue
+ for c in content:
+ if not isinstance(c, tuple):
+ continue
+ name, attrs, content = c
+ assert name == "axis"
+ assert not content
+ self.location[attrs["tag"]] = str2fl(safeEval(attrs["value"]), 14)
+
+ def getPointCount(self):
+ assert hasattr(self, "flags"), "VarComponent with variations must have flags"
+
+ count = 0
+
+ if self.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ count += len(self.location)
+
+ if self.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ count += 1
+ if self.flags & VarComponentFlags.HAVE_ROTATION:
+ count += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ count += 1
+ if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y):
+ count += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ count += 1
+
+ return count
+
+ def getCoordinatesAndControls(self):
+ coords = []
+ controls = []
+
+ if self.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ for tag, v in self.location.items():
+ controls.append(tag)
+ coords.append((fl2fi(v, 14), 0))
+
+ if self.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ controls.append("translate")
+ coords.append((self.transform.translateX, self.transform.translateY))
+ if self.flags & VarComponentFlags.HAVE_ROTATION:
+ controls.append("rotation")
+ coords.append((fl2fi(self.transform.rotation / 180, 12), 0))
+ if self.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ controls.append("scale")
+ coords.append(
+ (fl2fi(self.transform.scaleX, 10), fl2fi(self.transform.scaleY, 10))
+ )
+ if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y):
+ controls.append("skew")
+ coords.append(
+ (
+ fl2fi(self.transform.skewX / -180, 12),
+ fl2fi(self.transform.skewY / 180, 12),
+ )
+ )
+ if self.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ controls.append("tCenter")
+ coords.append((self.transform.tCenterX, self.transform.tCenterY))
+
+ return coords, controls
+
+ def setCoordinates(self, coords):
+ i = 0
+
+ if self.flags & VarComponentFlags.AXES_HAVE_VARIATION:
+ newLocation = {}
+ for tag in self.location:
+ newLocation[tag] = fi2fl(coords[i][0], 14)
+ i += 1
+ self.location = newLocation
+
+ self.transform = DecomposedTransform()
+ if self.flags & (
+ VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y
+ ):
+ self.transform.translateX, self.transform.translateY = coords[i]
+ i += 1
+ if self.flags & VarComponentFlags.HAVE_ROTATION:
+ self.transform.rotation = fi2fl(coords[i][0], 12) * 180
+ i += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
+ ):
+ self.transform.scaleX, self.transform.scaleY = fi2fl(
+ coords[i][0], 10
+ ), fi2fl(coords[i][1], 10)
+ i += 1
+ if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y):
+ self.transform.skewX, self.transform.skewY = (
+ fi2fl(coords[i][0], 12) * -180,
+ fi2fl(coords[i][1], 12) * 180,
+ )
+ i += 1
+ if self.flags & (
+ VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
+ ):
+ self.transform.tCenterX, self.transform.tCenterY = coords[i]
+ i += 1
+
+ return coords[i:]
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
-class GlyphComponent(object):
- """Represents a component within a composite glyph.
-
- The component is represented internally with four attributes: ``glyphName``,
- ``x``, ``y`` and ``transform``. If there is no "two-by-two" matrix (i.e
- no scaling, reflection, or rotation; only translation), the ``transform``
- attribute is not present.
- """
- # The above documentation is not *completely* true, but is *true enough* because
- # the rare firstPt/lastPt attributes are not totally supported and nobody seems to
- # mind - see below.
-
- def __init__(self):
- pass
-
- def getComponentInfo(self):
- """Return information about the component
-
- This method returns a tuple of two values: the glyph name of the component's
- base glyph, and a transformation matrix. As opposed to accessing the attributes
- directly, ``getComponentInfo`` always returns a six-element tuple of the
- component's transformation matrix, even when the two-by-two ``.transform``
- matrix is not present.
- """
- # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement
- # something equivalent in fontTools.objects.glyph (I'd rather not
- # convert it to an absolute offset, since it is valuable information).
- # This method will now raise "AttributeError: x" on glyphs that use
- # this TT feature.
- if hasattr(self, "transform"):
- [[xx, xy], [yx, yy]] = self.transform
- trans = (xx, xy, yx, yy, self.x, self.y)
- else:
- trans = (1, 0, 0, 1, self.x, self.y)
- return self.glyphName, trans
-
- def decompile(self, data, glyfTable):
- flags, glyphID = struct.unpack(">HH", data[:4])
- self.flags = int(flags)
- glyphID = int(glyphID)
- self.glyphName = glyfTable.getGlyphName(int(glyphID))
- data = data[4:]
-
- if self.flags & ARG_1_AND_2_ARE_WORDS:
- if self.flags & ARGS_ARE_XY_VALUES:
- self.x, self.y = struct.unpack(">hh", data[:4])
- else:
- x, y = struct.unpack(">HH", data[:4])
- self.firstPt, self.secondPt = int(x), int(y)
- data = data[4:]
- else:
- if self.flags & ARGS_ARE_XY_VALUES:
- self.x, self.y = struct.unpack(">bb", data[:2])
- else:
- x, y = struct.unpack(">BB", data[:2])
- self.firstPt, self.secondPt = int(x), int(y)
- data = data[2:]
-
- if self.flags & WE_HAVE_A_SCALE:
- scale, = struct.unpack(">h", data[:2])
- self.transform = [[fi2fl(scale,14), 0], [0, fi2fl(scale,14)]] # fixed 2.14
- data = data[2:]
- elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE:
- xscale, yscale = struct.unpack(">hh", data[:4])
- self.transform = [[fi2fl(xscale,14), 0], [0, fi2fl(yscale,14)]] # fixed 2.14
- data = data[4:]
- elif self.flags & WE_HAVE_A_TWO_BY_TWO:
- (xscale, scale01,
- scale10, yscale) = struct.unpack(">hhhh", data[:8])
- self.transform = [[fi2fl(xscale,14), fi2fl(scale01,14)],
- [fi2fl(scale10,14), fi2fl(yscale,14)]] # fixed 2.14
- data = data[8:]
- more = self.flags & MORE_COMPONENTS
- haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS
- self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS |
- SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
- NON_OVERLAPPING | OVERLAP_COMPOUND)
- return more, haveInstructions, data
-
- def compile(self, more, haveInstructions, glyfTable):
- data = b""
-
- # reset all flags we will calculate ourselves
- flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS |
- SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
- NON_OVERLAPPING | OVERLAP_COMPOUND)
- if more:
- flags = flags | MORE_COMPONENTS
- if haveInstructions:
- flags = flags | WE_HAVE_INSTRUCTIONS
-
- if hasattr(self, "firstPt"):
- if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255):
- data = data + struct.pack(">BB", self.firstPt, self.secondPt)
- else:
- data = data + struct.pack(">HH", self.firstPt, self.secondPt)
- flags = flags | ARG_1_AND_2_ARE_WORDS
- else:
- x = otRound(self.x)
- y = otRound(self.y)
- flags = flags | ARGS_ARE_XY_VALUES
- if (-128 <= x <= 127) and (-128 <= y <= 127):
- data = data + struct.pack(">bb", x, y)
- else:
- data = data + struct.pack(">hh", x, y)
- flags = flags | ARG_1_AND_2_ARE_WORDS
-
- if hasattr(self, "transform"):
- transform = [[fl2fi(x,14) for x in row] for row in self.transform]
- if transform[0][1] or transform[1][0]:
- flags = flags | WE_HAVE_A_TWO_BY_TWO
- data = data + struct.pack(">hhhh",
- transform[0][0], transform[0][1],
- transform[1][0], transform[1][1])
- elif transform[0][0] != transform[1][1]:
- flags = flags | WE_HAVE_AN_X_AND_Y_SCALE
- data = data + struct.pack(">hh",
- transform[0][0], transform[1][1])
- else:
- flags = flags | WE_HAVE_A_SCALE
- data = data + struct.pack(">h",
- transform[0][0])
-
- glyphID = glyfTable.getGlyphID(self.glyphName)
- return struct.pack(">HH", flags, glyphID) + data
-
- def toXML(self, writer, ttFont):
- attrs = [("glyphName", self.glyphName)]
- if not hasattr(self, "firstPt"):
- attrs = attrs + [("x", self.x), ("y", self.y)]
- else:
- attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)]
-
- if hasattr(self, "transform"):
- transform = self.transform
- if transform[0][1] or transform[1][0]:
- attrs = attrs + [
- ("scalex", fl2str(transform[0][0], 14)),
- ("scale01", fl2str(transform[0][1], 14)),
- ("scale10", fl2str(transform[1][0], 14)),
- ("scaley", fl2str(transform[1][1], 14)),
- ]
- elif transform[0][0] != transform[1][1]:
- attrs = attrs + [
- ("scalex", fl2str(transform[0][0], 14)),
- ("scaley", fl2str(transform[1][1], 14)),
- ]
- else:
- attrs = attrs + [("scale", fl2str(transform[0][0], 14))]
- attrs = attrs + [("flags", hex(self.flags))]
- writer.simpletag("component", attrs)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.glyphName = attrs["glyphName"]
- if "firstPt" in attrs:
- self.firstPt = safeEval(attrs["firstPt"])
- self.secondPt = safeEval(attrs["secondPt"])
- else:
- self.x = safeEval(attrs["x"])
- self.y = safeEval(attrs["y"])
- if "scale01" in attrs:
- scalex = str2fl(attrs["scalex"], 14)
- scale01 = str2fl(attrs["scale01"], 14)
- scale10 = str2fl(attrs["scale10"], 14)
- scaley = str2fl(attrs["scaley"], 14)
- self.transform = [[scalex, scale01], [scale10, scaley]]
- elif "scalex" in attrs:
- scalex = str2fl(attrs["scalex"], 14)
- scaley = str2fl(attrs["scaley"], 14)
- self.transform = [[scalex, 0], [0, scaley]]
- elif "scale" in attrs:
- scale = str2fl(attrs["scale"], 14)
- self.transform = [[scale, 0], [0, scale]]
- self.flags = safeEval(attrs["flags"])
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
class GlyphCoordinates(object):
- """A list of glyph coordinates.
-
- Unlike an ordinary list, this is a numpy-like matrix object which supports
- matrix addition, scalar multiplication and other operations described below.
- """
- def __init__(self, iterable=[]):
- self._a = array.array('d')
- self.extend(iterable)
-
- @property
- def array(self):
- """Returns the underlying array of coordinates"""
- return self._a
-
- @staticmethod
- def zeros(count):
- """Creates a new ``GlyphCoordinates`` object with all coordinates set to (0,0)"""
- g = GlyphCoordinates()
- g._a.frombytes(bytes(count * 2 * g._a.itemsize))
- return g
-
- def copy(self):
- """Creates a new ``GlyphCoordinates`` object which is a copy of the current one."""
- c = GlyphCoordinates()
- c._a.extend(self._a)
- return c
-
- def __len__(self):
- """Returns the number of coordinates in the array."""
- return len(self._a) // 2
-
- def __getitem__(self, k):
- """Returns a two element tuple (x,y)"""
- if isinstance(k, slice):
- indices = range(*k.indices(len(self)))
- return [self[i] for i in indices]
- a = self._a
- x = a[2*k]
- y = a[2*k+1]
- return (int(x) if x.is_integer() else x,
- int(y) if y.is_integer() else y)
-
- def __setitem__(self, k, v):
- """Sets a point's coordinates to a two element tuple (x,y)"""
- if isinstance(k, slice):
- indices = range(*k.indices(len(self)))
- # XXX This only works if len(v) == len(indices)
- for j,i in enumerate(indices):
- self[i] = v[j]
- return
- self._a[2*k],self._a[2*k+1] = v
-
- def __delitem__(self, i):
- """Removes a point from the list"""
- i = (2*i) % len(self._a)
- del self._a[i]
- del self._a[i]
-
- def __repr__(self):
- return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])'
-
- def append(self, p):
- self._a.extend(tuple(p))
-
- def extend(self, iterable):
- for p in iterable:
- self._a.extend(p)
-
- def toInt(self, *, round=otRound):
- a = self._a
- for i in range(len(a)):
- a[i] = round(a[i])
-
- def relativeToAbsolute(self):
- a = self._a
- x,y = 0,0
- for i in range(0, len(a), 2):
- a[i ] = x = a[i ] + x
- a[i+1] = y = a[i+1] + y
-
- def absoluteToRelative(self):
- a = self._a
- x,y = 0,0
- for i in range(0, len(a), 2):
- nx = a[i ]
- ny = a[i+1]
- a[i] = nx - x
- a[i+1] = ny - y
- x = nx
- y = ny
-
- def translate(self, p):
- """
- >>> GlyphCoordinates([(1,2)]).translate((.5,0))
- """
- x,y = p
- if x == 0 and y == 0:
- return
- a = self._a
- for i in range(0, len(a), 2):
- a[i] += x
- a[i+1] += y
-
- def scale(self, p):
- """
- >>> GlyphCoordinates([(1,2)]).scale((.5,0))
- """
- x,y = p
- if x == 1 and y == 1:
- return
- a = self._a
- for i in range(0, len(a), 2):
- a[i] *= x
- a[i+1] *= y
-
- def transform(self, t):
- """
- >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5)))
- """
- a = self._a
- for i in range(0, len(a), 2):
- x = a[i ]
- y = a[i+1]
- px = x * t[0][0] + y * t[1][0]
- py = x * t[0][1] + y * t[1][1]
- a[i] = px
- a[i+1] = py
-
- def __eq__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g2 = GlyphCoordinates([(1.0,2)])
- >>> g3 = GlyphCoordinates([(1.5,2)])
- >>> g == g2
- True
- >>> g == g3
- False
- >>> g2 == g3
- False
- """
- if type(self) != type(other):
- return NotImplemented
- return self._a == other._a
-
- def __ne__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g2 = GlyphCoordinates([(1.0,2)])
- >>> g3 = GlyphCoordinates([(1.5,2)])
- >>> g != g2
- False
- >>> g != g3
- True
- >>> g2 != g3
- True
- """
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- # Math operations
-
- def __pos__(self):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- >>> g2 = +g
- >>> g2
- GlyphCoordinates([(1, 2)])
- >>> g2.translate((1,0))
- >>> g2
- GlyphCoordinates([(2, 2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- """
- return self.copy()
- def __neg__(self):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- >>> g2 = -g
- >>> g2
- GlyphCoordinates([(-1, -2)])
- >>> g
- GlyphCoordinates([(1, 2)])
- """
- r = self.copy()
- a = r._a
- for i in range(len(a)):
- a[i] = -a[i]
- return r
- def __round__(self, *, round=otRound):
- r = self.copy()
- r.toInt(round=round)
- return r
-
- def __add__(self, other): return self.copy().__iadd__(other)
- def __sub__(self, other): return self.copy().__isub__(other)
- def __mul__(self, other): return self.copy().__imul__(other)
- def __truediv__(self, other): return self.copy().__itruediv__(other)
-
- __radd__ = __add__
- __rmul__ = __mul__
- def __rsub__(self, other): return other + (-self)
-
- def __iadd__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g += (.5,0)
- >>> g
- GlyphCoordinates([(1.5, 2)])
- >>> g2 = GlyphCoordinates([(3,4)])
- >>> g += g2
- >>> g
- GlyphCoordinates([(4.5, 6)])
- """
- if isinstance(other, tuple):
- assert len(other) == 2
- self.translate(other)
- return self
- if isinstance(other, GlyphCoordinates):
- other = other._a
- a = self._a
- assert len(a) == len(other)
- for i in range(len(a)):
- a[i] += other[i]
- return self
- return NotImplemented
-
- def __isub__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g -= (.5,0)
- >>> g
- GlyphCoordinates([(0.5, 2)])
- >>> g2 = GlyphCoordinates([(3,4)])
- >>> g -= g2
- >>> g
- GlyphCoordinates([(-2.5, -2)])
- """
- if isinstance(other, tuple):
- assert len(other) == 2
- self.translate((-other[0],-other[1]))
- return self
- if isinstance(other, GlyphCoordinates):
- other = other._a
- a = self._a
- assert len(a) == len(other)
- for i in range(len(a)):
- a[i] -= other[i]
- return self
- return NotImplemented
-
- def __imul__(self, other):
- """
- >>> g = GlyphCoordinates([(1,2)])
- >>> g *= (2,.5)
- >>> g *= 2
- >>> g
- GlyphCoordinates([(4, 2)])
- >>> g = GlyphCoordinates([(1,2)])
- >>> g *= 2
- >>> g
- GlyphCoordinates([(2, 4)])
- """
- if isinstance(other, tuple):
- assert len(other) == 2
- self.scale(other)
- return self
- if isinstance(other, Number):
- if other == 1:
- return self
- a = self._a
- for i in range(len(a)):
- a[i] *= other
- return self
- return NotImplemented
-
- def __itruediv__(self, other):
- """
- >>> g = GlyphCoordinates([(1,3)])
- >>> g /= (.5,1.5)
- >>> g /= 2
- >>> g
- GlyphCoordinates([(1, 1)])
- """
- if isinstance(other, Number):
- other = (other, other)
- if isinstance(other, tuple):
- if other == (1,1):
- return self
- assert len(other) == 2
- self.scale((1./other[0],1./other[1]))
- return self
- return NotImplemented
-
- def __bool__(self):
- """
- >>> g = GlyphCoordinates([])
- >>> bool(g)
- False
- >>> g = GlyphCoordinates([(0,0), (0.,0)])
- >>> bool(g)
- True
- >>> g = GlyphCoordinates([(0,0), (1,0)])
- >>> bool(g)
- True
- >>> g = GlyphCoordinates([(0,.5), (0,0)])
- >>> bool(g)
- True
- """
- return bool(self._a)
-
- __nonzero__ = __bool__
+ """A list of glyph coordinates.
+
+ Unlike an ordinary list, this is a numpy-like matrix object which supports
+ matrix addition, scalar multiplication and other operations described below.
+ """
+
+ def __init__(self, iterable=[]):
+ self._a = array.array("d")
+ self.extend(iterable)
+
+ @property
+ def array(self):
+ """Returns the underlying array of coordinates"""
+ return self._a
+
+ @staticmethod
+ def zeros(count):
+ """Creates a new ``GlyphCoordinates`` object with all coordinates set to (0,0)"""
+ g = GlyphCoordinates()
+ g._a.frombytes(bytes(count * 2 * g._a.itemsize))
+ return g
+
+ def copy(self):
+ """Creates a new ``GlyphCoordinates`` object which is a copy of the current one."""
+ c = GlyphCoordinates()
+ c._a.extend(self._a)
+ return c
+
+ def __len__(self):
+ """Returns the number of coordinates in the array."""
+ return len(self._a) // 2
+
+ def __getitem__(self, k):
+ """Returns a two element tuple (x,y)"""
+ a = self._a
+ if isinstance(k, slice):
+ indices = range(*k.indices(len(self)))
+ # Instead of calling ourselves recursively, duplicate code; faster
+ ret = []
+ for k in indices:
+ x = a[2 * k]
+ y = a[2 * k + 1]
+ ret.append(
+ (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y)
+ )
+ return ret
+ x = a[2 * k]
+ y = a[2 * k + 1]
+ return (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y)
+
+ def __setitem__(self, k, v):
+ """Sets a point's coordinates to a two element tuple (x,y)"""
+ if isinstance(k, slice):
+ indices = range(*k.indices(len(self)))
+ # XXX This only works if len(v) == len(indices)
+ for j, i in enumerate(indices):
+ self[i] = v[j]
+ return
+ self._a[2 * k], self._a[2 * k + 1] = v
+
+ def __delitem__(self, i):
+ """Removes a point from the list"""
+ i = (2 * i) % len(self._a)
+ del self._a[i]
+ del self._a[i]
+
+ def __repr__(self):
+ return "GlyphCoordinates([" + ",".join(str(c) for c in self) + "])"
+
+ def append(self, p):
+ self._a.extend(tuple(p))
+
+ def extend(self, iterable):
+ for p in iterable:
+ self._a.extend(p)
+
+ def toInt(self, *, round=otRound):
+ if round is noRound:
+ return
+ a = self._a
+ for i in range(len(a)):
+ a[i] = round(a[i])
+
+ def calcBounds(self):
+ a = self._a
+ if not a:
+ return 0, 0, 0, 0
+ xs = a[0::2]
+ ys = a[1::2]
+ return min(xs), min(ys), max(xs), max(ys)
+
+ def calcIntBounds(self, round=otRound):
+ return tuple(round(v) for v in self.calcBounds())
+
+ def relativeToAbsolute(self):
+ a = self._a
+ x, y = 0, 0
+ for i in range(0, len(a), 2):
+ a[i] = x = a[i] + x
+ a[i + 1] = y = a[i + 1] + y
+
+ def absoluteToRelative(self):
+ a = self._a
+ x, y = 0, 0
+ for i in range(0, len(a), 2):
+ nx = a[i]
+ ny = a[i + 1]
+ a[i] = nx - x
+ a[i + 1] = ny - y
+ x = nx
+ y = ny
+
+ def translate(self, p):
+ """
+ >>> GlyphCoordinates([(1,2)]).translate((.5,0))
+ """
+ x, y = p
+ if x == 0 and y == 0:
+ return
+ a = self._a
+ for i in range(0, len(a), 2):
+ a[i] += x
+ a[i + 1] += y
+
+ def scale(self, p):
+ """
+ >>> GlyphCoordinates([(1,2)]).scale((.5,0))
+ """
+ x, y = p
+ if x == 1 and y == 1:
+ return
+ a = self._a
+ for i in range(0, len(a), 2):
+ a[i] *= x
+ a[i + 1] *= y
+
+ def transform(self, t):
+ """
+ >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5)))
+ """
+ a = self._a
+ for i in range(0, len(a), 2):
+ x = a[i]
+ y = a[i + 1]
+ px = x * t[0][0] + y * t[1][0]
+ py = x * t[0][1] + y * t[1][1]
+ a[i] = px
+ a[i + 1] = py
+
+ def __eq__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g2 = GlyphCoordinates([(1.0,2)])
+ >>> g3 = GlyphCoordinates([(1.5,2)])
+ >>> g == g2
+ True
+ >>> g == g3
+ False
+ >>> g2 == g3
+ False
+ """
+ if type(self) != type(other):
+ return NotImplemented
+ return self._a == other._a
+
+ def __ne__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g2 = GlyphCoordinates([(1.0,2)])
+ >>> g3 = GlyphCoordinates([(1.5,2)])
+ >>> g != g2
+ False
+ >>> g != g3
+ True
+ >>> g2 != g3
+ True
+ """
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ # Math operations
+
+ def __pos__(self):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ >>> g2 = +g
+ >>> g2
+ GlyphCoordinates([(1, 2)])
+ >>> g2.translate((1,0))
+ >>> g2
+ GlyphCoordinates([(2, 2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ """
+ return self.copy()
+
+ def __neg__(self):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ >>> g2 = -g
+ >>> g2
+ GlyphCoordinates([(-1, -2)])
+ >>> g
+ GlyphCoordinates([(1, 2)])
+ """
+ r = self.copy()
+ a = r._a
+ for i in range(len(a)):
+ a[i] = -a[i]
+ return r
+
+ def __round__(self, *, round=otRound):
+ r = self.copy()
+ r.toInt(round=round)
+ return r
+
+ def __add__(self, other):
+ return self.copy().__iadd__(other)
+
+ def __sub__(self, other):
+ return self.copy().__isub__(other)
+
+ def __mul__(self, other):
+ return self.copy().__imul__(other)
+
+ def __truediv__(self, other):
+ return self.copy().__itruediv__(other)
+
+ __radd__ = __add__
+ __rmul__ = __mul__
+
+ def __rsub__(self, other):
+ return other + (-self)
+
+ def __iadd__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g += (.5,0)
+ >>> g
+ GlyphCoordinates([(1.5, 2)])
+ >>> g2 = GlyphCoordinates([(3,4)])
+ >>> g += g2
+ >>> g
+ GlyphCoordinates([(4.5, 6)])
+ """
+ if isinstance(other, tuple):
+ assert len(other) == 2
+ self.translate(other)
+ return self
+ if isinstance(other, GlyphCoordinates):
+ other = other._a
+ a = self._a
+ assert len(a) == len(other)
+ for i in range(len(a)):
+ a[i] += other[i]
+ return self
+ return NotImplemented
+
+ def __isub__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g -= (.5,0)
+ >>> g
+ GlyphCoordinates([(0.5, 2)])
+ >>> g2 = GlyphCoordinates([(3,4)])
+ >>> g -= g2
+ >>> g
+ GlyphCoordinates([(-2.5, -2)])
+ """
+ if isinstance(other, tuple):
+ assert len(other) == 2
+ self.translate((-other[0], -other[1]))
+ return self
+ if isinstance(other, GlyphCoordinates):
+ other = other._a
+ a = self._a
+ assert len(a) == len(other)
+ for i in range(len(a)):
+ a[i] -= other[i]
+ return self
+ return NotImplemented
+
+ def __imul__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g *= (2,.5)
+ >>> g *= 2
+ >>> g
+ GlyphCoordinates([(4, 2)])
+ >>> g = GlyphCoordinates([(1,2)])
+ >>> g *= 2
+ >>> g
+ GlyphCoordinates([(2, 4)])
+ """
+ if isinstance(other, tuple):
+ assert len(other) == 2
+ self.scale(other)
+ return self
+ if isinstance(other, Number):
+ if other == 1:
+ return self
+ a = self._a
+ for i in range(len(a)):
+ a[i] *= other
+ return self
+ return NotImplemented
+
+ def __itruediv__(self, other):
+ """
+ >>> g = GlyphCoordinates([(1,3)])
+ >>> g /= (.5,1.5)
+ >>> g /= 2
+ >>> g
+ GlyphCoordinates([(1, 1)])
+ """
+ if isinstance(other, Number):
+ other = (other, other)
+ if isinstance(other, tuple):
+ if other == (1, 1):
+ return self
+ assert len(other) == 2
+ self.scale((1.0 / other[0], 1.0 / other[1]))
+ return self
+ return NotImplemented
+
+ def __bool__(self):
+ """
+ >>> g = GlyphCoordinates([])
+ >>> bool(g)
+ False
+ >>> g = GlyphCoordinates([(0,0), (0.,0)])
+ >>> bool(g)
+ True
+ >>> g = GlyphCoordinates([(0,0), (1,0)])
+ >>> bool(g)
+ True
+ >>> g = GlyphCoordinates([(0,.5), (0,0)])
+ >>> bool(g)
+ True
+ """
+ return bool(self._a)
+
+ __nonzero__ = __bool__
if __name__ == "__main__":
- import doctest, sys
- sys.exit(doctest.testmod().failed)
+ import doctest, sys
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/tables/_g_v_a_r.py b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
index dd198f4b..11485bf0 100644
--- a/Lib/fontTools/ttLib/tables/_g_v_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_g_v_a_r.py
@@ -1,3 +1,4 @@
+from collections import UserDict, deque
from functools import partial
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
@@ -37,238 +38,247 @@ GVAR_HEADER_FORMAT = """
GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT)
-class _lazy_dict(dict):
- def get(self, k, *args):
- v = super().get(k, *args)
- if callable(v):
- v = v()
- self[k] = v
- return v
+class _LazyDict(UserDict):
+ def __init__(self, data):
+ super().__init__()
+ self.data = data
def __getitem__(self, k):
- v = super().__getitem__(k)
+ v = self.data[k]
if callable(v):
v = v()
- self[k] = v
+ self.data[k] = v
return v
- def items(self):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().items()
-
- def values(self):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().values()
-
- def __eq__(self, other):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().__eq__(other)
-
- def __neq__(self, other):
- if not hasattr(self, '_loaded'):
- self._load()
- return super().__neq__(other)
-
- def _load(self):
- for k in self:
- self[k]
- self._loaded = True
class table__g_v_a_r(DefaultTable.DefaultTable):
- dependencies = ["fvar", "glyf"]
-
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.version, self.reserved = 1, 0
- self.variations = {}
-
- def compile(self, ttFont):
- axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- sharedTuples = tv.compileSharedTuples(
- axisTags, itertools.chain(*self.variations.values()))
- sharedTupleIndices = {coord:i for i, coord in enumerate(sharedTuples)}
- sharedTupleSize = sum([len(c) for c in sharedTuples])
- compiledGlyphs = self.compileGlyphs_(
- ttFont, axisTags, sharedTupleIndices)
- offset = 0
- offsets = []
- for glyph in compiledGlyphs:
- offsets.append(offset)
- offset += len(glyph)
- offsets.append(offset)
- compiledOffsets, tableFormat = self.compileOffsets_(offsets)
-
- header = {}
- header["version"] = self.version
- header["reserved"] = self.reserved
- header["axisCount"] = len(axisTags)
- header["sharedTupleCount"] = len(sharedTuples)
- header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
- header["glyphCount"] = len(compiledGlyphs)
- header["flags"] = tableFormat
- header["offsetToGlyphVariationData"] = header["offsetToSharedTuples"] + sharedTupleSize
- compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)
-
- result = [compiledHeader, compiledOffsets]
- result.extend(sharedTuples)
- result.extend(compiledGlyphs)
- return b''.join(result)
-
- def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
- result = []
- glyf = ttFont['glyf']
- for glyphName in ttFont.getGlyphOrder():
- glyph = glyf[glyphName]
- pointCount = self.getNumPoints_(glyph)
- variations = self.variations.get(glyphName, [])
- result.append(compileGlyph_(variations, pointCount,
- axisTags, sharedCoordIndices))
- return result
-
- def decompile(self, data, ttFont):
- axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- glyphs = ttFont.getGlyphOrder()
- sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
- assert len(glyphs) == self.glyphCount
- assert len(axisTags) == self.axisCount
- offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount)
- sharedCoords = tv.decompileSharedTuples(
- axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples)
- self.variations = _lazy_dict()
- offsetToData = self.offsetToGlyphVariationData
- glyf = ttFont['glyf']
-
- def decompileVarGlyph(glyphName, gid):
- glyph = glyf[glyphName]
- numPointsInGlyph = self.getNumPoints_(glyph)
- gvarData = data[offsetToData + offsets[gid] : offsetToData + offsets[gid + 1]]
- return decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData)
-
- for gid in range(self.glyphCount):
- glyphName = glyphs[gid]
- self.variations[glyphName] = partial(decompileVarGlyph, glyphName, gid)
-
- @staticmethod
- def decompileOffsets_(data, tableFormat, glyphCount):
- if tableFormat == 0:
- # Short format: array of UInt16
- offsets = array.array("H")
- offsetsSize = (glyphCount + 1) * 2
- else:
- # Long format: array of UInt32
- offsets = array.array("I")
- offsetsSize = (glyphCount + 1) * 4
- offsets.frombytes(data[0 : offsetsSize])
- if sys.byteorder != "big": offsets.byteswap()
-
- # In the short format, offsets need to be multiplied by 2.
- # This is not documented in Apple's TrueType specification,
- # but can be inferred from the FreeType implementation, and
- # we could verify it with two sample GX fonts.
- if tableFormat == 0:
- offsets = [off * 2 for off in offsets]
-
- return offsets
-
- @staticmethod
- def compileOffsets_(offsets):
- """Packs a list of offsets into a 'gvar' offset table.
-
- Returns a pair (bytestring, tableFormat). Bytestring is the
- packed offset table. Format indicates whether the table
- uses short (tableFormat=0) or long (tableFormat=1) integers.
- The returned tableFormat should get packed into the flags field
- of the 'gvar' header.
- """
- assert len(offsets) >= 2
- for i in range(1, len(offsets)):
- assert offsets[i - 1] <= offsets[i]
- if max(offsets) <= 0xffff * 2:
- packed = array.array("H", [n >> 1 for n in offsets])
- tableFormat = 0
- else:
- packed = array.array("I", offsets)
- tableFormat = 1
- if sys.byteorder != "big": packed.byteswap()
- return (packed.tobytes(), tableFormat)
-
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.simpletag("reserved", value=self.reserved)
- writer.newline()
- axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
- for glyphName in ttFont.getGlyphNames():
- variations = self.variations.get(glyphName)
- if not variations:
- continue
- writer.begintag("glyphVariations", glyph=glyphName)
- writer.newline()
- for gvar in variations:
- gvar.toXML(writer, axisTags)
- writer.endtag("glyphVariations")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version":
- self.version = safeEval(attrs["value"])
- elif name == "reserved":
- self.reserved = safeEval(attrs["value"])
- elif name == "glyphVariations":
- if not hasattr(self, "variations"):
- self.variations = {}
- glyphName = attrs["glyph"]
- glyph = ttFont["glyf"][glyphName]
- numPointsInGlyph = self.getNumPoints_(glyph)
- glyphVariations = []
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- if name == "tuple":
- gvar = TupleVariation({}, [None] * numPointsInGlyph)
- glyphVariations.append(gvar)
- for tupleElement in content:
- if isinstance(tupleElement, tuple):
- tupleName, tupleAttrs, tupleContent = tupleElement
- gvar.fromXML(tupleName, tupleAttrs, tupleContent)
- self.variations[glyphName] = glyphVariations
-
- @staticmethod
- def getNumPoints_(glyph):
- NUM_PHANTOM_POINTS = 4
- if glyph.isComposite():
- return len(glyph.components) + NUM_PHANTOM_POINTS
- else:
- # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute.
- return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
+ dependencies = ["fvar", "glyf"]
+
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.version, self.reserved = 1, 0
+ self.variations = {}
+
+ def compile(self, ttFont):
+ axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
+ sharedTuples = tv.compileSharedTuples(
+ axisTags, itertools.chain(*self.variations.values())
+ )
+ sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)}
+ sharedTupleSize = sum([len(c) for c in sharedTuples])
+ compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedTupleIndices)
+ offset = 0
+ offsets = []
+ for glyph in compiledGlyphs:
+ offsets.append(offset)
+ offset += len(glyph)
+ offsets.append(offset)
+ compiledOffsets, tableFormat = self.compileOffsets_(offsets)
+
+ header = {}
+ header["version"] = self.version
+ header["reserved"] = self.reserved
+ header["axisCount"] = len(axisTags)
+ header["sharedTupleCount"] = len(sharedTuples)
+ header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
+ header["glyphCount"] = len(compiledGlyphs)
+ header["flags"] = tableFormat
+ header["offsetToGlyphVariationData"] = (
+ header["offsetToSharedTuples"] + sharedTupleSize
+ )
+ compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)
+
+ result = [compiledHeader, compiledOffsets]
+ result.extend(sharedTuples)
+ result.extend(compiledGlyphs)
+ return b"".join(result)
+
+ def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
+ result = []
+ glyf = ttFont["glyf"]
+ for glyphName in ttFont.getGlyphOrder():
+ variations = self.variations.get(glyphName, [])
+ if not variations:
+ result.append(b"")
+ continue
+ pointCountUnused = 0 # pointCount is actually unused by compileGlyph
+ result.append(
+ compileGlyph_(
+ variations, pointCountUnused, axisTags, sharedCoordIndices
+ )
+ )
+ return result
+
+ def decompile(self, data, ttFont):
+ axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
+ glyphs = ttFont.getGlyphOrder()
+ sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
+ assert len(glyphs) == self.glyphCount
+ assert len(axisTags) == self.axisCount
+ offsets = self.decompileOffsets_(
+ data[GVAR_HEADER_SIZE:],
+ tableFormat=(self.flags & 1),
+ glyphCount=self.glyphCount,
+ )
+ sharedCoords = tv.decompileSharedTuples(
+ axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples
+ )
+ variations = {}
+ offsetToData = self.offsetToGlyphVariationData
+ glyf = ttFont["glyf"]
+
+ def decompileVarGlyph(glyphName, gid):
+ gvarData = data[
+ offsetToData + offsets[gid] : offsetToData + offsets[gid + 1]
+ ]
+ if not gvarData:
+ return []
+ glyph = glyf[glyphName]
+ numPointsInGlyph = self.getNumPoints_(glyph)
+ return decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData)
+
+ for gid in range(self.glyphCount):
+ glyphName = glyphs[gid]
+ variations[glyphName] = partial(decompileVarGlyph, glyphName, gid)
+ self.variations = _LazyDict(variations)
+
+ if ttFont.lazy is False: # Be lazy for None and True
+ self.ensureDecompiled()
+
+ def ensureDecompiled(self, recurse=False):
+ # The recurse argument is unused, but part of the signature of
+ # ensureDecompiled across the library.
+ # Use a zero-length deque to consume the lazy dict
+ deque(self.variations.values(), maxlen=0)
+
+ @staticmethod
+ def decompileOffsets_(data, tableFormat, glyphCount):
+ if tableFormat == 0:
+ # Short format: array of UInt16
+ offsets = array.array("H")
+ offsetsSize = (glyphCount + 1) * 2
+ else:
+ # Long format: array of UInt32
+ offsets = array.array("I")
+ offsetsSize = (glyphCount + 1) * 4
+ offsets.frombytes(data[0:offsetsSize])
+ if sys.byteorder != "big":
+ offsets.byteswap()
+
+ # In the short format, offsets need to be multiplied by 2.
+ # This is not documented in Apple's TrueType specification,
+ # but can be inferred from the FreeType implementation, and
+ # we could verify it with two sample GX fonts.
+ if tableFormat == 0:
+ offsets = [off * 2 for off in offsets]
+
+ return offsets
+
+ @staticmethod
+ def compileOffsets_(offsets):
+ """Packs a list of offsets into a 'gvar' offset table.
+
+ Returns a pair (bytestring, tableFormat). Bytestring is the
+ packed offset table. Format indicates whether the table
+ uses short (tableFormat=0) or long (tableFormat=1) integers.
+ The returned tableFormat should get packed into the flags field
+ of the 'gvar' header.
+ """
+ assert len(offsets) >= 2
+ for i in range(1, len(offsets)):
+ assert offsets[i - 1] <= offsets[i]
+ if max(offsets) <= 0xFFFF * 2:
+ packed = array.array("H", [n >> 1 for n in offsets])
+ tableFormat = 0
+ else:
+ packed = array.array("I", offsets)
+ tableFormat = 1
+ if sys.byteorder != "big":
+ packed.byteswap()
+ return (packed.tobytes(), tableFormat)
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("reserved", value=self.reserved)
+ writer.newline()
+ axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
+ for glyphName in ttFont.getGlyphNames():
+ variations = self.variations.get(glyphName)
+ if not variations:
+ continue
+ writer.begintag("glyphVariations", glyph=glyphName)
+ writer.newline()
+ for gvar in variations:
+ gvar.toXML(writer, axisTags)
+ writer.endtag("glyphVariations")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ elif name == "reserved":
+ self.reserved = safeEval(attrs["value"])
+ elif name == "glyphVariations":
+ if not hasattr(self, "variations"):
+ self.variations = {}
+ glyphName = attrs["glyph"]
+ glyph = ttFont["glyf"][glyphName]
+ numPointsInGlyph = self.getNumPoints_(glyph)
+ glyphVariations = []
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ if name == "tuple":
+ gvar = TupleVariation({}, [None] * numPointsInGlyph)
+ glyphVariations.append(gvar)
+ for tupleElement in content:
+ if isinstance(tupleElement, tuple):
+ tupleName, tupleAttrs, tupleContent = tupleElement
+ gvar.fromXML(tupleName, tupleAttrs, tupleContent)
+ self.variations[glyphName] = glyphVariations
+
+ @staticmethod
+ def getNumPoints_(glyph):
+ NUM_PHANTOM_POINTS = 4
+
+ if glyph.isComposite():
+ return len(glyph.components) + NUM_PHANTOM_POINTS
+ elif glyph.isVarComposite():
+ count = 0
+ for component in glyph.components:
+ count += component.getPointCount()
+ return count + NUM_PHANTOM_POINTS
+ else:
+ # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute.
+ return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices):
- tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
- variations, pointCount, axisTags, sharedCoordIndices)
- if tupleVariationCount == 0:
- return b""
- result = [
- struct.pack(">HH", tupleVariationCount, 4 + len(tuples)),
- tuples,
- data
- ]
- if (len(tuples) + len(data)) % 2 != 0:
- result.append(b"\0") # padding
- return b''.join(result)
+ tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
+ variations, pointCount, axisTags, sharedCoordIndices
+ )
+ if tupleVariationCount == 0:
+ return b""
+ result = [struct.pack(">HH", tupleVariationCount, 4 + len(tuples)), tuples, data]
+ if (len(tuples) + len(data)) % 2 != 0:
+ result.append(b"\0") # padding
+ return b"".join(result)
def decompileGlyph_(pointCount, sharedTuples, axisTags, data):
- if len(data) < 4:
- return []
- tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
- dataPos = offsetToData
- return tv.decompileTupleVariationStore(
- "gvar", axisTags,
- tupleVariationCount, pointCount,
- sharedTuples, data, 4, offsetToData
- )
+ if len(data) < 4:
+ return []
+ tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
+ dataPos = offsetToData
+ return tv.decompileTupleVariationStore(
+ "gvar",
+ axisTags,
+ tupleVariationCount,
+ pointCount,
+ sharedTuples,
+ data,
+ 4,
+ offsetToData,
+ )
diff --git a/Lib/fontTools/ttLib/tables/_h_d_m_x.py b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
index 9f860d2a..b6d56a7e 100644
--- a/Lib/fontTools/ttLib/tables/_h_d_m_x.py
+++ b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
@@ -11,106 +11,109 @@ hdmxHeaderFormat = """
recordSize: l
"""
+
class _GlyphnamedList(Mapping):
+ def __init__(self, reverseGlyphOrder, data):
+ self._array = data
+ self._map = dict(reverseGlyphOrder)
- def __init__(self, reverseGlyphOrder, data):
- self._array = data
- self._map = dict(reverseGlyphOrder)
+ def __getitem__(self, k):
+ return self._array[self._map[k]]
- def __getitem__(self, k):
- return self._array[self._map[k]]
+ def __len__(self):
+ return len(self._map)
- def __len__(self):
- return len(self._map)
+ def __iter__(self):
+ return iter(self._map)
- def __iter__(self):
- return iter(self._map)
+ def keys(self):
+ return self._map.keys()
- def keys(self):
- return self._map.keys()
class table__h_d_m_x(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
+ self.hdmx = {}
+ for i in range(self.numRecords):
+ ppem = byteord(data[0])
+ maxSize = byteord(data[1])
+ widths = _GlyphnamedList(
+ ttFont.getReverseGlyphMap(), array.array("B", data[2 : 2 + numGlyphs])
+ )
+ self.hdmx[ppem] = widths
+ data = data[self.recordSize :]
+ assert len(data) == 0, "too much hdmx data"
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
- self.hdmx = {}
- for i in range(self.numRecords):
- ppem = byteord(data[0])
- maxSize = byteord(data[1])
- widths = _GlyphnamedList(ttFont.getReverseGlyphMap(), array.array("B", data[2:2+numGlyphs]))
- self.hdmx[ppem] = widths
- data = data[self.recordSize:]
- assert len(data) == 0, "too much hdmx data"
+ def compile(self, ttFont):
+ self.version = 0
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
+ pad = (self.recordSize - 2 - numGlyphs) * b"\0"
+ self.numRecords = len(self.hdmx)
+ data = sstruct.pack(hdmxHeaderFormat, self)
+ items = sorted(self.hdmx.items())
+ for ppem, widths in items:
+ data = data + bytechr(ppem) + bytechr(max(widths.values()))
+ for glyphID in range(len(glyphOrder)):
+ width = widths[glyphOrder[glyphID]]
+ data = data + bytechr(width)
+ data = data + pad
+ return data
- def compile(self, ttFont):
- self.version = 0
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
- pad = (self.recordSize - 2 - numGlyphs) * b"\0"
- self.numRecords = len(self.hdmx)
- data = sstruct.pack(hdmxHeaderFormat, self)
- items = sorted(self.hdmx.items())
- for ppem, widths in items:
- data = data + bytechr(ppem) + bytechr(max(widths.values()))
- for glyphID in range(len(glyphOrder)):
- width = widths[glyphOrder[glyphID]]
- data = data + bytechr(width)
- data = data + pad
- return data
+ def toXML(self, writer, ttFont):
+ writer.begintag("hdmxData")
+ writer.newline()
+ ppems = sorted(self.hdmx.keys())
+ records = []
+ format = ""
+ for ppem in ppems:
+ widths = self.hdmx[ppem]
+ records.append(widths)
+ format = format + "%4d"
+ glyphNames = ttFont.getGlyphOrder()[:]
+ glyphNames.sort()
+ maxNameLen = max(map(len, glyphNames))
+ format = "%" + repr(maxNameLen) + "s:" + format + " ;"
+ writer.write(format % (("ppem",) + tuple(ppems)))
+ writer.newline()
+ writer.newline()
+ for glyphName in glyphNames:
+ row = []
+ for ppem in ppems:
+ widths = self.hdmx[ppem]
+ row.append(widths[glyphName])
+ if ";" in glyphName:
+ glyphName = "\\x3b".join(glyphName.split(";"))
+ writer.write(format % ((glyphName,) + tuple(row)))
+ writer.newline()
+ writer.endtag("hdmxData")
+ writer.newline()
- def toXML(self, writer, ttFont):
- writer.begintag("hdmxData")
- writer.newline()
- ppems = sorted(self.hdmx.keys())
- records = []
- format = ""
- for ppem in ppems:
- widths = self.hdmx[ppem]
- records.append(widths)
- format = format + "%4d"
- glyphNames = ttFont.getGlyphOrder()[:]
- glyphNames.sort()
- maxNameLen = max(map(len, glyphNames))
- format = "%" + repr(maxNameLen) + 's:' + format + ' ;'
- writer.write(format % (("ppem",) + tuple(ppems)))
- writer.newline()
- writer.newline()
- for glyphName in glyphNames:
- row = []
- for ppem in ppems:
- widths = self.hdmx[ppem]
- row.append(widths[glyphName])
- if ";" in glyphName:
- glyphName = "\\x3b".join(glyphName.split(";"))
- writer.write(format % ((glyphName,) + tuple(row)))
- writer.newline()
- writer.endtag("hdmxData")
- writer.newline()
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "hdmxData":
+ return
+ content = strjoin(content)
+ lines = content.split(";")
+ topRow = lines[0].split()
+ assert topRow[0] == "ppem:", "illegal hdmx format"
+ ppems = list(map(int, topRow[1:]))
+ self.hdmx = hdmx = {}
+ for ppem in ppems:
+ hdmx[ppem] = {}
+ lines = (line.split() for line in lines[1:])
+ for line in lines:
+ if not line:
+ continue
+ assert line[0][-1] == ":", "illegal hdmx format"
+ glyphName = line[0][:-1]
+ if "\\" in glyphName:
+ from fontTools.misc.textTools import safeEval
- def fromXML(self, name, attrs, content, ttFont):
- if name != "hdmxData":
- return
- content = strjoin(content)
- lines = content.split(";")
- topRow = lines[0].split()
- assert topRow[0] == "ppem:", "illegal hdmx format"
- ppems = list(map(int, topRow[1:]))
- self.hdmx = hdmx = {}
- for ppem in ppems:
- hdmx[ppem] = {}
- lines = (line.split() for line in lines[1:])
- for line in lines:
- if not line:
- continue
- assert line[0][-1] == ":", "illegal hdmx format"
- glyphName = line[0][:-1]
- if "\\" in glyphName:
- from fontTools.misc.textTools import safeEval
- glyphName = safeEval('"""' + glyphName + '"""')
- line = list(map(int, line[1:]))
- assert len(line) == len(ppems), "illegal hdmx format"
- for i in range(len(ppems)):
- hdmx[ppems[i]][glyphName] = line[i]
+ glyphName = safeEval('"""' + glyphName + '"""')
+ line = list(map(int, line[1:]))
+ assert len(line) == len(ppems), "illegal hdmx format"
+ for i in range(len(ppems)):
+ hdmx[ppems[i]][glyphName] = line[i]
diff --git a/Lib/fontTools/ttLib/tables/_h_e_a_d.py b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
index 4d19da03..fe29c8fc 100644
--- a/Lib/fontTools/ttLib/tables/_h_e_a_d.py
+++ b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
@@ -1,8 +1,12 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
-from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
-from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
+from fontTools.misc.timeTools import (
+ timestampFromString,
+ timestampToString,
+ timestampNow,
+)
+from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging
@@ -31,87 +35,89 @@ headFormat = """
glyphDataFormat: h
"""
-class table__h_e_a_d(DefaultTable.DefaultTable):
- dependencies = ['maxp', 'loca', 'CFF ', 'CFF2']
+class table__h_e_a_d(DefaultTable.DefaultTable):
+ dependencies = ["maxp", "loca", "CFF ", "CFF2"]
- def decompile(self, data, ttFont):
- dummy, rest = sstruct.unpack2(headFormat, data, self)
- if rest:
- # this is quite illegal, but there seem to be fonts out there that do this
- log.warning("extra bytes at the end of 'head' table")
- assert rest == b"\0\0"
+ def decompile(self, data, ttFont):
+ dummy, rest = sstruct.unpack2(headFormat, data, self)
+ if rest:
+ # this is quite illegal, but there seem to be fonts out there that do this
+ log.warning("extra bytes at the end of 'head' table")
+ assert rest == b"\0\0"
- # For timestamp fields, ignore the top four bytes. Some fonts have
- # bogus values there. Since till 2038 those bytes only can be zero,
- # ignore them.
- #
- # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
- for stamp in 'created', 'modified':
- value = getattr(self, stamp)
- if value > 0xFFFFFFFF:
- log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
- value &= 0xFFFFFFFF
- setattr(self, stamp, value)
- if value < 0x7C259DC0: # January 1, 1970 00:00:00
- log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp)
- value += 0x7C259DC0
- setattr(self, stamp, value)
+ # For timestamp fields, ignore the top four bytes. Some fonts have
+ # bogus values there. Since till 2038 those bytes only can be zero,
+ # ignore them.
+ #
+ # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
+ for stamp in "created", "modified":
+ value = getattr(self, stamp)
+ if value > 0xFFFFFFFF:
+ log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
+ value &= 0xFFFFFFFF
+ setattr(self, stamp, value)
+ if value < 0x7C259DC0: # January 1, 1970 00:00:00
+ log.warning(
+ "'%s' timestamp seems very low; regarding as unix timestamp", stamp
+ )
+ value += 0x7C259DC0
+ setattr(self, stamp, value)
- def compile(self, ttFont):
- if ttFont.recalcBBoxes:
- # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
- if 'CFF ' in ttFont:
- topDict = ttFont['CFF '].cff.topDictIndex[0]
- self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
- elif 'CFF2' in ttFont:
- topDict = ttFont['CFF2'].cff.topDictIndex[0]
- charStrings = topDict.CharStrings
- fontBBox = None
- for charString in charStrings.values():
- bounds = charString.calcBounds(charStrings)
- if bounds is not None:
- if fontBBox is not None:
- fontBBox = unionRect(fontBBox, bounds)
- else:
- fontBBox = bounds
- if fontBBox is not None:
- self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
- if ttFont.recalcTimestamp:
- self.modified = timestampNow()
- data = sstruct.pack(headFormat, self)
- return data
+ def compile(self, ttFont):
+ if ttFont.recalcBBoxes:
+ # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
+ if "CFF " in ttFont:
+ topDict = ttFont["CFF "].cff.topDictIndex[0]
+ self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
+ elif "CFF2" in ttFont:
+ topDict = ttFont["CFF2"].cff.topDictIndex[0]
+ charStrings = topDict.CharStrings
+ fontBBox = None
+ for charString in charStrings.values():
+ bounds = charString.calcBounds(charStrings)
+ if bounds is not None:
+ if fontBBox is not None:
+ fontBBox = unionRect(fontBBox, bounds)
+ else:
+ fontBBox = bounds
+ if fontBBox is not None:
+ self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
+ if ttFont.recalcTimestamp:
+ self.modified = timestampNow()
+ data = sstruct.pack(headFormat, self)
+ return data
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- _, names, fixes = sstruct.getformat(headFormat)
- for name in names:
- value = getattr(self, name)
- if name in fixes:
- value = floatToFixedToStr(value, precisionBits=fixes[name])
- elif name in ("created", "modified"):
- value = timestampToString(value)
- elif name in ("magicNumber", "checkSumAdjustment"):
- if value < 0:
- value = value + 0x100000000
- value = hex(value)
- if value[-1:] == "L":
- value = value[:-1]
- elif name in ("macStyle", "flags"):
- value = num2binary(value, 16)
- writer.simpletag(name, value=value)
- writer.newline()
+ def toXML(self, writer, ttFont):
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ _, names, fixes = sstruct.getformat(headFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name in fixes:
+ value = floatToFixedToStr(value, precisionBits=fixes[name])
+ elif name in ("created", "modified"):
+ value = timestampToString(value)
+ elif name in ("magicNumber", "checkSumAdjustment"):
+ if value < 0:
+ value = value + 0x100000000
+ value = hex(value)
+ if value[-1:] == "L":
+ value = value[:-1]
+ elif name in ("macStyle", "flags"):
+ value = num2binary(value, 16)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- fixes = sstruct.getformat(headFormat)[2]
- if name in fixes:
- value = strToFixedToFloat(value, precisionBits=fixes[name])
- elif name in ("created", "modified"):
- value = timestampFromString(value)
- elif name in ("macStyle", "flags"):
- value = binary2num(value)
- else:
- value = safeEval(value)
- setattr(self, name, value)
+ def fromXML(self, name, attrs, content, ttFont):
+ value = attrs["value"]
+ fixes = sstruct.getformat(headFormat)[2]
+ if name in fixes:
+ value = strToFixedToFloat(value, precisionBits=fixes[name])
+ elif name in ("created", "modified"):
+ value = timestampFromString(value)
+ elif name in ("macStyle", "flags"):
+ value = binary2num(value)
+ else:
+ value = safeEval(value)
+ setattr(self, name, value)
diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
index 9b8baaad..43e464f7 100644
--- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py
+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
@@ -1,7 +1,9 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
- ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
+ ensureVersionIsLong as fi2ve,
+ versionToFixed as ve2fi,
+)
from . import DefaultTable
import math
@@ -29,95 +31,105 @@ hheaFormat = """
class table__h_h_e_a(DefaultTable.DefaultTable):
-
- # Note: Keep in sync with table__v_h_e_a
-
- dependencies = ['hmtx', 'glyf', 'CFF ', 'CFF2']
-
- # OpenType spec renamed these, add aliases for compatibility
- @property
- def ascender(self): return self.ascent
-
- @ascender.setter
- def ascender(self,value): self.ascent = value
-
- @property
- def descender(self): return self.descent
-
- @descender.setter
- def descender(self,value): self.descent = value
-
- def decompile(self, data, ttFont):
- sstruct.unpack(hheaFormat, data, self)
-
- def compile(self, ttFont):
- if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ') or ttFont.isLoaded('CFF2')):
- self.recalc(ttFont)
- self.tableVersion = fi2ve(self.tableVersion)
- return sstruct.pack(hheaFormat, self)
-
- def recalc(self, ttFont):
- if 'hmtx' in ttFont:
- hmtxTable = ttFont['hmtx']
- self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())
-
- boundsWidthDict = {}
- if 'glyf' in ttFont:
- glyfTable = ttFont['glyf']
- for name in ttFont.getGlyphOrder():
- g = glyfTable[name]
- if g.numberOfContours == 0:
- continue
- if g.numberOfContours < 0 and not hasattr(g, "xMax"):
- # Composite glyph without extents set.
- # Calculate those.
- g.recalcBounds(glyfTable)
- boundsWidthDict[name] = g.xMax - g.xMin
- elif 'CFF ' in ttFont or 'CFF2' in ttFont:
- if 'CFF ' in ttFont:
- topDict = ttFont['CFF '].cff.topDictIndex[0]
- else:
- topDict = ttFont['CFF2'].cff.topDictIndex[0]
- charStrings = topDict.CharStrings
- for name in ttFont.getGlyphOrder():
- cs = charStrings[name]
- bounds = cs.calcBounds(charStrings)
- if bounds is not None:
- boundsWidthDict[name] = int(
- math.ceil(bounds[2]) - math.floor(bounds[0]))
-
- if boundsWidthDict:
- minLeftSideBearing = float('inf')
- minRightSideBearing = float('inf')
- xMaxExtent = -float('inf')
- for name, boundsWidth in boundsWidthDict.items():
- advanceWidth, lsb = hmtxTable[name]
- rsb = advanceWidth - lsb - boundsWidth
- extent = lsb + boundsWidth
- minLeftSideBearing = min(minLeftSideBearing, lsb)
- minRightSideBearing = min(minRightSideBearing, rsb)
- xMaxExtent = max(xMaxExtent, extent)
- self.minLeftSideBearing = minLeftSideBearing
- self.minRightSideBearing = minRightSideBearing
- self.xMaxExtent = xMaxExtent
-
- else: # No glyph has outlines.
- self.minLeftSideBearing = 0
- self.minRightSideBearing = 0
- self.xMaxExtent = 0
-
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(hheaFormat)
- for name in names:
- value = getattr(self, name)
- if name == "tableVersion":
- value = fi2ve(value)
- value = "0x%08x" % value
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableVersion":
- setattr(self, name, ve2fi(attrs["value"]))
- return
- setattr(self, name, safeEval(attrs["value"]))
+ # Note: Keep in sync with table__v_h_e_a
+
+ dependencies = ["hmtx", "glyf", "CFF ", "CFF2"]
+
+ # OpenType spec renamed these, add aliases for compatibility
+ @property
+ def ascender(self):
+ return self.ascent
+
+ @ascender.setter
+ def ascender(self, value):
+ self.ascent = value
+
+ @property
+ def descender(self):
+ return self.descent
+
+ @descender.setter
+ def descender(self, value):
+ self.descent = value
+
+ def decompile(self, data, ttFont):
+ sstruct.unpack(hheaFormat, data, self)
+
+ def compile(self, ttFont):
+ if ttFont.recalcBBoxes and (
+ ttFont.isLoaded("glyf")
+ or ttFont.isLoaded("CFF ")
+ or ttFont.isLoaded("CFF2")
+ ):
+ self.recalc(ttFont)
+ self.tableVersion = fi2ve(self.tableVersion)
+ return sstruct.pack(hheaFormat, self)
+
+ def recalc(self, ttFont):
+ if "hmtx" not in ttFont:
+ return
+
+ hmtxTable = ttFont["hmtx"]
+ self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())
+
+ boundsWidthDict = {}
+ if "glyf" in ttFont:
+ glyfTable = ttFont["glyf"]
+ for name in ttFont.getGlyphOrder():
+ g = glyfTable[name]
+ if g.numberOfContours == 0:
+ continue
+ if g.numberOfContours < 0 and not hasattr(g, "xMax"):
+ # Composite glyph without extents set.
+ # Calculate those.
+ g.recalcBounds(glyfTable)
+ boundsWidthDict[name] = g.xMax - g.xMin
+ elif "CFF " in ttFont or "CFF2" in ttFont:
+ if "CFF " in ttFont:
+ topDict = ttFont["CFF "].cff.topDictIndex[0]
+ else:
+ topDict = ttFont["CFF2"].cff.topDictIndex[0]
+ charStrings = topDict.CharStrings
+ for name in ttFont.getGlyphOrder():
+ cs = charStrings[name]
+ bounds = cs.calcBounds(charStrings)
+ if bounds is not None:
+ boundsWidthDict[name] = int(
+ math.ceil(bounds[2]) - math.floor(bounds[0])
+ )
+
+ if boundsWidthDict:
+ minLeftSideBearing = float("inf")
+ minRightSideBearing = float("inf")
+ xMaxExtent = -float("inf")
+ for name, boundsWidth in boundsWidthDict.items():
+ advanceWidth, lsb = hmtxTable[name]
+ rsb = advanceWidth - lsb - boundsWidth
+ extent = lsb + boundsWidth
+ minLeftSideBearing = min(minLeftSideBearing, lsb)
+ minRightSideBearing = min(minRightSideBearing, rsb)
+ xMaxExtent = max(xMaxExtent, extent)
+ self.minLeftSideBearing = minLeftSideBearing
+ self.minRightSideBearing = minRightSideBearing
+ self.xMaxExtent = xMaxExtent
+
+ else: # No glyph has outlines.
+ self.minLeftSideBearing = 0
+ self.minRightSideBearing = 0
+ self.xMaxExtent = 0
+
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(hheaFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name == "tableVersion":
+ value = fi2ve(value)
+ value = "0x%08x" % value
+ writer.simpletag(name, value=value)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableVersion":
+ setattr(self, name, ve2fi(attrs["value"]))
+ return
+ setattr(self, name, safeEval(attrs["value"]))
diff --git a/Lib/fontTools/ttLib/tables/_h_m_t_x.py b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
index 6980b8d8..2dbdd7f9 100644
--- a/Lib/fontTools/ttLib/tables/_h_m_t_x.py
+++ b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
@@ -12,127 +12,140 @@ log = logging.getLogger(__name__)
class table__h_m_t_x(DefaultTable.DefaultTable):
+ headerTag = "hhea"
+ advanceName = "width"
+ sideBearingName = "lsb"
+ numberOfMetricsName = "numberOfHMetrics"
+ longMetricFormat = "Hh"
- headerTag = 'hhea'
- advanceName = 'width'
- sideBearingName = 'lsb'
- numberOfMetricsName = 'numberOfHMetrics'
- longMetricFormat = 'Hh'
+ def decompile(self, data, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ headerTable = ttFont.get(self.headerTag)
+ if headerTable is not None:
+ numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
+ else:
+ numberOfMetrics = numGlyphs
+ if numberOfMetrics > numGlyphs:
+ log.warning(
+ "The %s.%s exceeds the maxp.numGlyphs"
+ % (self.headerTag, self.numberOfMetricsName)
+ )
+ numberOfMetrics = numGlyphs
+ if len(data) < 4 * numberOfMetrics:
+ raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
+ # Note: advanceWidth is unsigned, but some font editors might
+ # read/write as signed. We can't be sure whether it was a mistake
+ # or not, so we read as unsigned but also issue a warning...
+ metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
+ metrics = struct.unpack(metricsFmt, data[: 4 * numberOfMetrics])
+ data = data[4 * numberOfMetrics :]
+ numberOfSideBearings = numGlyphs - numberOfMetrics
+ sideBearings = array.array("h", data[: 2 * numberOfSideBearings])
+ data = data[2 * numberOfSideBearings :]
- def decompile(self, data, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- headerTable = ttFont.get(self.headerTag)
- if headerTable is not None:
- numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
- else:
- numberOfMetrics = numGlyphs
- if numberOfMetrics > numGlyphs:
- log.warning("The %s.%s exceeds the maxp.numGlyphs" % (
- self.headerTag, self.numberOfMetricsName))
- numberOfMetrics = numGlyphs
- if len(data) < 4 * numberOfMetrics:
- raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
- # Note: advanceWidth is unsigned, but some font editors might
- # read/write as signed. We can't be sure whether it was a mistake
- # or not, so we read as unsigned but also issue a warning...
- metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
- metrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics])
- data = data[4 * numberOfMetrics:]
- numberOfSideBearings = numGlyphs - numberOfMetrics
- sideBearings = array.array("h", data[:2 * numberOfSideBearings])
- data = data[2 * numberOfSideBearings:]
+ if sys.byteorder != "big":
+ sideBearings.byteswap()
+ if data:
+ log.warning("too much '%s' table data" % self.tableTag)
+ self.metrics = {}
+ glyphOrder = ttFont.getGlyphOrder()
+ for i in range(numberOfMetrics):
+ glyphName = glyphOrder[i]
+ advanceWidth, lsb = metrics[i * 2 : i * 2 + 2]
+ if advanceWidth > 32767:
+ log.warning(
+ "Glyph %r has a huge advance %s (%d); is it intentional or "
+ "an (invalid) negative value?",
+ glyphName,
+ self.advanceName,
+ advanceWidth,
+ )
+ self.metrics[glyphName] = (advanceWidth, lsb)
+ lastAdvance = metrics[-2]
+ for i in range(numberOfSideBearings):
+ glyphName = glyphOrder[i + numberOfMetrics]
+ self.metrics[glyphName] = (lastAdvance, sideBearings[i])
- if sys.byteorder != "big": sideBearings.byteswap()
- if data:
- log.warning("too much '%s' table data" % self.tableTag)
- self.metrics = {}
- glyphOrder = ttFont.getGlyphOrder()
- for i in range(numberOfMetrics):
- glyphName = glyphOrder[i]
- advanceWidth, lsb = metrics[i*2:i*2+2]
- if advanceWidth > 32767:
- log.warning(
- "Glyph %r has a huge advance %s (%d); is it intentional or "
- "an (invalid) negative value?", glyphName, self.advanceName,
- advanceWidth)
- self.metrics[glyphName] = (advanceWidth, lsb)
- lastAdvance = metrics[-2]
- for i in range(numberOfSideBearings):
- glyphName = glyphOrder[i + numberOfMetrics]
- self.metrics[glyphName] = (lastAdvance, sideBearings[i])
+ def compile(self, ttFont):
+ metrics = []
+ hasNegativeAdvances = False
+ for glyphName in ttFont.getGlyphOrder():
+ advanceWidth, sideBearing = self.metrics[glyphName]
+ if advanceWidth < 0:
+ log.error(
+ "Glyph %r has negative advance %s" % (glyphName, self.advanceName)
+ )
+ hasNegativeAdvances = True
+ metrics.append([advanceWidth, sideBearing])
- def compile(self, ttFont):
- metrics = []
- hasNegativeAdvances = False
- for glyphName in ttFont.getGlyphOrder():
- advanceWidth, sideBearing = self.metrics[glyphName]
- if advanceWidth < 0:
- log.error("Glyph %r has negative advance %s" % (
- glyphName, self.advanceName))
- hasNegativeAdvances = True
- metrics.append([advanceWidth, sideBearing])
+ headerTable = ttFont.get(self.headerTag)
+ if headerTable is not None:
+ lastAdvance = metrics[-1][0]
+ lastIndex = len(metrics)
+ while metrics[lastIndex - 2][0] == lastAdvance:
+ lastIndex -= 1
+ if lastIndex <= 1:
+ # all advances are equal
+ lastIndex = 1
+ break
+ additionalMetrics = metrics[lastIndex:]
+ additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
+ metrics = metrics[:lastIndex]
+ numberOfMetrics = len(metrics)
+ setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
+ else:
+ # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
+ numberOfMetrics = ttFont["maxp"].numGlyphs
+ additionalMetrics = []
- headerTable = ttFont.get(self.headerTag)
- if headerTable is not None:
- lastAdvance = metrics[-1][0]
- lastIndex = len(metrics)
- while metrics[lastIndex-2][0] == lastAdvance:
- lastIndex -= 1
- if lastIndex <= 1:
- # all advances are equal
- lastIndex = 1
- break
- additionalMetrics = metrics[lastIndex:]
- additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
- metrics = metrics[:lastIndex]
- numberOfMetrics = len(metrics)
- setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
- else:
- # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
- numberOfMetrics = ttFont["maxp"].numGlyphs
- additionalMetrics = []
+ allMetrics = []
+ for advance, sb in metrics:
+ allMetrics.extend([otRound(advance), otRound(sb)])
+ metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
+ try:
+ data = struct.pack(metricsFmt, *allMetrics)
+ except struct.error as e:
+ if "out of range" in str(e) and hasNegativeAdvances:
+ raise ttLib.TTLibError(
+ "'%s' table can't contain negative advance %ss"
+ % (self.tableTag, self.advanceName)
+ )
+ else:
+ raise
+ additionalMetrics = array.array("h", additionalMetrics)
+ if sys.byteorder != "big":
+ additionalMetrics.byteswap()
+ data = data + additionalMetrics.tobytes()
+ return data
- allMetrics = []
- for advance, sb in metrics:
- allMetrics.extend([otRound(advance), otRound(sb)])
- metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
- try:
- data = struct.pack(metricsFmt, *allMetrics)
- except struct.error as e:
- if "out of range" in str(e) and hasNegativeAdvances:
- raise ttLib.TTLibError(
- "'%s' table can't contain negative advance %ss"
- % (self.tableTag, self.advanceName))
- else:
- raise
- additionalMetrics = array.array("h", additionalMetrics)
- if sys.byteorder != "big": additionalMetrics.byteswap()
- data = data + additionalMetrics.tobytes()
- return data
+ def toXML(self, writer, ttFont):
+ names = sorted(self.metrics.keys())
+ for glyphName in names:
+ advance, sb = self.metrics[glyphName]
+ writer.simpletag(
+ "mtx",
+ [
+ ("name", glyphName),
+ (self.advanceName, advance),
+ (self.sideBearingName, sb),
+ ],
+ )
+ writer.newline()
- def toXML(self, writer, ttFont):
- names = sorted(self.metrics.keys())
- for glyphName in names:
- advance, sb = self.metrics[glyphName]
- writer.simpletag("mtx", [
- ("name", glyphName),
- (self.advanceName, advance),
- (self.sideBearingName, sb),
- ])
- writer.newline()
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "metrics"):
+ self.metrics = {}
+ if name == "mtx":
+ self.metrics[attrs["name"]] = (
+ safeEval(attrs[self.advanceName]),
+ safeEval(attrs[self.sideBearingName]),
+ )
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "metrics"):
- self.metrics = {}
- if name == "mtx":
- self.metrics[attrs["name"]] = (safeEval(attrs[self.advanceName]),
- safeEval(attrs[self.sideBearingName]))
+ def __delitem__(self, glyphName):
+ del self.metrics[glyphName]
- def __delitem__(self, glyphName):
- del self.metrics[glyphName]
+ def __getitem__(self, glyphName):
+ return self.metrics[glyphName]
- def __getitem__(self, glyphName):
- return self.metrics[glyphName]
-
- def __setitem__(self, glyphName, advance_sb_pair):
- self.metrics[glyphName] = tuple(advance_sb_pair)
+ def __setitem__(self, glyphName, advance_sb_pair):
+ self.metrics[glyphName] = tuple(advance_sb_pair)
diff --git a/Lib/fontTools/ttLib/tables/_k_e_r_n.py b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
index bcad2cea..8f55a311 100644
--- a/Lib/fontTools/ttLib/tables/_k_e_r_n.py
+++ b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
@@ -1,8 +1,6 @@
from fontTools.ttLib import getSearchRange
from fontTools.misc.textTools import safeEval, readHex
-from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi)
+from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
from . import DefaultTable
import struct
import sys
@@ -14,273 +12,267 @@ log = logging.getLogger(__name__)
class table__k_e_r_n(DefaultTable.DefaultTable):
-
- def getkern(self, format):
- for subtable in self.kernTables:
- if subtable.format == format:
- return subtable
- return None # not found
-
- def decompile(self, data, ttFont):
- version, nTables = struct.unpack(">HH", data[:4])
- apple = False
- if (len(data) >= 8) and (version == 1):
- # AAT Apple's "new" format. Hm.
- version, nTables = struct.unpack(">LL", data[:8])
- self.version = fi2fl(version, 16)
- data = data[8:]
- apple = True
- else:
- self.version = version
- data = data[4:]
- self.kernTables = []
- for i in range(nTables):
- if self.version == 1.0:
- # Apple
- length, coverage, subtableFormat = struct.unpack(
- ">LBB", data[:6])
- else:
- # in OpenType spec the "version" field refers to the common
- # subtable header; the actual subtable format is stored in
- # the 8-15 mask bits of "coverage" field.
- # This "version" is always 0 so we ignore it here
- _, length, subtableFormat, coverage = struct.unpack(
- ">HHBB", data[:6])
- if nTables == 1 and subtableFormat == 0:
- # The "length" value is ignored since some fonts
- # (like OpenSans and Calibri) have a subtable larger than
- # its value.
- nPairs, = struct.unpack(">H", data[6:8])
- calculated_length = (nPairs * 6) + 14
- if length != calculated_length:
- log.warning(
- "'kern' subtable longer than defined: "
- "%d bytes instead of %d bytes" %
- (calculated_length, length)
- )
- length = calculated_length
- if subtableFormat not in kern_classes:
- subtable = KernTable_format_unkown(subtableFormat)
- else:
- subtable = kern_classes[subtableFormat](apple)
- subtable.decompile(data[:length], ttFont)
- self.kernTables.append(subtable)
- data = data[length:]
-
- def compile(self, ttFont):
- if hasattr(self, "kernTables"):
- nTables = len(self.kernTables)
- else:
- nTables = 0
- if self.version == 1.0:
- # AAT Apple's "new" format.
- data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
- else:
- data = struct.pack(">HH", self.version, nTables)
- if hasattr(self, "kernTables"):
- for subtable in self.kernTables:
- data = data + subtable.compile(ttFont)
- return data
-
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- for subtable in self.kernTables:
- subtable.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "version":
- self.version = safeEval(attrs["value"])
- return
- if name != "kernsubtable":
- return
- if not hasattr(self, "kernTables"):
- self.kernTables = []
- format = safeEval(attrs["format"])
- if format not in kern_classes:
- subtable = KernTable_format_unkown(format)
- else:
- apple = self.version == 1.0
- subtable = kern_classes[format](apple)
- self.kernTables.append(subtable)
- subtable.fromXML(name, attrs, content, ttFont)
+ def getkern(self, format):
+ for subtable in self.kernTables:
+ if subtable.format == format:
+ return subtable
+ return None # not found
+
+ def decompile(self, data, ttFont):
+ version, nTables = struct.unpack(">HH", data[:4])
+ apple = False
+ if (len(data) >= 8) and (version == 1):
+ # AAT Apple's "new" format. Hm.
+ version, nTables = struct.unpack(">LL", data[:8])
+ self.version = fi2fl(version, 16)
+ data = data[8:]
+ apple = True
+ else:
+ self.version = version
+ data = data[4:]
+ self.kernTables = []
+ for i in range(nTables):
+ if self.version == 1.0:
+ # Apple
+ length, coverage, subtableFormat = struct.unpack(">LBB", data[:6])
+ else:
+ # in OpenType spec the "version" field refers to the common
+ # subtable header; the actual subtable format is stored in
+ # the 8-15 mask bits of "coverage" field.
+ # This "version" is always 0 so we ignore it here
+ _, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
+ if nTables == 1 and subtableFormat == 0:
+ # The "length" value is ignored since some fonts
+ # (like OpenSans and Calibri) have a subtable larger than
+ # its value.
+ (nPairs,) = struct.unpack(">H", data[6:8])
+ calculated_length = (nPairs * 6) + 14
+ if length != calculated_length:
+ log.warning(
+ "'kern' subtable longer than defined: "
+ "%d bytes instead of %d bytes" % (calculated_length, length)
+ )
+ length = calculated_length
+ if subtableFormat not in kern_classes:
+ subtable = KernTable_format_unkown(subtableFormat)
+ else:
+ subtable = kern_classes[subtableFormat](apple)
+ subtable.decompile(data[:length], ttFont)
+ self.kernTables.append(subtable)
+ data = data[length:]
+
+ def compile(self, ttFont):
+ if hasattr(self, "kernTables"):
+ nTables = len(self.kernTables)
+ else:
+ nTables = 0
+ if self.version == 1.0:
+ # AAT Apple's "new" format.
+ data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
+ else:
+ data = struct.pack(">HH", self.version, nTables)
+ if hasattr(self, "kernTables"):
+ for subtable in self.kernTables:
+ data = data + subtable.compile(ttFont)
+ return data
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ for subtable in self.kernTables:
+ subtable.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ return
+ if name != "kernsubtable":
+ return
+ if not hasattr(self, "kernTables"):
+ self.kernTables = []
+ format = safeEval(attrs["format"])
+ if format not in kern_classes:
+ subtable = KernTable_format_unkown(format)
+ else:
+ apple = self.version == 1.0
+ subtable = kern_classes[format](apple)
+ self.kernTables.append(subtable)
+ subtable.fromXML(name, attrs, content, ttFont)
class KernTable_format_0(object):
-
- # 'version' is kept for backward compatibility
- version = format = 0
-
- def __init__(self, apple=False):
- self.apple = apple
-
- def decompile(self, data, ttFont):
- if not self.apple:
- version, length, subtableFormat, coverage = struct.unpack(
- ">HHBB", data[:6])
- if version != 0:
- from fontTools.ttLib import TTLibError
- raise TTLibError(
- "unsupported kern subtable version: %d" % version)
- tupleIndex = None
- # Should we also assert length == len(data)?
- data = data[6:]
- else:
- length, coverage, subtableFormat, tupleIndex = struct.unpack(
- ">LBBH", data[:8])
- data = data[8:]
- assert self.format == subtableFormat, "unsupported format"
- self.coverage = coverage
- self.tupleIndex = tupleIndex
-
- self.kernTable = kernTable = {}
-
- nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
- ">HHHH", data[:8])
- data = data[8:]
-
- datas = array.array("H", data[:6 * nPairs])
- if sys.byteorder != "big": datas.byteswap()
- it = iter(datas)
- glyphOrder = ttFont.getGlyphOrder()
- for k in range(nPairs):
- left, right, value = next(it), next(it), next(it)
- if value >= 32768:
- value -= 65536
- try:
- kernTable[(glyphOrder[left], glyphOrder[right])] = value
- except IndexError:
- # Slower, but will not throw an IndexError on an invalid
- # glyph id.
- kernTable[(
- ttFont.getGlyphName(left),
- ttFont.getGlyphName(right))] = value
- if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess
- log.warning(
- "excess data in 'kern' subtable: %d bytes",
- len(data) - 6 * nPairs)
-
- def compile(self, ttFont):
- nPairs = min(len(self.kernTable), 0xFFFF)
- searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
- searchRange &= 0xFFFF
- entrySelector = min(entrySelector, 0xFFFF)
- rangeShift = min(rangeShift, 0xFFFF)
- data = struct.pack(
- ">HHHH", nPairs, searchRange, entrySelector, rangeShift)
-
- # yeehee! (I mean, turn names into indices)
- try:
- reverseOrder = ttFont.getReverseGlyphMap()
- kernTable = sorted(
- (reverseOrder[left], reverseOrder[right], value)
- for ((left, right), value) in self.kernTable.items())
- except KeyError:
- # Slower, but will not throw KeyError on invalid glyph id.
- getGlyphID = ttFont.getGlyphID
- kernTable = sorted(
- (getGlyphID(left), getGlyphID(right), value)
- for ((left, right), value) in self.kernTable.items())
-
- for left, right, value in kernTable:
- data = data + struct.pack(">HHh", left, right, value)
-
- if not self.apple:
- version = 0
- length = len(data) + 6
- if length >= 0x10000:
- log.warning('"kern" subtable overflow, '
- 'truncating length value while preserving pairs.')
- length &= 0xFFFF
- header = struct.pack(
- ">HHBB", version, length, self.format, self.coverage)
- else:
- if self.tupleIndex is None:
- # sensible default when compiling a TTX from an old fonttools
- # or when inserting a Windows-style format 0 subtable into an
- # Apple version=1.0 kern table
- log.warning("'tupleIndex' is None; default to 0")
- self.tupleIndex = 0
- length = len(data) + 8
- header = struct.pack(
- ">LBBH", length, self.coverage, self.format, self.tupleIndex)
- return header + data
-
- def toXML(self, writer, ttFont):
- attrs = dict(coverage=self.coverage, format=self.format)
- if self.apple:
- if self.tupleIndex is None:
- log.warning("'tupleIndex' is None; default to 0")
- attrs["tupleIndex"] = 0
- else:
- attrs["tupleIndex"] = self.tupleIndex
- writer.begintag("kernsubtable", **attrs)
- writer.newline()
- items = sorted(self.kernTable.items())
- for (left, right), value in items:
- writer.simpletag("pair", [
- ("l", left),
- ("r", right),
- ("v", value)
- ])
- writer.newline()
- writer.endtag("kernsubtable")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.coverage = safeEval(attrs["coverage"])
- subtableFormat = safeEval(attrs["format"])
- if self.apple:
- if "tupleIndex" in attrs:
- self.tupleIndex = safeEval(attrs["tupleIndex"])
- else:
- # previous fontTools versions didn't export tupleIndex
- log.warning(
- "Apple kern subtable is missing 'tupleIndex' attribute")
- self.tupleIndex = None
- else:
- self.tupleIndex = None
- assert subtableFormat == self.format, "unsupported format"
- if not hasattr(self, "kernTable"):
- self.kernTable = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])
-
- def __getitem__(self, pair):
- return self.kernTable[pair]
-
- def __setitem__(self, pair, value):
- self.kernTable[pair] = value
-
- def __delitem__(self, pair):
- del self.kernTable[pair]
+ # 'version' is kept for backward compatibility
+ version = format = 0
+
+ def __init__(self, apple=False):
+ self.apple = apple
+
+ def decompile(self, data, ttFont):
+ if not self.apple:
+ version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
+ if version != 0:
+ from fontTools.ttLib import TTLibError
+
+ raise TTLibError("unsupported kern subtable version: %d" % version)
+ tupleIndex = None
+ # Should we also assert length == len(data)?
+ data = data[6:]
+ else:
+ length, coverage, subtableFormat, tupleIndex = struct.unpack(
+ ">LBBH", data[:8]
+ )
+ data = data[8:]
+ assert self.format == subtableFormat, "unsupported format"
+ self.coverage = coverage
+ self.tupleIndex = tupleIndex
+
+ self.kernTable = kernTable = {}
+
+ nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
+ ">HHHH", data[:8]
+ )
+ data = data[8:]
+
+ datas = array.array("H", data[: 6 * nPairs])
+ if sys.byteorder != "big":
+ datas.byteswap()
+ it = iter(datas)
+ glyphOrder = ttFont.getGlyphOrder()
+ for k in range(nPairs):
+ left, right, value = next(it), next(it), next(it)
+ if value >= 32768:
+ value -= 65536
+ try:
+ kernTable[(glyphOrder[left], glyphOrder[right])] = value
+ except IndexError:
+ # Slower, but will not throw an IndexError on an invalid
+ # glyph id.
+ kernTable[
+ (ttFont.getGlyphName(left), ttFont.getGlyphName(right))
+ ] = value
+ if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess
+ log.warning(
+ "excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs
+ )
+
+ def compile(self, ttFont):
+ nPairs = min(len(self.kernTable), 0xFFFF)
+ searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
+ searchRange &= 0xFFFF
+ entrySelector = min(entrySelector, 0xFFFF)
+ rangeShift = min(rangeShift, 0xFFFF)
+ data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)
+
+ # yeehee! (I mean, turn names into indices)
+ try:
+ reverseOrder = ttFont.getReverseGlyphMap()
+ kernTable = sorted(
+ (reverseOrder[left], reverseOrder[right], value)
+ for ((left, right), value) in self.kernTable.items()
+ )
+ except KeyError:
+ # Slower, but will not throw KeyError on invalid glyph id.
+ getGlyphID = ttFont.getGlyphID
+ kernTable = sorted(
+ (getGlyphID(left), getGlyphID(right), value)
+ for ((left, right), value) in self.kernTable.items()
+ )
+
+ for left, right, value in kernTable:
+ data = data + struct.pack(">HHh", left, right, value)
+
+ if not self.apple:
+ version = 0
+ length = len(data) + 6
+ if length >= 0x10000:
+ log.warning(
+ '"kern" subtable overflow, '
+ "truncating length value while preserving pairs."
+ )
+ length &= 0xFFFF
+ header = struct.pack(">HHBB", version, length, self.format, self.coverage)
+ else:
+ if self.tupleIndex is None:
+ # sensible default when compiling a TTX from an old fonttools
+ # or when inserting a Windows-style format 0 subtable into an
+ # Apple version=1.0 kern table
+ log.warning("'tupleIndex' is None; default to 0")
+ self.tupleIndex = 0
+ length = len(data) + 8
+ header = struct.pack(
+ ">LBBH", length, self.coverage, self.format, self.tupleIndex
+ )
+ return header + data
+
+ def toXML(self, writer, ttFont):
+ attrs = dict(coverage=self.coverage, format=self.format)
+ if self.apple:
+ if self.tupleIndex is None:
+ log.warning("'tupleIndex' is None; default to 0")
+ attrs["tupleIndex"] = 0
+ else:
+ attrs["tupleIndex"] = self.tupleIndex
+ writer.begintag("kernsubtable", **attrs)
+ writer.newline()
+ items = sorted(self.kernTable.items())
+ for (left, right), value in items:
+ writer.simpletag("pair", [("l", left), ("r", right), ("v", value)])
+ writer.newline()
+ writer.endtag("kernsubtable")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.coverage = safeEval(attrs["coverage"])
+ subtableFormat = safeEval(attrs["format"])
+ if self.apple:
+ if "tupleIndex" in attrs:
+ self.tupleIndex = safeEval(attrs["tupleIndex"])
+ else:
+ # previous fontTools versions didn't export tupleIndex
+ log.warning("Apple kern subtable is missing 'tupleIndex' attribute")
+ self.tupleIndex = None
+ else:
+ self.tupleIndex = None
+ assert subtableFormat == self.format, "unsupported format"
+ if not hasattr(self, "kernTable"):
+ self.kernTable = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])
+
+ def __getitem__(self, pair):
+ return self.kernTable[pair]
+
+ def __setitem__(self, pair, value):
+ self.kernTable[pair] = value
+
+ def __delitem__(self, pair):
+ del self.kernTable[pair]
class KernTable_format_unkown(object):
-
- def __init__(self, format):
- self.format = format
-
- def decompile(self, data, ttFont):
- self.data = data
-
- def compile(self, ttFont):
- return self.data
-
- def toXML(self, writer, ttFont):
- writer.begintag("kernsubtable", format=self.format)
- writer.newline()
- writer.comment("unknown 'kern' subtable format")
- writer.newline()
- writer.dumphex(self.data)
- writer.endtag("kernsubtable")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.decompile(readHex(content), ttFont)
+ def __init__(self, format):
+ self.format = format
+
+ def decompile(self, data, ttFont):
+ self.data = data
+
+ def compile(self, ttFont):
+ return self.data
+
+ def toXML(self, writer, ttFont):
+ writer.begintag("kernsubtable", format=self.format)
+ writer.newline()
+ writer.comment("unknown 'kern' subtable format")
+ writer.newline()
+ writer.dumphex(self.data)
+ writer.endtag("kernsubtable")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.decompile(readHex(content), ttFont)
kern_classes = {0: KernTable_format_0}
diff --git a/Lib/fontTools/ttLib/tables/_l_c_a_r.py b/Lib/fontTools/ttLib/tables/_l_c_a_r.py
index e63310ef..1323b670 100644
--- a/Lib/fontTools/ttLib/tables/_l_c_a_r.py
+++ b/Lib/fontTools/ttLib/tables/_l_c_a_r.py
@@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table__l_c_a_r(BaseTTXConverter):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/_l_o_c_a.py b/Lib/fontTools/ttLib/tables/_l_o_c_a.py
index 6a8693ed..5884cef4 100644
--- a/Lib/fontTools/ttLib/tables/_l_o_c_a.py
+++ b/Lib/fontTools/ttLib/tables/_l_o_c_a.py
@@ -8,54 +8,58 @@ log = logging.getLogger(__name__)
class table__l_o_c_a(DefaultTable.DefaultTable):
+ dependencies = ["glyf"]
- dependencies = ['glyf']
-
- def decompile(self, data, ttFont):
- longFormat = ttFont['head'].indexToLocFormat
- if longFormat:
- format = "I"
- else:
- format = "H"
- locations = array.array(format)
- locations.frombytes(data)
- if sys.byteorder != "big": locations.byteswap()
- if not longFormat:
- l = array.array("I")
- for i in range(len(locations)):
- l.append(locations[i] * 2)
- locations = l
- if len(locations) < (ttFont['maxp'].numGlyphs + 1):
- log.warning("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
- len(locations) - 1, ttFont['maxp'].numGlyphs)
- self.locations = locations
-
- def compile(self, ttFont):
- try:
- max_location = max(self.locations)
- except AttributeError:
- self.set([])
- max_location = 0
- if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations):
- locations = array.array("H")
- for i in range(len(self.locations)):
- locations.append(self.locations[i] // 2)
- ttFont['head'].indexToLocFormat = 0
- else:
- locations = array.array("I", self.locations)
- ttFont['head'].indexToLocFormat = 1
- if sys.byteorder != "big": locations.byteswap()
- return locations.tobytes()
-
- def set(self, locations):
- self.locations = array.array("I", locations)
-
- def toXML(self, writer, ttFont):
- writer.comment("The 'loca' table will be calculated by the compiler")
- writer.newline()
-
- def __getitem__(self, index):
- return self.locations[index]
-
- def __len__(self):
- return len(self.locations)
+ def decompile(self, data, ttFont):
+ longFormat = ttFont["head"].indexToLocFormat
+ if longFormat:
+ format = "I"
+ else:
+ format = "H"
+ locations = array.array(format)
+ locations.frombytes(data)
+ if sys.byteorder != "big":
+ locations.byteswap()
+ if not longFormat:
+ l = array.array("I")
+ for i in range(len(locations)):
+ l.append(locations[i] * 2)
+ locations = l
+ if len(locations) < (ttFont["maxp"].numGlyphs + 1):
+ log.warning(
+ "corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
+ len(locations) - 1,
+ ttFont["maxp"].numGlyphs,
+ )
+ self.locations = locations
+
+ def compile(self, ttFont):
+ try:
+ max_location = max(self.locations)
+ except AttributeError:
+ self.set([])
+ max_location = 0
+ if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations):
+ locations = array.array("H")
+ for i in range(len(self.locations)):
+ locations.append(self.locations[i] // 2)
+ ttFont["head"].indexToLocFormat = 0
+ else:
+ locations = array.array("I", self.locations)
+ ttFont["head"].indexToLocFormat = 1
+ if sys.byteorder != "big":
+ locations.byteswap()
+ return locations.tobytes()
+
+ def set(self, locations):
+ self.locations = array.array("I", locations)
+
+ def toXML(self, writer, ttFont):
+ writer.comment("The 'loca' table will be calculated by the compiler")
+ writer.newline()
+
+ def __getitem__(self, index):
+ return self.locations[index]
+
+ def __len__(self):
+ return len(self.locations)
diff --git a/Lib/fontTools/ttLib/tables/_l_t_a_g.py b/Lib/fontTools/ttLib/tables/_l_t_a_g.py
index ce3c6b97..24f5e131 100644
--- a/Lib/fontTools/ttLib/tables/_l_t_a_g.py
+++ b/Lib/fontTools/ttLib/tables/_l_t_a_g.py
@@ -4,60 +4,61 @@ import struct
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html
+
class table__l_t_a_g(DefaultTable.DefaultTable):
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.version, self.flags = 1, 0
- self.tags = []
-
- def addTag(self, tag):
- """Add 'tag' to the list of langauge tags if not already there.
-
- Returns the integer index of 'tag' in the list of all tags.
- """
- try:
- return self.tags.index(tag)
- except ValueError:
- self.tags.append(tag)
- return len(self.tags) - 1
-
- def decompile(self, data, ttFont):
- self.version, self.flags, numTags = struct.unpack(">LLL", data[:12])
- assert self.version == 1
- self.tags = []
- for i in range(numTags):
- pos = 12 + i * 4
- offset, length = struct.unpack(">HH", data[pos:pos+4])
- tag = data[offset:offset+length].decode("ascii")
- self.tags.append(tag)
-
- def compile(self, ttFont):
- dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
- stringPool = ""
- for tag in self.tags:
- offset = stringPool.find(tag)
- if offset < 0:
- offset = len(stringPool)
- stringPool = stringPool + tag
- offset = offset + 12 + len(self.tags) * 4
- dataList.append(struct.pack(">HH", offset, len(tag)))
- dataList.append(tobytes(stringPool))
- return bytesjoin(dataList)
-
- def toXML(self, writer, ttFont):
- writer.simpletag("version", value=self.version)
- writer.newline()
- writer.simpletag("flags", value=self.flags)
- writer.newline()
- for tag in self.tags:
- writer.simpletag("LanguageTag", tag=tag)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "tags"):
- self.tags = []
- if name == "LanguageTag":
- self.tags.append(attrs["tag"])
- elif "value" in attrs:
- value = safeEval(attrs["value"])
- setattr(self, name, value)
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.version, self.flags = 1, 0
+ self.tags = []
+
+ def addTag(self, tag):
+ """Add 'tag' to the list of langauge tags if not already there.
+
+ Returns the integer index of 'tag' in the list of all tags.
+ """
+ try:
+ return self.tags.index(tag)
+ except ValueError:
+ self.tags.append(tag)
+ return len(self.tags) - 1
+
+ def decompile(self, data, ttFont):
+ self.version, self.flags, numTags = struct.unpack(">LLL", data[:12])
+ assert self.version == 1
+ self.tags = []
+ for i in range(numTags):
+ pos = 12 + i * 4
+ offset, length = struct.unpack(">HH", data[pos : pos + 4])
+ tag = data[offset : offset + length].decode("ascii")
+ self.tags.append(tag)
+
+ def compile(self, ttFont):
+ dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
+ stringPool = ""
+ for tag in self.tags:
+ offset = stringPool.find(tag)
+ if offset < 0:
+ offset = len(stringPool)
+ stringPool = stringPool + tag
+ offset = offset + 12 + len(self.tags) * 4
+ dataList.append(struct.pack(">HH", offset, len(tag)))
+ dataList.append(tobytes(stringPool))
+ return bytesjoin(dataList)
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("flags", value=self.flags)
+ writer.newline()
+ for tag in self.tags:
+ writer.simpletag("LanguageTag", tag=tag)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "tags"):
+ self.tags = []
+ if name == "LanguageTag":
+ self.tags.append(attrs["tag"])
+ elif "value" in attrs:
+ value = safeEval(attrs["value"])
+ setattr(self, name, value)
diff --git a/Lib/fontTools/ttLib/tables/_m_a_x_p.py b/Lib/fontTools/ttLib/tables/_m_a_x_p.py
index e810806d..f0e6c33a 100644
--- a/Lib/fontTools/ttLib/tables/_m_a_x_p.py
+++ b/Lib/fontTools/ttLib/tables/_m_a_x_p.py
@@ -27,112 +27,113 @@ maxpFormat_1_0_add = """
class table__m_a_x_p(DefaultTable.DefaultTable):
+ dependencies = ["glyf"]
- dependencies = ['glyf']
+ def decompile(self, data, ttFont):
+ dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
+ self.numGlyphs = int(self.numGlyphs)
+ if self.tableVersion != 0x00005000:
+ dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
+ assert len(data) == 0
- def decompile(self, data, ttFont):
- dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
- self.numGlyphs = int(self.numGlyphs)
- if self.tableVersion != 0x00005000:
- dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
- assert len(data) == 0
+ def compile(self, ttFont):
+ if "glyf" in ttFont:
+ if ttFont.isLoaded("glyf") and ttFont.recalcBBoxes:
+ self.recalc(ttFont)
+ else:
+ pass # CFF
+ self.numGlyphs = len(ttFont.getGlyphOrder())
+ if self.tableVersion != 0x00005000:
+ self.tableVersion = 0x00010000
+ data = sstruct.pack(maxpFormat_0_5, self)
+ if self.tableVersion == 0x00010000:
+ data = data + sstruct.pack(maxpFormat_1_0_add, self)
+ return data
- def compile(self, ttFont):
- if 'glyf' in ttFont:
- if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
- self.recalc(ttFont)
- else:
- pass # CFF
- self.numGlyphs = len(ttFont.getGlyphOrder())
- if self.tableVersion != 0x00005000:
- self.tableVersion = 0x00010000
- data = sstruct.pack(maxpFormat_0_5, self)
- if self.tableVersion == 0x00010000:
- data = data + sstruct.pack(maxpFormat_1_0_add, self)
- return data
+ def recalc(self, ttFont):
+ """Recalculate the font bounding box, and most other maxp values except
+ for the TT instructions values. Also recalculate the value of bit 1
+ of the flags field and the font bounding box of the 'head' table.
+ """
+ glyfTable = ttFont["glyf"]
+ hmtxTable = ttFont["hmtx"]
+ headTable = ttFont["head"]
+ self.numGlyphs = len(glyfTable)
+ INFINITY = 100000
+ xMin = +INFINITY
+ yMin = +INFINITY
+ xMax = -INFINITY
+ yMax = -INFINITY
+ maxPoints = 0
+ maxContours = 0
+ maxCompositePoints = 0
+ maxCompositeContours = 0
+ maxComponentElements = 0
+ maxComponentDepth = 0
+ allXMinIsLsb = 1
+ for glyphName in ttFont.getGlyphOrder():
+ g = glyfTable[glyphName]
+ if g.numberOfContours:
+ if hmtxTable[glyphName][1] != g.xMin:
+ allXMinIsLsb = 0
+ xMin = min(xMin, g.xMin)
+ yMin = min(yMin, g.yMin)
+ xMax = max(xMax, g.xMax)
+ yMax = max(yMax, g.yMax)
+ if g.numberOfContours > 0:
+ nPoints, nContours = g.getMaxpValues()
+ maxPoints = max(maxPoints, nPoints)
+ maxContours = max(maxContours, nContours)
+ elif g.isComposite():
+ nPoints, nContours, componentDepth = g.getCompositeMaxpValues(
+ glyfTable
+ )
+ maxCompositePoints = max(maxCompositePoints, nPoints)
+ maxCompositeContours = max(maxCompositeContours, nContours)
+ maxComponentElements = max(maxComponentElements, len(g.components))
+ maxComponentDepth = max(maxComponentDepth, componentDepth)
+ if xMin == +INFINITY:
+ headTable.xMin = 0
+ headTable.yMin = 0
+ headTable.xMax = 0
+ headTable.yMax = 0
+ else:
+ headTable.xMin = xMin
+ headTable.yMin = yMin
+ headTable.xMax = xMax
+ headTable.yMax = yMax
+ self.maxPoints = maxPoints
+ self.maxContours = maxContours
+ self.maxCompositePoints = maxCompositePoints
+ self.maxCompositeContours = maxCompositeContours
+ self.maxComponentElements = maxComponentElements
+ self.maxComponentDepth = maxComponentDepth
+ if allXMinIsLsb:
+ headTable.flags = headTable.flags | 0x2
+ else:
+ headTable.flags = headTable.flags & ~0x2
- def recalc(self, ttFont):
- """Recalculate the font bounding box, and most other maxp values except
- for the TT instructions values. Also recalculate the value of bit 1
- of the flags field and the font bounding box of the 'head' table.
- """
- glyfTable = ttFont['glyf']
- hmtxTable = ttFont['hmtx']
- headTable = ttFont['head']
- self.numGlyphs = len(glyfTable)
- INFINITY = 100000
- xMin = +INFINITY
- yMin = +INFINITY
- xMax = -INFINITY
- yMax = -INFINITY
- maxPoints = 0
- maxContours = 0
- maxCompositePoints = 0
- maxCompositeContours = 0
- maxComponentElements = 0
- maxComponentDepth = 0
- allXMinIsLsb = 1
- for glyphName in ttFont.getGlyphOrder():
- g = glyfTable[glyphName]
- if g.numberOfContours:
- if hmtxTable[glyphName][1] != g.xMin:
- allXMinIsLsb = 0
- xMin = min(xMin, g.xMin)
- yMin = min(yMin, g.yMin)
- xMax = max(xMax, g.xMax)
- yMax = max(yMax, g.yMax)
- if g.numberOfContours > 0:
- nPoints, nContours = g.getMaxpValues()
- maxPoints = max(maxPoints, nPoints)
- maxContours = max(maxContours, nContours)
- else:
- nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable)
- maxCompositePoints = max(maxCompositePoints, nPoints)
- maxCompositeContours = max(maxCompositeContours, nContours)
- maxComponentElements = max(maxComponentElements, len(g.components))
- maxComponentDepth = max(maxComponentDepth, componentDepth)
- if xMin == +INFINITY:
- headTable.xMin = 0
- headTable.yMin = 0
- headTable.xMax = 0
- headTable.yMax = 0
- else:
- headTable.xMin = xMin
- headTable.yMin = yMin
- headTable.xMax = xMax
- headTable.yMax = yMax
- self.maxPoints = maxPoints
- self.maxContours = maxContours
- self.maxCompositePoints = maxCompositePoints
- self.maxCompositeContours = maxCompositeContours
- self.maxComponentElements = maxComponentElements
- self.maxComponentDepth = maxComponentDepth
- if allXMinIsLsb:
- headTable.flags = headTable.flags | 0x2
- else:
- headTable.flags = headTable.flags & ~0x2
+ def testrepr(self):
+ items = sorted(self.__dict__.items())
+ print(". . . . . . . . .")
+ for combo in items:
+ print(" %s: %s" % combo)
+ print(". . . . . . . . .")
- def testrepr(self):
- items = sorted(self.__dict__.items())
- print(". . . . . . . . .")
- for combo in items:
- print(" %s: %s" % combo)
- print(". . . . . . . . .")
+ def toXML(self, writer, ttFont):
+ if self.tableVersion != 0x00005000:
+ writer.comment("Most of this table will be recalculated by the compiler")
+ writer.newline()
+ formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
+ if self.tableVersion != 0x00005000:
+ formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
+ names = names + names_1_0
+ for name in names:
+ value = getattr(self, name)
+ if name == "tableVersion":
+ value = hex(value)
+ writer.simpletag(name, value=value)
+ writer.newline()
- def toXML(self, writer, ttFont):
- if self.tableVersion != 0x00005000:
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
- if self.tableVersion != 0x00005000:
- formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
- names = names + names_1_0
- for name in names:
- value = getattr(self, name)
- if name == "tableVersion":
- value = hex(value)
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- setattr(self, name, safeEval(attrs["value"]))
+ def fromXML(self, name, attrs, content, ttFont):
+ setattr(self, name, safeEval(attrs["value"]))
diff --git a/Lib/fontTools/ttLib/tables/_m_e_t_a.py b/Lib/fontTools/ttLib/tables/_m_e_t_a.py
index 3faf0a56..3af9e543 100644
--- a/Lib/fontTools/ttLib/tables/_m_e_t_a.py
+++ b/Lib/fontTools/ttLib/tables/_m_e_t_a.py
@@ -30,16 +30,15 @@ class table__m_e_t_a(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
headerSize = sstruct.calcsize(META_HEADER_FORMAT)
- header = sstruct.unpack(META_HEADER_FORMAT, data[0 : headerSize])
+ header = sstruct.unpack(META_HEADER_FORMAT, data[0:headerSize])
if header["version"] != 1:
- raise TTLibError("unsupported 'meta' version %d" %
- header["version"])
+ raise TTLibError("unsupported 'meta' version %d" % header["version"])
dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT)
for i in range(header["numDataMaps"]):
dataMapOffset = headerSize + i * dataMapSize
dataMap = sstruct.unpack(
- DATA_MAP_FORMAT,
- data[dataMapOffset : dataMapOffset + dataMapSize])
+ DATA_MAP_FORMAT, data[dataMapOffset : dataMapOffset + dataMapSize]
+ )
tag = dataMap["tag"]
offset = dataMap["dataOffset"]
self.data[tag] = data[offset : offset + dataMap["dataLength"]]
@@ -50,12 +49,15 @@ class table__m_e_t_a(DefaultTable.DefaultTable):
keys = sorted(self.data.keys())
headerSize = sstruct.calcsize(META_HEADER_FORMAT)
dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT)
- header = sstruct.pack(META_HEADER_FORMAT, {
+ header = sstruct.pack(
+ META_HEADER_FORMAT,
+ {
"version": 1,
"flags": 0,
"dataOffset": dataOffset,
- "numDataMaps": len(keys)
- })
+ "numDataMaps": len(keys),
+ },
+ )
dataMaps = []
dataBlocks = []
for tag in keys:
@@ -63,11 +65,12 @@ class table__m_e_t_a(DefaultTable.DefaultTable):
data = self.data[tag].encode("utf-8")
else:
data = self.data[tag]
- dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, {
- "tag": tag,
- "dataOffset": dataOffset,
- "dataLength": len(data)
- }))
+ dataMaps.append(
+ sstruct.pack(
+ DATA_MAP_FORMAT,
+ {"tag": tag, "dataOffset": dataOffset, "dataLength": len(data)},
+ )
+ )
dataBlocks.append(data)
dataOffset += len(data)
return bytesjoin([header] + dataMaps + dataBlocks)
diff --git a/Lib/fontTools/ttLib/tables/_n_a_m_e.py b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
index 9558addb..bbb4f536 100644
--- a/Lib/fontTools/ttLib/tables/_n_a_m_e.py
+++ b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
@@ -1,8 +1,20 @@
# -*- coding: utf-8 -*-
from fontTools.misc import sstruct
-from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin, tobytes, tostr, safeEval
+from fontTools.misc.textTools import (
+ bytechr,
+ byteord,
+ bytesjoin,
+ strjoin,
+ tobytes,
+ tostr,
+ safeEval,
+)
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import newTable
+from fontTools.ttLib.ttVisitor import TTVisitor
+from fontTools import ttLib
+import fontTools.ttLib.tables.otTables as otTables
+from fontTools.ttLib.tables import C_P_A_L_
from . import DefaultTable
import struct
import logging
@@ -24,573 +36,643 @@ nameRecordSize = sstruct.calcsize(nameRecordFormat)
class table__n_a_m_e(DefaultTable.DefaultTable):
- dependencies = ["ltag"]
-
- def decompile(self, data, ttFont):
- format, n, stringOffset = struct.unpack(b">HHH", data[:6])
- expectedStringOffset = 6 + n * nameRecordSize
- if stringOffset != expectedStringOffset:
- log.error(
- "'name' table stringOffset incorrect. Expected: %s; Actual: %s",
- expectedStringOffset, stringOffset)
- stringData = data[stringOffset:]
- data = data[6:]
- self.names = []
- for i in range(n):
- if len(data) < 12:
- log.error('skipping malformed name record #%d', i)
- continue
- name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord())
- name.string = stringData[name.offset:name.offset+name.length]
- if name.offset + name.length > len(stringData):
- log.error('skipping malformed name record #%d', i)
- continue
- assert len(name.string) == name.length
- #if (name.platEncID, name.platformID) in ((0, 0), (1, 3)):
- # if len(name.string) % 2:
- # print "2-byte string doesn't have even length!"
- # print name.__dict__
- del name.offset, name.length
- self.names.append(name)
-
- def compile(self, ttFont):
- if not hasattr(self, "names"):
- # only happens when there are NO name table entries read
- # from the TTX file
- self.names = []
- names = self.names
- names.sort() # sort according to the spec; see NameRecord.__lt__()
- stringData = b""
- format = 0
- n = len(names)
- stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat)
- data = struct.pack(b">HHH", format, n, stringOffset)
- lastoffset = 0
- done = {} # remember the data so we can reuse the "pointers"
- for name in names:
- string = name.toBytes()
- if string in done:
- name.offset, name.length = done[string]
- else:
- name.offset, name.length = done[string] = len(stringData), len(string)
- stringData = bytesjoin([stringData, string])
- data = data + sstruct.pack(nameRecordFormat, name)
- return data + stringData
-
- def toXML(self, writer, ttFont):
- for name in self.names:
- name.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name != "namerecord":
- return # ignore unknown tags
- if not hasattr(self, "names"):
- self.names = []
- name = NameRecord()
- self.names.append(name)
- name.fromXML(name, attrs, content, ttFont)
-
- def getName(self, nameID, platformID, platEncID, langID=None):
- for namerecord in self.names:
- if ( namerecord.nameID == nameID and
- namerecord.platformID == platformID and
- namerecord.platEncID == platEncID):
- if langID is None or namerecord.langID == langID:
- return namerecord
- return None # not found
-
- def getDebugName(self, nameID):
- englishName = someName = None
- for name in self.names:
- if name.nameID != nameID:
- continue
- try:
- unistr = name.toUnicode()
- except UnicodeDecodeError:
- continue
-
- someName = unistr
- if (name.platformID, name.langID) in ((1, 0), (3, 0x409)):
- englishName = unistr
- break
- if englishName:
- return englishName
- elif someName:
- return someName
- else:
- return None
-
- def getFirstDebugName(self, nameIDs):
- for nameID in nameIDs:
- name = self.getDebugName(nameID)
- if name is not None:
- return name
- return None
-
- def getBestFamilyName(self):
- # 21 = WWS Family Name
- # 16 = Typographic Family Name
- # 1 = Family Name
- return self.getFirstDebugName((21, 16, 1))
-
- def getBestSubFamilyName(self):
- # 22 = WWS SubFamily Name
- # 17 = Typographic SubFamily Name
- # 2 = SubFamily Name
- return self.getFirstDebugName((22, 17, 2))
-
- def getBestFullName(self):
- # 4 = Full Name
- # 6 = PostScript Name
- for nameIDs in ((21, 22), (16, 17), (1, 2), (4, ), (6, )):
- if len(nameIDs) == 2:
- name_fam = self.getDebugName(nameIDs[0])
- name_subfam = self.getDebugName(nameIDs[1])
- if None in [name_fam, name_subfam]:
- continue # if any is None, skip
- name = f"{name_fam} {name_subfam}"
- if name_subfam.lower() == 'regular':
- name = f"{name_fam}"
- return name
- else:
- name = self.getDebugName(nameIDs[0])
- if name is not None:
- return name
- return None
-
- def setName(self, string, nameID, platformID, platEncID, langID):
- """ Set the 'string' for the name record identified by 'nameID', 'platformID',
- 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it
- and append to the name table.
-
- 'string' can be of type `str` (`unicode` in PY2) or `bytes`. In the latter case,
- it is assumed to be already encoded with the correct plaform-specific encoding
- identified by the (platformID, platEncID, langID) triplet. A warning is issued
- to prevent unexpected results.
- """
- if not hasattr(self, 'names'):
- self.names = []
- if not isinstance(string, str):
- if isinstance(string, bytes):
- log.warning(
- "name string is bytes, ensure it's correctly encoded: %r", string)
- else:
- raise TypeError(
- "expected unicode or bytes, found %s: %r" % (
- type(string).__name__, string))
- namerecord = self.getName(nameID, platformID, platEncID, langID)
- if namerecord:
- namerecord.string = string
- else:
- self.names.append(makeName(string, nameID, platformID, platEncID, langID))
-
- def removeNames(self, nameID=None, platformID=None, platEncID=None, langID=None):
- """Remove any name records identified by the given combination of 'nameID',
- 'platformID', 'platEncID' and 'langID'.
- """
- args = {
- argName: argValue
- for argName, argValue in (
- ("nameID", nameID),
- ("platformID", platformID),
- ("platEncID", platEncID),
- ("langID", langID),
- )
- if argValue is not None
- }
- if not args:
- # no arguments, nothing to do
- return
- self.names = [
- rec for rec in self.names
- if any(
- argValue != getattr(rec, argName)
- for argName, argValue in args.items()
- )
- ]
-
- def _findUnusedNameID(self, minNameID=256):
- """Finds an unused name id.
-
- The nameID is assigned in the range between 'minNameID' and 32767 (inclusive),
- following the last nameID in the name table.
- """
- names = getattr(self, 'names', [])
- nameID = 1 + max([n.nameID for n in names] + [minNameID - 1])
- if nameID > 32767:
- raise ValueError("nameID must be less than 32768")
- return nameID
-
- def findMultilingualName(self, names, windows=True, mac=True, minNameID=0):
- """Return the name ID of an existing multilingual name that
- matches the 'names' dictionary, or None if not found.
-
- 'names' is a dictionary with the name in multiple languages,
- such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
- The keys can be arbitrary IETF BCP 47 language codes;
- the values are Unicode strings.
-
- If 'windows' is True, the returned name ID is guaranteed
- exist for all requested languages for platformID=3 and
- platEncID=1.
- If 'mac' is True, the returned name ID is guaranteed to exist
- for all requested languages for platformID=1 and platEncID=0.
-
- The returned name ID will not be less than the 'minNameID'
- argument.
- """
- # Gather the set of requested
- # (string, platformID, platEncID, langID)
- # tuples
- reqNameSet = set()
- for lang, name in sorted(names.items()):
- if windows:
- windowsName = _makeWindowsName(name, None, lang)
- if windowsName is not None:
- reqNameSet.add((windowsName.string,
- windowsName.platformID,
- windowsName.platEncID,
- windowsName.langID))
- if mac:
- macName = _makeMacName(name, None, lang)
- if macName is not None:
- reqNameSet.add((macName.string,
- macName.platformID,
- macName.platEncID,
- macName.langID))
-
- # Collect matching name IDs
- matchingNames = dict()
- for name in self.names:
- try:
- key = (name.toUnicode(), name.platformID,
- name.platEncID, name.langID)
- except UnicodeDecodeError:
- continue
- if key in reqNameSet and name.nameID >= minNameID:
- nameSet = matchingNames.setdefault(name.nameID, set())
- nameSet.add(key)
-
- # Return the first name ID that defines all requested strings
- for nameID, nameSet in sorted(matchingNames.items()):
- if nameSet == reqNameSet:
- return nameID
-
- return None # not found
-
- def addMultilingualName(self, names, ttFont=None, nameID=None,
- windows=True, mac=True, minNameID=0):
- """Add a multilingual name, returning its name ID
-
- 'names' is a dictionary with the name in multiple languages,
- such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
- The keys can be arbitrary IETF BCP 47 language codes;
- the values are Unicode strings.
-
- 'ttFont' is the TTFont to which the names are added, or None.
- If present, the font's 'ltag' table can get populated
- to store exotic language codes, which allows encoding
- names that otherwise cannot get encoded at all.
-
- 'nameID' is the name ID to be used, or None to let the library
- find an existing set of name records that match, or pick an
- unused name ID.
-
- If 'windows' is True, a platformID=3 name record will be added.
- If 'mac' is True, a platformID=1 name record will be added.
-
- If the 'nameID' argument is None, the created nameID will not
- be less than the 'minNameID' argument.
- """
- if not hasattr(self, 'names'):
- self.names = []
- if nameID is None:
- # Reuse nameID if possible
- nameID = self.findMultilingualName(
- names, windows=windows, mac=mac, minNameID=minNameID)
- if nameID is not None:
- return nameID
- nameID = self._findUnusedNameID()
- # TODO: Should minimize BCP 47 language codes.
- # https://github.com/fonttools/fonttools/issues/930
- for lang, name in sorted(names.items()):
- if windows:
- windowsName = _makeWindowsName(name, nameID, lang)
- if windowsName is not None:
- self.names.append(windowsName)
- else:
- # We cannot not make a Windows name: make sure we add a
- # Mac name as a fallback. This can happen for exotic
- # BCP47 language tags that have no Windows language code.
- mac = True
- if mac:
- macName = _makeMacName(name, nameID, lang, ttFont)
- if macName is not None:
- self.names.append(macName)
- return nameID
-
- def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255):
- """ Add a new name record containing 'string' for each (platformID, platEncID,
- langID) tuple specified in the 'platforms' list.
-
- The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive),
- following the last nameID in the name table.
- If no 'platforms' are specified, two English name records are added, one for the
- Macintosh (platformID=0), and one for the Windows platform (3).
-
- The 'string' must be a Unicode string, so it can be encoded with different,
- platform-specific encodings.
-
- Return the new nameID.
- """
- assert len(platforms) > 0, \
- "'platforms' must contain at least one (platformID, platEncID, langID) tuple"
- if not hasattr(self, 'names'):
- self.names = []
- if not isinstance(string, str):
- raise TypeError(
- "expected str, found %s: %r" % (type(string).__name__, string))
- nameID = self._findUnusedNameID(minNameID + 1)
- for platformID, platEncID, langID in platforms:
- self.names.append(makeName(string, nameID, platformID, platEncID, langID))
- return nameID
+ dependencies = ["ltag"]
+
+ def decompile(self, data, ttFont):
+ format, n, stringOffset = struct.unpack(b">HHH", data[:6])
+ expectedStringOffset = 6 + n * nameRecordSize
+ if stringOffset != expectedStringOffset:
+ log.error(
+ "'name' table stringOffset incorrect. Expected: %s; Actual: %s",
+ expectedStringOffset,
+ stringOffset,
+ )
+ stringData = data[stringOffset:]
+ data = data[6:]
+ self.names = []
+ for i in range(n):
+ if len(data) < 12:
+ log.error("skipping malformed name record #%d", i)
+ continue
+ name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord())
+ name.string = stringData[name.offset : name.offset + name.length]
+ if name.offset + name.length > len(stringData):
+ log.error("skipping malformed name record #%d", i)
+ continue
+ assert len(name.string) == name.length
+ # if (name.platEncID, name.platformID) in ((0, 0), (1, 3)):
+ # if len(name.string) % 2:
+ # print "2-byte string doesn't have even length!"
+ # print name.__dict__
+ del name.offset, name.length
+ self.names.append(name)
+
+ def compile(self, ttFont):
+ if not hasattr(self, "names"):
+ # only happens when there are NO name table entries read
+ # from the TTX file
+ self.names = []
+ names = self.names
+ names.sort() # sort according to the spec; see NameRecord.__lt__()
+ stringData = b""
+ format = 0
+ n = len(names)
+ stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat)
+ data = struct.pack(b">HHH", format, n, stringOffset)
+ lastoffset = 0
+ done = {} # remember the data so we can reuse the "pointers"
+ for name in names:
+ string = name.toBytes()
+ if string in done:
+ name.offset, name.length = done[string]
+ else:
+ name.offset, name.length = done[string] = len(stringData), len(string)
+ stringData = bytesjoin([stringData, string])
+ data = data + sstruct.pack(nameRecordFormat, name)
+ return data + stringData
+
+ def toXML(self, writer, ttFont):
+ for name in self.names:
+ name.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "namerecord":
+ return # ignore unknown tags
+ if not hasattr(self, "names"):
+ self.names = []
+ name = NameRecord()
+ self.names.append(name)
+ name.fromXML(name, attrs, content, ttFont)
+
+ def getName(self, nameID, platformID, platEncID, langID=None):
+ for namerecord in self.names:
+ if (
+ namerecord.nameID == nameID
+ and namerecord.platformID == platformID
+ and namerecord.platEncID == platEncID
+ ):
+ if langID is None or namerecord.langID == langID:
+ return namerecord
+ return None # not found
+
+ def getDebugName(self, nameID):
+ englishName = someName = None
+ for name in self.names:
+ if name.nameID != nameID:
+ continue
+ try:
+ unistr = name.toUnicode()
+ except UnicodeDecodeError:
+ continue
+
+ someName = unistr
+ if (name.platformID, name.langID) in ((1, 0), (3, 0x409)):
+ englishName = unistr
+ break
+ if englishName:
+ return englishName
+ elif someName:
+ return someName
+ else:
+ return None
+
+ def getFirstDebugName(self, nameIDs):
+ for nameID in nameIDs:
+ name = self.getDebugName(nameID)
+ if name is not None:
+ return name
+ return None
+
+ def getBestFamilyName(self):
+ # 21 = WWS Family Name
+ # 16 = Typographic Family Name
+ # 1 = Family Name
+ return self.getFirstDebugName((21, 16, 1))
+
+ def getBestSubFamilyName(self):
+ # 22 = WWS SubFamily Name
+ # 17 = Typographic SubFamily Name
+ # 2 = SubFamily Name
+ return self.getFirstDebugName((22, 17, 2))
+
+ def getBestFullName(self):
+ # 4 = Full Name
+ # 6 = PostScript Name
+ for nameIDs in ((21, 22), (16, 17), (1, 2), (4,), (6,)):
+ if len(nameIDs) == 2:
+ name_fam = self.getDebugName(nameIDs[0])
+ name_subfam = self.getDebugName(nameIDs[1])
+ if None in [name_fam, name_subfam]:
+ continue # if any is None, skip
+ name = f"{name_fam} {name_subfam}"
+ if name_subfam.lower() == "regular":
+ name = f"{name_fam}"
+ return name
+ else:
+ name = self.getDebugName(nameIDs[0])
+ if name is not None:
+ return name
+ return None
+
+ def setName(self, string, nameID, platformID, platEncID, langID):
+ """Set the 'string' for the name record identified by 'nameID', 'platformID',
+ 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it
+ and append to the name table.
+
+ 'string' can be of type `str` (`unicode` in PY2) or `bytes`. In the latter case,
+ it is assumed to be already encoded with the correct plaform-specific encoding
+ identified by the (platformID, platEncID, langID) triplet. A warning is issued
+ to prevent unexpected results.
+ """
+ if not hasattr(self, "names"):
+ self.names = []
+ if not isinstance(string, str):
+ if isinstance(string, bytes):
+ log.warning(
+ "name string is bytes, ensure it's correctly encoded: %r", string
+ )
+ else:
+ raise TypeError(
+ "expected unicode or bytes, found %s: %r"
+ % (type(string).__name__, string)
+ )
+ namerecord = self.getName(nameID, platformID, platEncID, langID)
+ if namerecord:
+ namerecord.string = string
+ else:
+ self.names.append(makeName(string, nameID, platformID, platEncID, langID))
+
+ def removeNames(self, nameID=None, platformID=None, platEncID=None, langID=None):
+ """Remove any name records identified by the given combination of 'nameID',
+ 'platformID', 'platEncID' and 'langID'.
+ """
+ args = {
+ argName: argValue
+ for argName, argValue in (
+ ("nameID", nameID),
+ ("platformID", platformID),
+ ("platEncID", platEncID),
+ ("langID", langID),
+ )
+ if argValue is not None
+ }
+ if not args:
+ # no arguments, nothing to do
+ return
+ self.names = [
+ rec
+ for rec in self.names
+ if any(
+ argValue != getattr(rec, argName) for argName, argValue in args.items()
+ )
+ ]
+
+ @staticmethod
+ def removeUnusedNames(ttFont):
+ """Remove any name records which are not in NameID range 0-255 and not utilized
+ within the font itself."""
+ visitor = NameRecordVisitor()
+ visitor.visit(ttFont)
+ toDelete = set()
+ for record in ttFont["name"].names:
+ # Name IDs 26 to 255, inclusive, are reserved for future standard names.
+ # https://learn.microsoft.com/en-us/typography/opentype/spec/name#name-ids
+ if record.nameID < 256:
+ continue
+ if record.nameID not in visitor.seen:
+ toDelete.add(record.nameID)
+
+ for nameID in toDelete:
+ ttFont["name"].removeNames(nameID)
+ return toDelete
+
+ def _findUnusedNameID(self, minNameID=256):
+ """Finds an unused name id.
+
+ The nameID is assigned in the range between 'minNameID' and 32767 (inclusive),
+ following the last nameID in the name table.
+ """
+ names = getattr(self, "names", [])
+ nameID = 1 + max([n.nameID for n in names] + [minNameID - 1])
+ if nameID > 32767:
+ raise ValueError("nameID must be less than 32768")
+ return nameID
+
+ def findMultilingualName(
+ self, names, windows=True, mac=True, minNameID=0, ttFont=None
+ ):
+ """Return the name ID of an existing multilingual name that
+ matches the 'names' dictionary, or None if not found.
+
+ 'names' is a dictionary with the name in multiple languages,
+ such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
+ The keys can be arbitrary IETF BCP 47 language codes;
+ the values are Unicode strings.
+
+ If 'windows' is True, the returned name ID is guaranteed
+ exist for all requested languages for platformID=3 and
+ platEncID=1.
+ If 'mac' is True, the returned name ID is guaranteed to exist
+ for all requested languages for platformID=1 and platEncID=0.
+
+ The returned name ID will not be less than the 'minNameID'
+ argument.
+ """
+ # Gather the set of requested
+ # (string, platformID, platEncID, langID)
+ # tuples
+ reqNameSet = set()
+ for lang, name in sorted(names.items()):
+ if windows:
+ windowsName = _makeWindowsName(name, None, lang)
+ if windowsName is not None:
+ reqNameSet.add(
+ (
+ windowsName.string,
+ windowsName.platformID,
+ windowsName.platEncID,
+ windowsName.langID,
+ )
+ )
+ if mac:
+ macName = _makeMacName(name, None, lang, ttFont)
+ if macName is not None:
+ reqNameSet.add(
+ (
+ macName.string,
+ macName.platformID,
+ macName.platEncID,
+ macName.langID,
+ )
+ )
+
+ # Collect matching name IDs
+ matchingNames = dict()
+ for name in self.names:
+ try:
+ key = (name.toUnicode(), name.platformID, name.platEncID, name.langID)
+ except UnicodeDecodeError:
+ continue
+ if key in reqNameSet and name.nameID >= minNameID:
+ nameSet = matchingNames.setdefault(name.nameID, set())
+ nameSet.add(key)
+
+ # Return the first name ID that defines all requested strings
+ for nameID, nameSet in sorted(matchingNames.items()):
+ if nameSet == reqNameSet:
+ return nameID
+
+ return None # not found
+
+ def addMultilingualName(
+ self, names, ttFont=None, nameID=None, windows=True, mac=True, minNameID=0
+ ):
+ """Add a multilingual name, returning its name ID
+
+ 'names' is a dictionary with the name in multiple languages,
+ such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
+ The keys can be arbitrary IETF BCP 47 language codes;
+ the values are Unicode strings.
+
+ 'ttFont' is the TTFont to which the names are added, or None.
+ If present, the font's 'ltag' table can get populated
+ to store exotic language codes, which allows encoding
+ names that otherwise cannot get encoded at all.
+
+ 'nameID' is the name ID to be used, or None to let the library
+ find an existing set of name records that match, or pick an
+ unused name ID.
+
+ If 'windows' is True, a platformID=3 name record will be added.
+ If 'mac' is True, a platformID=1 name record will be added.
+
+ If the 'nameID' argument is None, the created nameID will not
+ be less than the 'minNameID' argument.
+ """
+ if not hasattr(self, "names"):
+ self.names = []
+ if nameID is None:
+ # Reuse nameID if possible
+ nameID = self.findMultilingualName(
+ names, windows=windows, mac=mac, minNameID=minNameID, ttFont=ttFont
+ )
+ if nameID is not None:
+ return nameID
+ nameID = self._findUnusedNameID()
+ # TODO: Should minimize BCP 47 language codes.
+ # https://github.com/fonttools/fonttools/issues/930
+ for lang, name in sorted(names.items()):
+ if windows:
+ windowsName = _makeWindowsName(name, nameID, lang)
+ if windowsName is not None:
+ self.names.append(windowsName)
+ else:
+ # We cannot not make a Windows name: make sure we add a
+ # Mac name as a fallback. This can happen for exotic
+ # BCP47 language tags that have no Windows language code.
+ mac = True
+ if mac:
+ macName = _makeMacName(name, nameID, lang, ttFont)
+ if macName is not None:
+ self.names.append(macName)
+ return nameID
+
+ def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255):
+ """Add a new name record containing 'string' for each (platformID, platEncID,
+ langID) tuple specified in the 'platforms' list.
+
+ The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive),
+ following the last nameID in the name table.
+ If no 'platforms' are specified, two English name records are added, one for the
+ Macintosh (platformID=0), and one for the Windows platform (3).
+
+ The 'string' must be a Unicode string, so it can be encoded with different,
+ platform-specific encodings.
+
+ Return the new nameID.
+ """
+ assert (
+ len(platforms) > 0
+ ), "'platforms' must contain at least one (platformID, platEncID, langID) tuple"
+ if not hasattr(self, "names"):
+ self.names = []
+ if not isinstance(string, str):
+ raise TypeError(
+ "expected str, found %s: %r" % (type(string).__name__, string)
+ )
+ nameID = self._findUnusedNameID(minNameID + 1)
+ for platformID, platEncID, langID in platforms:
+ self.names.append(makeName(string, nameID, platformID, platEncID, langID))
+ return nameID
def makeName(string, nameID, platformID, platEncID, langID):
- name = NameRecord()
- name.string, name.nameID, name.platformID, name.platEncID, name.langID = (
- string, nameID, platformID, platEncID, langID)
- return name
+ name = NameRecord()
+ name.string, name.nameID, name.platformID, name.platEncID, name.langID = (
+ string,
+ nameID,
+ platformID,
+ platEncID,
+ langID,
+ )
+ return name
def _makeWindowsName(name, nameID, language):
- """Create a NameRecord for the Microsoft Windows platform
-
- 'language' is an arbitrary IETF BCP 47 language identifier such
- as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows
- does not support the desired language, the result will be None.
- Future versions of fonttools might return a NameRecord for the
- OpenType 'name' table format 1, but this is not implemented yet.
- """
- langID = _WINDOWS_LANGUAGE_CODES.get(language.lower())
- if langID is not None:
- return makeName(name, nameID, 3, 1, langID)
- else:
- log.warning("cannot add Windows name in language %s "
- "because fonttools does not yet support "
- "name table format 1" % language)
- return None
+ """Create a NameRecord for the Microsoft Windows platform
+
+ 'language' is an arbitrary IETF BCP 47 language identifier such
+ as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows
+ does not support the desired language, the result will be None.
+ Future versions of fonttools might return a NameRecord for the
+ OpenType 'name' table format 1, but this is not implemented yet.
+ """
+ langID = _WINDOWS_LANGUAGE_CODES.get(language.lower())
+ if langID is not None:
+ return makeName(name, nameID, 3, 1, langID)
+ else:
+ log.warning(
+ "cannot add Windows name in language %s "
+ "because fonttools does not yet support "
+ "name table format 1" % language
+ )
+ return None
def _makeMacName(name, nameID, language, font=None):
- """Create a NameRecord for Apple platforms
-
- 'language' is an arbitrary IETF BCP 47 language identifier such
- as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we
- create a Macintosh NameRecord that is understood by old applications
- (platform ID 1 and an old-style Macintosh language enum). If this
- is not possible, we create a Unicode NameRecord (platform ID 0)
- whose language points to the font’s 'ltag' table. The latter
- can encode any string in any language, but legacy applications
- might not recognize the format (in which case they will ignore
- those names).
-
- 'font' should be the TTFont for which you want to create a name.
- If 'font' is None, we only return NameRecords for legacy Macintosh;
- in that case, the result will be None for names that need to
- be encoded with an 'ltag' table.
-
- See the section “The language identifier” in Apple’s specification:
- https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
- """
- macLang = _MAC_LANGUAGE_CODES.get(language.lower())
- macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang)
- if macLang is not None and macScript is not None:
- encoding = getEncoding(1, macScript, macLang, default="ascii")
- # Check if we can actually encode this name. If we can't,
- # for example because we have no support for the legacy
- # encoding, or because the name string contains Unicode
- # characters that the legacy encoding cannot represent,
- # we fall back to encoding the name in Unicode and put
- # the language tag into the ltag table.
- try:
- _ = tobytes(name, encoding, errors="strict")
- return makeName(name, nameID, 1, macScript, macLang)
- except UnicodeEncodeError:
- pass
- if font is not None:
- ltag = font.tables.get("ltag")
- if ltag is None:
- ltag = font["ltag"] = newTable("ltag")
- # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)”
- # “The preferred platform-specific code for Unicode would be 3 or 4.”
- # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
- return makeName(name, nameID, 0, 4, ltag.addTag(language))
- else:
- log.warning("cannot store language %s into 'ltag' table "
- "without having access to the TTFont object" %
- language)
- return None
+ """Create a NameRecord for Apple platforms
+
+ 'language' is an arbitrary IETF BCP 47 language identifier such
+ as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we
+ create a Macintosh NameRecord that is understood by old applications
+ (platform ID 1 and an old-style Macintosh language enum). If this
+ is not possible, we create a Unicode NameRecord (platform ID 0)
+ whose language points to the font’s 'ltag' table. The latter
+ can encode any string in any language, but legacy applications
+ might not recognize the format (in which case they will ignore
+ those names).
+
+ 'font' should be the TTFont for which you want to create a name.
+ If 'font' is None, we only return NameRecords for legacy Macintosh;
+ in that case, the result will be None for names that need to
+ be encoded with an 'ltag' table.
+
+ See the section “The language identifier” in Apple’s specification:
+ https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
+ """
+ macLang = _MAC_LANGUAGE_CODES.get(language.lower())
+ macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang)
+ if macLang is not None and macScript is not None:
+ encoding = getEncoding(1, macScript, macLang, default="ascii")
+ # Check if we can actually encode this name. If we can't,
+ # for example because we have no support for the legacy
+ # encoding, or because the name string contains Unicode
+ # characters that the legacy encoding cannot represent,
+ # we fall back to encoding the name in Unicode and put
+ # the language tag into the ltag table.
+ try:
+ _ = tobytes(name, encoding, errors="strict")
+ return makeName(name, nameID, 1, macScript, macLang)
+ except UnicodeEncodeError:
+ pass
+ if font is not None:
+ ltag = font.tables.get("ltag")
+ if ltag is None:
+ ltag = font["ltag"] = newTable("ltag")
+ # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)”
+ # “The preferred platform-specific code for Unicode would be 3 or 4.”
+ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
+ return makeName(name, nameID, 0, 4, ltag.addTag(language))
+ else:
+ log.warning(
+ "cannot store language %s into 'ltag' table "
+ "without having access to the TTFont object" % language
+ )
+ return None
class NameRecord(object):
-
- def getEncoding(self, default='ascii'):
- """Returns the Python encoding name for this name entry based on its platformID,
- platEncID, and langID. If encoding for these values is not known, by default
- 'ascii' is returned. That can be overriden by passing a value to the default
- argument.
- """
- return getEncoding(self.platformID, self.platEncID, self.langID, default)
-
- def encodingIsUnicodeCompatible(self):
- return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1']
-
- def __str__(self):
- return self.toStr(errors='backslashreplace')
-
- def isUnicode(self):
- return (self.platformID == 0 or
- (self.platformID == 3 and self.platEncID in [0, 1, 10]))
-
- def toUnicode(self, errors='strict'):
- """
- If self.string is a Unicode string, return it; otherwise try decoding the
- bytes in self.string to a Unicode string using the encoding of this
- entry as returned by self.getEncoding(); Note that self.getEncoding()
- returns 'ascii' if the encoding is unknown to the library.
-
- Certain heuristics are performed to recover data from bytes that are
- ill-formed in the chosen encoding, or that otherwise look misencoded
- (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE
- but marked otherwise). If the bytes are ill-formed and the heuristics fail,
- the error is handled according to the errors parameter to this function, which is
- passed to the underlying decode() function; by default it throws a
- UnicodeDecodeError exception.
-
- Note: The mentioned heuristics mean that roundtripping a font to XML and back
- to binary might recover some misencoded data whereas just loading the font
- and saving it back will not change them.
- """
- def isascii(b):
- return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D]
- encoding = self.getEncoding()
- string = self.string
-
- if isinstance(string, bytes) and encoding == 'utf_16_be' and len(string) % 2 == 1:
- # Recover badly encoded UTF-16 strings that have an odd number of bytes:
- # - If the last byte is zero, drop it. Otherwise,
- # - If all the odd bytes are zero and all the even bytes are ASCII,
- # prepend one zero byte. Otherwise,
- # - If first byte is zero and all other bytes are ASCII, insert zero
- # bytes between consecutive ASCII bytes.
- #
- # (Yes, I've seen all of these in the wild... sigh)
- if byteord(string[-1]) == 0:
- string = string[:-1]
- elif all(byteord(b) == 0 if i % 2 else isascii(byteord(b)) for i,b in enumerate(string)):
- string = b'\0' + string
- elif byteord(string[0]) == 0 and all(isascii(byteord(b)) for b in string[1:]):
- string = bytesjoin(b'\0'+bytechr(byteord(b)) for b in string[1:])
-
- string = tostr(string, encoding=encoding, errors=errors)
-
- # If decoded strings still looks like UTF-16BE, it suggests a double-encoding.
- # Fix it up.
- if all(ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i,c in enumerate(string)):
- # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text,
- # narrow it down.
- string = ''.join(c for c in string[1::2])
-
- return string
-
- def toBytes(self, errors='strict'):
- """ If self.string is a bytes object, return it; otherwise try encoding
- the Unicode string in self.string to bytes using the encoding of this
- entry as returned by self.getEncoding(); Note that self.getEncoding()
- returns 'ascii' if the encoding is unknown to the library.
-
- If the Unicode string cannot be encoded to bytes in the chosen encoding,
- the error is handled according to the errors parameter to this function,
- which is passed to the underlying encode() function; by default it throws a
- UnicodeEncodeError exception.
- """
- return tobytes(self.string, encoding=self.getEncoding(), errors=errors)
-
- toStr = toUnicode
-
- def toXML(self, writer, ttFont):
- try:
- unistr = self.toUnicode()
- except UnicodeDecodeError:
- unistr = None
- attrs = [
- ("nameID", self.nameID),
- ("platformID", self.platformID),
- ("platEncID", self.platEncID),
- ("langID", hex(self.langID)),
- ]
-
- if unistr is None or not self.encodingIsUnicodeCompatible():
- attrs.append(("unicode", unistr is not None))
-
- writer.begintag("namerecord", attrs)
- writer.newline()
- if unistr is not None:
- writer.write(unistr)
- else:
- writer.write8bit(self.string)
- writer.newline()
- writer.endtag("namerecord")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.nameID = safeEval(attrs["nameID"])
- self.platformID = safeEval(attrs["platformID"])
- self.platEncID = safeEval(attrs["platEncID"])
- self.langID = safeEval(attrs["langID"])
- s = strjoin(content).strip()
- encoding = self.getEncoding()
- if self.encodingIsUnicodeCompatible() or safeEval(attrs.get("unicode", "False")):
- self.string = s.encode(encoding)
- else:
- # This is the inverse of write8bit...
- self.string = s.encode("latin1")
-
- def __lt__(self, other):
- if type(self) != type(other):
- return NotImplemented
-
- try:
- # implemented so that list.sort() sorts according to the spec.
- selfTuple = (
- self.platformID,
- self.platEncID,
- self.langID,
- self.nameID,
- self.toBytes(),
- )
- otherTuple = (
- other.platformID,
- other.platEncID,
- other.langID,
- other.nameID,
- other.toBytes(),
- )
- return selfTuple < otherTuple
- except (UnicodeEncodeError, AttributeError):
- # This can only happen for
- # 1) an object that is not a NameRecord, or
- # 2) an unlikely incomplete NameRecord object which has not been
- # fully populated, or
- # 3) when all IDs are identical but the strings can't be encoded
- # for their platform encoding.
- # In all cases it is best to return NotImplemented.
- return NotImplemented
-
- def __repr__(self):
- return "<NameRecord NameID=%d; PlatformID=%d; LanguageID=%d>" % (
- self.nameID, self.platformID, self.langID)
+ def getEncoding(self, default="ascii"):
+ """Returns the Python encoding name for this name entry based on its platformID,
+ platEncID, and langID. If encoding for these values is not known, by default
+ 'ascii' is returned. That can be overriden by passing a value to the default
+ argument.
+ """
+ return getEncoding(self.platformID, self.platEncID, self.langID, default)
+
+ def encodingIsUnicodeCompatible(self):
+ return self.getEncoding(None) in ["utf_16_be", "ucs2be", "ascii", "latin1"]
+
+ def __str__(self):
+ return self.toStr(errors="backslashreplace")
+
+ def isUnicode(self):
+ return self.platformID == 0 or (
+ self.platformID == 3 and self.platEncID in [0, 1, 10]
+ )
+
+ def toUnicode(self, errors="strict"):
+ """
+ If self.string is a Unicode string, return it; otherwise try decoding the
+ bytes in self.string to a Unicode string using the encoding of this
+ entry as returned by self.getEncoding(); Note that self.getEncoding()
+ returns 'ascii' if the encoding is unknown to the library.
+
+ Certain heuristics are performed to recover data from bytes that are
+ ill-formed in the chosen encoding, or that otherwise look misencoded
+ (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE
+ but marked otherwise). If the bytes are ill-formed and the heuristics fail,
+ the error is handled according to the errors parameter to this function, which is
+ passed to the underlying decode() function; by default it throws a
+ UnicodeDecodeError exception.
+
+ Note: The mentioned heuristics mean that roundtripping a font to XML and back
+ to binary might recover some misencoded data whereas just loading the font
+ and saving it back will not change them.
+ """
+
+ def isascii(b):
+ return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D]
+
+ encoding = self.getEncoding()
+ string = self.string
+
+ if (
+ isinstance(string, bytes)
+ and encoding == "utf_16_be"
+ and len(string) % 2 == 1
+ ):
+ # Recover badly encoded UTF-16 strings that have an odd number of bytes:
+ # - If the last byte is zero, drop it. Otherwise,
+ # - If all the odd bytes are zero and all the even bytes are ASCII,
+ # prepend one zero byte. Otherwise,
+ # - If first byte is zero and all other bytes are ASCII, insert zero
+ # bytes between consecutive ASCII bytes.
+ #
+ # (Yes, I've seen all of these in the wild... sigh)
+ if byteord(string[-1]) == 0:
+ string = string[:-1]
+ elif all(
+ byteord(b) == 0 if i % 2 else isascii(byteord(b))
+ for i, b in enumerate(string)
+ ):
+ string = b"\0" + string
+ elif byteord(string[0]) == 0 and all(
+ isascii(byteord(b)) for b in string[1:]
+ ):
+ string = bytesjoin(b"\0" + bytechr(byteord(b)) for b in string[1:])
+
+ string = tostr(string, encoding=encoding, errors=errors)
+
+ # If decoded strings still looks like UTF-16BE, it suggests a double-encoding.
+ # Fix it up.
+ if all(
+ ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i, c in enumerate(string)
+ ):
+ # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text,
+ # narrow it down.
+ string = "".join(c for c in string[1::2])
+
+ return string
+
+ def toBytes(self, errors="strict"):
+ """If self.string is a bytes object, return it; otherwise try encoding
+ the Unicode string in self.string to bytes using the encoding of this
+ entry as returned by self.getEncoding(); Note that self.getEncoding()
+ returns 'ascii' if the encoding is unknown to the library.
+
+ If the Unicode string cannot be encoded to bytes in the chosen encoding,
+ the error is handled according to the errors parameter to this function,
+ which is passed to the underlying encode() function; by default it throws a
+ UnicodeEncodeError exception.
+ """
+ return tobytes(self.string, encoding=self.getEncoding(), errors=errors)
+
+ toStr = toUnicode
+
+ def toXML(self, writer, ttFont):
+ try:
+ unistr = self.toUnicode()
+ except UnicodeDecodeError:
+ unistr = None
+ attrs = [
+ ("nameID", self.nameID),
+ ("platformID", self.platformID),
+ ("platEncID", self.platEncID),
+ ("langID", hex(self.langID)),
+ ]
+
+ if unistr is None or not self.encodingIsUnicodeCompatible():
+ attrs.append(("unicode", unistr is not None))
+
+ writer.begintag("namerecord", attrs)
+ writer.newline()
+ if unistr is not None:
+ writer.write(unistr)
+ else:
+ writer.write8bit(self.string)
+ writer.newline()
+ writer.endtag("namerecord")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.nameID = safeEval(attrs["nameID"])
+ self.platformID = safeEval(attrs["platformID"])
+ self.platEncID = safeEval(attrs["platEncID"])
+ self.langID = safeEval(attrs["langID"])
+ s = strjoin(content).strip()
+ encoding = self.getEncoding()
+ if self.encodingIsUnicodeCompatible() or safeEval(
+ attrs.get("unicode", "False")
+ ):
+ self.string = s.encode(encoding)
+ else:
+ # This is the inverse of write8bit...
+ self.string = s.encode("latin1")
+
+ def __lt__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+
+ try:
+ selfTuple = (
+ self.platformID,
+ self.platEncID,
+ self.langID,
+ self.nameID,
+ )
+ otherTuple = (
+ other.platformID,
+ other.platEncID,
+ other.langID,
+ other.nameID,
+ )
+ except AttributeError:
+ # This can only happen for
+ # 1) an object that is not a NameRecord, or
+ # 2) an unlikely incomplete NameRecord object which has not been
+ # fully populated
+ return NotImplemented
+
+ try:
+ # Include the actual NameRecord string in the comparison tuples
+ selfTuple = selfTuple + (self.toBytes(),)
+ otherTuple = otherTuple + (other.toBytes(),)
+ except UnicodeEncodeError as e:
+ # toBytes caused an encoding error in either of the two, so content
+ # to sorting based on IDs only
+ log.error("NameRecord sorting failed to encode: %s" % e)
+
+ # Implemented so that list.sort() sorts according to the spec by using
+ # the order of the tuple items and their comparison
+ return selfTuple < otherTuple
+
+ def __repr__(self):
+ return "<NameRecord NameID=%d; PlatformID=%d; LanguageID=%d>" % (
+ self.nameID,
+ self.platformID,
+ self.langID,
+ )
# Windows language ID → IETF BCP-47 language tag
@@ -604,183 +686,182 @@ class NameRecord(object):
# http://www.unicode.org/cldr/charts/latest/supplemental/likely_subtags.html
# http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry
_WINDOWS_LANGUAGES = {
- 0x0436: 'af',
- 0x041C: 'sq',
- 0x0484: 'gsw',
- 0x045E: 'am',
- 0x1401: 'ar-DZ',
- 0x3C01: 'ar-BH',
- 0x0C01: 'ar',
- 0x0801: 'ar-IQ',
- 0x2C01: 'ar-JO',
- 0x3401: 'ar-KW',
- 0x3001: 'ar-LB',
- 0x1001: 'ar-LY',
- 0x1801: 'ary',
- 0x2001: 'ar-OM',
- 0x4001: 'ar-QA',
- 0x0401: 'ar-SA',
- 0x2801: 'ar-SY',
- 0x1C01: 'aeb',
- 0x3801: 'ar-AE',
- 0x2401: 'ar-YE',
- 0x042B: 'hy',
- 0x044D: 'as',
- 0x082C: 'az-Cyrl',
- 0x042C: 'az',
- 0x046D: 'ba',
- 0x042D: 'eu',
- 0x0423: 'be',
- 0x0845: 'bn',
- 0x0445: 'bn-IN',
- 0x201A: 'bs-Cyrl',
- 0x141A: 'bs',
- 0x047E: 'br',
- 0x0402: 'bg',
- 0x0403: 'ca',
- 0x0C04: 'zh-HK',
- 0x1404: 'zh-MO',
- 0x0804: 'zh',
- 0x1004: 'zh-SG',
- 0x0404: 'zh-TW',
- 0x0483: 'co',
- 0x041A: 'hr',
- 0x101A: 'hr-BA',
- 0x0405: 'cs',
- 0x0406: 'da',
- 0x048C: 'prs',
- 0x0465: 'dv',
- 0x0813: 'nl-BE',
- 0x0413: 'nl',
- 0x0C09: 'en-AU',
- 0x2809: 'en-BZ',
- 0x1009: 'en-CA',
- 0x2409: 'en-029',
- 0x4009: 'en-IN',
- 0x1809: 'en-IE',
- 0x2009: 'en-JM',
- 0x4409: 'en-MY',
- 0x1409: 'en-NZ',
- 0x3409: 'en-PH',
- 0x4809: 'en-SG',
- 0x1C09: 'en-ZA',
- 0x2C09: 'en-TT',
- 0x0809: 'en-GB',
- 0x0409: 'en',
- 0x3009: 'en-ZW',
- 0x0425: 'et',
- 0x0438: 'fo',
- 0x0464: 'fil',
- 0x040B: 'fi',
- 0x080C: 'fr-BE',
- 0x0C0C: 'fr-CA',
- 0x040C: 'fr',
- 0x140C: 'fr-LU',
- 0x180C: 'fr-MC',
- 0x100C: 'fr-CH',
- 0x0462: 'fy',
- 0x0456: 'gl',
- 0x0437: 'ka',
- 0x0C07: 'de-AT',
- 0x0407: 'de',
- 0x1407: 'de-LI',
- 0x1007: 'de-LU',
- 0x0807: 'de-CH',
- 0x0408: 'el',
- 0x046F: 'kl',
- 0x0447: 'gu',
- 0x0468: 'ha',
- 0x040D: 'he',
- 0x0439: 'hi',
- 0x040E: 'hu',
- 0x040F: 'is',
- 0x0470: 'ig',
- 0x0421: 'id',
- 0x045D: 'iu',
- 0x085D: 'iu-Latn',
- 0x083C: 'ga',
- 0x0434: 'xh',
- 0x0435: 'zu',
- 0x0410: 'it',
- 0x0810: 'it-CH',
- 0x0411: 'ja',
- 0x044B: 'kn',
- 0x043F: 'kk',
- 0x0453: 'km',
- 0x0486: 'quc',
- 0x0487: 'rw',
- 0x0441: 'sw',
- 0x0457: 'kok',
- 0x0412: 'ko',
- 0x0440: 'ky',
- 0x0454: 'lo',
- 0x0426: 'lv',
- 0x0427: 'lt',
- 0x082E: 'dsb',
- 0x046E: 'lb',
- 0x042F: 'mk',
- 0x083E: 'ms-BN',
- 0x043E: 'ms',
- 0x044C: 'ml',
- 0x043A: 'mt',
- 0x0481: 'mi',
- 0x047A: 'arn',
- 0x044E: 'mr',
- 0x047C: 'moh',
- 0x0450: 'mn',
- 0x0850: 'mn-CN',
- 0x0461: 'ne',
- 0x0414: 'nb',
- 0x0814: 'nn',
- 0x0482: 'oc',
- 0x0448: 'or',
- 0x0463: 'ps',
- 0x0415: 'pl',
- 0x0416: 'pt',
- 0x0816: 'pt-PT',
- 0x0446: 'pa',
- 0x046B: 'qu-BO',
- 0x086B: 'qu-EC',
- 0x0C6B: 'qu',
- 0x0418: 'ro',
- 0x0417: 'rm',
- 0x0419: 'ru',
- 0x243B: 'smn',
- 0x103B: 'smj-NO',
- 0x143B: 'smj',
- 0x0C3B: 'se-FI',
- 0x043B: 'se',
- 0x083B: 'se-SE',
- 0x203B: 'sms',
- 0x183B: 'sma-NO',
- 0x1C3B: 'sms',
- 0x044F: 'sa',
- 0x1C1A: 'sr-Cyrl-BA',
- 0x0C1A: 'sr',
- 0x181A: 'sr-Latn-BA',
- 0x081A: 'sr-Latn',
- 0x046C: 'nso',
- 0x0432: 'tn',
- 0x045B: 'si',
- 0x041B: 'sk',
- 0x0424: 'sl',
- 0x2C0A: 'es-AR',
- 0x400A: 'es-BO',
- 0x340A: 'es-CL',
- 0x240A: 'es-CO',
- 0x140A: 'es-CR',
- 0x1C0A: 'es-DO',
- 0x300A: 'es-EC',
- 0x440A: 'es-SV',
- 0x100A: 'es-GT',
- 0x480A: 'es-HN',
- 0x080A: 'es-MX',
- 0x4C0A: 'es-NI',
- 0x180A: 'es-PA',
- 0x3C0A: 'es-PY',
- 0x280A: 'es-PE',
- 0x500A: 'es-PR',
-
+ 0x0436: "af",
+ 0x041C: "sq",
+ 0x0484: "gsw",
+ 0x045E: "am",
+ 0x1401: "ar-DZ",
+ 0x3C01: "ar-BH",
+ 0x0C01: "ar",
+ 0x0801: "ar-IQ",
+ 0x2C01: "ar-JO",
+ 0x3401: "ar-KW",
+ 0x3001: "ar-LB",
+ 0x1001: "ar-LY",
+ 0x1801: "ary",
+ 0x2001: "ar-OM",
+ 0x4001: "ar-QA",
+ 0x0401: "ar-SA",
+ 0x2801: "ar-SY",
+ 0x1C01: "aeb",
+ 0x3801: "ar-AE",
+ 0x2401: "ar-YE",
+ 0x042B: "hy",
+ 0x044D: "as",
+ 0x082C: "az-Cyrl",
+ 0x042C: "az",
+ 0x046D: "ba",
+ 0x042D: "eu",
+ 0x0423: "be",
+ 0x0845: "bn",
+ 0x0445: "bn-IN",
+ 0x201A: "bs-Cyrl",
+ 0x141A: "bs",
+ 0x047E: "br",
+ 0x0402: "bg",
+ 0x0403: "ca",
+ 0x0C04: "zh-HK",
+ 0x1404: "zh-MO",
+ 0x0804: "zh",
+ 0x1004: "zh-SG",
+ 0x0404: "zh-TW",
+ 0x0483: "co",
+ 0x041A: "hr",
+ 0x101A: "hr-BA",
+ 0x0405: "cs",
+ 0x0406: "da",
+ 0x048C: "prs",
+ 0x0465: "dv",
+ 0x0813: "nl-BE",
+ 0x0413: "nl",
+ 0x0C09: "en-AU",
+ 0x2809: "en-BZ",
+ 0x1009: "en-CA",
+ 0x2409: "en-029",
+ 0x4009: "en-IN",
+ 0x1809: "en-IE",
+ 0x2009: "en-JM",
+ 0x4409: "en-MY",
+ 0x1409: "en-NZ",
+ 0x3409: "en-PH",
+ 0x4809: "en-SG",
+ 0x1C09: "en-ZA",
+ 0x2C09: "en-TT",
+ 0x0809: "en-GB",
+ 0x0409: "en",
+ 0x3009: "en-ZW",
+ 0x0425: "et",
+ 0x0438: "fo",
+ 0x0464: "fil",
+ 0x040B: "fi",
+ 0x080C: "fr-BE",
+ 0x0C0C: "fr-CA",
+ 0x040C: "fr",
+ 0x140C: "fr-LU",
+ 0x180C: "fr-MC",
+ 0x100C: "fr-CH",
+ 0x0462: "fy",
+ 0x0456: "gl",
+ 0x0437: "ka",
+ 0x0C07: "de-AT",
+ 0x0407: "de",
+ 0x1407: "de-LI",
+ 0x1007: "de-LU",
+ 0x0807: "de-CH",
+ 0x0408: "el",
+ 0x046F: "kl",
+ 0x0447: "gu",
+ 0x0468: "ha",
+ 0x040D: "he",
+ 0x0439: "hi",
+ 0x040E: "hu",
+ 0x040F: "is",
+ 0x0470: "ig",
+ 0x0421: "id",
+ 0x045D: "iu",
+ 0x085D: "iu-Latn",
+ 0x083C: "ga",
+ 0x0434: "xh",
+ 0x0435: "zu",
+ 0x0410: "it",
+ 0x0810: "it-CH",
+ 0x0411: "ja",
+ 0x044B: "kn",
+ 0x043F: "kk",
+ 0x0453: "km",
+ 0x0486: "quc",
+ 0x0487: "rw",
+ 0x0441: "sw",
+ 0x0457: "kok",
+ 0x0412: "ko",
+ 0x0440: "ky",
+ 0x0454: "lo",
+ 0x0426: "lv",
+ 0x0427: "lt",
+ 0x082E: "dsb",
+ 0x046E: "lb",
+ 0x042F: "mk",
+ 0x083E: "ms-BN",
+ 0x043E: "ms",
+ 0x044C: "ml",
+ 0x043A: "mt",
+ 0x0481: "mi",
+ 0x047A: "arn",
+ 0x044E: "mr",
+ 0x047C: "moh",
+ 0x0450: "mn",
+ 0x0850: "mn-CN",
+ 0x0461: "ne",
+ 0x0414: "nb",
+ 0x0814: "nn",
+ 0x0482: "oc",
+ 0x0448: "or",
+ 0x0463: "ps",
+ 0x0415: "pl",
+ 0x0416: "pt",
+ 0x0816: "pt-PT",
+ 0x0446: "pa",
+ 0x046B: "qu-BO",
+ 0x086B: "qu-EC",
+ 0x0C6B: "qu",
+ 0x0418: "ro",
+ 0x0417: "rm",
+ 0x0419: "ru",
+ 0x243B: "smn",
+ 0x103B: "smj-NO",
+ 0x143B: "smj",
+ 0x0C3B: "se-FI",
+ 0x043B: "se",
+ 0x083B: "se-SE",
+ 0x203B: "sms",
+ 0x183B: "sma-NO",
+ 0x1C3B: "sms",
+ 0x044F: "sa",
+ 0x1C1A: "sr-Cyrl-BA",
+ 0x0C1A: "sr",
+ 0x181A: "sr-Latn-BA",
+ 0x081A: "sr-Latn",
+ 0x046C: "nso",
+ 0x0432: "tn",
+ 0x045B: "si",
+ 0x041B: "sk",
+ 0x0424: "sl",
+ 0x2C0A: "es-AR",
+ 0x400A: "es-BO",
+ 0x340A: "es-CL",
+ 0x240A: "es-CO",
+ 0x140A: "es-CR",
+ 0x1C0A: "es-DO",
+ 0x300A: "es-EC",
+ 0x440A: "es-SV",
+ 0x100A: "es-GT",
+ 0x480A: "es-HN",
+ 0x080A: "es-MX",
+ 0x4C0A: "es-NI",
+ 0x180A: "es-PA",
+ 0x3C0A: "es-PY",
+ 0x280A: "es-PE",
+ 0x500A: "es-PR",
# Microsoft has defined two different language codes for
# “Spanish with modern sorting” and “Spanish with traditional
# sorting”. This makes sense for collation APIs, and it would be
@@ -788,163 +869,164 @@ _WINDOWS_LANGUAGES = {
# extensions (eg., “es-u-co-trad” is “Spanish with traditional
# sorting”). However, for storing names in fonts, this distinction
# does not make sense, so we use “es” in both cases.
- 0x0C0A: 'es',
- 0x040A: 'es',
-
- 0x540A: 'es-US',
- 0x380A: 'es-UY',
- 0x200A: 'es-VE',
- 0x081D: 'sv-FI',
- 0x041D: 'sv',
- 0x045A: 'syr',
- 0x0428: 'tg',
- 0x085F: 'tzm',
- 0x0449: 'ta',
- 0x0444: 'tt',
- 0x044A: 'te',
- 0x041E: 'th',
- 0x0451: 'bo',
- 0x041F: 'tr',
- 0x0442: 'tk',
- 0x0480: 'ug',
- 0x0422: 'uk',
- 0x042E: 'hsb',
- 0x0420: 'ur',
- 0x0843: 'uz-Cyrl',
- 0x0443: 'uz',
- 0x042A: 'vi',
- 0x0452: 'cy',
- 0x0488: 'wo',
- 0x0485: 'sah',
- 0x0478: 'ii',
- 0x046A: 'yo',
+ 0x0C0A: "es",
+ 0x040A: "es",
+ 0x540A: "es-US",
+ 0x380A: "es-UY",
+ 0x200A: "es-VE",
+ 0x081D: "sv-FI",
+ 0x041D: "sv",
+ 0x045A: "syr",
+ 0x0428: "tg",
+ 0x085F: "tzm",
+ 0x0449: "ta",
+ 0x0444: "tt",
+ 0x044A: "te",
+ 0x041E: "th",
+ 0x0451: "bo",
+ 0x041F: "tr",
+ 0x0442: "tk",
+ 0x0480: "ug",
+ 0x0422: "uk",
+ 0x042E: "hsb",
+ 0x0420: "ur",
+ 0x0843: "uz-Cyrl",
+ 0x0443: "uz",
+ 0x042A: "vi",
+ 0x0452: "cy",
+ 0x0488: "wo",
+ 0x0485: "sah",
+ 0x0478: "ii",
+ 0x046A: "yo",
}
_MAC_LANGUAGES = {
- 0: 'en',
- 1: 'fr',
- 2: 'de',
- 3: 'it',
- 4: 'nl',
- 5: 'sv',
- 6: 'es',
- 7: 'da',
- 8: 'pt',
- 9: 'no',
- 10: 'he',
- 11: 'ja',
- 12: 'ar',
- 13: 'fi',
- 14: 'el',
- 15: 'is',
- 16: 'mt',
- 17: 'tr',
- 18: 'hr',
- 19: 'zh-Hant',
- 20: 'ur',
- 21: 'hi',
- 22: 'th',
- 23: 'ko',
- 24: 'lt',
- 25: 'pl',
- 26: 'hu',
- 27: 'es',
- 28: 'lv',
- 29: 'se',
- 30: 'fo',
- 31: 'fa',
- 32: 'ru',
- 33: 'zh',
- 34: 'nl-BE',
- 35: 'ga',
- 36: 'sq',
- 37: 'ro',
- 38: 'cz',
- 39: 'sk',
- 40: 'sl',
- 41: 'yi',
- 42: 'sr',
- 43: 'mk',
- 44: 'bg',
- 45: 'uk',
- 46: 'be',
- 47: 'uz',
- 48: 'kk',
- 49: 'az-Cyrl',
- 50: 'az-Arab',
- 51: 'hy',
- 52: 'ka',
- 53: 'mo',
- 54: 'ky',
- 55: 'tg',
- 56: 'tk',
- 57: 'mn-CN',
- 58: 'mn',
- 59: 'ps',
- 60: 'ks',
- 61: 'ku',
- 62: 'sd',
- 63: 'bo',
- 64: 'ne',
- 65: 'sa',
- 66: 'mr',
- 67: 'bn',
- 68: 'as',
- 69: 'gu',
- 70: 'pa',
- 71: 'or',
- 72: 'ml',
- 73: 'kn',
- 74: 'ta',
- 75: 'te',
- 76: 'si',
- 77: 'my',
- 78: 'km',
- 79: 'lo',
- 80: 'vi',
- 81: 'id',
- 82: 'tl',
- 83: 'ms',
- 84: 'ms-Arab',
- 85: 'am',
- 86: 'ti',
- 87: 'om',
- 88: 'so',
- 89: 'sw',
- 90: 'rw',
- 91: 'rn',
- 92: 'ny',
- 93: 'mg',
- 94: 'eo',
- 128: 'cy',
- 129: 'eu',
- 130: 'ca',
- 131: 'la',
- 132: 'qu',
- 133: 'gn',
- 134: 'ay',
- 135: 'tt',
- 136: 'ug',
- 137: 'dz',
- 138: 'jv',
- 139: 'su',
- 140: 'gl',
- 141: 'af',
- 142: 'br',
- 143: 'iu',
- 144: 'gd',
- 145: 'gv',
- 146: 'ga',
- 147: 'to',
- 148: 'el-polyton',
- 149: 'kl',
- 150: 'az',
- 151: 'nn',
+ 0: "en",
+ 1: "fr",
+ 2: "de",
+ 3: "it",
+ 4: "nl",
+ 5: "sv",
+ 6: "es",
+ 7: "da",
+ 8: "pt",
+ 9: "no",
+ 10: "he",
+ 11: "ja",
+ 12: "ar",
+ 13: "fi",
+ 14: "el",
+ 15: "is",
+ 16: "mt",
+ 17: "tr",
+ 18: "hr",
+ 19: "zh-Hant",
+ 20: "ur",
+ 21: "hi",
+ 22: "th",
+ 23: "ko",
+ 24: "lt",
+ 25: "pl",
+ 26: "hu",
+ 27: "es",
+ 28: "lv",
+ 29: "se",
+ 30: "fo",
+ 31: "fa",
+ 32: "ru",
+ 33: "zh",
+ 34: "nl-BE",
+ 35: "ga",
+ 36: "sq",
+ 37: "ro",
+ 38: "cz",
+ 39: "sk",
+ 40: "sl",
+ 41: "yi",
+ 42: "sr",
+ 43: "mk",
+ 44: "bg",
+ 45: "uk",
+ 46: "be",
+ 47: "uz",
+ 48: "kk",
+ 49: "az-Cyrl",
+ 50: "az-Arab",
+ 51: "hy",
+ 52: "ka",
+ 53: "mo",
+ 54: "ky",
+ 55: "tg",
+ 56: "tk",
+ 57: "mn-CN",
+ 58: "mn",
+ 59: "ps",
+ 60: "ks",
+ 61: "ku",
+ 62: "sd",
+ 63: "bo",
+ 64: "ne",
+ 65: "sa",
+ 66: "mr",
+ 67: "bn",
+ 68: "as",
+ 69: "gu",
+ 70: "pa",
+ 71: "or",
+ 72: "ml",
+ 73: "kn",
+ 74: "ta",
+ 75: "te",
+ 76: "si",
+ 77: "my",
+ 78: "km",
+ 79: "lo",
+ 80: "vi",
+ 81: "id",
+ 82: "tl",
+ 83: "ms",
+ 84: "ms-Arab",
+ 85: "am",
+ 86: "ti",
+ 87: "om",
+ 88: "so",
+ 89: "sw",
+ 90: "rw",
+ 91: "rn",
+ 92: "ny",
+ 93: "mg",
+ 94: "eo",
+ 128: "cy",
+ 129: "eu",
+ 130: "ca",
+ 131: "la",
+ 132: "qu",
+ 133: "gn",
+ 134: "ay",
+ 135: "tt",
+ 136: "ug",
+ 137: "dz",
+ 138: "jv",
+ 139: "su",
+ 140: "gl",
+ 141: "af",
+ 142: "br",
+ 143: "iu",
+ 144: "gd",
+ 145: "gv",
+ 146: "ga",
+ 147: "to",
+ 148: "el-polyton",
+ 149: "kl",
+ 150: "az",
+ 151: "nn",
}
-_WINDOWS_LANGUAGE_CODES = {lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items()}
+_WINDOWS_LANGUAGE_CODES = {
+ lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items()
+}
_MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()}
@@ -1079,5 +1161,68 @@ _MAC_LANGUAGE_TO_SCRIPT = {
148: 6, # langGreekAncient → smRoman
149: 0, # langGreenlandic → smRoman
150: 0, # langAzerbaijanRoman → smRoman
- 151: 0, # langNynorsk → smRoman
+ 151: 0, # langNynorsk → smRoman
}
+
+
+class NameRecordVisitor(TTVisitor):
+ # Font tables that have NameIDs we need to collect.
+ TABLES = ("GSUB", "GPOS", "fvar", "CPAL", "STAT")
+
+ def __init__(self):
+ self.seen = set()
+
+
+@NameRecordVisitor.register_attrs(
+ (
+ (otTables.FeatureParamsSize, ("SubfamilyID", "SubfamilyNameID")),
+ (otTables.FeatureParamsStylisticSet, ("UINameID",)),
+ (
+ otTables.FeatureParamsCharacterVariants,
+ (
+ "FeatUILabelNameID",
+ "FeatUITooltipTextNameID",
+ "SampleTextNameID",
+ "FirstParamUILabelNameID",
+ ),
+ ),
+ (otTables.STAT, ("ElidedFallbackNameID",)),
+ (otTables.AxisRecord, ("AxisNameID",)),
+ (otTables.AxisValue, ("ValueNameID",)),
+ (otTables.FeatureName, ("FeatureNameID",)),
+ (otTables.Setting, ("SettingNameID",)),
+ )
+)
+def visit(visitor, obj, attr, value):
+ visitor.seen.add(value)
+
+
+@NameRecordVisitor.register(ttLib.getTableClass("fvar"))
+def visit(visitor, obj):
+ for inst in obj.instances:
+ if inst.postscriptNameID != 0xFFFF:
+ visitor.seen.add(inst.postscriptNameID)
+ visitor.seen.add(inst.subfamilyNameID)
+
+ for axis in obj.axes:
+ visitor.seen.add(axis.axisNameID)
+
+
+@NameRecordVisitor.register(ttLib.getTableClass("CPAL"))
+def visit(visitor, obj):
+ if obj.version == 1:
+ visitor.seen.update(obj.paletteLabels)
+ visitor.seen.update(obj.paletteEntryLabels)
+
+
+@NameRecordVisitor.register(ttLib.TTFont)
+def visit(visitor, font, *args, **kwargs):
+ if hasattr(visitor, "font"):
+ return False
+
+ visitor.font = font
+ for tag in visitor.TABLES:
+ if tag in font:
+ visitor.visit(font[tag], *args, **kwargs)
+ del visitor.font
+ return False
diff --git a/Lib/fontTools/ttLib/tables/_p_o_s_t.py b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
index c54b87f0..dba63711 100644
--- a/Lib/fontTools/ttLib/tables/_p_o_s_t.py
+++ b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
@@ -27,266 +27,282 @@ postFormatSize = sstruct.calcsize(postFormat)
class table__p_o_s_t(DefaultTable.DefaultTable):
+ def decompile(self, data, ttFont):
+ sstruct.unpack(postFormat, data[:postFormatSize], self)
+ data = data[postFormatSize:]
+ if self.formatType == 1.0:
+ self.decode_format_1_0(data, ttFont)
+ elif self.formatType == 2.0:
+ self.decode_format_2_0(data, ttFont)
+ elif self.formatType == 3.0:
+ self.decode_format_3_0(data, ttFont)
+ elif self.formatType == 4.0:
+ self.decode_format_4_0(data, ttFont)
+ else:
+ # supported format
+ raise ttLib.TTLibError(
+ "'post' table format %f not supported" % self.formatType
+ )
- def decompile(self, data, ttFont):
- sstruct.unpack(postFormat, data[:postFormatSize], self)
- data = data[postFormatSize:]
- if self.formatType == 1.0:
- self.decode_format_1_0(data, ttFont)
- elif self.formatType == 2.0:
- self.decode_format_2_0(data, ttFont)
- elif self.formatType == 3.0:
- self.decode_format_3_0(data, ttFont)
- elif self.formatType == 4.0:
- self.decode_format_4_0(data, ttFont)
- else:
- # supported format
- raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
+ def compile(self, ttFont):
+ data = sstruct.pack(postFormat, self)
+ if self.formatType == 1.0:
+ pass # we're done
+ elif self.formatType == 2.0:
+ data = data + self.encode_format_2_0(ttFont)
+ elif self.formatType == 3.0:
+ pass # we're done
+ elif self.formatType == 4.0:
+ data = data + self.encode_format_4_0(ttFont)
+ else:
+ # supported format
+ raise ttLib.TTLibError(
+ "'post' table format %f not supported" % self.formatType
+ )
+ return data
- def compile(self, ttFont):
- data = sstruct.pack(postFormat, self)
- if self.formatType == 1.0:
- pass # we're done
- elif self.formatType == 2.0:
- data = data + self.encode_format_2_0(ttFont)
- elif self.formatType == 3.0:
- pass # we're done
- elif self.formatType == 4.0:
- data = data + self.encode_format_4_0(ttFont)
- else:
- # supported format
- raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
- return data
+ def getGlyphOrder(self):
+ """This function will get called by a ttLib.TTFont instance.
+ Do not call this function yourself, use TTFont().getGlyphOrder()
+ or its relatives instead!
+ """
+ if not hasattr(self, "glyphOrder"):
+ raise ttLib.TTLibError("illegal use of getGlyphOrder()")
+ glyphOrder = self.glyphOrder
+ del self.glyphOrder
+ return glyphOrder
- def getGlyphOrder(self):
- """This function will get called by a ttLib.TTFont instance.
- Do not call this function yourself, use TTFont().getGlyphOrder()
- or its relatives instead!
- """
- if not hasattr(self, "glyphOrder"):
- raise ttLib.TTLibError("illegal use of getGlyphOrder()")
- glyphOrder = self.glyphOrder
- del self.glyphOrder
- return glyphOrder
+ def decode_format_1_0(self, data, ttFont):
+ self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs]
- def decode_format_1_0(self, data, ttFont):
- self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs]
+ def decode_format_2_0(self, data, ttFont):
+ (numGlyphs,) = struct.unpack(">H", data[:2])
+ numGlyphs = int(numGlyphs)
+ if numGlyphs > ttFont["maxp"].numGlyphs:
+ # Assume the numGlyphs field is bogus, so sync with maxp.
+ # I've seen this in one font, and if the assumption is
+ # wrong elsewhere, well, so be it: it's hard enough to
+ # work around _one_ non-conforming post format...
+ numGlyphs = ttFont["maxp"].numGlyphs
+ data = data[2:]
+ indices = array.array("H")
+ indices.frombytes(data[: 2 * numGlyphs])
+ if sys.byteorder != "big":
+ indices.byteswap()
+ data = data[2 * numGlyphs :]
+ maxIndex = max(indices)
+ self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257)
+ self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs)
+ for glyphID in range(numGlyphs):
+ index = indices[glyphID]
+ if index > 257:
+ try:
+ name = extraNames[index - 258]
+ except IndexError:
+ name = ""
+ else:
+ # fetch names from standard list
+ name = standardGlyphOrder[index]
+ glyphOrder[glyphID] = name
+ self.build_psNameMapping(ttFont)
- def decode_format_2_0(self, data, ttFont):
- numGlyphs, = struct.unpack(">H", data[:2])
- numGlyphs = int(numGlyphs)
- if numGlyphs > ttFont['maxp'].numGlyphs:
- # Assume the numGlyphs field is bogus, so sync with maxp.
- # I've seen this in one font, and if the assumption is
- # wrong elsewhere, well, so be it: it's hard enough to
- # work around _one_ non-conforming post format...
- numGlyphs = ttFont['maxp'].numGlyphs
- data = data[2:]
- indices = array.array("H")
- indices.frombytes(data[:2*numGlyphs])
- if sys.byteorder != "big": indices.byteswap()
- data = data[2*numGlyphs:]
- maxIndex = max(indices)
- self.extraNames = extraNames = unpackPStrings(data, maxIndex-257)
- self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs)
- for glyphID in range(numGlyphs):
- index = indices[glyphID]
- if index > 257:
- try:
- name = extraNames[index-258]
- except IndexError:
- name = ""
- else:
- # fetch names from standard list
- name = standardGlyphOrder[index]
- glyphOrder[glyphID] = name
- self.build_psNameMapping(ttFont)
+ def build_psNameMapping(self, ttFont):
+ mapping = {}
+ allNames = {}
+ for i in range(ttFont["maxp"].numGlyphs):
+ glyphName = psName = self.glyphOrder[i]
+ if glyphName == "":
+ glyphName = "glyph%.5d" % i
+ if glyphName in allNames:
+ # make up a new glyphName that's unique
+ n = allNames[glyphName]
+ while (glyphName + "#" + str(n)) in allNames:
+ n += 1
+ allNames[glyphName] = n + 1
+ glyphName = glyphName + "#" + str(n)
- def build_psNameMapping(self, ttFont):
- mapping = {}
- allNames = {}
- for i in range(ttFont['maxp'].numGlyphs):
- glyphName = psName = self.glyphOrder[i]
- if glyphName == "":
- glyphName = "glyph%.5d" % i
- if glyphName in allNames:
- # make up a new glyphName that's unique
- n = allNames[glyphName]
- while (glyphName + "#" + str(n)) in allNames:
- n += 1
- allNames[glyphName] = n + 1
- glyphName = glyphName + "#" + str(n)
+ self.glyphOrder[i] = glyphName
+ allNames[glyphName] = 1
+ if glyphName != psName:
+ mapping[glyphName] = psName
- self.glyphOrder[i] = glyphName
- allNames[glyphName] = 1
- if glyphName != psName:
- mapping[glyphName] = psName
+ self.mapping = mapping
- self.mapping = mapping
+ def decode_format_3_0(self, data, ttFont):
+ # Setting self.glyphOrder to None will cause the TTFont object
+ # try and construct glyph names from a Unicode cmap table.
+ self.glyphOrder = None
- def decode_format_3_0(self, data, ttFont):
- # Setting self.glyphOrder to None will cause the TTFont object
- # try and construct glyph names from a Unicode cmap table.
- self.glyphOrder = None
+ def decode_format_4_0(self, data, ttFont):
+ from fontTools import agl
- def decode_format_4_0(self, data, ttFont):
- from fontTools import agl
- numGlyphs = ttFont['maxp'].numGlyphs
- indices = array.array("H")
- indices.frombytes(data)
- if sys.byteorder != "big": indices.byteswap()
- # In some older fonts, the size of the post table doesn't match
- # the number of glyphs. Sometimes it's bigger, sometimes smaller.
- self.glyphOrder = glyphOrder = [''] * int(numGlyphs)
- for i in range(min(len(indices),numGlyphs)):
- if indices[i] == 0xFFFF:
- self.glyphOrder[i] = ''
- elif indices[i] in agl.UV2AGL:
- self.glyphOrder[i] = agl.UV2AGL[indices[i]]
- else:
- self.glyphOrder[i] = "uni%04X" % indices[i]
- self.build_psNameMapping(ttFont)
+ numGlyphs = ttFont["maxp"].numGlyphs
+ indices = array.array("H")
+ indices.frombytes(data)
+ if sys.byteorder != "big":
+ indices.byteswap()
+ # In some older fonts, the size of the post table doesn't match
+ # the number of glyphs. Sometimes it's bigger, sometimes smaller.
+ self.glyphOrder = glyphOrder = [""] * int(numGlyphs)
+ for i in range(min(len(indices), numGlyphs)):
+ if indices[i] == 0xFFFF:
+ self.glyphOrder[i] = ""
+ elif indices[i] in agl.UV2AGL:
+ self.glyphOrder[i] = agl.UV2AGL[indices[i]]
+ else:
+ self.glyphOrder[i] = "uni%04X" % indices[i]
+ self.build_psNameMapping(ttFont)
- def encode_format_2_0(self, ttFont):
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- assert len(glyphOrder) == numGlyphs
- indices = array.array("H")
- extraDict = {}
- extraNames = self.extraNames = [
- n for n in self.extraNames if n not in standardGlyphOrder]
- for i in range(len(extraNames)):
- extraDict[extraNames[i]] = i
- for glyphID in range(numGlyphs):
- glyphName = glyphOrder[glyphID]
- if glyphName in self.mapping:
- psName = self.mapping[glyphName]
- else:
- psName = glyphName
- if psName in extraDict:
- index = 258 + extraDict[psName]
- elif psName in standardGlyphOrder:
- index = standardGlyphOrder.index(psName)
- else:
- index = 258 + len(extraNames)
- extraDict[psName] = len(extraNames)
- extraNames.append(psName)
- indices.append(index)
- if sys.byteorder != "big": indices.byteswap()
- return struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
+ def encode_format_2_0(self, ttFont):
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ assert len(glyphOrder) == numGlyphs
+ indices = array.array("H")
+ extraDict = {}
+ extraNames = self.extraNames = [
+ n for n in self.extraNames if n not in standardGlyphOrder
+ ]
+ for i in range(len(extraNames)):
+ extraDict[extraNames[i]] = i
+ for glyphID in range(numGlyphs):
+ glyphName = glyphOrder[glyphID]
+ if glyphName in self.mapping:
+ psName = self.mapping[glyphName]
+ else:
+ psName = glyphName
+ if psName in extraDict:
+ index = 258 + extraDict[psName]
+ elif psName in standardGlyphOrder:
+ index = standardGlyphOrder.index(psName)
+ else:
+ index = 258 + len(extraNames)
+ extraDict[psName] = len(extraNames)
+ extraNames.append(psName)
+ indices.append(index)
+ if sys.byteorder != "big":
+ indices.byteswap()
+ return (
+ struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
+ )
- def encode_format_4_0(self, ttFont):
- from fontTools import agl
- numGlyphs = ttFont['maxp'].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- assert len(glyphOrder) == numGlyphs
- indices = array.array("H")
- for glyphID in glyphOrder:
- glyphID = glyphID.split('#')[0]
- if glyphID in agl.AGL2UV:
- indices.append(agl.AGL2UV[glyphID])
- elif len(glyphID) == 7 and glyphID[:3] == 'uni':
- indices.append(int(glyphID[3:],16))
- else:
- indices.append(0xFFFF)
- if sys.byteorder != "big": indices.byteswap()
- return indices.tobytes()
+ def encode_format_4_0(self, ttFont):
+ from fontTools import agl
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(postFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- if hasattr(self, "mapping"):
- writer.begintag("psNames")
- writer.newline()
- writer.comment("This file uses unique glyph names based on the information\n"
- "found in the 'post' table. Since these names might not be unique,\n"
- "we have to invent artificial names in case of clashes. In order to\n"
- "be able to retain the original information, we need a name to\n"
- "ps name mapping for those cases where they differ. That's what\n"
- "you see below.\n")
- writer.newline()
- items = sorted(self.mapping.items())
- for name, psName in items:
- writer.simpletag("psName", name=name, psName=psName)
- writer.newline()
- writer.endtag("psNames")
- writer.newline()
- if hasattr(self, "extraNames"):
- writer.begintag("extraNames")
- writer.newline()
- writer.comment("following are the name that are not taken from the standard Mac glyph order")
- writer.newline()
- for name in self.extraNames:
- writer.simpletag("psName", name=name)
- writer.newline()
- writer.endtag("extraNames")
- writer.newline()
- if hasattr(self, "data"):
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(self.data)
- writer.endtag("hexdata")
- writer.newline()
+ numGlyphs = ttFont["maxp"].numGlyphs
+ glyphOrder = ttFont.getGlyphOrder()
+ assert len(glyphOrder) == numGlyphs
+ indices = array.array("H")
+ for glyphID in glyphOrder:
+ glyphID = glyphID.split("#")[0]
+ if glyphID in agl.AGL2UV:
+ indices.append(agl.AGL2UV[glyphID])
+ elif len(glyphID) == 7 and glyphID[:3] == "uni":
+ indices.append(int(glyphID[3:], 16))
+ else:
+ indices.append(0xFFFF)
+ if sys.byteorder != "big":
+ indices.byteswap()
+ return indices.tobytes()
- def fromXML(self, name, attrs, content, ttFont):
- if name not in ("psNames", "extraNames", "hexdata"):
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "psNames":
- self.mapping = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "psName":
- self.mapping[attrs["name"]] = attrs["psName"]
- elif name == "extraNames":
- self.extraNames = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "psName":
- self.extraNames.append(attrs["name"])
- else:
- self.data = readHex(content)
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(postFormat)
+ for name in names:
+ value = getattr(self, name)
+ writer.simpletag(name, value=value)
+ writer.newline()
+ if hasattr(self, "mapping"):
+ writer.begintag("psNames")
+ writer.newline()
+ writer.comment(
+ "This file uses unique glyph names based on the information\n"
+ "found in the 'post' table. Since these names might not be unique,\n"
+ "we have to invent artificial names in case of clashes. In order to\n"
+ "be able to retain the original information, we need a name to\n"
+ "ps name mapping for those cases where they differ. That's what\n"
+ "you see below.\n"
+ )
+ writer.newline()
+ items = sorted(self.mapping.items())
+ for name, psName in items:
+ writer.simpletag("psName", name=name, psName=psName)
+ writer.newline()
+ writer.endtag("psNames")
+ writer.newline()
+ if hasattr(self, "extraNames"):
+ writer.begintag("extraNames")
+ writer.newline()
+ writer.comment(
+ "following are the name that are not taken from the standard Mac glyph order"
+ )
+ writer.newline()
+ for name in self.extraNames:
+ writer.simpletag("psName", name=name)
+ writer.newline()
+ writer.endtag("extraNames")
+ writer.newline()
+ if hasattr(self, "data"):
+ writer.begintag("hexdata")
+ writer.newline()
+ writer.dumphex(self.data)
+ writer.endtag("hexdata")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name not in ("psNames", "extraNames", "hexdata"):
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "psNames":
+ self.mapping = {}
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "psName":
+ self.mapping[attrs["name"]] = attrs["psName"]
+ elif name == "extraNames":
+ self.extraNames = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ if name == "psName":
+ self.extraNames.append(attrs["name"])
+ else:
+ self.data = readHex(content)
def unpackPStrings(data, n):
- # extract n Pascal strings from data.
- # if there is not enough data, use ""
+ # extract n Pascal strings from data.
+ # if there is not enough data, use ""
- strings = []
- index = 0
- dataLen = len(data)
+ strings = []
+ index = 0
+ dataLen = len(data)
- for _ in range(n):
- if dataLen <= index:
- length = 0
- else:
- length = byteord(data[index])
- index += 1
+ for _ in range(n):
+ if dataLen <= index:
+ length = 0
+ else:
+ length = byteord(data[index])
+ index += 1
- if dataLen <= index + length - 1:
- name = ""
- else:
- name = tostr(data[index:index+length], encoding="latin1")
- strings.append (name)
- index += length
+ if dataLen <= index + length - 1:
+ name = ""
+ else:
+ name = tostr(data[index : index + length], encoding="latin1")
+ strings.append(name)
+ index += length
- if index < dataLen:
- log.warning("%d extra bytes in post.stringData array", dataLen - index)
+ if index < dataLen:
+ log.warning("%d extra bytes in post.stringData array", dataLen - index)
- elif dataLen < index:
- log.warning("not enough data in post.stringData array")
+ elif dataLen < index:
+ log.warning("not enough data in post.stringData array")
- return strings
+ return strings
def packPStrings(strings):
- data = b""
- for s in strings:
- data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
- return data
+ data = b""
+ for s in strings:
+ data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
+ return data
diff --git a/Lib/fontTools/ttLib/tables/_p_r_e_p.py b/Lib/fontTools/ttLib/tables/_p_r_e_p.py
index 7f517fb8..b4b92f3e 100644
--- a/Lib/fontTools/ttLib/tables/_p_r_e_p.py
+++ b/Lib/fontTools/ttLib/tables/_p_r_e_p.py
@@ -2,5 +2,6 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("fpgm")
+
class table__p_r_e_p(superclass):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/_s_b_i_x.py b/Lib/fontTools/ttLib/tables/_s_b_i_x.py
index c4b2ad38..29b82c3e 100644
--- a/Lib/fontTools/ttLib/tables/_s_b_i_x.py
+++ b/Lib/fontTools/ttLib/tables/_s_b_i_x.py
@@ -28,88 +28,92 @@ sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat)
class table__s_b_i_x(DefaultTable.DefaultTable):
-
- def __init__(self, tag=None):
- DefaultTable.DefaultTable.__init__(self, tag)
- self.version = 1
- self.flags = 1
- self.numStrikes = 0
- self.strikes = {}
- self.strikeOffsets = []
-
- def decompile(self, data, ttFont):
- # read table header
- sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self)
- # collect offsets to individual strikes in self.strikeOffsets
- for i in range(self.numStrikes):
- current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
- offset_entry = sbixStrikeOffset()
- sstruct.unpack(sbixStrikeOffsetFormat, \
- data[current_offset:current_offset+sbixStrikeOffsetFormatSize], \
- offset_entry)
- self.strikeOffsets.append(offset_entry.strikeOffset)
-
- # decompile Strikes
- for i in range(self.numStrikes-1, -1, -1):
- current_strike = Strike(rawdata=data[self.strikeOffsets[i]:])
- data = data[:self.strikeOffsets[i]]
- current_strike.decompile(ttFont)
- #print " Strike length: %xh" % len(bitmapSetData)
- #print "Number of Glyph entries:", len(current_strike.glyphs)
- if current_strike.ppem in self.strikes:
- from fontTools import ttLib
- raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
- self.strikes[current_strike.ppem] = current_strike
-
- # after the glyph data records have been extracted, we don't need the offsets anymore
- del self.strikeOffsets
- del self.numStrikes
-
- def compile(self, ttFont):
- sbixData = b""
- self.numStrikes = len(self.strikes)
- sbixHeader = sstruct.pack(sbixHeaderFormat, self)
-
- # calculate offset to start of first strike
- setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes
-
- for si in sorted(self.strikes.keys()):
- current_strike = self.strikes[si]
- current_strike.compile(ttFont)
- # append offset to this strike to table header
- current_strike.strikeOffset = setOffset
- sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
- setOffset += len(current_strike.data)
- sbixData += current_strike.data
-
- return sbixHeader + sbixData
-
- def toXML(self, xmlWriter, ttFont):
- xmlWriter.simpletag("version", value=self.version)
- xmlWriter.newline()
- xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
- xmlWriter.newline()
- for i in sorted(self.strikes.keys()):
- self.strikes[i].toXML(xmlWriter, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name =="version":
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "flags":
- setattr(self, name, binary2num(attrs["value"]))
- elif name == "strike":
- current_strike = Strike()
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- current_strike.fromXML(name, attrs, content, ttFont)
- self.strikes[current_strike.ppem] = current_strike
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("can't handle '%s' element" % name)
+ def __init__(self, tag=None):
+ DefaultTable.DefaultTable.__init__(self, tag)
+ self.version = 1
+ self.flags = 1
+ self.numStrikes = 0
+ self.strikes = {}
+ self.strikeOffsets = []
+
+ def decompile(self, data, ttFont):
+ # read table header
+ sstruct.unpack(sbixHeaderFormat, data[:sbixHeaderFormatSize], self)
+ # collect offsets to individual strikes in self.strikeOffsets
+ for i in range(self.numStrikes):
+ current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
+ offset_entry = sbixStrikeOffset()
+ sstruct.unpack(
+ sbixStrikeOffsetFormat,
+ data[current_offset : current_offset + sbixStrikeOffsetFormatSize],
+ offset_entry,
+ )
+ self.strikeOffsets.append(offset_entry.strikeOffset)
+
+ # decompile Strikes
+ for i in range(self.numStrikes - 1, -1, -1):
+ current_strike = Strike(rawdata=data[self.strikeOffsets[i] :])
+ data = data[: self.strikeOffsets[i]]
+ current_strike.decompile(ttFont)
+ # print " Strike length: %xh" % len(bitmapSetData)
+ # print "Number of Glyph entries:", len(current_strike.glyphs)
+ if current_strike.ppem in self.strikes:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
+ self.strikes[current_strike.ppem] = current_strike
+
+ # after the glyph data records have been extracted, we don't need the offsets anymore
+ del self.strikeOffsets
+ del self.numStrikes
+
+ def compile(self, ttFont):
+ sbixData = b""
+ self.numStrikes = len(self.strikes)
+ sbixHeader = sstruct.pack(sbixHeaderFormat, self)
+
+ # calculate offset to start of first strike
+ setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes
+
+ for si in sorted(self.strikes.keys()):
+ current_strike = self.strikes[si]
+ current_strike.compile(ttFont)
+ # append offset to this strike to table header
+ current_strike.strikeOffset = setOffset
+ sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
+ setOffset += len(current_strike.data)
+ sbixData += current_strike.data
+
+ return sbixHeader + sbixData
+
+ def toXML(self, xmlWriter, ttFont):
+ xmlWriter.simpletag("version", value=self.version)
+ xmlWriter.newline()
+ xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
+ xmlWriter.newline()
+ for i in sorted(self.strikes.keys()):
+ self.strikes[i].toXML(xmlWriter, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "flags":
+ setattr(self, name, binary2num(attrs["value"]))
+ elif name == "strike":
+ current_strike = Strike()
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ current_strike.fromXML(name, attrs, content, ttFont)
+ self.strikes[current_strike.ppem] = current_strike
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
# Helper classes
+
class sbixStrikeOffset(object):
- pass
+ pass
diff --git a/Lib/fontTools/ttLib/tables/_t_r_a_k.py b/Lib/fontTools/ttLib/tables/_t_r_a_k.py
index 3052496f..0d1b313e 100644
--- a/Lib/fontTools/ttLib/tables/_t_r_a_k.py
+++ b/Lib/fontTools/ttLib/tables/_t_r_a_k.py
@@ -1,9 +1,9 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi,
- floatToFixedToStr as fl2str,
- strToFixedToFloat as str2fl,
+ fixedToFloat as fi2fl,
+ floatToFixed as fl2fi,
+ floatToFixedToStr as fl2str,
+ strToFixedToFloat as str2fl,
)
from fontTools.misc.textTools import bytesjoin, safeEval
from fontTools.ttLib import TTLibError
@@ -58,257 +58,268 @@ PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT)
class table__t_r_a_k(DefaultTable.DefaultTable):
- dependencies = ['name']
-
- def compile(self, ttFont):
- dataList = []
- offset = TRAK_HEADER_FORMAT_SIZE
- for direction in ('horiz', 'vert'):
- trackData = getattr(self, direction + 'Data', TrackData())
- offsetName = direction + 'Offset'
- # set offset to 0 if None or empty
- if not trackData:
- setattr(self, offsetName, 0)
- continue
- # TrackData table format must be longword aligned
- alignedOffset = (offset + 3) & ~3
- padding, offset = b"\x00"*(alignedOffset - offset), alignedOffset
- setattr(self, offsetName, offset)
-
- data = trackData.compile(offset)
- offset += len(data)
- dataList.append(padding + data)
-
- self.reserved = 0
- tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList)
- return tableData
-
- def decompile(self, data, ttFont):
- sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self)
- for direction in ('horiz', 'vert'):
- trackData = TrackData()
- offset = getattr(self, direction + 'Offset')
- if offset != 0:
- trackData.decompile(data, offset)
- setattr(self, direction + 'Data', trackData)
-
- def toXML(self, writer, ttFont):
- writer.simpletag('version', value=self.version)
- writer.newline()
- writer.simpletag('format', value=self.format)
- writer.newline()
- for direction in ('horiz', 'vert'):
- dataName = direction + 'Data'
- writer.begintag(dataName)
- writer.newline()
- trackData = getattr(self, dataName, TrackData())
- trackData.toXML(writer, ttFont)
- writer.endtag(dataName)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == 'version':
- self.version = safeEval(attrs['value'])
- elif name == 'format':
- self.format = safeEval(attrs['value'])
- elif name in ('horizData', 'vertData'):
- trackData = TrackData()
- setattr(self, name, trackData)
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content_ = element
- trackData.fromXML(name, attrs, content_, ttFont)
+ dependencies = ["name"]
+
+ def compile(self, ttFont):
+ dataList = []
+ offset = TRAK_HEADER_FORMAT_SIZE
+ for direction in ("horiz", "vert"):
+ trackData = getattr(self, direction + "Data", TrackData())
+ offsetName = direction + "Offset"
+ # set offset to 0 if None or empty
+ if not trackData:
+ setattr(self, offsetName, 0)
+ continue
+ # TrackData table format must be longword aligned
+ alignedOffset = (offset + 3) & ~3
+ padding, offset = b"\x00" * (alignedOffset - offset), alignedOffset
+ setattr(self, offsetName, offset)
+
+ data = trackData.compile(offset)
+ offset += len(data)
+ dataList.append(padding + data)
+
+ self.reserved = 0
+ tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList)
+ return tableData
+
+ def decompile(self, data, ttFont):
+ sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self)
+ for direction in ("horiz", "vert"):
+ trackData = TrackData()
+ offset = getattr(self, direction + "Offset")
+ if offset != 0:
+ trackData.decompile(data, offset)
+ setattr(self, direction + "Data", trackData)
+
+ def toXML(self, writer, ttFont):
+ writer.simpletag("version", value=self.version)
+ writer.newline()
+ writer.simpletag("format", value=self.format)
+ writer.newline()
+ for direction in ("horiz", "vert"):
+ dataName = direction + "Data"
+ writer.begintag(dataName)
+ writer.newline()
+ trackData = getattr(self, dataName, TrackData())
+ trackData.toXML(writer, ttFont)
+ writer.endtag(dataName)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "version":
+ self.version = safeEval(attrs["value"])
+ elif name == "format":
+ self.format = safeEval(attrs["value"])
+ elif name in ("horizData", "vertData"):
+ trackData = TrackData()
+ setattr(self, name, trackData)
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content_ = element
+ trackData.fromXML(name, attrs, content_, ttFont)
class TrackData(MutableMapping):
-
- def __init__(self, initialdata={}):
- self._map = dict(initialdata)
-
- def compile(self, offset):
- nTracks = len(self)
- sizes = self.sizes()
- nSizes = len(sizes)
-
- # offset to the start of the size subtable
- offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE*nTracks
- trackDataHeader = sstruct.pack(
- TRACK_DATA_FORMAT,
- {'nTracks': nTracks, 'nSizes': nSizes, 'sizeTableOffset': offset})
-
- entryDataList = []
- perSizeDataList = []
- # offset to per-size tracking values
- offset += SIZE_VALUE_FORMAT_SIZE*nSizes
- # sort track table entries by track value
- for track, entry in sorted(self.items()):
- assert entry.nameIndex is not None
- entry.track = track
- entry.offset = offset
- entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)]
- # sort per-size values by size
- for size, value in sorted(entry.items()):
- perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)]
- offset += PER_SIZE_VALUE_FORMAT_SIZE*nSizes
- # sort size values
- sizeDataList = [struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)]
-
- data = bytesjoin([trackDataHeader] + entryDataList + sizeDataList + perSizeDataList)
- return data
-
- def decompile(self, data, offset):
- # initial offset is from the start of trak table to the current TrackData
- trackDataHeader = data[offset:offset+TRACK_DATA_FORMAT_SIZE]
- if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile TrackData header')
- sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self)
- offset += TRACK_DATA_FORMAT_SIZE
-
- nSizes = self.nSizes
- sizeTableOffset = self.sizeTableOffset
- sizeTable = []
- for i in range(nSizes):
- sizeValueData = data[sizeTableOffset:sizeTableOffset+SIZE_VALUE_FORMAT_SIZE]
- if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile TrackData size subtable')
- sizeValue, = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData)
- sizeTable.append(fi2fl(sizeValue, 16))
- sizeTableOffset += SIZE_VALUE_FORMAT_SIZE
-
- for i in range(self.nTracks):
- entry = TrackTableEntry()
- entryData = data[offset:offset+TRACK_TABLE_ENTRY_FORMAT_SIZE]
- if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile TrackTableEntry record')
- sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry)
- perSizeOffset = entry.offset
- for j in range(nSizes):
- size = sizeTable[j]
- perSizeValueData = data[perSizeOffset:perSizeOffset+PER_SIZE_VALUE_FORMAT_SIZE]
- if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE:
- raise TTLibError('not enough data to decompile per-size track values')
- perSizeValue, = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData)
- entry[size] = perSizeValue
- perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE
- self[entry.track] = entry
- offset += TRACK_TABLE_ENTRY_FORMAT_SIZE
-
- def toXML(self, writer, ttFont):
- nTracks = len(self)
- nSizes = len(self.sizes())
- writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes))
- writer.newline()
- for track, entry in sorted(self.items()):
- assert entry.nameIndex is not None
- entry.track = track
- entry.toXML(writer, ttFont)
-
- def fromXML(self, name, attrs, content, ttFont):
- if name != 'trackEntry':
- return
- entry = TrackTableEntry()
- entry.fromXML(name, attrs, content, ttFont)
- self[entry.track] = entry
-
- def sizes(self):
- if not self:
- return frozenset()
- tracks = list(self.tracks())
- sizes = self[tracks.pop(0)].sizes()
- for track in tracks:
- entrySizes = self[track].sizes()
- if sizes != entrySizes:
- raise TTLibError(
- "'trak' table entries must specify the same sizes: "
- "%s != %s" % (sorted(sizes), sorted(entrySizes)))
- return frozenset(sizes)
-
- def __getitem__(self, track):
- return self._map[track]
-
- def __delitem__(self, track):
- del self._map[track]
-
- def __setitem__(self, track, entry):
- self._map[track] = entry
-
- def __len__(self):
- return len(self._map)
-
- def __iter__(self):
- return iter(self._map)
-
- def keys(self):
- return self._map.keys()
-
- tracks = keys
-
- def __repr__(self):
- return "TrackData({})".format(self._map if self else "")
+ def __init__(self, initialdata={}):
+ self._map = dict(initialdata)
+
+ def compile(self, offset):
+ nTracks = len(self)
+ sizes = self.sizes()
+ nSizes = len(sizes)
+
+ # offset to the start of the size subtable
+ offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE * nTracks
+ trackDataHeader = sstruct.pack(
+ TRACK_DATA_FORMAT,
+ {"nTracks": nTracks, "nSizes": nSizes, "sizeTableOffset": offset},
+ )
+
+ entryDataList = []
+ perSizeDataList = []
+ # offset to per-size tracking values
+ offset += SIZE_VALUE_FORMAT_SIZE * nSizes
+ # sort track table entries by track value
+ for track, entry in sorted(self.items()):
+ assert entry.nameIndex is not None
+ entry.track = track
+ entry.offset = offset
+ entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)]
+ # sort per-size values by size
+ for size, value in sorted(entry.items()):
+ perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)]
+ offset += PER_SIZE_VALUE_FORMAT_SIZE * nSizes
+ # sort size values
+ sizeDataList = [
+ struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)
+ ]
+
+ data = bytesjoin(
+ [trackDataHeader] + entryDataList + sizeDataList + perSizeDataList
+ )
+ return data
+
+ def decompile(self, data, offset):
+ # initial offset is from the start of trak table to the current TrackData
+ trackDataHeader = data[offset : offset + TRACK_DATA_FORMAT_SIZE]
+ if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE:
+ raise TTLibError("not enough data to decompile TrackData header")
+ sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self)
+ offset += TRACK_DATA_FORMAT_SIZE
+
+ nSizes = self.nSizes
+ sizeTableOffset = self.sizeTableOffset
+ sizeTable = []
+ for i in range(nSizes):
+ sizeValueData = data[
+ sizeTableOffset : sizeTableOffset + SIZE_VALUE_FORMAT_SIZE
+ ]
+ if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE:
+ raise TTLibError("not enough data to decompile TrackData size subtable")
+ (sizeValue,) = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData)
+ sizeTable.append(fi2fl(sizeValue, 16))
+ sizeTableOffset += SIZE_VALUE_FORMAT_SIZE
+
+ for i in range(self.nTracks):
+ entry = TrackTableEntry()
+ entryData = data[offset : offset + TRACK_TABLE_ENTRY_FORMAT_SIZE]
+ if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE:
+ raise TTLibError("not enough data to decompile TrackTableEntry record")
+ sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry)
+ perSizeOffset = entry.offset
+ for j in range(nSizes):
+ size = sizeTable[j]
+ perSizeValueData = data[
+ perSizeOffset : perSizeOffset + PER_SIZE_VALUE_FORMAT_SIZE
+ ]
+ if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE:
+ raise TTLibError(
+ "not enough data to decompile per-size track values"
+ )
+ (perSizeValue,) = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData)
+ entry[size] = perSizeValue
+ perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE
+ self[entry.track] = entry
+ offset += TRACK_TABLE_ENTRY_FORMAT_SIZE
+
+ def toXML(self, writer, ttFont):
+ nTracks = len(self)
+ nSizes = len(self.sizes())
+ writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes))
+ writer.newline()
+ for track, entry in sorted(self.items()):
+ assert entry.nameIndex is not None
+ entry.track = track
+ entry.toXML(writer, ttFont)
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name != "trackEntry":
+ return
+ entry = TrackTableEntry()
+ entry.fromXML(name, attrs, content, ttFont)
+ self[entry.track] = entry
+
+ def sizes(self):
+ if not self:
+ return frozenset()
+ tracks = list(self.tracks())
+ sizes = self[tracks.pop(0)].sizes()
+ for track in tracks:
+ entrySizes = self[track].sizes()
+ if sizes != entrySizes:
+ raise TTLibError(
+ "'trak' table entries must specify the same sizes: "
+ "%s != %s" % (sorted(sizes), sorted(entrySizes))
+ )
+ return frozenset(sizes)
+
+ def __getitem__(self, track):
+ return self._map[track]
+
+ def __delitem__(self, track):
+ del self._map[track]
+
+ def __setitem__(self, track, entry):
+ self._map[track] = entry
+
+ def __len__(self):
+ return len(self._map)
+
+ def __iter__(self):
+ return iter(self._map)
+
+ def keys(self):
+ return self._map.keys()
+
+ tracks = keys
+
+ def __repr__(self):
+ return "TrackData({})".format(self._map if self else "")
class TrackTableEntry(MutableMapping):
-
- def __init__(self, values={}, nameIndex=None):
- self.nameIndex = nameIndex
- self._map = dict(values)
-
- def toXML(self, writer, ttFont):
- name = ttFont["name"].getDebugName(self.nameIndex)
- writer.begintag(
- "trackEntry",
- (('value', fl2str(self.track, 16)), ('nameIndex', self.nameIndex)))
- writer.newline()
- if name:
- writer.comment(name)
- writer.newline()
- for size, perSizeValue in sorted(self.items()):
- writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue)
- writer.newline()
- writer.endtag("trackEntry")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- self.track = str2fl(attrs['value'], 16)
- self.nameIndex = safeEval(attrs['nameIndex'])
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, _ = element
- if name != 'track':
- continue
- size = str2fl(attrs['size'], 16)
- self[size] = safeEval(attrs['value'])
-
- def __getitem__(self, size):
- return self._map[size]
-
- def __delitem__(self, size):
- del self._map[size]
-
- def __setitem__(self, size, value):
- self._map[size] = value
-
- def __len__(self):
- return len(self._map)
-
- def __iter__(self):
- return iter(self._map)
-
- def keys(self):
- return self._map.keys()
-
- sizes = keys
-
- def __repr__(self):
- return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex)
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return NotImplemented
- return self.nameIndex == other.nameIndex and dict(self) == dict(other)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+ def __init__(self, values={}, nameIndex=None):
+ self.nameIndex = nameIndex
+ self._map = dict(values)
+
+ def toXML(self, writer, ttFont):
+ name = ttFont["name"].getDebugName(self.nameIndex)
+ writer.begintag(
+ "trackEntry",
+ (("value", fl2str(self.track, 16)), ("nameIndex", self.nameIndex)),
+ )
+ writer.newline()
+ if name:
+ writer.comment(name)
+ writer.newline()
+ for size, perSizeValue in sorted(self.items()):
+ writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue)
+ writer.newline()
+ writer.endtag("trackEntry")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ self.track = str2fl(attrs["value"], 16)
+ self.nameIndex = safeEval(attrs["nameIndex"])
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, _ = element
+ if name != "track":
+ continue
+ size = str2fl(attrs["size"], 16)
+ self[size] = safeEval(attrs["value"])
+
+ def __getitem__(self, size):
+ return self._map[size]
+
+ def __delitem__(self, size):
+ del self._map[size]
+
+ def __setitem__(self, size, value):
+ self._map[size] = value
+
+ def __len__(self):
+ return len(self._map)
+
+ def __iter__(self):
+ return iter(self._map)
+
+ def keys(self):
+ return self._map.keys()
+
+ sizes = keys
+
+ def __repr__(self):
+ return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self.nameIndex == other.nameIndex and dict(self) == dict(other)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
index 2bb24667..de7ce245 100644
--- a/Lib/fontTools/ttLib/tables/_v_h_e_a.py
+++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
@@ -1,7 +1,9 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
- ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
+ ensureVersionIsLong as fi2ve,
+ versionToFixed as ve2fi,
+)
from . import DefaultTable
import math
@@ -27,92 +29,99 @@ vheaFormat = """
numberOfVMetrics: H
"""
+
class table__v_h_e_a(DefaultTable.DefaultTable):
+ # Note: Keep in sync with table__h_h_e_a
+
+ dependencies = ["vmtx", "glyf", "CFF ", "CFF2"]
+
+ def decompile(self, data, ttFont):
+ sstruct.unpack(vheaFormat, data, self)
+
+ def compile(self, ttFont):
+ if ttFont.recalcBBoxes and (
+ ttFont.isLoaded("glyf")
+ or ttFont.isLoaded("CFF ")
+ or ttFont.isLoaded("CFF2")
+ ):
+ self.recalc(ttFont)
+ self.tableVersion = fi2ve(self.tableVersion)
+ return sstruct.pack(vheaFormat, self)
+
+ def recalc(self, ttFont):
+ if "vmtx" not in ttFont:
+ return
+
+ vmtxTable = ttFont["vmtx"]
+ self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())
+
+ boundsHeightDict = {}
+ if "glyf" in ttFont:
+ glyfTable = ttFont["glyf"]
+ for name in ttFont.getGlyphOrder():
+ g = glyfTable[name]
+ if g.numberOfContours == 0:
+ continue
+ if g.numberOfContours < 0 and not hasattr(g, "yMax"):
+ # Composite glyph without extents set.
+ # Calculate those.
+ g.recalcBounds(glyfTable)
+ boundsHeightDict[name] = g.yMax - g.yMin
+ elif "CFF " in ttFont or "CFF2" in ttFont:
+ if "CFF " in ttFont:
+ topDict = ttFont["CFF "].cff.topDictIndex[0]
+ else:
+ topDict = ttFont["CFF2"].cff.topDictIndex[0]
+ charStrings = topDict.CharStrings
+ for name in ttFont.getGlyphOrder():
+ cs = charStrings[name]
+ bounds = cs.calcBounds(charStrings)
+ if bounds is not None:
+ boundsHeightDict[name] = int(
+ math.ceil(bounds[3]) - math.floor(bounds[1])
+ )
+
+ if boundsHeightDict:
+ minTopSideBearing = float("inf")
+ minBottomSideBearing = float("inf")
+ yMaxExtent = -float("inf")
+ for name, boundsHeight in boundsHeightDict.items():
+ advanceHeight, tsb = vmtxTable[name]
+ bsb = advanceHeight - tsb - boundsHeight
+ extent = tsb + boundsHeight
+ minTopSideBearing = min(minTopSideBearing, tsb)
+ minBottomSideBearing = min(minBottomSideBearing, bsb)
+ yMaxExtent = max(yMaxExtent, extent)
+ self.minTopSideBearing = minTopSideBearing
+ self.minBottomSideBearing = minBottomSideBearing
+ self.yMaxExtent = yMaxExtent
+
+ else: # No glyph has outlines.
+ self.minTopSideBearing = 0
+ self.minBottomSideBearing = 0
+ self.yMaxExtent = 0
+
+ def toXML(self, writer, ttFont):
+ formatstring, names, fixes = sstruct.getformat(vheaFormat)
+ for name in names:
+ value = getattr(self, name)
+ if name == "tableVersion":
+ value = fi2ve(value)
+ value = "0x%08x" % value
+ writer.simpletag(name, value=value)
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "tableVersion":
+ setattr(self, name, ve2fi(attrs["value"]))
+ return
+ setattr(self, name, safeEval(attrs["value"]))
+
+ # reserved0 is caretOffset for legacy reasons
+ @property
+ def reserved0(self):
+ return self.caretOffset
- # Note: Keep in sync with table__h_h_e_a
-
- dependencies = ['vmtx', 'glyf', 'CFF ', 'CFF2']
-
- def decompile(self, data, ttFont):
- sstruct.unpack(vheaFormat, data, self)
-
- def compile(self, ttFont):
- if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ') or ttFont.isLoaded('CFF2')):
- self.recalc(ttFont)
- self.tableVersion = fi2ve(self.tableVersion)
- return sstruct.pack(vheaFormat, self)
-
- def recalc(self, ttFont):
- if 'vmtx' in ttFont:
- vmtxTable = ttFont['vmtx']
- self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())
-
- boundsHeightDict = {}
- if 'glyf' in ttFont:
- glyfTable = ttFont['glyf']
- for name in ttFont.getGlyphOrder():
- g = glyfTable[name]
- if g.numberOfContours == 0:
- continue
- if g.numberOfContours < 0 and not hasattr(g, "yMax"):
- # Composite glyph without extents set.
- # Calculate those.
- g.recalcBounds(glyfTable)
- boundsHeightDict[name] = g.yMax - g.yMin
- elif 'CFF ' in ttFont or 'CFF2' in ttFont:
- if 'CFF ' in ttFont:
- topDict = ttFont['CFF '].cff.topDictIndex[0]
- else:
- topDict = ttFont['CFF2'].cff.topDictIndex[0]
- charStrings = topDict.CharStrings
- for name in ttFont.getGlyphOrder():
- cs = charStrings[name]
- bounds = cs.calcBounds(charStrings)
- if bounds is not None:
- boundsHeightDict[name] = int(
- math.ceil(bounds[3]) - math.floor(bounds[1]))
-
- if boundsHeightDict:
- minTopSideBearing = float('inf')
- minBottomSideBearing = float('inf')
- yMaxExtent = -float('inf')
- for name, boundsHeight in boundsHeightDict.items():
- advanceHeight, tsb = vmtxTable[name]
- bsb = advanceHeight - tsb - boundsHeight
- extent = tsb + boundsHeight
- minTopSideBearing = min(minTopSideBearing, tsb)
- minBottomSideBearing = min(minBottomSideBearing, bsb)
- yMaxExtent = max(yMaxExtent, extent)
- self.minTopSideBearing = minTopSideBearing
- self.minBottomSideBearing = minBottomSideBearing
- self.yMaxExtent = yMaxExtent
-
- else: # No glyph has outlines.
- self.minTopSideBearing = 0
- self.minBottomSideBearing = 0
- self.yMaxExtent = 0
-
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(vheaFormat)
- for name in names:
- value = getattr(self, name)
- if name == "tableVersion":
- value = fi2ve(value)
- value = "0x%08x" % value
- writer.simpletag(name, value=value)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableVersion":
- setattr(self, name, ve2fi(attrs["value"]))
- return
- setattr(self, name, safeEval(attrs["value"]))
-
- # reserved0 is caretOffset for legacy reasons
- @property
- def reserved0(self):
- return self.caretOffset
-
- @reserved0.setter
- def reserved0(self, value):
- self.caretOffset = value
+ @reserved0.setter
+ def reserved0(self, value):
+ self.caretOffset = value
diff --git a/Lib/fontTools/ttLib/tables/_v_m_t_x.py b/Lib/fontTools/ttLib/tables/_v_m_t_x.py
index fc818d83..a13304c3 100644
--- a/Lib/fontTools/ttLib/tables/_v_m_t_x.py
+++ b/Lib/fontTools/ttLib/tables/_v_m_t_x.py
@@ -2,9 +2,9 @@ from fontTools import ttLib
superclass = ttLib.getTableClass("hmtx")
-class table__v_m_t_x(superclass):
- headerTag = 'vhea'
- advanceName = 'height'
- sideBearingName = 'tsb'
- numberOfMetricsName = 'numberOfVMetrics'
+class table__v_m_t_x(superclass):
+ headerTag = "vhea"
+ advanceName = "height"
+ sideBearingName = "tsb"
+ numberOfMetricsName = "numberOfVMetrics"
diff --git a/Lib/fontTools/ttLib/tables/asciiTable.py b/Lib/fontTools/ttLib/tables/asciiTable.py
index a97d92df..6f81c526 100644
--- a/Lib/fontTools/ttLib/tables/asciiTable.py
+++ b/Lib/fontTools/ttLib/tables/asciiTable.py
@@ -3,19 +3,18 @@ from . import DefaultTable
class asciiTable(DefaultTable.DefaultTable):
+ def toXML(self, writer, ttFont):
+ data = tostr(self.data)
+ # removing null bytes. XXX needed??
+ data = data.split("\0")
+ data = strjoin(data)
+ writer.begintag("source")
+ writer.newline()
+ writer.write_noindent(data)
+ writer.newline()
+ writer.endtag("source")
+ writer.newline()
- def toXML(self, writer, ttFont):
- data = tostr(self.data)
- # removing null bytes. XXX needed??
- data = data.split('\0')
- data = strjoin(data)
- writer.begintag("source")
- writer.newline()
- writer.write_noindent(data)
- writer.newline()
- writer.endtag("source")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- lines = strjoin(content).split("\n")
- self.data = tobytes("\n".join(lines[1:-1]))
+ def fromXML(self, name, attrs, content, ttFont):
+ lines = strjoin(content).split("\n")
+ self.data = tobytes("\n".join(lines[1:-1]))
diff --git a/Lib/fontTools/ttLib/tables/grUtils.py b/Lib/fontTools/ttLib/tables/grUtils.py
index a60df234..785684b1 100644
--- a/Lib/fontTools/ttLib/tables/grUtils.py
+++ b/Lib/fontTools/ttLib/tables/grUtils.py
@@ -1,4 +1,5 @@
import struct, warnings
+
try:
import lz4
except ImportError:
@@ -6,12 +7,13 @@ except ImportError:
else:
import lz4.block
-#old scheme for VERSION < 0.9 otherwise use lz4.block
+# old scheme for VERSION < 0.9 otherwise use lz4.block
+
def decompress(data):
(compression,) = struct.unpack(">L", data[4:8])
scheme = compression >> 27
- size = compression & 0x07ffffff
+ size = compression & 0x07FFFFFF
if scheme == 0:
pass
elif scheme == 1 and lz4:
@@ -24,23 +26,27 @@ def decompress(data):
warnings.warn("Table is compressed with an unsupported compression scheme")
return (data, scheme)
+
def compress(scheme, data):
- hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07ffffff))
- if scheme == 0 :
+ hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07FFFFFF))
+ if scheme == 0:
return data
elif scheme == 1 and lz4:
- res = lz4.block.compress(data, mode='high_compression', compression=16, store_size=False)
+ res = lz4.block.compress(
+ data, mode="high_compression", compression=16, store_size=False
+ )
return hdr + res
else:
warnings.warn("Table failed to compress by unsupported compression scheme")
return data
+
def _entries(attrs, sameval):
ak = 0
vals = []
lastv = 0
- for k,v in attrs:
- if len(vals) and (k != ak + 1 or (sameval and v != lastv)) :
+ for k, v in attrs:
+ if len(vals) and (k != ak + 1 or (sameval and v != lastv)):
yield (ak - len(vals) + 1, len(vals), vals)
vals = []
ak = k
@@ -48,14 +54,16 @@ def _entries(attrs, sameval):
lastv = v
yield (ak - len(vals) + 1, len(vals), vals)
-def entries(attributes, sameval = False):
- g = _entries(sorted(attributes.items(), key=lambda x:int(x[0])), sameval)
+
+def entries(attributes, sameval=False):
+ g = _entries(sorted(attributes.items(), key=lambda x: int(x[0])), sameval)
return g
+
def bininfo(num, size=1):
if num == 0:
return struct.pack(">4H", 0, 0, 0, 0)
- srange = 1;
+ srange = 1
select = 0
while srange <= num:
srange *= 2
@@ -66,16 +74,19 @@ def bininfo(num, size=1):
shift = num * size - srange
return struct.pack(">4H", num, srange, select, shift)
+
def num2tag(n):
if n < 0x200000:
return str(n)
else:
- return struct.unpack('4s', struct.pack('>L', n))[0].replace(b'\000', b'').decode()
+ return (
+ struct.unpack("4s", struct.pack(">L", n))[0].replace(b"\000", b"").decode()
+ )
+
def tag2num(n):
try:
return int(n)
except ValueError:
- n = (n+" ")[:4]
- return struct.unpack('>L', n.encode('ascii'))[0]
-
+ n = (n + " ")[:4]
+ return struct.unpack(">L", n.encode("ascii"))[0]
diff --git a/Lib/fontTools/ttLib/tables/otBase.py b/Lib/fontTools/ttLib/tables/otBase.py
index 1bd3198d..d565603b 100644
--- a/Lib/fontTools/ttLib/tables/otBase.py
+++ b/Lib/fontTools/ttLib/tables/otBase.py
@@ -13,1188 +13,1285 @@ log = logging.getLogger(__name__)
have_uharfbuzz = False
try:
- import uharfbuzz as hb
- # repack method added in uharfbuzz >= 0.23; if uharfbuzz *can* be
- # imported but repack method is missing, behave as if uharfbuzz
- # is not available (fallback to the slower Python implementation)
- have_uharfbuzz = callable(getattr(hb, "repack", None))
+ import uharfbuzz as hb
+
+ # repack method added in uharfbuzz >= 0.23; if uharfbuzz *can* be
+ # imported but repack method is missing, behave as if uharfbuzz
+ # is not available (fallback to the slower Python implementation)
+ have_uharfbuzz = callable(getattr(hb, "repack", None))
except ImportError:
- pass
+ pass
USE_HARFBUZZ_REPACKER = OPTIONS[f"{__name__}:USE_HARFBUZZ_REPACKER"]
+
class OverflowErrorRecord(object):
- def __init__(self, overflowTuple):
- self.tableType = overflowTuple[0]
- self.LookupListIndex = overflowTuple[1]
- self.SubTableIndex = overflowTuple[2]
- self.itemName = overflowTuple[3]
- self.itemIndex = overflowTuple[4]
+ def __init__(self, overflowTuple):
+ self.tableType = overflowTuple[0]
+ self.LookupListIndex = overflowTuple[1]
+ self.SubTableIndex = overflowTuple[2]
+ self.itemName = overflowTuple[3]
+ self.itemIndex = overflowTuple[4]
+
+ def __repr__(self):
+ return str(
+ (
+ self.tableType,
+ "LookupIndex:",
+ self.LookupListIndex,
+ "SubTableIndex:",
+ self.SubTableIndex,
+ "ItemName:",
+ self.itemName,
+ "ItemIndex:",
+ self.itemIndex,
+ )
+ )
- def __repr__(self):
- return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))
class OTLOffsetOverflowError(Exception):
- def __init__(self, overflowErrorRecord):
- self.value = overflowErrorRecord
+ def __init__(self, overflowErrorRecord):
+ self.value = overflowErrorRecord
+
+ def __str__(self):
+ return repr(self.value)
- def __str__(self):
- return repr(self.value)
class RepackerState(IntEnum):
- # Repacking control flow is implemnted using a state machine. The state machine table:
- #
- # State | Packing Success | Packing Failed | Exception Raised |
- # ------------+-----------------+----------------+------------------+
- # PURE_FT | Return result | PURE_FT | Return failure |
- # HB_FT | Return result | HB_FT | FT_FALLBACK |
- # FT_FALLBACK | HB_FT | FT_FALLBACK | Return failure |
+ # Repacking control flow is implemnted using a state machine. The state machine table:
+ #
+ # State | Packing Success | Packing Failed | Exception Raised |
+ # ------------+-----------------+----------------+------------------+
+ # PURE_FT | Return result | PURE_FT | Return failure |
+ # HB_FT | Return result | HB_FT | FT_FALLBACK |
+ # FT_FALLBACK | HB_FT | FT_FALLBACK | Return failure |
+
+ # Pack only with fontTools, don't allow sharing between extensions.
+ PURE_FT = 1
- # Pack only with fontTools, don't allow sharing between extensions.
- PURE_FT = 1
+ # Attempt to pack with harfbuzz (allowing sharing between extensions)
+ # use fontTools to attempt overflow resolution.
+ HB_FT = 2
- # Attempt to pack with harfbuzz (allowing sharing between extensions)
- # use fontTools to attempt overflow resolution.
- HB_FT = 2
+ # Fallback if HB/FT packing gets stuck. Pack only with fontTools, don't allow sharing between
+ # extensions.
+ FT_FALLBACK = 3
- # Fallback if HB/FT packing gets stuck. Pack only with fontTools, don't allow sharing between
- # extensions.
- FT_FALLBACK = 3
class BaseTTXConverter(DefaultTable):
- """Generic base class for TTX table converters. It functions as an
- adapter between the TTX (ttLib actually) table model and the model
- we use for OpenType tables, which is necessarily subtly different.
- """
-
- def decompile(self, data, font):
- """Create an object from the binary data. Called automatically on access."""
- from . import otTables
- reader = OTTableReader(data, tableTag=self.tableTag)
- tableClass = getattr(otTables, self.tableTag)
- self.table = tableClass()
- self.table.decompile(reader, font)
-
- def compile(self, font):
- """Compiles the table into binary. Called automatically on save."""
-
- # General outline:
- # Create a top-level OTTableWriter for the GPOS/GSUB table.
- # Call the compile method for the the table
- # for each 'converter' record in the table converter list
- # call converter's write method for each item in the value.
- # - For simple items, the write method adds a string to the
- # writer's self.items list.
- # - For Struct/Table/Subtable items, it add first adds new writer to the
- # to the writer's self.items, then calls the item's compile method.
- # This creates a tree of writers, rooted at the GUSB/GPOS writer, with
- # each writer representing a table, and the writer.items list containing
- # the child data strings and writers.
- # call the getAllData method
- # call _doneWriting, which removes duplicates
- # call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables
- # Traverse the flat list of tables, calling getDataLength on each to update their position
- # Traverse the flat list of tables again, calling getData each get the data in the table, now that
- # pos's and offset are known.
-
- # If a lookup subtable overflows an offset, we have to start all over.
- overflowRecord = None
- # this is 3-state option: default (None) means automatically use hb.repack or
- # silently fall back if it fails; True, use it and raise error if not possible
- # or it errors out; False, don't use it, even if you can.
- use_hb_repack = font.cfg[USE_HARFBUZZ_REPACKER]
- if self.tableTag in ("GSUB", "GPOS"):
- if use_hb_repack is False:
- log.debug(
- "hb.repack disabled, compiling '%s' with pure-python serializer",
- self.tableTag,
- )
- elif not have_uharfbuzz:
- if use_hb_repack is True:
- raise ImportError("No module named 'uharfbuzz'")
- else:
- assert use_hb_repack is None
- log.debug(
- "uharfbuzz not found, compiling '%s' with pure-python serializer",
- self.tableTag,
- )
-
- if (use_hb_repack in (None, True)
- and have_uharfbuzz
- and self.tableTag in ("GSUB", "GPOS")):
- state = RepackerState.HB_FT
- else:
- state = RepackerState.PURE_FT
-
- hb_first_error_logged = False
- lastOverflowRecord = None
- while True:
- try:
- writer = OTTableWriter(tableTag=self.tableTag)
- self.table.compile(writer, font)
- if state == RepackerState.HB_FT:
- return self.tryPackingHarfbuzz(writer, hb_first_error_logged)
- elif state == RepackerState.PURE_FT:
- return self.tryPackingFontTools(writer)
- elif state == RepackerState.FT_FALLBACK:
- # Run packing with FontTools only, but don't return the result as it will
- # not be optimally packed. Once a successful packing has been found, state is
- # changed back to harfbuzz packing to produce the final, optimal, packing.
- self.tryPackingFontTools(writer)
- log.debug("Re-enabling sharing between extensions and switching back to "
- "harfbuzz+fontTools packing.")
- state = RepackerState.HB_FT
-
- except OTLOffsetOverflowError as e:
- hb_first_error_logged = True
- ok = self.tryResolveOverflow(font, e, lastOverflowRecord)
- lastOverflowRecord = e.value
-
- if ok:
- continue
-
- if state is RepackerState.HB_FT:
- log.debug("Harfbuzz packing out of resolutions, disabling sharing between extensions and "
- "switching to fontTools only packing.")
- state = RepackerState.FT_FALLBACK
- else:
- raise
-
- def tryPackingHarfbuzz(self, writer, hb_first_error_logged):
- try:
- log.debug("serializing '%s' with hb.repack", self.tableTag)
- return writer.getAllDataUsingHarfbuzz(self.tableTag)
- except (ValueError, MemoryError, hb.RepackerError) as e:
- # Only log hb repacker errors the first time they occur in
- # the offset-overflow resolution loop, they are just noisy.
- # Maybe we can revisit this if/when uharfbuzz actually gives
- # us more info as to why hb.repack failed...
- if not hb_first_error_logged:
- error_msg = f"{type(e).__name__}"
- if str(e) != "":
- error_msg += f": {e}"
- log.warning(
- "hb.repack failed to serialize '%s', attempting fonttools resolutions "
- "; the error message was: %s",
- self.tableTag,
- error_msg,
- )
- hb_first_error_logged = True
- return writer.getAllData(remove_duplicate=False)
-
-
- def tryPackingFontTools(self, writer):
- return writer.getAllData()
-
-
- def tryResolveOverflow(self, font, e, lastOverflowRecord):
- ok = 0
- if lastOverflowRecord == e.value:
- # Oh well...
- return ok
-
- overflowRecord = e.value
- log.info("Attempting to fix OTLOffsetOverflowError %s", e)
-
- if overflowRecord.itemName is None:
- from .otTables import fixLookupOverFlows
- ok = fixLookupOverFlows(font, overflowRecord)
- else:
- from .otTables import fixSubTableOverFlows
- ok = fixSubTableOverFlows(font, overflowRecord)
-
- if ok:
- return ok
-
- # Try upgrading lookup to Extension and hope
- # that cross-lookup sharing not happening would
- # fix overflow...
- from .otTables import fixLookupOverFlows
- return fixLookupOverFlows(font, overflowRecord)
-
- def toXML(self, writer, font):
- self.table.toXML2(writer, font)
-
- def fromXML(self, name, attrs, content, font):
- from . import otTables
- if not hasattr(self, "table"):
- tableClass = getattr(otTables, self.tableTag)
- self.table = tableClass()
- self.table.fromXML(name, attrs, content, font)
- self.table.populateDefaults()
-
- def ensureDecompiled(self, recurse=True):
- self.table.ensureDecompiled(recurse=recurse)
+ """Generic base class for TTX table converters. It functions as an
+ adapter between the TTX (ttLib actually) table model and the model
+ we use for OpenType tables, which is necessarily subtly different.
+ """
+
+ def decompile(self, data, font):
+ """Create an object from the binary data. Called automatically on access."""
+ from . import otTables
+
+ reader = OTTableReader(data, tableTag=self.tableTag)
+ tableClass = getattr(otTables, self.tableTag)
+ self.table = tableClass()
+ self.table.decompile(reader, font)
+
+ def compile(self, font):
+ """Compiles the table into binary. Called automatically on save."""
+
+ # General outline:
+ # Create a top-level OTTableWriter for the GPOS/GSUB table.
+ # Call the compile method for the the table
+ # for each 'converter' record in the table converter list
+ # call converter's write method for each item in the value.
+ # - For simple items, the write method adds a string to the
+ # writer's self.items list.
+ # - For Struct/Table/Subtable items, it add first adds new writer to the
+ # to the writer's self.items, then calls the item's compile method.
+ # This creates a tree of writers, rooted at the GUSB/GPOS writer, with
+ # each writer representing a table, and the writer.items list containing
+ # the child data strings and writers.
+ # call the getAllData method
+ # call _doneWriting, which removes duplicates
+ # call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables
+ # Traverse the flat list of tables, calling getDataLength on each to update their position
+ # Traverse the flat list of tables again, calling getData each get the data in the table, now that
+ # pos's and offset are known.
+
+ # If a lookup subtable overflows an offset, we have to start all over.
+ overflowRecord = None
+ # this is 3-state option: default (None) means automatically use hb.repack or
+ # silently fall back if it fails; True, use it and raise error if not possible
+ # or it errors out; False, don't use it, even if you can.
+ use_hb_repack = font.cfg[USE_HARFBUZZ_REPACKER]
+ if self.tableTag in ("GSUB", "GPOS"):
+ if use_hb_repack is False:
+ log.debug(
+ "hb.repack disabled, compiling '%s' with pure-python serializer",
+ self.tableTag,
+ )
+ elif not have_uharfbuzz:
+ if use_hb_repack is True:
+ raise ImportError("No module named 'uharfbuzz'")
+ else:
+ assert use_hb_repack is None
+ log.debug(
+ "uharfbuzz not found, compiling '%s' with pure-python serializer",
+ self.tableTag,
+ )
+
+ if (
+ use_hb_repack in (None, True)
+ and have_uharfbuzz
+ and self.tableTag in ("GSUB", "GPOS")
+ ):
+ state = RepackerState.HB_FT
+ else:
+ state = RepackerState.PURE_FT
+
+ hb_first_error_logged = False
+ lastOverflowRecord = None
+ while True:
+ try:
+ writer = OTTableWriter(tableTag=self.tableTag)
+ self.table.compile(writer, font)
+ if state == RepackerState.HB_FT:
+ return self.tryPackingHarfbuzz(writer, hb_first_error_logged)
+ elif state == RepackerState.PURE_FT:
+ return self.tryPackingFontTools(writer)
+ elif state == RepackerState.FT_FALLBACK:
+ # Run packing with FontTools only, but don't return the result as it will
+ # not be optimally packed. Once a successful packing has been found, state is
+ # changed back to harfbuzz packing to produce the final, optimal, packing.
+ self.tryPackingFontTools(writer)
+ log.debug(
+ "Re-enabling sharing between extensions and switching back to "
+ "harfbuzz+fontTools packing."
+ )
+ state = RepackerState.HB_FT
+
+ except OTLOffsetOverflowError as e:
+ hb_first_error_logged = True
+ ok = self.tryResolveOverflow(font, e, lastOverflowRecord)
+ lastOverflowRecord = e.value
+
+ if ok:
+ continue
+
+ if state is RepackerState.HB_FT:
+ log.debug(
+ "Harfbuzz packing out of resolutions, disabling sharing between extensions and "
+ "switching to fontTools only packing."
+ )
+ state = RepackerState.FT_FALLBACK
+ else:
+ raise
+
+ def tryPackingHarfbuzz(self, writer, hb_first_error_logged):
+ try:
+ log.debug("serializing '%s' with hb.repack", self.tableTag)
+ return writer.getAllDataUsingHarfbuzz(self.tableTag)
+ except (ValueError, MemoryError, hb.RepackerError) as e:
+ # Only log hb repacker errors the first time they occur in
+ # the offset-overflow resolution loop, they are just noisy.
+ # Maybe we can revisit this if/when uharfbuzz actually gives
+ # us more info as to why hb.repack failed...
+ if not hb_first_error_logged:
+ error_msg = f"{type(e).__name__}"
+ if str(e) != "":
+ error_msg += f": {e}"
+ log.warning(
+ "hb.repack failed to serialize '%s', attempting fonttools resolutions "
+ "; the error message was: %s",
+ self.tableTag,
+ error_msg,
+ )
+ hb_first_error_logged = True
+ return writer.getAllData(remove_duplicate=False)
+
+ def tryPackingFontTools(self, writer):
+ return writer.getAllData()
+
+ def tryResolveOverflow(self, font, e, lastOverflowRecord):
+ ok = 0
+ if lastOverflowRecord == e.value:
+ # Oh well...
+ return ok
+
+ overflowRecord = e.value
+ log.info("Attempting to fix OTLOffsetOverflowError %s", e)
+
+ if overflowRecord.itemName is None:
+ from .otTables import fixLookupOverFlows
+
+ ok = fixLookupOverFlows(font, overflowRecord)
+ else:
+ from .otTables import fixSubTableOverFlows
+
+ ok = fixSubTableOverFlows(font, overflowRecord)
+
+ if ok:
+ return ok
+
+ # Try upgrading lookup to Extension and hope
+ # that cross-lookup sharing not happening would
+ # fix overflow...
+ from .otTables import fixLookupOverFlows
+
+ return fixLookupOverFlows(font, overflowRecord)
+
+ def toXML(self, writer, font):
+ self.table.toXML2(writer, font)
+
+ def fromXML(self, name, attrs, content, font):
+ from . import otTables
+
+ if not hasattr(self, "table"):
+ tableClass = getattr(otTables, self.tableTag)
+ self.table = tableClass()
+ self.table.fromXML(name, attrs, content, font)
+ self.table.populateDefaults()
+
+ def ensureDecompiled(self, recurse=True):
+ self.table.ensureDecompiled(recurse=recurse)
# https://github.com/fonttools/fonttools/pull/2285#issuecomment-834652928
-assert len(struct.pack('i', 0)) == 4
-assert array.array('i').itemsize == 4, "Oops, file a bug against fonttools."
+assert len(struct.pack("i", 0)) == 4
+assert array.array("i").itemsize == 4, "Oops, file a bug against fonttools."
+
class OTTableReader(object):
- """Helper class to retrieve data from an OpenType table."""
-
- __slots__ = ('data', 'offset', 'pos', 'localState', 'tableTag')
-
- def __init__(self, data, localState=None, offset=0, tableTag=None):
- self.data = data
- self.offset = offset
- self.pos = offset
- self.localState = localState
- self.tableTag = tableTag
-
- def advance(self, count):
- self.pos += count
-
- def seek(self, pos):
- self.pos = pos
-
- def copy(self):
- other = self.__class__(self.data, self.localState, self.offset, self.tableTag)
- other.pos = self.pos
- return other
-
- def getSubReader(self, offset):
- offset = self.offset + offset
- return self.__class__(self.data, self.localState, offset, self.tableTag)
-
- def readValue(self, typecode, staticSize):
- pos = self.pos
- newpos = pos + staticSize
- value, = struct.unpack(f">{typecode}", self.data[pos:newpos])
- self.pos = newpos
- return value
- def readArray(self, typecode, staticSize, count):
- pos = self.pos
- newpos = pos + count * staticSize
- value = array.array(typecode, self.data[pos:newpos])
- if sys.byteorder != "big": value.byteswap()
- self.pos = newpos
- return value.tolist()
-
- def readInt8(self):
- return self.readValue("b", staticSize=1)
- def readInt8Array(self, count):
- return self.readArray("b", staticSize=1, count=count)
-
- def readShort(self):
- return self.readValue("h", staticSize=2)
- def readShortArray(self, count):
- return self.readArray("h", staticSize=2, count=count)
-
- def readLong(self):
- return self.readValue("i", staticSize=4)
- def readLongArray(self, count):
- return self.readArray("i", staticSize=4, count=count)
-
- def readUInt8(self):
- return self.readValue("B", staticSize=1)
- def readUInt8Array(self, count):
- return self.readArray("B", staticSize=1, count=count)
-
- def readUShort(self):
- return self.readValue("H", staticSize=2)
- def readUShortArray(self, count):
- return self.readArray("H", staticSize=2, count=count)
-
- def readULong(self):
- return self.readValue("I", staticSize=4)
- def readULongArray(self, count):
- return self.readArray("I", staticSize=4, count=count)
-
- def readUInt24(self):
- pos = self.pos
- newpos = pos + 3
- value, = struct.unpack(">l", b'\0'+self.data[pos:newpos])
- self.pos = newpos
- return value
- def readUInt24Array(self, count):
- return [self.readUInt24() for _ in range(count)]
-
- def readTag(self):
- pos = self.pos
- newpos = pos + 4
- value = Tag(self.data[pos:newpos])
- assert len(value) == 4, value
- self.pos = newpos
- return value
-
- def readData(self, count):
- pos = self.pos
- newpos = pos + count
- value = self.data[pos:newpos]
- self.pos = newpos
- return value
-
- def __setitem__(self, name, value):
- state = self.localState.copy() if self.localState else dict()
- state[name] = value
- self.localState = state
-
- def __getitem__(self, name):
- return self.localState and self.localState[name]
-
- def __contains__(self, name):
- return self.localState and name in self.localState
+ """Helper class to retrieve data from an OpenType table."""
+
+ __slots__ = ("data", "offset", "pos", "localState", "tableTag")
+
+ def __init__(self, data, localState=None, offset=0, tableTag=None):
+ self.data = data
+ self.offset = offset
+ self.pos = offset
+ self.localState = localState
+ self.tableTag = tableTag
+
+ def advance(self, count):
+ self.pos += count
+
+ def seek(self, pos):
+ self.pos = pos
+
+ def copy(self):
+ other = self.__class__(self.data, self.localState, self.offset, self.tableTag)
+ other.pos = self.pos
+ return other
+
+ def getSubReader(self, offset):
+ offset = self.offset + offset
+ return self.__class__(self.data, self.localState, offset, self.tableTag)
+
+ def readValue(self, typecode, staticSize):
+ pos = self.pos
+ newpos = pos + staticSize
+ (value,) = struct.unpack(f">{typecode}", self.data[pos:newpos])
+ self.pos = newpos
+ return value
+
+ def readArray(self, typecode, staticSize, count):
+ pos = self.pos
+ newpos = pos + count * staticSize
+ value = array.array(typecode, self.data[pos:newpos])
+ if sys.byteorder != "big":
+ value.byteswap()
+ self.pos = newpos
+ return value.tolist()
+
+ def readInt8(self):
+ return self.readValue("b", staticSize=1)
+
+ def readInt8Array(self, count):
+ return self.readArray("b", staticSize=1, count=count)
+
+ def readShort(self):
+ return self.readValue("h", staticSize=2)
+
+ def readShortArray(self, count):
+ return self.readArray("h", staticSize=2, count=count)
+
+ def readLong(self):
+ return self.readValue("i", staticSize=4)
+
+ def readLongArray(self, count):
+ return self.readArray("i", staticSize=4, count=count)
+
+ def readUInt8(self):
+ return self.readValue("B", staticSize=1)
+
+ def readUInt8Array(self, count):
+ return self.readArray("B", staticSize=1, count=count)
+
+ def readUShort(self):
+ return self.readValue("H", staticSize=2)
+
+ def readUShortArray(self, count):
+ return self.readArray("H", staticSize=2, count=count)
+
+ def readULong(self):
+ return self.readValue("I", staticSize=4)
+
+ def readULongArray(self, count):
+ return self.readArray("I", staticSize=4, count=count)
+
+ def readUInt24(self):
+ pos = self.pos
+ newpos = pos + 3
+ (value,) = struct.unpack(">l", b"\0" + self.data[pos:newpos])
+ self.pos = newpos
+ return value
+
+ def readUInt24Array(self, count):
+ return [self.readUInt24() for _ in range(count)]
+
+ def readTag(self):
+ pos = self.pos
+ newpos = pos + 4
+ value = Tag(self.data[pos:newpos])
+ assert len(value) == 4, value
+ self.pos = newpos
+ return value
+
+ def readData(self, count):
+ pos = self.pos
+ newpos = pos + count
+ value = self.data[pos:newpos]
+ self.pos = newpos
+ return value
+
+ def __setitem__(self, name, value):
+ state = self.localState.copy() if self.localState else dict()
+ state[name] = value
+ self.localState = state
+
+ def __getitem__(self, name):
+ return self.localState and self.localState[name]
+
+ def __contains__(self, name):
+ return self.localState and name in self.localState
+
+
+class OffsetToWriter(object):
+ def __init__(self, subWriter, offsetSize):
+ self.subWriter = subWriter
+ self.offsetSize = offsetSize
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.subWriter == other.subWriter and self.offsetSize == other.offsetSize
+
+ def __hash__(self):
+ # only works after self._doneWriting() has been called
+ return hash((self.subWriter, self.offsetSize))
class OTTableWriter(object):
- """Helper class to gather and assemble data for OpenType tables."""
-
- def __init__(self, localState=None, tableTag=None, offsetSize=2):
- self.items = []
- self.pos = None
- self.localState = localState
- self.tableTag = tableTag
- self.offsetSize = offsetSize
- self.parent = None
-
- # DEPRECATED: 'longOffset' is kept as a property for backward compat with old code.
- # You should use 'offsetSize' instead (2, 3 or 4 bytes).
- @property
- def longOffset(self):
- return self.offsetSize == 4
-
- @longOffset.setter
- def longOffset(self, value):
- self.offsetSize = 4 if value else 2
-
- def __setitem__(self, name, value):
- state = self.localState.copy() if self.localState else dict()
- state[name] = value
- self.localState = state
-
- def __getitem__(self, name):
- return self.localState[name]
-
- def __delitem__(self, name):
- del self.localState[name]
-
- # assembler interface
-
- def getDataLength(self):
- """Return the length of this table in bytes, without subtables."""
- l = 0
- for item in self.items:
- if hasattr(item, "getCountData"):
- l += item.size
- elif hasattr(item, "getData"):
- l += item.offsetSize
- else:
- l = l + len(item)
- return l
-
- def getData(self):
- """Assemble the data for this writer/table, without subtables."""
- items = list(self.items) # make a shallow copy
- pos = self.pos
- numItems = len(items)
- for i in range(numItems):
- item = items[i]
-
- if hasattr(item, "getData"):
- if item.offsetSize == 4:
- items[i] = packULong(item.pos - pos)
- elif item.offsetSize == 2:
- try:
- items[i] = packUShort(item.pos - pos)
- except struct.error:
- # provide data to fix overflow problem.
- overflowErrorRecord = self.getOverflowErrorRecord(item)
-
- raise OTLOffsetOverflowError(overflowErrorRecord)
- elif item.offsetSize == 3:
- items[i] = packUInt24(item.pos - pos)
- else:
- raise ValueError(item.offsetSize)
-
- return bytesjoin(items)
-
- def getDataForHarfbuzz(self):
- """Assemble the data for this writer/table with all offset field set to 0"""
- items = list(self.items)
- packFuncs = {2: packUShort, 3: packUInt24, 4: packULong}
- for i, item in enumerate(items):
- if hasattr(item, "getData"):
- # Offset value is not needed in harfbuzz repacker, so setting offset to 0 to avoid overflow here
- if item.offsetSize in packFuncs:
- items[i] = packFuncs[item.offsetSize](0)
- else:
- raise ValueError(item.offsetSize)
-
- return bytesjoin(items)
-
- def __hash__(self):
- # only works after self._doneWriting() has been called
- return hash(self.items)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.offsetSize == other.offsetSize and self.items == other.items
-
- def _doneWriting(self, internedTables, shareExtension=False):
- # Convert CountData references to data string items
- # collapse duplicate table references to a unique entry
- # "tables" are OTTableWriter objects.
-
- # For Extension Lookup types, we can
- # eliminate duplicates only within the tree under the Extension Lookup,
- # as offsets may exceed 64K even between Extension LookupTable subtables.
- isExtension = hasattr(self, "Extension")
-
- # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level
- # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly
- # empty, array. So, we don't share those.
- # See: https://github.com/fonttools/fonttools/issues/518
- dontShare = hasattr(self, 'DontShare')
-
- if isExtension and not shareExtension:
- internedTables = {}
-
- items = self.items
- for i in range(len(items)):
- item = items[i]
- if hasattr(item, "getCountData"):
- items[i] = item.getCountData()
- elif hasattr(item, "getData"):
- item._doneWriting(internedTables, shareExtension=shareExtension)
- # At this point, all subwriters are hashable based on their items.
- # (See hash and comparison magic methods above.) So the ``setdefault``
- # call here will return the first writer object we've seen with
- # equal content, or store it in the dictionary if it's not been
- # seen yet. We therefore replace the subwriter object with an equivalent
- # object, which deduplicates the tree.
- if not dontShare:
- items[i] = item = internedTables.setdefault(item, item)
- self.items = tuple(items)
-
- def _gatherTables(self, tables, extTables, done):
- # Convert table references in self.items tree to a flat
- # list of tables in depth-first traversal order.
- # "tables" are OTTableWriter objects.
- # We do the traversal in reverse order at each level, in order to
- # resolve duplicate references to be the last reference in the list of tables.
- # For extension lookups, duplicate references can be merged only within the
- # writer tree under the extension lookup.
-
- done[id(self)] = True
-
- numItems = len(self.items)
- iRange = list(range(numItems))
- iRange.reverse()
-
- isExtension = hasattr(self, "Extension")
-
- selfTables = tables
-
- if isExtension:
- assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables"
- tables, extTables, done = extTables, None, {}
-
- # add Coverage table if it is sorted last.
- sortCoverageLast = False
- if hasattr(self, "sortCoverageLast"):
- # Find coverage table
- for i in range(numItems):
- item = self.items[i]
- if getattr(item, 'name', None) == "Coverage":
- sortCoverageLast = True
- break
- if id(item) not in done:
- item._gatherTables(tables, extTables, done)
- else:
- # We're a new parent of item
- pass
-
- for i in iRange:
- item = self.items[i]
- if not hasattr(item, "getData"):
- continue
-
- if sortCoverageLast and (i==1) and getattr(item, 'name', None) == 'Coverage':
- # we've already 'gathered' it above
- continue
-
- if id(item) not in done:
- item._gatherTables(tables, extTables, done)
- else:
- # Item is already written out by other parent
- pass
-
- selfTables.append(self)
-
- def _gatherGraphForHarfbuzz(self, tables, obj_list, done, objidx, virtual_edges):
- real_links = []
- virtual_links = []
- item_idx = objidx
-
- # Merge virtual_links from parent
- for idx in virtual_edges:
- virtual_links.append((0, 0, idx))
-
- sortCoverageLast = False
- coverage_idx = 0
- if hasattr(self, "sortCoverageLast"):
- # Find coverage table
- for i, item in enumerate(self.items):
- if getattr(item, 'name', None) == "Coverage":
- sortCoverageLast = True
- if id(item) not in done:
- coverage_idx = item_idx = item._gatherGraphForHarfbuzz(tables, obj_list, done, item_idx, virtual_edges)
- else:
- coverage_idx = done[id(item)]
- virtual_edges.append(coverage_idx)
- break
-
- child_idx = 0
- offset_pos = 0
- for i, item in enumerate(self.items):
- if hasattr(item, "getData"):
- pos = offset_pos
- elif hasattr(item, "getCountData"):
- offset_pos += item.size
- continue
- else:
- offset_pos = offset_pos + len(item)
- continue
-
- if id(item) not in done:
- child_idx = item_idx = item._gatherGraphForHarfbuzz(tables, obj_list, done, item_idx, virtual_edges)
- else:
- child_idx = done[id(item)]
-
- real_edge = (pos, item.offsetSize, child_idx)
- real_links.append(real_edge)
- offset_pos += item.offsetSize
-
- tables.append(self)
- obj_list.append((real_links,virtual_links))
- item_idx += 1
- done[id(self)] = item_idx
- if sortCoverageLast:
- virtual_edges.pop()
-
- return item_idx
-
- def getAllDataUsingHarfbuzz(self, tableTag):
- """The Whole table is represented as a Graph.
- Assemble graph data and call Harfbuzz repacker to pack the table.
- Harfbuzz repacker is faster and retain as much sub-table sharing as possible, see also:
- https://github.com/harfbuzz/harfbuzz/blob/main/docs/repacker.md
- The input format for hb.repack() method is explained here:
- https://github.com/harfbuzz/uharfbuzz/blob/main/src/uharfbuzz/_harfbuzz.pyx#L1149
- """
- internedTables = {}
- self._doneWriting(internedTables, shareExtension=True)
- tables = []
- obj_list = []
- done = {}
- objidx = 0
- virtual_edges = []
- self._gatherGraphForHarfbuzz(tables, obj_list, done, objidx, virtual_edges)
- # Gather all data in two passes: the absolute positions of all
- # subtable are needed before the actual data can be assembled.
- pos = 0
- for table in tables:
- table.pos = pos
- pos = pos + table.getDataLength()
-
- data = []
- for table in tables:
- tableData = table.getDataForHarfbuzz()
- data.append(tableData)
-
- if hasattr(hb, "repack_with_tag"):
- return hb.repack_with_tag(str(tableTag), data, obj_list)
- else:
- return hb.repack(data, obj_list)
-
- def getAllData(self, remove_duplicate=True):
- """Assemble all data, including all subtables."""
- if remove_duplicate:
- internedTables = {}
- self._doneWriting(internedTables)
- tables = []
- extTables = []
- done = {}
- self._gatherTables(tables, extTables, done)
- tables.reverse()
- extTables.reverse()
- # Gather all data in two passes: the absolute positions of all
- # subtable are needed before the actual data can be assembled.
- pos = 0
- for table in tables:
- table.pos = pos
- pos = pos + table.getDataLength()
-
- for table in extTables:
- table.pos = pos
- pos = pos + table.getDataLength()
-
- data = []
- for table in tables:
- tableData = table.getData()
- data.append(tableData)
-
- for table in extTables:
- tableData = table.getData()
- data.append(tableData)
-
- return bytesjoin(data)
-
- # interface for gathering data, as used by table.compile()
-
- def getSubWriter(self, offsetSize=2):
- subwriter = self.__class__(self.localState, self.tableTag, offsetSize=offsetSize)
- subwriter.parent = self # because some subtables have idential values, we discard
- # the duplicates under the getAllData method. Hence some
- # subtable writers can have more than one parent writer.
- # But we just care about first one right now.
- return subwriter
-
- def writeValue(self, typecode, value):
- self.items.append(struct.pack(f">{typecode}", value))
- def writeArray(self, typecode, values):
- a = array.array(typecode, values)
- if sys.byteorder != "big": a.byteswap()
- self.items.append(a.tobytes())
-
- def writeInt8(self, value):
- assert -128 <= value < 128, value
- self.items.append(struct.pack(">b", value))
- def writeInt8Array(self, values):
- self.writeArray('b', values)
-
- def writeShort(self, value):
- assert -32768 <= value < 32768, value
- self.items.append(struct.pack(">h", value))
- def writeShortArray(self, values):
- self.writeArray('h', values)
-
- def writeLong(self, value):
- self.items.append(struct.pack(">i", value))
- def writeLongArray(self, values):
- self.writeArray('i', values)
-
- def writeUInt8(self, value):
- assert 0 <= value < 256, value
- self.items.append(struct.pack(">B", value))
- def writeUInt8Array(self, values):
- self.writeArray('B', values)
-
- def writeUShort(self, value):
- assert 0 <= value < 0x10000, value
- self.items.append(struct.pack(">H", value))
- def writeUShortArray(self, values):
- self.writeArray('H', values)
-
- def writeULong(self, value):
- self.items.append(struct.pack(">I", value))
- def writeULongArray(self, values):
- self.writeArray('I', values)
-
- def writeUInt24(self, value):
- assert 0 <= value < 0x1000000, value
- b = struct.pack(">L", value)
- self.items.append(b[1:])
- def writeUInt24Array(self, values):
- for value in values:
- self.writeUInt24(value)
-
- def writeTag(self, tag):
- tag = Tag(tag).tobytes()
- assert len(tag) == 4, tag
- self.items.append(tag)
-
- def writeSubTable(self, subWriter):
- self.items.append(subWriter)
-
- def writeCountReference(self, table, name, size=2, value=None):
- ref = CountReference(table, name, size=size, value=value)
- self.items.append(ref)
- return ref
-
- def writeStruct(self, format, values):
- data = struct.pack(*(format,) + values)
- self.items.append(data)
-
- def writeData(self, data):
- self.items.append(data)
-
- def getOverflowErrorRecord(self, item):
- LookupListIndex = SubTableIndex = itemName = itemIndex = None
- if self.name == 'LookupList':
- LookupListIndex = item.repeatIndex
- elif self.name == 'Lookup':
- LookupListIndex = self.repeatIndex
- SubTableIndex = item.repeatIndex
- else:
- itemName = getattr(item, 'name', '<none>')
- if hasattr(item, 'repeatIndex'):
- itemIndex = item.repeatIndex
- if self.name == 'SubTable':
- LookupListIndex = self.parent.repeatIndex
- SubTableIndex = self.repeatIndex
- elif self.name == 'ExtSubTable':
- LookupListIndex = self.parent.parent.repeatIndex
- SubTableIndex = self.parent.repeatIndex
- else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
- itemName = ".".join([self.name, itemName])
- p1 = self.parent
- while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
- itemName = ".".join([p1.name, itemName])
- p1 = p1.parent
- if p1:
- if p1.name == 'ExtSubTable':
- LookupListIndex = p1.parent.parent.repeatIndex
- SubTableIndex = p1.parent.repeatIndex
- else:
- LookupListIndex = p1.parent.repeatIndex
- SubTableIndex = p1.repeatIndex
-
- return OverflowErrorRecord( (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) )
+ """Helper class to gather and assemble data for OpenType tables."""
+
+ def __init__(self, localState=None, tableTag=None):
+ self.items = []
+ self.pos = None
+ self.localState = localState
+ self.tableTag = tableTag
+ self.parent = None
+
+ def __setitem__(self, name, value):
+ state = self.localState.copy() if self.localState else dict()
+ state[name] = value
+ self.localState = state
+
+ def __getitem__(self, name):
+ return self.localState[name]
+
+ def __delitem__(self, name):
+ del self.localState[name]
+
+ # assembler interface
+
+ def getDataLength(self):
+ """Return the length of this table in bytes, without subtables."""
+ l = 0
+ for item in self.items:
+ if hasattr(item, "getCountData"):
+ l += item.size
+ elif hasattr(item, "subWriter"):
+ l += item.offsetSize
+ else:
+ l = l + len(item)
+ return l
+
+ def getData(self):
+ """Assemble the data for this writer/table, without subtables."""
+ items = list(self.items) # make a shallow copy
+ pos = self.pos
+ numItems = len(items)
+ for i in range(numItems):
+ item = items[i]
+
+ if hasattr(item, "subWriter"):
+ if item.offsetSize == 4:
+ items[i] = packULong(item.subWriter.pos - pos)
+ elif item.offsetSize == 2:
+ try:
+ items[i] = packUShort(item.subWriter.pos - pos)
+ except struct.error:
+ # provide data to fix overflow problem.
+ overflowErrorRecord = self.getOverflowErrorRecord(
+ item.subWriter
+ )
+
+ raise OTLOffsetOverflowError(overflowErrorRecord)
+ elif item.offsetSize == 3:
+ items[i] = packUInt24(item.subWriter.pos - pos)
+ else:
+ raise ValueError(item.offsetSize)
+
+ return bytesjoin(items)
+
+    def getDataForHarfbuzz(self):
+        """Assemble the data for this writer/table with all offset fields set to 0"""
+ items = list(self.items)
+ packFuncs = {2: packUShort, 3: packUInt24, 4: packULong}
+ for i, item in enumerate(items):
+ if hasattr(item, "subWriter"):
+ # Offset value is not needed in harfbuzz repacker, so setting offset to 0 to avoid overflow here
+ if item.offsetSize in packFuncs:
+ items[i] = packFuncs[item.offsetSize](0)
+ else:
+ raise ValueError(item.offsetSize)
+
+ return bytesjoin(items)
+
+ def __hash__(self):
+ # only works after self._doneWriting() has been called
+ return hash(self.items)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.items == other.items
+
+ def _doneWriting(self, internedTables, shareExtension=False):
+ # Convert CountData references to data string items
+ # collapse duplicate table references to a unique entry
+ # "tables" are OTTableWriter objects.
+
+ # For Extension Lookup types, we can
+ # eliminate duplicates only within the tree under the Extension Lookup,
+ # as offsets may exceed 64K even between Extension LookupTable subtables.
+ isExtension = hasattr(self, "Extension")
+
+ # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level
+ # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly
+ # empty, array. So, we don't share those.
+ # See: https://github.com/fonttools/fonttools/issues/518
+ dontShare = hasattr(self, "DontShare")
+
+ if isExtension and not shareExtension:
+ internedTables = {}
+
+ items = self.items
+ for i in range(len(items)):
+ item = items[i]
+ if hasattr(item, "getCountData"):
+ items[i] = item.getCountData()
+ elif hasattr(item, "subWriter"):
+ item.subWriter._doneWriting(
+ internedTables, shareExtension=shareExtension
+ )
+ # At this point, all subwriters are hashable based on their items.
+ # (See hash and comparison magic methods above.) So the ``setdefault``
+ # call here will return the first writer object we've seen with
+ # equal content, or store it in the dictionary if it's not been
+ # seen yet. We therefore replace the subwriter object with an equivalent
+ # object, which deduplicates the tree.
+ if not dontShare:
+ items[i].subWriter = internedTables.setdefault(
+ item.subWriter, item.subWriter
+ )
+ self.items = tuple(items)
+
+ def _gatherTables(self, tables, extTables, done):
+ # Convert table references in self.items tree to a flat
+ # list of tables in depth-first traversal order.
+ # "tables" are OTTableWriter objects.
+ # We do the traversal in reverse order at each level, in order to
+ # resolve duplicate references to be the last reference in the list of tables.
+ # For extension lookups, duplicate references can be merged only within the
+ # writer tree under the extension lookup.
+
+ done[id(self)] = True
+
+ numItems = len(self.items)
+ iRange = list(range(numItems))
+ iRange.reverse()
+
+ isExtension = hasattr(self, "Extension")
+
+ selfTables = tables
+
+ if isExtension:
+ assert (
+ extTables is not None
+ ), "Program or XML editing error. Extension subtables cannot contain extensions subtables"
+ tables, extTables, done = extTables, None, {}
+
+ # add Coverage table if it is sorted last.
+ sortCoverageLast = False
+ if hasattr(self, "sortCoverageLast"):
+ # Find coverage table
+ for i in range(numItems):
+ item = self.items[i]
+ if (
+ hasattr(item, "subWriter")
+ and getattr(item.subWriter, "name", None) == "Coverage"
+ ):
+ sortCoverageLast = True
+ break
+ if id(item.subWriter) not in done:
+ item.subWriter._gatherTables(tables, extTables, done)
+ else:
+ # We're a new parent of item
+ pass
+
+ for i in iRange:
+ item = self.items[i]
+ if not hasattr(item, "subWriter"):
+ continue
+
+ if (
+ sortCoverageLast
+ and (i == 1)
+ and getattr(item.subWriter, "name", None) == "Coverage"
+ ):
+ # we've already 'gathered' it above
+ continue
+
+ if id(item.subWriter) not in done:
+ item.subWriter._gatherTables(tables, extTables, done)
+ else:
+ # Item is already written out by other parent
+ pass
+
+ selfTables.append(self)
+
+ def _gatherGraphForHarfbuzz(self, tables, obj_list, done, objidx, virtual_edges):
+ real_links = []
+ virtual_links = []
+ item_idx = objidx
+
+ # Merge virtual_links from parent
+ for idx in virtual_edges:
+ virtual_links.append((0, 0, idx))
+
+ sortCoverageLast = False
+ coverage_idx = 0
+ if hasattr(self, "sortCoverageLast"):
+ # Find coverage table
+ for i, item in enumerate(self.items):
+ if getattr(item, "name", None) == "Coverage":
+ sortCoverageLast = True
+ if id(item) not in done:
+ coverage_idx = item_idx = item._gatherGraphForHarfbuzz(
+ tables, obj_list, done, item_idx, virtual_edges
+ )
+ else:
+ coverage_idx = done[id(item)]
+ virtual_edges.append(coverage_idx)
+ break
+
+ child_idx = 0
+ offset_pos = 0
+ for i, item in enumerate(self.items):
+ if hasattr(item, "subWriter"):
+ pos = offset_pos
+ elif hasattr(item, "getCountData"):
+ offset_pos += item.size
+ continue
+ else:
+ offset_pos = offset_pos + len(item)
+ continue
+
+ if id(item.subWriter) not in done:
+ child_idx = item_idx = item.subWriter._gatherGraphForHarfbuzz(
+ tables, obj_list, done, item_idx, virtual_edges
+ )
+ else:
+ child_idx = done[id(item.subWriter)]
+
+ real_edge = (pos, item.offsetSize, child_idx)
+ real_links.append(real_edge)
+ offset_pos += item.offsetSize
+
+ tables.append(self)
+ obj_list.append((real_links, virtual_links))
+ item_idx += 1
+ done[id(self)] = item_idx
+ if sortCoverageLast:
+ virtual_edges.pop()
+
+ return item_idx
+
+ def getAllDataUsingHarfbuzz(self, tableTag):
+ """The Whole table is represented as a Graph.
+ Assemble graph data and call Harfbuzz repacker to pack the table.
+ Harfbuzz repacker is faster and retain as much sub-table sharing as possible, see also:
+ https://github.com/harfbuzz/harfbuzz/blob/main/docs/repacker.md
+ The input format for hb.repack() method is explained here:
+ https://github.com/harfbuzz/uharfbuzz/blob/main/src/uharfbuzz/_harfbuzz.pyx#L1149
+ """
+ internedTables = {}
+ self._doneWriting(internedTables, shareExtension=True)
+ tables = []
+ obj_list = []
+ done = {}
+ objidx = 0
+ virtual_edges = []
+ self._gatherGraphForHarfbuzz(tables, obj_list, done, objidx, virtual_edges)
+ # Gather all data in two passes: the absolute positions of all
+ # subtable are needed before the actual data can be assembled.
+ pos = 0
+ for table in tables:
+ table.pos = pos
+ pos = pos + table.getDataLength()
+
+ data = []
+ for table in tables:
+ tableData = table.getDataForHarfbuzz()
+ data.append(tableData)
+
+ if hasattr(hb, "repack_with_tag"):
+ return hb.repack_with_tag(str(tableTag), data, obj_list)
+ else:
+ return hb.repack(data, obj_list)
+
+ def getAllData(self, remove_duplicate=True):
+ """Assemble all data, including all subtables."""
+ if remove_duplicate:
+ internedTables = {}
+ self._doneWriting(internedTables)
+ tables = []
+ extTables = []
+ done = {}
+ self._gatherTables(tables, extTables, done)
+ tables.reverse()
+ extTables.reverse()
+ # Gather all data in two passes: the absolute positions of all
+ # subtable are needed before the actual data can be assembled.
+ pos = 0
+ for table in tables:
+ table.pos = pos
+ pos = pos + table.getDataLength()
+
+ for table in extTables:
+ table.pos = pos
+ pos = pos + table.getDataLength()
+
+ data = []
+ for table in tables:
+ tableData = table.getData()
+ data.append(tableData)
+
+ for table in extTables:
+ tableData = table.getData()
+ data.append(tableData)
+
+ return bytesjoin(data)
+
+ # interface for gathering data, as used by table.compile()
+
+ def getSubWriter(self):
+ subwriter = self.__class__(self.localState, self.tableTag)
+ subwriter.parent = (
+            self  # because some subtables have identical values, we discard
+ )
+ # the duplicates under the getAllData method. Hence some
+ # subtable writers can have more than one parent writer.
+ # But we just care about first one right now.
+ return subwriter
+
+ def writeValue(self, typecode, value):
+ self.items.append(struct.pack(f">{typecode}", value))
+
+ def writeArray(self, typecode, values):
+ a = array.array(typecode, values)
+ if sys.byteorder != "big":
+ a.byteswap()
+ self.items.append(a.tobytes())
+
+ def writeInt8(self, value):
+ assert -128 <= value < 128, value
+ self.items.append(struct.pack(">b", value))
+
+ def writeInt8Array(self, values):
+ self.writeArray("b", values)
+
+ def writeShort(self, value):
+ assert -32768 <= value < 32768, value
+ self.items.append(struct.pack(">h", value))
+
+ def writeShortArray(self, values):
+ self.writeArray("h", values)
+
+ def writeLong(self, value):
+ self.items.append(struct.pack(">i", value))
+
+ def writeLongArray(self, values):
+ self.writeArray("i", values)
+
+ def writeUInt8(self, value):
+ assert 0 <= value < 256, value
+ self.items.append(struct.pack(">B", value))
+
+ def writeUInt8Array(self, values):
+ self.writeArray("B", values)
+
+ def writeUShort(self, value):
+ assert 0 <= value < 0x10000, value
+ self.items.append(struct.pack(">H", value))
+
+ def writeUShortArray(self, values):
+ self.writeArray("H", values)
+
+ def writeULong(self, value):
+ self.items.append(struct.pack(">I", value))
+
+ def writeULongArray(self, values):
+ self.writeArray("I", values)
+
+ def writeUInt24(self, value):
+ assert 0 <= value < 0x1000000, value
+ b = struct.pack(">L", value)
+ self.items.append(b[1:])
+
+ def writeUInt24Array(self, values):
+ for value in values:
+ self.writeUInt24(value)
+
+ def writeTag(self, tag):
+ tag = Tag(tag).tobytes()
+ assert len(tag) == 4, tag
+ self.items.append(tag)
+
+ def writeSubTable(self, subWriter, offsetSize):
+ self.items.append(OffsetToWriter(subWriter, offsetSize))
+
+ def writeCountReference(self, table, name, size=2, value=None):
+ ref = CountReference(table, name, size=size, value=value)
+ self.items.append(ref)
+ return ref
+
+ def writeStruct(self, format, values):
+ data = struct.pack(*(format,) + values)
+ self.items.append(data)
+
+ def writeData(self, data):
+ self.items.append(data)
+
+ def getOverflowErrorRecord(self, item):
+ LookupListIndex = SubTableIndex = itemName = itemIndex = None
+ if self.name == "LookupList":
+ LookupListIndex = item.repeatIndex
+ elif self.name == "Lookup":
+ LookupListIndex = self.repeatIndex
+ SubTableIndex = item.repeatIndex
+ else:
+ itemName = getattr(item, "name", "<none>")
+ if hasattr(item, "repeatIndex"):
+ itemIndex = item.repeatIndex
+ if self.name == "SubTable":
+ LookupListIndex = self.parent.repeatIndex
+ SubTableIndex = self.repeatIndex
+ elif self.name == "ExtSubTable":
+ LookupListIndex = self.parent.parent.repeatIndex
+ SubTableIndex = self.parent.repeatIndex
+ else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
+ itemName = ".".join([self.name, itemName])
+ p1 = self.parent
+ while p1 and p1.name not in ["ExtSubTable", "SubTable"]:
+ itemName = ".".join([p1.name, itemName])
+ p1 = p1.parent
+ if p1:
+ if p1.name == "ExtSubTable":
+ LookupListIndex = p1.parent.parent.repeatIndex
+ SubTableIndex = p1.parent.repeatIndex
+ else:
+ LookupListIndex = p1.parent.repeatIndex
+ SubTableIndex = p1.repeatIndex
+
+ return OverflowErrorRecord(
+ (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex)
+ )
class CountReference(object):
- """A reference to a Count value, not a count of references."""
- def __init__(self, table, name, size=None, value=None):
- self.table = table
- self.name = name
- self.size = size
- if value is not None:
- self.setValue(value)
- def setValue(self, value):
- table = self.table
- name = self.name
- if table[name] is None:
- table[name] = value
- else:
- assert table[name] == value, (name, table[name], value)
- def getValue(self):
- return self.table[self.name]
- def getCountData(self):
- v = self.table[self.name]
- if v is None: v = 0
- return {1:packUInt8, 2:packUShort, 4:packULong}[self.size](v)
-
-
-def packUInt8 (value):
- return struct.pack(">B", value)
+ """A reference to a Count value, not a count of references."""
+
+ def __init__(self, table, name, size=None, value=None):
+ self.table = table
+ self.name = name
+ self.size = size
+ if value is not None:
+ self.setValue(value)
+
+ def setValue(self, value):
+ table = self.table
+ name = self.name
+ if table[name] is None:
+ table[name] = value
+ else:
+ assert table[name] == value, (name, table[name], value)
+
+ def getValue(self):
+ return self.table[self.name]
+
+ def getCountData(self):
+ v = self.table[self.name]
+ if v is None:
+ v = 0
+ return {1: packUInt8, 2: packUShort, 4: packULong}[self.size](v)
+
+
+def packUInt8(value):
+ return struct.pack(">B", value)
+
def packUShort(value):
- return struct.pack(">H", value)
+ return struct.pack(">H", value)
+
def packULong(value):
- assert 0 <= value < 0x100000000, value
- return struct.pack(">I", value)
+ assert 0 <= value < 0x100000000, value
+ return struct.pack(">I", value)
+
def packUInt24(value):
- assert 0 <= value < 0x1000000, value
- return struct.pack(">I", value)[1:]
+ assert 0 <= value < 0x1000000, value
+ return struct.pack(">I", value)[1:]
class BaseTable(object):
- """Generic base class for all OpenType (sub)tables."""
-
- def __getattr__(self, attr):
- reader = self.__dict__.get("reader")
- if reader:
- del self.reader
- font = self.font
- del self.font
- self.decompile(reader, font)
- return getattr(self, attr)
-
- raise AttributeError(attr)
-
- def ensureDecompiled(self, recurse=False):
- reader = self.__dict__.get("reader")
- if reader:
- del self.reader
- font = self.font
- del self.font
- self.decompile(reader, font)
- if recurse:
- for subtable in self.iterSubTables():
- subtable.value.ensureDecompiled(recurse)
-
- @classmethod
- def getRecordSize(cls, reader):
- totalSize = 0
- for conv in cls.converters:
- size = conv.getRecordSize(reader)
- if size is NotImplemented: return NotImplemented
- countValue = 1
- if conv.repeat:
- if conv.repeat in reader:
- countValue = reader[conv.repeat] + conv.aux
- else:
- return NotImplemented
- totalSize += size * countValue
- return totalSize
-
- def getConverters(self):
- return self.converters
-
- def getConverterByName(self, name):
- return self.convertersByName[name]
-
- def populateDefaults(self, propagator=None):
- for conv in self.getConverters():
- if conv.repeat:
- if not hasattr(self, conv.name):
- setattr(self, conv.name, [])
- countValue = len(getattr(self, conv.name)) - conv.aux
- try:
- count_conv = self.getConverterByName(conv.repeat)
- setattr(self, conv.repeat, countValue)
- except KeyError:
- # conv.repeat is a propagated count
- if propagator and conv.repeat in propagator:
- propagator[conv.repeat].setValue(countValue)
- else:
- if conv.aux and not eval(conv.aux, None, self.__dict__):
- continue
- if hasattr(self, conv.name):
- continue # Warn if it should NOT be present?!
- if hasattr(conv, 'writeNullOffset'):
- setattr(self, conv.name, None) # Warn?
- #elif not conv.isCount:
- # # Warn?
- # pass
- if hasattr(conv, "DEFAULT"):
- # OptionalValue converters (e.g. VarIndex)
- setattr(self, conv.name, conv.DEFAULT)
-
- def decompile(self, reader, font):
- self.readFormat(reader)
- table = {}
- self.__rawTable = table # for debugging
- for conv in self.getConverters():
- if conv.name == "SubTable":
- conv = conv.getConverter(reader.tableTag,
- table["LookupType"])
- if conv.name == "ExtSubTable":
- conv = conv.getConverter(reader.tableTag,
- table["ExtensionLookupType"])
- if conv.name == "FeatureParams":
- conv = conv.getConverter(reader["FeatureTag"])
- if conv.name == "SubStruct":
- conv = conv.getConverter(reader.tableTag,
- table["MorphType"])
- try:
- if conv.repeat:
- if isinstance(conv.repeat, int):
- countValue = conv.repeat
- elif conv.repeat in table:
- countValue = table[conv.repeat]
- else:
- # conv.repeat is a propagated count
- countValue = reader[conv.repeat]
- countValue += conv.aux
- table[conv.name] = conv.readArray(reader, font, table, countValue)
- else:
- if conv.aux and not eval(conv.aux, None, table):
- continue
- table[conv.name] = conv.read(reader, font, table)
- if conv.isPropagated:
- reader[conv.name] = table[conv.name]
- except Exception as e:
- name = conv.name
- e.args = e.args + (name,)
- raise
-
- if hasattr(self, 'postRead'):
- self.postRead(table, font)
- else:
- self.__dict__.update(table)
-
- del self.__rawTable # succeeded, get rid of debugging info
-
- def compile(self, writer, font):
- self.ensureDecompiled()
- # TODO Following hack to be removed by rewriting how FormatSwitching tables
- # are handled.
- # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
- if hasattr(self, 'preWrite'):
- deleteFormat = not hasattr(self, 'Format')
- table = self.preWrite(font)
- deleteFormat = deleteFormat and hasattr(self, 'Format')
- else:
- deleteFormat = False
- table = self.__dict__.copy()
-
- # some count references may have been initialized in a custom preWrite; we set
- # these in the writer's state beforehand (instead of sequentially) so they will
- # be propagated to all nested subtables even if the count appears in the current
- # table only *after* the offset to the subtable that it is counting.
- for conv in self.getConverters():
- if conv.isCount and conv.isPropagated:
- value = table.get(conv.name)
- if isinstance(value, CountReference):
- writer[conv.name] = value
-
- if hasattr(self, 'sortCoverageLast'):
- writer.sortCoverageLast = 1
-
- if hasattr(self, 'DontShare'):
- writer.DontShare = True
-
- if hasattr(self.__class__, 'LookupType'):
- writer['LookupType'].setValue(self.__class__.LookupType)
-
- self.writeFormat(writer)
- for conv in self.getConverters():
- value = table.get(conv.name) # TODO Handle defaults instead of defaulting to None!
- if conv.repeat:
- if value is None:
- value = []
- countValue = len(value) - conv.aux
- if isinstance(conv.repeat, int):
- assert len(value) == conv.repeat, 'expected %d values, got %d' % (conv.repeat, len(value))
- elif conv.repeat in table:
- CountReference(table, conv.repeat, value=countValue)
- else:
- # conv.repeat is a propagated count
- writer[conv.repeat].setValue(countValue)
- try:
- conv.writeArray(writer, font, table, value)
- except Exception as e:
- e.args = e.args + (conv.name+'[]',)
- raise
- elif conv.isCount:
- # Special-case Count values.
- # Assumption: a Count field will *always* precede
- # the actual array(s).
- # We need a default value, as it may be set later by a nested
- # table. We will later store it here.
- # We add a reference: by the time the data is assembled
- # the Count value will be filled in.
- # We ignore the current count value since it will be recomputed,
- # unless it's a CountReference that was already initialized in a custom preWrite.
- if isinstance(value, CountReference):
- ref = value
- ref.size = conv.staticSize
- writer.writeData(ref)
- table[conv.name] = ref.getValue()
- else:
- ref = writer.writeCountReference(table, conv.name, conv.staticSize)
- table[conv.name] = None
- if conv.isPropagated:
- writer[conv.name] = ref
- elif conv.isLookupType:
- # We make sure that subtables have the same lookup type,
- # and that the type is the same as the one set on the
- # Lookup object, if any is set.
- if conv.name not in table:
- table[conv.name] = None
- ref = writer.writeCountReference(table, conv.name, conv.staticSize, table[conv.name])
- writer['LookupType'] = ref
- else:
- if conv.aux and not eval(conv.aux, None, table):
- continue
- try:
- conv.write(writer, font, table, value)
- except Exception as e:
- name = value.__class__.__name__ if value is not None else conv.name
- e.args = e.args + (name,)
- raise
- if conv.isPropagated:
- writer[conv.name] = value
-
- if deleteFormat:
- del self.Format
-
- def readFormat(self, reader):
- pass
-
- def writeFormat(self, writer):
- pass
-
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- tableName = name if name else self.__class__.__name__
- if attrs is None:
- attrs = []
- if hasattr(self, "Format"):
- attrs = attrs + [("Format", self.Format)]
- xmlWriter.begintag(tableName, attrs)
- xmlWriter.newline()
- self.toXML2(xmlWriter, font)
- xmlWriter.endtag(tableName)
- xmlWriter.newline()
-
- def toXML2(self, xmlWriter, font):
- # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
- # This is because in TTX our parent writes our main tag, and in otBase.py we
- # do it ourselves. I think I'm getting schizophrenic...
- for conv in self.getConverters():
- if conv.repeat:
- value = getattr(self, conv.name, [])
- for i in range(len(value)):
- item = value[i]
- conv.xmlWrite(xmlWriter, font, item, conv.name,
- [("index", i)])
- else:
- if conv.aux and not eval(conv.aux, None, vars(self)):
- continue
- value = getattr(self, conv.name, None) # TODO Handle defaults instead of defaulting to None!
- conv.xmlWrite(xmlWriter, font, value, conv.name, [])
-
- def fromXML(self, name, attrs, content, font):
- try:
- conv = self.getConverterByName(name)
- except KeyError:
- raise # XXX on KeyError, raise nice error
- value = conv.xmlRead(attrs, content, font)
- if conv.repeat:
- seq = getattr(self, conv.name, None)
- if seq is None:
- seq = []
- setattr(self, conv.name, seq)
- seq.append(value)
- else:
- setattr(self, conv.name, value)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
-
- self.ensureDecompiled()
- other.ensureDecompiled()
-
- return self.__dict__ == other.__dict__
-
- class SubTableEntry(NamedTuple):
- """See BaseTable.iterSubTables()"""
- name: str
- value: "BaseTable"
- index: Optional[int] = None # index into given array, None for single values
-
- def iterSubTables(self) -> Iterator[SubTableEntry]:
- """Yield (name, value, index) namedtuples for all subtables of current table.
-
- A sub-table is an instance of BaseTable (or subclass thereof) that is a child
- of self, the current parent table.
- The tuples also contain the attribute name (str) of the of parent table to get
- a subtable, and optionally, for lists of subtables (i.e. attributes associated
- with a converter that has a 'repeat'), an index into the list containing the
- given subtable value.
- This method can be useful to traverse trees of otTables.
- """
- for conv in self.getConverters():
- name = conv.name
- value = getattr(self, name, None)
- if value is None:
- continue
- if isinstance(value, BaseTable):
- yield self.SubTableEntry(name, value)
- elif isinstance(value, list):
- yield from (
- self.SubTableEntry(name, v, index=i)
- for i, v in enumerate(value)
- if isinstance(v, BaseTable)
- )
-
- # instance (not @class)method for consistency with FormatSwitchingBaseTable
- def getVariableAttrs(self):
- return getVariableAttrs(self.__class__)
+ """Generic base class for all OpenType (sub)tables."""
+
+ def __getattr__(self, attr):
+ reader = self.__dict__.get("reader")
+ if reader:
+ del self.reader
+ font = self.font
+ del self.font
+ self.decompile(reader, font)
+ return getattr(self, attr)
+
+ raise AttributeError(attr)
+
+ def ensureDecompiled(self, recurse=False):
+ reader = self.__dict__.get("reader")
+ if reader:
+ del self.reader
+ font = self.font
+ del self.font
+ self.decompile(reader, font)
+ if recurse:
+ for subtable in self.iterSubTables():
+ subtable.value.ensureDecompiled(recurse)
+
+ def __getstate__(self):
+ # before copying/pickling 'lazy' objects, make a shallow copy of OTTableReader
+ # https://github.com/fonttools/fonttools/issues/2965
+ if "reader" in self.__dict__:
+ state = self.__dict__.copy()
+ state["reader"] = self.__dict__["reader"].copy()
+ return state
+ return self.__dict__
+
+ @classmethod
+ def getRecordSize(cls, reader):
+ totalSize = 0
+ for conv in cls.converters:
+ size = conv.getRecordSize(reader)
+ if size is NotImplemented:
+ return NotImplemented
+ countValue = 1
+ if conv.repeat:
+ if conv.repeat in reader:
+ countValue = reader[conv.repeat] + conv.aux
+ else:
+ return NotImplemented
+ totalSize += size * countValue
+ return totalSize
+
+ def getConverters(self):
+ return self.converters
+
+ def getConverterByName(self, name):
+ return self.convertersByName[name]
+
+ def populateDefaults(self, propagator=None):
+ for conv in self.getConverters():
+ if conv.repeat:
+ if not hasattr(self, conv.name):
+ setattr(self, conv.name, [])
+ countValue = len(getattr(self, conv.name)) - conv.aux
+ try:
+ count_conv = self.getConverterByName(conv.repeat)
+ setattr(self, conv.repeat, countValue)
+ except KeyError:
+ # conv.repeat is a propagated count
+ if propagator and conv.repeat in propagator:
+ propagator[conv.repeat].setValue(countValue)
+ else:
+ if conv.aux and not eval(conv.aux, None, self.__dict__):
+ continue
+ if hasattr(self, conv.name):
+ continue # Warn if it should NOT be present?!
+ if hasattr(conv, "writeNullOffset"):
+ setattr(self, conv.name, None) # Warn?
+ # elif not conv.isCount:
+ # # Warn?
+ # pass
+ if hasattr(conv, "DEFAULT"):
+ # OptionalValue converters (e.g. VarIndex)
+ setattr(self, conv.name, conv.DEFAULT)
+
+ def decompile(self, reader, font):
+ self.readFormat(reader)
+ table = {}
+ self.__rawTable = table # for debugging
+ for conv in self.getConverters():
+ if conv.name == "SubTable":
+ conv = conv.getConverter(reader.tableTag, table["LookupType"])
+ if conv.name == "ExtSubTable":
+ conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"])
+ if conv.name == "FeatureParams":
+ conv = conv.getConverter(reader["FeatureTag"])
+ if conv.name == "SubStruct":
+ conv = conv.getConverter(reader.tableTag, table["MorphType"])
+ try:
+ if conv.repeat:
+ if isinstance(conv.repeat, int):
+ countValue = conv.repeat
+ elif conv.repeat in table:
+ countValue = table[conv.repeat]
+ else:
+ # conv.repeat is a propagated count
+ countValue = reader[conv.repeat]
+ countValue += conv.aux
+ table[conv.name] = conv.readArray(reader, font, table, countValue)
+ else:
+ if conv.aux and not eval(conv.aux, None, table):
+ continue
+ table[conv.name] = conv.read(reader, font, table)
+ if conv.isPropagated:
+ reader[conv.name] = table[conv.name]
+ except Exception as e:
+ name = conv.name
+ e.args = e.args + (name,)
+ raise
+
+ if hasattr(self, "postRead"):
+ self.postRead(table, font)
+ else:
+ self.__dict__.update(table)
+
+ del self.__rawTable # succeeded, get rid of debugging info
+
+ def compile(self, writer, font):
+ self.ensureDecompiled()
+ # TODO Following hack to be removed by rewriting how FormatSwitching tables
+ # are handled.
+ # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
+ if hasattr(self, "preWrite"):
+ deleteFormat = not hasattr(self, "Format")
+ table = self.preWrite(font)
+ deleteFormat = deleteFormat and hasattr(self, "Format")
+ else:
+ deleteFormat = False
+ table = self.__dict__.copy()
+
+ # some count references may have been initialized in a custom preWrite; we set
+ # these in the writer's state beforehand (instead of sequentially) so they will
+ # be propagated to all nested subtables even if the count appears in the current
+ # table only *after* the offset to the subtable that it is counting.
+ for conv in self.getConverters():
+ if conv.isCount and conv.isPropagated:
+ value = table.get(conv.name)
+ if isinstance(value, CountReference):
+ writer[conv.name] = value
+
+ if hasattr(self, "sortCoverageLast"):
+ writer.sortCoverageLast = 1
+
+ if hasattr(self, "DontShare"):
+ writer.DontShare = True
+
+ if hasattr(self.__class__, "LookupType"):
+ writer["LookupType"].setValue(self.__class__.LookupType)
+
+ self.writeFormat(writer)
+ for conv in self.getConverters():
+ value = table.get(
+ conv.name
+ ) # TODO Handle defaults instead of defaulting to None!
+ if conv.repeat:
+ if value is None:
+ value = []
+ countValue = len(value) - conv.aux
+ if isinstance(conv.repeat, int):
+ assert len(value) == conv.repeat, "expected %d values, got %d" % (
+ conv.repeat,
+ len(value),
+ )
+ elif conv.repeat in table:
+ CountReference(table, conv.repeat, value=countValue)
+ else:
+ # conv.repeat is a propagated count
+ writer[conv.repeat].setValue(countValue)
+ try:
+ conv.writeArray(writer, font, table, value)
+ except Exception as e:
+ e.args = e.args + (conv.name + "[]",)
+ raise
+ elif conv.isCount:
+ # Special-case Count values.
+ # Assumption: a Count field will *always* precede
+ # the actual array(s).
+ # We need a default value, as it may be set later by a nested
+ # table. We will later store it here.
+ # We add a reference: by the time the data is assembled
+ # the Count value will be filled in.
+ # We ignore the current count value since it will be recomputed,
+ # unless it's a CountReference that was already initialized in a custom preWrite.
+ if isinstance(value, CountReference):
+ ref = value
+ ref.size = conv.staticSize
+ writer.writeData(ref)
+ table[conv.name] = ref.getValue()
+ else:
+ ref = writer.writeCountReference(table, conv.name, conv.staticSize)
+ table[conv.name] = None
+ if conv.isPropagated:
+ writer[conv.name] = ref
+ elif conv.isLookupType:
+ # We make sure that subtables have the same lookup type,
+ # and that the type is the same as the one set on the
+ # Lookup object, if any is set.
+ if conv.name not in table:
+ table[conv.name] = None
+ ref = writer.writeCountReference(
+ table, conv.name, conv.staticSize, table[conv.name]
+ )
+ writer["LookupType"] = ref
+ else:
+ if conv.aux and not eval(conv.aux, None, table):
+ continue
+ try:
+ conv.write(writer, font, table, value)
+ except Exception as e:
+ name = value.__class__.__name__ if value is not None else conv.name
+ e.args = e.args + (name,)
+ raise
+ if conv.isPropagated:
+ writer[conv.name] = value
+
+ if deleteFormat:
+ del self.Format
+
+ def readFormat(self, reader):
+ pass
+
+ def writeFormat(self, writer):
+ pass
+
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ tableName = name if name else self.__class__.__name__
+ if attrs is None:
+ attrs = []
+ if hasattr(self, "Format"):
+ attrs = attrs + [("Format", self.Format)]
+ xmlWriter.begintag(tableName, attrs)
+ xmlWriter.newline()
+ self.toXML2(xmlWriter, font)
+ xmlWriter.endtag(tableName)
+ xmlWriter.newline()
+
+ def toXML2(self, xmlWriter, font):
+ # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
+ # This is because in TTX our parent writes our main tag, and in otBase.py we
+ # do it ourselves. I think I'm getting schizophrenic...
+ for conv in self.getConverters():
+ if conv.repeat:
+ value = getattr(self, conv.name, [])
+ for i in range(len(value)):
+ item = value[i]
+ conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)])
+ else:
+ if conv.aux and not eval(conv.aux, None, vars(self)):
+ continue
+ value = getattr(
+ self, conv.name, None
+ ) # TODO Handle defaults instead of defaulting to None!
+ conv.xmlWrite(xmlWriter, font, value, conv.name, [])
+
+ def fromXML(self, name, attrs, content, font):
+ try:
+ conv = self.getConverterByName(name)
+ except KeyError:
+ raise # XXX on KeyError, raise nice error
+ value = conv.xmlRead(attrs, content, font)
+ if conv.repeat:
+ seq = getattr(self, conv.name, None)
+ if seq is None:
+ seq = []
+ setattr(self, conv.name, seq)
+ seq.append(value)
+ else:
+ setattr(self, conv.name, value)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+
+ self.ensureDecompiled()
+ other.ensureDecompiled()
+
+ return self.__dict__ == other.__dict__
+
+ class SubTableEntry(NamedTuple):
+ """See BaseTable.iterSubTables()"""
+
+ name: str
+ value: "BaseTable"
+ index: Optional[int] = None # index into given array, None for single values
+
+ def iterSubTables(self) -> Iterator[SubTableEntry]:
+ """Yield (name, value, index) namedtuples for all subtables of current table.
+
+ A sub-table is an instance of BaseTable (or subclass thereof) that is a child
+ of self, the current parent table.
+ The tuples also contain the attribute name (str) of the of parent table to get
+ a subtable, and optionally, for lists of subtables (i.e. attributes associated
+ with a converter that has a 'repeat'), an index into the list containing the
+ given subtable value.
+ This method can be useful to traverse trees of otTables.
+ """
+ for conv in self.getConverters():
+ name = conv.name
+ value = getattr(self, name, None)
+ if value is None:
+ continue
+ if isinstance(value, BaseTable):
+ yield self.SubTableEntry(name, value)
+ elif isinstance(value, list):
+ yield from (
+ self.SubTableEntry(name, v, index=i)
+ for i, v in enumerate(value)
+ if isinstance(v, BaseTable)
+ )
+
+ # instance (not @class)method for consistency with FormatSwitchingBaseTable
+ def getVariableAttrs(self):
+ return getVariableAttrs(self.__class__)
class FormatSwitchingBaseTable(BaseTable):
- """Minor specialization of BaseTable, for tables that have multiple
- formats, eg. CoverageFormat1 vs. CoverageFormat2."""
+ """Minor specialization of BaseTable, for tables that have multiple
+ formats, eg. CoverageFormat1 vs. CoverageFormat2."""
- @classmethod
- def getRecordSize(cls, reader):
- return NotImplemented
+ @classmethod
+ def getRecordSize(cls, reader):
+ return NotImplemented
- def getConverters(self):
- try:
- fmt = self.Format
- except AttributeError:
- # some FormatSwitchingBaseTables (e.g. Coverage) no longer have 'Format'
- # attribute after fully decompiled, only gain one in preWrite before being
- # recompiled. In the decompiled state, these hand-coded classes defined in
- # otTables.py lose their format-specific nature and gain more high-level
- # attributes that are not tied to converters.
- return []
- return self.converters.get(self.Format, [])
+ def getConverters(self):
+ try:
+ fmt = self.Format
+ except AttributeError:
+ # some FormatSwitchingBaseTables (e.g. Coverage) no longer have 'Format'
+ # attribute after fully decompiled, only gain one in preWrite before being
+ # recompiled. In the decompiled state, these hand-coded classes defined in
+ # otTables.py lose their format-specific nature and gain more high-level
+ # attributes that are not tied to converters.
+ return []
+ return self.converters.get(self.Format, [])
- def getConverterByName(self, name):
- return self.convertersByName[self.Format][name]
+ def getConverterByName(self, name):
+ return self.convertersByName[self.Format][name]
- def readFormat(self, reader):
- self.Format = reader.readUShort()
+ def readFormat(self, reader):
+ self.Format = reader.readUShort()
- def writeFormat(self, writer):
- writer.writeUShort(self.Format)
+ def writeFormat(self, writer):
+ writer.writeUShort(self.Format)
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- BaseTable.toXML(self, xmlWriter, font, attrs, name)
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ BaseTable.toXML(self, xmlWriter, font, attrs, name)
- def getVariableAttrs(self):
- return getVariableAttrs(self.__class__, self.Format)
+ def getVariableAttrs(self):
+ return getVariableAttrs(self.__class__, self.Format)
class UInt8FormatSwitchingBaseTable(FormatSwitchingBaseTable):
- def readFormat(self, reader):
- self.Format = reader.readUInt8()
+ def readFormat(self, reader):
+ self.Format = reader.readUInt8()
- def writeFormat(self, writer):
- writer.writeUInt8(self.Format)
+ def writeFormat(self, writer):
+ writer.writeUInt8(self.Format)
formatSwitchingBaseTables = {
- "uint16": FormatSwitchingBaseTable,
- "uint8": UInt8FormatSwitchingBaseTable,
+ "uint16": FormatSwitchingBaseTable,
+ "uint8": UInt8FormatSwitchingBaseTable,
}
+
def getFormatSwitchingBaseTableClass(formatType):
- try:
- return formatSwitchingBaseTables[formatType]
- except KeyError:
- raise TypeError(f"Unsupported format type: {formatType!r}")
+ try:
+ return formatSwitchingBaseTables[formatType]
+ except KeyError:
+ raise TypeError(f"Unsupported format type: {formatType!r}")
# memoize since these are parsed from otData.py, thus stay constant
@lru_cache()
def getVariableAttrs(cls: BaseTable, fmt: Optional[int] = None) -> Tuple[str]:
- """Return sequence of variable table field names (can be empty).
-
- Attributes are deemed "variable" when their otData.py's description contain
- 'VarIndexBase + {offset}', e.g. COLRv1 PaintVar* tables.
- """
- if not issubclass(cls, BaseTable):
- raise TypeError(cls)
- if issubclass(cls, FormatSwitchingBaseTable):
- if fmt is None:
- raise TypeError(f"'fmt' is required for format-switching {cls.__name__}")
- converters = cls.convertersByName[fmt]
- else:
- converters = cls.convertersByName
- # assume if no 'VarIndexBase' field is present, table has no variable fields
- if "VarIndexBase" not in converters:
- return ()
- varAttrs = {}
- for name, conv in converters.items():
- offset = conv.getVarIndexOffset()
- if offset is not None:
- varAttrs[name] = offset
- return tuple(sorted(varAttrs, key=varAttrs.__getitem__))
+ """Return sequence of variable table field names (can be empty).
+
+ Attributes are deemed "variable" when their otData.py's description contain
+ 'VarIndexBase + {offset}', e.g. COLRv1 PaintVar* tables.
+ """
+ if not issubclass(cls, BaseTable):
+ raise TypeError(cls)
+ if issubclass(cls, FormatSwitchingBaseTable):
+ if fmt is None:
+ raise TypeError(f"'fmt' is required for format-switching {cls.__name__}")
+ converters = cls.convertersByName[fmt]
+ else:
+ converters = cls.convertersByName
+ # assume if no 'VarIndexBase' field is present, table has no variable fields
+ if "VarIndexBase" not in converters:
+ return ()
+ varAttrs = {}
+ for name, conv in converters.items():
+ offset = conv.getVarIndexOffset()
+ if offset is not None:
+ varAttrs[name] = offset
+ return tuple(sorted(varAttrs, key=varAttrs.__getitem__))
#
@@ -1206,163 +1303,166 @@ def getVariableAttrs(cls: BaseTable, fmt: Optional[int] = None) -> Tuple[str]:
#
valueRecordFormat = [
-# Mask Name isDevice signed
- (0x0001, "XPlacement", 0, 1),
- (0x0002, "YPlacement", 0, 1),
- (0x0004, "XAdvance", 0, 1),
- (0x0008, "YAdvance", 0, 1),
- (0x0010, "XPlaDevice", 1, 0),
- (0x0020, "YPlaDevice", 1, 0),
- (0x0040, "XAdvDevice", 1, 0),
- (0x0080, "YAdvDevice", 1, 0),
-# reserved:
- (0x0100, "Reserved1", 0, 0),
- (0x0200, "Reserved2", 0, 0),
- (0x0400, "Reserved3", 0, 0),
- (0x0800, "Reserved4", 0, 0),
- (0x1000, "Reserved5", 0, 0),
- (0x2000, "Reserved6", 0, 0),
- (0x4000, "Reserved7", 0, 0),
- (0x8000, "Reserved8", 0, 0),
+ # Mask Name isDevice signed
+ (0x0001, "XPlacement", 0, 1),
+ (0x0002, "YPlacement", 0, 1),
+ (0x0004, "XAdvance", 0, 1),
+ (0x0008, "YAdvance", 0, 1),
+ (0x0010, "XPlaDevice", 1, 0),
+ (0x0020, "YPlaDevice", 1, 0),
+ (0x0040, "XAdvDevice", 1, 0),
+ (0x0080, "YAdvDevice", 1, 0),
+ # reserved:
+ (0x0100, "Reserved1", 0, 0),
+ (0x0200, "Reserved2", 0, 0),
+ (0x0400, "Reserved3", 0, 0),
+ (0x0800, "Reserved4", 0, 0),
+ (0x1000, "Reserved5", 0, 0),
+ (0x2000, "Reserved6", 0, 0),
+ (0x4000, "Reserved7", 0, 0),
+ (0x8000, "Reserved8", 0, 0),
]
+
def _buildDict():
- d = {}
- for mask, name, isDevice, signed in valueRecordFormat:
- d[name] = mask, isDevice, signed
- return d
+ d = {}
+ for mask, name, isDevice, signed in valueRecordFormat:
+ d[name] = mask, isDevice, signed
+ return d
+
valueRecordFormatDict = _buildDict()
class ValueRecordFactory(object):
- """Given a format code, this object convert ValueRecords."""
-
- def __init__(self, valueFormat):
- format = []
- for mask, name, isDevice, signed in valueRecordFormat:
- if valueFormat & mask:
- format.append((name, isDevice, signed))
- self.format = format
-
- def __len__(self):
- return len(self.format)
-
- def readValueRecord(self, reader, font):
- format = self.format
- if not format:
- return None
- valueRecord = ValueRecord()
- for name, isDevice, signed in format:
- if signed:
- value = reader.readShort()
- else:
- value = reader.readUShort()
- if isDevice:
- if value:
- from . import otTables
- subReader = reader.getSubReader(value)
- value = getattr(otTables, name)()
- value.decompile(subReader, font)
- else:
- value = None
- setattr(valueRecord, name, value)
- return valueRecord
-
- def writeValueRecord(self, writer, font, valueRecord):
- for name, isDevice, signed in self.format:
- value = getattr(valueRecord, name, 0)
- if isDevice:
- if value:
- subWriter = writer.getSubWriter()
- writer.writeSubTable(subWriter)
- value.compile(subWriter, font)
- else:
- writer.writeUShort(0)
- elif signed:
- writer.writeShort(value)
- else:
- writer.writeUShort(value)
+ """Given a format code, this object convert ValueRecords."""
+
+ def __init__(self, valueFormat):
+ format = []
+ for mask, name, isDevice, signed in valueRecordFormat:
+ if valueFormat & mask:
+ format.append((name, isDevice, signed))
+ self.format = format
+
+ def __len__(self):
+ return len(self.format)
+
+ def readValueRecord(self, reader, font):
+ format = self.format
+ if not format:
+ return None
+ valueRecord = ValueRecord()
+ for name, isDevice, signed in format:
+ if signed:
+ value = reader.readShort()
+ else:
+ value = reader.readUShort()
+ if isDevice:
+ if value:
+ from . import otTables
+
+ subReader = reader.getSubReader(value)
+ value = getattr(otTables, name)()
+ value.decompile(subReader, font)
+ else:
+ value = None
+ setattr(valueRecord, name, value)
+ return valueRecord
+
+ def writeValueRecord(self, writer, font, valueRecord):
+ for name, isDevice, signed in self.format:
+ value = getattr(valueRecord, name, 0)
+ if isDevice:
+ if value:
+ subWriter = writer.getSubWriter()
+ writer.writeSubTable(subWriter, offsetSize=2)
+ value.compile(subWriter, font)
+ else:
+ writer.writeUShort(0)
+ elif signed:
+ writer.writeShort(value)
+ else:
+ writer.writeUShort(value)
class ValueRecord(object):
-
- # see ValueRecordFactory
-
- def __init__(self, valueFormat=None, src=None):
- if valueFormat is not None:
- for mask, name, isDevice, signed in valueRecordFormat:
- if valueFormat & mask:
- setattr(self, name, None if isDevice else 0)
- if src is not None:
- for key,val in src.__dict__.items():
- if not hasattr(self, key):
- continue
- setattr(self, key, val)
- elif src is not None:
- self.__dict__ = src.__dict__.copy()
-
- def getFormat(self):
- format = 0
- for name in self.__dict__.keys():
- format = format | valueRecordFormatDict[name][0]
- return format
-
- def getEffectiveFormat(self):
- format = 0
- for name,value in self.__dict__.items():
- if value:
- format = format | valueRecordFormatDict[name][0]
- return format
-
- def toXML(self, xmlWriter, font, valueName, attrs=None):
- if attrs is None:
- simpleItems = []
- else:
- simpleItems = list(attrs)
- for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values
- if hasattr(self, name):
- simpleItems.append((name, getattr(self, name)))
- deviceItems = []
- for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records
- if hasattr(self, name):
- device = getattr(self, name)
- if device is not None:
- deviceItems.append((name, device))
- if deviceItems:
- xmlWriter.begintag(valueName, simpleItems)
- xmlWriter.newline()
- for name, deviceRecord in deviceItems:
- if deviceRecord is not None:
- deviceRecord.toXML(xmlWriter, font, name=name)
- xmlWriter.endtag(valueName)
- xmlWriter.newline()
- else:
- xmlWriter.simpletag(valueName, simpleItems)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- from . import otTables
- for k, v in attrs.items():
- setattr(self, k, int(v))
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- value = getattr(otTables, name)()
- for elem2 in content:
- if not isinstance(elem2, tuple):
- continue
- name2, attrs2, content2 = elem2
- value.fromXML(name2, attrs2, content2, font)
- setattr(self, name, value)
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
+ # see ValueRecordFactory
+
+ def __init__(self, valueFormat=None, src=None):
+ if valueFormat is not None:
+ for mask, name, isDevice, signed in valueRecordFormat:
+ if valueFormat & mask:
+ setattr(self, name, None if isDevice else 0)
+ if src is not None:
+ for key, val in src.__dict__.items():
+ if not hasattr(self, key):
+ continue
+ setattr(self, key, val)
+ elif src is not None:
+ self.__dict__ = src.__dict__.copy()
+
+ def getFormat(self):
+ format = 0
+ for name in self.__dict__.keys():
+ format = format | valueRecordFormatDict[name][0]
+ return format
+
+ def getEffectiveFormat(self):
+ format = 0
+ for name, value in self.__dict__.items():
+ if value:
+ format = format | valueRecordFormatDict[name][0]
+ return format
+
+ def toXML(self, xmlWriter, font, valueName, attrs=None):
+ if attrs is None:
+ simpleItems = []
+ else:
+ simpleItems = list(attrs)
+ for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values
+ if hasattr(self, name):
+ simpleItems.append((name, getattr(self, name)))
+ deviceItems = []
+ for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records
+ if hasattr(self, name):
+ device = getattr(self, name)
+ if device is not None:
+ deviceItems.append((name, device))
+ if deviceItems:
+ xmlWriter.begintag(valueName, simpleItems)
+ xmlWriter.newline()
+ for name, deviceRecord in deviceItems:
+ if deviceRecord is not None:
+ deviceRecord.toXML(xmlWriter, font, name=name)
+ xmlWriter.endtag(valueName)
+ xmlWriter.newline()
+ else:
+ xmlWriter.simpletag(valueName, simpleItems)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ from . import otTables
+
+ for k, v in attrs.items():
+ setattr(self, k, int(v))
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ value = getattr(otTables, name)()
+ for elem2 in content:
+ if not isinstance(elem2, tuple):
+ continue
+ name2, attrs2, content2 = elem2
+ value.fromXML(name2, attrs2, content2, font)
+ setattr(self, name, value)
+
+ def __ne__(self, other):
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
diff --git a/Lib/fontTools/ttLib/tables/otConverters.py b/Lib/fontTools/ttLib/tables/otConverters.py
index b08f1f19..390f1660 100644
--- a/Lib/fontTools/ttLib/tables/otConverters.py
+++ b/Lib/fontTools/ttLib/tables/otConverters.py
@@ -1,22 +1,34 @@
from fontTools.misc.fixedTools import (
- fixedToFloat as fi2fl,
- floatToFixed as fl2fi,
- floatToFixedToStr as fl2str,
- strToFixedToFloat as str2fl,
- ensureVersionIsLong as fi2ve,
- versionToFixed as ve2fi,
+ fixedToFloat as fi2fl,
+ floatToFixed as fl2fi,
+ floatToFixedToStr as fl2str,
+ strToFixedToFloat as str2fl,
+ ensureVersionIsLong as fi2ve,
+ versionToFixed as ve2fi,
)
from fontTools.misc.roundTools import nearestMultipleShortestRepr, otRound
from fontTools.misc.textTools import bytesjoin, tobytes, tostr, pad, safeEval
from fontTools.ttLib import getSearchRange
-from .otBase import (CountReference, FormatSwitchingBaseTable,
- OTTableReader, OTTableWriter, ValueRecordFactory)
-from .otTables import (lookupTypes, AATStateTable, AATState, AATAction,
- ContextualMorphAction, LigatureMorphAction,
- InsertionMorphAction, MorxSubtable,
- ExtendMode as _ExtendMode,
- CompositeMode as _CompositeMode,
- NO_VARIATION_INDEX)
+from .otBase import (
+ CountReference,
+ FormatSwitchingBaseTable,
+ OTTableReader,
+ OTTableWriter,
+ ValueRecordFactory,
+)
+from .otTables import (
+ lookupTypes,
+ AATStateTable,
+ AATState,
+ AATAction,
+ ContextualMorphAction,
+ LigatureMorphAction,
+ InsertionMorphAction,
+ MorxSubtable,
+ ExtendMode as _ExtendMode,
+ CompositeMode as _CompositeMode,
+ NO_VARIATION_INDEX,
+)
from itertools import zip_longest
from functools import partial
import re
@@ -30,947 +42,1059 @@ istuple = lambda t: isinstance(t, tuple)
def buildConverters(tableSpec, tableNamespace):
- """Given a table spec from otData.py, build a converter object for each
- field of the table. This is called for each table in otData.py, and
- the results are assigned to the corresponding class in otTables.py."""
- converters = []
- convertersByName = {}
- for tp, name, repeat, aux, descr in tableSpec:
- tableName = name
- if name.startswith("ValueFormat"):
- assert tp == "uint16"
- converterClass = ValueFormat
- elif name.endswith("Count") or name in ("StructLength", "MorphType"):
- converterClass = {
- "uint8": ComputedUInt8,
- "uint16": ComputedUShort,
- "uint32": ComputedULong,
- }[tp]
- elif name == "SubTable":
- converterClass = SubTable
- elif name == "ExtSubTable":
- converterClass = ExtSubTable
- elif name == "SubStruct":
- converterClass = SubStruct
- elif name == "FeatureParams":
- converterClass = FeatureParams
- elif name in ("CIDGlyphMapping", "GlyphCIDMapping"):
- converterClass = StructWithLength
- else:
- if not tp in converterMapping and '(' not in tp:
- tableName = tp
- converterClass = Struct
- else:
- converterClass = eval(tp, tableNamespace, converterMapping)
-
- conv = converterClass(name, repeat, aux, description=descr)
-
- if conv.tableClass:
- # A "template" such as OffsetTo(AType) knowss the table class already
- tableClass = conv.tableClass
- elif tp in ('MortChain', 'MortSubtable', 'MorxChain'):
- tableClass = tableNamespace.get(tp)
- else:
- tableClass = tableNamespace.get(tableName)
-
- if not conv.tableClass:
- conv.tableClass = tableClass
-
- if name in ["SubTable", "ExtSubTable", "SubStruct"]:
- conv.lookupTypes = tableNamespace['lookupTypes']
- # also create reverse mapping
- for t in conv.lookupTypes.values():
- for cls in t.values():
- convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
- if name == "FeatureParams":
- conv.featureParamTypes = tableNamespace['featureParamTypes']
- conv.defaultFeatureParams = tableNamespace['FeatureParams']
- for cls in conv.featureParamTypes.values():
- convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
- converters.append(conv)
- assert name not in convertersByName, name
- convertersByName[name] = conv
- return converters, convertersByName
+ """Given a table spec from otData.py, build a converter object for each
+ field of the table. This is called for each table in otData.py, and
+ the results are assigned to the corresponding class in otTables.py."""
+ converters = []
+ convertersByName = {}
+ for tp, name, repeat, aux, descr in tableSpec:
+ tableName = name
+ if name.startswith("ValueFormat"):
+ assert tp == "uint16"
+ converterClass = ValueFormat
+ elif name.endswith("Count") or name in ("StructLength", "MorphType"):
+ converterClass = {
+ "uint8": ComputedUInt8,
+ "uint16": ComputedUShort,
+ "uint32": ComputedULong,
+ }[tp]
+ elif name == "SubTable":
+ converterClass = SubTable
+ elif name == "ExtSubTable":
+ converterClass = ExtSubTable
+ elif name == "SubStruct":
+ converterClass = SubStruct
+ elif name == "FeatureParams":
+ converterClass = FeatureParams
+ elif name in ("CIDGlyphMapping", "GlyphCIDMapping"):
+ converterClass = StructWithLength
+ else:
+ if not tp in converterMapping and "(" not in tp:
+ tableName = tp
+ converterClass = Struct
+ else:
+ converterClass = eval(tp, tableNamespace, converterMapping)
+
+ conv = converterClass(name, repeat, aux, description=descr)
+
+ if conv.tableClass:
+ # A "template" such as OffsetTo(AType) knowss the table class already
+ tableClass = conv.tableClass
+ elif tp in ("MortChain", "MortSubtable", "MorxChain"):
+ tableClass = tableNamespace.get(tp)
+ else:
+ tableClass = tableNamespace.get(tableName)
+
+ if not conv.tableClass:
+ conv.tableClass = tableClass
+
+ if name in ["SubTable", "ExtSubTable", "SubStruct"]:
+ conv.lookupTypes = tableNamespace["lookupTypes"]
+ # also create reverse mapping
+ for t in conv.lookupTypes.values():
+ for cls in t.values():
+ convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
+ if name == "FeatureParams":
+ conv.featureParamTypes = tableNamespace["featureParamTypes"]
+ conv.defaultFeatureParams = tableNamespace["FeatureParams"]
+ for cls in conv.featureParamTypes.values():
+ convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
+ converters.append(conv)
+ assert name not in convertersByName, name
+ convertersByName[name] = conv
+ return converters, convertersByName
class _MissingItem(tuple):
- __slots__ = ()
+ __slots__ = ()
try:
- from collections import UserList
+ from collections import UserList
except ImportError:
- from UserList import UserList
+ from UserList import UserList
class _LazyList(UserList):
-
- def __getslice__(self, i, j):
- return self.__getitem__(slice(i, j))
-
- def __getitem__(self, k):
- if isinstance(k, slice):
- indices = range(*k.indices(len(self)))
- return [self[i] for i in indices]
- item = self.data[k]
- if isinstance(item, _MissingItem):
- self.reader.seek(self.pos + item[0] * self.recordSize)
- item = self.conv.read(self.reader, self.font, {})
- self.data[k] = item
- return item
-
- def __add__(self, other):
- if isinstance(other, _LazyList):
- other = list(other)
- elif isinstance(other, list):
- pass
- else:
- return NotImplemented
- return list(self) + other
-
- def __radd__(self, other):
- if not isinstance(other, list):
- return NotImplemented
- return other + list(self)
+ def __getslice__(self, i, j):
+ return self.__getitem__(slice(i, j))
+
+ def __getitem__(self, k):
+ if isinstance(k, slice):
+ indices = range(*k.indices(len(self)))
+ return [self[i] for i in indices]
+ item = self.data[k]
+ if isinstance(item, _MissingItem):
+ self.reader.seek(self.pos + item[0] * self.recordSize)
+ item = self.conv.read(self.reader, self.font, {})
+ self.data[k] = item
+ return item
+
+ def __add__(self, other):
+ if isinstance(other, _LazyList):
+ other = list(other)
+ elif isinstance(other, list):
+ pass
+ else:
+ return NotImplemented
+ return list(self) + other
+
+ def __radd__(self, other):
+ if not isinstance(other, list):
+ return NotImplemented
+ return other + list(self)
class BaseConverter(object):
- """Base class for converter objects. Apart from the constructor, this
- is an abstract class."""
-
- def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
- self.name = name
- self.repeat = repeat
- self.aux = aux
- self.tableClass = tableClass
- self.isCount = name.endswith("Count") or name in ['DesignAxisRecordSize', 'ValueRecordSize']
- self.isLookupType = name.endswith("LookupType") or name == "MorphType"
- self.isPropagated = name in [
- "ClassCount",
- "Class2Count",
- "FeatureTag",
- "SettingsCount",
- "VarRegionCount",
- "MappingCount",
- "RegionAxisCount",
- "DesignAxisCount",
- "DesignAxisRecordSize",
- "AxisValueCount",
- "ValueRecordSize",
- "AxisCount",
- "BaseGlyphRecordCount",
- "LayerRecordCount",
- ]
- self.description = description
-
- def readArray(self, reader, font, tableDict, count):
- """Read an array of values from the reader."""
- lazy = font.lazy and count > 8
- if lazy:
- recordSize = self.getRecordSize(reader)
- if recordSize is NotImplemented:
- lazy = False
- if not lazy:
- l = []
- for i in range(count):
- l.append(self.read(reader, font, tableDict))
- return l
- else:
- l = _LazyList()
- l.reader = reader.copy()
- l.pos = l.reader.pos
- l.font = font
- l.conv = self
- l.recordSize = recordSize
- l.extend(_MissingItem([i]) for i in range(count))
- reader.advance(count * recordSize)
- return l
-
- def getRecordSize(self, reader):
- if hasattr(self, 'staticSize'): return self.staticSize
- return NotImplemented
-
- def read(self, reader, font, tableDict):
- """Read a value from the reader."""
- raise NotImplementedError(self)
-
- def writeArray(self, writer, font, tableDict, values):
- try:
- for i, value in enumerate(values):
- self.write(writer, font, tableDict, value, i)
- except Exception as e:
- e.args = e.args + (i,)
- raise
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- """Write a value to the writer."""
- raise NotImplementedError(self)
-
- def xmlRead(self, attrs, content, font):
- """Read a value from XML."""
- raise NotImplementedError(self)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- """Write a value to XML."""
- raise NotImplementedError(self)
-
- varIndexBasePlusOffsetRE = re.compile(r"VarIndexBase\s*\+\s*(\d+)")
-
- def getVarIndexOffset(self) -> Optional[int]:
- """If description has `VarIndexBase + {offset}`, return the offset else None."""
- m = self.varIndexBasePlusOffsetRE.search(self.description)
- if not m:
- return None
- return int(m.group(1))
+ """Base class for converter objects. Apart from the constructor, this
+ is an abstract class."""
+
+ def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
+ self.name = name
+ self.repeat = repeat
+ self.aux = aux
+ self.tableClass = tableClass
+ self.isCount = name.endswith("Count") or name in [
+ "DesignAxisRecordSize",
+ "ValueRecordSize",
+ ]
+ self.isLookupType = name.endswith("LookupType") or name == "MorphType"
+ self.isPropagated = name in [
+ "ClassCount",
+ "Class2Count",
+ "FeatureTag",
+ "SettingsCount",
+ "VarRegionCount",
+ "MappingCount",
+ "RegionAxisCount",
+ "DesignAxisCount",
+ "DesignAxisRecordSize",
+ "AxisValueCount",
+ "ValueRecordSize",
+ "AxisCount",
+ "BaseGlyphRecordCount",
+ "LayerRecordCount",
+ ]
+ self.description = description
+
+ def readArray(self, reader, font, tableDict, count):
+ """Read an array of values from the reader."""
+ lazy = font.lazy and count > 8
+ if lazy:
+ recordSize = self.getRecordSize(reader)
+ if recordSize is NotImplemented:
+ lazy = False
+ if not lazy:
+ l = []
+ for i in range(count):
+ l.append(self.read(reader, font, tableDict))
+ return l
+ else:
+ l = _LazyList()
+ l.reader = reader.copy()
+ l.pos = l.reader.pos
+ l.font = font
+ l.conv = self
+ l.recordSize = recordSize
+ l.extend(_MissingItem([i]) for i in range(count))
+ reader.advance(count * recordSize)
+ return l
+
+ def getRecordSize(self, reader):
+ if hasattr(self, "staticSize"):
+ return self.staticSize
+ return NotImplemented
+
+ def read(self, reader, font, tableDict):
+ """Read a value from the reader."""
+ raise NotImplementedError(self)
+
+ def writeArray(self, writer, font, tableDict, values):
+ try:
+ for i, value in enumerate(values):
+ self.write(writer, font, tableDict, value, i)
+ except Exception as e:
+ e.args = e.args + (i,)
+ raise
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ """Write a value to the writer."""
+ raise NotImplementedError(self)
+
+ def xmlRead(self, attrs, content, font):
+ """Read a value from XML."""
+ raise NotImplementedError(self)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ """Write a value to XML."""
+ raise NotImplementedError(self)
+
+ varIndexBasePlusOffsetRE = re.compile(r"VarIndexBase\s*\+\s*(\d+)")
+
+ def getVarIndexOffset(self) -> Optional[int]:
+ """If description has `VarIndexBase + {offset}`, return the offset else None."""
+ m = self.varIndexBasePlusOffsetRE.search(self.description)
+ if not m:
+ return None
+ return int(m.group(1))
class SimpleValue(BaseConverter):
- @staticmethod
- def toString(value):
- return value
- @staticmethod
- def fromString(value):
- return value
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", self.toString(value))])
- xmlWriter.newline()
- def xmlRead(self, attrs, content, font):
- return self.fromString(attrs["value"])
+ @staticmethod
+ def toString(value):
+ return value
+
+ @staticmethod
+ def fromString(value):
+ return value
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", self.toString(value))])
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ return self.fromString(attrs["value"])
+
class OptionalValue(SimpleValue):
- DEFAULT = None
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value != self.DEFAULT:
- attrs.append(("value", self.toString(value)))
- xmlWriter.simpletag(name, attrs)
- xmlWriter.newline()
- def xmlRead(self, attrs, content, font):
- if "value" in attrs:
- return self.fromString(attrs["value"])
- return self.DEFAULT
+ DEFAULT = None
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value != self.DEFAULT:
+ attrs.append(("value", self.toString(value)))
+ xmlWriter.simpletag(name, attrs)
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ if "value" in attrs:
+ return self.fromString(attrs["value"])
+ return self.DEFAULT
+
class IntValue(SimpleValue):
- @staticmethod
- def fromString(value):
- return int(value, 0)
+ @staticmethod
+ def fromString(value):
+ return int(value, 0)
+
class Long(IntValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- return reader.readLong()
- def readArray(self, reader, font, tableDict, count):
- return reader.readLongArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeLong(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeLongArray(values)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ return reader.readLong()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readLongArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeLong(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeLongArray(values)
+
class ULong(IntValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- return reader.readULong()
- def readArray(self, reader, font, tableDict, count):
- return reader.readULongArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeULong(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeULongArray(values)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ return reader.readULong()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readULongArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeULong(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeULongArray(values)
+
class Flags32(ULong):
- @staticmethod
- def toString(value):
- return "0x%08X" % value
+ @staticmethod
+ def toString(value):
+ return "0x%08X" % value
+
class VarIndex(OptionalValue, ULong):
- DEFAULT = NO_VARIATION_INDEX
+ DEFAULT = NO_VARIATION_INDEX
+
class Short(IntValue):
- staticSize = 2
- def read(self, reader, font, tableDict):
- return reader.readShort()
- def readArray(self, reader, font, tableDict, count):
- return reader.readShortArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeShort(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeShortArray(values)
+ staticSize = 2
+
+ def read(self, reader, font, tableDict):
+ return reader.readShort()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readShortArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeShort(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeShortArray(values)
+
class UShort(IntValue):
- staticSize = 2
- def read(self, reader, font, tableDict):
- return reader.readUShort()
- def readArray(self, reader, font, tableDict, count):
- return reader.readUShortArray(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUShort(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeUShortArray(values)
+ staticSize = 2
+
+ def read(self, reader, font, tableDict):
+ return reader.readUShort()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readUShortArray(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUShort(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeUShortArray(values)
+
class Int8(IntValue):
- staticSize = 1
- def read(self, reader, font, tableDict):
- return reader.readInt8()
- def readArray(self, reader, font, tableDict, count):
- return reader.readInt8Array(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeInt8(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeInt8Array(values)
+ staticSize = 1
+
+ def read(self, reader, font, tableDict):
+ return reader.readInt8()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readInt8Array(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeInt8(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeInt8Array(values)
+
class UInt8(IntValue):
- staticSize = 1
- def read(self, reader, font, tableDict):
- return reader.readUInt8()
- def readArray(self, reader, font, tableDict, count):
- return reader.readUInt8Array(count)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUInt8(value)
- def writeArray(self, writer, font, tableDict, values):
- writer.writeUInt8Array(values)
+ staticSize = 1
+
+ def read(self, reader, font, tableDict):
+ return reader.readUInt8()
+
+ def readArray(self, reader, font, tableDict, count):
+ return reader.readUInt8Array(count)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUInt8(value)
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeUInt8Array(values)
+
class UInt24(IntValue):
- staticSize = 3
- def read(self, reader, font, tableDict):
- return reader.readUInt24()
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUInt24(value)
+ staticSize = 3
+
+ def read(self, reader, font, tableDict):
+ return reader.readUInt24()
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUInt24(value)
+
class ComputedInt(IntValue):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value is not None:
- xmlWriter.comment("%s=%s" % (name, value))
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value is not None:
+ xmlWriter.comment("%s=%s" % (name, value))
+ xmlWriter.newline()
+
class ComputedUInt8(ComputedInt, UInt8):
- pass
+ pass
+
+
class ComputedUShort(ComputedInt, UShort):
- pass
+ pass
+
+
class ComputedULong(ComputedInt, ULong):
- pass
+ pass
+
class Tag(SimpleValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- return reader.readTag()
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeTag(value)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ return reader.readTag()
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeTag(value)
+
class GlyphID(SimpleValue):
- staticSize = 2
- typecode = "H"
- def readArray(self, reader, font, tableDict, count):
- return font.getGlyphNameMany(reader.readArray(self.typecode, self.staticSize, count))
- def read(self, reader, font, tableDict):
- return font.getGlyphName(reader.readValue(self.typecode, self.staticSize))
- def writeArray(self, writer, font, tableDict, values):
- writer.writeArray(self.typecode, font.getGlyphIDMany(values))
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeValue(self.typecode, font.getGlyphID(value))
+ staticSize = 2
+ typecode = "H"
+
+ def readArray(self, reader, font, tableDict, count):
+ return font.getGlyphNameMany(
+ reader.readArray(self.typecode, self.staticSize, count)
+ )
+
+ def read(self, reader, font, tableDict):
+ return font.getGlyphName(reader.readValue(self.typecode, self.staticSize))
+
+ def writeArray(self, writer, font, tableDict, values):
+ writer.writeArray(self.typecode, font.getGlyphIDMany(values))
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeValue(self.typecode, font.getGlyphID(value))
class GlyphID32(GlyphID):
- staticSize = 4
- typecode = "L"
+ staticSize = 4
+ typecode = "L"
class NameID(UShort):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- if font and value:
- nameTable = font.get("name")
- if nameTable:
- name = nameTable.getDebugName(value)
- xmlWriter.write(" ")
- if name:
- xmlWriter.comment(name)
- else:
- xmlWriter.comment("missing from name table")
- log.warning("name id %d missing from name table" % value)
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ if font and value:
+ nameTable = font.get("name")
+ if nameTable:
+ name = nameTable.getDebugName(value)
+ xmlWriter.write(" ")
+ if name:
+ xmlWriter.comment(name)
+ else:
+ xmlWriter.comment("missing from name table")
+ log.warning("name id %d missing from name table" % value)
+ xmlWriter.newline()
+
class STATFlags(UShort):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- flags = []
- if value & 0x01:
- flags.append("OlderSiblingFontAttribute")
- if value & 0x02:
- flags.append("ElidableAxisValueName")
- if flags:
- xmlWriter.write(" ")
- xmlWriter.comment(" ".join(flags))
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ flags = []
+ if value & 0x01:
+ flags.append("OlderSiblingFontAttribute")
+ if value & 0x02:
+ flags.append("ElidableAxisValueName")
+ if flags:
+ xmlWriter.write(" ")
+ xmlWriter.comment(" ".join(flags))
+ xmlWriter.newline()
+
class FloatValue(SimpleValue):
- @staticmethod
- def fromString(value):
- return float(value)
+ @staticmethod
+ def fromString(value):
+ return float(value)
+
class DeciPoints(FloatValue):
- staticSize = 2
- def read(self, reader, font, tableDict):
- return reader.readUShort() / 10
+ staticSize = 2
+
+ def read(self, reader, font, tableDict):
+ return reader.readUShort() / 10
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.writeUShort(round(value * 10))
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.writeUShort(round(value * 10))
class BaseFixedValue(FloatValue):
- staticSize = NotImplemented
- precisionBits = NotImplemented
- readerMethod = NotImplemented
- writerMethod = NotImplemented
- def read(self, reader, font, tableDict):
- return self.fromInt(getattr(reader, self.readerMethod)())
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- getattr(writer, self.writerMethod)(self.toInt(value))
- @classmethod
- def fromInt(cls, value):
- return fi2fl(value, cls.precisionBits)
- @classmethod
- def toInt(cls, value):
- return fl2fi(value, cls.precisionBits)
- @classmethod
- def fromString(cls, value):
- return str2fl(value, cls.precisionBits)
- @classmethod
- def toString(cls, value):
- return fl2str(value, cls.precisionBits)
+ staticSize = NotImplemented
+ precisionBits = NotImplemented
+ readerMethod = NotImplemented
+ writerMethod = NotImplemented
+
+ def read(self, reader, font, tableDict):
+ return self.fromInt(getattr(reader, self.readerMethod)())
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ getattr(writer, self.writerMethod)(self.toInt(value))
+
+ @classmethod
+ def fromInt(cls, value):
+ return fi2fl(value, cls.precisionBits)
+
+ @classmethod
+ def toInt(cls, value):
+ return fl2fi(value, cls.precisionBits)
+
+ @classmethod
+ def fromString(cls, value):
+ return str2fl(value, cls.precisionBits)
+
+ @classmethod
+ def toString(cls, value):
+ return fl2str(value, cls.precisionBits)
+
class Fixed(BaseFixedValue):
- staticSize = 4
- precisionBits = 16
- readerMethod = "readLong"
- writerMethod = "writeLong"
+ staticSize = 4
+ precisionBits = 16
+ readerMethod = "readLong"
+ writerMethod = "writeLong"
+
class F2Dot14(BaseFixedValue):
- staticSize = 2
- precisionBits = 14
- readerMethod = "readShort"
- writerMethod = "writeShort"
+ staticSize = 2
+ precisionBits = 14
+ readerMethod = "readShort"
+ writerMethod = "writeShort"
+
class Angle(F2Dot14):
- # angles are specified in degrees, and encoded as F2Dot14 fractions of half
- # circle: e.g. 1.0 => 180, -0.5 => -90, -2.0 => -360, etc.
- bias = 0.0
- factor = 1.0/(1<<14) * 180 # 0.010986328125
- @classmethod
- def fromInt(cls, value):
- return (super().fromInt(value) + cls.bias) * 180
- @classmethod
- def toInt(cls, value):
- return super().toInt((value / 180) - cls.bias)
- @classmethod
- def fromString(cls, value):
- # quantize to nearest multiples of minimum fixed-precision angle
- return otRound(float(value) / cls.factor) * cls.factor
- @classmethod
- def toString(cls, value):
- return nearestMultipleShortestRepr(value, cls.factor)
+ # angles are specified in degrees, and encoded as F2Dot14 fractions of half
+ # circle: e.g. 1.0 => 180, -0.5 => -90, -2.0 => -360, etc.
+ bias = 0.0
+ factor = 1.0 / (1 << 14) * 180 # 0.010986328125
+
+ @classmethod
+ def fromInt(cls, value):
+ return (super().fromInt(value) + cls.bias) * 180
+
+ @classmethod
+ def toInt(cls, value):
+ return super().toInt((value / 180) - cls.bias)
+
+ @classmethod
+ def fromString(cls, value):
+ # quantize to nearest multiples of minimum fixed-precision angle
+ return otRound(float(value) / cls.factor) * cls.factor
+
+ @classmethod
+ def toString(cls, value):
+ return nearestMultipleShortestRepr(value, cls.factor)
+
class BiasedAngle(Angle):
- # A bias of 1.0 is used in the representation of start and end angles
- # of COLRv1 PaintSweepGradients to allow for encoding +360deg
- bias = 1.0
+ # A bias of 1.0 is used in the representation of start and end angles
+ # of COLRv1 PaintSweepGradients to allow for encoding +360deg
+ bias = 1.0
+
class Version(SimpleValue):
- staticSize = 4
- def read(self, reader, font, tableDict):
- value = reader.readLong()
- assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
- return value
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- value = fi2ve(value)
- assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
- writer.writeLong(value)
- @staticmethod
- def fromString(value):
- return ve2fi(value)
- @staticmethod
- def toString(value):
- return "0x%08x" % value
- @staticmethod
- def fromFloat(v):
- return fl2fi(v, 16)
+ staticSize = 4
+
+ def read(self, reader, font, tableDict):
+ value = reader.readLong()
+ return value
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ value = fi2ve(value)
+ writer.writeLong(value)
+
+ @staticmethod
+ def fromString(value):
+ return ve2fi(value)
+
+ @staticmethod
+ def toString(value):
+ return "0x%08x" % value
+
+ @staticmethod
+ def fromFloat(v):
+ return fl2fi(v, 16)
class Char64(SimpleValue):
- """An ASCII string with up to 64 characters.
-
- Unused character positions are filled with 0x00 bytes.
- Used in Apple AAT fonts in the `gcid` table.
- """
- staticSize = 64
-
- def read(self, reader, font, tableDict):
- data = reader.readData(self.staticSize)
- zeroPos = data.find(b"\0")
- if zeroPos >= 0:
- data = data[:zeroPos]
- s = tostr(data, encoding="ascii", errors="replace")
- if s != tostr(data, encoding="ascii", errors="ignore"):
- log.warning('replaced non-ASCII characters in "%s"' %
- s)
- return s
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- data = tobytes(value, encoding="ascii", errors="replace")
- if data != tobytes(value, encoding="ascii", errors="ignore"):
- log.warning('replacing non-ASCII characters in "%s"' %
- value)
- if len(data) > self.staticSize:
- log.warning('truncating overlong "%s" to %d bytes' %
- (value, self.staticSize))
- data = (data + b"\0" * self.staticSize)[:self.staticSize]
- writer.writeData(data)
+ """An ASCII string with up to 64 characters.
+
+ Unused character positions are filled with 0x00 bytes.
+ Used in Apple AAT fonts in the `gcid` table.
+ """
+
+ staticSize = 64
+
+ def read(self, reader, font, tableDict):
+ data = reader.readData(self.staticSize)
+ zeroPos = data.find(b"\0")
+ if zeroPos >= 0:
+ data = data[:zeroPos]
+ s = tostr(data, encoding="ascii", errors="replace")
+ if s != tostr(data, encoding="ascii", errors="ignore"):
+ log.warning('replaced non-ASCII characters in "%s"' % s)
+ return s
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ data = tobytes(value, encoding="ascii", errors="replace")
+ if data != tobytes(value, encoding="ascii", errors="ignore"):
+ log.warning('replacing non-ASCII characters in "%s"' % value)
+ if len(data) > self.staticSize:
+ log.warning(
+ 'truncating overlong "%s" to %d bytes' % (value, self.staticSize)
+ )
+ data = (data + b"\0" * self.staticSize)[: self.staticSize]
+ writer.writeData(data)
class Struct(BaseConverter):
-
- def getRecordSize(self, reader):
- return self.tableClass and self.tableClass.getRecordSize(reader)
-
- def read(self, reader, font, tableDict):
- table = self.tableClass()
- table.decompile(reader, font)
- return table
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- value.compile(writer, font)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value is None:
- if attrs:
- # If there are attributes (probably index), then
- # don't drop this even if it's NULL. It will mess
- # up the array indices of the containing element.
- xmlWriter.simpletag(name, attrs + [("empty", 1)])
- xmlWriter.newline()
- else:
- pass # NULL table, ignore
- else:
- value.toXML(xmlWriter, font, attrs, name=name)
-
- def xmlRead(self, attrs, content, font):
- if "empty" in attrs and safeEval(attrs["empty"]):
- return None
- table = self.tableClass()
- Format = attrs.get("Format")
- if Format is not None:
- table.Format = int(Format)
-
- noPostRead = not hasattr(table, 'postRead')
- if noPostRead:
- # TODO Cache table.hasPropagated.
- cleanPropagation = False
- for conv in table.getConverters():
- if conv.isPropagated:
- cleanPropagation = True
- if not hasattr(font, '_propagator'):
- font._propagator = {}
- propagator = font._propagator
- assert conv.name not in propagator, (conv.name, propagator)
- setattr(table, conv.name, None)
- propagator[conv.name] = CountReference(table.__dict__, conv.name)
-
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- table.fromXML(name, attrs, content, font)
- else:
- pass
-
- table.populateDefaults(propagator=getattr(font, '_propagator', None))
-
- if noPostRead:
- if cleanPropagation:
- for conv in table.getConverters():
- if conv.isPropagated:
- propagator = font._propagator
- del propagator[conv.name]
- if not propagator:
- del font._propagator
-
- return table
-
- def __repr__(self):
- return "Struct of " + repr(self.tableClass)
+ def getRecordSize(self, reader):
+ return self.tableClass and self.tableClass.getRecordSize(reader)
+
+ def read(self, reader, font, tableDict):
+ table = self.tableClass()
+ table.decompile(reader, font)
+ return table
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ value.compile(writer, font)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value is None:
+ if attrs:
+ # If there are attributes (probably index), then
+ # don't drop this even if it's NULL. It will mess
+ # up the array indices of the containing element.
+ xmlWriter.simpletag(name, attrs + [("empty", 1)])
+ xmlWriter.newline()
+ else:
+ pass # NULL table, ignore
+ else:
+ value.toXML(xmlWriter, font, attrs, name=name)
+
+ def xmlRead(self, attrs, content, font):
+ if "empty" in attrs and safeEval(attrs["empty"]):
+ return None
+ table = self.tableClass()
+ Format = attrs.get("Format")
+ if Format is not None:
+ table.Format = int(Format)
+
+ noPostRead = not hasattr(table, "postRead")
+ if noPostRead:
+ # TODO Cache table.hasPropagated.
+ cleanPropagation = False
+ for conv in table.getConverters():
+ if conv.isPropagated:
+ cleanPropagation = True
+ if not hasattr(font, "_propagator"):
+ font._propagator = {}
+ propagator = font._propagator
+ assert conv.name not in propagator, (conv.name, propagator)
+ setattr(table, conv.name, None)
+ propagator[conv.name] = CountReference(table.__dict__, conv.name)
+
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ table.fromXML(name, attrs, content, font)
+ else:
+ pass
+
+ table.populateDefaults(propagator=getattr(font, "_propagator", None))
+
+ if noPostRead:
+ if cleanPropagation:
+ for conv in table.getConverters():
+ if conv.isPropagated:
+ propagator = font._propagator
+ del propagator[conv.name]
+ if not propagator:
+ del font._propagator
+
+ return table
+
+ def __repr__(self):
+ return "Struct of " + repr(self.tableClass)
class StructWithLength(Struct):
- def read(self, reader, font, tableDict):
- pos = reader.pos
- table = self.tableClass()
- table.decompile(reader, font)
- reader.seek(pos + table.StructLength)
- return table
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- for convIndex, conv in enumerate(value.getConverters()):
- if conv.name == "StructLength":
- break
- lengthIndex = len(writer.items) + convIndex
- if isinstance(value, FormatSwitchingBaseTable):
- lengthIndex += 1 # implicit Format field
- deadbeef = {1:0xDE, 2:0xDEAD, 4:0xDEADBEEF}[conv.staticSize]
-
- before = writer.getDataLength()
- value.StructLength = deadbeef
- value.compile(writer, font)
- length = writer.getDataLength() - before
- lengthWriter = writer.getSubWriter()
- conv.write(lengthWriter, font, tableDict, length)
- assert(writer.items[lengthIndex] ==
- b"\xde\xad\xbe\xef"[:conv.staticSize])
- writer.items[lengthIndex] = lengthWriter.getAllData()
+ def read(self, reader, font, tableDict):
+ pos = reader.pos
+ table = self.tableClass()
+ table.decompile(reader, font)
+ reader.seek(pos + table.StructLength)
+ return table
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ for convIndex, conv in enumerate(value.getConverters()):
+ if conv.name == "StructLength":
+ break
+ lengthIndex = len(writer.items) + convIndex
+ if isinstance(value, FormatSwitchingBaseTable):
+ lengthIndex += 1 # implicit Format field
+ deadbeef = {1: 0xDE, 2: 0xDEAD, 4: 0xDEADBEEF}[conv.staticSize]
+
+ before = writer.getDataLength()
+ value.StructLength = deadbeef
+ value.compile(writer, font)
+ length = writer.getDataLength() - before
+ lengthWriter = writer.getSubWriter()
+ conv.write(lengthWriter, font, tableDict, length)
+ assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"[: conv.staticSize]
+ writer.items[lengthIndex] = lengthWriter.getAllData()
class Table(Struct):
+ staticSize = 2
+
+ def readOffset(self, reader):
+ return reader.readUShort()
+
+ def writeNullOffset(self, writer):
+ writer.writeUShort(0)
+
+ def read(self, reader, font, tableDict):
+ offset = self.readOffset(reader)
+ if offset == 0:
+ return None
+ table = self.tableClass()
+ reader = reader.getSubReader(offset)
+ if font.lazy:
+ table.reader = reader
+ table.font = font
+ else:
+ table.decompile(reader, font)
+ return table
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ if value is None:
+ self.writeNullOffset(writer)
+ else:
+ subWriter = writer.getSubWriter()
+ subWriter.name = self.name
+ if repeatIndex is not None:
+ subWriter.repeatIndex = repeatIndex
+ writer.writeSubTable(subWriter, offsetSize=self.staticSize)
+ value.compile(subWriter, font)
- staticSize = 2
-
- def readOffset(self, reader):
- return reader.readUShort()
-
- def writeNullOffset(self, writer):
- writer.writeUShort(0)
-
- def read(self, reader, font, tableDict):
- offset = self.readOffset(reader)
- if offset == 0:
- return None
- table = self.tableClass()
- reader = reader.getSubReader(offset)
- if font.lazy:
- table.reader = reader
- table.font = font
- else:
- table.decompile(reader, font)
- return table
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- if value is None:
- self.writeNullOffset(writer)
- else:
- subWriter = writer.getSubWriter(offsetSize=self.staticSize)
- subWriter.name = self.name
- if repeatIndex is not None:
- subWriter.repeatIndex = repeatIndex
- writer.writeSubTable(subWriter)
- value.compile(subWriter, font)
class LTable(Table):
+ staticSize = 4
- staticSize = 4
+ def readOffset(self, reader):
+ return reader.readULong()
- def readOffset(self, reader):
- return reader.readULong()
-
- def writeNullOffset(self, writer):
- writer.writeULong(0)
+ def writeNullOffset(self, writer):
+ writer.writeULong(0)
# Table pointed to by a 24-bit, 3-byte long offset
class Table24(Table):
+ staticSize = 3
- staticSize = 3
-
- def readOffset(self, reader):
- return reader.readUInt24()
+ def readOffset(self, reader):
+ return reader.readUInt24()
- def writeNullOffset(self, writer):
- writer.writeUInt24(0)
+ def writeNullOffset(self, writer):
+ writer.writeUInt24(0)
# TODO Clean / merge the SubTable and SubStruct
+
class SubStruct(Struct):
- def getConverter(self, tableType, lookupType):
- tableClass = self.lookupTypes[tableType][lookupType]
- return self.__class__(self.name, self.repeat, self.aux, tableClass)
+ def getConverter(self, tableType, lookupType):
+ tableClass = self.lookupTypes[tableType][lookupType]
+ return self.__class__(self.name, self.repeat, self.aux, tableClass)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs)
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs)
class SubTable(Table):
- def getConverter(self, tableType, lookupType):
- tableClass = self.lookupTypes[tableType][lookupType]
- return self.__class__(self.name, self.repeat, self.aux, tableClass)
+ def getConverter(self, tableType, lookupType):
+ tableClass = self.lookupTypes[tableType][lookupType]
+ return self.__class__(self.name, self.repeat, self.aux, tableClass)
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs)
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs)
-class ExtSubTable(LTable, SubTable):
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer.
- Table.write(self, writer, font, tableDict, value, repeatIndex)
+class ExtSubTable(LTable, SubTable):
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer.
+ Table.write(self, writer, font, tableDict, value, repeatIndex)
class FeatureParams(Table):
- def getConverter(self, featureTag):
- tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
- return self.__class__(self.name, self.repeat, self.aux, tableClass)
+ def getConverter(self, featureTag):
+ tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
+ return self.__class__(self.name, self.repeat, self.aux, tableClass)
class ValueFormat(IntValue):
- staticSize = 2
- def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
- self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
- def read(self, reader, font, tableDict):
- format = reader.readUShort()
- reader[self.which] = ValueRecordFactory(format)
- return format
- def write(self, writer, font, tableDict, format, repeatIndex=None):
- writer.writeUShort(format)
- writer[self.which] = ValueRecordFactory(format)
+ staticSize = 2
+
+ def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+ self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
+
+ def read(self, reader, font, tableDict):
+ format = reader.readUShort()
+ reader[self.which] = ValueRecordFactory(format)
+ return format
+
+ def write(self, writer, font, tableDict, format, repeatIndex=None):
+ writer.writeUShort(format)
+ writer[self.which] = ValueRecordFactory(format)
class ValueRecord(ValueFormat):
- def getRecordSize(self, reader):
- return 2 * len(reader[self.which])
- def read(self, reader, font, tableDict):
- return reader[self.which].readValueRecord(reader, font)
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- writer[self.which].writeValueRecord(writer, font, value)
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- if value is None:
- pass # NULL table, ignore
- else:
- value.toXML(xmlWriter, font, self.name, attrs)
- def xmlRead(self, attrs, content, font):
- from .otBase import ValueRecord
- value = ValueRecord()
- value.fromXML(None, attrs, content, font)
- return value
+ def getRecordSize(self, reader):
+ return 2 * len(reader[self.which])
+
+ def read(self, reader, font, tableDict):
+ return reader[self.which].readValueRecord(reader, font)
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ writer[self.which].writeValueRecord(writer, font, value)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ if value is None:
+ pass # NULL table, ignore
+ else:
+ value.toXML(xmlWriter, font, self.name, attrs)
+
+ def xmlRead(self, attrs, content, font):
+ from .otBase import ValueRecord
+
+ value = ValueRecord()
+ value.fromXML(None, attrs, content, font)
+ return value
class AATLookup(BaseConverter):
- BIN_SEARCH_HEADER_SIZE = 10
-
- def __init__(self, name, repeat, aux, tableClass, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
- if issubclass(self.tableClass, SimpleValue):
- self.converter = self.tableClass(name='Value', repeat=None, aux=None)
- else:
- self.converter = Table(name='Value', repeat=None, aux=None, tableClass=self.tableClass)
-
- def read(self, reader, font, tableDict):
- format = reader.readUShort()
- if format == 0:
- return self.readFormat0(reader, font)
- elif format == 2:
- return self.readFormat2(reader, font)
- elif format == 4:
- return self.readFormat4(reader, font)
- elif format == 6:
- return self.readFormat6(reader, font)
- elif format == 8:
- return self.readFormat8(reader, font)
- else:
- assert False, "unsupported lookup format: %d" % format
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- values = list(sorted([(font.getGlyphID(glyph), val)
- for glyph, val in value.items()]))
- # TODO: Also implement format 4.
- formats = list(sorted(filter(None, [
- self.buildFormat0(writer, font, values),
- self.buildFormat2(writer, font, values),
- self.buildFormat6(writer, font, values),
- self.buildFormat8(writer, font, values),
- ])))
- # We use the format ID as secondary sort key to make the output
- # deterministic when multiple formats have same encoded size.
- dataSize, lookupFormat, writeMethod = formats[0]
- pos = writer.getDataLength()
- writeMethod()
- actualSize = writer.getDataLength() - pos
- assert actualSize == dataSize, (
- "AATLookup format %d claimed to write %d bytes, but wrote %d" %
- (lookupFormat, dataSize, actualSize))
-
- @staticmethod
- def writeBinSearchHeader(writer, numUnits, unitSize):
- writer.writeUShort(unitSize)
- writer.writeUShort(numUnits)
- searchRange, entrySelector, rangeShift = \
- getSearchRange(n=numUnits, itemSize=unitSize)
- writer.writeUShort(searchRange)
- writer.writeUShort(entrySelector)
- writer.writeUShort(rangeShift)
-
- def buildFormat0(self, writer, font, values):
- numGlyphs = len(font.getGlyphOrder())
- if len(values) != numGlyphs:
- return None
- valueSize = self.converter.staticSize
- return (2 + numGlyphs * valueSize, 0,
- lambda: self.writeFormat0(writer, font, values))
-
- def writeFormat0(self, writer, font, values):
- writer.writeUShort(0)
- for glyphID_, value in values:
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
-
- def buildFormat2(self, writer, font, values):
- segStart, segValue = values[0]
- segEnd = segStart
- segments = []
- for glyphID, curValue in values[1:]:
- if glyphID != segEnd + 1 or curValue != segValue:
- segments.append((segStart, segEnd, segValue))
- segStart = segEnd = glyphID
- segValue = curValue
- else:
- segEnd = glyphID
- segments.append((segStart, segEnd, segValue))
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(segments) + 1, valueSize + 4
- return (2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, 2,
- lambda: self.writeFormat2(writer, font, segments))
-
- def writeFormat2(self, writer, font, segments):
- writer.writeUShort(2)
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(segments), valueSize + 4
- self.writeBinSearchHeader(writer, numUnits, unitSize)
- for firstGlyph, lastGlyph, value in segments:
- writer.writeUShort(lastGlyph)
- writer.writeUShort(firstGlyph)
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
- writer.writeUShort(0xFFFF)
- writer.writeUShort(0xFFFF)
- writer.writeData(b'\x00' * valueSize)
-
- def buildFormat6(self, writer, font, values):
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(values), valueSize + 2
- return (2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, 6,
- lambda: self.writeFormat6(writer, font, values))
-
- def writeFormat6(self, writer, font, values):
- writer.writeUShort(6)
- valueSize = self.converter.staticSize
- numUnits, unitSize = len(values), valueSize + 2
- self.writeBinSearchHeader(writer, numUnits, unitSize)
- for glyphID, value in values:
- writer.writeUShort(glyphID)
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
- writer.writeUShort(0xFFFF)
- writer.writeData(b'\x00' * valueSize)
-
- def buildFormat8(self, writer, font, values):
- minGlyphID, maxGlyphID = values[0][0], values[-1][0]
- if len(values) != maxGlyphID - minGlyphID + 1:
- return None
- valueSize = self.converter.staticSize
- return (6 + len(values) * valueSize, 8,
- lambda: self.writeFormat8(writer, font, values))
-
- def writeFormat8(self, writer, font, values):
- firstGlyphID = values[0][0]
- writer.writeUShort(8)
- writer.writeUShort(firstGlyphID)
- writer.writeUShort(len(values))
- for _, value in values:
- self.converter.write(
- writer, font, tableDict=None,
- value=value, repeatIndex=None)
-
- def readFormat0(self, reader, font):
- numGlyphs = len(font.getGlyphOrder())
- data = self.converter.readArray(
- reader, font, tableDict=None, count=numGlyphs)
- return {font.getGlyphName(k): value
- for k, value in enumerate(data)}
-
- def readFormat2(self, reader, font):
- mapping = {}
- pos = reader.pos - 2 # start of table is at UShort for format
- unitSize, numUnits = reader.readUShort(), reader.readUShort()
- assert unitSize >= 4 + self.converter.staticSize, unitSize
- for i in range(numUnits):
- reader.seek(pos + i * unitSize + 12)
- last = reader.readUShort()
- first = reader.readUShort()
- value = self.converter.read(reader, font, tableDict=None)
- if last != 0xFFFF:
- for k in range(first, last + 1):
- mapping[font.getGlyphName(k)] = value
- return mapping
-
- def readFormat4(self, reader, font):
- mapping = {}
- pos = reader.pos - 2 # start of table is at UShort for format
- unitSize = reader.readUShort()
- assert unitSize >= 6, unitSize
- for i in range(reader.readUShort()):
- reader.seek(pos + i * unitSize + 12)
- last = reader.readUShort()
- first = reader.readUShort()
- offset = reader.readUShort()
- if last != 0xFFFF:
- dataReader = reader.getSubReader(0) # relative to current position
- dataReader.seek(pos + offset) # relative to start of table
- data = self.converter.readArray(
- dataReader, font, tableDict=None,
- count=last - first + 1)
- for k, v in enumerate(data):
- mapping[font.getGlyphName(first + k)] = v
- return mapping
-
- def readFormat6(self, reader, font):
- mapping = {}
- pos = reader.pos - 2 # start of table is at UShort for format
- unitSize = reader.readUShort()
- assert unitSize >= 2 + self.converter.staticSize, unitSize
- for i in range(reader.readUShort()):
- reader.seek(pos + i * unitSize + 12)
- glyphID = reader.readUShort()
- value = self.converter.read(
- reader, font, tableDict=None)
- if glyphID != 0xFFFF:
- mapping[font.getGlyphName(glyphID)] = value
- return mapping
-
- def readFormat8(self, reader, font):
- first = reader.readUShort()
- count = reader.readUShort()
- data = self.converter.readArray(
- reader, font, tableDict=None, count=count)
- return {font.getGlyphName(first + k): value
- for (k, value) in enumerate(data)}
-
- def xmlRead(self, attrs, content, font):
- value = {}
- for element in content:
- if isinstance(element, tuple):
- name, a, eltContent = element
- if name == "Lookup":
- value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font)
- return value
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- for glyph, value in sorted(value.items()):
- self.converter.xmlWrite(
- xmlWriter, font, value=value,
- name="Lookup", attrs=[("glyph", glyph)])
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ BIN_SEARCH_HEADER_SIZE = 10
+
+ def __init__(self, name, repeat, aux, tableClass, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+ if issubclass(self.tableClass, SimpleValue):
+ self.converter = self.tableClass(name="Value", repeat=None, aux=None)
+ else:
+ self.converter = Table(
+ name="Value", repeat=None, aux=None, tableClass=self.tableClass
+ )
+
+ def read(self, reader, font, tableDict):
+ format = reader.readUShort()
+ if format == 0:
+ return self.readFormat0(reader, font)
+ elif format == 2:
+ return self.readFormat2(reader, font)
+ elif format == 4:
+ return self.readFormat4(reader, font)
+ elif format == 6:
+ return self.readFormat6(reader, font)
+ elif format == 8:
+ return self.readFormat8(reader, font)
+ else:
+ assert False, "unsupported lookup format: %d" % format
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ values = list(
+ sorted([(font.getGlyphID(glyph), val) for glyph, val in value.items()])
+ )
+ # TODO: Also implement format 4.
+ formats = list(
+ sorted(
+ filter(
+ None,
+ [
+ self.buildFormat0(writer, font, values),
+ self.buildFormat2(writer, font, values),
+ self.buildFormat6(writer, font, values),
+ self.buildFormat8(writer, font, values),
+ ],
+ )
+ )
+ )
+ # We use the format ID as secondary sort key to make the output
+ # deterministic when multiple formats have same encoded size.
+ dataSize, lookupFormat, writeMethod = formats[0]
+ pos = writer.getDataLength()
+ writeMethod()
+ actualSize = writer.getDataLength() - pos
+ assert (
+ actualSize == dataSize
+ ), "AATLookup format %d claimed to write %d bytes, but wrote %d" % (
+ lookupFormat,
+ dataSize,
+ actualSize,
+ )
+
+ @staticmethod
+ def writeBinSearchHeader(writer, numUnits, unitSize):
+ writer.writeUShort(unitSize)
+ writer.writeUShort(numUnits)
+ searchRange, entrySelector, rangeShift = getSearchRange(
+ n=numUnits, itemSize=unitSize
+ )
+ writer.writeUShort(searchRange)
+ writer.writeUShort(entrySelector)
+ writer.writeUShort(rangeShift)
+
+ def buildFormat0(self, writer, font, values):
+ numGlyphs = len(font.getGlyphOrder())
+ if len(values) != numGlyphs:
+ return None
+ valueSize = self.converter.staticSize
+ return (
+ 2 + numGlyphs * valueSize,
+ 0,
+ lambda: self.writeFormat0(writer, font, values),
+ )
+
+ def writeFormat0(self, writer, font, values):
+ writer.writeUShort(0)
+ for glyphID_, value in values:
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+
+ def buildFormat2(self, writer, font, values):
+ segStart, segValue = values[0]
+ segEnd = segStart
+ segments = []
+ for glyphID, curValue in values[1:]:
+ if glyphID != segEnd + 1 or curValue != segValue:
+ segments.append((segStart, segEnd, segValue))
+ segStart = segEnd = glyphID
+ segValue = curValue
+ else:
+ segEnd = glyphID
+ segments.append((segStart, segEnd, segValue))
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(segments) + 1, valueSize + 4
+ return (
+ 2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize,
+ 2,
+ lambda: self.writeFormat2(writer, font, segments),
+ )
+
+ def writeFormat2(self, writer, font, segments):
+ writer.writeUShort(2)
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(segments), valueSize + 4
+ self.writeBinSearchHeader(writer, numUnits, unitSize)
+ for firstGlyph, lastGlyph, value in segments:
+ writer.writeUShort(lastGlyph)
+ writer.writeUShort(firstGlyph)
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+ writer.writeUShort(0xFFFF)
+ writer.writeUShort(0xFFFF)
+ writer.writeData(b"\x00" * valueSize)
+
+ def buildFormat6(self, writer, font, values):
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(values), valueSize + 2
+ return (
+ 2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize,
+ 6,
+ lambda: self.writeFormat6(writer, font, values),
+ )
+
+ def writeFormat6(self, writer, font, values):
+ writer.writeUShort(6)
+ valueSize = self.converter.staticSize
+ numUnits, unitSize = len(values), valueSize + 2
+ self.writeBinSearchHeader(writer, numUnits, unitSize)
+ for glyphID, value in values:
+ writer.writeUShort(glyphID)
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+ writer.writeUShort(0xFFFF)
+ writer.writeData(b"\x00" * valueSize)
+
+ def buildFormat8(self, writer, font, values):
+ minGlyphID, maxGlyphID = values[0][0], values[-1][0]
+ if len(values) != maxGlyphID - minGlyphID + 1:
+ return None
+ valueSize = self.converter.staticSize
+ return (
+ 6 + len(values) * valueSize,
+ 8,
+ lambda: self.writeFormat8(writer, font, values),
+ )
+
+ def writeFormat8(self, writer, font, values):
+ firstGlyphID = values[0][0]
+ writer.writeUShort(8)
+ writer.writeUShort(firstGlyphID)
+ writer.writeUShort(len(values))
+ for _, value in values:
+ self.converter.write(
+ writer, font, tableDict=None, value=value, repeatIndex=None
+ )
+
+ def readFormat0(self, reader, font):
+ numGlyphs = len(font.getGlyphOrder())
+ data = self.converter.readArray(reader, font, tableDict=None, count=numGlyphs)
+ return {font.getGlyphName(k): value for k, value in enumerate(data)}
+
+ def readFormat2(self, reader, font):
+ mapping = {}
+ pos = reader.pos - 2 # start of table is at UShort for format
+ unitSize, numUnits = reader.readUShort(), reader.readUShort()
+ assert unitSize >= 4 + self.converter.staticSize, unitSize
+ for i in range(numUnits):
+ reader.seek(pos + i * unitSize + 12)
+ last = reader.readUShort()
+ first = reader.readUShort()
+ value = self.converter.read(reader, font, tableDict=None)
+ if last != 0xFFFF:
+ for k in range(first, last + 1):
+ mapping[font.getGlyphName(k)] = value
+ return mapping
+
+ def readFormat4(self, reader, font):
+ mapping = {}
+ pos = reader.pos - 2 # start of table is at UShort for format
+ unitSize = reader.readUShort()
+ assert unitSize >= 6, unitSize
+ for i in range(reader.readUShort()):
+ reader.seek(pos + i * unitSize + 12)
+ last = reader.readUShort()
+ first = reader.readUShort()
+ offset = reader.readUShort()
+ if last != 0xFFFF:
+ dataReader = reader.getSubReader(0) # relative to current position
+ dataReader.seek(pos + offset) # relative to start of table
+ data = self.converter.readArray(
+ dataReader, font, tableDict=None, count=last - first + 1
+ )
+ for k, v in enumerate(data):
+ mapping[font.getGlyphName(first + k)] = v
+ return mapping
+
+ def readFormat6(self, reader, font):
+ mapping = {}
+ pos = reader.pos - 2 # start of table is at UShort for format
+ unitSize = reader.readUShort()
+ assert unitSize >= 2 + self.converter.staticSize, unitSize
+ for i in range(reader.readUShort()):
+ reader.seek(pos + i * unitSize + 12)
+ glyphID = reader.readUShort()
+ value = self.converter.read(reader, font, tableDict=None)
+ if glyphID != 0xFFFF:
+ mapping[font.getGlyphName(glyphID)] = value
+ return mapping
+
+ def readFormat8(self, reader, font):
+ first = reader.readUShort()
+ count = reader.readUShort()
+ data = self.converter.readArray(reader, font, tableDict=None, count=count)
+ return {font.getGlyphName(first + k): value for (k, value) in enumerate(data)}
+
+ def xmlRead(self, attrs, content, font):
+ value = {}
+ for element in content:
+ if isinstance(element, tuple):
+ name, a, eltContent = element
+ if name == "Lookup":
+ value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font)
+ return value
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ for glyph, value in sorted(value.items()):
+ self.converter.xmlWrite(
+ xmlWriter, font, value=value, name="Lookup", attrs=[("glyph", glyph)]
+ )
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
# The AAT 'ankr' table has an unusual structure: An offset to an AATLookup
@@ -981,831 +1105,822 @@ class AATLookup(BaseConverter):
# to the data table to the offset found in the AATLookup, and then use
# the sum of these two offsets to find the actual data.
class AATLookupWithDataOffset(BaseConverter):
- def read(self, reader, font, tableDict):
- lookupOffset = reader.readULong()
- dataOffset = reader.readULong()
- lookupReader = reader.getSubReader(lookupOffset)
- lookup = AATLookup('DataOffsets', None, None, UShort)
- offsets = lookup.read(lookupReader, font, tableDict)
- result = {}
- for glyph, offset in offsets.items():
- dataReader = reader.getSubReader(offset + dataOffset)
- item = self.tableClass()
- item.decompile(dataReader, font)
- result[glyph] = item
- return result
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- # We do not work with OTTableWriter sub-writers because
- # the offsets in our AATLookup are relative to our data
- # table, for which we need to provide an offset value itself.
- # It might have been possible to somehow make a kludge for
- # performing this indirect offset computation directly inside
- # OTTableWriter. But this would have made the internal logic
- # of OTTableWriter even more complex than it already is,
- # so we decided to roll our own offset computation for the
- # contents of the AATLookup and associated data table.
- offsetByGlyph, offsetByData, dataLen = {}, {}, 0
- compiledData = []
- for glyph in sorted(value, key=font.getGlyphID):
- subWriter = OTTableWriter()
- value[glyph].compile(subWriter, font)
- data = subWriter.getAllData()
- offset = offsetByData.get(data, None)
- if offset == None:
- offset = dataLen
- dataLen = dataLen + len(data)
- offsetByData[data] = offset
- compiledData.append(data)
- offsetByGlyph[glyph] = offset
- # For calculating the offsets to our AATLookup and data table,
- # we can use the regular OTTableWriter infrastructure.
- lookupWriter = writer.getSubWriter(offsetSize=4)
- lookup = AATLookup('DataOffsets', None, None, UShort)
- lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None)
-
- dataWriter = writer.getSubWriter(offsetSize=4)
- writer.writeSubTable(lookupWriter)
- writer.writeSubTable(dataWriter)
- for d in compiledData:
- dataWriter.writeData(d)
-
- def xmlRead(self, attrs, content, font):
- lookup = AATLookup('DataOffsets', None, None, self.tableClass)
- return lookup.xmlRead(attrs, content, font)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- lookup = AATLookup('DataOffsets', None, None, self.tableClass)
- lookup.xmlWrite(xmlWriter, font, value, name, attrs)
+ def read(self, reader, font, tableDict):
+ lookupOffset = reader.readULong()
+ dataOffset = reader.readULong()
+ lookupReader = reader.getSubReader(lookupOffset)
+ lookup = AATLookup("DataOffsets", None, None, UShort)
+ offsets = lookup.read(lookupReader, font, tableDict)
+ result = {}
+ for glyph, offset in offsets.items():
+ dataReader = reader.getSubReader(offset + dataOffset)
+ item = self.tableClass()
+ item.decompile(dataReader, font)
+ result[glyph] = item
+ return result
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ # We do not work with OTTableWriter sub-writers because
+ # the offsets in our AATLookup are relative to our data
+ # table, for which we need to provide an offset value itself.
+ # It might have been possible to somehow make a kludge for
+ # performing this indirect offset computation directly inside
+ # OTTableWriter. But this would have made the internal logic
+ # of OTTableWriter even more complex than it already is,
+ # so we decided to roll our own offset computation for the
+ # contents of the AATLookup and associated data table.
+ offsetByGlyph, offsetByData, dataLen = {}, {}, 0
+ compiledData = []
+ for glyph in sorted(value, key=font.getGlyphID):
+ subWriter = OTTableWriter()
+ value[glyph].compile(subWriter, font)
+ data = subWriter.getAllData()
+ offset = offsetByData.get(data, None)
+ if offset == None:
+ offset = dataLen
+ dataLen = dataLen + len(data)
+ offsetByData[data] = offset
+ compiledData.append(data)
+ offsetByGlyph[glyph] = offset
+ # For calculating the offsets to our AATLookup and data table,
+ # we can use the regular OTTableWriter infrastructure.
+ lookupWriter = writer.getSubWriter()
+ lookup = AATLookup("DataOffsets", None, None, UShort)
+ lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None)
+
+ dataWriter = writer.getSubWriter()
+ writer.writeSubTable(lookupWriter, offsetSize=4)
+ writer.writeSubTable(dataWriter, offsetSize=4)
+ for d in compiledData:
+ dataWriter.writeData(d)
+
+ def xmlRead(self, attrs, content, font):
+ lookup = AATLookup("DataOffsets", None, None, self.tableClass)
+ return lookup.xmlRead(attrs, content, font)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ lookup = AATLookup("DataOffsets", None, None, self.tableClass)
+ lookup.xmlWrite(xmlWriter, font, value, name, attrs)
class MorxSubtableConverter(BaseConverter):
- _PROCESSING_ORDERS = {
- # bits 30 and 28 of morx.CoverageFlags; see morx spec
- (False, False): "LayoutOrder",
- (True, False): "ReversedLayoutOrder",
- (False, True): "LogicalOrder",
- (True, True): "ReversedLogicalOrder",
- }
-
- _PROCESSING_ORDERS_REVERSED = {
- val: key for key, val in _PROCESSING_ORDERS.items()
- }
-
- def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
-
- def _setTextDirectionFromCoverageFlags(self, flags, subtable):
- if (flags & 0x20) != 0:
- subtable.TextDirection = "Any"
- elif (flags & 0x80) != 0:
- subtable.TextDirection = "Vertical"
- else:
- subtable.TextDirection = "Horizontal"
-
- def read(self, reader, font, tableDict):
- pos = reader.pos
- m = MorxSubtable()
- m.StructLength = reader.readULong()
- flags = reader.readUInt8()
- orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0)
- m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
- self._setTextDirectionFromCoverageFlags(flags, m)
- m.Reserved = reader.readUShort()
- m.Reserved |= (flags & 0xF) << 16
- m.MorphType = reader.readUInt8()
- m.SubFeatureFlags = reader.readULong()
- tableClass = lookupTypes["morx"].get(m.MorphType)
- if tableClass is None:
- assert False, ("unsupported 'morx' lookup type %s" %
- m.MorphType)
- # To decode AAT ligatures, we need to know the subtable size.
- # The easiest way to pass this along is to create a new reader
- # that works on just the subtable as its data.
- headerLength = reader.pos - pos
- data = reader.data[
- reader.pos
- : reader.pos + m.StructLength - headerLength]
- assert len(data) == m.StructLength - headerLength
- subReader = OTTableReader(data=data, tableTag=reader.tableTag)
- m.SubStruct = tableClass()
- m.SubStruct.decompile(subReader, font)
- reader.seek(pos + m.StructLength)
- return m
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- xmlWriter.comment("StructLength=%d" % value.StructLength)
- xmlWriter.newline()
- xmlWriter.simpletag("TextDirection", value=value.TextDirection)
- xmlWriter.newline()
- xmlWriter.simpletag("ProcessingOrder",
- value=value.ProcessingOrder)
- xmlWriter.newline()
- if value.Reserved != 0:
- xmlWriter.simpletag("Reserved",
- value="0x%04x" % value.Reserved)
- xmlWriter.newline()
- xmlWriter.comment("MorphType=%d" % value.MorphType)
- xmlWriter.newline()
- xmlWriter.simpletag("SubFeatureFlags",
- value="0x%08x" % value.SubFeatureFlags)
- xmlWriter.newline()
- value.SubStruct.toXML(xmlWriter, font)
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def xmlRead(self, attrs, content, font):
- m = MorxSubtable()
- covFlags = 0
- m.Reserved = 0
- for eltName, eltAttrs, eltContent in filter(istuple, content):
- if eltName == "CoverageFlags":
- # Only in XML from old versions of fonttools.
- covFlags = safeEval(eltAttrs["value"])
- orderKey = ((covFlags & 0x40) != 0,
- (covFlags & 0x10) != 0)
- m.ProcessingOrder = self._PROCESSING_ORDERS[
- orderKey]
- self._setTextDirectionFromCoverageFlags(
- covFlags, m)
- elif eltName == "ProcessingOrder":
- m.ProcessingOrder = eltAttrs["value"]
- assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, "unknown ProcessingOrder: %s" % m.ProcessingOrder
- elif eltName == "TextDirection":
- m.TextDirection = eltAttrs["value"]
- assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, "unknown TextDirection %s" % m.TextDirection
- elif eltName == "Reserved":
- m.Reserved = safeEval(eltAttrs["value"])
- elif eltName == "SubFeatureFlags":
- m.SubFeatureFlags = safeEval(eltAttrs["value"])
- elif eltName.endswith("Morph"):
- m.fromXML(eltName, eltAttrs, eltContent, font)
- else:
- assert False, eltName
- m.Reserved = (covFlags & 0xF) << 16 | m.Reserved
- return m
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- covFlags = (value.Reserved & 0x000F0000) >> 16
- reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[
- value.ProcessingOrder]
- covFlags |= 0x80 if value.TextDirection == "Vertical" else 0
- covFlags |= 0x40 if reverseOrder else 0
- covFlags |= 0x20 if value.TextDirection == "Any" else 0
- covFlags |= 0x10 if logicalOrder else 0
- value.CoverageFlags = covFlags
- lengthIndex = len(writer.items)
- before = writer.getDataLength()
- value.StructLength = 0xdeadbeef
- # The high nibble of value.Reserved is actuallly encoded
- # into coverageFlags, so we need to clear it here.
- origReserved = value.Reserved # including high nibble
- value.Reserved = value.Reserved & 0xFFFF # without high nibble
- value.compile(writer, font)
- value.Reserved = origReserved # restore original value
- assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"
- length = writer.getDataLength() - before
- writer.items[lengthIndex] = struct.pack(">L", length)
+ _PROCESSING_ORDERS = {
+ # bits 30 and 28 of morx.CoverageFlags; see morx spec
+ (False, False): "LayoutOrder",
+ (True, False): "ReversedLayoutOrder",
+ (False, True): "LogicalOrder",
+ (True, True): "ReversedLogicalOrder",
+ }
+
+ _PROCESSING_ORDERS_REVERSED = {val: key for key, val in _PROCESSING_ORDERS.items()}
+
+ def __init__(self, name, repeat, aux, tableClass=None, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+
+ def _setTextDirectionFromCoverageFlags(self, flags, subtable):
+ if (flags & 0x20) != 0:
+ subtable.TextDirection = "Any"
+ elif (flags & 0x80) != 0:
+ subtable.TextDirection = "Vertical"
+ else:
+ subtable.TextDirection = "Horizontal"
+
+ def read(self, reader, font, tableDict):
+ pos = reader.pos
+ m = MorxSubtable()
+ m.StructLength = reader.readULong()
+ flags = reader.readUInt8()
+ orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0)
+ m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
+ self._setTextDirectionFromCoverageFlags(flags, m)
+ m.Reserved = reader.readUShort()
+ m.Reserved |= (flags & 0xF) << 16
+ m.MorphType = reader.readUInt8()
+ m.SubFeatureFlags = reader.readULong()
+ tableClass = lookupTypes["morx"].get(m.MorphType)
+ if tableClass is None:
+ assert False, "unsupported 'morx' lookup type %s" % m.MorphType
+ # To decode AAT ligatures, we need to know the subtable size.
+ # The easiest way to pass this along is to create a new reader
+ # that works on just the subtable as its data.
+ headerLength = reader.pos - pos
+ data = reader.data[reader.pos : reader.pos + m.StructLength - headerLength]
+ assert len(data) == m.StructLength - headerLength
+ subReader = OTTableReader(data=data, tableTag=reader.tableTag)
+ m.SubStruct = tableClass()
+ m.SubStruct.decompile(subReader, font)
+ reader.seek(pos + m.StructLength)
+ return m
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ xmlWriter.comment("StructLength=%d" % value.StructLength)
+ xmlWriter.newline()
+ xmlWriter.simpletag("TextDirection", value=value.TextDirection)
+ xmlWriter.newline()
+ xmlWriter.simpletag("ProcessingOrder", value=value.ProcessingOrder)
+ xmlWriter.newline()
+ if value.Reserved != 0:
+ xmlWriter.simpletag("Reserved", value="0x%04x" % value.Reserved)
+ xmlWriter.newline()
+ xmlWriter.comment("MorphType=%d" % value.MorphType)
+ xmlWriter.newline()
+ xmlWriter.simpletag("SubFeatureFlags", value="0x%08x" % value.SubFeatureFlags)
+ xmlWriter.newline()
+ value.SubStruct.toXML(xmlWriter, font)
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ m = MorxSubtable()
+ covFlags = 0
+ m.Reserved = 0
+ for eltName, eltAttrs, eltContent in filter(istuple, content):
+ if eltName == "CoverageFlags":
+ # Only in XML from old versions of fonttools.
+ covFlags = safeEval(eltAttrs["value"])
+ orderKey = ((covFlags & 0x40) != 0, (covFlags & 0x10) != 0)
+ m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
+ self._setTextDirectionFromCoverageFlags(covFlags, m)
+ elif eltName == "ProcessingOrder":
+ m.ProcessingOrder = eltAttrs["value"]
+ assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, (
+ "unknown ProcessingOrder: %s" % m.ProcessingOrder
+ )
+ elif eltName == "TextDirection":
+ m.TextDirection = eltAttrs["value"]
+ assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, (
+ "unknown TextDirection %s" % m.TextDirection
+ )
+ elif eltName == "Reserved":
+ m.Reserved = safeEval(eltAttrs["value"])
+ elif eltName == "SubFeatureFlags":
+ m.SubFeatureFlags = safeEval(eltAttrs["value"])
+ elif eltName.endswith("Morph"):
+ m.fromXML(eltName, eltAttrs, eltContent, font)
+ else:
+ assert False, eltName
+ m.Reserved = (covFlags & 0xF) << 16 | m.Reserved
+ return m
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ covFlags = (value.Reserved & 0x000F0000) >> 16
+ reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[
+ value.ProcessingOrder
+ ]
+ covFlags |= 0x80 if value.TextDirection == "Vertical" else 0
+ covFlags |= 0x40 if reverseOrder else 0
+ covFlags |= 0x20 if value.TextDirection == "Any" else 0
+ covFlags |= 0x10 if logicalOrder else 0
+ value.CoverageFlags = covFlags
+ lengthIndex = len(writer.items)
+ before = writer.getDataLength()
+ value.StructLength = 0xDEADBEEF
+ # The high nibble of value.Reserved is actuallly encoded
+ # into coverageFlags, so we need to clear it here.
+ origReserved = value.Reserved # including high nibble
+ value.Reserved = value.Reserved & 0xFFFF # without high nibble
+ value.compile(writer, font)
+ value.Reserved = origReserved # restore original value
+ assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"
+ length = writer.getDataLength() - before
+ writer.items[lengthIndex] = struct.pack(">L", length)
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader
# TODO: Untangle the implementation of the various lookup-specific formats.
class STXHeader(BaseConverter):
- def __init__(self, name, repeat, aux, tableClass, *, description=""):
- BaseConverter.__init__(
- self, name, repeat, aux, tableClass, description=description
- )
- assert issubclass(self.tableClass, AATAction)
- self.classLookup = AATLookup("GlyphClasses", None, None, UShort)
- if issubclass(self.tableClass, ContextualMorphAction):
- self.perGlyphLookup = AATLookup("PerGlyphLookup",
- None, None, GlyphID)
- else:
- self.perGlyphLookup = None
-
- def read(self, reader, font, tableDict):
- table = AATStateTable()
- pos = reader.pos
- classTableReader = reader.getSubReader(0)
- stateArrayReader = reader.getSubReader(0)
- entryTableReader = reader.getSubReader(0)
- actionReader = None
- ligaturesReader = None
- table.GlyphClassCount = reader.readULong()
- classTableReader.seek(pos + reader.readULong())
- stateArrayReader.seek(pos + reader.readULong())
- entryTableReader.seek(pos + reader.readULong())
- if self.perGlyphLookup is not None:
- perGlyphTableReader = reader.getSubReader(0)
- perGlyphTableReader.seek(pos + reader.readULong())
- if issubclass(self.tableClass, LigatureMorphAction):
- actionReader = reader.getSubReader(0)
- actionReader.seek(pos + reader.readULong())
- ligComponentReader = reader.getSubReader(0)
- ligComponentReader.seek(pos + reader.readULong())
- ligaturesReader = reader.getSubReader(0)
- ligaturesReader.seek(pos + reader.readULong())
- numLigComponents = (ligaturesReader.pos
- - ligComponentReader.pos) // 2
- assert numLigComponents >= 0
- table.LigComponents = \
- ligComponentReader.readUShortArray(numLigComponents)
- table.Ligatures = self._readLigatures(ligaturesReader, font)
- elif issubclass(self.tableClass, InsertionMorphAction):
- actionReader = reader.getSubReader(0)
- actionReader.seek(pos + reader.readULong())
- table.GlyphClasses = self.classLookup.read(classTableReader,
- font, tableDict)
- numStates = int((entryTableReader.pos - stateArrayReader.pos)
- / (table.GlyphClassCount * 2))
- for stateIndex in range(numStates):
- state = AATState()
- table.States.append(state)
- for glyphClass in range(table.GlyphClassCount):
- entryIndex = stateArrayReader.readUShort()
- state.Transitions[glyphClass] = \
- self._readTransition(entryTableReader,
- entryIndex, font,
- actionReader)
- if self.perGlyphLookup is not None:
- table.PerGlyphLookups = self._readPerGlyphLookups(
- table, perGlyphTableReader, font)
- return table
-
- def _readTransition(self, reader, entryIndex, font, actionReader):
- transition = self.tableClass()
- entryReader = reader.getSubReader(
- reader.pos + entryIndex * transition.staticSize)
- transition.decompile(entryReader, font, actionReader)
- return transition
-
- def _readLigatures(self, reader, font):
- limit = len(reader.data)
- numLigatureGlyphs = (limit - reader.pos) // 2
- return font.getGlyphNameMany(reader.readUShortArray(numLigatureGlyphs))
-
- def _countPerGlyphLookups(self, table):
- # Somewhat annoyingly, the morx table does not encode
- # the size of the per-glyph table. So we need to find
- # the maximum value that MorphActions use as index
- # into this table.
- numLookups = 0
- for state in table.States:
- for t in state.Transitions.values():
- if isinstance(t, ContextualMorphAction):
- if t.MarkIndex != 0xFFFF:
- numLookups = max(
- numLookups,
- t.MarkIndex + 1)
- if t.CurrentIndex != 0xFFFF:
- numLookups = max(
- numLookups,
- t.CurrentIndex + 1)
- return numLookups
-
- def _readPerGlyphLookups(self, table, reader, font):
- pos = reader.pos
- lookups = []
- for _ in range(self._countPerGlyphLookups(table)):
- lookupReader = reader.getSubReader(0)
- lookupReader.seek(pos + reader.readULong())
- lookups.append(
- self.perGlyphLookup.read(lookupReader, font, {}))
- return lookups
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- glyphClassWriter = OTTableWriter()
- self.classLookup.write(glyphClassWriter, font, tableDict,
- value.GlyphClasses, repeatIndex=None)
- glyphClassData = pad(glyphClassWriter.getAllData(), 2)
- glyphClassCount = max(value.GlyphClasses.values()) + 1
- glyphClassTableOffset = 16 # size of STXHeader
- if self.perGlyphLookup is not None:
- glyphClassTableOffset += 4
-
- glyphClassTableOffset += self.tableClass.actionHeaderSize
- actionData, actionIndex = \
- self.tableClass.compileActions(font, value.States)
- stateArrayData, entryTableData = self._compileStates(
- font, value.States, glyphClassCount, actionIndex)
- stateArrayOffset = glyphClassTableOffset + len(glyphClassData)
- entryTableOffset = stateArrayOffset + len(stateArrayData)
- perGlyphOffset = entryTableOffset + len(entryTableData)
- perGlyphData = \
- pad(self._compilePerGlyphLookups(value, font), 4)
- if actionData is not None:
- actionOffset = entryTableOffset + len(entryTableData)
- else:
- actionOffset = None
-
- ligaturesOffset, ligComponentsOffset = None, None
- ligComponentsData = self._compileLigComponents(value, font)
- ligaturesData = self._compileLigatures(value, font)
- if ligComponentsData is not None:
- assert len(perGlyphData) == 0
- ligComponentsOffset = actionOffset + len(actionData)
- ligaturesOffset = ligComponentsOffset + len(ligComponentsData)
-
- writer.writeULong(glyphClassCount)
- writer.writeULong(glyphClassTableOffset)
- writer.writeULong(stateArrayOffset)
- writer.writeULong(entryTableOffset)
- if self.perGlyphLookup is not None:
- writer.writeULong(perGlyphOffset)
- if actionOffset is not None:
- writer.writeULong(actionOffset)
- if ligComponentsOffset is not None:
- writer.writeULong(ligComponentsOffset)
- writer.writeULong(ligaturesOffset)
- writer.writeData(glyphClassData)
- writer.writeData(stateArrayData)
- writer.writeData(entryTableData)
- writer.writeData(perGlyphData)
- if actionData is not None:
- writer.writeData(actionData)
- if ligComponentsData is not None:
- writer.writeData(ligComponentsData)
- if ligaturesData is not None:
- writer.writeData(ligaturesData)
-
- def _compileStates(self, font, states, glyphClassCount, actionIndex):
- stateArrayWriter = OTTableWriter()
- entries, entryIDs = [], {}
- for state in states:
- for glyphClass in range(glyphClassCount):
- transition = state.Transitions[glyphClass]
- entryWriter = OTTableWriter()
- transition.compile(entryWriter, font,
- actionIndex)
- entryData = entryWriter.getAllData()
- assert len(entryData) == transition.staticSize, ( \
- "%s has staticSize %d, "
- "but actually wrote %d bytes" % (
- repr(transition),
- transition.staticSize,
- len(entryData)))
- entryIndex = entryIDs.get(entryData)
- if entryIndex is None:
- entryIndex = len(entries)
- entryIDs[entryData] = entryIndex
- entries.append(entryData)
- stateArrayWriter.writeUShort(entryIndex)
- stateArrayData = pad(stateArrayWriter.getAllData(), 4)
- entryTableData = pad(bytesjoin(entries), 4)
- return stateArrayData, entryTableData
-
- def _compilePerGlyphLookups(self, table, font):
- if self.perGlyphLookup is None:
- return b""
- numLookups = self._countPerGlyphLookups(table)
- assert len(table.PerGlyphLookups) == numLookups, (
- "len(AATStateTable.PerGlyphLookups) is %d, "
- "but the actions inside the table refer to %d" %
- (len(table.PerGlyphLookups), numLookups))
- writer = OTTableWriter()
- for lookup in table.PerGlyphLookups:
- lookupWriter = writer.getSubWriter(offsetSize=4)
- self.perGlyphLookup.write(lookupWriter, font,
- {}, lookup, None)
- writer.writeSubTable(lookupWriter)
- return writer.getAllData()
-
- def _compileLigComponents(self, table, font):
- if not hasattr(table, "LigComponents"):
- return None
- writer = OTTableWriter()
- for component in table.LigComponents:
- writer.writeUShort(component)
- return writer.getAllData()
-
- def _compileLigatures(self, table, font):
- if not hasattr(table, "Ligatures"):
- return None
- writer = OTTableWriter()
- for glyphName in table.Ligatures:
- writer.writeUShort(font.getGlyphID(glyphName))
- return writer.getAllData()
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- xmlWriter.comment("GlyphClassCount=%s" %value.GlyphClassCount)
- xmlWriter.newline()
- for g, klass in sorted(value.GlyphClasses.items()):
- xmlWriter.simpletag("GlyphClass", glyph=g, value=klass)
- xmlWriter.newline()
- for stateIndex, state in enumerate(value.States):
- xmlWriter.begintag("State", index=stateIndex)
- xmlWriter.newline()
- for glyphClass, trans in sorted(state.Transitions.items()):
- trans.toXML(xmlWriter, font=font,
- attrs={"onGlyphClass": glyphClass},
- name="Transition")
- xmlWriter.endtag("State")
- xmlWriter.newline()
- for i, lookup in enumerate(value.PerGlyphLookups):
- xmlWriter.begintag("PerGlyphLookup", index=i)
- xmlWriter.newline()
- for glyph, val in sorted(lookup.items()):
- xmlWriter.simpletag("Lookup", glyph=glyph,
- value=val)
- xmlWriter.newline()
- xmlWriter.endtag("PerGlyphLookup")
- xmlWriter.newline()
- if hasattr(value, "LigComponents"):
- xmlWriter.begintag("LigComponents")
- xmlWriter.newline()
- for i, val in enumerate(getattr(value, "LigComponents")):
- xmlWriter.simpletag("LigComponent", index=i,
- value=val)
- xmlWriter.newline()
- xmlWriter.endtag("LigComponents")
- xmlWriter.newline()
- self._xmlWriteLigatures(xmlWriter, font, value, name, attrs)
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs):
- if not hasattr(value, "Ligatures"):
- return
- xmlWriter.begintag("Ligatures")
- xmlWriter.newline()
- for i, g in enumerate(getattr(value, "Ligatures")):
- xmlWriter.simpletag("Ligature", index=i, glyph=g)
- xmlWriter.newline()
- xmlWriter.endtag("Ligatures")
- xmlWriter.newline()
-
- def xmlRead(self, attrs, content, font):
- table = AATStateTable()
- for eltName, eltAttrs, eltContent in filter(istuple, content):
- if eltName == "GlyphClass":
- glyph = eltAttrs["glyph"]
- value = eltAttrs["value"]
- table.GlyphClasses[glyph] = safeEval(value)
- elif eltName == "State":
- state = self._xmlReadState(eltAttrs, eltContent, font)
- table.States.append(state)
- elif eltName == "PerGlyphLookup":
- lookup = self.perGlyphLookup.xmlRead(
- eltAttrs, eltContent, font)
- table.PerGlyphLookups.append(lookup)
- elif eltName == "LigComponents":
- table.LigComponents = \
- self._xmlReadLigComponents(
- eltAttrs, eltContent, font)
- elif eltName == "Ligatures":
- table.Ligatures = \
- self._xmlReadLigatures(
- eltAttrs, eltContent, font)
- table.GlyphClassCount = max(table.GlyphClasses.values()) + 1
- return table
-
- def _xmlReadState(self, attrs, content, font):
- state = AATState()
- for eltName, eltAttrs, eltContent in filter(istuple, content):
- if eltName == "Transition":
- glyphClass = safeEval(eltAttrs["onGlyphClass"])
- transition = self.tableClass()
- transition.fromXML(eltName, eltAttrs,
- eltContent, font)
- state.Transitions[glyphClass] = transition
- return state
-
- def _xmlReadLigComponents(self, attrs, content, font):
- ligComponents = []
- for eltName, eltAttrs, _eltContent in filter(istuple, content):
- if eltName == "LigComponent":
- ligComponents.append(
- safeEval(eltAttrs["value"]))
- return ligComponents
-
- def _xmlReadLigatures(self, attrs, content, font):
- ligs = []
- for eltName, eltAttrs, _eltContent in filter(istuple, content):
- if eltName == "Ligature":
- ligs.append(eltAttrs["glyph"])
- return ligs
+ def __init__(self, name, repeat, aux, tableClass, *, description=""):
+ BaseConverter.__init__(
+ self, name, repeat, aux, tableClass, description=description
+ )
+ assert issubclass(self.tableClass, AATAction)
+ self.classLookup = AATLookup("GlyphClasses", None, None, UShort)
+ if issubclass(self.tableClass, ContextualMorphAction):
+ self.perGlyphLookup = AATLookup("PerGlyphLookup", None, None, GlyphID)
+ else:
+ self.perGlyphLookup = None
+
+ def read(self, reader, font, tableDict):
+ table = AATStateTable()
+ pos = reader.pos
+ classTableReader = reader.getSubReader(0)
+ stateArrayReader = reader.getSubReader(0)
+ entryTableReader = reader.getSubReader(0)
+ actionReader = None
+ ligaturesReader = None
+ table.GlyphClassCount = reader.readULong()
+ classTableReader.seek(pos + reader.readULong())
+ stateArrayReader.seek(pos + reader.readULong())
+ entryTableReader.seek(pos + reader.readULong())
+ if self.perGlyphLookup is not None:
+ perGlyphTableReader = reader.getSubReader(0)
+ perGlyphTableReader.seek(pos + reader.readULong())
+ if issubclass(self.tableClass, LigatureMorphAction):
+ actionReader = reader.getSubReader(0)
+ actionReader.seek(pos + reader.readULong())
+ ligComponentReader = reader.getSubReader(0)
+ ligComponentReader.seek(pos + reader.readULong())
+ ligaturesReader = reader.getSubReader(0)
+ ligaturesReader.seek(pos + reader.readULong())
+ numLigComponents = (ligaturesReader.pos - ligComponentReader.pos) // 2
+ assert numLigComponents >= 0
+ table.LigComponents = ligComponentReader.readUShortArray(numLigComponents)
+ table.Ligatures = self._readLigatures(ligaturesReader, font)
+ elif issubclass(self.tableClass, InsertionMorphAction):
+ actionReader = reader.getSubReader(0)
+ actionReader.seek(pos + reader.readULong())
+ table.GlyphClasses = self.classLookup.read(classTableReader, font, tableDict)
+ numStates = int(
+ (entryTableReader.pos - stateArrayReader.pos) / (table.GlyphClassCount * 2)
+ )
+ for stateIndex in range(numStates):
+ state = AATState()
+ table.States.append(state)
+ for glyphClass in range(table.GlyphClassCount):
+ entryIndex = stateArrayReader.readUShort()
+ state.Transitions[glyphClass] = self._readTransition(
+ entryTableReader, entryIndex, font, actionReader
+ )
+ if self.perGlyphLookup is not None:
+ table.PerGlyphLookups = self._readPerGlyphLookups(
+ table, perGlyphTableReader, font
+ )
+ return table
+
+ def _readTransition(self, reader, entryIndex, font, actionReader):
+ transition = self.tableClass()
+ entryReader = reader.getSubReader(
+ reader.pos + entryIndex * transition.staticSize
+ )
+ transition.decompile(entryReader, font, actionReader)
+ return transition
+
+ def _readLigatures(self, reader, font):
+ limit = len(reader.data)
+ numLigatureGlyphs = (limit - reader.pos) // 2
+ return font.getGlyphNameMany(reader.readUShortArray(numLigatureGlyphs))
+
+ def _countPerGlyphLookups(self, table):
+ # Somewhat annoyingly, the morx table does not encode
+ # the size of the per-glyph table. So we need to find
+ # the maximum value that MorphActions use as index
+ # into this table.
+ numLookups = 0
+ for state in table.States:
+ for t in state.Transitions.values():
+ if isinstance(t, ContextualMorphAction):
+ if t.MarkIndex != 0xFFFF:
+ numLookups = max(numLookups, t.MarkIndex + 1)
+ if t.CurrentIndex != 0xFFFF:
+ numLookups = max(numLookups, t.CurrentIndex + 1)
+ return numLookups
+
+ def _readPerGlyphLookups(self, table, reader, font):
+ pos = reader.pos
+ lookups = []
+ for _ in range(self._countPerGlyphLookups(table)):
+ lookupReader = reader.getSubReader(0)
+ lookupReader.seek(pos + reader.readULong())
+ lookups.append(self.perGlyphLookup.read(lookupReader, font, {}))
+ return lookups
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ glyphClassWriter = OTTableWriter()
+ self.classLookup.write(
+ glyphClassWriter, font, tableDict, value.GlyphClasses, repeatIndex=None
+ )
+ glyphClassData = pad(glyphClassWriter.getAllData(), 2)
+ glyphClassCount = max(value.GlyphClasses.values()) + 1
+ glyphClassTableOffset = 16 # size of STXHeader
+ if self.perGlyphLookup is not None:
+ glyphClassTableOffset += 4
+
+ glyphClassTableOffset += self.tableClass.actionHeaderSize
+ actionData, actionIndex = self.tableClass.compileActions(font, value.States)
+ stateArrayData, entryTableData = self._compileStates(
+ font, value.States, glyphClassCount, actionIndex
+ )
+ stateArrayOffset = glyphClassTableOffset + len(glyphClassData)
+ entryTableOffset = stateArrayOffset + len(stateArrayData)
+ perGlyphOffset = entryTableOffset + len(entryTableData)
+ perGlyphData = pad(self._compilePerGlyphLookups(value, font), 4)
+ if actionData is not None:
+ actionOffset = entryTableOffset + len(entryTableData)
+ else:
+ actionOffset = None
+
+ ligaturesOffset, ligComponentsOffset = None, None
+ ligComponentsData = self._compileLigComponents(value, font)
+ ligaturesData = self._compileLigatures(value, font)
+ if ligComponentsData is not None:
+ assert len(perGlyphData) == 0
+ ligComponentsOffset = actionOffset + len(actionData)
+ ligaturesOffset = ligComponentsOffset + len(ligComponentsData)
+
+ writer.writeULong(glyphClassCount)
+ writer.writeULong(glyphClassTableOffset)
+ writer.writeULong(stateArrayOffset)
+ writer.writeULong(entryTableOffset)
+ if self.perGlyphLookup is not None:
+ writer.writeULong(perGlyphOffset)
+ if actionOffset is not None:
+ writer.writeULong(actionOffset)
+ if ligComponentsOffset is not None:
+ writer.writeULong(ligComponentsOffset)
+ writer.writeULong(ligaturesOffset)
+ writer.writeData(glyphClassData)
+ writer.writeData(stateArrayData)
+ writer.writeData(entryTableData)
+ writer.writeData(perGlyphData)
+ if actionData is not None:
+ writer.writeData(actionData)
+ if ligComponentsData is not None:
+ writer.writeData(ligComponentsData)
+ if ligaturesData is not None:
+ writer.writeData(ligaturesData)
+
+ def _compileStates(self, font, states, glyphClassCount, actionIndex):
+ stateArrayWriter = OTTableWriter()
+ entries, entryIDs = [], {}
+ for state in states:
+ for glyphClass in range(glyphClassCount):
+ transition = state.Transitions[glyphClass]
+ entryWriter = OTTableWriter()
+ transition.compile(entryWriter, font, actionIndex)
+ entryData = entryWriter.getAllData()
+ assert (
+ len(entryData) == transition.staticSize
+ ), "%s has staticSize %d, " "but actually wrote %d bytes" % (
+ repr(transition),
+ transition.staticSize,
+ len(entryData),
+ )
+ entryIndex = entryIDs.get(entryData)
+ if entryIndex is None:
+ entryIndex = len(entries)
+ entryIDs[entryData] = entryIndex
+ entries.append(entryData)
+ stateArrayWriter.writeUShort(entryIndex)
+ stateArrayData = pad(stateArrayWriter.getAllData(), 4)
+ entryTableData = pad(bytesjoin(entries), 4)
+ return stateArrayData, entryTableData
+
+ def _compilePerGlyphLookups(self, table, font):
+ if self.perGlyphLookup is None:
+ return b""
+ numLookups = self._countPerGlyphLookups(table)
+ assert len(table.PerGlyphLookups) == numLookups, (
+ "len(AATStateTable.PerGlyphLookups) is %d, "
+ "but the actions inside the table refer to %d"
+ % (len(table.PerGlyphLookups), numLookups)
+ )
+ writer = OTTableWriter()
+ for lookup in table.PerGlyphLookups:
+ lookupWriter = writer.getSubWriter()
+ self.perGlyphLookup.write(lookupWriter, font, {}, lookup, None)
+ writer.writeSubTable(lookupWriter, offsetSize=4)
+ return writer.getAllData()
+
+ def _compileLigComponents(self, table, font):
+ if not hasattr(table, "LigComponents"):
+ return None
+ writer = OTTableWriter()
+ for component in table.LigComponents:
+ writer.writeUShort(component)
+ return writer.getAllData()
+
+ def _compileLigatures(self, table, font):
+ if not hasattr(table, "Ligatures"):
+ return None
+ writer = OTTableWriter()
+ for glyphName in table.Ligatures:
+ writer.writeUShort(font.getGlyphID(glyphName))
+ return writer.getAllData()
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ xmlWriter.comment("GlyphClassCount=%s" % value.GlyphClassCount)
+ xmlWriter.newline()
+ for g, klass in sorted(value.GlyphClasses.items()):
+ xmlWriter.simpletag("GlyphClass", glyph=g, value=klass)
+ xmlWriter.newline()
+ for stateIndex, state in enumerate(value.States):
+ xmlWriter.begintag("State", index=stateIndex)
+ xmlWriter.newline()
+ for glyphClass, trans in sorted(state.Transitions.items()):
+ trans.toXML(
+ xmlWriter,
+ font=font,
+ attrs={"onGlyphClass": glyphClass},
+ name="Transition",
+ )
+ xmlWriter.endtag("State")
+ xmlWriter.newline()
+ for i, lookup in enumerate(value.PerGlyphLookups):
+ xmlWriter.begintag("PerGlyphLookup", index=i)
+ xmlWriter.newline()
+ for glyph, val in sorted(lookup.items()):
+ xmlWriter.simpletag("Lookup", glyph=glyph, value=val)
+ xmlWriter.newline()
+ xmlWriter.endtag("PerGlyphLookup")
+ xmlWriter.newline()
+ if hasattr(value, "LigComponents"):
+ xmlWriter.begintag("LigComponents")
+ xmlWriter.newline()
+ for i, val in enumerate(getattr(value, "LigComponents")):
+ xmlWriter.simpletag("LigComponent", index=i, value=val)
+ xmlWriter.newline()
+ xmlWriter.endtag("LigComponents")
+ xmlWriter.newline()
+ self._xmlWriteLigatures(xmlWriter, font, value, name, attrs)
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs):
+ if not hasattr(value, "Ligatures"):
+ return
+ xmlWriter.begintag("Ligatures")
+ xmlWriter.newline()
+ for i, g in enumerate(getattr(value, "Ligatures")):
+ xmlWriter.simpletag("Ligature", index=i, glyph=g)
+ xmlWriter.newline()
+ xmlWriter.endtag("Ligatures")
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ table = AATStateTable()
+ for eltName, eltAttrs, eltContent in filter(istuple, content):
+ if eltName == "GlyphClass":
+ glyph = eltAttrs["glyph"]
+ value = eltAttrs["value"]
+ table.GlyphClasses[glyph] = safeEval(value)
+ elif eltName == "State":
+ state = self._xmlReadState(eltAttrs, eltContent, font)
+ table.States.append(state)
+ elif eltName == "PerGlyphLookup":
+ lookup = self.perGlyphLookup.xmlRead(eltAttrs, eltContent, font)
+ table.PerGlyphLookups.append(lookup)
+ elif eltName == "LigComponents":
+ table.LigComponents = self._xmlReadLigComponents(
+ eltAttrs, eltContent, font
+ )
+ elif eltName == "Ligatures":
+ table.Ligatures = self._xmlReadLigatures(eltAttrs, eltContent, font)
+ table.GlyphClassCount = max(table.GlyphClasses.values()) + 1
+ return table
+
+ def _xmlReadState(self, attrs, content, font):
+ state = AATState()
+ for eltName, eltAttrs, eltContent in filter(istuple, content):
+ if eltName == "Transition":
+ glyphClass = safeEval(eltAttrs["onGlyphClass"])
+ transition = self.tableClass()
+ transition.fromXML(eltName, eltAttrs, eltContent, font)
+ state.Transitions[glyphClass] = transition
+ return state
+
+ def _xmlReadLigComponents(self, attrs, content, font):
+ ligComponents = []
+ for eltName, eltAttrs, _eltContent in filter(istuple, content):
+ if eltName == "LigComponent":
+ ligComponents.append(safeEval(eltAttrs["value"]))
+ return ligComponents
+
+ def _xmlReadLigatures(self, attrs, content, font):
+ ligs = []
+ for eltName, eltAttrs, _eltContent in filter(istuple, content):
+ if eltName == "Ligature":
+ ligs.append(eltAttrs["glyph"])
+ return ligs
class CIDGlyphMap(BaseConverter):
- def read(self, reader, font, tableDict):
- numCIDs = reader.readUShort()
- result = {}
- for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)):
- if glyphID != 0xFFFF:
- result[cid] = font.getGlyphName(glyphID)
- return result
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- items = {cid: font.getGlyphID(glyph)
- for cid, glyph in value.items()}
- count = max(items) + 1 if items else 0
- writer.writeUShort(count)
- for cid in range(count):
- writer.writeUShort(items.get(cid, 0xFFFF))
-
- def xmlRead(self, attrs, content, font):
- result = {}
- for eName, eAttrs, _eContent in filter(istuple, content):
- if eName == "CID":
- result[safeEval(eAttrs["cid"])] = \
- eAttrs["glyph"].strip()
- return result
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- for cid, glyph in sorted(value.items()):
- if glyph is not None and glyph != 0xFFFF:
- xmlWriter.simpletag(
- "CID", cid=cid, glyph=glyph)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ def read(self, reader, font, tableDict):
+ numCIDs = reader.readUShort()
+ result = {}
+ for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)):
+ if glyphID != 0xFFFF:
+ result[cid] = font.getGlyphName(glyphID)
+ return result
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ items = {cid: font.getGlyphID(glyph) for cid, glyph in value.items()}
+ count = max(items) + 1 if items else 0
+ writer.writeUShort(count)
+ for cid in range(count):
+ writer.writeUShort(items.get(cid, 0xFFFF))
+
+ def xmlRead(self, attrs, content, font):
+ result = {}
+ for eName, eAttrs, _eContent in filter(istuple, content):
+ if eName == "CID":
+ result[safeEval(eAttrs["cid"])] = eAttrs["glyph"].strip()
+ return result
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ for cid, glyph in sorted(value.items()):
+ if glyph is not None and glyph != 0xFFFF:
+ xmlWriter.simpletag("CID", cid=cid, glyph=glyph)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
class GlyphCIDMap(BaseConverter):
- def read(self, reader, font, tableDict):
- glyphOrder = font.getGlyphOrder()
- count = reader.readUShort()
- cids = reader.readUShortArray(count)
- if count > len(glyphOrder):
- log.warning("GlyphCIDMap has %d elements, "
- "but the font has only %d glyphs; "
- "ignoring the rest" %
- (count, len(glyphOrder)))
- result = {}
- for glyphID in range(min(len(cids), len(glyphOrder))):
- cid = cids[glyphID]
- if cid != 0xFFFF:
- result[glyphOrder[glyphID]] = cid
- return result
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- items = {font.getGlyphID(g): cid
- for g, cid in value.items()
- if cid is not None and cid != 0xFFFF}
- count = max(items) + 1 if items else 0
- writer.writeUShort(count)
- for glyphID in range(count):
- writer.writeUShort(items.get(glyphID, 0xFFFF))
-
- def xmlRead(self, attrs, content, font):
- result = {}
- for eName, eAttrs, _eContent in filter(istuple, content):
- if eName == "CID":
- result[eAttrs["glyph"]] = \
- safeEval(eAttrs["value"])
- return result
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.begintag(name, attrs)
- xmlWriter.newline()
- for glyph, cid in sorted(value.items()):
- if cid is not None and cid != 0xFFFF:
- xmlWriter.simpletag(
- "CID", glyph=glyph, value=cid)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ def read(self, reader, font, tableDict):
+ glyphOrder = font.getGlyphOrder()
+ count = reader.readUShort()
+ cids = reader.readUShortArray(count)
+ if count > len(glyphOrder):
+ log.warning(
+ "GlyphCIDMap has %d elements, "
+ "but the font has only %d glyphs; "
+ "ignoring the rest" % (count, len(glyphOrder))
+ )
+ result = {}
+ for glyphID in range(min(len(cids), len(glyphOrder))):
+ cid = cids[glyphID]
+ if cid != 0xFFFF:
+ result[glyphOrder[glyphID]] = cid
+ return result
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ items = {
+ font.getGlyphID(g): cid
+ for g, cid in value.items()
+ if cid is not None and cid != 0xFFFF
+ }
+ count = max(items) + 1 if items else 0
+ writer.writeUShort(count)
+ for glyphID in range(count):
+ writer.writeUShort(items.get(glyphID, 0xFFFF))
+
+ def xmlRead(self, attrs, content, font):
+ result = {}
+ for eName, eAttrs, _eContent in filter(istuple, content):
+ if eName == "CID":
+ result[eAttrs["glyph"]] = safeEval(eAttrs["value"])
+ return result
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.begintag(name, attrs)
+ xmlWriter.newline()
+ for glyph, cid in sorted(value.items()):
+ if cid is not None and cid != 0xFFFF:
+ xmlWriter.simpletag("CID", glyph=glyph, value=cid)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
class DeltaValue(BaseConverter):
-
- def read(self, reader, font, tableDict):
- StartSize = tableDict["StartSize"]
- EndSize = tableDict["EndSize"]
- DeltaFormat = tableDict["DeltaFormat"]
- assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
- nItems = EndSize - StartSize + 1
- nBits = 1 << DeltaFormat
- minusOffset = 1 << nBits
- mask = (1 << nBits) - 1
- signMask = 1 << (nBits - 1)
-
- DeltaValue = []
- tmp, shift = 0, 0
- for i in range(nItems):
- if shift == 0:
- tmp, shift = reader.readUShort(), 16
- shift = shift - nBits
- value = (tmp >> shift) & mask
- if value & signMask:
- value = value - minusOffset
- DeltaValue.append(value)
- return DeltaValue
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- StartSize = tableDict["StartSize"]
- EndSize = tableDict["EndSize"]
- DeltaFormat = tableDict["DeltaFormat"]
- DeltaValue = value
- assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
- nItems = EndSize - StartSize + 1
- nBits = 1 << DeltaFormat
- assert len(DeltaValue) == nItems
- mask = (1 << nBits) - 1
-
- tmp, shift = 0, 16
- for value in DeltaValue:
- shift = shift - nBits
- tmp = tmp | ((value & mask) << shift)
- if shift == 0:
- writer.writeUShort(tmp)
- tmp, shift = 0, 16
- if shift != 16:
- writer.writeUShort(tmp)
-
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- xmlWriter.newline()
-
- def xmlRead(self, attrs, content, font):
- return safeEval(attrs["value"])
+ def read(self, reader, font, tableDict):
+ StartSize = tableDict["StartSize"]
+ EndSize = tableDict["EndSize"]
+ DeltaFormat = tableDict["DeltaFormat"]
+ assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
+ nItems = EndSize - StartSize + 1
+ nBits = 1 << DeltaFormat
+ minusOffset = 1 << nBits
+ mask = (1 << nBits) - 1
+ signMask = 1 << (nBits - 1)
+
+ DeltaValue = []
+ tmp, shift = 0, 0
+ for i in range(nItems):
+ if shift == 0:
+ tmp, shift = reader.readUShort(), 16
+ shift = shift - nBits
+ value = (tmp >> shift) & mask
+ if value & signMask:
+ value = value - minusOffset
+ DeltaValue.append(value)
+ return DeltaValue
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ StartSize = tableDict["StartSize"]
+ EndSize = tableDict["EndSize"]
+ DeltaFormat = tableDict["DeltaFormat"]
+ DeltaValue = value
+ assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
+ nItems = EndSize - StartSize + 1
+ nBits = 1 << DeltaFormat
+ assert len(DeltaValue) == nItems
+ mask = (1 << nBits) - 1
+
+ tmp, shift = 0, 16
+ for value in DeltaValue:
+ shift = shift - nBits
+ tmp = tmp | ((value & mask) << shift)
+ if shift == 0:
+ writer.writeUShort(tmp)
+ tmp, shift = 0, 16
+ if shift != 16:
+ writer.writeUShort(tmp)
+
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ xmlWriter.newline()
+
+ def xmlRead(self, attrs, content, font):
+ return safeEval(attrs["value"])
class VarIdxMapValue(BaseConverter):
-
- def read(self, reader, font, tableDict):
- fmt = tableDict['EntryFormat']
- nItems = tableDict['MappingCount']
-
- innerBits = 1 + (fmt & 0x000F)
- innerMask = (1<<innerBits) - 1
- outerMask = 0xFFFFFFFF - innerMask
- outerShift = 16 - innerBits
-
- entrySize = 1 + ((fmt & 0x0030) >> 4)
- readArray = {
- 1: reader.readUInt8Array,
- 2: reader.readUShortArray,
- 3: reader.readUInt24Array,
- 4: reader.readULongArray,
- }[entrySize]
-
- return [(((raw & outerMask) << outerShift) | (raw & innerMask))
- for raw in readArray(nItems)]
-
- def write(self, writer, font, tableDict, value, repeatIndex=None):
- fmt = tableDict['EntryFormat']
- mapping = value
- writer['MappingCount'].setValue(len(mapping))
-
- innerBits = 1 + (fmt & 0x000F)
- innerMask = (1<<innerBits) - 1
- outerShift = 16 - innerBits
-
- entrySize = 1 + ((fmt & 0x0030) >> 4)
- writeArray = {
- 1: writer.writeUInt8Array,
- 2: writer.writeUShortArray,
- 3: writer.writeUInt24Array,
- 4: writer.writeULongArray,
- }[entrySize]
-
- writeArray([(((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask))
- for idx in mapping])
+ def read(self, reader, font, tableDict):
+ fmt = tableDict["EntryFormat"]
+ nItems = tableDict["MappingCount"]
+
+ innerBits = 1 + (fmt & 0x000F)
+ innerMask = (1 << innerBits) - 1
+ outerMask = 0xFFFFFFFF - innerMask
+ outerShift = 16 - innerBits
+
+ entrySize = 1 + ((fmt & 0x0030) >> 4)
+ readArray = {
+ 1: reader.readUInt8Array,
+ 2: reader.readUShortArray,
+ 3: reader.readUInt24Array,
+ 4: reader.readULongArray,
+ }[entrySize]
+
+ return [
+ (((raw & outerMask) << outerShift) | (raw & innerMask))
+ for raw in readArray(nItems)
+ ]
+
+ def write(self, writer, font, tableDict, value, repeatIndex=None):
+ fmt = tableDict["EntryFormat"]
+ mapping = value
+ writer["MappingCount"].setValue(len(mapping))
+
+ innerBits = 1 + (fmt & 0x000F)
+ innerMask = (1 << innerBits) - 1
+ outerShift = 16 - innerBits
+
+ entrySize = 1 + ((fmt & 0x0030) >> 4)
+ writeArray = {
+ 1: writer.writeUInt8Array,
+ 2: writer.writeUShortArray,
+ 3: writer.writeUInt24Array,
+ 4: writer.writeULongArray,
+ }[entrySize]
+
+ writeArray(
+ [
+ (((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask))
+ for idx in mapping
+ ]
+ )
class VarDataValue(BaseConverter):
+ def read(self, reader, font, tableDict):
+ values = []
- def read(self, reader, font, tableDict):
- values = []
+ regionCount = tableDict["VarRegionCount"]
+ wordCount = tableDict["NumShorts"]
- regionCount = tableDict["VarRegionCount"]
- wordCount = tableDict["NumShorts"]
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = bool(wordCount & 0x8000)
+ wordCount = wordCount & 0x7FFF
- # https://github.com/fonttools/fonttools/issues/2279
- longWords = bool(wordCount & 0x8000)
- wordCount = wordCount & 0x7FFF
+ if longWords:
+ readBigArray, readSmallArray = reader.readLongArray, reader.readShortArray
+ else:
+ readBigArray, readSmallArray = reader.readShortArray, reader.readInt8Array
- if longWords:
- readBigArray, readSmallArray = reader.readLongArray, reader.readShortArray
- else:
- readBigArray, readSmallArray = reader.readShortArray, reader.readInt8Array
+ n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
+ values.extend(readBigArray(n1))
+ values.extend(readSmallArray(n2 - n1))
+ if n2 > regionCount: # Padding
+ del values[regionCount:]
- n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
- values.extend(readBigArray(n1))
- values.extend(readSmallArray(n2 - n1))
- if n2 > regionCount: # Padding
- del values[regionCount:]
+ return values
- return values
+ def write(self, writer, font, tableDict, values, repeatIndex=None):
+ regionCount = tableDict["VarRegionCount"]
+ wordCount = tableDict["NumShorts"]
- def write(self, writer, font, tableDict, values, repeatIndex=None):
- regionCount = tableDict["VarRegionCount"]
- wordCount = tableDict["NumShorts"]
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = bool(wordCount & 0x8000)
+ wordCount = wordCount & 0x7FFF
- # https://github.com/fonttools/fonttools/issues/2279
- longWords = bool(wordCount & 0x8000)
- wordCount = wordCount & 0x7FFF
+ (writeBigArray, writeSmallArray) = {
+ False: (writer.writeShortArray, writer.writeInt8Array),
+ True: (writer.writeLongArray, writer.writeShortArray),
+ }[longWords]
- (writeBigArray, writeSmallArray) = {
- False: (writer.writeShortArray, writer.writeInt8Array),
- True: (writer.writeLongArray, writer.writeShortArray),
- }[longWords]
+ n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
+ writeBigArray(values[:n1])
+ writeSmallArray(values[n1:regionCount])
+ if n2 > regionCount: # Padding
+ writer.writeSmallArray([0] * (n2 - regionCount))
- n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount)
- writeBigArray(values[:n1])
- writeSmallArray(values[n1:regionCount])
- if n2 > regionCount: # Padding
- writer.writeSmallArray([0] * (n2 - regionCount))
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ xmlWriter.newline()
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- xmlWriter.newline()
+ def xmlRead(self, attrs, content, font):
+ return safeEval(attrs["value"])
- def xmlRead(self, attrs, content, font):
- return safeEval(attrs["value"])
class LookupFlag(UShort):
- def xmlWrite(self, xmlWriter, font, value, name, attrs):
- xmlWriter.simpletag(name, attrs + [("value", value)])
- flags = []
- if value & 0x01: flags.append("rightToLeft")
- if value & 0x02: flags.append("ignoreBaseGlyphs")
- if value & 0x04: flags.append("ignoreLigatures")
- if value & 0x08: flags.append("ignoreMarks")
- if value & 0x10: flags.append("useMarkFilteringSet")
- if value & 0xff00: flags.append("markAttachmentType[%i]" % (value >> 8))
- if flags:
- xmlWriter.comment(" ".join(flags))
- xmlWriter.newline()
+ def xmlWrite(self, xmlWriter, font, value, name, attrs):
+ xmlWriter.simpletag(name, attrs + [("value", value)])
+ flags = []
+ if value & 0x01:
+ flags.append("rightToLeft")
+ if value & 0x02:
+ flags.append("ignoreBaseGlyphs")
+ if value & 0x04:
+ flags.append("ignoreLigatures")
+ if value & 0x08:
+ flags.append("ignoreMarks")
+ if value & 0x10:
+ flags.append("useMarkFilteringSet")
+ if value & 0xFF00:
+ flags.append("markAttachmentType[%i]" % (value >> 8))
+ if flags:
+ xmlWriter.comment(" ".join(flags))
+ xmlWriter.newline()
class _UInt8Enum(UInt8):
- enumClass = NotImplemented
+ enumClass = NotImplemented
+
+ def read(self, reader, font, tableDict):
+ return self.enumClass(super().read(reader, font, tableDict))
+
+ @classmethod
+ def fromString(cls, value):
+ return getattr(cls.enumClass, value.upper())
- def read(self, reader, font, tableDict):
- return self.enumClass(super().read(reader, font, tableDict))
- @classmethod
- def fromString(cls, value):
- return getattr(cls.enumClass, value.upper())
- @classmethod
- def toString(cls, value):
- return cls.enumClass(value).name.lower()
+ @classmethod
+ def toString(cls, value):
+ return cls.enumClass(value).name.lower()
class ExtendMode(_UInt8Enum):
- enumClass = _ExtendMode
+ enumClass = _ExtendMode
class CompositeMode(_UInt8Enum):
- enumClass = _CompositeMode
+ enumClass = _CompositeMode
converterMapping = {
- # type class
- "int8": Int8,
- "int16": Short,
- "uint8": UInt8,
- "uint16": UShort,
- "uint24": UInt24,
- "uint32": ULong,
- "char64": Char64,
- "Flags32": Flags32,
- "VarIndex": VarIndex,
- "Version": Version,
- "Tag": Tag,
- "GlyphID": GlyphID,
- "GlyphID32": GlyphID32,
- "NameID": NameID,
- "DeciPoints": DeciPoints,
- "Fixed": Fixed,
- "F2Dot14": F2Dot14,
- "Angle": Angle,
- "BiasedAngle": BiasedAngle,
- "struct": Struct,
- "Offset": Table,
- "LOffset": LTable,
- "Offset24": Table24,
- "ValueRecord": ValueRecord,
- "DeltaValue": DeltaValue,
- "VarIdxMapValue": VarIdxMapValue,
- "VarDataValue": VarDataValue,
- "LookupFlag": LookupFlag,
- "ExtendMode": ExtendMode,
- "CompositeMode": CompositeMode,
- "STATFlags": STATFlags,
-
- # AAT
- "CIDGlyphMap": CIDGlyphMap,
- "GlyphCIDMap": GlyphCIDMap,
- "MortChain": StructWithLength,
- "MortSubtable": StructWithLength,
- "MorxChain": StructWithLength,
- "MorxSubtable": MorxSubtableConverter,
-
- # "Template" types
- "AATLookup": lambda C: partial(AATLookup, tableClass=C),
- "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C),
- "STXHeader": lambda C: partial(STXHeader, tableClass=C),
- "OffsetTo": lambda C: partial(Table, tableClass=C),
- "LOffsetTo": lambda C: partial(LTable, tableClass=C),
- "LOffset24To": lambda C: partial(Table24, tableClass=C),
+ # type class
+ "int8": Int8,
+ "int16": Short,
+ "uint8": UInt8,
+ "uint16": UShort,
+ "uint24": UInt24,
+ "uint32": ULong,
+ "char64": Char64,
+ "Flags32": Flags32,
+ "VarIndex": VarIndex,
+ "Version": Version,
+ "Tag": Tag,
+ "GlyphID": GlyphID,
+ "GlyphID32": GlyphID32,
+ "NameID": NameID,
+ "DeciPoints": DeciPoints,
+ "Fixed": Fixed,
+ "F2Dot14": F2Dot14,
+ "Angle": Angle,
+ "BiasedAngle": BiasedAngle,
+ "struct": Struct,
+ "Offset": Table,
+ "LOffset": LTable,
+ "Offset24": Table24,
+ "ValueRecord": ValueRecord,
+ "DeltaValue": DeltaValue,
+ "VarIdxMapValue": VarIdxMapValue,
+ "VarDataValue": VarDataValue,
+ "LookupFlag": LookupFlag,
+ "ExtendMode": ExtendMode,
+ "CompositeMode": CompositeMode,
+ "STATFlags": STATFlags,
+ # AAT
+ "CIDGlyphMap": CIDGlyphMap,
+ "GlyphCIDMap": GlyphCIDMap,
+ "MortChain": StructWithLength,
+ "MortSubtable": StructWithLength,
+ "MorxChain": StructWithLength,
+ "MorxSubtable": MorxSubtableConverter,
+ # "Template" types
+ "AATLookup": lambda C: partial(AATLookup, tableClass=C),
+ "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C),
+ "STXHeader": lambda C: partial(STXHeader, tableClass=C),
+ "OffsetTo": lambda C: partial(Table, tableClass=C),
+ "LOffsetTo": lambda C: partial(LTable, tableClass=C),
+ "LOffset24To": lambda C: partial(Table24, tableClass=C),
}
diff --git a/Lib/fontTools/ttLib/tables/otData.py b/Lib/fontTools/ttLib/tables/otData.py
index 2e65869f..56716824 100755..100644
--- a/Lib/fontTools/ttLib/tables/otData.py
+++ b/Lib/fontTools/ttLib/tables/otData.py
@@ -1,1957 +1,6236 @@
otData = [
-
- #
- # common
- #
-
- ('LookupOrder', []),
-
- ('ScriptList', [
- ('uint16', 'ScriptCount', None, None, 'Number of ScriptRecords'),
- ('struct', 'ScriptRecord', 'ScriptCount', 0, 'Array of ScriptRecords -listed alphabetically by ScriptTag'),
- ]),
-
- ('ScriptRecord', [
- ('Tag', 'ScriptTag', None, None, '4-byte ScriptTag identifier'),
- ('Offset', 'Script', None, None, 'Offset to Script table-from beginning of ScriptList'),
- ]),
-
- ('Script', [
- ('Offset', 'DefaultLangSys', None, None, 'Offset to DefaultLangSys table-from beginning of Script table-may be NULL'),
- ('uint16', 'LangSysCount', None, None, 'Number of LangSysRecords for this script-excluding the DefaultLangSys'),
- ('struct', 'LangSysRecord', 'LangSysCount', 0, 'Array of LangSysRecords-listed alphabetically by LangSysTag'),
- ]),
-
- ('LangSysRecord', [
- ('Tag', 'LangSysTag', None, None, '4-byte LangSysTag identifier'),
- ('Offset', 'LangSys', None, None, 'Offset to LangSys table-from beginning of Script table'),
- ]),
-
- ('LangSys', [
- ('Offset', 'LookupOrder', None, None, '= NULL (reserved for an offset to a reordering table)'),
- ('uint16', 'ReqFeatureIndex', None, None, 'Index of a feature required for this language system- if no required features = 0xFFFF'),
- ('uint16', 'FeatureCount', None, None, 'Number of FeatureIndex values for this language system-excludes the required feature'),
- ('uint16', 'FeatureIndex', 'FeatureCount', 0, 'Array of indices into the FeatureList-in arbitrary order'),
- ]),
-
- ('FeatureList', [
- ('uint16', 'FeatureCount', None, None, 'Number of FeatureRecords in this table'),
- ('struct', 'FeatureRecord', 'FeatureCount', 0, 'Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag'),
- ]),
-
- ('FeatureRecord', [
- ('Tag', 'FeatureTag', None, None, '4-byte feature identification tag'),
- ('Offset', 'Feature', None, None, 'Offset to Feature table-from beginning of FeatureList'),
- ]),
-
- ('Feature', [
- ('Offset', 'FeatureParams', None, None, '= NULL (reserved for offset to FeatureParams)'),
- ('uint16', 'LookupCount', None, None, 'Number of LookupList indices for this feature'),
- ('uint16', 'LookupListIndex', 'LookupCount', 0, 'Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)'),
- ]),
-
- ('FeatureParams', [
- ]),
-
- ('FeatureParamsSize', [
- ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'),
- ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'),
- ('NameID', 'SubfamilyNameID', None, None, 'Subfamily NameID.'),
- ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'),
- ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'),
- ]),
-
- ('FeatureParamsStylisticSet', [
- ('uint16', 'Version', None, None, 'Set to 0.'),
- ('NameID', 'UINameID', None, None, 'UI NameID.'),
- ]),
-
- ('FeatureParamsCharacterVariants', [
- ('uint16', 'Format', None, None, 'Set to 0.'),
- ('NameID', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'),
- ('NameID', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'),
- ('NameID', 'SampleTextNameID', None, None, 'Sample text NameID.'),
- ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'),
- ('NameID', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'),
- ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'),
- ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'),
- ]),
-
- ('LookupList', [
- ('uint16', 'LookupCount', None, None, 'Number of lookups in this table'),
- ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'),
- ]),
-
- ('Lookup', [
- ('uint16', 'LookupType', None, None, 'Different enumerations for GSUB and GPOS'),
- ('LookupFlag', 'LookupFlag', None, None, 'Lookup qualifiers'),
- ('uint16', 'SubTableCount', None, None, 'Number of SubTables for this lookup'),
- ('Offset', 'SubTable', 'SubTableCount', 0, 'Array of offsets to SubTables-from beginning of Lookup table'),
- ('uint16', 'MarkFilteringSet', None, 'LookupFlag & 0x0010', 'If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.'),
- ]),
-
- ('CoverageFormat1', [
- ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 1'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the GlyphArray'),
- ('GlyphID', 'GlyphArray', 'GlyphCount', 0, 'Array of GlyphIDs-in numerical order'),
- ]),
-
- ('CoverageFormat2', [
- ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'RangeCount', None, None, 'Number of RangeRecords'),
- ('struct', 'RangeRecord', 'RangeCount', 0, 'Array of glyph ranges-ordered by Start GlyphID'),
- ]),
-
- ('RangeRecord', [
- ('GlyphID', 'Start', None, None, 'First GlyphID in the range'),
- ('GlyphID', 'End', None, None, 'Last GlyphID in the range'),
- ('uint16', 'StartCoverageIndex', None, None, 'Coverage Index of first GlyphID in range'),
- ]),
-
- ('ClassDefFormat1', [
- ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 1'),
- ('GlyphID', 'StartGlyph', None, None, 'First GlyphID of the ClassValueArray'),
- ('uint16', 'GlyphCount', None, None, 'Size of the ClassValueArray'),
- ('uint16', 'ClassValueArray', 'GlyphCount', 0, 'Array of Class Values-one per GlyphID'),
- ]),
-
- ('ClassDefFormat2', [
- ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'ClassRangeCount', None, None, 'Number of ClassRangeRecords'),
- ('struct', 'ClassRangeRecord', 'ClassRangeCount', 0, 'Array of ClassRangeRecords-ordered by Start GlyphID'),
- ]),
-
- ('ClassRangeRecord', [
- ('GlyphID', 'Start', None, None, 'First GlyphID in the range'),
- ('GlyphID', 'End', None, None, 'Last GlyphID in the range'),
- ('uint16', 'Class', None, None, 'Applied to all glyphs in the range'),
- ]),
-
- ('Device', [
- ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'),
- ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'),
- ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'),
- ('DeltaValue', 'DeltaValue', '', 'DeltaFormat in (1,2,3)', 'Array of compressed data'),
- ]),
-
-
- #
- # gpos
- #
-
- ('GPOS', [
- ('Version', 'Version', None, None, 'Version of the GPOS table- 0x00010000 or 0x00010001'),
- ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'),
- ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'),
- ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'),
- ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GPOS table'),
- ]),
-
- ('SinglePosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'),
- ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'),
- ('ValueRecord', 'Value', None, None, 'Defines positioning value(s)-applied to all glyphs in the Coverage table'),
- ]),
-
- ('SinglePosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'),
- ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'),
- ('uint16', 'ValueCount', None, None, 'Number of ValueRecords'),
- ('ValueRecord', 'Value', 'ValueCount', 0, 'Array of ValueRecords-positioning values applied to glyphs'),
- ]),
-
- ('PairPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair'),
- ('uint16', 'ValueFormat1', None, None, 'Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)'),
- ('uint16', 'ValueFormat2', None, None, 'Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)'),
- ('uint16', 'PairSetCount', None, None, 'Number of PairSet tables'),
- ('Offset', 'PairSet', 'PairSetCount', 0, 'Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index'),
- ]),
-
- ('PairSet', [
- ('uint16', 'PairValueCount', None, None, 'Number of PairValueRecords'),
- ('struct', 'PairValueRecord', 'PairValueCount', 0, 'Array of PairValueRecords-ordered by GlyphID of the second glyph'),
- ]),
-
- ('PairValueRecord', [
- ('GlyphID', 'SecondGlyph', None, None, 'GlyphID of second glyph in the pair-first glyph is listed in the Coverage table'),
- ('ValueRecord', 'Value1', None, None, 'Positioning data for the first glyph in the pair'),
- ('ValueRecord', 'Value2', None, None, 'Positioning data for the second glyph in the pair'),
- ]),
-
- ('PairPosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair'),
- ('uint16', 'ValueFormat1', None, None, 'ValueRecord definition-for the first glyph of the pair-may be zero (0)'),
- ('uint16', 'ValueFormat2', None, None, 'ValueRecord definition-for the second glyph of the pair-may be zero (0)'),
- ('Offset', 'ClassDef1', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair'),
- ('Offset', 'ClassDef2', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair'),
- ('uint16', 'Class1Count', None, None, 'Number of classes in ClassDef1 table-includes Class0'),
- ('uint16', 'Class2Count', None, None, 'Number of classes in ClassDef2 table-includes Class0'),
- ('struct', 'Class1Record', 'Class1Count', 0, 'Array of Class1 records-ordered by Class1'),
- ]),
-
- ('Class1Record', [
- ('struct', 'Class2Record', 'Class2Count', 0, 'Array of Class2 records-ordered by Class2'),
- ]),
-
- ('Class2Record', [
- ('ValueRecord', 'Value1', None, None, 'Positioning for first glyph-empty if ValueFormat1 = 0'),
- ('ValueRecord', 'Value2', None, None, 'Positioning for second glyph-empty if ValueFormat2 = 0'),
- ]),
-
- ('CursivePosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of CursivePos subtable'),
- ('uint16', 'EntryExitCount', None, None, 'Number of EntryExit records'),
- ('struct', 'EntryExitRecord', 'EntryExitCount', 0, 'Array of EntryExit records-in Coverage Index order'),
- ]),
-
- ('EntryExitRecord', [
- ('Offset', 'EntryAnchor', None, None, 'Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL'),
- ('Offset', 'ExitAnchor', None, None, 'Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL'),
- ]),
-
- ('MarkBasePosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'MarkCoverage', None, None, 'Offset to MarkCoverage table-from beginning of MarkBasePos subtable'),
- ('Offset', 'BaseCoverage', None, None, 'Offset to BaseCoverage table-from beginning of MarkBasePos subtable'),
- ('uint16', 'ClassCount', None, None, 'Number of classes defined for marks'),
- ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkBasePos subtable'),
- ('Offset', 'BaseArray', None, None, 'Offset to BaseArray table-from beginning of MarkBasePos subtable'),
- ]),
-
- ('BaseArray', [
- ('uint16', 'BaseCount', None, None, 'Number of BaseRecords'),
- ('struct', 'BaseRecord', 'BaseCount', 0, 'Array of BaseRecords-in order of BaseCoverage Index'),
- ]),
-
- ('BaseRecord', [
- ('Offset', 'BaseAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based'),
- ]),
-
- ('MarkLigPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'MarkCoverage', None, None, 'Offset to Mark Coverage table-from beginning of MarkLigPos subtable'),
- ('Offset', 'LigatureCoverage', None, None, 'Offset to Ligature Coverage table-from beginning of MarkLigPos subtable'),
- ('uint16', 'ClassCount', None, None, 'Number of defined mark classes'),
- ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkLigPos subtable'),
- ('Offset', 'LigatureArray', None, None, 'Offset to LigatureArray table-from beginning of MarkLigPos subtable'),
- ]),
-
- ('LigatureArray', [
- ('uint16', 'LigatureCount', None, None, 'Number of LigatureAttach table offsets'),
- ('Offset', 'LigatureAttach', 'LigatureCount', 0, 'Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index'),
- ]),
-
- ('LigatureAttach', [
- ('uint16', 'ComponentCount', None, None, 'Number of ComponentRecords in this ligature'),
- ('struct', 'ComponentRecord', 'ComponentCount', 0, 'Array of Component records-ordered in writing direction'),
- ]),
-
- ('ComponentRecord', [
- ('Offset', 'LigatureAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array'),
- ]),
-
- ('MarkMarkPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Mark1Coverage', None, None, 'Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable'),
- ('Offset', 'Mark2Coverage', None, None, 'Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable'),
- ('uint16', 'ClassCount', None, None, 'Number of Combining Mark classes defined'),
- ('Offset', 'Mark1Array', None, None, 'Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable'),
- ('Offset', 'Mark2Array', None, None, 'Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable'),
- ]),
-
- ('Mark2Array', [
- ('uint16', 'Mark2Count', None, None, 'Number of Mark2 records'),
- ('struct', 'Mark2Record', 'Mark2Count', 0, 'Array of Mark2 records-in Coverage order'),
- ]),
-
- ('Mark2Record', [
- ('Offset', 'Mark2Anchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array'),
- ]),
-
- ('PosLookupRecord', [
- ('uint16', 'SequenceIndex', None, None, 'Index to input glyph sequence-first glyph = 0'),
- ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'),
- ]),
-
- ('ContextPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
- ('uint16', 'PosRuleSetCount', None, None, 'Number of PosRuleSet tables'),
- ('Offset', 'PosRuleSet', 'PosRuleSetCount', 0, 'Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'),
- ]),
-
- ('PosRuleSet', [
- ('uint16', 'PosRuleCount', None, None, 'Number of PosRule tables'),
- ('Offset', 'PosRule', 'PosRuleCount', 0, 'Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference'),
- ]),
-
- ('PosRule', [
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the Input glyph sequence'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-starting with the second glyph'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
- ]),
-
- ('ContextPosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
- ('Offset', 'ClassDef', None, None, 'Offset to ClassDef table-from beginning of ContextPos subtable'),
- ('uint16', 'PosClassSetCount', None, None, 'Number of PosClassSet tables'),
- ('Offset', 'PosClassSet', 'PosClassSetCount', 0, 'Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL'),
- ]),
-
- ('PosClassSet', [
- ('uint16', 'PosClassRuleCount', None, None, 'Number of PosClassRule tables'),
- ('Offset', 'PosClassRule', 'PosClassRuleCount', 0, 'Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference'),
- ]),
-
- ('PosClassRule', [
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs to be matched'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph sequence'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
- ]),
-
- ('ContextPosFormat3', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input sequence'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage tables-from beginning of ContextPos subtable'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
- ]),
-
- ('ChainContextPosFormat1', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
- ('uint16', 'ChainPosRuleSetCount', None, None, 'Number of ChainPosRuleSet tables'),
- ('Offset', 'ChainPosRuleSet', 'ChainPosRuleSetCount', 0, 'Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'),
- ]),
-
- ('ChainPosRuleSet', [
- ('uint16', 'ChainPosRuleCount', None, None, 'Number of ChainPosRule tables'),
- ('Offset', 'ChainPosRule', 'ChainPosRuleCount', 0, 'Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference'),
- ]),
-
- ('ChainPosRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'),
- ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'),
- ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'),
- ]),
-
- ('ChainContextPosFormat2', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ChainContextPos subtable'),
- ('Offset', 'BacktrackClassDef', None, None, 'Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable'),
- ('Offset', 'InputClassDef', None, None, 'Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable'),
- ('Offset', 'LookAheadClassDef', None, None, 'Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable'),
- ('uint16', 'ChainPosClassSetCount', None, None, 'Number of ChainPosClassSet tables'),
- ('Offset', 'ChainPosClassSet', 'ChainPosClassSetCount', 0, 'Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL'),
- ]),
-
- ('ChainPosClassSet', [
- ('uint16', 'ChainPosClassRuleCount', None, None, 'Number of ChainPosClassRule tables'),
- ('Offset', 'ChainPosClassRule', 'ChainPosClassRuleCount', 0, 'Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference'),
- ]),
-
- ('ChainPosClassRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'),
- ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'),
- ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'),
- ]),
-
- ('ChainContextPosFormat3', [
- ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
- ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
- ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'),
- ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
- ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
- ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
- ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords,in design order'),
- ]),
-
- ('ExtensionPosFormat1', [
- ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'),
- ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'),
- ('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'),
- ]),
-
-# ('ValueRecord', [
-# ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'),
-# ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'),
-# ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'),
-# ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'),
-# ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'),
-# ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'),
-# ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'),
-# ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'),
-# ]),
-
- ('AnchorFormat1', [
- ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 1'),
- ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
- ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
- ]),
-
- ('AnchorFormat2', [
- ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 2'),
- ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
- ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
- ('uint16', 'AnchorPoint', None, None, 'Index to glyph contour point'),
- ]),
-
- ('AnchorFormat3', [
- ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 3'),
- ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
- ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
- ('Offset', 'XDeviceTable', None, None, 'Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)'),
- ('Offset', 'YDeviceTable', None, None, 'Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)'),
- ]),
-
- ('MarkArray', [
- ('uint16', 'MarkCount', None, None, 'Number of MarkRecords'),
- ('struct', 'MarkRecord', 'MarkCount', 0, 'Array of MarkRecords-in Coverage order'),
- ]),
-
- ('MarkRecord', [
- ('uint16', 'Class', None, None, 'Class defined for this mark'),
- ('Offset', 'MarkAnchor', None, None, 'Offset to Anchor table-from beginning of MarkArray table'),
- ]),
-
-
- #
- # gsub
- #
-
- ('GSUB', [
- ('Version', 'Version', None, None, 'Version of the GSUB table- 0x00010000 or 0x00010001'),
- ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'),
- ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'),
- ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'),
- ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GSUB table'),
- ]),
-
- ('SingleSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'DeltaGlyphID', None, None, 'Add to original GlyphID modulo 65536 to get substitute GlyphID'),
- ]),
-
- ('SingleSubstFormat2', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'),
- ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage Index'),
- ]),
-
- ('MultipleSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'SequenceCount', None, None, 'Number of Sequence table offsets in the Sequence array'),
- ('Offset', 'Sequence', 'SequenceCount', 0, 'Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('Sequence', [
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array. This should always be greater than 0.'),
- ('GlyphID', 'Substitute', 'GlyphCount', 0, 'String of GlyphIDs to substitute'),
- ]),
-
- ('AlternateSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'AlternateSetCount', None, None, 'Number of AlternateSet tables'),
- ('Offset', 'AlternateSet', 'AlternateSetCount', 0, 'Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('AlternateSet', [
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Alternate array'),
- ('GlyphID', 'Alternate', 'GlyphCount', 0, 'Array of alternate GlyphIDs-in arbitrary order'),
- ]),
-
- ('LigatureSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'LigSetCount', None, None, 'Number of LigatureSet tables'),
- ('Offset', 'LigatureSet', 'LigSetCount', 0, 'Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('LigatureSet', [
- ('uint16', 'LigatureCount', None, None, 'Number of Ligature tables'),
- ('Offset', 'Ligature', 'LigatureCount', 0, 'Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference'),
- ]),
-
- ('Ligature', [
- ('GlyphID', 'LigGlyph', None, None, 'GlyphID of ligature to substitute'),
- ('uint16', 'CompCount', None, None, 'Number of components in the ligature'),
- ('GlyphID', 'Component', 'CompCount', -1, 'Array of component GlyphIDs-start with the second component-ordered in writing direction'),
- ]),
-
- ('SubstLookupRecord', [
- ('uint16', 'SequenceIndex', None, None, 'Index into current glyph sequence-first glyph = 0'),
- ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'),
- ]),
-
- ('ContextSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'SubRuleSetCount', None, None, 'Number of SubRuleSet tables-must equal GlyphCount in Coverage table'),
- ('Offset', 'SubRuleSet', 'SubRuleSetCount', 0, 'Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('SubRuleSet', [
- ('uint16', 'SubRuleCount', None, None, 'Number of SubRule tables'),
- ('Offset', 'SubRule', 'SubRuleCount', 0, 'Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference'),
- ]),
-
- ('SubRule', [
- ('uint16', 'GlyphCount', None, None, 'Total number of glyphs in input glyph sequence-includes the first glyph'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-start with second glyph'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'),
- ]),
-
- ('ContextSubstFormat2', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('Offset', 'ClassDef', None, None, 'Offset to glyph ClassDef table-from beginning of Substitution table'),
- ('uint16', 'SubClassSetCount', None, None, 'Number of SubClassSet tables'),
- ('Offset', 'SubClassSet', 'SubClassSetCount', 0, 'Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL'),
- ]),
-
- ('SubClassSet', [
- ('uint16', 'SubClassRuleCount', None, None, 'Number of SubClassRule tables'),
- ('Offset', 'SubClassRule', 'SubClassRuleCount', 0, 'Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference'),
- ]),
-
- ('SubClassRule', [
- ('uint16', 'GlyphCount', None, None, 'Total number of classes specified for the context in the rule-includes the first class'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph class sequence'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of Substitution lookups-in design order'),
- ]),
-
- ('ContextSubstFormat3', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input glyph sequence'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'),
- ]),
-
- ('ChainContextSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('uint16', 'ChainSubRuleSetCount', None, None, 'Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table'),
- ('Offset', 'ChainSubRuleSet', 'ChainSubRuleSetCount', 0, 'Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'),
- ]),
-
- ('ChainSubRuleSet', [
- ('uint16', 'ChainSubRuleCount', None, None, 'Number of ChainSubRule tables'),
- ('Offset', 'ChainSubRule', 'ChainSubRuleCount', 0, 'Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference'),
- ]),
-
- ('ChainSubRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'),
- ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'),
- ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'),
- ]),
-
- ('ChainContextSubstFormat2', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
- ('Offset', 'BacktrackClassDef', None, None, 'Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table'),
- ('Offset', 'InputClassDef', None, None, 'Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table'),
- ('Offset', 'LookAheadClassDef', None, None, 'Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table'),
- ('uint16', 'ChainSubClassSetCount', None, None, 'Number of ChainSubClassSet tables'),
- ('Offset', 'ChainSubClassSet', 'ChainSubClassSetCount', 0, 'Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL'),
- ]),
-
- ('ChainSubClassSet', [
- ('uint16', 'ChainSubClassRuleCount', None, None, 'Number of ChainSubClassRule tables'),
- ('Offset', 'ChainSubClassRule', 'ChainSubClassRuleCount', 0, 'Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference'),
- ]),
-
- ('ChainSubClassRule', [
- ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
- ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'),
- ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'),
- ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'),
- ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'),
- ]),
-
- ('ChainContextSubstFormat3', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
- ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
- ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'),
- ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
- ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
- ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
- ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords, in design order'),
- ]),
-
- ('ExtensionSubstFormat1', [
- ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'),
- ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'),
- ('LOffset', 'ExtSubTable', None, None, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'),
- ]),
-
- ('ReverseChainSingleSubstFormat1', [
- ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
- ('Offset', 'Coverage', None, 0, 'Offset to Coverage table - from beginning of Substitution table'),
- ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
- ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
- ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
- ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
- ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'),
- ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage index'),
- ]),
-
- #
- # gdef
- #
-
- ('GDEF', [
- ('Version', 'Version', None, None, 'Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003'),
- ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'),
- ('Offset', 'MarkGlyphSetsDef', None, 'Version >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
- ('LOffset', 'VarStore', None, 'Version >= 0x00010003', 'Offset to variation store (may be NULL)'),
- ]),
-
- ('AttachList', [
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of AttachList table'),
- ('uint16', 'GlyphCount', None, None, 'Number of glyphs with attachment points'),
- ('Offset', 'AttachPoint', 'GlyphCount', 0, 'Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order'),
- ]),
-
- ('AttachPoint', [
- ('uint16', 'PointCount', None, None, 'Number of attachment points on this glyph'),
- ('uint16', 'PointIndex', 'PointCount', 0, 'Array of contour point indices -in increasing numerical order'),
- ]),
-
- ('LigCaretList', [
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of LigCaretList table'),
- ('uint16', 'LigGlyphCount', None, None, 'Number of ligature glyphs'),
- ('Offset', 'LigGlyph', 'LigGlyphCount', 0, 'Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order'),
- ]),
-
- ('LigGlyph', [
- ('uint16', 'CaretCount', None, None, 'Number of CaretValues for this ligature (components - 1)'),
- ('Offset', 'CaretValue', 'CaretCount', 0, 'Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order'),
- ]),
-
- ('CaretValueFormat1', [
- ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 1'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ]),
-
- ('CaretValueFormat2', [
- ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'CaretValuePoint', None, None, 'Contour point index on glyph'),
- ]),
-
- ('CaretValueFormat3', [
- ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 3'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value-from beginning of CaretValue table'),
- ]),
-
- ('MarkGlyphSetsDef', [
- ('uint16', 'MarkSetTableFormat', None, None, 'Format identifier == 1'),
- ('uint16', 'MarkSetCount', None, None, 'Number of mark sets defined'),
- ('LOffset', 'Coverage', 'MarkSetCount', 0, 'Array of offsets to mark set coverage tables.'),
- ]),
-
- #
- # base
- #
-
- ('BASE', [
- ('Version', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'),
- ('Offset', 'HorizAxis', None, None, 'Offset to horizontal Axis table-from beginning of BASE table-may be NULL'),
- ('Offset', 'VertAxis', None, None, 'Offset to vertical Axis table-from beginning of BASE table-may be NULL'),
- ('LOffset', 'VarStore', None, 'Version >= 0x00010001', 'Offset to variation store (may be NULL)'),
- ]),
-
- ('Axis', [
- ('Offset', 'BaseTagList', None, None, 'Offset to BaseTagList table-from beginning of Axis table-may be NULL'),
- ('Offset', 'BaseScriptList', None, None, 'Offset to BaseScriptList table-from beginning of Axis table'),
- ]),
-
- ('BaseTagList', [
- ('uint16', 'BaseTagCount', None, None, 'Number of baseline identification tags in this text direction-may be zero (0)'),
- ('Tag', 'BaselineTag', 'BaseTagCount', 0, 'Array of 4-byte baseline identification tags-must be in alphabetical order'),
- ]),
-
- ('BaseScriptList', [
- ('uint16', 'BaseScriptCount', None, None, 'Number of BaseScriptRecords defined'),
- ('struct', 'BaseScriptRecord', 'BaseScriptCount', 0, 'Array of BaseScriptRecords-in alphabetical order by BaseScriptTag'),
- ]),
-
- ('BaseScriptRecord', [
- ('Tag', 'BaseScriptTag', None, None, '4-byte script identification tag'),
- ('Offset', 'BaseScript', None, None, 'Offset to BaseScript table-from beginning of BaseScriptList'),
- ]),
-
- ('BaseScript', [
- ('Offset', 'BaseValues', None, None, 'Offset to BaseValues table-from beginning of BaseScript table-may be NULL'),
- ('Offset', 'DefaultMinMax', None, None, 'Offset to MinMax table- from beginning of BaseScript table-may be NULL'),
- ('uint16', 'BaseLangSysCount', None, None, 'Number of BaseLangSysRecords defined-may be zero (0)'),
- ('struct', 'BaseLangSysRecord', 'BaseLangSysCount', 0, 'Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag'),
- ]),
-
- ('BaseLangSysRecord', [
- ('Tag', 'BaseLangSysTag', None, None, '4-byte language system identification tag'),
- ('Offset', 'MinMax', None, None, 'Offset to MinMax table-from beginning of BaseScript table'),
- ]),
-
- ('BaseValues', [
- ('uint16', 'DefaultIndex', None, None, 'Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList'),
- ('uint16', 'BaseCoordCount', None, None, 'Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList'),
- ('Offset', 'BaseCoord', 'BaseCoordCount', 0, 'Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList'),
- ]),
-
- ('MinMax', [
- ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL'),
- ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL'),
- ('uint16', 'FeatMinMaxCount', None, None, 'Number of FeatMinMaxRecords-may be zero (0)'),
- ('struct', 'FeatMinMaxRecord', 'FeatMinMaxCount', 0, 'Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag'),
- ]),
-
- ('FeatMinMaxRecord', [
- ('Tag', 'FeatureTableTag', None, None, '4-byte feature identification tag-must match FeatureTag in FeatureList'),
- ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL'),
- ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL'),
- ]),
-
- ('BaseCoordFormat1', [
- ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 1'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ]),
-
- ('BaseCoordFormat2', [
- ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 2'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ('GlyphID', 'ReferenceGlyph', None, None, 'GlyphID of control glyph'),
- ('uint16', 'BaseCoordPoint', None, None, 'Index of contour point on the ReferenceGlyph'),
- ]),
-
- ('BaseCoordFormat3', [
- ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 3'),
- ('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
- ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value'),
- ]),
-
-
- #
- # jstf
- #
-
- ('JSTF', [
- ('Version', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'),
- ('uint16', 'JstfScriptCount', None, None, 'Number of JstfScriptRecords in this table'),
- ('struct', 'JstfScriptRecord', 'JstfScriptCount', 0, 'Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag'),
- ]),
-
- ('JstfScriptRecord', [
- ('Tag', 'JstfScriptTag', None, None, '4-byte JstfScript identification'),
- ('Offset', 'JstfScript', None, None, 'Offset to JstfScript table-from beginning of JSTF Header'),
- ]),
-
- ('JstfScript', [
- ('Offset', 'ExtenderGlyph', None, None, 'Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL'),
- ('Offset', 'DefJstfLangSys', None, None, 'Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL'),
- ('uint16', 'JstfLangSysCount', None, None, 'Number of JstfLangSysRecords in this table- may be zero (0)'),
- ('struct', 'JstfLangSysRecord', 'JstfLangSysCount', 0, 'Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag'),
- ]),
-
- ('JstfLangSysRecord', [
- ('Tag', 'JstfLangSysTag', None, None, '4-byte JstfLangSys identifier'),
- ('Offset', 'JstfLangSys', None, None, 'Offset to JstfLangSys table-from beginning of JstfScript table'),
- ]),
-
- ('ExtenderGlyph', [
- ('uint16', 'GlyphCount', None, None, 'Number of Extender Glyphs in this script'),
- ('GlyphID', 'ExtenderGlyph', 'GlyphCount', 0, 'GlyphIDs-in increasing numerical order'),
- ]),
-
- ('JstfLangSys', [
- ('uint16', 'JstfPriorityCount', None, None, 'Number of JstfPriority tables'),
- ('Offset', 'JstfPriority', 'JstfPriorityCount', 0, 'Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order'),
- ]),
-
- ('JstfPriority', [
- ('Offset', 'ShrinkageEnableGSUB', None, None, 'Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageDisableGSUB', None, None, 'Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageEnableGPOS', None, None, 'Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageDisableGPOS', None, None, 'Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ShrinkageJstfMax', None, None, 'Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL'),
- ('Offset', 'ExtensionEnableGSUB', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'),
- ('Offset', 'ExtensionDisableGSUB', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ExtensionEnableGPOS', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'),
- ('Offset', 'ExtensionDisableGPOS', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
- ('Offset', 'ExtensionJstfMax', None, None, 'Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL'),
- ]),
-
- ('JstfGSUBModList', [
- ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'),
- ('uint16', 'GSUBLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GSUB-in increasing numerical order'),
- ]),
-
- ('JstfGPOSModList', [
- ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'),
- ('uint16', 'GPOSLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GPOS-in increasing numerical order'),
- ]),
-
- ('JstfMax', [
- ('uint16', 'LookupCount', None, None, 'Number of lookup Indices for this modification'),
- ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'),
- ]),
-
-
- #
- # STAT
- #
- ('STAT', [
- ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000, currently 0x00010002.'),
- ('uint16', 'DesignAxisRecordSize', None, None, 'Size in bytes of each design axis record'),
- ('uint16', 'DesignAxisCount', None, None, 'Number of design axis records'),
- ('LOffsetTo(AxisRecordArray)', 'DesignAxisRecord', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the design axes array'),
- ('uint16', 'AxisValueCount', None, None, 'Number of axis value tables'),
- ('LOffsetTo(AxisValueArray)', 'AxisValueArray', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the axes value offset array'),
- ('NameID', 'ElidedFallbackNameID', None, 'Version >= 0x00010001', 'NameID to use when all style attributes are elided.'),
- ]),
-
- ('AxisRecordArray', [
- ('AxisRecord', 'Axis', 'DesignAxisCount', 0, 'Axis records'),
- ]),
-
- ('AxisRecord', [
- ('Tag', 'AxisTag', None, None, 'A tag identifying the axis of design variation'),
- ('NameID', 'AxisNameID', None, None, 'The name ID for entries in the "name" table that provide a display string for this axis'),
- ('uint16', 'AxisOrdering', None, None, 'A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names'),
- ('uint8', 'MoreBytes', 'DesignAxisRecordSize', -8, 'Extra bytes. Set to empty array.'),
- ]),
-
- ('AxisValueArray', [
- ('Offset', 'AxisValue', 'AxisValueCount', 0, 'Axis values'),
- ]),
-
- ('AxisValueFormat1', [
- ('uint16', 'Format', None, None, 'Format, = 1'),
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('Fixed', 'Value', None, None, ''),
- ]),
-
- ('AxisValueFormat2', [
- ('uint16', 'Format', None, None, 'Format, = 2'),
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('Fixed', 'NominalValue', None, None, ''),
- ('Fixed', 'RangeMinValue', None, None, ''),
- ('Fixed', 'RangeMaxValue', None, None, ''),
- ]),
-
- ('AxisValueFormat3', [
- ('uint16', 'Format', None, None, 'Format, = 3'),
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('Fixed', 'Value', None, None, ''),
- ('Fixed', 'LinkedValue', None, None, ''),
- ]),
-
- ('AxisValueFormat4', [
- ('uint16', 'Format', None, None, 'Format, = 4'),
- ('uint16', 'AxisCount', None, None, 'The total number of axes contributing to this axis-values combination.'),
- ('STATFlags', 'Flags', None, None, 'Flags.'),
- ('NameID', 'ValueNameID', None, None, ''),
- ('struct', 'AxisValueRecord', 'AxisCount', 0, 'Array of AxisValue records that provide the combination of axis values, one for each contributing axis. '),
- ]),
-
- ('AxisValueRecord', [
- ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'),
- ('Fixed', 'Value', None, None, 'A numeric value for this attribute value.'),
- ]),
-
-
- #
- # Variation fonts
- #
-
- # GSUB/GPOS FeatureVariations
-
- ('FeatureVariations', [
- ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'),
- ('uint32', 'FeatureVariationCount', None, None, 'Number of records in the FeatureVariationRecord array'),
- ('struct', 'FeatureVariationRecord', 'FeatureVariationCount', 0, 'Array of FeatureVariationRecord'),
- ]),
-
- ('FeatureVariationRecord', [
- ('LOffset', 'ConditionSet', None, None, 'Offset to a ConditionSet table, from beginning of the FeatureVariations table.'),
- ('LOffset', 'FeatureTableSubstitution', None, None, 'Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table'),
- ]),
-
- ('ConditionSet', [
- ('uint16', 'ConditionCount', None, None, 'Number of condition tables in the ConditionTable array'),
- ('LOffset', 'ConditionTable', 'ConditionCount', 0, 'Array of condition tables.'),
- ]),
-
- ('ConditionTableFormat1', [
- ('uint16', 'Format', None, None, 'Format, = 1'),
- ('uint16', 'AxisIndex', None, None, 'Index for the variation axis within the fvar table, base 0.'),
- ('F2Dot14', 'FilterRangeMinValue', None, None, 'Minimum normalized axis value of the font variation instances that satisfy this condition.'),
- ('F2Dot14', 'FilterRangeMaxValue', None, None, 'Maximum value that satisfies this condition.'),
- ]),
-
- ('FeatureTableSubstitution', [
- ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'),
- ('uint16', 'SubstitutionCount', None, None, 'Number of records in the FeatureVariationRecords array'),
- ('FeatureTableSubstitutionRecord', 'SubstitutionRecord', 'SubstitutionCount', 0, 'Array of FeatureTableSubstitutionRecord'),
- ]),
-
- ('FeatureTableSubstitutionRecord', [
- ('uint16', 'FeatureIndex', None, None, 'The feature table index to match.'),
- ('LOffset', 'Feature', None, None, 'Offset to an alternate feature table, from start of the FeatureTableSubstitution table.'),
- ]),
-
- # VariationStore
-
- ('VarRegionAxis', [
- ('F2Dot14', 'StartCoord', None, None, ''),
- ('F2Dot14', 'PeakCoord', None, None, ''),
- ('F2Dot14', 'EndCoord', None, None, ''),
- ]),
-
- ('VarRegion', [
- ('struct', 'VarRegionAxis', 'RegionAxisCount', 0, ''),
- ]),
-
- ('VarRegionList', [
- ('uint16', 'RegionAxisCount', None, None, ''),
- ('uint16', 'RegionCount', None, None, ''),
- ('VarRegion', 'Region', 'RegionCount', 0, ''),
- ]),
-
- ('VarData', [
- ('uint16', 'ItemCount', None, None, ''),
- ('uint16', 'NumShorts', None, None, ''),
- ('uint16', 'VarRegionCount', None, None, ''),
- ('uint16', 'VarRegionIndex', 'VarRegionCount', 0, ''),
- ('VarDataValue', 'Item', 'ItemCount', 0, ''),
- ]),
-
- ('VarStore', [
- ('uint16', 'Format', None, None, 'Set to 1.'),
- ('LOffset', 'VarRegionList', None, None, ''),
- ('uint16', 'VarDataCount', None, None, ''),
- ('LOffset', 'VarData', 'VarDataCount', 0, ''),
- ]),
-
- # Variation helpers
-
- ('VarIdxMap', [
- ('uint16', 'EntryFormat', None, None, ''), # Automatically computed
- ('uint16', 'MappingCount', None, None, ''), # Automatically computed
- ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
- ]),
-
- ('DeltaSetIndexMapFormat0', [
- ('uint8', 'Format', None, None, 'Format of the DeltaSetIndexMap = 0'),
- ('uint8', 'EntryFormat', None, None, ''), # Automatically computed
- ('uint16', 'MappingCount', None, None, ''), # Automatically computed
- ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
- ]),
-
- ('DeltaSetIndexMapFormat1', [
- ('uint8', 'Format', None, None, 'Format of the DeltaSetIndexMap = 1'),
- ('uint8', 'EntryFormat', None, None, ''), # Automatically computed
- ('uint32', 'MappingCount', None, None, ''), # Automatically computed
- ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'),
- ]),
-
- # Glyph advance variations
-
- ('HVAR', [
- ('Version', 'Version', None, None, 'Version of the HVAR table-initially = 0x00010000'),
- ('LOffset', 'VarStore', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'AdvWidthMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'LsbMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'RsbMap', None, None, ''),
- ]),
- ('VVAR', [
- ('Version', 'Version', None, None, 'Version of the VVAR table-initially = 0x00010000'),
- ('LOffset', 'VarStore', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'AdvHeightMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'TsbMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'BsbMap', None, None, ''),
- ('LOffsetTo(VarIdxMap)', 'VOrgMap', None, None, 'Vertical origin mapping.'),
- ]),
-
- # Font-wide metrics variations
-
- ('MetricsValueRecord', [
- ('Tag', 'ValueTag', None, None, '4-byte font-wide measure identifier'),
- ('uint32', 'VarIdx', None, None, 'Combined outer-inner variation index'),
- ('uint8', 'MoreBytes', 'ValueRecordSize', -8, 'Extra bytes. Set to empty array.'),
- ]),
-
- ('MVAR', [
- ('Version', 'Version', None, None, 'Version of the MVAR table-initially = 0x00010000'),
- ('uint16', 'Reserved', None, None, 'Set to 0'),
- ('uint16', 'ValueRecordSize', None, None, ''),
- ('uint16', 'ValueRecordCount', None, None, ''),
- ('Offset', 'VarStore', None, None, ''),
- ('MetricsValueRecord', 'ValueRecord', 'ValueRecordCount', 0, ''),
- ]),
-
-
- #
- # math
- #
-
- ('MATH', [
- ('Version', 'Version', None, None, 'Version of the MATH table-initially set to 0x00010000.'),
- ('Offset', 'MathConstants', None, None, 'Offset to MathConstants table - from the beginning of MATH table.'),
- ('Offset', 'MathGlyphInfo', None, None, 'Offset to MathGlyphInfo table - from the beginning of MATH table.'),
- ('Offset', 'MathVariants', None, None, 'Offset to MathVariants table - from the beginning of MATH table.'),
- ]),
-
- ('MathValueRecord', [
- ('int16', 'Value', None, None, 'The X or Y value in design units.'),
- ('Offset', 'DeviceTable', None, None, 'Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.'),
- ]),
-
- ('MathConstants', [
- ('int16', 'ScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 1. Suggested value: 80%.'),
- ('int16', 'ScriptScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.'),
- ('uint16', 'DelimitedSubFormulaMinHeight', None, None, 'Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.'),
- ('uint16', 'DisplayOperatorMinHeight', None, None, 'Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.'),
- ('MathValueRecord', 'MathLeading', None, None, 'White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.'),
- ('MathValueRecord', 'AxisHeight', None, None, 'Axis height of the font.'),
- ('MathValueRecord', 'AccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.'),
- ('MathValueRecord', 'FlattenedAccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).'),
- ('MathValueRecord', 'SubscriptShiftDown', None, None, 'The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.'),
- ('MathValueRecord', 'SubscriptTopMax', None, None, 'Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.'),
- ('MathValueRecord', 'SubscriptBaselineDropMin', None, None, 'Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.'),
- ('MathValueRecord', 'SuperscriptShiftUp', None, None, 'Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.'),
- ('MathValueRecord', 'SuperscriptShiftUpCramped', None, None, 'Standard shift of superscripts relative to the base, in cramped style.'),
- ('MathValueRecord', 'SuperscriptBottomMin', None, None, 'Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.'),
- ('MathValueRecord', 'SuperscriptBaselineDropMax', None, None, 'Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.'),
- ('MathValueRecord', 'SubSuperscriptGapMin', None, None, 'Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.'),
- ('MathValueRecord', 'SuperscriptBottomMaxWithSubscript', None, None, 'The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.'),
- ('MathValueRecord', 'SpaceAfterScript', None, None, 'Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.'),
- ('MathValueRecord', 'UpperLimitGapMin', None, None, 'Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.'),
- ('MathValueRecord', 'UpperLimitBaselineRiseMin', None, None, 'Minimum distance between baseline of upper limit and (ink) top of the base operator.'),
- ('MathValueRecord', 'LowerLimitGapMin', None, None, 'Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.'),
- ('MathValueRecord', 'LowerLimitBaselineDropMin', None, None, 'Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.'),
- ('MathValueRecord', 'StackTopShiftUp', None, None, 'Standard shift up applied to the top element of a stack.'),
- ('MathValueRecord', 'StackTopDisplayStyleShiftUp', None, None, 'Standard shift up applied to the top element of a stack in display style.'),
- ('MathValueRecord', 'StackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'StackBottomDisplayStyleShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'StackGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'StackDisplayStyleGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.'),
- ('MathValueRecord', 'StretchStackTopShiftUp', None, None, 'Standard shift up applied to the top element of the stretch stack.'),
- ('MathValueRecord', 'StretchStackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'StretchStackGapAboveMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin'),
- ('MathValueRecord', 'StretchStackGapBelowMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.'),
- ('MathValueRecord', 'FractionNumeratorShiftUp', None, None, 'Standard shift up applied to the numerator.'),
- ('MathValueRecord', 'FractionNumeratorDisplayStyleShiftUp', None, None, 'Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.'),
- ('MathValueRecord', 'FractionDenominatorShiftDown', None, None, 'Standard shift down applied to the denominator. Positive for moving in the downward direction.'),
- ('MathValueRecord', 'FractionDenominatorDisplayStyleShiftDown', None, None, 'Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.'),
- ('MathValueRecord', 'FractionNumeratorGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness'),
- ('MathValueRecord', 'FractionNumDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'FractionRuleThickness', None, None, 'Thickness of the fraction bar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'FractionDenominatorGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness'),
- ('MathValueRecord', 'FractionDenomDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'SkewedFractionHorizontalGap', None, None, 'Horizontal distance between the top and bottom elements of a skewed fraction.'),
- ('MathValueRecord', 'SkewedFractionVerticalGap', None, None, 'Vertical distance between the ink of the top and bottom elements of a skewed fraction.'),
- ('MathValueRecord', 'OverbarVerticalGap', None, None, 'Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'OverbarRuleThickness', None, None, 'Thickness of overbar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'OverbarExtraAscender', None, None, 'Extra white space reserved above the overbar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'UnderbarVerticalGap', None, None, 'Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.'),
- ('MathValueRecord', 'UnderbarRuleThickness', None, None, 'Thickness of underbar. Suggested: default rule thickness.'),
- ('MathValueRecord', 'UnderbarExtraDescender', None, None, 'Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.'),
- ('MathValueRecord', 'RadicalVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.'),
- ('MathValueRecord', 'RadicalDisplayStyleVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.'),
- ('MathValueRecord', 'RadicalRuleThickness', None, None, 'Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.'),
- ('MathValueRecord', 'RadicalExtraAscender', None, None, 'Extra white space reserved above the radical. Suggested: RadicalRuleThickness.'),
- ('MathValueRecord', 'RadicalKernBeforeDegree', None, None, 'Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.'),
- ('MathValueRecord', 'RadicalKernAfterDegree', None, None, 'Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.'),
- ('uint16', 'RadicalDegreeBottomRaisePercent', None, None, 'Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.'),
- ]),
-
- ('MathGlyphInfo', [
- ('Offset', 'MathItalicsCorrectionInfo', None, None, 'Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.'),
- ('Offset', 'MathTopAccentAttachment', None, None, 'Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.'),
- ('Offset', 'ExtendedShapeCoverage', None, None, 'Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.'),
- ('Offset', 'MathKernInfo', None, None, 'Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.'),
- ]),
-
- ('MathItalicsCorrectionInfo', [
- ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.'),
- ('uint16', 'ItalicsCorrectionCount', None, None, 'Number of italics correction values. Should coincide with the number of covered glyphs.'),
- ('MathValueRecord', 'ItalicsCorrection', 'ItalicsCorrectionCount', 0, 'Array of MathValueRecords defining italics correction values for each covered glyph.'),
- ]),
-
- ('MathTopAccentAttachment', [
- ('Offset', 'TopAccentCoverage', None, None, 'Offset to Coverage table - from the beginning of MathTopAccentAttachment table.'),
- ('uint16', 'TopAccentAttachmentCount', None, None, 'Number of top accent attachment point values. Should coincide with the number of covered glyphs'),
- ('MathValueRecord', 'TopAccentAttachment', 'TopAccentAttachmentCount', 0, 'Array of MathValueRecords defining top accent attachment points for each covered glyph'),
- ]),
-
- ('MathKernInfo', [
- ('Offset', 'MathKernCoverage', None, None, 'Offset to Coverage table - from the beginning of the MathKernInfo table.'),
- ('uint16', 'MathKernCount', None, None, 'Number of MathKernInfoRecords.'),
- ('MathKernInfoRecord', 'MathKernInfoRecords', 'MathKernCount', 0, 'Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.'),
- ]),
-
- ('MathKernInfoRecord', [
- ('Offset', 'TopRightMathKern', None, None, 'Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.'),
- ('Offset', 'TopLeftMathKern', None, None, 'Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.'),
- ('Offset', 'BottomRightMathKern', None, None, 'Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.'),
- ('Offset', 'BottomLeftMathKern', None, None, 'Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.'),
- ]),
-
- ('MathKern', [
- ('uint16', 'HeightCount', None, None, 'Number of heights on which the kern value changes.'),
- ('MathValueRecord', 'CorrectionHeight', 'HeightCount', 0, 'Array of correction heights at which the kern value changes. Sorted by the height value in design units.'),
- ('MathValueRecord', 'KernValue', 'HeightCount', 1, 'Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as move glyphs closer to each other.'),
- ]),
-
- ('MathVariants', [
- ('uint16', 'MinConnectorOverlap', None, None, 'Minimum overlap of connecting glyphs during glyph construction, in design units.'),
- ('Offset', 'VertGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'),
- ('Offset', 'HorizGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'),
- ('uint16', 'VertGlyphCount', None, None, 'Number of glyphs for which information is provided for vertically growing variants.'),
- ('uint16', 'HorizGlyphCount', None, None, 'Number of glyphs for which information is provided for horizontally growing variants.'),
- ('Offset', 'VertGlyphConstruction', 'VertGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.'),
- ('Offset', 'HorizGlyphConstruction', 'HorizGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.'),
- ]),
-
- ('MathGlyphConstruction', [
- ('Offset', 'GlyphAssembly', None, None, 'Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL'),
- ('uint16', 'VariantCount', None, None, 'Count of glyph growing variants for this glyph.'),
- ('MathGlyphVariantRecord', 'MathGlyphVariantRecord', 'VariantCount', 0, 'MathGlyphVariantRecords for alternative variants of the glyphs.'),
- ]),
-
- ('MathGlyphVariantRecord', [
- ('GlyphID', 'VariantGlyph', None, None, 'Glyph ID for the variant.'),
- ('uint16', 'AdvanceMeasurement', None, None, 'Advance width/height, in design units, of the variant, in the direction of requested glyph extension.'),
- ]),
-
- ('GlyphAssembly', [
- ('MathValueRecord', 'ItalicsCorrection', None, None, 'Italics correction of this GlyphAssembly. Should not depend on the assembly size.'),
- ('uint16', 'PartCount', None, None, 'Number of parts in this assembly.'),
- ('GlyphPartRecord', 'PartRecords', 'PartCount', 0, 'Array of part records, from left to right and bottom to top.'),
- ]),
-
- ('GlyphPartRecord', [
- ('GlyphID', 'glyph', None, None, 'Glyph ID for the part.'),
- ('uint16', 'StartConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.'),
- ('uint16', 'EndConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.'),
- ('uint16', 'FullAdvance', None, None, 'Full advance width/height for this part, in the direction of the extension. In design units.'),
- ('uint16', 'PartFlags', None, None, 'Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 0xFFFE Reserved'),
- ]),
-
-
- ##
- ## Apple Advanced Typography (AAT) tables
- ##
-
- ('AATLookupSegment', [
- ('uint16', 'lastGlyph', None, None, 'Last glyph index in this segment.'),
- ('uint16', 'firstGlyph', None, None, 'First glyph index in this segment.'),
- ('uint16', 'value', None, None, 'A 16-bit offset from the start of the table to the data.'),
- ]),
-
-
- #
- # ankr
- #
-
- ('ankr', [
- ('struct', 'AnchorPoints', None, None, 'Anchor points table.'),
- ]),
-
- ('AnchorPointsFormat0', [
- ('uint16', 'Format', None, None, 'Format of the anchor points table, = 0.'),
- ('uint16', 'Flags', None, None, 'Flags. Currenty unused, set to zero.'),
- ('AATLookupWithDataOffset(AnchorGlyphData)', 'Anchors', None, None, 'Table of with anchor overrides for each glyph.'),
- ]),
-
- ('AnchorGlyphData', [
- ('uint32', 'AnchorPointCount', None, None, 'Number of anchor points for this glyph.'),
- ('struct', 'AnchorPoint', 'AnchorPointCount', 0, 'Individual anchor points.'),
- ]),
-
- ('AnchorPoint', [
- ('int16', 'XCoordinate', None, None, 'X coordinate of this anchor point.'),
- ('int16', 'YCoordinate', None, None, 'Y coordinate of this anchor point.'),
- ]),
-
- #
- # bsln
- #
-
- ('bsln', [
- ('Version', 'Version', None, None, 'Version number of the AAT baseline table (0x00010000 for the initial version).'),
- ('struct', 'Baseline', None, None, 'Baseline table.'),
- ]),
-
- ('BaselineFormat0', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 0.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'),
- ]),
-
- ('BaselineFormat1', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'),
- ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'),
- ]),
-
- ('BaselineFormat2', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'),
- ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'),
- ]),
-
- ('BaselineFormat3', [
- ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'),
- ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'),
- ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'),
- ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'),
- ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'),
- ]),
-
-
- #
- # cidg
- #
-
- ('cidg', [
- ('struct', 'CIDGlyphMapping', None, None, 'CID-to-glyph mapping table.'),
- ]),
-
- ('CIDGlyphMappingFormat0', [
- ('uint16', 'Format', None, None, 'Format of the CID-to-glyph mapping table, = 0.'),
- ('uint16', 'DataFormat', None, None, 'Currenty unused, set to zero.'),
- ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'),
- ('uint16', 'Registry', None, None, 'The registry ID.'),
- ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'Order', None, None, 'The order ID.'),
- ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'SupplementVersion', None, None, 'The supplement version.'),
- ('CIDGlyphMap', 'Mapping', None, None, 'A mapping from CIDs to the glyphs in the font, starting with CID 0. If a CID from the identified collection has no glyph in the font, 0xFFFF is used'),
- ]),
-
-
- #
- # feat
- #
-
- ('feat', [
- ('Version', 'Version', None, None, 'Version of the feat table-initially set to 0x00010000.'),
- ('FeatureNames', 'FeatureNames', None, None, 'The feature names.'),
- ]),
-
- ('FeatureNames', [
- ('uint16', 'FeatureNameCount', None, None, 'Number of entries in the feature name array.'),
- ('uint16', 'Reserved1', None, None, 'Reserved (set to zero).'),
- ('uint32', 'Reserved2', None, None, 'Reserved (set to zero).'),
- ('FeatureName', 'FeatureName', 'FeatureNameCount', 0, 'The feature name array.'),
- ]),
-
- ('FeatureName', [
- ('uint16', 'FeatureType', None, None, 'Feature type.'),
- ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'),
- ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'),
- ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'),
- ('NameID', 'FeatureNameID', None, None, 'The name table index for the feature name.'),
- ]),
-
- ('Settings', [
- ('Setting', 'Setting', 'SettingsCount', 0, 'The setting array.'),
- ]),
-
- ('Setting', [
- ('uint16', 'SettingValue', None, None, 'The setting.'),
- ('NameID', 'SettingNameID', None, None, 'The name table index for the setting name.'),
- ]),
-
-
- #
- # gcid
- #
-
- ('gcid', [
- ('struct', 'GlyphCIDMapping', None, None, 'Glyph to CID mapping table.'),
- ]),
-
- ('GlyphCIDMappingFormat0', [
- ('uint16', 'Format', None, None, 'Format of the glyph-to-CID mapping table, = 0.'),
- ('uint16', 'DataFormat', None, None, 'Currenty unused, set to zero.'),
- ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'),
- ('uint16', 'Registry', None, None, 'The registry ID.'),
- ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'Order', None, None, 'The order ID.'),
- ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'),
- ('uint16', 'SupplementVersion', None, None, 'The supplement version.'),
- ('GlyphCIDMap', 'Mapping', None, None, 'The CIDs for the glyphs in the font, starting with glyph 0. If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used'),
- ]),
-
-
- #
- # lcar
- #
-
- ('lcar', [
- ('Version', 'Version', None, None, 'Version number of the ligature caret table (0x00010000 for the initial version).'),
- ('struct', 'LigatureCarets', None, None, 'Ligature carets table.'),
- ]),
-
- ('LigatureCaretsFormat0', [
- ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'),
- ('AATLookup(LigCaretDistances)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, in font unit distances.'),
- ]),
-
- ('LigatureCaretsFormat1', [
- ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'),
- ('AATLookup(LigCaretPoints)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, as control points.'),
- ]),
-
- ('LigCaretDistances', [
- ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'),
- ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'Distance in font units through which a subdivision is made orthogonally to the baseline.'),
- ]),
-
- ('LigCaretPoints', [
- ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'),
- ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'The number of the control point through which a subdivision is made orthogonally to the baseline.'),
- ]),
-
-
- #
- # mort
- #
-
- ('mort', [
- ('Version', 'Version', None, None, 'Version of the mort table.'),
- ('uint32', 'MorphChainCount', None, None, 'Number of metamorphosis chains.'),
- ('MortChain', 'MorphChain', 'MorphChainCount', 0, 'Array of metamorphosis chains.'),
- ]),
-
- ('MortChain', [
- ('Flags32', 'DefaultFlags', None, None, 'The default specification for subtables.'),
- ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'),
- ('uint16', 'MorphFeatureCount', None, None, 'Number of metamorphosis feature entries.'),
- ('uint16', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'),
- ('struct', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'),
- ('MortSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of metamorphosis subtables.'),
- ]),
-
- ('MortSubtable', [
- ('uint16', 'StructLength', None, None, 'Total subtable length, including this header.'),
- ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'),
- ('uint8', 'MorphType', None, None, 'Subtable type.'),
- ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'),
- ('SubStruct', 'SubStruct', None, None, 'SubTable.'),
- ]),
-
- #
- # morx
- #
-
- ('morx', [
- ('uint16', 'Version', None, None, 'Version of the morx table.'),
- ('uint16', 'Reserved', None, None, 'Reserved (set to zero).'),
- ('uint32', 'MorphChainCount', None, None, 'Number of extended metamorphosis chains.'),
- ('MorxChain', 'MorphChain', 'MorphChainCount', 0, 'Array of extended metamorphosis chains.'),
- ]),
-
- ('MorxChain', [
- ('Flags32', 'DefaultFlags', None, None, 'The default specification for subtables.'),
- ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'),
- ('uint32', 'MorphFeatureCount', None, None, 'Number of feature subtable entries.'),
- ('uint32', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'),
- ('MorphFeature', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'),
- ('MorxSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of extended metamorphosis subtables.'),
- ]),
-
- ('MorphFeature', [
- ('uint16', 'FeatureType', None, None, 'The type of feature.'),
- ('uint16', 'FeatureSetting', None, None, "The feature's setting (aka selector)."),
- ('Flags32', 'EnableFlags', None, None, 'Flags for the settings that this feature and setting enables.'),
- ('Flags32', 'DisableFlags', None, None, 'Complement of flags for the settings that this feature and setting disable.'),
- ]),
-
- # Apple TrueType Reference Manual, chapter “The ‘morx’ table”,
- # section “Metamorphosis Subtables”.
- # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
- ('MorxSubtable', [
- ('uint32', 'StructLength', None, None, 'Total subtable length, including this header.'),
- ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'),
- ('uint16', 'Reserved', None, None, 'Unused.'),
- ('uint8', 'MorphType', None, None, 'Subtable type.'),
- ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'),
- ('SubStruct', 'SubStruct', None, None, 'SubTable.'),
- ]),
-
- ('StateHeader', [
- ('uint32', 'ClassCount', None, None, 'Number of classes, which is the number of 16-bit entry indices in a single line in the state array.'),
- ('uint32', 'MorphClass', None, None, 'Offset from the start of this state table header to the start of the class table.'),
- ('uint32', 'StateArrayOffset', None, None, 'Offset from the start of this state table header to the start of the state array.'),
- ('uint32', 'EntryTableOffset', None, None, 'Offset from the start of this state table header to the start of the entry table.'),
- ]),
-
- ('RearrangementMorph', [
- ('STXHeader(RearrangementMorphAction)', 'StateTable', None, None, 'Finite-state transducer table for indic rearrangement.'),
- ]),
-
- ('ContextualMorph', [
- ('STXHeader(ContextualMorphAction)', 'StateTable', None, None, 'Finite-state transducer for contextual glyph substitution.'),
- ]),
-
- ('LigatureMorph', [
- ('STXHeader(LigatureMorphAction)', 'StateTable', None, None, 'Finite-state transducer for ligature substitution.'),
- ]),
-
- ('NoncontextualMorph', [
- ('AATLookup(GlyphID)', 'Substitution', None, None, 'The noncontextual glyph substitution table.'),
- ]),
-
- ('InsertionMorph', [
- ('STXHeader(InsertionMorphAction)', 'StateTable', None, None, 'Finite-state transducer for glyph insertion.'),
- ]),
-
- ('MorphClass', [
- ('uint16', 'FirstGlyph', None, None, 'Glyph index of the first glyph in the class table.'),
- #('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'),
- #('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'),
- ]),
-
- # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below.
- # ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
-
-
- #
- # prop
- #
-
- ('prop', [
- ('Fixed', 'Version', None, None, 'Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.'),
- ('struct', 'GlyphProperties', None, None, 'Glyph properties.'),
- ]),
-
- ('GlyphPropertiesFormat0', [
- ('uint16', 'Format', None, None, 'Format, = 0.'),
- ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph. Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.'),
- ]),
-
- ('GlyphPropertiesFormat1', [
- ('uint16', 'Format', None, None, 'Format, = 1.'),
- ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph if that glyph is not present in the Properties lookup table.'),
- ('AATLookup(uint16)', 'Properties', None, None, 'Lookup data associating glyphs with their properties.'),
- ]),
-
-
- #
- # opbd
- #
-
- ('opbd', [
- ('Version', 'Version', None, None, 'Version number of the optical bounds table (0x00010000 for the initial version).'),
- ('struct', 'OpticalBounds', None, None, 'Optical bounds table.'),
- ]),
-
- ('OpticalBoundsFormat0', [
- ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 0.'),
- ('AATLookup(OpticalBoundsDeltas)', 'OpticalBoundsDeltas', None, None, 'Lookup table associating glyphs with their optical bounds, given as deltas in font units.'),
- ]),
-
- ('OpticalBoundsFormat1', [
- ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 1.'),
- ('AATLookup(OpticalBoundsPoints)', 'OpticalBoundsPoints', None, None, 'Lookup table associating glyphs with their optical bounds, given as references to control points.'),
- ]),
-
- ('OpticalBoundsDeltas', [
- ('int16', 'Left', None, None, 'Delta value for the left-side optical edge.'),
- ('int16', 'Top', None, None, 'Delta value for the top-side optical edge.'),
- ('int16', 'Right', None, None, 'Delta value for the right-side optical edge.'),
- ('int16', 'Bottom', None, None, 'Delta value for the bottom-side optical edge.'),
- ]),
-
- ('OpticalBoundsPoints', [
- ('int16', 'Left', None, None, 'Control point index for the left-side optical edge, or -1 if this glyph has none.'),
- ('int16', 'Top', None, None, 'Control point index for the top-side optical edge, or -1 if this glyph has none.'),
- ('int16', 'Right', None, None, 'Control point index for the right-side optical edge, or -1 if this glyph has none.'),
- ('int16', 'Bottom', None, None, 'Control point index for the bottom-side optical edge, or -1 if this glyph has none.'),
- ]),
-
- #
- # TSIC
- #
- ('TSIC', [
- ('Version', 'Version', None, None, 'Version of table initially set to 0x00010000.'),
- ('uint16', 'Flags', None, None, 'TSIC flags - set to 0'),
- ('uint16', 'AxisCount', None, None, 'Axis count from fvar'),
- ('uint16', 'RecordCount', None, None, 'TSIC record count'),
- ('uint16', 'Reserved', None, None, 'Set to 0'),
- ('Tag', 'AxisArray', 'AxisCount', 0, 'Array of axis tags in fvar order'),
- ('LocationRecord', 'RecordLocations', 'RecordCount', 0, 'Location in variation space of TSIC record'),
- ('TSICRecord', 'Record', 'RecordCount', 0, 'Array of TSIC records'),
- ]),
-
- ('LocationRecord', [
- ('F2Dot14', 'Axis', 'AxisCount', 0, 'Axis record'),
- ]),
-
- ('TSICRecord', [
- ('uint16', 'Flags', None, None, 'Record flags - set to 0'),
- ('uint16', 'NumCVTEntries', None, None, 'Number of CVT number value pairs'),
- ('uint16', 'NameLength', None, None, 'Length of optional user record name'),
- ('uint16', 'NameArray', 'NameLength', 0, 'Unicode 16 name'),
- ('uint16', 'CVTArray', 'NumCVTEntries', 0, 'CVT number array'),
- ('int16', 'CVTValueArray', 'NumCVTEntries', 0, 'CVT value'),
- ]),
-
- #
- # COLR
- #
-
- ('COLR', [
- ('uint16', 'Version', None, None, 'Table version number (starts at 0).'),
- ('uint16', 'BaseGlyphRecordCount', None, None, 'Number of Base Glyph Records.'),
- ('LOffset', 'BaseGlyphRecordArray', None, None, 'Offset (from beginning of COLR table) to Base Glyph records.'),
- ('LOffset', 'LayerRecordArray', None, None, 'Offset (from beginning of COLR table) to Layer Records.'),
- ('uint16', 'LayerRecordCount', None, None, 'Number of Layer Records.'),
- ('LOffset', 'BaseGlyphList', None, 'Version >= 1', 'Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.'),
- ('LOffset', 'LayerList', None, 'Version >= 1', 'Offset (from beginning of COLR table) to LayerList.'),
- ('LOffset', 'ClipList', None, 'Version >= 1', 'Offset to ClipList table (may be NULL)'),
- ('LOffsetTo(DeltaSetIndexMap)', 'VarIndexMap', None, 'Version >= 1', 'Offset to DeltaSetIndexMap table (may be NULL)'),
- ('LOffset', 'VarStore', None, 'Version >= 1', 'Offset to variation store (may be NULL)'),
- ]),
-
- ('BaseGlyphRecordArray', [
- ('BaseGlyphRecord', 'BaseGlyphRecord', 'BaseGlyphRecordCount', 0, 'Base Glyph records.'),
- ]),
-
- ('BaseGlyphRecord', [
- ('GlyphID', 'BaseGlyph', None, None, 'Glyph ID of reference glyph. This glyph is for reference only and is not rendered for color.'),
- ('uint16', 'FirstLayerIndex', None, None, 'Index (from beginning of the Layer Records) to the layer record. There will be numLayers consecutive entries for this base glyph.'),
- ('uint16', 'NumLayers', None, None, 'Number of color layers associated with this glyph.'),
- ]),
-
- ('LayerRecordArray', [
- ('LayerRecord', 'LayerRecord', 'LayerRecordCount', 0, 'Layer records.'),
- ]),
-
- ('LayerRecord', [
- ('GlyphID', 'LayerGlyph', None, None, 'Glyph ID of layer glyph (must be in z-order from bottom to top).'),
- ('uint16', 'PaletteIndex', None, None, 'Index value to use with a selected color palette.'),
- ]),
-
- ('BaseGlyphList', [
- ('uint32', 'BaseGlyphCount', None, None, 'Number of Version-1 Base Glyph records'),
- ('struct', 'BaseGlyphPaintRecord', 'BaseGlyphCount', 0, 'Array of Version-1 Base Glyph records'),
- ]),
-
- ('BaseGlyphPaintRecord', [
- ('GlyphID', 'BaseGlyph', None, None, 'Glyph ID of reference glyph.'),
- ('LOffset', 'Paint', None, None, 'Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.'),
- ]),
-
- ('LayerList', [
- ('uint32', 'LayerCount', None, None, 'Number of Version-1 Layers'),
- ('LOffset', 'Paint', 'LayerCount', 0, 'Array of offsets to Paint tables, from the start of the LayerList table.'),
- ]),
-
- ('ClipListFormat1', [
- ('uint8', 'Format', None, None, 'Format for ClipList with 16bit glyph IDs: 1'),
- ('uint32', 'ClipCount', None, None, 'Number of Clip records.'),
- ('struct', 'ClipRecord', 'ClipCount', 0, 'Array of Clip records sorted by glyph ID.'),
- ]),
-
- ('ClipRecord', [
- ('uint16', 'StartGlyphID', None, None, 'First glyph ID in the range.'),
- ('uint16', 'EndGlyphID', None, None, 'Last glyph ID in the range.'),
- ('Offset24', 'ClipBox', None, None, 'Offset to a ClipBox table.'),
- ]),
-
- ('ClipBoxFormat1', [
- ('uint8', 'Format', None, None, 'Format for ClipBox without variation: set to 1.'),
- ('int16', 'xMin', None, None, 'Minimum x of clip box.'),
- ('int16', 'yMin', None, None, 'Minimum y of clip box.'),
- ('int16', 'xMax', None, None, 'Maximum x of clip box.'),
- ('int16', 'yMax', None, None, 'Maximum y of clip box.'),
- ]),
-
- ('ClipBoxFormat2', [
- ('uint8', 'Format', None, None, 'Format for variable ClipBox: set to 2.'),
- ('int16', 'xMin', None, None, 'Minimum x of clip box. VarIndexBase + 0.'),
- ('int16', 'yMin', None, None, 'Minimum y of clip box. VarIndexBase + 1.'),
- ('int16', 'xMax', None, None, 'Maximum x of clip box. VarIndexBase + 2.'),
- ('int16', 'yMax', None, None, 'Maximum y of clip box. VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # COLRv1 Affine2x3 uses the same column-major order to serialize a 2D
- # Affine Transformation as the one used by fontTools.misc.transform.
- # However, for historical reasons, the labels 'xy' and 'yx' are swapped.
- # Their fundamental meaning is the same though.
- # COLRv1 Affine2x3 follows the names found in FreeType and Cairo.
- # In all case, the second element in the 6-tuple correspond to the
- # y-part of the x basis vector, and the third to the x-part of the y
- # basis vector.
- # See https://github.com/googlefonts/colr-gradients-spec/pull/85
- ('Affine2x3', [
- ('Fixed', 'xx', None, None, 'x-part of x basis vector'),
- ('Fixed', 'yx', None, None, 'y-part of x basis vector'),
- ('Fixed', 'xy', None, None, 'x-part of y basis vector'),
- ('Fixed', 'yy', None, None, 'y-part of y basis vector'),
- ('Fixed', 'dx', None, None, 'Translation in x direction'),
- ('Fixed', 'dy', None, None, 'Translation in y direction'),
- ]),
- ('VarAffine2x3', [
- ('Fixed', 'xx', None, None, 'x-part of x basis vector. VarIndexBase + 0.'),
- ('Fixed', 'yx', None, None, 'y-part of x basis vector. VarIndexBase + 1.'),
- ('Fixed', 'xy', None, None, 'x-part of y basis vector. VarIndexBase + 2.'),
- ('Fixed', 'yy', None, None, 'y-part of y basis vector. VarIndexBase + 3.'),
- ('Fixed', 'dx', None, None, 'Translation in x direction. VarIndexBase + 4.'),
- ('Fixed', 'dy', None, None, 'Translation in y direction. VarIndexBase + 5.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- ('ColorStop', [
- ('F2Dot14', 'StopOffset', None, None, ''),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outsided [0.,1.] reserved'),
- ]),
- ('VarColorStop', [
- ('F2Dot14', 'StopOffset', None, None, 'VarIndexBase + 0.'),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outsided [0.,1.] reserved. VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- ('ColorLine', [
- ('ExtendMode', 'Extend', None, None, 'Enum {PAD = 0, REPEAT = 1, REFLECT = 2}'),
- ('uint16', 'StopCount', None, None, 'Number of Color stops.'),
- ('ColorStop', 'ColorStop', 'StopCount', 0, 'Array of Color stops.'),
- ]),
- ('VarColorLine', [
- ('ExtendMode', 'Extend', None, None, 'Enum {PAD = 0, REPEAT = 1, REFLECT = 2}'),
- ('uint16', 'StopCount', None, None, 'Number of Color stops.'),
- ('VarColorStop', 'ColorStop', 'StopCount', 0, 'Array of Color stops.'),
- ]),
-
- # PaintColrLayers
- ('PaintFormat1', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 1'),
- ('uint8', 'NumLayers', None, None, 'Number of offsets to Paint to read from LayerList.'),
- ('uint32', 'FirstLayerIndex', None, None, 'Index into LayerList.'),
- ]),
-
- # PaintSolid
- ('PaintFormat2', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 2'),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outsided [0.,1.] reserved'),
- ]),
- # PaintVarSolid
- ('PaintFormat3', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 3'),
- ('uint16', 'PaletteIndex', None, None, 'Index for a CPAL palette entry.'),
- ('F2Dot14', 'Alpha', None, None, 'Values outsided [0.,1.] reserved. VarIndexBase + 0.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintLinearGradient
- ('PaintFormat4', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 4'),
- ('Offset24', 'ColorLine', None, None, 'Offset (from beginning of PaintLinearGradient table) to ColorLine subtable.'),
- ('int16', 'x0', None, None, ''),
- ('int16', 'y0', None, None, ''),
- ('int16', 'x1', None, None, ''),
- ('int16', 'y1', None, None, ''),
- ('int16', 'x2', None, None, ''),
- ('int16', 'y2', None, None, ''),
- ]),
- # PaintVarLinearGradient
- ('PaintFormat5', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 5'),
- ('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.'),
- ('int16', 'x0', None, None, 'VarIndexBase + 0.'),
- ('int16', 'y0', None, None, 'VarIndexBase + 1.'),
- ('int16', 'x1', None, None, 'VarIndexBase + 2.'),
- ('int16', 'y1', None, None, 'VarIndexBase + 3.'),
- ('int16', 'x2', None, None, 'VarIndexBase + 4.'),
- ('int16', 'y2', None, None, 'VarIndexBase + 5.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintRadialGradient
- ('PaintFormat6', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 6'),
- ('Offset24', 'ColorLine', None, None, 'Offset (from beginning of PaintRadialGradient table) to ColorLine subtable.'),
- ('int16', 'x0', None, None, ''),
- ('int16', 'y0', None, None, ''),
- ('uint16', 'r0', None, None, ''),
- ('int16', 'x1', None, None, ''),
- ('int16', 'y1', None, None, ''),
- ('uint16', 'r1', None, None, ''),
- ]),
- # PaintVarRadialGradient
- ('PaintFormat7', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 7'),
- ('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.'),
- ('int16', 'x0', None, None, 'VarIndexBase + 0.'),
- ('int16', 'y0', None, None, 'VarIndexBase + 1.'),
- ('uint16', 'r0', None, None, 'VarIndexBase + 2.'),
- ('int16', 'x1', None, None, 'VarIndexBase + 3.'),
- ('int16', 'y1', None, None, 'VarIndexBase + 4.'),
- ('uint16', 'r1', None, None, 'VarIndexBase + 5.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintSweepGradient
- ('PaintFormat8', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 8'),
- ('Offset24', 'ColorLine', None, None, 'Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.'),
- ('int16', 'centerX', None, None, 'Center x coordinate.'),
- ('int16', 'centerY', None, None, 'Center y coordinate.'),
- ('BiasedAngle', 'startAngle', None, None, 'Start of the angular range of the gradient.'),
- ('BiasedAngle', 'endAngle', None, None, 'End of the angular range of the gradient.'),
- ]),
- # PaintVarSweepGradient
- ('PaintFormat9', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 9'),
- ('LOffset24To(VarColorLine)', 'ColorLine', None, None, 'Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.'),
- ('int16', 'centerX', None, None, 'Center x coordinate. VarIndexBase + 0.'),
- ('int16', 'centerY', None, None, 'Center y coordinate. VarIndexBase + 1.'),
- ('BiasedAngle', 'startAngle', None, None, 'Start of the angular range of the gradient. VarIndexBase + 2.'),
- ('BiasedAngle', 'endAngle', None, None, 'End of the angular range of the gradient. VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintGlyph
- ('PaintFormat10', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 10'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintGlyph table) to Paint subtable.'),
- ('GlyphID', 'Glyph', None, None, 'Glyph ID for the source outline.'),
- ]),
-
- # PaintColrGlyph
- ('PaintFormat11', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 11'),
- ('GlyphID', 'Glyph', None, None, 'Virtual glyph ID for a BaseGlyphList base glyph.'),
- ]),
-
- # PaintTransform
- ('PaintFormat12', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 12'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintTransform table) to Paint subtable.'),
- ('LOffset24To(Affine2x3)', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
- ]),
- # PaintVarTransform
- ('PaintFormat13', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 13'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarTransform table) to Paint subtable.'),
- ('LOffset24To(VarAffine2x3)', 'Transform', None, None, '2x3 matrix for 2D affine transformations.'),
- ]),
-
- # PaintTranslate
- ('PaintFormat14', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 14'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintTranslate table) to Paint subtable.'),
- ('int16', 'dx', None, None, 'Translation in x direction.'),
- ('int16', 'dy', None, None, 'Translation in y direction.'),
- ]),
- # PaintVarTranslate
- ('PaintFormat15', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 15'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarTranslate table) to Paint subtable.'),
- ('int16', 'dx', None, None, 'Translation in x direction. VarIndexBase + 0.'),
- ('int16', 'dy', None, None, 'Translation in y direction. VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScale
- ('PaintFormat16', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 16'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScale table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, ''),
- ('F2Dot14', 'scaleY', None, None, ''),
- ]),
- # PaintVarScale
- ('PaintFormat17', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 17'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScale table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, 'VarIndexBase + 0.'),
- ('F2Dot14', 'scaleY', None, None, 'VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScaleAroundCenter
- ('PaintFormat18', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 18'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, ''),
- ('F2Dot14', 'scaleY', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarScaleAroundCenter
- ('PaintFormat19', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 19'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scaleX', None, None, 'VarIndexBase + 0.'),
- ('F2Dot14', 'scaleY', None, None, 'VarIndexBase + 1.'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 2.'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScaleUniform
- ('PaintFormat20', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 20'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleUniform table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, ''),
- ]),
- # PaintVarScaleUniform
- ('PaintFormat21', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 21'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, 'VarIndexBase + 0.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintScaleUniformAroundCenter
- ('PaintFormat22', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 22'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarScaleUniformAroundCenter
- ('PaintFormat23', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 23'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.'),
- ('F2Dot14', 'scale', None, None, 'VarIndexBase + 0'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 1'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 2'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintRotate
- ('PaintFormat24', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 24'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintRotate table) to Paint subtable.'),
- ('Angle', 'angle', None, None, ''),
- ]),
- # PaintVarRotate
- ('PaintFormat25', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 25'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarRotate table) to Paint subtable.'),
- ('Angle', 'angle', None, None, 'VarIndexBase + 0.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintRotateAroundCenter
- ('PaintFormat26', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 26'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.'),
- ('Angle', 'angle', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarRotateAroundCenter
- ('PaintFormat27', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 27'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.'),
- ('Angle', 'angle', None, None, 'VarIndexBase + 0.'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 1.'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 2.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintSkew
- ('PaintFormat28', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 28'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintSkew table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, ''),
- ('Angle', 'ySkewAngle', None, None, ''),
- ]),
- # PaintVarSkew
- ('PaintFormat29', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 29'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarSkew table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, 'VarIndexBase + 0.'),
- ('Angle', 'ySkewAngle', None, None, 'VarIndexBase + 1.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintSkewAroundCenter
- ('PaintFormat30', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 30'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, ''),
- ('Angle', 'ySkewAngle', None, None, ''),
- ('int16', 'centerX', None, None, ''),
- ('int16', 'centerY', None, None, ''),
- ]),
- # PaintVarSkewAroundCenter
- ('PaintFormat31', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 31'),
- ('Offset24', 'Paint', None, None, 'Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.'),
- ('Angle', 'xSkewAngle', None, None, 'VarIndexBase + 0.'),
- ('Angle', 'ySkewAngle', None, None, 'VarIndexBase + 1.'),
- ('int16', 'centerX', None, None, 'VarIndexBase + 2.'),
- ('int16', 'centerY', None, None, 'VarIndexBase + 3.'),
- ('VarIndex', 'VarIndexBase', None, None, 'Base index into DeltaSetIndexMap.'),
- ]),
-
- # PaintComposite
- ('PaintFormat32', [
- ('uint8', 'PaintFormat', None, None, 'Format identifier-format = 32'),
- ('LOffset24To(Paint)', 'SourcePaint', None, None, 'Offset (from beginning of PaintComposite table) to source Paint subtable.'),
- ('CompositeMode', 'CompositeMode', None, None, 'A CompositeMode enumeration value.'),
- ('LOffset24To(Paint)', 'BackdropPaint', None, None, 'Offset (from beginning of PaintComposite table) to backdrop Paint subtable.'),
- ]),
+ #
+ # common
+ #
+ ("LookupOrder", []),
+ (
+ "ScriptList",
+ [
+ ("uint16", "ScriptCount", None, None, "Number of ScriptRecords"),
+ (
+ "struct",
+ "ScriptRecord",
+ "ScriptCount",
+ 0,
+ "Array of ScriptRecords -listed alphabetically by ScriptTag",
+ ),
+ ],
+ ),
+ (
+ "ScriptRecord",
+ [
+ ("Tag", "ScriptTag", None, None, "4-byte ScriptTag identifier"),
+ (
+ "Offset",
+ "Script",
+ None,
+ None,
+ "Offset to Script table-from beginning of ScriptList",
+ ),
+ ],
+ ),
+ (
+ "Script",
+ [
+ (
+ "Offset",
+ "DefaultLangSys",
+ None,
+ None,
+ "Offset to DefaultLangSys table-from beginning of Script table-may be NULL",
+ ),
+ (
+ "uint16",
+ "LangSysCount",
+ None,
+ None,
+ "Number of LangSysRecords for this script-excluding the DefaultLangSys",
+ ),
+ (
+ "struct",
+ "LangSysRecord",
+ "LangSysCount",
+ 0,
+ "Array of LangSysRecords-listed alphabetically by LangSysTag",
+ ),
+ ],
+ ),
+ (
+ "LangSysRecord",
+ [
+ ("Tag", "LangSysTag", None, None, "4-byte LangSysTag identifier"),
+ (
+ "Offset",
+ "LangSys",
+ None,
+ None,
+ "Offset to LangSys table-from beginning of Script table",
+ ),
+ ],
+ ),
+ (
+ "LangSys",
+ [
+ (
+ "Offset",
+ "LookupOrder",
+ None,
+ None,
+ "= NULL (reserved for an offset to a reordering table)",
+ ),
+ (
+ "uint16",
+ "ReqFeatureIndex",
+ None,
+ None,
+ "Index of a feature required for this language system- if no required features = 0xFFFF",
+ ),
+ (
+ "uint16",
+ "FeatureCount",
+ None,
+ None,
+ "Number of FeatureIndex values for this language system-excludes the required feature",
+ ),
+ (
+ "uint16",
+ "FeatureIndex",
+ "FeatureCount",
+ 0,
+ "Array of indices into the FeatureList-in arbitrary order",
+ ),
+ ],
+ ),
+ (
+ "FeatureList",
+ [
+ (
+ "uint16",
+ "FeatureCount",
+ None,
+ None,
+ "Number of FeatureRecords in this table",
+ ),
+ (
+ "struct",
+ "FeatureRecord",
+ "FeatureCount",
+ 0,
+ "Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag",
+ ),
+ ],
+ ),
+ (
+ "FeatureRecord",
+ [
+ ("Tag", "FeatureTag", None, None, "4-byte feature identification tag"),
+ (
+ "Offset",
+ "Feature",
+ None,
+ None,
+ "Offset to Feature table-from beginning of FeatureList",
+ ),
+ ],
+ ),
+ (
+ "Feature",
+ [
+ (
+ "Offset",
+ "FeatureParams",
+ None,
+ None,
+ "= NULL (reserved for offset to FeatureParams)",
+ ),
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of LookupList indices for this feature",
+ ),
+ (
+ "uint16",
+ "LookupListIndex",
+ "LookupCount",
+ 0,
+ "Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)",
+ ),
+ ],
+ ),
+ ("FeatureParams", []),
+ (
+ "FeatureParamsSize",
+ [
+ (
+ "DeciPoints",
+ "DesignSize",
+ None,
+ None,
+ "The design size in 720/inch units (decipoints).",
+ ),
+ (
+ "uint16",
+ "SubfamilyID",
+ None,
+ None,
+ "Serves as an identifier that associates fonts in a subfamily.",
+ ),
+ ("NameID", "SubfamilyNameID", None, None, "Subfamily NameID."),
+ (
+ "DeciPoints",
+ "RangeStart",
+ None,
+ None,
+ "Small end of recommended usage range (exclusive) in 720/inch units.",
+ ),
+ (
+ "DeciPoints",
+ "RangeEnd",
+ None,
+ None,
+ "Large end of recommended usage range (inclusive) in 720/inch units.",
+ ),
+ ],
+ ),
+ (
+ "FeatureParamsStylisticSet",
+ [
+ ("uint16", "Version", None, None, "Set to 0."),
+ ("NameID", "UINameID", None, None, "UI NameID."),
+ ],
+ ),
+ (
+ "FeatureParamsCharacterVariants",
+ [
+ ("uint16", "Format", None, None, "Set to 0."),
+ ("NameID", "FeatUILabelNameID", None, None, "Feature UI label NameID."),
+ (
+ "NameID",
+ "FeatUITooltipTextNameID",
+ None,
+ None,
+ "Feature UI tooltip text NameID.",
+ ),
+ ("NameID", "SampleTextNameID", None, None, "Sample text NameID."),
+ ("uint16", "NumNamedParameters", None, None, "Number of named parameters."),
+ (
+ "NameID",
+ "FirstParamUILabelNameID",
+ None,
+ None,
+ "First NameID of UI feature parameters.",
+ ),
+ (
+ "uint16",
+ "CharCount",
+ None,
+ None,
+ "Count of characters this feature provides glyph variants for.",
+ ),
+ (
+ "uint24",
+ "Character",
+ "CharCount",
+ 0,
+ "Unicode characters for which this feature provides glyph variants.",
+ ),
+ ],
+ ),
+ (
+ "LookupList",
+ [
+ ("uint16", "LookupCount", None, None, "Number of lookups in this table"),
+ (
+ "Offset",
+ "Lookup",
+ "LookupCount",
+ 0,
+ "Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)",
+ ),
+ ],
+ ),
+ (
+ "Lookup",
+ [
+ (
+ "uint16",
+ "LookupType",
+ None,
+ None,
+ "Different enumerations for GSUB and GPOS",
+ ),
+ ("LookupFlag", "LookupFlag", None, None, "Lookup qualifiers"),
+ (
+ "uint16",
+ "SubTableCount",
+ None,
+ None,
+ "Number of SubTables for this lookup",
+ ),
+ (
+ "Offset",
+ "SubTable",
+ "SubTableCount",
+ 0,
+ "Array of offsets to SubTables-from beginning of Lookup table",
+ ),
+ (
+ "uint16",
+ "MarkFilteringSet",
+ None,
+ "LookupFlag & 0x0010",
+ "If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.",
+ ),
+ ],
+ ),
+ (
+ "CoverageFormat1",
+ [
+ ("uint16", "CoverageFormat", None, None, "Format identifier-format = 1"),
+ ("uint16", "GlyphCount", None, None, "Number of glyphs in the GlyphArray"),
+ (
+ "GlyphID",
+ "GlyphArray",
+ "GlyphCount",
+ 0,
+ "Array of GlyphIDs-in numerical order",
+ ),
+ ],
+ ),
+ (
+ "CoverageFormat2",
+ [
+ ("uint16", "CoverageFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "RangeCount", None, None, "Number of RangeRecords"),
+ (
+ "struct",
+ "RangeRecord",
+ "RangeCount",
+ 0,
+ "Array of glyph ranges-ordered by Start GlyphID",
+ ),
+ ],
+ ),
+ (
+ "RangeRecord",
+ [
+ ("GlyphID", "Start", None, None, "First GlyphID in the range"),
+ ("GlyphID", "End", None, None, "Last GlyphID in the range"),
+ (
+ "uint16",
+ "StartCoverageIndex",
+ None,
+ None,
+ "Coverage Index of first GlyphID in range",
+ ),
+ ],
+ ),
+ (
+ "ClassDefFormat1",
+ [
+ ("uint16", "ClassFormat", None, None, "Format identifier-format = 1"),
+ (
+ "GlyphID",
+ "StartGlyph",
+ None,
+ None,
+ "First GlyphID of the ClassValueArray",
+ ),
+ ("uint16", "GlyphCount", None, None, "Size of the ClassValueArray"),
+ (
+ "uint16",
+ "ClassValueArray",
+ "GlyphCount",
+ 0,
+ "Array of Class Values-one per GlyphID",
+ ),
+ ],
+ ),
+ (
+ "ClassDefFormat2",
+ [
+ ("uint16", "ClassFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "ClassRangeCount", None, None, "Number of ClassRangeRecords"),
+ (
+ "struct",
+ "ClassRangeRecord",
+ "ClassRangeCount",
+ 0,
+ "Array of ClassRangeRecords-ordered by Start GlyphID",
+ ),
+ ],
+ ),
+ (
+ "ClassRangeRecord",
+ [
+ ("GlyphID", "Start", None, None, "First GlyphID in the range"),
+ ("GlyphID", "End", None, None, "Last GlyphID in the range"),
+ ("uint16", "Class", None, None, "Applied to all glyphs in the range"),
+ ],
+ ),
+ (
+ "Device",
+ [
+ ("uint16", "StartSize", None, None, "Smallest size to correct-in ppem"),
+ ("uint16", "EndSize", None, None, "Largest size to correct-in ppem"),
+ (
+ "uint16",
+ "DeltaFormat",
+ None,
+ None,
+ "Format of DeltaValue array data: 1, 2, or 3",
+ ),
+ (
+ "DeltaValue",
+ "DeltaValue",
+ "",
+ "DeltaFormat in (1,2,3)",
+ "Array of compressed data",
+ ),
+ ],
+ ),
+ #
+ # gpos
+ #
+ (
+ "GPOS",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the GPOS table- 0x00010000 or 0x00010001",
+ ),
+ (
+ "Offset",
+ "ScriptList",
+ None,
+ None,
+ "Offset to ScriptList table-from beginning of GPOS table",
+ ),
+ (
+ "Offset",
+ "FeatureList",
+ None,
+ None,
+ "Offset to FeatureList table-from beginning of GPOS table",
+ ),
+ (
+ "Offset",
+ "LookupList",
+ None,
+ None,
+ "Offset to LookupList table-from beginning of GPOS table",
+ ),
+ (
+ "LOffset",
+ "FeatureVariations",
+ None,
+ "Version >= 0x00010001",
+ "Offset to FeatureVariations table-from beginning of GPOS table",
+ ),
+ ],
+ ),
+ (
+ "SinglePosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of SinglePos subtable",
+ ),
+ (
+ "uint16",
+ "ValueFormat",
+ None,
+ None,
+ "Defines the types of data in the ValueRecord",
+ ),
+ (
+ "ValueRecord",
+ "Value",
+ None,
+ None,
+ "Defines positioning value(s)-applied to all glyphs in the Coverage table",
+ ),
+ ],
+ ),
+ (
+ "SinglePosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of SinglePos subtable",
+ ),
+ (
+ "uint16",
+ "ValueFormat",
+ None,
+ None,
+ "Defines the types of data in the ValueRecord",
+ ),
+ ("uint16", "ValueCount", None, None, "Number of ValueRecords"),
+ (
+ "ValueRecord",
+ "Value",
+ "ValueCount",
+ 0,
+ "Array of ValueRecords-positioning values applied to glyphs",
+ ),
+ ],
+ ),
+ (
+ "PairPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair",
+ ),
+ (
+ "uint16",
+ "ValueFormat1",
+ None,
+ None,
+ "Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)",
+ ),
+ (
+ "uint16",
+ "ValueFormat2",
+ None,
+ None,
+ "Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)",
+ ),
+ ("uint16", "PairSetCount", None, None, "Number of PairSet tables"),
+ (
+ "Offset",
+ "PairSet",
+ "PairSetCount",
+ 0,
+ "Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "PairSet",
+ [
+ ("uint16", "PairValueCount", None, None, "Number of PairValueRecords"),
+ (
+ "struct",
+ "PairValueRecord",
+ "PairValueCount",
+ 0,
+ "Array of PairValueRecords-ordered by GlyphID of the second glyph",
+ ),
+ ],
+ ),
+ (
+ "PairValueRecord",
+ [
+ (
+ "GlyphID",
+ "SecondGlyph",
+ None,
+ None,
+ "GlyphID of second glyph in the pair-first glyph is listed in the Coverage table",
+ ),
+ (
+ "ValueRecord",
+ "Value1",
+ None,
+ None,
+ "Positioning data for the first glyph in the pair",
+ ),
+ (
+ "ValueRecord",
+ "Value2",
+ None,
+ None,
+ "Positioning data for the second glyph in the pair",
+ ),
+ ],
+ ),
+ (
+ "PairPosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair",
+ ),
+ (
+ "uint16",
+ "ValueFormat1",
+ None,
+ None,
+ "ValueRecord definition-for the first glyph of the pair-may be zero (0)",
+ ),
+ (
+ "uint16",
+ "ValueFormat2",
+ None,
+ None,
+ "ValueRecord definition-for the second glyph of the pair-may be zero (0)",
+ ),
+ (
+ "Offset",
+ "ClassDef1",
+ None,
+ None,
+ "Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair",
+ ),
+ (
+ "Offset",
+ "ClassDef2",
+ None,
+ None,
+ "Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair",
+ ),
+ (
+ "uint16",
+ "Class1Count",
+ None,
+ None,
+ "Number of classes in ClassDef1 table-includes Class0",
+ ),
+ (
+ "uint16",
+ "Class2Count",
+ None,
+ None,
+ "Number of classes in ClassDef2 table-includes Class0",
+ ),
+ (
+ "struct",
+ "Class1Record",
+ "Class1Count",
+ 0,
+ "Array of Class1 records-ordered by Class1",
+ ),
+ ],
+ ),
+ (
+ "Class1Record",
+ [
+ (
+ "struct",
+ "Class2Record",
+ "Class2Count",
+ 0,
+ "Array of Class2 records-ordered by Class2",
+ ),
+ ],
+ ),
+ (
+ "Class2Record",
+ [
+ (
+ "ValueRecord",
+ "Value1",
+ None,
+ None,
+ "Positioning for first glyph-empty if ValueFormat1 = 0",
+ ),
+ (
+ "ValueRecord",
+ "Value2",
+ None,
+ None,
+ "Positioning for second glyph-empty if ValueFormat2 = 0",
+ ),
+ ],
+ ),
+ (
+ "CursivePosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of CursivePos subtable",
+ ),
+ ("uint16", "EntryExitCount", None, None, "Number of EntryExit records"),
+ (
+ "struct",
+ "EntryExitRecord",
+ "EntryExitCount",
+ 0,
+ "Array of EntryExit records-in Coverage Index order",
+ ),
+ ],
+ ),
+ (
+ "EntryExitRecord",
+ [
+ (
+ "Offset",
+ "EntryAnchor",
+ None,
+ None,
+ "Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExitAnchor",
+ None,
+ None,
+ "Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "MarkBasePosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "MarkCoverage",
+ None,
+ None,
+ "Offset to MarkCoverage table-from beginning of MarkBasePos subtable",
+ ),
+ (
+ "Offset",
+ "BaseCoverage",
+ None,
+ None,
+ "Offset to BaseCoverage table-from beginning of MarkBasePos subtable",
+ ),
+ ("uint16", "ClassCount", None, None, "Number of classes defined for marks"),
+ (
+ "Offset",
+ "MarkArray",
+ None,
+ None,
+ "Offset to MarkArray table-from beginning of MarkBasePos subtable",
+ ),
+ (
+ "Offset",
+ "BaseArray",
+ None,
+ None,
+ "Offset to BaseArray table-from beginning of MarkBasePos subtable",
+ ),
+ ],
+ ),
+ (
+ "BaseArray",
+ [
+ ("uint16", "BaseCount", None, None, "Number of BaseRecords"),
+ (
+ "struct",
+ "BaseRecord",
+ "BaseCount",
+ 0,
+ "Array of BaseRecords-in order of BaseCoverage Index",
+ ),
+ ],
+ ),
+ (
+ "BaseRecord",
+ [
+ (
+ "Offset",
+ "BaseAnchor",
+ "ClassCount",
+ 0,
+ "Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based",
+ ),
+ ],
+ ),
+ (
+ "MarkLigPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "MarkCoverage",
+ None,
+ None,
+ "Offset to Mark Coverage table-from beginning of MarkLigPos subtable",
+ ),
+ (
+ "Offset",
+ "LigatureCoverage",
+ None,
+ None,
+ "Offset to Ligature Coverage table-from beginning of MarkLigPos subtable",
+ ),
+ ("uint16", "ClassCount", None, None, "Number of defined mark classes"),
+ (
+ "Offset",
+ "MarkArray",
+ None,
+ None,
+ "Offset to MarkArray table-from beginning of MarkLigPos subtable",
+ ),
+ (
+ "Offset",
+ "LigatureArray",
+ None,
+ None,
+ "Offset to LigatureArray table-from beginning of MarkLigPos subtable",
+ ),
+ ],
+ ),
+ (
+ "LigatureArray",
+ [
+ (
+ "uint16",
+ "LigatureCount",
+ None,
+ None,
+ "Number of LigatureAttach table offsets",
+ ),
+ (
+ "Offset",
+ "LigatureAttach",
+ "LigatureCount",
+ 0,
+ "Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index",
+ ),
+ ],
+ ),
+ (
+ "LigatureAttach",
+ [
+ (
+ "uint16",
+ "ComponentCount",
+ None,
+ None,
+ "Number of ComponentRecords in this ligature",
+ ),
+ (
+ "struct",
+ "ComponentRecord",
+ "ComponentCount",
+ 0,
+ "Array of Component records-ordered in writing direction",
+ ),
+ ],
+ ),
+ (
+ "ComponentRecord",
+ [
+ (
+ "Offset",
+ "LigatureAnchor",
+ "ClassCount",
+ 0,
+ "Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array",
+ ),
+ ],
+ ),
+ (
+ "MarkMarkPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Mark1Coverage",
+ None,
+ None,
+ "Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable",
+ ),
+ (
+ "Offset",
+ "Mark2Coverage",
+ None,
+ None,
+ "Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable",
+ ),
+ (
+ "uint16",
+ "ClassCount",
+ None,
+ None,
+ "Number of Combining Mark classes defined",
+ ),
+ (
+ "Offset",
+ "Mark1Array",
+ None,
+ None,
+ "Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable",
+ ),
+ (
+ "Offset",
+ "Mark2Array",
+ None,
+ None,
+ "Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable",
+ ),
+ ],
+ ),
+ (
+ "Mark2Array",
+ [
+ ("uint16", "Mark2Count", None, None, "Number of Mark2 records"),
+ (
+ "struct",
+ "Mark2Record",
+ "Mark2Count",
+ 0,
+ "Array of Mark2 records-in Coverage order",
+ ),
+ ],
+ ),
+ (
+ "Mark2Record",
+ [
+ (
+ "Offset",
+ "Mark2Anchor",
+ "ClassCount",
+ 0,
+ "Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array",
+ ),
+ ],
+ ),
+ (
+ "PosLookupRecord",
+ [
+ (
+ "uint16",
+ "SequenceIndex",
+ None,
+ None,
+ "Index to input glyph sequence-first glyph = 0",
+ ),
+ (
+ "uint16",
+ "LookupListIndex",
+ None,
+ None,
+ "Lookup to apply to that position-zero-based",
+ ),
+ ],
+ ),
+ (
+ "ContextPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ContextPos subtable",
+ ),
+ ("uint16", "PosRuleSetCount", None, None, "Number of PosRuleSet tables"),
+ (
+ "Offset",
+ "PosRuleSet",
+ "PosRuleSetCount",
+ 0,
+ "Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "PosRuleSet",
+ [
+ ("uint16", "PosRuleCount", None, None, "Number of PosRule tables"),
+ (
+ "Offset",
+ "PosRule",
+ "PosRuleCount",
+ 0,
+ "Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "PosRule",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs in the Input glyph sequence",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "GlyphID",
+ "Input",
+ "GlyphCount",
+ -1,
+ "Array of input GlyphIDs-starting with the second glyph",
+ ),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of positioning lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextPosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ContextPos subtable",
+ ),
+ (
+ "Offset",
+ "ClassDef",
+ None,
+ None,
+ "Offset to ClassDef table-from beginning of ContextPos subtable",
+ ),
+ ("uint16", "PosClassSetCount", None, None, "Number of PosClassSet tables"),
+ (
+ "Offset",
+ "PosClassSet",
+ "PosClassSetCount",
+ 0,
+ "Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "PosClassSet",
+ [
+ (
+ "uint16",
+ "PosClassRuleCount",
+ None,
+ None,
+ "Number of PosClassRule tables",
+ ),
+ (
+ "Offset",
+ "PosClassRule",
+ "PosClassRuleCount",
+ 0,
+ "Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "PosClassRule",
+ [
+ ("uint16", "GlyphCount", None, None, "Number of glyphs to be matched"),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "uint16",
+ "Class",
+ "GlyphCount",
+ -1,
+ "Array of classes-beginning with the second class-to be matched to the input glyph sequence",
+ ),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of positioning lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextPosFormat3",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs in the input sequence",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "Offset",
+ "Coverage",
+ "GlyphCount",
+ 0,
+ "Array of offsets to Coverage tables-from beginning of ContextPos subtable",
+ ),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of positioning lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ChainContextPosFormat1",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ContextPos subtable",
+ ),
+ (
+ "uint16",
+ "ChainPosRuleSetCount",
+ None,
+ None,
+ "Number of ChainPosRuleSet tables",
+ ),
+ (
+ "Offset",
+ "ChainPosRuleSet",
+ "ChainPosRuleSetCount",
+ 0,
+ "Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "ChainPosRuleSet",
+ [
+ (
+ "uint16",
+ "ChainPosRuleCount",
+ None,
+ None,
+ "Number of ChainPosRule tables",
+ ),
+ (
+ "Offset",
+ "ChainPosRule",
+ "ChainPosRuleCount",
+ 0,
+ "Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainPosRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking GlyphID's (to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the input sequence (includes the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input GlyphIDs (start with second glyph)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)",
+ ),
+ (
+ "GlyphID",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead GlyphID's (to be matched after the input sequence)",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of PosLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextPosFormat2",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "Offset",
+ "BacktrackClassDef",
+ None,
+ None,
+ "Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "Offset",
+ "InputClassDef",
+ None,
+ None,
+ "Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "Offset",
+ "LookAheadClassDef",
+ None,
+ None,
+ "Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable",
+ ),
+ (
+ "uint16",
+ "ChainPosClassSetCount",
+ None,
+ None,
+ "Number of ChainPosClassSet tables",
+ ),
+ (
+ "Offset",
+ "ChainPosClassSet",
+ "ChainPosClassSetCount",
+ 0,
+ "Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "ChainPosClassSet",
+ [
+ (
+ "uint16",
+ "ChainPosClassRuleCount",
+ None,
+ None,
+ "Number of ChainPosClassRule tables",
+ ),
+ (
+ "Offset",
+ "ChainPosClassRule",
+ "ChainPosClassRuleCount",
+ 0,
+ "Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainPosClassRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "uint16",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking classes(to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of classes in the input sequence (includes the first class)",
+ ),
+ (
+ "uint16",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input classes(start with second class; to be matched with the input glyph sequence)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)",
+ ),
+ (
+ "uint16",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead classes(to be matched after the input sequence)",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of PosLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextPosFormat3",
+ [
+ ("uint16", "PosFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Number of glyphs in the backtracking sequence",
+ ),
+ (
+ "Offset",
+ "BacktrackCoverage",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Number of glyphs in input sequence",
+ ),
+ (
+ "Offset",
+ "InputCoverage",
+ "InputGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in input sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Number of glyphs in lookahead sequence",
+ ),
+ (
+ "Offset",
+ "LookAheadCoverage",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
+ ),
+ ("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
+ (
+ "struct",
+ "PosLookupRecord",
+ "PosCount",
+ 0,
+ "Array of PosLookupRecords,in design order",
+ ),
+ ],
+ ),
+ (
+ "ExtensionPosFormat1",
+ [
+ ("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."),
+ (
+ "uint16",
+ "ExtensionLookupType",
+ None,
+ None,
+ "Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).",
+ ),
+ ("LOffset", "ExtSubTable", None, None, "Offset to SubTable"),
+ ],
+ ),
+ # ('ValueRecord', [
+ # ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'),
+ # ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'),
+ # ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'),
+ # ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'),
+ # ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'),
+ # ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'),
+ # ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'),
+ # ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'),
+ # ]),
+ (
+ "AnchorFormat1",
+ [
+ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 1"),
+ ("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
+ ("int16", "YCoordinate", None, None, "Vertical value-in design units"),
+ ],
+ ),
+ (
+ "AnchorFormat2",
+ [
+ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 2"),
+ ("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
+ ("int16", "YCoordinate", None, None, "Vertical value-in design units"),
+ ("uint16", "AnchorPoint", None, None, "Index to glyph contour point"),
+ ],
+ ),
+ (
+ "AnchorFormat3",
+ [
+ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 3"),
+ ("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
+ ("int16", "YCoordinate", None, None, "Vertical value-in design units"),
+ (
+ "Offset",
+ "XDeviceTable",
+ None,
+ None,
+ "Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)",
+ ),
+ (
+ "Offset",
+ "YDeviceTable",
+ None,
+ None,
+ "Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)",
+ ),
+ ],
+ ),
+ (
+ "MarkArray",
+ [
+ ("uint16", "MarkCount", None, None, "Number of MarkRecords"),
+ (
+ "struct",
+ "MarkRecord",
+ "MarkCount",
+ 0,
+ "Array of MarkRecords-in Coverage order",
+ ),
+ ],
+ ),
+ (
+ "MarkRecord",
+ [
+ ("uint16", "Class", None, None, "Class defined for this mark"),
+ (
+ "Offset",
+ "MarkAnchor",
+ None,
+ None,
+ "Offset to Anchor table-from beginning of MarkArray table",
+ ),
+ ],
+ ),
+ #
+ # gsub
+ #
+ (
+ "GSUB",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the GSUB table- 0x00010000 or 0x00010001",
+ ),
+ (
+ "Offset",
+ "ScriptList",
+ None,
+ None,
+ "Offset to ScriptList table-from beginning of GSUB table",
+ ),
+ (
+ "Offset",
+ "FeatureList",
+ None,
+ None,
+ "Offset to FeatureList table-from beginning of GSUB table",
+ ),
+ (
+ "Offset",
+ "LookupList",
+ None,
+ None,
+ "Offset to LookupList table-from beginning of GSUB table",
+ ),
+ (
+ "LOffset",
+ "FeatureVariations",
+ None,
+ "Version >= 0x00010001",
+ "Offset to FeatureVariations table-from beginning of GSUB table",
+ ),
+ ],
+ ),
+ (
+ "SingleSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "DeltaGlyphID",
+ None,
+ None,
+ "Add to original GlyphID modulo 65536 to get substitute GlyphID",
+ ),
+ ],
+ ),
+ (
+ "SingleSubstFormat2",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Substitute array",
+ ),
+ (
+ "GlyphID",
+ "Substitute",
+ "GlyphCount",
+ 0,
+ "Array of substitute GlyphIDs-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "MultipleSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "SequenceCount",
+ None,
+ None,
+ "Number of Sequence table offsets in the Sequence array",
+ ),
+ (
+ "Offset",
+ "Sequence",
+ "SequenceCount",
+ 0,
+ "Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "Sequence",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Substitute array. This should always be greater than 0.",
+ ),
+ (
+ "GlyphID",
+ "Substitute",
+ "GlyphCount",
+ 0,
+ "String of GlyphIDs to substitute",
+ ),
+ ],
+ ),
+ (
+ "AlternateSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "AlternateSetCount",
+ None,
+ None,
+ "Number of AlternateSet tables",
+ ),
+ (
+ "Offset",
+ "AlternateSet",
+ "AlternateSetCount",
+ 0,
+ "Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "AlternateSet",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Alternate array",
+ ),
+ (
+ "GlyphID",
+ "Alternate",
+ "GlyphCount",
+ 0,
+ "Array of alternate GlyphIDs-in arbitrary order",
+ ),
+ ],
+ ),
+ (
+ "LigatureSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ ("uint16", "LigSetCount", None, None, "Number of LigatureSet tables"),
+ (
+ "Offset",
+ "LigatureSet",
+ "LigSetCount",
+ 0,
+ "Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "LigatureSet",
+ [
+ ("uint16", "LigatureCount", None, None, "Number of Ligature tables"),
+ (
+ "Offset",
+ "Ligature",
+ "LigatureCount",
+ 0,
+ "Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "Ligature",
+ [
+ ("GlyphID", "LigGlyph", None, None, "GlyphID of ligature to substitute"),
+ ("uint16", "CompCount", None, None, "Number of components in the ligature"),
+ (
+ "GlyphID",
+ "Component",
+ "CompCount",
+ -1,
+ "Array of component GlyphIDs-start with the second component-ordered in writing direction",
+ ),
+ ],
+ ),
+ (
+ "SubstLookupRecord",
+ [
+ (
+ "uint16",
+ "SequenceIndex",
+ None,
+ None,
+ "Index into current glyph sequence-first glyph = 0",
+ ),
+ (
+ "uint16",
+ "LookupListIndex",
+ None,
+ None,
+ "Lookup to apply to that position-zero-based",
+ ),
+ ],
+ ),
+ (
+ "ContextSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "SubRuleSetCount",
+ None,
+ None,
+ "Number of SubRuleSet tables-must equal GlyphCount in Coverage table",
+ ),
+ (
+ "Offset",
+ "SubRuleSet",
+ "SubRuleSetCount",
+ 0,
+ "Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "SubRuleSet",
+ [
+ ("uint16", "SubRuleCount", None, None, "Number of SubRule tables"),
+ (
+ "Offset",
+ "SubRule",
+ "SubRuleCount",
+ 0,
+ "Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "SubRule",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Total number of glyphs in input glyph sequence-includes the first glyph",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "GlyphID",
+ "Input",
+ "GlyphCount",
+ -1,
+ "Array of input GlyphIDs-start with second glyph",
+ ),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextSubstFormat2",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "ClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table-from beginning of Substitution table",
+ ),
+ ("uint16", "SubClassSetCount", None, None, "Number of SubClassSet tables"),
+ (
+ "Offset",
+ "SubClassSet",
+ "SubClassSetCount",
+ 0,
+ "Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "SubClassSet",
+ [
+ (
+ "uint16",
+ "SubClassRuleCount",
+ None,
+ None,
+ "Number of SubClassRule tables",
+ ),
+ (
+ "Offset",
+ "SubClassRule",
+ "SubClassRuleCount",
+ 0,
+ "Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "SubClassRule",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Total number of classes specified for the context in the rule-includes the first class",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "uint16",
+ "Class",
+ "GlyphCount",
+ -1,
+ "Array of classes-beginning with the second class-to be matched to the input glyph class sequence",
+ ),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of Substitution lookups-in design order",
+ ),
+ ],
+ ),
+ (
+ "ContextSubstFormat3",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs in the input glyph sequence",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "Offset",
+ "Coverage",
+ "GlyphCount",
+ 0,
+ "Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order",
+ ),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords-in design order",
+ ),
+ ],
+ ),
+ (
+ "ChainContextSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "ChainSubRuleSetCount",
+ None,
+ None,
+ "Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table",
+ ),
+ (
+ "Offset",
+ "ChainSubRuleSet",
+ "ChainSubRuleSetCount",
+ 0,
+ "Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index",
+ ),
+ ],
+ ),
+ (
+ "ChainSubRuleSet",
+ [
+ (
+ "uint16",
+ "ChainSubRuleCount",
+ None,
+ None,
+ "Number of ChainSubRule tables",
+ ),
+ (
+ "Offset",
+ "ChainSubRule",
+ "ChainSubRuleCount",
+ 0,
+ "Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainSubRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking GlyphID's (to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the input sequence (includes the first glyph)",
+ ),
+ (
+ "GlyphID",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input GlyphIDs (start with second glyph)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)",
+ ),
+ (
+ "GlyphID",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead GlyphID's (to be matched after the input sequence)",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextSubstFormat2",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "BacktrackClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "InputClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table",
+ ),
+ (
+ "Offset",
+ "LookAheadClassDef",
+ None,
+ None,
+ "Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "ChainSubClassSetCount",
+ None,
+ None,
+ "Number of ChainSubClassSet tables",
+ ),
+ (
+ "Offset",
+ "ChainSubClassSet",
+ "ChainSubClassSetCount",
+ 0,
+ "Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "ChainSubClassSet",
+ [
+ (
+ "uint16",
+ "ChainSubClassRuleCount",
+ None,
+ None,
+ "Number of ChainSubClassRule tables",
+ ),
+ (
+ "Offset",
+ "ChainSubClassRule",
+ "ChainSubClassRuleCount",
+ 0,
+ "Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference",
+ ),
+ ],
+ ),
+ (
+ "ChainSubClassRule",
+ [
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
+ ),
+ (
+ "uint16",
+ "Backtrack",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of backtracking classes(to be matched before the input sequence)",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Total number of classes in the input sequence (includes the first class)",
+ ),
+ (
+ "uint16",
+ "Input",
+ "InputGlyphCount",
+ -1,
+ "Array of input classes(start with second class; to be matched with the input glyph sequence)",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)",
+ ),
+ (
+ "uint16",
+ "LookAhead",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of lookahead classes(to be matched after the input sequence)",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords (in design order)",
+ ),
+ ],
+ ),
+ (
+ "ChainContextSubstFormat3",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"),
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Number of glyphs in the backtracking sequence",
+ ),
+ (
+ "Offset",
+ "BacktrackCoverage",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "InputGlyphCount",
+ None,
+ None,
+ "Number of glyphs in input sequence",
+ ),
+ (
+ "Offset",
+ "InputCoverage",
+ "InputGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in input sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Number of glyphs in lookahead sequence",
+ ),
+ (
+ "Offset",
+ "LookAheadCoverage",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
+ ),
+ ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
+ (
+ "struct",
+ "SubstLookupRecord",
+ "SubstCount",
+ 0,
+ "Array of SubstLookupRecords, in design order",
+ ),
+ ],
+ ),
+ (
+ "ExtensionSubstFormat1",
+ [
+ ("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."),
+ (
+ "uint16",
+ "ExtensionLookupType",
+ None,
+ None,
+ "Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).",
+ ),
+ (
+ "LOffset",
+ "ExtSubTable",
+ None,
+ None,
+ "Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)",
+ ),
+ ],
+ ),
+ (
+ "ReverseChainSingleSubstFormat1",
+ [
+ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
+ (
+ "Offset",
+ "Coverage",
+ None,
+ 0,
+ "Offset to Coverage table - from beginning of Substitution table",
+ ),
+ (
+ "uint16",
+ "BacktrackGlyphCount",
+ None,
+ None,
+ "Number of glyphs in the backtracking sequence",
+ ),
+ (
+ "Offset",
+ "BacktrackCoverage",
+ "BacktrackGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "LookAheadGlyphCount",
+ None,
+ None,
+ "Number of glyphs in lookahead sequence",
+ ),
+ (
+ "Offset",
+ "LookAheadCoverage",
+ "LookAheadGlyphCount",
+ 0,
+ "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
+ ),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of GlyphIDs in the Substitute array",
+ ),
+ (
+ "GlyphID",
+ "Substitute",
+ "GlyphCount",
+ 0,
+ "Array of substitute GlyphIDs-ordered by Coverage index",
+ ),
+ ],
+ ),
+ #
+ # gdef
+ #
+ (
+ "GDEF",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003",
+ ),
+ (
+ "Offset",
+ "GlyphClassDef",
+ None,
+ None,
+ "Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "AttachList",
+ None,
+ None,
+ "Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "LigCaretList",
+ None,
+ None,
+ "Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "MarkAttachClassDef",
+ None,
+ None,
+ "Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "Offset",
+ "MarkGlyphSetsDef",
+ None,
+ "Version >= 0x00010002",
+ "Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)",
+ ),
+ (
+ "LOffset",
+ "VarStore",
+ None,
+ "Version >= 0x00010003",
+ "Offset to variation store (may be NULL)",
+ ),
+ ],
+ ),
+ (
+ "AttachList",
+ [
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table - from beginning of AttachList table",
+ ),
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of glyphs with attachment points",
+ ),
+ (
+ "Offset",
+ "AttachPoint",
+ "GlyphCount",
+ 0,
+ "Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order",
+ ),
+ ],
+ ),
+ (
+ "AttachPoint",
+ [
+ (
+ "uint16",
+ "PointCount",
+ None,
+ None,
+ "Number of attachment points on this glyph",
+ ),
+ (
+ "uint16",
+ "PointIndex",
+ "PointCount",
+ 0,
+ "Array of contour point indices -in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "LigCaretList",
+ [
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table - from beginning of LigCaretList table",
+ ),
+ ("uint16", "LigGlyphCount", None, None, "Number of ligature glyphs"),
+ (
+ "Offset",
+ "LigGlyph",
+ "LigGlyphCount",
+ 0,
+ "Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order",
+ ),
+ ],
+ ),
+ (
+ "LigGlyph",
+ [
+ (
+ "uint16",
+ "CaretCount",
+ None,
+ None,
+ "Number of CaretValues for this ligature (components - 1)",
+ ),
+ (
+ "Offset",
+ "CaretValue",
+ "CaretCount",
+ 0,
+ "Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order",
+ ),
+ ],
+ ),
+ (
+ "CaretValueFormat1",
+ [
+ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 1"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ ],
+ ),
+ (
+ "CaretValueFormat2",
+ [
+ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "CaretValuePoint", None, None, "Contour point index on glyph"),
+ ],
+ ),
+ (
+ "CaretValueFormat3",
+ [
+ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 3"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ (
+ "Offset",
+ "DeviceTable",
+ None,
+ None,
+ "Offset to Device table for X or Y value-from beginning of CaretValue table",
+ ),
+ ],
+ ),
+ (
+ "MarkGlyphSetsDef",
+ [
+ ("uint16", "MarkSetTableFormat", None, None, "Format identifier == 1"),
+ ("uint16", "MarkSetCount", None, None, "Number of mark sets defined"),
+ (
+ "LOffset",
+ "Coverage",
+ "MarkSetCount",
+ 0,
+ "Array of offsets to mark set coverage tables.",
+ ),
+ ],
+ ),
+ #
+ # base
+ #
+ (
+ "BASE",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the BASE table-initially 0x00010000",
+ ),
+ (
+ "Offset",
+ "HorizAxis",
+ None,
+ None,
+ "Offset to horizontal Axis table-from beginning of BASE table-may be NULL",
+ ),
+ (
+ "Offset",
+ "VertAxis",
+ None,
+ None,
+ "Offset to vertical Axis table-from beginning of BASE table-may be NULL",
+ ),
+ (
+ "LOffset",
+ "VarStore",
+ None,
+ "Version >= 0x00010001",
+ "Offset to variation store (may be NULL)",
+ ),
+ ],
+ ),
+ (
+ "Axis",
+ [
+ (
+ "Offset",
+ "BaseTagList",
+ None,
+ None,
+ "Offset to BaseTagList table-from beginning of Axis table-may be NULL",
+ ),
+ (
+ "Offset",
+ "BaseScriptList",
+ None,
+ None,
+ "Offset to BaseScriptList table-from beginning of Axis table",
+ ),
+ ],
+ ),
+ (
+ "BaseTagList",
+ [
+ (
+ "uint16",
+ "BaseTagCount",
+ None,
+ None,
+ "Number of baseline identification tags in this text direction-may be zero (0)",
+ ),
+ (
+ "Tag",
+ "BaselineTag",
+ "BaseTagCount",
+ 0,
+ "Array of 4-byte baseline identification tags-must be in alphabetical order",
+ ),
+ ],
+ ),
+ (
+ "BaseScriptList",
+ [
+ (
+ "uint16",
+ "BaseScriptCount",
+ None,
+ None,
+ "Number of BaseScriptRecords defined",
+ ),
+ (
+ "struct",
+ "BaseScriptRecord",
+ "BaseScriptCount",
+ 0,
+ "Array of BaseScriptRecords-in alphabetical order by BaseScriptTag",
+ ),
+ ],
+ ),
+ (
+ "BaseScriptRecord",
+ [
+ ("Tag", "BaseScriptTag", None, None, "4-byte script identification tag"),
+ (
+ "Offset",
+ "BaseScript",
+ None,
+ None,
+ "Offset to BaseScript table-from beginning of BaseScriptList",
+ ),
+ ],
+ ),
+ (
+ "BaseScript",
+ [
+ (
+ "Offset",
+ "BaseValues",
+ None,
+ None,
+ "Offset to BaseValues table-from beginning of BaseScript table-may be NULL",
+ ),
+ (
+ "Offset",
+ "DefaultMinMax",
+ None,
+ None,
+ "Offset to MinMax table- from beginning of BaseScript table-may be NULL",
+ ),
+ (
+ "uint16",
+ "BaseLangSysCount",
+ None,
+ None,
+ "Number of BaseLangSysRecords defined-may be zero (0)",
+ ),
+ (
+ "struct",
+ "BaseLangSysRecord",
+ "BaseLangSysCount",
+ 0,
+ "Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag",
+ ),
+ ],
+ ),
+ (
+ "BaseLangSysRecord",
+ [
+ (
+ "Tag",
+ "BaseLangSysTag",
+ None,
+ None,
+ "4-byte language system identification tag",
+ ),
+ (
+ "Offset",
+ "MinMax",
+ None,
+ None,
+ "Offset to MinMax table-from beginning of BaseScript table",
+ ),
+ ],
+ ),
+ (
+ "BaseValues",
+ [
+ (
+ "uint16",
+ "DefaultIndex",
+ None,
+ None,
+ "Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList",
+ ),
+ (
+ "uint16",
+ "BaseCoordCount",
+ None,
+ None,
+ "Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList",
+ ),
+ (
+ "Offset",
+ "BaseCoord",
+ "BaseCoordCount",
+ 0,
+ "Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList",
+ ),
+ ],
+ ),
+ (
+ "MinMax",
+ [
+ (
+ "Offset",
+ "MinCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL",
+ ),
+ (
+ "Offset",
+ "MaxCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL",
+ ),
+ (
+ "uint16",
+ "FeatMinMaxCount",
+ None,
+ None,
+ "Number of FeatMinMaxRecords-may be zero (0)",
+ ),
+ (
+ "struct",
+ "FeatMinMaxRecord",
+ "FeatMinMaxCount",
+ 0,
+ "Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag",
+ ),
+ ],
+ ),
+ (
+ "FeatMinMaxRecord",
+ [
+ (
+ "Tag",
+ "FeatureTableTag",
+ None,
+ None,
+ "4-byte feature identification tag-must match FeatureTag in FeatureList",
+ ),
+ (
+ "Offset",
+ "MinCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL",
+ ),
+ (
+ "Offset",
+ "MaxCoord",
+ None,
+ None,
+ "Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL",
+ ),
+ ],
+ ),
+ (
+ "BaseCoordFormat1",
+ [
+ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 1"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ ],
+ ),
+ (
+ "BaseCoordFormat2",
+ [
+ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 2"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ ("GlyphID", "ReferenceGlyph", None, None, "GlyphID of control glyph"),
+ (
+ "uint16",
+ "BaseCoordPoint",
+ None,
+ None,
+ "Index of contour point on the ReferenceGlyph",
+ ),
+ ],
+ ),
+ (
+ "BaseCoordFormat3",
+ [
+ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 3"),
+ ("int16", "Coordinate", None, None, "X or Y value, in design units"),
+ (
+ "Offset",
+ "DeviceTable",
+ None,
+ None,
+ "Offset to Device table for X or Y value",
+ ),
+ ],
+ ),
+ #
+ # jstf
+ #
+ (
+ "JSTF",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the JSTF table-initially set to 0x00010000",
+ ),
+ (
+ "uint16",
+ "JstfScriptCount",
+ None,
+ None,
+ "Number of JstfScriptRecords in this table",
+ ),
+ (
+ "struct",
+ "JstfScriptRecord",
+ "JstfScriptCount",
+ 0,
+ "Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag",
+ ),
+ ],
+ ),
+ (
+ "JstfScriptRecord",
+ [
+ ("Tag", "JstfScriptTag", None, None, "4-byte JstfScript identification"),
+ (
+ "Offset",
+ "JstfScript",
+ None,
+ None,
+ "Offset to JstfScript table-from beginning of JSTF Header",
+ ),
+ ],
+ ),
+ (
+ "JstfScript",
+ [
+ (
+ "Offset",
+ "ExtenderGlyph",
+ None,
+ None,
+ "Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL",
+ ),
+ (
+ "Offset",
+ "DefJstfLangSys",
+ None,
+ None,
+ "Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL",
+ ),
+ (
+ "uint16",
+ "JstfLangSysCount",
+ None,
+ None,
+ "Number of JstfLangSysRecords in this table- may be zero (0)",
+ ),
+ (
+ "struct",
+ "JstfLangSysRecord",
+ "JstfLangSysCount",
+ 0,
+ "Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag",
+ ),
+ ],
+ ),
+ (
+ "JstfLangSysRecord",
+ [
+ ("Tag", "JstfLangSysTag", None, None, "4-byte JstfLangSys identifier"),
+ (
+ "Offset",
+ "JstfLangSys",
+ None,
+ None,
+ "Offset to JstfLangSys table-from beginning of JstfScript table",
+ ),
+ ],
+ ),
+ (
+ "ExtenderGlyph",
+ [
+ (
+ "uint16",
+ "GlyphCount",
+ None,
+ None,
+ "Number of Extender Glyphs in this script",
+ ),
+ (
+ "GlyphID",
+ "ExtenderGlyph",
+ "GlyphCount",
+ 0,
+ "GlyphIDs-in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "JstfLangSys",
+ [
+ (
+ "uint16",
+ "JstfPriorityCount",
+ None,
+ None,
+ "Number of JstfPriority tables",
+ ),
+ (
+ "Offset",
+ "JstfPriority",
+ "JstfPriorityCount",
+ 0,
+ "Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order",
+ ),
+ ],
+ ),
+ (
+ "JstfPriority",
+ [
+ (
+ "Offset",
+ "ShrinkageEnableGSUB",
+ None,
+ None,
+ "Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageDisableGSUB",
+ None,
+ None,
+ "Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageEnableGPOS",
+ None,
+ None,
+ "Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageDisableGPOS",
+ None,
+ None,
+ "Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ShrinkageJstfMax",
+ None,
+ None,
+ "Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionEnableGSUB",
+ None,
+ None,
+ "Offset to Extension Enable JstfGSUBModList table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionDisableGSUB",
+ None,
+ None,
+ "Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionEnableGPOS",
+ None,
+ None,
+ "Offset to Extension Enable JstfGPOSModList table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionDisableGPOS",
+ None,
+ None,
+ "Offset to Extension Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL",
+ ),
+ (
+ "Offset",
+ "ExtensionJstfMax",
+ None,
+ None,
+ "Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL",
+ ),
+ ],
+ ),
+ (
+ "JstfGSUBModList",
+ [
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of lookups for this modification",
+ ),
+ (
+ "uint16",
+ "GSUBLookupIndex",
+ "LookupCount",
+ 0,
+ "Array of LookupIndex identifiers in GSUB-in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "JstfGPOSModList",
+ [
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of lookups for this modification",
+ ),
+ (
+ "uint16",
+ "GPOSLookupIndex",
+ "LookupCount",
+ 0,
+ "Array of LookupIndex identifiers in GPOS-in increasing numerical order",
+ ),
+ ],
+ ),
+ (
+ "JstfMax",
+ [
+ (
+ "uint16",
+ "LookupCount",
+ None,
+ None,
+ "Number of lookup Indices for this modification",
+ ),
+ (
+ "Offset",
+ "Lookup",
+ "LookupCount",
+ 0,
+ "Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order",
+ ),
+ ],
+ ),
+ #
+ # STAT
+ #
+ (
+ "STAT",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the table-initially set to 0x00010000, currently 0x00010002.",
+ ),
+ (
+ "uint16",
+ "DesignAxisRecordSize",
+ None,
+ None,
+ "Size in bytes of each design axis record",
+ ),
+ ("uint16", "DesignAxisCount", None, None, "Number of design axis records"),
+ (
+ "LOffsetTo(AxisRecordArray)",
+ "DesignAxisRecord",
+ None,
+ None,
+ "Offset in bytes from the beginning of the STAT table to the start of the design axes array",
+ ),
+ ("uint16", "AxisValueCount", None, None, "Number of axis value tables"),
+ (
+ "LOffsetTo(AxisValueArray)",
+ "AxisValueArray",
+ None,
+ None,
+ "Offset in bytes from the beginning of the STAT table to the start of the axes value offset array",
+ ),
+ (
+ "NameID",
+ "ElidedFallbackNameID",
+ None,
+ "Version >= 0x00010001",
+ "NameID to use when all style attributes are elided.",
+ ),
+ ],
+ ),
+ (
+ "AxisRecordArray",
+ [
+ ("AxisRecord", "Axis", "DesignAxisCount", 0, "Axis records"),
+ ],
+ ),
+ (
+ "AxisRecord",
+ [
+ (
+ "Tag",
+ "AxisTag",
+ None,
+ None,
+ "A tag identifying the axis of design variation",
+ ),
+ (
+ "NameID",
+ "AxisNameID",
+ None,
+ None,
+ 'The name ID for entries in the "name" table that provide a display string for this axis',
+ ),
+ (
+ "uint16",
+ "AxisOrdering",
+ None,
+ None,
+ "A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names",
+ ),
+ (
+ "uint8",
+ "MoreBytes",
+ "DesignAxisRecordSize",
+ -8,
+ "Extra bytes. Set to empty array.",
+ ),
+ ],
+ ),
+ (
+ "AxisValueArray",
+ [
+ ("Offset", "AxisValue", "AxisValueCount", 0, "Axis values"),
+ ],
+ ),
+ (
+ "AxisValueFormat1",
+ [
+ ("uint16", "Format", None, None, "Format, = 1"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ ("Fixed", "Value", None, None, ""),
+ ],
+ ),
+ (
+ "AxisValueFormat2",
+ [
+ ("uint16", "Format", None, None, "Format, = 2"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ ("Fixed", "NominalValue", None, None, ""),
+ ("Fixed", "RangeMinValue", None, None, ""),
+ ("Fixed", "RangeMaxValue", None, None, ""),
+ ],
+ ),
+ (
+ "AxisValueFormat3",
+ [
+ ("uint16", "Format", None, None, "Format, = 3"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ ("Fixed", "Value", None, None, ""),
+ ("Fixed", "LinkedValue", None, None, ""),
+ ],
+ ),
+ (
+ "AxisValueFormat4",
+ [
+ ("uint16", "Format", None, None, "Format, = 4"),
+ (
+ "uint16",
+ "AxisCount",
+ None,
+ None,
+ "The total number of axes contributing to this axis-values combination.",
+ ),
+ ("STATFlags", "Flags", None, None, "Flags."),
+ ("NameID", "ValueNameID", None, None, ""),
+ (
+ "struct",
+ "AxisValueRecord",
+ "AxisCount",
+ 0,
+ "Array of AxisValue records that provide the combination of axis values, one for each contributing axis. ",
+ ),
+ ],
+ ),
+ (
+ "AxisValueRecord",
+ [
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
+ ),
+ ("Fixed", "Value", None, None, "A numeric value for this attribute value."),
+ ],
+ ),
+ #
+ # Variation fonts
+ #
+ # GSUB/GPOS FeatureVariations
+ (
+ "FeatureVariations",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the table-initially set to 0x00010000",
+ ),
+ (
+ "uint32",
+ "FeatureVariationCount",
+ None,
+ None,
+ "Number of records in the FeatureVariationRecord array",
+ ),
+ (
+ "struct",
+ "FeatureVariationRecord",
+ "FeatureVariationCount",
+ 0,
+ "Array of FeatureVariationRecord",
+ ),
+ ],
+ ),
+ (
+ "FeatureVariationRecord",
+ [
+ (
+ "LOffset",
+ "ConditionSet",
+ None,
+ None,
+ "Offset to a ConditionSet table, from beginning of the FeatureVariations table.",
+ ),
+ (
+ "LOffset",
+ "FeatureTableSubstitution",
+ None,
+ None,
+ "Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table",
+ ),
+ ],
+ ),
+ (
+ "ConditionSet",
+ [
+ (
+ "uint16",
+ "ConditionCount",
+ None,
+ None,
+ "Number of condition tables in the ConditionTable array",
+ ),
+ (
+ "LOffset",
+ "ConditionTable",
+ "ConditionCount",
+ 0,
+ "Array of condition tables.",
+ ),
+ ],
+ ),
+ (
+ "ConditionTableFormat1",
+ [
+ ("uint16", "Format", None, None, "Format, = 1"),
+ (
+ "uint16",
+ "AxisIndex",
+ None,
+ None,
+ "Index for the variation axis within the fvar table, base 0.",
+ ),
+ (
+ "F2Dot14",
+ "FilterRangeMinValue",
+ None,
+ None,
+ "Minimum normalized axis value of the font variation instances that satisfy this condition.",
+ ),
+ (
+ "F2Dot14",
+ "FilterRangeMaxValue",
+ None,
+ None,
+ "Maximum value that satisfies this condition.",
+ ),
+ ],
+ ),
+ (
+ "FeatureTableSubstitution",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the table-initially set to 0x00010000",
+ ),
+ (
+ "uint16",
+ "SubstitutionCount",
+ None,
+ None,
+ "Number of records in the FeatureVariationRecords array",
+ ),
+ (
+ "FeatureTableSubstitutionRecord",
+ "SubstitutionRecord",
+ "SubstitutionCount",
+ 0,
+ "Array of FeatureTableSubstitutionRecord",
+ ),
+ ],
+ ),
+ (
+ "FeatureTableSubstitutionRecord",
+ [
+ ("uint16", "FeatureIndex", None, None, "The feature table index to match."),
+ (
+ "LOffset",
+ "Feature",
+ None,
+ None,
+ "Offset to an alternate feature table, from start of the FeatureTableSubstitution table.",
+ ),
+ ],
+ ),
+ # VariationStore
+ (
+ "VarRegionAxis",
+ [
+ ("F2Dot14", "StartCoord", None, None, ""),
+ ("F2Dot14", "PeakCoord", None, None, ""),
+ ("F2Dot14", "EndCoord", None, None, ""),
+ ],
+ ),
+ (
+ "VarRegion",
+ [
+ ("struct", "VarRegionAxis", "RegionAxisCount", 0, ""),
+ ],
+ ),
+ (
+ "VarRegionList",
+ [
+ ("uint16", "RegionAxisCount", None, None, ""),
+ ("uint16", "RegionCount", None, None, ""),
+ ("VarRegion", "Region", "RegionCount", 0, ""),
+ ],
+ ),
+ (
+ "VarData",
+ [
+ ("uint16", "ItemCount", None, None, ""),
+ ("uint16", "NumShorts", None, None, ""),
+ ("uint16", "VarRegionCount", None, None, ""),
+ ("uint16", "VarRegionIndex", "VarRegionCount", 0, ""),
+ ("VarDataValue", "Item", "ItemCount", 0, ""),
+ ],
+ ),
+ (
+ "VarStore",
+ [
+ ("uint16", "Format", None, None, "Set to 1."),
+ ("LOffset", "VarRegionList", None, None, ""),
+ ("uint16", "VarDataCount", None, None, ""),
+ ("LOffset", "VarData", "VarDataCount", 0, ""),
+ ],
+ ),
+ # Variation helpers
+ (
+ "VarIdxMap",
+ [
+ ("uint16", "EntryFormat", None, None, ""), # Automatically computed
+ ("uint16", "MappingCount", None, None, ""), # Automatically computed
+ ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
+ ],
+ ),
+ (
+ "DeltaSetIndexMapFormat0",
+ [
+ ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 0"),
+ ("uint8", "EntryFormat", None, None, ""), # Automatically computed
+ ("uint16", "MappingCount", None, None, ""), # Automatically computed
+ ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
+ ],
+ ),
+ (
+ "DeltaSetIndexMapFormat1",
+ [
+ ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 1"),
+ ("uint8", "EntryFormat", None, None, ""), # Automatically computed
+ ("uint32", "MappingCount", None, None, ""), # Automatically computed
+ ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
+ ],
+ ),
+ # Glyph advance variations
+ (
+ "HVAR",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the HVAR table-initially = 0x00010000",
+ ),
+ ("LOffset", "VarStore", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "AdvWidthMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "LsbMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "RsbMap", None, None, ""),
+ ],
+ ),
+ (
+ "VVAR",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the VVAR table-initially = 0x00010000",
+ ),
+ ("LOffset", "VarStore", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "AdvHeightMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "TsbMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "BsbMap", None, None, ""),
+ ("LOffsetTo(VarIdxMap)", "VOrgMap", None, None, "Vertical origin mapping."),
+ ],
+ ),
+ # Font-wide metrics variations
+ (
+ "MetricsValueRecord",
+ [
+ ("Tag", "ValueTag", None, None, "4-byte font-wide measure identifier"),
+ ("uint32", "VarIdx", None, None, "Combined outer-inner variation index"),
+ (
+ "uint8",
+ "MoreBytes",
+ "ValueRecordSize",
+ -8,
+ "Extra bytes. Set to empty array.",
+ ),
+ ],
+ ),
+ (
+ "MVAR",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the MVAR table-initially = 0x00010000",
+ ),
+ ("uint16", "Reserved", None, None, "Set to 0"),
+ ("uint16", "ValueRecordSize", None, None, ""),
+ ("uint16", "ValueRecordCount", None, None, ""),
+ ("Offset", "VarStore", None, None, ""),
+ ("MetricsValueRecord", "ValueRecord", "ValueRecordCount", 0, ""),
+ ],
+ ),
+ #
+ # math
+ #
+ (
+ "MATH",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the MATH table-initially set to 0x00010000.",
+ ),
+ (
+ "Offset",
+ "MathConstants",
+ None,
+ None,
+ "Offset to MathConstants table - from the beginning of MATH table.",
+ ),
+ (
+ "Offset",
+ "MathGlyphInfo",
+ None,
+ None,
+ "Offset to MathGlyphInfo table - from the beginning of MATH table.",
+ ),
+ (
+ "Offset",
+ "MathVariants",
+ None,
+ None,
+ "Offset to MathVariants table - from the beginning of MATH table.",
+ ),
+ ],
+ ),
+ (
+ "MathValueRecord",
+ [
+ ("int16", "Value", None, None, "The X or Y value in design units."),
+ (
+ "Offset",
+ "DeviceTable",
+ None,
+ None,
+ "Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.",
+ ),
+ ],
+ ),
+ (
+ "MathConstants",
+ [
+ (
+ "int16",
+ "ScriptPercentScaleDown",
+ None,
+ None,
+ "Percentage of scaling down for script level 1. Suggested value: 80%.",
+ ),
+ (
+ "int16",
+ "ScriptScriptPercentScaleDown",
+ None,
+ None,
+ "Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.",
+ ),
+ (
+ "uint16",
+ "DelimitedSubFormulaMinHeight",
+ None,
+ None,
+ "Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.",
+ ),
+ (
+ "uint16",
+ "DisplayOperatorMinHeight",
+ None,
+ None,
+ "Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.",
+ ),
+ (
+ "MathValueRecord",
+ "MathLeading",
+ None,
+ None,
+ "White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.",
+ ),
+ ("MathValueRecord", "AxisHeight", None, None, "Axis height of the font."),
+ (
+ "MathValueRecord",
+ "AccentBaseHeight",
+ None,
+ None,
+ "Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.",
+ ),
+ (
+ "MathValueRecord",
+ "FlattenedAccentBaseHeight",
+ None,
+ None,
+ "Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).",
+ ),
+ (
+ "MathValueRecord",
+ "SubscriptShiftDown",
+ None,
+ None,
+ "The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.",
+ ),
+ (
+ "MathValueRecord",
+ "SubscriptTopMax",
+ None,
+ None,
+ "Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "SubscriptBaselineDropMin",
+ None,
+ None,
+ "Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptShiftUp",
+ None,
+ None,
+ "Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptShiftUpCramped",
+ None,
+ None,
+ "Standard shift of superscripts relative to the base, in cramped style.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptBottomMin",
+ None,
+ None,
+ "Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptBaselineDropMax",
+ None,
+ None,
+ "Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.",
+ ),
+ (
+ "MathValueRecord",
+ "SubSuperscriptGapMin",
+ None,
+ None,
+ "Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "SuperscriptBottomMaxWithSubscript",
+ None,
+ None,
+ "The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "SpaceAfterScript",
+ None,
+ None,
+ "Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.",
+ ),
+ (
+ "MathValueRecord",
+ "UpperLimitGapMin",
+ None,
+ None,
+ "Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "UpperLimitBaselineRiseMin",
+ None,
+ None,
+ "Minimum distance between baseline of upper limit and (ink) top of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "LowerLimitGapMin",
+ None,
+ None,
+ "Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "LowerLimitBaselineDropMin",
+ None,
+ None,
+ "Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.",
+ ),
+ (
+ "MathValueRecord",
+ "StackTopShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the top element of a stack.",
+ ),
+ (
+ "MathValueRecord",
+ "StackTopDisplayStyleShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the top element of a stack in display style.",
+ ),
+ (
+ "MathValueRecord",
+ "StackBottomShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "StackBottomDisplayStyleShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "StackGapMin",
+ None,
+ None,
+ "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "StackDisplayStyleGapMin",
+ None,
+ None,
+ "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackTopShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the top element of the stretch stack.",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackBottomShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackGapAboveMin",
+ None,
+ None,
+ "Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin",
+ ),
+ (
+ "MathValueRecord",
+ "StretchStackGapBelowMin",
+ None,
+ None,
+ "Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumeratorShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the numerator.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumeratorDisplayStyleShiftUp",
+ None,
+ None,
+ "Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenominatorShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the denominator. Positive for moving in the downward direction.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenominatorDisplayStyleShiftDown",
+ None,
+ None,
+ "Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumeratorGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness",
+ ),
+ (
+ "MathValueRecord",
+ "FractionNumDisplayStyleGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionRuleThickness",
+ None,
+ None,
+ "Thickness of the fraction bar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenominatorGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness",
+ ),
+ (
+ "MathValueRecord",
+ "FractionDenomDisplayStyleGapMin",
+ None,
+ None,
+ "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "SkewedFractionHorizontalGap",
+ None,
+ None,
+ "Horizontal distance between the top and bottom elements of a skewed fraction.",
+ ),
+ (
+ "MathValueRecord",
+ "SkewedFractionVerticalGap",
+ None,
+ None,
+ "Vertical distance between the ink of the top and bottom elements of a skewed fraction.",
+ ),
+ (
+ "MathValueRecord",
+ "OverbarVerticalGap",
+ None,
+ None,
+ "Distance between the overbar and the (ink) top of the base. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "OverbarRuleThickness",
+ None,
+ None,
+ "Thickness of overbar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "OverbarExtraAscender",
+ None,
+ None,
+ "Extra white space reserved above the overbar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "UnderbarVerticalGap",
+ None,
+ None,
+ "Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "UnderbarRuleThickness",
+ None,
+ None,
+ "Thickness of underbar. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "UnderbarExtraDescender",
+ None,
+ None,
+ "Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalVerticalGap",
+ None,
+ None,
+ "Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalDisplayStyleVerticalGap",
+ None,
+ None,
+ "Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalRuleThickness",
+ None,
+ None,
+ "Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalExtraAscender",
+ None,
+ None,
+ "Extra white space reserved above the radical. Suggested: RadicalRuleThickness.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalKernBeforeDegree",
+ None,
+ None,
+ "Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.",
+ ),
+ (
+ "MathValueRecord",
+ "RadicalKernAfterDegree",
+ None,
+ None,
+ "Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.",
+ ),
+ (
+ "uint16",
+ "RadicalDegreeBottomRaisePercent",
+ None,
+ None,
+ "Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.",
+ ),
+ ],
+ ),
+ (
+ "MathGlyphInfo",
+ [
+ (
+ "Offset",
+ "MathItalicsCorrectionInfo",
+ None,
+ None,
+ "Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.",
+ ),
+ (
+ "Offset",
+ "MathTopAccentAttachment",
+ None,
+ None,
+ "Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.",
+ ),
+ (
+ "Offset",
+ "ExtendedShapeCoverage",
+ None,
+ None,
+ "Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.",
+ ),
+ (
+ "Offset",
+ "MathKernInfo",
+ None,
+ None,
+ "Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.",
+ ),
+ ],
+ ),
+ (
+ "MathItalicsCorrectionInfo",
+ [
+ (
+ "Offset",
+ "Coverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.",
+ ),
+ (
+ "uint16",
+ "ItalicsCorrectionCount",
+ None,
+ None,
+ "Number of italics correction values. Should coincide with the number of covered glyphs.",
+ ),
+ (
+ "MathValueRecord",
+ "ItalicsCorrection",
+ "ItalicsCorrectionCount",
+ 0,
+ "Array of MathValueRecords defining italics correction values for each covered glyph.",
+ ),
+ ],
+ ),
+ (
+ "MathTopAccentAttachment",
+ [
+ (
+ "Offset",
+ "TopAccentCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathTopAccentAttachment table.",
+ ),
+ (
+ "uint16",
+ "TopAccentAttachmentCount",
+ None,
+ None,
+ "Number of top accent attachment point values. Should coincide with the number of covered glyphs",
+ ),
+ (
+ "MathValueRecord",
+ "TopAccentAttachment",
+ "TopAccentAttachmentCount",
+ 0,
+ "Array of MathValueRecords defining top accent attachment points for each covered glyph",
+ ),
+ ],
+ ),
+ (
+ "MathKernInfo",
+ [
+ (
+ "Offset",
+ "MathKernCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of the MathKernInfo table.",
+ ),
+ ("uint16", "MathKernCount", None, None, "Number of MathKernInfoRecords."),
+ (
+ "MathKernInfoRecord",
+ "MathKernInfoRecords",
+ "MathKernCount",
+ 0,
+ "Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.",
+ ),
+ ],
+ ),
+ (
+ "MathKernInfoRecord",
+ [
+ (
+ "Offset",
+ "TopRightMathKern",
+ None,
+ None,
+ "Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ (
+ "Offset",
+ "TopLeftMathKern",
+ None,
+ None,
+ "Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ (
+ "Offset",
+ "BottomRightMathKern",
+ None,
+ None,
+ "Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ (
+ "Offset",
+ "BottomLeftMathKern",
+ None,
+ None,
+ "Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.",
+ ),
+ ],
+ ),
+ (
+ "MathKern",
+ [
+ (
+ "uint16",
+ "HeightCount",
+ None,
+ None,
+ "Number of heights on which the kern value changes.",
+ ),
+ (
+ "MathValueRecord",
+ "CorrectionHeight",
+ "HeightCount",
+ 0,
+ "Array of correction heights at which the kern value changes. Sorted by the height value in design units.",
+ ),
+ (
+ "MathValueRecord",
+ "KernValue",
+ "HeightCount",
+ 1,
+ "Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as move glyphs closer to each other.",
+ ),
+ ],
+ ),
+ (
+ "MathVariants",
+ [
+ (
+ "uint16",
+ "MinConnectorOverlap",
+ None,
+ None,
+ "Minimum overlap of connecting glyphs during glyph construction, in design units.",
+ ),
+ (
+ "Offset",
+ "VertGlyphCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathVariants table.",
+ ),
+ (
+ "Offset",
+ "HorizGlyphCoverage",
+ None,
+ None,
+ "Offset to Coverage table - from the beginning of MathVariants table.",
+ ),
+ (
+ "uint16",
+ "VertGlyphCount",
+ None,
+ None,
+ "Number of glyphs for which information is provided for vertically growing variants.",
+ ),
+ (
+ "uint16",
+ "HorizGlyphCount",
+ None,
+ None,
+ "Number of glyphs for which information is provided for horizontally growing variants.",
+ ),
+ (
+ "Offset",
+ "VertGlyphConstruction",
+ "VertGlyphCount",
+ 0,
+ "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.",
+ ),
+ (
+ "Offset",
+ "HorizGlyphConstruction",
+ "HorizGlyphCount",
+ 0,
+ "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.",
+ ),
+ ],
+ ),
+ (
+ "MathGlyphConstruction",
+ [
+ (
+ "Offset",
+ "GlyphAssembly",
+ None,
+ None,
+ "Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL",
+ ),
+ (
+ "uint16",
+ "VariantCount",
+ None,
+ None,
+ "Count of glyph growing variants for this glyph.",
+ ),
+ (
+ "MathGlyphVariantRecord",
+ "MathGlyphVariantRecord",
+ "VariantCount",
+ 0,
+ "MathGlyphVariantRecords for alternative variants of the glyphs.",
+ ),
+ ],
+ ),
+ (
+ "MathGlyphVariantRecord",
+ [
+ ("GlyphID", "VariantGlyph", None, None, "Glyph ID for the variant."),
+ (
+ "uint16",
+ "AdvanceMeasurement",
+ None,
+ None,
+ "Advance width/height, in design units, of the variant, in the direction of requested glyph extension.",
+ ),
+ ],
+ ),
+ (
+ "GlyphAssembly",
+ [
+ (
+ "MathValueRecord",
+ "ItalicsCorrection",
+ None,
+ None,
+ "Italics correction of this GlyphAssembly. Should not depend on the assembly size.",
+ ),
+ ("uint16", "PartCount", None, None, "Number of parts in this assembly."),
+ (
+ "GlyphPartRecord",
+ "PartRecords",
+ "PartCount",
+ 0,
+ "Array of part records, from left to right and bottom to top.",
+ ),
+ ],
+ ),
+ (
+ "GlyphPartRecord",
+ [
+ ("GlyphID", "glyph", None, None, "Glyph ID for the part."),
+ (
+ "uint16",
+ "StartConnectorLength",
+ None,
+ None,
+ "Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.",
+ ),
+ (
+ "uint16",
+ "EndConnectorLength",
+ None,
+ None,
+ "Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.",
+ ),
+ (
+ "uint16",
+ "FullAdvance",
+ None,
+ None,
+ "Full advance width/height for this part, in the direction of the extension. In design units.",
+ ),
+ (
+ "uint16",
+ "PartFlags",
+ None,
+ None,
+ "Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 0xFFFE Reserved",
+ ),
+ ],
+ ),
+ ##
+ ## Apple Advanced Typography (AAT) tables
+ ##
+ (
+ "AATLookupSegment",
+ [
+ ("uint16", "lastGlyph", None, None, "Last glyph index in this segment."),
+ ("uint16", "firstGlyph", None, None, "First glyph index in this segment."),
+ (
+ "uint16",
+ "value",
+ None,
+ None,
+ "A 16-bit offset from the start of the table to the data.",
+ ),
+ ],
+ ),
+ #
+ # ankr
+ #
+ (
+ "ankr",
+ [
+ ("struct", "AnchorPoints", None, None, "Anchor points table."),
+ ],
+ ),
+ (
+ "AnchorPointsFormat0",
+ [
+ ("uint16", "Format", None, None, "Format of the anchor points table, = 0."),
+ ("uint16", "Flags", None, None, "Flags. Currenty unused, set to zero."),
+ (
+ "AATLookupWithDataOffset(AnchorGlyphData)",
+ "Anchors",
+ None,
+ None,
+ "Table of with anchor overrides for each glyph.",
+ ),
+ ],
+ ),
+ (
+ "AnchorGlyphData",
+ [
+ (
+ "uint32",
+ "AnchorPointCount",
+ None,
+ None,
+ "Number of anchor points for this glyph.",
+ ),
+ (
+ "struct",
+ "AnchorPoint",
+ "AnchorPointCount",
+ 0,
+ "Individual anchor points.",
+ ),
+ ],
+ ),
+ (
+ "AnchorPoint",
+ [
+ ("int16", "XCoordinate", None, None, "X coordinate of this anchor point."),
+ ("int16", "YCoordinate", None, None, "Y coordinate of this anchor point."),
+ ],
+ ),
+ #
+ # bsln
+ #
+ (
+ "bsln",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version number of the AAT baseline table (0x00010000 for the initial version).",
+ ),
+ ("struct", "Baseline", None, None, "Baseline table."),
+ ],
+ ),
+ (
+ "BaselineFormat0",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 0."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "uint16",
+ "Delta",
+ 32,
+ 0,
+ "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.",
+ ),
+ ],
+ ),
+ (
+ "BaselineFormat1",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 1."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "uint16",
+ "Delta",
+ 32,
+ 0,
+ "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.",
+ ),
+ (
+ "AATLookup(uint16)",
+ "BaselineValues",
+ None,
+ None,
+ "Lookup table that maps glyphs to their baseline values.",
+ ),
+ ],
+ ),
+ (
+ "BaselineFormat2",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 1."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "GlyphID",
+ "StandardGlyph",
+ None,
+ None,
+ "Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.",
+ ),
+ (
+ "uint16",
+ "ControlPoint",
+ 32,
+ 0,
+ "Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.",
+ ),
+ ],
+ ),
+ (
+ "BaselineFormat3",
+ [
+ ("uint16", "Format", None, None, "Format of the baseline table, = 1."),
+ (
+ "uint16",
+ "DefaultBaseline",
+ None,
+ None,
+ "Default baseline value for all glyphs. This value can be from 0 through 31.",
+ ),
+ (
+ "GlyphID",
+ "StandardGlyph",
+ None,
+ None,
+ "Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.",
+ ),
+ (
+ "uint16",
+ "ControlPoint",
+ 32,
+ 0,
+ "Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.",
+ ),
+ (
+ "AATLookup(uint16)",
+ "BaselineValues",
+ None,
+ None,
+ "Lookup table that maps glyphs to their baseline values.",
+ ),
+ ],
+ ),
+ #
+ # cidg
+ #
+ (
+ "cidg",
+ [
+ ("struct", "CIDGlyphMapping", None, None, "CID-to-glyph mapping table."),
+ ],
+ ),
+ (
+ "CIDGlyphMappingFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the CID-to-glyph mapping table, = 0.",
+ ),
+ ("uint16", "DataFormat", None, None, "Currenty unused, set to zero."),
+ ("uint32", "StructLength", None, None, "Size of the table in bytes."),
+ ("uint16", "Registry", None, None, "The registry ID."),
+ (
+ "char64",
+ "RegistryName",
+ None,
+ None,
+ "The registry name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "Order", None, None, "The order ID."),
+ (
+ "char64",
+ "OrderName",
+ None,
+ None,
+ "The order name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "SupplementVersion", None, None, "The supplement version."),
+ (
+ "CIDGlyphMap",
+ "Mapping",
+ None,
+ None,
+ "A mapping from CIDs to the glyphs in the font, starting with CID 0. If a CID from the identified collection has no glyph in the font, 0xFFFF is used",
+ ),
+ ],
+ ),
+ #
+ # feat
+ #
+ (
+ "feat",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the feat table-initially set to 0x00010000.",
+ ),
+ ("FeatureNames", "FeatureNames", None, None, "The feature names."),
+ ],
+ ),
+ (
+ "FeatureNames",
+ [
+ (
+ "uint16",
+ "FeatureNameCount",
+ None,
+ None,
+ "Number of entries in the feature name array.",
+ ),
+ ("uint16", "Reserved1", None, None, "Reserved (set to zero)."),
+ ("uint32", "Reserved2", None, None, "Reserved (set to zero)."),
+ (
+ "FeatureName",
+ "FeatureName",
+ "FeatureNameCount",
+ 0,
+ "The feature name array.",
+ ),
+ ],
+ ),
+ (
+ "FeatureName",
+ [
+ ("uint16", "FeatureType", None, None, "Feature type."),
+ (
+ "uint16",
+ "SettingsCount",
+ None,
+ None,
+ "The number of records in the setting name array.",
+ ),
+ (
+ "LOffset",
+ "Settings",
+ None,
+ None,
+ "Offset to setting table for this feature.",
+ ),
+ (
+ "uint16",
+ "FeatureFlags",
+ None,
+ None,
+ "Single-bit flags associated with the feature type.",
+ ),
+ (
+ "NameID",
+ "FeatureNameID",
+ None,
+ None,
+ "The name table index for the feature name.",
+ ),
+ ],
+ ),
+ (
+ "Settings",
+ [
+ ("Setting", "Setting", "SettingsCount", 0, "The setting array."),
+ ],
+ ),
+ (
+ "Setting",
+ [
+ ("uint16", "SettingValue", None, None, "The setting."),
+ (
+ "NameID",
+ "SettingNameID",
+ None,
+ None,
+ "The name table index for the setting name.",
+ ),
+ ],
+ ),
+ #
+ # gcid
+ #
+ (
+ "gcid",
+ [
+ ("struct", "GlyphCIDMapping", None, None, "Glyph to CID mapping table."),
+ ],
+ ),
+ (
+ "GlyphCIDMappingFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the glyph-to-CID mapping table, = 0.",
+ ),
+ ("uint16", "DataFormat", None, None, "Currenty unused, set to zero."),
+ ("uint32", "StructLength", None, None, "Size of the table in bytes."),
+ ("uint16", "Registry", None, None, "The registry ID."),
+ (
+ "char64",
+ "RegistryName",
+ None,
+ None,
+ "The registry name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "Order", None, None, "The order ID."),
+ (
+ "char64",
+ "OrderName",
+ None,
+ None,
+ "The order name in ASCII; unused bytes should be set to 0.",
+ ),
+ ("uint16", "SupplementVersion", None, None, "The supplement version."),
+ (
+ "GlyphCIDMap",
+ "Mapping",
+ None,
+ None,
+ "The CIDs for the glyphs in the font, starting with glyph 0. If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used",
+ ),
+ ],
+ ),
+ #
+ # lcar
+ #
+ (
+ "lcar",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version number of the ligature caret table (0x00010000 for the initial version).",
+ ),
+ ("struct", "LigatureCarets", None, None, "Ligature carets table."),
+ ],
+ ),
+ (
+ "LigatureCaretsFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.",
+ ),
+ (
+ "AATLookup(LigCaretDistances)",
+ "Carets",
+ None,
+ None,
+ "Lookup table associating ligature glyphs with their caret positions, in font unit distances.",
+ ),
+ ],
+ ),
+ (
+ "LigatureCaretsFormat1",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.",
+ ),
+ (
+ "AATLookup(LigCaretPoints)",
+ "Carets",
+ None,
+ None,
+ "Lookup table associating ligature glyphs with their caret positions, as control points.",
+ ),
+ ],
+ ),
+ (
+ "LigCaretDistances",
+ [
+ ("uint16", "DivsionPointCount", None, None, "Number of division points."),
+ (
+ "int16",
+ "DivisionPoint",
+ "DivsionPointCount",
+ 0,
+ "Distance in font units through which a subdivision is made orthogonally to the baseline.",
+ ),
+ ],
+ ),
+ (
+ "LigCaretPoints",
+ [
+ ("uint16", "DivsionPointCount", None, None, "Number of division points."),
+ (
+ "int16",
+ "DivisionPoint",
+ "DivsionPointCount",
+ 0,
+ "The number of the control point through which a subdivision is made orthogonally to the baseline.",
+ ),
+ ],
+ ),
+ #
+ # mort
+ #
+ (
+ "mort",
+ [
+ ("Version", "Version", None, None, "Version of the mort table."),
+ (
+ "uint32",
+ "MorphChainCount",
+ None,
+ None,
+ "Number of metamorphosis chains.",
+ ),
+ (
+ "MortChain",
+ "MorphChain",
+ "MorphChainCount",
+ 0,
+ "Array of metamorphosis chains.",
+ ),
+ ],
+ ),
+ (
+ "MortChain",
+ [
+ (
+ "Flags32",
+ "DefaultFlags",
+ None,
+ None,
+ "The default specification for subtables.",
+ ),
+ (
+ "uint32",
+ "StructLength",
+ None,
+ None,
+ "Total byte count, including this header; must be a multiple of 4.",
+ ),
+ (
+ "uint16",
+ "MorphFeatureCount",
+ None,
+ None,
+ "Number of metamorphosis feature entries.",
+ ),
+ (
+ "uint16",
+ "MorphSubtableCount",
+ None,
+ None,
+ "The number of subtables in the chain.",
+ ),
+ (
+ "struct",
+ "MorphFeature",
+ "MorphFeatureCount",
+ 0,
+ "Array of metamorphosis features.",
+ ),
+ (
+ "MortSubtable",
+ "MorphSubtable",
+ "MorphSubtableCount",
+ 0,
+ "Array of metamorphosis subtables.",
+ ),
+ ],
+ ),
+ (
+ "MortSubtable",
+ [
+ (
+ "uint16",
+ "StructLength",
+ None,
+ None,
+ "Total subtable length, including this header.",
+ ),
+ (
+ "uint8",
+ "CoverageFlags",
+ None,
+ None,
+ "Most significant byte of coverage flags.",
+ ),
+ ("uint8", "MorphType", None, None, "Subtable type."),
+ (
+ "Flags32",
+ "SubFeatureFlags",
+ None,
+ None,
+ "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).",
+ ),
+ ("SubStruct", "SubStruct", None, None, "SubTable."),
+ ],
+ ),
+ #
+ # morx
+ #
+ (
+ "morx",
+ [
+ ("uint16", "Version", None, None, "Version of the morx table."),
+ ("uint16", "Reserved", None, None, "Reserved (set to zero)."),
+ (
+ "uint32",
+ "MorphChainCount",
+ None,
+ None,
+ "Number of extended metamorphosis chains.",
+ ),
+ (
+ "MorxChain",
+ "MorphChain",
+ "MorphChainCount",
+ 0,
+ "Array of extended metamorphosis chains.",
+ ),
+ ],
+ ),
+ (
+ "MorxChain",
+ [
+ (
+ "Flags32",
+ "DefaultFlags",
+ None,
+ None,
+ "The default specification for subtables.",
+ ),
+ (
+ "uint32",
+ "StructLength",
+ None,
+ None,
+ "Total byte count, including this header; must be a multiple of 4.",
+ ),
+ (
+ "uint32",
+ "MorphFeatureCount",
+ None,
+ None,
+ "Number of feature subtable entries.",
+ ),
+ (
+ "uint32",
+ "MorphSubtableCount",
+ None,
+ None,
+ "The number of subtables in the chain.",
+ ),
+ (
+ "MorphFeature",
+ "MorphFeature",
+ "MorphFeatureCount",
+ 0,
+ "Array of metamorphosis features.",
+ ),
+ (
+ "MorxSubtable",
+ "MorphSubtable",
+ "MorphSubtableCount",
+ 0,
+ "Array of extended metamorphosis subtables.",
+ ),
+ ],
+ ),
+ (
+ "MorphFeature",
+ [
+ ("uint16", "FeatureType", None, None, "The type of feature."),
+ (
+ "uint16",
+ "FeatureSetting",
+ None,
+ None,
+ "The feature's setting (aka selector).",
+ ),
+ (
+ "Flags32",
+ "EnableFlags",
+ None,
+ None,
+ "Flags for the settings that this feature and setting enables.",
+ ),
+ (
+ "Flags32",
+ "DisableFlags",
+ None,
+ None,
+ "Complement of flags for the settings that this feature and setting disable.",
+ ),
+ ],
+ ),
+ # Apple TrueType Reference Manual, chapter “The ‘morx’ table”,
+ # section “Metamorphosis Subtables”.
+ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
+ (
+ "MorxSubtable",
+ [
+ (
+ "uint32",
+ "StructLength",
+ None,
+ None,
+ "Total subtable length, including this header.",
+ ),
+ (
+ "uint8",
+ "CoverageFlags",
+ None,
+ None,
+ "Most significant byte of coverage flags.",
+ ),
+ ("uint16", "Reserved", None, None, "Unused."),
+ ("uint8", "MorphType", None, None, "Subtable type."),
+ (
+ "Flags32",
+ "SubFeatureFlags",
+ None,
+ None,
+ "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).",
+ ),
+ ("SubStruct", "SubStruct", None, None, "SubTable."),
+ ],
+ ),
+ (
+ "StateHeader",
+ [
+ (
+ "uint32",
+ "ClassCount",
+ None,
+ None,
+ "Number of classes, which is the number of 16-bit entry indices in a single line in the state array.",
+ ),
+ (
+ "uint32",
+ "MorphClass",
+ None,
+ None,
+ "Offset from the start of this state table header to the start of the class table.",
+ ),
+ (
+ "uint32",
+ "StateArrayOffset",
+ None,
+ None,
+ "Offset from the start of this state table header to the start of the state array.",
+ ),
+ (
+ "uint32",
+ "EntryTableOffset",
+ None,
+ None,
+ "Offset from the start of this state table header to the start of the entry table.",
+ ),
+ ],
+ ),
+ (
+ "RearrangementMorph",
+ [
+ (
+ "STXHeader(RearrangementMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer table for indic rearrangement.",
+ ),
+ ],
+ ),
+ (
+ "ContextualMorph",
+ [
+ (
+ "STXHeader(ContextualMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer for contextual glyph substitution.",
+ ),
+ ],
+ ),
+ (
+ "LigatureMorph",
+ [
+ (
+ "STXHeader(LigatureMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer for ligature substitution.",
+ ),
+ ],
+ ),
+ (
+ "NoncontextualMorph",
+ [
+ (
+ "AATLookup(GlyphID)",
+ "Substitution",
+ None,
+ None,
+ "The noncontextual glyph substitution table.",
+ ),
+ ],
+ ),
+ (
+ "InsertionMorph",
+ [
+ (
+ "STXHeader(InsertionMorphAction)",
+ "StateTable",
+ None,
+ None,
+ "Finite-state transducer for glyph insertion.",
+ ),
+ ],
+ ),
+ (
+ "MorphClass",
+ [
+ (
+ "uint16",
+ "FirstGlyph",
+ None,
+ None,
+ "Glyph index of the first glyph in the class table.",
+ ),
+ # ('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'),
+ # ('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'),
+ ],
+ ),
+ # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below.
+ # ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
+ #
+ # prop
+ #
+ (
+ "prop",
+ [
+ (
+ "Fixed",
+ "Version",
+ None,
+ None,
+ "Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.",
+ ),
+ ("struct", "GlyphProperties", None, None, "Glyph properties."),
+ ],
+ ),
+ (
+ "GlyphPropertiesFormat0",
+ [
+ ("uint16", "Format", None, None, "Format, = 0."),
+ (
+ "uint16",
+ "DefaultProperties",
+ None,
+ None,
+ "Default properties applied to a glyph. Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.",
+ ),
+ ],
+ ),
+ (
+ "GlyphPropertiesFormat1",
+ [
+ ("uint16", "Format", None, None, "Format, = 1."),
+ (
+ "uint16",
+ "DefaultProperties",
+ None,
+ None,
+ "Default properties applied to a glyph if that glyph is not present in the Properties lookup table.",
+ ),
+ (
+ "AATLookup(uint16)",
+ "Properties",
+ None,
+ None,
+ "Lookup data associating glyphs with their properties.",
+ ),
+ ],
+ ),
+ #
+ # opbd
+ #
+ (
+ "opbd",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version number of the optical bounds table (0x00010000 for the initial version).",
+ ),
+ ("struct", "OpticalBounds", None, None, "Optical bounds table."),
+ ],
+ ),
+ (
+ "OpticalBoundsFormat0",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the optical bounds table, = 0.",
+ ),
+ (
+ "AATLookup(OpticalBoundsDeltas)",
+ "OpticalBoundsDeltas",
+ None,
+ None,
+ "Lookup table associating glyphs with their optical bounds, given as deltas in font units.",
+ ),
+ ],
+ ),
+ (
+ "OpticalBoundsFormat1",
+ [
+ (
+ "uint16",
+ "Format",
+ None,
+ None,
+ "Format of the optical bounds table, = 1.",
+ ),
+ (
+ "AATLookup(OpticalBoundsPoints)",
+ "OpticalBoundsPoints",
+ None,
+ None,
+ "Lookup table associating glyphs with their optical bounds, given as references to control points.",
+ ),
+ ],
+ ),
+ (
+ "OpticalBoundsDeltas",
+ [
+ (
+ "int16",
+ "Left",
+ None,
+ None,
+ "Delta value for the left-side optical edge.",
+ ),
+ ("int16", "Top", None, None, "Delta value for the top-side optical edge."),
+ (
+ "int16",
+ "Right",
+ None,
+ None,
+ "Delta value for the right-side optical edge.",
+ ),
+ (
+ "int16",
+ "Bottom",
+ None,
+ None,
+ "Delta value for the bottom-side optical edge.",
+ ),
+ ],
+ ),
+ (
+ "OpticalBoundsPoints",
+ [
+ (
+ "int16",
+ "Left",
+ None,
+ None,
+ "Control point index for the left-side optical edge, or -1 if this glyph has none.",
+ ),
+ (
+ "int16",
+ "Top",
+ None,
+ None,
+ "Control point index for the top-side optical edge, or -1 if this glyph has none.",
+ ),
+ (
+ "int16",
+ "Right",
+ None,
+ None,
+ "Control point index for the right-side optical edge, or -1 if this glyph has none.",
+ ),
+ (
+ "int16",
+ "Bottom",
+ None,
+ None,
+ "Control point index for the bottom-side optical edge, or -1 if this glyph has none.",
+ ),
+ ],
+ ),
+ #
+ # TSIC
+ #
+ (
+ "TSIC",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of table initially set to 0x00010000.",
+ ),
+ ("uint16", "Flags", None, None, "TSIC flags - set to 0"),
+ ("uint16", "AxisCount", None, None, "Axis count from fvar"),
+ ("uint16", "RecordCount", None, None, "TSIC record count"),
+ ("uint16", "Reserved", None, None, "Set to 0"),
+ ("Tag", "AxisArray", "AxisCount", 0, "Array of axis tags in fvar order"),
+ (
+ "LocationRecord",
+ "RecordLocations",
+ "RecordCount",
+ 0,
+ "Location in variation space of TSIC record",
+ ),
+ ("TSICRecord", "Record", "RecordCount", 0, "Array of TSIC records"),
+ ],
+ ),
+ (
+ "LocationRecord",
+ [
+ ("F2Dot14", "Axis", "AxisCount", 0, "Axis record"),
+ ],
+ ),
+ (
+ "TSICRecord",
+ [
+ ("uint16", "Flags", None, None, "Record flags - set to 0"),
+ ("uint16", "NumCVTEntries", None, None, "Number of CVT number value pairs"),
+ ("uint16", "NameLength", None, None, "Length of optional user record name"),
+ ("uint16", "NameArray", "NameLength", 0, "Unicode 16 name"),
+ ("uint16", "CVTArray", "NumCVTEntries", 0, "CVT number array"),
+ ("int16", "CVTValueArray", "NumCVTEntries", 0, "CVT value"),
+ ],
+ ),
+ #
+ # COLR
+ #
+ (
+ "COLR",
+ [
+ ("uint16", "Version", None, None, "Table version number (starts at 0)."),
+ (
+ "uint16",
+ "BaseGlyphRecordCount",
+ None,
+ None,
+ "Number of Base Glyph Records.",
+ ),
+ (
+ "LOffset",
+ "BaseGlyphRecordArray",
+ None,
+ None,
+ "Offset (from beginning of COLR table) to Base Glyph records.",
+ ),
+ (
+ "LOffset",
+ "LayerRecordArray",
+ None,
+ None,
+ "Offset (from beginning of COLR table) to Layer Records.",
+ ),
+ ("uint16", "LayerRecordCount", None, None, "Number of Layer Records."),
+ (
+ "LOffset",
+ "BaseGlyphList",
+ None,
+ "Version >= 1",
+ "Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.",
+ ),
+ (
+ "LOffset",
+ "LayerList",
+ None,
+ "Version >= 1",
+ "Offset (from beginning of COLR table) to LayerList.",
+ ),
+ (
+ "LOffset",
+ "ClipList",
+ None,
+ "Version >= 1",
+ "Offset to ClipList table (may be NULL)",
+ ),
+ (
+ "LOffsetTo(DeltaSetIndexMap)",
+ "VarIndexMap",
+ None,
+ "Version >= 1",
+ "Offset to DeltaSetIndexMap table (may be NULL)",
+ ),
+ (
+ "LOffset",
+ "VarStore",
+ None,
+ "Version >= 1",
+ "Offset to variation store (may be NULL)",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphRecordArray",
+ [
+ (
+ "BaseGlyphRecord",
+ "BaseGlyphRecord",
+ "BaseGlyphRecordCount",
+ 0,
+ "Base Glyph records.",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphRecord",
+ [
+ (
+ "GlyphID",
+ "BaseGlyph",
+ None,
+ None,
+ "Glyph ID of reference glyph. This glyph is for reference only and is not rendered for color.",
+ ),
+ (
+ "uint16",
+ "FirstLayerIndex",
+ None,
+ None,
+ "Index (from beginning of the Layer Records) to the layer record. There will be numLayers consecutive entries for this base glyph.",
+ ),
+ (
+ "uint16",
+ "NumLayers",
+ None,
+ None,
+ "Number of color layers associated with this glyph.",
+ ),
+ ],
+ ),
+ (
+ "LayerRecordArray",
+ [
+ ("LayerRecord", "LayerRecord", "LayerRecordCount", 0, "Layer records."),
+ ],
+ ),
+ (
+ "LayerRecord",
+ [
+ (
+ "GlyphID",
+ "LayerGlyph",
+ None,
+ None,
+ "Glyph ID of layer glyph (must be in z-order from bottom to top).",
+ ),
+ (
+ "uint16",
+ "PaletteIndex",
+ None,
+ None,
+ "Index value to use with a selected color palette.",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphList",
+ [
+ (
+ "uint32",
+ "BaseGlyphCount",
+ None,
+ None,
+ "Number of Version-1 Base Glyph records",
+ ),
+ (
+ "struct",
+ "BaseGlyphPaintRecord",
+ "BaseGlyphCount",
+ 0,
+ "Array of Version-1 Base Glyph records",
+ ),
+ ],
+ ),
+ (
+ "BaseGlyphPaintRecord",
+ [
+ ("GlyphID", "BaseGlyph", None, None, "Glyph ID of reference glyph."),
+ (
+ "LOffset",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.",
+ ),
+ ],
+ ),
+ (
+ "LayerList",
+ [
+ ("uint32", "LayerCount", None, None, "Number of Version-1 Layers"),
+ (
+ "LOffset",
+ "Paint",
+ "LayerCount",
+ 0,
+ "Array of offsets to Paint tables, from the start of the LayerList table.",
+ ),
+ ],
+ ),
+ (
+ "ClipListFormat1",
+ [
+ (
+ "uint8",
+ "Format",
+ None,
+ None,
+ "Format for ClipList with 16bit glyph IDs: 1",
+ ),
+ ("uint32", "ClipCount", None, None, "Number of Clip records."),
+ (
+ "struct",
+ "ClipRecord",
+ "ClipCount",
+ 0,
+ "Array of Clip records sorted by glyph ID.",
+ ),
+ ],
+ ),
+ (
+ "ClipRecord",
+ [
+ ("uint16", "StartGlyphID", None, None, "First glyph ID in the range."),
+ ("uint16", "EndGlyphID", None, None, "Last glyph ID in the range."),
+ ("Offset24", "ClipBox", None, None, "Offset to a ClipBox table."),
+ ],
+ ),
+ (
+ "ClipBoxFormat1",
+ [
+ (
+ "uint8",
+ "Format",
+ None,
+ None,
+ "Format for ClipBox without variation: set to 1.",
+ ),
+ ("int16", "xMin", None, None, "Minimum x of clip box."),
+ ("int16", "yMin", None, None, "Minimum y of clip box."),
+ ("int16", "xMax", None, None, "Maximum x of clip box."),
+ ("int16", "yMax", None, None, "Maximum y of clip box."),
+ ],
+ ),
+ (
+ "ClipBoxFormat2",
+ [
+ ("uint8", "Format", None, None, "Format for variable ClipBox: set to 2."),
+ ("int16", "xMin", None, None, "Minimum x of clip box. VarIndexBase + 0."),
+ ("int16", "yMin", None, None, "Minimum y of clip box. VarIndexBase + 1."),
+ ("int16", "xMax", None, None, "Maximum x of clip box. VarIndexBase + 2."),
+ ("int16", "yMax", None, None, "Maximum y of clip box. VarIndexBase + 3."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # COLRv1 Affine2x3 uses the same column-major order to serialize a 2D
+ # Affine Transformation as the one used by fontTools.misc.transform.
+ # However, for historical reasons, the labels 'xy' and 'yx' are swapped.
+ # Their fundamental meaning is the same though.
+ # COLRv1 Affine2x3 follows the names found in FreeType and Cairo.
+ # In all case, the second element in the 6-tuple correspond to the
+ # y-part of the x basis vector, and the third to the x-part of the y
+ # basis vector.
+ # See https://github.com/googlefonts/colr-gradients-spec/pull/85
+ (
+ "Affine2x3",
+ [
+ ("Fixed", "xx", None, None, "x-part of x basis vector"),
+ ("Fixed", "yx", None, None, "y-part of x basis vector"),
+ ("Fixed", "xy", None, None, "x-part of y basis vector"),
+ ("Fixed", "yy", None, None, "y-part of y basis vector"),
+ ("Fixed", "dx", None, None, "Translation in x direction"),
+ ("Fixed", "dy", None, None, "Translation in y direction"),
+ ],
+ ),
+ (
+ "VarAffine2x3",
+ [
+ ("Fixed", "xx", None, None, "x-part of x basis vector. VarIndexBase + 0."),
+ ("Fixed", "yx", None, None, "y-part of x basis vector. VarIndexBase + 1."),
+ ("Fixed", "xy", None, None, "x-part of y basis vector. VarIndexBase + 2."),
+ ("Fixed", "yy", None, None, "y-part of y basis vector. VarIndexBase + 3."),
+ (
+ "Fixed",
+ "dx",
+ None,
+ None,
+ "Translation in x direction. VarIndexBase + 4.",
+ ),
+ (
+ "Fixed",
+ "dy",
+ None,
+ None,
+ "Translation in y direction. VarIndexBase + 5.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ (
+ "ColorStop",
+ [
+ ("F2Dot14", "StopOffset", None, None, ""),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"),
+ ],
+ ),
+ (
+ "VarColorStop",
+ [
+ ("F2Dot14", "StopOffset", None, None, "VarIndexBase + 0."),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ (
+ "F2Dot14",
+ "Alpha",
+ None,
+ None,
+ "Values outsided [0.,1.] reserved. VarIndexBase + 1.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ (
+ "ColorLine",
+ [
+ (
+ "ExtendMode",
+ "Extend",
+ None,
+ None,
+ "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}",
+ ),
+ ("uint16", "StopCount", None, None, "Number of Color stops."),
+ ("ColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."),
+ ],
+ ),
+ (
+ "VarColorLine",
+ [
+ (
+ "ExtendMode",
+ "Extend",
+ None,
+ None,
+ "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}",
+ ),
+ ("uint16", "StopCount", None, None, "Number of Color stops."),
+ ("VarColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."),
+ ],
+ ),
+ # PaintColrLayers
+ (
+ "PaintFormat1",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 1"),
+ (
+ "uint8",
+ "NumLayers",
+ None,
+ None,
+ "Number of offsets to Paint to read from LayerList.",
+ ),
+ ("uint32", "FirstLayerIndex", None, None, "Index into LayerList."),
+ ],
+ ),
+ # PaintSolid
+ (
+ "PaintFormat2",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 2"),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"),
+ ],
+ ),
+ # PaintVarSolid
+ (
+ "PaintFormat3",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 3"),
+ ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
+ (
+ "F2Dot14",
+ "Alpha",
+ None,
+ None,
+ "Values outsided [0.,1.] reserved. VarIndexBase + 0.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintLinearGradient
+ (
+ "PaintFormat4",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 4"),
+ (
+ "Offset24",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintLinearGradient table) to ColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, ""),
+ ("int16", "y0", None, None, ""),
+ ("int16", "x1", None, None, ""),
+ ("int16", "y1", None, None, ""),
+ ("int16", "x2", None, None, ""),
+ ("int16", "y2", None, None, ""),
+ ],
+ ),
+ # PaintVarLinearGradient
+ (
+ "PaintFormat5",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 5"),
+ (
+ "LOffset24To(VarColorLine)",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, "VarIndexBase + 0."),
+ ("int16", "y0", None, None, "VarIndexBase + 1."),
+ ("int16", "x1", None, None, "VarIndexBase + 2."),
+ ("int16", "y1", None, None, "VarIndexBase + 3."),
+ ("int16", "x2", None, None, "VarIndexBase + 4."),
+ ("int16", "y2", None, None, "VarIndexBase + 5."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintRadialGradient
+ (
+ "PaintFormat6",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 6"),
+ (
+ "Offset24",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintRadialGradient table) to ColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, ""),
+ ("int16", "y0", None, None, ""),
+ ("uint16", "r0", None, None, ""),
+ ("int16", "x1", None, None, ""),
+ ("int16", "y1", None, None, ""),
+ ("uint16", "r1", None, None, ""),
+ ],
+ ),
+ # PaintVarRadialGradient
+ (
+ "PaintFormat7",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 7"),
+ (
+ "LOffset24To(VarColorLine)",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.",
+ ),
+ ("int16", "x0", None, None, "VarIndexBase + 0."),
+ ("int16", "y0", None, None, "VarIndexBase + 1."),
+ ("uint16", "r0", None, None, "VarIndexBase + 2."),
+ ("int16", "x1", None, None, "VarIndexBase + 3."),
+ ("int16", "y1", None, None, "VarIndexBase + 4."),
+ ("uint16", "r1", None, None, "VarIndexBase + 5."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintSweepGradient
+ (
+ "PaintFormat8",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 8"),
+ (
+ "Offset24",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.",
+ ),
+ ("int16", "centerX", None, None, "Center x coordinate."),
+ ("int16", "centerY", None, None, "Center y coordinate."),
+ (
+ "BiasedAngle",
+ "startAngle",
+ None,
+ None,
+ "Start of the angular range of the gradient.",
+ ),
+ (
+ "BiasedAngle",
+ "endAngle",
+ None,
+ None,
+ "End of the angular range of the gradient.",
+ ),
+ ],
+ ),
+ # PaintVarSweepGradient
+ (
+ "PaintFormat9",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 9"),
+ (
+ "LOffset24To(VarColorLine)",
+ "ColorLine",
+ None,
+ None,
+ "Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.",
+ ),
+ ("int16", "centerX", None, None, "Center x coordinate. VarIndexBase + 0."),
+ ("int16", "centerY", None, None, "Center y coordinate. VarIndexBase + 1."),
+ (
+ "BiasedAngle",
+ "startAngle",
+ None,
+ None,
+ "Start of the angular range of the gradient. VarIndexBase + 2.",
+ ),
+ (
+ "BiasedAngle",
+ "endAngle",
+ None,
+ None,
+ "End of the angular range of the gradient. VarIndexBase + 3.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintGlyph
+ (
+ "PaintFormat10",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 10"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintGlyph table) to Paint subtable.",
+ ),
+ ("GlyphID", "Glyph", None, None, "Glyph ID for the source outline."),
+ ],
+ ),
+ # PaintColrGlyph
+ (
+ "PaintFormat11",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 11"),
+ (
+ "GlyphID",
+ "Glyph",
+ None,
+ None,
+ "Virtual glyph ID for a BaseGlyphList base glyph.",
+ ),
+ ],
+ ),
+ # PaintTransform
+ (
+ "PaintFormat12",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 12"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintTransform table) to Paint subtable.",
+ ),
+ (
+ "LOffset24To(Affine2x3)",
+ "Transform",
+ None,
+ None,
+ "2x3 matrix for 2D affine transformations.",
+ ),
+ ],
+ ),
+ # PaintVarTransform
+ (
+ "PaintFormat13",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 13"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarTransform table) to Paint subtable.",
+ ),
+ (
+ "LOffset24To(VarAffine2x3)",
+ "Transform",
+ None,
+ None,
+ "2x3 matrix for 2D affine transformations.",
+ ),
+ ],
+ ),
+ # PaintTranslate
+ (
+ "PaintFormat14",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 14"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintTranslate table) to Paint subtable.",
+ ),
+ ("int16", "dx", None, None, "Translation in x direction."),
+ ("int16", "dy", None, None, "Translation in y direction."),
+ ],
+ ),
+ # PaintVarTranslate
+ (
+ "PaintFormat15",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 15"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarTranslate table) to Paint subtable.",
+ ),
+ (
+ "int16",
+ "dx",
+ None,
+ None,
+ "Translation in x direction. VarIndexBase + 0.",
+ ),
+ (
+ "int16",
+ "dy",
+ None,
+ None,
+ "Translation in y direction. VarIndexBase + 1.",
+ ),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScale
+ (
+ "PaintFormat16",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 16"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScale table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, ""),
+ ("F2Dot14", "scaleY", None, None, ""),
+ ],
+ ),
+ # PaintVarScale
+ (
+ "PaintFormat17",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 17"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScale table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."),
+ ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScaleAroundCenter
+ (
+ "PaintFormat18",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 18"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, ""),
+ ("F2Dot14", "scaleY", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarScaleAroundCenter
+ (
+ "PaintFormat19",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 19"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScaleAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."),
+ ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."),
+ ("int16", "centerX", None, None, "VarIndexBase + 2."),
+ ("int16", "centerY", None, None, "VarIndexBase + 3."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScaleUniform
+ (
+ "PaintFormat20",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 20"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScaleUniform table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, ""),
+ ],
+ ),
+ # PaintVarScaleUniform
+ (
+ "PaintFormat21",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 21"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, "VarIndexBase + 0."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintScaleUniformAroundCenter
+ (
+ "PaintFormat22",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 22"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarScaleUniformAroundCenter
+ (
+ "PaintFormat23",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 23"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.",
+ ),
+ ("F2Dot14", "scale", None, None, "VarIndexBase + 0"),
+ ("int16", "centerX", None, None, "VarIndexBase + 1"),
+ ("int16", "centerY", None, None, "VarIndexBase + 2"),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintRotate
+ (
+ "PaintFormat24",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 24"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintRotate table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, ""),
+ ],
+ ),
+ # PaintVarRotate
+ (
+ "PaintFormat25",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 25"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarRotate table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, "VarIndexBase + 0."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintRotateAroundCenter
+ (
+ "PaintFormat26",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 26"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarRotateAroundCenter
+ (
+ "PaintFormat27",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 27"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "angle", None, None, "VarIndexBase + 0."),
+ ("int16", "centerX", None, None, "VarIndexBase + 1."),
+ ("int16", "centerY", None, None, "VarIndexBase + 2."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintSkew
+ (
+ "PaintFormat28",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 28"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintSkew table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, ""),
+ ("Angle", "ySkewAngle", None, None, ""),
+ ],
+ ),
+ # PaintVarSkew
+ (
+ "PaintFormat29",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 29"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarSkew table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."),
+ ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintSkewAroundCenter
+ (
+ "PaintFormat30",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 30"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, ""),
+ ("Angle", "ySkewAngle", None, None, ""),
+ ("int16", "centerX", None, None, ""),
+ ("int16", "centerY", None, None, ""),
+ ],
+ ),
+ # PaintVarSkewAroundCenter
+ (
+ "PaintFormat31",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 31"),
+ (
+ "Offset24",
+ "Paint",
+ None,
+ None,
+ "Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.",
+ ),
+ ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."),
+ ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."),
+ ("int16", "centerX", None, None, "VarIndexBase + 2."),
+ ("int16", "centerY", None, None, "VarIndexBase + 3."),
+ (
+ "VarIndex",
+ "VarIndexBase",
+ None,
+ None,
+ "Base index into DeltaSetIndexMap.",
+ ),
+ ],
+ ),
+ # PaintComposite
+ (
+ "PaintFormat32",
+ [
+ ("uint8", "PaintFormat", None, None, "Format identifier-format = 32"),
+ (
+ "LOffset24To(Paint)",
+ "SourcePaint",
+ None,
+ None,
+ "Offset (from beginning of PaintComposite table) to source Paint subtable.",
+ ),
+ (
+ "CompositeMode",
+ "CompositeMode",
+ None,
+ None,
+ "A CompositeMode enumeration value.",
+ ),
+ (
+ "LOffset24To(Paint)",
+ "BackdropPaint",
+ None,
+ None,
+ "Offset (from beginning of PaintComposite table) to backdrop Paint subtable.",
+ ),
+ ],
+ ),
+ #
+ # avar
+ #
+ (
+ "AxisValueMap",
+ [
+ (
+ "F2Dot14",
+ "FromCoordinate",
+ None,
+ None,
+ "A normalized coordinate value obtained using default normalization",
+ ),
+ (
+ "F2Dot14",
+ "ToCoordinate",
+ None,
+ None,
+ "The modified, normalized coordinate value",
+ ),
+ ],
+ ),
+ (
+ "AxisSegmentMap",
+ [
+ (
+ "uint16",
+ "PositionMapCount",
+ None,
+ None,
+ "The number of correspondence pairs for this axis",
+ ),
+ (
+ "AxisValueMap",
+ "AxisValueMap",
+ "PositionMapCount",
+ 0,
+ "The array of axis value map records for this axis",
+ ),
+ ],
+ ),
+ (
+ "avar",
+ [
+ (
+ "Version",
+ "Version",
+ None,
+ None,
+ "Version of the avar table- 0x00010000 or 0x00020000",
+ ),
+ ("uint16", "Reserved", None, None, "Permanently reserved; set to zero"),
+ (
+ "uint16",
+ "AxisCount",
+ None,
+ None,
+ 'The number of variation axes for this font. This must be the same number as axisCount in the "fvar" table',
+ ),
+ (
+ "AxisSegmentMap",
+ "AxisSegmentMap",
+ "AxisCount",
+ 0,
+ 'The segment maps array — one segment map for each axis, in the order of axes specified in the "fvar" table',
+ ),
+ (
+ "LOffsetTo(DeltaSetIndexMap)",
+ "VarIdxMap",
+ None,
+ "Version >= 0x00020000",
+ "",
+ ),
+ ("LOffset", "VarStore", None, "Version >= 0x00020000", ""),
+ ],
+ ),
]
diff --git a/Lib/fontTools/ttLib/tables/otTables.py b/Lib/fontTools/ttLib/tables/otTables.py
index 6e7f3dfb..262f8d41 100644
--- a/Lib/fontTools/ttLib/tables/otTables.py
+++ b/Lib/fontTools/ttLib/tables/otTables.py
@@ -7,597 +7,623 @@ converter objects from otConverters.py.
"""
import copy
from enum import IntEnum
+from functools import reduce
+from math import radians
import itertools
from collections import defaultdict, namedtuple
+from fontTools.ttLib.tables.otTraverse import dfs_base_table
+from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
+from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
+from fontTools.pens.boundsPen import ControlBoundsPen
+from fontTools.pens.transformPen import TransformPen
from .otBase import (
- BaseTable, FormatSwitchingBaseTable, ValueRecord, CountReference,
- getFormatSwitchingBaseTableClass,
+ BaseTable,
+ FormatSwitchingBaseTable,
+ ValueRecord,
+ CountReference,
+ getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
+from typing import TYPE_CHECKING, Iterator, List, Optional, Set
+
+if TYPE_CHECKING:
+ from fontTools.ttLib.ttGlyphSet import _TTGlyphSet
log = logging.getLogger(__name__)
class AATStateTable(object):
- def __init__(self):
- self.GlyphClasses = {} # GlyphID --> GlyphClass
- self.States = [] # List of AATState, indexed by state number
- self.PerGlyphLookups = [] # [{GlyphID:GlyphID}, ...]
+ def __init__(self):
+ self.GlyphClasses = {} # GlyphID --> GlyphClass
+ self.States = [] # List of AATState, indexed by state number
+ self.PerGlyphLookups = [] # [{GlyphID:GlyphID}, ...]
class AATState(object):
- def __init__(self):
- self.Transitions = {} # GlyphClass --> AATAction
+ def __init__(self):
+ self.Transitions = {} # GlyphClass --> AATAction
class AATAction(object):
- _FLAGS = None
+ _FLAGS = None
- @staticmethod
- def compileActions(font, states):
- return (None, None)
+ @staticmethod
+ def compileActions(font, states):
+ return (None, None)
- def _writeFlagsToXML(self, xmlWriter):
- flags = [f for f in self._FLAGS if self.__dict__[f]]
- if flags:
- xmlWriter.simpletag("Flags", value=",".join(flags))
- xmlWriter.newline()
- if self.ReservedFlags != 0:
- xmlWriter.simpletag(
- "ReservedFlags",
- value='0x%04X' % self.ReservedFlags)
- xmlWriter.newline()
+ def _writeFlagsToXML(self, xmlWriter):
+ flags = [f for f in self._FLAGS if self.__dict__[f]]
+ if flags:
+ xmlWriter.simpletag("Flags", value=",".join(flags))
+ xmlWriter.newline()
+ if self.ReservedFlags != 0:
+ xmlWriter.simpletag("ReservedFlags", value="0x%04X" % self.ReservedFlags)
+ xmlWriter.newline()
- def _setFlag(self, flag):
- assert flag in self._FLAGS, "unsupported flag %s" % flag
- self.__dict__[flag] = True
+ def _setFlag(self, flag):
+ assert flag in self._FLAGS, "unsupported flag %s" % flag
+ self.__dict__[flag] = True
class RearrangementMorphAction(AATAction):
- staticSize = 4
- actionHeaderSize = 0
- _FLAGS = ["MarkFirst", "DontAdvance", "MarkLast"]
-
- _VERBS = {
- 0: "no change",
- 1: "Ax ⇒ xA",
- 2: "xD ⇒ Dx",
- 3: "AxD ⇒ DxA",
- 4: "ABx ⇒ xAB",
- 5: "ABx ⇒ xBA",
- 6: "xCD ⇒ CDx",
- 7: "xCD ⇒ DCx",
- 8: "AxCD ⇒ CDxA",
- 9: "AxCD ⇒ DCxA",
- 10: "ABxD ⇒ DxAB",
- 11: "ABxD ⇒ DxBA",
- 12: "ABxCD ⇒ CDxAB",
- 13: "ABxCD ⇒ CDxBA",
- 14: "ABxCD ⇒ DCxAB",
- 15: "ABxCD ⇒ DCxBA",
- }
-
- def __init__(self):
- self.NewState = 0
- self.Verb = 0
- self.MarkFirst = False
- self.DontAdvance = False
- self.MarkLast = False
- self.ReservedFlags = 0
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is None
- writer.writeUShort(self.NewState)
- assert self.Verb >= 0 and self.Verb <= 15, self.Verb
- flags = self.Verb | self.ReservedFlags
- if self.MarkFirst: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- if self.MarkLast: flags |= 0x2000
- writer.writeUShort(flags)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.Verb = flags & 0xF
- self.MarkFirst = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- self.MarkLast = bool(flags & 0x2000)
- self.ReservedFlags = flags & 0x1FF0
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- xmlWriter.simpletag("Verb", value=self.Verb)
- verbComment = self._VERBS.get(self.Verb)
- if verbComment is not None:
- xmlWriter.comment(verbComment)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- self.NewState = self.Verb = self.ReservedFlags = 0
- self.MarkFirst = self.DontAdvance = self.MarkLast = False
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Verb":
- self.Verb = safeEval(eltAttrs["value"])
- elif eltName == "ReservedFlags":
- self.ReservedFlags = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
+ staticSize = 4
+ actionHeaderSize = 0
+ _FLAGS = ["MarkFirst", "DontAdvance", "MarkLast"]
+
+ _VERBS = {
+ 0: "no change",
+ 1: "Ax ⇒ xA",
+ 2: "xD ⇒ Dx",
+ 3: "AxD ⇒ DxA",
+ 4: "ABx ⇒ xAB",
+ 5: "ABx ⇒ xBA",
+ 6: "xCD ⇒ CDx",
+ 7: "xCD ⇒ DCx",
+ 8: "AxCD ⇒ CDxA",
+ 9: "AxCD ⇒ DCxA",
+ 10: "ABxD ⇒ DxAB",
+ 11: "ABxD ⇒ DxBA",
+ 12: "ABxCD ⇒ CDxAB",
+ 13: "ABxCD ⇒ CDxBA",
+ 14: "ABxCD ⇒ DCxAB",
+ 15: "ABxCD ⇒ DCxBA",
+ }
+
+ def __init__(self):
+ self.NewState = 0
+ self.Verb = 0
+ self.MarkFirst = False
+ self.DontAdvance = False
+ self.MarkLast = False
+ self.ReservedFlags = 0
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is None
+ writer.writeUShort(self.NewState)
+ assert self.Verb >= 0 and self.Verb <= 15, self.Verb
+ flags = self.Verb | self.ReservedFlags
+ if self.MarkFirst:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ if self.MarkLast:
+ flags |= 0x2000
+ writer.writeUShort(flags)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.Verb = flags & 0xF
+ self.MarkFirst = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ self.MarkLast = bool(flags & 0x2000)
+ self.ReservedFlags = flags & 0x1FF0
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ xmlWriter.simpletag("Verb", value=self.Verb)
+ verbComment = self._VERBS.get(self.Verb)
+ if verbComment is not None:
+ xmlWriter.comment(verbComment)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ self.NewState = self.Verb = self.ReservedFlags = 0
+ self.MarkFirst = self.DontAdvance = self.MarkLast = False
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Verb":
+ self.Verb = safeEval(eltAttrs["value"])
+ elif eltName == "ReservedFlags":
+ self.ReservedFlags = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
class ContextualMorphAction(AATAction):
- staticSize = 8
- actionHeaderSize = 0
- _FLAGS = ["SetMark", "DontAdvance"]
-
- def __init__(self):
- self.NewState = 0
- self.SetMark, self.DontAdvance = False, False
- self.ReservedFlags = 0
- self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is None
- writer.writeUShort(self.NewState)
- flags = self.ReservedFlags
- if self.SetMark: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- writer.writeUShort(flags)
- writer.writeUShort(self.MarkIndex)
- writer.writeUShort(self.CurrentIndex)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.SetMark = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- self.ReservedFlags = flags & 0x3FFF
- self.MarkIndex = reader.readUShort()
- self.CurrentIndex = reader.readUShort()
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- xmlWriter.simpletag("MarkIndex", value=self.MarkIndex)
- xmlWriter.newline()
- xmlWriter.simpletag("CurrentIndex",
- value=self.CurrentIndex)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- self.NewState = self.ReservedFlags = 0
- self.SetMark = self.DontAdvance = False
- self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
- elif eltName == "ReservedFlags":
- self.ReservedFlags = safeEval(eltAttrs["value"])
- elif eltName == "MarkIndex":
- self.MarkIndex = safeEval(eltAttrs["value"])
- elif eltName == "CurrentIndex":
- self.CurrentIndex = safeEval(eltAttrs["value"])
+ staticSize = 8
+ actionHeaderSize = 0
+ _FLAGS = ["SetMark", "DontAdvance"]
+
+ def __init__(self):
+ self.NewState = 0
+ self.SetMark, self.DontAdvance = False, False
+ self.ReservedFlags = 0
+ self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is None
+ writer.writeUShort(self.NewState)
+ flags = self.ReservedFlags
+ if self.SetMark:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ writer.writeUShort(flags)
+ writer.writeUShort(self.MarkIndex)
+ writer.writeUShort(self.CurrentIndex)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.SetMark = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ self.ReservedFlags = flags & 0x3FFF
+ self.MarkIndex = reader.readUShort()
+ self.CurrentIndex = reader.readUShort()
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ xmlWriter.simpletag("MarkIndex", value=self.MarkIndex)
+ xmlWriter.newline()
+ xmlWriter.simpletag("CurrentIndex", value=self.CurrentIndex)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ self.NewState = self.ReservedFlags = 0
+ self.SetMark = self.DontAdvance = False
+ self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
+ elif eltName == "ReservedFlags":
+ self.ReservedFlags = safeEval(eltAttrs["value"])
+ elif eltName == "MarkIndex":
+ self.MarkIndex = safeEval(eltAttrs["value"])
+ elif eltName == "CurrentIndex":
+ self.CurrentIndex = safeEval(eltAttrs["value"])
class LigAction(object):
- def __init__(self):
- self.Store = False
- # GlyphIndexDelta is a (possibly negative) delta that gets
- # added to the glyph ID at the top of the AAT runtime
- # execution stack. It is *not* a byte offset into the
- # morx table. The result of the addition, which is performed
- # at run time by the shaping engine, is an index into
- # the ligature components table. See 'morx' specification.
- # In the AAT specification, this field is called Offset;
- # but its meaning is quite different from other offsets
- # in either AAT or OpenType, so we use a different name.
- self.GlyphIndexDelta = 0
+ def __init__(self):
+ self.Store = False
+ # GlyphIndexDelta is a (possibly negative) delta that gets
+ # added to the glyph ID at the top of the AAT runtime
+ # execution stack. It is *not* a byte offset into the
+ # morx table. The result of the addition, which is performed
+ # at run time by the shaping engine, is an index into
+ # the ligature components table. See 'morx' specification.
+ # In the AAT specification, this field is called Offset;
+ # but its meaning is quite different from other offsets
+ # in either AAT or OpenType, so we use a different name.
+ self.GlyphIndexDelta = 0
class LigatureMorphAction(AATAction):
- staticSize = 6
-
- # 4 bytes for each of {action,ligComponents,ligatures}Offset
- actionHeaderSize = 12
-
- _FLAGS = ["SetComponent", "DontAdvance"]
-
- def __init__(self):
- self.NewState = 0
- self.SetComponent, self.DontAdvance = False, False
- self.ReservedFlags = 0
- self.Actions = []
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is not None
- writer.writeUShort(self.NewState)
- flags = self.ReservedFlags
- if self.SetComponent: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- if len(self.Actions) > 0: flags |= 0x2000
- writer.writeUShort(flags)
- if len(self.Actions) > 0:
- actions = self.compileLigActions()
- writer.writeUShort(actionIndex[actions])
- else:
- writer.writeUShort(0)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is not None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.SetComponent = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- performAction = bool(flags & 0x2000)
- # As of 2017-09-12, the 'morx' specification says that
- # the reserved bitmask in ligature subtables is 0x3FFF.
- # However, the specification also defines a flag 0x2000,
- # so the reserved value should actually be 0x1FFF.
- # TODO: Report this specification bug to Apple.
- self.ReservedFlags = flags & 0x1FFF
- actionIndex = reader.readUShort()
- if performAction:
- self.Actions = self._decompileLigActions(
- actionReader, actionIndex)
- else:
- self.Actions = []
-
- @staticmethod
- def compileActions(font, states):
- result, actions, actionIndex = b"", set(), {}
- for state in states:
- for _glyphClass, trans in state.Transitions.items():
- actions.add(trans.compileLigActions())
- # Sort the compiled actions in decreasing order of
- # length, so that the longer sequence come before the
- # shorter ones. For each compiled action ABCD, its
- # suffixes BCD, CD, and D do not be encoded separately
- # (in case they occur); instead, we can just store an
- # index that points into the middle of the longer
- # sequence. Every compiled AAT ligature sequence is
- # terminated with an end-of-sequence flag, which can
- # only be set on the last element of the sequence.
- # Therefore, it is sufficient to consider just the
- # suffixes.
- for a in sorted(actions, key=lambda x:(-len(x), x)):
- if a not in actionIndex:
- for i in range(0, len(a), 4):
- suffix = a[i:]
- suffixIndex = (len(result) + i) // 4
- actionIndex.setdefault(
- suffix, suffixIndex)
- result += a
- result = pad(result, 4)
- return (result, actionIndex)
-
- def compileLigActions(self):
- result = []
- for i, action in enumerate(self.Actions):
- last = (i == len(self.Actions) - 1)
- value = action.GlyphIndexDelta & 0x3FFFFFFF
- value |= 0x80000000 if last else 0
- value |= 0x40000000 if action.Store else 0
- result.append(struct.pack(">L", value))
- return bytesjoin(result)
-
- def _decompileLigActions(self, actionReader, actionIndex):
- actions = []
- last = False
- reader = actionReader.getSubReader(
- actionReader.pos + actionIndex * 4)
- while not last:
- value = reader.readULong()
- last = bool(value & 0x80000000)
- action = LigAction()
- actions.append(action)
- action.Store = bool(value & 0x40000000)
- delta = value & 0x3FFFFFFF
- if delta >= 0x20000000: # sign-extend 30-bit value
- delta = -0x40000000 + delta
- action.GlyphIndexDelta = delta
- return actions
-
- def fromXML(self, name, attrs, content, font):
- self.NewState = self.ReservedFlags = 0
- self.SetComponent = self.DontAdvance = False
- self.ReservedFlags = 0
- self.Actions = []
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
- elif eltName == "ReservedFlags":
- self.ReservedFlags = safeEval(eltAttrs["value"])
- elif eltName == "Action":
- action = LigAction()
- flags = eltAttrs.get("Flags", "").split(",")
- flags = [f.strip() for f in flags]
- action.Store = "Store" in flags
- action.GlyphIndexDelta = safeEval(
- eltAttrs["GlyphIndexDelta"])
- self.Actions.append(action)
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- for action in self.Actions:
- attribs = [("GlyphIndexDelta", action.GlyphIndexDelta)]
- if action.Store:
- attribs.append(("Flags", "Store"))
- xmlWriter.simpletag("Action", attribs)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
+ staticSize = 6
+
+ # 4 bytes for each of {action,ligComponents,ligatures}Offset
+ actionHeaderSize = 12
+
+ _FLAGS = ["SetComponent", "DontAdvance"]
+
+ def __init__(self):
+ self.NewState = 0
+ self.SetComponent, self.DontAdvance = False, False
+ self.ReservedFlags = 0
+ self.Actions = []
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is not None
+ writer.writeUShort(self.NewState)
+ flags = self.ReservedFlags
+ if self.SetComponent:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ if len(self.Actions) > 0:
+ flags |= 0x2000
+ writer.writeUShort(flags)
+ if len(self.Actions) > 0:
+ actions = self.compileLigActions()
+ writer.writeUShort(actionIndex[actions])
+ else:
+ writer.writeUShort(0)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is not None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.SetComponent = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ performAction = bool(flags & 0x2000)
+ # As of 2017-09-12, the 'morx' specification says that
+ # the reserved bitmask in ligature subtables is 0x3FFF.
+ # However, the specification also defines a flag 0x2000,
+ # so the reserved value should actually be 0x1FFF.
+ # TODO: Report this specification bug to Apple.
+ self.ReservedFlags = flags & 0x1FFF
+ actionIndex = reader.readUShort()
+ if performAction:
+ self.Actions = self._decompileLigActions(actionReader, actionIndex)
+ else:
+ self.Actions = []
+
+ @staticmethod
+ def compileActions(font, states):
+ result, actions, actionIndex = b"", set(), {}
+ for state in states:
+ for _glyphClass, trans in state.Transitions.items():
+ actions.add(trans.compileLigActions())
+ # Sort the compiled actions in decreasing order of
+ # length, so that the longer sequence come before the
+ # shorter ones. For each compiled action ABCD, its
+ # suffixes BCD, CD, and D do not be encoded separately
+ # (in case they occur); instead, we can just store an
+ # index that points into the middle of the longer
+ # sequence. Every compiled AAT ligature sequence is
+ # terminated with an end-of-sequence flag, which can
+ # only be set on the last element of the sequence.
+ # Therefore, it is sufficient to consider just the
+ # suffixes.
+ for a in sorted(actions, key=lambda x: (-len(x), x)):
+ if a not in actionIndex:
+ for i in range(0, len(a), 4):
+ suffix = a[i:]
+ suffixIndex = (len(result) + i) // 4
+ actionIndex.setdefault(suffix, suffixIndex)
+ result += a
+ result = pad(result, 4)
+ return (result, actionIndex)
+
+ def compileLigActions(self):
+ result = []
+ for i, action in enumerate(self.Actions):
+ last = i == len(self.Actions) - 1
+ value = action.GlyphIndexDelta & 0x3FFFFFFF
+ value |= 0x80000000 if last else 0
+ value |= 0x40000000 if action.Store else 0
+ result.append(struct.pack(">L", value))
+ return bytesjoin(result)
+
+ def _decompileLigActions(self, actionReader, actionIndex):
+ actions = []
+ last = False
+ reader = actionReader.getSubReader(actionReader.pos + actionIndex * 4)
+ while not last:
+ value = reader.readULong()
+ last = bool(value & 0x80000000)
+ action = LigAction()
+ actions.append(action)
+ action.Store = bool(value & 0x40000000)
+ delta = value & 0x3FFFFFFF
+ if delta >= 0x20000000: # sign-extend 30-bit value
+ delta = -0x40000000 + delta
+ action.GlyphIndexDelta = delta
+ return actions
+
+ def fromXML(self, name, attrs, content, font):
+ self.NewState = self.ReservedFlags = 0
+ self.SetComponent = self.DontAdvance = False
+ self.ReservedFlags = 0
+ self.Actions = []
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
+ elif eltName == "ReservedFlags":
+ self.ReservedFlags = safeEval(eltAttrs["value"])
+ elif eltName == "Action":
+ action = LigAction()
+ flags = eltAttrs.get("Flags", "").split(",")
+ flags = [f.strip() for f in flags]
+ action.Store = "Store" in flags
+ action.GlyphIndexDelta = safeEval(eltAttrs["GlyphIndexDelta"])
+ self.Actions.append(action)
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ for action in self.Actions:
+ attribs = [("GlyphIndexDelta", action.GlyphIndexDelta)]
+ if action.Store:
+ attribs.append(("Flags", "Store"))
+ xmlWriter.simpletag("Action", attribs)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
class InsertionMorphAction(AATAction):
- staticSize = 8
- actionHeaderSize = 4 # 4 bytes for actionOffset
- _FLAGS = ["SetMark", "DontAdvance",
- "CurrentIsKashidaLike", "MarkedIsKashidaLike",
- "CurrentInsertBefore", "MarkedInsertBefore"]
-
- def __init__(self):
- self.NewState = 0
- for flag in self._FLAGS:
- setattr(self, flag, False)
- self.ReservedFlags = 0
- self.CurrentInsertionAction, self.MarkedInsertionAction = [], []
-
- def compile(self, writer, font, actionIndex):
- assert actionIndex is not None
- writer.writeUShort(self.NewState)
- flags = self.ReservedFlags
- if self.SetMark: flags |= 0x8000
- if self.DontAdvance: flags |= 0x4000
- if self.CurrentIsKashidaLike: flags |= 0x2000
- if self.MarkedIsKashidaLike: flags |= 0x1000
- if self.CurrentInsertBefore: flags |= 0x0800
- if self.MarkedInsertBefore: flags |= 0x0400
- flags |= len(self.CurrentInsertionAction) << 5
- flags |= len(self.MarkedInsertionAction)
- writer.writeUShort(flags)
- if len(self.CurrentInsertionAction) > 0:
- currentIndex = actionIndex[
- tuple(self.CurrentInsertionAction)]
- else:
- currentIndex = 0xFFFF
- writer.writeUShort(currentIndex)
- if len(self.MarkedInsertionAction) > 0:
- markedIndex = actionIndex[
- tuple(self.MarkedInsertionAction)]
- else:
- markedIndex = 0xFFFF
- writer.writeUShort(markedIndex)
-
- def decompile(self, reader, font, actionReader):
- assert actionReader is not None
- self.NewState = reader.readUShort()
- flags = reader.readUShort()
- self.SetMark = bool(flags & 0x8000)
- self.DontAdvance = bool(flags & 0x4000)
- self.CurrentIsKashidaLike = bool(flags & 0x2000)
- self.MarkedIsKashidaLike = bool(flags & 0x1000)
- self.CurrentInsertBefore = bool(flags & 0x0800)
- self.MarkedInsertBefore = bool(flags & 0x0400)
- self.CurrentInsertionAction = self._decompileInsertionAction(
- actionReader, font,
- index=reader.readUShort(),
- count=((flags & 0x03E0) >> 5))
- self.MarkedInsertionAction = self._decompileInsertionAction(
- actionReader, font,
- index=reader.readUShort(),
- count=(flags & 0x001F))
-
- def _decompileInsertionAction(self, actionReader, font, index, count):
- if index == 0xFFFF or count == 0:
- return []
- reader = actionReader.getSubReader(
- actionReader.pos + index * 2)
- return font.getGlyphNameMany(reader.readUShortArray(count))
-
- def toXML(self, xmlWriter, font, attrs, name):
- xmlWriter.begintag(name, **attrs)
- xmlWriter.newline()
- xmlWriter.simpletag("NewState", value=self.NewState)
- xmlWriter.newline()
- self._writeFlagsToXML(xmlWriter)
- for g in self.CurrentInsertionAction:
- xmlWriter.simpletag("CurrentInsertionAction", glyph=g)
- xmlWriter.newline()
- for g in self.MarkedInsertionAction:
- xmlWriter.simpletag("MarkedInsertionAction", glyph=g)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- self.__init__()
- content = [t for t in content if isinstance(t, tuple)]
- for eltName, eltAttrs, eltContent in content:
- if eltName == "NewState":
- self.NewState = safeEval(eltAttrs["value"])
- elif eltName == "Flags":
- for flag in eltAttrs["value"].split(","):
- self._setFlag(flag.strip())
- elif eltName == "CurrentInsertionAction":
- self.CurrentInsertionAction.append(
- eltAttrs["glyph"])
- elif eltName == "MarkedInsertionAction":
- self.MarkedInsertionAction.append(
- eltAttrs["glyph"])
- else:
- assert False, eltName
-
- @staticmethod
- def compileActions(font, states):
- actions, actionIndex, result = set(), {}, b""
- for state in states:
- for _glyphClass, trans in state.Transitions.items():
- if trans.CurrentInsertionAction is not None:
- actions.add(tuple(trans.CurrentInsertionAction))
- if trans.MarkedInsertionAction is not None:
- actions.add(tuple(trans.MarkedInsertionAction))
- # Sort the compiled actions in decreasing order of
- # length, so that the longer sequence come before the
- # shorter ones.
- for action in sorted(actions, key=lambda x:(-len(x), x)):
- # We insert all sub-sequences of the action glyph sequence
- # into actionIndex. For example, if one action triggers on
- # glyph sequence [A, B, C, D, E] and another action triggers
- # on [C, D], we return result=[A, B, C, D, E] (as list of
- # encoded glyph IDs), and actionIndex={('A','B','C','D','E'): 0,
- # ('C','D'): 2}.
- if action in actionIndex:
- continue
- for start in range(0, len(action)):
- startIndex = (len(result) // 2) + start
- for limit in range(start, len(action)):
- glyphs = action[start : limit + 1]
- actionIndex.setdefault(glyphs, startIndex)
- for glyph in action:
- glyphID = font.getGlyphID(glyph)
- result += struct.pack(">H", glyphID)
- return result, actionIndex
+ staticSize = 8
+ actionHeaderSize = 4 # 4 bytes for actionOffset
+ _FLAGS = [
+ "SetMark",
+ "DontAdvance",
+ "CurrentIsKashidaLike",
+ "MarkedIsKashidaLike",
+ "CurrentInsertBefore",
+ "MarkedInsertBefore",
+ ]
+
+ def __init__(self):
+ self.NewState = 0
+ for flag in self._FLAGS:
+ setattr(self, flag, False)
+ self.ReservedFlags = 0
+ self.CurrentInsertionAction, self.MarkedInsertionAction = [], []
+
+ def compile(self, writer, font, actionIndex):
+ assert actionIndex is not None
+ writer.writeUShort(self.NewState)
+ flags = self.ReservedFlags
+ if self.SetMark:
+ flags |= 0x8000
+ if self.DontAdvance:
+ flags |= 0x4000
+ if self.CurrentIsKashidaLike:
+ flags |= 0x2000
+ if self.MarkedIsKashidaLike:
+ flags |= 0x1000
+ if self.CurrentInsertBefore:
+ flags |= 0x0800
+ if self.MarkedInsertBefore:
+ flags |= 0x0400
+ flags |= len(self.CurrentInsertionAction) << 5
+ flags |= len(self.MarkedInsertionAction)
+ writer.writeUShort(flags)
+ if len(self.CurrentInsertionAction) > 0:
+ currentIndex = actionIndex[tuple(self.CurrentInsertionAction)]
+ else:
+ currentIndex = 0xFFFF
+ writer.writeUShort(currentIndex)
+ if len(self.MarkedInsertionAction) > 0:
+ markedIndex = actionIndex[tuple(self.MarkedInsertionAction)]
+ else:
+ markedIndex = 0xFFFF
+ writer.writeUShort(markedIndex)
+
+ def decompile(self, reader, font, actionReader):
+ assert actionReader is not None
+ self.NewState = reader.readUShort()
+ flags = reader.readUShort()
+ self.SetMark = bool(flags & 0x8000)
+ self.DontAdvance = bool(flags & 0x4000)
+ self.CurrentIsKashidaLike = bool(flags & 0x2000)
+ self.MarkedIsKashidaLike = bool(flags & 0x1000)
+ self.CurrentInsertBefore = bool(flags & 0x0800)
+ self.MarkedInsertBefore = bool(flags & 0x0400)
+ self.CurrentInsertionAction = self._decompileInsertionAction(
+ actionReader, font, index=reader.readUShort(), count=((flags & 0x03E0) >> 5)
+ )
+ self.MarkedInsertionAction = self._decompileInsertionAction(
+ actionReader, font, index=reader.readUShort(), count=(flags & 0x001F)
+ )
+
+ def _decompileInsertionAction(self, actionReader, font, index, count):
+ if index == 0xFFFF or count == 0:
+ return []
+ reader = actionReader.getSubReader(actionReader.pos + index * 2)
+ return font.getGlyphNameMany(reader.readUShortArray(count))
+
+ def toXML(self, xmlWriter, font, attrs, name):
+ xmlWriter.begintag(name, **attrs)
+ xmlWriter.newline()
+ xmlWriter.simpletag("NewState", value=self.NewState)
+ xmlWriter.newline()
+ self._writeFlagsToXML(xmlWriter)
+ for g in self.CurrentInsertionAction:
+ xmlWriter.simpletag("CurrentInsertionAction", glyph=g)
+ xmlWriter.newline()
+ for g in self.MarkedInsertionAction:
+ xmlWriter.simpletag("MarkedInsertionAction", glyph=g)
+ xmlWriter.newline()
+ xmlWriter.endtag(name)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ self.__init__()
+ content = [t for t in content if isinstance(t, tuple)]
+ for eltName, eltAttrs, eltContent in content:
+ if eltName == "NewState":
+ self.NewState = safeEval(eltAttrs["value"])
+ elif eltName == "Flags":
+ for flag in eltAttrs["value"].split(","):
+ self._setFlag(flag.strip())
+ elif eltName == "CurrentInsertionAction":
+ self.CurrentInsertionAction.append(eltAttrs["glyph"])
+ elif eltName == "MarkedInsertionAction":
+ self.MarkedInsertionAction.append(eltAttrs["glyph"])
+ else:
+ assert False, eltName
+
+ @staticmethod
+ def compileActions(font, states):
+ actions, actionIndex, result = set(), {}, b""
+ for state in states:
+ for _glyphClass, trans in state.Transitions.items():
+ if trans.CurrentInsertionAction is not None:
+ actions.add(tuple(trans.CurrentInsertionAction))
+ if trans.MarkedInsertionAction is not None:
+ actions.add(tuple(trans.MarkedInsertionAction))
+ # Sort the compiled actions in decreasing order of
+ # length, so that the longer sequence come before the
+ # shorter ones.
+ for action in sorted(actions, key=lambda x: (-len(x), x)):
+ # We insert all sub-sequences of the action glyph sequence
+ # into actionIndex. For example, if one action triggers on
+ # glyph sequence [A, B, C, D, E] and another action triggers
+ # on [C, D], we return result=[A, B, C, D, E] (as list of
+ # encoded glyph IDs), and actionIndex={('A','B','C','D','E'): 0,
+ # ('C','D'): 2}.
+ if action in actionIndex:
+ continue
+ for start in range(0, len(action)):
+ startIndex = (len(result) // 2) + start
+ for limit in range(start, len(action)):
+ glyphs = action[start : limit + 1]
+ actionIndex.setdefault(glyphs, startIndex)
+ for glyph in action:
+ glyphID = font.getGlyphID(glyph)
+ result += struct.pack(">H", glyphID)
+ return result, actionIndex
class FeatureParams(BaseTable):
+ def compile(self, writer, font):
+ assert (
+ featureParamTypes.get(writer["FeatureTag"]) == self.__class__
+ ), "Wrong FeatureParams type for feature '%s': %s" % (
+ writer["FeatureTag"],
+ self.__class__.__name__,
+ )
+ BaseTable.compile(self, writer, font)
- def compile(self, writer, font):
- assert featureParamTypes.get(writer['FeatureTag']) == self.__class__, "Wrong FeatureParams type for feature '%s': %s" % (writer['FeatureTag'], self.__class__.__name__)
- BaseTable.compile(self, writer, font)
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__)
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__)
class FeatureParamsSize(FeatureParams):
- pass
+ pass
+
class FeatureParamsStylisticSet(FeatureParams):
- pass
+ pass
+
class FeatureParamsCharacterVariants(FeatureParams):
- pass
+ pass
-class Coverage(FormatSwitchingBaseTable):
- # manual implementation to get rid of glyphID dependencies
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'glyphs'):
- self.glyphs = []
-
- def postRead(self, rawTable, font):
- if self.Format == 1:
- self.glyphs = rawTable["GlyphArray"]
- elif self.Format == 2:
- glyphs = self.glyphs = []
- ranges = rawTable["RangeRecord"]
- # Some SIL fonts have coverage entries that don't have sorted
- # StartCoverageIndex. If it is so, fixup and warn. We undo
- # this when writing font out.
- sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex)
- if ranges != sorted_ranges:
- log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
- ranges = sorted_ranges
- del sorted_ranges
- for r in ranges:
- start = r.Start
- end = r.End
- startID = font.getGlyphID(start)
- endID = font.getGlyphID(end) + 1
- glyphs.extend(font.getGlyphNameMany(range(startID, endID)))
- else:
- self.glyphs = []
- log.warning("Unknown Coverage format: %s", self.Format)
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- glyphs = getattr(self, "glyphs", None)
- if glyphs is None:
- glyphs = self.glyphs = []
- format = 1
- rawTable = {"GlyphArray": glyphs}
- if glyphs:
- # find out whether Format 2 is more compact or not
- glyphIDs = font.getGlyphIDMany(glyphs)
- brokenOrder = sorted(glyphIDs) != glyphIDs
-
- last = glyphIDs[0]
- ranges = [[last]]
- for glyphID in glyphIDs[1:]:
- if glyphID != last + 1:
- ranges[-1].append(last)
- ranges.append([glyphID])
- last = glyphID
- ranges[-1].append(last)
-
- if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 1 word
- # Format 2 is more compact
- index = 0
- for i in range(len(ranges)):
- start, end = ranges[i]
- r = RangeRecord()
- r.StartID = start
- r.Start = font.getGlyphName(start)
- r.End = font.getGlyphName(end)
- r.StartCoverageIndex = index
- ranges[i] = r
- index = index + end - start + 1
- if brokenOrder:
- log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
- ranges.sort(key=lambda a: a.StartID)
- for r in ranges:
- del r.StartID
- format = 2
- rawTable = {"RangeRecord": ranges}
- #else:
- # fallthrough; Format 1 is more compact
- self.Format = format
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- for glyphName in getattr(self, "glyphs", []):
- xmlWriter.simpletag("Glyph", value=glyphName)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- glyphs = getattr(self, "glyphs", None)
- if glyphs is None:
- glyphs = []
- self.glyphs = glyphs
- glyphs.append(attrs["value"])
+class Coverage(FormatSwitchingBaseTable):
+ # manual implementation to get rid of glyphID dependencies
+
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "glyphs"):
+ self.glyphs = []
+
+ def postRead(self, rawTable, font):
+ if self.Format == 1:
+ self.glyphs = rawTable["GlyphArray"]
+ elif self.Format == 2:
+ glyphs = self.glyphs = []
+ ranges = rawTable["RangeRecord"]
+ # Some SIL fonts have coverage entries that don't have sorted
+ # StartCoverageIndex. If it is so, fixup and warn. We undo
+ # this when writing font out.
+ sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex)
+ if ranges != sorted_ranges:
+ log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
+ ranges = sorted_ranges
+ del sorted_ranges
+ for r in ranges:
+ start = r.Start
+ end = r.End
+ startID = font.getGlyphID(start)
+ endID = font.getGlyphID(end) + 1
+ glyphs.extend(font.getGlyphNameMany(range(startID, endID)))
+ else:
+ self.glyphs = []
+ log.warning("Unknown Coverage format: %s", self.Format)
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ glyphs = getattr(self, "glyphs", None)
+ if glyphs is None:
+ glyphs = self.glyphs = []
+ format = 1
+ rawTable = {"GlyphArray": glyphs}
+ if glyphs:
+ # find out whether Format 2 is more compact or not
+ glyphIDs = font.getGlyphIDMany(glyphs)
+ brokenOrder = sorted(glyphIDs) != glyphIDs
+
+ last = glyphIDs[0]
+ ranges = [[last]]
+ for glyphID in glyphIDs[1:]:
+ if glyphID != last + 1:
+ ranges[-1].append(last)
+ ranges.append([glyphID])
+ last = glyphID
+ ranges[-1].append(last)
+
+ if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 1 word
+ # Format 2 is more compact
+ index = 0
+ for i in range(len(ranges)):
+ start, end = ranges[i]
+ r = RangeRecord()
+ r.StartID = start
+ r.Start = font.getGlyphName(start)
+ r.End = font.getGlyphName(end)
+ r.StartCoverageIndex = index
+ ranges[i] = r
+ index = index + end - start + 1
+ if brokenOrder:
+ log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.")
+ ranges.sort(key=lambda a: a.StartID)
+ for r in ranges:
+ del r.StartID
+ format = 2
+ rawTable = {"RangeRecord": ranges}
+ # else:
+ # fallthrough; Format 1 is more compact
+ self.Format = format
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ for glyphName in getattr(self, "glyphs", []):
+ xmlWriter.simpletag("Glyph", value=glyphName)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ glyphs = getattr(self, "glyphs", None)
+ if glyphs is None:
+ glyphs = []
+ self.glyphs = glyphs
+ glyphs.append(attrs["value"])
# The special 0xFFFFFFFF delta-set index is used to indicate that there
@@ -606,986 +632,1077 @@ NO_VARIATION_INDEX = 0xFFFFFFFF
class DeltaSetIndexMap(getFormatSwitchingBaseTableClass("uint8")):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = []
-
- def postRead(self, rawTable, font):
- assert (rawTable['EntryFormat'] & 0xFFC0) == 0
- self.mapping = rawTable['mapping']
-
- @staticmethod
- def getEntryFormat(mapping):
- ored = 0
- for idx in mapping:
- ored |= idx
-
- inner = ored & 0xFFFF
- innerBits = 0
- while inner:
- innerBits += 1
- inner >>= 1
- innerBits = max(innerBits, 1)
- assert innerBits <= 16
-
- ored = (ored >> (16-innerBits)) | (ored & ((1<<innerBits)-1))
- if ored <= 0x000000FF:
- entrySize = 1
- elif ored <= 0x0000FFFF:
- entrySize = 2
- elif ored <= 0x00FFFFFF:
- entrySize = 3
- else:
- entrySize = 4
-
- return ((entrySize - 1) << 4) | (innerBits - 1)
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = []
- self.Format = 1 if len(mapping) > 0xFFFF else 0
- rawTable = self.__dict__.copy()
- rawTable['MappingCount'] = len(mapping)
- rawTable['EntryFormat'] = self.getEntryFormat(mapping)
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- # Make xml dump less verbose, by omitting no-op entries like:
- # <Map index="..." outer="65535" inner="65535"/>
- xmlWriter.comment(
- "Omitted values default to 0xFFFF/0xFFFF (no variations)"
- )
- xmlWriter.newline()
- for i, value in enumerate(getattr(self, "mapping", [])):
- attrs = [('index', i)]
- if value != NO_VARIATION_INDEX:
- attrs.extend([
- ('outer', value >> 16),
- ('inner', value & 0xFFFF),
- ])
- xmlWriter.simpletag("Map", attrs)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- self.mapping = mapping = []
- index = safeEval(attrs['index'])
- outer = safeEval(attrs.get('outer', '0xFFFF'))
- inner = safeEval(attrs.get('inner', '0xFFFF'))
- assert inner <= 0xFFFF
- mapping.insert(index, (outer << 16) | inner)
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = []
+
+ def postRead(self, rawTable, font):
+ assert (rawTable["EntryFormat"] & 0xFFC0) == 0
+ self.mapping = rawTable["mapping"]
+
+ @staticmethod
+ def getEntryFormat(mapping):
+ ored = 0
+ for idx in mapping:
+ ored |= idx
+
+ inner = ored & 0xFFFF
+ innerBits = 0
+ while inner:
+ innerBits += 1
+ inner >>= 1
+ innerBits = max(innerBits, 1)
+ assert innerBits <= 16
+
+ ored = (ored >> (16 - innerBits)) | (ored & ((1 << innerBits) - 1))
+ if ored <= 0x000000FF:
+ entrySize = 1
+ elif ored <= 0x0000FFFF:
+ entrySize = 2
+ elif ored <= 0x00FFFFFF:
+ entrySize = 3
+ else:
+ entrySize = 4
+
+ return ((entrySize - 1) << 4) | (innerBits - 1)
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = []
+ self.Format = 1 if len(mapping) > 0xFFFF else 0
+ rawTable = self.__dict__.copy()
+ rawTable["MappingCount"] = len(mapping)
+ rawTable["EntryFormat"] = self.getEntryFormat(mapping)
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ # Make xml dump less verbose, by omitting no-op entries like:
+ # <Map index="..." outer="65535" inner="65535"/>
+ xmlWriter.comment("Omitted values default to 0xFFFF/0xFFFF (no variations)")
+ xmlWriter.newline()
+ for i, value in enumerate(getattr(self, "mapping", [])):
+ attrs = [("index", i)]
+ if value != NO_VARIATION_INDEX:
+ attrs.extend(
+ [
+ ("outer", value >> 16),
+ ("inner", value & 0xFFFF),
+ ]
+ )
+ xmlWriter.simpletag("Map", attrs)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ self.mapping = mapping = []
+ index = safeEval(attrs["index"])
+ outer = safeEval(attrs.get("outer", "0xFFFF"))
+ inner = safeEval(attrs.get("inner", "0xFFFF"))
+ assert inner <= 0xFFFF
+ mapping.insert(index, (outer << 16) | inner)
class VarIdxMap(BaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = {}
-
- def postRead(self, rawTable, font):
- assert (rawTable['EntryFormat'] & 0xFFC0) == 0
- glyphOrder = font.getGlyphOrder()
- mapList = rawTable['mapping']
- mapList.extend([mapList[-1]] * (len(glyphOrder) - len(mapList)))
- self.mapping = dict(zip(glyphOrder, mapList))
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = {}
-
- glyphOrder = font.getGlyphOrder()
- mapping = [mapping[g] for g in glyphOrder]
- while len(mapping) > 1 and mapping[-2] == mapping[-1]:
- del mapping[-1]
-
- rawTable = {'mapping': mapping}
- rawTable['MappingCount'] = len(mapping)
- rawTable['EntryFormat'] = DeltaSetIndexMap.getEntryFormat(mapping)
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- for glyph, value in sorted(getattr(self, "mapping", {}).items()):
- attrs = (
- ('glyph', glyph),
- ('outer', value >> 16),
- ('inner', value & 0xFFFF),
- )
- xmlWriter.simpletag("Map", attrs)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = {}
- self.mapping = mapping
- try:
- glyph = attrs['glyph']
- except: # https://github.com/fonttools/fonttools/commit/21cbab8ce9ded3356fef3745122da64dcaf314e9#commitcomment-27649836
- glyph = font.getGlyphOrder()[attrs['index']]
- outer = safeEval(attrs['outer'])
- inner = safeEval(attrs['inner'])
- assert inner <= 0xFFFF
- mapping[glyph] = (outer << 16) | inner
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = {}
+
+ def postRead(self, rawTable, font):
+ assert (rawTable["EntryFormat"] & 0xFFC0) == 0
+ glyphOrder = font.getGlyphOrder()
+ mapList = rawTable["mapping"]
+ mapList.extend([mapList[-1]] * (len(glyphOrder) - len(mapList)))
+ self.mapping = dict(zip(glyphOrder, mapList))
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = {}
+
+ glyphOrder = font.getGlyphOrder()
+ mapping = [mapping[g] for g in glyphOrder]
+ while len(mapping) > 1 and mapping[-2] == mapping[-1]:
+ del mapping[-1]
+
+ rawTable = {"mapping": mapping}
+ rawTable["MappingCount"] = len(mapping)
+ rawTable["EntryFormat"] = DeltaSetIndexMap.getEntryFormat(mapping)
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ for glyph, value in sorted(getattr(self, "mapping", {}).items()):
+ attrs = (
+ ("glyph", glyph),
+ ("outer", value >> 16),
+ ("inner", value & 0xFFFF),
+ )
+ xmlWriter.simpletag("Map", attrs)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = {}
+ self.mapping = mapping
+ try:
+ glyph = attrs["glyph"]
+ except: # https://github.com/fonttools/fonttools/commit/21cbab8ce9ded3356fef3745122da64dcaf314e9#commitcomment-27649836
+ glyph = font.getGlyphOrder()[attrs["index"]]
+ outer = safeEval(attrs["outer"])
+ inner = safeEval(attrs["inner"])
+ assert inner <= 0xFFFF
+ mapping[glyph] = (outer << 16) | inner
class VarRegionList(BaseTable):
-
- def preWrite(self, font):
- # The OT spec says VarStore.VarRegionList.RegionAxisCount should always
- # be equal to the fvar.axisCount, and OTS < v8.0.0 enforces this rule
- # even when the VarRegionList is empty. We can't treat RegionAxisCount
- # like a normal propagated count (== len(Region[i].VarRegionAxis)),
- # otherwise it would default to 0 if VarRegionList is empty.
- # Thus, we force it to always be equal to fvar.axisCount.
- # https://github.com/khaledhosny/ots/pull/192
- fvarTable = font.get("fvar")
- if fvarTable:
- self.RegionAxisCount = len(fvarTable.axes)
- return {
- **self.__dict__,
- "RegionAxisCount": CountReference(self.__dict__, "RegionAxisCount")
- }
+ def preWrite(self, font):
+ # The OT spec says VarStore.VarRegionList.RegionAxisCount should always
+ # be equal to the fvar.axisCount, and OTS < v8.0.0 enforces this rule
+ # even when the VarRegionList is empty. We can't treat RegionAxisCount
+ # like a normal propagated count (== len(Region[i].VarRegionAxis)),
+ # otherwise it would default to 0 if VarRegionList is empty.
+ # Thus, we force it to always be equal to fvar.axisCount.
+ # https://github.com/khaledhosny/ots/pull/192
+ fvarTable = font.get("fvar")
+ if fvarTable:
+ self.RegionAxisCount = len(fvarTable.axes)
+ return {
+ **self.__dict__,
+ "RegionAxisCount": CountReference(self.__dict__, "RegionAxisCount"),
+ }
class SingleSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = {}
-
- def postRead(self, rawTable, font):
- mapping = {}
- input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- if self.Format == 1:
- delta = rawTable["DeltaGlyphID"]
- inputGIDS = font.getGlyphIDMany(input)
- outGIDS = [ (glyphID + delta) % 65536 for glyphID in inputGIDS ]
- outNames = font.getGlyphNameMany(outGIDS)
- for inp, out in zip(input, outNames):
- mapping[inp] = out
- elif self.Format == 2:
- assert len(input) == rawTable["GlyphCount"], \
- "invalid SingleSubstFormat2 table"
- subst = rawTable["Substitute"]
- for inp, sub in zip(input, subst):
- mapping[inp] = sub
- else:
- assert 0, "unknown format: %s" % self.Format
- self.mapping = mapping
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = {}
- items = list(mapping.items())
- getGlyphID = font.getGlyphID
- gidItems = [(getGlyphID(a), getGlyphID(b)) for a,b in items]
- sortableItems = sorted(zip(gidItems, items))
-
- # figure out format
- format = 2
- delta = None
- for inID, outID in gidItems:
- if delta is None:
- delta = (outID - inID) % 65536
-
- if (inID + delta) % 65536 != outID:
- break
- else:
- if delta is None:
- # the mapping is empty, better use format 2
- format = 2
- else:
- format = 1
-
- rawTable = {}
- self.Format = format
- cov = Coverage()
- input = [ item [1][0] for item in sortableItems]
- subst = [ item [1][1] for item in sortableItems]
- cov.glyphs = input
- rawTable["Coverage"] = cov
- if format == 1:
- assert delta is not None
- rawTable["DeltaGlyphID"] = delta
- else:
- rawTable["Substitute"] = subst
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.mapping.items())
- for inGlyph, outGlyph in items:
- xmlWriter.simpletag("Substitution",
- [("in", inGlyph), ("out", outGlyph)])
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = {}
- self.mapping = mapping
- mapping[attrs["in"]] = attrs["out"]
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = {}
+
+ def postRead(self, rawTable, font):
+ mapping = {}
+ input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ if self.Format == 1:
+ delta = rawTable["DeltaGlyphID"]
+ inputGIDS = font.getGlyphIDMany(input)
+ outGIDS = [(glyphID + delta) % 65536 for glyphID in inputGIDS]
+ outNames = font.getGlyphNameMany(outGIDS)
+ for inp, out in zip(input, outNames):
+ mapping[inp] = out
+ elif self.Format == 2:
+ assert (
+ len(input) == rawTable["GlyphCount"]
+ ), "invalid SingleSubstFormat2 table"
+ subst = rawTable["Substitute"]
+ for inp, sub in zip(input, subst):
+ mapping[inp] = sub
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.mapping = mapping
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = {}
+ items = list(mapping.items())
+ getGlyphID = font.getGlyphID
+ gidItems = [(getGlyphID(a), getGlyphID(b)) for a, b in items]
+ sortableItems = sorted(zip(gidItems, items))
+
+ # figure out format
+ format = 2
+ delta = None
+ for inID, outID in gidItems:
+ if delta is None:
+ delta = (outID - inID) % 65536
+
+ if (inID + delta) % 65536 != outID:
+ break
+ else:
+ if delta is None:
+ # the mapping is empty, better use format 2
+ format = 2
+ else:
+ format = 1
+
+ rawTable = {}
+ self.Format = format
+ cov = Coverage()
+ input = [item[1][0] for item in sortableItems]
+ subst = [item[1][1] for item in sortableItems]
+ cov.glyphs = input
+ rawTable["Coverage"] = cov
+ if format == 1:
+ assert delta is not None
+ rawTable["DeltaGlyphID"] = delta
+ else:
+ rawTable["Substitute"] = subst
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.mapping.items())
+ for inGlyph, outGlyph in items:
+ xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", outGlyph)])
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = {}
+ self.mapping = mapping
+ mapping[attrs["in"]] = attrs["out"]
class MultipleSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'mapping'):
- self.mapping = {}
-
- def postRead(self, rawTable, font):
- mapping = {}
- if self.Format == 1:
- glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- subst = [s.Substitute for s in rawTable["Sequence"]]
- mapping = dict(zip(glyphs, subst))
- else:
- assert 0, "unknown format: %s" % self.Format
- self.mapping = mapping
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = self.mapping = {}
- cov = Coverage()
- cov.glyphs = sorted(list(mapping.keys()), key=font.getGlyphID)
- self.Format = 1
- rawTable = {
- "Coverage": cov,
- "Sequence": [self.makeSequence_(mapping[glyph])
- for glyph in cov.glyphs],
- }
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.mapping.items())
- for inGlyph, outGlyphs in items:
- out = ",".join(outGlyphs)
- xmlWriter.simpletag("Substitution",
- [("in", inGlyph), ("out", out)])
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- mapping = getattr(self, "mapping", None)
- if mapping is None:
- mapping = {}
- self.mapping = mapping
-
- # TTX v3.0 and earlier.
- if name == "Coverage":
- self.old_coverage_ = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- element_name, element_attrs, _ = element
- if element_name == "Glyph":
- self.old_coverage_.append(element_attrs["value"])
- return
- if name == "Sequence":
- index = int(attrs.get("index", len(mapping)))
- glyph = self.old_coverage_[index]
- glyph_mapping = mapping[glyph] = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- element_name, element_attrs, _ = element
- if element_name == "Substitute":
- glyph_mapping.append(element_attrs["value"])
- return
-
- # TTX v3.1 and later.
- outGlyphs = attrs["out"].split(",") if attrs["out"] else []
- mapping[attrs["in"]] = [g.strip() for g in outGlyphs]
-
- @staticmethod
- def makeSequence_(g):
- seq = Sequence()
- seq.Substitute = g
- return seq
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "mapping"):
+ self.mapping = {}
+
+ def postRead(self, rawTable, font):
+ mapping = {}
+ if self.Format == 1:
+ glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ subst = [s.Substitute for s in rawTable["Sequence"]]
+ mapping = dict(zip(glyphs, subst))
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.mapping = mapping
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = self.mapping = {}
+ cov = Coverage()
+ cov.glyphs = sorted(list(mapping.keys()), key=font.getGlyphID)
+ self.Format = 1
+ rawTable = {
+ "Coverage": cov,
+ "Sequence": [self.makeSequence_(mapping[glyph]) for glyph in cov.glyphs],
+ }
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.mapping.items())
+ for inGlyph, outGlyphs in items:
+ out = ",".join(outGlyphs)
+ xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", out)])
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ mapping = getattr(self, "mapping", None)
+ if mapping is None:
+ mapping = {}
+ self.mapping = mapping
+
+ # TTX v3.0 and earlier.
+ if name == "Coverage":
+ self.old_coverage_ = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ element_name, element_attrs, _ = element
+ if element_name == "Glyph":
+ self.old_coverage_.append(element_attrs["value"])
+ return
+ if name == "Sequence":
+ index = int(attrs.get("index", len(mapping)))
+ glyph = self.old_coverage_[index]
+ glyph_mapping = mapping[glyph] = []
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ element_name, element_attrs, _ = element
+ if element_name == "Substitute":
+ glyph_mapping.append(element_attrs["value"])
+ return
+
+ # TTX v3.1 and later.
+ outGlyphs = attrs["out"].split(",") if attrs["out"] else []
+ mapping[attrs["in"]] = [g.strip() for g in outGlyphs]
+
+ @staticmethod
+ def makeSequence_(g):
+ seq = Sequence()
+ seq.Substitute = g
+ return seq
class ClassDef(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'classDefs'):
- self.classDefs = {}
-
- def postRead(self, rawTable, font):
- classDefs = {}
-
- if self.Format == 1:
- start = rawTable["StartGlyph"]
- classList = rawTable["ClassValueArray"]
- startID = font.getGlyphID(start)
- endID = startID + len(classList)
- glyphNames = font.getGlyphNameMany(range(startID, endID))
- for glyphName, cls in zip(glyphNames, classList):
- if cls:
- classDefs[glyphName] = cls
-
- elif self.Format == 2:
- records = rawTable["ClassRangeRecord"]
- for rec in records:
- cls = rec.Class
- if not cls:
- continue
- start = rec.Start
- end = rec.End
- startID = font.getGlyphID(start)
- endID = font.getGlyphID(end) + 1
- glyphNames = font.getGlyphNameMany(range(startID, endID))
- for glyphName in glyphNames:
- classDefs[glyphName] = cls
- else:
- log.warning("Unknown ClassDef format: %s", self.Format)
- self.classDefs = classDefs
- del self.Format # Don't need this anymore
-
- def _getClassRanges(self, font):
- classDefs = getattr(self, "classDefs", None)
- if classDefs is None:
- self.classDefs = {}
- return
- getGlyphID = font.getGlyphID
- items = []
- for glyphName, cls in classDefs.items():
- if not cls:
- continue
- items.append((getGlyphID(glyphName), glyphName, cls))
- if items:
- items.sort()
- last, lastName, lastCls = items[0]
- ranges = [[lastCls, last, lastName]]
- for glyphID, glyphName, cls in items[1:]:
- if glyphID != last + 1 or cls != lastCls:
- ranges[-1].extend([last, lastName])
- ranges.append([cls, glyphID, glyphName])
- last = glyphID
- lastName = glyphName
- lastCls = cls
- ranges[-1].extend([last, lastName])
- return ranges
-
- def preWrite(self, font):
- format = 2
- rawTable = {"ClassRangeRecord": []}
- ranges = self._getClassRanges(font)
- if ranges:
- startGlyph = ranges[0][1]
- endGlyph = ranges[-1][3]
- glyphCount = endGlyph - startGlyph + 1
- if len(ranges) * 3 < glyphCount + 1:
- # Format 2 is more compact
- for i in range(len(ranges)):
- cls, start, startName, end, endName = ranges[i]
- rec = ClassRangeRecord()
- rec.Start = startName
- rec.End = endName
- rec.Class = cls
- ranges[i] = rec
- format = 2
- rawTable = {"ClassRangeRecord": ranges}
- else:
- # Format 1 is more compact
- startGlyphName = ranges[0][2]
- classes = [0] * glyphCount
- for cls, start, startName, end, endName in ranges:
- for g in range(start - startGlyph, end - startGlyph + 1):
- classes[g] = cls
- format = 1
- rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes}
- self.Format = format
- return rawTable
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.classDefs.items())
- for glyphName, cls in items:
- xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)])
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- classDefs = getattr(self, "classDefs", None)
- if classDefs is None:
- classDefs = {}
- self.classDefs = classDefs
- classDefs[attrs["glyph"]] = int(attrs["class"])
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "classDefs"):
+ self.classDefs = {}
+
+ def postRead(self, rawTable, font):
+ classDefs = {}
+
+ if self.Format == 1:
+ start = rawTable["StartGlyph"]
+ classList = rawTable["ClassValueArray"]
+ startID = font.getGlyphID(start)
+ endID = startID + len(classList)
+ glyphNames = font.getGlyphNameMany(range(startID, endID))
+ for glyphName, cls in zip(glyphNames, classList):
+ if cls:
+ classDefs[glyphName] = cls
+
+ elif self.Format == 2:
+ records = rawTable["ClassRangeRecord"]
+ for rec in records:
+ cls = rec.Class
+ if not cls:
+ continue
+ start = rec.Start
+ end = rec.End
+ startID = font.getGlyphID(start)
+ endID = font.getGlyphID(end) + 1
+ glyphNames = font.getGlyphNameMany(range(startID, endID))
+ for glyphName in glyphNames:
+ classDefs[glyphName] = cls
+ else:
+ log.warning("Unknown ClassDef format: %s", self.Format)
+ self.classDefs = classDefs
+ del self.Format # Don't need this anymore
+
+ def _getClassRanges(self, font):
+ classDefs = getattr(self, "classDefs", None)
+ if classDefs is None:
+ self.classDefs = {}
+ return
+ getGlyphID = font.getGlyphID
+ items = []
+ for glyphName, cls in classDefs.items():
+ if not cls:
+ continue
+ items.append((getGlyphID(glyphName), glyphName, cls))
+ if items:
+ items.sort()
+ last, lastName, lastCls = items[0]
+ ranges = [[lastCls, last, lastName]]
+ for glyphID, glyphName, cls in items[1:]:
+ if glyphID != last + 1 or cls != lastCls:
+ ranges[-1].extend([last, lastName])
+ ranges.append([cls, glyphID, glyphName])
+ last = glyphID
+ lastName = glyphName
+ lastCls = cls
+ ranges[-1].extend([last, lastName])
+ return ranges
+
+ def preWrite(self, font):
+ format = 2
+ rawTable = {"ClassRangeRecord": []}
+ ranges = self._getClassRanges(font)
+ if ranges:
+ startGlyph = ranges[0][1]
+ endGlyph = ranges[-1][3]
+ glyphCount = endGlyph - startGlyph + 1
+ if len(ranges) * 3 < glyphCount + 1:
+ # Format 2 is more compact
+ for i in range(len(ranges)):
+ cls, start, startName, end, endName = ranges[i]
+ rec = ClassRangeRecord()
+ rec.Start = startName
+ rec.End = endName
+ rec.Class = cls
+ ranges[i] = rec
+ format = 2
+ rawTable = {"ClassRangeRecord": ranges}
+ else:
+ # Format 1 is more compact
+ startGlyphName = ranges[0][2]
+ classes = [0] * glyphCount
+ for cls, start, startName, end, endName in ranges:
+ for g in range(start - startGlyph, end - startGlyph + 1):
+ classes[g] = cls
+ format = 1
+ rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes}
+ self.Format = format
+ return rawTable
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.classDefs.items())
+ for glyphName, cls in items:
+ xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)])
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ classDefs = getattr(self, "classDefs", None)
+ if classDefs is None:
+ classDefs = {}
+ self.classDefs = classDefs
+ classDefs[attrs["glyph"]] = int(attrs["class"])
class AlternateSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'alternates'):
- self.alternates = {}
-
- def postRead(self, rawTable, font):
- alternates = {}
- if self.Format == 1:
- input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- alts = rawTable["AlternateSet"]
- assert len(input) == len(alts)
- for inp,alt in zip(input,alts):
- alternates[inp] = alt.Alternate
- else:
- assert 0, "unknown format: %s" % self.Format
- self.alternates = alternates
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- self.Format = 1
- alternates = getattr(self, "alternates", None)
- if alternates is None:
- alternates = self.alternates = {}
- items = list(alternates.items())
- for i in range(len(items)):
- glyphName, set = items[i]
- items[i] = font.getGlyphID(glyphName), glyphName, set
- items.sort()
- cov = Coverage()
- cov.glyphs = [ item[1] for item in items]
- alternates = []
- setList = [ item[-1] for item in items]
- for set in setList:
- alts = AlternateSet()
- alts.Alternate = set
- alternates.append(alts)
- # a special case to deal with the fact that several hundred Adobe Japan1-5
- # CJK fonts will overflow an offset if the coverage table isn't pushed to the end.
- # Also useful in that when splitting a sub-table because of an offset overflow
- # I don't need to calculate the change in the subtable offset due to the change in the coverage table size.
- # Allows packing more rules in subtable.
- self.sortCoverageLast = 1
- return {"Coverage": cov, "AlternateSet": alternates}
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.alternates.items())
- for glyphName, alternates in items:
- xmlWriter.begintag("AlternateSet", glyph=glyphName)
- xmlWriter.newline()
- for alt in alternates:
- xmlWriter.simpletag("Alternate", glyph=alt)
- xmlWriter.newline()
- xmlWriter.endtag("AlternateSet")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- alternates = getattr(self, "alternates", None)
- if alternates is None:
- alternates = {}
- self.alternates = alternates
- glyphName = attrs["glyph"]
- set = []
- alternates[glyphName] = set
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- set.append(attrs["glyph"])
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "alternates"):
+ self.alternates = {}
+
+ def postRead(self, rawTable, font):
+ alternates = {}
+ if self.Format == 1:
+ input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ alts = rawTable["AlternateSet"]
+ assert len(input) == len(alts)
+ for inp, alt in zip(input, alts):
+ alternates[inp] = alt.Alternate
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.alternates = alternates
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ self.Format = 1
+ alternates = getattr(self, "alternates", None)
+ if alternates is None:
+ alternates = self.alternates = {}
+ items = list(alternates.items())
+ for i in range(len(items)):
+ glyphName, set = items[i]
+ items[i] = font.getGlyphID(glyphName), glyphName, set
+ items.sort()
+ cov = Coverage()
+ cov.glyphs = [item[1] for item in items]
+ alternates = []
+ setList = [item[-1] for item in items]
+ for set in setList:
+ alts = AlternateSet()
+ alts.Alternate = set
+ alternates.append(alts)
+ # a special case to deal with the fact that several hundred Adobe Japan1-5
+ # CJK fonts will overflow an offset if the coverage table isn't pushed to the end.
+ # Also useful in that when splitting a sub-table because of an offset overflow
+ # I don't need to calculate the change in the subtable offset due to the change in the coverage table size.
+ # Allows packing more rules in subtable.
+ self.sortCoverageLast = 1
+ return {"Coverage": cov, "AlternateSet": alternates}
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.alternates.items())
+ for glyphName, alternates in items:
+ xmlWriter.begintag("AlternateSet", glyph=glyphName)
+ xmlWriter.newline()
+ for alt in alternates:
+ xmlWriter.simpletag("Alternate", glyph=alt)
+ xmlWriter.newline()
+ xmlWriter.endtag("AlternateSet")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ alternates = getattr(self, "alternates", None)
+ if alternates is None:
+ alternates = {}
+ self.alternates = alternates
+ glyphName = attrs["glyph"]
+ set = []
+ alternates[glyphName] = set
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ set.append(attrs["glyph"])
class LigatureSubst(FormatSwitchingBaseTable):
-
- def populateDefaults(self, propagator=None):
- if not hasattr(self, 'ligatures'):
- self.ligatures = {}
-
- def postRead(self, rawTable, font):
- ligatures = {}
- if self.Format == 1:
- input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
- ligSets = rawTable["LigatureSet"]
- assert len(input) == len(ligSets)
- for i in range(len(input)):
- ligatures[input[i]] = ligSets[i].Ligature
- else:
- assert 0, "unknown format: %s" % self.Format
- self.ligatures = ligatures
- del self.Format # Don't need this anymore
-
- def preWrite(self, font):
- self.Format = 1
- ligatures = getattr(self, "ligatures", None)
- if ligatures is None:
- ligatures = self.ligatures = {}
-
- if ligatures and isinstance(next(iter(ligatures)), tuple):
- # New high-level API in v3.1 and later. Note that we just support compiling this
- # for now. We don't load to this API, and don't do XML with it.
-
- # ligatures is map from components-sequence to lig-glyph
- newLigatures = dict()
- for comps,lig in sorted(ligatures.items(), key=lambda item: (-len(item[0]), item[0])):
- ligature = Ligature()
- ligature.Component = comps[1:]
- ligature.CompCount = len(comps)
- ligature.LigGlyph = lig
- newLigatures.setdefault(comps[0], []).append(ligature)
- ligatures = newLigatures
-
- items = list(ligatures.items())
- for i in range(len(items)):
- glyphName, set = items[i]
- items[i] = font.getGlyphID(glyphName), glyphName, set
- items.sort()
- cov = Coverage()
- cov.glyphs = [ item[1] for item in items]
-
- ligSets = []
- setList = [ item[-1] for item in items ]
- for set in setList:
- ligSet = LigatureSet()
- ligs = ligSet.Ligature = []
- for lig in set:
- ligs.append(lig)
- ligSets.append(ligSet)
- # Useful in that when splitting a sub-table because of an offset overflow
- # I don't need to calculate the change in subtabl offset due to the coverage table size.
- # Allows packing more rules in subtable.
- self.sortCoverageLast = 1
- return {"Coverage": cov, "LigatureSet": ligSets}
-
- def toXML2(self, xmlWriter, font):
- items = sorted(self.ligatures.items())
- for glyphName, ligSets in items:
- xmlWriter.begintag("LigatureSet", glyph=glyphName)
- xmlWriter.newline()
- for lig in ligSets:
- xmlWriter.simpletag("Ligature", glyph=lig.LigGlyph,
- components=",".join(lig.Component))
- xmlWriter.newline()
- xmlWriter.endtag("LigatureSet")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- ligatures = getattr(self, "ligatures", None)
- if ligatures is None:
- ligatures = {}
- self.ligatures = ligatures
- glyphName = attrs["glyph"]
- ligs = []
- ligatures[glyphName] = ligs
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- lig = Ligature()
- lig.LigGlyph = attrs["glyph"]
- components = attrs["components"]
- lig.Component = components.split(",") if components else []
- lig.CompCount = len(lig.Component)
- ligs.append(lig)
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "ligatures"):
+ self.ligatures = {}
+
+ def postRead(self, rawTable, font):
+ ligatures = {}
+ if self.Format == 1:
+ input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+ ligSets = rawTable["LigatureSet"]
+ assert len(input) == len(ligSets)
+ for i in range(len(input)):
+ ligatures[input[i]] = ligSets[i].Ligature
+ else:
+ assert 0, "unknown format: %s" % self.Format
+ self.ligatures = ligatures
+ del self.Format # Don't need this anymore
+
+ def preWrite(self, font):
+ self.Format = 1
+ ligatures = getattr(self, "ligatures", None)
+ if ligatures is None:
+ ligatures = self.ligatures = {}
+
+ if ligatures and isinstance(next(iter(ligatures)), tuple):
+ # New high-level API in v3.1 and later. Note that we just support compiling this
+ # for now. We don't load to this API, and don't do XML with it.
+
+ # ligatures is map from components-sequence to lig-glyph
+ newLigatures = dict()
+ for comps, lig in sorted(
+ ligatures.items(), key=lambda item: (-len(item[0]), item[0])
+ ):
+ ligature = Ligature()
+ ligature.Component = comps[1:]
+ ligature.CompCount = len(comps)
+ ligature.LigGlyph = lig
+ newLigatures.setdefault(comps[0], []).append(ligature)
+ ligatures = newLigatures
+
+ items = list(ligatures.items())
+ for i in range(len(items)):
+ glyphName, set = items[i]
+ items[i] = font.getGlyphID(glyphName), glyphName, set
+ items.sort()
+ cov = Coverage()
+ cov.glyphs = [item[1] for item in items]
+
+ ligSets = []
+ setList = [item[-1] for item in items]
+ for set in setList:
+ ligSet = LigatureSet()
+ ligs = ligSet.Ligature = []
+ for lig in set:
+ ligs.append(lig)
+ ligSets.append(ligSet)
+ # Useful in that when splitting a sub-table because of an offset overflow
+ # I don't need to calculate the change in subtabl offset due to the coverage table size.
+ # Allows packing more rules in subtable.
+ self.sortCoverageLast = 1
+ return {"Coverage": cov, "LigatureSet": ligSets}
+
+ def toXML2(self, xmlWriter, font):
+ items = sorted(self.ligatures.items())
+ for glyphName, ligSets in items:
+ xmlWriter.begintag("LigatureSet", glyph=glyphName)
+ xmlWriter.newline()
+ for lig in ligSets:
+ xmlWriter.simpletag(
+ "Ligature", glyph=lig.LigGlyph, components=",".join(lig.Component)
+ )
+ xmlWriter.newline()
+ xmlWriter.endtag("LigatureSet")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ ligatures = getattr(self, "ligatures", None)
+ if ligatures is None:
+ ligatures = {}
+ self.ligatures = ligatures
+ glyphName = attrs["glyph"]
+ ligs = []
+ ligatures[glyphName] = ligs
+ for element in content:
+ if not isinstance(element, tuple):
+ continue
+ name, attrs, content = element
+ lig = Ligature()
+ lig.LigGlyph = attrs["glyph"]
+ components = attrs["components"]
+ lig.Component = components.split(",") if components else []
+ lig.CompCount = len(lig.Component)
+ ligs.append(lig)
class COLR(BaseTable):
+ def decompile(self, reader, font):
+ # COLRv0 is exceptional in that LayerRecordCount appears *after* the
+ # LayerRecordArray it counts, but the parser logic expects Count fields
+ # to always precede the arrays. Here we work around this by parsing the
+ # LayerRecordCount before the rest of the table, and storing it in
+ # the reader's local state.
+ subReader = reader.getSubReader(offset=0)
+ for conv in self.getConverters():
+ if conv.name != "LayerRecordCount":
+ subReader.advance(conv.staticSize)
+ continue
+ reader[conv.name] = conv.read(subReader, font, tableDict={})
+ break
+ else:
+ raise AssertionError("LayerRecordCount converter not found")
+ return BaseTable.decompile(self, reader, font)
+
+ def preWrite(self, font):
+ # The writer similarly assumes Count values precede the things counted,
+ # thus here we pre-initialize a CountReference; the actual count value
+ # will be set to the lenght of the array by the time this is assembled.
+ self.LayerRecordCount = None
+ return {
+ **self.__dict__,
+ "LayerRecordCount": CountReference(self.__dict__, "LayerRecordCount"),
+ }
+
+ def computeClipBoxes(self, glyphSet: "_TTGlyphSet", quantization: int = 1):
+ if self.Version == 0:
+ return
+
+ clips = {}
+ for rec in self.BaseGlyphList.BaseGlyphPaintRecord:
+ try:
+ clipBox = rec.Paint.computeClipBox(self, glyphSet, quantization)
+ except Exception as e:
+ from fontTools.ttLib import TTLibError
- def decompile(self, reader, font):
- # COLRv0 is exceptional in that LayerRecordCount appears *after* the
- # LayerRecordArray it counts, but the parser logic expects Count fields
- # to always precede the arrays. Here we work around this by parsing the
- # LayerRecordCount before the rest of the table, and storing it in
- # the reader's local state.
- subReader = reader.getSubReader(offset=0)
- for conv in self.getConverters():
- if conv.name != "LayerRecordCount":
- subReader.advance(conv.staticSize)
- continue
- reader[conv.name] = conv.read(subReader, font, tableDict={})
- break
- else:
- raise AssertionError("LayerRecordCount converter not found")
- return BaseTable.decompile(self, reader, font)
-
- def preWrite(self, font):
- # The writer similarly assumes Count values precede the things counted,
- # thus here we pre-initialize a CountReference; the actual count value
- # will be set to the lenght of the array by the time this is assembled.
- self.LayerRecordCount = None
- return {
- **self.__dict__,
- "LayerRecordCount": CountReference(self.__dict__, "LayerRecordCount")
- }
+ raise TTLibError(
+ f"Failed to compute COLR ClipBox for {rec.BaseGlyph!r}"
+ ) from e
+
+ if clipBox is not None:
+ clips[rec.BaseGlyph] = clipBox
+
+ hasClipList = hasattr(self, "ClipList") and self.ClipList is not None
+ if not clips:
+ if hasClipList:
+ self.ClipList = None
+ else:
+ if not hasClipList:
+ self.ClipList = ClipList()
+ self.ClipList.Format = 1
+ self.ClipList.clips = clips
class LookupList(BaseTable):
- @property
- def table(self):
- for l in self.Lookup:
- for st in l.SubTable:
- if type(st).__name__.endswith("Subst"):
- return "GSUB"
- if type(st).__name__.endswith("Pos"):
- return "GPOS"
- raise ValueError
-
- def toXML2(self, xmlWriter, font):
- if not font or "Debg" not in font or LOOKUP_DEBUG_INFO_KEY not in font["Debg"].data:
- return super().toXML2(xmlWriter, font)
- debugData = font["Debg"].data[LOOKUP_DEBUG_INFO_KEY][self.table]
- for conv in self.getConverters():
- if conv.repeat:
- value = getattr(self, conv.name, [])
- for lookupIndex, item in enumerate(value):
- if str(lookupIndex) in debugData:
- info = LookupDebugInfo(*debugData[str(lookupIndex)])
- tag = info.location
- if info.name:
- tag = f'{info.name}: {tag}'
- if info.feature:
- script,language,feature = info.feature
- tag = f'{tag} in {feature} ({script}/{language})'
- xmlWriter.comment(tag)
- xmlWriter.newline()
-
- conv.xmlWrite(xmlWriter, font, item, conv.name,
- [("index", lookupIndex)])
- else:
- if conv.aux and not eval(conv.aux, None, vars(self)):
- continue
- value = getattr(self, conv.name, None) # TODO Handle defaults instead of defaulting to None!
- conv.xmlWrite(xmlWriter, font, value, conv.name, [])
+ @property
+ def table(self):
+ for l in self.Lookup:
+ for st in l.SubTable:
+ if type(st).__name__.endswith("Subst"):
+ return "GSUB"
+ if type(st).__name__.endswith("Pos"):
+ return "GPOS"
+ raise ValueError
+
+ def toXML2(self, xmlWriter, font):
+ if (
+ not font
+ or "Debg" not in font
+ or LOOKUP_DEBUG_INFO_KEY not in font["Debg"].data
+ ):
+ return super().toXML2(xmlWriter, font)
+ debugData = font["Debg"].data[LOOKUP_DEBUG_INFO_KEY][self.table]
+ for conv in self.getConverters():
+ if conv.repeat:
+ value = getattr(self, conv.name, [])
+ for lookupIndex, item in enumerate(value):
+ if str(lookupIndex) in debugData:
+ info = LookupDebugInfo(*debugData[str(lookupIndex)])
+ tag = info.location
+ if info.name:
+ tag = f"{info.name}: {tag}"
+ if info.feature:
+ script, language, feature = info.feature
+ tag = f"{tag} in {feature} ({script}/{language})"
+ xmlWriter.comment(tag)
+ xmlWriter.newline()
+
+ conv.xmlWrite(
+ xmlWriter, font, item, conv.name, [("index", lookupIndex)]
+ )
+ else:
+ if conv.aux and not eval(conv.aux, None, vars(self)):
+ continue
+ value = getattr(
+ self, conv.name, None
+ ) # TODO Handle defaults instead of defaulting to None!
+ conv.xmlWrite(xmlWriter, font, value, conv.name, [])
-class BaseGlyphRecordArray(BaseTable):
- def preWrite(self, font):
- self.BaseGlyphRecord = sorted(
- self.BaseGlyphRecord,
- key=lambda rec: font.getGlyphID(rec.BaseGlyph)
- )
- return self.__dict__.copy()
+class BaseGlyphRecordArray(BaseTable):
+ def preWrite(self, font):
+ self.BaseGlyphRecord = sorted(
+ self.BaseGlyphRecord, key=lambda rec: font.getGlyphID(rec.BaseGlyph)
+ )
+ return self.__dict__.copy()
class BaseGlyphList(BaseTable):
-
- def preWrite(self, font):
- self.BaseGlyphPaintRecord = sorted(
- self.BaseGlyphPaintRecord,
- key=lambda rec: font.getGlyphID(rec.BaseGlyph)
- )
- return self.__dict__.copy()
+ def preWrite(self, font):
+ self.BaseGlyphPaintRecord = sorted(
+ self.BaseGlyphPaintRecord, key=lambda rec: font.getGlyphID(rec.BaseGlyph)
+ )
+ return self.__dict__.copy()
class ClipBoxFormat(IntEnum):
- Static = 1
- Variable = 2
+ Static = 1
+ Variable = 2
- def is_variable(self):
- return self is self.Variable
+ def is_variable(self):
+ return self is self.Variable
- def as_variable(self):
- return self.Variable
+ def as_variable(self):
+ return self.Variable
class ClipBox(getFormatSwitchingBaseTableClass("uint8")):
- formatEnum = ClipBoxFormat
+ formatEnum = ClipBoxFormat
- def as_tuple(self):
- return tuple(getattr(self, conv.name) for conv in self.getConverters())
+ def as_tuple(self):
+ return tuple(getattr(self, conv.name) for conv in self.getConverters())
- def __repr__(self):
- return f"{self.__class__.__name__}{self.as_tuple()}"
+ def __repr__(self):
+ return f"{self.__class__.__name__}{self.as_tuple()}"
class ClipList(getFormatSwitchingBaseTableClass("uint8")):
+ def populateDefaults(self, propagator=None):
+ if not hasattr(self, "clips"):
+ self.clips = {}
+
+ def postRead(self, rawTable, font):
+ clips = {}
+ glyphOrder = font.getGlyphOrder()
+ for i, rec in enumerate(rawTable["ClipRecord"]):
+ if rec.StartGlyphID > rec.EndGlyphID:
+ log.warning(
+ "invalid ClipRecord[%i].StartGlyphID (%i) > "
+ "EndGlyphID (%i); skipped",
+ i,
+ rec.StartGlyphID,
+ rec.EndGlyphID,
+ )
+ continue
+ redefinedGlyphs = []
+ missingGlyphs = []
+ for glyphID in range(rec.StartGlyphID, rec.EndGlyphID + 1):
+ try:
+ glyph = glyphOrder[glyphID]
+ except IndexError:
+ missingGlyphs.append(glyphID)
+ continue
+ if glyph not in clips:
+ clips[glyph] = copy.copy(rec.ClipBox)
+ else:
+ redefinedGlyphs.append(glyphID)
+ if redefinedGlyphs:
+ log.warning(
+ "ClipRecord[%i] overlaps previous records; "
+ "ignoring redefined clip boxes for the "
+ "following glyph ID range: [%i-%i]",
+ i,
+ min(redefinedGlyphs),
+ max(redefinedGlyphs),
+ )
+ if missingGlyphs:
+ log.warning(
+ "ClipRecord[%i] range references missing " "glyph IDs: [%i-%i]",
+ i,
+ min(missingGlyphs),
+ max(missingGlyphs),
+ )
+ self.clips = clips
+
+ def groups(self):
+ glyphsByClip = defaultdict(list)
+ uniqueClips = {}
+ for glyphName, clipBox in self.clips.items():
+ key = clipBox.as_tuple()
+ glyphsByClip[key].append(glyphName)
+ if key not in uniqueClips:
+ uniqueClips[key] = clipBox
+ return {
+ frozenset(glyphs): uniqueClips[key] for key, glyphs in glyphsByClip.items()
+ }
- def populateDefaults(self, propagator=None):
- if not hasattr(self, "clips"):
- self.clips = {}
-
- def postRead(self, rawTable, font):
- clips = {}
- glyphOrder = font.getGlyphOrder()
- for i, rec in enumerate(rawTable["ClipRecord"]):
- if rec.StartGlyphID > rec.EndGlyphID:
- log.warning(
- "invalid ClipRecord[%i].StartGlyphID (%i) > "
- "EndGlyphID (%i); skipped",
- i,
- rec.StartGlyphID,
- rec.EndGlyphID,
- )
- continue
- redefinedGlyphs = []
- missingGlyphs = []
- for glyphID in range(rec.StartGlyphID, rec.EndGlyphID + 1):
- try:
- glyph = glyphOrder[glyphID]
- except IndexError:
- missingGlyphs.append(glyphID)
- continue
- if glyph not in clips:
- clips[glyph] = copy.copy(rec.ClipBox)
- else:
- redefinedGlyphs.append(glyphID)
- if redefinedGlyphs:
- log.warning(
- "ClipRecord[%i] overlaps previous records; "
- "ignoring redefined clip boxes for the "
- "following glyph ID range: [%i-%i]",
- i,
- min(redefinedGlyphs),
- max(redefinedGlyphs),
- )
- if missingGlyphs:
- log.warning(
- "ClipRecord[%i] range references missing "
- "glyph IDs: [%i-%i]",
- i,
- min(missingGlyphs),
- max(missingGlyphs),
- )
- self.clips = clips
-
- def groups(self):
- glyphsByClip = defaultdict(list)
- uniqueClips = {}
- for glyphName, clipBox in self.clips.items():
- key = clipBox.as_tuple()
- glyphsByClip[key].append(glyphName)
- if key not in uniqueClips:
- uniqueClips[key] = clipBox
- return {
- frozenset(glyphs): uniqueClips[key]
- for key, glyphs in glyphsByClip.items()
- }
-
- def preWrite(self, font):
- if not hasattr(self, "clips"):
- self.clips = {}
- clipBoxRanges = {}
- glyphMap = font.getReverseGlyphMap()
- for glyphs, clipBox in self.groups().items():
- glyphIDs = sorted(
- glyphMap[glyphName] for glyphName in glyphs
- if glyphName in glyphMap
- )
- if not glyphIDs:
- continue
- last = glyphIDs[0]
- ranges = [[last]]
- for glyphID in glyphIDs[1:]:
- if glyphID != last + 1:
- ranges[-1].append(last)
- ranges.append([glyphID])
- last = glyphID
- ranges[-1].append(last)
- for start, end in ranges:
- assert (start, end) not in clipBoxRanges
- clipBoxRanges[(start, end)] = clipBox
-
- clipRecords = []
- for (start, end), clipBox in sorted(clipBoxRanges.items()):
- record = ClipRecord()
- record.StartGlyphID = start
- record.EndGlyphID = end
- record.ClipBox = clipBox
- clipRecords.append(record)
- rawTable = {
- "ClipCount": len(clipRecords),
- "ClipRecord": clipRecords,
- }
- return rawTable
-
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- tableName = name if name else self.__class__.__name__
- if attrs is None:
- attrs = []
- if hasattr(self, "Format"):
- attrs.append(("Format", self.Format))
- xmlWriter.begintag(tableName, attrs)
- xmlWriter.newline()
- # sort clips alphabetically to ensure deterministic XML dump
- for glyphs, clipBox in sorted(
- self.groups().items(), key=lambda item: min(item[0])
- ):
- xmlWriter.begintag("Clip")
- xmlWriter.newline()
- for glyphName in sorted(glyphs):
- xmlWriter.simpletag("Glyph", value=glyphName)
- xmlWriter.newline()
- xmlWriter.begintag("ClipBox", [("Format", clipBox.Format)])
- xmlWriter.newline()
- clipBox.toXML2(xmlWriter, font)
- xmlWriter.endtag("ClipBox")
- xmlWriter.newline()
- xmlWriter.endtag("Clip")
- xmlWriter.newline()
- xmlWriter.endtag(tableName)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, font):
- clips = getattr(self, "clips", None)
- if clips is None:
- self.clips = clips = {}
- assert name == "Clip"
- glyphs = []
- clipBox = None
- for elem in content:
- if not isinstance(elem, tuple):
- continue
- name, attrs, content = elem
- if name == "Glyph":
- glyphs.append(attrs["value"])
- elif name == "ClipBox":
- clipBox = ClipBox()
- clipBox.Format = safeEval(attrs["Format"])
- for elem in content:
- if not isinstance(elem, tuple):
- continue
- name, attrs, content = elem
- clipBox.fromXML(name, attrs, content, font)
- if clipBox:
- for glyphName in glyphs:
- clips[glyphName] = clipBox
+ def preWrite(self, font):
+ if not hasattr(self, "clips"):
+ self.clips = {}
+ clipBoxRanges = {}
+ glyphMap = font.getReverseGlyphMap()
+ for glyphs, clipBox in self.groups().items():
+ glyphIDs = sorted(
+ glyphMap[glyphName] for glyphName in glyphs if glyphName in glyphMap
+ )
+ if not glyphIDs:
+ continue
+ last = glyphIDs[0]
+ ranges = [[last]]
+ for glyphID in glyphIDs[1:]:
+ if glyphID != last + 1:
+ ranges[-1].append(last)
+ ranges.append([glyphID])
+ last = glyphID
+ ranges[-1].append(last)
+ for start, end in ranges:
+ assert (start, end) not in clipBoxRanges
+ clipBoxRanges[(start, end)] = clipBox
+
+ clipRecords = []
+ for (start, end), clipBox in sorted(clipBoxRanges.items()):
+ record = ClipRecord()
+ record.StartGlyphID = start
+ record.EndGlyphID = end
+ record.ClipBox = clipBox
+ clipRecords.append(record)
+ rawTable = {
+ "ClipCount": len(clipRecords),
+ "ClipRecord": clipRecords,
+ }
+ return rawTable
+
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ tableName = name if name else self.__class__.__name__
+ if attrs is None:
+ attrs = []
+ if hasattr(self, "Format"):
+ attrs.append(("Format", self.Format))
+ xmlWriter.begintag(tableName, attrs)
+ xmlWriter.newline()
+ # sort clips alphabetically to ensure deterministic XML dump
+ for glyphs, clipBox in sorted(
+ self.groups().items(), key=lambda item: min(item[0])
+ ):
+ xmlWriter.begintag("Clip")
+ xmlWriter.newline()
+ for glyphName in sorted(glyphs):
+ xmlWriter.simpletag("Glyph", value=glyphName)
+ xmlWriter.newline()
+ xmlWriter.begintag("ClipBox", [("Format", clipBox.Format)])
+ xmlWriter.newline()
+ clipBox.toXML2(xmlWriter, font)
+ xmlWriter.endtag("ClipBox")
+ xmlWriter.newline()
+ xmlWriter.endtag("Clip")
+ xmlWriter.newline()
+ xmlWriter.endtag(tableName)
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, font):
+ clips = getattr(self, "clips", None)
+ if clips is None:
+ self.clips = clips = {}
+ assert name == "Clip"
+ glyphs = []
+ clipBox = None
+ for elem in content:
+ if not isinstance(elem, tuple):
+ continue
+ name, attrs, content = elem
+ if name == "Glyph":
+ glyphs.append(attrs["value"])
+ elif name == "ClipBox":
+ clipBox = ClipBox()
+ clipBox.Format = safeEval(attrs["Format"])
+ for elem in content:
+ if not isinstance(elem, tuple):
+ continue
+ name, attrs, content = elem
+ clipBox.fromXML(name, attrs, content, font)
+ if clipBox:
+ for glyphName in glyphs:
+ clips[glyphName] = clipBox
class ExtendMode(IntEnum):
- PAD = 0
- REPEAT = 1
- REFLECT = 2
+ PAD = 0
+ REPEAT = 1
+ REFLECT = 2
# Porter-Duff modes for COLRv1 PaintComposite:
# https://github.com/googlefonts/colr-gradients-spec/tree/off_sub_1#compositemode-enumeration
class CompositeMode(IntEnum):
- CLEAR = 0
- SRC = 1
- DEST = 2
- SRC_OVER = 3
- DEST_OVER = 4
- SRC_IN = 5
- DEST_IN = 6
- SRC_OUT = 7
- DEST_OUT = 8
- SRC_ATOP = 9
- DEST_ATOP = 10
- XOR = 11
- PLUS = 12
- SCREEN = 13
- OVERLAY = 14
- DARKEN = 15
- LIGHTEN = 16
- COLOR_DODGE = 17
- COLOR_BURN = 18
- HARD_LIGHT = 19
- SOFT_LIGHT = 20
- DIFFERENCE = 21
- EXCLUSION = 22
- MULTIPLY = 23
- HSL_HUE = 24
- HSL_SATURATION = 25
- HSL_COLOR = 26
- HSL_LUMINOSITY = 27
+ CLEAR = 0
+ SRC = 1
+ DEST = 2
+ SRC_OVER = 3
+ DEST_OVER = 4
+ SRC_IN = 5
+ DEST_IN = 6
+ SRC_OUT = 7
+ DEST_OUT = 8
+ SRC_ATOP = 9
+ DEST_ATOP = 10
+ XOR = 11
+ PLUS = 12
+ SCREEN = 13
+ OVERLAY = 14
+ DARKEN = 15
+ LIGHTEN = 16
+ COLOR_DODGE = 17
+ COLOR_BURN = 18
+ HARD_LIGHT = 19
+ SOFT_LIGHT = 20
+ DIFFERENCE = 21
+ EXCLUSION = 22
+ MULTIPLY = 23
+ HSL_HUE = 24
+ HSL_SATURATION = 25
+ HSL_COLOR = 26
+ HSL_LUMINOSITY = 27
class PaintFormat(IntEnum):
- PaintColrLayers = 1
- PaintSolid = 2
- PaintVarSolid = 3,
- PaintLinearGradient = 4
- PaintVarLinearGradient = 5
- PaintRadialGradient = 6
- PaintVarRadialGradient = 7
- PaintSweepGradient = 8
- PaintVarSweepGradient = 9
- PaintGlyph = 10
- PaintColrGlyph = 11
- PaintTransform = 12
- PaintVarTransform = 13
- PaintTranslate = 14
- PaintVarTranslate = 15
- PaintScale = 16
- PaintVarScale = 17
- PaintScaleAroundCenter = 18
- PaintVarScaleAroundCenter = 19
- PaintScaleUniform = 20
- PaintVarScaleUniform = 21
- PaintScaleUniformAroundCenter = 22
- PaintVarScaleUniformAroundCenter = 23
- PaintRotate = 24
- PaintVarRotate = 25
- PaintRotateAroundCenter = 26
- PaintVarRotateAroundCenter = 27
- PaintSkew = 28
- PaintVarSkew = 29
- PaintSkewAroundCenter = 30
- PaintVarSkewAroundCenter = 31
- PaintComposite = 32
-
- def is_variable(self):
- return self.name.startswith("PaintVar")
-
- def as_variable(self):
- if self.is_variable():
- return self
- try:
- return PaintFormat.__members__[f"PaintVar{self.name[5:]}"]
- except KeyError:
- return None
+ PaintColrLayers = 1
+ PaintSolid = 2
+ PaintVarSolid = 3
+ PaintLinearGradient = 4
+ PaintVarLinearGradient = 5
+ PaintRadialGradient = 6
+ PaintVarRadialGradient = 7
+ PaintSweepGradient = 8
+ PaintVarSweepGradient = 9
+ PaintGlyph = 10
+ PaintColrGlyph = 11
+ PaintTransform = 12
+ PaintVarTransform = 13
+ PaintTranslate = 14
+ PaintVarTranslate = 15
+ PaintScale = 16
+ PaintVarScale = 17
+ PaintScaleAroundCenter = 18
+ PaintVarScaleAroundCenter = 19
+ PaintScaleUniform = 20
+ PaintVarScaleUniform = 21
+ PaintScaleUniformAroundCenter = 22
+ PaintVarScaleUniformAroundCenter = 23
+ PaintRotate = 24
+ PaintVarRotate = 25
+ PaintRotateAroundCenter = 26
+ PaintVarRotateAroundCenter = 27
+ PaintSkew = 28
+ PaintVarSkew = 29
+ PaintSkewAroundCenter = 30
+ PaintVarSkewAroundCenter = 31
+ PaintComposite = 32
+
+ def is_variable(self):
+ return self.name.startswith("PaintVar")
+
+ def as_variable(self):
+ if self.is_variable():
+ return self
+ try:
+ return PaintFormat.__members__[f"PaintVar{self.name[5:]}"]
+ except KeyError:
+ return None
class Paint(getFormatSwitchingBaseTableClass("uint8")):
- formatEnum = PaintFormat
-
- def getFormatName(self):
- try:
- return self.formatEnum(self.Format).name
- except ValueError:
- raise NotImplementedError(f"Unknown Paint format: {self.Format}")
-
- def toXML(self, xmlWriter, font, attrs=None, name=None):
- tableName = name if name else self.__class__.__name__
- if attrs is None:
- attrs = []
- attrs.append(("Format", self.Format))
- xmlWriter.begintag(tableName, attrs)
- xmlWriter.comment(self.getFormatName())
- xmlWriter.newline()
- self.toXML2(xmlWriter, font)
- xmlWriter.endtag(tableName)
- xmlWriter.newline()
-
- def getChildren(self, colr):
- if self.Format == PaintFormat.PaintColrLayers:
- # https://github.com/fonttools/fonttools/issues/2438: don't die when no LayerList exists
- layers = []
- if colr.LayerList is not None:
- layers = colr.LayerList.Paint
- return layers[
- self.FirstLayerIndex : self.FirstLayerIndex + self.NumLayers
- ]
-
- if self.Format == PaintFormat.PaintColrGlyph:
- for record in colr.BaseGlyphList.BaseGlyphPaintRecord:
- if record.BaseGlyph == self.Glyph:
- return [record.Paint]
- else:
- raise KeyError(f"{self.Glyph!r} not in colr.BaseGlyphList")
-
- children = []
- for conv in self.getConverters():
- if conv.tableClass is not None and issubclass(conv.tableClass, type(self)):
- children.append(getattr(self, conv.name))
-
- return children
-
- def traverse(self, colr: COLR, callback):
- """Depth-first traversal of graph rooted at self, callback on each node."""
- if not callable(callback):
- raise TypeError("callback must be callable")
- stack = [self]
- visited = set()
- while stack:
- current = stack.pop()
- if id(current) in visited:
- continue
- callback(current)
- visited.add(id(current))
- stack.extend(reversed(current.getChildren(colr)))
+ formatEnum = PaintFormat
+
+ def getFormatName(self):
+ try:
+ return self.formatEnum(self.Format).name
+ except ValueError:
+ raise NotImplementedError(f"Unknown Paint format: {self.Format}")
+
+ def toXML(self, xmlWriter, font, attrs=None, name=None):
+ tableName = name if name else self.__class__.__name__
+ if attrs is None:
+ attrs = []
+ attrs.append(("Format", self.Format))
+ xmlWriter.begintag(tableName, attrs)
+ xmlWriter.comment(self.getFormatName())
+ xmlWriter.newline()
+ self.toXML2(xmlWriter, font)
+ xmlWriter.endtag(tableName)
+ xmlWriter.newline()
+
+ def iterPaintSubTables(self, colr: COLR) -> Iterator[BaseTable.SubTableEntry]:
+ if self.Format == PaintFormat.PaintColrLayers:
+ # https://github.com/fonttools/fonttools/issues/2438: don't die when no LayerList exists
+ layers = []
+ if colr.LayerList is not None:
+ layers = colr.LayerList.Paint
+ yield from (
+ BaseTable.SubTableEntry(name="Layers", value=v, index=i)
+ for i, v in enumerate(
+ layers[self.FirstLayerIndex : self.FirstLayerIndex + self.NumLayers]
+ )
+ )
+ return
+
+ if self.Format == PaintFormat.PaintColrGlyph:
+ for record in colr.BaseGlyphList.BaseGlyphPaintRecord:
+ if record.BaseGlyph == self.Glyph:
+ yield BaseTable.SubTableEntry(name="BaseGlyph", value=record.Paint)
+ return
+ else:
+ raise KeyError(f"{self.Glyph!r} not in colr.BaseGlyphList")
+
+ for conv in self.getConverters():
+ if conv.tableClass is not None and issubclass(conv.tableClass, type(self)):
+ value = getattr(self, conv.name)
+ yield BaseTable.SubTableEntry(name=conv.name, value=value)
+
+ def getChildren(self, colr) -> List["Paint"]:
+ # this is kept for backward compatibility (e.g. it's used by the subsetter)
+ return [p.value for p in self.iterPaintSubTables(colr)]
+
+ def traverse(self, colr: COLR, callback):
+ """Depth-first traversal of graph rooted at self, callback on each node."""
+ if not callable(callback):
+ raise TypeError("callback must be callable")
+
+ for path in dfs_base_table(
+ self, iter_subtables_fn=lambda paint: paint.iterPaintSubTables(colr)
+ ):
+ paint = path[-1].value
+ callback(paint)
+
+ def getTransform(self) -> Transform:
+ if self.Format == PaintFormat.PaintTransform:
+ t = self.Transform
+ return Transform(t.xx, t.yx, t.xy, t.yy, t.dx, t.dy)
+ elif self.Format == PaintFormat.PaintTranslate:
+ return Identity.translate(self.dx, self.dy)
+ elif self.Format == PaintFormat.PaintScale:
+ return Identity.scale(self.scaleX, self.scaleY)
+ elif self.Format == PaintFormat.PaintScaleAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .scale(self.scaleX, self.scaleY)
+ .translate(-self.centerX, -self.centerY)
+ )
+ elif self.Format == PaintFormat.PaintScaleUniform:
+ return Identity.scale(self.scale)
+ elif self.Format == PaintFormat.PaintScaleUniformAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .scale(self.scale)
+ .translate(-self.centerX, -self.centerY)
+ )
+ elif self.Format == PaintFormat.PaintRotate:
+ return Identity.rotate(radians(self.angle))
+ elif self.Format == PaintFormat.PaintRotateAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .rotate(radians(self.angle))
+ .translate(-self.centerX, -self.centerY)
+ )
+ elif self.Format == PaintFormat.PaintSkew:
+ return Identity.skew(radians(-self.xSkewAngle), radians(self.ySkewAngle))
+ elif self.Format == PaintFormat.PaintSkewAroundCenter:
+ return (
+ Identity.translate(self.centerX, self.centerY)
+ .skew(radians(-self.xSkewAngle), radians(self.ySkewAngle))
+ .translate(-self.centerX, -self.centerY)
+ )
+ if PaintFormat(self.Format).is_variable():
+ raise NotImplementedError(f"Variable Paints not supported: {self.Format}")
+
+ return Identity
+
+ def computeClipBox(
+ self, colr: COLR, glyphSet: "_TTGlyphSet", quantization: int = 1
+ ) -> Optional[ClipBox]:
+ pen = ControlBoundsPen(glyphSet)
+ for path in dfs_base_table(
+ self, iter_subtables_fn=lambda paint: paint.iterPaintSubTables(colr)
+ ):
+ paint = path[-1].value
+ if paint.Format == PaintFormat.PaintGlyph:
+ transformation = reduce(
+ Transform.transform,
+ (st.value.getTransform() for st in path),
+ Identity,
+ )
+ glyphSet[paint.Glyph].draw(TransformPen(pen, transformation))
+
+ if pen.bounds is None:
+ return None
+
+ cb = ClipBox()
+ cb.Format = int(ClipBoxFormat.Static)
+ cb.xMin, cb.yMin, cb.xMax, cb.yMax = quantizeRect(pen.bounds, quantization)
+ return cb
# For each subtable format there is a class. However, we don't really distinguish
@@ -1595,30 +1712,82 @@ class Paint(getFormatSwitchingBaseTableClass("uint8")):
# subclass for each alternate field name.
#
_equivalents = {
- 'MarkArray': ("Mark1Array",),
- 'LangSys': ('DefaultLangSys',),
- 'Coverage': ('MarkCoverage', 'BaseCoverage', 'LigatureCoverage', 'Mark1Coverage',
- 'Mark2Coverage', 'BacktrackCoverage', 'InputCoverage',
- 'LookAheadCoverage', 'VertGlyphCoverage', 'HorizGlyphCoverage',
- 'TopAccentCoverage', 'ExtendedShapeCoverage', 'MathKernCoverage'),
- 'ClassDef': ('ClassDef1', 'ClassDef2', 'BacktrackClassDef', 'InputClassDef',
- 'LookAheadClassDef', 'GlyphClassDef', 'MarkAttachClassDef'),
- 'Anchor': ('EntryAnchor', 'ExitAnchor', 'BaseAnchor', 'LigatureAnchor',
- 'Mark2Anchor', 'MarkAnchor'),
- 'Device': ('XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice',
- 'XDeviceTable', 'YDeviceTable', 'DeviceTable'),
- 'Axis': ('HorizAxis', 'VertAxis',),
- 'MinMax': ('DefaultMinMax',),
- 'BaseCoord': ('MinCoord', 'MaxCoord',),
- 'JstfLangSys': ('DefJstfLangSys',),
- 'JstfGSUBModList': ('ShrinkageEnableGSUB', 'ShrinkageDisableGSUB', 'ExtensionEnableGSUB',
- 'ExtensionDisableGSUB',),
- 'JstfGPOSModList': ('ShrinkageEnableGPOS', 'ShrinkageDisableGPOS', 'ExtensionEnableGPOS',
- 'ExtensionDisableGPOS',),
- 'JstfMax': ('ShrinkageJstfMax', 'ExtensionJstfMax',),
- 'MathKern': ('TopRightMathKern', 'TopLeftMathKern', 'BottomRightMathKern',
- 'BottomLeftMathKern'),
- 'MathGlyphConstruction': ('VertGlyphConstruction', 'HorizGlyphConstruction'),
+ "MarkArray": ("Mark1Array",),
+ "LangSys": ("DefaultLangSys",),
+ "Coverage": (
+ "MarkCoverage",
+ "BaseCoverage",
+ "LigatureCoverage",
+ "Mark1Coverage",
+ "Mark2Coverage",
+ "BacktrackCoverage",
+ "InputCoverage",
+ "LookAheadCoverage",
+ "VertGlyphCoverage",
+ "HorizGlyphCoverage",
+ "TopAccentCoverage",
+ "ExtendedShapeCoverage",
+ "MathKernCoverage",
+ ),
+ "ClassDef": (
+ "ClassDef1",
+ "ClassDef2",
+ "BacktrackClassDef",
+ "InputClassDef",
+ "LookAheadClassDef",
+ "GlyphClassDef",
+ "MarkAttachClassDef",
+ ),
+ "Anchor": (
+ "EntryAnchor",
+ "ExitAnchor",
+ "BaseAnchor",
+ "LigatureAnchor",
+ "Mark2Anchor",
+ "MarkAnchor",
+ ),
+ "Device": (
+ "XPlaDevice",
+ "YPlaDevice",
+ "XAdvDevice",
+ "YAdvDevice",
+ "XDeviceTable",
+ "YDeviceTable",
+ "DeviceTable",
+ ),
+ "Axis": (
+ "HorizAxis",
+ "VertAxis",
+ ),
+ "MinMax": ("DefaultMinMax",),
+ "BaseCoord": (
+ "MinCoord",
+ "MaxCoord",
+ ),
+ "JstfLangSys": ("DefJstfLangSys",),
+ "JstfGSUBModList": (
+ "ShrinkageEnableGSUB",
+ "ShrinkageDisableGSUB",
+ "ExtensionEnableGSUB",
+ "ExtensionDisableGSUB",
+ ),
+ "JstfGPOSModList": (
+ "ShrinkageEnableGPOS",
+ "ShrinkageDisableGPOS",
+ "ExtensionEnableGPOS",
+ "ExtensionDisableGPOS",
+ ),
+ "JstfMax": (
+ "ShrinkageJstfMax",
+ "ExtensionJstfMax",
+ ),
+ "MathKern": (
+ "TopRightMathKern",
+ "TopLeftMathKern",
+ "BottomRightMathKern",
+ "BottomLeftMathKern",
+ ),
+ "MathGlyphConstruction": ("VertGlyphConstruction", "HorizGlyphConstruction"),
}
#
@@ -1626,468 +1795,479 @@ _equivalents = {
# XXX This should probably move to otBase.py
#
+
def fixLookupOverFlows(ttf, overflowRecord):
- """ Either the offset from the LookupList to a lookup overflowed, or
- an offset from a lookup to a subtable overflowed.
- The table layout is:
- GPSO/GUSB
- Script List
- Feature List
- LookUpList
- Lookup[0] and contents
- SubTable offset list
- SubTable[0] and contents
- ...
- SubTable[n] and contents
- ...
- Lookup[n] and contents
- SubTable offset list
- SubTable[0] and contents
- ...
- SubTable[n] and contents
- If the offset to a lookup overflowed (SubTableIndex is None)
- we must promote the *previous* lookup to an Extension type.
- If the offset from a lookup to subtable overflowed, then we must promote it
- to an Extension Lookup type.
- """
- ok = 0
- lookupIndex = overflowRecord.LookupListIndex
- if (overflowRecord.SubTableIndex is None):
- lookupIndex = lookupIndex - 1
- if lookupIndex < 0:
- return ok
- if overflowRecord.tableType == 'GSUB':
- extType = 7
- elif overflowRecord.tableType == 'GPOS':
- extType = 9
-
- lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup
- lookup = lookups[lookupIndex]
- # If the previous lookup is an extType, look further back. Very unlikely, but possible.
- while lookup.SubTable[0].__class__.LookupType == extType:
- lookupIndex = lookupIndex -1
- if lookupIndex < 0:
- return ok
- lookup = lookups[lookupIndex]
-
- for lookupIndex in range(lookupIndex, len(lookups)):
- lookup = lookups[lookupIndex]
- if lookup.LookupType != extType:
- lookup.LookupType = extType
- for si in range(len(lookup.SubTable)):
- subTable = lookup.SubTable[si]
- extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
- extSubTable = extSubTableClass()
- extSubTable.Format = 1
- extSubTable.ExtSubTable = subTable
- lookup.SubTable[si] = extSubTable
- ok = 1
- return ok
+ """Either the offset from the LookupList to a lookup overflowed, or
+ an offset from a lookup to a subtable overflowed.
+ The table layout is:
+ GPSO/GUSB
+ Script List
+ Feature List
+ LookUpList
+ Lookup[0] and contents
+ SubTable offset list
+ SubTable[0] and contents
+ ...
+ SubTable[n] and contents
+ ...
+ Lookup[n] and contents
+ SubTable offset list
+ SubTable[0] and contents
+ ...
+ SubTable[n] and contents
+ If the offset to a lookup overflowed (SubTableIndex is None)
+ we must promote the *previous* lookup to an Extension type.
+ If the offset from a lookup to subtable overflowed, then we must promote it
+ to an Extension Lookup type.
+ """
+ ok = 0
+ lookupIndex = overflowRecord.LookupListIndex
+ if overflowRecord.SubTableIndex is None:
+ lookupIndex = lookupIndex - 1
+ if lookupIndex < 0:
+ return ok
+ if overflowRecord.tableType == "GSUB":
+ extType = 7
+ elif overflowRecord.tableType == "GPOS":
+ extType = 9
+
+ lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup
+ lookup = lookups[lookupIndex]
+ # If the previous lookup is an extType, look further back. Very unlikely, but possible.
+ while lookup.SubTable[0].__class__.LookupType == extType:
+ lookupIndex = lookupIndex - 1
+ if lookupIndex < 0:
+ return ok
+ lookup = lookups[lookupIndex]
+
+ for lookupIndex in range(lookupIndex, len(lookups)):
+ lookup = lookups[lookupIndex]
+ if lookup.LookupType != extType:
+ lookup.LookupType = extType
+ for si in range(len(lookup.SubTable)):
+ subTable = lookup.SubTable[si]
+ extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
+ extSubTable = extSubTableClass()
+ extSubTable.Format = 1
+ extSubTable.ExtSubTable = subTable
+ lookup.SubTable[si] = extSubTable
+ ok = 1
+ return ok
+
def splitMultipleSubst(oldSubTable, newSubTable, overflowRecord):
- ok = 1
- oldMapping = sorted(oldSubTable.mapping.items())
- oldLen = len(oldMapping)
-
- if overflowRecord.itemName in ['Coverage', 'RangeRecord']:
- # Coverage table is written last. Overflow is to or within the
- # the coverage table. We will just cut the subtable in half.
- newLen = oldLen // 2
-
- elif overflowRecord.itemName == 'Sequence':
- # We just need to back up by two items from the overflowed
- # Sequence index to make sure the offset to the Coverage table
- # doesn't overflow.
- newLen = overflowRecord.itemIndex - 1
-
- newSubTable.mapping = {}
- for i in range(newLen, oldLen):
- item = oldMapping[i]
- key = item[0]
- newSubTable.mapping[key] = item[1]
- del oldSubTable.mapping[key]
-
- return ok
+ ok = 1
+ oldMapping = sorted(oldSubTable.mapping.items())
+ oldLen = len(oldMapping)
+
+ if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
+ # Coverage table is written last. Overflow is to or within the
+ # the coverage table. We will just cut the subtable in half.
+ newLen = oldLen // 2
+
+ elif overflowRecord.itemName == "Sequence":
+ # We just need to back up by two items from the overflowed
+ # Sequence index to make sure the offset to the Coverage table
+ # doesn't overflow.
+ newLen = overflowRecord.itemIndex - 1
+
+ newSubTable.mapping = {}
+ for i in range(newLen, oldLen):
+ item = oldMapping[i]
+ key = item[0]
+ newSubTable.mapping[key] = item[1]
+ del oldSubTable.mapping[key]
+
+ return ok
+
def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord):
- ok = 1
- if hasattr(oldSubTable, 'sortCoverageLast'):
- newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast
+ ok = 1
+ if hasattr(oldSubTable, "sortCoverageLast"):
+ newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast
- oldAlts = sorted(oldSubTable.alternates.items())
- oldLen = len(oldAlts)
+ oldAlts = sorted(oldSubTable.alternates.items())
+ oldLen = len(oldAlts)
- if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']:
- # Coverage table is written last. overflow is to or within the
- # the coverage table. We will just cut the subtable in half.
- newLen = oldLen//2
+ if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
+ # Coverage table is written last. overflow is to or within the
+ # the coverage table. We will just cut the subtable in half.
+ newLen = oldLen // 2
- elif overflowRecord.itemName == 'AlternateSet':
- # We just need to back up by two items
- # from the overflowed AlternateSet index to make sure the offset
- # to the Coverage table doesn't overflow.
- newLen = overflowRecord.itemIndex - 1
+ elif overflowRecord.itemName == "AlternateSet":
+ # We just need to back up by two items
+ # from the overflowed AlternateSet index to make sure the offset
+ # to the Coverage table doesn't overflow.
+ newLen = overflowRecord.itemIndex - 1
- newSubTable.alternates = {}
- for i in range(newLen, oldLen):
- item = oldAlts[i]
- key = item[0]
- newSubTable.alternates[key] = item[1]
- del oldSubTable.alternates[key]
+ newSubTable.alternates = {}
+ for i in range(newLen, oldLen):
+ item = oldAlts[i]
+ key = item[0]
+ newSubTable.alternates[key] = item[1]
+ del oldSubTable.alternates[key]
- return ok
+ return ok
def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord):
- ok = 1
- oldLigs = sorted(oldSubTable.ligatures.items())
- oldLen = len(oldLigs)
+ ok = 1
+ oldLigs = sorted(oldSubTable.ligatures.items())
+ oldLen = len(oldLigs)
- if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']:
- # Coverage table is written last. overflow is to or within the
- # the coverage table. We will just cut the subtable in half.
- newLen = oldLen//2
+ if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
+ # Coverage table is written last. overflow is to or within the
+ # the coverage table. We will just cut the subtable in half.
+ newLen = oldLen // 2
- elif overflowRecord.itemName == 'LigatureSet':
- # We just need to back up by two items
- # from the overflowed AlternateSet index to make sure the offset
- # to the Coverage table doesn't overflow.
- newLen = overflowRecord.itemIndex - 1
+ elif overflowRecord.itemName == "LigatureSet":
+ # We just need to back up by two items
+ # from the overflowed AlternateSet index to make sure the offset
+ # to the Coverage table doesn't overflow.
+ newLen = overflowRecord.itemIndex - 1
- newSubTable.ligatures = {}
- for i in range(newLen, oldLen):
- item = oldLigs[i]
- key = item[0]
- newSubTable.ligatures[key] = item[1]
- del oldSubTable.ligatures[key]
+ newSubTable.ligatures = {}
+ for i in range(newLen, oldLen):
+ item = oldLigs[i]
+ key = item[0]
+ newSubTable.ligatures[key] = item[1]
+ del oldSubTable.ligatures[key]
- return ok
+ return ok
def splitPairPos(oldSubTable, newSubTable, overflowRecord):
- st = oldSubTable
- ok = False
- newSubTable.Format = oldSubTable.Format
- if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1:
- for name in 'ValueFormat1', 'ValueFormat2':
- setattr(newSubTable, name, getattr(oldSubTable, name))
+ st = oldSubTable
+ ok = False
+ newSubTable.Format = oldSubTable.Format
+ if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1:
+ for name in "ValueFormat1", "ValueFormat2":
+ setattr(newSubTable, name, getattr(oldSubTable, name))
- # Move top half of coverage to new subtable
+ # Move top half of coverage to new subtable
- newSubTable.Coverage = oldSubTable.Coverage.__class__()
+ newSubTable.Coverage = oldSubTable.Coverage.__class__()
- coverage = oldSubTable.Coverage.glyphs
- records = oldSubTable.PairSet
+ coverage = oldSubTable.Coverage.glyphs
+ records = oldSubTable.PairSet
- oldCount = len(oldSubTable.PairSet) // 2
+ oldCount = len(oldSubTable.PairSet) // 2
- oldSubTable.Coverage.glyphs = coverage[:oldCount]
- oldSubTable.PairSet = records[:oldCount]
+ oldSubTable.Coverage.glyphs = coverage[:oldCount]
+ oldSubTable.PairSet = records[:oldCount]
- newSubTable.Coverage.glyphs = coverage[oldCount:]
- newSubTable.PairSet = records[oldCount:]
+ newSubTable.Coverage.glyphs = coverage[oldCount:]
+ newSubTable.PairSet = records[oldCount:]
- oldSubTable.PairSetCount = len(oldSubTable.PairSet)
- newSubTable.PairSetCount = len(newSubTable.PairSet)
+ oldSubTable.PairSetCount = len(oldSubTable.PairSet)
+ newSubTable.PairSetCount = len(newSubTable.PairSet)
- ok = True
+ ok = True
- elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1:
- if not hasattr(oldSubTable, 'Class2Count'):
- oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record)
- for name in 'Class2Count', 'ClassDef2', 'ValueFormat1', 'ValueFormat2':
- setattr(newSubTable, name, getattr(oldSubTable, name))
+ elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1:
+ if not hasattr(oldSubTable, "Class2Count"):
+ oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record)
+ for name in "Class2Count", "ClassDef2", "ValueFormat1", "ValueFormat2":
+ setattr(newSubTable, name, getattr(oldSubTable, name))
- # The two subtables will still have the same ClassDef2 and the table
- # sharing will still cause the sharing to overflow. As such, disable
- # sharing on the one that is serialized second (that's oldSubTable).
- oldSubTable.DontShare = True
+ # The two subtables will still have the same ClassDef2 and the table
+ # sharing will still cause the sharing to overflow. As such, disable
+ # sharing on the one that is serialized second (that's oldSubTable).
+ oldSubTable.DontShare = True
- # Move top half of class numbers to new subtable
+ # Move top half of class numbers to new subtable
- newSubTable.Coverage = oldSubTable.Coverage.__class__()
- newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__()
+ newSubTable.Coverage = oldSubTable.Coverage.__class__()
+ newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__()
- coverage = oldSubTable.Coverage.glyphs
- classDefs = oldSubTable.ClassDef1.classDefs
- records = oldSubTable.Class1Record
+ coverage = oldSubTable.Coverage.glyphs
+ classDefs = oldSubTable.ClassDef1.classDefs
+ records = oldSubTable.Class1Record
- oldCount = len(oldSubTable.Class1Record) // 2
- newGlyphs = set(k for k,v in classDefs.items() if v >= oldCount)
+ oldCount = len(oldSubTable.Class1Record) // 2
+ newGlyphs = set(k for k, v in classDefs.items() if v >= oldCount)
- oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs]
- oldSubTable.ClassDef1.classDefs = {k:v for k,v in classDefs.items() if v < oldCount}
- oldSubTable.Class1Record = records[:oldCount]
+ oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs]
+ oldSubTable.ClassDef1.classDefs = {
+ k: v for k, v in classDefs.items() if v < oldCount
+ }
+ oldSubTable.Class1Record = records[:oldCount]
- newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs]
- newSubTable.ClassDef1.classDefs = {k:(v-oldCount) for k,v in classDefs.items() if v > oldCount}
- newSubTable.Class1Record = records[oldCount:]
+ newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs]
+ newSubTable.ClassDef1.classDefs = {
+ k: (v - oldCount) for k, v in classDefs.items() if v > oldCount
+ }
+ newSubTable.Class1Record = records[oldCount:]
- oldSubTable.Class1Count = len(oldSubTable.Class1Record)
- newSubTable.Class1Count = len(newSubTable.Class1Record)
+ oldSubTable.Class1Count = len(oldSubTable.Class1Record)
+ newSubTable.Class1Count = len(newSubTable.Class1Record)
- ok = True
+ ok = True
- return ok
+ return ok
def splitMarkBasePos(oldSubTable, newSubTable, overflowRecord):
- # split half of the mark classes to the new subtable
- classCount = oldSubTable.ClassCount
- if classCount < 2:
- # oh well, not much left to split...
- return False
-
- oldClassCount = classCount // 2
- newClassCount = classCount - oldClassCount
-
- oldMarkCoverage, oldMarkRecords = [], []
- newMarkCoverage, newMarkRecords = [], []
- for glyphName, markRecord in zip(
- oldSubTable.MarkCoverage.glyphs,
- oldSubTable.MarkArray.MarkRecord
- ):
- if markRecord.Class < oldClassCount:
- oldMarkCoverage.append(glyphName)
- oldMarkRecords.append(markRecord)
- else:
- markRecord.Class -= oldClassCount
- newMarkCoverage.append(glyphName)
- newMarkRecords.append(markRecord)
-
- oldBaseRecords, newBaseRecords = [], []
- for rec in oldSubTable.BaseArray.BaseRecord:
- oldBaseRecord, newBaseRecord = rec.__class__(), rec.__class__()
- oldBaseRecord.BaseAnchor = rec.BaseAnchor[:oldClassCount]
- newBaseRecord.BaseAnchor = rec.BaseAnchor[oldClassCount:]
- oldBaseRecords.append(oldBaseRecord)
- newBaseRecords.append(newBaseRecord)
-
- newSubTable.Format = oldSubTable.Format
-
- oldSubTable.MarkCoverage.glyphs = oldMarkCoverage
- newSubTable.MarkCoverage = oldSubTable.MarkCoverage.__class__()
- newSubTable.MarkCoverage.glyphs = newMarkCoverage
-
- # share the same BaseCoverage in both halves
- newSubTable.BaseCoverage = oldSubTable.BaseCoverage
-
- oldSubTable.ClassCount = oldClassCount
- newSubTable.ClassCount = newClassCount
-
- oldSubTable.MarkArray.MarkRecord = oldMarkRecords
- newSubTable.MarkArray = oldSubTable.MarkArray.__class__()
- newSubTable.MarkArray.MarkRecord = newMarkRecords
-
- oldSubTable.MarkArray.MarkCount = len(oldMarkRecords)
- newSubTable.MarkArray.MarkCount = len(newMarkRecords)
-
- oldSubTable.BaseArray.BaseRecord = oldBaseRecords
- newSubTable.BaseArray = oldSubTable.BaseArray.__class__()
- newSubTable.BaseArray.BaseRecord = newBaseRecords
-
- oldSubTable.BaseArray.BaseCount = len(oldBaseRecords)
- newSubTable.BaseArray.BaseCount = len(newBaseRecords)
-
- return True
-
-
-splitTable = { 'GSUB': {
-# 1: splitSingleSubst,
- 2: splitMultipleSubst,
- 3: splitAlternateSubst,
- 4: splitLigatureSubst,
-# 5: splitContextSubst,
-# 6: splitChainContextSubst,
-# 7: splitExtensionSubst,
-# 8: splitReverseChainSingleSubst,
- },
- 'GPOS': {
-# 1: splitSinglePos,
- 2: splitPairPos,
-# 3: splitCursivePos,
- 4: splitMarkBasePos,
-# 5: splitMarkLigPos,
-# 6: splitMarkMarkPos,
-# 7: splitContextPos,
-# 8: splitChainContextPos,
-# 9: splitExtensionPos,
- }
-
- }
+ # split half of the mark classes to the new subtable
+ classCount = oldSubTable.ClassCount
+ if classCount < 2:
+ # oh well, not much left to split...
+ return False
+
+ oldClassCount = classCount // 2
+ newClassCount = classCount - oldClassCount
+
+ oldMarkCoverage, oldMarkRecords = [], []
+ newMarkCoverage, newMarkRecords = [], []
+ for glyphName, markRecord in zip(
+ oldSubTable.MarkCoverage.glyphs, oldSubTable.MarkArray.MarkRecord
+ ):
+ if markRecord.Class < oldClassCount:
+ oldMarkCoverage.append(glyphName)
+ oldMarkRecords.append(markRecord)
+ else:
+ markRecord.Class -= oldClassCount
+ newMarkCoverage.append(glyphName)
+ newMarkRecords.append(markRecord)
+
+ oldBaseRecords, newBaseRecords = [], []
+ for rec in oldSubTable.BaseArray.BaseRecord:
+ oldBaseRecord, newBaseRecord = rec.__class__(), rec.__class__()
+ oldBaseRecord.BaseAnchor = rec.BaseAnchor[:oldClassCount]
+ newBaseRecord.BaseAnchor = rec.BaseAnchor[oldClassCount:]
+ oldBaseRecords.append(oldBaseRecord)
+ newBaseRecords.append(newBaseRecord)
+
+ newSubTable.Format = oldSubTable.Format
+
+ oldSubTable.MarkCoverage.glyphs = oldMarkCoverage
+ newSubTable.MarkCoverage = oldSubTable.MarkCoverage.__class__()
+ newSubTable.MarkCoverage.glyphs = newMarkCoverage
+
+ # share the same BaseCoverage in both halves
+ newSubTable.BaseCoverage = oldSubTable.BaseCoverage
+
+ oldSubTable.ClassCount = oldClassCount
+ newSubTable.ClassCount = newClassCount
+
+ oldSubTable.MarkArray.MarkRecord = oldMarkRecords
+ newSubTable.MarkArray = oldSubTable.MarkArray.__class__()
+ newSubTable.MarkArray.MarkRecord = newMarkRecords
+
+ oldSubTable.MarkArray.MarkCount = len(oldMarkRecords)
+ newSubTable.MarkArray.MarkCount = len(newMarkRecords)
+
+ oldSubTable.BaseArray.BaseRecord = oldBaseRecords
+ newSubTable.BaseArray = oldSubTable.BaseArray.__class__()
+ newSubTable.BaseArray.BaseRecord = newBaseRecords
+
+ oldSubTable.BaseArray.BaseCount = len(oldBaseRecords)
+ newSubTable.BaseArray.BaseCount = len(newBaseRecords)
+
+ return True
+
+
+splitTable = {
+ "GSUB": {
+ # 1: splitSingleSubst,
+ 2: splitMultipleSubst,
+ 3: splitAlternateSubst,
+ 4: splitLigatureSubst,
+ # 5: splitContextSubst,
+ # 6: splitChainContextSubst,
+ # 7: splitExtensionSubst,
+ # 8: splitReverseChainSingleSubst,
+ },
+ "GPOS": {
+ # 1: splitSinglePos,
+ 2: splitPairPos,
+ # 3: splitCursivePos,
+ 4: splitMarkBasePos,
+ # 5: splitMarkLigPos,
+ # 6: splitMarkMarkPos,
+ # 7: splitContextPos,
+ # 8: splitChainContextPos,
+ # 9: splitExtensionPos,
+ },
+}
+
def fixSubTableOverFlows(ttf, overflowRecord):
- """
- An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts.
- """
- table = ttf[overflowRecord.tableType].table
- lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
- subIndex = overflowRecord.SubTableIndex
- subtable = lookup.SubTable[subIndex]
-
- # First, try not sharing anything for this subtable...
- if not hasattr(subtable, "DontShare"):
- subtable.DontShare = True
- return True
-
- if hasattr(subtable, 'ExtSubTable'):
- # We split the subtable of the Extension table, and add a new Extension table
- # to contain the new subtable.
-
- subTableType = subtable.ExtSubTable.__class__.LookupType
- extSubTable = subtable
- subtable = extSubTable.ExtSubTable
- newExtSubTableClass = lookupTypes[overflowRecord.tableType][extSubTable.__class__.LookupType]
- newExtSubTable = newExtSubTableClass()
- newExtSubTable.Format = extSubTable.Format
- toInsert = newExtSubTable
-
- newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
- newSubTable = newSubTableClass()
- newExtSubTable.ExtSubTable = newSubTable
- else:
- subTableType = subtable.__class__.LookupType
- newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
- newSubTable = newSubTableClass()
- toInsert = newSubTable
-
- if hasattr(lookup, 'SubTableCount'): # may not be defined yet.
- lookup.SubTableCount = lookup.SubTableCount + 1
-
- try:
- splitFunc = splitTable[overflowRecord.tableType][subTableType]
- except KeyError:
- log.error(
- "Don't know how to split %s lookup type %s",
- overflowRecord.tableType,
- subTableType,
- )
- return False
-
- ok = splitFunc(subtable, newSubTable, overflowRecord)
- if ok:
- lookup.SubTable.insert(subIndex + 1, toInsert)
- return ok
+ """
+ An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts.
+ """
+ table = ttf[overflowRecord.tableType].table
+ lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
+ subIndex = overflowRecord.SubTableIndex
+ subtable = lookup.SubTable[subIndex]
+
+ # First, try not sharing anything for this subtable...
+ if not hasattr(subtable, "DontShare"):
+ subtable.DontShare = True
+ return True
+
+ if hasattr(subtable, "ExtSubTable"):
+ # We split the subtable of the Extension table, and add a new Extension table
+ # to contain the new subtable.
+
+ subTableType = subtable.ExtSubTable.__class__.LookupType
+ extSubTable = subtable
+ subtable = extSubTable.ExtSubTable
+ newExtSubTableClass = lookupTypes[overflowRecord.tableType][
+ extSubTable.__class__.LookupType
+ ]
+ newExtSubTable = newExtSubTableClass()
+ newExtSubTable.Format = extSubTable.Format
+ toInsert = newExtSubTable
+
+ newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
+ newSubTable = newSubTableClass()
+ newExtSubTable.ExtSubTable = newSubTable
+ else:
+ subTableType = subtable.__class__.LookupType
+ newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
+ newSubTable = newSubTableClass()
+ toInsert = newSubTable
+
+ if hasattr(lookup, "SubTableCount"): # may not be defined yet.
+ lookup.SubTableCount = lookup.SubTableCount + 1
+
+ try:
+ splitFunc = splitTable[overflowRecord.tableType][subTableType]
+ except KeyError:
+ log.error(
+ "Don't know how to split %s lookup type %s",
+ overflowRecord.tableType,
+ subTableType,
+ )
+ return False
+
+ ok = splitFunc(subtable, newSubTable, overflowRecord)
+ if ok:
+ lookup.SubTable.insert(subIndex + 1, toInsert)
+ return ok
+
# End of OverFlow logic
def _buildClasses():
- import re
- from .otData import otData
-
- formatPat = re.compile(r"([A-Za-z0-9]+)Format(\d+)$")
- namespace = globals()
-
- # populate module with classes
- for name, table in otData:
- baseClass = BaseTable
- m = formatPat.match(name)
- if m:
- # XxxFormatN subtable, we only add the "base" table
- name = m.group(1)
- # the first row of a format-switching otData table describes the Format;
- # the first column defines the type of the Format field.
- # Currently this can be either 'uint16' or 'uint8'.
- formatType = table[0][0]
- baseClass = getFormatSwitchingBaseTableClass(formatType)
- if name not in namespace:
- # the class doesn't exist yet, so the base implementation is used.
- cls = type(name, (baseClass,), {})
- if name in ('GSUB', 'GPOS'):
- cls.DontShare = True
- namespace[name] = cls
-
- # link Var{Table} <-> {Table} (e.g. ColorStop <-> VarColorStop, etc.)
- for name, _ in otData:
- if name.startswith("Var") and len(name) > 3 and name[3:] in namespace:
- varType = namespace[name]
- noVarType = namespace[name[3:]]
- varType.NoVarType = noVarType
- noVarType.VarType = varType
-
- for base, alts in _equivalents.items():
- base = namespace[base]
- for alt in alts:
- namespace[alt] = base
-
- global lookupTypes
- lookupTypes = {
- 'GSUB': {
- 1: SingleSubst,
- 2: MultipleSubst,
- 3: AlternateSubst,
- 4: LigatureSubst,
- 5: ContextSubst,
- 6: ChainContextSubst,
- 7: ExtensionSubst,
- 8: ReverseChainSingleSubst,
- },
- 'GPOS': {
- 1: SinglePos,
- 2: PairPos,
- 3: CursivePos,
- 4: MarkBasePos,
- 5: MarkLigPos,
- 6: MarkMarkPos,
- 7: ContextPos,
- 8: ChainContextPos,
- 9: ExtensionPos,
- },
- 'mort': {
- 4: NoncontextualMorph,
- },
- 'morx': {
- 0: RearrangementMorph,
- 1: ContextualMorph,
- 2: LigatureMorph,
- # 3: Reserved,
- 4: NoncontextualMorph,
- 5: InsertionMorph,
- },
- }
- lookupTypes['JSTF'] = lookupTypes['GPOS'] # JSTF contains GPOS
- for lookupEnum in lookupTypes.values():
- for enum, cls in lookupEnum.items():
- cls.LookupType = enum
-
- global featureParamTypes
- featureParamTypes = {
- 'size': FeatureParamsSize,
- }
- for i in range(1, 20+1):
- featureParamTypes['ss%02d' % i] = FeatureParamsStylisticSet
- for i in range(1, 99+1):
- featureParamTypes['cv%02d' % i] = FeatureParamsCharacterVariants
-
- # add converters to classes
- from .otConverters import buildConverters
- for name, table in otData:
- m = formatPat.match(name)
- if m:
- # XxxFormatN subtable, add converter to "base" table
- name, format = m.groups()
- format = int(format)
- cls = namespace[name]
- if not hasattr(cls, "converters"):
- cls.converters = {}
- cls.convertersByName = {}
- converters, convertersByName = buildConverters(table[1:], namespace)
- cls.converters[format] = converters
- cls.convertersByName[format] = convertersByName
- # XXX Add staticSize?
- else:
- cls = namespace[name]
- cls.converters, cls.convertersByName = buildConverters(table, namespace)
- # XXX Add staticSize?
+ import re
+ from .otData import otData
+
+ formatPat = re.compile(r"([A-Za-z0-9]+)Format(\d+)$")
+ namespace = globals()
+
+ # populate module with classes
+ for name, table in otData:
+ baseClass = BaseTable
+ m = formatPat.match(name)
+ if m:
+ # XxxFormatN subtable, we only add the "base" table
+ name = m.group(1)
+ # the first row of a format-switching otData table describes the Format;
+ # the first column defines the type of the Format field.
+ # Currently this can be either 'uint16' or 'uint8'.
+ formatType = table[0][0]
+ baseClass = getFormatSwitchingBaseTableClass(formatType)
+ if name not in namespace:
+ # the class doesn't exist yet, so the base implementation is used.
+ cls = type(name, (baseClass,), {})
+ if name in ("GSUB", "GPOS"):
+ cls.DontShare = True
+ namespace[name] = cls
+
+ # link Var{Table} <-> {Table} (e.g. ColorStop <-> VarColorStop, etc.)
+ for name, _ in otData:
+ if name.startswith("Var") and len(name) > 3 and name[3:] in namespace:
+ varType = namespace[name]
+ noVarType = namespace[name[3:]]
+ varType.NoVarType = noVarType
+ noVarType.VarType = varType
+
+ for base, alts in _equivalents.items():
+ base = namespace[base]
+ for alt in alts:
+ namespace[alt] = base
+
+ global lookupTypes
+ lookupTypes = {
+ "GSUB": {
+ 1: SingleSubst,
+ 2: MultipleSubst,
+ 3: AlternateSubst,
+ 4: LigatureSubst,
+ 5: ContextSubst,
+ 6: ChainContextSubst,
+ 7: ExtensionSubst,
+ 8: ReverseChainSingleSubst,
+ },
+ "GPOS": {
+ 1: SinglePos,
+ 2: PairPos,
+ 3: CursivePos,
+ 4: MarkBasePos,
+ 5: MarkLigPos,
+ 6: MarkMarkPos,
+ 7: ContextPos,
+ 8: ChainContextPos,
+ 9: ExtensionPos,
+ },
+ "mort": {
+ 4: NoncontextualMorph,
+ },
+ "morx": {
+ 0: RearrangementMorph,
+ 1: ContextualMorph,
+ 2: LigatureMorph,
+ # 3: Reserved,
+ 4: NoncontextualMorph,
+ 5: InsertionMorph,
+ },
+ }
+ lookupTypes["JSTF"] = lookupTypes["GPOS"] # JSTF contains GPOS
+ for lookupEnum in lookupTypes.values():
+ for enum, cls in lookupEnum.items():
+ cls.LookupType = enum
+
+ global featureParamTypes
+ featureParamTypes = {
+ "size": FeatureParamsSize,
+ }
+ for i in range(1, 20 + 1):
+ featureParamTypes["ss%02d" % i] = FeatureParamsStylisticSet
+ for i in range(1, 99 + 1):
+ featureParamTypes["cv%02d" % i] = FeatureParamsCharacterVariants
+
+ # add converters to classes
+ from .otConverters import buildConverters
+
+ for name, table in otData:
+ m = formatPat.match(name)
+ if m:
+ # XxxFormatN subtable, add converter to "base" table
+ name, format = m.groups()
+ format = int(format)
+ cls = namespace[name]
+ if not hasattr(cls, "converters"):
+ cls.converters = {}
+ cls.convertersByName = {}
+ converters, convertersByName = buildConverters(table[1:], namespace)
+ cls.converters[format] = converters
+ cls.convertersByName[format] = convertersByName
+ # XXX Add staticSize?
+ else:
+ cls = namespace[name]
+ cls.converters, cls.convertersByName = buildConverters(table, namespace)
+ # XXX Add staticSize?
_buildClasses()
def _getGlyphsFromCoverageTable(coverage):
- if coverage is None:
- # empty coverage table
- return []
- else:
- return coverage.glyphs
+ if coverage is None:
+ # empty coverage table
+ return []
+ else:
+ return coverage.glyphs
diff --git a/Lib/fontTools/ttLib/tables/otTraverse.py b/Lib/fontTools/ttLib/tables/otTraverse.py
index 40b28b2b..bf22dcfd 100644
--- a/Lib/fontTools/ttLib/tables/otTraverse.py
+++ b/Lib/fontTools/ttLib/tables/otTraverse.py
@@ -12,7 +12,6 @@ __all__ = [
class SubTablePath(Tuple[BaseTable.SubTableEntry, ...]):
-
def __str__(self) -> str:
path_parts = []
for entry in self:
@@ -32,6 +31,9 @@ def dfs_base_table(
root_accessor: Optional[str] = None,
skip_root: bool = False,
predicate: Optional[Callable[[SubTablePath], bool]] = None,
+ iter_subtables_fn: Optional[
+ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
+ ] = None,
) -> Iterable[SubTablePath]:
"""Depth-first search tree of BaseTables.
@@ -44,6 +46,9 @@ def dfs_base_table(
predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out
paths. If True, the path is yielded and its subtables are added to the
queue. If False, the path is skipped and its subtables are not traversed.
+ iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
+ function to iterate over subtables of a table. If None, the default
+ BaseTable.iterSubTables() is used.
Yields:
SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples
@@ -57,6 +62,7 @@ def dfs_base_table(
skip_root,
predicate,
lambda frontier, new: frontier.extendleft(reversed(new)),
+ iter_subtables_fn,
)
@@ -65,11 +71,14 @@ def bfs_base_table(
root_accessor: Optional[str] = None,
skip_root: bool = False,
predicate: Optional[Callable[[SubTablePath], bool]] = None,
+ iter_subtables_fn: Optional[
+ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
+ ] = None,
) -> Iterable[SubTablePath]:
"""Breadth-first search tree of BaseTables.
Args:
- root (BaseTable): the root of the tree.
+ the root of the tree.
root_accessor (Optional[str]): attribute name for the root table, if any (mostly
useful for debugging).
skip_root (Optional[bool]): if True, the root itself is not visited, only its
@@ -77,6 +86,9 @@ def bfs_base_table(
predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out
paths. If True, the path is yielded and its subtables are added to the
queue. If False, the path is skipped and its subtables are not traversed.
+ iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
+ function to iterate over subtables of a table. If None, the default
+ BaseTable.iterSubTables() is used.
Yields:
SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples
@@ -90,6 +102,7 @@ def bfs_base_table(
skip_root,
predicate,
lambda frontier, new: frontier.extend(new),
+ iter_subtables_fn,
)
@@ -99,6 +112,9 @@ def _traverse_ot_data(
skip_root: bool,
predicate: Optional[Callable[[SubTablePath], bool]],
add_to_frontier_fn: AddToFrontierFn,
+ iter_subtables_fn: Optional[
+ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
+ ] = None,
) -> Iterable[SubTablePath]:
# no visited because general otData cannot cycle (forward-offset only)
if root_accessor is None:
@@ -109,6 +125,11 @@ def _traverse_ot_data(
def predicate(path):
return True
+ if iter_subtables_fn is None:
+
+ def iter_subtables_fn(table):
+ return table.iterSubTables()
+
frontier: Deque[SubTablePath] = deque()
root_entry = BaseTable.SubTableEntry(root_accessor, root)
@@ -117,7 +138,10 @@ def _traverse_ot_data(
else:
add_to_frontier_fn(
frontier,
- [(root_entry, subtable_entry) for subtable_entry in root.iterSubTables()],
+ [
+ (root_entry, subtable_entry)
+ for subtable_entry in iter_subtables_fn(root)
+ ],
)
while frontier:
@@ -131,7 +155,7 @@ def _traverse_ot_data(
yield SubTablePath(path)
new_entries = [
- path + (subtable_entry,) for subtable_entry in current.iterSubTables()
+ path + (subtable_entry,) for subtable_entry in iter_subtables_fn(current)
]
add_to_frontier_fn(frontier, new_entries)
diff --git a/Lib/fontTools/ttLib/tables/sbixGlyph.py b/Lib/fontTools/ttLib/tables/sbixGlyph.py
index fe29c090..fd687a18 100644
--- a/Lib/fontTools/ttLib/tables/sbixGlyph.py
+++ b/Lib/fontTools/ttLib/tables/sbixGlyph.py
@@ -20,98 +20,126 @@ sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat)
class Glyph(object):
- def __init__(self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0):
- self.gid = gid
- self.glyphName = glyphName
- self.referenceGlyphName = referenceGlyphName
- self.originOffsetX = originOffsetX
- self.originOffsetY = originOffsetY
- self.rawdata = rawdata
- self.graphicType = graphicType
- self.imageData = imageData
-
- # fix self.graphicType if it is null terminated or too short
- if self.graphicType is not None:
- if self.graphicType[-1] == "\0":
- self.graphicType = self.graphicType[:-1]
- if len(self.graphicType) > 4:
- from fontTools import ttLib
- raise ttLib.TTLibError("Glyph.graphicType must not be longer than 4 characters.")
- elif len(self.graphicType) < 4:
- # pad with spaces
- self.graphicType += " "[:(4 - len(self.graphicType))]
-
- def decompile(self, ttFont):
- self.glyphName = ttFont.getGlyphName(self.gid)
- if self.rawdata is None:
- from fontTools import ttLib
- raise ttLib.TTLibError("No table data to decompile")
- if len(self.rawdata) > 0:
- if len(self.rawdata) < sbixGlyphHeaderFormatSize:
- from fontTools import ttLib
- #print "Glyph %i header too short: Expected %x, got %x." % (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata))
- raise ttLib.TTLibError("Glyph header too short.")
-
- sstruct.unpack(sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self)
-
- if self.graphicType == "dupe":
- # this glyph is a reference to another glyph's image data
- gid, = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:])
- self.referenceGlyphName = ttFont.getGlyphName(gid)
- else:
- self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:]
- self.referenceGlyphName = None
- # clean up
- del self.rawdata
- del self.gid
-
- def compile(self, ttFont):
- if self.glyphName is None:
- from fontTools import ttLib
- raise ttLib.TTLibError("Can't compile Glyph without glyph name")
- # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index?
- # (needed if you just want to compile the sbix table on its own)
- self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName))
- if self.graphicType is None:
- self.rawdata = b""
- else:
- self.rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + self.imageData
-
- def toXML(self, xmlWriter, ttFont):
- if self.graphicType == None:
- # TODO: ignore empty glyphs?
- # a glyph data entry is required for each glyph,
- # but empty ones can be calculated at compile time
- xmlWriter.simpletag("glyph", name=self.glyphName)
- xmlWriter.newline()
- return
- xmlWriter.begintag("glyph",
- graphicType=self.graphicType,
- name=self.glyphName,
- originOffsetX=self.originOffsetX,
- originOffsetY=self.originOffsetY,
- )
- xmlWriter.newline()
- if self.graphicType == "dupe":
- # graphicType == "dupe" is a reference to another glyph id.
- xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
- else:
- xmlWriter.begintag("hexdata")
- xmlWriter.newline()
- xmlWriter.dumphex(self.imageData)
- xmlWriter.endtag("hexdata")
- xmlWriter.newline()
- xmlWriter.endtag("glyph")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "ref":
- # glyph is a "dupe", i.e. a reference to another glyph's image data.
- # in this case imageData contains the glyph id of the reference glyph
- # get glyph id from glyphname
- self.imageData = struct.pack(">H", ttFont.getGlyphID(safeEval("'''" + attrs["glyphname"] + "'''")))
- elif name == "hexdata":
- self.imageData = readHex(content)
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("can't handle '%s' element" % name)
+ def __init__(
+ self,
+ glyphName=None,
+ referenceGlyphName=None,
+ originOffsetX=0,
+ originOffsetY=0,
+ graphicType=None,
+ imageData=None,
+ rawdata=None,
+ gid=0,
+ ):
+ self.gid = gid
+ self.glyphName = glyphName
+ self.referenceGlyphName = referenceGlyphName
+ self.originOffsetX = originOffsetX
+ self.originOffsetY = originOffsetY
+ self.rawdata = rawdata
+ self.graphicType = graphicType
+ self.imageData = imageData
+
+ # fix self.graphicType if it is null terminated or too short
+ if self.graphicType is not None:
+ if self.graphicType[-1] == "\0":
+ self.graphicType = self.graphicType[:-1]
+ if len(self.graphicType) > 4:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError(
+ "Glyph.graphicType must not be longer than 4 characters."
+ )
+ elif len(self.graphicType) < 4:
+ # pad with spaces
+ self.graphicType += " "[: (4 - len(self.graphicType))]
+
+ def decompile(self, ttFont):
+ self.glyphName = ttFont.getGlyphName(self.gid)
+ if self.rawdata is None:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("No table data to decompile")
+ if len(self.rawdata) > 0:
+ if len(self.rawdata) < sbixGlyphHeaderFormatSize:
+ from fontTools import ttLib
+
+ # print "Glyph %i header too short: Expected %x, got %x." % (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata))
+ raise ttLib.TTLibError("Glyph header too short.")
+
+ sstruct.unpack(
+ sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self
+ )
+
+ if self.graphicType == "dupe":
+ # this glyph is a reference to another glyph's image data
+ (gid,) = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:])
+ self.referenceGlyphName = ttFont.getGlyphName(gid)
+ else:
+ self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:]
+ self.referenceGlyphName = None
+ # clean up
+ del self.rawdata
+ del self.gid
+
+ def compile(self, ttFont):
+ if self.glyphName is None:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("Can't compile Glyph without glyph name")
+ # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index?
+ # (needed if you just want to compile the sbix table on its own)
+ self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName))
+ if self.graphicType is None:
+ rawdata = b""
+ else:
+ rawdata = sstruct.pack(sbixGlyphHeaderFormat, self)
+ if self.graphicType == "dupe":
+ rawdata += struct.pack(">H", ttFont.getGlyphID(self.referenceGlyphName))
+ else:
+ assert self.imageData is not None
+ rawdata += self.imageData
+ self.rawdata = rawdata
+
+ def toXML(self, xmlWriter, ttFont):
+ if self.graphicType is None:
+ # TODO: ignore empty glyphs?
+ # a glyph data entry is required for each glyph,
+ # but empty ones can be calculated at compile time
+ xmlWriter.simpletag("glyph", name=self.glyphName)
+ xmlWriter.newline()
+ return
+ xmlWriter.begintag(
+ "glyph",
+ graphicType=self.graphicType,
+ name=self.glyphName,
+ originOffsetX=self.originOffsetX,
+ originOffsetY=self.originOffsetY,
+ )
+ xmlWriter.newline()
+ if self.graphicType == "dupe":
+ # graphicType == "dupe" is a reference to another glyph id.
+ xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
+ else:
+ xmlWriter.begintag("hexdata")
+ xmlWriter.newline()
+ xmlWriter.dumphex(self.imageData)
+ xmlWriter.endtag("hexdata")
+ xmlWriter.newline()
+ xmlWriter.endtag("glyph")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name == "ref":
+ # glyph is a "dupe", i.e. a reference to another glyph's image data.
+ # in this case imageData contains the glyph id of the reference glyph
+ # get glyph id from glyphname
+ glyphname = safeEval("'''" + attrs["glyphname"] + "'''")
+ self.imageData = struct.pack(">H", ttFont.getGlyphID(glyphname))
+ self.referenceGlyphName = glyphname
+ elif name == "hexdata":
+ self.imageData = readHex(content)
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
diff --git a/Lib/fontTools/ttLib/tables/sbixStrike.py b/Lib/fontTools/ttLib/tables/sbixStrike.py
index b367a99f..7614af4c 100644
--- a/Lib/fontTools/ttLib/tables/sbixStrike.py
+++ b/Lib/fontTools/ttLib/tables/sbixStrike.py
@@ -22,127 +22,156 @@ sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat)
class Strike(object):
- def __init__(self, rawdata=None, ppem=0, resolution=72):
- self.data = rawdata
- self.ppem = ppem
- self.resolution = resolution
- self.glyphs = {}
-
- def decompile(self, ttFont):
- if self.data is None:
- from fontTools import ttLib
- raise ttLib.TTLibError
- if len(self.data) < sbixStrikeHeaderFormatSize:
- from fontTools import ttLib
- raise(ttLib.TTLibError, "Strike header too short: Expected %x, got %x.") \
- % (sbixStrikeHeaderFormatSize, len(self.data))
-
- # read Strike header from raw data
- sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self)
-
- # calculate number of glyphs
- firstGlyphDataOffset, = struct.unpack(">L", \
- self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize])
- self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1
- # ^ -1 because there's one more offset than glyphs
-
- # build offset list for single glyph data offsets
- self.glyphDataOffsets = []
- for i in range(self.numGlyphs + 1): # + 1 because there's one more offset than glyphs
- start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize
- current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize])
- self.glyphDataOffsets.append(current_offset)
-
- # iterate through offset list and slice raw data into glyph data records
- for i in range(self.numGlyphs):
- current_glyph = Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i)
- current_glyph.decompile(ttFont)
- self.glyphs[current_glyph.glyphName] = current_glyph
- del self.glyphDataOffsets
- del self.numGlyphs
- del self.data
-
- def compile(self, ttFont):
- self.glyphDataOffsets = b""
- self.bitmapData = b""
-
- glyphOrder = ttFont.getGlyphOrder()
-
- # first glyph starts right after the header
- currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1)
- for glyphName in glyphOrder:
- if glyphName in self.glyphs:
- # we have glyph data for this glyph
- current_glyph = self.glyphs[glyphName]
- else:
- # must add empty glyph data record for this glyph
- current_glyph = Glyph(glyphName=glyphName)
- current_glyph.compile(ttFont)
- current_glyph.glyphDataOffset = currentGlyphDataOffset
- self.bitmapData += current_glyph.rawdata
- currentGlyphDataOffset += len(current_glyph.rawdata)
- self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph)
-
- # add last "offset", really the end address of the last glyph data record
- dummy = Glyph()
- dummy.glyphDataOffset = currentGlyphDataOffset
- self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy)
-
- # pack header
- self.data = sstruct.pack(sbixStrikeHeaderFormat, self)
- # add offsets and image data after header
- self.data += self.glyphDataOffsets + self.bitmapData
-
- def toXML(self, xmlWriter, ttFont):
- xmlWriter.begintag("strike")
- xmlWriter.newline()
- xmlWriter.simpletag("ppem", value=self.ppem)
- xmlWriter.newline()
- xmlWriter.simpletag("resolution", value=self.resolution)
- xmlWriter.newline()
- glyphOrder = ttFont.getGlyphOrder()
- for i in range(len(glyphOrder)):
- if glyphOrder[i] in self.glyphs:
- self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont)
- # TODO: what if there are more glyph data records than (glyf table) glyphs?
- xmlWriter.endtag("strike")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name in ["ppem", "resolution"]:
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "glyph":
- if "graphicType" in attrs:
- myFormat = safeEval("'''" + attrs["graphicType"] + "'''")
- else:
- myFormat = None
- if "glyphname" in attrs:
- myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''")
- elif "name" in attrs:
- myGlyphName = safeEval("'''" + attrs["name"] + "'''")
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("Glyph must have a glyph name.")
- if "originOffsetX" in attrs:
- myOffsetX = safeEval(attrs["originOffsetX"])
- else:
- myOffsetX = 0
- if "originOffsetY" in attrs:
- myOffsetY = safeEval(attrs["originOffsetY"])
- else:
- myOffsetY = 0
- current_glyph = Glyph(
- glyphName=myGlyphName,
- graphicType=myFormat,
- originOffsetX=myOffsetX,
- originOffsetY=myOffsetY,
- )
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- current_glyph.fromXML(name, attrs, content, ttFont)
- current_glyph.compile(ttFont)
- self.glyphs[current_glyph.glyphName] = current_glyph
- else:
- from fontTools import ttLib
- raise ttLib.TTLibError("can't handle '%s' element" % name)
+ def __init__(self, rawdata=None, ppem=0, resolution=72):
+ self.data = rawdata
+ self.ppem = ppem
+ self.resolution = resolution
+ self.glyphs = {}
+
+ def decompile(self, ttFont):
+ if self.data is None:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError
+ if len(self.data) < sbixStrikeHeaderFormatSize:
+ from fontTools import ttLib
+
+ raise (
+ ttLib.TTLibError,
+ "Strike header too short: Expected %x, got %x.",
+ ) % (sbixStrikeHeaderFormatSize, len(self.data))
+
+ # read Strike header from raw data
+ sstruct.unpack(
+ sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self
+ )
+
+ # calculate number of glyphs
+ (firstGlyphDataOffset,) = struct.unpack(
+ ">L",
+ self.data[
+ sbixStrikeHeaderFormatSize : sbixStrikeHeaderFormatSize
+ + sbixGlyphDataOffsetFormatSize
+ ],
+ )
+ self.numGlyphs = (
+ firstGlyphDataOffset - sbixStrikeHeaderFormatSize
+ ) // sbixGlyphDataOffsetFormatSize - 1
+ # ^ -1 because there's one more offset than glyphs
+
+ # build offset list for single glyph data offsets
+ self.glyphDataOffsets = []
+ for i in range(
+ self.numGlyphs + 1
+ ): # + 1 because there's one more offset than glyphs
+ start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize
+ (current_offset,) = struct.unpack(
+ ">L", self.data[start : start + sbixGlyphDataOffsetFormatSize]
+ )
+ self.glyphDataOffsets.append(current_offset)
+
+ # iterate through offset list and slice raw data into glyph data records
+ for i in range(self.numGlyphs):
+ current_glyph = Glyph(
+ rawdata=self.data[
+ self.glyphDataOffsets[i] : self.glyphDataOffsets[i + 1]
+ ],
+ gid=i,
+ )
+ current_glyph.decompile(ttFont)
+ self.glyphs[current_glyph.glyphName] = current_glyph
+ del self.glyphDataOffsets
+ del self.numGlyphs
+ del self.data
+
+ def compile(self, ttFont):
+ self.glyphDataOffsets = b""
+ self.bitmapData = b""
+
+ glyphOrder = ttFont.getGlyphOrder()
+
+ # first glyph starts right after the header
+ currentGlyphDataOffset = (
+ sbixStrikeHeaderFormatSize
+ + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1)
+ )
+ for glyphName in glyphOrder:
+ if glyphName in self.glyphs:
+ # we have glyph data for this glyph
+ current_glyph = self.glyphs[glyphName]
+ else:
+ # must add empty glyph data record for this glyph
+ current_glyph = Glyph(glyphName=glyphName)
+ current_glyph.compile(ttFont)
+ current_glyph.glyphDataOffset = currentGlyphDataOffset
+ self.bitmapData += current_glyph.rawdata
+ currentGlyphDataOffset += len(current_glyph.rawdata)
+ self.glyphDataOffsets += sstruct.pack(
+ sbixGlyphDataOffsetFormat, current_glyph
+ )
+
+ # add last "offset", really the end address of the last glyph data record
+ dummy = Glyph()
+ dummy.glyphDataOffset = currentGlyphDataOffset
+ self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy)
+
+ # pack header
+ self.data = sstruct.pack(sbixStrikeHeaderFormat, self)
+ # add offsets and image data after header
+ self.data += self.glyphDataOffsets + self.bitmapData
+
+ def toXML(self, xmlWriter, ttFont):
+ xmlWriter.begintag("strike")
+ xmlWriter.newline()
+ xmlWriter.simpletag("ppem", value=self.ppem)
+ xmlWriter.newline()
+ xmlWriter.simpletag("resolution", value=self.resolution)
+ xmlWriter.newline()
+ glyphOrder = ttFont.getGlyphOrder()
+ for i in range(len(glyphOrder)):
+ if glyphOrder[i] in self.glyphs:
+ self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont)
+ # TODO: what if there are more glyph data records than (glyf table) glyphs?
+ xmlWriter.endtag("strike")
+ xmlWriter.newline()
+
+ def fromXML(self, name, attrs, content, ttFont):
+ if name in ["ppem", "resolution"]:
+ setattr(self, name, safeEval(attrs["value"]))
+ elif name == "glyph":
+ if "graphicType" in attrs:
+ myFormat = safeEval("'''" + attrs["graphicType"] + "'''")
+ else:
+ myFormat = None
+ if "glyphname" in attrs:
+ myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''")
+ elif "name" in attrs:
+ myGlyphName = safeEval("'''" + attrs["name"] + "'''")
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("Glyph must have a glyph name.")
+ if "originOffsetX" in attrs:
+ myOffsetX = safeEval(attrs["originOffsetX"])
+ else:
+ myOffsetX = 0
+ if "originOffsetY" in attrs:
+ myOffsetY = safeEval(attrs["originOffsetY"])
+ else:
+ myOffsetY = 0
+ current_glyph = Glyph(
+ glyphName=myGlyphName,
+ graphicType=myFormat,
+ originOffsetX=myOffsetX,
+ originOffsetY=myOffsetY,
+ )
+ for element in content:
+ if isinstance(element, tuple):
+ name, attrs, content = element
+ current_glyph.fromXML(name, attrs, content, ttFont)
+ current_glyph.compile(ttFont)
+ self.glyphs[current_glyph.glyphName] = current_glyph
+ else:
+ from fontTools import ttLib
+
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
diff --git a/Lib/fontTools/ttLib/tables/ttProgram.py b/Lib/fontTools/ttLib/tables/ttProgram.py
index 72377583..84aa63f3 100644
--- a/Lib/fontTools/ttLib/tables/ttProgram.py
+++ b/Lib/fontTools/ttLib/tables/ttProgram.py
@@ -1,187 +1,197 @@
"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs."""
+from __future__ import annotations
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
import array
from io import StringIO
+from typing import List
import re
import logging
log = logging.getLogger(__name__)
+# fmt: off
+
# first, the list of instructions that eat bytes or words from the instruction stream
streamInstructions = [
#
-# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
+# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
#
- (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn
- (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn
- (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn
- (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn
+ (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn
+ (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn
+ (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn
+ (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn
]
-# next, the list of "normal" instructions
+# next, the list of "normal" instructions
instructions = [
#
-#, opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
+# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
#
- (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p -
- (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n|
- (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2)
- (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 -
- (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue -
- (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b
- (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f -
- (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n)
- (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek
- (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack -
- (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n -
- (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
- (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
- (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
- (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
- (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
- (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
- (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n
- (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2
- (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e
- (0x59, 'EIF', 0, 'EndIf', 0, 0), # - -
- (0x1b, 'ELSE', 0, 'Else', 0, 0), # - -
- (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - -
- (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b
- (0x57, 'EVEN', 0, 'Even', 1, 1), # e b
- (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f -
- (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - -
- (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - -
- (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue -
- (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l -
- (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l -
- (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n)
- (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c
- (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result
- (0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an
- (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py
- (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py
- (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b
- (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b
- (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f -
- (0x58, 'IF', 0, 'If', 1, 0), # e -
- (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v -
- (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue -
- (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p -
- (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - -
- (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset -
- (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset -
- (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset -
- (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count -
- (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b
- (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b
- (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2)
- (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d
- (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p -
- (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p -
- (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p -
- (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2)
- (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek
- (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p -
- (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem
- (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize
- (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p -
- (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64
- (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n
- (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b
- (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e )
- (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2
- (0x56, 'ODD', 0, 'Odd', 1, 1), # e b
- (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b
- (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e -
- (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value
- (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - -
- (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - -
- (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c
- (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2
- (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v
- (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - -
- (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - -
- (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - -
- (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - -
- (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n -
- (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight -
- (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n -
- (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n -
- (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p -
- (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n -
- (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n -
- (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 -
- (0x5f, 'SDS', 0, 'SetDeltaShiftInGState',1, 0), # n -
- (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x -
- (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - -
- (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 -
- (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - -
- (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c -
- (0x32, 'SHP', 1, 'ShiftPointByLastPoint',-1, 0), # p1, p2, ..., ploopvalue -
- (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue -
- (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e -
- (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n -
- (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance -
- (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x -
- (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - -
- (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 -
- (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n -
- (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p -
- (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p -
- (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p -
- (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n -
- (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n -
- (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2)
- (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - -
- (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2
- (0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n -
- (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n -
- (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n -
- (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n -
- (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p -
- (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l -
- (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l -
- (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l -
+ (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p -
+ (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n|
+ (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2)
+ (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 -
+ (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue -
+ (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b
+ (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f -
+ (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n)
+ (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek
+ (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack -
+ (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n -
+ (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
+ (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
+ (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
+ (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
+ (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
+ (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
+ (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n
+ (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2
+ (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e
+ (0x59, 'EIF', 0, 'EndIf', 0, 0), # - -
+ (0x1b, 'ELSE', 0, 'Else', 0, 0), # - -
+ (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - -
+ (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b
+ (0x57, 'EVEN', 0, 'Even', 1, 1), # e b
+ (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f -
+ (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - -
+ (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - -
+ (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue -
+ (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l -
+ (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l -
+ (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n)
+ (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c
+ (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result
+ (0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an
+ (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py
+ (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py
+ (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b
+ (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b
+ (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f -
+ (0x58, 'IF', 0, 'If', 1, 0), # e -
+ (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v -
+ (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue -
+ (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p -
+ (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - -
+ (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset -
+ (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset -
+ (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset -
+ (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count -
+ (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b
+ (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b
+ (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2)
+ (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d
+ (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p -
+ (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p -
+ (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p -
+ (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2)
+ (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek
+ (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p -
+ (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem
+ (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize
+ (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p -
+ (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64
+ (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n
+ (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b
+ (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e )
+ (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2
+ (0x56, 'ODD', 0, 'Odd', 1, 1), # e b
+ (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b
+ (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e -
+ (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value
+ (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - -
+ (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - -
+ (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c
+ (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2
+ (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v
+ (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - -
+ (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - -
+ (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - -
+ (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - -
+ (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n -
+ (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight -
+ (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n -
+ (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n -
+ (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p -
+ (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n -
+ (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n -
+ (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 -
+ (0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n -
+ (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x -
+ (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - -
+ (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 -
+ (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - -
+ (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c -
+ (0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue -
+ (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue -
+ (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e -
+ (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n -
+ (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance -
+ (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x -
+ (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - -
+ (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 -
+ (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n -
+ (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p -
+ (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p -
+ (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p -
+ (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n -
+ (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n -
+ (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2)
+ (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - -
+ (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2
+ (0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n -
+ (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n -
+ (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n -
+ (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n -
+ (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p -
+ (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l -
+ (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l -
+ (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l -
]
+# fmt: on
+
def bitRepr(value, bits):
- s = ""
- for i in range(bits):
- s = "01"[value & 0x1] + s
- value = value >> 1
- return s
+ s = ""
+ for i in range(bits):
+ s = "01"[value & 0x1] + s
+ value = value >> 1
+ return s
_mnemonicPat = re.compile(r"[A-Z][A-Z0-9]*$")
+
def _makeDict(instructionList):
- opcodeDict = {}
- mnemonicDict = {}
- for op, mnemonic, argBits, name, pops, pushes in instructionList:
- assert _mnemonicPat.match(mnemonic)
- mnemonicDict[mnemonic] = op, argBits, name
- if argBits:
- argoffset = op
- for i in range(1 << argBits):
- opcodeDict[op+i] = mnemonic, argBits, argoffset, name
- else:
- opcodeDict[op] = mnemonic, 0, 0, name
- return opcodeDict, mnemonicDict
+ opcodeDict = {}
+ mnemonicDict = {}
+ for op, mnemonic, argBits, name, pops, pushes in instructionList:
+ assert _mnemonicPat.match(mnemonic)
+ mnemonicDict[mnemonic] = op, argBits, name
+ if argBits:
+ argoffset = op
+ for i in range(1 << argBits):
+ opcodeDict[op + i] = mnemonic, argBits, argoffset, name
+ else:
+ opcodeDict[op] = mnemonic, 0, 0, name
+ return opcodeDict, mnemonicDict
+
streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions)
opcodeDict, mnemonicDict = _makeDict(instructions)
+
class tt_instructions_error(Exception):
- def __init__(self, error):
- self.error = error
- def __str__(self):
- return "TT instructions error: %s" % repr(self.error)
+ def __init__(self, error):
+ self.error = error
+
+ def __str__(self):
+ return "TT instructions error: %s" % repr(self.error)
_comment = r"/\*.*?\*/"
@@ -197,348 +207,387 @@ _pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/")
_indentRE = re.compile(r"^FDEF|IF|ELSE\[ \]\t.+")
_unindentRE = re.compile(r"^ELSE|ENDF|EIF\[ \]\t.+")
+
def _skipWhite(data, pos):
- m = _whiteRE.match(data, pos)
- newPos = m.regs[0][1]
- assert newPos >= pos
- return newPos
+ m = _whiteRE.match(data, pos)
+ newPos = m.regs[0][1]
+ assert newPos >= pos
+ return newPos
class Program(object):
-
- def __init__(self):
- pass
-
- def fromBytecode(self, bytecode):
- self.bytecode = array.array("B", bytecode)
- if hasattr(self, "assembly"):
- del self.assembly
-
- def fromAssembly(self, assembly):
- self.assembly = assembly
- if hasattr(self, "bytecode"):
- del self.bytecode
-
- def getBytecode(self):
- if not hasattr(self, "bytecode"):
- self._assemble()
- return self.bytecode.tobytes()
-
- def getAssembly(self, preserve=True):
- if not hasattr(self, "assembly"):
- self._disassemble(preserve=preserve)
- return self.assembly
-
- def toXML(self, writer, ttFont):
- if not hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions:
- try:
- assembly = self.getAssembly()
- except:
- import traceback
- tmp = StringIO()
- traceback.print_exc(file=tmp)
- msg = "An exception occurred during the decompilation of glyph program:\n\n"
- msg += tmp.getvalue()
- log.error(msg)
- writer.begintag("bytecode")
- writer.newline()
- writer.comment(msg.strip())
- writer.newline()
- writer.dumphex(self.getBytecode())
- writer.endtag("bytecode")
- writer.newline()
- else:
- if not assembly:
- return
- writer.begintag("assembly")
- writer.newline()
- i = 0
- indent = 0
- nInstr = len(assembly)
- while i < nInstr:
- instr = assembly[i]
- if _unindentRE.match(instr):
- indent -= 1
- writer.write(writer.indentwhite * indent)
- writer.write(instr)
- writer.newline()
- m = _pushCountPat.match(instr)
- i = i + 1
- if m:
- nValues = int(m.group(1))
- line = []
- j = 0
- for j in range(nValues):
- if j and not (j % 25):
- writer.write(writer.indentwhite * indent)
- writer.write(' '.join(line))
- writer.newline()
- line = []
- line.append(assembly[i+j])
- writer.write(writer.indentwhite * indent)
- writer.write(' '.join(line))
- writer.newline()
- i = i + j + 1
- if _indentRE.match(instr):
- indent += 1
- writer.endtag("assembly")
- writer.newline()
- else:
- bytecode = self.getBytecode()
- if not bytecode:
- return
- writer.begintag("bytecode")
- writer.newline()
- writer.dumphex(bytecode)
- writer.endtag("bytecode")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "assembly":
- self.fromAssembly(strjoin(content))
- self._assemble()
- del self.assembly
- else:
- assert name == "bytecode"
- self.fromBytecode(readHex(content))
-
- def _assemble(self):
- assembly = getattr(self, 'assembly', [])
- if isinstance(assembly, type([])):
- assembly = ' '.join(assembly)
- bytecode = []
- push = bytecode.append
- lenAssembly = len(assembly)
- pos = _skipWhite(assembly, 0)
- while pos < lenAssembly:
- m = _tokenRE.match(assembly, pos)
- if m is None:
- raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos-5:pos+15])
- dummy, mnemonic, arg, number, comment = m.groups()
- pos = m.regs[0][1]
- if comment:
- pos = _skipWhite(assembly, pos)
- continue
-
- arg = arg.strip()
- if mnemonic.startswith("INSTR"):
- # Unknown instruction
- op = int(mnemonic[5:])
- push(op)
- elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
- op, argBits, name = mnemonicDict[mnemonic]
- if len(arg) != argBits:
- raise tt_instructions_error("Incorrect number of argument bits (%s[%s])" % (mnemonic, arg))
- if arg:
- arg = binary2num(arg)
- push(op + arg)
- else:
- push(op)
- else:
- args = []
- pos = _skipWhite(assembly, pos)
- while pos < lenAssembly:
- m = _tokenRE.match(assembly, pos)
- if m is None:
- raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos:pos+15])
- dummy, _mnemonic, arg, number, comment = m.groups()
- if number is None and comment is None:
- break
- pos = m.regs[0][1]
- pos = _skipWhite(assembly, pos)
- if comment is not None:
- continue
- args.append(int(number))
- nArgs = len(args)
- if mnemonic == "PUSH":
- # Automatically choose the most compact representation
- nWords = 0
- while nArgs:
- while nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255):
- nWords += 1
- nBytes = 0
- while nWords+nBytes < nArgs and nBytes < 255 and 0 <= args[nWords+nBytes] <= 255:
- nBytes += 1
- if nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs:
- # Will write bytes as words
- nWords += nBytes
- continue
-
- # Write words
- if nWords:
- if nWords <= 8:
- op, argBits, name = streamMnemonicDict["PUSHW"]
- op = op + nWords - 1
- push(op)
- else:
- op, argBits, name = streamMnemonicDict["NPUSHW"]
- push(op)
- push(nWords)
- for value in args[:nWords]:
- assert -32768 <= value < 32768, "PUSH value out of range %d" % value
- push((value >> 8) & 0xff)
- push(value & 0xff)
-
- # Write bytes
- if nBytes:
- pass
- if nBytes <= 8:
- op, argBits, name = streamMnemonicDict["PUSHB"]
- op = op + nBytes - 1
- push(op)
- else:
- op, argBits, name = streamMnemonicDict["NPUSHB"]
- push(op)
- push(nBytes)
- for value in args[nWords:nWords+nBytes]:
- push(value)
-
- nTotal = nWords + nBytes
- args = args[nTotal:]
- nArgs -= nTotal
- nWords = 0
- else:
- # Write exactly what we've been asked to
- words = mnemonic[-1] == "W"
- op, argBits, name = streamMnemonicDict[mnemonic]
- if mnemonic[0] != "N":
- assert nArgs <= 8, nArgs
- op = op + nArgs - 1
- push(op)
- else:
- assert nArgs < 256
- push(op)
- push(nArgs)
- if words:
- for value in args:
- assert -32768 <= value < 32768, "PUSHW value out of range %d" % value
- push((value >> 8) & 0xff)
- push(value & 0xff)
- else:
- for value in args:
- assert 0 <= value < 256, "PUSHB value out of range %d" % value
- push(value)
-
- pos = _skipWhite(assembly, pos)
-
- if bytecode:
- assert max(bytecode) < 256 and min(bytecode) >= 0
- self.bytecode = array.array("B", bytecode)
-
- def _disassemble(self, preserve=False):
- assembly = []
- i = 0
- bytecode = getattr(self, 'bytecode', [])
- numBytecode = len(bytecode)
- while i < numBytecode:
- op = bytecode[i]
- try:
- mnemonic, argBits, argoffset, name = opcodeDict[op]
- except KeyError:
- if op in streamOpcodeDict:
- values = []
-
- # Merge consecutive PUSH operations
- while bytecode[i] in streamOpcodeDict:
- op = bytecode[i]
- mnemonic, argBits, argoffset, name = streamOpcodeDict[op]
- words = mnemonic[-1] == "W"
- if argBits:
- nValues = op - argoffset + 1
- else:
- i = i + 1
- nValues = bytecode[i]
- i = i + 1
- assert nValues > 0
- if not words:
- for j in range(nValues):
- value = bytecode[i]
- values.append(repr(value))
- i = i + 1
- else:
- for j in range(nValues):
- # cast to signed int16
- value = (bytecode[i] << 8) | bytecode[i+1]
- if value >= 0x8000:
- value = value - 0x10000
- values.append(repr(value))
- i = i + 2
- if preserve:
- break
-
- if not preserve:
- mnemonic = "PUSH"
- nValues = len(values)
- if nValues == 1:
- assembly.append("%s[ ] /* 1 value pushed */" % mnemonic)
- else:
- assembly.append("%s[ ] /* %s values pushed */" % (mnemonic, nValues))
- assembly.extend(values)
- else:
- assembly.append("INSTR%d[ ]" % op)
- i = i + 1
- else:
- if argBits:
- assembly.append(mnemonic + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name))
- else:
- assembly.append(mnemonic + "[ ] /* %s */" % name)
- i = i + 1
- self.assembly = assembly
-
- def __bool__(self):
- """
- >>> p = Program()
- >>> bool(p)
- False
- >>> bc = array.array("B", [0])
- >>> p.fromBytecode(bc)
- >>> bool(p)
- True
- >>> p.bytecode.pop()
- 0
- >>> bool(p)
- False
-
- >>> p = Program()
- >>> asm = ['SVTCA[0]']
- >>> p.fromAssembly(asm)
- >>> bool(p)
- True
- >>> p.assembly.pop()
- 'SVTCA[0]'
- >>> bool(p)
- False
- """
- return ((hasattr(self, 'assembly') and len(self.assembly) > 0) or
- (hasattr(self, 'bytecode') and len(self.bytecode) > 0))
-
- __nonzero__ = __bool__
-
- def __eq__(self, other):
- if type(self) != type(other):
- return NotImplemented
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- result = self.__eq__(other)
- return result if result is NotImplemented else not result
+ def __init__(self) -> None:
+ pass
+
+ def fromBytecode(self, bytecode: bytes) -> None:
+ self.bytecode = array.array("B", bytecode)
+ if hasattr(self, "assembly"):
+ del self.assembly
+
+ def fromAssembly(self, assembly: List[str] | str) -> None:
+ if isinstance(assembly, list):
+ self.assembly = assembly
+ elif isinstance(assembly, str):
+ self.assembly = assembly.splitlines()
+ else:
+ raise TypeError(f"expected str or List[str], got {type(assembly).__name__}")
+ if hasattr(self, "bytecode"):
+ del self.bytecode
+
+ def getBytecode(self) -> bytes:
+ if not hasattr(self, "bytecode"):
+ self._assemble()
+ return self.bytecode.tobytes()
+
+ def getAssembly(self, preserve=True) -> List[str]:
+ if not hasattr(self, "assembly"):
+ self._disassemble(preserve=preserve)
+ return self.assembly
+
+ def toXML(self, writer, ttFont) -> None:
+ if (
+ not hasattr(ttFont, "disassembleInstructions")
+ or ttFont.disassembleInstructions
+ ):
+ try:
+ assembly = self.getAssembly()
+ except:
+ import traceback
+
+ tmp = StringIO()
+ traceback.print_exc(file=tmp)
+ msg = "An exception occurred during the decompilation of glyph program:\n\n"
+ msg += tmp.getvalue()
+ log.error(msg)
+ writer.begintag("bytecode")
+ writer.newline()
+ writer.comment(msg.strip())
+ writer.newline()
+ writer.dumphex(self.getBytecode())
+ writer.endtag("bytecode")
+ writer.newline()
+ else:
+ if not assembly:
+ return
+ writer.begintag("assembly")
+ writer.newline()
+ i = 0
+ indent = 0
+ nInstr = len(assembly)
+ while i < nInstr:
+ instr = assembly[i]
+ if _unindentRE.match(instr):
+ indent -= 1
+ writer.write(writer.indentwhite * indent)
+ writer.write(instr)
+ writer.newline()
+ m = _pushCountPat.match(instr)
+ i = i + 1
+ if m:
+ nValues = int(m.group(1))
+ line: List[str] = []
+ j = 0
+ for j in range(nValues):
+ if j and not (j % 25):
+ writer.write(writer.indentwhite * indent)
+ writer.write(" ".join(line))
+ writer.newline()
+ line = []
+ line.append(assembly[i + j])
+ writer.write(writer.indentwhite * indent)
+ writer.write(" ".join(line))
+ writer.newline()
+ i = i + j + 1
+ if _indentRE.match(instr):
+ indent += 1
+ writer.endtag("assembly")
+ writer.newline()
+ else:
+ bytecode = self.getBytecode()
+ if not bytecode:
+ return
+ writer.begintag("bytecode")
+ writer.newline()
+ writer.dumphex(bytecode)
+ writer.endtag("bytecode")
+ writer.newline()
+
+ def fromXML(self, name, attrs, content, ttFont) -> None:
+ if name == "assembly":
+ self.fromAssembly(strjoin(content))
+ self._assemble()
+ del self.assembly
+ else:
+ assert name == "bytecode"
+ self.fromBytecode(readHex(content))
+
+ def _assemble(self) -> None:
+ assembly = " ".join(getattr(self, "assembly", []))
+ bytecode: List[int] = []
+ push = bytecode.append
+ lenAssembly = len(assembly)
+ pos = _skipWhite(assembly, 0)
+ while pos < lenAssembly:
+ m = _tokenRE.match(assembly, pos)
+ if m is None:
+ raise tt_instructions_error(
+ "Syntax error in TT program (%s)" % assembly[pos - 5 : pos + 15]
+ )
+ dummy, mnemonic, arg, number, comment = m.groups()
+ pos = m.regs[0][1]
+ if comment:
+ pos = _skipWhite(assembly, pos)
+ continue
+
+ arg = arg.strip()
+ if mnemonic.startswith("INSTR"):
+ # Unknown instruction
+ op = int(mnemonic[5:])
+ push(op)
+ elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
+ op, argBits, name = mnemonicDict[mnemonic]
+ if len(arg) != argBits:
+ raise tt_instructions_error(
+ "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)
+ )
+ if arg:
+ arg = binary2num(arg)
+ push(op + arg)
+ else:
+ push(op)
+ else:
+ args = []
+ pos = _skipWhite(assembly, pos)
+ while pos < lenAssembly:
+ m = _tokenRE.match(assembly, pos)
+ if m is None:
+ raise tt_instructions_error(
+ "Syntax error in TT program (%s)" % assembly[pos : pos + 15]
+ )
+ dummy, _mnemonic, arg, number, comment = m.groups()
+ if number is None and comment is None:
+ break
+ pos = m.regs[0][1]
+ pos = _skipWhite(assembly, pos)
+ if comment is not None:
+ continue
+ args.append(int(number))
+ nArgs = len(args)
+ if mnemonic == "PUSH":
+ # Automatically choose the most compact representation
+ nWords = 0
+ while nArgs:
+ while (
+ nWords < nArgs
+ and nWords < 255
+ and not (0 <= args[nWords] <= 255)
+ ):
+ nWords += 1
+ nBytes = 0
+ while (
+ nWords + nBytes < nArgs
+ and nBytes < 255
+ and 0 <= args[nWords + nBytes] <= 255
+ ):
+ nBytes += 1
+ if (
+ nBytes < 2
+ and nWords + nBytes < 255
+ and nWords + nBytes != nArgs
+ ):
+ # Will write bytes as words
+ nWords += nBytes
+ continue
+
+ # Write words
+ if nWords:
+ if nWords <= 8:
+ op, argBits, name = streamMnemonicDict["PUSHW"]
+ op = op + nWords - 1
+ push(op)
+ else:
+ op, argBits, name = streamMnemonicDict["NPUSHW"]
+ push(op)
+ push(nWords)
+ for value in args[:nWords]:
+ assert -32768 <= value < 32768, (
+ "PUSH value out of range %d" % value
+ )
+ push((value >> 8) & 0xFF)
+ push(value & 0xFF)
+
+ # Write bytes
+ if nBytes:
+ pass
+ if nBytes <= 8:
+ op, argBits, name = streamMnemonicDict["PUSHB"]
+ op = op + nBytes - 1
+ push(op)
+ else:
+ op, argBits, name = streamMnemonicDict["NPUSHB"]
+ push(op)
+ push(nBytes)
+ for value in args[nWords : nWords + nBytes]:
+ push(value)
+
+ nTotal = nWords + nBytes
+ args = args[nTotal:]
+ nArgs -= nTotal
+ nWords = 0
+ else:
+ # Write exactly what we've been asked to
+ words = mnemonic[-1] == "W"
+ op, argBits, name = streamMnemonicDict[mnemonic]
+ if mnemonic[0] != "N":
+ assert nArgs <= 8, nArgs
+ op = op + nArgs - 1
+ push(op)
+ else:
+ assert nArgs < 256
+ push(op)
+ push(nArgs)
+ if words:
+ for value in args:
+ assert -32768 <= value < 32768, (
+ "PUSHW value out of range %d" % value
+ )
+ push((value >> 8) & 0xFF)
+ push(value & 0xFF)
+ else:
+ for value in args:
+ assert 0 <= value < 256, (
+ "PUSHB value out of range %d" % value
+ )
+ push(value)
+
+ pos = _skipWhite(assembly, pos)
+
+ if bytecode:
+ assert max(bytecode) < 256 and min(bytecode) >= 0
+ self.bytecode = array.array("B", bytecode)
+
+ def _disassemble(self, preserve=False) -> None:
+ assembly = []
+ i = 0
+ bytecode = getattr(self, "bytecode", [])
+ numBytecode = len(bytecode)
+ while i < numBytecode:
+ op = bytecode[i]
+ try:
+ mnemonic, argBits, argoffset, name = opcodeDict[op]
+ except KeyError:
+ if op in streamOpcodeDict:
+ values = []
+
+ # Merge consecutive PUSH operations
+ while bytecode[i] in streamOpcodeDict:
+ op = bytecode[i]
+ mnemonic, argBits, argoffset, name = streamOpcodeDict[op]
+ words = mnemonic[-1] == "W"
+ if argBits:
+ nValues = op - argoffset + 1
+ else:
+ i = i + 1
+ nValues = bytecode[i]
+ i = i + 1
+ assert nValues > 0
+ if not words:
+ for j in range(nValues):
+ value = bytecode[i]
+ values.append(repr(value))
+ i = i + 1
+ else:
+ for j in range(nValues):
+ # cast to signed int16
+ value = (bytecode[i] << 8) | bytecode[i + 1]
+ if value >= 0x8000:
+ value = value - 0x10000
+ values.append(repr(value))
+ i = i + 2
+ if preserve:
+ break
+
+ if not preserve:
+ mnemonic = "PUSH"
+ nValues = len(values)
+ if nValues == 1:
+ assembly.append("%s[ ] /* 1 value pushed */" % mnemonic)
+ else:
+ assembly.append(
+ "%s[ ] /* %s values pushed */" % (mnemonic, nValues)
+ )
+ assembly.extend(values)
+ else:
+ assembly.append("INSTR%d[ ]" % op)
+ i = i + 1
+ else:
+ if argBits:
+ assembly.append(
+ mnemonic
+ + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)
+ )
+ else:
+ assembly.append(mnemonic + "[ ] /* %s */" % name)
+ i = i + 1
+ self.assembly = assembly
+
+ def __bool__(self) -> bool:
+ """
+ >>> p = Program()
+ >>> bool(p)
+ False
+ >>> bc = array.array("B", [0])
+ >>> p.fromBytecode(bc)
+ >>> bool(p)
+ True
+ >>> p.bytecode.pop()
+ 0
+ >>> bool(p)
+ False
+
+ >>> p = Program()
+ >>> asm = ['SVTCA[0]']
+ >>> p.fromAssembly(asm)
+ >>> bool(p)
+ True
+ >>> p.assembly.pop()
+ 'SVTCA[0]'
+ >>> bool(p)
+ False
+ """
+ return (hasattr(self, "assembly") and len(self.assembly) > 0) or (
+ hasattr(self, "bytecode") and len(self.bytecode) > 0
+ )
+
+ __nonzero__ = __bool__
+
+ def __eq__(self, other) -> bool:
+ if type(self) != type(other):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other) -> bool:
+ result = self.__eq__(other)
+ return result if result is NotImplemented else not result
def _test():
- """
- >>> _test()
- True
- """
+ """
+ >>> _test()
+ True
+ """
- bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-"""
+ bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-"""
+
+ p = Program()
+ p.fromBytecode(bc)
+ asm = p.getAssembly(preserve=True)
+ p.fromAssembly(asm)
+ print(bc == p.getBytecode())
- p = Program()
- p.fromBytecode(bc)
- asm = p.getAssembly(preserve=True)
- p.fromAssembly(asm)
- print(bc == p.getBytecode())
if __name__ == "__main__":
- import sys
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/ttLib/ttCollection.py b/Lib/fontTools/ttLib/ttCollection.py
index f0922127..70ed4b7a 100644
--- a/Lib/fontTools/ttLib/ttCollection.py
+++ b/Lib/fontTools/ttLib/ttCollection.py
@@ -9,118 +9,118 @@ log = logging.getLogger(__name__)
class TTCollection(object):
- """Object representing a TrueType Collection / OpenType Collection.
- The main API is self.fonts being a list of TTFont instances.
-
- If shareTables is True, then different fonts in the collection
- might point to the same table object if the data for the table was
- the same in the font file. Note, however, that this might result
- in suprises and incorrect behavior if the different fonts involved
- have different GlyphOrder. Use only if you know what you are doing.
- """
-
- def __init__(self, file=None, shareTables=False, **kwargs):
- fonts = self.fonts = []
- if file is None:
- return
-
- assert 'fontNumber' not in kwargs, kwargs
-
- closeStream = False
- if not hasattr(file, "read"):
- file = open(file, "rb")
- closeStream = True
-
- tableCache = {} if shareTables else None
-
- header = readTTCHeader(file)
- for i in range(header.numFonts):
- font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs)
- fonts.append(font)
-
- # don't close file if lazy=True, as the TTFont hold a reference to the original
- # file; the file will be closed once the TTFonts are closed in the
- # TTCollection.close(). We still want to close the file if lazy is None or
- # False, because in that case the TTFont no longer need the original file
- # and we want to avoid 'ResourceWarning: unclosed file'.
- if not kwargs.get("lazy") and closeStream:
- file.close()
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- self.close()
-
- def close(self):
- for font in self.fonts:
- font.close()
-
- def save(self, file, shareTables=True):
- """Save the font to disk. Similarly to the constructor,
- the 'file' argument can be either a pathname or a writable
- file object.
- """
- if not hasattr(file, "write"):
- final = None
- file = open(file, "wb")
- else:
- # assume "file" is a writable file object
- # write to a temporary stream to allow saving to unseekable streams
- final = file
- file = BytesIO()
-
- tableCache = {} if shareTables else None
-
- offsets_offset = writeTTCHeader(file, len(self.fonts))
- offsets = []
- for font in self.fonts:
- offsets.append(file.tell())
- font._save(file, tableCache=tableCache)
- file.seek(0,2)
-
- file.seek(offsets_offset)
- file.write(struct.pack(">%dL" % len(self.fonts), *offsets))
-
- if final:
- final.write(file.getvalue())
- file.close()
-
- def saveXML(self, fileOrPath, newlinestr="\n", writeVersion=True, **kwargs):
-
- from fontTools.misc import xmlWriter
- writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
-
- if writeVersion:
- from fontTools import version
- version = ".".join(version.split('.')[:2])
- writer.begintag("ttCollection", ttLibVersion=version)
- else:
- writer.begintag("ttCollection")
- writer.newline()
- writer.newline()
-
- for font in self.fonts:
- font._saveXML(writer, writeVersion=False, **kwargs)
- writer.newline()
-
- writer.endtag("ttCollection")
- writer.newline()
-
- writer.close()
-
-
- def __getitem__(self, item):
- return self.fonts[item]
-
- def __setitem__(self, item, value):
- self.fonts[item] = value
-
- def __delitem__(self, item):
- return self.fonts[item]
+ """Object representing a TrueType Collection / OpenType Collection.
+ The main API is self.fonts being a list of TTFont instances.
+
+ If shareTables is True, then different fonts in the collection
+ might point to the same table object if the data for the table was
+ the same in the font file. Note, however, that this might result
+ in suprises and incorrect behavior if the different fonts involved
+ have different GlyphOrder. Use only if you know what you are doing.
+ """
+
+ def __init__(self, file=None, shareTables=False, **kwargs):
+ fonts = self.fonts = []
+ if file is None:
+ return
+
+ assert "fontNumber" not in kwargs, kwargs
+
+ closeStream = False
+ if not hasattr(file, "read"):
+ file = open(file, "rb")
+ closeStream = True
+
+ tableCache = {} if shareTables else None
+
+ header = readTTCHeader(file)
+ for i in range(header.numFonts):
+ font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs)
+ fonts.append(font)
+
+ # don't close file if lazy=True, as the TTFont hold a reference to the original
+ # file; the file will be closed once the TTFonts are closed in the
+ # TTCollection.close(). We still want to close the file if lazy is None or
+ # False, because in that case the TTFont no longer need the original file
+ # and we want to avoid 'ResourceWarning: unclosed file'.
+ if not kwargs.get("lazy") and closeStream:
+ file.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def close(self):
+ for font in self.fonts:
+ font.close()
+
+ def save(self, file, shareTables=True):
+ """Save the font to disk. Similarly to the constructor,
+ the 'file' argument can be either a pathname or a writable
+ file object.
+ """
+ if not hasattr(file, "write"):
+ final = None
+ file = open(file, "wb")
+ else:
+ # assume "file" is a writable file object
+ # write to a temporary stream to allow saving to unseekable streams
+ final = file
+ file = BytesIO()
+
+ tableCache = {} if shareTables else None
+
+ offsets_offset = writeTTCHeader(file, len(self.fonts))
+ offsets = []
+ for font in self.fonts:
+ offsets.append(file.tell())
+ font._save(file, tableCache=tableCache)
+ file.seek(0, 2)
+
+ file.seek(offsets_offset)
+ file.write(struct.pack(">%dL" % len(self.fonts), *offsets))
+
+ if final:
+ final.write(file.getvalue())
+ file.close()
+
+ def saveXML(self, fileOrPath, newlinestr="\n", writeVersion=True, **kwargs):
+ from fontTools.misc import xmlWriter
+
+ writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
+
+ if writeVersion:
+ from fontTools import version
+
+ version = ".".join(version.split(".")[:2])
+ writer.begintag("ttCollection", ttLibVersion=version)
+ else:
+ writer.begintag("ttCollection")
+ writer.newline()
+ writer.newline()
+
+ for font in self.fonts:
+ font._saveXML(writer, writeVersion=False, **kwargs)
+ writer.newline()
+
+ writer.endtag("ttCollection")
+ writer.newline()
+
+ writer.close()
+
+ def __getitem__(self, item):
+ return self.fonts[item]
+
+ def __setitem__(self, item, value):
+ self.fonts[item] = value
+
+ def __delitem__(self, item):
+ return self.fonts[item]
- def __len__(self):
- return len(self.fonts)
+ def __len__(self):
+ return len(self.fonts)
- def __iter__(self):
- return iter(self.fonts)
+ def __iter__(self):
+ return iter(self.fonts)
diff --git a/Lib/fontTools/ttLib/ttFont.py b/Lib/fontTools/ttLib/ttFont.py
index 327d113f..6a9ca098 100644
--- a/Lib/fontTools/ttLib/ttFont.py
+++ b/Lib/fontTools/ttLib/ttFont.py
@@ -4,793 +4,890 @@ from fontTools.misc.configTools import AbstractConfig
from fontTools.misc.textTools import Tag, byteord, tostr
from fontTools.misc.loggingTools import deprecateArgument
from fontTools.ttLib import TTLibError
-from fontTools.ttLib.ttGlyphSet import (
- _TTGlyphSet, _TTGlyph,
- _TTGlyphCFF, _TTGlyphGlyf,
- _TTVarGlyphSet,
-)
+from fontTools.ttLib.ttGlyphSet import _TTGlyph, _TTGlyphSetCFF, _TTGlyphSetGlyf
from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter
-from io import BytesIO, StringIO
+from io import BytesIO, StringIO, UnsupportedOperation
import os
import logging
import traceback
log = logging.getLogger(__name__)
+
class TTFont(object):
- """Represents a TrueType font.
-
- The object manages file input and output, and offers a convenient way of
- accessing tables. Tables will be only decompiled when necessary, ie. when
- they're actually accessed. This means that simple operations can be extremely fast.
-
- Example usage::
-
- >> from fontTools import ttLib
- >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file
- >> tt['maxp'].numGlyphs
- 242
- >> tt['OS/2'].achVendID
- 'B&H\000'
- >> tt['head'].unitsPerEm
- 2048
-
- For details of the objects returned when accessing each table, see :ref:`tables`.
- To add a table to the font, use the :py:func:`newTable` function::
-
- >> os2 = newTable("OS/2")
- >> os2.version = 4
- >> # set other attributes
- >> font["OS/2"] = os2
-
- TrueType fonts can also be serialized to and from XML format (see also the
- :ref:`ttx` binary)::
-
- >> tt.saveXML("afont.ttx")
- Dumping 'LTSH' table...
- Dumping 'OS/2' table...
- [...]
-
- >> tt2 = ttLib.TTFont() # Create a new font object
- >> tt2.importXML("afont.ttx")
- >> tt2['maxp'].numGlyphs
- 242
-
- The TTFont object may be used as a context manager; this will cause the file
- reader to be closed after the context ``with`` block is exited::
-
- with TTFont(filename) as f:
- # Do stuff
-
- Args:
- file: When reading a font from disk, either a pathname pointing to a file,
- or a readable file object.
- res_name_or_index: If running on a Macintosh, either a sfnt resource name or
- an sfnt resource index number. If the index number is zero, TTLib will
- autodetect whether the file is a flat file or a suitcase. (If it is a suitcase,
- only the first 'sfnt' resource will be read.)
- sfntVersion (str): When constructing a font object from scratch, sets the four-byte
- sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create
- an OpenType file, use ``OTTO``.
- flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2
- file.
- checkChecksums (int): How checksum data should be treated. Default is 0
- (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to
- raise an exception if any wrong checksums are found.
- recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``,
- ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save.
- Also compiles the glyphs on importing, which saves memory consumption and
- time.
- ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation
- will be ignored, and the binary data will be returned for those tables instead.
- recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in
- the ``head`` table on save.
- fontNumber (int): The index of the font in a TrueType Collection file.
- lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon
- access only. If it is set to False, many data structures are loaded immediately.
- The default is ``lazy=None`` which is somewhere in between.
- """
-
- def __init__(self, file=None, res_name_or_index=None,
- sfntVersion="\000\001\000\000", flavor=None, checkChecksums=0,
- verbose=None, recalcBBoxes=True, allowVID=NotImplemented, ignoreDecompileErrors=False,
- recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None,
- _tableCache=None, cfg={}):
- for name in ("verbose", "quiet"):
- val = locals().get(name)
- if val is not None:
- deprecateArgument(name, "configure logging instead")
- setattr(self, name, val)
-
- self.lazy = lazy
- self.recalcBBoxes = recalcBBoxes
- self.recalcTimestamp = recalcTimestamp
- self.tables = {}
- self.reader = None
- self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg)
- self.ignoreDecompileErrors = ignoreDecompileErrors
-
- if not file:
- self.sfntVersion = sfntVersion
- self.flavor = flavor
- self.flavorData = None
- return
- if not hasattr(file, "read"):
- closeStream = True
- # assume file is a string
- if res_name_or_index is not None:
- # see if it contains 'sfnt' resources in the resource or data fork
- from . import macUtils
- if res_name_or_index == 0:
- if macUtils.getSFNTResIndices(file):
- # get the first available sfnt font.
- file = macUtils.SFNTResourceReader(file, 1)
- else:
- file = open(file, "rb")
- else:
- file = macUtils.SFNTResourceReader(file, res_name_or_index)
- else:
- file = open(file, "rb")
- else:
- # assume "file" is a readable file object
- closeStream = False
- file.seek(0)
-
- if not self.lazy:
- # read input file in memory and wrap a stream around it to allow overwriting
- file.seek(0)
- tmp = BytesIO(file.read())
- if hasattr(file, 'name'):
- # save reference to input file name
- tmp.name = file.name
- if closeStream:
- file.close()
- file = tmp
- self._tableCache = _tableCache
- self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
- self.sfntVersion = self.reader.sfntVersion
- self.flavor = self.reader.flavor
- self.flavorData = self.reader.flavorData
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- self.close()
-
- def close(self):
- """If we still have a reader object, close it."""
- if self.reader is not None:
- self.reader.close()
-
- def save(self, file, reorderTables=True):
- """Save the font to disk.
-
- Args:
- file: Similarly to the constructor, can be either a pathname or a writable
- file object.
- reorderTables (Option[bool]): If true (the default), reorder the tables,
- sorting them by tag (recommended by the OpenType specification). If
- false, retain the original font order. If None, reorder by table
- dependency (fastest).
- """
- if not hasattr(file, "write"):
- if self.lazy and self.reader.file.name == file:
- raise TTLibError(
- "Can't overwrite TTFont when 'lazy' attribute is True")
- createStream = True
- else:
- # assume "file" is a writable file object
- createStream = False
-
- tmp = BytesIO()
-
- writer_reordersTables = self._save(tmp)
-
- if not (reorderTables is None or writer_reordersTables or
- (reorderTables is False and self.reader is None)):
- if reorderTables is False:
- # sort tables using the original font's order
- tableOrder = list(self.reader.keys())
- else:
- # use the recommended order from the OpenType specification
- tableOrder = None
- tmp.flush()
- tmp2 = BytesIO()
- reorderFontTables(tmp, tmp2, tableOrder)
- tmp.close()
- tmp = tmp2
-
- if createStream:
- # "file" is a path
- with open(file, "wb") as file:
- file.write(tmp.getvalue())
- else:
- file.write(tmp.getvalue())
-
- tmp.close()
-
- def _save(self, file, tableCache=None):
- """Internal function, to be shared by save() and TTCollection.save()"""
-
- if self.recalcTimestamp and 'head' in self:
- self['head'] # make sure 'head' is loaded so the recalculation is actually done
-
- tags = list(self.keys())
- if "GlyphOrder" in tags:
- tags.remove("GlyphOrder")
- numTables = len(tags)
- # write to a temporary stream to allow saving to unseekable streams
- writer = SFNTWriter(file, numTables, self.sfntVersion, self.flavor, self.flavorData)
-
- done = []
- for tag in tags:
- self._writeTable(tag, writer, done, tableCache)
-
- writer.close()
-
- return writer.reordersTables()
-
- def saveXML(self, fileOrPath, newlinestr="\n", **kwargs):
- """Export the font as TTX (an XML-based text file), or as a series of text
- files when splitTables is true. In the latter case, the 'fileOrPath'
- argument should be a path to a directory.
- The 'tables' argument must either be false (dump all tables) or a
- list of tables to dump. The 'skipTables' argument may be a list of tables
- to skip, but only when the 'tables' argument is false.
- """
-
- writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
- self._saveXML(writer, **kwargs)
- writer.close()
-
- def _saveXML(self, writer,
- writeVersion=True,
- quiet=None, tables=None, skipTables=None, splitTables=False,
- splitGlyphs=False, disassembleInstructions=True,
- bitmapGlyphDataFormat='raw'):
-
- if quiet is not None:
- deprecateArgument("quiet", "configure logging instead")
-
- self.disassembleInstructions = disassembleInstructions
- self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
- if not tables:
- tables = list(self.keys())
- if "GlyphOrder" not in tables:
- tables = ["GlyphOrder"] + tables
- if skipTables:
- for tag in skipTables:
- if tag in tables:
- tables.remove(tag)
- numTables = len(tables)
-
- if writeVersion:
- from fontTools import version
- version = ".".join(version.split('.')[:2])
- writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1],
- ttLibVersion=version)
- else:
- writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1])
- writer.newline()
-
- # always splitTables if splitGlyphs is enabled
- splitTables = splitTables or splitGlyphs
-
- if not splitTables:
- writer.newline()
- else:
- path, ext = os.path.splitext(writer.filename)
- fileNameTemplate = path + ".%s" + ext
-
- for i in range(numTables):
- tag = tables[i]
- if splitTables:
- tablePath = fileNameTemplate % tagToIdentifier(tag)
- tableWriter = xmlWriter.XMLWriter(tablePath,
- newlinestr=writer.newlinestr)
- tableWriter.begintag("ttFont", ttLibVersion=version)
- tableWriter.newline()
- tableWriter.newline()
- writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
- writer.newline()
- else:
- tableWriter = writer
- self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs)
- if splitTables:
- tableWriter.endtag("ttFont")
- tableWriter.newline()
- tableWriter.close()
- writer.endtag("ttFont")
- writer.newline()
-
- def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False):
- if quiet is not None:
- deprecateArgument("quiet", "configure logging instead")
- if tag in self:
- table = self[tag]
- report = "Dumping '%s' table..." % tag
- else:
- report = "No '%s' table found." % tag
- log.info(report)
- if tag not in self:
- return
- xmlTag = tagToXML(tag)
- attrs = dict()
- if hasattr(table, "ERROR"):
- attrs['ERROR'] = "decompilation error"
- from .tables.DefaultTable import DefaultTable
- if table.__class__ == DefaultTable:
- attrs['raw'] = True
- writer.begintag(xmlTag, **attrs)
- writer.newline()
- if tag == "glyf":
- table.toXML(writer, self, splitGlyphs=splitGlyphs)
- else:
- table.toXML(writer, self)
- writer.endtag(xmlTag)
- writer.newline()
- writer.newline()
-
- def importXML(self, fileOrPath, quiet=None):
- """Import a TTX file (an XML-based text format), so as to recreate
- a font object.
- """
- if quiet is not None:
- deprecateArgument("quiet", "configure logging instead")
-
- if "maxp" in self and "post" in self:
- # Make sure the glyph order is loaded, as it otherwise gets
- # lost if the XML doesn't contain the glyph order, yet does
- # contain the table which was originally used to extract the
- # glyph names from (ie. 'post', 'cmap' or 'CFF ').
- self.getGlyphOrder()
-
- from fontTools.misc import xmlReader
-
- reader = xmlReader.XMLReader(fileOrPath, self)
- reader.read()
-
- def isLoaded(self, tag):
- """Return true if the table identified by ``tag`` has been
- decompiled and loaded into memory."""
- return tag in self.tables
-
- def has_key(self, tag):
- """Test if the table identified by ``tag`` is present in the font.
-
- As well as this method, ``tag in font`` can also be used to determine the
- presence of the table."""
- if self.isLoaded(tag):
- return True
- elif self.reader and tag in self.reader:
- return True
- elif tag == "GlyphOrder":
- return True
- else:
- return False
-
- __contains__ = has_key
-
- def keys(self):
- """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table."""
- keys = list(self.tables.keys())
- if self.reader:
- for key in list(self.reader.keys()):
- if key not in keys:
- keys.append(key)
-
- if "GlyphOrder" in keys:
- keys.remove("GlyphOrder")
- keys = sortedTagList(keys)
- return ["GlyphOrder"] + keys
-
- def ensureDecompiled(self, recurse=None):
- """Decompile all the tables, even if a TTFont was opened in 'lazy' mode."""
- for tag in self.keys():
- table = self[tag]
- if recurse is None:
- recurse = self.lazy is not False
- if recurse and hasattr(table, "ensureDecompiled"):
- table.ensureDecompiled(recurse=recurse)
- self.lazy = False
-
- def __len__(self):
- return len(list(self.keys()))
-
- def __getitem__(self, tag):
- tag = Tag(tag)
- table = self.tables.get(tag)
- if table is None:
- if tag == "GlyphOrder":
- table = GlyphOrder(tag)
- self.tables[tag] = table
- elif self.reader is not None:
- table = self._readTable(tag)
- else:
- raise KeyError("'%s' table not found" % tag)
- return table
-
- def _readTable(self, tag):
- log.debug("Reading '%s' table from disk", tag)
- data = self.reader[tag]
- if self._tableCache is not None:
- table = self._tableCache.get((tag, data))
- if table is not None:
- return table
- tableClass = getTableClass(tag)
- table = tableClass(tag)
- self.tables[tag] = table
- log.debug("Decompiling '%s' table", tag)
- try:
- table.decompile(data, self)
- except Exception:
- if not self.ignoreDecompileErrors:
- raise
- # fall back to DefaultTable, retaining the binary table data
- log.exception(
- "An exception occurred during the decompilation of the '%s' table", tag)
- from .tables.DefaultTable import DefaultTable
- file = StringIO()
- traceback.print_exc(file=file)
- table = DefaultTable(tag)
- table.ERROR = file.getvalue()
- self.tables[tag] = table
- table.decompile(data, self)
- if self._tableCache is not None:
- self._tableCache[(tag, data)] = table
- return table
-
- def __setitem__(self, tag, table):
- self.tables[Tag(tag)] = table
-
- def __delitem__(self, tag):
- if tag not in self:
- raise KeyError("'%s' table not found" % tag)
- if tag in self.tables:
- del self.tables[tag]
- if self.reader and tag in self.reader:
- del self.reader[tag]
-
- def get(self, tag, default=None):
- """Returns the table if it exists or (optionally) a default if it doesn't."""
- try:
- return self[tag]
- except KeyError:
- return default
-
- def setGlyphOrder(self, glyphOrder):
- """Set the glyph order
-
- Args:
- glyphOrder ([str]): List of glyph names in order.
- """
- self.glyphOrder = glyphOrder
- if hasattr(self, '_reverseGlyphOrderDict'):
- del self._reverseGlyphOrderDict
- if self.isLoaded("glyf"):
- self["glyf"].setGlyphOrder(glyphOrder)
-
- def getGlyphOrder(self):
- """Returns a list of glyph names ordered by their position in the font."""
- try:
- return self.glyphOrder
- except AttributeError:
- pass
- if 'CFF ' in self:
- cff = self['CFF ']
- self.glyphOrder = cff.getGlyphOrder()
- elif 'post' in self:
- # TrueType font
- glyphOrder = self['post'].getGlyphOrder()
- if glyphOrder is None:
- #
- # No names found in the 'post' table.
- # Try to create glyph names from the unicode cmap (if available)
- # in combination with the Adobe Glyph List (AGL).
- #
- self._getGlyphNamesFromCmap()
- else:
- self.glyphOrder = glyphOrder
- else:
- self._getGlyphNamesFromCmap()
- return self.glyphOrder
-
- def _getGlyphNamesFromCmap(self):
- #
- # This is rather convoluted, but then again, it's an interesting problem:
- # - we need to use the unicode values found in the cmap table to
- # build glyph names (eg. because there is only a minimal post table,
- # or none at all).
- # - but the cmap parser also needs glyph names to work with...
- # So here's what we do:
- # - make up glyph names based on glyphID
- # - load a temporary cmap table based on those names
- # - extract the unicode values, build the "real" glyph names
- # - unload the temporary cmap table
- #
- if self.isLoaded("cmap"):
- # Bootstrapping: we're getting called by the cmap parser
- # itself. This means self.tables['cmap'] contains a partially
- # loaded cmap, making it impossible to get at a unicode
- # subtable here. We remove the partially loaded cmap and
- # restore it later.
- # This only happens if the cmap table is loaded before any
- # other table that does f.getGlyphOrder() or f.getGlyphName().
- cmapLoading = self.tables['cmap']
- del self.tables['cmap']
- else:
- cmapLoading = None
- # Make up glyph names based on glyphID, which will be used by the
- # temporary cmap and by the real cmap in case we don't find a unicode
- # cmap.
- numGlyphs = int(self['maxp'].numGlyphs)
- glyphOrder = [None] * numGlyphs
- glyphOrder[0] = ".notdef"
- for i in range(1, numGlyphs):
- glyphOrder[i] = "glyph%.5d" % i
- # Set the glyph order, so the cmap parser has something
- # to work with (so we don't get called recursively).
- self.glyphOrder = glyphOrder
-
- # Make up glyph names based on the reversed cmap table. Because some
- # glyphs (eg. ligatures or alternates) may not be reachable via cmap,
- # this naming table will usually not cover all glyphs in the font.
- # If the font has no Unicode cmap table, reversecmap will be empty.
- if 'cmap' in self:
- reversecmap = self['cmap'].buildReversed()
- else:
- reversecmap = {}
- useCount = {}
- for i in range(numGlyphs):
- tempName = glyphOrder[i]
- if tempName in reversecmap:
- # If a font maps both U+0041 LATIN CAPITAL LETTER A and
- # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph,
- # we prefer naming the glyph as "A".
- glyphName = self._makeGlyphName(min(reversecmap[tempName]))
- numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1
- if numUses > 1:
- glyphName = "%s.alt%d" % (glyphName, numUses - 1)
- glyphOrder[i] = glyphName
-
- if 'cmap' in self:
- # Delete the temporary cmap table from the cache, so it can
- # be parsed again with the right names.
- del self.tables['cmap']
- self.glyphOrder = glyphOrder
- if cmapLoading:
- # restore partially loaded cmap, so it can continue loading
- # using the proper names.
- self.tables['cmap'] = cmapLoading
-
- @staticmethod
- def _makeGlyphName(codepoint):
- from fontTools import agl # Adobe Glyph List
- if codepoint in agl.UV2AGL:
- return agl.UV2AGL[codepoint]
- elif codepoint <= 0xFFFF:
- return "uni%04X" % codepoint
- else:
- return "u%X" % codepoint
-
- def getGlyphNames(self):
- """Get a list of glyph names, sorted alphabetically."""
- glyphNames = sorted(self.getGlyphOrder())
- return glyphNames
-
- def getGlyphNames2(self):
- """Get a list of glyph names, sorted alphabetically,
- but not case sensitive.
- """
- from fontTools.misc import textTools
- return textTools.caselessSort(self.getGlyphOrder())
-
- def getGlyphName(self, glyphID):
- """Returns the name for the glyph with the given ID.
-
- If no name is available, synthesises one with the form ``glyphXXXXX``` where
- ```XXXXX`` is the zero-padded glyph ID.
- """
- try:
- return self.getGlyphOrder()[glyphID]
- except IndexError:
- return "glyph%.5d" % glyphID
-
- def getGlyphNameMany(self, lst):
- """Converts a list of glyph IDs into a list of glyph names."""
- glyphOrder = self.getGlyphOrder();
- cnt = len(glyphOrder)
- return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid
- for gid in lst]
-
- def getGlyphID(self, glyphName):
- """Returns the ID of the glyph with the given name."""
- try:
- return self.getReverseGlyphMap()[glyphName]
- except KeyError:
- if glyphName[:5] == "glyph":
- try:
- return int(glyphName[5:])
- except (NameError, ValueError):
- raise KeyError(glyphName)
-
- def getGlyphIDMany(self, lst):
- """Converts a list of glyph names into a list of glyph IDs."""
- d = self.getReverseGlyphMap()
- try:
- return [d[glyphName] for glyphName in lst]
- except KeyError:
- getGlyphID = self.getGlyphID
- return [getGlyphID(glyphName) for glyphName in lst]
-
- def getReverseGlyphMap(self, rebuild=False):
- """Returns a mapping of glyph names to glyph IDs."""
- if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
- self._buildReverseGlyphOrderDict()
- return self._reverseGlyphOrderDict
-
- def _buildReverseGlyphOrderDict(self):
- self._reverseGlyphOrderDict = d = {}
- for glyphID,glyphName in enumerate(self.getGlyphOrder()):
- d[glyphName] = glyphID
- return d
-
- def _writeTable(self, tag, writer, done, tableCache=None):
- """Internal helper function for self.save(). Keeps track of
- inter-table dependencies.
- """
- if tag in done:
- return
- tableClass = getTableClass(tag)
- for masterTable in tableClass.dependencies:
- if masterTable not in done:
- if masterTable in self:
- self._writeTable(masterTable, writer, done, tableCache)
- else:
- done.append(masterTable)
- done.append(tag)
- tabledata = self.getTableData(tag)
- if tableCache is not None:
- entry = tableCache.get((Tag(tag), tabledata))
- if entry is not None:
- log.debug("reusing '%s' table", tag)
- writer.setEntry(tag, entry)
- return
- log.debug("Writing '%s' table to disk", tag)
- writer[tag] = tabledata
- if tableCache is not None:
- tableCache[(Tag(tag), tabledata)] = writer[tag]
-
- def getTableData(self, tag):
- """Returns the binary representation of a table.
-
- If the table is currently loaded and in memory, the data is compiled to
- binary and returned; if it is not currently loaded, the binary data is
- read from the font file and returned.
- """
- tag = Tag(tag)
- if self.isLoaded(tag):
- log.debug("Compiling '%s' table", tag)
- return self.tables[tag].compile(self)
- elif self.reader and tag in self.reader:
- log.debug("Reading '%s' table from disk", tag)
- return self.reader[tag]
- else:
- raise KeyError(tag)
-
- def getGlyphSet(self, preferCFF=True, location=None, normalized=False):
- """Return a generic GlyphSet, which is a dict-like object
- mapping glyph names to glyph objects. The returned glyph objects
- have a .draw() method that supports the Pen protocol, and will
- have an attribute named 'width'.
-
- If the font is CFF-based, the outlines will be taken from the 'CFF ' or
- 'CFF2' tables. Otherwise the outlines will be taken from the 'glyf' table.
- If the font contains both a 'CFF '/'CFF2' and a 'glyf' table, you can use
- the 'preferCFF' argument to specify which one should be taken. If the
- font contains both a 'CFF ' and a 'CFF2' table, the latter is taken.
-
- If the 'location' parameter is set, it should be a dictionary mapping
- four-letter variation tags to their float values, and the returned
- glyph-set will represent an instance of a variable font at that location.
- If the 'normalized' variable is set to True, that location is interpretted
- as in the normalized (-1..+1) space, otherwise it is in the font's defined
- axes space.
- """
- glyphs = None
- if (preferCFF and any(tb in self for tb in ["CFF ", "CFF2"]) or
- ("glyf" not in self and any(tb in self for tb in ["CFF ", "CFF2"]))):
- table_tag = "CFF2" if "CFF2" in self else "CFF "
- if location:
- raise NotImplementedError # TODO
- glyphs = _TTGlyphSet(self,
- list(self[table_tag].cff.values())[0].CharStrings, _TTGlyphCFF)
-
- if glyphs is None and "glyf" in self:
- if location and 'gvar' in self:
- glyphs = _TTVarGlyphSet(self, location=location, normalized=normalized)
- else:
- glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf)
-
- if glyphs is None:
- raise TTLibError("Font contains no outlines")
-
- return glyphs
-
- def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
- """Returns the 'best' Unicode cmap dictionary available in the font
- or ``None``, if no Unicode cmap subtable is available.
-
- By default it will search for the following (platformID, platEncID)
- pairs in order::
-
- (3, 10), # Windows Unicode full repertoire
- (0, 6), # Unicode full repertoire (format 13 subtable)
- (0, 4), # Unicode 2.0 full repertoire
- (3, 1), # Windows Unicode BMP
- (0, 3), # Unicode 2.0 BMP
- (0, 2), # Unicode ISO/IEC 10646
- (0, 1), # Unicode 1.1
- (0, 0) # Unicode 1.0
-
- This particular order matches what HarfBuzz uses to choose what
- subtable to use by default. This order prefers the largest-repertoire
- subtable, and among those, prefers the Windows-platform over the
- Unicode-platform as the former has wider support.
-
- This order can be customized via the ``cmapPreferences`` argument.
- """
- return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
+ """Represents a TrueType font.
+
+ The object manages file input and output, and offers a convenient way of
+ accessing tables. Tables will be only decompiled when necessary, ie. when
+ they're actually accessed. This means that simple operations can be extremely fast.
+
+ Example usage::
+
+ >> from fontTools import ttLib
+ >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file
+ >> tt['maxp'].numGlyphs
+ 242
+ >> tt['OS/2'].achVendID
+ 'B&H\000'
+ >> tt['head'].unitsPerEm
+ 2048
+
+ For details of the objects returned when accessing each table, see :ref:`tables`.
+ To add a table to the font, use the :py:func:`newTable` function::
+
+ >> os2 = newTable("OS/2")
+ >> os2.version = 4
+ >> # set other attributes
+ >> font["OS/2"] = os2
+
+ TrueType fonts can also be serialized to and from XML format (see also the
+ :ref:`ttx` binary)::
+
+ >> tt.saveXML("afont.ttx")
+ Dumping 'LTSH' table...
+ Dumping 'OS/2' table...
+ [...]
+
+ >> tt2 = ttLib.TTFont() # Create a new font object
+ >> tt2.importXML("afont.ttx")
+ >> tt2['maxp'].numGlyphs
+ 242
+
+ The TTFont object may be used as a context manager; this will cause the file
+ reader to be closed after the context ``with`` block is exited::
+
+ with TTFont(filename) as f:
+ # Do stuff
+
+ Args:
+ file: When reading a font from disk, either a pathname pointing to a file,
+ or a readable file object.
+ res_name_or_index: If running on a Macintosh, either a sfnt resource name or
+ an sfnt resource index number. If the index number is zero, TTLib will
+ autodetect whether the file is a flat file or a suitcase. (If it is a suitcase,
+ only the first 'sfnt' resource will be read.)
+ sfntVersion (str): When constructing a font object from scratch, sets the four-byte
+ sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create
+ an OpenType file, use ``OTTO``.
+ flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2
+ file.
+ checkChecksums (int): How checksum data should be treated. Default is 0
+ (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to
+ raise an exception if any wrong checksums are found.
+ recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``,
+ ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save.
+ Also compiles the glyphs on importing, which saves memory consumption and
+ time.
+ ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation
+ will be ignored, and the binary data will be returned for those tables instead.
+ recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in
+ the ``head`` table on save.
+ fontNumber (int): The index of the font in a TrueType Collection file.
+ lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon
+ access only. If it is set to False, many data structures are loaded immediately.
+ The default is ``lazy=None`` which is somewhere in between.
+ """
+
+ def __init__(
+ self,
+ file=None,
+ res_name_or_index=None,
+ sfntVersion="\000\001\000\000",
+ flavor=None,
+ checkChecksums=0,
+ verbose=None,
+ recalcBBoxes=True,
+ allowVID=NotImplemented,
+ ignoreDecompileErrors=False,
+ recalcTimestamp=True,
+ fontNumber=-1,
+ lazy=None,
+ quiet=None,
+ _tableCache=None,
+ cfg={},
+ ):
+ for name in ("verbose", "quiet"):
+ val = locals().get(name)
+ if val is not None:
+ deprecateArgument(name, "configure logging instead")
+ setattr(self, name, val)
+
+ self.lazy = lazy
+ self.recalcBBoxes = recalcBBoxes
+ self.recalcTimestamp = recalcTimestamp
+ self.tables = {}
+ self.reader = None
+ self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg)
+ self.ignoreDecompileErrors = ignoreDecompileErrors
+
+ if not file:
+ self.sfntVersion = sfntVersion
+ self.flavor = flavor
+ self.flavorData = None
+ return
+ seekable = True
+ if not hasattr(file, "read"):
+ closeStream = True
+ # assume file is a string
+ if res_name_or_index is not None:
+ # see if it contains 'sfnt' resources in the resource or data fork
+ from . import macUtils
+
+ if res_name_or_index == 0:
+ if macUtils.getSFNTResIndices(file):
+ # get the first available sfnt font.
+ file = macUtils.SFNTResourceReader(file, 1)
+ else:
+ file = open(file, "rb")
+ else:
+ file = macUtils.SFNTResourceReader(file, res_name_or_index)
+ else:
+ file = open(file, "rb")
+ else:
+ # assume "file" is a readable file object
+ closeStream = False
+ # SFNTReader wants the input file to be seekable.
+ # SpooledTemporaryFile has no seekable() on < 3.11, but still can seek:
+ # https://github.com/fonttools/fonttools/issues/3052
+ if hasattr(file, "seekable"):
+ seekable = file.seekable()
+ elif hasattr(file, "seek"):
+ try:
+ file.seek(0)
+ except UnsupportedOperation:
+ seekable = False
+
+ if not self.lazy:
+ # read input file in memory and wrap a stream around it to allow overwriting
+ if seekable:
+ file.seek(0)
+ tmp = BytesIO(file.read())
+ if hasattr(file, "name"):
+ # save reference to input file name
+ tmp.name = file.name
+ if closeStream:
+ file.close()
+ file = tmp
+ elif not seekable:
+ raise TTLibError("Input file must be seekable when lazy=True")
+ self._tableCache = _tableCache
+ self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
+ self.sfntVersion = self.reader.sfntVersion
+ self.flavor = self.reader.flavor
+ self.flavorData = self.reader.flavorData
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def close(self):
+ """If we still have a reader object, close it."""
+ if self.reader is not None:
+ self.reader.close()
+
+ def save(self, file, reorderTables=True):
+ """Save the font to disk.
+
+ Args:
+ file: Similarly to the constructor, can be either a pathname or a writable
+ file object.
+ reorderTables (Option[bool]): If true (the default), reorder the tables,
+ sorting them by tag (recommended by the OpenType specification). If
+ false, retain the original font order. If None, reorder by table
+ dependency (fastest).
+ """
+ if not hasattr(file, "write"):
+ if self.lazy and self.reader.file.name == file:
+ raise TTLibError("Can't overwrite TTFont when 'lazy' attribute is True")
+ createStream = True
+ else:
+ # assume "file" is a writable file object
+ createStream = False
+
+ tmp = BytesIO()
+
+ writer_reordersTables = self._save(tmp)
+
+ if not (
+ reorderTables is None
+ or writer_reordersTables
+ or (reorderTables is False and self.reader is None)
+ ):
+ if reorderTables is False:
+ # sort tables using the original font's order
+ tableOrder = list(self.reader.keys())
+ else:
+ # use the recommended order from the OpenType specification
+ tableOrder = None
+ tmp.flush()
+ tmp2 = BytesIO()
+ reorderFontTables(tmp, tmp2, tableOrder)
+ tmp.close()
+ tmp = tmp2
+
+ if createStream:
+ # "file" is a path
+ with open(file, "wb") as file:
+ file.write(tmp.getvalue())
+ else:
+ file.write(tmp.getvalue())
+
+ tmp.close()
+
+ def _save(self, file, tableCache=None):
+ """Internal function, to be shared by save() and TTCollection.save()"""
+
+ if self.recalcTimestamp and "head" in self:
+ self[
+ "head"
+ ] # make sure 'head' is loaded so the recalculation is actually done
+
+ tags = list(self.keys())
+ if "GlyphOrder" in tags:
+ tags.remove("GlyphOrder")
+ numTables = len(tags)
+ # write to a temporary stream to allow saving to unseekable streams
+ writer = SFNTWriter(
+ file, numTables, self.sfntVersion, self.flavor, self.flavorData
+ )
+
+ done = []
+ for tag in tags:
+ self._writeTable(tag, writer, done, tableCache)
+
+ writer.close()
+
+ return writer.reordersTables()
+
+ def saveXML(self, fileOrPath, newlinestr="\n", **kwargs):
+ """Export the font as TTX (an XML-based text file), or as a series of text
+ files when splitTables is true. In the latter case, the 'fileOrPath'
+ argument should be a path to a directory.
+ The 'tables' argument must either be false (dump all tables) or a
+ list of tables to dump. The 'skipTables' argument may be a list of tables
+ to skip, but only when the 'tables' argument is false.
+ """
+
+ writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
+ self._saveXML(writer, **kwargs)
+ writer.close()
+
+ def _saveXML(
+ self,
+ writer,
+ writeVersion=True,
+ quiet=None,
+ tables=None,
+ skipTables=None,
+ splitTables=False,
+ splitGlyphs=False,
+ disassembleInstructions=True,
+ bitmapGlyphDataFormat="raw",
+ ):
+ if quiet is not None:
+ deprecateArgument("quiet", "configure logging instead")
+
+ self.disassembleInstructions = disassembleInstructions
+ self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
+ if not tables:
+ tables = list(self.keys())
+ if "GlyphOrder" not in tables:
+ tables = ["GlyphOrder"] + tables
+ if skipTables:
+ for tag in skipTables:
+ if tag in tables:
+ tables.remove(tag)
+ numTables = len(tables)
+
+ if writeVersion:
+ from fontTools import version
+
+ version = ".".join(version.split(".")[:2])
+ writer.begintag(
+ "ttFont",
+ sfntVersion=repr(tostr(self.sfntVersion))[1:-1],
+ ttLibVersion=version,
+ )
+ else:
+ writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1])
+ writer.newline()
+
+ # always splitTables if splitGlyphs is enabled
+ splitTables = splitTables or splitGlyphs
+
+ if not splitTables:
+ writer.newline()
+ else:
+ path, ext = os.path.splitext(writer.filename)
+
+ for i in range(numTables):
+ tag = tables[i]
+ if splitTables:
+ tablePath = path + "." + tagToIdentifier(tag) + ext
+ tableWriter = xmlWriter.XMLWriter(
+ tablePath, newlinestr=writer.newlinestr
+ )
+ tableWriter.begintag("ttFont", ttLibVersion=version)
+ tableWriter.newline()
+ tableWriter.newline()
+ writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
+ writer.newline()
+ else:
+ tableWriter = writer
+ self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs)
+ if splitTables:
+ tableWriter.endtag("ttFont")
+ tableWriter.newline()
+ tableWriter.close()
+ writer.endtag("ttFont")
+ writer.newline()
+
+ def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False):
+ if quiet is not None:
+ deprecateArgument("quiet", "configure logging instead")
+ if tag in self:
+ table = self[tag]
+ report = "Dumping '%s' table..." % tag
+ else:
+ report = "No '%s' table found." % tag
+ log.info(report)
+ if tag not in self:
+ return
+ xmlTag = tagToXML(tag)
+ attrs = dict()
+ if hasattr(table, "ERROR"):
+ attrs["ERROR"] = "decompilation error"
+ from .tables.DefaultTable import DefaultTable
+
+ if table.__class__ == DefaultTable:
+ attrs["raw"] = True
+ writer.begintag(xmlTag, **attrs)
+ writer.newline()
+ if tag == "glyf":
+ table.toXML(writer, self, splitGlyphs=splitGlyphs)
+ else:
+ table.toXML(writer, self)
+ writer.endtag(xmlTag)
+ writer.newline()
+ writer.newline()
+
+ def importXML(self, fileOrPath, quiet=None):
+ """Import a TTX file (an XML-based text format), so as to recreate
+ a font object.
+ """
+ if quiet is not None:
+ deprecateArgument("quiet", "configure logging instead")
+
+ if "maxp" in self and "post" in self:
+ # Make sure the glyph order is loaded, as it otherwise gets
+ # lost if the XML doesn't contain the glyph order, yet does
+ # contain the table which was originally used to extract the
+ # glyph names from (ie. 'post', 'cmap' or 'CFF ').
+ self.getGlyphOrder()
+
+ from fontTools.misc import xmlReader
+
+ reader = xmlReader.XMLReader(fileOrPath, self)
+ reader.read()
+
+ def isLoaded(self, tag):
+ """Return true if the table identified by ``tag`` has been
+ decompiled and loaded into memory."""
+ return tag in self.tables
+
+ def has_key(self, tag):
+ """Test if the table identified by ``tag`` is present in the font.
+
+ As well as this method, ``tag in font`` can also be used to determine the
+ presence of the table."""
+ if self.isLoaded(tag):
+ return True
+ elif self.reader and tag in self.reader:
+ return True
+ elif tag == "GlyphOrder":
+ return True
+ else:
+ return False
+
+ __contains__ = has_key
+
+ def keys(self):
+ """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table."""
+ keys = list(self.tables.keys())
+ if self.reader:
+ for key in list(self.reader.keys()):
+ if key not in keys:
+ keys.append(key)
+
+ if "GlyphOrder" in keys:
+ keys.remove("GlyphOrder")
+ keys = sortedTagList(keys)
+ return ["GlyphOrder"] + keys
+
+ def ensureDecompiled(self, recurse=None):
+ """Decompile all the tables, even if a TTFont was opened in 'lazy' mode."""
+ for tag in self.keys():
+ table = self[tag]
+ if recurse is None:
+ recurse = self.lazy is not False
+ if recurse and hasattr(table, "ensureDecompiled"):
+ table.ensureDecompiled(recurse=recurse)
+ self.lazy = False
+
+ def __len__(self):
+ return len(list(self.keys()))
+
+ def __getitem__(self, tag):
+ tag = Tag(tag)
+ table = self.tables.get(tag)
+ if table is None:
+ if tag == "GlyphOrder":
+ table = GlyphOrder(tag)
+ self.tables[tag] = table
+ elif self.reader is not None:
+ table = self._readTable(tag)
+ else:
+ raise KeyError("'%s' table not found" % tag)
+ return table
+
+ def _readTable(self, tag):
+ log.debug("Reading '%s' table from disk", tag)
+ data = self.reader[tag]
+ if self._tableCache is not None:
+ table = self._tableCache.get((tag, data))
+ if table is not None:
+ return table
+ tableClass = getTableClass(tag)
+ table = tableClass(tag)
+ self.tables[tag] = table
+ log.debug("Decompiling '%s' table", tag)
+ try:
+ table.decompile(data, self)
+ except Exception:
+ if not self.ignoreDecompileErrors:
+ raise
+ # fall back to DefaultTable, retaining the binary table data
+ log.exception(
+ "An exception occurred during the decompilation of the '%s' table", tag
+ )
+ from .tables.DefaultTable import DefaultTable
+
+ file = StringIO()
+ traceback.print_exc(file=file)
+ table = DefaultTable(tag)
+ table.ERROR = file.getvalue()
+ self.tables[tag] = table
+ table.decompile(data, self)
+ if self._tableCache is not None:
+ self._tableCache[(tag, data)] = table
+ return table
+
+ def __setitem__(self, tag, table):
+ self.tables[Tag(tag)] = table
+
+ def __delitem__(self, tag):
+ if tag not in self:
+ raise KeyError("'%s' table not found" % tag)
+ if tag in self.tables:
+ del self.tables[tag]
+ if self.reader and tag in self.reader:
+ del self.reader[tag]
+
+ def get(self, tag, default=None):
+ """Returns the table if it exists or (optionally) a default if it doesn't."""
+ try:
+ return self[tag]
+ except KeyError:
+ return default
+
+ def setGlyphOrder(self, glyphOrder):
+ """Set the glyph order
+
+ Args:
+ glyphOrder ([str]): List of glyph names in order.
+ """
+ self.glyphOrder = glyphOrder
+ if hasattr(self, "_reverseGlyphOrderDict"):
+ del self._reverseGlyphOrderDict
+ if self.isLoaded("glyf"):
+ self["glyf"].setGlyphOrder(glyphOrder)
+
+ def getGlyphOrder(self):
+ """Returns a list of glyph names ordered by their position in the font."""
+ try:
+ return self.glyphOrder
+ except AttributeError:
+ pass
+ if "CFF " in self:
+ cff = self["CFF "]
+ self.glyphOrder = cff.getGlyphOrder()
+ elif "post" in self:
+ # TrueType font
+ glyphOrder = self["post"].getGlyphOrder()
+ if glyphOrder is None:
+ #
+ # No names found in the 'post' table.
+ # Try to create glyph names from the unicode cmap (if available)
+ # in combination with the Adobe Glyph List (AGL).
+ #
+ self._getGlyphNamesFromCmap()
+ elif len(glyphOrder) < self["maxp"].numGlyphs:
+ #
+ # Not enough names found in the 'post' table.
+ # Can happen when 'post' format 1 is improperly used on a font that
+ # has more than 258 glyphs (the lenght of 'standardGlyphOrder').
+ #
+ log.warning(
+ "Not enough names found in the 'post' table, generating them from cmap instead"
+ )
+ self._getGlyphNamesFromCmap()
+ else:
+ self.glyphOrder = glyphOrder
+ else:
+ self._getGlyphNamesFromCmap()
+ return self.glyphOrder
+
+ def _getGlyphNamesFromCmap(self):
+ #
+ # This is rather convoluted, but then again, it's an interesting problem:
+ # - we need to use the unicode values found in the cmap table to
+ # build glyph names (eg. because there is only a minimal post table,
+ # or none at all).
+ # - but the cmap parser also needs glyph names to work with...
+ # So here's what we do:
+ # - make up glyph names based on glyphID
+ # - load a temporary cmap table based on those names
+ # - extract the unicode values, build the "real" glyph names
+ # - unload the temporary cmap table
+ #
+ if self.isLoaded("cmap"):
+ # Bootstrapping: we're getting called by the cmap parser
+ # itself. This means self.tables['cmap'] contains a partially
+ # loaded cmap, making it impossible to get at a unicode
+ # subtable here. We remove the partially loaded cmap and
+ # restore it later.
+ # This only happens if the cmap table is loaded before any
+ # other table that does f.getGlyphOrder() or f.getGlyphName().
+ cmapLoading = self.tables["cmap"]
+ del self.tables["cmap"]
+ else:
+ cmapLoading = None
+ # Make up glyph names based on glyphID, which will be used by the
+ # temporary cmap and by the real cmap in case we don't find a unicode
+ # cmap.
+ numGlyphs = int(self["maxp"].numGlyphs)
+ glyphOrder = [None] * numGlyphs
+ glyphOrder[0] = ".notdef"
+ for i in range(1, numGlyphs):
+ glyphOrder[i] = "glyph%.5d" % i
+ # Set the glyph order, so the cmap parser has something
+ # to work with (so we don't get called recursively).
+ self.glyphOrder = glyphOrder
+
+ # Make up glyph names based on the reversed cmap table. Because some
+ # glyphs (eg. ligatures or alternates) may not be reachable via cmap,
+ # this naming table will usually not cover all glyphs in the font.
+ # If the font has no Unicode cmap table, reversecmap will be empty.
+ if "cmap" in self:
+ reversecmap = self["cmap"].buildReversed()
+ else:
+ reversecmap = {}
+ useCount = {}
+ for i in range(numGlyphs):
+ tempName = glyphOrder[i]
+ if tempName in reversecmap:
+ # If a font maps both U+0041 LATIN CAPITAL LETTER A and
+ # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph,
+ # we prefer naming the glyph as "A".
+ glyphName = self._makeGlyphName(min(reversecmap[tempName]))
+ numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1
+ if numUses > 1:
+ glyphName = "%s.alt%d" % (glyphName, numUses - 1)
+ glyphOrder[i] = glyphName
+
+ if "cmap" in self:
+ # Delete the temporary cmap table from the cache, so it can
+ # be parsed again with the right names.
+ del self.tables["cmap"]
+ self.glyphOrder = glyphOrder
+ if cmapLoading:
+ # restore partially loaded cmap, so it can continue loading
+ # using the proper names.
+ self.tables["cmap"] = cmapLoading
+
+ @staticmethod
+ def _makeGlyphName(codepoint):
+ from fontTools import agl # Adobe Glyph List
+
+ if codepoint in agl.UV2AGL:
+ return agl.UV2AGL[codepoint]
+ elif codepoint <= 0xFFFF:
+ return "uni%04X" % codepoint
+ else:
+ return "u%X" % codepoint
+
+ def getGlyphNames(self):
+ """Get a list of glyph names, sorted alphabetically."""
+ glyphNames = sorted(self.getGlyphOrder())
+ return glyphNames
+
+ def getGlyphNames2(self):
+ """Get a list of glyph names, sorted alphabetically,
+ but not case sensitive.
+ """
+ from fontTools.misc import textTools
+
+ return textTools.caselessSort(self.getGlyphOrder())
+
+ def getGlyphName(self, glyphID):
+ """Returns the name for the glyph with the given ID.
+
+ If no name is available, synthesises one with the form ``glyphXXXXX``` where
+ ```XXXXX`` is the zero-padded glyph ID.
+ """
+ try:
+ return self.getGlyphOrder()[glyphID]
+ except IndexError:
+ return "glyph%.5d" % glyphID
+
+ def getGlyphNameMany(self, lst):
+ """Converts a list of glyph IDs into a list of glyph names."""
+ glyphOrder = self.getGlyphOrder()
+ cnt = len(glyphOrder)
+ return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid for gid in lst]
+
+ def getGlyphID(self, glyphName):
+ """Returns the ID of the glyph with the given name."""
+ try:
+ return self.getReverseGlyphMap()[glyphName]
+ except KeyError:
+ if glyphName[:5] == "glyph":
+ try:
+ return int(glyphName[5:])
+ except (NameError, ValueError):
+ raise KeyError(glyphName)
+ raise
+
+ def getGlyphIDMany(self, lst):
+ """Converts a list of glyph names into a list of glyph IDs."""
+ d = self.getReverseGlyphMap()
+ try:
+ return [d[glyphName] for glyphName in lst]
+ except KeyError:
+ getGlyphID = self.getGlyphID
+ return [getGlyphID(glyphName) for glyphName in lst]
+
+ def getReverseGlyphMap(self, rebuild=False):
+ """Returns a mapping of glyph names to glyph IDs."""
+ if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
+ self._buildReverseGlyphOrderDict()
+ return self._reverseGlyphOrderDict
+
+ def _buildReverseGlyphOrderDict(self):
+ self._reverseGlyphOrderDict = d = {}
+ for glyphID, glyphName in enumerate(self.getGlyphOrder()):
+ d[glyphName] = glyphID
+ return d
+
+ def _writeTable(self, tag, writer, done, tableCache=None):
+ """Internal helper function for self.save(). Keeps track of
+ inter-table dependencies.
+ """
+ if tag in done:
+ return
+ tableClass = getTableClass(tag)
+ for masterTable in tableClass.dependencies:
+ if masterTable not in done:
+ if masterTable in self:
+ self._writeTable(masterTable, writer, done, tableCache)
+ else:
+ done.append(masterTable)
+ done.append(tag)
+ tabledata = self.getTableData(tag)
+ if tableCache is not None:
+ entry = tableCache.get((Tag(tag), tabledata))
+ if entry is not None:
+ log.debug("reusing '%s' table", tag)
+ writer.setEntry(tag, entry)
+ return
+ log.debug("Writing '%s' table to disk", tag)
+ writer[tag] = tabledata
+ if tableCache is not None:
+ tableCache[(Tag(tag), tabledata)] = writer[tag]
+
+ def getTableData(self, tag):
+ """Returns the binary representation of a table.
+
+ If the table is currently loaded and in memory, the data is compiled to
+ binary and returned; if it is not currently loaded, the binary data is
+ read from the font file and returned.
+ """
+ tag = Tag(tag)
+ if self.isLoaded(tag):
+ log.debug("Compiling '%s' table", tag)
+ return self.tables[tag].compile(self)
+ elif self.reader and tag in self.reader:
+ log.debug("Reading '%s' table from disk", tag)
+ return self.reader[tag]
+ else:
+ raise KeyError(tag)
+
+ def getGlyphSet(self, preferCFF=True, location=None, normalized=False):
+ """Return a generic GlyphSet, which is a dict-like object
+ mapping glyph names to glyph objects. The returned glyph objects
+ have a ``.draw()`` method that supports the Pen protocol, and will
+ have an attribute named 'width'.
+
+ If the font is CFF-based, the outlines will be taken from the ``CFF ``
+ or ``CFF2`` tables. Otherwise the outlines will be taken from the
+ ``glyf`` table.
+
+ If the font contains both a ``CFF ``/``CFF2`` and a ``glyf`` table, you
+ can use the ``preferCFF`` argument to specify which one should be taken.
+ If the font contains both a ``CFF `` and a ``CFF2`` table, the latter is
+ taken.
+
+ If the ``location`` parameter is set, it should be a dictionary mapping
+ four-letter variation tags to their float values, and the returned
+ glyph-set will represent an instance of a variable font at that
+ location.
+
+ If the ``normalized`` variable is set to True, that location is
+ interpreted as in the normalized (-1..+1) space, otherwise it is in the
+ font's defined axes space.
+ """
+ if location and "fvar" not in self:
+ location = None
+ if location and not normalized:
+ location = self.normalizeLocation(location)
+ if ("CFF " in self or "CFF2" in self) and (preferCFF or "glyf" not in self):
+ return _TTGlyphSetCFF(self, location)
+ elif "glyf" in self:
+ return _TTGlyphSetGlyf(self, location)
+ else:
+ raise TTLibError("Font contains no outlines")
+
+ def normalizeLocation(self, location):
+ """Normalize a ``location`` from the font's defined axes space (also
+ known as user space) into the normalized (-1..+1) space. It applies
+ ``avar`` mapping if the font contains an ``avar`` table.
+
+ The ``location`` parameter should be a dictionary mapping four-letter
+ variation tags to their float values.
+
+ Raises ``TTLibError`` if the font is not a variable font.
+ """
+ from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap
+
+ if "fvar" not in self:
+ raise TTLibError("Not a variable font")
+
+ axes = {
+ a.axisTag: (a.minValue, a.defaultValue, a.maxValue)
+ for a in self["fvar"].axes
+ }
+ location = normalizeLocation(location, axes)
+ if "avar" in self:
+ avar = self["avar"]
+ avarSegments = avar.segments
+ mappedLocation = {}
+ for axisTag, value in location.items():
+ avarMapping = avarSegments.get(axisTag, None)
+ if avarMapping is not None:
+ value = piecewiseLinearMap(value, avarMapping)
+ mappedLocation[axisTag] = value
+ location = mappedLocation
+ return location
+
+ def getBestCmap(
+ self,
+ cmapPreferences=(
+ (3, 10),
+ (0, 6),
+ (0, 4),
+ (3, 1),
+ (0, 3),
+ (0, 2),
+ (0, 1),
+ (0, 0),
+ ),
+ ):
+ """Returns the 'best' Unicode cmap dictionary available in the font
+ or ``None``, if no Unicode cmap subtable is available.
+
+ By default it will search for the following (platformID, platEncID)
+ pairs in order::
+
+ (3, 10), # Windows Unicode full repertoire
+ (0, 6), # Unicode full repertoire (format 13 subtable)
+ (0, 4), # Unicode 2.0 full repertoire
+ (3, 1), # Windows Unicode BMP
+ (0, 3), # Unicode 2.0 BMP
+ (0, 2), # Unicode ISO/IEC 10646
+ (0, 1), # Unicode 1.1
+ (0, 0) # Unicode 1.0
+
+ This particular order matches what HarfBuzz uses to choose what
+ subtable to use by default. This order prefers the largest-repertoire
+ subtable, and among those, prefers the Windows-platform over the
+ Unicode-platform as the former has wider support.
+
+ This order can be customized via the ``cmapPreferences`` argument.
+ """
+ return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
class GlyphOrder(object):
- """A pseudo table. The glyph order isn't in the font as a separate
- table, but it's nice to present it as such in the TTX format.
- """
+ """A pseudo table. The glyph order isn't in the font as a separate
+ table, but it's nice to present it as such in the TTX format.
+ """
- def __init__(self, tag=None):
- pass
+ def __init__(self, tag=None):
+ pass
- def toXML(self, writer, ttFont):
- glyphOrder = ttFont.getGlyphOrder()
- writer.comment("The 'id' attribute is only for humans; "
- "it is ignored when parsed.")
- writer.newline()
- for i in range(len(glyphOrder)):
- glyphName = glyphOrder[i]
- writer.simpletag("GlyphID", id=i, name=glyphName)
- writer.newline()
+ def toXML(self, writer, ttFont):
+ glyphOrder = ttFont.getGlyphOrder()
+ writer.comment(
+ "The 'id' attribute is only for humans; " "it is ignored when parsed."
+ )
+ writer.newline()
+ for i in range(len(glyphOrder)):
+ glyphName = glyphOrder[i]
+ writer.simpletag("GlyphID", id=i, name=glyphName)
+ writer.newline()
- def fromXML(self, name, attrs, content, ttFont):
- if not hasattr(self, "glyphOrder"):
- self.glyphOrder = []
- if name == "GlyphID":
- self.glyphOrder.append(attrs["name"])
- ttFont.setGlyphOrder(self.glyphOrder)
+ def fromXML(self, name, attrs, content, ttFont):
+ if not hasattr(self, "glyphOrder"):
+ self.glyphOrder = []
+ if name == "GlyphID":
+ self.glyphOrder.append(attrs["name"])
+ ttFont.setGlyphOrder(self.glyphOrder)
def getTableModule(tag):
- """Fetch the packer/unpacker module for a table.
- Return None when no module is found.
- """
- from . import tables
- pyTag = tagToIdentifier(tag)
- try:
- __import__("fontTools.ttLib.tables." + pyTag)
- except ImportError as err:
- # If pyTag is found in the ImportError message,
- # means table is not implemented. If it's not
- # there, then some other module is missing, don't
- # suppress the error.
- if str(err).find(pyTag) >= 0:
- return None
- else:
- raise err
- else:
- return getattr(tables, pyTag)
+ """Fetch the packer/unpacker module for a table.
+ Return None when no module is found.
+ """
+ from . import tables
+
+ pyTag = tagToIdentifier(tag)
+ try:
+ __import__("fontTools.ttLib.tables." + pyTag)
+ except ImportError as err:
+ # If pyTag is found in the ImportError message,
+ # means table is not implemented. If it's not
+ # there, then some other module is missing, don't
+ # suppress the error.
+ if str(err).find(pyTag) >= 0:
+ return None
+ else:
+ raise err
+ else:
+ return getattr(tables, pyTag)
# Registry for custom table packer/unpacker classes. Keys are table
@@ -800,221 +897,248 @@ _customTableRegistry = {}
def registerCustomTableClass(tag, moduleName, className=None):
- """Register a custom packer/unpacker class for a table.
+ """Register a custom packer/unpacker class for a table.
- The 'moduleName' must be an importable module. If no 'className'
- is given, it is derived from the tag, for example it will be
- ``table_C_U_S_T_`` for a 'CUST' tag.
+ The 'moduleName' must be an importable module. If no 'className'
+ is given, it is derived from the tag, for example it will be
+ ``table_C_U_S_T_`` for a 'CUST' tag.
- The registered table class should be a subclass of
- :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable`
- """
- if className is None:
- className = "table_" + tagToIdentifier(tag)
- _customTableRegistry[tag] = (moduleName, className)
+ The registered table class should be a subclass of
+ :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable`
+ """
+ if className is None:
+ className = "table_" + tagToIdentifier(tag)
+ _customTableRegistry[tag] = (moduleName, className)
def unregisterCustomTableClass(tag):
- """Unregister the custom packer/unpacker class for a table."""
- del _customTableRegistry[tag]
+ """Unregister the custom packer/unpacker class for a table."""
+ del _customTableRegistry[tag]
def getCustomTableClass(tag):
- """Return the custom table class for tag, if one has been registered
- with 'registerCustomTableClass()'. Else return None.
- """
- if tag not in _customTableRegistry:
- return None
- import importlib
- moduleName, className = _customTableRegistry[tag]
- module = importlib.import_module(moduleName)
- return getattr(module, className)
+ """Return the custom table class for tag, if one has been registered
+ with 'registerCustomTableClass()'. Else return None.
+ """
+ if tag not in _customTableRegistry:
+ return None
+ import importlib
+
+ moduleName, className = _customTableRegistry[tag]
+ module = importlib.import_module(moduleName)
+ return getattr(module, className)
def getTableClass(tag):
- """Fetch the packer/unpacker class for a table."""
- tableClass = getCustomTableClass(tag)
- if tableClass is not None:
- return tableClass
- module = getTableModule(tag)
- if module is None:
- from .tables.DefaultTable import DefaultTable
- return DefaultTable
- pyTag = tagToIdentifier(tag)
- tableClass = getattr(module, "table_" + pyTag)
- return tableClass
+ """Fetch the packer/unpacker class for a table."""
+ tableClass = getCustomTableClass(tag)
+ if tableClass is not None:
+ return tableClass
+ module = getTableModule(tag)
+ if module is None:
+ from .tables.DefaultTable import DefaultTable
+
+ return DefaultTable
+ pyTag = tagToIdentifier(tag)
+ tableClass = getattr(module, "table_" + pyTag)
+ return tableClass
def getClassTag(klass):
- """Fetch the table tag for a class object."""
- name = klass.__name__
- assert name[:6] == 'table_'
- name = name[6:] # Chop 'table_'
- return identifierToTag(name)
+ """Fetch the table tag for a class object."""
+ name = klass.__name__
+ assert name[:6] == "table_"
+ name = name[6:] # Chop 'table_'
+ return identifierToTag(name)
def newTable(tag):
- """Return a new instance of a table."""
- tableClass = getTableClass(tag)
- return tableClass(tag)
+ """Return a new instance of a table."""
+ tableClass = getTableClass(tag)
+ return tableClass(tag)
def _escapechar(c):
- """Helper function for tagToIdentifier()"""
- import re
- if re.match("[a-z0-9]", c):
- return "_" + c
- elif re.match("[A-Z]", c):
- return c + "_"
- else:
- return hex(byteord(c))[2:]
+ """Helper function for tagToIdentifier()"""
+ import re
+
+ if re.match("[a-z0-9]", c):
+ return "_" + c
+ elif re.match("[A-Z]", c):
+ return c + "_"
+ else:
+ return hex(byteord(c))[2:]
def tagToIdentifier(tag):
- """Convert a table tag to a valid (but UGLY) python identifier,
- as well as a filename that's guaranteed to be unique even on a
- caseless file system. Each character is mapped to two characters.
- Lowercase letters get an underscore before the letter, uppercase
- letters get an underscore after the letter. Trailing spaces are
- trimmed. Illegal characters are escaped as two hex bytes. If the
- result starts with a number (as the result of a hex escape), an
- extra underscore is prepended. Examples::
-
- >>> tagToIdentifier('glyf')
- '_g_l_y_f'
- >>> tagToIdentifier('cvt ')
- '_c_v_t'
- >>> tagToIdentifier('OS/2')
- 'O_S_2f_2'
- """
- import re
- tag = Tag(tag)
- if tag == "GlyphOrder":
- return tag
- assert len(tag) == 4, "tag should be 4 characters long"
- while len(tag) > 1 and tag[-1] == ' ':
- tag = tag[:-1]
- ident = ""
- for c in tag:
- ident = ident + _escapechar(c)
- if re.match("[0-9]", ident):
- ident = "_" + ident
- return ident
+ """Convert a table tag to a valid (but UGLY) python identifier,
+ as well as a filename that's guaranteed to be unique even on a
+ caseless file system. Each character is mapped to two characters.
+ Lowercase letters get an underscore before the letter, uppercase
+ letters get an underscore after the letter. Trailing spaces are
+ trimmed. Illegal characters are escaped as two hex bytes. If the
+ result starts with a number (as the result of a hex escape), an
+ extra underscore is prepended. Examples::
+
+ >>> tagToIdentifier('glyf')
+ '_g_l_y_f'
+ >>> tagToIdentifier('cvt ')
+ '_c_v_t'
+ >>> tagToIdentifier('OS/2')
+ 'O_S_2f_2'
+ """
+ import re
+
+ tag = Tag(tag)
+ if tag == "GlyphOrder":
+ return tag
+ assert len(tag) == 4, "tag should be 4 characters long"
+ while len(tag) > 1 and tag[-1] == " ":
+ tag = tag[:-1]
+ ident = ""
+ for c in tag:
+ ident = ident + _escapechar(c)
+ if re.match("[0-9]", ident):
+ ident = "_" + ident
+ return ident
def identifierToTag(ident):
- """the opposite of tagToIdentifier()"""
- if ident == "GlyphOrder":
- return ident
- if len(ident) % 2 and ident[0] == "_":
- ident = ident[1:]
- assert not (len(ident) % 2)
- tag = ""
- for i in range(0, len(ident), 2):
- if ident[i] == "_":
- tag = tag + ident[i+1]
- elif ident[i+1] == "_":
- tag = tag + ident[i]
- else:
- # assume hex
- tag = tag + chr(int(ident[i:i+2], 16))
- # append trailing spaces
- tag = tag + (4 - len(tag)) * ' '
- return Tag(tag)
+ """the opposite of tagToIdentifier()"""
+ if ident == "GlyphOrder":
+ return ident
+ if len(ident) % 2 and ident[0] == "_":
+ ident = ident[1:]
+ assert not (len(ident) % 2)
+ tag = ""
+ for i in range(0, len(ident), 2):
+ if ident[i] == "_":
+ tag = tag + ident[i + 1]
+ elif ident[i + 1] == "_":
+ tag = tag + ident[i]
+ else:
+ # assume hex
+ tag = tag + chr(int(ident[i : i + 2], 16))
+ # append trailing spaces
+ tag = tag + (4 - len(tag)) * " "
+ return Tag(tag)
def tagToXML(tag):
- """Similarly to tagToIdentifier(), this converts a TT tag
- to a valid XML element name. Since XML element names are
- case sensitive, this is a fairly simple/readable translation.
- """
- import re
- tag = Tag(tag)
- if tag == "OS/2":
- return "OS_2"
- elif tag == "GlyphOrder":
- return tag
- if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
- return tag.strip()
- else:
- return tagToIdentifier(tag)
+ """Similarly to tagToIdentifier(), this converts a TT tag
+ to a valid XML element name. Since XML element names are
+ case sensitive, this is a fairly simple/readable translation.
+ """
+ import re
+
+ tag = Tag(tag)
+ if tag == "OS/2":
+ return "OS_2"
+ elif tag == "GlyphOrder":
+ return tag
+ if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
+ return tag.strip()
+ else:
+ return tagToIdentifier(tag)
def xmlToTag(tag):
- """The opposite of tagToXML()"""
- if tag == "OS_2":
- return Tag("OS/2")
- if len(tag) == 8:
- return identifierToTag(tag)
- else:
- return Tag(tag + " " * (4 - len(tag)))
-
+ """The opposite of tagToXML()"""
+ if tag == "OS_2":
+ return Tag("OS/2")
+ if len(tag) == 8:
+ return identifierToTag(tag)
+ else:
+ return Tag(tag + " " * (4 - len(tag)))
# Table order as recommended in the OpenType specification 1.4
-TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX",
- "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf",
- "kern", "name", "post", "gasp", "PCLT"]
+TTFTableOrder = [
+ "head",
+ "hhea",
+ "maxp",
+ "OS/2",
+ "hmtx",
+ "LTSH",
+ "VDMX",
+ "hdmx",
+ "cmap",
+ "fpgm",
+ "prep",
+ "cvt ",
+ "loca",
+ "glyf",
+ "kern",
+ "name",
+ "post",
+ "gasp",
+ "PCLT",
+]
+
+OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", "CFF "]
-OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post",
- "CFF "]
def sortedTagList(tagList, tableOrder=None):
- """Return a sorted copy of tagList, sorted according to the OpenType
- specification, or according to a custom tableOrder. If given and not
- None, tableOrder needs to be a list of tag names.
- """
- tagList = sorted(tagList)
- if tableOrder is None:
- if "DSIG" in tagList:
- # DSIG should be last (XXX spec reference?)
- tagList.remove("DSIG")
- tagList.append("DSIG")
- if "CFF " in tagList:
- tableOrder = OTFTableOrder
- else:
- tableOrder = TTFTableOrder
- orderedTables = []
- for tag in tableOrder:
- if tag in tagList:
- orderedTables.append(tag)
- tagList.remove(tag)
- orderedTables.extend(tagList)
- return orderedTables
+ """Return a sorted copy of tagList, sorted according to the OpenType
+ specification, or according to a custom tableOrder. If given and not
+ None, tableOrder needs to be a list of tag names.
+ """
+ tagList = sorted(tagList)
+ if tableOrder is None:
+ if "DSIG" in tagList:
+ # DSIG should be last (XXX spec reference?)
+ tagList.remove("DSIG")
+ tagList.append("DSIG")
+ if "CFF " in tagList:
+ tableOrder = OTFTableOrder
+ else:
+ tableOrder = TTFTableOrder
+ orderedTables = []
+ for tag in tableOrder:
+ if tag in tagList:
+ orderedTables.append(tag)
+ tagList.remove(tag)
+ orderedTables.extend(tagList)
+ return orderedTables
def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False):
- """Rewrite a font file, ordering the tables as recommended by the
- OpenType specification 1.4.
- """
- inFile.seek(0)
- outFile.seek(0)
- reader = SFNTReader(inFile, checkChecksums=checkChecksums)
- writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData)
- tables = list(reader.keys())
- for tag in sortedTagList(tables, tableOrder):
- writer[tag] = reader[tag]
- writer.close()
+ """Rewrite a font file, ordering the tables as recommended by the
+ OpenType specification 1.4.
+ """
+ inFile.seek(0)
+ outFile.seek(0)
+ reader = SFNTReader(inFile, checkChecksums=checkChecksums)
+ writer = SFNTWriter(
+ outFile,
+ len(reader.tables),
+ reader.sfntVersion,
+ reader.flavor,
+ reader.flavorData,
+ )
+ tables = list(reader.keys())
+ for tag in sortedTagList(tables, tableOrder):
+ writer[tag] = reader[tag]
+ writer.close()
def maxPowerOfTwo(x):
- """Return the highest exponent of two, so that
- (2 ** exponent) <= x. Return 0 if x is 0.
- """
- exponent = 0
- while x:
- x = x >> 1
- exponent = exponent + 1
- return max(exponent - 1, 0)
+ """Return the highest exponent of two, so that
+ (2 ** exponent) <= x. Return 0 if x is 0.
+ """
+ exponent = 0
+ while x:
+ x = x >> 1
+ exponent = exponent + 1
+ return max(exponent - 1, 0)
def getSearchRange(n, itemSize=16):
- """Calculate searchRange, entrySelector, rangeShift.
- """
- # itemSize defaults to 16, for backward compatibility
- # with upstream fonttools.
- exponent = maxPowerOfTwo(n)
- searchRange = (2 ** exponent) * itemSize
- entrySelector = exponent
- rangeShift = max(0, n * itemSize - searchRange)
- return searchRange, entrySelector, rangeShift
+ """Calculate searchRange, entrySelector, rangeShift."""
+ # itemSize defaults to 16, for backward compatibility
+ # with upstream fonttools.
+ exponent = maxPowerOfTwo(n)
+ searchRange = (2**exponent) * itemSize
+ entrySelector = exponent
+ rangeShift = max(0, n * itemSize - searchRange)
+ return searchRange, entrySelector, rangeShift
diff --git a/Lib/fontTools/ttLib/ttGlyphSet.py b/Lib/fontTools/ttLib/ttGlyphSet.py
index be26215b..d4384c89 100644
--- a/Lib/fontTools/ttLib/ttGlyphSet.py
+++ b/Lib/fontTools/ttLib/ttGlyphSet.py
@@ -1,221 +1,318 @@
"""GlyphSets returned by a TTFont."""
-from fontTools.misc.fixedTools import otRound
+from abc import ABC, abstractmethod
+from collections.abc import Mapping
+from contextlib import contextmanager
from copy import copy
+from types import SimpleNamespace
+from fontTools.misc.fixedTools import otRound
+from fontTools.misc.loggingTools import deprecateFunction
+from fontTools.misc.transform import Transform
+from fontTools.pens.transformPen import TransformPen, TransformPointPen
+
+
+class _TTGlyphSet(Mapping):
+
+ """Generic dict-like GlyphSet class that pulls metrics from hmtx and
+ glyph shape from TrueType or CFF.
+ """
+
+ def __init__(self, font, location, glyphsMapping):
+ self.font = font
+ self.defaultLocationNormalized = (
+ {axis.axisTag: 0 for axis in self.font["fvar"].axes}
+ if "fvar" in self.font
+ else {}
+ )
+ self.location = location if location is not None else {}
+ self.rawLocation = {} # VarComponent-only location
+ self.originalLocation = location if location is not None else {}
+ self.depth = 0
+ self.locationStack = []
+ self.rawLocationStack = []
+ self.glyphsMapping = glyphsMapping
+ self.hMetrics = font["hmtx"].metrics
+ self.vMetrics = getattr(font.get("vmtx"), "metrics", None)
+ self.hvarTable = None
+ if location:
+ from fontTools.varLib.varStore import VarStoreInstancer
+
+ self.hvarTable = getattr(font.get("HVAR"), "table", None)
+ if self.hvarTable is not None:
+ self.hvarInstancer = VarStoreInstancer(
+ self.hvarTable.VarStore, font["fvar"].axes, location
+ )
+ # TODO VVAR, VORG
+
+ @contextmanager
+ def pushLocation(self, location, reset: bool):
+ self.locationStack.append(self.location)
+ self.rawLocationStack.append(self.rawLocation)
+ if reset:
+ self.location = self.originalLocation.copy()
+ self.rawLocation = self.defaultLocationNormalized.copy()
+ else:
+ self.location = self.location.copy()
+ self.rawLocation = {}
+ self.location.update(location)
+ self.rawLocation.update(location)
+
+ try:
+ yield None
+ finally:
+ self.location = self.locationStack.pop()
+ self.rawLocation = self.rawLocationStack.pop()
+
+ @contextmanager
+ def pushDepth(self):
+ try:
+ depth = self.depth
+ self.depth += 1
+ yield depth
+ finally:
+ self.depth -= 1
+
+ def __contains__(self, glyphName):
+ return glyphName in self.glyphsMapping
+
+ def __iter__(self):
+ return iter(self.glyphsMapping.keys())
+
+ def __len__(self):
+ return len(self.glyphsMapping)
+
+ @deprecateFunction(
+ "use 'glyphName in glyphSet' instead", category=DeprecationWarning
+ )
+ def has_key(self, glyphName):
+ return glyphName in self.glyphsMapping
+
+
+class _TTGlyphSetGlyf(_TTGlyphSet):
+ def __init__(self, font, location):
+ self.glyfTable = font["glyf"]
+ super().__init__(font, location, self.glyfTable)
+ self.gvarTable = font.get("gvar")
+
+ def __getitem__(self, glyphName):
+ return _TTGlyphGlyf(self, glyphName)
+
+
+class _TTGlyphSetCFF(_TTGlyphSet):
+ def __init__(self, font, location):
+ tableTag = "CFF2" if "CFF2" in font else "CFF "
+ self.charStrings = list(font[tableTag].cff.values())[0].CharStrings
+ super().__init__(font, location, self.charStrings)
+ self.blender = None
+ if location:
+ from fontTools.varLib.varStore import VarStoreInstancer
+
+ varStore = getattr(self.charStrings, "varStore", None)
+ if varStore is not None:
+ instancer = VarStoreInstancer(
+ varStore.otVarStore, font["fvar"].axes, location
+ )
+ self.blender = instancer.interpolateFromDeltas
+
+ def __getitem__(self, glyphName):
+ return _TTGlyphCFF(self, glyphName)
+
+
+class _TTGlyph(ABC):
+
+ """Glyph object that supports the Pen protocol, meaning that it has
+ .draw() and .drawPoints() methods that take a pen object as their only
+ argument. Additionally there are 'width' and 'lsb' attributes, read from
+ the 'hmtx' table.
+
+ If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'
+ attributes.
+ """
+
+ def __init__(self, glyphSet, glyphName):
+ self.glyphSet = glyphSet
+ self.name = glyphName
+ self.width, self.lsb = glyphSet.hMetrics[glyphName]
+ if glyphSet.vMetrics is not None:
+ self.height, self.tsb = glyphSet.vMetrics[glyphName]
+ else:
+ self.height, self.tsb = None, None
+ if glyphSet.location and glyphSet.hvarTable is not None:
+ varidx = (
+ glyphSet.font.getGlyphID(glyphName)
+ if glyphSet.hvarTable.AdvWidthMap is None
+ else glyphSet.hvarTable.AdvWidthMap.mapping[glyphName]
+ )
+ self.width += glyphSet.hvarInstancer[varidx]
+ # TODO: VVAR/VORG
+
+ @abstractmethod
+ def draw(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
+ how that works.
+ """
+ raise NotImplementedError
+
+ def drawPoints(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
+ how that works.
+ """
+ from fontTools.pens.pointPen import SegmentToPointPen
+
+ self.draw(SegmentToPointPen(pen))
-class _TTGlyphSet(object):
-
- """Generic dict-like GlyphSet class that pulls metrics from hmtx and
- glyph shape from TrueType or CFF.
- """
-
- def __init__(self, ttFont, glyphs, glyphType):
- """Construct a new glyphset.
-
- Args:
- font (TTFont): The font object (used to get metrics).
- glyphs (dict): A dictionary mapping glyph names to ``_TTGlyph`` objects.
- glyphType (class): Either ``_TTGlyphCFF`` or ``_TTGlyphGlyf``.
- """
- self._glyphs = glyphs
- self._hmtx = ttFont['hmtx']
- self._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None
- self._glyphType = glyphType
-
- def keys(self):
- return list(self._glyphs.keys())
-
- def has_key(self, glyphName):
- return glyphName in self._glyphs
-
- __contains__ = has_key
-
- def __getitem__(self, glyphName):
- horizontalMetrics = self._hmtx[glyphName]
- verticalMetrics = self._vmtx[glyphName] if self._vmtx else None
- return self._glyphType(
- self, self._glyphs[glyphName], horizontalMetrics, verticalMetrics)
-
- def __len__(self):
- return len(self._glyphs)
-
- def get(self, glyphName, default=None):
- try:
- return self[glyphName]
- except KeyError:
- return default
-
-class _TTGlyph(object):
-
- """Wrapper for a TrueType glyph that supports the Pen protocol, meaning
- that it has .draw() and .drawPoints() methods that take a pen object as
- their only argument. Additionally there are 'width' and 'lsb' attributes,
- read from the 'hmtx' table.
-
- If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'
- attributes.
- """
-
- def __init__(self, glyphset, glyph, horizontalMetrics, verticalMetrics=None):
- """Construct a new _TTGlyph.
-
- Args:
- glyphset (_TTGlyphSet): A glyphset object used to resolve components.
- glyph (ttLib.tables._g_l_y_f.Glyph): The glyph object.
- horizontalMetrics (int, int): The glyph's width and left sidebearing.
- """
- self._glyphset = glyphset
- self._glyph = glyph
- self.width, self.lsb = horizontalMetrics
- if verticalMetrics:
- self.height, self.tsb = verticalMetrics
- else:
- self.height, self.tsb = None, None
-
- def draw(self, pen):
- """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
- how that works.
- """
- self._glyph.draw(pen)
-
- def drawPoints(self, pen):
- from fontTools.pens.pointPen import SegmentToPointPen
- self.draw(SegmentToPointPen(pen))
-
-class _TTGlyphCFF(_TTGlyph):
- pass
class _TTGlyphGlyf(_TTGlyph):
+ def draw(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
+ how that works.
+ """
+ glyph, offset = self._getGlyphAndOffset()
+
+ with self.glyphSet.pushDepth() as depth:
+ if depth:
+ offset = 0 # Offset should only apply at top-level
+
+ if glyph.isVarComposite():
+ self._drawVarComposite(glyph, pen, False)
+ return
+
+ glyph.draw(pen, self.glyphSet.glyfTable, offset)
+
+ def drawPoints(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
+ how that works.
+ """
+ glyph, offset = self._getGlyphAndOffset()
+
+ with self.glyphSet.pushDepth() as depth:
+ if depth:
+ offset = 0 # Offset should only apply at top-level
+
+ if glyph.isVarComposite():
+ self._drawVarComposite(glyph, pen, True)
+ return
+
+ glyph.drawPoints(pen, self.glyphSet.glyfTable, offset)
+
+ def _drawVarComposite(self, glyph, pen, isPointPen):
+ from fontTools.ttLib.tables._g_l_y_f import (
+ VarComponentFlags,
+ VAR_COMPONENT_TRANSFORM_MAPPING,
+ )
+
+ for comp in glyph.components:
+ with self.glyphSet.pushLocation(
+ comp.location, comp.flags & VarComponentFlags.RESET_UNSPECIFIED_AXES
+ ):
+ try:
+ pen.addVarComponent(
+ comp.glyphName, comp.transform, self.glyphSet.rawLocation
+ )
+ except AttributeError:
+ t = comp.transform.toTransform()
+ if isPointPen:
+ tPen = TransformPointPen(pen, t)
+ self.glyphSet[comp.glyphName].drawPoints(tPen)
+ else:
+ tPen = TransformPen(pen, t)
+ self.glyphSet[comp.glyphName].draw(tPen)
+
+ def _getGlyphAndOffset(self):
+ if self.glyphSet.location and self.glyphSet.gvarTable is not None:
+ glyph = self._getGlyphInstance()
+ else:
+ glyph = self.glyphSet.glyfTable[self.name]
+
+ offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
+ return glyph, offset
+
+ def _getGlyphInstance(self):
+ from fontTools.varLib.iup import iup_delta
+ from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
+ from fontTools.varLib.models import supportScalar
+
+ glyphSet = self.glyphSet
+ glyfTable = glyphSet.glyfTable
+ variations = glyphSet.gvarTable.variations[self.name]
+ hMetrics = glyphSet.hMetrics
+ vMetrics = glyphSet.vMetrics
+ coordinates, _ = glyfTable._getCoordinatesAndControls(
+ self.name, hMetrics, vMetrics
+ )
+ origCoords, endPts = None, None
+ for var in variations:
+ scalar = supportScalar(glyphSet.location, var.axes)
+ if not scalar:
+ continue
+ delta = var.coordinates
+ if None in delta:
+ if origCoords is None:
+ origCoords, control = glyfTable._getCoordinatesAndControls(
+ self.name, hMetrics, vMetrics
+ )
+ endPts = (
+ control[1] if control[0] >= 1 else list(range(len(control[1])))
+ )
+ delta = iup_delta(delta, origCoords, endPts)
+ coordinates += GlyphCoordinates(delta) * scalar
+
+ glyph = copy(glyfTable[self.name]) # Shallow copy
+ width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyfTable)
+ self.lsb = lsb
+ self.tsb = tsb
+ if glyphSet.hvarTable is None:
+ # no HVAR: let's set metrics from the phantom points
+ self.width = width
+ self.height = height
+ return glyph
+
- def draw(self, pen):
- """Draw the glyph onto Pen. See fontTools.pens.basePen for details
- how that works.
- """
- glyfTable = self._glyphset._glyphs
- glyph = self._glyph
- offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
- glyph.draw(pen, glyfTable, offset)
-
- def drawPoints(self, pen):
- """Draw the glyph onto PointPen. See fontTools.pens.pointPen
- for details how that works.
- """
- glyfTable = self._glyphset._glyphs
- glyph = self._glyph
- offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
- glyph.drawPoints(pen, glyfTable, offset)
-
-
-
-class _TTVarGlyphSet(_TTGlyphSet):
-
- def __init__(self, font, location, normalized=False):
- self._ttFont = font
- self._glyphs = font['glyf']
-
- if not normalized:
- from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap
-
- axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in font['fvar'].axes}
- location = normalizeLocation(location, axes)
- if 'avar' in font:
- avar = font['avar']
- avarSegments = avar.segments
- new_location = {}
- for axis_tag, value in location.items():
- avarMapping = avarSegments.get(axis_tag, None)
- if avarMapping is not None:
- value = piecewiseLinearMap(value, avarMapping)
- new_location[axis_tag] = value
- location = new_location
- del new_location
-
- self.location = location
-
- def __getitem__(self, glyphName):
- if glyphName not in self._glyphs:
- raise KeyError(glyphName)
- return _TTVarGlyphGlyf(self._ttFont, glyphName, self.location)
+class _TTGlyphCFF(_TTGlyph):
+ def draw(self, pen):
+ """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
+ how that works.
+ """
+ self.glyphSet.charStrings[self.name].draw(pen, self.glyphSet.blender)
def _setCoordinates(glyph, coord, glyfTable):
- # Handle phantom points for (left, right, top, bottom) positions.
- assert len(coord) >= 4
- if not hasattr(glyph, 'xMin'):
- glyph.recalcBounds(glyfTable)
- leftSideX = coord[-4][0]
- rightSideX = coord[-3][0]
- topSideY = coord[-2][1]
- bottomSideY = coord[-1][1]
-
- for _ in range(4):
- del coord[-1]
-
- if glyph.isComposite():
- assert len(coord) == len(glyph.components)
- for p,comp in zip(coord, glyph.components):
- if hasattr(comp, 'x'):
- comp.x,comp.y = p
- elif glyph.numberOfContours == 0:
- assert len(coord) == 0
- else:
- assert len(coord) == len(glyph.coordinates)
- glyph.coordinates = coord
-
- glyph.recalcBounds(glyfTable)
-
- horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
- verticalAdvanceWidth = otRound(topSideY - bottomSideY)
- leftSideBearing = otRound(glyph.xMin - leftSideX)
- topSideBearing = otRound(topSideY - glyph.yMax)
- return (
- horizontalAdvanceWidth,
- leftSideBearing,
- verticalAdvanceWidth,
- topSideBearing,
- )
-
-
-class _TTVarGlyph(_TTGlyph):
- def __init__(self, ttFont, glyphName, location):
- self._ttFont = ttFont
- self._glyphName = glyphName
- self._location = location
- # draw() fills these in
- self.width = self.height = self.lsb = self.tsb = None
-
-
-class _TTVarGlyphGlyf(_TTVarGlyph):
-
- def draw(self, pen):
- from fontTools.varLib.iup import iup_delta
- from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
- from fontTools.varLib.models import supportScalar
-
- glyf = self._ttFont['glyf']
- hMetrics = self._ttFont['hmtx'].metrics
- vMetrics = getattr(self._ttFont.get('vmtx'), 'metrics', None)
-
- variations = self._ttFont['gvar'].variations[self._glyphName]
- coordinates, _ = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics)
- origCoords, endPts = None, None
- for var in variations:
- scalar = supportScalar(self._location, var.axes)
- if not scalar:
- continue
- delta = var.coordinates
- if None in delta:
- if origCoords is None:
- origCoords,control = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics)
- endPts = control[1] if control[0] >= 1 else list(range(len(control[1])))
- delta = iup_delta(delta, origCoords, endPts)
- coordinates += GlyphCoordinates(delta) * scalar
-
- glyph = copy(glyf[self._glyphName]) # Shallow copy
- width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyf)
- self.width = width
- self.lsb = lsb
- self.height = height
- self.tsb = tsb
- offset = lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
- glyph.draw(pen, glyf, offset)
+ # Handle phantom points for (left, right, top, bottom) positions.
+ assert len(coord) >= 4
+ leftSideX = coord[-4][0]
+ rightSideX = coord[-3][0]
+ topSideY = coord[-2][1]
+ bottomSideY = coord[-1][1]
+
+ for _ in range(4):
+ del coord[-1]
+
+ if glyph.isComposite():
+ assert len(coord) == len(glyph.components)
+ glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy
+ for p, comp in zip(coord, glyph.components):
+ if hasattr(comp, "x"):
+ comp.x, comp.y = p
+ elif glyph.isVarComposite():
+ glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy
+ for comp in glyph.components:
+ coord = comp.setCoordinates(coord)
+ assert not coord
+ elif glyph.numberOfContours == 0:
+ assert len(coord) == 0
+ else:
+ assert len(coord) == len(glyph.coordinates)
+ glyph.coordinates = coord
+
+ glyph.recalcBounds(glyfTable)
+
+ horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
+ verticalAdvanceWidth = otRound(topSideY - bottomSideY)
+ leftSideBearing = otRound(glyph.xMin - leftSideX)
+ topSideBearing = otRound(topSideY - glyph.yMax)
+ return (
+ horizontalAdvanceWidth,
+ leftSideBearing,
+ verticalAdvanceWidth,
+ topSideBearing,
+ )
diff --git a/Lib/fontTools/ttLib/woff2.py b/Lib/fontTools/ttLib/woff2.py
index b66661ab..9da2f7e6 100644
--- a/Lib/fontTools/ttLib/woff2.py
+++ b/Lib/fontTools/ttLib/woff2.py
@@ -6,11 +6,24 @@ from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
-from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass,
- getSearchRange)
-from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry,
- WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry,
- sfntDirectoryEntrySize, calcChecksum)
+from fontTools.ttLib import (
+ TTFont,
+ TTLibError,
+ getTableModule,
+ getTableClass,
+ getSearchRange,
+)
+from fontTools.ttLib.sfnt import (
+ SFNTReader,
+ SFNTWriter,
+ DirectoryEntry,
+ WOFFFlavorData,
+ sfntDirectoryFormat,
+ sfntDirectorySize,
+ SFNTDirectoryEntry,
+ sfntDirectoryEntrySize,
+ calcChecksum,
+)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
@@ -19,454 +32,473 @@ log = logging.getLogger("fontTools.ttLib.woff2")
haveBrotli = False
try:
- try:
- import brotlicffi as brotli
- except ImportError:
- import brotli
- haveBrotli = True
+ try:
+ import brotlicffi as brotli
+ except ImportError:
+ import brotli
+ haveBrotli = True
except ImportError:
- pass
+ pass
class WOFF2Reader(SFNTReader):
-
- flavor = "woff2"
-
- def __init__(self, file, checkChecksums=0, fontNumber=-1):
- if not haveBrotli:
- log.error(
- 'The WOFF2 decoder requires the Brotli Python extension, available at: '
- 'https://github.com/google/brotli')
- raise ImportError("No module named brotli")
-
- self.file = file
-
- signature = Tag(self.file.read(4))
- if signature != b"wOF2":
- raise TTLibError("Not a WOFF2 font (bad signature)")
-
- self.file.seek(0)
- self.DirectoryEntry = WOFF2DirectoryEntry
- data = self.file.read(woff2DirectorySize)
- if len(data) != woff2DirectorySize:
- raise TTLibError('Not a WOFF2 font (not enough data)')
- sstruct.unpack(woff2DirectoryFormat, data, self)
-
- self.tables = OrderedDict()
- offset = 0
- for i in range(self.numTables):
- entry = self.DirectoryEntry()
- entry.fromFile(self.file)
- tag = Tag(entry.tag)
- self.tables[tag] = entry
- entry.offset = offset
- offset += entry.length
-
- totalUncompressedSize = offset
- compressedData = self.file.read(self.totalCompressedSize)
- decompressedData = brotli.decompress(compressedData)
- if len(decompressedData) != totalUncompressedSize:
- raise TTLibError(
- 'unexpected size for decompressed font data: expected %d, found %d'
- % (totalUncompressedSize, len(decompressedData)))
- self.transformBuffer = BytesIO(decompressedData)
-
- self.file.seek(0, 2)
- if self.length != self.file.tell():
- raise TTLibError("reported 'length' doesn't match the actual file size")
-
- self.flavorData = WOFF2FlavorData(self)
-
- # make empty TTFont to store data while reconstructing tables
- self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
-
- def __getitem__(self, tag):
- """Fetch the raw table data. Reconstruct transformed tables."""
- entry = self.tables[Tag(tag)]
- if not hasattr(entry, 'data'):
- if entry.transformed:
- entry.data = self.reconstructTable(tag)
- else:
- entry.data = entry.loadData(self.transformBuffer)
- return entry.data
-
- def reconstructTable(self, tag):
- """Reconstruct table named 'tag' from transformed data."""
- entry = self.tables[Tag(tag)]
- rawData = entry.loadData(self.transformBuffer)
- if tag == 'glyf':
- # no need to pad glyph data when reconstructing
- padding = self.padding if hasattr(self, 'padding') else None
- data = self._reconstructGlyf(rawData, padding)
- elif tag == 'loca':
- data = self._reconstructLoca()
- elif tag == 'hmtx':
- data = self._reconstructHmtx(rawData)
- else:
- raise TTLibError("transform for table '%s' is unknown" % tag)
- return data
-
- def _reconstructGlyf(self, data, padding=None):
- """ Return recostructed glyf table data, and set the corresponding loca's
- locations. Optionally pad glyph offsets to the specified number of bytes.
- """
- self.ttFont['loca'] = WOFF2LocaTable()
- glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable()
- glyfTable.reconstruct(data, self.ttFont)
- if padding:
- glyfTable.padding = padding
- data = glyfTable.compile(self.ttFont)
- return data
-
- def _reconstructLoca(self):
- """ Return reconstructed loca table data. """
- if 'loca' not in self.ttFont:
- # make sure glyf is reconstructed first
- self.tables['glyf'].data = self.reconstructTable('glyf')
- locaTable = self.ttFont['loca']
- data = locaTable.compile(self.ttFont)
- if len(data) != self.tables['loca'].origLength:
- raise TTLibError(
- "reconstructed 'loca' table doesn't match original size: "
- "expected %d, found %d"
- % (self.tables['loca'].origLength, len(data)))
- return data
-
- def _reconstructHmtx(self, data):
- """ Return reconstructed hmtx table data. """
- # Before reconstructing 'hmtx' table we need to parse other tables:
- # 'glyf' is required for reconstructing the sidebearings from the glyphs'
- # bounding box; 'hhea' is needed for the numberOfHMetrics field.
- if "glyf" in self.flavorData.transformedTables:
- # transformed 'glyf' table is self-contained, thus 'loca' not needed
- tableDependencies = ("maxp", "hhea", "glyf")
- else:
- # decompiling untransformed 'glyf' requires 'loca', which requires 'head'
- tableDependencies = ("maxp", "head", "hhea", "loca", "glyf")
- for tag in tableDependencies:
- self._decompileTable(tag)
- hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable()
- hmtxTable.reconstruct(data, self.ttFont)
- data = hmtxTable.compile(self.ttFont)
- return data
-
- def _decompileTable(self, tag):
- """Decompile table data and store it inside self.ttFont."""
- data = self[tag]
- if self.ttFont.isLoaded(tag):
- return self.ttFont[tag]
- tableClass = getTableClass(tag)
- table = tableClass(tag)
- self.ttFont.tables[tag] = table
- table.decompile(data, self.ttFont)
+ flavor = "woff2"
+
+ def __init__(self, file, checkChecksums=0, fontNumber=-1):
+ if not haveBrotli:
+ log.error(
+ "The WOFF2 decoder requires the Brotli Python extension, available at: "
+ "https://github.com/google/brotli"
+ )
+ raise ImportError("No module named brotli")
+
+ self.file = file
+
+ signature = Tag(self.file.read(4))
+ if signature != b"wOF2":
+ raise TTLibError("Not a WOFF2 font (bad signature)")
+
+ self.file.seek(0)
+ self.DirectoryEntry = WOFF2DirectoryEntry
+ data = self.file.read(woff2DirectorySize)
+ if len(data) != woff2DirectorySize:
+ raise TTLibError("Not a WOFF2 font (not enough data)")
+ sstruct.unpack(woff2DirectoryFormat, data, self)
+
+ self.tables = OrderedDict()
+ offset = 0
+ for i in range(self.numTables):
+ entry = self.DirectoryEntry()
+ entry.fromFile(self.file)
+ tag = Tag(entry.tag)
+ self.tables[tag] = entry
+ entry.offset = offset
+ offset += entry.length
+
+ totalUncompressedSize = offset
+ compressedData = self.file.read(self.totalCompressedSize)
+ decompressedData = brotli.decompress(compressedData)
+ if len(decompressedData) != totalUncompressedSize:
+ raise TTLibError(
+ "unexpected size for decompressed font data: expected %d, found %d"
+ % (totalUncompressedSize, len(decompressedData))
+ )
+ self.transformBuffer = BytesIO(decompressedData)
+
+ self.file.seek(0, 2)
+ if self.length != self.file.tell():
+ raise TTLibError("reported 'length' doesn't match the actual file size")
+
+ self.flavorData = WOFF2FlavorData(self)
+
+ # make empty TTFont to store data while reconstructing tables
+ self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
+
+ def __getitem__(self, tag):
+ """Fetch the raw table data. Reconstruct transformed tables."""
+ entry = self.tables[Tag(tag)]
+ if not hasattr(entry, "data"):
+ if entry.transformed:
+ entry.data = self.reconstructTable(tag)
+ else:
+ entry.data = entry.loadData(self.transformBuffer)
+ return entry.data
+
+ def reconstructTable(self, tag):
+ """Reconstruct table named 'tag' from transformed data."""
+ entry = self.tables[Tag(tag)]
+ rawData = entry.loadData(self.transformBuffer)
+ if tag == "glyf":
+ # no need to pad glyph data when reconstructing
+ padding = self.padding if hasattr(self, "padding") else None
+ data = self._reconstructGlyf(rawData, padding)
+ elif tag == "loca":
+ data = self._reconstructLoca()
+ elif tag == "hmtx":
+ data = self._reconstructHmtx(rawData)
+ else:
+ raise TTLibError("transform for table '%s' is unknown" % tag)
+ return data
+
+ def _reconstructGlyf(self, data, padding=None):
+ """Return recostructed glyf table data, and set the corresponding loca's
+ locations. Optionally pad glyph offsets to the specified number of bytes.
+ """
+ self.ttFont["loca"] = WOFF2LocaTable()
+ glyfTable = self.ttFont["glyf"] = WOFF2GlyfTable()
+ glyfTable.reconstruct(data, self.ttFont)
+ if padding:
+ glyfTable.padding = padding
+ data = glyfTable.compile(self.ttFont)
+ return data
+
+ def _reconstructLoca(self):
+ """Return reconstructed loca table data."""
+ if "loca" not in self.ttFont:
+ # make sure glyf is reconstructed first
+ self.tables["glyf"].data = self.reconstructTable("glyf")
+ locaTable = self.ttFont["loca"]
+ data = locaTable.compile(self.ttFont)
+ if len(data) != self.tables["loca"].origLength:
+ raise TTLibError(
+ "reconstructed 'loca' table doesn't match original size: "
+ "expected %d, found %d" % (self.tables["loca"].origLength, len(data))
+ )
+ return data
+
+ def _reconstructHmtx(self, data):
+ """Return reconstructed hmtx table data."""
+ # Before reconstructing 'hmtx' table we need to parse other tables:
+ # 'glyf' is required for reconstructing the sidebearings from the glyphs'
+ # bounding box; 'hhea' is needed for the numberOfHMetrics field.
+ if "glyf" in self.flavorData.transformedTables:
+ # transformed 'glyf' table is self-contained, thus 'loca' not needed
+ tableDependencies = ("maxp", "hhea", "glyf")
+ else:
+ # decompiling untransformed 'glyf' requires 'loca', which requires 'head'
+ tableDependencies = ("maxp", "head", "hhea", "loca", "glyf")
+ for tag in tableDependencies:
+ self._decompileTable(tag)
+ hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable()
+ hmtxTable.reconstruct(data, self.ttFont)
+ data = hmtxTable.compile(self.ttFont)
+ return data
+
+ def _decompileTable(self, tag):
+ """Decompile table data and store it inside self.ttFont."""
+ data = self[tag]
+ if self.ttFont.isLoaded(tag):
+ return self.ttFont[tag]
+ tableClass = getTableClass(tag)
+ table = tableClass(tag)
+ self.ttFont.tables[tag] = table
+ table.decompile(data, self.ttFont)
class WOFF2Writer(SFNTWriter):
-
- flavor = "woff2"
-
- def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
- flavor=None, flavorData=None):
- if not haveBrotli:
- log.error(
- 'The WOFF2 encoder requires the Brotli Python extension, available at: '
- 'https://github.com/google/brotli')
- raise ImportError("No module named brotli")
-
- self.file = file
- self.numTables = numTables
- self.sfntVersion = Tag(sfntVersion)
- self.flavorData = WOFF2FlavorData(data=flavorData)
-
- self.directoryFormat = woff2DirectoryFormat
- self.directorySize = woff2DirectorySize
- self.DirectoryEntry = WOFF2DirectoryEntry
-
- self.signature = Tag("wOF2")
-
- self.nextTableOffset = 0
- self.transformBuffer = BytesIO()
-
- self.tables = OrderedDict()
-
- # make empty TTFont to store data while normalising and transforming tables
- self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
-
- def __setitem__(self, tag, data):
- """Associate new entry named 'tag' with raw table data."""
- if tag in self.tables:
- raise TTLibError("cannot rewrite '%s' table" % tag)
- if tag == 'DSIG':
- # always drop DSIG table, since the encoding process can invalidate it
- self.numTables -= 1
- return
-
- entry = self.DirectoryEntry()
- entry.tag = Tag(tag)
- entry.flags = getKnownTagIndex(entry.tag)
- # WOFF2 table data are written to disk only on close(), after all tags
- # have been specified
- entry.data = data
-
- self.tables[tag] = entry
-
- def close(self):
- """ All tags must have been specified. Now write the table data and directory.
- """
- if len(self.tables) != self.numTables:
- raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables)))
-
- if self.sfntVersion in ("\x00\x01\x00\x00", "true"):
- isTrueType = True
- elif self.sfntVersion == "OTTO":
- isTrueType = False
- else:
- raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
-
- # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned.
- # However, the reference WOFF2 implementation still fails to reconstruct
- # 'unpadded' glyf tables, therefore we need to 'normalise' them.
- # See:
- # https://github.com/khaledhosny/ots/issues/60
- # https://github.com/google/woff2/issues/15
- if (
- isTrueType
- and "glyf" in self.flavorData.transformedTables
- and "glyf" in self.tables
- ):
- self._normaliseGlyfAndLoca(padding=4)
- self._setHeadTransformFlag()
-
- # To pass the legacy OpenType Sanitiser currently included in browsers,
- # we must sort the table directory and data alphabetically by tag.
- # See:
- # https://github.com/google/woff2/pull/3
- # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html
- # TODO(user): remove to match spec once browsers are on newer OTS
- self.tables = OrderedDict(sorted(self.tables.items()))
-
- self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets()
-
- fontData = self._transformTables()
- compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT)
-
- self.totalCompressedSize = len(compressedFont)
- self.length = self._calcTotalSize()
- self.majorVersion, self.minorVersion = self._getVersion()
- self.reserved = 0
-
- directory = self._packTableDirectory()
- self.file.seek(0)
- self.file.write(pad(directory + compressedFont, size=4))
- self._writeFlavorData()
-
- def _normaliseGlyfAndLoca(self, padding=4):
- """ Recompile glyf and loca tables, aligning glyph offsets to multiples of
- 'padding' size. Update the head table's 'indexToLocFormat' accordingly while
- compiling loca.
- """
- if self.sfntVersion == "OTTO":
- return
-
- for tag in ('maxp', 'head', 'loca', 'glyf'):
- self._decompileTable(tag)
- self.ttFont['glyf'].padding = padding
- for tag in ('glyf', 'loca'):
- self._compileTable(tag)
-
- def _setHeadTransformFlag(self):
- """ Set bit 11 of 'head' table flags to indicate that the font has undergone
- a lossless modifying transform. Re-compile head table data."""
- self._decompileTable('head')
- self.ttFont['head'].flags |= (1 << 11)
- self._compileTable('head')
-
- def _decompileTable(self, tag):
- """ Fetch table data, decompile it, and store it inside self.ttFont. """
- tag = Tag(tag)
- if tag not in self.tables:
- raise TTLibError("missing required table: %s" % tag)
- if self.ttFont.isLoaded(tag):
- return
- data = self.tables[tag].data
- if tag == 'loca':
- tableClass = WOFF2LocaTable
- elif tag == 'glyf':
- tableClass = WOFF2GlyfTable
- elif tag == 'hmtx':
- tableClass = WOFF2HmtxTable
- else:
- tableClass = getTableClass(tag)
- table = tableClass(tag)
- self.ttFont.tables[tag] = table
- table.decompile(data, self.ttFont)
-
- def _compileTable(self, tag):
- """ Compile table and store it in its 'data' attribute. """
- self.tables[tag].data = self.ttFont[tag].compile(self.ttFont)
-
- def _calcSFNTChecksumsLengthsAndOffsets(self):
- """ Compute the 'original' SFNT checksums, lengths and offsets for checksum
- adjustment calculation. Return the total size of the uncompressed font.
- """
- offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables)
- for tag, entry in self.tables.items():
- data = entry.data
- entry.origOffset = offset
- entry.origLength = len(data)
- if tag == 'head':
- entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
- else:
- entry.checkSum = calcChecksum(data)
- offset += (entry.origLength + 3) & ~3
- return offset
-
- def _transformTables(self):
- """Return transformed font data."""
- transformedTables = self.flavorData.transformedTables
- for tag, entry in self.tables.items():
- data = None
- if tag in transformedTables:
- data = self.transformTable(tag)
- if data is not None:
- entry.transformed = True
- if data is None:
- # pass-through the table data without transformation
- data = entry.data
- entry.transformed = False
- entry.offset = self.nextTableOffset
- entry.saveData(self.transformBuffer, data)
- self.nextTableOffset += entry.length
- self.writeMasterChecksum()
- fontData = self.transformBuffer.getvalue()
- return fontData
-
- def transformTable(self, tag):
- """Return transformed table data, or None if some pre-conditions aren't
- met -- in which case, the non-transformed table data will be used.
- """
- if tag == "loca":
- data = b""
- elif tag == "glyf":
- for tag in ('maxp', 'head', 'loca', 'glyf'):
- self._decompileTable(tag)
- glyfTable = self.ttFont['glyf']
- data = glyfTable.transform(self.ttFont)
- elif tag == "hmtx":
- if "glyf" not in self.tables:
- return
- for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"):
- self._decompileTable(tag)
- hmtxTable = self.ttFont["hmtx"]
- data = hmtxTable.transform(self.ttFont) # can be None
- else:
- raise TTLibError("Transform for table '%s' is unknown" % tag)
- return data
-
- def _calcMasterChecksum(self):
- """Calculate checkSumAdjustment."""
- tags = list(self.tables.keys())
- checksums = []
- for i in range(len(tags)):
- checksums.append(self.tables[tags[i]].checkSum)
-
- # Create a SFNT directory for checksum calculation purposes
- self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16)
- directory = sstruct.pack(sfntDirectoryFormat, self)
- tables = sorted(self.tables.items())
- for tag, entry in tables:
- sfntEntry = SFNTDirectoryEntry()
- sfntEntry.tag = entry.tag
- sfntEntry.checkSum = entry.checkSum
- sfntEntry.offset = entry.origOffset
- sfntEntry.length = entry.origLength
- directory = directory + sfntEntry.toString()
-
- directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
- assert directory_end == len(directory)
-
- checksums.append(calcChecksum(directory))
- checksum = sum(checksums) & 0xffffffff
- # BiboAfba!
- checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
- return checksumadjustment
-
- def writeMasterChecksum(self):
- """Write checkSumAdjustment to the transformBuffer."""
- checksumadjustment = self._calcMasterChecksum()
- self.transformBuffer.seek(self.tables['head'].offset + 8)
- self.transformBuffer.write(struct.pack(">L", checksumadjustment))
-
- def _calcTotalSize(self):
- """Calculate total size of WOFF2 font, including any meta- and/or private data."""
- offset = self.directorySize
- for entry in self.tables.values():
- offset += len(entry.toString())
- offset += self.totalCompressedSize
- offset = (offset + 3) & ~3
- offset = self._calcFlavorDataOffsetsAndSize(offset)
- return offset
-
- def _calcFlavorDataOffsetsAndSize(self, start):
- """Calculate offsets and lengths for any meta- and/or private data."""
- offset = start
- data = self.flavorData
- if data.metaData:
- self.metaOrigLength = len(data.metaData)
- self.metaOffset = offset
- self.compressedMetaData = brotli.compress(
- data.metaData, mode=brotli.MODE_TEXT)
- self.metaLength = len(self.compressedMetaData)
- offset += self.metaLength
- else:
- self.metaOffset = self.metaLength = self.metaOrigLength = 0
- self.compressedMetaData = b""
- if data.privData:
- # make sure private data is padded to 4-byte boundary
- offset = (offset + 3) & ~3
- self.privOffset = offset
- self.privLength = len(data.privData)
- offset += self.privLength
- else:
- self.privOffset = self.privLength = 0
- return offset
-
- def _getVersion(self):
- """Return the WOFF2 font's (majorVersion, minorVersion) tuple."""
- data = self.flavorData
- if data.majorVersion is not None and data.minorVersion is not None:
- return data.majorVersion, data.minorVersion
- else:
- # if None, return 'fontRevision' from 'head' table
- if 'head' in self.tables:
- return struct.unpack(">HH", self.tables['head'].data[4:8])
- else:
- return 0, 0
-
- def _packTableDirectory(self):
- """Return WOFF2 table directory data."""
- directory = sstruct.pack(self.directoryFormat, self)
- for entry in self.tables.values():
- directory = directory + entry.toString()
- return directory
-
- def _writeFlavorData(self):
- """Write metadata and/or private data using appropiate padding."""
- compressedMetaData = self.compressedMetaData
- privData = self.flavorData.privData
- if compressedMetaData and privData:
- compressedMetaData = pad(compressedMetaData, size=4)
- if compressedMetaData:
- self.file.seek(self.metaOffset)
- assert self.file.tell() == self.metaOffset
- self.file.write(compressedMetaData)
- if privData:
- self.file.seek(self.privOffset)
- assert self.file.tell() == self.privOffset
- self.file.write(privData)
-
- def reordersTables(self):
- return True
+ flavor = "woff2"
+
+ def __init__(
+ self,
+ file,
+ numTables,
+ sfntVersion="\000\001\000\000",
+ flavor=None,
+ flavorData=None,
+ ):
+ if not haveBrotli:
+ log.error(
+ "The WOFF2 encoder requires the Brotli Python extension, available at: "
+ "https://github.com/google/brotli"
+ )
+ raise ImportError("No module named brotli")
+
+ self.file = file
+ self.numTables = numTables
+ self.sfntVersion = Tag(sfntVersion)
+ self.flavorData = WOFF2FlavorData(data=flavorData)
+
+ self.directoryFormat = woff2DirectoryFormat
+ self.directorySize = woff2DirectorySize
+ self.DirectoryEntry = WOFF2DirectoryEntry
+
+ self.signature = Tag("wOF2")
+
+ self.nextTableOffset = 0
+ self.transformBuffer = BytesIO()
+
+ self.tables = OrderedDict()
+
+ # make empty TTFont to store data while normalising and transforming tables
+ self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
+
+ def __setitem__(self, tag, data):
+ """Associate new entry named 'tag' with raw table data."""
+ if tag in self.tables:
+ raise TTLibError("cannot rewrite '%s' table" % tag)
+ if tag == "DSIG":
+ # always drop DSIG table, since the encoding process can invalidate it
+ self.numTables -= 1
+ return
+
+ entry = self.DirectoryEntry()
+ entry.tag = Tag(tag)
+ entry.flags = getKnownTagIndex(entry.tag)
+ # WOFF2 table data are written to disk only on close(), after all tags
+ # have been specified
+ entry.data = data
+
+ self.tables[tag] = entry
+
+ def close(self):
+ """All tags must have been specified. Now write the table data and directory."""
+ if len(self.tables) != self.numTables:
+ raise TTLibError(
+ "wrong number of tables; expected %d, found %d"
+ % (self.numTables, len(self.tables))
+ )
+
+ if self.sfntVersion in ("\x00\x01\x00\x00", "true"):
+ isTrueType = True
+ elif self.sfntVersion == "OTTO":
+ isTrueType = False
+ else:
+ raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
+
+ # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned.
+ # However, the reference WOFF2 implementation still fails to reconstruct
+ # 'unpadded' glyf tables, therefore we need to 'normalise' them.
+ # See:
+ # https://github.com/khaledhosny/ots/issues/60
+ # https://github.com/google/woff2/issues/15
+ if (
+ isTrueType
+ and "glyf" in self.flavorData.transformedTables
+ and "glyf" in self.tables
+ ):
+ self._normaliseGlyfAndLoca(padding=4)
+ self._setHeadTransformFlag()
+
+ # To pass the legacy OpenType Sanitiser currently included in browsers,
+ # we must sort the table directory and data alphabetically by tag.
+ # See:
+ # https://github.com/google/woff2/pull/3
+ # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html
+ #
+ # 2023: We rely on this in _transformTables where we expect that
+ # "loca" comes after "glyf" table.
+ self.tables = OrderedDict(sorted(self.tables.items()))
+
+ self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets()
+
+ fontData = self._transformTables()
+ compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT)
+
+ self.totalCompressedSize = len(compressedFont)
+ self.length = self._calcTotalSize()
+ self.majorVersion, self.minorVersion = self._getVersion()
+ self.reserved = 0
+
+ directory = self._packTableDirectory()
+ self.file.seek(0)
+ self.file.write(pad(directory + compressedFont, size=4))
+ self._writeFlavorData()
+
+ def _normaliseGlyfAndLoca(self, padding=4):
+ """Recompile glyf and loca tables, aligning glyph offsets to multiples of
+ 'padding' size. Update the head table's 'indexToLocFormat' accordingly while
+ compiling loca.
+ """
+ if self.sfntVersion == "OTTO":
+ return
+
+ for tag in ("maxp", "head", "loca", "glyf", "fvar"):
+ if tag in self.tables:
+ self._decompileTable(tag)
+ self.ttFont["glyf"].padding = padding
+ for tag in ("glyf", "loca"):
+ self._compileTable(tag)
+
+ def _setHeadTransformFlag(self):
+ """Set bit 11 of 'head' table flags to indicate that the font has undergone
+ a lossless modifying transform. Re-compile head table data."""
+ self._decompileTable("head")
+ self.ttFont["head"].flags |= 1 << 11
+ self._compileTable("head")
+
+ def _decompileTable(self, tag):
+ """Fetch table data, decompile it, and store it inside self.ttFont."""
+ tag = Tag(tag)
+ if tag not in self.tables:
+ raise TTLibError("missing required table: %s" % tag)
+ if self.ttFont.isLoaded(tag):
+ return
+ data = self.tables[tag].data
+ if tag == "loca":
+ tableClass = WOFF2LocaTable
+ elif tag == "glyf":
+ tableClass = WOFF2GlyfTable
+ elif tag == "hmtx":
+ tableClass = WOFF2HmtxTable
+ else:
+ tableClass = getTableClass(tag)
+ table = tableClass(tag)
+ self.ttFont.tables[tag] = table
+ table.decompile(data, self.ttFont)
+
+ def _compileTable(self, tag):
+ """Compile table and store it in its 'data' attribute."""
+ self.tables[tag].data = self.ttFont[tag].compile(self.ttFont)
+
+ def _calcSFNTChecksumsLengthsAndOffsets(self):
+ """Compute the 'original' SFNT checksums, lengths and offsets for checksum
+ adjustment calculation. Return the total size of the uncompressed font.
+ """
+ offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables)
+ for tag, entry in self.tables.items():
+ data = entry.data
+ entry.origOffset = offset
+ entry.origLength = len(data)
+ if tag == "head":
+ entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
+ else:
+ entry.checkSum = calcChecksum(data)
+ offset += (entry.origLength + 3) & ~3
+ return offset
+
+ def _transformTables(self):
+ """Return transformed font data."""
+ transformedTables = self.flavorData.transformedTables
+ for tag, entry in self.tables.items():
+ data = None
+ if tag in transformedTables:
+ data = self.transformTable(tag)
+ if data is not None:
+ entry.transformed = True
+ if data is None:
+ if tag == "glyf":
+ # Currently we always sort table tags so
+ # 'loca' comes after 'glyf'.
+ transformedTables.discard("loca")
+ # pass-through the table data without transformation
+ data = entry.data
+ entry.transformed = False
+ entry.offset = self.nextTableOffset
+ entry.saveData(self.transformBuffer, data)
+ self.nextTableOffset += entry.length
+ self.writeMasterChecksum()
+ fontData = self.transformBuffer.getvalue()
+ return fontData
+
+ def transformTable(self, tag):
+ """Return transformed table data, or None if some pre-conditions aren't
+ met -- in which case, the non-transformed table data will be used.
+ """
+ if tag == "loca":
+ data = b""
+ elif tag == "glyf":
+ for tag in ("maxp", "head", "loca", "glyf"):
+ self._decompileTable(tag)
+ glyfTable = self.ttFont["glyf"]
+ data = glyfTable.transform(self.ttFont)
+ elif tag == "hmtx":
+ if "glyf" not in self.tables:
+ return
+ for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"):
+ self._decompileTable(tag)
+ hmtxTable = self.ttFont["hmtx"]
+ data = hmtxTable.transform(self.ttFont) # can be None
+ else:
+ raise TTLibError("Transform for table '%s' is unknown" % tag)
+ return data
+
+ def _calcMasterChecksum(self):
+ """Calculate checkSumAdjustment."""
+ tags = list(self.tables.keys())
+ checksums = []
+ for i in range(len(tags)):
+ checksums.append(self.tables[tags[i]].checkSum)
+
+ # Create a SFNT directory for checksum calculation purposes
+ self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
+ self.numTables, 16
+ )
+ directory = sstruct.pack(sfntDirectoryFormat, self)
+ tables = sorted(self.tables.items())
+ for tag, entry in tables:
+ sfntEntry = SFNTDirectoryEntry()
+ sfntEntry.tag = entry.tag
+ sfntEntry.checkSum = entry.checkSum
+ sfntEntry.offset = entry.origOffset
+ sfntEntry.length = entry.origLength
+ directory = directory + sfntEntry.toString()
+
+ directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
+ assert directory_end == len(directory)
+
+ checksums.append(calcChecksum(directory))
+ checksum = sum(checksums) & 0xFFFFFFFF
+ # BiboAfba!
+ checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
+ return checksumadjustment
+
+ def writeMasterChecksum(self):
+ """Write checkSumAdjustment to the transformBuffer."""
+ checksumadjustment = self._calcMasterChecksum()
+ self.transformBuffer.seek(self.tables["head"].offset + 8)
+ self.transformBuffer.write(struct.pack(">L", checksumadjustment))
+
+ def _calcTotalSize(self):
+ """Calculate total size of WOFF2 font, including any meta- and/or private data."""
+ offset = self.directorySize
+ for entry in self.tables.values():
+ offset += len(entry.toString())
+ offset += self.totalCompressedSize
+ offset = (offset + 3) & ~3
+ offset = self._calcFlavorDataOffsetsAndSize(offset)
+ return offset
+
+ def _calcFlavorDataOffsetsAndSize(self, start):
+ """Calculate offsets and lengths for any meta- and/or private data."""
+ offset = start
+ data = self.flavorData
+ if data.metaData:
+ self.metaOrigLength = len(data.metaData)
+ self.metaOffset = offset
+ self.compressedMetaData = brotli.compress(
+ data.metaData, mode=brotli.MODE_TEXT
+ )
+ self.metaLength = len(self.compressedMetaData)
+ offset += self.metaLength
+ else:
+ self.metaOffset = self.metaLength = self.metaOrigLength = 0
+ self.compressedMetaData = b""
+ if data.privData:
+ # make sure private data is padded to 4-byte boundary
+ offset = (offset + 3) & ~3
+ self.privOffset = offset
+ self.privLength = len(data.privData)
+ offset += self.privLength
+ else:
+ self.privOffset = self.privLength = 0
+ return offset
+
+ def _getVersion(self):
+ """Return the WOFF2 font's (majorVersion, minorVersion) tuple."""
+ data = self.flavorData
+ if data.majorVersion is not None and data.minorVersion is not None:
+ return data.majorVersion, data.minorVersion
+ else:
+ # if None, return 'fontRevision' from 'head' table
+ if "head" in self.tables:
+ return struct.unpack(">HH", self.tables["head"].data[4:8])
+ else:
+ return 0, 0
+
+ def _packTableDirectory(self):
+ """Return WOFF2 table directory data."""
+ directory = sstruct.pack(self.directoryFormat, self)
+ for entry in self.tables.values():
+ directory = directory + entry.toString()
+ return directory
+
+ def _writeFlavorData(self):
+ """Write metadata and/or private data using appropiate padding."""
+ compressedMetaData = self.compressedMetaData
+ privData = self.flavorData.privData
+ if compressedMetaData and privData:
+ compressedMetaData = pad(compressedMetaData, size=4)
+ if compressedMetaData:
+ self.file.seek(self.metaOffset)
+ assert self.file.tell() == self.metaOffset
+ self.file.write(compressedMetaData)
+ if privData:
+ self.file.seek(self.privOffset)
+ assert self.file.tell() == self.privOffset
+ self.file.write(privData)
+
+ def reordersTables(self):
+ return True
# -- woff2 directory helpers and cruft
@@ -492,13 +524,70 @@ woff2DirectoryFormat = """
woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat)
woff2KnownTags = (
- "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ",
- "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp",
- "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF",
- "GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL",
- "SVG ", "sbix", "acnt", "avar", "bdat", "bloc", "bsln", "cvar", "fdsc",
- "feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx",
- "opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill")
+ "cmap",
+ "head",
+ "hhea",
+ "hmtx",
+ "maxp",
+ "name",
+ "OS/2",
+ "post",
+ "cvt ",
+ "fpgm",
+ "glyf",
+ "loca",
+ "prep",
+ "CFF ",
+ "VORG",
+ "EBDT",
+ "EBLC",
+ "gasp",
+ "hdmx",
+ "kern",
+ "LTSH",
+ "PCLT",
+ "VDMX",
+ "vhea",
+ "vmtx",
+ "BASE",
+ "GDEF",
+ "GPOS",
+ "GSUB",
+ "EBSC",
+ "JSTF",
+ "MATH",
+ "CBDT",
+ "CBLC",
+ "COLR",
+ "CPAL",
+ "SVG ",
+ "sbix",
+ "acnt",
+ "avar",
+ "bdat",
+ "bloc",
+ "bsln",
+ "cvar",
+ "fdsc",
+ "feat",
+ "fmtx",
+ "fvar",
+ "gvar",
+ "hsty",
+ "just",
+ "lcar",
+ "mort",
+ "morx",
+ "opbd",
+ "prop",
+ "trak",
+ "Zapf",
+ "Silf",
+ "Glat",
+ "Gloc",
+ "Feat",
+ "Sill",
+)
woff2FlagsFormat = """
> # big endian
@@ -517,13 +606,16 @@ woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat)
woff2UnknownTagIndex = 0x3F
woff2Base128MaxSize = 5
-woff2DirectoryEntryMaxSize = woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize
+woff2DirectoryEntryMaxSize = (
+ woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize
+)
-woff2TransformedTableTags = ('glyf', 'loca')
+woff2TransformedTableTags = ("glyf", "loca")
woff2GlyfTableFormat = """
> # big endian
- version: L # = 0x00000000
+ version: H # = 0x0000
+ optionFlags: H # Bit 0: we have overlapSimpleBitmap[], Bits 1-15: reserved
numGlyphs: H # Number of glyphs
indexFormat: H # Offset format for loca table
nContourStreamSize: L # Size of nContour stream
@@ -545,988 +637,1049 @@ bboxFormat = """
yMax: h
"""
+woff2OverlapSimpleBitmapFlag = 0x0001
+
def getKnownTagIndex(tag):
- """Return index of 'tag' in woff2KnownTags list. Return 63 if not found."""
- for i in range(len(woff2KnownTags)):
- if tag == woff2KnownTags[i]:
- return i
- return woff2UnknownTagIndex
+ """Return index of 'tag' in woff2KnownTags list. Return 63 if not found."""
+ for i in range(len(woff2KnownTags)):
+ if tag == woff2KnownTags[i]:
+ return i
+ return woff2UnknownTagIndex
class WOFF2DirectoryEntry(DirectoryEntry):
-
- def fromFile(self, file):
- pos = file.tell()
- data = file.read(woff2DirectoryEntryMaxSize)
- left = self.fromString(data)
- consumed = len(data) - len(left)
- file.seek(pos + consumed)
-
- def fromString(self, data):
- if len(data) < 1:
- raise TTLibError("can't read table 'flags': not enough data")
- dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self)
- if self.flags & 0x3F == 0x3F:
- # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value
- if len(data) < woff2UnknownTagSize:
- raise TTLibError("can't read table 'tag': not enough data")
- dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self)
- else:
- # otherwise, tag is derived from a fixed 'Known Tags' table
- self.tag = woff2KnownTags[self.flags & 0x3F]
- self.tag = Tag(self.tag)
- self.origLength, data = unpackBase128(data)
- self.length = self.origLength
- if self.transformed:
- self.length, data = unpackBase128(data)
- if self.tag == 'loca' and self.length != 0:
- raise TTLibError(
- "the transformLength of the 'loca' table must be 0")
- # return left over data
- return data
-
- def toString(self):
- data = bytechr(self.flags)
- if (self.flags & 0x3F) == 0x3F:
- data += struct.pack('>4s', self.tag.tobytes())
- data += packBase128(self.origLength)
- if self.transformed:
- data += packBase128(self.length)
- return data
-
- @property
- def transformVersion(self):
- """Return bits 6-7 of table entry's flags, which indicate the preprocessing
- transformation version number (between 0 and 3).
- """
- return self.flags >> 6
-
- @transformVersion.setter
- def transformVersion(self, value):
- assert 0 <= value <= 3
- self.flags |= value << 6
-
- @property
- def transformed(self):
- """Return True if the table has any transformation, else return False."""
- # For all tables in a font, except for 'glyf' and 'loca', the transformation
- # version 0 indicates the null transform (where the original table data is
- # passed directly to the Brotli compressor). For 'glyf' and 'loca' tables,
- # transformation version 3 indicates the null transform
- if self.tag in {"glyf", "loca"}:
- return self.transformVersion != 3
- else:
- return self.transformVersion != 0
-
- @transformed.setter
- def transformed(self, booleanValue):
- # here we assume that a non-null transform means version 0 for 'glyf' and
- # 'loca' and 1 for every other table (e.g. hmtx); but that may change as
- # new transformation formats are introduced in the future (if ever).
- if self.tag in {"glyf", "loca"}:
- self.transformVersion = 3 if not booleanValue else 0
- else:
- self.transformVersion = int(booleanValue)
-
-
-class WOFF2LocaTable(getTableClass('loca')):
- """Same as parent class. The only difference is that it attempts to preserve
- the 'indexFormat' as encoded in the WOFF2 glyf table.
- """
-
- def __init__(self, tag=None):
- self.tableTag = Tag(tag or 'loca')
-
- def compile(self, ttFont):
- try:
- max_location = max(self.locations)
- except AttributeError:
- self.set([])
- max_location = 0
- if 'glyf' in ttFont and hasattr(ttFont['glyf'], 'indexFormat'):
- # copile loca using the indexFormat specified in the WOFF2 glyf table
- indexFormat = ttFont['glyf'].indexFormat
- if indexFormat == 0:
- if max_location >= 0x20000:
- raise TTLibError("indexFormat is 0 but local offsets > 0x20000")
- if not all(l % 2 == 0 for l in self.locations):
- raise TTLibError("indexFormat is 0 but local offsets not multiples of 2")
- locations = array.array("H")
- for i in range(len(self.locations)):
- locations.append(self.locations[i] // 2)
- else:
- locations = array.array("I", self.locations)
- if sys.byteorder != "big": locations.byteswap()
- data = locations.tobytes()
- else:
- # use the most compact indexFormat given the current glyph offsets
- data = super(WOFF2LocaTable, self).compile(ttFont)
- return data
-
-
-class WOFF2GlyfTable(getTableClass('glyf')):
- """Decoder/Encoder for WOFF2 'glyf' table transform."""
-
- subStreams = (
- 'nContourStream', 'nPointsStream', 'flagStream', 'glyphStream',
- 'compositeStream', 'bboxStream', 'instructionStream')
-
- def __init__(self, tag=None):
- self.tableTag = Tag(tag or 'glyf')
-
- def reconstruct(self, data, ttFont):
- """ Decompile transformed 'glyf' data. """
- inputDataSize = len(data)
-
- if inputDataSize < woff2GlyfTableFormatSize:
- raise TTLibError("not enough 'glyf' data")
- dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self)
- offset = woff2GlyfTableFormatSize
-
- for stream in self.subStreams:
- size = getattr(self, stream + 'Size')
- setattr(self, stream, data[:size])
- data = data[size:]
- offset += size
-
- if offset != inputDataSize:
- raise TTLibError(
- "incorrect size of transformed 'glyf' table: expected %d, received %d bytes"
- % (offset, inputDataSize))
-
- bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
- bboxBitmap = self.bboxStream[:bboxBitmapSize]
- self.bboxBitmap = array.array('B', bboxBitmap)
- self.bboxStream = self.bboxStream[bboxBitmapSize:]
-
- self.nContourStream = array.array("h", self.nContourStream)
- if sys.byteorder != "big": self.nContourStream.byteswap()
- assert len(self.nContourStream) == self.numGlyphs
-
- if 'head' in ttFont:
- ttFont['head'].indexToLocFormat = self.indexFormat
- try:
- self.glyphOrder = ttFont.getGlyphOrder()
- except:
- self.glyphOrder = None
- if self.glyphOrder is None:
- self.glyphOrder = [".notdef"]
- self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)])
- else:
- if len(self.glyphOrder) != self.numGlyphs:
- raise TTLibError(
- "incorrect glyphOrder: expected %d glyphs, found %d" %
- (len(self.glyphOrder), self.numGlyphs))
-
- glyphs = self.glyphs = {}
- for glyphID, glyphName in enumerate(self.glyphOrder):
- glyph = self._decodeGlyph(glyphID)
- glyphs[glyphName] = glyph
-
- def transform(self, ttFont):
- """ Return transformed 'glyf' data """
- self.numGlyphs = len(self.glyphs)
- assert len(self.glyphOrder) == self.numGlyphs
- if 'maxp' in ttFont:
- ttFont['maxp'].numGlyphs = self.numGlyphs
- self.indexFormat = ttFont['head'].indexToLocFormat
-
- for stream in self.subStreams:
- setattr(self, stream, b"")
- bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
- self.bboxBitmap = array.array('B', [0]*bboxBitmapSize)
-
- for glyphID in range(self.numGlyphs):
- self._encodeGlyph(glyphID)
-
- self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream
- for stream in self.subStreams:
- setattr(self, stream + 'Size', len(getattr(self, stream)))
- self.version = 0
- data = sstruct.pack(woff2GlyfTableFormat, self)
- data += bytesjoin([getattr(self, s) for s in self.subStreams])
- return data
-
- def _decodeGlyph(self, glyphID):
- glyph = getTableModule('glyf').Glyph()
- glyph.numberOfContours = self.nContourStream[glyphID]
- if glyph.numberOfContours == 0:
- return glyph
- elif glyph.isComposite():
- self._decodeComponents(glyph)
- else:
- self._decodeCoordinates(glyph)
- self._decodeBBox(glyphID, glyph)
- return glyph
-
- def _decodeComponents(self, glyph):
- data = self.compositeStream
- glyph.components = []
- more = 1
- haveInstructions = 0
- while more:
- component = getTableModule('glyf').GlyphComponent()
- more, haveInstr, data = component.decompile(data, self)
- haveInstructions = haveInstructions | haveInstr
- glyph.components.append(component)
- self.compositeStream = data
- if haveInstructions:
- self._decodeInstructions(glyph)
-
- def _decodeCoordinates(self, glyph):
- data = self.nPointsStream
- endPtsOfContours = []
- endPoint = -1
- for i in range(glyph.numberOfContours):
- ptsOfContour, data = unpack255UShort(data)
- endPoint += ptsOfContour
- endPtsOfContours.append(endPoint)
- glyph.endPtsOfContours = endPtsOfContours
- self.nPointsStream = data
- self._decodeTriplets(glyph)
- self._decodeInstructions(glyph)
-
- def _decodeInstructions(self, glyph):
- glyphStream = self.glyphStream
- instructionStream = self.instructionStream
- instructionLength, glyphStream = unpack255UShort(glyphStream)
- glyph.program = ttProgram.Program()
- glyph.program.fromBytecode(instructionStream[:instructionLength])
- self.glyphStream = glyphStream
- self.instructionStream = instructionStream[instructionLength:]
-
- def _decodeBBox(self, glyphID, glyph):
- haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7)))
- if glyph.isComposite() and not haveBBox:
- raise TTLibError('no bbox values for composite glyph %d' % glyphID)
- if haveBBox:
- dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph)
- else:
- glyph.recalcBounds(self)
-
- def _decodeTriplets(self, glyph):
-
- def withSign(flag, baseval):
- assert 0 <= baseval and baseval < 65536, 'integer overflow'
- return baseval if flag & 1 else -baseval
-
- nPoints = glyph.endPtsOfContours[-1] + 1
- flagSize = nPoints
- if flagSize > len(self.flagStream):
- raise TTLibError("not enough 'flagStream' data")
- flagsData = self.flagStream[:flagSize]
- self.flagStream = self.flagStream[flagSize:]
- flags = array.array('B', flagsData)
-
- triplets = array.array('B', self.glyphStream)
- nTriplets = len(triplets)
- assert nPoints <= nTriplets
-
- x = 0
- y = 0
- glyph.coordinates = getTableModule('glyf').GlyphCoordinates.zeros(nPoints)
- glyph.flags = array.array("B")
- tripletIndex = 0
- for i in range(nPoints):
- flag = flags[i]
- onCurve = not bool(flag >> 7)
- flag &= 0x7f
- if flag < 84:
- nBytes = 1
- elif flag < 120:
- nBytes = 2
- elif flag < 124:
- nBytes = 3
- else:
- nBytes = 4
- assert ((tripletIndex + nBytes) <= nTriplets)
- if flag < 10:
- dx = 0
- dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex])
- elif flag < 20:
- dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex])
- dy = 0
- elif flag < 84:
- b0 = flag - 20
- b1 = triplets[tripletIndex]
- dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4))
- dy = withSign(flag >> 1, 1 + ((b0 & 0x0c) << 2) + (b1 & 0x0f))
- elif flag < 120:
- b0 = flag - 84
- dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex])
- dy = withSign(flag >> 1,
- 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1])
- elif flag < 124:
- b2 = triplets[tripletIndex + 1]
- dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4))
- dy = withSign(flag >> 1,
- ((b2 & 0x0f) << 8) + triplets[tripletIndex + 2])
- else:
- dx = withSign(flag,
- (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1])
- dy = withSign(flag >> 1,
- (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3])
- tripletIndex += nBytes
- x += dx
- y += dy
- glyph.coordinates[i] = (x, y)
- glyph.flags.append(int(onCurve))
- bytesConsumed = tripletIndex
- self.glyphStream = self.glyphStream[bytesConsumed:]
-
- def _encodeGlyph(self, glyphID):
- glyphName = self.getGlyphName(glyphID)
- glyph = self[glyphName]
- self.nContourStream += struct.pack(">h", glyph.numberOfContours)
- if glyph.numberOfContours == 0:
- return
- elif glyph.isComposite():
- self._encodeComponents(glyph)
- else:
- self._encodeCoordinates(glyph)
- self._encodeBBox(glyphID, glyph)
-
- def _encodeComponents(self, glyph):
- lastcomponent = len(glyph.components) - 1
- more = 1
- haveInstructions = 0
- for i in range(len(glyph.components)):
- if i == lastcomponent:
- haveInstructions = hasattr(glyph, "program")
- more = 0
- component = glyph.components[i]
- self.compositeStream += component.compile(more, haveInstructions, self)
- if haveInstructions:
- self._encodeInstructions(glyph)
-
- def _encodeCoordinates(self, glyph):
- lastEndPoint = -1
- for endPoint in glyph.endPtsOfContours:
- ptsOfContour = endPoint - lastEndPoint
- self.nPointsStream += pack255UShort(ptsOfContour)
- lastEndPoint = endPoint
- self._encodeTriplets(glyph)
- self._encodeInstructions(glyph)
-
- def _encodeInstructions(self, glyph):
- instructions = glyph.program.getBytecode()
- self.glyphStream += pack255UShort(len(instructions))
- self.instructionStream += instructions
-
- def _encodeBBox(self, glyphID, glyph):
- assert glyph.numberOfContours != 0, "empty glyph has no bbox"
- if not glyph.isComposite():
- # for simple glyphs, compare the encoded bounding box info with the calculated
- # values, and if they match omit the bounding box info
- currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax
- calculatedBBox = calcIntBounds(glyph.coordinates)
- if currentBBox == calculatedBBox:
- return
- self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7)
- self.bboxStream += sstruct.pack(bboxFormat, glyph)
-
- def _encodeTriplets(self, glyph):
- assert len(glyph.coordinates) == len(glyph.flags)
- coordinates = glyph.coordinates.copy()
- coordinates.absoluteToRelative()
-
- flags = array.array('B')
- triplets = array.array('B')
- for i in range(len(coordinates)):
- onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve
- x, y = coordinates[i]
- absX = abs(x)
- absY = abs(y)
- onCurveBit = 0 if onCurve else 128
- xSignBit = 0 if (x < 0) else 1
- ySignBit = 0 if (y < 0) else 1
- xySignBits = xSignBit + 2 * ySignBit
-
- if x == 0 and absY < 1280:
- flags.append(onCurveBit + ((absY & 0xf00) >> 7) + ySignBit)
- triplets.append(absY & 0xff)
- elif y == 0 and absX < 1280:
- flags.append(onCurveBit + 10 + ((absX & 0xf00) >> 7) + xSignBit)
- triplets.append(absX & 0xff)
- elif absX < 65 and absY < 65:
- flags.append(onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits)
- triplets.append((((absX - 1) & 0xf) << 4) | ((absY - 1) & 0xf))
- elif absX < 769 and absY < 769:
- flags.append(onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits)
- triplets.append((absX - 1) & 0xff)
- triplets.append((absY - 1) & 0xff)
- elif absX < 4096 and absY < 4096:
- flags.append(onCurveBit + 120 + xySignBits)
- triplets.append(absX >> 4)
- triplets.append(((absX & 0xf) << 4) | (absY >> 8))
- triplets.append(absY & 0xff)
- else:
- flags.append(onCurveBit + 124 + xySignBits)
- triplets.append(absX >> 8)
- triplets.append(absX & 0xff)
- triplets.append(absY >> 8)
- triplets.append(absY & 0xff)
-
- self.flagStream += flags.tobytes()
- self.glyphStream += triplets.tobytes()
+ def fromFile(self, file):
+ pos = file.tell()
+ data = file.read(woff2DirectoryEntryMaxSize)
+ left = self.fromString(data)
+ consumed = len(data) - len(left)
+ file.seek(pos + consumed)
+
+ def fromString(self, data):
+ if len(data) < 1:
+ raise TTLibError("can't read table 'flags': not enough data")
+ dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self)
+ if self.flags & 0x3F == 0x3F:
+ # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value
+ if len(data) < woff2UnknownTagSize:
+ raise TTLibError("can't read table 'tag': not enough data")
+ dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self)
+ else:
+ # otherwise, tag is derived from a fixed 'Known Tags' table
+ self.tag = woff2KnownTags[self.flags & 0x3F]
+ self.tag = Tag(self.tag)
+ self.origLength, data = unpackBase128(data)
+ self.length = self.origLength
+ if self.transformed:
+ self.length, data = unpackBase128(data)
+ if self.tag == "loca" and self.length != 0:
+ raise TTLibError("the transformLength of the 'loca' table must be 0")
+ # return left over data
+ return data
+
+ def toString(self):
+ data = bytechr(self.flags)
+ if (self.flags & 0x3F) == 0x3F:
+ data += struct.pack(">4s", self.tag.tobytes())
+ data += packBase128(self.origLength)
+ if self.transformed:
+ data += packBase128(self.length)
+ return data
+
+ @property
+ def transformVersion(self):
+ """Return bits 6-7 of table entry's flags, which indicate the preprocessing
+ transformation version number (between 0 and 3).
+ """
+ return self.flags >> 6
+
+ @transformVersion.setter
+ def transformVersion(self, value):
+ assert 0 <= value <= 3
+ self.flags |= value << 6
+
+ @property
+ def transformed(self):
+ """Return True if the table has any transformation, else return False."""
+ # For all tables in a font, except for 'glyf' and 'loca', the transformation
+ # version 0 indicates the null transform (where the original table data is
+ # passed directly to the Brotli compressor). For 'glyf' and 'loca' tables,
+ # transformation version 3 indicates the null transform
+ if self.tag in {"glyf", "loca"}:
+ return self.transformVersion != 3
+ else:
+ return self.transformVersion != 0
+
+ @transformed.setter
+ def transformed(self, booleanValue):
+ # here we assume that a non-null transform means version 0 for 'glyf' and
+ # 'loca' and 1 for every other table (e.g. hmtx); but that may change as
+ # new transformation formats are introduced in the future (if ever).
+ if self.tag in {"glyf", "loca"}:
+ self.transformVersion = 3 if not booleanValue else 0
+ else:
+ self.transformVersion = int(booleanValue)
+
+
+class WOFF2LocaTable(getTableClass("loca")):
+ """Same as parent class. The only difference is that it attempts to preserve
+ the 'indexFormat' as encoded in the WOFF2 glyf table.
+ """
+
+ def __init__(self, tag=None):
+ self.tableTag = Tag(tag or "loca")
+
+ def compile(self, ttFont):
+ try:
+ max_location = max(self.locations)
+ except AttributeError:
+ self.set([])
+ max_location = 0
+ if "glyf" in ttFont and hasattr(ttFont["glyf"], "indexFormat"):
+        # compile loca using the indexFormat specified in the WOFF2 glyf table
+ indexFormat = ttFont["glyf"].indexFormat
+ if indexFormat == 0:
+ if max_location >= 0x20000:
+ raise TTLibError("indexFormat is 0 but local offsets > 0x20000")
+ if not all(l % 2 == 0 for l in self.locations):
+ raise TTLibError(
+ "indexFormat is 0 but local offsets not multiples of 2"
+ )
+ locations = array.array("H")
+ for i in range(len(self.locations)):
+ locations.append(self.locations[i] // 2)
+ else:
+ locations = array.array("I", self.locations)
+ if sys.byteorder != "big":
+ locations.byteswap()
+ data = locations.tobytes()
+ else:
+ # use the most compact indexFormat given the current glyph offsets
+ data = super(WOFF2LocaTable, self).compile(ttFont)
+ return data
+
+
+class WOFF2GlyfTable(getTableClass("glyf")):
+ """Decoder/Encoder for WOFF2 'glyf' table transform."""
+
+ subStreams = (
+ "nContourStream",
+ "nPointsStream",
+ "flagStream",
+ "glyphStream",
+ "compositeStream",
+ "bboxStream",
+ "instructionStream",
+ )
+
+ def __init__(self, tag=None):
+ self.tableTag = Tag(tag or "glyf")
+
+ def reconstruct(self, data, ttFont):
+ """Decompile transformed 'glyf' data."""
+ inputDataSize = len(data)
+
+ if inputDataSize < woff2GlyfTableFormatSize:
+ raise TTLibError("not enough 'glyf' data")
+ dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self)
+ offset = woff2GlyfTableFormatSize
+
+ for stream in self.subStreams:
+ size = getattr(self, stream + "Size")
+ setattr(self, stream, data[:size])
+ data = data[size:]
+ offset += size
+
+ hasOverlapSimpleBitmap = self.optionFlags & woff2OverlapSimpleBitmapFlag
+ self.overlapSimpleBitmap = None
+ if hasOverlapSimpleBitmap:
+ overlapSimpleBitmapSize = (self.numGlyphs + 7) >> 3
+ self.overlapSimpleBitmap = array.array("B", data[:overlapSimpleBitmapSize])
+ offset += overlapSimpleBitmapSize
+
+ if offset != inputDataSize:
+ raise TTLibError(
+ "incorrect size of transformed 'glyf' table: expected %d, received %d bytes"
+ % (offset, inputDataSize)
+ )
+
+ bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
+ bboxBitmap = self.bboxStream[:bboxBitmapSize]
+ self.bboxBitmap = array.array("B", bboxBitmap)
+ self.bboxStream = self.bboxStream[bboxBitmapSize:]
+
+ self.nContourStream = array.array("h", self.nContourStream)
+ if sys.byteorder != "big":
+ self.nContourStream.byteswap()
+ assert len(self.nContourStream) == self.numGlyphs
+
+ if "head" in ttFont:
+ ttFont["head"].indexToLocFormat = self.indexFormat
+ try:
+ self.glyphOrder = ttFont.getGlyphOrder()
+ except:
+ self.glyphOrder = None
+ if self.glyphOrder is None:
+ self.glyphOrder = [".notdef"]
+ self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)])
+ else:
+ if len(self.glyphOrder) != self.numGlyphs:
+ raise TTLibError(
+ "incorrect glyphOrder: expected %d glyphs, found %d"
+ % (len(self.glyphOrder), self.numGlyphs)
+ )
+
+ glyphs = self.glyphs = {}
+ for glyphID, glyphName in enumerate(self.glyphOrder):
+ glyph = self._decodeGlyph(glyphID)
+ glyphs[glyphName] = glyph
+
+ def transform(self, ttFont):
+ """Return transformed 'glyf' data"""
+ self.numGlyphs = len(self.glyphs)
+ assert len(self.glyphOrder) == self.numGlyphs
+ if "maxp" in ttFont:
+ ttFont["maxp"].numGlyphs = self.numGlyphs
+ self.indexFormat = ttFont["head"].indexToLocFormat
+
+ for stream in self.subStreams:
+ setattr(self, stream, b"")
+ bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
+ self.bboxBitmap = array.array("B", [0] * bboxBitmapSize)
+
+ self.overlapSimpleBitmap = array.array("B", [0] * ((self.numGlyphs + 7) >> 3))
+ for glyphID in range(self.numGlyphs):
+ try:
+ self._encodeGlyph(glyphID)
+ except NotImplementedError:
+ return None
+ hasOverlapSimpleBitmap = any(self.overlapSimpleBitmap)
+
+ self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream
+ for stream in self.subStreams:
+ setattr(self, stream + "Size", len(getattr(self, stream)))
+ self.version = 0
+ self.optionFlags = 0
+ if hasOverlapSimpleBitmap:
+ self.optionFlags |= woff2OverlapSimpleBitmapFlag
+ data = sstruct.pack(woff2GlyfTableFormat, self)
+ data += bytesjoin([getattr(self, s) for s in self.subStreams])
+ if hasOverlapSimpleBitmap:
+ data += self.overlapSimpleBitmap.tobytes()
+ return data
+
+ def _decodeGlyph(self, glyphID):
+ glyph = getTableModule("glyf").Glyph()
+ glyph.numberOfContours = self.nContourStream[glyphID]
+ if glyph.numberOfContours == 0:
+ return glyph
+ elif glyph.isComposite():
+ self._decodeComponents(glyph)
+ else:
+ self._decodeCoordinates(glyph)
+ self._decodeOverlapSimpleFlag(glyph, glyphID)
+ self._decodeBBox(glyphID, glyph)
+ return glyph
+
+ def _decodeComponents(self, glyph):
+ data = self.compositeStream
+ glyph.components = []
+ more = 1
+ haveInstructions = 0
+ while more:
+ component = getTableModule("glyf").GlyphComponent()
+ more, haveInstr, data = component.decompile(data, self)
+ haveInstructions = haveInstructions | haveInstr
+ glyph.components.append(component)
+ self.compositeStream = data
+ if haveInstructions:
+ self._decodeInstructions(glyph)
+
+ def _decodeCoordinates(self, glyph):
+ data = self.nPointsStream
+ endPtsOfContours = []
+ endPoint = -1
+ for i in range(glyph.numberOfContours):
+ ptsOfContour, data = unpack255UShort(data)
+ endPoint += ptsOfContour
+ endPtsOfContours.append(endPoint)
+ glyph.endPtsOfContours = endPtsOfContours
+ self.nPointsStream = data
+ self._decodeTriplets(glyph)
+ self._decodeInstructions(glyph)
+
+ def _decodeOverlapSimpleFlag(self, glyph, glyphID):
+ if self.overlapSimpleBitmap is None or glyph.numberOfContours <= 0:
+ return
+ byte = glyphID >> 3
+ bit = glyphID & 7
+ if self.overlapSimpleBitmap[byte] & (0x80 >> bit):
+ glyph.flags[0] |= _g_l_y_f.flagOverlapSimple
+
+ def _decodeInstructions(self, glyph):
+ glyphStream = self.glyphStream
+ instructionStream = self.instructionStream
+ instructionLength, glyphStream = unpack255UShort(glyphStream)
+ glyph.program = ttProgram.Program()
+ glyph.program.fromBytecode(instructionStream[:instructionLength])
+ self.glyphStream = glyphStream
+ self.instructionStream = instructionStream[instructionLength:]
+
+ def _decodeBBox(self, glyphID, glyph):
+ haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7)))
+ if glyph.isComposite() and not haveBBox:
+ raise TTLibError("no bbox values for composite glyph %d" % glyphID)
+ if haveBBox:
+ dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph)
+ else:
+ glyph.recalcBounds(self)
+
+ def _decodeTriplets(self, glyph):
+ def withSign(flag, baseval):
+ assert 0 <= baseval and baseval < 65536, "integer overflow"
+ return baseval if flag & 1 else -baseval
+
+ nPoints = glyph.endPtsOfContours[-1] + 1
+ flagSize = nPoints
+ if flagSize > len(self.flagStream):
+ raise TTLibError("not enough 'flagStream' data")
+ flagsData = self.flagStream[:flagSize]
+ self.flagStream = self.flagStream[flagSize:]
+ flags = array.array("B", flagsData)
+
+ triplets = array.array("B", self.glyphStream)
+ nTriplets = len(triplets)
+ assert nPoints <= nTriplets
+
+ x = 0
+ y = 0
+ glyph.coordinates = getTableModule("glyf").GlyphCoordinates.zeros(nPoints)
+ glyph.flags = array.array("B")
+ tripletIndex = 0
+ for i in range(nPoints):
+ flag = flags[i]
+ onCurve = not bool(flag >> 7)
+ flag &= 0x7F
+ if flag < 84:
+ nBytes = 1
+ elif flag < 120:
+ nBytes = 2
+ elif flag < 124:
+ nBytes = 3
+ else:
+ nBytes = 4
+ assert (tripletIndex + nBytes) <= nTriplets
+ if flag < 10:
+ dx = 0
+ dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex])
+ elif flag < 20:
+ dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex])
+ dy = 0
+ elif flag < 84:
+ b0 = flag - 20
+ b1 = triplets[tripletIndex]
+ dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4))
+ dy = withSign(flag >> 1, 1 + ((b0 & 0x0C) << 2) + (b1 & 0x0F))
+ elif flag < 120:
+ b0 = flag - 84
+ dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex])
+ dy = withSign(
+ flag >> 1, 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1]
+ )
+ elif flag < 124:
+ b2 = triplets[tripletIndex + 1]
+ dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4))
+ dy = withSign(
+ flag >> 1, ((b2 & 0x0F) << 8) + triplets[tripletIndex + 2]
+ )
+ else:
+ dx = withSign(
+ flag, (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1]
+ )
+ dy = withSign(
+ flag >> 1,
+ (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3],
+ )
+ tripletIndex += nBytes
+ x += dx
+ y += dy
+ glyph.coordinates[i] = (x, y)
+ glyph.flags.append(int(onCurve))
+ bytesConsumed = tripletIndex
+ self.glyphStream = self.glyphStream[bytesConsumed:]
+
+ def _encodeGlyph(self, glyphID):
+ glyphName = self.getGlyphName(glyphID)
+ glyph = self[glyphName]
+ self.nContourStream += struct.pack(">h", glyph.numberOfContours)
+ if glyph.numberOfContours == 0:
+ return
+ elif glyph.isComposite():
+ self._encodeComponents(glyph)
+ elif glyph.isVarComposite():
+ raise NotImplementedError
+ else:
+ self._encodeCoordinates(glyph)
+ self._encodeOverlapSimpleFlag(glyph, glyphID)
+ self._encodeBBox(glyphID, glyph)
+
+ def _encodeComponents(self, glyph):
+ lastcomponent = len(glyph.components) - 1
+ more = 1
+ haveInstructions = 0
+ for i in range(len(glyph.components)):
+ if i == lastcomponent:
+ haveInstructions = hasattr(glyph, "program")
+ more = 0
+ component = glyph.components[i]
+ self.compositeStream += component.compile(more, haveInstructions, self)
+ if haveInstructions:
+ self._encodeInstructions(glyph)
+
+ def _encodeCoordinates(self, glyph):
+ lastEndPoint = -1
+ if _g_l_y_f.flagCubic in glyph.flags:
+ raise NotImplementedError
+ for endPoint in glyph.endPtsOfContours:
+ ptsOfContour = endPoint - lastEndPoint
+ self.nPointsStream += pack255UShort(ptsOfContour)
+ lastEndPoint = endPoint
+ self._encodeTriplets(glyph)
+ self._encodeInstructions(glyph)
+
+ def _encodeOverlapSimpleFlag(self, glyph, glyphID):
+ if glyph.numberOfContours <= 0:
+ return
+ if glyph.flags[0] & _g_l_y_f.flagOverlapSimple:
+ byte = glyphID >> 3
+ bit = glyphID & 7
+ self.overlapSimpleBitmap[byte] |= 0x80 >> bit
+
+ def _encodeInstructions(self, glyph):
+ instructions = glyph.program.getBytecode()
+ self.glyphStream += pack255UShort(len(instructions))
+ self.instructionStream += instructions
+
+ def _encodeBBox(self, glyphID, glyph):
+ assert glyph.numberOfContours != 0, "empty glyph has no bbox"
+ if not glyph.isComposite():
+ # for simple glyphs, compare the encoded bounding box info with the calculated
+ # values, and if they match omit the bounding box info
+ currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax
+ calculatedBBox = calcIntBounds(glyph.coordinates)
+ if currentBBox == calculatedBBox:
+ return
+ self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7)
+ self.bboxStream += sstruct.pack(bboxFormat, glyph)
+
+ def _encodeTriplets(self, glyph):
+ assert len(glyph.coordinates) == len(glyph.flags)
+ coordinates = glyph.coordinates.copy()
+ coordinates.absoluteToRelative()
+
+ flags = array.array("B")
+ triplets = array.array("B")
+ for i in range(len(coordinates)):
+ onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve
+ x, y = coordinates[i]
+ absX = abs(x)
+ absY = abs(y)
+ onCurveBit = 0 if onCurve else 128
+ xSignBit = 0 if (x < 0) else 1
+ ySignBit = 0 if (y < 0) else 1
+ xySignBits = xSignBit + 2 * ySignBit
+
+ if x == 0 and absY < 1280:
+ flags.append(onCurveBit + ((absY & 0xF00) >> 7) + ySignBit)
+ triplets.append(absY & 0xFF)
+ elif y == 0 and absX < 1280:
+ flags.append(onCurveBit + 10 + ((absX & 0xF00) >> 7) + xSignBit)
+ triplets.append(absX & 0xFF)
+ elif absX < 65 and absY < 65:
+ flags.append(
+ onCurveBit
+ + 20
+ + ((absX - 1) & 0x30)
+ + (((absY - 1) & 0x30) >> 2)
+ + xySignBits
+ )
+ triplets.append((((absX - 1) & 0xF) << 4) | ((absY - 1) & 0xF))
+ elif absX < 769 and absY < 769:
+ flags.append(
+ onCurveBit
+ + 84
+ + 12 * (((absX - 1) & 0x300) >> 8)
+ + (((absY - 1) & 0x300) >> 6)
+ + xySignBits
+ )
+ triplets.append((absX - 1) & 0xFF)
+ triplets.append((absY - 1) & 0xFF)
+ elif absX < 4096 and absY < 4096:
+ flags.append(onCurveBit + 120 + xySignBits)
+ triplets.append(absX >> 4)
+ triplets.append(((absX & 0xF) << 4) | (absY >> 8))
+ triplets.append(absY & 0xFF)
+ else:
+ flags.append(onCurveBit + 124 + xySignBits)
+ triplets.append(absX >> 8)
+ triplets.append(absX & 0xFF)
+ triplets.append(absY >> 8)
+ triplets.append(absY & 0xFF)
+
+ self.flagStream += flags.tobytes()
+ self.glyphStream += triplets.tobytes()
class WOFF2HmtxTable(getTableClass("hmtx")):
-
- def __init__(self, tag=None):
- self.tableTag = Tag(tag or 'hmtx')
-
- def reconstruct(self, data, ttFont):
- flags, = struct.unpack(">B", data[:1])
- data = data[1:]
- if flags & 0b11111100 != 0:
- raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag)
-
- # When bit 0 is _not_ set, the lsb[] array is present
- hasLsbArray = flags & 1 == 0
- # When bit 1 is _not_ set, the leftSideBearing[] array is present
- hasLeftSideBearingArray = flags & 2 == 0
- if hasLsbArray and hasLeftSideBearingArray:
- raise TTLibError(
- "either bits 0 or 1 (or both) must set in transformed '%s' flags"
- % self.tableTag
- )
-
- glyfTable = ttFont["glyf"]
- headerTable = ttFont["hhea"]
- glyphOrder = glyfTable.glyphOrder
- numGlyphs = len(glyphOrder)
- numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs)
-
- assert len(data) >= 2 * numberOfHMetrics
- advanceWidthArray = array.array("H", data[:2 * numberOfHMetrics])
- if sys.byteorder != "big":
- advanceWidthArray.byteswap()
- data = data[2 * numberOfHMetrics:]
-
- if hasLsbArray:
- assert len(data) >= 2 * numberOfHMetrics
- lsbArray = array.array("h", data[:2 * numberOfHMetrics])
- if sys.byteorder != "big":
- lsbArray.byteswap()
- data = data[2 * numberOfHMetrics:]
- else:
- # compute (proportional) glyphs' lsb from their xMin
- lsbArray = array.array("h")
- for i, glyphName in enumerate(glyphOrder):
- if i >= numberOfHMetrics:
- break
- glyph = glyfTable[glyphName]
- xMin = getattr(glyph, "xMin", 0)
- lsbArray.append(xMin)
-
- numberOfSideBearings = numGlyphs - numberOfHMetrics
- if hasLeftSideBearingArray:
- assert len(data) >= 2 * numberOfSideBearings
- leftSideBearingArray = array.array("h", data[:2 * numberOfSideBearings])
- if sys.byteorder != "big":
- leftSideBearingArray.byteswap()
- data = data[2 * numberOfSideBearings:]
- else:
- # compute (monospaced) glyphs' leftSideBearing from their xMin
- leftSideBearingArray = array.array("h")
- for i, glyphName in enumerate(glyphOrder):
- if i < numberOfHMetrics:
- continue
- glyph = glyfTable[glyphName]
- xMin = getattr(glyph, "xMin", 0)
- leftSideBearingArray.append(xMin)
-
- if data:
- raise TTLibError("too much '%s' table data" % self.tableTag)
-
- self.metrics = {}
- for i in range(numberOfHMetrics):
- glyphName = glyphOrder[i]
- advanceWidth, lsb = advanceWidthArray[i], lsbArray[i]
- self.metrics[glyphName] = (advanceWidth, lsb)
- lastAdvance = advanceWidthArray[-1]
- for i in range(numberOfSideBearings):
- glyphName = glyphOrder[i + numberOfHMetrics]
- self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i])
-
- def transform(self, ttFont):
- glyphOrder = ttFont.getGlyphOrder()
- glyf = ttFont["glyf"]
- hhea = ttFont["hhea"]
- numberOfHMetrics = hhea.numberOfHMetrics
-
- # check if any of the proportional glyphs has left sidebearings that
- # differ from their xMin bounding box values.
- hasLsbArray = False
- for i in range(numberOfHMetrics):
- glyphName = glyphOrder[i]
- lsb = self.metrics[glyphName][1]
- if lsb != getattr(glyf[glyphName], "xMin", 0):
- hasLsbArray = True
- break
-
- # do the same for the monospaced glyphs (if any) at the end of hmtx table
- hasLeftSideBearingArray = False
- for i in range(numberOfHMetrics, len(glyphOrder)):
- glyphName = glyphOrder[i]
- lsb = self.metrics[glyphName][1]
- if lsb != getattr(glyf[glyphName], "xMin", 0):
- hasLeftSideBearingArray = True
- break
-
- # if we need to encode both sidebearings arrays, then no transformation is
- # applicable, and we must use the untransformed hmtx data
- if hasLsbArray and hasLeftSideBearingArray:
- return
-
- # set bit 0 and 1 when the respective arrays are _not_ present
- flags = 0
- if not hasLsbArray:
- flags |= 1 << 0
- if not hasLeftSideBearingArray:
- flags |= 1 << 1
-
- data = struct.pack(">B", flags)
-
- advanceWidthArray = array.array(
- "H",
- [
- self.metrics[glyphName][0]
- for i, glyphName in enumerate(glyphOrder)
- if i < numberOfHMetrics
- ]
- )
- if sys.byteorder != "big":
- advanceWidthArray.byteswap()
- data += advanceWidthArray.tobytes()
-
- if hasLsbArray:
- lsbArray = array.array(
- "h",
- [
- self.metrics[glyphName][1]
- for i, glyphName in enumerate(glyphOrder)
- if i < numberOfHMetrics
- ]
- )
- if sys.byteorder != "big":
- lsbArray.byteswap()
- data += lsbArray.tobytes()
-
- if hasLeftSideBearingArray:
- leftSideBearingArray = array.array(
- "h",
- [
- self.metrics[glyphOrder[i]][1]
- for i in range(numberOfHMetrics, len(glyphOrder))
- ]
- )
- if sys.byteorder != "big":
- leftSideBearingArray.byteswap()
- data += leftSideBearingArray.tobytes()
-
- return data
+ def __init__(self, tag=None):
+ self.tableTag = Tag(tag or "hmtx")
+
+ def reconstruct(self, data, ttFont):
+ (flags,) = struct.unpack(">B", data[:1])
+ data = data[1:]
+ if flags & 0b11111100 != 0:
+ raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag)
+
+ # When bit 0 is _not_ set, the lsb[] array is present
+ hasLsbArray = flags & 1 == 0
+ # When bit 1 is _not_ set, the leftSideBearing[] array is present
+ hasLeftSideBearingArray = flags & 2 == 0
+ if hasLsbArray and hasLeftSideBearingArray:
+ raise TTLibError(
+ "either bits 0 or 1 (or both) must set in transformed '%s' flags"
+ % self.tableTag
+ )
+
+ glyfTable = ttFont["glyf"]
+ headerTable = ttFont["hhea"]
+ glyphOrder = glyfTable.glyphOrder
+ numGlyphs = len(glyphOrder)
+ numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs)
+
+ assert len(data) >= 2 * numberOfHMetrics
+ advanceWidthArray = array.array("H", data[: 2 * numberOfHMetrics])
+ if sys.byteorder != "big":
+ advanceWidthArray.byteswap()
+ data = data[2 * numberOfHMetrics :]
+
+ if hasLsbArray:
+ assert len(data) >= 2 * numberOfHMetrics
+ lsbArray = array.array("h", data[: 2 * numberOfHMetrics])
+ if sys.byteorder != "big":
+ lsbArray.byteswap()
+ data = data[2 * numberOfHMetrics :]
+ else:
+ # compute (proportional) glyphs' lsb from their xMin
+ lsbArray = array.array("h")
+ for i, glyphName in enumerate(glyphOrder):
+ if i >= numberOfHMetrics:
+ break
+ glyph = glyfTable[glyphName]
+ xMin = getattr(glyph, "xMin", 0)
+ lsbArray.append(xMin)
+
+ numberOfSideBearings = numGlyphs - numberOfHMetrics
+ if hasLeftSideBearingArray:
+ assert len(data) >= 2 * numberOfSideBearings
+ leftSideBearingArray = array.array("h", data[: 2 * numberOfSideBearings])
+ if sys.byteorder != "big":
+ leftSideBearingArray.byteswap()
+ data = data[2 * numberOfSideBearings :]
+ else:
+ # compute (monospaced) glyphs' leftSideBearing from their xMin
+ leftSideBearingArray = array.array("h")
+ for i, glyphName in enumerate(glyphOrder):
+ if i < numberOfHMetrics:
+ continue
+ glyph = glyfTable[glyphName]
+ xMin = getattr(glyph, "xMin", 0)
+ leftSideBearingArray.append(xMin)
+
+ if data:
+ raise TTLibError("too much '%s' table data" % self.tableTag)
+
+ self.metrics = {}
+ for i in range(numberOfHMetrics):
+ glyphName = glyphOrder[i]
+ advanceWidth, lsb = advanceWidthArray[i], lsbArray[i]
+ self.metrics[glyphName] = (advanceWidth, lsb)
+ lastAdvance = advanceWidthArray[-1]
+ for i in range(numberOfSideBearings):
+ glyphName = glyphOrder[i + numberOfHMetrics]
+ self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i])
+
+ def transform(self, ttFont):
+ glyphOrder = ttFont.getGlyphOrder()
+ glyf = ttFont["glyf"]
+ hhea = ttFont["hhea"]
+ numberOfHMetrics = hhea.numberOfHMetrics
+
+ # check if any of the proportional glyphs has left sidebearings that
+ # differ from their xMin bounding box values.
+ hasLsbArray = False
+ for i in range(numberOfHMetrics):
+ glyphName = glyphOrder[i]
+ lsb = self.metrics[glyphName][1]
+ if lsb != getattr(glyf[glyphName], "xMin", 0):
+ hasLsbArray = True
+ break
+
+ # do the same for the monospaced glyphs (if any) at the end of hmtx table
+ hasLeftSideBearingArray = False
+ for i in range(numberOfHMetrics, len(glyphOrder)):
+ glyphName = glyphOrder[i]
+ lsb = self.metrics[glyphName][1]
+ if lsb != getattr(glyf[glyphName], "xMin", 0):
+ hasLeftSideBearingArray = True
+ break
+
+ # if we need to encode both sidebearings arrays, then no transformation is
+ # applicable, and we must use the untransformed hmtx data
+ if hasLsbArray and hasLeftSideBearingArray:
+ return
+
+ # set bit 0 and 1 when the respective arrays are _not_ present
+ flags = 0
+ if not hasLsbArray:
+ flags |= 1 << 0
+ if not hasLeftSideBearingArray:
+ flags |= 1 << 1
+
+ data = struct.pack(">B", flags)
+
+ advanceWidthArray = array.array(
+ "H",
+ [
+ self.metrics[glyphName][0]
+ for i, glyphName in enumerate(glyphOrder)
+ if i < numberOfHMetrics
+ ],
+ )
+ if sys.byteorder != "big":
+ advanceWidthArray.byteswap()
+ data += advanceWidthArray.tobytes()
+
+ if hasLsbArray:
+ lsbArray = array.array(
+ "h",
+ [
+ self.metrics[glyphName][1]
+ for i, glyphName in enumerate(glyphOrder)
+ if i < numberOfHMetrics
+ ],
+ )
+ if sys.byteorder != "big":
+ lsbArray.byteswap()
+ data += lsbArray.tobytes()
+
+ if hasLeftSideBearingArray:
+ leftSideBearingArray = array.array(
+ "h",
+ [
+ self.metrics[glyphOrder[i]][1]
+ for i in range(numberOfHMetrics, len(glyphOrder))
+ ],
+ )
+ if sys.byteorder != "big":
+ leftSideBearingArray.byteswap()
+ data += leftSideBearingArray.tobytes()
+
+ return data
class WOFF2FlavorData(WOFFFlavorData):
-
- Flavor = 'woff2'
-
- def __init__(self, reader=None, data=None, transformedTables=None):
- """Data class that holds the WOFF2 header major/minor version, any
- metadata or private data (as bytes strings), and the set of
- table tags that have transformations applied (if reader is not None),
- or will have once the WOFF2 font is compiled.
-
- Args:
- reader: an SFNTReader (or subclass) object to read flavor data from.
- data: another WOFFFlavorData object to initialise data from.
- transformedTables: set of strings containing table tags to be transformed.
-
- Raises:
- ImportError if the brotli module is not installed.
-
- NOTE: The 'reader' argument, on the one hand, and the 'data' and
- 'transformedTables' arguments, on the other hand, are mutually exclusive.
- """
- if not haveBrotli:
- raise ImportError("No module named brotli")
-
- if reader is not None:
- if data is not None:
- raise TypeError(
- "'reader' and 'data' arguments are mutually exclusive"
- )
- if transformedTables is not None:
- raise TypeError(
- "'reader' and 'transformedTables' arguments are mutually exclusive"
- )
-
- if transformedTables is not None and (
- "glyf" in transformedTables and "loca" not in transformedTables
- or "loca" in transformedTables and "glyf" not in transformedTables
- ):
- raise ValueError(
- "'glyf' and 'loca' must be transformed (or not) together"
- )
- super(WOFF2FlavorData, self).__init__(reader=reader)
- if reader:
- transformedTables = [
- tag
- for tag, entry in reader.tables.items()
- if entry.transformed
- ]
- elif data:
- self.majorVersion = data.majorVersion
- self.majorVersion = data.minorVersion
- self.metaData = data.metaData
- self.privData = data.privData
- if transformedTables is None and hasattr(data, "transformedTables"):
- transformedTables = data.transformedTables
-
- if transformedTables is None:
- transformedTables = woff2TransformedTableTags
-
- self.transformedTables = set(transformedTables)
-
- def _decompress(self, rawData):
- return brotli.decompress(rawData)
+ Flavor = "woff2"
+
+ def __init__(self, reader=None, data=None, transformedTables=None):
+ """Data class that holds the WOFF2 header major/minor version, any
+ metadata or private data (as bytes strings), and the set of
+ table tags that have transformations applied (if reader is not None),
+ or will have once the WOFF2 font is compiled.
+
+ Args:
+ reader: an SFNTReader (or subclass) object to read flavor data from.
+ data: another WOFFFlavorData object to initialise data from.
+ transformedTables: set of strings containing table tags to be transformed.
+
+ Raises:
+ ImportError if the brotli module is not installed.
+
+ NOTE: The 'reader' argument, on the one hand, and the 'data' and
+ 'transformedTables' arguments, on the other hand, are mutually exclusive.
+ """
+ if not haveBrotli:
+ raise ImportError("No module named brotli")
+
+ if reader is not None:
+ if data is not None:
+ raise TypeError("'reader' and 'data' arguments are mutually exclusive")
+ if transformedTables is not None:
+ raise TypeError(
+ "'reader' and 'transformedTables' arguments are mutually exclusive"
+ )
+
+ if transformedTables is not None and (
+ "glyf" in transformedTables
+ and "loca" not in transformedTables
+ or "loca" in transformedTables
+ and "glyf" not in transformedTables
+ ):
+ raise ValueError("'glyf' and 'loca' must be transformed (or not) together")
+ super(WOFF2FlavorData, self).__init__(reader=reader)
+ if reader:
+ transformedTables = [
+ tag for tag, entry in reader.tables.items() if entry.transformed
+ ]
+ elif data:
+ self.majorVersion = data.majorVersion
+            self.minorVersion = data.minorVersion
+ self.metaData = data.metaData
+ self.privData = data.privData
+ if transformedTables is None and hasattr(data, "transformedTables"):
+ transformedTables = data.transformedTables
+
+ if transformedTables is None:
+ transformedTables = woff2TransformedTableTags
+
+ self.transformedTables = set(transformedTables)
+
+ def _decompress(self, rawData):
+ return brotli.decompress(rawData)
def unpackBase128(data):
- r""" Read one to five bytes from UIntBase128-encoded input string, and return
- a tuple containing the decoded integer plus any leftover data.
-
- >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00")
- True
- >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295
- True
- >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- TTLibError: UIntBase128 value must not start with leading zeros
- >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- TTLibError: UIntBase128-encoded sequence is longer than 5 bytes
- >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- TTLibError: UIntBase128 value exceeds 2**32-1
- """
- if len(data) == 0:
- raise TTLibError('not enough data to unpack UIntBase128')
- result = 0
- if byteord(data[0]) == 0x80:
- # font must be rejected if UIntBase128 value starts with 0x80
- raise TTLibError('UIntBase128 value must not start with leading zeros')
- for i in range(woff2Base128MaxSize):
- if len(data) == 0:
- raise TTLibError('not enough data to unpack UIntBase128')
- code = byteord(data[0])
- data = data[1:]
- # if any of the top seven bits are set then we're about to overflow
- if result & 0xFE000000:
- raise TTLibError('UIntBase128 value exceeds 2**32-1')
- # set current value = old value times 128 bitwise-or (byte bitwise-and 127)
- result = (result << 7) | (code & 0x7f)
- # repeat until the most significant bit of byte is false
- if (code & 0x80) == 0:
- # return result plus left over data
- return result, data
- # make sure not to exceed the size bound
- raise TTLibError('UIntBase128-encoded sequence is longer than 5 bytes')
+ r"""Read one to five bytes from UIntBase128-encoded input string, and return
+ a tuple containing the decoded integer plus any leftover data.
+
+ >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00")
+ True
+ >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295
+ True
+ >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TTLibError: UIntBase128 value must not start with leading zeros
+ >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TTLibError: UIntBase128-encoded sequence is longer than 5 bytes
+ >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TTLibError: UIntBase128 value exceeds 2**32-1
+ """
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack UIntBase128")
+ result = 0
+ if byteord(data[0]) == 0x80:
+ # font must be rejected if UIntBase128 value starts with 0x80
+ raise TTLibError("UIntBase128 value must not start with leading zeros")
+ for i in range(woff2Base128MaxSize):
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack UIntBase128")
+ code = byteord(data[0])
+ data = data[1:]
+ # if any of the top seven bits are set then we're about to overflow
+ if result & 0xFE000000:
+ raise TTLibError("UIntBase128 value exceeds 2**32-1")
+ # set current value = old value times 128 bitwise-or (byte bitwise-and 127)
+ result = (result << 7) | (code & 0x7F)
+ # repeat until the most significant bit of byte is false
+ if (code & 0x80) == 0:
+ # return result plus left over data
+ return result, data
+ # make sure not to exceed the size bound
+ raise TTLibError("UIntBase128-encoded sequence is longer than 5 bytes")
def base128Size(n):
- """ Return the length in bytes of a UIntBase128-encoded sequence with value n.
-
- >>> base128Size(0)
- 1
- >>> base128Size(24567)
- 3
- >>> base128Size(2**32-1)
- 5
- """
- assert n >= 0
- size = 1
- while n >= 128:
- size += 1
- n >>= 7
- return size
+ """Return the length in bytes of a UIntBase128-encoded sequence with value n.
+
+ >>> base128Size(0)
+ 1
+ >>> base128Size(24567)
+ 3
+ >>> base128Size(2**32-1)
+ 5
+ """
+ assert n >= 0
+ size = 1
+ while n >= 128:
+ size += 1
+ n >>= 7
+ return size
def packBase128(n):
- r""" Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of
- bytes using UIntBase128 variable-length encoding. Produce the shortest possible
- encoding.
-
- >>> packBase128(63) == b"\x3f"
- True
- >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f'
- True
- """
- if n < 0 or n >= 2**32:
- raise TTLibError(
- "UIntBase128 format requires 0 <= integer <= 2**32-1")
- data = b''
- size = base128Size(n)
- for i in range(size):
- b = (n >> (7 * (size - i - 1))) & 0x7f
- if i < size - 1:
- b |= 0x80
- data += struct.pack('B', b)
- return data
+ r"""Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of
+ bytes using UIntBase128 variable-length encoding. Produce the shortest possible
+ encoding.
+
+ >>> packBase128(63) == b"\x3f"
+ True
+ >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f'
+ True
+ """
+ if n < 0 or n >= 2**32:
+ raise TTLibError("UIntBase128 format requires 0 <= integer <= 2**32-1")
+ data = b""
+ size = base128Size(n)
+ for i in range(size):
+ b = (n >> (7 * (size - i - 1))) & 0x7F
+ if i < size - 1:
+ b |= 0x80
+ data += struct.pack("B", b)
+ return data
def unpack255UShort(data):
- """ Read one to three bytes from 255UInt16-encoded input string, and return a
- tuple containing the decoded integer plus any leftover data.
-
- >>> unpack255UShort(bytechr(252))[0]
- 252
-
- Note that some numbers (e.g. 506) can have multiple encodings:
- >>> unpack255UShort(struct.pack("BB", 254, 0))[0]
- 506
- >>> unpack255UShort(struct.pack("BB", 255, 253))[0]
- 506
- >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0]
- 506
- """
- code = byteord(data[:1])
- data = data[1:]
- if code == 253:
- # read two more bytes as an unsigned short
- if len(data) < 2:
- raise TTLibError('not enough data to unpack 255UInt16')
- result, = struct.unpack(">H", data[:2])
- data = data[2:]
- elif code == 254:
- # read another byte, plus 253 * 2
- if len(data) == 0:
- raise TTLibError('not enough data to unpack 255UInt16')
- result = byteord(data[:1])
- result += 506
- data = data[1:]
- elif code == 255:
- # read another byte, plus 253
- if len(data) == 0:
- raise TTLibError('not enough data to unpack 255UInt16')
- result = byteord(data[:1])
- result += 253
- data = data[1:]
- else:
- # leave as is if lower than 253
- result = code
- # return result plus left over data
- return result, data
+ """Read one to three bytes from 255UInt16-encoded input string, and return a
+ tuple containing the decoded integer plus any leftover data.
+
+ >>> unpack255UShort(bytechr(252))[0]
+ 252
+
+ Note that some numbers (e.g. 506) can have multiple encodings:
+ >>> unpack255UShort(struct.pack("BB", 254, 0))[0]
+ 506
+ >>> unpack255UShort(struct.pack("BB", 255, 253))[0]
+ 506
+ >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0]
+ 506
+ """
+ code = byteord(data[:1])
+ data = data[1:]
+ if code == 253:
+ # read two more bytes as an unsigned short
+ if len(data) < 2:
+ raise TTLibError("not enough data to unpack 255UInt16")
+ (result,) = struct.unpack(">H", data[:2])
+ data = data[2:]
+ elif code == 254:
+ # read another byte, plus 253 * 2
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack 255UInt16")
+ result = byteord(data[:1])
+ result += 506
+ data = data[1:]
+ elif code == 255:
+ # read another byte, plus 253
+ if len(data) == 0:
+ raise TTLibError("not enough data to unpack 255UInt16")
+ result = byteord(data[:1])
+ result += 253
+ data = data[1:]
+ else:
+ # leave as is if lower than 253
+ result = code
+ # return result plus left over data
+ return result, data
def pack255UShort(value):
- r""" Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring
- using 255UInt16 variable-length encoding.
-
- >>> pack255UShort(252) == b'\xfc'
- True
- >>> pack255UShort(506) == b'\xfe\x00'
- True
- >>> pack255UShort(762) == b'\xfd\x02\xfa'
- True
- """
- if value < 0 or value > 0xFFFF:
- raise TTLibError(
- "255UInt16 format requires 0 <= integer <= 65535")
- if value < 253:
- return struct.pack(">B", value)
- elif value < 506:
- return struct.pack(">BB", 255, value - 253)
- elif value < 762:
- return struct.pack(">BB", 254, value - 506)
- else:
- return struct.pack(">BH", 253, value)
+ r"""Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring
+ using 255UInt16 variable-length encoding.
+
+ >>> pack255UShort(252) == b'\xfc'
+ True
+ >>> pack255UShort(506) == b'\xfe\x00'
+ True
+ >>> pack255UShort(762) == b'\xfd\x02\xfa'
+ True
+ """
+ if value < 0 or value > 0xFFFF:
+ raise TTLibError("255UInt16 format requires 0 <= integer <= 65535")
+ if value < 253:
+ return struct.pack(">B", value)
+ elif value < 506:
+ return struct.pack(">BB", 255, value - 253)
+ elif value < 762:
+ return struct.pack(">BB", 254, value - 506)
+ else:
+ return struct.pack(">BH", 253, value)
def compress(input_file, output_file, transform_tables=None):
- """Compress OpenType font to WOFF2.
+ """Compress OpenType font to WOFF2.
- Args:
- input_file: a file path, file or file-like object (open in binary mode)
- containing an OpenType font (either CFF- or TrueType-flavored).
- output_file: a file path, file or file-like object where to save the
- compressed WOFF2 font.
- transform_tables: Optional[Iterable[str]]: a set of table tags for which
- to enable preprocessing transformations. By default, only 'glyf'
- and 'loca' tables are transformed. An empty set means disable all
- transformations.
- """
- log.info("Processing %s => %s" % (input_file, output_file))
+ Args:
+ input_file: a file path, file or file-like object (open in binary mode)
+ containing an OpenType font (either CFF- or TrueType-flavored).
+ output_file: a file path, file or file-like object where to save the
+ compressed WOFF2 font.
+ transform_tables: Optional[Iterable[str]]: a set of table tags for which
+ to enable preprocessing transformations. By default, only 'glyf'
+ and 'loca' tables are transformed. An empty set means disable all
+ transformations.
+ """
+ log.info("Processing %s => %s" % (input_file, output_file))
- font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
- font.flavor = "woff2"
+ font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
+ font.flavor = "woff2"
- if transform_tables is not None:
- font.flavorData = WOFF2FlavorData(
- data=font.flavorData, transformedTables=transform_tables
- )
+ if transform_tables is not None:
+ font.flavorData = WOFF2FlavorData(
+ data=font.flavorData, transformedTables=transform_tables
+ )
- font.save(output_file, reorderTables=False)
+ font.save(output_file, reorderTables=False)
def decompress(input_file, output_file):
- """Decompress WOFF2 font to OpenType font.
+ """Decompress WOFF2 font to OpenType font.
- Args:
- input_file: a file path, file or file-like object (open in binary mode)
- containing a compressed WOFF2 font.
- output_file: a file path, file or file-like object where to save the
- decompressed OpenType font.
- """
- log.info("Processing %s => %s" % (input_file, output_file))
+ Args:
+ input_file: a file path, file or file-like object (open in binary mode)
+ containing a compressed WOFF2 font.
+ output_file: a file path, file or file-like object where to save the
+ decompressed OpenType font.
+ """
+ log.info("Processing %s => %s" % (input_file, output_file))
- font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
- font.flavor = None
- font.flavorData = None
- font.save(output_file, reorderTables=True)
+ font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
+ font.flavor = None
+ font.flavorData = None
+ font.save(output_file, reorderTables=True)
def main(args=None):
- """Compress and decompress WOFF2 fonts"""
- import argparse
- from fontTools import configLogger
- from fontTools.ttx import makeOutputFileName
-
- class _HelpAction(argparse._HelpAction):
-
- def __call__(self, parser, namespace, values, option_string=None):
- subparsers_actions = [
- action for action in parser._actions
- if isinstance(action, argparse._SubParsersAction)]
- for subparsers_action in subparsers_actions:
- for choice, subparser in subparsers_action.choices.items():
- print(subparser.format_help())
- parser.exit()
-
- class _NoGlyfTransformAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string=None):
- namespace.transform_tables.difference_update({"glyf", "loca"})
-
- class _HmtxTransformAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string=None):
- namespace.transform_tables.add("hmtx")
-
- parser = argparse.ArgumentParser(
- prog="fonttools ttLib.woff2",
- description=main.__doc__,
- add_help = False
- )
-
- parser.add_argument('-h', '--help', action=_HelpAction,
- help='show this help message and exit')
-
- parser_group = parser.add_subparsers(title="sub-commands")
- parser_compress = parser_group.add_parser("compress",
- description = "Compress a TTF or OTF font to WOFF2")
- parser_decompress = parser_group.add_parser("decompress",
- description = "Decompress a WOFF2 font to OTF")
-
- for subparser in (parser_compress, parser_decompress):
- group = subparser.add_mutually_exclusive_group(required=False)
- group.add_argument(
- "-v",
- "--verbose",
- action="store_true",
- help="print more messages to console",
- )
- group.add_argument(
- "-q",
- "--quiet",
- action="store_true",
- help="do not print messages to console",
- )
-
- parser_compress.add_argument(
- "input_file",
- metavar="INPUT",
- help="the input OpenType font (.ttf or .otf)",
- )
- parser_decompress.add_argument(
- "input_file",
- metavar="INPUT",
- help="the input WOFF2 font",
- )
-
- parser_compress.add_argument(
- "-o",
- "--output-file",
- metavar="OUTPUT",
- help="the output WOFF2 font",
- )
- parser_decompress.add_argument(
- "-o",
- "--output-file",
- metavar="OUTPUT",
- help="the output OpenType font",
- )
-
- transform_group = parser_compress.add_argument_group()
- transform_group.add_argument(
- "--no-glyf-transform",
- dest="transform_tables",
- nargs=0,
- action=_NoGlyfTransformAction,
- help="Do not transform glyf (and loca) tables",
- )
- transform_group.add_argument(
- "--hmtx-transform",
- dest="transform_tables",
- nargs=0,
- action=_HmtxTransformAction,
- help="Enable optional transformation for 'hmtx' table",
- )
-
- parser_compress.set_defaults(
- subcommand=compress,
- transform_tables={"glyf", "loca"},
- )
- parser_decompress.set_defaults(subcommand=decompress)
-
- options = vars(parser.parse_args(args))
-
- subcommand = options.pop("subcommand", None)
- if not subcommand:
- parser.print_help()
- return
-
- quiet = options.pop("quiet")
- verbose = options.pop("verbose")
- configLogger(
- level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"),
- )
-
- if not options["output_file"]:
- if subcommand is compress:
- extension = ".woff2"
- elif subcommand is decompress:
- # choose .ttf/.otf file extension depending on sfntVersion
- with open(options["input_file"], "rb") as f:
- f.seek(4) # skip 'wOF2' signature
- sfntVersion = f.read(4)
- assert len(sfntVersion) == 4, "not enough data"
- extension = ".otf" if sfntVersion == b"OTTO" else ".ttf"
- else:
- raise AssertionError(subcommand)
- options["output_file"] = makeOutputFileName(
- options["input_file"], outputDir=None, extension=extension
- )
-
- try:
- subcommand(**options)
- except TTLibError as e:
- parser.error(e)
+ """Compress and decompress WOFF2 fonts"""
+ import argparse
+ from fontTools import configLogger
+ from fontTools.ttx import makeOutputFileName
+
+ class _HelpAction(argparse._HelpAction):
+ def __call__(self, parser, namespace, values, option_string=None):
+ subparsers_actions = [
+ action
+ for action in parser._actions
+ if isinstance(action, argparse._SubParsersAction)
+ ]
+ for subparsers_action in subparsers_actions:
+ for choice, subparser in subparsers_action.choices.items():
+ print(subparser.format_help())
+ parser.exit()
+
+ class _NoGlyfTransformAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.transform_tables.difference_update({"glyf", "loca"})
+
+ class _HmtxTransformAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.transform_tables.add("hmtx")
+
+ parser = argparse.ArgumentParser(
+ prog="fonttools ttLib.woff2", description=main.__doc__, add_help=False
+ )
+
+ parser.add_argument(
+ "-h", "--help", action=_HelpAction, help="show this help message and exit"
+ )
+
+ parser_group = parser.add_subparsers(title="sub-commands")
+ parser_compress = parser_group.add_parser(
+ "compress", description="Compress a TTF or OTF font to WOFF2"
+ )
+ parser_decompress = parser_group.add_parser(
+ "decompress", description="Decompress a WOFF2 font to OTF"
+ )
+
+ for subparser in (parser_compress, parser_decompress):
+ group = subparser.add_mutually_exclusive_group(required=False)
+ group.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="print more messages to console",
+ )
+ group.add_argument(
+ "-q",
+ "--quiet",
+ action="store_true",
+ help="do not print messages to console",
+ )
+
+ parser_compress.add_argument(
+ "input_file",
+ metavar="INPUT",
+ help="the input OpenType font (.ttf or .otf)",
+ )
+ parser_decompress.add_argument(
+ "input_file",
+ metavar="INPUT",
+ help="the input WOFF2 font",
+ )
+
+ parser_compress.add_argument(
+ "-o",
+ "--output-file",
+ metavar="OUTPUT",
+ help="the output WOFF2 font",
+ )
+ parser_decompress.add_argument(
+ "-o",
+ "--output-file",
+ metavar="OUTPUT",
+ help="the output OpenType font",
+ )
+
+ transform_group = parser_compress.add_argument_group()
+ transform_group.add_argument(
+ "--no-glyf-transform",
+ dest="transform_tables",
+ nargs=0,
+ action=_NoGlyfTransformAction,
+ help="Do not transform glyf (and loca) tables",
+ )
+ transform_group.add_argument(
+ "--hmtx-transform",
+ dest="transform_tables",
+ nargs=0,
+ action=_HmtxTransformAction,
+ help="Enable optional transformation for 'hmtx' table",
+ )
+
+ parser_compress.set_defaults(
+ subcommand=compress,
+ transform_tables={"glyf", "loca"},
+ )
+ parser_decompress.set_defaults(subcommand=decompress)
+
+ options = vars(parser.parse_args(args))
+
+ subcommand = options.pop("subcommand", None)
+ if not subcommand:
+ parser.print_help()
+ return
+
+ quiet = options.pop("quiet")
+ verbose = options.pop("verbose")
+ configLogger(
+ level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"),
+ )
+
+ if not options["output_file"]:
+ if subcommand is compress:
+ extension = ".woff2"
+ elif subcommand is decompress:
+ # choose .ttf/.otf file extension depending on sfntVersion
+ with open(options["input_file"], "rb") as f:
+ f.seek(4) # skip 'wOF2' signature
+ sfntVersion = f.read(4)
+ assert len(sfntVersion) == 4, "not enough data"
+ extension = ".otf" if sfntVersion == b"OTTO" else ".ttf"
+ else:
+ raise AssertionError(subcommand)
+ options["output_file"] = makeOutputFileName(
+ options["input_file"], outputDir=None, extension=extension
+ )
+
+ try:
+ subcommand(**options)
+ except TTLibError as e:
+ parser.error(e)
if __name__ == "__main__":
- sys.exit(main())
+ sys.exit(main())
diff --git a/Lib/fontTools/ttx.py b/Lib/fontTools/ttx.py
index 3f06c58b..d8c2a3a7 100644
--- a/Lib/fontTools/ttx.py
+++ b/Lib/fontTools/ttx.py
@@ -5,8 +5,9 @@ TTX -- From OpenType To XML And Back
If an input file is a TrueType or OpenType font file, it will be
decompiled to a TTX file (an XML-based text format).
-If an input file is a TTX file, it will be compiled to whatever
+If an input file is a TTX file, it will be compiled to whatever
format the data is in, a TrueType or OpenType/CFF font file.
+A special input value of - means read from the standard input.
Output files are created so they are unique: an existing file is
never overwritten.
@@ -119,302 +120,349 @@ import logging
log = logging.getLogger("fontTools.ttx")
-opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''')
+opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""")
class Options(object):
-
- listTables = False
- outputDir = None
- outputFile = None
- overWrite = False
- verbose = False
- quiet = False
- splitTables = False
- splitGlyphs = False
- disassembleInstructions = True
- mergeFile = None
- recalcBBoxes = True
- ignoreDecompileErrors = True
- bitmapGlyphDataFormat = 'raw'
- unicodedata = None
- newlinestr = "\n"
- recalcTimestamp = None
- flavor = None
- useZopfli = False
-
- def __init__(self, rawOptions, numFiles):
- self.onlyTables = []
- self.skipTables = []
- self.fontNumber = -1
- for option, value in rawOptions:
- # general options
- if option == "-h":
- print(__doc__)
- sys.exit(0)
- elif option == "--version":
- from fontTools import version
- print(version)
- sys.exit(0)
- elif option == "-d":
- if not os.path.isdir(value):
- raise getopt.GetoptError("The -d option value must be an existing directory")
- self.outputDir = value
- elif option == "-o":
- self.outputFile = value
- elif option == "-f":
- self.overWrite = True
- elif option == "-v":
- self.verbose = True
- elif option == "-q":
- self.quiet = True
- # dump options
- elif option == "-l":
- self.listTables = True
- elif option == "-t":
- # pad with space if table tag length is less than 4
- value = value.ljust(4)
- self.onlyTables.append(value)
- elif option == "-x":
- # pad with space if table tag length is less than 4
- value = value.ljust(4)
- self.skipTables.append(value)
- elif option == "-s":
- self.splitTables = True
- elif option == "-g":
- # -g implies (and forces) splitTables
- self.splitGlyphs = True
- self.splitTables = True
- elif option == "-i":
- self.disassembleInstructions = False
- elif option == "-z":
- validOptions = ('raw', 'row', 'bitwise', 'extfile')
- if value not in validOptions:
- raise getopt.GetoptError(
- "-z does not allow %s as a format. Use %s" % (option, validOptions))
- self.bitmapGlyphDataFormat = value
- elif option == "-y":
- self.fontNumber = int(value)
- # compile options
- elif option == "-m":
- self.mergeFile = value
- elif option == "-b":
- self.recalcBBoxes = False
- elif option == "-e":
- self.ignoreDecompileErrors = False
- elif option == "--unicodedata":
- self.unicodedata = value
- elif option == "--newline":
- validOptions = ('LF', 'CR', 'CRLF')
- if value == "LF":
- self.newlinestr = "\n"
- elif value == "CR":
- self.newlinestr = "\r"
- elif value == "CRLF":
- self.newlinestr = "\r\n"
- else:
- raise getopt.GetoptError(
- "Invalid choice for --newline: %r (choose from %s)"
- % (value, ", ".join(map(repr, validOptions))))
- elif option == "--recalc-timestamp":
- self.recalcTimestamp = True
- elif option == "--no-recalc-timestamp":
- self.recalcTimestamp = False
- elif option == "--flavor":
- self.flavor = value
- elif option == "--with-zopfli":
- self.useZopfli = True
- if self.verbose and self.quiet:
- raise getopt.GetoptError("-q and -v options are mutually exclusive")
- if self.verbose:
- self.logLevel = logging.DEBUG
- elif self.quiet:
- self.logLevel = logging.WARNING
- else:
- self.logLevel = logging.INFO
- if self.mergeFile and self.flavor:
- raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
- if self.onlyTables and self.skipTables:
- raise getopt.GetoptError("-t and -x options are mutually exclusive")
- if self.mergeFile and numFiles > 1:
- raise getopt.GetoptError("Must specify exactly one TTX source file when using -m")
- if self.flavor != 'woff' and self.useZopfli:
- raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
+ listTables = False
+ outputDir = None
+ outputFile = None
+ overWrite = False
+ verbose = False
+ quiet = False
+ splitTables = False
+ splitGlyphs = False
+ disassembleInstructions = True
+ mergeFile = None
+ recalcBBoxes = True
+ ignoreDecompileErrors = True
+ bitmapGlyphDataFormat = "raw"
+ unicodedata = None
+ newlinestr = "\n"
+ recalcTimestamp = None
+ flavor = None
+ useZopfli = False
+
+ def __init__(self, rawOptions, numFiles):
+ self.onlyTables = []
+ self.skipTables = []
+ self.fontNumber = -1
+ for option, value in rawOptions:
+ # general options
+ if option == "-h":
+ print(__doc__)
+ sys.exit(0)
+ elif option == "--version":
+ from fontTools import version
+
+ print(version)
+ sys.exit(0)
+ elif option == "-d":
+ if not os.path.isdir(value):
+ raise getopt.GetoptError(
+ "The -d option value must be an existing directory"
+ )
+ self.outputDir = value
+ elif option == "-o":
+ self.outputFile = value
+ elif option == "-f":
+ self.overWrite = True
+ elif option == "-v":
+ self.verbose = True
+ elif option == "-q":
+ self.quiet = True
+ # dump options
+ elif option == "-l":
+ self.listTables = True
+ elif option == "-t":
+ # pad with space if table tag length is less than 4
+ value = value.ljust(4)
+ self.onlyTables.append(value)
+ elif option == "-x":
+ # pad with space if table tag length is less than 4
+ value = value.ljust(4)
+ self.skipTables.append(value)
+ elif option == "-s":
+ self.splitTables = True
+ elif option == "-g":
+ # -g implies (and forces) splitTables
+ self.splitGlyphs = True
+ self.splitTables = True
+ elif option == "-i":
+ self.disassembleInstructions = False
+ elif option == "-z":
+ validOptions = ("raw", "row", "bitwise", "extfile")
+ if value not in validOptions:
+ raise getopt.GetoptError(
+ "-z does not allow %s as a format. Use %s"
+ % (option, validOptions)
+ )
+ self.bitmapGlyphDataFormat = value
+ elif option == "-y":
+ self.fontNumber = int(value)
+ # compile options
+ elif option == "-m":
+ self.mergeFile = value
+ elif option == "-b":
+ self.recalcBBoxes = False
+ elif option == "-e":
+ self.ignoreDecompileErrors = False
+ elif option == "--unicodedata":
+ self.unicodedata = value
+ elif option == "--newline":
+ validOptions = ("LF", "CR", "CRLF")
+ if value == "LF":
+ self.newlinestr = "\n"
+ elif value == "CR":
+ self.newlinestr = "\r"
+ elif value == "CRLF":
+ self.newlinestr = "\r\n"
+ else:
+ raise getopt.GetoptError(
+ "Invalid choice for --newline: %r (choose from %s)"
+ % (value, ", ".join(map(repr, validOptions)))
+ )
+ elif option == "--recalc-timestamp":
+ self.recalcTimestamp = True
+ elif option == "--no-recalc-timestamp":
+ self.recalcTimestamp = False
+ elif option == "--flavor":
+ self.flavor = value
+ elif option == "--with-zopfli":
+ self.useZopfli = True
+ if self.verbose and self.quiet:
+ raise getopt.GetoptError("-q and -v options are mutually exclusive")
+ if self.verbose:
+ self.logLevel = logging.DEBUG
+ elif self.quiet:
+ self.logLevel = logging.WARNING
+ else:
+ self.logLevel = logging.INFO
+ if self.mergeFile and self.flavor:
+ raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
+ if self.onlyTables and self.skipTables:
+ raise getopt.GetoptError("-t and -x options are mutually exclusive")
+ if self.mergeFile and numFiles > 1:
+ raise getopt.GetoptError(
+ "Must specify exactly one TTX source file when using -m"
+ )
+ if self.flavor != "woff" and self.useZopfli:
+ raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
def ttList(input, output, options):
- ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
- reader = ttf.reader
- tags = sorted(reader.keys())
- print('Listing table info for "%s":' % input)
- format = " %4s %10s %8s %8s"
- print(format % ("tag ", " checksum", " length", " offset"))
- print(format % ("----", "----------", "--------", "--------"))
- for tag in tags:
- entry = reader.tables[tag]
- if ttf.flavor == "woff2":
- # WOFF2 doesn't store table checksums, so they must be calculated
- from fontTools.ttLib.sfnt import calcChecksum
- data = entry.loadData(reader.transformBuffer)
- checkSum = calcChecksum(data)
- else:
- checkSum = int(entry.checkSum)
- if checkSum < 0:
- checkSum = checkSum + 0x100000000
- checksum = "0x%08X" % checkSum
- print(format % (tag, checksum, entry.length, entry.offset))
- print()
- ttf.close()
-
-
-@Timer(log, 'Done dumping TTX in %(time).3f seconds')
+ ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
+ reader = ttf.reader
+ tags = sorted(reader.keys())
+ print('Listing table info for "%s":' % input)
+ format = " %4s %10s %8s %8s"
+ print(format % ("tag ", " checksum", " length", " offset"))
+ print(format % ("----", "----------", "--------", "--------"))
+ for tag in tags:
+ entry = reader.tables[tag]
+ if ttf.flavor == "woff2":
+ # WOFF2 doesn't store table checksums, so they must be calculated
+ from fontTools.ttLib.sfnt import calcChecksum
+
+ data = entry.loadData(reader.transformBuffer)
+ checkSum = calcChecksum(data)
+ else:
+ checkSum = int(entry.checkSum)
+ if checkSum < 0:
+ checkSum = checkSum + 0x100000000
+ checksum = "0x%08X" % checkSum
+ print(format % (tag, checksum, entry.length, entry.offset))
+ print()
+ ttf.close()
+
+
+@Timer(log, "Done dumping TTX in %(time).3f seconds")
def ttDump(input, output, options):
- log.info('Dumping "%s" to "%s"...', input, output)
- if options.unicodedata:
- setUnicodeData(options.unicodedata)
- ttf = TTFont(input, 0,
- ignoreDecompileErrors=options.ignoreDecompileErrors,
- fontNumber=options.fontNumber)
- ttf.saveXML(output,
- tables=options.onlyTables,
- skipTables=options.skipTables,
- splitTables=options.splitTables,
- splitGlyphs=options.splitGlyphs,
- disassembleInstructions=options.disassembleInstructions,
- bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
- newlinestr=options.newlinestr)
- ttf.close()
-
-
-@Timer(log, 'Done compiling TTX in %(time).3f seconds')
+ input_name = input
+ if input == "-":
+ input, input_name = sys.stdin.buffer, sys.stdin.name
+ output_name = output
+ if output == "-":
+ output, output_name = sys.stdout, sys.stdout.name
+ log.info('Dumping "%s" to "%s"...', input_name, output_name)
+ if options.unicodedata:
+ setUnicodeData(options.unicodedata)
+ ttf = TTFont(
+ input,
+ 0,
+ ignoreDecompileErrors=options.ignoreDecompileErrors,
+ fontNumber=options.fontNumber,
+ )
+ ttf.saveXML(
+ output,
+ tables=options.onlyTables,
+ skipTables=options.skipTables,
+ splitTables=options.splitTables,
+ splitGlyphs=options.splitGlyphs,
+ disassembleInstructions=options.disassembleInstructions,
+ bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
+ newlinestr=options.newlinestr,
+ )
+ ttf.close()
+
+
+@Timer(log, "Done compiling TTX in %(time).3f seconds")
def ttCompile(input, output, options):
- log.info('Compiling "%s" to "%s"...' % (input, output))
- if options.useZopfli:
- from fontTools.ttLib import sfnt
- sfnt.USE_ZOPFLI = True
- ttf = TTFont(options.mergeFile, flavor=options.flavor,
- recalcBBoxes=options.recalcBBoxes,
- recalcTimestamp=options.recalcTimestamp)
- ttf.importXML(input)
-
- if options.recalcTimestamp is None and 'head' in ttf:
- # use TTX file modification time for head "modified" timestamp
- mtime = os.path.getmtime(input)
- ttf['head'].modified = timestampSinceEpoch(mtime)
-
- ttf.save(output)
+ input_name = input
+ if input == "-":
+ input, input_name = sys.stdin, sys.stdin.name
+ output_name = output
+ if output == "-":
+ output, output_name = sys.stdout.buffer, sys.stdout.name
+ log.info('Compiling "%s" to "%s"...' % (input_name, output))
+ if options.useZopfli:
+ from fontTools.ttLib import sfnt
+
+ sfnt.USE_ZOPFLI = True
+ ttf = TTFont(
+ options.mergeFile,
+ flavor=options.flavor,
+ recalcBBoxes=options.recalcBBoxes,
+ recalcTimestamp=options.recalcTimestamp,
+ )
+ ttf.importXML(input)
+
+ if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin:
+ # use TTX file modification time for head "modified" timestamp
+ mtime = os.path.getmtime(input)
+ ttf["head"].modified = timestampSinceEpoch(mtime)
+
+ ttf.save(output)
def guessFileType(fileName):
- base, ext = os.path.splitext(fileName)
- try:
- with open(fileName, "rb") as f:
- header = f.read(256)
- except IOError:
- return None
-
- if header.startswith(b'\xef\xbb\xbf<?xml'):
- header = header.lstrip(b'\xef\xbb\xbf')
- cr, tp = getMacCreatorAndType(fileName)
- if tp in ("sfnt", "FFIL"):
- return "TTF"
- if ext == ".dfont":
- return "TTF"
- head = Tag(header[:4])
- if head == "OTTO":
- return "OTF"
- elif head == "ttcf":
- return "TTC"
- elif head in ("\0\1\0\0", "true"):
- return "TTF"
- elif head == "wOFF":
- return "WOFF"
- elif head == "wOF2":
- return "WOFF2"
- elif head == "<?xm":
- # Use 'latin1' because that can't fail.
- header = tostr(header, 'latin1')
- if opentypeheaderRE.search(header):
- return "OTX"
- else:
- return "TTX"
- return None
+ if fileName == "-":
+ header = sys.stdin.buffer.peek(256)
+ ext = ""
+ else:
+ base, ext = os.path.splitext(fileName)
+ try:
+ with open(fileName, "rb") as f:
+ header = f.read(256)
+ except IOError:
+ return None
+
+ if header.startswith(b"\xef\xbb\xbf<?xml"):
+ header = header.lstrip(b"\xef\xbb\xbf")
+ cr, tp = getMacCreatorAndType(fileName)
+ if tp in ("sfnt", "FFIL"):
+ return "TTF"
+ if ext == ".dfont":
+ return "TTF"
+ head = Tag(header[:4])
+ if head == "OTTO":
+ return "OTF"
+ elif head == "ttcf":
+ return "TTC"
+ elif head in ("\0\1\0\0", "true"):
+ return "TTF"
+ elif head == "wOFF":
+ return "WOFF"
+ elif head == "wOF2":
+ return "WOFF2"
+ elif head == "<?xm":
+ # Use 'latin1' because that can't fail.
+ header = tostr(header, "latin1")
+ if opentypeheaderRE.search(header):
+ return "OTX"
+ else:
+ return "TTX"
+ return None
def parseOptions(args):
- rawOptions, files = getopt.getopt(args, "ld:o:fvqht:x:sgim:z:baey:",
- ['unicodedata=', "recalc-timestamp", "no-recalc-timestamp",
- 'flavor=', 'version', 'with-zopfli', 'newline='])
-
- options = Options(rawOptions, len(files))
- jobs = []
-
- if not files:
- raise getopt.GetoptError('Must specify at least one input file')
-
- for input in files:
- if not os.path.isfile(input):
- raise getopt.GetoptError('File not found: "%s"' % input)
- tp = guessFileType(input)
- if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"):
- extension = ".ttx"
- if options.listTables:
- action = ttList
- else:
- action = ttDump
- elif tp == "TTX":
- extension = "."+options.flavor if options.flavor else ".ttf"
- action = ttCompile
- elif tp == "OTX":
- extension = "."+options.flavor if options.flavor else ".otf"
- action = ttCompile
- else:
- raise getopt.GetoptError('Unknown file type: "%s"' % input)
-
- if options.outputFile:
- output = options.outputFile
- else:
- output = makeOutputFileName(input, options.outputDir, extension, options.overWrite)
- # 'touch' output file to avoid race condition in choosing file names
- if action != ttList:
- open(output, 'a').close()
- jobs.append((action, input, output))
- return jobs, options
+ rawOptions, files = getopt.getopt(
+ args,
+ "ld:o:fvqht:x:sgim:z:baey:",
+ [
+ "unicodedata=",
+ "recalc-timestamp",
+ "no-recalc-timestamp",
+ "flavor=",
+ "version",
+ "with-zopfli",
+ "newline=",
+ ],
+ )
+
+ options = Options(rawOptions, len(files))
+ jobs = []
+
+ if not files:
+ raise getopt.GetoptError("Must specify at least one input file")
+
+ for input in files:
+ if input != "-" and not os.path.isfile(input):
+ raise getopt.GetoptError('File not found: "%s"' % input)
+ tp = guessFileType(input)
+ if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"):
+ extension = ".ttx"
+ if options.listTables:
+ action = ttList
+ else:
+ action = ttDump
+ elif tp == "TTX":
+ extension = "." + options.flavor if options.flavor else ".ttf"
+ action = ttCompile
+ elif tp == "OTX":
+ extension = "." + options.flavor if options.flavor else ".otf"
+ action = ttCompile
+ else:
+ raise getopt.GetoptError('Unknown file type: "%s"' % input)
+
+ if options.outputFile:
+ output = options.outputFile
+ else:
+ if input == "-":
+ raise getopt.GetoptError("Must provide -o when reading from stdin")
+ output = makeOutputFileName(
+ input, options.outputDir, extension, options.overWrite
+ )
+ # 'touch' output file to avoid race condition in choosing file names
+ if action != ttList:
+ open(output, "a").close()
+ jobs.append((action, input, output))
+ return jobs, options
def process(jobs, options):
- for action, input, output in jobs:
- action(input, output, options)
+ for action, input, output in jobs:
+ action(input, output, options)
def main(args=None):
- """Convert OpenType fonts to XML and back"""
- from fontTools import configLogger
-
- if args is None:
- args = sys.argv[1:]
- try:
- jobs, options = parseOptions(args)
- except getopt.GetoptError as e:
- print("%s\nERROR: %s" % (__doc__, e), file=sys.stderr)
- sys.exit(2)
-
- configLogger(level=options.logLevel)
-
- try:
- process(jobs, options)
- except KeyboardInterrupt:
- log.error("(Cancelled.)")
- sys.exit(1)
- except SystemExit:
- raise
- except TTLibError as e:
- log.error(e)
- sys.exit(1)
- except:
- log.exception('Unhandled exception has occurred')
- sys.exit(1)
+ """Convert OpenType fonts to XML and back"""
+ from fontTools import configLogger
+
+ if args is None:
+ args = sys.argv[1:]
+ try:
+ jobs, options = parseOptions(args)
+ except getopt.GetoptError as e:
+ print("%s\nERROR: %s" % (__doc__, e), file=sys.stderr)
+ sys.exit(2)
+
+ configLogger(level=options.logLevel)
+
+ try:
+ process(jobs, options)
+ except KeyboardInterrupt:
+ log.error("(Cancelled.)")
+ sys.exit(1)
+ except SystemExit:
+ raise
+ except TTLibError as e:
+ log.error(e)
+ sys.exit(1)
+ except:
+ log.exception("Unhandled exception has occurred")
+ sys.exit(1)
if __name__ == "__main__":
- sys.exit(main())
+ sys.exit(main())
diff --git a/Lib/fontTools/ufoLib/__init__.py b/Lib/fontTools/ufoLib/__init__.py
index fa6cb117..1a456a20 100755
--- a/Lib/fontTools/ufoLib/__init__.py
+++ b/Lib/fontTools/ufoLib/__init__.py
@@ -52,20 +52,20 @@ fontinfo.plist values between the possible format versions.
"""
__all__ = [
- "makeUFOPath",
- "UFOLibError",
- "UFOReader",
- "UFOWriter",
- "UFOReaderWriter",
- "UFOFileStructure",
- "fontInfoAttributesVersion1",
- "fontInfoAttributesVersion2",
- "fontInfoAttributesVersion3",
- "deprecatedFontInfoAttributesVersion2",
- "validateFontInfoVersion2ValueForAttribute",
- "validateFontInfoVersion3ValueForAttribute",
- "convertFontInfoValueForAttributeFromVersion1ToVersion2",
- "convertFontInfoValueForAttributeFromVersion2ToVersion1"
+ "makeUFOPath",
+ "UFOLibError",
+ "UFOReader",
+ "UFOWriter",
+ "UFOReaderWriter",
+ "UFOFileStructure",
+ "fontInfoAttributesVersion1",
+ "fontInfoAttributesVersion2",
+ "fontInfoAttributesVersion3",
+ "deprecatedFontInfoAttributesVersion2",
+ "validateFontInfoVersion2ValueForAttribute",
+ "validateFontInfoVersion3ValueForAttribute",
+ "convertFontInfoValueForAttributeFromVersion1ToVersion2",
+ "convertFontInfoValueForAttributeFromVersion2ToVersion1",
]
__version__ = "3.0.0"
@@ -94,9 +94,10 @@ DEFAULT_LAYER_NAME = "public.default"
class UFOFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum):
- FORMAT_1_0 = (1, 0)
- FORMAT_2_0 = (2, 0)
- FORMAT_3_0 = (3, 0)
+ FORMAT_1_0 = (1, 0)
+ FORMAT_2_0 = (2, 0)
+ FORMAT_3_0 = (3, 0)
+
# python 3.11 doesn't like when a mixin overrides a dunder method like __str__
# for some reasons it keep using Enum.__str__, see
@@ -105,8 +106,8 @@ UFOFormatVersion.__str__ = _VersionTupleEnumMixin.__str__
class UFOFileStructure(enum.Enum):
- ZIP = "zip"
- PACKAGE = "package"
+ ZIP = "zip"
+ PACKAGE = "package"
# --------------
@@ -115,1578 +116,1611 @@ class UFOFileStructure(enum.Enum):
class _UFOBaseIO:
-
- def getFileModificationTime(self, path):
- """
- Returns the modification time for the file at the given path, as a
- floating point number giving the number of seconds since the epoch.
- The path must be relative to the UFO path.
- Returns None if the file does not exist.
- """
- try:
- dt = self.fs.getinfo(fsdecode(path), namespaces=["details"]).modified
- except (fs.errors.MissingInfoNamespace, fs.errors.ResourceNotFound):
- return None
- else:
- return dt.timestamp()
-
- def _getPlist(self, fileName, default=None):
- """
- Read a property list relative to the UFO filesystem's root.
- Raises UFOLibError if the file is missing and default is None,
- otherwise default is returned.
-
- The errors that could be raised during the reading of a plist are
- unpredictable and/or too large to list, so, a blind try: except:
- is done. If an exception occurs, a UFOLibError will be raised.
- """
- try:
- with self.fs.open(fileName, "rb") as f:
- return plistlib.load(f)
- except fs.errors.ResourceNotFound:
- if default is None:
- raise UFOLibError(
- "'%s' is missing on %s. This file is required"
- % (fileName, self.fs)
- )
- else:
- return default
- except Exception as e:
- # TODO(anthrotype): try to narrow this down a little
- raise UFOLibError(
- f"'{fileName}' could not be read on {self.fs}: {e}"
- )
-
- def _writePlist(self, fileName, obj):
- """
- Write a property list to a file relative to the UFO filesystem's root.
-
- Do this sort of atomically, making it harder to corrupt existing files,
- for example when plistlib encounters an error halfway during write.
- This also checks to see if text matches the text that is already in the
- file at path. If so, the file is not rewritten so that the modification
- date is preserved.
-
- The errors that could be raised during the writing of a plist are
- unpredictable and/or too large to list, so, a blind try: except: is done.
- If an exception occurs, a UFOLibError will be raised.
- """
- if self._havePreviousFile:
- try:
- data = plistlib.dumps(obj)
- except Exception as e:
- raise UFOLibError(
- "'%s' could not be written on %s because "
- "the data is not properly formatted: %s"
- % (fileName, self.fs, e)
- )
- if self.fs.exists(fileName) and data == self.fs.readbytes(fileName):
- return
- self.fs.writebytes(fileName, data)
- else:
- with self.fs.openbin(fileName, mode="w") as fp:
- try:
- plistlib.dump(obj, fp)
- except Exception as e:
- raise UFOLibError(
- "'%s' could not be written on %s because "
- "the data is not properly formatted: %s"
- % (fileName, self.fs, e)
- )
+ def getFileModificationTime(self, path):
+ """
+ Returns the modification time for the file at the given path, as a
+ floating point number giving the number of seconds since the epoch.
+ The path must be relative to the UFO path.
+ Returns None if the file does not exist.
+ """
+ try:
+ dt = self.fs.getinfo(fsdecode(path), namespaces=["details"]).modified
+ except (fs.errors.MissingInfoNamespace, fs.errors.ResourceNotFound):
+ return None
+ else:
+ return dt.timestamp()
+
+ def _getPlist(self, fileName, default=None):
+ """
+ Read a property list relative to the UFO filesystem's root.
+ Raises UFOLibError if the file is missing and default is None,
+ otherwise default is returned.
+
+ The errors that could be raised during the reading of a plist are
+ unpredictable and/or too large to list, so, a blind try: except:
+ is done. If an exception occurs, a UFOLibError will be raised.
+ """
+ try:
+ with self.fs.open(fileName, "rb") as f:
+ return plistlib.load(f)
+ except fs.errors.ResourceNotFound:
+ if default is None:
+ raise UFOLibError(
+ "'%s' is missing on %s. This file is required" % (fileName, self.fs)
+ )
+ else:
+ return default
+ except Exception as e:
+ # TODO(anthrotype): try to narrow this down a little
+ raise UFOLibError(f"'{fileName}' could not be read on {self.fs}: {e}")
+
+ def _writePlist(self, fileName, obj):
+ """
+ Write a property list to a file relative to the UFO filesystem's root.
+
+ Do this sort of atomically, making it harder to corrupt existing files,
+ for example when plistlib encounters an error halfway during write.
+ This also checks to see if text matches the text that is already in the
+ file at path. If so, the file is not rewritten so that the modification
+ date is preserved.
+
+ The errors that could be raised during the writing of a plist are
+ unpredictable and/or too large to list, so, a blind try: except: is done.
+ If an exception occurs, a UFOLibError will be raised.
+ """
+ if self._havePreviousFile:
+ try:
+ data = plistlib.dumps(obj)
+ except Exception as e:
+ raise UFOLibError(
+ "'%s' could not be written on %s because "
+ "the data is not properly formatted: %s" % (fileName, self.fs, e)
+ )
+ if self.fs.exists(fileName) and data == self.fs.readbytes(fileName):
+ return
+ self.fs.writebytes(fileName, data)
+ else:
+ with self.fs.openbin(fileName, mode="w") as fp:
+ try:
+ plistlib.dump(obj, fp)
+ except Exception as e:
+ raise UFOLibError(
+ "'%s' could not be written on %s because "
+ "the data is not properly formatted: %s"
+ % (fileName, self.fs, e)
+ )
# ----------
# UFO Reader
# ----------
+
class UFOReader(_UFOBaseIO):
- """
- Read the various components of the .ufo.
-
- By default read data is validated. Set ``validate`` to
- ``False`` to not validate the data.
- """
-
- def __init__(self, path, validate=True):
- if hasattr(path, "__fspath__"): # support os.PathLike objects
- path = path.__fspath__()
-
- if isinstance(path, str):
- structure = _sniffFileStructure(path)
- try:
- if structure is UFOFileStructure.ZIP:
- parentFS = fs.zipfs.ZipFS(path, write=False, encoding="utf-8")
- else:
- parentFS = fs.osfs.OSFS(path)
- except fs.errors.CreateFailed as e:
- raise UFOLibError(f"unable to open '{path}': {e}")
-
- if structure is UFOFileStructure.ZIP:
- # .ufoz zip files must contain a single root directory, with arbitrary
- # name, containing all the UFO files
- rootDirs = [
- p.name for p in parentFS.scandir("/")
- # exclude macOS metadata contained in zip file
- if p.is_dir and p.name != "__MACOSX"
- ]
- if len(rootDirs) == 1:
- # 'ClosingSubFS' ensures that the parent zip file is closed when
- # its root subdirectory is closed
- self.fs = parentFS.opendir(
- rootDirs[0], factory=fs.subfs.ClosingSubFS
- )
- else:
- raise UFOLibError(
- "Expected exactly 1 root directory, found %d" % len(rootDirs)
- )
- else:
- # normal UFO 'packages' are just a single folder
- self.fs = parentFS
- # when passed a path string, we make sure we close the newly opened fs
- # upon calling UFOReader.close method or context manager's __exit__
- self._shouldClose = True
- self._fileStructure = structure
- elif isinstance(path, fs.base.FS):
- filesystem = path
- try:
- filesystem.check()
- except fs.errors.FilesystemClosed:
- raise UFOLibError("the filesystem '%s' is closed" % path)
- else:
- self.fs = filesystem
- try:
- path = filesystem.getsyspath("/")
- except fs.errors.NoSysPath:
- # network or in-memory FS may not map to the local one
- path = str(filesystem)
- # when user passed an already initialized fs instance, it is her
- # responsibility to close it, thus UFOReader.close/__exit__ are no-op
- self._shouldClose = False
- # default to a 'package' structure
- self._fileStructure = UFOFileStructure.PACKAGE
- else:
- raise TypeError(
- "Expected a path string or fs.base.FS object, found '%s'"
- % type(path).__name__
- )
- self._path = fsdecode(path)
- self._validate = validate
- self._upConvertedKerningData = None
-
- try:
- self.readMetaInfo(validate=validate)
- except UFOLibError:
- self.close()
- raise
-
- # properties
-
- def _get_path(self):
- import warnings
-
- warnings.warn(
- "The 'path' attribute is deprecated; use the 'fs' attribute instead",
- DeprecationWarning,
- stacklevel=2,
- )
- return self._path
-
- path = property(_get_path, doc="The path of the UFO (DEPRECATED).")
-
- def _get_formatVersion(self):
- import warnings
-
- warnings.warn(
- "The 'formatVersion' attribute is deprecated; use the 'formatVersionTuple'",
- DeprecationWarning,
- stacklevel=2,
- )
- return self._formatVersion.major
-
- formatVersion = property(
- _get_formatVersion,
- doc="The (major) format version of the UFO. DEPRECATED: Use formatVersionTuple"
- )
-
- @property
- def formatVersionTuple(self):
- """The (major, minor) format version of the UFO.
- This is determined by reading metainfo.plist during __init__.
- """
- return self._formatVersion
-
- def _get_fileStructure(self):
- return self._fileStructure
-
- fileStructure = property(
- _get_fileStructure,
- doc=(
- "The file structure of the UFO: "
- "either UFOFileStructure.ZIP or UFOFileStructure.PACKAGE"
- )
- )
-
- # up conversion
-
- def _upConvertKerning(self, validate):
- """
- Up convert kerning and groups in UFO 1 and 2.
- The data will be held internally until each bit of data
- has been retrieved. The conversion of both must be done
- at once, so the raw data is cached and an error is raised
- if one bit of data becomes obsolete before it is called.
-
- ``validate`` will validate the data.
- """
- if self._upConvertedKerningData:
- testKerning = self._readKerning()
- if testKerning != self._upConvertedKerningData["originalKerning"]:
- raise UFOLibError("The data in kerning.plist has been modified since it was converted to UFO 3 format.")
- testGroups = self._readGroups()
- if testGroups != self._upConvertedKerningData["originalGroups"]:
- raise UFOLibError("The data in groups.plist has been modified since it was converted to UFO 3 format.")
- else:
- groups = self._readGroups()
- if validate:
- invalidFormatMessage = "groups.plist is not properly formatted."
- if not isinstance(groups, dict):
- raise UFOLibError(invalidFormatMessage)
- for groupName, glyphList in groups.items():
- if not isinstance(groupName, str):
- raise UFOLibError(invalidFormatMessage)
- elif not isinstance(glyphList, list):
- raise UFOLibError(invalidFormatMessage)
- for glyphName in glyphList:
- if not isinstance(glyphName, str):
- raise UFOLibError(invalidFormatMessage)
- self._upConvertedKerningData = dict(
- kerning={},
- originalKerning=self._readKerning(),
- groups={},
- originalGroups=groups
- )
- # convert kerning and groups
- kerning, groups, conversionMaps = convertUFO1OrUFO2KerningToUFO3Kerning(
- self._upConvertedKerningData["originalKerning"],
- deepcopy(self._upConvertedKerningData["originalGroups"]),
- self.getGlyphSet()
- )
- # store
- self._upConvertedKerningData["kerning"] = kerning
- self._upConvertedKerningData["groups"] = groups
- self._upConvertedKerningData["groupRenameMaps"] = conversionMaps
-
- # support methods
-
- def readBytesFromPath(self, path):
- """
- Returns the bytes in the file at the given path.
- The path must be relative to the UFO's filesystem root.
- Returns None if the file does not exist.
- """
- try:
- return self.fs.readbytes(fsdecode(path))
- except fs.errors.ResourceNotFound:
- return None
-
- def getReadFileForPath(self, path, encoding=None):
- """
- Returns a file (or file-like) object for the file at the given path.
- The path must be relative to the UFO path.
- Returns None if the file does not exist.
- By default the file is opened in binary mode (reads bytes).
- If encoding is passed, the file is opened in text mode (reads str).
-
- Note: The caller is responsible for closing the open file.
- """
- path = fsdecode(path)
- try:
- if encoding is None:
- return self.fs.openbin(path)
- else:
- return self.fs.open(path, mode="r", encoding=encoding)
- except fs.errors.ResourceNotFound:
- return None
- # metainfo.plist
-
- def _readMetaInfo(self, validate=None):
- """
- Read metainfo.plist and return raw data. Only used for internal operations.
-
- ``validate`` will validate the read data, by default it is set
- to the class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- data = self._getPlist(METAINFO_FILENAME)
- if validate and not isinstance(data, dict):
- raise UFOLibError("metainfo.plist is not properly formatted.")
- try:
- formatVersionMajor = data["formatVersion"]
- except KeyError:
- raise UFOLibError(
- f"Missing required formatVersion in '{METAINFO_FILENAME}' on {self.fs}"
- )
- formatVersionMinor = data.setdefault("formatVersionMinor", 0)
-
- try:
- formatVersion = UFOFormatVersion((formatVersionMajor, formatVersionMinor))
- except ValueError as e:
- unsupportedMsg = (
- f"Unsupported UFO format ({formatVersionMajor}.{formatVersionMinor}) "
- f"in '{METAINFO_FILENAME}' on {self.fs}"
- )
- if validate:
- from fontTools.ufoLib.errors import UnsupportedUFOFormat
-
- raise UnsupportedUFOFormat(unsupportedMsg) from e
-
- formatVersion = UFOFormatVersion.default()
- logger.warning(
- "%s. Assuming the latest supported version (%s). "
- "Some data may be skipped or parsed incorrectly",
- unsupportedMsg, formatVersion
- )
- data["formatVersionTuple"] = formatVersion
- return data
-
- def readMetaInfo(self, validate=None):
- """
- Read metainfo.plist and set formatVersion. Only used for internal operations.
-
- ``validate`` will validate the read data, by default it is set
- to the class's validate value, can be overridden.
- """
- data = self._readMetaInfo(validate=validate)
- self._formatVersion = data["formatVersionTuple"]
-
- # groups.plist
-
- def _readGroups(self):
- groups = self._getPlist(GROUPS_FILENAME, {})
- # remove any duplicate glyphs in a kerning group
- for groupName, glyphList in groups.items():
- if groupName.startswith(('public.kern1.', 'public.kern2.')):
- groups[groupName] = list(OrderedDict.fromkeys(glyphList))
- return groups
-
- def readGroups(self, validate=None):
- """
- Read groups.plist. Returns a dict.
- ``validate`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # handle up conversion
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- self._upConvertKerning(validate)
- groups = self._upConvertedKerningData["groups"]
- # normal
- else:
- groups = self._readGroups()
- if validate:
- valid, message = groupsValidator(groups)
- if not valid:
- raise UFOLibError(message)
- return groups
-
- def getKerningGroupConversionRenameMaps(self, validate=None):
- """
- Get maps defining the renaming that was done during any
- needed kerning group conversion. This method returns a
- dictionary of this form::
-
- {
- "side1" : {"old group name" : "new group name"},
- "side2" : {"old group name" : "new group name"}
- }
-
- When no conversion has been performed, the side1 and side2
- dictionaries will be empty.
-
- ``validate`` will validate the groups, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:
- return dict(side1={}, side2={})
- # use the public group reader to force the load and
- # conversion of the data if it hasn't happened yet.
- self.readGroups(validate=validate)
- return self._upConvertedKerningData["groupRenameMaps"]
-
- # fontinfo.plist
-
- def _readInfo(self, validate):
- data = self._getPlist(FONTINFO_FILENAME, {})
- if validate and not isinstance(data, dict):
- raise UFOLibError("fontinfo.plist is not properly formatted.")
- return data
-
- def readInfo(self, info, validate=None):
- """
- Read fontinfo.plist. It requires an object that allows
- setting attributes with names that follow the fontinfo.plist
- version 3 specification. This will write the attributes
- defined in the file into the object.
-
- ``validate`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- infoDict = self._readInfo(validate)
- infoDataToSet = {}
- # version 1
- if self._formatVersion == UFOFormatVersion.FORMAT_1_0:
- for attr in fontInfoAttributesVersion1:
- value = infoDict.get(attr)
- if value is not None:
- infoDataToSet[attr] = value
- infoDataToSet = _convertFontInfoDataVersion1ToVersion2(infoDataToSet)
- infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)
- # version 2
- elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:
- for attr, dataValidationDict in list(fontInfoAttributesVersion2ValueData.items()):
- value = infoDict.get(attr)
- if value is None:
- continue
- infoDataToSet[attr] = value
- infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)
- # version 3.x
- elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:
- for attr, dataValidationDict in list(fontInfoAttributesVersion3ValueData.items()):
- value = infoDict.get(attr)
- if value is None:
- continue
- infoDataToSet[attr] = value
- # unsupported version
- else:
- raise NotImplementedError(self._formatVersion)
- # validate data
- if validate:
- infoDataToSet = validateInfoVersion3Data(infoDataToSet)
- # populate the object
- for attr, value in list(infoDataToSet.items()):
- try:
- setattr(info, attr, value)
- except AttributeError:
- raise UFOLibError("The supplied info object does not support setting a necessary attribute (%s)." % attr)
-
- # kerning.plist
-
- def _readKerning(self):
- data = self._getPlist(KERNING_FILENAME, {})
- return data
-
- def readKerning(self, validate=None):
- """
- Read kerning.plist. Returns a dict.
-
- ``validate`` will validate the kerning data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # handle up conversion
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- self._upConvertKerning(validate)
- kerningNested = self._upConvertedKerningData["kerning"]
- # normal
- else:
- kerningNested = self._readKerning()
- if validate:
- valid, message = kerningValidator(kerningNested)
- if not valid:
- raise UFOLibError(message)
- # flatten
- kerning = {}
- for left in kerningNested:
- for right in kerningNested[left]:
- value = kerningNested[left][right]
- kerning[left, right] = value
- return kerning
-
- # lib.plist
-
- def readLib(self, validate=None):
- """
- Read lib.plist. Returns a dict.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- data = self._getPlist(LIB_FILENAME, {})
- if validate:
- valid, message = fontLibValidator(data)
- if not valid:
- raise UFOLibError(message)
- return data
-
- # features.fea
-
- def readFeatures(self):
- """
- Read features.fea. Return a string.
- The returned string is empty if the file is missing.
- """
- try:
- with self.fs.open(FEATURES_FILENAME, "r", encoding="utf-8") as f:
- return f.read()
- except fs.errors.ResourceNotFound:
- return ""
-
- # glyph sets & layers
-
- def _readLayerContents(self, validate):
- """
- Rebuild the layer contents list by checking what glyphsets
- are available on disk.
-
- ``validate`` will validate the layer contents.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return [(DEFAULT_LAYER_NAME, DEFAULT_GLYPHS_DIRNAME)]
- contents = self._getPlist(LAYERCONTENTS_FILENAME)
- if validate:
- valid, error = layerContentsValidator(contents, self.fs)
- if not valid:
- raise UFOLibError(error)
- return contents
-
- def getLayerNames(self, validate=None):
- """
- Get the ordered layer names from layercontents.plist.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- layerContents = self._readLayerContents(validate)
- layerNames = [layerName for layerName, directoryName in layerContents]
- return layerNames
-
- def getDefaultLayerName(self, validate=None):
- """
- Get the default layer name from layercontents.plist.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- layerContents = self._readLayerContents(validate)
- for layerName, layerDirectory in layerContents:
- if layerDirectory == DEFAULT_GLYPHS_DIRNAME:
- return layerName
- # this will already have been raised during __init__
- raise UFOLibError("The default layer is not defined in layercontents.plist.")
-
- def getGlyphSet(self, layerName=None, validateRead=None, validateWrite=None):
- """
- Return the GlyphSet associated with the
- glyphs directory mapped to layerName
- in the UFO. If layerName is not provided,
- the name retrieved with getDefaultLayerName
- will be used.
-
- ``validateRead`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- ``validateWrite`` will validate the written data, by default it is set to the
- class's validate value, can be overridden.
- """
- from fontTools.ufoLib.glifLib import GlyphSet
-
- if validateRead is None:
- validateRead = self._validate
- if validateWrite is None:
- validateWrite = self._validate
- if layerName is None:
- layerName = self.getDefaultLayerName(validate=validateRead)
- directory = None
- layerContents = self._readLayerContents(validateRead)
- for storedLayerName, storedLayerDirectory in layerContents:
- if layerName == storedLayerName:
- directory = storedLayerDirectory
- break
- if directory is None:
- raise UFOLibError("No glyphs directory is mapped to \"%s\"." % layerName)
- try:
- glyphSubFS = self.fs.opendir(directory)
- except fs.errors.ResourceNotFound:
- raise UFOLibError(
- f"No '{directory}' directory for layer '{layerName}'"
- )
- return GlyphSet(
- glyphSubFS,
- ufoFormatVersion=self._formatVersion,
- validateRead=validateRead,
- validateWrite=validateWrite,
- expectContentsFile=True
- )
-
- def getCharacterMapping(self, layerName=None, validate=None):
- """
- Return a dictionary that maps unicode values (ints) to
- lists of glyph names.
- """
- if validate is None:
- validate = self._validate
- glyphSet = self.getGlyphSet(layerName, validateRead=validate, validateWrite=True)
- allUnicodes = glyphSet.getUnicodes()
- cmap = {}
- for glyphName, unicodes in allUnicodes.items():
- for code in unicodes:
- if code in cmap:
- cmap[code].append(glyphName)
- else:
- cmap[code] = [glyphName]
- return cmap
-
- # /data
-
- def getDataDirectoryListing(self):
- """
- Returns a list of all files in the data directory.
- The returned paths will be relative to the UFO.
- This will not list directory names, only file names.
- Thus, empty directories will be skipped.
- """
- try:
- self._dataFS = self.fs.opendir(DATA_DIRNAME)
- except fs.errors.ResourceNotFound:
- return []
- except fs.errors.DirectoryExpected:
- raise UFOLibError("The UFO contains a \"data\" file instead of a directory.")
- try:
- # fs Walker.files method returns "absolute" paths (in terms of the
- # root of the 'data' SubFS), so we strip the leading '/' to make
- # them relative
- return [
- p.lstrip("/") for p in self._dataFS.walk.files()
- ]
- except fs.errors.ResourceError:
- return []
-
- def getImageDirectoryListing(self, validate=None):
- """
- Returns a list of all image file names in
- the images directory. Each of the images will
- have been verified to have the PNG signature.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return []
- if validate is None:
- validate = self._validate
- try:
- self._imagesFS = imagesFS = self.fs.opendir(IMAGES_DIRNAME)
- except fs.errors.ResourceNotFound:
- return []
- except fs.errors.DirectoryExpected:
- raise UFOLibError("The UFO contains an \"images\" file instead of a directory.")
- result = []
- for path in imagesFS.scandir("/"):
- if path.is_dir:
- # silently skip this as version control
- # systems often have hidden directories
- continue
- if validate:
- with imagesFS.openbin(path.name) as fp:
- valid, error = pngValidator(fileObj=fp)
- if valid:
- result.append(path.name)
- else:
- result.append(path.name)
- return result
-
- def readData(self, fileName):
- """
- Return bytes for the file named 'fileName' inside the 'data/' directory.
- """
- fileName = fsdecode(fileName)
- try:
- try:
- dataFS = self._dataFS
- except AttributeError:
- # in case readData is called before getDataDirectoryListing
- dataFS = self.fs.opendir(DATA_DIRNAME)
- data = dataFS.readbytes(fileName)
- except fs.errors.ResourceNotFound:
- raise UFOLibError(f"No data file named '{fileName}' on {self.fs}")
- return data
-
- def readImage(self, fileName, validate=None):
- """
- Return image data for the file named fileName.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Reading images is not allowed in UFO {self._formatVersion.major}."
- )
- fileName = fsdecode(fileName)
- try:
- try:
- imagesFS = self._imagesFS
- except AttributeError:
- # in case readImage is called before getImageDirectoryListing
- imagesFS = self.fs.opendir(IMAGES_DIRNAME)
- data = imagesFS.readbytes(fileName)
- except fs.errors.ResourceNotFound:
- raise UFOLibError(f"No image file named '{fileName}' on {self.fs}")
- if validate:
- valid, error = pngValidator(data=data)
- if not valid:
- raise UFOLibError(error)
- return data
-
- def close(self):
- if self._shouldClose:
- self.fs.close()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, exc_tb):
- self.close()
+ """
+ Read the various components of the .ufo.
+
+ By default read data is validated. Set ``validate`` to
+ ``False`` to not validate the data.
+ """
+
+ def __init__(self, path, validate=True):
+ if hasattr(path, "__fspath__"): # support os.PathLike objects
+ path = path.__fspath__()
+
+ if isinstance(path, str):
+ structure = _sniffFileStructure(path)
+ try:
+ if structure is UFOFileStructure.ZIP:
+ parentFS = fs.zipfs.ZipFS(path, write=False, encoding="utf-8")
+ else:
+ parentFS = fs.osfs.OSFS(path)
+ except fs.errors.CreateFailed as e:
+ raise UFOLibError(f"unable to open '{path}': {e}")
+
+ if structure is UFOFileStructure.ZIP:
+ # .ufoz zip files must contain a single root directory, with arbitrary
+ # name, containing all the UFO files
+ rootDirs = [
+ p.name
+ for p in parentFS.scandir("/")
+ # exclude macOS metadata contained in zip file
+ if p.is_dir and p.name != "__MACOSX"
+ ]
+ if len(rootDirs) == 1:
+ # 'ClosingSubFS' ensures that the parent zip file is closed when
+ # its root subdirectory is closed
+ self.fs = parentFS.opendir(
+ rootDirs[0], factory=fs.subfs.ClosingSubFS
+ )
+ else:
+ raise UFOLibError(
+ "Expected exactly 1 root directory, found %d" % len(rootDirs)
+ )
+ else:
+ # normal UFO 'packages' are just a single folder
+ self.fs = parentFS
+ # when passed a path string, we make sure we close the newly opened fs
+ # upon calling UFOReader.close method or context manager's __exit__
+ self._shouldClose = True
+ self._fileStructure = structure
+ elif isinstance(path, fs.base.FS):
+ filesystem = path
+ try:
+ filesystem.check()
+ except fs.errors.FilesystemClosed:
+ raise UFOLibError("the filesystem '%s' is closed" % path)
+ else:
+ self.fs = filesystem
+ try:
+ path = filesystem.getsyspath("/")
+ except fs.errors.NoSysPath:
+ # network or in-memory FS may not map to the local one
+ path = str(filesystem)
+ # when user passed an already initialized fs instance, it is her
+ # responsibility to close it, thus UFOReader.close/__exit__ are no-op
+ self._shouldClose = False
+ # default to a 'package' structure
+ self._fileStructure = UFOFileStructure.PACKAGE
+ else:
+ raise TypeError(
+ "Expected a path string or fs.base.FS object, found '%s'"
+ % type(path).__name__
+ )
+ self._path = fsdecode(path)
+ self._validate = validate
+ self._upConvertedKerningData = None
+
+ try:
+ self.readMetaInfo(validate=validate)
+ except UFOLibError:
+ self.close()
+ raise
+
+ # properties
+
+ def _get_path(self):
+ import warnings
+
+ warnings.warn(
+ "The 'path' attribute is deprecated; use the 'fs' attribute instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self._path
+
+ path = property(_get_path, doc="The path of the UFO (DEPRECATED).")
+
+ def _get_formatVersion(self):
+ import warnings
+
+ warnings.warn(
+ "The 'formatVersion' attribute is deprecated; use the 'formatVersionTuple'",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self._formatVersion.major
+
+ formatVersion = property(
+ _get_formatVersion,
+ doc="The (major) format version of the UFO. DEPRECATED: Use formatVersionTuple",
+ )
+
+ @property
+ def formatVersionTuple(self):
+ """The (major, minor) format version of the UFO.
+ This is determined by reading metainfo.plist during __init__.
+ """
+ return self._formatVersion
+
+ def _get_fileStructure(self):
+ return self._fileStructure
+
+ fileStructure = property(
+ _get_fileStructure,
+ doc=(
+ "The file structure of the UFO: "
+ "either UFOFileStructure.ZIP or UFOFileStructure.PACKAGE"
+ ),
+ )
+
+ # up conversion
+
+ def _upConvertKerning(self, validate):
+ """
+ Up convert kerning and groups in UFO 1 and 2.
+ The data will be held internally until each bit of data
+ has been retrieved. The conversion of both must be done
+ at once, so the raw data is cached and an error is raised
+ if one bit of data becomes obsolete before it is called.
+
+ ``validate`` will validate the data.
+ """
+ if self._upConvertedKerningData:
+ testKerning = self._readKerning()
+ if testKerning != self._upConvertedKerningData["originalKerning"]:
+ raise UFOLibError(
+ "The data in kerning.plist has been modified since it was converted to UFO 3 format."
+ )
+ testGroups = self._readGroups()
+ if testGroups != self._upConvertedKerningData["originalGroups"]:
+ raise UFOLibError(
+ "The data in groups.plist has been modified since it was converted to UFO 3 format."
+ )
+ else:
+ groups = self._readGroups()
+ if validate:
+ invalidFormatMessage = "groups.plist is not properly formatted."
+ if not isinstance(groups, dict):
+ raise UFOLibError(invalidFormatMessage)
+ for groupName, glyphList in groups.items():
+ if not isinstance(groupName, str):
+ raise UFOLibError(invalidFormatMessage)
+ elif not isinstance(glyphList, list):
+ raise UFOLibError(invalidFormatMessage)
+ for glyphName in glyphList:
+ if not isinstance(glyphName, str):
+ raise UFOLibError(invalidFormatMessage)
+ self._upConvertedKerningData = dict(
+ kerning={},
+ originalKerning=self._readKerning(),
+ groups={},
+ originalGroups=groups,
+ )
+ # convert kerning and groups
+ kerning, groups, conversionMaps = convertUFO1OrUFO2KerningToUFO3Kerning(
+ self._upConvertedKerningData["originalKerning"],
+ deepcopy(self._upConvertedKerningData["originalGroups"]),
+ self.getGlyphSet(),
+ )
+ # store
+ self._upConvertedKerningData["kerning"] = kerning
+ self._upConvertedKerningData["groups"] = groups
+ self._upConvertedKerningData["groupRenameMaps"] = conversionMaps
+
+ # support methods
+
+ def readBytesFromPath(self, path):
+ """
+ Returns the bytes in the file at the given path.
+ The path must be relative to the UFO's filesystem root.
+ Returns None if the file does not exist.
+ """
+ try:
+ return self.fs.readbytes(fsdecode(path))
+ except fs.errors.ResourceNotFound:
+ return None
+
+ def getReadFileForPath(self, path, encoding=None):
+ """
+ Returns a file (or file-like) object for the file at the given path.
+ The path must be relative to the UFO path.
+ Returns None if the file does not exist.
+ By default the file is opened in binary mode (reads bytes).
+ If encoding is passed, the file is opened in text mode (reads str).
+
+ Note: The caller is responsible for closing the open file.
+ """
+ path = fsdecode(path)
+ try:
+ if encoding is None:
+ return self.fs.openbin(path)
+ else:
+ return self.fs.open(path, mode="r", encoding=encoding)
+ except fs.errors.ResourceNotFound:
+ return None
+
+ # metainfo.plist
+
+ def _readMetaInfo(self, validate=None):
+ """
+ Read metainfo.plist and return raw data. Only used for internal operations.
+
+ ``validate`` will validate the read data, by default it is set
+ to the class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ data = self._getPlist(METAINFO_FILENAME)
+ if validate and not isinstance(data, dict):
+ raise UFOLibError("metainfo.plist is not properly formatted.")
+ try:
+ formatVersionMajor = data["formatVersion"]
+ except KeyError:
+ raise UFOLibError(
+ f"Missing required formatVersion in '{METAINFO_FILENAME}' on {self.fs}"
+ )
+ formatVersionMinor = data.setdefault("formatVersionMinor", 0)
+
+ try:
+ formatVersion = UFOFormatVersion((formatVersionMajor, formatVersionMinor))
+ except ValueError as e:
+ unsupportedMsg = (
+ f"Unsupported UFO format ({formatVersionMajor}.{formatVersionMinor}) "
+ f"in '{METAINFO_FILENAME}' on {self.fs}"
+ )
+ if validate:
+ from fontTools.ufoLib.errors import UnsupportedUFOFormat
+
+ raise UnsupportedUFOFormat(unsupportedMsg) from e
+
+ formatVersion = UFOFormatVersion.default()
+ logger.warning(
+ "%s. Assuming the latest supported version (%s). "
+ "Some data may be skipped or parsed incorrectly",
+ unsupportedMsg,
+ formatVersion,
+ )
+ data["formatVersionTuple"] = formatVersion
+ return data
+
+ def readMetaInfo(self, validate=None):
+ """
+ Read metainfo.plist and set formatVersion. Only used for internal operations.
+
+ ``validate`` will validate the read data, by default it is set
+ to the class's validate value, can be overridden.
+ """
+ data = self._readMetaInfo(validate=validate)
+ self._formatVersion = data["formatVersionTuple"]
+
+ # groups.plist
+
+ def _readGroups(self):
+ groups = self._getPlist(GROUPS_FILENAME, {})
+ # remove any duplicate glyphs in a kerning group
+ for groupName, glyphList in groups.items():
+ if groupName.startswith(("public.kern1.", "public.kern2.")):
+ groups[groupName] = list(OrderedDict.fromkeys(glyphList))
+ return groups
+
+ def readGroups(self, validate=None):
+ """
+ Read groups.plist. Returns a dict.
+ ``validate`` will validate the read data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ # handle up conversion
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ self._upConvertKerning(validate)
+ groups = self._upConvertedKerningData["groups"]
+ # normal
+ else:
+ groups = self._readGroups()
+ if validate:
+ valid, message = groupsValidator(groups)
+ if not valid:
+ raise UFOLibError(message)
+ return groups
+
+ def getKerningGroupConversionRenameMaps(self, validate=None):
+ """
+ Get maps defining the renaming that was done during any
+ needed kerning group conversion. This method returns a
+ dictionary of this form::
+
+ {
+ "side1" : {"old group name" : "new group name"},
+ "side2" : {"old group name" : "new group name"}
+ }
+
+ When no conversion has been performed, the side1 and side2
+ dictionaries will be empty.
+
+ ``validate`` will validate the groups, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:
+ return dict(side1={}, side2={})
+ # use the public group reader to force the load and
+ # conversion of the data if it hasn't happened yet.
+ self.readGroups(validate=validate)
+ return self._upConvertedKerningData["groupRenameMaps"]
+
+ # fontinfo.plist
+
+ def _readInfo(self, validate):
+ data = self._getPlist(FONTINFO_FILENAME, {})
+ if validate and not isinstance(data, dict):
+ raise UFOLibError("fontinfo.plist is not properly formatted.")
+ return data
+
+ def readInfo(self, info, validate=None):
+ """
+ Read fontinfo.plist. It requires an object that allows
+ setting attributes with names that follow the fontinfo.plist
+ version 3 specification. This will write the attributes
+ defined in the file into the object.
+
+ ``validate`` will validate the read data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ infoDict = self._readInfo(validate)
+ infoDataToSet = {}
+ # version 1
+ if self._formatVersion == UFOFormatVersion.FORMAT_1_0:
+ for attr in fontInfoAttributesVersion1:
+ value = infoDict.get(attr)
+ if value is not None:
+ infoDataToSet[attr] = value
+ infoDataToSet = _convertFontInfoDataVersion1ToVersion2(infoDataToSet)
+ infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)
+ # version 2
+ elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:
+ for attr, dataValidationDict in list(
+ fontInfoAttributesVersion2ValueData.items()
+ ):
+ value = infoDict.get(attr)
+ if value is None:
+ continue
+ infoDataToSet[attr] = value
+ infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)
+ # version 3.x
+ elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:
+ for attr, dataValidationDict in list(
+ fontInfoAttributesVersion3ValueData.items()
+ ):
+ value = infoDict.get(attr)
+ if value is None:
+ continue
+ infoDataToSet[attr] = value
+ # unsupported version
+ else:
+ raise NotImplementedError(self._formatVersion)
+ # validate data
+ if validate:
+ infoDataToSet = validateInfoVersion3Data(infoDataToSet)
+ # populate the object
+ for attr, value in list(infoDataToSet.items()):
+ try:
+ setattr(info, attr, value)
+ except AttributeError:
+ raise UFOLibError(
+ "The supplied info object does not support setting a necessary attribute (%s)."
+ % attr
+ )
+
+ # kerning.plist
+
+ def _readKerning(self):
+ data = self._getPlist(KERNING_FILENAME, {})
+ return data
+
+ def readKerning(self, validate=None):
+ """
+ Read kerning.plist. Returns a dict.
+
+ ``validate`` will validate the kerning data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ # handle up conversion
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ self._upConvertKerning(validate)
+ kerningNested = self._upConvertedKerningData["kerning"]
+ # normal
+ else:
+ kerningNested = self._readKerning()
+ if validate:
+ valid, message = kerningValidator(kerningNested)
+ if not valid:
+ raise UFOLibError(message)
+ # flatten
+ kerning = {}
+ for left in kerningNested:
+ for right in kerningNested[left]:
+ value = kerningNested[left][right]
+ kerning[left, right] = value
+ return kerning
+
+ # lib.plist
+
+ def readLib(self, validate=None):
+ """
+ Read lib.plist. Returns a dict.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ data = self._getPlist(LIB_FILENAME, {})
+ if validate:
+ valid, message = fontLibValidator(data)
+ if not valid:
+ raise UFOLibError(message)
+ return data
+
+ # features.fea
+
+ def readFeatures(self):
+ """
+ Read features.fea. Return a string.
+ The returned string is empty if the file is missing.
+ """
+ try:
+ with self.fs.open(FEATURES_FILENAME, "r", encoding="utf-8") as f:
+ return f.read()
+ except fs.errors.ResourceNotFound:
+ return ""
+
+ # glyph sets & layers
+
+ def _readLayerContents(self, validate):
+ """
+ Rebuild the layer contents list by checking what glyphsets
+ are available on disk.
+
+ ``validate`` will validate the layer contents.
+ """
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ return [(DEFAULT_LAYER_NAME, DEFAULT_GLYPHS_DIRNAME)]
+ contents = self._getPlist(LAYERCONTENTS_FILENAME)
+ if validate:
+ valid, error = layerContentsValidator(contents, self.fs)
+ if not valid:
+ raise UFOLibError(error)
+ return contents
+
+ def getLayerNames(self, validate=None):
+ """
+ Get the ordered layer names from layercontents.plist.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ layerContents = self._readLayerContents(validate)
+ layerNames = [layerName for layerName, directoryName in layerContents]
+ return layerNames
+
+ def getDefaultLayerName(self, validate=None):
+ """
+ Get the default layer name from layercontents.plist.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ layerContents = self._readLayerContents(validate)
+ for layerName, layerDirectory in layerContents:
+ if layerDirectory == DEFAULT_GLYPHS_DIRNAME:
+ return layerName
+ # this will already have been raised during __init__
+ raise UFOLibError("The default layer is not defined in layercontents.plist.")
+
+ def getGlyphSet(self, layerName=None, validateRead=None, validateWrite=None):
+ """
+ Return the GlyphSet associated with the
+ glyphs directory mapped to layerName
+ in the UFO. If layerName is not provided,
+ the name retrieved with getDefaultLayerName
+ will be used.
+
+ ``validateRead`` will validate the read data, by default it is set to the
+ class's validate value, can be overridden.
+ ``validateWrite`` will validate the written data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ from fontTools.ufoLib.glifLib import GlyphSet
+
+ if validateRead is None:
+ validateRead = self._validate
+ if validateWrite is None:
+ validateWrite = self._validate
+ if layerName is None:
+ layerName = self.getDefaultLayerName(validate=validateRead)
+ directory = None
+ layerContents = self._readLayerContents(validateRead)
+ for storedLayerName, storedLayerDirectory in layerContents:
+ if layerName == storedLayerName:
+ directory = storedLayerDirectory
+ break
+ if directory is None:
+ raise UFOLibError('No glyphs directory is mapped to "%s".' % layerName)
+ try:
+ glyphSubFS = self.fs.opendir(directory)
+ except fs.errors.ResourceNotFound:
+ raise UFOLibError(f"No '{directory}' directory for layer '{layerName}'")
+ return GlyphSet(
+ glyphSubFS,
+ ufoFormatVersion=self._formatVersion,
+ validateRead=validateRead,
+ validateWrite=validateWrite,
+ expectContentsFile=True,
+ )
+
+ def getCharacterMapping(self, layerName=None, validate=None):
+ """
+ Return a dictionary that maps unicode values (ints) to
+ lists of glyph names.
+ """
+ if validate is None:
+ validate = self._validate
+ glyphSet = self.getGlyphSet(
+ layerName, validateRead=validate, validateWrite=True
+ )
+ allUnicodes = glyphSet.getUnicodes()
+ cmap = {}
+ for glyphName, unicodes in allUnicodes.items():
+ for code in unicodes:
+ if code in cmap:
+ cmap[code].append(glyphName)
+ else:
+ cmap[code] = [glyphName]
+ return cmap
+
+ # /data
+
+ def getDataDirectoryListing(self):
+ """
+ Returns a list of all files in the data directory.
+ The returned paths will be relative to the UFO.
+ This will not list directory names, only file names.
+ Thus, empty directories will be skipped.
+ """
+ try:
+ self._dataFS = self.fs.opendir(DATA_DIRNAME)
+ except fs.errors.ResourceNotFound:
+ return []
+ except fs.errors.DirectoryExpected:
+ raise UFOLibError('The UFO contains a "data" file instead of a directory.')
+ try:
+ # fs Walker.files method returns "absolute" paths (in terms of the
+ # root of the 'data' SubFS), so we strip the leading '/' to make
+ # them relative
+ return [p.lstrip("/") for p in self._dataFS.walk.files()]
+ except fs.errors.ResourceError:
+ return []
+
+ def getImageDirectoryListing(self, validate=None):
+ """
+ Returns a list of all image file names in
+ the images directory. Each of the images will
+ have been verified to have the PNG signature.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ return []
+ if validate is None:
+ validate = self._validate
+ try:
+ self._imagesFS = imagesFS = self.fs.opendir(IMAGES_DIRNAME)
+ except fs.errors.ResourceNotFound:
+ return []
+ except fs.errors.DirectoryExpected:
+ raise UFOLibError(
+ 'The UFO contains an "images" file instead of a directory.'
+ )
+ result = []
+ for path in imagesFS.scandir("/"):
+ if path.is_dir:
+ # silently skip this as version control
+ # systems often have hidden directories
+ continue
+ if validate:
+ with imagesFS.openbin(path.name) as fp:
+ valid, error = pngValidator(fileObj=fp)
+ if valid:
+ result.append(path.name)
+ else:
+ result.append(path.name)
+ return result
+
+ def readData(self, fileName):
+ """
+ Return bytes for the file named 'fileName' inside the 'data/' directory.
+ """
+ fileName = fsdecode(fileName)
+ try:
+ try:
+ dataFS = self._dataFS
+ except AttributeError:
+ # in case readData is called before getDataDirectoryListing
+ dataFS = self.fs.opendir(DATA_DIRNAME)
+ data = dataFS.readbytes(fileName)
+ except fs.errors.ResourceNotFound:
+ raise UFOLibError(f"No data file named '{fileName}' on {self.fs}")
+ return data
+
+ def readImage(self, fileName, validate=None):
+ """
+ Return image data for the file named fileName.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ raise UFOLibError(
+ f"Reading images is not allowed in UFO {self._formatVersion.major}."
+ )
+ fileName = fsdecode(fileName)
+ try:
+ try:
+ imagesFS = self._imagesFS
+ except AttributeError:
+ # in case readImage is called before getImageDirectoryListing
+ imagesFS = self.fs.opendir(IMAGES_DIRNAME)
+ data = imagesFS.readbytes(fileName)
+ except fs.errors.ResourceNotFound:
+ raise UFOLibError(f"No image file named '{fileName}' on {self.fs}")
+ if validate:
+ valid, error = pngValidator(data=data)
+ if not valid:
+ raise UFOLibError(error)
+ return data
+
+ def close(self):
+ if self._shouldClose:
+ self.fs.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self.close()
# ----------
# UFO Writer
# ----------
+
class UFOWriter(UFOReader):
- """
- Write the various components of the .ufo.
-
- By default, the written data will be validated before writing. Set ``validate`` to
- ``False`` if you do not want to validate the data. Validation can also be overriden
- on a per method level if desired.
-
- The ``formatVersion`` argument allows to specify the UFO format version as a tuple
- of integers (major, minor), or as a single integer for the major digit only (minor
- is implied as 0). By default the latest formatVersion will be used; currently it's
- 3.0, which is equivalent to formatVersion=(3, 0).
-
- An UnsupportedUFOFormat exception is raised if the requested UFO formatVersion is
- not supported.
- """
-
- def __init__(
- self,
- path,
- formatVersion=None,
- fileCreator="com.github.fonttools.ufoLib",
- structure=None,
- validate=True,
- ):
- try:
- formatVersion = UFOFormatVersion(formatVersion)
- except ValueError as e:
- from fontTools.ufoLib.errors import UnsupportedUFOFormat
-
- raise UnsupportedUFOFormat(f"Unsupported UFO format: {formatVersion!r}") from e
-
- if hasattr(path, "__fspath__"): # support os.PathLike objects
- path = path.__fspath__()
-
- if isinstance(path, str):
- # normalize path by removing trailing or double slashes
- path = os.path.normpath(path)
- havePreviousFile = os.path.exists(path)
- if havePreviousFile:
- # ensure we use the same structure as the destination
- existingStructure = _sniffFileStructure(path)
- if structure is not None:
- try:
- structure = UFOFileStructure(structure)
- except ValueError:
- raise UFOLibError(
- "Invalid or unsupported structure: '%s'" % structure
- )
- if structure is not existingStructure:
- raise UFOLibError(
- "A UFO with a different structure (%s) already exists "
- "at the given path: '%s'" % (existingStructure, path)
- )
- else:
- structure = existingStructure
- else:
- # if not exists, default to 'package' structure
- if structure is None:
- structure = UFOFileStructure.PACKAGE
- dirName = os.path.dirname(path)
- if dirName and not os.path.isdir(dirName):
- raise UFOLibError(
- "Cannot write to '%s': directory does not exist" % path
- )
- if structure is UFOFileStructure.ZIP:
- if havePreviousFile:
- # we can't write a zip in-place, so we have to copy its
- # contents to a temporary location and work from there, then
- # upon closing UFOWriter we create the final zip file
- parentFS = fs.tempfs.TempFS()
- with fs.zipfs.ZipFS(path, encoding="utf-8") as origFS:
- fs.copy.copy_fs(origFS, parentFS)
- # if output path is an existing zip, we require that it contains
- # one, and only one, root directory (with arbitrary name), in turn
- # containing all the existing UFO contents
- rootDirs = [
- p.name for p in parentFS.scandir("/")
- # exclude macOS metadata contained in zip file
- if p.is_dir and p.name != "__MACOSX"
- ]
- if len(rootDirs) != 1:
- raise UFOLibError(
- "Expected exactly 1 root directory, found %d" % len(rootDirs)
- )
- else:
- # 'ClosingSubFS' ensures that the parent filesystem is closed
- # when its root subdirectory is closed
- self.fs = parentFS.opendir(
- rootDirs[0], factory=fs.subfs.ClosingSubFS
- )
- else:
- # if the output zip file didn't exist, we create the root folder;
- # we name it the same as input 'path', but with '.ufo' extension
- rootDir = os.path.splitext(os.path.basename(path))[0] + ".ufo"
- parentFS = fs.zipfs.ZipFS(path, write=True, encoding="utf-8")
- parentFS.makedir(rootDir)
- self.fs = parentFS.opendir(rootDir, factory=fs.subfs.ClosingSubFS)
- else:
- self.fs = fs.osfs.OSFS(path, create=True)
- self._fileStructure = structure
- self._havePreviousFile = havePreviousFile
- self._shouldClose = True
- elif isinstance(path, fs.base.FS):
- filesystem = path
- try:
- filesystem.check()
- except fs.errors.FilesystemClosed:
- raise UFOLibError("the filesystem '%s' is closed" % path)
- else:
- self.fs = filesystem
- try:
- path = filesystem.getsyspath("/")
- except fs.errors.NoSysPath:
- # network or in-memory FS may not map to the local one
- path = str(filesystem)
- # if passed an FS object, always use 'package' structure
- if structure and structure is not UFOFileStructure.PACKAGE:
- import warnings
-
- warnings.warn(
- "The 'structure' argument is not used when input is an FS object",
- UserWarning,
- stacklevel=2,
- )
- self._fileStructure = UFOFileStructure.PACKAGE
- # if FS contains a "metainfo.plist", we consider it non-empty
- self._havePreviousFile = filesystem.exists(METAINFO_FILENAME)
- # the user is responsible for closing the FS object
- self._shouldClose = False
- else:
- raise TypeError(
- "Expected a path string or fs object, found %s"
- % type(path).__name__
- )
-
- # establish some basic stuff
- self._path = fsdecode(path)
- self._formatVersion = formatVersion
- self._fileCreator = fileCreator
- self._downConversionKerningData = None
- self._validate = validate
- # if the file already exists, get the format version.
- # this will be needed for up and down conversion.
- previousFormatVersion = None
- if self._havePreviousFile:
- metaInfo = self._readMetaInfo(validate=validate)
- previousFormatVersion = metaInfo["formatVersionTuple"]
- # catch down conversion
- if previousFormatVersion > formatVersion:
- from fontTools.ufoLib.errors import UnsupportedUFOFormat
-
- raise UnsupportedUFOFormat(
- "The UFO located at this path is a higher version "
- f"({previousFormatVersion}) than the version ({formatVersion}) "
- "that is trying to be written. This is not supported."
- )
- # handle the layer contents
- self.layerContents = {}
- if previousFormatVersion is not None and previousFormatVersion.major >= 3:
- # already exists
- self.layerContents = OrderedDict(self._readLayerContents(validate))
- else:
- # previous < 3
- # imply the layer contents
- if self.fs.exists(DEFAULT_GLYPHS_DIRNAME):
- self.layerContents = {DEFAULT_LAYER_NAME : DEFAULT_GLYPHS_DIRNAME}
- # write the new metainfo
- self._writeMetaInfo()
-
- # properties
-
- def _get_fileCreator(self):
- return self._fileCreator
-
- fileCreator = property(_get_fileCreator, doc="The file creator of the UFO. This is set into metainfo.plist during __init__.")
-
- # support methods for file system interaction
-
- def copyFromReader(self, reader, sourcePath, destPath):
- """
- Copy the sourcePath in the provided UFOReader to destPath
- in this writer. The paths must be relative. This works with
- both individual files and directories.
- """
- if not isinstance(reader, UFOReader):
- raise UFOLibError("The reader must be an instance of UFOReader.")
- sourcePath = fsdecode(sourcePath)
- destPath = fsdecode(destPath)
- if not reader.fs.exists(sourcePath):
- raise UFOLibError("The reader does not have data located at \"%s\"." % sourcePath)
- if self.fs.exists(destPath):
- raise UFOLibError("A file named \"%s\" already exists." % destPath)
- # create the destination directory if it doesn't exist
- self.fs.makedirs(fs.path.dirname(destPath), recreate=True)
- if reader.fs.isdir(sourcePath):
- fs.copy.copy_dir(reader.fs, sourcePath, self.fs, destPath)
- else:
- fs.copy.copy_file(reader.fs, sourcePath, self.fs, destPath)
-
- def writeBytesToPath(self, path, data):
- """
- Write bytes to a path relative to the UFO filesystem's root.
- If writing to an existing UFO, check to see if data matches the data
- that is already in the file at path; if so, the file is not rewritten
- so that the modification date is preserved.
- If needed, the directory tree for the given path will be built.
- """
- path = fsdecode(path)
- if self._havePreviousFile:
- if self.fs.isfile(path) and data == self.fs.readbytes(path):
- return
- try:
- self.fs.writebytes(path, data)
- except fs.errors.FileExpected:
- raise UFOLibError("A directory exists at '%s'" % path)
- except fs.errors.ResourceNotFound:
- self.fs.makedirs(fs.path.dirname(path), recreate=True)
- self.fs.writebytes(path, data)
-
- def getFileObjectForPath(self, path, mode="w", encoding=None):
- """
- Returns a file (or file-like) object for the
- file at the given path. The path must be relative
- to the UFO path. Returns None if the file does
- not exist and the mode is "r" or "rb.
- An encoding may be passed if the file is opened in text mode.
-
- Note: The caller is responsible for closing the open file.
- """
- path = fsdecode(path)
- try:
- return self.fs.open(path, mode=mode, encoding=encoding)
- except fs.errors.ResourceNotFound as e:
- m = mode[0]
- if m == "r":
- # XXX I think we should just let it raise. The docstring,
- # however, says that this returns None if mode is 'r'
- return None
- elif m == "w" or m == "a" or m == "x":
- self.fs.makedirs(fs.path.dirname(path), recreate=True)
- return self.fs.open(path, mode=mode, encoding=encoding)
- except fs.errors.ResourceError as e:
- return UFOLibError(
- f"unable to open '{path}' on {self.fs}: {e}"
- )
-
- def removePath(self, path, force=False, removeEmptyParents=True):
- """
- Remove the file (or directory) at path. The path
- must be relative to the UFO.
- Raises UFOLibError if the path doesn't exist.
- If force=True, ignore non-existent paths.
- If the directory where 'path' is located becomes empty, it will
- be automatically removed, unless 'removeEmptyParents' is False.
- """
- path = fsdecode(path)
- try:
- self.fs.remove(path)
- except fs.errors.FileExpected:
- self.fs.removetree(path)
- except fs.errors.ResourceNotFound:
- if not force:
- raise UFOLibError(
- f"'{path}' does not exist on {self.fs}"
- )
- if removeEmptyParents:
- parent = fs.path.dirname(path)
- if parent:
- fs.tools.remove_empty(self.fs, parent)
-
- # alias kept for backward compatibility with old API
- removeFileForPath = removePath
-
- # UFO mod time
-
- def setModificationTime(self):
- """
- Set the UFO modification time to the current time.
- This is never called automatically. It is up to the
- caller to call this when finished working on the UFO.
- """
- path = self._path
- if path is not None and os.path.exists(path):
- try:
- # this may fail on some filesystems (e.g. SMB servers)
- os.utime(path, None)
- except OSError as e:
- logger.warning("Failed to set modified time: %s", e)
-
- # metainfo.plist
-
- def _writeMetaInfo(self):
- metaInfo = dict(
- creator=self._fileCreator,
- formatVersion=self._formatVersion.major,
- )
- if self._formatVersion.minor != 0:
- metaInfo["formatVersionMinor"] = self._formatVersion.minor
- self._writePlist(METAINFO_FILENAME, metaInfo)
-
- # groups.plist
-
- def setKerningGroupConversionRenameMaps(self, maps):
- """
- Set maps defining the renaming that should be done
- when writing groups and kerning in UFO 1 and UFO 2.
- This will effectively undo the conversion done when
- UFOReader reads this data. The dictionary should have
- this form::
-
- {
- "side1" : {"group name to use when writing" : "group name in data"},
- "side2" : {"group name to use when writing" : "group name in data"}
- }
-
- This is the same form returned by UFOReader's
- getKerningGroupConversionRenameMaps method.
- """
- if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:
- return # XXX raise an error here
- # flip the dictionaries
- remap = {}
- for side in ("side1", "side2"):
- for writeName, dataName in list(maps[side].items()):
- remap[dataName] = writeName
- self._downConversionKerningData = dict(groupRenameMap=remap)
-
- def writeGroups(self, groups, validate=None):
- """
- Write groups.plist. This method requires a
- dict of glyph groups as an argument.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # validate the data structure
- if validate:
- valid, message = groupsValidator(groups)
- if not valid:
- raise UFOLibError(message)
- # down convert
- if (
- self._formatVersion < UFOFormatVersion.FORMAT_3_0
- and self._downConversionKerningData is not None
- ):
- remap = self._downConversionKerningData["groupRenameMap"]
- remappedGroups = {}
- # there are some edge cases here that are ignored:
- # 1. if a group is being renamed to a name that
- # already exists, the existing group is always
- # overwritten. (this is why there are two loops
- # below.) there doesn't seem to be a logical
- # solution to groups mismatching and overwriting
- # with the specifiecd group seems like a better
- # solution than throwing an error.
- # 2. if side 1 and side 2 groups are being renamed
- # to the same group name there is no check to
- # ensure that the contents are identical. that
- # is left up to the caller.
- for name, contents in list(groups.items()):
- if name in remap:
- continue
- remappedGroups[name] = contents
- for name, contents in list(groups.items()):
- if name not in remap:
- continue
- name = remap[name]
- remappedGroups[name] = contents
- groups = remappedGroups
- # pack and write
- groupsNew = {}
- for key, value in groups.items():
- groupsNew[key] = list(value)
- if groupsNew:
- self._writePlist(GROUPS_FILENAME, groupsNew)
- elif self._havePreviousFile:
- self.removePath(GROUPS_FILENAME, force=True, removeEmptyParents=False)
-
- # fontinfo.plist
-
- def writeInfo(self, info, validate=None):
- """
- Write info.plist. This method requires an object
- that supports getting attributes that follow the
- fontinfo.plist version 2 specification. Attributes
- will be taken from the given object and written
- into the file.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # gather version 3 data
- infoData = {}
- for attr in list(fontInfoAttributesVersion3ValueData.keys()):
- if hasattr(info, attr):
- try:
- value = getattr(info, attr)
- except AttributeError:
- raise UFOLibError("The supplied info object does not support getting a necessary attribute (%s)." % attr)
- if value is None:
- continue
- infoData[attr] = value
- # down convert data if necessary and validate
- if self._formatVersion == UFOFormatVersion.FORMAT_3_0:
- if validate:
- infoData = validateInfoVersion3Data(infoData)
- elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:
- infoData = _convertFontInfoDataVersion3ToVersion2(infoData)
- if validate:
- infoData = validateInfoVersion2Data(infoData)
- elif self._formatVersion == UFOFormatVersion.FORMAT_1_0:
- infoData = _convertFontInfoDataVersion3ToVersion2(infoData)
- if validate:
- infoData = validateInfoVersion2Data(infoData)
- infoData = _convertFontInfoDataVersion2ToVersion1(infoData)
- # write file if there is anything to write
- if infoData:
- self._writePlist(FONTINFO_FILENAME, infoData)
-
- # kerning.plist
-
- def writeKerning(self, kerning, validate=None):
- """
- Write kerning.plist. This method requires a
- dict of kerning pairs as an argument.
-
- This performs basic structural validation of the kerning,
- but it does not check for compliance with the spec in
- regards to conflicting pairs. The assumption is that the
- kerning data being passed is standards compliant.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # validate the data structure
- if validate:
- invalidFormatMessage = "The kerning is not properly formatted."
- if not isDictEnough(kerning):
- raise UFOLibError(invalidFormatMessage)
- for pair, value in list(kerning.items()):
- if not isinstance(pair, (list, tuple)):
- raise UFOLibError(invalidFormatMessage)
- if not len(pair) == 2:
- raise UFOLibError(invalidFormatMessage)
- if not isinstance(pair[0], str):
- raise UFOLibError(invalidFormatMessage)
- if not isinstance(pair[1], str):
- raise UFOLibError(invalidFormatMessage)
- if not isinstance(value, numberTypes):
- raise UFOLibError(invalidFormatMessage)
- # down convert
- if (
- self._formatVersion < UFOFormatVersion.FORMAT_3_0
- and self._downConversionKerningData is not None
- ):
- remap = self._downConversionKerningData["groupRenameMap"]
- remappedKerning = {}
- for (side1, side2), value in list(kerning.items()):
- side1 = remap.get(side1, side1)
- side2 = remap.get(side2, side2)
- remappedKerning[side1, side2] = value
- kerning = remappedKerning
- # pack and write
- kerningDict = {}
- for left, right in kerning.keys():
- value = kerning[left, right]
- if left not in kerningDict:
- kerningDict[left] = {}
- kerningDict[left][right] = value
- if kerningDict:
- self._writePlist(KERNING_FILENAME, kerningDict)
- elif self._havePreviousFile:
- self.removePath(KERNING_FILENAME, force=True, removeEmptyParents=False)
-
- # lib.plist
-
- def writeLib(self, libDict, validate=None):
- """
- Write lib.plist. This method requires a
- lib dict as an argument.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- if validate:
- valid, message = fontLibValidator(libDict)
- if not valid:
- raise UFOLibError(message)
- if libDict:
- self._writePlist(LIB_FILENAME, libDict)
- elif self._havePreviousFile:
- self.removePath(LIB_FILENAME, force=True, removeEmptyParents=False)
-
- # features.fea
-
- def writeFeatures(self, features, validate=None):
- """
- Write features.fea. This method requires a
- features string as an argument.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion == UFOFormatVersion.FORMAT_1_0:
- raise UFOLibError("features.fea is not allowed in UFO Format Version 1.")
- if validate:
- if not isinstance(features, str):
- raise UFOLibError("The features are not text.")
- if features:
- self.writeBytesToPath(FEATURES_FILENAME, features.encode("utf8"))
- elif self._havePreviousFile:
- self.removePath(FEATURES_FILENAME, force=True, removeEmptyParents=False)
-
- # glyph sets & layers
-
- def writeLayerContents(self, layerOrder=None, validate=None):
- """
- Write the layercontents.plist file. This method *must* be called
- after all glyph sets have been written.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return
- if layerOrder is not None:
- newOrder = []
- for layerName in layerOrder:
- if layerName is None:
- layerName = DEFAULT_LAYER_NAME
- newOrder.append(layerName)
- layerOrder = newOrder
- else:
- layerOrder = list(self.layerContents.keys())
- if validate and set(layerOrder) != set(self.layerContents.keys()):
- raise UFOLibError("The layer order content does not match the glyph sets that have been created.")
- layerContents = [(layerName, self.layerContents[layerName]) for layerName in layerOrder]
- self._writePlist(LAYERCONTENTS_FILENAME, layerContents)
-
- def _findDirectoryForLayerName(self, layerName):
- foundDirectory = None
- for existingLayerName, directoryName in list(self.layerContents.items()):
- if layerName is None and directoryName == DEFAULT_GLYPHS_DIRNAME:
- foundDirectory = directoryName
- break
- elif existingLayerName == layerName:
- foundDirectory = directoryName
- break
- if not foundDirectory:
- raise UFOLibError("Could not locate a glyph set directory for the layer named %s." % layerName)
- return foundDirectory
-
- def getGlyphSet(
- self,
- layerName=None,
- defaultLayer=True,
- glyphNameToFileNameFunc=None,
- validateRead=None,
- validateWrite=None,
- expectContentsFile=False,
- ):
- """
- Return the GlyphSet object associated with the
- appropriate glyph directory in the .ufo.
- If layerName is None, the default glyph set
- will be used. The defaultLayer flag indictes
- that the layer should be saved into the default
- glyphs directory.
-
- ``validateRead`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- ``validateWrte`` will validate the written data, by default it is set to the
- class's validate value, can be overridden.
- ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is
- not found on the glyph set file system. This should be set to ``True`` if you
- are reading an existing UFO and ``False`` if you use ``getGlyphSet`` to create
- a fresh glyph set.
- """
- if validateRead is None:
- validateRead = self._validate
- if validateWrite is None:
- validateWrite = self._validate
- # only default can be written in < 3
- if (
- self._formatVersion < UFOFormatVersion.FORMAT_3_0
- and (not defaultLayer or layerName is not None)
- ):
- raise UFOLibError(
- f"Only the default layer can be writen in UFO {self._formatVersion.major}."
- )
- # locate a layer name when None has been given
- if layerName is None and defaultLayer:
- for existingLayerName, directory in self.layerContents.items():
- if directory == DEFAULT_GLYPHS_DIRNAME:
- layerName = existingLayerName
- if layerName is None:
- layerName = DEFAULT_LAYER_NAME
- elif layerName is None and not defaultLayer:
- raise UFOLibError("A layer name must be provided for non-default layers.")
- # move along to format specific writing
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return self._getDefaultGlyphSet(
- validateRead,
- validateWrite,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- expectContentsFile=expectContentsFile
- )
- elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:
- return self._getGlyphSetFormatVersion3(
- validateRead,
- validateWrite,
- layerName=layerName,
- defaultLayer=defaultLayer,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- expectContentsFile=expectContentsFile,
- )
- else:
- raise NotImplementedError(self._formatVersion)
-
- def _getDefaultGlyphSet(
- self,
- validateRead,
- validateWrite,
- glyphNameToFileNameFunc=None,
- expectContentsFile=False,
- ):
- from fontTools.ufoLib.glifLib import GlyphSet
-
- glyphSubFS = self.fs.makedir(DEFAULT_GLYPHS_DIRNAME, recreate=True)
- return GlyphSet(
- glyphSubFS,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- ufoFormatVersion=self._formatVersion,
- validateRead=validateRead,
- validateWrite=validateWrite,
- expectContentsFile=expectContentsFile,
- )
-
- def _getGlyphSetFormatVersion3(
- self,
- validateRead,
- validateWrite,
- layerName=None,
- defaultLayer=True,
- glyphNameToFileNameFunc=None,
- expectContentsFile=False,
- ):
- from fontTools.ufoLib.glifLib import GlyphSet
-
- # if the default flag is on, make sure that the default in the file
- # matches the default being written. also make sure that this layer
- # name is not already linked to a non-default layer.
- if defaultLayer:
- for existingLayerName, directory in self.layerContents.items():
- if directory == DEFAULT_GLYPHS_DIRNAME:
- if existingLayerName != layerName:
- raise UFOLibError(
- "Another layer ('%s') is already mapped to the default directory."
- % existingLayerName
- )
- elif existingLayerName == layerName:
- raise UFOLibError("The layer name is already mapped to a non-default layer.")
- # get an existing directory name
- if layerName in self.layerContents:
- directory = self.layerContents[layerName]
- # get a new directory name
- else:
- if defaultLayer:
- directory = DEFAULT_GLYPHS_DIRNAME
- else:
- # not caching this could be slightly expensive,
- # but caching it will be cumbersome
- existing = {d.lower() for d in self.layerContents.values()}
- directory = userNameToFileName(layerName, existing=existing, prefix="glyphs.")
- # make the directory
- glyphSubFS = self.fs.makedir(directory, recreate=True)
- # store the mapping
- self.layerContents[layerName] = directory
- # load the glyph set
- return GlyphSet(
- glyphSubFS,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- ufoFormatVersion=self._formatVersion,
- validateRead=validateRead,
- validateWrite=validateWrite,
- expectContentsFile=expectContentsFile,
- )
-
- def renameGlyphSet(self, layerName, newLayerName, defaultLayer=False):
- """
- Rename a glyph set.
-
- Note: if a GlyphSet object has already been retrieved for
- layerName, it is up to the caller to inform that object that
- the directory it represents has changed.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- # ignore renaming glyph sets for UFO1 UFO2
- # just write the data from the default layer
- return
- # the new and old names can be the same
- # as long as the default is being switched
- if layerName == newLayerName:
- # if the default is off and the layer is already not the default, skip
- if self.layerContents[layerName] != DEFAULT_GLYPHS_DIRNAME and not defaultLayer:
- return
- # if the default is on and the layer is already the default, skip
- if self.layerContents[layerName] == DEFAULT_GLYPHS_DIRNAME and defaultLayer:
- return
- else:
- # make sure the new layer name doesn't already exist
- if newLayerName is None:
- newLayerName = DEFAULT_LAYER_NAME
- if newLayerName in self.layerContents:
- raise UFOLibError("A layer named %s already exists." % newLayerName)
- # make sure the default layer doesn't already exist
- if defaultLayer and DEFAULT_GLYPHS_DIRNAME in self.layerContents.values():
- raise UFOLibError("A default layer already exists.")
- # get the paths
- oldDirectory = self._findDirectoryForLayerName(layerName)
- if defaultLayer:
- newDirectory = DEFAULT_GLYPHS_DIRNAME
- else:
- existing = {name.lower() for name in self.layerContents.values()}
- newDirectory = userNameToFileName(newLayerName, existing=existing, prefix="glyphs.")
- # update the internal mapping
- del self.layerContents[layerName]
- self.layerContents[newLayerName] = newDirectory
- # do the file system copy
- self.fs.movedir(oldDirectory, newDirectory, create=True)
-
- def deleteGlyphSet(self, layerName):
- """
- Remove the glyph set matching layerName.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- # ignore deleting glyph sets for UFO1 UFO2 as there are no layers
- # just write the data from the default layer
- return
- foundDirectory = self._findDirectoryForLayerName(layerName)
- self.removePath(foundDirectory, removeEmptyParents=False)
- del self.layerContents[layerName]
-
- def writeData(self, fileName, data):
- """
- Write data to fileName in the 'data' directory.
- The data must be a bytes string.
- """
- self.writeBytesToPath(f"{DATA_DIRNAME}/{fsdecode(fileName)}", data)
-
- def removeData(self, fileName):
- """
- Remove the file named fileName from the data directory.
- """
- self.removePath(f"{DATA_DIRNAME}/{fsdecode(fileName)}")
-
- # /images
-
- def writeImage(self, fileName, data, validate=None):
- """
- Write data to fileName in the images directory.
- The data must be a valid PNG.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Images are not allowed in UFO {self._formatVersion.major}."
- )
- fileName = fsdecode(fileName)
- if validate:
- valid, error = pngValidator(data=data)
- if not valid:
- raise UFOLibError(error)
- self.writeBytesToPath(f"{IMAGES_DIRNAME}/{fileName}", data)
-
- def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'?
- """
- Remove the file named fileName from the
- images directory.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Images are not allowed in UFO {self._formatVersion.major}."
- )
- self.removePath(f"{IMAGES_DIRNAME}/{fsdecode(fileName)}")
-
- def copyImageFromReader(self, reader, sourceFileName, destFileName, validate=None):
- """
- Copy the sourceFileName in the provided UFOReader to destFileName
- in this writer. This uses the most memory efficient method possible
- for copying the data possible.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Images are not allowed in UFO {self._formatVersion.major}."
- )
- sourcePath = f"{IMAGES_DIRNAME}/{fsdecode(sourceFileName)}"
- destPath = f"{IMAGES_DIRNAME}/{fsdecode(destFileName)}"
- self.copyFromReader(reader, sourcePath, destPath)
-
- def close(self):
- if self._havePreviousFile and self._fileStructure is UFOFileStructure.ZIP:
- # if we are updating an existing zip file, we can now compress the
- # contents of the temporary filesystem in the destination path
- rootDir = os.path.splitext(os.path.basename(self._path))[0] + ".ufo"
- with fs.zipfs.ZipFS(self._path, write=True, encoding="utf-8") as destFS:
- fs.copy.copy_fs(self.fs, destFS.makedir(rootDir))
- super().close()
+ """
+ Write the various components of the .ufo.
+
+ By default, the written data will be validated before writing. Set ``validate`` to
+ ``False`` if you do not want to validate the data. Validation can also be overriden
+ on a per method level if desired.
+
+ The ``formatVersion`` argument allows to specify the UFO format version as a tuple
+ of integers (major, minor), or as a single integer for the major digit only (minor
+ is implied as 0). By default the latest formatVersion will be used; currently it's
+ 3.0, which is equivalent to formatVersion=(3, 0).
+
+ An UnsupportedUFOFormat exception is raised if the requested UFO formatVersion is
+ not supported.
+ """
+
+ def __init__(
+ self,
+ path,
+ formatVersion=None,
+ fileCreator="com.github.fonttools.ufoLib",
+ structure=None,
+ validate=True,
+ ):
+ try:
+ formatVersion = UFOFormatVersion(formatVersion)
+ except ValueError as e:
+ from fontTools.ufoLib.errors import UnsupportedUFOFormat
+
+ raise UnsupportedUFOFormat(
+ f"Unsupported UFO format: {formatVersion!r}"
+ ) from e
+
+ if hasattr(path, "__fspath__"): # support os.PathLike objects
+ path = path.__fspath__()
+
+ if isinstance(path, str):
+ # normalize path by removing trailing or double slashes
+ path = os.path.normpath(path)
+ havePreviousFile = os.path.exists(path)
+ if havePreviousFile:
+ # ensure we use the same structure as the destination
+ existingStructure = _sniffFileStructure(path)
+ if structure is not None:
+ try:
+ structure = UFOFileStructure(structure)
+ except ValueError:
+ raise UFOLibError(
+ "Invalid or unsupported structure: '%s'" % structure
+ )
+ if structure is not existingStructure:
+ raise UFOLibError(
+ "A UFO with a different structure (%s) already exists "
+ "at the given path: '%s'" % (existingStructure, path)
+ )
+ else:
+ structure = existingStructure
+ else:
+ # if not exists, default to 'package' structure
+ if structure is None:
+ structure = UFOFileStructure.PACKAGE
+ dirName = os.path.dirname(path)
+ if dirName and not os.path.isdir(dirName):
+ raise UFOLibError(
+ "Cannot write to '%s': directory does not exist" % path
+ )
+ if structure is UFOFileStructure.ZIP:
+ if havePreviousFile:
+ # we can't write a zip in-place, so we have to copy its
+ # contents to a temporary location and work from there, then
+ # upon closing UFOWriter we create the final zip file
+ parentFS = fs.tempfs.TempFS()
+ with fs.zipfs.ZipFS(path, encoding="utf-8") as origFS:
+ fs.copy.copy_fs(origFS, parentFS)
+ # if output path is an existing zip, we require that it contains
+ # one, and only one, root directory (with arbitrary name), in turn
+ # containing all the existing UFO contents
+ rootDirs = [
+ p.name
+ for p in parentFS.scandir("/")
+ # exclude macOS metadata contained in zip file
+ if p.is_dir and p.name != "__MACOSX"
+ ]
+ if len(rootDirs) != 1:
+ raise UFOLibError(
+ "Expected exactly 1 root directory, found %d"
+ % len(rootDirs)
+ )
+ else:
+ # 'ClosingSubFS' ensures that the parent filesystem is closed
+ # when its root subdirectory is closed
+ self.fs = parentFS.opendir(
+ rootDirs[0], factory=fs.subfs.ClosingSubFS
+ )
+ else:
+ # if the output zip file didn't exist, we create the root folder;
+ # we name it the same as input 'path', but with '.ufo' extension
+ rootDir = os.path.splitext(os.path.basename(path))[0] + ".ufo"
+ parentFS = fs.zipfs.ZipFS(path, write=True, encoding="utf-8")
+ parentFS.makedir(rootDir)
+ self.fs = parentFS.opendir(rootDir, factory=fs.subfs.ClosingSubFS)
+ else:
+ self.fs = fs.osfs.OSFS(path, create=True)
+ self._fileStructure = structure
+ self._havePreviousFile = havePreviousFile
+ self._shouldClose = True
+ elif isinstance(path, fs.base.FS):
+ filesystem = path
+ try:
+ filesystem.check()
+ except fs.errors.FilesystemClosed:
+ raise UFOLibError("the filesystem '%s' is closed" % path)
+ else:
+ self.fs = filesystem
+ try:
+ path = filesystem.getsyspath("/")
+ except fs.errors.NoSysPath:
+ # network or in-memory FS may not map to the local one
+ path = str(filesystem)
+ # if passed an FS object, always use 'package' structure
+ if structure and structure is not UFOFileStructure.PACKAGE:
+ import warnings
+
+ warnings.warn(
+ "The 'structure' argument is not used when input is an FS object",
+ UserWarning,
+ stacklevel=2,
+ )
+ self._fileStructure = UFOFileStructure.PACKAGE
+ # if FS contains a "metainfo.plist", we consider it non-empty
+ self._havePreviousFile = filesystem.exists(METAINFO_FILENAME)
+ # the user is responsible for closing the FS object
+ self._shouldClose = False
+ else:
+ raise TypeError(
+ "Expected a path string or fs object, found %s" % type(path).__name__
+ )
+
+ # establish some basic stuff
+ self._path = fsdecode(path)
+ self._formatVersion = formatVersion
+ self._fileCreator = fileCreator
+ self._downConversionKerningData = None
+ self._validate = validate
+ # if the file already exists, get the format version.
+ # this will be needed for up and down conversion.
+ previousFormatVersion = None
+ if self._havePreviousFile:
+ metaInfo = self._readMetaInfo(validate=validate)
+ previousFormatVersion = metaInfo["formatVersionTuple"]
+ # catch down conversion
+ if previousFormatVersion > formatVersion:
+ from fontTools.ufoLib.errors import UnsupportedUFOFormat
+
+ raise UnsupportedUFOFormat(
+ "The UFO located at this path is a higher version "
+ f"({previousFormatVersion}) than the version ({formatVersion}) "
+ "that is trying to be written. This is not supported."
+ )
+ # handle the layer contents
+ self.layerContents = {}
+ if previousFormatVersion is not None and previousFormatVersion.major >= 3:
+ # already exists
+ self.layerContents = OrderedDict(self._readLayerContents(validate))
+ else:
+ # previous < 3
+ # imply the layer contents
+ if self.fs.exists(DEFAULT_GLYPHS_DIRNAME):
+ self.layerContents = {DEFAULT_LAYER_NAME: DEFAULT_GLYPHS_DIRNAME}
+ # write the new metainfo
+ self._writeMetaInfo()
+
+ # properties
+
+ def _get_fileCreator(self):
+ return self._fileCreator
+
+ fileCreator = property(
+ _get_fileCreator,
+ doc="The file creator of the UFO. This is set into metainfo.plist during __init__.",
+ )
+
+ # support methods for file system interaction
+
+ def copyFromReader(self, reader, sourcePath, destPath):
+ """
+ Copy the sourcePath in the provided UFOReader to destPath
+ in this writer. The paths must be relative. This works with
+ both individual files and directories.
+ """
+ if not isinstance(reader, UFOReader):
+ raise UFOLibError("The reader must be an instance of UFOReader.")
+ sourcePath = fsdecode(sourcePath)
+ destPath = fsdecode(destPath)
+ if not reader.fs.exists(sourcePath):
+ raise UFOLibError(
+ 'The reader does not have data located at "%s".' % sourcePath
+ )
+ if self.fs.exists(destPath):
+ raise UFOLibError('A file named "%s" already exists.' % destPath)
+ # create the destination directory if it doesn't exist
+ self.fs.makedirs(fs.path.dirname(destPath), recreate=True)
+ if reader.fs.isdir(sourcePath):
+ fs.copy.copy_dir(reader.fs, sourcePath, self.fs, destPath)
+ else:
+ fs.copy.copy_file(reader.fs, sourcePath, self.fs, destPath)
+
+ def writeBytesToPath(self, path, data):
+ """
+ Write bytes to a path relative to the UFO filesystem's root.
+ If writing to an existing UFO, check to see if data matches the data
+ that is already in the file at path; if so, the file is not rewritten
+ so that the modification date is preserved.
+ If needed, the directory tree for the given path will be built.
+ """
+ path = fsdecode(path)
+ if self._havePreviousFile:
+ if self.fs.isfile(path) and data == self.fs.readbytes(path):
+ return
+ try:
+ self.fs.writebytes(path, data)
+ except fs.errors.FileExpected:
+ raise UFOLibError("A directory exists at '%s'" % path)
+ except fs.errors.ResourceNotFound:
+ self.fs.makedirs(fs.path.dirname(path), recreate=True)
+ self.fs.writebytes(path, data)
+
+ def getFileObjectForPath(self, path, mode="w", encoding=None):
+ """
+ Returns a file (or file-like) object for the
+ file at the given path. The path must be relative
+ to the UFO path. Returns None if the file does
+ not exist and the mode is "r" or "rb.
+ An encoding may be passed if the file is opened in text mode.
+
+ Note: The caller is responsible for closing the open file.
+ """
+ path = fsdecode(path)
+ try:
+ return self.fs.open(path, mode=mode, encoding=encoding)
+ except fs.errors.ResourceNotFound as e:
+ m = mode[0]
+ if m == "r":
+ # XXX I think we should just let it raise. The docstring,
+ # however, says that this returns None if mode is 'r'
+ return None
+ elif m == "w" or m == "a" or m == "x":
+ self.fs.makedirs(fs.path.dirname(path), recreate=True)
+ return self.fs.open(path, mode=mode, encoding=encoding)
+ except fs.errors.ResourceError as e:
+ return UFOLibError(f"unable to open '{path}' on {self.fs}: {e}")
+
+ def removePath(self, path, force=False, removeEmptyParents=True):
+ """
+ Remove the file (or directory) at path. The path
+ must be relative to the UFO.
+ Raises UFOLibError if the path doesn't exist.
+ If force=True, ignore non-existent paths.
+ If the directory where 'path' is located becomes empty, it will
+ be automatically removed, unless 'removeEmptyParents' is False.
+ """
+ path = fsdecode(path)
+ try:
+ self.fs.remove(path)
+ except fs.errors.FileExpected:
+ self.fs.removetree(path)
+ except fs.errors.ResourceNotFound:
+ if not force:
+ raise UFOLibError(f"'{path}' does not exist on {self.fs}")
+ if removeEmptyParents:
+ parent = fs.path.dirname(path)
+ if parent:
+ fs.tools.remove_empty(self.fs, parent)
+
+ # alias kept for backward compatibility with old API
+ removeFileForPath = removePath
+
+ # UFO mod time
+
+ def setModificationTime(self):
+ """
+ Set the UFO modification time to the current time.
+ This is never called automatically. It is up to the
+ caller to call this when finished working on the UFO.
+ """
+ path = self._path
+ if path is not None and os.path.exists(path):
+ try:
+ # this may fail on some filesystems (e.g. SMB servers)
+ os.utime(path, None)
+ except OSError as e:
+ logger.warning("Failed to set modified time: %s", e)
+
+ # metainfo.plist
+
+ def _writeMetaInfo(self):
+ metaInfo = dict(
+ creator=self._fileCreator,
+ formatVersion=self._formatVersion.major,
+ )
+ if self._formatVersion.minor != 0:
+ metaInfo["formatVersionMinor"] = self._formatVersion.minor
+ self._writePlist(METAINFO_FILENAME, metaInfo)
+
+ # groups.plist
+
+ def setKerningGroupConversionRenameMaps(self, maps):
+ """
+ Set maps defining the renaming that should be done
+ when writing groups and kerning in UFO 1 and UFO 2.
+ This will effectively undo the conversion done when
+ UFOReader reads this data. The dictionary should have
+ this form::
+
+ {
+ "side1" : {"group name to use when writing" : "group name in data"},
+ "side2" : {"group name to use when writing" : "group name in data"}
+ }
+
+ This is the same form returned by UFOReader's
+ getKerningGroupConversionRenameMaps method.
+ """
+ if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:
+ return # XXX raise an error here
+ # flip the dictionaries
+ remap = {}
+ for side in ("side1", "side2"):
+ for writeName, dataName in list(maps[side].items()):
+ remap[dataName] = writeName
+ self._downConversionKerningData = dict(groupRenameMap=remap)
+
+ def writeGroups(self, groups, validate=None):
+ """
+ Write groups.plist. This method requires a
+ dict of glyph groups as an argument.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ # validate the data structure
+ if validate:
+ valid, message = groupsValidator(groups)
+ if not valid:
+ raise UFOLibError(message)
+ # down convert
+ if (
+ self._formatVersion < UFOFormatVersion.FORMAT_3_0
+ and self._downConversionKerningData is not None
+ ):
+ remap = self._downConversionKerningData["groupRenameMap"]
+ remappedGroups = {}
+ # there are some edge cases here that are ignored:
+ # 1. if a group is being renamed to a name that
+ # already exists, the existing group is always
+ # overwritten. (this is why there are two loops
+ # below.) there doesn't seem to be a logical
+ # solution to groups mismatching and overwriting
+        #    with the specified group seems like a better
+ # solution than throwing an error.
+ # 2. if side 1 and side 2 groups are being renamed
+ # to the same group name there is no check to
+ # ensure that the contents are identical. that
+ # is left up to the caller.
+ for name, contents in list(groups.items()):
+ if name in remap:
+ continue
+ remappedGroups[name] = contents
+ for name, contents in list(groups.items()):
+ if name not in remap:
+ continue
+ name = remap[name]
+ remappedGroups[name] = contents
+ groups = remappedGroups
+ # pack and write
+ groupsNew = {}
+ for key, value in groups.items():
+ groupsNew[key] = list(value)
+ if groupsNew:
+ self._writePlist(GROUPS_FILENAME, groupsNew)
+ elif self._havePreviousFile:
+ self.removePath(GROUPS_FILENAME, force=True, removeEmptyParents=False)
+
+ # fontinfo.plist
+
+ def writeInfo(self, info, validate=None):
+ """
+ Write info.plist. This method requires an object
+ that supports getting attributes that follow the
+ fontinfo.plist version 2 specification. Attributes
+ will be taken from the given object and written
+ into the file.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ # gather version 3 data
+ infoData = {}
+ for attr in list(fontInfoAttributesVersion3ValueData.keys()):
+ if hasattr(info, attr):
+ try:
+ value = getattr(info, attr)
+ except AttributeError:
+ raise UFOLibError(
+ "The supplied info object does not support getting a necessary attribute (%s)."
+ % attr
+ )
+ if value is None:
+ continue
+ infoData[attr] = value
+ # down convert data if necessary and validate
+ if self._formatVersion == UFOFormatVersion.FORMAT_3_0:
+ if validate:
+ infoData = validateInfoVersion3Data(infoData)
+ elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:
+ infoData = _convertFontInfoDataVersion3ToVersion2(infoData)
+ if validate:
+ infoData = validateInfoVersion2Data(infoData)
+ elif self._formatVersion == UFOFormatVersion.FORMAT_1_0:
+ infoData = _convertFontInfoDataVersion3ToVersion2(infoData)
+ if validate:
+ infoData = validateInfoVersion2Data(infoData)
+ infoData = _convertFontInfoDataVersion2ToVersion1(infoData)
+ # write file if there is anything to write
+ if infoData:
+ self._writePlist(FONTINFO_FILENAME, infoData)
+
+ # kerning.plist
+
+ def writeKerning(self, kerning, validate=None):
+ """
+ Write kerning.plist. This method requires a
+ dict of kerning pairs as an argument.
+
+ This performs basic structural validation of the kerning,
+ but it does not check for compliance with the spec in
+ regards to conflicting pairs. The assumption is that the
+ kerning data being passed is standards compliant.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ # validate the data structure
+ if validate:
+ invalidFormatMessage = "The kerning is not properly formatted."
+ if not isDictEnough(kerning):
+ raise UFOLibError(invalidFormatMessage)
+ for pair, value in list(kerning.items()):
+ if not isinstance(pair, (list, tuple)):
+ raise UFOLibError(invalidFormatMessage)
+ if not len(pair) == 2:
+ raise UFOLibError(invalidFormatMessage)
+ if not isinstance(pair[0], str):
+ raise UFOLibError(invalidFormatMessage)
+ if not isinstance(pair[1], str):
+ raise UFOLibError(invalidFormatMessage)
+ if not isinstance(value, numberTypes):
+ raise UFOLibError(invalidFormatMessage)
+ # down convert
+ if (
+ self._formatVersion < UFOFormatVersion.FORMAT_3_0
+ and self._downConversionKerningData is not None
+ ):
+ remap = self._downConversionKerningData["groupRenameMap"]
+ remappedKerning = {}
+ for (side1, side2), value in list(kerning.items()):
+ side1 = remap.get(side1, side1)
+ side2 = remap.get(side2, side2)
+ remappedKerning[side1, side2] = value
+ kerning = remappedKerning
+ # pack and write
+ kerningDict = {}
+ for left, right in kerning.keys():
+ value = kerning[left, right]
+ if left not in kerningDict:
+ kerningDict[left] = {}
+ kerningDict[left][right] = value
+ if kerningDict:
+ self._writePlist(KERNING_FILENAME, kerningDict)
+ elif self._havePreviousFile:
+ self.removePath(KERNING_FILENAME, force=True, removeEmptyParents=False)
+
+ # lib.plist
+
+ def writeLib(self, libDict, validate=None):
+ """
+ Write lib.plist. This method requires a
+ lib dict as an argument.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's validate value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validate
+ if validate:
+ valid, message = fontLibValidator(libDict)
+ if not valid:
+ raise UFOLibError(message)
+ if libDict:
+ self._writePlist(LIB_FILENAME, libDict)
+ elif self._havePreviousFile:
+ self.removePath(LIB_FILENAME, force=True, removeEmptyParents=False)
+
+ # features.fea
+
+ def writeFeatures(self, features, validate=None):
+ """
+ Write features.fea. This method requires a
+ features string as an argument.
+ """
+ if validate is None:
+ validate = self._validate
+ if self._formatVersion == UFOFormatVersion.FORMAT_1_0:
+ raise UFOLibError("features.fea is not allowed in UFO Format Version 1.")
+ if validate:
+ if not isinstance(features, str):
+ raise UFOLibError("The features are not text.")
+ if features:
+ self.writeBytesToPath(FEATURES_FILENAME, features.encode("utf8"))
+ elif self._havePreviousFile:
+ self.removePath(FEATURES_FILENAME, force=True, removeEmptyParents=False)
+
+ # glyph sets & layers
+
+ def writeLayerContents(self, layerOrder=None, validate=None):
+ """
+ Write the layercontents.plist file. This method *must* be called
+ after all glyph sets have been written.
+ """
+ if validate is None:
+ validate = self._validate
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ return
+ if layerOrder is not None:
+ newOrder = []
+ for layerName in layerOrder:
+ if layerName is None:
+ layerName = DEFAULT_LAYER_NAME
+ newOrder.append(layerName)
+ layerOrder = newOrder
+ else:
+ layerOrder = list(self.layerContents.keys())
+ if validate and set(layerOrder) != set(self.layerContents.keys()):
+ raise UFOLibError(
+ "The layer order content does not match the glyph sets that have been created."
+ )
+ layerContents = [
+ (layerName, self.layerContents[layerName]) for layerName in layerOrder
+ ]
+ self._writePlist(LAYERCONTENTS_FILENAME, layerContents)
+
+ def _findDirectoryForLayerName(self, layerName):
+ foundDirectory = None
+ for existingLayerName, directoryName in list(self.layerContents.items()):
+ if layerName is None and directoryName == DEFAULT_GLYPHS_DIRNAME:
+ foundDirectory = directoryName
+ break
+ elif existingLayerName == layerName:
+ foundDirectory = directoryName
+ break
+ if not foundDirectory:
+ raise UFOLibError(
+ "Could not locate a glyph set directory for the layer named %s."
+ % layerName
+ )
+ return foundDirectory
+
+ def getGlyphSet(
+ self,
+ layerName=None,
+ defaultLayer=True,
+ glyphNameToFileNameFunc=None,
+ validateRead=None,
+ validateWrite=None,
+ expectContentsFile=False,
+ ):
+ """
+ Return the GlyphSet object associated with the
+ appropriate glyph directory in the .ufo.
+ If layerName is None, the default glyph set
+        will be used. The defaultLayer flag indicates
+ that the layer should be saved into the default
+ glyphs directory.
+
+ ``validateRead`` will validate the read data, by default it is set to the
+ class's validate value, can be overridden.
+        ``validateWrite`` will validate the written data, by default it is set to the
+ class's validate value, can be overridden.
+ ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is
+ not found on the glyph set file system. This should be set to ``True`` if you
+ are reading an existing UFO and ``False`` if you use ``getGlyphSet`` to create
+ a fresh glyph set.
+ """
+ if validateRead is None:
+ validateRead = self._validate
+ if validateWrite is None:
+ validateWrite = self._validate
+ # only default can be written in < 3
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0 and (
+ not defaultLayer or layerName is not None
+ ):
+ raise UFOLibError(
+ f"Only the default layer can be writen in UFO {self._formatVersion.major}."
+ )
+ # locate a layer name when None has been given
+ if layerName is None and defaultLayer:
+ for existingLayerName, directory in self.layerContents.items():
+ if directory == DEFAULT_GLYPHS_DIRNAME:
+ layerName = existingLayerName
+ if layerName is None:
+ layerName = DEFAULT_LAYER_NAME
+ elif layerName is None and not defaultLayer:
+ raise UFOLibError("A layer name must be provided for non-default layers.")
+ # move along to format specific writing
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ return self._getDefaultGlyphSet(
+ validateRead,
+ validateWrite,
+ glyphNameToFileNameFunc=glyphNameToFileNameFunc,
+ expectContentsFile=expectContentsFile,
+ )
+ elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:
+ return self._getGlyphSetFormatVersion3(
+ validateRead,
+ validateWrite,
+ layerName=layerName,
+ defaultLayer=defaultLayer,
+ glyphNameToFileNameFunc=glyphNameToFileNameFunc,
+ expectContentsFile=expectContentsFile,
+ )
+ else:
+ raise NotImplementedError(self._formatVersion)
+
+ def _getDefaultGlyphSet(
+ self,
+ validateRead,
+ validateWrite,
+ glyphNameToFileNameFunc=None,
+ expectContentsFile=False,
+ ):
+ from fontTools.ufoLib.glifLib import GlyphSet
+
+ glyphSubFS = self.fs.makedir(DEFAULT_GLYPHS_DIRNAME, recreate=True)
+ return GlyphSet(
+ glyphSubFS,
+ glyphNameToFileNameFunc=glyphNameToFileNameFunc,
+ ufoFormatVersion=self._formatVersion,
+ validateRead=validateRead,
+ validateWrite=validateWrite,
+ expectContentsFile=expectContentsFile,
+ )
+
+ def _getGlyphSetFormatVersion3(
+ self,
+ validateRead,
+ validateWrite,
+ layerName=None,
+ defaultLayer=True,
+ glyphNameToFileNameFunc=None,
+ expectContentsFile=False,
+ ):
+ from fontTools.ufoLib.glifLib import GlyphSet
+
+ # if the default flag is on, make sure that the default in the file
+ # matches the default being written. also make sure that this layer
+ # name is not already linked to a non-default layer.
+ if defaultLayer:
+ for existingLayerName, directory in self.layerContents.items():
+ if directory == DEFAULT_GLYPHS_DIRNAME:
+ if existingLayerName != layerName:
+ raise UFOLibError(
+ "Another layer ('%s') is already mapped to the default directory."
+ % existingLayerName
+ )
+ elif existingLayerName == layerName:
+ raise UFOLibError(
+ "The layer name is already mapped to a non-default layer."
+ )
+ # get an existing directory name
+ if layerName in self.layerContents:
+ directory = self.layerContents[layerName]
+ # get a new directory name
+ else:
+ if defaultLayer:
+ directory = DEFAULT_GLYPHS_DIRNAME
+ else:
+ # not caching this could be slightly expensive,
+ # but caching it will be cumbersome
+ existing = {d.lower() for d in self.layerContents.values()}
+ directory = userNameToFileName(
+ layerName, existing=existing, prefix="glyphs."
+ )
+ # make the directory
+ glyphSubFS = self.fs.makedir(directory, recreate=True)
+ # store the mapping
+ self.layerContents[layerName] = directory
+ # load the glyph set
+ return GlyphSet(
+ glyphSubFS,
+ glyphNameToFileNameFunc=glyphNameToFileNameFunc,
+ ufoFormatVersion=self._formatVersion,
+ validateRead=validateRead,
+ validateWrite=validateWrite,
+ expectContentsFile=expectContentsFile,
+ )
+
+ def renameGlyphSet(self, layerName, newLayerName, defaultLayer=False):
+ """
+ Rename a glyph set.
+
+ Note: if a GlyphSet object has already been retrieved for
+ layerName, it is up to the caller to inform that object that
+ the directory it represents has changed.
+ """
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ # ignore renaming glyph sets for UFO1 UFO2
+ # just write the data from the default layer
+ return
+ # the new and old names can be the same
+ # as long as the default is being switched
+ if layerName == newLayerName:
+ # if the default is off and the layer is already not the default, skip
+ if (
+ self.layerContents[layerName] != DEFAULT_GLYPHS_DIRNAME
+ and not defaultLayer
+ ):
+ return
+ # if the default is on and the layer is already the default, skip
+ if self.layerContents[layerName] == DEFAULT_GLYPHS_DIRNAME and defaultLayer:
+ return
+ else:
+ # make sure the new layer name doesn't already exist
+ if newLayerName is None:
+ newLayerName = DEFAULT_LAYER_NAME
+ if newLayerName in self.layerContents:
+ raise UFOLibError("A layer named %s already exists." % newLayerName)
+ # make sure the default layer doesn't already exist
+ if defaultLayer and DEFAULT_GLYPHS_DIRNAME in self.layerContents.values():
+ raise UFOLibError("A default layer already exists.")
+ # get the paths
+ oldDirectory = self._findDirectoryForLayerName(layerName)
+ if defaultLayer:
+ newDirectory = DEFAULT_GLYPHS_DIRNAME
+ else:
+ existing = {name.lower() for name in self.layerContents.values()}
+ newDirectory = userNameToFileName(
+ newLayerName, existing=existing, prefix="glyphs."
+ )
+ # update the internal mapping
+ del self.layerContents[layerName]
+ self.layerContents[newLayerName] = newDirectory
+ # do the file system copy
+ self.fs.movedir(oldDirectory, newDirectory, create=True)
+
+ def deleteGlyphSet(self, layerName):
+ """
+ Remove the glyph set matching layerName.
+ """
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ # ignore deleting glyph sets for UFO1 UFO2 as there are no layers
+ # just write the data from the default layer
+ return
+ foundDirectory = self._findDirectoryForLayerName(layerName)
+ self.removePath(foundDirectory, removeEmptyParents=False)
+ del self.layerContents[layerName]
+
+ def writeData(self, fileName, data):
+ """
+ Write data to fileName in the 'data' directory.
+ The data must be a bytes string.
+ """
+ self.writeBytesToPath(f"{DATA_DIRNAME}/{fsdecode(fileName)}", data)
+
+ def removeData(self, fileName):
+ """
+ Remove the file named fileName from the data directory.
+ """
+ self.removePath(f"{DATA_DIRNAME}/{fsdecode(fileName)}")
+
+ # /images
+
+ def writeImage(self, fileName, data, validate=None):
+ """
+ Write data to fileName in the images directory.
+ The data must be a valid PNG.
+ """
+ if validate is None:
+ validate = self._validate
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ raise UFOLibError(
+ f"Images are not allowed in UFO {self._formatVersion.major}."
+ )
+ fileName = fsdecode(fileName)
+ if validate:
+ valid, error = pngValidator(data=data)
+ if not valid:
+ raise UFOLibError(error)
+ self.writeBytesToPath(f"{IMAGES_DIRNAME}/{fileName}", data)
+
+ def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'?
+ """
+ Remove the file named fileName from the
+ images directory.
+ """
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ raise UFOLibError(
+ f"Images are not allowed in UFO {self._formatVersion.major}."
+ )
+ self.removePath(f"{IMAGES_DIRNAME}/{fsdecode(fileName)}")
+
+ def copyImageFromReader(self, reader, sourceFileName, destFileName, validate=None):
+ """
+ Copy the sourceFileName in the provided UFOReader to destFileName
+ in this writer. This uses the most memory efficient method possible
+ for copying the data possible.
+ """
+ if validate is None:
+ validate = self._validate
+ if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
+ raise UFOLibError(
+ f"Images are not allowed in UFO {self._formatVersion.major}."
+ )
+ sourcePath = f"{IMAGES_DIRNAME}/{fsdecode(sourceFileName)}"
+ destPath = f"{IMAGES_DIRNAME}/{fsdecode(destFileName)}"
+ self.copyFromReader(reader, sourcePath, destPath)
+
+ def close(self):
+ if self._havePreviousFile and self._fileStructure is UFOFileStructure.ZIP:
+ # if we are updating an existing zip file, we can now compress the
+ # contents of the temporary filesystem in the destination path
+ rootDir = os.path.splitext(os.path.basename(self._path))[0] + ".ufo"
+ with fs.zipfs.ZipFS(self._path, write=True, encoding="utf-8") as destFS:
+ fs.copy.copy_fs(self.fs, destFS.makedir(rootDir))
+ super().close()
# just an alias, makes it more explicit
@@ -1699,38 +1733,39 @@ UFOReaderWriter = UFOWriter
def _sniffFileStructure(ufo_path):
- """Return UFOFileStructure.ZIP if the UFO at path 'ufo_path' (str)
- is a zip file, else return UFOFileStructure.PACKAGE if 'ufo_path' is a
- directory.
- Raise UFOLibError if it is a file with unknown structure, or if the path
- does not exist.
- """
- if zipfile.is_zipfile(ufo_path):
- return UFOFileStructure.ZIP
- elif os.path.isdir(ufo_path):
- return UFOFileStructure.PACKAGE
- elif os.path.isfile(ufo_path):
- raise UFOLibError(
- "The specified UFO does not have a known structure: '%s'" % ufo_path
- )
- else:
- raise UFOLibError("No such file or directory: '%s'" % ufo_path)
+ """Return UFOFileStructure.ZIP if the UFO at path 'ufo_path' (str)
+ is a zip file, else return UFOFileStructure.PACKAGE if 'ufo_path' is a
+ directory.
+ Raise UFOLibError if it is a file with unknown structure, or if the path
+ does not exist.
+ """
+ if zipfile.is_zipfile(ufo_path):
+ return UFOFileStructure.ZIP
+ elif os.path.isdir(ufo_path):
+ return UFOFileStructure.PACKAGE
+ elif os.path.isfile(ufo_path):
+ raise UFOLibError(
+ "The specified UFO does not have a known structure: '%s'" % ufo_path
+ )
+ else:
+ raise UFOLibError("No such file or directory: '%s'" % ufo_path)
def makeUFOPath(path):
- """
- Return a .ufo pathname.
-
- >>> makeUFOPath("directory/something.ext") == (
- ... os.path.join('directory', 'something.ufo'))
- True
- >>> makeUFOPath("directory/something.another.thing.ext") == (
- ... os.path.join('directory', 'something.another.thing.ufo'))
- True
- """
- dir, name = os.path.split(path)
- name = ".".join([".".join(name.split(".")[:-1]), "ufo"])
- return os.path.join(dir, name)
+ """
+ Return a .ufo pathname.
+
+ >>> makeUFOPath("directory/something.ext") == (
+ ... os.path.join('directory', 'something.ufo'))
+ True
+ >>> makeUFOPath("directory/something.another.thing.ext") == (
+ ... os.path.join('directory', 'something.another.thing.ufo'))
+ True
+ """
+ dir, name = os.path.split(path)
+ name = ".".join([".".join(name.split(".")[:-1]), "ufo"])
+ return os.path.join(dir, name)
+
# ----------------------
# fontinfo.plist Support
@@ -1742,93 +1777,98 @@ def makeUFOPath(path):
# The version 1 spec was very loose and there were numerous
# cases of invalid values.
+
def validateFontInfoVersion2ValueForAttribute(attr, value):
- """
- This performs very basic validation of the value for attribute
- following the UFO 2 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the value
- is of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- dataValidationDict = fontInfoAttributesVersion2ValueData[attr]
- valueType = dataValidationDict.get("type")
- validator = dataValidationDict.get("valueValidator")
- valueOptions = dataValidationDict.get("valueOptions")
- # have specific options for the validator
- if valueOptions is not None:
- isValidValue = validator(value, valueOptions)
- # no specific options
- else:
- if validator == genericTypeValidator:
- isValidValue = validator(value, valueType)
- else:
- isValidValue = validator(value)
- return isValidValue
+ """
+ This performs very basic validation of the value for attribute
+ following the UFO 2 fontinfo.plist specification. The results
+    of this should not be interpreted as *correct* for the font
+ that they are part of. This merely indicates that the value
+ is of the proper type and, where the specification defines
+ a set range of possible values for an attribute, that the
+ value is in the accepted range.
+ """
+ dataValidationDict = fontInfoAttributesVersion2ValueData[attr]
+ valueType = dataValidationDict.get("type")
+ validator = dataValidationDict.get("valueValidator")
+ valueOptions = dataValidationDict.get("valueOptions")
+ # have specific options for the validator
+ if valueOptions is not None:
+ isValidValue = validator(value, valueOptions)
+ # no specific options
+ else:
+ if validator == genericTypeValidator:
+ isValidValue = validator(value, valueType)
+ else:
+ isValidValue = validator(value)
+ return isValidValue
+
def validateInfoVersion2Data(infoData):
- """
- This performs very basic validation of the value for infoData
- following the UFO 2 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the values
- are of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- validInfoData = {}
- for attr, value in list(infoData.items()):
- isValidValue = validateFontInfoVersion2ValueForAttribute(attr, value)
- if not isValidValue:
- raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
- else:
- validInfoData[attr] = value
- return validInfoData
+ """
+ This performs very basic validation of the value for infoData
+ following the UFO 2 fontinfo.plist specification. The results
+    of this should not be interpreted as *correct* for the font
+ that they are part of. This merely indicates that the values
+ are of the proper type and, where the specification defines
+ a set range of possible values for an attribute, that the
+ value is in the accepted range.
+ """
+ validInfoData = {}
+ for attr, value in list(infoData.items()):
+ isValidValue = validateFontInfoVersion2ValueForAttribute(attr, value)
+ if not isValidValue:
+ raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
+ else:
+ validInfoData[attr] = value
+ return validInfoData
+
def validateFontInfoVersion3ValueForAttribute(attr, value):
- """
- This performs very basic validation of the value for attribute
- following the UFO 3 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the value
- is of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- dataValidationDict = fontInfoAttributesVersion3ValueData[attr]
- valueType = dataValidationDict.get("type")
- validator = dataValidationDict.get("valueValidator")
- valueOptions = dataValidationDict.get("valueOptions")
- # have specific options for the validator
- if valueOptions is not None:
- isValidValue = validator(value, valueOptions)
- # no specific options
- else:
- if validator == genericTypeValidator:
- isValidValue = validator(value, valueType)
- else:
- isValidValue = validator(value)
- return isValidValue
+ """
+ This performs very basic validation of the value for attribute
+ following the UFO 3 fontinfo.plist specification. The results
+    of this should not be interpreted as *correct* for the font
+ that they are part of. This merely indicates that the value
+ is of the proper type and, where the specification defines
+ a set range of possible values for an attribute, that the
+ value is in the accepted range.
+ """
+ dataValidationDict = fontInfoAttributesVersion3ValueData[attr]
+ valueType = dataValidationDict.get("type")
+ validator = dataValidationDict.get("valueValidator")
+ valueOptions = dataValidationDict.get("valueOptions")
+ # have specific options for the validator
+ if valueOptions is not None:
+ isValidValue = validator(value, valueOptions)
+ # no specific options
+ else:
+ if validator == genericTypeValidator:
+ isValidValue = validator(value, valueType)
+ else:
+ isValidValue = validator(value)
+ return isValidValue
+
def validateInfoVersion3Data(infoData):
- """
- This performs very basic validation of the value for infoData
- following the UFO 3 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the values
- are of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- validInfoData = {}
- for attr, value in list(infoData.items()):
- isValidValue = validateFontInfoVersion3ValueForAttribute(attr, value)
- if not isValidValue:
- raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
- else:
- validInfoData[attr] = value
- return validInfoData
+ """
+ This performs very basic validation of the value for infoData
+ following the UFO 3 fontinfo.plist specification. The results
+ of this should not be interpretted as *correct* for the font
+ that they are part of. This merely indicates that the values
+ are of the proper type and, where the specification defines
+ a set range of possible values for an attribute, that the
+ value is in the accepted range.
+ """
+ validInfoData = {}
+ for attr, value in list(infoData.items()):
+ isValidValue = validateFontInfoVersion3ValueForAttribute(attr, value)
+ if not isValidValue:
+ raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
+ else:
+ validInfoData[attr] = value
+ return validInfoData
+
# Value Options
@@ -1844,264 +1884,346 @@ fontInfoOpenTypeOS2TypeOptions = [0, 1, 2, 3, 8, 9]
# fontinfo.plist.
fontInfoAttributesVersion1 = {
- "familyName",
- "styleName",
- "fullName",
- "fontName",
- "menuName",
- "fontStyle",
- "note",
- "versionMajor",
- "versionMinor",
- "year",
- "copyright",
- "notice",
- "trademark",
- "license",
- "licenseURL",
- "createdBy",
- "designer",
- "designerURL",
- "vendorURL",
- "unitsPerEm",
- "ascender",
- "descender",
- "capHeight",
- "xHeight",
- "defaultWidth",
- "slantAngle",
- "italicAngle",
- "widthName",
- "weightName",
- "weightValue",
- "fondName",
- "otFamilyName",
- "otStyleName",
- "otMacName",
- "msCharSet",
- "fondID",
- "uniqueID",
- "ttVendor",
- "ttUniqueID",
- "ttVersion",
+ "familyName",
+ "styleName",
+ "fullName",
+ "fontName",
+ "menuName",
+ "fontStyle",
+ "note",
+ "versionMajor",
+ "versionMinor",
+ "year",
+ "copyright",
+ "notice",
+ "trademark",
+ "license",
+ "licenseURL",
+ "createdBy",
+ "designer",
+ "designerURL",
+ "vendorURL",
+ "unitsPerEm",
+ "ascender",
+ "descender",
+ "capHeight",
+ "xHeight",
+ "defaultWidth",
+ "slantAngle",
+ "italicAngle",
+ "widthName",
+ "weightName",
+ "weightValue",
+ "fondName",
+ "otFamilyName",
+ "otStyleName",
+ "otMacName",
+ "msCharSet",
+ "fondID",
+ "uniqueID",
+ "ttVendor",
+ "ttUniqueID",
+ "ttVersion",
}
fontInfoAttributesVersion2ValueData = {
- "familyName" : dict(type=str),
- "styleName" : dict(type=str),
- "styleMapFamilyName" : dict(type=str),
- "styleMapStyleName" : dict(type=str, valueValidator=fontInfoStyleMapStyleNameValidator),
- "versionMajor" : dict(type=int),
- "versionMinor" : dict(type=int),
- "year" : dict(type=int),
- "copyright" : dict(type=str),
- "trademark" : dict(type=str),
- "unitsPerEm" : dict(type=(int, float)),
- "descender" : dict(type=(int, float)),
- "xHeight" : dict(type=(int, float)),
- "capHeight" : dict(type=(int, float)),
- "ascender" : dict(type=(int, float)),
- "italicAngle" : dict(type=(float, int)),
- "note" : dict(type=str),
- "openTypeHeadCreated" : dict(type=str, valueValidator=fontInfoOpenTypeHeadCreatedValidator),
- "openTypeHeadLowestRecPPEM" : dict(type=(int, float)),
- "openTypeHeadFlags" : dict(type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeHeadFlagsOptions),
- "openTypeHheaAscender" : dict(type=(int, float)),
- "openTypeHheaDescender" : dict(type=(int, float)),
- "openTypeHheaLineGap" : dict(type=(int, float)),
- "openTypeHheaCaretSlopeRise" : dict(type=int),
- "openTypeHheaCaretSlopeRun" : dict(type=int),
- "openTypeHheaCaretOffset" : dict(type=(int, float)),
- "openTypeNameDesigner" : dict(type=str),
- "openTypeNameDesignerURL" : dict(type=str),
- "openTypeNameManufacturer" : dict(type=str),
- "openTypeNameManufacturerURL" : dict(type=str),
- "openTypeNameLicense" : dict(type=str),
- "openTypeNameLicenseURL" : dict(type=str),
- "openTypeNameVersion" : dict(type=str),
- "openTypeNameUniqueID" : dict(type=str),
- "openTypeNameDescription" : dict(type=str),
- "openTypeNamePreferredFamilyName" : dict(type=str),
- "openTypeNamePreferredSubfamilyName" : dict(type=str),
- "openTypeNameCompatibleFullName" : dict(type=str),
- "openTypeNameSampleText" : dict(type=str),
- "openTypeNameWWSFamilyName" : dict(type=str),
- "openTypeNameWWSSubfamilyName" : dict(type=str),
- "openTypeOS2WidthClass" : dict(type=int, valueValidator=fontInfoOpenTypeOS2WidthClassValidator),
- "openTypeOS2WeightClass" : dict(type=int, valueValidator=fontInfoOpenTypeOS2WeightClassValidator),
- "openTypeOS2Selection" : dict(type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2SelectionOptions),
- "openTypeOS2VendorID" : dict(type=str),
- "openTypeOS2Panose" : dict(type="integerList", valueValidator=fontInfoVersion2OpenTypeOS2PanoseValidator),
- "openTypeOS2FamilyClass" : dict(type="integerList", valueValidator=fontInfoOpenTypeOS2FamilyClassValidator),
- "openTypeOS2UnicodeRanges" : dict(type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2UnicodeRangesOptions),
- "openTypeOS2CodePageRanges" : dict(type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2CodePageRangesOptions),
- "openTypeOS2TypoAscender" : dict(type=(int, float)),
- "openTypeOS2TypoDescender" : dict(type=(int, float)),
- "openTypeOS2TypoLineGap" : dict(type=(int, float)),
- "openTypeOS2WinAscent" : dict(type=(int, float)),
- "openTypeOS2WinDescent" : dict(type=(int, float)),
- "openTypeOS2Type" : dict(type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2TypeOptions),
- "openTypeOS2SubscriptXSize" : dict(type=(int, float)),
- "openTypeOS2SubscriptYSize" : dict(type=(int, float)),
- "openTypeOS2SubscriptXOffset" : dict(type=(int, float)),
- "openTypeOS2SubscriptYOffset" : dict(type=(int, float)),
- "openTypeOS2SuperscriptXSize" : dict(type=(int, float)),
- "openTypeOS2SuperscriptYSize" : dict(type=(int, float)),
- "openTypeOS2SuperscriptXOffset" : dict(type=(int, float)),
- "openTypeOS2SuperscriptYOffset" : dict(type=(int, float)),
- "openTypeOS2StrikeoutSize" : dict(type=(int, float)),
- "openTypeOS2StrikeoutPosition" : dict(type=(int, float)),
- "openTypeVheaVertTypoAscender" : dict(type=(int, float)),
- "openTypeVheaVertTypoDescender" : dict(type=(int, float)),
- "openTypeVheaVertTypoLineGap" : dict(type=(int, float)),
- "openTypeVheaCaretSlopeRise" : dict(type=int),
- "openTypeVheaCaretSlopeRun" : dict(type=int),
- "openTypeVheaCaretOffset" : dict(type=(int, float)),
- "postscriptFontName" : dict(type=str),
- "postscriptFullName" : dict(type=str),
- "postscriptSlantAngle" : dict(type=(float, int)),
- "postscriptUniqueID" : dict(type=int),
- "postscriptUnderlineThickness" : dict(type=(int, float)),
- "postscriptUnderlinePosition" : dict(type=(int, float)),
- "postscriptIsFixedPitch" : dict(type=bool),
- "postscriptBlueValues" : dict(type="integerList", valueValidator=fontInfoPostscriptBluesValidator),
- "postscriptOtherBlues" : dict(type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator),
- "postscriptFamilyBlues" : dict(type="integerList", valueValidator=fontInfoPostscriptBluesValidator),
- "postscriptFamilyOtherBlues" : dict(type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator),
- "postscriptStemSnapH" : dict(type="integerList", valueValidator=fontInfoPostscriptStemsValidator),
- "postscriptStemSnapV" : dict(type="integerList", valueValidator=fontInfoPostscriptStemsValidator),
- "postscriptBlueFuzz" : dict(type=(int, float)),
- "postscriptBlueShift" : dict(type=(int, float)),
- "postscriptBlueScale" : dict(type=(float, int)),
- "postscriptForceBold" : dict(type=bool),
- "postscriptDefaultWidthX" : dict(type=(int, float)),
- "postscriptNominalWidthX" : dict(type=(int, float)),
- "postscriptWeightName" : dict(type=str),
- "postscriptDefaultCharacter" : dict(type=str),
- "postscriptWindowsCharacterSet" : dict(type=int, valueValidator=fontInfoPostscriptWindowsCharacterSetValidator),
- "macintoshFONDFamilyID" : dict(type=int),
- "macintoshFONDName" : dict(type=str),
+ "familyName": dict(type=str),
+ "styleName": dict(type=str),
+ "styleMapFamilyName": dict(type=str),
+ "styleMapStyleName": dict(
+ type=str, valueValidator=fontInfoStyleMapStyleNameValidator
+ ),
+ "versionMajor": dict(type=int),
+ "versionMinor": dict(type=int),
+ "year": dict(type=int),
+ "copyright": dict(type=str),
+ "trademark": dict(type=str),
+ "unitsPerEm": dict(type=(int, float)),
+ "descender": dict(type=(int, float)),
+ "xHeight": dict(type=(int, float)),
+ "capHeight": dict(type=(int, float)),
+ "ascender": dict(type=(int, float)),
+ "italicAngle": dict(type=(float, int)),
+ "note": dict(type=str),
+ "openTypeHeadCreated": dict(
+ type=str, valueValidator=fontInfoOpenTypeHeadCreatedValidator
+ ),
+ "openTypeHeadLowestRecPPEM": dict(type=(int, float)),
+ "openTypeHeadFlags": dict(
+ type="integerList",
+ valueValidator=genericIntListValidator,
+ valueOptions=fontInfoOpenTypeHeadFlagsOptions,
+ ),
+ "openTypeHheaAscender": dict(type=(int, float)),
+ "openTypeHheaDescender": dict(type=(int, float)),
+ "openTypeHheaLineGap": dict(type=(int, float)),
+ "openTypeHheaCaretSlopeRise": dict(type=int),
+ "openTypeHheaCaretSlopeRun": dict(type=int),
+ "openTypeHheaCaretOffset": dict(type=(int, float)),
+ "openTypeNameDesigner": dict(type=str),
+ "openTypeNameDesignerURL": dict(type=str),
+ "openTypeNameManufacturer": dict(type=str),
+ "openTypeNameManufacturerURL": dict(type=str),
+ "openTypeNameLicense": dict(type=str),
+ "openTypeNameLicenseURL": dict(type=str),
+ "openTypeNameVersion": dict(type=str),
+ "openTypeNameUniqueID": dict(type=str),
+ "openTypeNameDescription": dict(type=str),
+ "openTypeNamePreferredFamilyName": dict(type=str),
+ "openTypeNamePreferredSubfamilyName": dict(type=str),
+ "openTypeNameCompatibleFullName": dict(type=str),
+ "openTypeNameSampleText": dict(type=str),
+ "openTypeNameWWSFamilyName": dict(type=str),
+ "openTypeNameWWSSubfamilyName": dict(type=str),
+ "openTypeOS2WidthClass": dict(
+ type=int, valueValidator=fontInfoOpenTypeOS2WidthClassValidator
+ ),
+ "openTypeOS2WeightClass": dict(
+ type=int, valueValidator=fontInfoOpenTypeOS2WeightClassValidator
+ ),
+ "openTypeOS2Selection": dict(
+ type="integerList",
+ valueValidator=genericIntListValidator,
+ valueOptions=fontInfoOpenTypeOS2SelectionOptions,
+ ),
+ "openTypeOS2VendorID": dict(type=str),
+ "openTypeOS2Panose": dict(
+ type="integerList", valueValidator=fontInfoVersion2OpenTypeOS2PanoseValidator
+ ),
+ "openTypeOS2FamilyClass": dict(
+ type="integerList", valueValidator=fontInfoOpenTypeOS2FamilyClassValidator
+ ),
+ "openTypeOS2UnicodeRanges": dict(
+ type="integerList",
+ valueValidator=genericIntListValidator,
+ valueOptions=fontInfoOpenTypeOS2UnicodeRangesOptions,
+ ),
+ "openTypeOS2CodePageRanges": dict(
+ type="integerList",
+ valueValidator=genericIntListValidator,
+ valueOptions=fontInfoOpenTypeOS2CodePageRangesOptions,
+ ),
+ "openTypeOS2TypoAscender": dict(type=(int, float)),
+ "openTypeOS2TypoDescender": dict(type=(int, float)),
+ "openTypeOS2TypoLineGap": dict(type=(int, float)),
+ "openTypeOS2WinAscent": dict(type=(int, float)),
+ "openTypeOS2WinDescent": dict(type=(int, float)),
+ "openTypeOS2Type": dict(
+ type="integerList",
+ valueValidator=genericIntListValidator,
+ valueOptions=fontInfoOpenTypeOS2TypeOptions,
+ ),
+ "openTypeOS2SubscriptXSize": dict(type=(int, float)),
+ "openTypeOS2SubscriptYSize": dict(type=(int, float)),
+ "openTypeOS2SubscriptXOffset": dict(type=(int, float)),
+ "openTypeOS2SubscriptYOffset": dict(type=(int, float)),
+ "openTypeOS2SuperscriptXSize": dict(type=(int, float)),
+ "openTypeOS2SuperscriptYSize": dict(type=(int, float)),
+ "openTypeOS2SuperscriptXOffset": dict(type=(int, float)),
+ "openTypeOS2SuperscriptYOffset": dict(type=(int, float)),
+ "openTypeOS2StrikeoutSize": dict(type=(int, float)),
+ "openTypeOS2StrikeoutPosition": dict(type=(int, float)),
+ "openTypeVheaVertTypoAscender": dict(type=(int, float)),
+ "openTypeVheaVertTypoDescender": dict(type=(int, float)),
+ "openTypeVheaVertTypoLineGap": dict(type=(int, float)),
+ "openTypeVheaCaretSlopeRise": dict(type=int),
+ "openTypeVheaCaretSlopeRun": dict(type=int),
+ "openTypeVheaCaretOffset": dict(type=(int, float)),
+ "postscriptFontName": dict(type=str),
+ "postscriptFullName": dict(type=str),
+ "postscriptSlantAngle": dict(type=(float, int)),
+ "postscriptUniqueID": dict(type=int),
+ "postscriptUnderlineThickness": dict(type=(int, float)),
+ "postscriptUnderlinePosition": dict(type=(int, float)),
+ "postscriptIsFixedPitch": dict(type=bool),
+ "postscriptBlueValues": dict(
+ type="integerList", valueValidator=fontInfoPostscriptBluesValidator
+ ),
+ "postscriptOtherBlues": dict(
+ type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator
+ ),
+ "postscriptFamilyBlues": dict(
+ type="integerList", valueValidator=fontInfoPostscriptBluesValidator
+ ),
+ "postscriptFamilyOtherBlues": dict(
+ type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator
+ ),
+ "postscriptStemSnapH": dict(
+ type="integerList", valueValidator=fontInfoPostscriptStemsValidator
+ ),
+ "postscriptStemSnapV": dict(
+ type="integerList", valueValidator=fontInfoPostscriptStemsValidator
+ ),
+ "postscriptBlueFuzz": dict(type=(int, float)),
+ "postscriptBlueShift": dict(type=(int, float)),
+ "postscriptBlueScale": dict(type=(float, int)),
+ "postscriptForceBold": dict(type=bool),
+ "postscriptDefaultWidthX": dict(type=(int, float)),
+ "postscriptNominalWidthX": dict(type=(int, float)),
+ "postscriptWeightName": dict(type=str),
+ "postscriptDefaultCharacter": dict(type=str),
+ "postscriptWindowsCharacterSet": dict(
+ type=int, valueValidator=fontInfoPostscriptWindowsCharacterSetValidator
+ ),
+ "macintoshFONDFamilyID": dict(type=int),
+ "macintoshFONDName": dict(type=str),
}
fontInfoAttributesVersion2 = set(fontInfoAttributesVersion2ValueData.keys())
fontInfoAttributesVersion3ValueData = deepcopy(fontInfoAttributesVersion2ValueData)
-fontInfoAttributesVersion3ValueData.update({
- "versionMinor" : dict(type=int, valueValidator=genericNonNegativeIntValidator),
- "unitsPerEm" : dict(type=(int, float), valueValidator=genericNonNegativeNumberValidator),
- "openTypeHeadLowestRecPPEM" : dict(type=int, valueValidator=genericNonNegativeNumberValidator),
- "openTypeHheaAscender" : dict(type=int),
- "openTypeHheaDescender" : dict(type=int),
- "openTypeHheaLineGap" : dict(type=int),
- "openTypeHheaCaretOffset" : dict(type=int),
- "openTypeOS2Panose" : dict(type="integerList", valueValidator=fontInfoVersion3OpenTypeOS2PanoseValidator),
- "openTypeOS2TypoAscender" : dict(type=int),
- "openTypeOS2TypoDescender" : dict(type=int),
- "openTypeOS2TypoLineGap" : dict(type=int),
- "openTypeOS2WinAscent" : dict(type=int, valueValidator=genericNonNegativeNumberValidator),
- "openTypeOS2WinDescent" : dict(type=int, valueValidator=genericNonNegativeNumberValidator),
- "openTypeOS2SubscriptXSize" : dict(type=int),
- "openTypeOS2SubscriptYSize" : dict(type=int),
- "openTypeOS2SubscriptXOffset" : dict(type=int),
- "openTypeOS2SubscriptYOffset" : dict(type=int),
- "openTypeOS2SuperscriptXSize" : dict(type=int),
- "openTypeOS2SuperscriptYSize" : dict(type=int),
- "openTypeOS2SuperscriptXOffset" : dict(type=int),
- "openTypeOS2SuperscriptYOffset" : dict(type=int),
- "openTypeOS2StrikeoutSize" : dict(type=int),
- "openTypeOS2StrikeoutPosition" : dict(type=int),
- "openTypeGaspRangeRecords" : dict(type="dictList", valueValidator=fontInfoOpenTypeGaspRangeRecordsValidator),
- "openTypeNameRecords" : dict(type="dictList", valueValidator=fontInfoOpenTypeNameRecordsValidator),
- "openTypeVheaVertTypoAscender" : dict(type=int),
- "openTypeVheaVertTypoDescender" : dict(type=int),
- "openTypeVheaVertTypoLineGap" : dict(type=int),
- "openTypeVheaCaretOffset" : dict(type=int),
- "woffMajorVersion" : dict(type=int, valueValidator=genericNonNegativeIntValidator),
- "woffMinorVersion" : dict(type=int, valueValidator=genericNonNegativeIntValidator),
- "woffMetadataUniqueID" : dict(type=dict, valueValidator=fontInfoWOFFMetadataUniqueIDValidator),
- "woffMetadataVendor" : dict(type=dict, valueValidator=fontInfoWOFFMetadataVendorValidator),
- "woffMetadataCredits" : dict(type=dict, valueValidator=fontInfoWOFFMetadataCreditsValidator),
- "woffMetadataDescription" : dict(type=dict, valueValidator=fontInfoWOFFMetadataDescriptionValidator),
- "woffMetadataLicense" : dict(type=dict, valueValidator=fontInfoWOFFMetadataLicenseValidator),
- "woffMetadataCopyright" : dict(type=dict, valueValidator=fontInfoWOFFMetadataCopyrightValidator),
- "woffMetadataTrademark" : dict(type=dict, valueValidator=fontInfoWOFFMetadataTrademarkValidator),
- "woffMetadataLicensee" : dict(type=dict, valueValidator=fontInfoWOFFMetadataLicenseeValidator),
- "woffMetadataExtensions" : dict(type=list, valueValidator=fontInfoWOFFMetadataExtensionsValidator),
- "guidelines" : dict(type=list, valueValidator=guidelinesValidator)
-})
+fontInfoAttributesVersion3ValueData.update(
+ {
+ "versionMinor": dict(type=int, valueValidator=genericNonNegativeIntValidator),
+ "unitsPerEm": dict(
+ type=(int, float), valueValidator=genericNonNegativeNumberValidator
+ ),
+ "openTypeHeadLowestRecPPEM": dict(
+ type=int, valueValidator=genericNonNegativeNumberValidator
+ ),
+ "openTypeHheaAscender": dict(type=int),
+ "openTypeHheaDescender": dict(type=int),
+ "openTypeHheaLineGap": dict(type=int),
+ "openTypeHheaCaretOffset": dict(type=int),
+ "openTypeOS2Panose": dict(
+ type="integerList",
+ valueValidator=fontInfoVersion3OpenTypeOS2PanoseValidator,
+ ),
+ "openTypeOS2TypoAscender": dict(type=int),
+ "openTypeOS2TypoDescender": dict(type=int),
+ "openTypeOS2TypoLineGap": dict(type=int),
+ "openTypeOS2WinAscent": dict(
+ type=int, valueValidator=genericNonNegativeNumberValidator
+ ),
+ "openTypeOS2WinDescent": dict(
+ type=int, valueValidator=genericNonNegativeNumberValidator
+ ),
+ "openTypeOS2SubscriptXSize": dict(type=int),
+ "openTypeOS2SubscriptYSize": dict(type=int),
+ "openTypeOS2SubscriptXOffset": dict(type=int),
+ "openTypeOS2SubscriptYOffset": dict(type=int),
+ "openTypeOS2SuperscriptXSize": dict(type=int),
+ "openTypeOS2SuperscriptYSize": dict(type=int),
+ "openTypeOS2SuperscriptXOffset": dict(type=int),
+ "openTypeOS2SuperscriptYOffset": dict(type=int),
+ "openTypeOS2StrikeoutSize": dict(type=int),
+ "openTypeOS2StrikeoutPosition": dict(type=int),
+ "openTypeGaspRangeRecords": dict(
+ type="dictList", valueValidator=fontInfoOpenTypeGaspRangeRecordsValidator
+ ),
+ "openTypeNameRecords": dict(
+ type="dictList", valueValidator=fontInfoOpenTypeNameRecordsValidator
+ ),
+ "openTypeVheaVertTypoAscender": dict(type=int),
+ "openTypeVheaVertTypoDescender": dict(type=int),
+ "openTypeVheaVertTypoLineGap": dict(type=int),
+ "openTypeVheaCaretOffset": dict(type=int),
+ "woffMajorVersion": dict(
+ type=int, valueValidator=genericNonNegativeIntValidator
+ ),
+ "woffMinorVersion": dict(
+ type=int, valueValidator=genericNonNegativeIntValidator
+ ),
+ "woffMetadataUniqueID": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataUniqueIDValidator
+ ),
+ "woffMetadataVendor": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataVendorValidator
+ ),
+ "woffMetadataCredits": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataCreditsValidator
+ ),
+ "woffMetadataDescription": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataDescriptionValidator
+ ),
+ "woffMetadataLicense": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataLicenseValidator
+ ),
+ "woffMetadataCopyright": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataCopyrightValidator
+ ),
+ "woffMetadataTrademark": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataTrademarkValidator
+ ),
+ "woffMetadataLicensee": dict(
+ type=dict, valueValidator=fontInfoWOFFMetadataLicenseeValidator
+ ),
+ "woffMetadataExtensions": dict(
+ type=list, valueValidator=fontInfoWOFFMetadataExtensionsValidator
+ ),
+ "guidelines": dict(type=list, valueValidator=guidelinesValidator),
+ }
+)
fontInfoAttributesVersion3 = set(fontInfoAttributesVersion3ValueData.keys())
# insert the type validator for all attrs that
# have no defined validator.
for attr, dataDict in list(fontInfoAttributesVersion2ValueData.items()):
- if "valueValidator" not in dataDict:
- dataDict["valueValidator"] = genericTypeValidator
+ if "valueValidator" not in dataDict:
+ dataDict["valueValidator"] = genericTypeValidator
for attr, dataDict in list(fontInfoAttributesVersion3ValueData.items()):
- if "valueValidator" not in dataDict:
- dataDict["valueValidator"] = genericTypeValidator
+ if "valueValidator" not in dataDict:
+ dataDict["valueValidator"] = genericTypeValidator
# Version Conversion Support
# These are used from converting from version 1
# to version 2 or vice-versa.
+
def _flipDict(d):
- flipped = {}
- for key, value in list(d.items()):
- flipped[value] = key
- return flipped
+ flipped = {}
+ for key, value in list(d.items()):
+ flipped[value] = key
+ return flipped
+
fontInfoAttributesVersion1To2 = {
- "menuName" : "styleMapFamilyName",
- "designer" : "openTypeNameDesigner",
- "designerURL" : "openTypeNameDesignerURL",
- "createdBy" : "openTypeNameManufacturer",
- "vendorURL" : "openTypeNameManufacturerURL",
- "license" : "openTypeNameLicense",
- "licenseURL" : "openTypeNameLicenseURL",
- "ttVersion" : "openTypeNameVersion",
- "ttUniqueID" : "openTypeNameUniqueID",
- "notice" : "openTypeNameDescription",
- "otFamilyName" : "openTypeNamePreferredFamilyName",
- "otStyleName" : "openTypeNamePreferredSubfamilyName",
- "otMacName" : "openTypeNameCompatibleFullName",
- "weightName" : "postscriptWeightName",
- "weightValue" : "openTypeOS2WeightClass",
- "ttVendor" : "openTypeOS2VendorID",
- "uniqueID" : "postscriptUniqueID",
- "fontName" : "postscriptFontName",
- "fondID" : "macintoshFONDFamilyID",
- "fondName" : "macintoshFONDName",
- "defaultWidth" : "postscriptDefaultWidthX",
- "slantAngle" : "postscriptSlantAngle",
- "fullName" : "postscriptFullName",
- # require special value conversion
- "fontStyle" : "styleMapStyleName",
- "widthName" : "openTypeOS2WidthClass",
- "msCharSet" : "postscriptWindowsCharacterSet"
+ "menuName": "styleMapFamilyName",
+ "designer": "openTypeNameDesigner",
+ "designerURL": "openTypeNameDesignerURL",
+ "createdBy": "openTypeNameManufacturer",
+ "vendorURL": "openTypeNameManufacturerURL",
+ "license": "openTypeNameLicense",
+ "licenseURL": "openTypeNameLicenseURL",
+ "ttVersion": "openTypeNameVersion",
+ "ttUniqueID": "openTypeNameUniqueID",
+ "notice": "openTypeNameDescription",
+ "otFamilyName": "openTypeNamePreferredFamilyName",
+ "otStyleName": "openTypeNamePreferredSubfamilyName",
+ "otMacName": "openTypeNameCompatibleFullName",
+ "weightName": "postscriptWeightName",
+ "weightValue": "openTypeOS2WeightClass",
+ "ttVendor": "openTypeOS2VendorID",
+ "uniqueID": "postscriptUniqueID",
+ "fontName": "postscriptFontName",
+ "fondID": "macintoshFONDFamilyID",
+ "fondName": "macintoshFONDName",
+ "defaultWidth": "postscriptDefaultWidthX",
+ "slantAngle": "postscriptSlantAngle",
+ "fullName": "postscriptFullName",
+ # require special value conversion
+ "fontStyle": "styleMapStyleName",
+ "widthName": "openTypeOS2WidthClass",
+ "msCharSet": "postscriptWindowsCharacterSet",
}
fontInfoAttributesVersion2To1 = _flipDict(fontInfoAttributesVersion1To2)
deprecatedFontInfoAttributesVersion2 = set(fontInfoAttributesVersion1To2.keys())
-_fontStyle1To2 = {
- 64 : "regular",
- 1 : "italic",
- 32 : "bold",
- 33 : "bold italic"
-}
+_fontStyle1To2 = {64: "regular", 1: "italic", 32: "bold", 33: "bold italic"}
_fontStyle2To1 = _flipDict(_fontStyle1To2)
# Some UFO 1 files have 0
_fontStyle1To2[0] = "regular"
_widthName1To2 = {
- "Ultra-condensed" : 1,
- "Extra-condensed" : 2,
- "Condensed" : 3,
- "Semi-condensed" : 4,
- "Medium (normal)" : 5,
- "Semi-expanded" : 6,
- "Expanded" : 7,
- "Extra-expanded" : 8,
- "Ultra-expanded" : 9
+ "Ultra-condensed": 1,
+ "Extra-condensed": 2,
+ "Condensed": 3,
+ "Semi-condensed": 4,
+ "Medium (normal)": 5,
+ "Semi-expanded": 6,
+ "Expanded": 7,
+ "Extra-expanded": 8,
+ "Ultra-expanded": 9,
}
_widthName2To1 = _flipDict(_widthName1To2)
# FontLab's default width value is "Normal".
@@ -2116,198 +2238,227 @@ _widthName1To2["medium"] = 5
_widthName1To2["Medium"] = 5
_msCharSet1To2 = {
- 0 : 1,
- 1 : 2,
- 2 : 3,
- 77 : 4,
- 128 : 5,
- 129 : 6,
- 130 : 7,
- 134 : 8,
- 136 : 9,
- 161 : 10,
- 162 : 11,
- 163 : 12,
- 177 : 13,
- 178 : 14,
- 186 : 15,
- 200 : 16,
- 204 : 17,
- 222 : 18,
- 238 : 19,
- 255 : 20
+ 0: 1,
+ 1: 2,
+ 2: 3,
+ 77: 4,
+ 128: 5,
+ 129: 6,
+ 130: 7,
+ 134: 8,
+ 136: 9,
+ 161: 10,
+ 162: 11,
+ 163: 12,
+ 177: 13,
+ 178: 14,
+ 186: 15,
+ 200: 16,
+ 204: 17,
+ 222: 18,
+ 238: 19,
+ 255: 20,
}
_msCharSet2To1 = _flipDict(_msCharSet1To2)
# 1 <-> 2
+
def convertFontInfoValueForAttributeFromVersion1ToVersion2(attr, value):
- """
- Convert value from version 1 to version 2 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- # convert floats to ints if possible
- if isinstance(value, float):
- if int(value) == value:
- value = int(value)
- if value is not None:
- if attr == "fontStyle":
- v = _fontStyle1To2.get(value)
- if v is None:
- raise UFOLibError(f"Cannot convert value ({value!r}) for attribute {attr}.")
- value = v
- elif attr == "widthName":
- v = _widthName1To2.get(value)
- if v is None:
- raise UFOLibError(f"Cannot convert value ({value!r}) for attribute {attr}.")
- value = v
- elif attr == "msCharSet":
- v = _msCharSet1To2.get(value)
- if v is None:
- raise UFOLibError(f"Cannot convert value ({value!r}) for attribute {attr}.")
- value = v
- attr = fontInfoAttributesVersion1To2.get(attr, attr)
- return attr, value
+ """
+ Convert value from version 1 to version 2 format.
+ Returns the new attribute name and the converted value.
+ If the value is None, None will be returned for the new value.
+ """
+ # convert floats to ints if possible
+ if isinstance(value, float):
+ if int(value) == value:
+ value = int(value)
+ if value is not None:
+ if attr == "fontStyle":
+ v = _fontStyle1To2.get(value)
+ if v is None:
+ raise UFOLibError(
+ f"Cannot convert value ({value!r}) for attribute {attr}."
+ )
+ value = v
+ elif attr == "widthName":
+ v = _widthName1To2.get(value)
+ if v is None:
+ raise UFOLibError(
+ f"Cannot convert value ({value!r}) for attribute {attr}."
+ )
+ value = v
+ elif attr == "msCharSet":
+ v = _msCharSet1To2.get(value)
+ if v is None:
+ raise UFOLibError(
+ f"Cannot convert value ({value!r}) for attribute {attr}."
+ )
+ value = v
+ attr = fontInfoAttributesVersion1To2.get(attr, attr)
+ return attr, value
+
def convertFontInfoValueForAttributeFromVersion2ToVersion1(attr, value):
- """
- Convert value from version 2 to version 1 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- if value is not None:
- if attr == "styleMapStyleName":
- value = _fontStyle2To1.get(value)
- elif attr == "openTypeOS2WidthClass":
- value = _widthName2To1.get(value)
- elif attr == "postscriptWindowsCharacterSet":
- value = _msCharSet2To1.get(value)
- attr = fontInfoAttributesVersion2To1.get(attr, attr)
- return attr, value
+ """
+ Convert value from version 2 to version 1 format.
+ Returns the new attribute name and the converted value.
+ If the value is None, None will be returned for the new value.
+ """
+ if value is not None:
+ if attr == "styleMapStyleName":
+ value = _fontStyle2To1.get(value)
+ elif attr == "openTypeOS2WidthClass":
+ value = _widthName2To1.get(value)
+ elif attr == "postscriptWindowsCharacterSet":
+ value = _msCharSet2To1.get(value)
+ attr = fontInfoAttributesVersion2To1.get(attr, attr)
+ return attr, value
+
def _convertFontInfoDataVersion1ToVersion2(data):
- converted = {}
- for attr, value in list(data.items()):
- # FontLab gives -1 for the weightValue
- # for fonts wil no defined value. Many
- # format version 1 UFOs will have this.
- if attr == "weightValue" and value == -1:
- continue
- newAttr, newValue = convertFontInfoValueForAttributeFromVersion1ToVersion2(attr, value)
- # skip if the attribute is not part of version 2
- if newAttr not in fontInfoAttributesVersion2:
- continue
- # catch values that can't be converted
- if value is None:
- raise UFOLibError(f"Cannot convert value ({value!r}) for attribute {newAttr}.")
- # store
- converted[newAttr] = newValue
- return converted
+ converted = {}
+ for attr, value in list(data.items()):
+ # FontLab gives -1 for the weightValue
+ # for fonts wil no defined value. Many
+ # format version 1 UFOs will have this.
+ if attr == "weightValue" and value == -1:
+ continue
+ newAttr, newValue = convertFontInfoValueForAttributeFromVersion1ToVersion2(
+ attr, value
+ )
+ # skip if the attribute is not part of version 2
+ if newAttr not in fontInfoAttributesVersion2:
+ continue
+ # catch values that can't be converted
+ if value is None:
+ raise UFOLibError(
+ f"Cannot convert value ({value!r}) for attribute {newAttr}."
+ )
+ # store
+ converted[newAttr] = newValue
+ return converted
+
def _convertFontInfoDataVersion2ToVersion1(data):
- converted = {}
- for attr, value in list(data.items()):
- newAttr, newValue = convertFontInfoValueForAttributeFromVersion2ToVersion1(attr, value)
- # only take attributes that are registered for version 1
- if newAttr not in fontInfoAttributesVersion1:
- continue
- # catch values that can't be converted
- if value is None:
- raise UFOLibError(f"Cannot convert value ({value!r}) for attribute {newAttr}.")
- # store
- converted[newAttr] = newValue
- return converted
+ converted = {}
+ for attr, value in list(data.items()):
+ newAttr, newValue = convertFontInfoValueForAttributeFromVersion2ToVersion1(
+ attr, value
+ )
+ # only take attributes that are registered for version 1
+ if newAttr not in fontInfoAttributesVersion1:
+ continue
+ # catch values that can't be converted
+ if value is None:
+ raise UFOLibError(
+ f"Cannot convert value ({value!r}) for attribute {newAttr}."
+ )
+ # store
+ converted[newAttr] = newValue
+ return converted
+
# 2 <-> 3
_ufo2To3NonNegativeInt = {
- "versionMinor",
- "openTypeHeadLowestRecPPEM",
- "openTypeOS2WinAscent",
- "openTypeOS2WinDescent"
+ "versionMinor",
+ "openTypeHeadLowestRecPPEM",
+ "openTypeOS2WinAscent",
+ "openTypeOS2WinDescent",
}
_ufo2To3NonNegativeIntOrFloat = {
- "unitsPerEm",
+ "unitsPerEm",
}
_ufo2To3FloatToInt = {
- "openTypeHeadLowestRecPPEM",
- "openTypeHheaAscender",
- "openTypeHheaDescender",
- "openTypeHheaLineGap",
- "openTypeHheaCaretOffset",
- "openTypeOS2TypoAscender",
- "openTypeOS2TypoDescender",
- "openTypeOS2TypoLineGap",
- "openTypeOS2WinAscent",
- "openTypeOS2WinDescent",
- "openTypeOS2SubscriptXSize",
- "openTypeOS2SubscriptYSize",
- "openTypeOS2SubscriptXOffset",
- "openTypeOS2SubscriptYOffset",
- "openTypeOS2SuperscriptXSize",
- "openTypeOS2SuperscriptYSize",
- "openTypeOS2SuperscriptXOffset",
- "openTypeOS2SuperscriptYOffset",
- "openTypeOS2StrikeoutSize",
- "openTypeOS2StrikeoutPosition",
- "openTypeVheaVertTypoAscender",
- "openTypeVheaVertTypoDescender",
- "openTypeVheaVertTypoLineGap",
- "openTypeVheaCaretOffset"
+ "openTypeHeadLowestRecPPEM",
+ "openTypeHheaAscender",
+ "openTypeHheaDescender",
+ "openTypeHheaLineGap",
+ "openTypeHheaCaretOffset",
+ "openTypeOS2TypoAscender",
+ "openTypeOS2TypoDescender",
+ "openTypeOS2TypoLineGap",
+ "openTypeOS2WinAscent",
+ "openTypeOS2WinDescent",
+ "openTypeOS2SubscriptXSize",
+ "openTypeOS2SubscriptYSize",
+ "openTypeOS2SubscriptXOffset",
+ "openTypeOS2SubscriptYOffset",
+ "openTypeOS2SuperscriptXSize",
+ "openTypeOS2SuperscriptYSize",
+ "openTypeOS2SuperscriptXOffset",
+ "openTypeOS2SuperscriptYOffset",
+ "openTypeOS2StrikeoutSize",
+ "openTypeOS2StrikeoutPosition",
+ "openTypeVheaVertTypoAscender",
+ "openTypeVheaVertTypoDescender",
+ "openTypeVheaVertTypoLineGap",
+ "openTypeVheaCaretOffset",
}
+
def convertFontInfoValueForAttributeFromVersion2ToVersion3(attr, value):
- """
- Convert value from version 2 to version 3 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- if attr in _ufo2To3FloatToInt:
- try:
- value = round(value)
- except (ValueError, TypeError):
- raise UFOLibError("Could not convert value for %s." % attr)
- if attr in _ufo2To3NonNegativeInt:
- try:
- value = int(abs(value))
- except (ValueError, TypeError):
- raise UFOLibError("Could not convert value for %s." % attr)
- elif attr in _ufo2To3NonNegativeIntOrFloat:
- try:
- v = float(abs(value))
- except (ValueError, TypeError):
- raise UFOLibError("Could not convert value for %s." % attr)
- if v == int(v):
- v = int(v)
- if v != value:
- value = v
- return attr, value
+ """
+ Convert value from version 2 to version 3 format.
+ Returns the new attribute name and the converted value.
+ If the value is None, None will be returned for the new value.
+ """
+ if attr in _ufo2To3FloatToInt:
+ try:
+ value = round(value)
+ except (ValueError, TypeError):
+ raise UFOLibError("Could not convert value for %s." % attr)
+ if attr in _ufo2To3NonNegativeInt:
+ try:
+ value = int(abs(value))
+ except (ValueError, TypeError):
+ raise UFOLibError("Could not convert value for %s." % attr)
+ elif attr in _ufo2To3NonNegativeIntOrFloat:
+ try:
+ v = float(abs(value))
+ except (ValueError, TypeError):
+ raise UFOLibError("Could not convert value for %s." % attr)
+ if v == int(v):
+ v = int(v)
+ if v != value:
+ value = v
+ return attr, value
+
def convertFontInfoValueForAttributeFromVersion3ToVersion2(attr, value):
- """
- Convert value from version 3 to version 2 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- return attr, value
+ """
+ Convert value from version 3 to version 2 format.
+ Returns the new attribute name and the converted value.
+ If the value is None, None will be returned for the new value.
+ """
+ return attr, value
+
def _convertFontInfoDataVersion3ToVersion2(data):
- converted = {}
- for attr, value in list(data.items()):
- newAttr, newValue = convertFontInfoValueForAttributeFromVersion3ToVersion2(attr, value)
- if newAttr not in fontInfoAttributesVersion2:
- continue
- converted[newAttr] = newValue
- return converted
+ converted = {}
+ for attr, value in list(data.items()):
+ newAttr, newValue = convertFontInfoValueForAttributeFromVersion3ToVersion2(
+ attr, value
+ )
+ if newAttr not in fontInfoAttributesVersion2:
+ continue
+ converted[newAttr] = newValue
+ return converted
+
def _convertFontInfoDataVersion2ToVersion3(data):
- converted = {}
- for attr, value in list(data.items()):
- attr, value = convertFontInfoValueForAttributeFromVersion2ToVersion3(attr, value)
- converted[attr] = value
- return converted
+ converted = {}
+ for attr, value in list(data.items()):
+ attr, value = convertFontInfoValueForAttributeFromVersion2ToVersion3(
+ attr, value
+ )
+ converted[attr] = value
+ return converted
+
if __name__ == "__main__":
- import doctest
- doctest.testmod()
+ import doctest
+
+ doctest.testmod()
diff --git a/Lib/fontTools/ufoLib/converters.py b/Lib/fontTools/ufoLib/converters.py
index 3b8112c3..daccf782 100644
--- a/Lib/fontTools/ufoLib/converters.py
+++ b/Lib/fontTools/ufoLib/converters.py
@@ -3,9 +3,9 @@ Conversion functions.
"""
-
# adapted from the UFO spec
+
def convertUFO1OrUFO2KerningToUFO3Kerning(kerning, groups, glyphSet=()):
# gather known kerning groups based on the prefixes
firstReferencedGroups, secondReferencedGroups = findKnownKerningGroups(groups)
@@ -62,6 +62,7 @@ def convertUFO1OrUFO2KerningToUFO3Kerning(kerning, groups, glyphSet=()):
# Return the kerning and the groups.
return newKerning, groups, dict(side1=firstRenamedGroups, side2=secondRenamedGroups)
+
def findKnownKerningGroups(groups):
"""
This will find kerning groups with known prefixes.
@@ -93,12 +94,8 @@ def findKnownKerningGroups(groups):
>>> sorted(second) == ['@MMK_R_1', '@MMK_R_2', '@MMK_R_3']
True
"""
- knownFirstGroupPrefixes = [
- "@MMK_L_"
- ]
- knownSecondGroupPrefixes = [
- "@MMK_R_"
- ]
+ knownFirstGroupPrefixes = ["@MMK_L_"]
+ knownSecondGroupPrefixes = ["@MMK_R_"]
firstGroups = set()
secondGroups = set()
for groupName in list(groups.keys()):
@@ -124,6 +121,7 @@ def makeUniqueGroupName(name, groupNames, counter=0):
# Otherwise send back the new name.
return newName
+
def test():
"""
No known prefixes.
@@ -330,6 +328,8 @@ def test():
True
"""
+
if __name__ == "__main__":
import doctest
+
doctest.testmod()
diff --git a/Lib/fontTools/ufoLib/errors.py b/Lib/fontTools/ufoLib/errors.py
index 304345e4..e05dd438 100644
--- a/Lib/fontTools/ufoLib/errors.py
+++ b/Lib/fontTools/ufoLib/errors.py
@@ -1,3 +1,4 @@
+from __future__ import annotations
class UFOLibError(Exception):
@@ -9,7 +10,12 @@ class UnsupportedUFOFormat(UFOLibError):
class GlifLibError(UFOLibError):
- pass
+ def _add_note(self, note: str) -> None:
+ # Loose backport of PEP 678 until we only support Python 3.11+, used for
+ # adding additional context to errors.
+ # TODO: Replace with https://docs.python.org/3.11/library/exceptions.html#BaseException.add_note
+ (message, *rest) = self.args
+ self.args = ((message + "\n" + note), *rest)
class UnsupportedGLIFFormat(GlifLibError):
diff --git a/Lib/fontTools/ufoLib/filenames.py b/Lib/fontTools/ufoLib/filenames.py
index baf22076..7f1af58e 100644
--- a/Lib/fontTools/ufoLib/filenames.py
+++ b/Lib/fontTools/ufoLib/filenames.py
@@ -89,199 +89,203 @@ maxFileNameLength = 255
class NameTranslationError(Exception):
- pass
+ pass
def userNameToFileName(userName: str, existing=(), prefix="", suffix=""):
- """
- `existing` should be a set-like object.
+ """
+ `existing` should be a set-like object.
+
+ >>> userNameToFileName("a") == "a"
+ True
+ >>> userNameToFileName("A") == "A_"
+ True
+ >>> userNameToFileName("AE") == "A_E_"
+ True
+ >>> userNameToFileName("Ae") == "A_e"
+ True
+ >>> userNameToFileName("ae") == "ae"
+ True
+ >>> userNameToFileName("aE") == "aE_"
+ True
+ >>> userNameToFileName("a.alt") == "a.alt"
+ True
+ >>> userNameToFileName("A.alt") == "A_.alt"
+ True
+ >>> userNameToFileName("A.Alt") == "A_.A_lt"
+ True
+ >>> userNameToFileName("A.aLt") == "A_.aL_t"
+ True
+ >>> userNameToFileName(u"A.alT") == "A_.alT_"
+ True
+ >>> userNameToFileName("T_H") == "T__H_"
+ True
+ >>> userNameToFileName("T_h") == "T__h"
+ True
+ >>> userNameToFileName("t_h") == "t_h"
+ True
+ >>> userNameToFileName("F_F_I") == "F__F__I_"
+ True
+ >>> userNameToFileName("f_f_i") == "f_f_i"
+ True
+ >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
+ True
+ >>> userNameToFileName(".notdef") == "_notdef"
+ True
+ >>> userNameToFileName("con") == "_con"
+ True
+ >>> userNameToFileName("CON") == "C_O_N_"
+ True
+ >>> userNameToFileName("con.alt") == "_con.alt"
+ True
+ >>> userNameToFileName("alt.con") == "alt._con"
+ True
+ """
+ # the incoming name must be a string
+ if not isinstance(userName, str):
+ raise ValueError("The value for userName must be a string.")
+ # establish the prefix and suffix lengths
+ prefixLength = len(prefix)
+ suffixLength = len(suffix)
+ # replace an initial period with an _
+ # if no prefix is to be added
+ if not prefix and userName[0] == ".":
+ userName = "_" + userName[1:]
+ # filter the user name
+ filteredUserName = []
+ for character in userName:
+ # replace illegal characters with _
+ if character in illegalCharacters:
+ character = "_"
+ # add _ to all non-lower characters
+ elif character != character.lower():
+ character += "_"
+ filteredUserName.append(character)
+ userName = "".join(filteredUserName)
+ # clip to 255
+ sliceLength = maxFileNameLength - prefixLength - suffixLength
+ userName = userName[:sliceLength]
+ # test for illegal files names
+ parts = []
+ for part in userName.split("."):
+ if part.lower() in reservedFileNames:
+ part = "_" + part
+ parts.append(part)
+ userName = ".".join(parts)
+ # test for clash
+ fullName = prefix + userName + suffix
+ if fullName.lower() in existing:
+ fullName = handleClash1(userName, existing, prefix, suffix)
+ # finished
+ return fullName
- >>> userNameToFileName("a") == "a"
- True
- >>> userNameToFileName("A") == "A_"
- True
- >>> userNameToFileName("AE") == "A_E_"
- True
- >>> userNameToFileName("Ae") == "A_e"
- True
- >>> userNameToFileName("ae") == "ae"
- True
- >>> userNameToFileName("aE") == "aE_"
- True
- >>> userNameToFileName("a.alt") == "a.alt"
- True
- >>> userNameToFileName("A.alt") == "A_.alt"
- True
- >>> userNameToFileName("A.Alt") == "A_.A_lt"
- True
- >>> userNameToFileName("A.aLt") == "A_.aL_t"
- True
- >>> userNameToFileName(u"A.alT") == "A_.alT_"
- True
- >>> userNameToFileName("T_H") == "T__H_"
- True
- >>> userNameToFileName("T_h") == "T__h"
- True
- >>> userNameToFileName("t_h") == "t_h"
- True
- >>> userNameToFileName("F_F_I") == "F__F__I_"
- True
- >>> userNameToFileName("f_f_i") == "f_f_i"
- True
- >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
- True
- >>> userNameToFileName(".notdef") == "_notdef"
- True
- >>> userNameToFileName("con") == "_con"
- True
- >>> userNameToFileName("CON") == "C_O_N_"
- True
- >>> userNameToFileName("con.alt") == "_con.alt"
- True
- >>> userNameToFileName("alt.con") == "alt._con"
- True
- """
- # the incoming name must be a string
- if not isinstance(userName, str):
- raise ValueError("The value for userName must be a string.")
- # establish the prefix and suffix lengths
- prefixLength = len(prefix)
- suffixLength = len(suffix)
- # replace an initial period with an _
- # if no prefix is to be added
- if not prefix and userName[0] == ".":
- userName = "_" + userName[1:]
- # filter the user name
- filteredUserName = []
- for character in userName:
- # replace illegal characters with _
- if character in illegalCharacters:
- character = "_"
- # add _ to all non-lower characters
- elif character != character.lower():
- character += "_"
- filteredUserName.append(character)
- userName = "".join(filteredUserName)
- # clip to 255
- sliceLength = maxFileNameLength - prefixLength - suffixLength
- userName = userName[:sliceLength]
- # test for illegal files names
- parts = []
- for part in userName.split("."):
- if part.lower() in reservedFileNames:
- part = "_" + part
- parts.append(part)
- userName = ".".join(parts)
- # test for clash
- fullName = prefix + userName + suffix
- if fullName.lower() in existing:
- fullName = handleClash1(userName, existing, prefix, suffix)
- # finished
- return fullName
def handleClash1(userName, existing=[], prefix="", suffix=""):
- """
- existing should be a case-insensitive list
- of all existing file names.
+ """
+ existing should be a case-insensitive list
+ of all existing file names.
+
+ >>> prefix = ("0" * 5) + "."
+ >>> suffix = "." + ("0" * 10)
+ >>> existing = ["a" * 5]
- >>> prefix = ("0" * 5) + "."
- >>> suffix = "." + ("0" * 10)
- >>> existing = ["a" * 5]
+ >>> e = list(existing)
+ >>> handleClash1(userName="A" * 5, existing=e,
+ ... prefix=prefix, suffix=suffix) == (
+ ... '00000.AAAAA000000000000001.0000000000')
+ True
- >>> e = list(existing)
- >>> handleClash1(userName="A" * 5, existing=e,
- ... prefix=prefix, suffix=suffix) == (
- ... '00000.AAAAA000000000000001.0000000000')
- True
+ >>> e = list(existing)
+ >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
+ >>> handleClash1(userName="A" * 5, existing=e,
+ ... prefix=prefix, suffix=suffix) == (
+ ... '00000.AAAAA000000000000002.0000000000')
+ True
- >>> e = list(existing)
- >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
- >>> handleClash1(userName="A" * 5, existing=e,
- ... prefix=prefix, suffix=suffix) == (
- ... '00000.AAAAA000000000000002.0000000000')
- True
+ >>> e = list(existing)
+ >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
+ >>> handleClash1(userName="A" * 5, existing=e,
+ ... prefix=prefix, suffix=suffix) == (
+ ... '00000.AAAAA000000000000001.0000000000')
+ True
+ """
+ # if the prefix length + user name length + suffix length + 15 is at
+ # or past the maximum length, silce 15 characters off of the user name
+ prefixLength = len(prefix)
+ suffixLength = len(suffix)
+ if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
+ l = prefixLength + len(userName) + suffixLength + 15
+ sliceLength = maxFileNameLength - l
+ userName = userName[:sliceLength]
+ finalName = None
+ # try to add numbers to create a unique name
+ counter = 1
+ while finalName is None:
+ name = userName + str(counter).zfill(15)
+ fullName = prefix + name + suffix
+ if fullName.lower() not in existing:
+ finalName = fullName
+ break
+ else:
+ counter += 1
+ if counter >= 999999999999999:
+ break
+ # if there is a clash, go to the next fallback
+ if finalName is None:
+ finalName = handleClash2(existing, prefix, suffix)
+ # finished
+ return finalName
- >>> e = list(existing)
- >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
- >>> handleClash1(userName="A" * 5, existing=e,
- ... prefix=prefix, suffix=suffix) == (
- ... '00000.AAAAA000000000000001.0000000000')
- True
- """
- # if the prefix length + user name length + suffix length + 15 is at
- # or past the maximum length, silce 15 characters off of the user name
- prefixLength = len(prefix)
- suffixLength = len(suffix)
- if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
- l = (prefixLength + len(userName) + suffixLength + 15)
- sliceLength = maxFileNameLength - l
- userName = userName[:sliceLength]
- finalName = None
- # try to add numbers to create a unique name
- counter = 1
- while finalName is None:
- name = userName + str(counter).zfill(15)
- fullName = prefix + name + suffix
- if fullName.lower() not in existing:
- finalName = fullName
- break
- else:
- counter += 1
- if counter >= 999999999999999:
- break
- # if there is a clash, go to the next fallback
- if finalName is None:
- finalName = handleClash2(existing, prefix, suffix)
- # finished
- return finalName
def handleClash2(existing=[], prefix="", suffix=""):
- """
- existing should be a case-insensitive list
- of all existing file names.
+ """
+ existing should be a case-insensitive list
+ of all existing file names.
- >>> prefix = ("0" * 5) + "."
- >>> suffix = "." + ("0" * 10)
- >>> existing = [prefix + str(i) + suffix for i in range(100)]
+ >>> prefix = ("0" * 5) + "."
+ >>> suffix = "." + ("0" * 10)
+ >>> existing = [prefix + str(i) + suffix for i in range(100)]
- >>> e = list(existing)
- >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
- ... '00000.100.0000000000')
- True
+ >>> e = list(existing)
+ >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
+ ... '00000.100.0000000000')
+ True
- >>> e = list(existing)
- >>> e.remove(prefix + "1" + suffix)
- >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
- ... '00000.1.0000000000')
- True
+ >>> e = list(existing)
+ >>> e.remove(prefix + "1" + suffix)
+ >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
+ ... '00000.1.0000000000')
+ True
+
+ >>> e = list(existing)
+ >>> e.remove(prefix + "2" + suffix)
+ >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
+ ... '00000.2.0000000000')
+ True
+ """
+ # calculate the longest possible string
+ maxLength = maxFileNameLength - len(prefix) - len(suffix)
+ maxValue = int("9" * maxLength)
+ # try to find a number
+ finalName = None
+ counter = 1
+ while finalName is None:
+ fullName = prefix + str(counter) + suffix
+ if fullName.lower() not in existing:
+ finalName = fullName
+ break
+ else:
+ counter += 1
+ if counter >= maxValue:
+ break
+ # raise an error if nothing has been found
+ if finalName is None:
+ raise NameTranslationError("No unique name could be found.")
+ # finished
+ return finalName
- >>> e = list(existing)
- >>> e.remove(prefix + "2" + suffix)
- >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
- ... '00000.2.0000000000')
- True
- """
- # calculate the longest possible string
- maxLength = maxFileNameLength - len(prefix) - len(suffix)
- maxValue = int("9" * maxLength)
- # try to find a number
- finalName = None
- counter = 1
- while finalName is None:
- fullName = prefix + str(counter) + suffix
- if fullName.lower() not in existing:
- finalName = fullName
- break
- else:
- counter += 1
- if counter >= maxValue:
- break
- # raise an error if nothing has been found
- if finalName is None:
- raise NameTranslationError("No unique name could be found.")
- # finished
- return finalName
if __name__ == "__main__":
- import doctest
- doctest.testmod()
+ import doctest
+
+ doctest.testmod()
diff --git a/Lib/fontTools/ufoLib/glifLib.py b/Lib/fontTools/ufoLib/glifLib.py
index 7d28eaf7..6dee9db3 100755
--- a/Lib/fontTools/ufoLib/glifLib.py
+++ b/Lib/fontTools/ufoLib/glifLib.py
@@ -27,13 +27,13 @@ from fontTools.pens.pointPen import AbstractPointPen, PointToSegmentPen
from fontTools.ufoLib.errors import GlifLibError
from fontTools.ufoLib.filenames import userNameToFileName
from fontTools.ufoLib.validators import (
- genericTypeValidator,
- colorValidator,
- guidelinesValidator,
- anchorsValidator,
- identifierValidator,
- imageValidator,
- glyphLibValidator,
+ genericTypeValidator,
+ colorValidator,
+ guidelinesValidator,
+ anchorsValidator,
+ identifierValidator,
+ imageValidator,
+ glyphLibValidator,
)
from fontTools.misc import etree
from fontTools.ufoLib import _UFOBaseIO, UFOFormatVersion
@@ -41,10 +41,11 @@ from fontTools.ufoLib.utils import numberTypes, _VersionTupleEnumMixin
__all__ = [
- "GlyphSet",
- "GlifLibError",
- "readGlyphFromString", "writeGlyphToString",
- "glyphNameToFileName"
+ "GlyphSet",
+ "GlifLibError",
+ "readGlyphFromString",
+ "writeGlyphToString",
+ "glyphNameToFileName",
]
logger = logging.getLogger(__name__)
@@ -59,25 +60,26 @@ LAYERINFO_FILENAME = "layerinfo.plist"
class GLIFFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum):
- FORMAT_1_0 = (1, 0)
- FORMAT_2_0 = (2, 0)
-
- @classmethod
- def default(cls, ufoFormatVersion=None):
- if ufoFormatVersion is not None:
- return max(cls.supported_versions(ufoFormatVersion))
- return super().default()
-
- @classmethod
- def supported_versions(cls, ufoFormatVersion=None):
- if ufoFormatVersion is None:
- # if ufo format unspecified, return all the supported GLIF formats
- return super().supported_versions()
- # else only return the GLIF formats supported by the given UFO format
- versions = {cls.FORMAT_1_0}
- if ufoFormatVersion >= UFOFormatVersion.FORMAT_3_0:
- versions.add(cls.FORMAT_2_0)
- return frozenset(versions)
+ FORMAT_1_0 = (1, 0)
+ FORMAT_2_0 = (2, 0)
+
+ @classmethod
+ def default(cls, ufoFormatVersion=None):
+ if ufoFormatVersion is not None:
+ return max(cls.supported_versions(ufoFormatVersion))
+ return super().default()
+
+ @classmethod
+ def supported_versions(cls, ufoFormatVersion=None):
+ if ufoFormatVersion is None:
+ # if ufo format unspecified, return all the supported GLIF formats
+ return super().supported_versions()
+ # else only return the GLIF formats supported by the given UFO format
+ versions = {cls.FORMAT_1_0}
+ if ufoFormatVersion >= UFOFormatVersion.FORMAT_3_0:
+ versions.add(cls.FORMAT_2_0)
+ return frozenset(versions)
+
# workaround for py3.11, see https://github.com/fonttools/fonttools/pull/2655
GLIFFormatVersion.__str__ = _VersionTupleEnumMixin.__str__
@@ -87,1188 +89,1295 @@ GLIFFormatVersion.__str__ = _VersionTupleEnumMixin.__str__
# Simple Glyph
# ------------
+
class Glyph:
- """
- Minimal glyph object. It has no glyph attributes until either
- the draw() or the drawPoints() method has been called.
- """
+ """
+ Minimal glyph object. It has no glyph attributes until either
+ the draw() or the drawPoints() method has been called.
+ """
- def __init__(self, glyphName, glyphSet):
- self.glyphName = glyphName
- self.glyphSet = glyphSet
+ def __init__(self, glyphName, glyphSet):
+ self.glyphName = glyphName
+ self.glyphSet = glyphSet
- def draw(self, pen, outputImpliedClosingLine=False):
- """
- Draw this glyph onto a *FontTools* Pen.
- """
- pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=outputImpliedClosingLine)
- self.drawPoints(pointPen)
+ def draw(self, pen, outputImpliedClosingLine=False):
+ """
+ Draw this glyph onto a *FontTools* Pen.
+ """
+ pointPen = PointToSegmentPen(
+ pen, outputImpliedClosingLine=outputImpliedClosingLine
+ )
+ self.drawPoints(pointPen)
- def drawPoints(self, pointPen):
- """
- Draw this glyph onto a PointPen.
- """
- self.glyphSet.readGlyph(self.glyphName, self, pointPen)
+ def drawPoints(self, pointPen):
+ """
+ Draw this glyph onto a PointPen.
+ """
+ self.glyphSet.readGlyph(self.glyphName, self, pointPen)
# ---------
# Glyph Set
# ---------
+
class GlyphSet(_UFOBaseIO):
- """
- GlyphSet manages a set of .glif files inside one directory.
-
- GlyphSet's constructor takes a path to an existing directory as it's
- first argument. Reading glyph data can either be done through the
- readGlyph() method, or by using GlyphSet's dictionary interface, where
- the keys are glyph names and the values are (very) simple glyph objects.
-
- To write a glyph to the glyph set, you use the writeGlyph() method.
- The simple glyph objects returned through the dict interface do not
- support writing, they are just a convenient way to get at the glyph data.
- """
-
- glyphClass = Glyph
-
- def __init__(
- self,
- path,
- glyphNameToFileNameFunc=None,
- ufoFormatVersion=None,
- validateRead=True,
- validateWrite=True,
- expectContentsFile=False,
- ):
- """
- 'path' should be a path (string) to an existing local directory, or
- an instance of fs.base.FS class.
-
- The optional 'glyphNameToFileNameFunc' argument must be a callback
- function that takes two arguments: a glyph name and a list of all
- existing filenames (if any exist). It should return a file name
- (including the .glif extension). The glyphNameToFileName function
- is called whenever a file name is created for a given glyph name.
-
- ``validateRead`` will validate read operations. Its default is ``True``.
- ``validateWrite`` will validate write operations. Its default is ``True``.
- ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is
- not found on the glyph set file system. This should be set to ``True`` if you
- are reading an existing UFO and ``False`` if you create a fresh glyph set.
- """
- try:
- ufoFormatVersion = UFOFormatVersion(ufoFormatVersion)
- except ValueError as e:
- from fontTools.ufoLib.errors import UnsupportedUFOFormat
-
- raise UnsupportedUFOFormat(
- f"Unsupported UFO format: {ufoFormatVersion!r}"
- ) from e
-
- if hasattr(path, "__fspath__"): # support os.PathLike objects
- path = path.__fspath__()
-
- if isinstance(path, str):
- try:
- filesystem = fs.osfs.OSFS(path)
- except fs.errors.CreateFailed:
- raise GlifLibError("No glyphs directory '%s'" % path)
- self._shouldClose = True
- elif isinstance(path, fs.base.FS):
- filesystem = path
- try:
- filesystem.check()
- except fs.errors.FilesystemClosed:
- raise GlifLibError("the filesystem '%s' is closed" % filesystem)
- self._shouldClose = False
- else:
- raise TypeError(
- "Expected a path string or fs object, found %s"
- % type(path).__name__
- )
- try:
- path = filesystem.getsyspath("/")
- except fs.errors.NoSysPath:
- # network or in-memory FS may not map to the local one
- path = str(filesystem)
- # 'dirName' is kept for backward compatibility only, but it's DEPRECATED
- # as it's not guaranteed that it maps to an existing OSFS directory.
- # Client could use the FS api via the `self.fs` attribute instead.
- self.dirName = fs.path.parts(path)[-1]
- self.fs = filesystem
- # if glyphSet contains no 'contents.plist', we consider it empty
- self._havePreviousFile = filesystem.exists(CONTENTS_FILENAME)
- if expectContentsFile and not self._havePreviousFile:
- raise GlifLibError(f"{CONTENTS_FILENAME} is missing.")
- # attribute kept for backward compatibility
- self.ufoFormatVersion = ufoFormatVersion.major
- self.ufoFormatVersionTuple = ufoFormatVersion
- if glyphNameToFileNameFunc is None:
- glyphNameToFileNameFunc = glyphNameToFileName
- self.glyphNameToFileName = glyphNameToFileNameFunc
- self._validateRead = validateRead
- self._validateWrite = validateWrite
- self._existingFileNames: set[str] | None = None
- self._reverseContents = None
-
- self.rebuildContents()
-
- def rebuildContents(self, validateRead=None):
- """
- Rebuild the contents dict by loading contents.plist.
-
- ``validateRead`` will validate the data, by default it is set to the
- class's ``validateRead`` value, can be overridden.
- """
- if validateRead is None:
- validateRead = self._validateRead
- contents = self._getPlist(CONTENTS_FILENAME, {})
- # validate the contents
- if validateRead:
- invalidFormat = False
- if not isinstance(contents, dict):
- invalidFormat = True
- else:
- for name, fileName in contents.items():
- if not isinstance(name, str):
- invalidFormat = True
- if not isinstance(fileName, str):
- invalidFormat = True
- elif not self.fs.exists(fileName):
- raise GlifLibError(
- "%s references a file that does not exist: %s"
- % (CONTENTS_FILENAME, fileName)
- )
- if invalidFormat:
- raise GlifLibError("%s is not properly formatted" % CONTENTS_FILENAME)
- self.contents = contents
- self._existingFileNames = None
- self._reverseContents = None
-
- def getReverseContents(self):
- """
- Return a reversed dict of self.contents, mapping file names to
- glyph names. This is primarily an aid for custom glyph name to file
- name schemes that want to make sure they don't generate duplicate
- file names. The file names are converted to lowercase so we can
- reliably check for duplicates that only differ in case, which is
- important for case-insensitive file systems.
- """
- if self._reverseContents is None:
- d = {}
- for k, v in self.contents.items():
- d[v.lower()] = k
- self._reverseContents = d
- return self._reverseContents
-
- def writeContents(self):
- """
- Write the contents.plist file out to disk. Call this method when
- you're done writing glyphs.
- """
- self._writePlist(CONTENTS_FILENAME, self.contents)
-
- # layer info
-
- def readLayerInfo(self, info, validateRead=None):
- """
- ``validateRead`` will validate the data, by default it is set to the
- class's ``validateRead`` value, can be overridden.
- """
- if validateRead is None:
- validateRead = self._validateRead
- infoDict = self._getPlist(LAYERINFO_FILENAME, {})
- if validateRead:
- if not isinstance(infoDict, dict):
- raise GlifLibError("layerinfo.plist is not properly formatted.")
- infoDict = validateLayerInfoVersion3Data(infoDict)
- # populate the object
- for attr, value in infoDict.items():
- try:
- setattr(info, attr, value)
- except AttributeError:
- raise GlifLibError("The supplied layer info object does not support setting a necessary attribute (%s)." % attr)
-
- def writeLayerInfo(self, info, validateWrite=None):
- """
- ``validateWrite`` will validate the data, by default it is set to the
- class's ``validateWrite`` value, can be overridden.
- """
- if validateWrite is None:
- validateWrite = self._validateWrite
- if self.ufoFormatVersionTuple.major < 3:
- raise GlifLibError(
- "layerinfo.plist is not allowed in UFO %d." % self.ufoFormatVersionTuple.major
- )
- # gather data
- infoData = {}
- for attr in layerInfoVersion3ValueData.keys():
- if hasattr(info, attr):
- try:
- value = getattr(info, attr)
- except AttributeError:
- raise GlifLibError("The supplied info object does not support getting a necessary attribute (%s)." % attr)
- if value is None or (attr == 'lib' and not value):
- continue
- infoData[attr] = value
- if infoData:
- # validate
- if validateWrite:
- infoData = validateLayerInfoVersion3Data(infoData)
- # write file
- self._writePlist(LAYERINFO_FILENAME, infoData)
- elif self._havePreviousFile and self.fs.exists(LAYERINFO_FILENAME):
- # data empty, remove existing file
- self.fs.remove(LAYERINFO_FILENAME)
-
- def getGLIF(self, glyphName):
- """
- Get the raw GLIF text for a given glyph name. This only works
- for GLIF files that are already on disk.
-
- This method is useful in situations when the raw XML needs to be
- read from a glyph set for a particular glyph before fully parsing
- it into an object structure via the readGlyph method.
-
- Raises KeyError if 'glyphName' is not in contents.plist, or
- GlifLibError if the file associated with can't be found.
- """
- fileName = self.contents[glyphName]
- try:
- return self.fs.readbytes(fileName)
- except fs.errors.ResourceNotFound:
- raise GlifLibError(
- "The file '%s' associated with glyph '%s' in contents.plist "
- "does not exist on %s" % (fileName, glyphName, self.fs)
- )
-
- def getGLIFModificationTime(self, glyphName):
- """
- Returns the modification time for the GLIF file with 'glyphName', as
- a floating point number giving the number of seconds since the epoch.
- Return None if the associated file does not exist or the underlying
- filesystem does not support getting modified times.
- Raises KeyError if the glyphName is not in contents.plist.
- """
- fileName = self.contents[glyphName]
- return self.getFileModificationTime(fileName)
-
- # reading/writing API
-
- def readGlyph(self, glyphName, glyphObject=None, pointPen=None, validate=None):
- """
- Read a .glif file for 'glyphName' from the glyph set. The
- 'glyphObject' argument can be any kind of object (even None);
- the readGlyph() method will attempt to set the following
- attributes on it:
-
- width
- the advance width of the glyph
- height
- the advance height of the glyph
- unicodes
- a list of unicode values for this glyph
- note
- a string
- lib
- a dictionary containing custom data
- image
- a dictionary containing image data
- guidelines
- a list of guideline data dictionaries
- anchors
- a list of anchor data dictionaries
-
- All attributes are optional, in two ways:
-
- 1) An attribute *won't* be set if the .glif file doesn't
- contain data for it. 'glyphObject' will have to deal
- with default values itself.
- 2) If setting the attribute fails with an AttributeError
- (for example if the 'glyphObject' attribute is read-
- only), readGlyph() will not propagate that exception,
- but ignore that attribute.
-
- To retrieve outline information, you need to pass an object
- conforming to the PointPen protocol as the 'pointPen' argument.
- This argument may be None if you don't need the outline data.
-
- readGlyph() will raise KeyError if the glyph is not present in
- the glyph set.
-
- ``validate`` will validate the data, by default it is set to the
- class's ``validateRead`` value, can be overridden.
- """
- if validate is None:
- validate = self._validateRead
- text = self.getGLIF(glyphName)
- tree = _glifTreeFromString(text)
- formatVersions = GLIFFormatVersion.supported_versions(self.ufoFormatVersionTuple)
- _readGlyphFromTree(tree, glyphObject, pointPen, formatVersions=formatVersions, validate=validate)
-
- def writeGlyph(self, glyphName, glyphObject=None, drawPointsFunc=None, formatVersion=None, validate=None):
- """
- Write a .glif file for 'glyphName' to the glyph set. The
- 'glyphObject' argument can be any kind of object (even None);
- the writeGlyph() method will attempt to get the following
- attributes from it:
-
- width
- the advance width of the glyph
- height
- the advance height of the glyph
- unicodes
- a list of unicode values for this glyph
- note
- a string
- lib
- a dictionary containing custom data
- image
- a dictionary containing image data
- guidelines
- a list of guideline data dictionaries
- anchors
- a list of anchor data dictionaries
-
- All attributes are optional: if 'glyphObject' doesn't
- have the attribute, it will simply be skipped.
-
- To write outline data to the .glif file, writeGlyph() needs
- a function (any callable object actually) that will take one
- argument: an object that conforms to the PointPen protocol.
- The function will be called by writeGlyph(); it has to call the
- proper PointPen methods to transfer the outline to the .glif file.
-
- The GLIF format version will be chosen based on the ufoFormatVersion
- passed during the creation of this object. If a particular format
- version is desired, it can be passed with the formatVersion argument.
- The formatVersion argument accepts either a tuple of integers for
- (major, minor), or a single integer for the major digit only (with
- minor digit implied as 0).
-
- An UnsupportedGLIFFormat exception is raised if the requested GLIF
- formatVersion is not supported.
-
- ``validate`` will validate the data, by default it is set to the
- class's ``validateWrite`` value, can be overridden.
- """
- if formatVersion is None:
- formatVersion = GLIFFormatVersion.default(self.ufoFormatVersionTuple)
- else:
- try:
- formatVersion = GLIFFormatVersion(formatVersion)
- except ValueError as e:
- from fontTools.ufoLib.errors import UnsupportedGLIFFormat
-
- raise UnsupportedGLIFFormat(
- f"Unsupported GLIF format version: {formatVersion!r}"
- ) from e
- if formatVersion not in GLIFFormatVersion.supported_versions(
- self.ufoFormatVersionTuple
- ):
- from fontTools.ufoLib.errors import UnsupportedGLIFFormat
-
- raise UnsupportedGLIFFormat(
- f"Unsupported GLIF format version ({formatVersion!s}) "
- f"for UFO format version {self.ufoFormatVersionTuple!s}."
- )
- if validate is None:
- validate = self._validateWrite
- fileName = self.contents.get(glyphName)
- if fileName is None:
- if self._existingFileNames is None:
- self._existingFileNames = {
- fileName.lower() for fileName in self.contents.values()
- }
- fileName = self.glyphNameToFileName(glyphName, self._existingFileNames)
- self.contents[glyphName] = fileName
- self._existingFileNames.add(fileName.lower())
- if self._reverseContents is not None:
- self._reverseContents[fileName.lower()] = glyphName
- data = _writeGlyphToBytes(
- glyphName,
- glyphObject,
- drawPointsFunc,
- formatVersion=formatVersion,
- validate=validate,
- )
- if (
- self._havePreviousFile
- and self.fs.exists(fileName)
- and data == self.fs.readbytes(fileName)
- ):
- return
- self.fs.writebytes(fileName, data)
-
- def deleteGlyph(self, glyphName):
- """Permanently delete the glyph from the glyph set on disk. Will
- raise KeyError if the glyph is not present in the glyph set.
- """
- fileName = self.contents[glyphName]
- self.fs.remove(fileName)
- if self._existingFileNames is not None:
- self._existingFileNames.remove(fileName.lower())
- if self._reverseContents is not None:
- del self._reverseContents[fileName.lower()]
- del self.contents[glyphName]
-
- # dict-like support
-
- def keys(self):
- return list(self.contents.keys())
-
- def has_key(self, glyphName):
- return glyphName in self.contents
-
- __contains__ = has_key
-
- def __len__(self):
- return len(self.contents)
-
- def __getitem__(self, glyphName):
- if glyphName not in self.contents:
- raise KeyError(glyphName)
- return self.glyphClass(glyphName, self)
-
- # quickly fetch unicode values
-
- def getUnicodes(self, glyphNames=None):
- """
- Return a dictionary that maps glyph names to lists containing
- the unicode value[s] for that glyph, if any. This parses the .glif
- files partially, so it is a lot faster than parsing all files completely.
- By default this checks all glyphs, but a subset can be passed with glyphNames.
- """
- unicodes = {}
- if glyphNames is None:
- glyphNames = self.contents.keys()
- for glyphName in glyphNames:
- text = self.getGLIF(glyphName)
- unicodes[glyphName] = _fetchUnicodes(text)
- return unicodes
-
- def getComponentReferences(self, glyphNames=None):
- """
- Return a dictionary that maps glyph names to lists containing the
- base glyph name of components in the glyph. This parses the .glif
- files partially, so it is a lot faster than parsing all files completely.
- By default this checks all glyphs, but a subset can be passed with glyphNames.
- """
- components = {}
- if glyphNames is None:
- glyphNames = self.contents.keys()
- for glyphName in glyphNames:
- text = self.getGLIF(glyphName)
- components[glyphName] = _fetchComponentBases(text)
- return components
-
- def getImageReferences(self, glyphNames=None):
- """
- Return a dictionary that maps glyph names to the file name of the image
- referenced by the glyph. This parses the .glif files partially, so it is a
- lot faster than parsing all files completely.
- By default this checks all glyphs, but a subset can be passed with glyphNames.
- """
- images = {}
- if glyphNames is None:
- glyphNames = self.contents.keys()
- for glyphName in glyphNames:
- text = self.getGLIF(glyphName)
- images[glyphName] = _fetchImageFileName(text)
- return images
-
- def close(self):
- if self._shouldClose:
- self.fs.close()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, exc_tb):
- self.close()
+ """
+ GlyphSet manages a set of .glif files inside one directory.
+
+ GlyphSet's constructor takes a path to an existing directory as it's
+ first argument. Reading glyph data can either be done through the
+ readGlyph() method, or by using GlyphSet's dictionary interface, where
+ the keys are glyph names and the values are (very) simple glyph objects.
+
+ To write a glyph to the glyph set, you use the writeGlyph() method.
+ The simple glyph objects returned through the dict interface do not
+ support writing, they are just a convenient way to get at the glyph data.
+ """
+
+ glyphClass = Glyph
+
+ def __init__(
+ self,
+ path,
+ glyphNameToFileNameFunc=None,
+ ufoFormatVersion=None,
+ validateRead=True,
+ validateWrite=True,
+ expectContentsFile=False,
+ ):
+ """
+ 'path' should be a path (string) to an existing local directory, or
+ an instance of fs.base.FS class.
+
+ The optional 'glyphNameToFileNameFunc' argument must be a callback
+ function that takes two arguments: a glyph name and a list of all
+ existing filenames (if any exist). It should return a file name
+ (including the .glif extension). The glyphNameToFileName function
+ is called whenever a file name is created for a given glyph name.
+
+ ``validateRead`` will validate read operations. Its default is ``True``.
+ ``validateWrite`` will validate write operations. Its default is ``True``.
+ ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is
+ not found on the glyph set file system. This should be set to ``True`` if you
+ are reading an existing UFO and ``False`` if you create a fresh glyph set.
+ """
+ try:
+ ufoFormatVersion = UFOFormatVersion(ufoFormatVersion)
+ except ValueError as e:
+ from fontTools.ufoLib.errors import UnsupportedUFOFormat
+
+ raise UnsupportedUFOFormat(
+ f"Unsupported UFO format: {ufoFormatVersion!r}"
+ ) from e
+
+ if hasattr(path, "__fspath__"): # support os.PathLike objects
+ path = path.__fspath__()
+
+ if isinstance(path, str):
+ try:
+ filesystem = fs.osfs.OSFS(path)
+ except fs.errors.CreateFailed:
+ raise GlifLibError("No glyphs directory '%s'" % path)
+ self._shouldClose = True
+ elif isinstance(path, fs.base.FS):
+ filesystem = path
+ try:
+ filesystem.check()
+ except fs.errors.FilesystemClosed:
+ raise GlifLibError("the filesystem '%s' is closed" % filesystem)
+ self._shouldClose = False
+ else:
+ raise TypeError(
+ "Expected a path string or fs object, found %s" % type(path).__name__
+ )
+ try:
+ path = filesystem.getsyspath("/")
+ except fs.errors.NoSysPath:
+ # network or in-memory FS may not map to the local one
+ path = str(filesystem)
+ # 'dirName' is kept for backward compatibility only, but it's DEPRECATED
+ # as it's not guaranteed that it maps to an existing OSFS directory.
+ # Client could use the FS api via the `self.fs` attribute instead.
+ self.dirName = fs.path.parts(path)[-1]
+ self.fs = filesystem
+ # if glyphSet contains no 'contents.plist', we consider it empty
+ self._havePreviousFile = filesystem.exists(CONTENTS_FILENAME)
+ if expectContentsFile and not self._havePreviousFile:
+ raise GlifLibError(f"{CONTENTS_FILENAME} is missing.")
+ # attribute kept for backward compatibility
+ self.ufoFormatVersion = ufoFormatVersion.major
+ self.ufoFormatVersionTuple = ufoFormatVersion
+ if glyphNameToFileNameFunc is None:
+ glyphNameToFileNameFunc = glyphNameToFileName
+ self.glyphNameToFileName = glyphNameToFileNameFunc
+ self._validateRead = validateRead
+ self._validateWrite = validateWrite
+ self._existingFileNames: set[str] | None = None
+ self._reverseContents = None
+
+ self.rebuildContents()
+
+ def rebuildContents(self, validateRead=None):
+ """
+ Rebuild the contents dict by loading contents.plist.
+
+ ``validateRead`` will validate the data, by default it is set to the
+ class's ``validateRead`` value, can be overridden.
+ """
+ if validateRead is None:
+ validateRead = self._validateRead
+ contents = self._getPlist(CONTENTS_FILENAME, {})
+ # validate the contents
+ if validateRead:
+ invalidFormat = False
+ if not isinstance(contents, dict):
+ invalidFormat = True
+ else:
+ for name, fileName in contents.items():
+ if not isinstance(name, str):
+ invalidFormat = True
+ if not isinstance(fileName, str):
+ invalidFormat = True
+ elif not self.fs.exists(fileName):
+ raise GlifLibError(
+ "%s references a file that does not exist: %s"
+ % (CONTENTS_FILENAME, fileName)
+ )
+ if invalidFormat:
+ raise GlifLibError("%s is not properly formatted" % CONTENTS_FILENAME)
+ self.contents = contents
+ self._existingFileNames = None
+ self._reverseContents = None
+
+ def getReverseContents(self):
+ """
+ Return a reversed dict of self.contents, mapping file names to
+ glyph names. This is primarily an aid for custom glyph name to file
+ name schemes that want to make sure they don't generate duplicate
+ file names. The file names are converted to lowercase so we can
+ reliably check for duplicates that only differ in case, which is
+ important for case-insensitive file systems.
+ """
+ if self._reverseContents is None:
+ d = {}
+ for k, v in self.contents.items():
+ d[v.lower()] = k
+ self._reverseContents = d
+ return self._reverseContents
+
+ def writeContents(self):
+ """
+ Write the contents.plist file out to disk. Call this method when
+ you're done writing glyphs.
+ """
+ self._writePlist(CONTENTS_FILENAME, self.contents)
+
+ # layer info
+
+ def readLayerInfo(self, info, validateRead=None):
+ """
+ ``validateRead`` will validate the data, by default it is set to the
+ class's ``validateRead`` value, can be overridden.
+ """
+ if validateRead is None:
+ validateRead = self._validateRead
+ infoDict = self._getPlist(LAYERINFO_FILENAME, {})
+ if validateRead:
+ if not isinstance(infoDict, dict):
+ raise GlifLibError("layerinfo.plist is not properly formatted.")
+ infoDict = validateLayerInfoVersion3Data(infoDict)
+ # populate the object
+ for attr, value in infoDict.items():
+ try:
+ setattr(info, attr, value)
+ except AttributeError:
+ raise GlifLibError(
+ "The supplied layer info object does not support setting a necessary attribute (%s)."
+ % attr
+ )
+
+ def writeLayerInfo(self, info, validateWrite=None):
+ """
+ ``validateWrite`` will validate the data, by default it is set to the
+ class's ``validateWrite`` value, can be overridden.
+ """
+ if validateWrite is None:
+ validateWrite = self._validateWrite
+ if self.ufoFormatVersionTuple.major < 3:
+ raise GlifLibError(
+ "layerinfo.plist is not allowed in UFO %d."
+ % self.ufoFormatVersionTuple.major
+ )
+ # gather data
+ infoData = {}
+ for attr in layerInfoVersion3ValueData.keys():
+ if hasattr(info, attr):
+ try:
+ value = getattr(info, attr)
+ except AttributeError:
+ raise GlifLibError(
+ "The supplied info object does not support getting a necessary attribute (%s)."
+ % attr
+ )
+ if value is None or (attr == "lib" and not value):
+ continue
+ infoData[attr] = value
+ if infoData:
+ # validate
+ if validateWrite:
+ infoData = validateLayerInfoVersion3Data(infoData)
+ # write file
+ self._writePlist(LAYERINFO_FILENAME, infoData)
+ elif self._havePreviousFile and self.fs.exists(LAYERINFO_FILENAME):
+ # data empty, remove existing file
+ self.fs.remove(LAYERINFO_FILENAME)
+
+ def getGLIF(self, glyphName):
+ """
+ Get the raw GLIF text for a given glyph name. This only works
+ for GLIF files that are already on disk.
+
+ This method is useful in situations when the raw XML needs to be
+ read from a glyph set for a particular glyph before fully parsing
+ it into an object structure via the readGlyph method.
+
+ Raises KeyError if 'glyphName' is not in contents.plist, or
+ GlifLibError if the file associated with can't be found.
+ """
+ fileName = self.contents[glyphName]
+ try:
+ return self.fs.readbytes(fileName)
+ except fs.errors.ResourceNotFound:
+ raise GlifLibError(
+ "The file '%s' associated with glyph '%s' in contents.plist "
+ "does not exist on %s" % (fileName, glyphName, self.fs)
+ )
+
+ def getGLIFModificationTime(self, glyphName):
+ """
+ Returns the modification time for the GLIF file with 'glyphName', as
+ a floating point number giving the number of seconds since the epoch.
+ Return None if the associated file does not exist or the underlying
+ filesystem does not support getting modified times.
+ Raises KeyError if the glyphName is not in contents.plist.
+ """
+ fileName = self.contents[glyphName]
+ return self.getFileModificationTime(fileName)
+
+ # reading/writing API
+
+ def readGlyph(self, glyphName, glyphObject=None, pointPen=None, validate=None):
+ """
+ Read a .glif file for 'glyphName' from the glyph set. The
+ 'glyphObject' argument can be any kind of object (even None);
+ the readGlyph() method will attempt to set the following
+ attributes on it:
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
+
+ All attributes are optional, in two ways:
+
+ 1) An attribute *won't* be set if the .glif file doesn't
+ contain data for it. 'glyphObject' will have to deal
+ with default values itself.
+ 2) If setting the attribute fails with an AttributeError
+ (for example if the 'glyphObject' attribute is read-
+ only), readGlyph() will not propagate that exception,
+ but ignore that attribute.
+
+ To retrieve outline information, you need to pass an object
+ conforming to the PointPen protocol as the 'pointPen' argument.
+ This argument may be None if you don't need the outline data.
+
+ readGlyph() will raise KeyError if the glyph is not present in
+ the glyph set.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's ``validateRead`` value, can be overridden.
+ """
+ if validate is None:
+ validate = self._validateRead
+ text = self.getGLIF(glyphName)
+ try:
+ tree = _glifTreeFromString(text)
+ formatVersions = GLIFFormatVersion.supported_versions(
+ self.ufoFormatVersionTuple
+ )
+ _readGlyphFromTree(
+ tree,
+ glyphObject,
+ pointPen,
+ formatVersions=formatVersions,
+ validate=validate,
+ )
+ except GlifLibError as glifLibError:
+ # Re-raise with a note that gives extra context, describing where
+ # the error occurred.
+ fileName = self.contents[glyphName]
+ try:
+ glifLocation = f"'{self.fs.getsyspath(fileName)}'"
+ except fs.errors.NoSysPath:
+ # Network or in-memory FS may not map to a local path, so use
+ # the best string representation we have.
+ glifLocation = f"'{fileName}' from '{str(self.fs)}'"
+
+ glifLibError._add_note(
+ f"The issue is in glyph '{glyphName}', located in {glifLocation}."
+ )
+ raise
+
+ def writeGlyph(
+ self,
+ glyphName,
+ glyphObject=None,
+ drawPointsFunc=None,
+ formatVersion=None,
+ validate=None,
+ ):
+ """
+ Write a .glif file for 'glyphName' to the glyph set. The
+ 'glyphObject' argument can be any kind of object (even None);
+ the writeGlyph() method will attempt to get the following
+ attributes from it:
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
+
+ All attributes are optional: if 'glyphObject' doesn't
+ have the attribute, it will simply be skipped.
+
+ To write outline data to the .glif file, writeGlyph() needs
+ a function (any callable object actually) that will take one
+ argument: an object that conforms to the PointPen protocol.
+ The function will be called by writeGlyph(); it has to call the
+ proper PointPen methods to transfer the outline to the .glif file.
+
+ The GLIF format version will be chosen based on the ufoFormatVersion
+ passed during the creation of this object. If a particular format
+ version is desired, it can be passed with the formatVersion argument.
+ The formatVersion argument accepts either a tuple of integers for
+ (major, minor), or a single integer for the major digit only (with
+ minor digit implied as 0).
+
+ An UnsupportedGLIFFormat exception is raised if the requested GLIF
+ formatVersion is not supported.
+
+ ``validate`` will validate the data, by default it is set to the
+ class's ``validateWrite`` value, can be overridden.
+ """
+ if formatVersion is None:
+ formatVersion = GLIFFormatVersion.default(self.ufoFormatVersionTuple)
+ else:
+ try:
+ formatVersion = GLIFFormatVersion(formatVersion)
+ except ValueError as e:
+ from fontTools.ufoLib.errors import UnsupportedGLIFFormat
+
+ raise UnsupportedGLIFFormat(
+ f"Unsupported GLIF format version: {formatVersion!r}"
+ ) from e
+ if formatVersion not in GLIFFormatVersion.supported_versions(
+ self.ufoFormatVersionTuple
+ ):
+ from fontTools.ufoLib.errors import UnsupportedGLIFFormat
+
+ raise UnsupportedGLIFFormat(
+ f"Unsupported GLIF format version ({formatVersion!s}) "
+ f"for UFO format version {self.ufoFormatVersionTuple!s}."
+ )
+ if validate is None:
+ validate = self._validateWrite
+ fileName = self.contents.get(glyphName)
+ if fileName is None:
+ if self._existingFileNames is None:
+ self._existingFileNames = {
+ fileName.lower() for fileName in self.contents.values()
+ }
+ fileName = self.glyphNameToFileName(glyphName, self._existingFileNames)
+ self.contents[glyphName] = fileName
+ self._existingFileNames.add(fileName.lower())
+ if self._reverseContents is not None:
+ self._reverseContents[fileName.lower()] = glyphName
+ data = _writeGlyphToBytes(
+ glyphName,
+ glyphObject,
+ drawPointsFunc,
+ formatVersion=formatVersion,
+ validate=validate,
+ )
+ if (
+ self._havePreviousFile
+ and self.fs.exists(fileName)
+ and data == self.fs.readbytes(fileName)
+ ):
+ return
+ self.fs.writebytes(fileName, data)
+
+ def deleteGlyph(self, glyphName):
+ """Permanently delete the glyph from the glyph set on disk. Will
+ raise KeyError if the glyph is not present in the glyph set.
+ """
+ fileName = self.contents[glyphName]
+ self.fs.remove(fileName)
+ if self._existingFileNames is not None:
+ self._existingFileNames.remove(fileName.lower())
+ if self._reverseContents is not None:
+ del self._reverseContents[fileName.lower()]
+ del self.contents[glyphName]
+
+ # dict-like support
+
+ def keys(self):
+ return list(self.contents.keys())
+
+ def has_key(self, glyphName):
+ return glyphName in self.contents
+
+ __contains__ = has_key
+
+ def __len__(self):
+ return len(self.contents)
+
+ def __getitem__(self, glyphName):
+ if glyphName not in self.contents:
+ raise KeyError(glyphName)
+ return self.glyphClass(glyphName, self)
+
+ # quickly fetch unicode values
+
+ def getUnicodes(self, glyphNames=None):
+ """
+ Return a dictionary that maps glyph names to lists containing
+ the unicode value[s] for that glyph, if any. This parses the .glif
+ files partially, so it is a lot faster than parsing all files completely.
+ By default this checks all glyphs, but a subset can be passed with glyphNames.
+ """
+ unicodes = {}
+ if glyphNames is None:
+ glyphNames = self.contents.keys()
+ for glyphName in glyphNames:
+ text = self.getGLIF(glyphName)
+ unicodes[glyphName] = _fetchUnicodes(text)
+ return unicodes
+
+ def getComponentReferences(self, glyphNames=None):
+ """
+ Return a dictionary that maps glyph names to lists containing the
+ base glyph name of components in the glyph. This parses the .glif
+ files partially, so it is a lot faster than parsing all files completely.
+ By default this checks all glyphs, but a subset can be passed with glyphNames.
+ """
+ components = {}
+ if glyphNames is None:
+ glyphNames = self.contents.keys()
+ for glyphName in glyphNames:
+ text = self.getGLIF(glyphName)
+ components[glyphName] = _fetchComponentBases(text)
+ return components
+
+ def getImageReferences(self, glyphNames=None):
+ """
+ Return a dictionary that maps glyph names to the file name of the image
+ referenced by the glyph. This parses the .glif files partially, so it is a
+ lot faster than parsing all files completely.
+ By default this checks all glyphs, but a subset can be passed with glyphNames.
+ """
+ images = {}
+ if glyphNames is None:
+ glyphNames = self.contents.keys()
+ for glyphName in glyphNames:
+ text = self.getGLIF(glyphName)
+ images[glyphName] = _fetchImageFileName(text)
+ return images
+
+ def close(self):
+ if self._shouldClose:
+ self.fs.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self.close()
# -----------------------
# Glyph Name to File Name
# -----------------------
+
def glyphNameToFileName(glyphName, existingFileNames):
- """
- Wrapper around the userNameToFileName function in filenames.py
+ """
+ Wrapper around the userNameToFileName function in filenames.py
+
+ Note that existingFileNames should be a set for large glyphsets
+ or performance will suffer.
+ """
+ if existingFileNames is None:
+ existingFileNames = set()
+ return userNameToFileName(glyphName, existing=existingFileNames, suffix=".glif")
- Note that existingFileNames should be a set for large glyphsets
- or performance will suffer.
- """
- if existingFileNames is None:
- existingFileNames = set()
- return userNameToFileName(glyphName, existing=existingFileNames, suffix=".glif")
# -----------------------
# GLIF To and From String
# -----------------------
+
def readGlyphFromString(
- aString,
- glyphObject=None,
- pointPen=None,
- formatVersions=None,
- validate=True,
+ aString,
+ glyphObject=None,
+ pointPen=None,
+ formatVersions=None,
+ validate=True,
):
- """
- Read .glif data from a string into a glyph object.
-
- The 'glyphObject' argument can be any kind of object (even None);
- the readGlyphFromString() method will attempt to set the following
- attributes on it:
-
- width
- the advance width of the glyph
- height
- the advance height of the glyph
- unicodes
- a list of unicode values for this glyph
- note
- a string
- lib
- a dictionary containing custom data
- image
- a dictionary containing image data
- guidelines
- a list of guideline data dictionaries
- anchors
- a list of anchor data dictionaries
-
- All attributes are optional, in two ways:
-
- 1) An attribute *won't* be set if the .glif file doesn't
- contain data for it. 'glyphObject' will have to deal
- with default values itself.
- 2) If setting the attribute fails with an AttributeError
- (for example if the 'glyphObject' attribute is read-
- only), readGlyphFromString() will not propagate that
- exception, but ignore that attribute.
-
- To retrieve outline information, you need to pass an object
- conforming to the PointPen protocol as the 'pointPen' argument.
- This argument may be None if you don't need the outline data.
-
- The formatVersions optional argument define the GLIF format versions
- that are allowed to be read.
- The type is Optional[Iterable[Tuple[int, int], int]]. It can contain
- either integers (for the major versions to be allowed, with minor
- digits defaulting to 0), or tuples of integers to specify both
- (major, minor) versions.
- By default when formatVersions is None all the GLIF format versions
- currently defined are allowed to be read.
-
- ``validate`` will validate the read data. It is set to ``True`` by default.
- """
- tree = _glifTreeFromString(aString)
-
- if formatVersions is None:
- validFormatVersions = GLIFFormatVersion.supported_versions()
- else:
- validFormatVersions, invalidFormatVersions = set(), set()
- for v in formatVersions:
- try:
- formatVersion = GLIFFormatVersion(v)
- except ValueError:
- invalidFormatVersions.add(v)
- else:
- validFormatVersions.add(formatVersion)
- if not validFormatVersions:
- raise ValueError(
- "None of the requested GLIF formatVersions are supported: "
- f"{formatVersions!r}"
- )
-
- _readGlyphFromTree(
- tree, glyphObject, pointPen, formatVersions=validFormatVersions, validate=validate
- )
+ """
+ Read .glif data from a string into a glyph object.
+
+ The 'glyphObject' argument can be any kind of object (even None);
+ the readGlyphFromString() method will attempt to set the following
+ attributes on it:
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
+
+ All attributes are optional, in two ways:
+
+ 1) An attribute *won't* be set if the .glif file doesn't
+ contain data for it. 'glyphObject' will have to deal
+ with default values itself.
+ 2) If setting the attribute fails with an AttributeError
+ (for example if the 'glyphObject' attribute is read-
+ only), readGlyphFromString() will not propagate that
+ exception, but ignore that attribute.
+
+ To retrieve outline information, you need to pass an object
+ conforming to the PointPen protocol as the 'pointPen' argument.
+ This argument may be None if you don't need the outline data.
+
+ The formatVersions optional argument define the GLIF format versions
+ that are allowed to be read.
+ The type is Optional[Iterable[Tuple[int, int], int]]. It can contain
+ either integers (for the major versions to be allowed, with minor
+ digits defaulting to 0), or tuples of integers to specify both
+ (major, minor) versions.
+ By default when formatVersions is None all the GLIF format versions
+ currently defined are allowed to be read.
+
+ ``validate`` will validate the read data. It is set to ``True`` by default.
+ """
+ tree = _glifTreeFromString(aString)
+
+ if formatVersions is None:
+ validFormatVersions = GLIFFormatVersion.supported_versions()
+ else:
+ validFormatVersions, invalidFormatVersions = set(), set()
+ for v in formatVersions:
+ try:
+ formatVersion = GLIFFormatVersion(v)
+ except ValueError:
+ invalidFormatVersions.add(v)
+ else:
+ validFormatVersions.add(formatVersion)
+ if not validFormatVersions:
+ raise ValueError(
+ "None of the requested GLIF formatVersions are supported: "
+ f"{formatVersions!r}"
+ )
+
+ _readGlyphFromTree(
+ tree,
+ glyphObject,
+ pointPen,
+ formatVersions=validFormatVersions,
+ validate=validate,
+ )
def _writeGlyphToBytes(
- glyphName,
- glyphObject=None,
- drawPointsFunc=None,
- writer=None,
- formatVersion=None,
- validate=True,
+ glyphName,
+ glyphObject=None,
+ drawPointsFunc=None,
+ writer=None,
+ formatVersion=None,
+ validate=True,
):
- """Return .glif data for a glyph as a UTF-8 encoded bytes string."""
- try:
- formatVersion = GLIFFormatVersion(formatVersion)
- except ValueError:
- from fontTools.ufoLib.errors import UnsupportedGLIFFormat
-
- raise UnsupportedGLIFFormat("Unsupported GLIF format version: {formatVersion!r}")
- # start
- if validate and not isinstance(glyphName, str):
- raise GlifLibError("The glyph name is not properly formatted.")
- if validate and len(glyphName) == 0:
- raise GlifLibError("The glyph name is empty.")
- glyphAttrs = OrderedDict([("name", glyphName), ("format", repr(formatVersion.major))])
- if formatVersion.minor != 0:
- glyphAttrs["formatMinor"] = repr(formatVersion.minor)
- root = etree.Element("glyph", glyphAttrs)
- identifiers = set()
- # advance
- _writeAdvance(glyphObject, root, validate)
- # unicodes
- if getattr(glyphObject, "unicodes", None):
- _writeUnicodes(glyphObject, root, validate)
- # note
- if getattr(glyphObject, "note", None):
- _writeNote(glyphObject, root, validate)
- # image
- if formatVersion.major >= 2 and getattr(glyphObject, "image", None):
- _writeImage(glyphObject, root, validate)
- # guidelines
- if formatVersion.major >= 2 and getattr(glyphObject, "guidelines", None):
- _writeGuidelines(glyphObject, root, identifiers, validate)
- # anchors
- anchors = getattr(glyphObject, "anchors", None)
- if formatVersion.major >= 2 and anchors:
- _writeAnchors(glyphObject, root, identifiers, validate)
- # outline
- if drawPointsFunc is not None:
- outline = etree.SubElement(root, "outline")
- pen = GLIFPointPen(outline, identifiers=identifiers, validate=validate)
- drawPointsFunc(pen)
- if formatVersion.major == 1 and anchors:
- _writeAnchorsFormat1(pen, anchors, validate)
- # prevent lxml from writing self-closing tags
- if not len(outline):
- outline.text = "\n "
- # lib
- if getattr(glyphObject, "lib", None):
- _writeLib(glyphObject, root, validate)
- # return the text
- data = etree.tostring(
- root, encoding="UTF-8", xml_declaration=True, pretty_print=True
- )
- return data
+ """Return .glif data for a glyph as a UTF-8 encoded bytes string."""
+ try:
+ formatVersion = GLIFFormatVersion(formatVersion)
+ except ValueError:
+ from fontTools.ufoLib.errors import UnsupportedGLIFFormat
+
+ raise UnsupportedGLIFFormat(
+ "Unsupported GLIF format version: {formatVersion!r}"
+ )
+ # start
+ if validate and not isinstance(glyphName, str):
+ raise GlifLibError("The glyph name is not properly formatted.")
+ if validate and len(glyphName) == 0:
+ raise GlifLibError("The glyph name is empty.")
+ glyphAttrs = OrderedDict(
+ [("name", glyphName), ("format", repr(formatVersion.major))]
+ )
+ if formatVersion.minor != 0:
+ glyphAttrs["formatMinor"] = repr(formatVersion.minor)
+ root = etree.Element("glyph", glyphAttrs)
+ identifiers = set()
+ # advance
+ _writeAdvance(glyphObject, root, validate)
+ # unicodes
+ if getattr(glyphObject, "unicodes", None):
+ _writeUnicodes(glyphObject, root, validate)
+ # note
+ if getattr(glyphObject, "note", None):
+ _writeNote(glyphObject, root, validate)
+ # image
+ if formatVersion.major >= 2 and getattr(glyphObject, "image", None):
+ _writeImage(glyphObject, root, validate)
+ # guidelines
+ if formatVersion.major >= 2 and getattr(glyphObject, "guidelines", None):
+ _writeGuidelines(glyphObject, root, identifiers, validate)
+ # anchors
+ anchors = getattr(glyphObject, "anchors", None)
+ if formatVersion.major >= 2 and anchors:
+ _writeAnchors(glyphObject, root, identifiers, validate)
+ # outline
+ if drawPointsFunc is not None:
+ outline = etree.SubElement(root, "outline")
+ pen = GLIFPointPen(outline, identifiers=identifiers, validate=validate)
+ drawPointsFunc(pen)
+ if formatVersion.major == 1 and anchors:
+ _writeAnchorsFormat1(pen, anchors, validate)
+ # prevent lxml from writing self-closing tags
+ if not len(outline):
+ outline.text = "\n "
+ # lib
+ if getattr(glyphObject, "lib", None):
+ _writeLib(glyphObject, root, validate)
+ # return the text
+ data = etree.tostring(
+ root, encoding="UTF-8", xml_declaration=True, pretty_print=True
+ )
+ return data
def writeGlyphToString(
- glyphName,
- glyphObject=None,
- drawPointsFunc=None,
- formatVersion=None,
- validate=True,
+ glyphName,
+ glyphObject=None,
+ drawPointsFunc=None,
+ formatVersion=None,
+ validate=True,
):
- """
- Return .glif data for a glyph as a string. The XML declaration's
- encoding is always set to "UTF-8".
- The 'glyphObject' argument can be any kind of object (even None);
- the writeGlyphToString() method will attempt to get the following
- attributes from it:
-
- width
- the advance width of the glyph
- height
- the advance height of the glyph
- unicodes
- a list of unicode values for this glyph
- note
- a string
- lib
- a dictionary containing custom data
- image
- a dictionary containing image data
- guidelines
- a list of guideline data dictionaries
- anchors
- a list of anchor data dictionaries
-
- All attributes are optional: if 'glyphObject' doesn't
- have the attribute, it will simply be skipped.
-
- To write outline data to the .glif file, writeGlyphToString() needs
- a function (any callable object actually) that will take one
- argument: an object that conforms to the PointPen protocol.
- The function will be called by writeGlyphToString(); it has to call the
- proper PointPen methods to transfer the outline to the .glif file.
-
- The GLIF format version can be specified with the formatVersion argument.
- This accepts either a tuple of integers for (major, minor), or a single
- integer for the major digit only (with minor digit implied as 0).
- By default when formatVesion is None the latest GLIF format version will
- be used; currently it's 2.0, which is equivalent to formatVersion=(2, 0).
-
- An UnsupportedGLIFFormat exception is raised if the requested UFO
- formatVersion is not supported.
-
- ``validate`` will validate the written data. It is set to ``True`` by default.
- """
- data = _writeGlyphToBytes(
- glyphName,
- glyphObject=glyphObject,
- drawPointsFunc=drawPointsFunc,
- formatVersion=formatVersion,
- validate=validate,
- )
- return data.decode("utf-8")
+ """
+ Return .glif data for a glyph as a string. The XML declaration's
+ encoding is always set to "UTF-8".
+ The 'glyphObject' argument can be any kind of object (even None);
+ the writeGlyphToString() method will attempt to get the following
+ attributes from it:
+
+ width
+ the advance width of the glyph
+ height
+ the advance height of the glyph
+ unicodes
+ a list of unicode values for this glyph
+ note
+ a string
+ lib
+ a dictionary containing custom data
+ image
+ a dictionary containing image data
+ guidelines
+ a list of guideline data dictionaries
+ anchors
+ a list of anchor data dictionaries
+
+ All attributes are optional: if 'glyphObject' doesn't
+ have the attribute, it will simply be skipped.
+
+ To write outline data to the .glif file, writeGlyphToString() needs
+ a function (any callable object actually) that will take one
+ argument: an object that conforms to the PointPen protocol.
+ The function will be called by writeGlyphToString(); it has to call the
+ proper PointPen methods to transfer the outline to the .glif file.
+
+ The GLIF format version can be specified with the formatVersion argument.
+ This accepts either a tuple of integers for (major, minor), or a single
+ integer for the major digit only (with minor digit implied as 0).
+ By default when formatVersion is None the latest GLIF format version will
+ be used; currently it's 2.0, which is equivalent to formatVersion=(2, 0).
+
+ An UnsupportedGLIFFormat exception is raised if the requested UFO
+ formatVersion is not supported.
+
+ ``validate`` will validate the written data. It is set to ``True`` by default.
+ """
+ data = _writeGlyphToBytes(
+ glyphName,
+ glyphObject=glyphObject,
+ drawPointsFunc=drawPointsFunc,
+ formatVersion=formatVersion,
+ validate=validate,
+ )
+ return data.decode("utf-8")
def _writeAdvance(glyphObject, element, validate):
- width = getattr(glyphObject, "width", None)
- if width is not None:
- if validate and not isinstance(width, numberTypes):
- raise GlifLibError("width attribute must be int or float")
- if width == 0:
- width = None
- height = getattr(glyphObject, "height", None)
- if height is not None:
- if validate and not isinstance(height, numberTypes):
- raise GlifLibError("height attribute must be int or float")
- if height == 0:
- height = None
- if width is not None and height is not None:
- etree.SubElement(element, "advance", OrderedDict([("height", repr(height)), ("width", repr(width))]))
- elif width is not None:
- etree.SubElement(element, "advance", dict(width=repr(width)))
- elif height is not None:
- etree.SubElement(element, "advance", dict(height=repr(height)))
+ width = getattr(glyphObject, "width", None)
+ if width is not None:
+ if validate and not isinstance(width, numberTypes):
+ raise GlifLibError("width attribute must be int or float")
+ if width == 0:
+ width = None
+ height = getattr(glyphObject, "height", None)
+ if height is not None:
+ if validate and not isinstance(height, numberTypes):
+ raise GlifLibError("height attribute must be int or float")
+ if height == 0:
+ height = None
+ if width is not None and height is not None:
+ etree.SubElement(
+ element,
+ "advance",
+ OrderedDict([("height", repr(height)), ("width", repr(width))]),
+ )
+ elif width is not None:
+ etree.SubElement(element, "advance", dict(width=repr(width)))
+ elif height is not None:
+ etree.SubElement(element, "advance", dict(height=repr(height)))
+
def _writeUnicodes(glyphObject, element, validate):
- unicodes = getattr(glyphObject, "unicodes", None)
- if validate and isinstance(unicodes, int):
- unicodes = [unicodes]
- seen = set()
- for code in unicodes:
- if validate and not isinstance(code, int):
- raise GlifLibError("unicode values must be int")
- if code in seen:
- continue
- seen.add(code)
- hexCode = "%04X" % code
- etree.SubElement(element, "unicode", dict(hex=hexCode))
+ unicodes = getattr(glyphObject, "unicodes", None)
+ if validate and isinstance(unicodes, int):
+ unicodes = [unicodes]
+ seen = set()
+ for code in unicodes:
+ if validate and not isinstance(code, int):
+ raise GlifLibError("unicode values must be int")
+ if code in seen:
+ continue
+ seen.add(code)
+ hexCode = "%04X" % code
+ etree.SubElement(element, "unicode", dict(hex=hexCode))
+
def _writeNote(glyphObject, element, validate):
- note = getattr(glyphObject, "note", None)
- if validate and not isinstance(note, str):
- raise GlifLibError("note attribute must be str")
- note = note.strip()
- note = "\n" + note + "\n"
- etree.SubElement(element, "note").text = note
+ note = getattr(glyphObject, "note", None)
+ if validate and not isinstance(note, str):
+ raise GlifLibError("note attribute must be str")
+ note = note.strip()
+ note = "\n" + note + "\n"
+ etree.SubElement(element, "note").text = note
+
def _writeImage(glyphObject, element, validate):
- image = getattr(glyphObject, "image", None)
- if validate and not imageValidator(image):
- raise GlifLibError("image attribute must be a dict or dict-like object with the proper structure.")
- attrs = OrderedDict([("fileName", image["fileName"])])
- for attr, default in _transformationInfo:
- value = image.get(attr, default)
- if value != default:
- attrs[attr] = repr(value)
- color = image.get("color")
- if color is not None:
- attrs["color"] = color
- etree.SubElement(element, "image", attrs)
+ image = getattr(glyphObject, "image", None)
+ if validate and not imageValidator(image):
+ raise GlifLibError(
+ "image attribute must be a dict or dict-like object with the proper structure."
+ )
+ attrs = OrderedDict([("fileName", image["fileName"])])
+ for attr, default in _transformationInfo:
+ value = image.get(attr, default)
+ if value != default:
+ attrs[attr] = repr(value)
+ color = image.get("color")
+ if color is not None:
+ attrs["color"] = color
+ etree.SubElement(element, "image", attrs)
+
def _writeGuidelines(glyphObject, element, identifiers, validate):
- guidelines = getattr(glyphObject, "guidelines", [])
- if validate and not guidelinesValidator(guidelines):
- raise GlifLibError("guidelines attribute does not have the proper structure.")
- for guideline in guidelines:
- attrs = OrderedDict()
- x = guideline.get("x")
- if x is not None:
- attrs["x"] = repr(x)
- y = guideline.get("y")
- if y is not None:
- attrs["y"] = repr(y)
- angle = guideline.get("angle")
- if angle is not None:
- attrs["angle"] = repr(angle)
- name = guideline.get("name")
- if name is not None:
- attrs["name"] = name
- color = guideline.get("color")
- if color is not None:
- attrs["color"] = color
- identifier = guideline.get("identifier")
- if identifier is not None:
- if validate and identifier in identifiers:
- raise GlifLibError("identifier used more than once: %s" % identifier)
- attrs["identifier"] = identifier
- identifiers.add(identifier)
- etree.SubElement(element, "guideline", attrs)
+ guidelines = getattr(glyphObject, "guidelines", [])
+ if validate and not guidelinesValidator(guidelines):
+ raise GlifLibError("guidelines attribute does not have the proper structure.")
+ for guideline in guidelines:
+ attrs = OrderedDict()
+ x = guideline.get("x")
+ if x is not None:
+ attrs["x"] = repr(x)
+ y = guideline.get("y")
+ if y is not None:
+ attrs["y"] = repr(y)
+ angle = guideline.get("angle")
+ if angle is not None:
+ attrs["angle"] = repr(angle)
+ name = guideline.get("name")
+ if name is not None:
+ attrs["name"] = name
+ color = guideline.get("color")
+ if color is not None:
+ attrs["color"] = color
+ identifier = guideline.get("identifier")
+ if identifier is not None:
+ if validate and identifier in identifiers:
+ raise GlifLibError("identifier used more than once: %s" % identifier)
+ attrs["identifier"] = identifier
+ identifiers.add(identifier)
+ etree.SubElement(element, "guideline", attrs)
+
def _writeAnchorsFormat1(pen, anchors, validate):
- if validate and not anchorsValidator(anchors):
- raise GlifLibError("anchors attribute does not have the proper structure.")
- for anchor in anchors:
- attrs = {}
- x = anchor["x"]
- attrs["x"] = repr(x)
- y = anchor["y"]
- attrs["y"] = repr(y)
- name = anchor.get("name")
- if name is not None:
- attrs["name"] = name
- pen.beginPath()
- pen.addPoint((x, y), segmentType="move", name=name)
- pen.endPath()
+ if validate and not anchorsValidator(anchors):
+ raise GlifLibError("anchors attribute does not have the proper structure.")
+ for anchor in anchors:
+ attrs = {}
+ x = anchor["x"]
+ attrs["x"] = repr(x)
+ y = anchor["y"]
+ attrs["y"] = repr(y)
+ name = anchor.get("name")
+ if name is not None:
+ attrs["name"] = name
+ pen.beginPath()
+ pen.addPoint((x, y), segmentType="move", name=name)
+ pen.endPath()
+
def _writeAnchors(glyphObject, element, identifiers, validate):
- anchors = getattr(glyphObject, "anchors", [])
- if validate and not anchorsValidator(anchors):
- raise GlifLibError("anchors attribute does not have the proper structure.")
- for anchor in anchors:
- attrs = OrderedDict()
- x = anchor["x"]
- attrs["x"] = repr(x)
- y = anchor["y"]
- attrs["y"] = repr(y)
- name = anchor.get("name")
- if name is not None:
- attrs["name"] = name
- color = anchor.get("color")
- if color is not None:
- attrs["color"] = color
- identifier = anchor.get("identifier")
- if identifier is not None:
- if validate and identifier in identifiers:
- raise GlifLibError("identifier used more than once: %s" % identifier)
- attrs["identifier"] = identifier
- identifiers.add(identifier)
- etree.SubElement(element, "anchor", attrs)
+ anchors = getattr(glyphObject, "anchors", [])
+ if validate and not anchorsValidator(anchors):
+ raise GlifLibError("anchors attribute does not have the proper structure.")
+ for anchor in anchors:
+ attrs = OrderedDict()
+ x = anchor["x"]
+ attrs["x"] = repr(x)
+ y = anchor["y"]
+ attrs["y"] = repr(y)
+ name = anchor.get("name")
+ if name is not None:
+ attrs["name"] = name
+ color = anchor.get("color")
+ if color is not None:
+ attrs["color"] = color
+ identifier = anchor.get("identifier")
+ if identifier is not None:
+ if validate and identifier in identifiers:
+ raise GlifLibError("identifier used more than once: %s" % identifier)
+ attrs["identifier"] = identifier
+ identifiers.add(identifier)
+ etree.SubElement(element, "anchor", attrs)
+
def _writeLib(glyphObject, element, validate):
- lib = getattr(glyphObject, "lib", None)
- if not lib:
- # don't write empty lib
- return
- if validate:
- valid, message = glyphLibValidator(lib)
- if not valid:
- raise GlifLibError(message)
- if not isinstance(lib, dict):
- lib = dict(lib)
- # plist inside GLIF begins with 2 levels of indentation
- e = plistlib.totree(lib, indent_level=2)
- etree.SubElement(element, "lib").append(e)
+ lib = getattr(glyphObject, "lib", None)
+ if not lib:
+ # don't write empty lib
+ return
+ if validate:
+ valid, message = glyphLibValidator(lib)
+ if not valid:
+ raise GlifLibError(message)
+ if not isinstance(lib, dict):
+ lib = dict(lib)
+ # plist inside GLIF begins with 2 levels of indentation
+ e = plistlib.totree(lib, indent_level=2)
+ etree.SubElement(element, "lib").append(e)
+
# -----------------------
# layerinfo.plist Support
# -----------------------
layerInfoVersion3ValueData = {
- "color" : dict(type=str, valueValidator=colorValidator),
- "lib" : dict(type=dict, valueValidator=genericTypeValidator)
+ "color": dict(type=str, valueValidator=colorValidator),
+ "lib": dict(type=dict, valueValidator=genericTypeValidator),
}
+
def validateLayerInfoVersion3ValueForAttribute(attr, value):
- """
- This performs very basic validation of the value for attribute
- following the UFO 3 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the value
- is of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- if attr not in layerInfoVersion3ValueData:
- return False
- dataValidationDict = layerInfoVersion3ValueData[attr]
- valueType = dataValidationDict.get("type")
- validator = dataValidationDict.get("valueValidator")
- valueOptions = dataValidationDict.get("valueOptions")
- # have specific options for the validator
- if valueOptions is not None:
- isValidValue = validator(value, valueOptions)
- # no specific options
- else:
- if validator == genericTypeValidator:
- isValidValue = validator(value, valueType)
- else:
- isValidValue = validator(value)
- return isValidValue
+ """
+ This performs very basic validation of the value for attribute
+ following the UFO 3 fontinfo.plist specification. The results
+ of this should not be interpreted as *correct* for the font
+ that they are part of. This merely indicates that the value
+ is of the proper type and, where the specification defines
+ a set range of possible values for an attribute, that the
+ value is in the accepted range.
+ """
+ if attr not in layerInfoVersion3ValueData:
+ return False
+ dataValidationDict = layerInfoVersion3ValueData[attr]
+ valueType = dataValidationDict.get("type")
+ validator = dataValidationDict.get("valueValidator")
+ valueOptions = dataValidationDict.get("valueOptions")
+ # have specific options for the validator
+ if valueOptions is not None:
+ isValidValue = validator(value, valueOptions)
+ # no specific options
+ else:
+ if validator == genericTypeValidator:
+ isValidValue = validator(value, valueType)
+ else:
+ isValidValue = validator(value)
+ return isValidValue
+
def validateLayerInfoVersion3Data(infoData):
- """
- This performs very basic validation of the value for infoData
- following the UFO 3 layerinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the values
- are of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- for attr, value in infoData.items():
- if attr not in layerInfoVersion3ValueData:
- raise GlifLibError("Unknown attribute %s." % attr)
- isValidValue = validateLayerInfoVersion3ValueForAttribute(attr, value)
- if not isValidValue:
- raise GlifLibError(f"Invalid value for attribute {attr} ({value!r}).")
- return infoData
+ """
+ This performs very basic validation of the value for infoData
+ following the UFO 3 layerinfo.plist specification. The results
+ of this should not be interpretted as *correct* for the font
+ that they are part of. This merely indicates that the values
+ are of the proper type and, where the specification defines
+ a set range of possible values for an attribute, that the
+ value is in the accepted range.
+ """
+ for attr, value in infoData.items():
+ if attr not in layerInfoVersion3ValueData:
+ raise GlifLibError("Unknown attribute %s." % attr)
+ isValidValue = validateLayerInfoVersion3ValueForAttribute(attr, value)
+ if not isValidValue:
+ raise GlifLibError(f"Invalid value for attribute {attr} ({value!r}).")
+ return infoData
+
# -----------------
# GLIF Tree Support
# -----------------
+
def _glifTreeFromFile(aFile):
- if etree._have_lxml:
- tree = etree.parse(aFile, parser=etree.XMLParser(remove_comments=True))
- else:
- tree = etree.parse(aFile)
- root = tree.getroot()
- if root.tag != "glyph":
- raise GlifLibError("The GLIF is not properly formatted.")
- if root.text and root.text.strip() != '':
- raise GlifLibError("Invalid GLIF structure.")
- return root
+ if etree._have_lxml:
+ tree = etree.parse(aFile, parser=etree.XMLParser(remove_comments=True))
+ else:
+ tree = etree.parse(aFile)
+ root = tree.getroot()
+ if root.tag != "glyph":
+ raise GlifLibError("The GLIF is not properly formatted.")
+ if root.text and root.text.strip() != "":
+ raise GlifLibError("Invalid GLIF structure.")
+ return root
def _glifTreeFromString(aString):
- data = tobytes(aString, encoding="utf-8")
- if etree._have_lxml:
- root = etree.fromstring(data, parser=etree.XMLParser(remove_comments=True))
- else:
- root = etree.fromstring(data)
- if root.tag != "glyph":
- raise GlifLibError("The GLIF is not properly formatted.")
- if root.text and root.text.strip() != '':
- raise GlifLibError("Invalid GLIF structure.")
- return root
+ data = tobytes(aString, encoding="utf-8")
+ try:
+ if etree._have_lxml:
+ root = etree.fromstring(data, parser=etree.XMLParser(remove_comments=True))
+ else:
+ root = etree.fromstring(data)
+ except Exception as etree_exception:
+ raise GlifLibError("GLIF contains invalid XML.") from etree_exception
+
+ if root.tag != "glyph":
+ raise GlifLibError("The GLIF is not properly formatted.")
+ if root.text and root.text.strip() != "":
+ raise GlifLibError("Invalid GLIF structure.")
+ return root
def _readGlyphFromTree(
- tree,
- glyphObject=None,
- pointPen=None,
- formatVersions=GLIFFormatVersion.supported_versions(),
- validate=True,
+ tree,
+ glyphObject=None,
+ pointPen=None,
+ formatVersions=GLIFFormatVersion.supported_versions(),
+ validate=True,
):
- # check the format version
- formatVersionMajor = tree.get("format")
- if validate and formatVersionMajor is None:
- raise GlifLibError("Unspecified format version in GLIF.")
- formatVersionMinor = tree.get("formatMinor", 0)
- try:
- formatVersion = GLIFFormatVersion((int(formatVersionMajor), int(formatVersionMinor)))
- except ValueError as e:
- msg = "Unsupported GLIF format: %s.%s" % (formatVersionMajor, formatVersionMinor)
- if validate:
- from fontTools.ufoLib.errors import UnsupportedGLIFFormat
-
- raise UnsupportedGLIFFormat(msg) from e
- # warn but continue using the latest supported format
- formatVersion = GLIFFormatVersion.default()
- logger.warning(
- "%s. Assuming the latest supported version (%s). "
- "Some data may be skipped or parsed incorrectly.",
- msg,
- formatVersion,
- )
-
- if validate and formatVersion not in formatVersions:
- raise GlifLibError(f"Forbidden GLIF format version: {formatVersion!s}")
-
- try:
- readGlyphFromTree = _READ_GLYPH_FROM_TREE_FUNCS[formatVersion]
- except KeyError:
- raise NotImplementedError(formatVersion)
-
- readGlyphFromTree(
- tree=tree,
- glyphObject=glyphObject,
- pointPen=pointPen,
- validate=validate,
- formatMinor=formatVersion.minor,
- )
-
-
-def _readGlyphFromTreeFormat1(tree, glyphObject=None, pointPen=None, validate=None, **kwargs):
- # get the name
- _readName(glyphObject, tree, validate)
- # populate the sub elements
- unicodes = []
- haveSeenAdvance = haveSeenOutline = haveSeenLib = haveSeenNote = False
- for element in tree:
- if element.tag == "outline":
- if validate:
- if haveSeenOutline:
- raise GlifLibError("The outline element occurs more than once.")
- if element.attrib:
- raise GlifLibError("The outline element contains unknown attributes.")
- if element.text and element.text.strip() != '':
- raise GlifLibError("Invalid outline structure.")
- haveSeenOutline = True
- buildOutlineFormat1(glyphObject, pointPen, element, validate)
- elif glyphObject is None:
- continue
- elif element.tag == "advance":
- if validate and haveSeenAdvance:
- raise GlifLibError("The advance element occurs more than once.")
- haveSeenAdvance = True
- _readAdvance(glyphObject, element)
- elif element.tag == "unicode":
- try:
- v = element.get("hex")
- v = int(v, 16)
- if v not in unicodes:
- unicodes.append(v)
- except ValueError:
- raise GlifLibError("Illegal value for hex attribute of unicode element.")
- elif element.tag == "note":
- if validate and haveSeenNote:
- raise GlifLibError("The note element occurs more than once.")
- haveSeenNote = True
- _readNote(glyphObject, element)
- elif element.tag == "lib":
- if validate and haveSeenLib:
- raise GlifLibError("The lib element occurs more than once.")
- haveSeenLib = True
- _readLib(glyphObject, element, validate)
- else:
- raise GlifLibError("Unknown element in GLIF: %s" % element)
- # set the collected unicodes
- if unicodes:
- _relaxedSetattr(glyphObject, "unicodes", unicodes)
+ # check the format version
+ formatVersionMajor = tree.get("format")
+ if validate and formatVersionMajor is None:
+ raise GlifLibError("Unspecified format version in GLIF.")
+ formatVersionMinor = tree.get("formatMinor", 0)
+ try:
+ formatVersion = GLIFFormatVersion(
+ (int(formatVersionMajor), int(formatVersionMinor))
+ )
+ except ValueError as e:
+ msg = "Unsupported GLIF format: %s.%s" % (
+ formatVersionMajor,
+ formatVersionMinor,
+ )
+ if validate:
+ from fontTools.ufoLib.errors import UnsupportedGLIFFormat
+
+ raise UnsupportedGLIFFormat(msg) from e
+ # warn but continue using the latest supported format
+ formatVersion = GLIFFormatVersion.default()
+ logger.warning(
+ "%s. Assuming the latest supported version (%s). "
+ "Some data may be skipped or parsed incorrectly.",
+ msg,
+ formatVersion,
+ )
+
+ if validate and formatVersion not in formatVersions:
+ raise GlifLibError(f"Forbidden GLIF format version: {formatVersion!s}")
+
+ try:
+ readGlyphFromTree = _READ_GLYPH_FROM_TREE_FUNCS[formatVersion]
+ except KeyError:
+ raise NotImplementedError(formatVersion)
+
+ readGlyphFromTree(
+ tree=tree,
+ glyphObject=glyphObject,
+ pointPen=pointPen,
+ validate=validate,
+ formatMinor=formatVersion.minor,
+ )
+
+
+def _readGlyphFromTreeFormat1(
+ tree, glyphObject=None, pointPen=None, validate=None, **kwargs
+):
+ # get the name
+ _readName(glyphObject, tree, validate)
+ # populate the sub elements
+ unicodes = []
+ haveSeenAdvance = haveSeenOutline = haveSeenLib = haveSeenNote = False
+ for element in tree:
+ if element.tag == "outline":
+ if validate:
+ if haveSeenOutline:
+ raise GlifLibError("The outline element occurs more than once.")
+ if element.attrib:
+ raise GlifLibError(
+ "The outline element contains unknown attributes."
+ )
+ if element.text and element.text.strip() != "":
+ raise GlifLibError("Invalid outline structure.")
+ haveSeenOutline = True
+ buildOutlineFormat1(glyphObject, pointPen, element, validate)
+ elif glyphObject is None:
+ continue
+ elif element.tag == "advance":
+ if validate and haveSeenAdvance:
+ raise GlifLibError("The advance element occurs more than once.")
+ haveSeenAdvance = True
+ _readAdvance(glyphObject, element)
+ elif element.tag == "unicode":
+ try:
+ v = element.get("hex")
+ v = int(v, 16)
+ if v not in unicodes:
+ unicodes.append(v)
+ except ValueError:
+ raise GlifLibError(
+ "Illegal value for hex attribute of unicode element."
+ )
+ elif element.tag == "note":
+ if validate and haveSeenNote:
+ raise GlifLibError("The note element occurs more than once.")
+ haveSeenNote = True
+ _readNote(glyphObject, element)
+ elif element.tag == "lib":
+ if validate and haveSeenLib:
+ raise GlifLibError("The lib element occurs more than once.")
+ haveSeenLib = True
+ _readLib(glyphObject, element, validate)
+ else:
+ raise GlifLibError("Unknown element in GLIF: %s" % element)
+ # set the collected unicodes
+ if unicodes:
+ _relaxedSetattr(glyphObject, "unicodes", unicodes)
+
def _readGlyphFromTreeFormat2(
- tree, glyphObject=None, pointPen=None, validate=None, formatMinor=0
+ tree, glyphObject=None, pointPen=None, validate=None, formatMinor=0
):
- # get the name
- _readName(glyphObject, tree, validate)
- # populate the sub elements
- unicodes = []
- guidelines = []
- anchors = []
- haveSeenAdvance = haveSeenImage = haveSeenOutline = haveSeenLib = haveSeenNote = False
- identifiers = set()
- for element in tree:
- if element.tag == "outline":
- if validate:
- if haveSeenOutline:
- raise GlifLibError("The outline element occurs more than once.")
- if element.attrib:
- raise GlifLibError("The outline element contains unknown attributes.")
- if element.text and element.text.strip() != '':
- raise GlifLibError("Invalid outline structure.")
- haveSeenOutline = True
- if pointPen is not None:
- buildOutlineFormat2(glyphObject, pointPen, element, identifiers, validate)
- elif glyphObject is None:
- continue
- elif element.tag == "advance":
- if validate and haveSeenAdvance:
- raise GlifLibError("The advance element occurs more than once.")
- haveSeenAdvance = True
- _readAdvance(glyphObject, element)
- elif element.tag == "unicode":
- try:
- v = element.get("hex")
- v = int(v, 16)
- if v not in unicodes:
- unicodes.append(v)
- except ValueError:
- raise GlifLibError("Illegal value for hex attribute of unicode element.")
- elif element.tag == "guideline":
- if validate and len(element):
- raise GlifLibError("Unknown children in guideline element.")
- attrib = dict(element.attrib)
- for attr in ("x", "y", "angle"):
- if attr in attrib:
- attrib[attr] = _number(attrib[attr])
- guidelines.append(attrib)
- elif element.tag == "anchor":
- if validate and len(element):
- raise GlifLibError("Unknown children in anchor element.")
- attrib = dict(element.attrib)
- for attr in ("x", "y"):
- if attr in element.attrib:
- attrib[attr] = _number(attrib[attr])
- anchors.append(attrib)
- elif element.tag == "image":
- if validate:
- if haveSeenImage:
- raise GlifLibError("The image element occurs more than once.")
- if len(element):
- raise GlifLibError("Unknown children in image element.")
- haveSeenImage = True
- _readImage(glyphObject, element, validate)
- elif element.tag == "note":
- if validate and haveSeenNote:
- raise GlifLibError("The note element occurs more than once.")
- haveSeenNote = True
- _readNote(glyphObject, element)
- elif element.tag == "lib":
- if validate and haveSeenLib:
- raise GlifLibError("The lib element occurs more than once.")
- haveSeenLib = True
- _readLib(glyphObject, element, validate)
- else:
- raise GlifLibError("Unknown element in GLIF: %s" % element)
- # set the collected unicodes
- if unicodes:
- _relaxedSetattr(glyphObject, "unicodes", unicodes)
- # set the collected guidelines
- if guidelines:
- if validate and not guidelinesValidator(guidelines, identifiers):
- raise GlifLibError("The guidelines are improperly formatted.")
- _relaxedSetattr(glyphObject, "guidelines", guidelines)
- # set the collected anchors
- if anchors:
- if validate and not anchorsValidator(anchors, identifiers):
- raise GlifLibError("The anchors are improperly formatted.")
- _relaxedSetattr(glyphObject, "anchors", anchors)
+ # get the name
+ _readName(glyphObject, tree, validate)
+ # populate the sub elements
+ unicodes = []
+ guidelines = []
+ anchors = []
+ haveSeenAdvance = (
+ haveSeenImage
+ ) = haveSeenOutline = haveSeenLib = haveSeenNote = False
+ identifiers = set()
+ for element in tree:
+ if element.tag == "outline":
+ if validate:
+ if haveSeenOutline:
+ raise GlifLibError("The outline element occurs more than once.")
+ if element.attrib:
+ raise GlifLibError(
+ "The outline element contains unknown attributes."
+ )
+ if element.text and element.text.strip() != "":
+ raise GlifLibError("Invalid outline structure.")
+ haveSeenOutline = True
+ if pointPen is not None:
+ buildOutlineFormat2(
+ glyphObject, pointPen, element, identifiers, validate
+ )
+ elif glyphObject is None:
+ continue
+ elif element.tag == "advance":
+ if validate and haveSeenAdvance:
+ raise GlifLibError("The advance element occurs more than once.")
+ haveSeenAdvance = True
+ _readAdvance(glyphObject, element)
+ elif element.tag == "unicode":
+ try:
+ v = element.get("hex")
+ v = int(v, 16)
+ if v not in unicodes:
+ unicodes.append(v)
+ except ValueError:
+ raise GlifLibError(
+ "Illegal value for hex attribute of unicode element."
+ )
+ elif element.tag == "guideline":
+ if validate and len(element):
+ raise GlifLibError("Unknown children in guideline element.")
+ attrib = dict(element.attrib)
+ for attr in ("x", "y", "angle"):
+ if attr in attrib:
+ attrib[attr] = _number(attrib[attr])
+ guidelines.append(attrib)
+ elif element.tag == "anchor":
+ if validate and len(element):
+ raise GlifLibError("Unknown children in anchor element.")
+ attrib = dict(element.attrib)
+ for attr in ("x", "y"):
+ if attr in element.attrib:
+ attrib[attr] = _number(attrib[attr])
+ anchors.append(attrib)
+ elif element.tag == "image":
+ if validate:
+ if haveSeenImage:
+ raise GlifLibError("The image element occurs more than once.")
+ if len(element):
+ raise GlifLibError("Unknown children in image element.")
+ haveSeenImage = True
+ _readImage(glyphObject, element, validate)
+ elif element.tag == "note":
+ if validate and haveSeenNote:
+ raise GlifLibError("The note element occurs more than once.")
+ haveSeenNote = True
+ _readNote(glyphObject, element)
+ elif element.tag == "lib":
+ if validate and haveSeenLib:
+ raise GlifLibError("The lib element occurs more than once.")
+ haveSeenLib = True
+ _readLib(glyphObject, element, validate)
+ else:
+ raise GlifLibError("Unknown element in GLIF: %s" % element)
+ # set the collected unicodes
+ if unicodes:
+ _relaxedSetattr(glyphObject, "unicodes", unicodes)
+ # set the collected guidelines
+ if guidelines:
+ if validate and not guidelinesValidator(guidelines, identifiers):
+ raise GlifLibError("The guidelines are improperly formatted.")
+ _relaxedSetattr(glyphObject, "guidelines", guidelines)
+ # set the collected anchors
+ if anchors:
+ if validate and not anchorsValidator(anchors, identifiers):
+ raise GlifLibError("The anchors are improperly formatted.")
+ _relaxedSetattr(glyphObject, "anchors", anchors)
_READ_GLYPH_FROM_TREE_FUNCS = {
- GLIFFormatVersion.FORMAT_1_0: _readGlyphFromTreeFormat1,
- GLIFFormatVersion.FORMAT_2_0: _readGlyphFromTreeFormat2,
+ GLIFFormatVersion.FORMAT_1_0: _readGlyphFromTreeFormat1,
+ GLIFFormatVersion.FORMAT_2_0: _readGlyphFromTreeFormat2,
}
def _readName(glyphObject, root, validate):
- glyphName = root.get("name")
- if validate and not glyphName:
- raise GlifLibError("Empty glyph name in GLIF.")
- if glyphName and glyphObject is not None:
- _relaxedSetattr(glyphObject, "name", glyphName)
+ glyphName = root.get("name")
+ if validate and not glyphName:
+ raise GlifLibError("Empty glyph name in GLIF.")
+ if glyphName and glyphObject is not None:
+ _relaxedSetattr(glyphObject, "name", glyphName)
+
def _readAdvance(glyphObject, advance):
- width = _number(advance.get("width", 0))
- _relaxedSetattr(glyphObject, "width", width)
- height = _number(advance.get("height", 0))
- _relaxedSetattr(glyphObject, "height", height)
+ width = _number(advance.get("width", 0))
+ _relaxedSetattr(glyphObject, "width", width)
+ height = _number(advance.get("height", 0))
+ _relaxedSetattr(glyphObject, "height", height)
+
def _readNote(glyphObject, note):
- lines = note.text.split("\n")
- note = "\n".join(line.strip() for line in lines if line.strip())
- _relaxedSetattr(glyphObject, "note", note)
+ lines = note.text.split("\n")
+ note = "\n".join(line.strip() for line in lines if line.strip())
+ _relaxedSetattr(glyphObject, "note", note)
+
def _readLib(glyphObject, lib, validate):
- assert len(lib) == 1
- child = lib[0]
- plist = plistlib.fromtree(child)
- if validate:
- valid, message = glyphLibValidator(plist)
- if not valid:
- raise GlifLibError(message)
- _relaxedSetattr(glyphObject, "lib", plist)
+ assert len(lib) == 1
+ child = lib[0]
+ plist = plistlib.fromtree(child)
+ if validate:
+ valid, message = glyphLibValidator(plist)
+ if not valid:
+ raise GlifLibError(message)
+ _relaxedSetattr(glyphObject, "lib", plist)
+
def _readImage(glyphObject, image, validate):
- imageData = dict(image.attrib)
- for attr, default in _transformationInfo:
- value = imageData.get(attr, default)
- imageData[attr] = _number(value)
- if validate and not imageValidator(imageData):
- raise GlifLibError("The image element is not properly formatted.")
- _relaxedSetattr(glyphObject, "image", imageData)
+ imageData = dict(image.attrib)
+ for attr, default in _transformationInfo:
+ value = imageData.get(attr, default)
+ imageData[attr] = _number(value)
+ if validate and not imageValidator(imageData):
+ raise GlifLibError("The image element is not properly formatted.")
+ _relaxedSetattr(glyphObject, "image", imageData)
+
# ----------------
# GLIF to PointPen
# ----------------
contourAttributesFormat2 = {"identifier"}
-componentAttributesFormat1 = {"base", "xScale", "xyScale", "yxScale", "yScale", "xOffset", "yOffset"}
+componentAttributesFormat1 = {
+ "base",
+ "xScale",
+ "xyScale",
+ "yxScale",
+ "yScale",
+ "xOffset",
+ "yOffset",
+}
componentAttributesFormat2 = componentAttributesFormat1 | {"identifier"}
pointAttributesFormat1 = {"x", "y", "type", "smooth", "name"}
pointAttributesFormat2 = pointAttributesFormat1 | {"identifier"}
@@ -1277,303 +1386,357 @@ pointTypeOptions = {"move", "line", "offcurve", "curve", "qcurve"}
# format 1
+
def buildOutlineFormat1(glyphObject, pen, outline, validate):
- anchors = []
- for element in outline:
- if element.tag == "contour":
- if len(element) == 1:
- point = element[0]
- if point.tag == "point":
- anchor = _buildAnchorFormat1(point, validate)
- if anchor is not None:
- anchors.append(anchor)
- continue
- if pen is not None:
- _buildOutlineContourFormat1(pen, element, validate)
- elif element.tag == "component":
- if pen is not None:
- _buildOutlineComponentFormat1(pen, element, validate)
- else:
- raise GlifLibError("Unknown element in outline element: %s" % element)
- if glyphObject is not None and anchors:
- if validate and not anchorsValidator(anchors):
- raise GlifLibError("GLIF 1 anchors are not properly formatted.")
- _relaxedSetattr(glyphObject, "anchors", anchors)
+ anchors = []
+ for element in outline:
+ if element.tag == "contour":
+ if len(element) == 1:
+ point = element[0]
+ if point.tag == "point":
+ anchor = _buildAnchorFormat1(point, validate)
+ if anchor is not None:
+ anchors.append(anchor)
+ continue
+ if pen is not None:
+ _buildOutlineContourFormat1(pen, element, validate)
+ elif element.tag == "component":
+ if pen is not None:
+ _buildOutlineComponentFormat1(pen, element, validate)
+ else:
+ raise GlifLibError("Unknown element in outline element: %s" % element)
+ if glyphObject is not None and anchors:
+ if validate and not anchorsValidator(anchors):
+ raise GlifLibError("GLIF 1 anchors are not properly formatted.")
+ _relaxedSetattr(glyphObject, "anchors", anchors)
+
def _buildAnchorFormat1(point, validate):
- if point.get("type") != "move":
- return None
- name = point.get("name")
- if name is None:
- return None
- x = point.get("x")
- y = point.get("y")
- if validate and x is None:
- raise GlifLibError("Required x attribute is missing in point element.")
- if validate and y is None:
- raise GlifLibError("Required y attribute is missing in point element.")
- x = _number(x)
- y = _number(y)
- anchor = dict(x=x, y=y, name=name)
- return anchor
+ if point.get("type") != "move":
+ return None
+ name = point.get("name")
+ if name is None:
+ return None
+ x = point.get("x")
+ y = point.get("y")
+ if validate and x is None:
+ raise GlifLibError("Required x attribute is missing in point element.")
+ if validate and y is None:
+ raise GlifLibError("Required y attribute is missing in point element.")
+ x = _number(x)
+ y = _number(y)
+ anchor = dict(x=x, y=y, name=name)
+ return anchor
+
def _buildOutlineContourFormat1(pen, contour, validate):
- if validate and contour.attrib:
- raise GlifLibError("Unknown attributes in contour element.")
- pen.beginPath()
- if len(contour):
- massaged = _validateAndMassagePointStructures(contour, pointAttributesFormat1, openContourOffCurveLeniency=True, validate=validate)
- _buildOutlinePointsFormat1(pen, massaged)
- pen.endPath()
+ if validate and contour.attrib:
+ raise GlifLibError("Unknown attributes in contour element.")
+ pen.beginPath()
+ if len(contour):
+ massaged = _validateAndMassagePointStructures(
+ contour,
+ pointAttributesFormat1,
+ openContourOffCurveLeniency=True,
+ validate=validate,
+ )
+ _buildOutlinePointsFormat1(pen, massaged)
+ pen.endPath()
+
def _buildOutlinePointsFormat1(pen, contour):
- for point in contour:
- x = point["x"]
- y = point["y"]
- segmentType = point["segmentType"]
- smooth = point["smooth"]
- name = point["name"]
- pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name)
+ for point in contour:
+ x = point["x"]
+ y = point["y"]
+ segmentType = point["segmentType"]
+ smooth = point["smooth"]
+ name = point["name"]
+ pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name)
+
def _buildOutlineComponentFormat1(pen, component, validate):
- if validate:
- if len(component):
- raise GlifLibError("Unknown child elements of component element.")
- for attr in component.attrib.keys():
- if attr not in componentAttributesFormat1:
- raise GlifLibError("Unknown attribute in component element: %s" % attr)
- baseGlyphName = component.get("base")
- if validate and baseGlyphName is None:
- raise GlifLibError("The base attribute is not defined in the component.")
- transformation = []
- for attr, default in _transformationInfo:
- value = component.get(attr)
- if value is None:
- value = default
- else:
- value = _number(value)
- transformation.append(value)
- pen.addComponent(baseGlyphName, tuple(transformation))
+ if validate:
+ if len(component):
+ raise GlifLibError("Unknown child elements of component element.")
+ for attr in component.attrib.keys():
+ if attr not in componentAttributesFormat1:
+ raise GlifLibError("Unknown attribute in component element: %s" % attr)
+ baseGlyphName = component.get("base")
+ if validate and baseGlyphName is None:
+ raise GlifLibError("The base attribute is not defined in the component.")
+ transformation = []
+ for attr, default in _transformationInfo:
+ value = component.get(attr)
+ if value is None:
+ value = default
+ else:
+ value = _number(value)
+ transformation.append(value)
+ pen.addComponent(baseGlyphName, tuple(transformation))
+
# format 2
+
def buildOutlineFormat2(glyphObject, pen, outline, identifiers, validate):
- for element in outline:
- if element.tag == "contour":
- _buildOutlineContourFormat2(pen, element, identifiers, validate)
- elif element.tag == "component":
- _buildOutlineComponentFormat2(pen, element, identifiers, validate)
- else:
- raise GlifLibError("Unknown element in outline element: %s" % element.tag)
+ for element in outline:
+ if element.tag == "contour":
+ _buildOutlineContourFormat2(pen, element, identifiers, validate)
+ elif element.tag == "component":
+ _buildOutlineComponentFormat2(pen, element, identifiers, validate)
+ else:
+ raise GlifLibError("Unknown element in outline element: %s" % element.tag)
+
def _buildOutlineContourFormat2(pen, contour, identifiers, validate):
- if validate:
- for attr in contour.attrib.keys():
- if attr not in contourAttributesFormat2:
- raise GlifLibError("Unknown attribute in contour element: %s" % attr)
- identifier = contour.get("identifier")
- if identifier is not None:
- if validate:
- if identifier in identifiers:
- raise GlifLibError("The identifier %s is used more than once." % identifier)
- if not identifierValidator(identifier):
- raise GlifLibError("The contour identifier %s is not valid." % identifier)
- identifiers.add(identifier)
- try:
- pen.beginPath(identifier=identifier)
- except TypeError:
- pen.beginPath()
- warn("The beginPath method needs an identifier kwarg. The contour's identifier value has been discarded.", DeprecationWarning)
- if len(contour):
- massaged = _validateAndMassagePointStructures(contour, pointAttributesFormat2, validate=validate)
- _buildOutlinePointsFormat2(pen, massaged, identifiers, validate)
- pen.endPath()
+ if validate:
+ for attr in contour.attrib.keys():
+ if attr not in contourAttributesFormat2:
+ raise GlifLibError("Unknown attribute in contour element: %s" % attr)
+ identifier = contour.get("identifier")
+ if identifier is not None:
+ if validate:
+ if identifier in identifiers:
+ raise GlifLibError(
+ "The identifier %s is used more than once." % identifier
+ )
+ if not identifierValidator(identifier):
+ raise GlifLibError(
+ "The contour identifier %s is not valid." % identifier
+ )
+ identifiers.add(identifier)
+ try:
+ pen.beginPath(identifier=identifier)
+ except TypeError:
+ pen.beginPath()
+ warn(
+ "The beginPath method needs an identifier kwarg. The contour's identifier value has been discarded.",
+ DeprecationWarning,
+ )
+ if len(contour):
+ massaged = _validateAndMassagePointStructures(
+ contour, pointAttributesFormat2, validate=validate
+ )
+ _buildOutlinePointsFormat2(pen, massaged, identifiers, validate)
+ pen.endPath()
+
def _buildOutlinePointsFormat2(pen, contour, identifiers, validate):
- for point in contour:
- x = point["x"]
- y = point["y"]
- segmentType = point["segmentType"]
- smooth = point["smooth"]
- name = point["name"]
- identifier = point.get("identifier")
- if identifier is not None:
- if validate:
- if identifier in identifiers:
- raise GlifLibError("The identifier %s is used more than once." % identifier)
- if not identifierValidator(identifier):
- raise GlifLibError("The identifier %s is not valid." % identifier)
- identifiers.add(identifier)
- try:
- pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name, identifier=identifier)
- except TypeError:
- pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name)
- warn("The addPoint method needs an identifier kwarg. The point's identifier value has been discarded.", DeprecationWarning)
+ for point in contour:
+ x = point["x"]
+ y = point["y"]
+ segmentType = point["segmentType"]
+ smooth = point["smooth"]
+ name = point["name"]
+ identifier = point.get("identifier")
+ if identifier is not None:
+ if validate:
+ if identifier in identifiers:
+ raise GlifLibError(
+ "The identifier %s is used more than once." % identifier
+ )
+ if not identifierValidator(identifier):
+ raise GlifLibError("The identifier %s is not valid." % identifier)
+ identifiers.add(identifier)
+ try:
+ pen.addPoint(
+ (x, y),
+ segmentType=segmentType,
+ smooth=smooth,
+ name=name,
+ identifier=identifier,
+ )
+ except TypeError:
+ pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name)
+ warn(
+ "The addPoint method needs an identifier kwarg. The point's identifier value has been discarded.",
+ DeprecationWarning,
+ )
+
def _buildOutlineComponentFormat2(pen, component, identifiers, validate):
- if validate:
- if len(component):
- raise GlifLibError("Unknown child elements of component element.")
- for attr in component.attrib.keys():
- if attr not in componentAttributesFormat2:
- raise GlifLibError("Unknown attribute in component element: %s" % attr)
- baseGlyphName = component.get("base")
- if validate and baseGlyphName is None:
- raise GlifLibError("The base attribute is not defined in the component.")
- transformation = []
- for attr, default in _transformationInfo:
- value = component.get(attr)
- if value is None:
- value = default
- else:
- value = _number(value)
- transformation.append(value)
- identifier = component.get("identifier")
- if identifier is not None:
- if validate:
- if identifier in identifiers:
- raise GlifLibError("The identifier %s is used more than once." % identifier)
- if validate and not identifierValidator(identifier):
- raise GlifLibError("The identifier %s is not valid." % identifier)
- identifiers.add(identifier)
- try:
- pen.addComponent(baseGlyphName, tuple(transformation), identifier=identifier)
- except TypeError:
- pen.addComponent(baseGlyphName, tuple(transformation))
- warn("The addComponent method needs an identifier kwarg. The component's identifier value has been discarded.", DeprecationWarning)
+ if validate:
+ if len(component):
+ raise GlifLibError("Unknown child elements of component element.")
+ for attr in component.attrib.keys():
+ if attr not in componentAttributesFormat2:
+ raise GlifLibError("Unknown attribute in component element: %s" % attr)
+ baseGlyphName = component.get("base")
+ if validate and baseGlyphName is None:
+ raise GlifLibError("The base attribute is not defined in the component.")
+ transformation = []
+ for attr, default in _transformationInfo:
+ value = component.get(attr)
+ if value is None:
+ value = default
+ else:
+ value = _number(value)
+ transformation.append(value)
+ identifier = component.get("identifier")
+ if identifier is not None:
+ if validate:
+ if identifier in identifiers:
+ raise GlifLibError(
+ "The identifier %s is used more than once." % identifier
+ )
+ if validate and not identifierValidator(identifier):
+ raise GlifLibError("The identifier %s is not valid." % identifier)
+ identifiers.add(identifier)
+ try:
+ pen.addComponent(baseGlyphName, tuple(transformation), identifier=identifier)
+ except TypeError:
+ pen.addComponent(baseGlyphName, tuple(transformation))
+ warn(
+ "The addComponent method needs an identifier kwarg. The component's identifier value has been discarded.",
+ DeprecationWarning,
+ )
+
# all formats
-def _validateAndMassagePointStructures(contour, pointAttributes, openContourOffCurveLeniency=False, validate=True):
- if not len(contour):
- return
- # store some data for later validation
- lastOnCurvePoint = None
- haveOffCurvePoint = False
- # validate and massage the individual point elements
- massaged = []
- for index, element in enumerate(contour):
- # not <point>
- if element.tag != "point":
- raise GlifLibError("Unknown child element (%s) of contour element." % element.tag)
- point = dict(element.attrib)
- massaged.append(point)
- if validate:
- # unknown attributes
- for attr in point.keys():
- if attr not in pointAttributes:
- raise GlifLibError("Unknown attribute in point element: %s" % attr)
- # search for unknown children
- if len(element):
- raise GlifLibError("Unknown child elements in point element.")
- # x and y are required
- for attr in ("x", "y"):
- try:
- point[attr] = _number(point[attr])
- except KeyError as e:
- raise GlifLibError(f"Required {attr} attribute is missing in point element.") from e
- # segment type
- pointType = point.pop("type", "offcurve")
- if validate and pointType not in pointTypeOptions:
- raise GlifLibError("Unknown point type: %s" % pointType)
- if pointType == "offcurve":
- pointType = None
- point["segmentType"] = pointType
- if pointType is None:
- haveOffCurvePoint = True
- else:
- lastOnCurvePoint = index
- # move can only occur as the first point
- if validate and pointType == "move" and index != 0:
- raise GlifLibError("A move point occurs after the first point in the contour.")
- # smooth is optional
- smooth = point.get("smooth", "no")
- if validate and smooth is not None:
- if smooth not in pointSmoothOptions:
- raise GlifLibError("Unknown point smooth value: %s" % smooth)
- smooth = smooth == "yes"
- point["smooth"] = smooth
- # smooth can only be applied to curve and qcurve
- if validate and smooth and pointType is None:
- raise GlifLibError("smooth attribute set in an offcurve point.")
- # name is optional
- if "name" not in element.attrib:
- point["name"] = None
- if openContourOffCurveLeniency:
- # remove offcurves that precede a move. this is technically illegal,
- # but we let it slide because there are fonts out there in the wild like this.
- if massaged[0]["segmentType"] == "move":
- count = 0
- for point in reversed(massaged):
- if point["segmentType"] is None:
- count += 1
- else:
- break
- if count:
- massaged = massaged[:-count]
- # validate the off-curves in the segments
- if validate and haveOffCurvePoint and lastOnCurvePoint is not None:
- # we only care about how many offCurves there are before an onCurve
- # filter out the trailing offCurves
- offCurvesCount = len(massaged) - 1 - lastOnCurvePoint
- for point in massaged:
- segmentType = point["segmentType"]
- if segmentType is None:
- offCurvesCount += 1
- else:
- if offCurvesCount:
- # move and line can't be preceded by off-curves
- if segmentType == "move":
- # this will have been filtered out already
- raise GlifLibError("move can not have an offcurve.")
- elif segmentType == "line":
- raise GlifLibError("line can not have an offcurve.")
- elif segmentType == "curve":
- if offCurvesCount > 2:
- raise GlifLibError("Too many offcurves defined for curve.")
- elif segmentType == "qcurve":
- pass
- else:
- # unknown segment type. it'll be caught later.
- pass
- offCurvesCount = 0
- return massaged
+
+def _validateAndMassagePointStructures(
+ contour, pointAttributes, openContourOffCurveLeniency=False, validate=True
+):
+ if not len(contour):
+ return
+ # store some data for later validation
+ lastOnCurvePoint = None
+ haveOffCurvePoint = False
+ # validate and massage the individual point elements
+ massaged = []
+ for index, element in enumerate(contour):
+ # not <point>
+ if element.tag != "point":
+ raise GlifLibError(
+ "Unknown child element (%s) of contour element." % element.tag
+ )
+ point = dict(element.attrib)
+ massaged.append(point)
+ if validate:
+ # unknown attributes
+ for attr in point.keys():
+ if attr not in pointAttributes:
+ raise GlifLibError("Unknown attribute in point element: %s" % attr)
+ # search for unknown children
+ if len(element):
+ raise GlifLibError("Unknown child elements in point element.")
+ # x and y are required
+ for attr in ("x", "y"):
+ try:
+ point[attr] = _number(point[attr])
+ except KeyError as e:
+ raise GlifLibError(
+ f"Required {attr} attribute is missing in point element."
+ ) from e
+ # segment type
+ pointType = point.pop("type", "offcurve")
+ if validate and pointType not in pointTypeOptions:
+ raise GlifLibError("Unknown point type: %s" % pointType)
+ if pointType == "offcurve":
+ pointType = None
+ point["segmentType"] = pointType
+ if pointType is None:
+ haveOffCurvePoint = True
+ else:
+ lastOnCurvePoint = index
+ # move can only occur as the first point
+ if validate and pointType == "move" and index != 0:
+ raise GlifLibError(
+ "A move point occurs after the first point in the contour."
+ )
+ # smooth is optional
+ smooth = point.get("smooth", "no")
+ if validate and smooth is not None:
+ if smooth not in pointSmoothOptions:
+ raise GlifLibError("Unknown point smooth value: %s" % smooth)
+ smooth = smooth == "yes"
+ point["smooth"] = smooth
+ # smooth can only be applied to curve and qcurve
+ if validate and smooth and pointType is None:
+ raise GlifLibError("smooth attribute set in an offcurve point.")
+ # name is optional
+ if "name" not in element.attrib:
+ point["name"] = None
+ if openContourOffCurveLeniency:
+ # remove offcurves that precede a move. this is technically illegal,
+ # but we let it slide because there are fonts out there in the wild like this.
+ if massaged[0]["segmentType"] == "move":
+ count = 0
+ for point in reversed(massaged):
+ if point["segmentType"] is None:
+ count += 1
+ else:
+ break
+ if count:
+ massaged = massaged[:-count]
+ # validate the off-curves in the segments
+ if validate and haveOffCurvePoint and lastOnCurvePoint is not None:
+ # we only care about how many offCurves there are before an onCurve
+ # filter out the trailing offCurves
+ offCurvesCount = len(massaged) - 1 - lastOnCurvePoint
+ for point in massaged:
+ segmentType = point["segmentType"]
+ if segmentType is None:
+ offCurvesCount += 1
+ else:
+ if offCurvesCount:
+ # move and line can't be preceded by off-curves
+ if segmentType == "move":
+ # this will have been filtered out already
+ raise GlifLibError("move can not have an offcurve.")
+ elif segmentType == "line":
+ raise GlifLibError("line can not have an offcurve.")
+ elif segmentType == "curve":
+ if offCurvesCount > 2:
+ raise GlifLibError("Too many offcurves defined for curve.")
+ elif segmentType == "qcurve":
+ pass
+ else:
+ # unknown segment type. it'll be caught later.
+ pass
+ offCurvesCount = 0
+ return massaged
+
# ---------------------
# Misc Helper Functions
# ---------------------
+
def _relaxedSetattr(object, attr, value):
- try:
- setattr(object, attr, value)
- except AttributeError:
- pass
+ try:
+ setattr(object, attr, value)
+ except AttributeError:
+ pass
+
def _number(s):
- """
- Given a numeric string, return an integer or a float, whichever
- the string indicates. _number("1") will return the integer 1,
- _number("1.0") will return the float 1.0.
-
- >>> _number("1")
- 1
- >>> _number("1.0")
- 1.0
- >>> _number("a") # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- GlifLibError: Could not convert a to an int or float.
- """
- try:
- n = int(s)
- return n
- except ValueError:
- pass
- try:
- n = float(s)
- return n
- except ValueError:
- raise GlifLibError("Could not convert %s to an int or float." % s)
+ """
+ Given a numeric string, return an integer or a float, whichever
+ the string indicates. _number("1") will return the integer 1,
+ _number("1.0") will return the float 1.0.
+
+ >>> _number("1")
+ 1
+ >>> _number("1.0")
+ 1.0
+ >>> _number("a") # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ GlifLibError: Could not convert a to an int or float.
+ """
+ try:
+ n = int(s)
+ return n
+ except ValueError:
+ pass
+ try:
+ n = float(s)
+ return n
+ except ValueError:
+ raise GlifLibError("Could not convert %s to an int or float." % s)
+
# --------------------
# Rapid Value Fetching
@@ -1581,234 +1744,274 @@ def _number(s):
# base
-class _DoneParsing(Exception): pass
+
+class _DoneParsing(Exception):
+ pass
+
class _BaseParser:
+ def __init__(self):
+ self._elementStack = []
- def __init__(self):
- self._elementStack = []
+ def parse(self, text):
+ from xml.parsers.expat import ParserCreate
- def parse(self, text):
- from xml.parsers.expat import ParserCreate
- parser = ParserCreate()
- parser.StartElementHandler = self.startElementHandler
- parser.EndElementHandler = self.endElementHandler
- parser.Parse(text)
+ parser = ParserCreate()
+ parser.StartElementHandler = self.startElementHandler
+ parser.EndElementHandler = self.endElementHandler
+ parser.Parse(text)
- def startElementHandler(self, name, attrs):
- self._elementStack.append(name)
+ def startElementHandler(self, name, attrs):
+ self._elementStack.append(name)
- def endElementHandler(self, name):
- other = self._elementStack.pop(-1)
- assert other == name
+ def endElementHandler(self, name):
+ other = self._elementStack.pop(-1)
+ assert other == name
# unicodes
+
def _fetchUnicodes(glif):
- """
- Get a list of unicodes listed in glif.
- """
- parser = _FetchUnicodesParser()
- parser.parse(glif)
- return parser.unicodes
+ """
+ Get a list of unicodes listed in glif.
+ """
+ parser = _FetchUnicodesParser()
+ parser.parse(glif)
+ return parser.unicodes
+
class _FetchUnicodesParser(_BaseParser):
+ def __init__(self):
+ self.unicodes = []
+ super().__init__()
+
+ def startElementHandler(self, name, attrs):
+ if (
+ name == "unicode"
+ and self._elementStack
+ and self._elementStack[-1] == "glyph"
+ ):
+ value = attrs.get("hex")
+ if value is not None:
+ try:
+ value = int(value, 16)
+ if value not in self.unicodes:
+ self.unicodes.append(value)
+ except ValueError:
+ pass
+ super().startElementHandler(name, attrs)
- def __init__(self):
- self.unicodes = []
- super().__init__()
-
- def startElementHandler(self, name, attrs):
- if name == "unicode" and self._elementStack and self._elementStack[-1] == "glyph":
- value = attrs.get("hex")
- if value is not None:
- try:
- value = int(value, 16)
- if value not in self.unicodes:
- self.unicodes.append(value)
- except ValueError:
- pass
- super().startElementHandler(name, attrs)
# image
+
def _fetchImageFileName(glif):
- """
- The image file name (if any) from glif.
- """
- parser = _FetchImageFileNameParser()
- try:
- parser.parse(glif)
- except _DoneParsing:
- pass
- return parser.fileName
+ """
+ The image file name (if any) from glif.
+ """
+ parser = _FetchImageFileNameParser()
+ try:
+ parser.parse(glif)
+ except _DoneParsing:
+ pass
+ return parser.fileName
+
class _FetchImageFileNameParser(_BaseParser):
+ def __init__(self):
+ self.fileName = None
+ super().__init__()
- def __init__(self):
- self.fileName = None
- super().__init__()
+ def startElementHandler(self, name, attrs):
+ if name == "image" and self._elementStack and self._elementStack[-1] == "glyph":
+ self.fileName = attrs.get("fileName")
+ raise _DoneParsing
+ super().startElementHandler(name, attrs)
- def startElementHandler(self, name, attrs):
- if name == "image" and self._elementStack and self._elementStack[-1] == "glyph":
- self.fileName = attrs.get("fileName")
- raise _DoneParsing
- super().startElementHandler(name, attrs)
# component references
-def _fetchComponentBases(glif):
- """
- Get a list of component base glyphs listed in glif.
- """
- parser = _FetchComponentBasesParser()
- try:
- parser.parse(glif)
- except _DoneParsing:
- pass
- return list(parser.bases)
-class _FetchComponentBasesParser(_BaseParser):
+def _fetchComponentBases(glif):
+ """
+ Get a list of component base glyphs listed in glif.
+ """
+ parser = _FetchComponentBasesParser()
+ try:
+ parser.parse(glif)
+ except _DoneParsing:
+ pass
+ return list(parser.bases)
- def __init__(self):
- self.bases = []
- super().__init__()
- def startElementHandler(self, name, attrs):
- if name == "component" and self._elementStack and self._elementStack[-1] == "outline":
- base = attrs.get("base")
- if base is not None:
- self.bases.append(base)
- super().startElementHandler(name, attrs)
+class _FetchComponentBasesParser(_BaseParser):
+ def __init__(self):
+ self.bases = []
+ super().__init__()
+
+ def startElementHandler(self, name, attrs):
+ if (
+ name == "component"
+ and self._elementStack
+ and self._elementStack[-1] == "outline"
+ ):
+ base = attrs.get("base")
+ if base is not None:
+ self.bases.append(base)
+ super().startElementHandler(name, attrs)
+
+ def endElementHandler(self, name):
+ if name == "outline":
+ raise _DoneParsing
+ super().endElementHandler(name)
- def endElementHandler(self, name):
- if name == "outline":
- raise _DoneParsing
- super().endElementHandler(name)
# --------------
# GLIF Point Pen
# --------------
_transformationInfo = [
- # field name, default value
- ("xScale", 1),
- ("xyScale", 0),
- ("yxScale", 0),
- ("yScale", 1),
- ("xOffset", 0),
- ("yOffset", 0),
+ # field name, default value
+ ("xScale", 1),
+ ("xyScale", 0),
+ ("yxScale", 0),
+ ("yScale", 1),
+ ("xOffset", 0),
+ ("yOffset", 0),
]
+
class GLIFPointPen(AbstractPointPen):
- """
- Helper class using the PointPen protocol to write the <outline>
- part of .glif files.
- """
-
- def __init__(self, element, formatVersion=None, identifiers=None, validate=True):
- if identifiers is None:
- identifiers = set()
- self.formatVersion = GLIFFormatVersion(formatVersion)
- self.identifiers = identifiers
- self.outline = element
- self.contour = None
- self.prevOffCurveCount = 0
- self.prevPointTypes = []
- self.validate = validate
-
- def beginPath(self, identifier=None, **kwargs):
- attrs = OrderedDict()
- if identifier is not None and self.formatVersion.major >= 2:
- if self.validate:
- if identifier in self.identifiers:
- raise GlifLibError("identifier used more than once: %s" % identifier)
- if not identifierValidator(identifier):
- raise GlifLibError("identifier not formatted properly: %s" % identifier)
- attrs["identifier"] = identifier
- self.identifiers.add(identifier)
- self.contour = etree.SubElement(self.outline, "contour", attrs)
- self.prevOffCurveCount = 0
-
- def endPath(self):
- if self.prevPointTypes and self.prevPointTypes[0] == "move":
- if self.validate and self.prevPointTypes[-1] == "offcurve":
- raise GlifLibError("open contour has loose offcurve point")
- # prevent lxml from writing self-closing tags
- if not len(self.contour):
- self.contour.text = "\n "
- self.contour = None
- self.prevPointType = None
- self.prevOffCurveCount = 0
- self.prevPointTypes = []
-
- def addPoint(self, pt, segmentType=None, smooth=None, name=None, identifier=None, **kwargs):
- attrs = OrderedDict()
- # coordinates
- if pt is not None:
- if self.validate:
- for coord in pt:
- if not isinstance(coord, numberTypes):
- raise GlifLibError("coordinates must be int or float")
- attrs["x"] = repr(pt[0])
- attrs["y"] = repr(pt[1])
- # segment type
- if segmentType == "offcurve":
- segmentType = None
- if self.validate:
- if segmentType == "move" and self.prevPointTypes:
- raise GlifLibError("move occurs after a point has already been added to the contour.")
- if segmentType in ("move", "line") and self.prevPointTypes and self.prevPointTypes[-1] == "offcurve":
- raise GlifLibError("offcurve occurs before %s point." % segmentType)
- if segmentType == "curve" and self.prevOffCurveCount > 2:
- raise GlifLibError("too many offcurve points before curve point.")
- if segmentType is not None:
- attrs["type"] = segmentType
- else:
- segmentType = "offcurve"
- if segmentType == "offcurve":
- self.prevOffCurveCount += 1
- else:
- self.prevOffCurveCount = 0
- self.prevPointTypes.append(segmentType)
- # smooth
- if smooth:
- if self.validate and segmentType == "offcurve":
- raise GlifLibError("can't set smooth in an offcurve point.")
- attrs["smooth"] = "yes"
- # name
- if name is not None:
- attrs["name"] = name
- # identifier
- if identifier is not None and self.formatVersion.major >= 2:
- if self.validate:
- if identifier in self.identifiers:
- raise GlifLibError("identifier used more than once: %s" % identifier)
- if not identifierValidator(identifier):
- raise GlifLibError("identifier not formatted properly: %s" % identifier)
- attrs["identifier"] = identifier
- self.identifiers.add(identifier)
- etree.SubElement(self.contour, "point", attrs)
-
- def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
- attrs = OrderedDict([("base", glyphName)])
- for (attr, default), value in zip(_transformationInfo, transformation):
- if self.validate and not isinstance(value, numberTypes):
- raise GlifLibError("transformation values must be int or float")
- if value != default:
- attrs[attr] = repr(value)
- if identifier is not None and self.formatVersion.major >= 2:
- if self.validate:
- if identifier in self.identifiers:
- raise GlifLibError("identifier used more than once: %s" % identifier)
- if self.validate and not identifierValidator(identifier):
- raise GlifLibError("identifier not formatted properly: %s" % identifier)
- attrs["identifier"] = identifier
- self.identifiers.add(identifier)
- etree.SubElement(self.outline, "component", attrs)
+ """
+ Helper class using the PointPen protocol to write the <outline>
+ part of .glif files.
+ """
+
+ def __init__(self, element, formatVersion=None, identifiers=None, validate=True):
+ if identifiers is None:
+ identifiers = set()
+ self.formatVersion = GLIFFormatVersion(formatVersion)
+ self.identifiers = identifiers
+ self.outline = element
+ self.contour = None
+ self.prevOffCurveCount = 0
+ self.prevPointTypes = []
+ self.validate = validate
+
+ def beginPath(self, identifier=None, **kwargs):
+ attrs = OrderedDict()
+ if identifier is not None and self.formatVersion.major >= 2:
+ if self.validate:
+ if identifier in self.identifiers:
+ raise GlifLibError(
+ "identifier used more than once: %s" % identifier
+ )
+ if not identifierValidator(identifier):
+ raise GlifLibError(
+ "identifier not formatted properly: %s" % identifier
+ )
+ attrs["identifier"] = identifier
+ self.identifiers.add(identifier)
+ self.contour = etree.SubElement(self.outline, "contour", attrs)
+ self.prevOffCurveCount = 0
+
+ def endPath(self):
+ if self.prevPointTypes and self.prevPointTypes[0] == "move":
+ if self.validate and self.prevPointTypes[-1] == "offcurve":
+ raise GlifLibError("open contour has loose offcurve point")
+ # prevent lxml from writing self-closing tags
+ if not len(self.contour):
+ self.contour.text = "\n "
+ self.contour = None
+ self.prevPointType = None
+ self.prevOffCurveCount = 0
+ self.prevPointTypes = []
+
+ def addPoint(
+ self, pt, segmentType=None, smooth=None, name=None, identifier=None, **kwargs
+ ):
+ attrs = OrderedDict()
+ # coordinates
+ if pt is not None:
+ if self.validate:
+ for coord in pt:
+ if not isinstance(coord, numberTypes):
+ raise GlifLibError("coordinates must be int or float")
+ attrs["x"] = repr(pt[0])
+ attrs["y"] = repr(pt[1])
+ # segment type
+ if segmentType == "offcurve":
+ segmentType = None
+ if self.validate:
+ if segmentType == "move" and self.prevPointTypes:
+ raise GlifLibError(
+ "move occurs after a point has already been added to the contour."
+ )
+ if (
+ segmentType in ("move", "line")
+ and self.prevPointTypes
+ and self.prevPointTypes[-1] == "offcurve"
+ ):
+ raise GlifLibError("offcurve occurs before %s point." % segmentType)
+ if segmentType == "curve" and self.prevOffCurveCount > 2:
+ raise GlifLibError("too many offcurve points before curve point.")
+ if segmentType is not None:
+ attrs["type"] = segmentType
+ else:
+ segmentType = "offcurve"
+ if segmentType == "offcurve":
+ self.prevOffCurveCount += 1
+ else:
+ self.prevOffCurveCount = 0
+ self.prevPointTypes.append(segmentType)
+ # smooth
+ if smooth:
+ if self.validate and segmentType == "offcurve":
+ raise GlifLibError("can't set smooth in an offcurve point.")
+ attrs["smooth"] = "yes"
+ # name
+ if name is not None:
+ attrs["name"] = name
+ # identifier
+ if identifier is not None and self.formatVersion.major >= 2:
+ if self.validate:
+ if identifier in self.identifiers:
+ raise GlifLibError(
+ "identifier used more than once: %s" % identifier
+ )
+ if not identifierValidator(identifier):
+ raise GlifLibError(
+ "identifier not formatted properly: %s" % identifier
+ )
+ attrs["identifier"] = identifier
+ self.identifiers.add(identifier)
+ etree.SubElement(self.contour, "point", attrs)
+
+ def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
+ attrs = OrderedDict([("base", glyphName)])
+ for (attr, default), value in zip(_transformationInfo, transformation):
+ if self.validate and not isinstance(value, numberTypes):
+ raise GlifLibError("transformation values must be int or float")
+ if value != default:
+ attrs[attr] = repr(value)
+ if identifier is not None and self.formatVersion.major >= 2:
+ if self.validate:
+ if identifier in self.identifiers:
+ raise GlifLibError(
+ "identifier used more than once: %s" % identifier
+ )
+ if self.validate and not identifierValidator(identifier):
+ raise GlifLibError(
+ "identifier not formatted properly: %s" % identifier
+ )
+ attrs["identifier"] = identifier
+ self.identifiers.add(identifier)
+ etree.SubElement(self.outline, "component", attrs)
+
if __name__ == "__main__":
- import doctest
- doctest.testmod()
+ import doctest
+
+ doctest.testmod()
diff --git a/Lib/fontTools/ufoLib/kerning.py b/Lib/fontTools/ufoLib/kerning.py
index 947222a4..8a1dca5b 100644
--- a/Lib/fontTools/ufoLib/kerning.py
+++ b/Lib/fontTools/ufoLib/kerning.py
@@ -1,89 +1,91 @@
+def lookupKerningValue(
+ pair, kerning, groups, fallback=0, glyphToFirstGroup=None, glyphToSecondGroup=None
+):
+ """
+ Note: This expects kerning to be a flat dictionary
+ of kerning pairs, not the nested structure used
+ in kerning.plist.
+ >>> groups = {
+ ... "public.kern1.O" : ["O", "D", "Q"],
+ ... "public.kern2.E" : ["E", "F"]
+ ... }
+ >>> kerning = {
+ ... ("public.kern1.O", "public.kern2.E") : -100,
+ ... ("public.kern1.O", "F") : -200,
+ ... ("D", "F") : -300
+ ... }
+ >>> lookupKerningValue(("D", "F"), kerning, groups)
+ -300
+ >>> lookupKerningValue(("O", "F"), kerning, groups)
+ -200
+ >>> lookupKerningValue(("O", "E"), kerning, groups)
+ -100
+ >>> lookupKerningValue(("O", "O"), kerning, groups)
+ 0
+ >>> lookupKerningValue(("E", "E"), kerning, groups)
+ 0
+ >>> lookupKerningValue(("E", "O"), kerning, groups)
+ 0
+ >>> lookupKerningValue(("X", "X"), kerning, groups)
+ 0
+ >>> lookupKerningValue(("public.kern1.O", "public.kern2.E"),
+ ... kerning, groups)
+ -100
+ >>> lookupKerningValue(("public.kern1.O", "F"), kerning, groups)
+ -200
+ >>> lookupKerningValue(("O", "public.kern2.E"), kerning, groups)
+ -100
+ >>> lookupKerningValue(("public.kern1.X", "public.kern2.X"), kerning, groups)
+ 0
+ """
+ # quickly check to see if the pair is in the kerning dictionary
+ if pair in kerning:
+ return kerning[pair]
+ # create glyph to group mapping
+ if glyphToFirstGroup is not None:
+ assert glyphToSecondGroup is not None
+ if glyphToSecondGroup is not None:
+ assert glyphToFirstGroup is not None
+ if glyphToFirstGroup is None:
+ glyphToFirstGroup = {}
+ glyphToSecondGroup = {}
+ for group, groupMembers in groups.items():
+ if group.startswith("public.kern1."):
+ for glyph in groupMembers:
+ glyphToFirstGroup[glyph] = group
+ elif group.startswith("public.kern2."):
+ for glyph in groupMembers:
+ glyphToSecondGroup[glyph] = group
+ # get group names and make sure first and second are glyph names
+ first, second = pair
+ firstGroup = secondGroup = None
+ if first.startswith("public.kern1."):
+ firstGroup = first
+ first = None
+ else:
+ firstGroup = glyphToFirstGroup.get(first)
+ if second.startswith("public.kern2."):
+ secondGroup = second
+ second = None
+ else:
+ secondGroup = glyphToSecondGroup.get(second)
+ # make an ordered list of pairs to look up
+ pairs = [
+ (first, second),
+ (first, secondGroup),
+ (firstGroup, second),
+ (firstGroup, secondGroup),
+ ]
+ # look up the pairs and return any matches
+ for pair in pairs:
+ if pair in kerning:
+ return kerning[pair]
+ # use the fallback value
+ return fallback
-def lookupKerningValue(pair, kerning, groups, fallback=0, glyphToFirstGroup=None, glyphToSecondGroup=None):
- """
- Note: This expects kerning to be a flat dictionary
- of kerning pairs, not the nested structure used
- in kerning.plist.
-
- >>> groups = {
- ... "public.kern1.O" : ["O", "D", "Q"],
- ... "public.kern2.E" : ["E", "F"]
- ... }
- >>> kerning = {
- ... ("public.kern1.O", "public.kern2.E") : -100,
- ... ("public.kern1.O", "F") : -200,
- ... ("D", "F") : -300
- ... }
- >>> lookupKerningValue(("D", "F"), kerning, groups)
- -300
- >>> lookupKerningValue(("O", "F"), kerning, groups)
- -200
- >>> lookupKerningValue(("O", "E"), kerning, groups)
- -100
- >>> lookupKerningValue(("O", "O"), kerning, groups)
- 0
- >>> lookupKerningValue(("E", "E"), kerning, groups)
- 0
- >>> lookupKerningValue(("E", "O"), kerning, groups)
- 0
- >>> lookupKerningValue(("X", "X"), kerning, groups)
- 0
- >>> lookupKerningValue(("public.kern1.O", "public.kern2.E"),
- ... kerning, groups)
- -100
- >>> lookupKerningValue(("public.kern1.O", "F"), kerning, groups)
- -200
- >>> lookupKerningValue(("O", "public.kern2.E"), kerning, groups)
- -100
- >>> lookupKerningValue(("public.kern1.X", "public.kern2.X"), kerning, groups)
- 0
- """
- # quickly check to see if the pair is in the kerning dictionary
- if pair in kerning:
- return kerning[pair]
- # create glyph to group mapping
- if glyphToFirstGroup is not None:
- assert glyphToSecondGroup is not None
- if glyphToSecondGroup is not None:
- assert glyphToFirstGroup is not None
- if glyphToFirstGroup is None:
- glyphToFirstGroup = {}
- glyphToSecondGroup = {}
- for group, groupMembers in groups.items():
- if group.startswith("public.kern1."):
- for glyph in groupMembers:
- glyphToFirstGroup[glyph] = group
- elif group.startswith("public.kern2."):
- for glyph in groupMembers:
- glyphToSecondGroup[glyph] = group
- # get group names and make sure first and second are glyph names
- first, second = pair
- firstGroup = secondGroup = None
- if first.startswith("public.kern1."):
- firstGroup = first
- first = None
- else:
- firstGroup = glyphToFirstGroup.get(first)
- if second.startswith("public.kern2."):
- secondGroup = second
- second = None
- else:
- secondGroup = glyphToSecondGroup.get(second)
- # make an ordered list of pairs to look up
- pairs = [
- (first, second),
- (first, secondGroup),
- (firstGroup, second),
- (firstGroup, secondGroup)
- ]
- # look up the pairs and return any matches
- for pair in pairs:
- if pair in kerning:
- return kerning[pair]
- # use the fallback value
- return fallback
if __name__ == "__main__":
- import doctest
- doctest.testmod()
+ import doctest
+
+ doctest.testmod()
diff --git a/Lib/fontTools/ufoLib/validators.py b/Lib/fontTools/ufoLib/validators.py
index 49cb0e49..01e3124f 100644
--- a/Lib/fontTools/ufoLib/validators.py
+++ b/Lib/fontTools/ufoLib/validators.py
@@ -13,6 +13,7 @@ from fontTools.ufoLib.utils import numberTypes
# Generic
# -------
+
def isDictEnough(value):
"""
Some objects will likely come in that aren't
@@ -25,72 +26,78 @@ def isDictEnough(value):
return False
return True
+
def genericTypeValidator(value, typ):
- """
- Generic. (Added at version 2.)
- """
- return isinstance(value, typ)
+ """
+ Generic. (Added at version 2.)
+ """
+ return isinstance(value, typ)
+
def genericIntListValidator(values, validValues):
- """
- Generic. (Added at version 2.)
- """
- if not isinstance(values, (list, tuple)):
- return False
- valuesSet = set(values)
- validValuesSet = set(validValues)
- if valuesSet - validValuesSet:
- return False
- for value in values:
- if not isinstance(value, int):
- return False
- return True
+ """
+ Generic. (Added at version 2.)
+ """
+ if not isinstance(values, (list, tuple)):
+ return False
+ valuesSet = set(values)
+ validValuesSet = set(validValues)
+ if valuesSet - validValuesSet:
+ return False
+ for value in values:
+ if not isinstance(value, int):
+ return False
+ return True
+
def genericNonNegativeIntValidator(value):
- """
- Generic. (Added at version 3.)
- """
- if not isinstance(value, int):
- return False
- if value < 0:
- return False
- return True
+ """
+ Generic. (Added at version 3.)
+ """
+ if not isinstance(value, int):
+ return False
+ if value < 0:
+ return False
+ return True
+
def genericNonNegativeNumberValidator(value):
- """
- Generic. (Added at version 3.)
- """
- if not isinstance(value, numberTypes):
- return False
- if value < 0:
- return False
- return True
+ """
+ Generic. (Added at version 3.)
+ """
+ if not isinstance(value, numberTypes):
+ return False
+ if value < 0:
+ return False
+ return True
+
def genericDictValidator(value, prototype):
- """
- Generic. (Added at version 3.)
- """
- # not a dict
- if not isinstance(value, Mapping):
- return False
- # missing required keys
- for key, (typ, required) in prototype.items():
- if not required:
- continue
- if key not in value:
- return False
- # unknown keys
- for key in value.keys():
- if key not in prototype:
- return False
- # incorrect types
- for key, v in value.items():
- prototypeType, required = prototype[key]
- if v is None and not required:
- continue
- if not isinstance(v, prototypeType):
- return False
- return True
+ """
+ Generic. (Added at version 3.)
+ """
+ # not a dict
+ if not isinstance(value, Mapping):
+ return False
+ # missing required keys
+ for key, (typ, required) in prototype.items():
+ if not required:
+ continue
+ if key not in value:
+ return False
+ # unknown keys
+ for key in value.keys():
+ if key not in prototype:
+ return False
+ # incorrect types
+ for key, v in value.items():
+ prototypeType, required = prototype[key]
+ if v is None and not required:
+ continue
+ if not isinstance(v, prototypeType):
+ return False
+ return True
+
# --------------
# fontinfo.plist
@@ -98,620 +105,698 @@ def genericDictValidator(value, prototype):
# Data Validators
+
def fontInfoStyleMapStyleNameValidator(value):
- """
- Version 2+.
- """
- options = ["regular", "italic", "bold", "bold italic"]
- return value in options
+ """
+ Version 2+.
+ """
+ options = ["regular", "italic", "bold", "bold italic"]
+ return value in options
+
def fontInfoOpenTypeGaspRangeRecordsValidator(value):
- """
- Version 3+.
- """
- if not isinstance(value, list):
- return False
- if len(value) == 0:
- return True
- validBehaviors = [0, 1, 2, 3]
- dictPrototype = dict(rangeMaxPPEM=(int, True), rangeGaspBehavior=(list, True))
- ppemOrder = []
- for rangeRecord in value:
- if not genericDictValidator(rangeRecord, dictPrototype):
- return False
- ppem = rangeRecord["rangeMaxPPEM"]
- behavior = rangeRecord["rangeGaspBehavior"]
- ppemValidity = genericNonNegativeIntValidator(ppem)
- if not ppemValidity:
- return False
- behaviorValidity = genericIntListValidator(behavior, validBehaviors)
- if not behaviorValidity:
- return False
- ppemOrder.append(ppem)
- if ppemOrder != sorted(ppemOrder):
- return False
- return True
+ """
+ Version 3+.
+ """
+ if not isinstance(value, list):
+ return False
+ if len(value) == 0:
+ return True
+ validBehaviors = [0, 1, 2, 3]
+ dictPrototype = dict(rangeMaxPPEM=(int, True), rangeGaspBehavior=(list, True))
+ ppemOrder = []
+ for rangeRecord in value:
+ if not genericDictValidator(rangeRecord, dictPrototype):
+ return False
+ ppem = rangeRecord["rangeMaxPPEM"]
+ behavior = rangeRecord["rangeGaspBehavior"]
+ ppemValidity = genericNonNegativeIntValidator(ppem)
+ if not ppemValidity:
+ return False
+ behaviorValidity = genericIntListValidator(behavior, validBehaviors)
+ if not behaviorValidity:
+ return False
+ ppemOrder.append(ppem)
+ if ppemOrder != sorted(ppemOrder):
+ return False
+ return True
+
def fontInfoOpenTypeHeadCreatedValidator(value):
- """
- Version 2+.
- """
- # format: 0000/00/00 00:00:00
- if not isinstance(value, str):
- return False
- # basic formatting
- if not len(value) == 19:
- return False
- if value.count(" ") != 1:
- return False
- date, time = value.split(" ")
- if date.count("/") != 2:
- return False
- if time.count(":") != 2:
- return False
- # date
- year, month, day = date.split("/")
- if len(year) != 4:
- return False
- if len(month) != 2:
- return False
- if len(day) != 2:
- return False
- try:
- year = int(year)
- month = int(month)
- day = int(day)
- except ValueError:
- return False
- if month < 1 or month > 12:
- return False
- monthMaxDay = calendar.monthrange(year, month)[1]
- if day < 1 or day > monthMaxDay:
- return False
- # time
- hour, minute, second = time.split(":")
- if len(hour) != 2:
- return False
- if len(minute) != 2:
- return False
- if len(second) != 2:
- return False
- try:
- hour = int(hour)
- minute = int(minute)
- second = int(second)
- except ValueError:
- return False
- if hour < 0 or hour > 23:
- return False
- if minute < 0 or minute > 59:
- return False
- if second < 0 or second > 59:
- return False
- # fallback
- return True
+ """
+ Version 2+.
+ """
+ # format: 0000/00/00 00:00:00
+ if not isinstance(value, str):
+ return False
+ # basic formatting
+ if not len(value) == 19:
+ return False
+ if value.count(" ") != 1:
+ return False
+ date, time = value.split(" ")
+ if date.count("/") != 2:
+ return False
+ if time.count(":") != 2:
+ return False
+ # date
+ year, month, day = date.split("/")
+ if len(year) != 4:
+ return False
+ if len(month) != 2:
+ return False
+ if len(day) != 2:
+ return False
+ try:
+ year = int(year)
+ month = int(month)
+ day = int(day)
+ except ValueError:
+ return False
+ if month < 1 or month > 12:
+ return False
+ monthMaxDay = calendar.monthrange(year, month)[1]
+ if day < 1 or day > monthMaxDay:
+ return False
+ # time
+ hour, minute, second = time.split(":")
+ if len(hour) != 2:
+ return False
+ if len(minute) != 2:
+ return False
+ if len(second) != 2:
+ return False
+ try:
+ hour = int(hour)
+ minute = int(minute)
+ second = int(second)
+ except ValueError:
+ return False
+ if hour < 0 or hour > 23:
+ return False
+ if minute < 0 or minute > 59:
+ return False
+ if second < 0 or second > 59:
+ return False
+ # fallback
+ return True
+
def fontInfoOpenTypeNameRecordsValidator(value):
- """
- Version 3+.
- """
- if not isinstance(value, list):
- return False
- dictPrototype = dict(nameID=(int, True), platformID=(int, True), encodingID=(int, True), languageID=(int, True), string=(str, True))
- for nameRecord in value:
- if not genericDictValidator(nameRecord, dictPrototype):
- return False
- return True
+ """
+ Version 3+.
+ """
+ if not isinstance(value, list):
+ return False
+ dictPrototype = dict(
+ nameID=(int, True),
+ platformID=(int, True),
+ encodingID=(int, True),
+ languageID=(int, True),
+ string=(str, True),
+ )
+ for nameRecord in value:
+ if not genericDictValidator(nameRecord, dictPrototype):
+ return False
+ return True
+
def fontInfoOpenTypeOS2WeightClassValidator(value):
- """
- Version 2+.
- """
- if not isinstance(value, int):
- return False
- if value < 0:
- return False
- return True
+ """
+ Version 2+.
+ """
+ if not isinstance(value, int):
+ return False
+ if value < 0:
+ return False
+ return True
+
def fontInfoOpenTypeOS2WidthClassValidator(value):
- """
- Version 2+.
- """
- if not isinstance(value, int):
- return False
- if value < 1:
- return False
- if value > 9:
- return False
- return True
+ """
+ Version 2+.
+ """
+ if not isinstance(value, int):
+ return False
+ if value < 1:
+ return False
+ if value > 9:
+ return False
+ return True
+
def fontInfoVersion2OpenTypeOS2PanoseValidator(values):
- """
- Version 2.
- """
- if not isinstance(values, (list, tuple)):
- return False
- if len(values) != 10:
- return False
- for value in values:
- if not isinstance(value, int):
- return False
- # XXX further validation?
- return True
+ """
+ Version 2.
+ """
+ if not isinstance(values, (list, tuple)):
+ return False
+ if len(values) != 10:
+ return False
+ for value in values:
+ if not isinstance(value, int):
+ return False
+ # XXX further validation?
+ return True
+
def fontInfoVersion3OpenTypeOS2PanoseValidator(values):
- """
- Version 3+.
- """
- if not isinstance(values, (list, tuple)):
- return False
- if len(values) != 10:
- return False
- for value in values:
- if not isinstance(value, int):
- return False
- if value < 0:
- return False
- # XXX further validation?
- return True
+ """
+ Version 3+.
+ """
+ if not isinstance(values, (list, tuple)):
+ return False
+ if len(values) != 10:
+ return False
+ for value in values:
+ if not isinstance(value, int):
+ return False
+ if value < 0:
+ return False
+ # XXX further validation?
+ return True
+
def fontInfoOpenTypeOS2FamilyClassValidator(values):
- """
- Version 2+.
- """
- if not isinstance(values, (list, tuple)):
- return False
- if len(values) != 2:
- return False
- for value in values:
- if not isinstance(value, int):
- return False
- classID, subclassID = values
- if classID < 0 or classID > 14:
- return False
- if subclassID < 0 or subclassID > 15:
- return False
- return True
+ """
+ Version 2+.
+ """
+ if not isinstance(values, (list, tuple)):
+ return False
+ if len(values) != 2:
+ return False
+ for value in values:
+ if not isinstance(value, int):
+ return False
+ classID, subclassID = values
+ if classID < 0 or classID > 14:
+ return False
+ if subclassID < 0 or subclassID > 15:
+ return False
+ return True
+
def fontInfoPostscriptBluesValidator(values):
- """
- Version 2+.
- """
- if not isinstance(values, (list, tuple)):
- return False
- if len(values) > 14:
- return False
- if len(values) % 2:
- return False
- for value in values:
- if not isinstance(value, numberTypes):
- return False
- return True
+ """
+ Version 2+.
+ """
+ if not isinstance(values, (list, tuple)):
+ return False
+ if len(values) > 14:
+ return False
+ if len(values) % 2:
+ return False
+ for value in values:
+ if not isinstance(value, numberTypes):
+ return False
+ return True
+
def fontInfoPostscriptOtherBluesValidator(values):
- """
- Version 2+.
- """
- if not isinstance(values, (list, tuple)):
- return False
- if len(values) > 10:
- return False
- if len(values) % 2:
- return False
- for value in values:
- if not isinstance(value, numberTypes):
- return False
- return True
+ """
+ Version 2+.
+ """
+ if not isinstance(values, (list, tuple)):
+ return False
+ if len(values) > 10:
+ return False
+ if len(values) % 2:
+ return False
+ for value in values:
+ if not isinstance(value, numberTypes):
+ return False
+ return True
+
def fontInfoPostscriptStemsValidator(values):
- """
- Version 2+.
- """
- if not isinstance(values, (list, tuple)):
- return False
- if len(values) > 12:
- return False
- for value in values:
- if not isinstance(value, numberTypes):
- return False
- return True
+ """
+ Version 2+.
+ """
+ if not isinstance(values, (list, tuple)):
+ return False
+ if len(values) > 12:
+ return False
+ for value in values:
+ if not isinstance(value, numberTypes):
+ return False
+ return True
+
def fontInfoPostscriptWindowsCharacterSetValidator(value):
- """
- Version 2+.
- """
- validValues = list(range(1, 21))
- if value not in validValues:
- return False
- return True
+ """
+ Version 2+.
+ """
+ validValues = list(range(1, 21))
+ if value not in validValues:
+ return False
+ return True
+
def fontInfoWOFFMetadataUniqueIDValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(id=(str, True))
- if not genericDictValidator(value, dictPrototype):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(id=(str, True))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ return True
+
def fontInfoWOFFMetadataVendorValidator(value):
- """
- Version 3+.
- """
- dictPrototype = {"name" : (str, True), "url" : (str, False), "dir" : (str, False), "class" : (str, False)}
- if not genericDictValidator(value, dictPrototype):
- return False
- if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = {
+ "name": (str, True),
+ "url": (str, False),
+ "dir": (str, False),
+ "class": (str, False),
+ }
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
+ return False
+ return True
+
def fontInfoWOFFMetadataCreditsValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(credits=(list, True))
- if not genericDictValidator(value, dictPrototype):
- return False
- if not len(value["credits"]):
- return False
- dictPrototype = {"name" : (str, True), "url" : (str, False), "role" : (str, False), "dir" : (str, False), "class" : (str, False)}
- for credit in value["credits"]:
- if not genericDictValidator(credit, dictPrototype):
- return False
- if "dir" in credit and credit.get("dir") not in ("ltr", "rtl"):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(credits=(list, True))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if not len(value["credits"]):
+ return False
+ dictPrototype = {
+ "name": (str, True),
+ "url": (str, False),
+ "role": (str, False),
+ "dir": (str, False),
+ "class": (str, False),
+ }
+ for credit in value["credits"]:
+ if not genericDictValidator(credit, dictPrototype):
+ return False
+ if "dir" in credit and credit.get("dir") not in ("ltr", "rtl"):
+ return False
+ return True
+
def fontInfoWOFFMetadataDescriptionValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(url=(str, False), text=(list, True))
- if not genericDictValidator(value, dictPrototype):
- return False
- for text in value["text"]:
- if not fontInfoWOFFMetadataTextValue(text):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(url=(str, False), text=(list, True))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ for text in value["text"]:
+ if not fontInfoWOFFMetadataTextValue(text):
+ return False
+ return True
+
def fontInfoWOFFMetadataLicenseValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(url=(str, False), text=(list, False), id=(str, False))
- if not genericDictValidator(value, dictPrototype):
- return False
- if "text" in value:
- for text in value["text"]:
- if not fontInfoWOFFMetadataTextValue(text):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(url=(str, False), text=(list, False), id=(str, False))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if "text" in value:
+ for text in value["text"]:
+ if not fontInfoWOFFMetadataTextValue(text):
+ return False
+ return True
+
def fontInfoWOFFMetadataTrademarkValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(text=(list, True))
- if not genericDictValidator(value, dictPrototype):
- return False
- for text in value["text"]:
- if not fontInfoWOFFMetadataTextValue(text):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(text=(list, True))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ for text in value["text"]:
+ if not fontInfoWOFFMetadataTextValue(text):
+ return False
+ return True
+
def fontInfoWOFFMetadataCopyrightValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(text=(list, True))
- if not genericDictValidator(value, dictPrototype):
- return False
- for text in value["text"]:
- if not fontInfoWOFFMetadataTextValue(text):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(text=(list, True))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ for text in value["text"]:
+ if not fontInfoWOFFMetadataTextValue(text):
+ return False
+ return True
+
def fontInfoWOFFMetadataLicenseeValidator(value):
- """
- Version 3+.
- """
- dictPrototype = {"name" : (str, True), "dir" : (str, False), "class" : (str, False)}
- if not genericDictValidator(value, dictPrototype):
- return False
- if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = {"name": (str, True), "dir": (str, False), "class": (str, False)}
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
+ return False
+ return True
+
def fontInfoWOFFMetadataTextValue(value):
- """
- Version 3+.
- """
- dictPrototype = {"text" : (str, True), "language" : (str, False), "dir" : (str, False), "class" : (str, False)}
- if not genericDictValidator(value, dictPrototype):
- return False
- if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = {
+ "text": (str, True),
+ "language": (str, False),
+ "dir": (str, False),
+ "class": (str, False),
+ }
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
+ return False
+ return True
+
def fontInfoWOFFMetadataExtensionsValidator(value):
- """
- Version 3+.
- """
- if not isinstance(value, list):
- return False
- if not value:
- return False
- for extension in value:
- if not fontInfoWOFFMetadataExtensionValidator(extension):
- return False
- return True
+ """
+ Version 3+.
+ """
+ if not isinstance(value, list):
+ return False
+ if not value:
+ return False
+ for extension in value:
+ if not fontInfoWOFFMetadataExtensionValidator(extension):
+ return False
+ return True
+
def fontInfoWOFFMetadataExtensionValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(names=(list, False), items=(list, True), id=(str, False))
- if not genericDictValidator(value, dictPrototype):
- return False
- if "names" in value:
- for name in value["names"]:
- if not fontInfoWOFFMetadataExtensionNameValidator(name):
- return False
- for item in value["items"]:
- if not fontInfoWOFFMetadataExtensionItemValidator(item):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(names=(list, False), items=(list, True), id=(str, False))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if "names" in value:
+ for name in value["names"]:
+ if not fontInfoWOFFMetadataExtensionNameValidator(name):
+ return False
+ for item in value["items"]:
+ if not fontInfoWOFFMetadataExtensionItemValidator(item):
+ return False
+ return True
+
def fontInfoWOFFMetadataExtensionItemValidator(value):
- """
- Version 3+.
- """
- dictPrototype = dict(id=(str, False), names=(list, True), values=(list, True))
- if not genericDictValidator(value, dictPrototype):
- return False
- for name in value["names"]:
- if not fontInfoWOFFMetadataExtensionNameValidator(name):
- return False
- for val in value["values"]:
- if not fontInfoWOFFMetadataExtensionValueValidator(val):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = dict(id=(str, False), names=(list, True), values=(list, True))
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ for name in value["names"]:
+ if not fontInfoWOFFMetadataExtensionNameValidator(name):
+ return False
+ for val in value["values"]:
+ if not fontInfoWOFFMetadataExtensionValueValidator(val):
+ return False
+ return True
+
def fontInfoWOFFMetadataExtensionNameValidator(value):
- """
- Version 3+.
- """
- dictPrototype = {"text" : (str, True), "language" : (str, False), "dir" : (str, False), "class" : (str, False)}
- if not genericDictValidator(value, dictPrototype):
- return False
- if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = {
+ "text": (str, True),
+ "language": (str, False),
+ "dir": (str, False),
+ "class": (str, False),
+ }
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
+ return False
+ return True
+
def fontInfoWOFFMetadataExtensionValueValidator(value):
- """
- Version 3+.
- """
- dictPrototype = {"text" : (str, True), "language" : (str, False), "dir" : (str, False), "class" : (str, False)}
- if not genericDictValidator(value, dictPrototype):
- return False
- if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
- return False
- return True
+ """
+ Version 3+.
+ """
+ dictPrototype = {
+ "text": (str, True),
+ "language": (str, False),
+ "dir": (str, False),
+ "class": (str, False),
+ }
+ if not genericDictValidator(value, dictPrototype):
+ return False
+ if "dir" in value and value.get("dir") not in ("ltr", "rtl"):
+ return False
+ return True
+
# ----------
# Guidelines
# ----------
+
def guidelinesValidator(value, identifiers=None):
- """
- Version 3+.
- """
- if not isinstance(value, list):
- return False
- if identifiers is None:
- identifiers = set()
- for guide in value:
- if not guidelineValidator(guide):
- return False
- identifier = guide.get("identifier")
- if identifier is not None:
- if identifier in identifiers:
- return False
- identifiers.add(identifier)
- return True
+ """
+ Version 3+.
+ """
+ if not isinstance(value, list):
+ return False
+ if identifiers is None:
+ identifiers = set()
+ for guide in value:
+ if not guidelineValidator(guide):
+ return False
+ identifier = guide.get("identifier")
+ if identifier is not None:
+ if identifier in identifiers:
+ return False
+ identifiers.add(identifier)
+ return True
+
_guidelineDictPrototype = dict(
- x=((int, float), False), y=((int, float), False), angle=((int, float), False),
- name=(str, False), color=(str, False), identifier=(str, False)
+ x=((int, float), False),
+ y=((int, float), False),
+ angle=((int, float), False),
+ name=(str, False),
+ color=(str, False),
+ identifier=(str, False),
)
+
def guidelineValidator(value):
- """
- Version 3+.
- """
- if not genericDictValidator(value, _guidelineDictPrototype):
- return False
- x = value.get("x")
- y = value.get("y")
- angle = value.get("angle")
- # x or y must be present
- if x is None and y is None:
- return False
- # if x or y are None, angle must not be present
- if x is None or y is None:
- if angle is not None:
- return False
- # if x and y are defined, angle must be defined
- if x is not None and y is not None and angle is None:
- return False
- # angle must be between 0 and 360
- if angle is not None:
- if angle < 0:
- return False
- if angle > 360:
- return False
- # identifier must be 1 or more characters
- identifier = value.get("identifier")
- if identifier is not None and not identifierValidator(identifier):
- return False
- # color must follow the proper format
- color = value.get("color")
- if color is not None and not colorValidator(color):
- return False
- return True
+ """
+ Version 3+.
+ """
+ if not genericDictValidator(value, _guidelineDictPrototype):
+ return False
+ x = value.get("x")
+ y = value.get("y")
+ angle = value.get("angle")
+ # x or y must be present
+ if x is None and y is None:
+ return False
+ # if x or y are None, angle must not be present
+ if x is None or y is None:
+ if angle is not None:
+ return False
+ # if x and y are defined, angle must be defined
+ if x is not None and y is not None and angle is None:
+ return False
+ # angle must be between 0 and 360
+ if angle is not None:
+ if angle < 0:
+ return False
+ if angle > 360:
+ return False
+ # identifier must be 1 or more characters
+ identifier = value.get("identifier")
+ if identifier is not None and not identifierValidator(identifier):
+ return False
+ # color must follow the proper format
+ color = value.get("color")
+ if color is not None and not colorValidator(color):
+ return False
+ return True
+
# -------
# Anchors
# -------
+
def anchorsValidator(value, identifiers=None):
- """
- Version 3+.
- """
- if not isinstance(value, list):
- return False
- if identifiers is None:
- identifiers = set()
- for anchor in value:
- if not anchorValidator(anchor):
- return False
- identifier = anchor.get("identifier")
- if identifier is not None:
- if identifier in identifiers:
- return False
- identifiers.add(identifier)
- return True
+ """
+ Version 3+.
+ """
+ if not isinstance(value, list):
+ return False
+ if identifiers is None:
+ identifiers = set()
+ for anchor in value:
+ if not anchorValidator(anchor):
+ return False
+ identifier = anchor.get("identifier")
+ if identifier is not None:
+ if identifier in identifiers:
+ return False
+ identifiers.add(identifier)
+ return True
+
_anchorDictPrototype = dict(
- x=((int, float), False), y=((int, float), False),
- name=(str, False), color=(str, False),
- identifier=(str, False)
+ x=((int, float), False),
+ y=((int, float), False),
+ name=(str, False),
+ color=(str, False),
+ identifier=(str, False),
)
+
def anchorValidator(value):
- """
- Version 3+.
- """
- if not genericDictValidator(value, _anchorDictPrototype):
- return False
- x = value.get("x")
- y = value.get("y")
- # x and y must be present
- if x is None or y is None:
- return False
- # identifier must be 1 or more characters
- identifier = value.get("identifier")
- if identifier is not None and not identifierValidator(identifier):
- return False
- # color must follow the proper format
- color = value.get("color")
- if color is not None and not colorValidator(color):
- return False
- return True
+ """
+ Version 3+.
+ """
+ if not genericDictValidator(value, _anchorDictPrototype):
+ return False
+ x = value.get("x")
+ y = value.get("y")
+ # x and y must be present
+ if x is None or y is None:
+ return False
+ # identifier must be 1 or more characters
+ identifier = value.get("identifier")
+ if identifier is not None and not identifierValidator(identifier):
+ return False
+ # color must follow the proper format
+ color = value.get("color")
+ if color is not None and not colorValidator(color):
+ return False
+ return True
+
# ----------
# Identifier
# ----------
+
def identifierValidator(value):
- """
- Version 3+.
-
- >>> identifierValidator("a")
- True
- >>> identifierValidator("")
- False
- >>> identifierValidator("a" * 101)
- False
- """
- validCharactersMin = 0x20
- validCharactersMax = 0x7E
- if not isinstance(value, str):
- return False
- if not value:
- return False
- if len(value) > 100:
- return False
- for c in value:
- c = ord(c)
- if c < validCharactersMin or c > validCharactersMax:
- return False
- return True
+ """
+ Version 3+.
+
+ >>> identifierValidator("a")
+ True
+ >>> identifierValidator("")
+ False
+ >>> identifierValidator("a" * 101)
+ False
+ """
+ validCharactersMin = 0x20
+ validCharactersMax = 0x7E
+ if not isinstance(value, str):
+ return False
+ if not value:
+ return False
+ if len(value) > 100:
+ return False
+ for c in value:
+ c = ord(c)
+ if c < validCharactersMin or c > validCharactersMax:
+ return False
+ return True
+
# -----
# Color
# -----
+
def colorValidator(value):
- """
- Version 3+.
-
- >>> colorValidator("0,0,0,0")
- True
- >>> colorValidator(".5,.5,.5,.5")
- True
- >>> colorValidator("0.5,0.5,0.5,0.5")
- True
- >>> colorValidator("1,1,1,1")
- True
-
- >>> colorValidator("2,0,0,0")
- False
- >>> colorValidator("0,2,0,0")
- False
- >>> colorValidator("0,0,2,0")
- False
- >>> colorValidator("0,0,0,2")
- False
-
- >>> colorValidator("1r,1,1,1")
- False
- >>> colorValidator("1,1g,1,1")
- False
- >>> colorValidator("1,1,1b,1")
- False
- >>> colorValidator("1,1,1,1a")
- False
-
- >>> colorValidator("1 1 1 1")
- False
- >>> colorValidator("1 1,1,1")
- False
- >>> colorValidator("1,1 1,1")
- False
- >>> colorValidator("1,1,1 1")
- False
-
- >>> colorValidator("1, 1, 1, 1")
- True
- """
- if not isinstance(value, str):
- return False
- parts = value.split(",")
- if len(parts) != 4:
- return False
- for part in parts:
- part = part.strip()
- converted = False
- try:
- part = int(part)
- converted = True
- except ValueError:
- pass
- if not converted:
- try:
- part = float(part)
- converted = True
- except ValueError:
- pass
- if not converted:
- return False
- if part < 0:
- return False
- if part > 1:
- return False
- return True
+ """
+ Version 3+.
+
+ >>> colorValidator("0,0,0,0")
+ True
+ >>> colorValidator(".5,.5,.5,.5")
+ True
+ >>> colorValidator("0.5,0.5,0.5,0.5")
+ True
+ >>> colorValidator("1,1,1,1")
+ True
+
+ >>> colorValidator("2,0,0,0")
+ False
+ >>> colorValidator("0,2,0,0")
+ False
+ >>> colorValidator("0,0,2,0")
+ False
+ >>> colorValidator("0,0,0,2")
+ False
+
+ >>> colorValidator("1r,1,1,1")
+ False
+ >>> colorValidator("1,1g,1,1")
+ False
+ >>> colorValidator("1,1,1b,1")
+ False
+ >>> colorValidator("1,1,1,1a")
+ False
+
+ >>> colorValidator("1 1 1 1")
+ False
+ >>> colorValidator("1 1,1,1")
+ False
+ >>> colorValidator("1,1 1,1")
+ False
+ >>> colorValidator("1,1,1 1")
+ False
+
+ >>> colorValidator("1, 1, 1, 1")
+ True
+ """
+ if not isinstance(value, str):
+ return False
+ parts = value.split(",")
+ if len(parts) != 4:
+ return False
+ for part in parts:
+ part = part.strip()
+ converted = False
+ try:
+ part = int(part)
+ converted = True
+ except ValueError:
+ pass
+ if not converted:
+ try:
+ part = float(part)
+ converted = True
+ except ValueError:
+ pass
+ if not converted:
+ return False
+ if part < 0:
+ return False
+ if part > 1:
+ return False
+ return True
+
# -----
# image
@@ -720,227 +805,263 @@ def colorValidator(value):
pngSignature = b"\x89PNG\r\n\x1a\n"
_imageDictPrototype = dict(
- fileName=(str, True),
- xScale=((int, float), False), xyScale=((int, float), False),
- yxScale=((int, float), False), yScale=((int, float), False),
- xOffset=((int, float), False), yOffset=((int, float), False),
- color=(str, False)
+ fileName=(str, True),
+ xScale=((int, float), False),
+ xyScale=((int, float), False),
+ yxScale=((int, float), False),
+ yScale=((int, float), False),
+ xOffset=((int, float), False),
+ yOffset=((int, float), False),
+ color=(str, False),
)
+
def imageValidator(value):
- """
- Version 3+.
- """
- if not genericDictValidator(value, _imageDictPrototype):
- return False
- # fileName must be one or more characters
- if not value["fileName"]:
- return False
- # color must follow the proper format
- color = value.get("color")
- if color is not None and not colorValidator(color):
- return False
- return True
+ """
+ Version 3+.
+ """
+ if not genericDictValidator(value, _imageDictPrototype):
+ return False
+ # fileName must be one or more characters
+ if not value["fileName"]:
+ return False
+ # color must follow the proper format
+ color = value.get("color")
+ if color is not None and not colorValidator(color):
+ return False
+ return True
+
def pngValidator(path=None, data=None, fileObj=None):
- """
- Version 3+.
-
- This checks the signature of the image data.
- """
- assert path is not None or data is not None or fileObj is not None
- if path is not None:
- with open(path, "rb") as f:
- signature = f.read(8)
- elif data is not None:
- signature = data[:8]
- elif fileObj is not None:
- pos = fileObj.tell()
- signature = fileObj.read(8)
- fileObj.seek(pos)
- if signature != pngSignature:
- return False, "Image does not begin with the PNG signature."
- return True, None
+ """
+ Version 3+.
+
+ This checks the signature of the image data.
+ """
+ assert path is not None or data is not None or fileObj is not None
+ if path is not None:
+ with open(path, "rb") as f:
+ signature = f.read(8)
+ elif data is not None:
+ signature = data[:8]
+ elif fileObj is not None:
+ pos = fileObj.tell()
+ signature = fileObj.read(8)
+ fileObj.seek(pos)
+ if signature != pngSignature:
+ return False, "Image does not begin with the PNG signature."
+ return True, None
+
# -------------------
# layercontents.plist
# -------------------
+
def layerContentsValidator(value, ufoPathOrFileSystem):
- """
- Check the validity of layercontents.plist.
- Version 3+.
- """
- if isinstance(ufoPathOrFileSystem, fs.base.FS):
- fileSystem = ufoPathOrFileSystem
- else:
- fileSystem = fs.osfs.OSFS(ufoPathOrFileSystem)
-
- bogusFileMessage = "layercontents.plist in not in the correct format."
- # file isn't in the right format
- if not isinstance(value, list):
- return False, bogusFileMessage
- # work through each entry
- usedLayerNames = set()
- usedDirectories = set()
- contents = {}
- for entry in value:
- # layer entry in the incorrect format
- if not isinstance(entry, list):
- return False, bogusFileMessage
- if not len(entry) == 2:
- return False, bogusFileMessage
- for i in entry:
- if not isinstance(i, str):
- return False, bogusFileMessage
- layerName, directoryName = entry
- # check directory naming
- if directoryName != "glyphs":
- if not directoryName.startswith("glyphs."):
- return False, "Invalid directory name (%s) in layercontents.plist." % directoryName
- if len(layerName) == 0:
- return False, "Empty layer name in layercontents.plist."
- # directory doesn't exist
- if not fileSystem.exists(directoryName):
- return False, "A glyphset does not exist at %s." % directoryName
- # default layer name
- if layerName == "public.default" and directoryName != "glyphs":
- return False, "The name public.default is being used by a layer that is not the default."
- # check usage
- if layerName in usedLayerNames:
- return False, "The layer name %s is used by more than one layer." % layerName
- usedLayerNames.add(layerName)
- if directoryName in usedDirectories:
- return False, "The directory %s is used by more than one layer." % directoryName
- usedDirectories.add(directoryName)
- # store
- contents[layerName] = directoryName
- # missing default layer
- foundDefault = "glyphs" in contents.values()
- if not foundDefault:
- return False, "The required default glyph set is not in the UFO."
- return True, None
+ """
+ Check the validity of layercontents.plist.
+ Version 3+.
+ """
+ if isinstance(ufoPathOrFileSystem, fs.base.FS):
+ fileSystem = ufoPathOrFileSystem
+ else:
+ fileSystem = fs.osfs.OSFS(ufoPathOrFileSystem)
+
+ bogusFileMessage = "layercontents.plist in not in the correct format."
+ # file isn't in the right format
+ if not isinstance(value, list):
+ return False, bogusFileMessage
+ # work through each entry
+ usedLayerNames = set()
+ usedDirectories = set()
+ contents = {}
+ for entry in value:
+ # layer entry in the incorrect format
+ if not isinstance(entry, list):
+ return False, bogusFileMessage
+ if not len(entry) == 2:
+ return False, bogusFileMessage
+ for i in entry:
+ if not isinstance(i, str):
+ return False, bogusFileMessage
+ layerName, directoryName = entry
+ # check directory naming
+ if directoryName != "glyphs":
+ if not directoryName.startswith("glyphs."):
+ return (
+ False,
+ "Invalid directory name (%s) in layercontents.plist."
+ % directoryName,
+ )
+ if len(layerName) == 0:
+ return False, "Empty layer name in layercontents.plist."
+ # directory doesn't exist
+ if not fileSystem.exists(directoryName):
+ return False, "A glyphset does not exist at %s." % directoryName
+ # default layer name
+ if layerName == "public.default" and directoryName != "glyphs":
+ return (
+ False,
+ "The name public.default is being used by a layer that is not the default.",
+ )
+ # check usage
+ if layerName in usedLayerNames:
+ return (
+ False,
+ "The layer name %s is used by more than one layer." % layerName,
+ )
+ usedLayerNames.add(layerName)
+ if directoryName in usedDirectories:
+ return (
+ False,
+ "The directory %s is used by more than one layer." % directoryName,
+ )
+ usedDirectories.add(directoryName)
+ # store
+ contents[layerName] = directoryName
+ # missing default layer
+ foundDefault = "glyphs" in contents.values()
+ if not foundDefault:
+ return False, "The required default glyph set is not in the UFO."
+ return True, None
+
# ------------
# groups.plist
# ------------
+
def groupsValidator(value):
- """
- Check the validity of the groups.
- Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
-
- >>> groups = {"A" : ["A", "A"], "A2" : ["A"]}
- >>> groupsValidator(groups)
- (True, None)
-
- >>> groups = {"" : ["A"]}
- >>> valid, msg = groupsValidator(groups)
- >>> valid
- False
- >>> print(msg)
- A group has an empty name.
-
- >>> groups = {"public.awesome" : ["A"]}
- >>> groupsValidator(groups)
- (True, None)
-
- >>> groups = {"public.kern1." : ["A"]}
- >>> valid, msg = groupsValidator(groups)
- >>> valid
- False
- >>> print(msg)
- The group data contains a kerning group with an incomplete name.
- >>> groups = {"public.kern2." : ["A"]}
- >>> valid, msg = groupsValidator(groups)
- >>> valid
- False
- >>> print(msg)
- The group data contains a kerning group with an incomplete name.
-
- >>> groups = {"public.kern1.A" : ["A"], "public.kern2.A" : ["A"]}
- >>> groupsValidator(groups)
- (True, None)
-
- >>> groups = {"public.kern1.A1" : ["A"], "public.kern1.A2" : ["A"]}
- >>> valid, msg = groupsValidator(groups)
- >>> valid
- False
- >>> print(msg)
- The glyph "A" occurs in too many kerning groups.
- """
- bogusFormatMessage = "The group data is not in the correct format."
- if not isDictEnough(value):
- return False, bogusFormatMessage
- firstSideMapping = {}
- secondSideMapping = {}
- for groupName, glyphList in value.items():
- if not isinstance(groupName, (str)):
- return False, bogusFormatMessage
- if not isinstance(glyphList, (list, tuple)):
- return False, bogusFormatMessage
- if not groupName:
- return False, "A group has an empty name."
- if groupName.startswith("public."):
- if not groupName.startswith("public.kern1.") and not groupName.startswith("public.kern2."):
- # unknown public.* name. silently skip.
- continue
- else:
- if len("public.kernN.") == len(groupName):
- return False, "The group data contains a kerning group with an incomplete name."
- if groupName.startswith("public.kern1."):
- d = firstSideMapping
- else:
- d = secondSideMapping
- for glyphName in glyphList:
- if not isinstance(glyphName, str):
- return False, "The group data %s contains an invalid member." % groupName
- if glyphName in d:
- return False, "The glyph \"%s\" occurs in too many kerning groups." % glyphName
- d[glyphName] = groupName
- return True, None
+ """
+ Check the validity of the groups.
+ Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
+
+ >>> groups = {"A" : ["A", "A"], "A2" : ["A"]}
+ >>> groupsValidator(groups)
+ (True, None)
+
+ >>> groups = {"" : ["A"]}
+ >>> valid, msg = groupsValidator(groups)
+ >>> valid
+ False
+ >>> print(msg)
+ A group has an empty name.
+
+ >>> groups = {"public.awesome" : ["A"]}
+ >>> groupsValidator(groups)
+ (True, None)
+
+ >>> groups = {"public.kern1." : ["A"]}
+ >>> valid, msg = groupsValidator(groups)
+ >>> valid
+ False
+ >>> print(msg)
+ The group data contains a kerning group with an incomplete name.
+ >>> groups = {"public.kern2." : ["A"]}
+ >>> valid, msg = groupsValidator(groups)
+ >>> valid
+ False
+ >>> print(msg)
+ The group data contains a kerning group with an incomplete name.
+
+ >>> groups = {"public.kern1.A" : ["A"], "public.kern2.A" : ["A"]}
+ >>> groupsValidator(groups)
+ (True, None)
+
+ >>> groups = {"public.kern1.A1" : ["A"], "public.kern1.A2" : ["A"]}
+ >>> valid, msg = groupsValidator(groups)
+ >>> valid
+ False
+ >>> print(msg)
+ The glyph "A" occurs in too many kerning groups.
+ """
+ bogusFormatMessage = "The group data is not in the correct format."
+ if not isDictEnough(value):
+ return False, bogusFormatMessage
+ firstSideMapping = {}
+ secondSideMapping = {}
+ for groupName, glyphList in value.items():
+ if not isinstance(groupName, (str)):
+ return False, bogusFormatMessage
+ if not isinstance(glyphList, (list, tuple)):
+ return False, bogusFormatMessage
+ if not groupName:
+ return False, "A group has an empty name."
+ if groupName.startswith("public."):
+ if not groupName.startswith("public.kern1.") and not groupName.startswith(
+ "public.kern2."
+ ):
+ # unknown public.* name. silently skip.
+ continue
+ else:
+ if len("public.kernN.") == len(groupName):
+ return (
+ False,
+ "The group data contains a kerning group with an incomplete name.",
+ )
+ if groupName.startswith("public.kern1."):
+ d = firstSideMapping
+ else:
+ d = secondSideMapping
+ for glyphName in glyphList:
+ if not isinstance(glyphName, str):
+ return (
+ False,
+ "The group data %s contains an invalid member." % groupName,
+ )
+ if glyphName in d:
+ return (
+ False,
+ 'The glyph "%s" occurs in too many kerning groups.' % glyphName,
+ )
+ d[glyphName] = groupName
+ return True, None
+
# -------------
# kerning.plist
# -------------
+
def kerningValidator(data):
- """
- Check the validity of the kerning data structure.
- Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
-
- >>> kerning = {"A" : {"B" : 100}}
- >>> kerningValidator(kerning)
- (True, None)
-
- >>> kerning = {"A" : ["B"]}
- >>> valid, msg = kerningValidator(kerning)
- >>> valid
- False
- >>> print(msg)
- The kerning data is not in the correct format.
-
- >>> kerning = {"A" : {"B" : "100"}}
- >>> valid, msg = kerningValidator(kerning)
- >>> valid
- False
- >>> print(msg)
- The kerning data is not in the correct format.
- """
- bogusFormatMessage = "The kerning data is not in the correct format."
- if not isinstance(data, Mapping):
- return False, bogusFormatMessage
- for first, secondDict in data.items():
- if not isinstance(first, str):
- return False, bogusFormatMessage
- elif not isinstance(secondDict, Mapping):
- return False, bogusFormatMessage
- for second, value in secondDict.items():
- if not isinstance(second, str):
- return False, bogusFormatMessage
- elif not isinstance(value, numberTypes):
- return False, bogusFormatMessage
- return True, None
+ """
+ Check the validity of the kerning data structure.
+ Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
+
+ >>> kerning = {"A" : {"B" : 100}}
+ >>> kerningValidator(kerning)
+ (True, None)
+
+ >>> kerning = {"A" : ["B"]}
+ >>> valid, msg = kerningValidator(kerning)
+ >>> valid
+ False
+ >>> print(msg)
+ The kerning data is not in the correct format.
+
+ >>> kerning = {"A" : {"B" : "100"}}
+ >>> valid, msg = kerningValidator(kerning)
+ >>> valid
+ False
+ >>> print(msg)
+ The kerning data is not in the correct format.
+ """
+ bogusFormatMessage = "The kerning data is not in the correct format."
+ if not isinstance(data, Mapping):
+ return False, bogusFormatMessage
+ for first, secondDict in data.items():
+ if not isinstance(first, str):
+ return False, bogusFormatMessage
+ elif not isinstance(secondDict, Mapping):
+ return False, bogusFormatMessage
+ for second, value in secondDict.items():
+ if not isinstance(second, str):
+ return False, bogusFormatMessage
+ elif not isinstance(value, numberTypes):
+ return False, bogusFormatMessage
+ return True, None
+
# -------------
# lib.plist/lib
@@ -948,113 +1069,118 @@ def kerningValidator(data):
_bogusLibFormatMessage = "The lib data is not in the correct format: %s"
+
def fontLibValidator(value):
- """
- Check the validity of the lib.
- Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
-
- >>> lib = {"foo" : "bar"}
- >>> fontLibValidator(lib)
- (True, None)
-
- >>> lib = {"public.awesome" : "hello"}
- >>> fontLibValidator(lib)
- (True, None)
-
- >>> lib = {"public.glyphOrder" : ["A", "C", "B"]}
- >>> fontLibValidator(lib)
- (True, None)
-
- >>> lib = "hello"
- >>> valid, msg = fontLibValidator(lib)
- >>> valid
- False
- >>> print(msg) # doctest: +ELLIPSIS
- The lib data is not in the correct format: expected a dictionary, ...
-
- >>> lib = {1: "hello"}
- >>> valid, msg = fontLibValidator(lib)
- >>> valid
- False
- >>> print(msg)
- The lib key is not properly formatted: expected str, found int: 1
-
- >>> lib = {"public.glyphOrder" : "hello"}
- >>> valid, msg = fontLibValidator(lib)
- >>> valid
- False
- >>> print(msg) # doctest: +ELLIPSIS
- public.glyphOrder is not properly formatted: expected list or tuple,...
-
- >>> lib = {"public.glyphOrder" : ["A", 1, "B"]}
- >>> valid, msg = fontLibValidator(lib)
- >>> valid
- False
- >>> print(msg) # doctest: +ELLIPSIS
- public.glyphOrder is not properly formatted: expected str,...
- """
- if not isDictEnough(value):
- reason = "expected a dictionary, found %s" % type(value).__name__
- return False, _bogusLibFormatMessage % reason
- for key, value in value.items():
- if not isinstance(key, str):
- return False, (
- "The lib key is not properly formatted: expected str, found %s: %r" %
- (type(key).__name__, key))
- # public.glyphOrder
- if key == "public.glyphOrder":
- bogusGlyphOrderMessage = "public.glyphOrder is not properly formatted: %s"
- if not isinstance(value, (list, tuple)):
- reason = "expected list or tuple, found %s" % type(value).__name__
- return False, bogusGlyphOrderMessage % reason
- for glyphName in value:
- if not isinstance(glyphName, str):
- reason = "expected str, found %s" % type(glyphName).__name__
- return False, bogusGlyphOrderMessage % reason
- return True, None
+ """
+ Check the validity of the lib.
+ Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
+
+ >>> lib = {"foo" : "bar"}
+ >>> fontLibValidator(lib)
+ (True, None)
+
+ >>> lib = {"public.awesome" : "hello"}
+ >>> fontLibValidator(lib)
+ (True, None)
+
+ >>> lib = {"public.glyphOrder" : ["A", "C", "B"]}
+ >>> fontLibValidator(lib)
+ (True, None)
+
+ >>> lib = "hello"
+ >>> valid, msg = fontLibValidator(lib)
+ >>> valid
+ False
+ >>> print(msg) # doctest: +ELLIPSIS
+ The lib data is not in the correct format: expected a dictionary, ...
+
+ >>> lib = {1: "hello"}
+ >>> valid, msg = fontLibValidator(lib)
+ >>> valid
+ False
+ >>> print(msg)
+ The lib key is not properly formatted: expected str, found int: 1
+
+ >>> lib = {"public.glyphOrder" : "hello"}
+ >>> valid, msg = fontLibValidator(lib)
+ >>> valid
+ False
+ >>> print(msg) # doctest: +ELLIPSIS
+ public.glyphOrder is not properly formatted: expected list or tuple,...
+
+ >>> lib = {"public.glyphOrder" : ["A", 1, "B"]}
+ >>> valid, msg = fontLibValidator(lib)
+ >>> valid
+ False
+ >>> print(msg) # doctest: +ELLIPSIS
+ public.glyphOrder is not properly formatted: expected str,...
+ """
+ if not isDictEnough(value):
+ reason = "expected a dictionary, found %s" % type(value).__name__
+ return False, _bogusLibFormatMessage % reason
+ for key, value in value.items():
+ if not isinstance(key, str):
+ return False, (
+ "The lib key is not properly formatted: expected str, found %s: %r"
+ % (type(key).__name__, key)
+ )
+ # public.glyphOrder
+ if key == "public.glyphOrder":
+ bogusGlyphOrderMessage = "public.glyphOrder is not properly formatted: %s"
+ if not isinstance(value, (list, tuple)):
+ reason = "expected list or tuple, found %s" % type(value).__name__
+ return False, bogusGlyphOrderMessage % reason
+ for glyphName in value:
+ if not isinstance(glyphName, str):
+ reason = "expected str, found %s" % type(glyphName).__name__
+ return False, bogusGlyphOrderMessage % reason
+ return True, None
+
# --------
# GLIF lib
# --------
+
def glyphLibValidator(value):
- """
- Check the validity of the lib.
- Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
-
- >>> lib = {"foo" : "bar"}
- >>> glyphLibValidator(lib)
- (True, None)
-
- >>> lib = {"public.awesome" : "hello"}
- >>> glyphLibValidator(lib)
- (True, None)
-
- >>> lib = {"public.markColor" : "1,0,0,0.5"}
- >>> glyphLibValidator(lib)
- (True, None)
-
- >>> lib = {"public.markColor" : 1}
- >>> valid, msg = glyphLibValidator(lib)
- >>> valid
- False
- >>> print(msg)
- public.markColor is not properly formatted.
- """
- if not isDictEnough(value):
- reason = "expected a dictionary, found %s" % type(value).__name__
- return False, _bogusLibFormatMessage % reason
- for key, value in value.items():
- if not isinstance(key, str):
- reason = "key (%s) should be a string" % key
- return False, _bogusLibFormatMessage % reason
- # public.markColor
- if key == "public.markColor":
- if not colorValidator(value):
- return False, "public.markColor is not properly formatted."
- return True, None
+ """
+ Check the validity of the lib.
+ Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).
+
+ >>> lib = {"foo" : "bar"}
+ >>> glyphLibValidator(lib)
+ (True, None)
+
+ >>> lib = {"public.awesome" : "hello"}
+ >>> glyphLibValidator(lib)
+ (True, None)
+
+ >>> lib = {"public.markColor" : "1,0,0,0.5"}
+ >>> glyphLibValidator(lib)
+ (True, None)
+
+ >>> lib = {"public.markColor" : 1}
+ >>> valid, msg = glyphLibValidator(lib)
+ >>> valid
+ False
+ >>> print(msg)
+ public.markColor is not properly formatted.
+ """
+ if not isDictEnough(value):
+ reason = "expected a dictionary, found %s" % type(value).__name__
+ return False, _bogusLibFormatMessage % reason
+ for key, value in value.items():
+ if not isinstance(key, str):
+ reason = "key (%s) should be a string" % key
+ return False, _bogusLibFormatMessage % reason
+ # public.markColor
+ if key == "public.markColor":
+ if not colorValidator(value):
+ return False, "public.markColor is not properly formatted."
+ return True, None
if __name__ == "__main__":
- import doctest
- doctest.testmod()
+ import doctest
+
+ doctest.testmod()
diff --git a/Lib/fontTools/unicode.py b/Lib/fontTools/unicode.py
index e0867aa1..a9ffeefa 100644
--- a/Lib/fontTools/unicode.py
+++ b/Lib/fontTools/unicode.py
@@ -1,47 +1,50 @@
def _makeunicodes(f):
- lines = iter(f.readlines())
- unicodes = {}
- for line in lines:
- if not line: continue
- num, name = line.split(';')[:2]
- if name[0] == '<': continue # "<control>", etc.
- num = int(num, 16)
- unicodes[num] = name
- return unicodes
+ lines = iter(f.readlines())
+ unicodes = {}
+ for line in lines:
+ if not line:
+ continue
+ num, name = line.split(";")[:2]
+ if name[0] == "<":
+ continue # "<control>", etc.
+ num = int(num, 16)
+ unicodes[num] = name
+ return unicodes
class _UnicodeCustom(object):
+ def __init__(self, f):
+ if isinstance(f, str):
+ with open(f) as fd:
+ codes = _makeunicodes(fd)
+ else:
+ codes = _makeunicodes(f)
+ self.codes = codes
- def __init__(self, f):
- if isinstance(f, str):
- with open(f) as fd:
- codes = _makeunicodes(fd)
- else:
- codes = _makeunicodes(f)
- self.codes = codes
+ def __getitem__(self, charCode):
+ try:
+ return self.codes[charCode]
+ except KeyError:
+ return "????"
- def __getitem__(self, charCode):
- try:
- return self.codes[charCode]
- except KeyError:
- return "????"
class _UnicodeBuiltin(object):
+ def __getitem__(self, charCode):
+ try:
+ # use unicodedata backport to python2, if available:
+ # https://github.com/mikekap/unicodedata2
+ import unicodedata2 as unicodedata
+ except ImportError:
+ import unicodedata
+ try:
+ return unicodedata.name(chr(charCode))
+ except ValueError:
+ return "????"
- def __getitem__(self, charCode):
- try:
- # use unicodedata backport to python2, if available:
- # https://github.com/mikekap/unicodedata2
- import unicodedata2 as unicodedata
- except ImportError:
- import unicodedata
- try:
- return unicodedata.name(chr(charCode))
- except ValueError:
- return "????"
Unicode = _UnicodeBuiltin()
+
def setUnicodeData(f):
- global Unicode
- Unicode = _UnicodeCustom(f)
+ global Unicode
+ Unicode = _UnicodeCustom(f)
diff --git a/Lib/fontTools/unicodedata/Blocks.py b/Lib/fontTools/unicodedata/Blocks.py
index 2b30be67..b35c93d9 100644
--- a/Lib/fontTools/unicodedata/Blocks.py
+++ b/Lib/fontTools/unicodedata/Blocks.py
@@ -4,13 +4,13 @@
# Source: https://unicode.org/Public/UNIDATA/Blocks.txt
# License: http://unicode.org/copyright.html#License
#
-# Blocks-14.0.0.txt
-# Date: 2021-01-22, 23:29:00 GMT [KW]
-# © 2021 Unicode®, Inc.
-# For terms of use, see http://www.unicode.org/terms_of_use.html
+# Blocks-15.0.0.txt
+# Date: 2022-01-28, 20:58:00 GMT [KW]
+# © 2022 Unicode®, Inc.
+# For terms of use, see https://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
-# For documentation, see http://www.unicode.org/reports/tr44/
+# For documentation, see https://www.unicode.org/reports/tr44/
#
# Format:
# Start Code..End Code; Block Name
@@ -237,7 +237,7 @@ RANGES = [
0x10D40, # .. 0x10E5F ; No_Block
0x10E60, # .. 0x10E7F ; Rumi Numeral Symbols
0x10E80, # .. 0x10EBF ; Yezidi
- 0x10EC0, # .. 0x10EFF ; No_Block
+ 0x10EC0, # .. 0x10EFF ; Arabic Extended-C
0x10F00, # .. 0x10F2F ; Old Sogdian
0x10F30, # .. 0x10F6F ; Sogdian
0x10F70, # .. 0x10FAF ; Old Uyghur
@@ -276,7 +276,8 @@ RANGES = [
0x11A50, # .. 0x11AAF ; Soyombo
0x11AB0, # .. 0x11ABF ; Unified Canadian Aboriginal Syllabics Extended-A
0x11AC0, # .. 0x11AFF ; Pau Cin Hau
- 0x11B00, # .. 0x11BFF ; No_Block
+ 0x11B00, # .. 0x11B5F ; Devanagari Extended-A
+ 0x11B60, # .. 0x11BFF ; No_Block
0x11C00, # .. 0x11C6F ; Bhaiksuki
0x11C70, # .. 0x11CBF ; Marchen
0x11CC0, # .. 0x11CFF ; No_Block
@@ -284,7 +285,8 @@ RANGES = [
0x11D60, # .. 0x11DAF ; Gunjala Gondi
0x11DB0, # .. 0x11EDF ; No_Block
0x11EE0, # .. 0x11EFF ; Makasar
- 0x11F00, # .. 0x11FAF ; No_Block
+ 0x11F00, # .. 0x11F5F ; Kawi
+ 0x11F60, # .. 0x11FAF ; No_Block
0x11FB0, # .. 0x11FBF ; Lisu Supplement
0x11FC0, # .. 0x11FFF ; Tamil Supplement
0x12000, # .. 0x123FF ; Cuneiform
@@ -293,8 +295,8 @@ RANGES = [
0x12550, # .. 0x12F8F ; No_Block
0x12F90, # .. 0x12FFF ; Cypro-Minoan
0x13000, # .. 0x1342F ; Egyptian Hieroglyphs
- 0x13430, # .. 0x1343F ; Egyptian Hieroglyph Format Controls
- 0x13440, # .. 0x143FF ; No_Block
+ 0x13430, # .. 0x1345F ; Egyptian Hieroglyph Format Controls
+ 0x13460, # .. 0x143FF ; No_Block
0x14400, # .. 0x1467F ; Anatolian Hieroglyphs
0x14680, # .. 0x167FF ; No_Block
0x16800, # .. 0x16A3F ; Bamum Supplement
@@ -327,7 +329,8 @@ RANGES = [
0x1D000, # .. 0x1D0FF ; Byzantine Musical Symbols
0x1D100, # .. 0x1D1FF ; Musical Symbols
0x1D200, # .. 0x1D24F ; Ancient Greek Musical Notation
- 0x1D250, # .. 0x1D2DF ; No_Block
+ 0x1D250, # .. 0x1D2BF ; No_Block
+ 0x1D2C0, # .. 0x1D2DF ; Kaktovik Numerals
0x1D2E0, # .. 0x1D2FF ; Mayan Numerals
0x1D300, # .. 0x1D35F ; Tai Xuan Jing Symbols
0x1D360, # .. 0x1D37F ; Counting Rod Numerals
@@ -337,12 +340,15 @@ RANGES = [
0x1DAB0, # .. 0x1DEFF ; No_Block
0x1DF00, # .. 0x1DFFF ; Latin Extended-G
0x1E000, # .. 0x1E02F ; Glagolitic Supplement
- 0x1E030, # .. 0x1E0FF ; No_Block
+ 0x1E030, # .. 0x1E08F ; Cyrillic Extended-D
+ 0x1E090, # .. 0x1E0FF ; No_Block
0x1E100, # .. 0x1E14F ; Nyiakeng Puachue Hmong
0x1E150, # .. 0x1E28F ; No_Block
0x1E290, # .. 0x1E2BF ; Toto
0x1E2C0, # .. 0x1E2FF ; Wancho
- 0x1E300, # .. 0x1E7DF ; No_Block
+ 0x1E300, # .. 0x1E4CF ; No_Block
+ 0x1E4D0, # .. 0x1E4FF ; Nag Mundari
+ 0x1E500, # .. 0x1E7DF ; No_Block
0x1E7E0, # .. 0x1E7FF ; Ethiopic Extended-B
0x1E800, # .. 0x1E8DF ; Mende Kikakui
0x1E8E0, # .. 0x1E8FF ; No_Block
@@ -381,7 +387,8 @@ RANGES = [
0x2F800, # .. 0x2FA1F ; CJK Compatibility Ideographs Supplement
0x2FA20, # .. 0x2FFFF ; No_Block
0x30000, # .. 0x3134F ; CJK Unified Ideographs Extension G
- 0x31350, # .. 0xDFFFF ; No_Block
+ 0x31350, # .. 0x323AF ; CJK Unified Ideographs Extension H
+ 0x323B0, # .. 0xDFFFF ; No_Block
0xE0000, # .. 0xE007F ; Tags
0xE0080, # .. 0xE00FF ; No_Block
0xE0100, # .. 0xE01EF ; Variation Selectors Supplement
@@ -391,375 +398,382 @@ RANGES = [
]
VALUES = [
- 'Basic Latin', # 0000..007F
- 'Latin-1 Supplement', # 0080..00FF
- 'Latin Extended-A', # 0100..017F
- 'Latin Extended-B', # 0180..024F
- 'IPA Extensions', # 0250..02AF
- 'Spacing Modifier Letters', # 02B0..02FF
- 'Combining Diacritical Marks', # 0300..036F
- 'Greek and Coptic', # 0370..03FF
- 'Cyrillic', # 0400..04FF
- 'Cyrillic Supplement', # 0500..052F
- 'Armenian', # 0530..058F
- 'Hebrew', # 0590..05FF
- 'Arabic', # 0600..06FF
- 'Syriac', # 0700..074F
- 'Arabic Supplement', # 0750..077F
- 'Thaana', # 0780..07BF
- 'NKo', # 07C0..07FF
- 'Samaritan', # 0800..083F
- 'Mandaic', # 0840..085F
- 'Syriac Supplement', # 0860..086F
- 'Arabic Extended-B', # 0870..089F
- 'Arabic Extended-A', # 08A0..08FF
- 'Devanagari', # 0900..097F
- 'Bengali', # 0980..09FF
- 'Gurmukhi', # 0A00..0A7F
- 'Gujarati', # 0A80..0AFF
- 'Oriya', # 0B00..0B7F
- 'Tamil', # 0B80..0BFF
- 'Telugu', # 0C00..0C7F
- 'Kannada', # 0C80..0CFF
- 'Malayalam', # 0D00..0D7F
- 'Sinhala', # 0D80..0DFF
- 'Thai', # 0E00..0E7F
- 'Lao', # 0E80..0EFF
- 'Tibetan', # 0F00..0FFF
- 'Myanmar', # 1000..109F
- 'Georgian', # 10A0..10FF
- 'Hangul Jamo', # 1100..11FF
- 'Ethiopic', # 1200..137F
- 'Ethiopic Supplement', # 1380..139F
- 'Cherokee', # 13A0..13FF
- 'Unified Canadian Aboriginal Syllabics', # 1400..167F
- 'Ogham', # 1680..169F
- 'Runic', # 16A0..16FF
- 'Tagalog', # 1700..171F
- 'Hanunoo', # 1720..173F
- 'Buhid', # 1740..175F
- 'Tagbanwa', # 1760..177F
- 'Khmer', # 1780..17FF
- 'Mongolian', # 1800..18AF
- 'Unified Canadian Aboriginal Syllabics Extended', # 18B0..18FF
- 'Limbu', # 1900..194F
- 'Tai Le', # 1950..197F
- 'New Tai Lue', # 1980..19DF
- 'Khmer Symbols', # 19E0..19FF
- 'Buginese', # 1A00..1A1F
- 'Tai Tham', # 1A20..1AAF
- 'Combining Diacritical Marks Extended', # 1AB0..1AFF
- 'Balinese', # 1B00..1B7F
- 'Sundanese', # 1B80..1BBF
- 'Batak', # 1BC0..1BFF
- 'Lepcha', # 1C00..1C4F
- 'Ol Chiki', # 1C50..1C7F
- 'Cyrillic Extended-C', # 1C80..1C8F
- 'Georgian Extended', # 1C90..1CBF
- 'Sundanese Supplement', # 1CC0..1CCF
- 'Vedic Extensions', # 1CD0..1CFF
- 'Phonetic Extensions', # 1D00..1D7F
- 'Phonetic Extensions Supplement', # 1D80..1DBF
- 'Combining Diacritical Marks Supplement', # 1DC0..1DFF
- 'Latin Extended Additional', # 1E00..1EFF
- 'Greek Extended', # 1F00..1FFF
- 'General Punctuation', # 2000..206F
- 'Superscripts and Subscripts', # 2070..209F
- 'Currency Symbols', # 20A0..20CF
- 'Combining Diacritical Marks for Symbols', # 20D0..20FF
- 'Letterlike Symbols', # 2100..214F
- 'Number Forms', # 2150..218F
- 'Arrows', # 2190..21FF
- 'Mathematical Operators', # 2200..22FF
- 'Miscellaneous Technical', # 2300..23FF
- 'Control Pictures', # 2400..243F
- 'Optical Character Recognition', # 2440..245F
- 'Enclosed Alphanumerics', # 2460..24FF
- 'Box Drawing', # 2500..257F
- 'Block Elements', # 2580..259F
- 'Geometric Shapes', # 25A0..25FF
- 'Miscellaneous Symbols', # 2600..26FF
- 'Dingbats', # 2700..27BF
- 'Miscellaneous Mathematical Symbols-A', # 27C0..27EF
- 'Supplemental Arrows-A', # 27F0..27FF
- 'Braille Patterns', # 2800..28FF
- 'Supplemental Arrows-B', # 2900..297F
- 'Miscellaneous Mathematical Symbols-B', # 2980..29FF
- 'Supplemental Mathematical Operators', # 2A00..2AFF
- 'Miscellaneous Symbols and Arrows', # 2B00..2BFF
- 'Glagolitic', # 2C00..2C5F
- 'Latin Extended-C', # 2C60..2C7F
- 'Coptic', # 2C80..2CFF
- 'Georgian Supplement', # 2D00..2D2F
- 'Tifinagh', # 2D30..2D7F
- 'Ethiopic Extended', # 2D80..2DDF
- 'Cyrillic Extended-A', # 2DE0..2DFF
- 'Supplemental Punctuation', # 2E00..2E7F
- 'CJK Radicals Supplement', # 2E80..2EFF
- 'Kangxi Radicals', # 2F00..2FDF
- 'No_Block', # 2FE0..2FEF
- 'Ideographic Description Characters', # 2FF0..2FFF
- 'CJK Symbols and Punctuation', # 3000..303F
- 'Hiragana', # 3040..309F
- 'Katakana', # 30A0..30FF
- 'Bopomofo', # 3100..312F
- 'Hangul Compatibility Jamo', # 3130..318F
- 'Kanbun', # 3190..319F
- 'Bopomofo Extended', # 31A0..31BF
- 'CJK Strokes', # 31C0..31EF
- 'Katakana Phonetic Extensions', # 31F0..31FF
- 'Enclosed CJK Letters and Months', # 3200..32FF
- 'CJK Compatibility', # 3300..33FF
- 'CJK Unified Ideographs Extension A', # 3400..4DBF
- 'Yijing Hexagram Symbols', # 4DC0..4DFF
- 'CJK Unified Ideographs', # 4E00..9FFF
- 'Yi Syllables', # A000..A48F
- 'Yi Radicals', # A490..A4CF
- 'Lisu', # A4D0..A4FF
- 'Vai', # A500..A63F
- 'Cyrillic Extended-B', # A640..A69F
- 'Bamum', # A6A0..A6FF
- 'Modifier Tone Letters', # A700..A71F
- 'Latin Extended-D', # A720..A7FF
- 'Syloti Nagri', # A800..A82F
- 'Common Indic Number Forms', # A830..A83F
- 'Phags-pa', # A840..A87F
- 'Saurashtra', # A880..A8DF
- 'Devanagari Extended', # A8E0..A8FF
- 'Kayah Li', # A900..A92F
- 'Rejang', # A930..A95F
- 'Hangul Jamo Extended-A', # A960..A97F
- 'Javanese', # A980..A9DF
- 'Myanmar Extended-B', # A9E0..A9FF
- 'Cham', # AA00..AA5F
- 'Myanmar Extended-A', # AA60..AA7F
- 'Tai Viet', # AA80..AADF
- 'Meetei Mayek Extensions', # AAE0..AAFF
- 'Ethiopic Extended-A', # AB00..AB2F
- 'Latin Extended-E', # AB30..AB6F
- 'Cherokee Supplement', # AB70..ABBF
- 'Meetei Mayek', # ABC0..ABFF
- 'Hangul Syllables', # AC00..D7AF
- 'Hangul Jamo Extended-B', # D7B0..D7FF
- 'High Surrogates', # D800..DB7F
- 'High Private Use Surrogates', # DB80..DBFF
- 'Low Surrogates', # DC00..DFFF
- 'Private Use Area', # E000..F8FF
- 'CJK Compatibility Ideographs', # F900..FAFF
- 'Alphabetic Presentation Forms', # FB00..FB4F
- 'Arabic Presentation Forms-A', # FB50..FDFF
- 'Variation Selectors', # FE00..FE0F
- 'Vertical Forms', # FE10..FE1F
- 'Combining Half Marks', # FE20..FE2F
- 'CJK Compatibility Forms', # FE30..FE4F
- 'Small Form Variants', # FE50..FE6F
- 'Arabic Presentation Forms-B', # FE70..FEFF
- 'Halfwidth and Fullwidth Forms', # FF00..FFEF
- 'Specials', # FFF0..FFFF
- 'Linear B Syllabary', # 10000..1007F
- 'Linear B Ideograms', # 10080..100FF
- 'Aegean Numbers', # 10100..1013F
- 'Ancient Greek Numbers', # 10140..1018F
- 'Ancient Symbols', # 10190..101CF
- 'Phaistos Disc', # 101D0..101FF
- 'No_Block', # 10200..1027F
- 'Lycian', # 10280..1029F
- 'Carian', # 102A0..102DF
- 'Coptic Epact Numbers', # 102E0..102FF
- 'Old Italic', # 10300..1032F
- 'Gothic', # 10330..1034F
- 'Old Permic', # 10350..1037F
- 'Ugaritic', # 10380..1039F
- 'Old Persian', # 103A0..103DF
- 'No_Block', # 103E0..103FF
- 'Deseret', # 10400..1044F
- 'Shavian', # 10450..1047F
- 'Osmanya', # 10480..104AF
- 'Osage', # 104B0..104FF
- 'Elbasan', # 10500..1052F
- 'Caucasian Albanian', # 10530..1056F
- 'Vithkuqi', # 10570..105BF
- 'No_Block', # 105C0..105FF
- 'Linear A', # 10600..1077F
- 'Latin Extended-F', # 10780..107BF
- 'No_Block', # 107C0..107FF
- 'Cypriot Syllabary', # 10800..1083F
- 'Imperial Aramaic', # 10840..1085F
- 'Palmyrene', # 10860..1087F
- 'Nabataean', # 10880..108AF
- 'No_Block', # 108B0..108DF
- 'Hatran', # 108E0..108FF
- 'Phoenician', # 10900..1091F
- 'Lydian', # 10920..1093F
- 'No_Block', # 10940..1097F
- 'Meroitic Hieroglyphs', # 10980..1099F
- 'Meroitic Cursive', # 109A0..109FF
- 'Kharoshthi', # 10A00..10A5F
- 'Old South Arabian', # 10A60..10A7F
- 'Old North Arabian', # 10A80..10A9F
- 'No_Block', # 10AA0..10ABF
- 'Manichaean', # 10AC0..10AFF
- 'Avestan', # 10B00..10B3F
- 'Inscriptional Parthian', # 10B40..10B5F
- 'Inscriptional Pahlavi', # 10B60..10B7F
- 'Psalter Pahlavi', # 10B80..10BAF
- 'No_Block', # 10BB0..10BFF
- 'Old Turkic', # 10C00..10C4F
- 'No_Block', # 10C50..10C7F
- 'Old Hungarian', # 10C80..10CFF
- 'Hanifi Rohingya', # 10D00..10D3F
- 'No_Block', # 10D40..10E5F
- 'Rumi Numeral Symbols', # 10E60..10E7F
- 'Yezidi', # 10E80..10EBF
- 'No_Block', # 10EC0..10EFF
- 'Old Sogdian', # 10F00..10F2F
- 'Sogdian', # 10F30..10F6F
- 'Old Uyghur', # 10F70..10FAF
- 'Chorasmian', # 10FB0..10FDF
- 'Elymaic', # 10FE0..10FFF
- 'Brahmi', # 11000..1107F
- 'Kaithi', # 11080..110CF
- 'Sora Sompeng', # 110D0..110FF
- 'Chakma', # 11100..1114F
- 'Mahajani', # 11150..1117F
- 'Sharada', # 11180..111DF
- 'Sinhala Archaic Numbers', # 111E0..111FF
- 'Khojki', # 11200..1124F
- 'No_Block', # 11250..1127F
- 'Multani', # 11280..112AF
- 'Khudawadi', # 112B0..112FF
- 'Grantha', # 11300..1137F
- 'No_Block', # 11380..113FF
- 'Newa', # 11400..1147F
- 'Tirhuta', # 11480..114DF
- 'No_Block', # 114E0..1157F
- 'Siddham', # 11580..115FF
- 'Modi', # 11600..1165F
- 'Mongolian Supplement', # 11660..1167F
- 'Takri', # 11680..116CF
- 'No_Block', # 116D0..116FF
- 'Ahom', # 11700..1174F
- 'No_Block', # 11750..117FF
- 'Dogra', # 11800..1184F
- 'No_Block', # 11850..1189F
- 'Warang Citi', # 118A0..118FF
- 'Dives Akuru', # 11900..1195F
- 'No_Block', # 11960..1199F
- 'Nandinagari', # 119A0..119FF
- 'Zanabazar Square', # 11A00..11A4F
- 'Soyombo', # 11A50..11AAF
- 'Unified Canadian Aboriginal Syllabics Extended-A', # 11AB0..11ABF
- 'Pau Cin Hau', # 11AC0..11AFF
- 'No_Block', # 11B00..11BFF
- 'Bhaiksuki', # 11C00..11C6F
- 'Marchen', # 11C70..11CBF
- 'No_Block', # 11CC0..11CFF
- 'Masaram Gondi', # 11D00..11D5F
- 'Gunjala Gondi', # 11D60..11DAF
- 'No_Block', # 11DB0..11EDF
- 'Makasar', # 11EE0..11EFF
- 'No_Block', # 11F00..11FAF
- 'Lisu Supplement', # 11FB0..11FBF
- 'Tamil Supplement', # 11FC0..11FFF
- 'Cuneiform', # 12000..123FF
- 'Cuneiform Numbers and Punctuation', # 12400..1247F
- 'Early Dynastic Cuneiform', # 12480..1254F
- 'No_Block', # 12550..12F8F
- 'Cypro-Minoan', # 12F90..12FFF
- 'Egyptian Hieroglyphs', # 13000..1342F
- 'Egyptian Hieroglyph Format Controls', # 13430..1343F
- 'No_Block', # 13440..143FF
- 'Anatolian Hieroglyphs', # 14400..1467F
- 'No_Block', # 14680..167FF
- 'Bamum Supplement', # 16800..16A3F
- 'Mro', # 16A40..16A6F
- 'Tangsa', # 16A70..16ACF
- 'Bassa Vah', # 16AD0..16AFF
- 'Pahawh Hmong', # 16B00..16B8F
- 'No_Block', # 16B90..16E3F
- 'Medefaidrin', # 16E40..16E9F
- 'No_Block', # 16EA0..16EFF
- 'Miao', # 16F00..16F9F
- 'No_Block', # 16FA0..16FDF
- 'Ideographic Symbols and Punctuation', # 16FE0..16FFF
- 'Tangut', # 17000..187FF
- 'Tangut Components', # 18800..18AFF
- 'Khitan Small Script', # 18B00..18CFF
- 'Tangut Supplement', # 18D00..18D7F
- 'No_Block', # 18D80..1AFEF
- 'Kana Extended-B', # 1AFF0..1AFFF
- 'Kana Supplement', # 1B000..1B0FF
- 'Kana Extended-A', # 1B100..1B12F
- 'Small Kana Extension', # 1B130..1B16F
- 'Nushu', # 1B170..1B2FF
- 'No_Block', # 1B300..1BBFF
- 'Duployan', # 1BC00..1BC9F
- 'Shorthand Format Controls', # 1BCA0..1BCAF
- 'No_Block', # 1BCB0..1CEFF
- 'Znamenny Musical Notation', # 1CF00..1CFCF
- 'No_Block', # 1CFD0..1CFFF
- 'Byzantine Musical Symbols', # 1D000..1D0FF
- 'Musical Symbols', # 1D100..1D1FF
- 'Ancient Greek Musical Notation', # 1D200..1D24F
- 'No_Block', # 1D250..1D2DF
- 'Mayan Numerals', # 1D2E0..1D2FF
- 'Tai Xuan Jing Symbols', # 1D300..1D35F
- 'Counting Rod Numerals', # 1D360..1D37F
- 'No_Block', # 1D380..1D3FF
- 'Mathematical Alphanumeric Symbols', # 1D400..1D7FF
- 'Sutton SignWriting', # 1D800..1DAAF
- 'No_Block', # 1DAB0..1DEFF
- 'Latin Extended-G', # 1DF00..1DFFF
- 'Glagolitic Supplement', # 1E000..1E02F
- 'No_Block', # 1E030..1E0FF
- 'Nyiakeng Puachue Hmong', # 1E100..1E14F
- 'No_Block', # 1E150..1E28F
- 'Toto', # 1E290..1E2BF
- 'Wancho', # 1E2C0..1E2FF
- 'No_Block', # 1E300..1E7DF
- 'Ethiopic Extended-B', # 1E7E0..1E7FF
- 'Mende Kikakui', # 1E800..1E8DF
- 'No_Block', # 1E8E0..1E8FF
- 'Adlam', # 1E900..1E95F
- 'No_Block', # 1E960..1EC6F
- 'Indic Siyaq Numbers', # 1EC70..1ECBF
- 'No_Block', # 1ECC0..1ECFF
- 'Ottoman Siyaq Numbers', # 1ED00..1ED4F
- 'No_Block', # 1ED50..1EDFF
- 'Arabic Mathematical Alphabetic Symbols', # 1EE00..1EEFF
- 'No_Block', # 1EF00..1EFFF
- 'Mahjong Tiles', # 1F000..1F02F
- 'Domino Tiles', # 1F030..1F09F
- 'Playing Cards', # 1F0A0..1F0FF
- 'Enclosed Alphanumeric Supplement', # 1F100..1F1FF
- 'Enclosed Ideographic Supplement', # 1F200..1F2FF
- 'Miscellaneous Symbols and Pictographs', # 1F300..1F5FF
- 'Emoticons', # 1F600..1F64F
- 'Ornamental Dingbats', # 1F650..1F67F
- 'Transport and Map Symbols', # 1F680..1F6FF
- 'Alchemical Symbols', # 1F700..1F77F
- 'Geometric Shapes Extended', # 1F780..1F7FF
- 'Supplemental Arrows-C', # 1F800..1F8FF
- 'Supplemental Symbols and Pictographs', # 1F900..1F9FF
- 'Chess Symbols', # 1FA00..1FA6F
- 'Symbols and Pictographs Extended-A', # 1FA70..1FAFF
- 'Symbols for Legacy Computing', # 1FB00..1FBFF
- 'No_Block', # 1FC00..1FFFF
- 'CJK Unified Ideographs Extension B', # 20000..2A6DF
- 'No_Block', # 2A6E0..2A6FF
- 'CJK Unified Ideographs Extension C', # 2A700..2B73F
- 'CJK Unified Ideographs Extension D', # 2B740..2B81F
- 'CJK Unified Ideographs Extension E', # 2B820..2CEAF
- 'CJK Unified Ideographs Extension F', # 2CEB0..2EBEF
- 'No_Block', # 2EBF0..2F7FF
- 'CJK Compatibility Ideographs Supplement', # 2F800..2FA1F
- 'No_Block', # 2FA20..2FFFF
- 'CJK Unified Ideographs Extension G', # 30000..3134F
- 'No_Block', # 31350..DFFFF
- 'Tags', # E0000..E007F
- 'No_Block', # E0080..E00FF
- 'Variation Selectors Supplement', # E0100..E01EF
- 'No_Block', # E01F0..EFFFF
- 'Supplementary Private Use Area-A', # F0000..FFFFF
- 'Supplementary Private Use Area-B', # 100000..10FFFF
+ "Basic Latin", # 0000..007F
+ "Latin-1 Supplement", # 0080..00FF
+ "Latin Extended-A", # 0100..017F
+ "Latin Extended-B", # 0180..024F
+ "IPA Extensions", # 0250..02AF
+ "Spacing Modifier Letters", # 02B0..02FF
+ "Combining Diacritical Marks", # 0300..036F
+ "Greek and Coptic", # 0370..03FF
+ "Cyrillic", # 0400..04FF
+ "Cyrillic Supplement", # 0500..052F
+ "Armenian", # 0530..058F
+ "Hebrew", # 0590..05FF
+ "Arabic", # 0600..06FF
+ "Syriac", # 0700..074F
+ "Arabic Supplement", # 0750..077F
+ "Thaana", # 0780..07BF
+ "NKo", # 07C0..07FF
+ "Samaritan", # 0800..083F
+ "Mandaic", # 0840..085F
+ "Syriac Supplement", # 0860..086F
+ "Arabic Extended-B", # 0870..089F
+ "Arabic Extended-A", # 08A0..08FF
+ "Devanagari", # 0900..097F
+ "Bengali", # 0980..09FF
+ "Gurmukhi", # 0A00..0A7F
+ "Gujarati", # 0A80..0AFF
+ "Oriya", # 0B00..0B7F
+ "Tamil", # 0B80..0BFF
+ "Telugu", # 0C00..0C7F
+ "Kannada", # 0C80..0CFF
+ "Malayalam", # 0D00..0D7F
+ "Sinhala", # 0D80..0DFF
+ "Thai", # 0E00..0E7F
+ "Lao", # 0E80..0EFF
+ "Tibetan", # 0F00..0FFF
+ "Myanmar", # 1000..109F
+ "Georgian", # 10A0..10FF
+ "Hangul Jamo", # 1100..11FF
+ "Ethiopic", # 1200..137F
+ "Ethiopic Supplement", # 1380..139F
+ "Cherokee", # 13A0..13FF
+ "Unified Canadian Aboriginal Syllabics", # 1400..167F
+ "Ogham", # 1680..169F
+ "Runic", # 16A0..16FF
+ "Tagalog", # 1700..171F
+ "Hanunoo", # 1720..173F
+ "Buhid", # 1740..175F
+ "Tagbanwa", # 1760..177F
+ "Khmer", # 1780..17FF
+ "Mongolian", # 1800..18AF
+ "Unified Canadian Aboriginal Syllabics Extended", # 18B0..18FF
+ "Limbu", # 1900..194F
+ "Tai Le", # 1950..197F
+ "New Tai Lue", # 1980..19DF
+ "Khmer Symbols", # 19E0..19FF
+ "Buginese", # 1A00..1A1F
+ "Tai Tham", # 1A20..1AAF
+ "Combining Diacritical Marks Extended", # 1AB0..1AFF
+ "Balinese", # 1B00..1B7F
+ "Sundanese", # 1B80..1BBF
+ "Batak", # 1BC0..1BFF
+ "Lepcha", # 1C00..1C4F
+ "Ol Chiki", # 1C50..1C7F
+ "Cyrillic Extended-C", # 1C80..1C8F
+ "Georgian Extended", # 1C90..1CBF
+ "Sundanese Supplement", # 1CC0..1CCF
+ "Vedic Extensions", # 1CD0..1CFF
+ "Phonetic Extensions", # 1D00..1D7F
+ "Phonetic Extensions Supplement", # 1D80..1DBF
+ "Combining Diacritical Marks Supplement", # 1DC0..1DFF
+ "Latin Extended Additional", # 1E00..1EFF
+ "Greek Extended", # 1F00..1FFF
+ "General Punctuation", # 2000..206F
+ "Superscripts and Subscripts", # 2070..209F
+ "Currency Symbols", # 20A0..20CF
+ "Combining Diacritical Marks for Symbols", # 20D0..20FF
+ "Letterlike Symbols", # 2100..214F
+ "Number Forms", # 2150..218F
+ "Arrows", # 2190..21FF
+ "Mathematical Operators", # 2200..22FF
+ "Miscellaneous Technical", # 2300..23FF
+ "Control Pictures", # 2400..243F
+ "Optical Character Recognition", # 2440..245F
+ "Enclosed Alphanumerics", # 2460..24FF
+ "Box Drawing", # 2500..257F
+ "Block Elements", # 2580..259F
+ "Geometric Shapes", # 25A0..25FF
+ "Miscellaneous Symbols", # 2600..26FF
+ "Dingbats", # 2700..27BF
+ "Miscellaneous Mathematical Symbols-A", # 27C0..27EF
+ "Supplemental Arrows-A", # 27F0..27FF
+ "Braille Patterns", # 2800..28FF
+ "Supplemental Arrows-B", # 2900..297F
+ "Miscellaneous Mathematical Symbols-B", # 2980..29FF
+ "Supplemental Mathematical Operators", # 2A00..2AFF
+ "Miscellaneous Symbols and Arrows", # 2B00..2BFF
+ "Glagolitic", # 2C00..2C5F
+ "Latin Extended-C", # 2C60..2C7F
+ "Coptic", # 2C80..2CFF
+ "Georgian Supplement", # 2D00..2D2F
+ "Tifinagh", # 2D30..2D7F
+ "Ethiopic Extended", # 2D80..2DDF
+ "Cyrillic Extended-A", # 2DE0..2DFF
+ "Supplemental Punctuation", # 2E00..2E7F
+ "CJK Radicals Supplement", # 2E80..2EFF
+ "Kangxi Radicals", # 2F00..2FDF
+ "No_Block", # 2FE0..2FEF
+ "Ideographic Description Characters", # 2FF0..2FFF
+ "CJK Symbols and Punctuation", # 3000..303F
+ "Hiragana", # 3040..309F
+ "Katakana", # 30A0..30FF
+ "Bopomofo", # 3100..312F
+ "Hangul Compatibility Jamo", # 3130..318F
+ "Kanbun", # 3190..319F
+ "Bopomofo Extended", # 31A0..31BF
+ "CJK Strokes", # 31C0..31EF
+ "Katakana Phonetic Extensions", # 31F0..31FF
+ "Enclosed CJK Letters and Months", # 3200..32FF
+ "CJK Compatibility", # 3300..33FF
+ "CJK Unified Ideographs Extension A", # 3400..4DBF
+ "Yijing Hexagram Symbols", # 4DC0..4DFF
+ "CJK Unified Ideographs", # 4E00..9FFF
+ "Yi Syllables", # A000..A48F
+ "Yi Radicals", # A490..A4CF
+ "Lisu", # A4D0..A4FF
+ "Vai", # A500..A63F
+ "Cyrillic Extended-B", # A640..A69F
+ "Bamum", # A6A0..A6FF
+ "Modifier Tone Letters", # A700..A71F
+ "Latin Extended-D", # A720..A7FF
+ "Syloti Nagri", # A800..A82F
+ "Common Indic Number Forms", # A830..A83F
+ "Phags-pa", # A840..A87F
+ "Saurashtra", # A880..A8DF
+ "Devanagari Extended", # A8E0..A8FF
+ "Kayah Li", # A900..A92F
+ "Rejang", # A930..A95F
+ "Hangul Jamo Extended-A", # A960..A97F
+ "Javanese", # A980..A9DF
+ "Myanmar Extended-B", # A9E0..A9FF
+ "Cham", # AA00..AA5F
+ "Myanmar Extended-A", # AA60..AA7F
+ "Tai Viet", # AA80..AADF
+ "Meetei Mayek Extensions", # AAE0..AAFF
+ "Ethiopic Extended-A", # AB00..AB2F
+ "Latin Extended-E", # AB30..AB6F
+ "Cherokee Supplement", # AB70..ABBF
+ "Meetei Mayek", # ABC0..ABFF
+ "Hangul Syllables", # AC00..D7AF
+ "Hangul Jamo Extended-B", # D7B0..D7FF
+ "High Surrogates", # D800..DB7F
+ "High Private Use Surrogates", # DB80..DBFF
+ "Low Surrogates", # DC00..DFFF
+ "Private Use Area", # E000..F8FF
+ "CJK Compatibility Ideographs", # F900..FAFF
+ "Alphabetic Presentation Forms", # FB00..FB4F
+ "Arabic Presentation Forms-A", # FB50..FDFF
+ "Variation Selectors", # FE00..FE0F
+ "Vertical Forms", # FE10..FE1F
+ "Combining Half Marks", # FE20..FE2F
+ "CJK Compatibility Forms", # FE30..FE4F
+ "Small Form Variants", # FE50..FE6F
+ "Arabic Presentation Forms-B", # FE70..FEFF
+ "Halfwidth and Fullwidth Forms", # FF00..FFEF
+ "Specials", # FFF0..FFFF
+ "Linear B Syllabary", # 10000..1007F
+ "Linear B Ideograms", # 10080..100FF
+ "Aegean Numbers", # 10100..1013F
+ "Ancient Greek Numbers", # 10140..1018F
+ "Ancient Symbols", # 10190..101CF
+ "Phaistos Disc", # 101D0..101FF
+ "No_Block", # 10200..1027F
+ "Lycian", # 10280..1029F
+ "Carian", # 102A0..102DF
+ "Coptic Epact Numbers", # 102E0..102FF
+ "Old Italic", # 10300..1032F
+ "Gothic", # 10330..1034F
+ "Old Permic", # 10350..1037F
+ "Ugaritic", # 10380..1039F
+ "Old Persian", # 103A0..103DF
+ "No_Block", # 103E0..103FF
+ "Deseret", # 10400..1044F
+ "Shavian", # 10450..1047F
+ "Osmanya", # 10480..104AF
+ "Osage", # 104B0..104FF
+ "Elbasan", # 10500..1052F
+ "Caucasian Albanian", # 10530..1056F
+ "Vithkuqi", # 10570..105BF
+ "No_Block", # 105C0..105FF
+ "Linear A", # 10600..1077F
+ "Latin Extended-F", # 10780..107BF
+ "No_Block", # 107C0..107FF
+ "Cypriot Syllabary", # 10800..1083F
+ "Imperial Aramaic", # 10840..1085F
+ "Palmyrene", # 10860..1087F
+ "Nabataean", # 10880..108AF
+ "No_Block", # 108B0..108DF
+ "Hatran", # 108E0..108FF
+ "Phoenician", # 10900..1091F
+ "Lydian", # 10920..1093F
+ "No_Block", # 10940..1097F
+ "Meroitic Hieroglyphs", # 10980..1099F
+ "Meroitic Cursive", # 109A0..109FF
+ "Kharoshthi", # 10A00..10A5F
+ "Old South Arabian", # 10A60..10A7F
+ "Old North Arabian", # 10A80..10A9F
+ "No_Block", # 10AA0..10ABF
+ "Manichaean", # 10AC0..10AFF
+ "Avestan", # 10B00..10B3F
+ "Inscriptional Parthian", # 10B40..10B5F
+ "Inscriptional Pahlavi", # 10B60..10B7F
+ "Psalter Pahlavi", # 10B80..10BAF
+ "No_Block", # 10BB0..10BFF
+ "Old Turkic", # 10C00..10C4F
+ "No_Block", # 10C50..10C7F
+ "Old Hungarian", # 10C80..10CFF
+ "Hanifi Rohingya", # 10D00..10D3F
+ "No_Block", # 10D40..10E5F
+ "Rumi Numeral Symbols", # 10E60..10E7F
+ "Yezidi", # 10E80..10EBF
+ "Arabic Extended-C", # 10EC0..10EFF
+ "Old Sogdian", # 10F00..10F2F
+ "Sogdian", # 10F30..10F6F
+ "Old Uyghur", # 10F70..10FAF
+ "Chorasmian", # 10FB0..10FDF
+ "Elymaic", # 10FE0..10FFF
+ "Brahmi", # 11000..1107F
+ "Kaithi", # 11080..110CF
+ "Sora Sompeng", # 110D0..110FF
+ "Chakma", # 11100..1114F
+ "Mahajani", # 11150..1117F
+ "Sharada", # 11180..111DF
+ "Sinhala Archaic Numbers", # 111E0..111FF
+ "Khojki", # 11200..1124F
+ "No_Block", # 11250..1127F
+ "Multani", # 11280..112AF
+ "Khudawadi", # 112B0..112FF
+ "Grantha", # 11300..1137F
+ "No_Block", # 11380..113FF
+ "Newa", # 11400..1147F
+ "Tirhuta", # 11480..114DF
+ "No_Block", # 114E0..1157F
+ "Siddham", # 11580..115FF
+ "Modi", # 11600..1165F
+ "Mongolian Supplement", # 11660..1167F
+ "Takri", # 11680..116CF
+ "No_Block", # 116D0..116FF
+ "Ahom", # 11700..1174F
+ "No_Block", # 11750..117FF
+ "Dogra", # 11800..1184F
+ "No_Block", # 11850..1189F
+ "Warang Citi", # 118A0..118FF
+ "Dives Akuru", # 11900..1195F
+ "No_Block", # 11960..1199F
+ "Nandinagari", # 119A0..119FF
+ "Zanabazar Square", # 11A00..11A4F
+ "Soyombo", # 11A50..11AAF
+ "Unified Canadian Aboriginal Syllabics Extended-A", # 11AB0..11ABF
+ "Pau Cin Hau", # 11AC0..11AFF
+ "Devanagari Extended-A", # 11B00..11B5F
+ "No_Block", # 11B60..11BFF
+ "Bhaiksuki", # 11C00..11C6F
+ "Marchen", # 11C70..11CBF
+ "No_Block", # 11CC0..11CFF
+ "Masaram Gondi", # 11D00..11D5F
+ "Gunjala Gondi", # 11D60..11DAF
+ "No_Block", # 11DB0..11EDF
+ "Makasar", # 11EE0..11EFF
+ "Kawi", # 11F00..11F5F
+ "No_Block", # 11F60..11FAF
+ "Lisu Supplement", # 11FB0..11FBF
+ "Tamil Supplement", # 11FC0..11FFF
+ "Cuneiform", # 12000..123FF
+ "Cuneiform Numbers and Punctuation", # 12400..1247F
+ "Early Dynastic Cuneiform", # 12480..1254F
+ "No_Block", # 12550..12F8F
+ "Cypro-Minoan", # 12F90..12FFF
+ "Egyptian Hieroglyphs", # 13000..1342F
+ "Egyptian Hieroglyph Format Controls", # 13430..1345F
+ "No_Block", # 13460..143FF
+ "Anatolian Hieroglyphs", # 14400..1467F
+ "No_Block", # 14680..167FF
+ "Bamum Supplement", # 16800..16A3F
+ "Mro", # 16A40..16A6F
+ "Tangsa", # 16A70..16ACF
+ "Bassa Vah", # 16AD0..16AFF
+ "Pahawh Hmong", # 16B00..16B8F
+ "No_Block", # 16B90..16E3F
+ "Medefaidrin", # 16E40..16E9F
+ "No_Block", # 16EA0..16EFF
+ "Miao", # 16F00..16F9F
+ "No_Block", # 16FA0..16FDF
+ "Ideographic Symbols and Punctuation", # 16FE0..16FFF
+ "Tangut", # 17000..187FF
+ "Tangut Components", # 18800..18AFF
+ "Khitan Small Script", # 18B00..18CFF
+ "Tangut Supplement", # 18D00..18D7F
+ "No_Block", # 18D80..1AFEF
+ "Kana Extended-B", # 1AFF0..1AFFF
+ "Kana Supplement", # 1B000..1B0FF
+ "Kana Extended-A", # 1B100..1B12F
+ "Small Kana Extension", # 1B130..1B16F
+ "Nushu", # 1B170..1B2FF
+ "No_Block", # 1B300..1BBFF
+ "Duployan", # 1BC00..1BC9F
+ "Shorthand Format Controls", # 1BCA0..1BCAF
+ "No_Block", # 1BCB0..1CEFF
+ "Znamenny Musical Notation", # 1CF00..1CFCF
+ "No_Block", # 1CFD0..1CFFF
+ "Byzantine Musical Symbols", # 1D000..1D0FF
+ "Musical Symbols", # 1D100..1D1FF
+ "Ancient Greek Musical Notation", # 1D200..1D24F
+ "No_Block", # 1D250..1D2BF
+ "Kaktovik Numerals", # 1D2C0..1D2DF
+ "Mayan Numerals", # 1D2E0..1D2FF
+ "Tai Xuan Jing Symbols", # 1D300..1D35F
+ "Counting Rod Numerals", # 1D360..1D37F
+ "No_Block", # 1D380..1D3FF
+ "Mathematical Alphanumeric Symbols", # 1D400..1D7FF
+ "Sutton SignWriting", # 1D800..1DAAF
+ "No_Block", # 1DAB0..1DEFF
+ "Latin Extended-G", # 1DF00..1DFFF
+ "Glagolitic Supplement", # 1E000..1E02F
+ "Cyrillic Extended-D", # 1E030..1E08F
+ "No_Block", # 1E090..1E0FF
+ "Nyiakeng Puachue Hmong", # 1E100..1E14F
+ "No_Block", # 1E150..1E28F
+ "Toto", # 1E290..1E2BF
+ "Wancho", # 1E2C0..1E2FF
+ "No_Block", # 1E300..1E4CF
+ "Nag Mundari", # 1E4D0..1E4FF
+ "No_Block", # 1E500..1E7DF
+ "Ethiopic Extended-B", # 1E7E0..1E7FF
+ "Mende Kikakui", # 1E800..1E8DF
+ "No_Block", # 1E8E0..1E8FF
+ "Adlam", # 1E900..1E95F
+ "No_Block", # 1E960..1EC6F
+ "Indic Siyaq Numbers", # 1EC70..1ECBF
+ "No_Block", # 1ECC0..1ECFF
+ "Ottoman Siyaq Numbers", # 1ED00..1ED4F
+ "No_Block", # 1ED50..1EDFF
+ "Arabic Mathematical Alphabetic Symbols", # 1EE00..1EEFF
+ "No_Block", # 1EF00..1EFFF
+ "Mahjong Tiles", # 1F000..1F02F
+ "Domino Tiles", # 1F030..1F09F
+ "Playing Cards", # 1F0A0..1F0FF
+ "Enclosed Alphanumeric Supplement", # 1F100..1F1FF
+ "Enclosed Ideographic Supplement", # 1F200..1F2FF
+ "Miscellaneous Symbols and Pictographs", # 1F300..1F5FF
+ "Emoticons", # 1F600..1F64F
+ "Ornamental Dingbats", # 1F650..1F67F
+ "Transport and Map Symbols", # 1F680..1F6FF
+ "Alchemical Symbols", # 1F700..1F77F
+ "Geometric Shapes Extended", # 1F780..1F7FF
+ "Supplemental Arrows-C", # 1F800..1F8FF
+ "Supplemental Symbols and Pictographs", # 1F900..1F9FF
+ "Chess Symbols", # 1FA00..1FA6F
+ "Symbols and Pictographs Extended-A", # 1FA70..1FAFF
+ "Symbols for Legacy Computing", # 1FB00..1FBFF
+ "No_Block", # 1FC00..1FFFF
+ "CJK Unified Ideographs Extension B", # 20000..2A6DF
+ "No_Block", # 2A6E0..2A6FF
+ "CJK Unified Ideographs Extension C", # 2A700..2B73F
+ "CJK Unified Ideographs Extension D", # 2B740..2B81F
+ "CJK Unified Ideographs Extension E", # 2B820..2CEAF
+ "CJK Unified Ideographs Extension F", # 2CEB0..2EBEF
+ "No_Block", # 2EBF0..2F7FF
+ "CJK Compatibility Ideographs Supplement", # 2F800..2FA1F
+ "No_Block", # 2FA20..2FFFF
+ "CJK Unified Ideographs Extension G", # 30000..3134F
+ "CJK Unified Ideographs Extension H", # 31350..323AF
+ "No_Block", # 323B0..DFFFF
+ "Tags", # E0000..E007F
+ "No_Block", # E0080..E00FF
+ "Variation Selectors Supplement", # E0100..E01EF
+ "No_Block", # E01F0..EFFFF
+ "Supplementary Private Use Area-A", # F0000..FFFFF
+ "Supplementary Private Use Area-B", # 100000..10FFFF
]
diff --git a/Lib/fontTools/unicodedata/OTTags.py b/Lib/fontTools/unicodedata/OTTags.py
index a9d8cd1c..859a3bcd 100644
--- a/Lib/fontTools/unicodedata/OTTags.py
+++ b/Lib/fontTools/unicodedata/OTTags.py
@@ -22,11 +22,16 @@ SCRIPT_EXCEPTIONS = {
"Yiii": "yi ",
"Nkoo": "nko ",
"Vaii": "vai ",
+ "Zmth": "math",
"Zinh": DEFAULT_SCRIPT,
"Zyyy": DEFAULT_SCRIPT,
"Zzzz": DEFAULT_SCRIPT,
}
+SCRIPT_EXCEPTIONS_REVERSED = {
+ "math": "Zmth",
+}
+
NEW_SCRIPT_TAGS = {
"Beng": ("bng2",),
"Deva": ("dev2",),
diff --git a/Lib/fontTools/unicodedata/ScriptExtensions.py b/Lib/fontTools/unicodedata/ScriptExtensions.py
index b078c13e..2ecc5dae 100644
--- a/Lib/fontTools/unicodedata/ScriptExtensions.py
+++ b/Lib/fontTools/unicodedata/ScriptExtensions.py
@@ -4,14 +4,14 @@
# Source: https://unicode.org/Public/UNIDATA/ScriptExtensions.txt
# License: http://unicode.org/copyright.html#License
#
-# ScriptExtensions-14.0.0.txt
-# Date: 2021-06-04, 02:19:38 GMT
-# © 2021 Unicode®, Inc.
+# ScriptExtensions-15.0.0.txt
+# Date: 2022-02-02, 00:57:11 GMT
+# © 2022 Unicode®, Inc.
# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.
-# For terms of use, see http://www.unicode.org/terms_of_use.html
+# For terms of use, see https://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
-# For documentation, see http://www.unicode.org/reports/tr44/
+# For documentation, see https://www.unicode.org/reports/tr44/
#
# The Script_Extensions property indicates which characters are commonly used
# with more than one script, but with a limited number of scripts.
@@ -244,204 +244,325 @@ RANGES = [
]
VALUES = [
- None, # 0000..0341
- {'Grek'}, # 0342..0342
- None, # 0343..0344
- {'Grek'}, # 0345..0345
- None, # 0346..0362
- {'Latn'}, # 0363..036F
- None, # 0370..0482
- {'Cyrl', 'Perm'}, # 0483..0483
- {'Cyrl', 'Glag'}, # 0484..0484
- {'Cyrl', 'Latn'}, # 0485..0486
- {'Cyrl', 'Glag'}, # 0487..0487
- None, # 0488..060B
- {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 060C..060C
- None, # 060D..061A
- {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 061B..061B
- {'Arab', 'Syrc', 'Thaa'}, # 061C..061C
- None, # 061D..061E
- {'Adlm', 'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}, # 061F..061F
- None, # 0620..063F
- {'Adlm', 'Arab', 'Mand', 'Mani', 'Ougr', 'Phlp', 'Rohg', 'Sogd', 'Syrc'}, # 0640..0640
- None, # 0641..064A
- {'Arab', 'Syrc'}, # 064B..0655
- None, # 0656..065F
- {'Arab', 'Thaa', 'Yezi'}, # 0660..0669
- None, # 066A..066F
- {'Arab', 'Syrc'}, # 0670..0670
- None, # 0671..06D3
- {'Arab', 'Rohg'}, # 06D4..06D4
- None, # 06D5..0950
- {'Beng', 'Deva', 'Gran', 'Gujr', 'Guru', 'Knda', 'Latn', 'Mlym', 'Orya', 'Shrd', 'Taml', 'Telu', 'Tirh'}, # 0951..0951
- {'Beng', 'Deva', 'Gran', 'Gujr', 'Guru', 'Knda', 'Latn', 'Mlym', 'Orya', 'Taml', 'Telu', 'Tirh'}, # 0952..0952
- None, # 0953..0963
- {'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Guru', 'Knda', 'Mahj', 'Mlym', 'Nand', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml', 'Telu', 'Tirh'}, # 0964..0964
- {'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Guru', 'Knda', 'Limb', 'Mahj', 'Mlym', 'Nand', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml', 'Telu', 'Tirh'}, # 0965..0965
- {'Deva', 'Dogr', 'Kthi', 'Mahj'}, # 0966..096F
- None, # 0970..09E5
- {'Beng', 'Cakm', 'Sylo'}, # 09E6..09EF
- None, # 09F0..0A65
- {'Guru', 'Mult'}, # 0A66..0A6F
- None, # 0A70..0AE5
- {'Gujr', 'Khoj'}, # 0AE6..0AEF
- None, # 0AF0..0BE5
- {'Gran', 'Taml'}, # 0BE6..0BF3
- None, # 0BF4..0CE5
- {'Knda', 'Nand'}, # 0CE6..0CEF
- None, # 0CF0..103F
- {'Cakm', 'Mymr', 'Tale'}, # 1040..1049
- None, # 104A..10FA
- {'Geor', 'Latn'}, # 10FB..10FB
- None, # 10FC..1734
- {'Buhd', 'Hano', 'Tagb', 'Tglg'}, # 1735..1736
- None, # 1737..1801
- {'Mong', 'Phag'}, # 1802..1803
- None, # 1804..1804
- {'Mong', 'Phag'}, # 1805..1805
- None, # 1806..1CCF
- {'Beng', 'Deva', 'Gran', 'Knda'}, # 1CD0..1CD0
- {'Deva'}, # 1CD1..1CD1
- {'Beng', 'Deva', 'Gran', 'Knda'}, # 1CD2..1CD2
- {'Deva', 'Gran'}, # 1CD3..1CD3
- {'Deva'}, # 1CD4..1CD4
- {'Beng', 'Deva'}, # 1CD5..1CD6
- {'Deva', 'Shrd'}, # 1CD7..1CD7
- {'Beng', 'Deva'}, # 1CD8..1CD8
- {'Deva', 'Shrd'}, # 1CD9..1CD9
- {'Deva', 'Knda', 'Mlym', 'Orya', 'Taml', 'Telu'}, # 1CDA..1CDA
- {'Deva'}, # 1CDB..1CDB
- {'Deva', 'Shrd'}, # 1CDC..1CDD
- {'Deva'}, # 1CDE..1CDF
- {'Deva', 'Shrd'}, # 1CE0..1CE0
- {'Beng', 'Deva'}, # 1CE1..1CE1
- {'Deva'}, # 1CE2..1CE8
- {'Deva', 'Nand'}, # 1CE9..1CE9
- {'Beng', 'Deva'}, # 1CEA..1CEA
- {'Deva'}, # 1CEB..1CEC
- {'Beng', 'Deva'}, # 1CED..1CED
- {'Deva'}, # 1CEE..1CF1
- {'Beng', 'Deva', 'Gran', 'Knda', 'Nand', 'Orya', 'Telu', 'Tirh'}, # 1CF2..1CF2
- {'Deva', 'Gran'}, # 1CF3..1CF3
- {'Deva', 'Gran', 'Knda'}, # 1CF4..1CF4
- {'Beng', 'Deva'}, # 1CF5..1CF6
- {'Beng'}, # 1CF7..1CF7
- {'Deva', 'Gran'}, # 1CF8..1CF9
- {'Nand'}, # 1CFA..1CFA
- None, # 1CFB..1DBF
- {'Grek'}, # 1DC0..1DC1
- None, # 1DC2..1DF7
- {'Cyrl', 'Syrc'}, # 1DF8..1DF8
- None, # 1DF9..1DF9
- {'Syrc'}, # 1DFA..1DFA
- None, # 1DFB..202E
- {'Latn', 'Mong'}, # 202F..202F
- None, # 2030..20EF
- {'Deva', 'Gran', 'Latn'}, # 20F0..20F0
- None, # 20F1..2E42
- {'Cyrl', 'Glag'}, # 2E43..2E43
- None, # 2E44..3000
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}, # 3001..3002
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}, # 3003..3003
- None, # 3004..3005
- {'Hani'}, # 3006..3006
- None, # 3007..3007
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}, # 3008..3011
- None, # 3012..3012
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}, # 3013..3013
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}, # 3014..301B
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}, # 301C..301F
- None, # 3020..3029
- {'Bopo', 'Hani'}, # 302A..302D
- None, # 302E..302F
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}, # 3030..3030
- {'Hira', 'Kana'}, # 3031..3035
- None, # 3036..3036
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}, # 3037..3037
- None, # 3038..303B
- {'Hani', 'Hira', 'Kana'}, # 303C..303D
- {'Hani'}, # 303E..303F
- None, # 3040..3098
- {'Hira', 'Kana'}, # 3099..309C
- None, # 309D..309F
- {'Hira', 'Kana'}, # 30A0..30A0
- None, # 30A1..30FA
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}, # 30FB..30FB
- {'Hira', 'Kana'}, # 30FC..30FC
- None, # 30FD..318F
- {'Hani'}, # 3190..319F
- None, # 31A0..31BF
- {'Hani'}, # 31C0..31E3
- None, # 31E4..321F
- {'Hani'}, # 3220..3247
- None, # 3248..327F
- {'Hani'}, # 3280..32B0
- None, # 32B1..32BF
- {'Hani'}, # 32C0..32CB
- None, # 32CC..32FE
- {'Hani'}, # 32FF..32FF
- None, # 3300..3357
- {'Hani'}, # 3358..3370
- None, # 3371..337A
- {'Hani'}, # 337B..337F
- None, # 3380..33DF
- {'Hani'}, # 33E0..33FE
- None, # 33FF..A66E
- {'Cyrl', 'Glag'}, # A66F..A66F
- None, # A670..A6FF
- {'Hani', 'Latn'}, # A700..A707
- None, # A708..A82F
- {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Knda', 'Kthi', 'Mahj', 'Mlym', 'Modi', 'Nand', 'Sind', 'Takr', 'Tirh'}, # A830..A832
- {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Knda', 'Kthi', 'Mahj', 'Modi', 'Nand', 'Sind', 'Takr', 'Tirh'}, # A833..A835
- {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Kthi', 'Mahj', 'Modi', 'Sind', 'Takr', 'Tirh'}, # A836..A839
- None, # A83A..A8F0
- {'Beng', 'Deva'}, # A8F1..A8F1
- None, # A8F2..A8F2
- {'Deva', 'Taml'}, # A8F3..A8F3
- None, # A8F4..A92D
- {'Kali', 'Latn', 'Mymr'}, # A92E..A92E
- None, # A92F..A9CE
- {'Bugi', 'Java'}, # A9CF..A9CF
- None, # A9D0..FD3D
- {'Arab', 'Nkoo'}, # FD3E..FD3F
- None, # FD40..FDF1
- {'Arab', 'Thaa'}, # FDF2..FDF2
- None, # FDF3..FDFC
- {'Arab', 'Thaa'}, # FDFD..FDFD
- None, # FDFE..FE44
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}, # FE45..FE46
- None, # FE47..FF60
- {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}, # FF61..FF65
- None, # FF66..FF6F
- {'Hira', 'Kana'}, # FF70..FF70
- None, # FF71..FF9D
- {'Hira', 'Kana'}, # FF9E..FF9F
- None, # FFA0..100FF
- {'Cpmn', 'Cprt', 'Linb'}, # 10100..10101
- {'Cprt', 'Linb'}, # 10102..10102
- None, # 10103..10106
- {'Cprt', 'Lina', 'Linb'}, # 10107..10133
- None, # 10134..10136
- {'Cprt', 'Linb'}, # 10137..1013F
- None, # 10140..102DF
- {'Arab', 'Copt'}, # 102E0..102FB
- None, # 102FC..10AF1
- {'Mani', 'Ougr'}, # 10AF2..10AF2
- None, # 10AF3..11300
- {'Gran', 'Taml'}, # 11301..11301
- None, # 11302..11302
- {'Gran', 'Taml'}, # 11303..11303
- None, # 11304..1133A
- {'Gran', 'Taml'}, # 1133B..1133C
- None, # 1133D..11FCF
- {'Gran', 'Taml'}, # 11FD0..11FD1
- None, # 11FD2..11FD2
- {'Gran', 'Taml'}, # 11FD3..11FD3
- None, # 11FD4..1BC9F
- {'Dupl'}, # 1BCA0..1BCA3
- None, # 1BCA4..1D35F
- {'Hani'}, # 1D360..1D371
- None, # 1D372..1F24F
- {'Hani'}, # 1F250..1F251
- None, # 1F252..10FFFF
+ None, # 0000..0341
+ {"Grek"}, # 0342..0342
+ None, # 0343..0344
+ {"Grek"}, # 0345..0345
+ None, # 0346..0362
+ {"Latn"}, # 0363..036F
+ None, # 0370..0482
+ {"Cyrl", "Perm"}, # 0483..0483
+ {"Cyrl", "Glag"}, # 0484..0484
+ {"Cyrl", "Latn"}, # 0485..0486
+ {"Cyrl", "Glag"}, # 0487..0487
+ None, # 0488..060B
+ {"Arab", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 060C..060C
+ None, # 060D..061A
+ {"Arab", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 061B..061B
+ {"Arab", "Syrc", "Thaa"}, # 061C..061C
+ None, # 061D..061E
+ {"Adlm", "Arab", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 061F..061F
+ None, # 0620..063F
+ {
+ "Adlm",
+ "Arab",
+ "Mand",
+ "Mani",
+ "Ougr",
+ "Phlp",
+ "Rohg",
+ "Sogd",
+ "Syrc",
+ }, # 0640..0640
+ None, # 0641..064A
+ {"Arab", "Syrc"}, # 064B..0655
+ None, # 0656..065F
+ {"Arab", "Thaa", "Yezi"}, # 0660..0669
+ None, # 066A..066F
+ {"Arab", "Syrc"}, # 0670..0670
+ None, # 0671..06D3
+ {"Arab", "Rohg"}, # 06D4..06D4
+ None, # 06D5..0950
+ {
+ "Beng",
+ "Deva",
+ "Gran",
+ "Gujr",
+ "Guru",
+ "Knda",
+ "Latn",
+ "Mlym",
+ "Orya",
+ "Shrd",
+ "Taml",
+ "Telu",
+ "Tirh",
+ }, # 0951..0951
+ {
+ "Beng",
+ "Deva",
+ "Gran",
+ "Gujr",
+ "Guru",
+ "Knda",
+ "Latn",
+ "Mlym",
+ "Orya",
+ "Taml",
+ "Telu",
+ "Tirh",
+ }, # 0952..0952
+ None, # 0953..0963
+ {
+ "Beng",
+ "Deva",
+ "Dogr",
+ "Gong",
+ "Gonm",
+ "Gran",
+ "Gujr",
+ "Guru",
+ "Knda",
+ "Mahj",
+ "Mlym",
+ "Nand",
+ "Orya",
+ "Sind",
+ "Sinh",
+ "Sylo",
+ "Takr",
+ "Taml",
+ "Telu",
+ "Tirh",
+ }, # 0964..0964
+ {
+ "Beng",
+ "Deva",
+ "Dogr",
+ "Gong",
+ "Gonm",
+ "Gran",
+ "Gujr",
+ "Guru",
+ "Knda",
+ "Limb",
+ "Mahj",
+ "Mlym",
+ "Nand",
+ "Orya",
+ "Sind",
+ "Sinh",
+ "Sylo",
+ "Takr",
+ "Taml",
+ "Telu",
+ "Tirh",
+ }, # 0965..0965
+ {"Deva", "Dogr", "Kthi", "Mahj"}, # 0966..096F
+ None, # 0970..09E5
+ {"Beng", "Cakm", "Sylo"}, # 09E6..09EF
+ None, # 09F0..0A65
+ {"Guru", "Mult"}, # 0A66..0A6F
+ None, # 0A70..0AE5
+ {"Gujr", "Khoj"}, # 0AE6..0AEF
+ None, # 0AF0..0BE5
+ {"Gran", "Taml"}, # 0BE6..0BF3
+ None, # 0BF4..0CE5
+ {"Knda", "Nand"}, # 0CE6..0CEF
+ None, # 0CF0..103F
+ {"Cakm", "Mymr", "Tale"}, # 1040..1049
+ None, # 104A..10FA
+ {"Geor", "Latn"}, # 10FB..10FB
+ None, # 10FC..1734
+ {"Buhd", "Hano", "Tagb", "Tglg"}, # 1735..1736
+ None, # 1737..1801
+ {"Mong", "Phag"}, # 1802..1803
+ None, # 1804..1804
+ {"Mong", "Phag"}, # 1805..1805
+ None, # 1806..1CCF
+ {"Beng", "Deva", "Gran", "Knda"}, # 1CD0..1CD0
+ {"Deva"}, # 1CD1..1CD1
+ {"Beng", "Deva", "Gran", "Knda"}, # 1CD2..1CD2
+ {"Deva", "Gran"}, # 1CD3..1CD3
+ {"Deva"}, # 1CD4..1CD4
+ {"Beng", "Deva"}, # 1CD5..1CD6
+ {"Deva", "Shrd"}, # 1CD7..1CD7
+ {"Beng", "Deva"}, # 1CD8..1CD8
+ {"Deva", "Shrd"}, # 1CD9..1CD9
+ {"Deva", "Knda", "Mlym", "Orya", "Taml", "Telu"}, # 1CDA..1CDA
+ {"Deva"}, # 1CDB..1CDB
+ {"Deva", "Shrd"}, # 1CDC..1CDD
+ {"Deva"}, # 1CDE..1CDF
+ {"Deva", "Shrd"}, # 1CE0..1CE0
+ {"Beng", "Deva"}, # 1CE1..1CE1
+ {"Deva"}, # 1CE2..1CE8
+ {"Deva", "Nand"}, # 1CE9..1CE9
+ {"Beng", "Deva"}, # 1CEA..1CEA
+ {"Deva"}, # 1CEB..1CEC
+ {"Beng", "Deva"}, # 1CED..1CED
+ {"Deva"}, # 1CEE..1CF1
+ {"Beng", "Deva", "Gran", "Knda", "Nand", "Orya", "Telu", "Tirh"}, # 1CF2..1CF2
+ {"Deva", "Gran"}, # 1CF3..1CF3
+ {"Deva", "Gran", "Knda"}, # 1CF4..1CF4
+ {"Beng", "Deva"}, # 1CF5..1CF6
+ {"Beng"}, # 1CF7..1CF7
+ {"Deva", "Gran"}, # 1CF8..1CF9
+ {"Nand"}, # 1CFA..1CFA
+ None, # 1CFB..1DBF
+ {"Grek"}, # 1DC0..1DC1
+ None, # 1DC2..1DF7
+ {"Cyrl", "Syrc"}, # 1DF8..1DF8
+ None, # 1DF9..1DF9
+ {"Syrc"}, # 1DFA..1DFA
+ None, # 1DFB..202E
+ {"Latn", "Mong"}, # 202F..202F
+ None, # 2030..20EF
+ {"Deva", "Gran", "Latn"}, # 20F0..20F0
+ None, # 20F1..2E42
+ {"Cyrl", "Glag"}, # 2E43..2E43
+ None, # 2E44..3000
+ {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 3001..3002
+ {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3003..3003
+ None, # 3004..3005
+ {"Hani"}, # 3006..3006
+ None, # 3007..3007
+ {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 3008..3011
+ None, # 3012..3012
+ {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3013..3013
+ {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 3014..301B
+ {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 301C..301F
+ None, # 3020..3029
+ {"Bopo", "Hani"}, # 302A..302D
+ None, # 302E..302F
+ {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3030..3030
+ {"Hira", "Kana"}, # 3031..3035
+ None, # 3036..3036
+ {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3037..3037
+ None, # 3038..303B
+ {"Hani", "Hira", "Kana"}, # 303C..303D
+ {"Hani"}, # 303E..303F
+ None, # 3040..3098
+ {"Hira", "Kana"}, # 3099..309C
+ None, # 309D..309F
+ {"Hira", "Kana"}, # 30A0..30A0
+ None, # 30A1..30FA
+ {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 30FB..30FB
+ {"Hira", "Kana"}, # 30FC..30FC
+ None, # 30FD..318F
+ {"Hani"}, # 3190..319F
+ None, # 31A0..31BF
+ {"Hani"}, # 31C0..31E3
+ None, # 31E4..321F
+ {"Hani"}, # 3220..3247
+ None, # 3248..327F
+ {"Hani"}, # 3280..32B0
+ None, # 32B1..32BF
+ {"Hani"}, # 32C0..32CB
+ None, # 32CC..32FE
+ {"Hani"}, # 32FF..32FF
+ None, # 3300..3357
+ {"Hani"}, # 3358..3370
+ None, # 3371..337A
+ {"Hani"}, # 337B..337F
+ None, # 3380..33DF
+ {"Hani"}, # 33E0..33FE
+ None, # 33FF..A66E
+ {"Cyrl", "Glag"}, # A66F..A66F
+ None, # A670..A6FF
+ {"Hani", "Latn"}, # A700..A707
+ None, # A708..A82F
+ {
+ "Deva",
+ "Dogr",
+ "Gujr",
+ "Guru",
+ "Khoj",
+ "Knda",
+ "Kthi",
+ "Mahj",
+ "Mlym",
+ "Modi",
+ "Nand",
+ "Sind",
+ "Takr",
+ "Tirh",
+ }, # A830..A832
+ {
+ "Deva",
+ "Dogr",
+ "Gujr",
+ "Guru",
+ "Khoj",
+ "Knda",
+ "Kthi",
+ "Mahj",
+ "Modi",
+ "Nand",
+ "Sind",
+ "Takr",
+ "Tirh",
+ }, # A833..A835
+ {
+ "Deva",
+ "Dogr",
+ "Gujr",
+ "Guru",
+ "Khoj",
+ "Kthi",
+ "Mahj",
+ "Modi",
+ "Sind",
+ "Takr",
+ "Tirh",
+ }, # A836..A839
+ None, # A83A..A8F0
+ {"Beng", "Deva"}, # A8F1..A8F1
+ None, # A8F2..A8F2
+ {"Deva", "Taml"}, # A8F3..A8F3
+ None, # A8F4..A92D
+ {"Kali", "Latn", "Mymr"}, # A92E..A92E
+ None, # A92F..A9CE
+ {"Bugi", "Java"}, # A9CF..A9CF
+ None, # A9D0..FD3D
+ {"Arab", "Nkoo"}, # FD3E..FD3F
+ None, # FD40..FDF1
+ {"Arab", "Thaa"}, # FDF2..FDF2
+ None, # FDF3..FDFC
+ {"Arab", "Thaa"}, # FDFD..FDFD
+ None, # FDFE..FE44
+ {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # FE45..FE46
+ None, # FE47..FF60
+ {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # FF61..FF65
+ None, # FF66..FF6F
+ {"Hira", "Kana"}, # FF70..FF70
+ None, # FF71..FF9D
+ {"Hira", "Kana"}, # FF9E..FF9F
+ None, # FFA0..100FF
+ {"Cpmn", "Cprt", "Linb"}, # 10100..10101
+ {"Cprt", "Linb"}, # 10102..10102
+ None, # 10103..10106
+ {"Cprt", "Lina", "Linb"}, # 10107..10133
+ None, # 10134..10136
+ {"Cprt", "Linb"}, # 10137..1013F
+ None, # 10140..102DF
+ {"Arab", "Copt"}, # 102E0..102FB
+ None, # 102FC..10AF1
+ {"Mani", "Ougr"}, # 10AF2..10AF2
+ None, # 10AF3..11300
+ {"Gran", "Taml"}, # 11301..11301
+ None, # 11302..11302
+ {"Gran", "Taml"}, # 11303..11303
+ None, # 11304..1133A
+ {"Gran", "Taml"}, # 1133B..1133C
+ None, # 1133D..11FCF
+ {"Gran", "Taml"}, # 11FD0..11FD1
+ None, # 11FD2..11FD2
+ {"Gran", "Taml"}, # 11FD3..11FD3
+ None, # 11FD4..1BC9F
+ {"Dupl"}, # 1BCA0..1BCA3
+ None, # 1BCA4..1D35F
+ {"Hani"}, # 1D360..1D371
+ None, # 1D372..1F24F
+ {"Hani"}, # 1F250..1F251
+ None, # 1F252..10FFFF
]
diff --git a/Lib/fontTools/unicodedata/Scripts.py b/Lib/fontTools/unicodedata/Scripts.py
index 18cada93..68bb91b3 100644
--- a/Lib/fontTools/unicodedata/Scripts.py
+++ b/Lib/fontTools/unicodedata/Scripts.py
@@ -4,14 +4,14 @@
# Source: https://unicode.org/Public/UNIDATA/Scripts.txt
# License: http://unicode.org/copyright.html#License
#
-# Scripts-14.0.0.txt
-# Date: 2021-07-10, 00:35:31 GMT
-# © 2021 Unicode®, Inc.
+# Scripts-15.0.0.txt
+# Date: 2022-04-26, 23:15:02 GMT
+# © 2022 Unicode®, Inc.
# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.
-# For terms of use, see http://www.unicode.org/terms_of_use.html
+# For terms of use, see https://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
-# For documentation, see http://www.unicode.org/reports/tr44/
+# For documentation, see https://www.unicode.org/reports/tr44/
# For more information, see:
# UAX #24, Unicode Script Property: https://www.unicode.org/reports/tr24/
# Especially the sections:
@@ -325,8 +325,8 @@ RANGES = [
0x0CE4, # .. 0x0CE5 ; Unknown
0x0CE6, # .. 0x0CEF ; Kannada
0x0CF0, # .. 0x0CF0 ; Unknown
- 0x0CF1, # .. 0x0CF2 ; Kannada
- 0x0CF3, # .. 0x0CFF ; Unknown
+ 0x0CF1, # .. 0x0CF3 ; Kannada
+ 0x0CF4, # .. 0x0CFF ; Unknown
0x0D00, # .. 0x0D0C ; Malayalam
0x0D0D, # .. 0x0D0D ; Unknown
0x0D0E, # .. 0x0D10 ; Malayalam
@@ -386,8 +386,8 @@ RANGES = [
0x0EC5, # .. 0x0EC5 ; Unknown
0x0EC6, # .. 0x0EC6 ; Lao
0x0EC7, # .. 0x0EC7 ; Unknown
- 0x0EC8, # .. 0x0ECD ; Lao
- 0x0ECE, # .. 0x0ECF ; Unknown
+ 0x0EC8, # .. 0x0ECE ; Lao
+ 0x0ECF, # .. 0x0ECF ; Unknown
0x0ED0, # .. 0x0ED9 ; Lao
0x0EDA, # .. 0x0EDB ; Unknown
0x0EDC, # .. 0x0EDF ; Lao
@@ -1086,7 +1086,8 @@ RANGES = [
0x10EAB, # .. 0x10EAD ; Yezidi
0x10EAE, # .. 0x10EAF ; Unknown
0x10EB0, # .. 0x10EB1 ; Yezidi
- 0x10EB2, # .. 0x10EFF ; Unknown
+ 0x10EB2, # .. 0x10EFC ; Unknown
+ 0x10EFD, # .. 0x10EFF ; Arabic
0x10F00, # .. 0x10F27 ; Old_Sogdian
0x10F28, # .. 0x10F2F ; Unknown
0x10F30, # .. 0x10F59 ; Sogdian
@@ -1122,8 +1123,8 @@ RANGES = [
0x111F5, # .. 0x111FF ; Unknown
0x11200, # .. 0x11211 ; Khojki
0x11212, # .. 0x11212 ; Unknown
- 0x11213, # .. 0x1123E ; Khojki
- 0x1123F, # .. 0x1127F ; Unknown
+ 0x11213, # .. 0x11241 ; Khojki
+ 0x11242, # .. 0x1127F ; Unknown
0x11280, # .. 0x11286 ; Multani
0x11287, # .. 0x11287 ; Unknown
0x11288, # .. 0x11288 ; Multani
@@ -1230,7 +1231,9 @@ RANGES = [
0x11AA3, # .. 0x11AAF ; Unknown
0x11AB0, # .. 0x11ABF ; Canadian_Aboriginal
0x11AC0, # .. 0x11AF8 ; Pau_Cin_Hau
- 0x11AF9, # .. 0x11BFF ; Unknown
+ 0x11AF9, # .. 0x11AFF ; Unknown
+ 0x11B00, # .. 0x11B09 ; Devanagari
+ 0x11B0A, # .. 0x11BFF ; Unknown
0x11C00, # .. 0x11C08 ; Bhaiksuki
0x11C09, # .. 0x11C09 ; Unknown
0x11C0A, # .. 0x11C36 ; Bhaiksuki
@@ -1272,7 +1275,13 @@ RANGES = [
0x11DA0, # .. 0x11DA9 ; Gunjala_Gondi
0x11DAA, # .. 0x11EDF ; Unknown
0x11EE0, # .. 0x11EF8 ; Makasar
- 0x11EF9, # .. 0x11FAF ; Unknown
+ 0x11EF9, # .. 0x11EFF ; Unknown
+ 0x11F00, # .. 0x11F10 ; Kawi
+ 0x11F11, # .. 0x11F11 ; Unknown
+ 0x11F12, # .. 0x11F3A ; Kawi
+ 0x11F3B, # .. 0x11F3D ; Unknown
+ 0x11F3E, # .. 0x11F59 ; Kawi
+ 0x11F5A, # .. 0x11FAF ; Unknown
0x11FB0, # .. 0x11FB0 ; Lisu
0x11FB1, # .. 0x11FBF ; Unknown
0x11FC0, # .. 0x11FF1 ; Tamil
@@ -1288,10 +1297,8 @@ RANGES = [
0x12544, # .. 0x12F8F ; Unknown
0x12F90, # .. 0x12FF2 ; Cypro_Minoan
0x12FF3, # .. 0x12FFF ; Unknown
- 0x13000, # .. 0x1342E ; Egyptian_Hieroglyphs
- 0x1342F, # .. 0x1342F ; Unknown
- 0x13430, # .. 0x13438 ; Egyptian_Hieroglyphs
- 0x13439, # .. 0x143FF ; Unknown
+ 0x13000, # .. 0x13455 ; Egyptian_Hieroglyphs
+ 0x13456, # .. 0x143FF ; Unknown
0x14400, # .. 0x14646 ; Anatolian_Hieroglyphs
0x14647, # .. 0x167FF ; Unknown
0x16800, # .. 0x16A38 ; Bamum
@@ -1350,9 +1357,13 @@ RANGES = [
0x1B000, # .. 0x1B000 ; Katakana
0x1B001, # .. 0x1B11F ; Hiragana
0x1B120, # .. 0x1B122 ; Katakana
- 0x1B123, # .. 0x1B14F ; Unknown
+ 0x1B123, # .. 0x1B131 ; Unknown
+ 0x1B132, # .. 0x1B132 ; Hiragana
+ 0x1B133, # .. 0x1B14F ; Unknown
0x1B150, # .. 0x1B152 ; Hiragana
- 0x1B153, # .. 0x1B163 ; Unknown
+ 0x1B153, # .. 0x1B154 ; Unknown
+ 0x1B155, # .. 0x1B155 ; Katakana
+ 0x1B156, # .. 0x1B163 ; Unknown
0x1B164, # .. 0x1B167 ; Katakana
0x1B168, # .. 0x1B16F ; Unknown
0x1B170, # .. 0x1B2FB ; Nushu
@@ -1389,7 +1400,9 @@ RANGES = [
0x1D1AE, # .. 0x1D1EA ; Common
0x1D1EB, # .. 0x1D1FF ; Unknown
0x1D200, # .. 0x1D245 ; Greek
- 0x1D246, # .. 0x1D2DF ; Unknown
+ 0x1D246, # .. 0x1D2BF ; Unknown
+ 0x1D2C0, # .. 0x1D2D3 ; Common
+ 0x1D2D4, # .. 0x1D2DF ; Unknown
0x1D2E0, # .. 0x1D2F3 ; Common
0x1D2F4, # .. 0x1D2FF ; Unknown
0x1D300, # .. 0x1D356 ; Common
@@ -1444,7 +1457,9 @@ RANGES = [
0x1DAA1, # .. 0x1DAAF ; SignWriting
0x1DAB0, # .. 0x1DEFF ; Unknown
0x1DF00, # .. 0x1DF1E ; Latin
- 0x1DF1F, # .. 0x1DFFF ; Unknown
+ 0x1DF1F, # .. 0x1DF24 ; Unknown
+ 0x1DF25, # .. 0x1DF2A ; Latin
+ 0x1DF2B, # .. 0x1DFFF ; Unknown
0x1E000, # .. 0x1E006 ; Glagolitic
0x1E007, # .. 0x1E007 ; Unknown
0x1E008, # .. 0x1E018 ; Glagolitic
@@ -1454,7 +1469,11 @@ RANGES = [
0x1E023, # .. 0x1E024 ; Glagolitic
0x1E025, # .. 0x1E025 ; Unknown
0x1E026, # .. 0x1E02A ; Glagolitic
- 0x1E02B, # .. 0x1E0FF ; Unknown
+ 0x1E02B, # .. 0x1E02F ; Unknown
+ 0x1E030, # .. 0x1E06D ; Cyrillic
+ 0x1E06E, # .. 0x1E08E ; Unknown
+ 0x1E08F, # .. 0x1E08F ; Cyrillic
+ 0x1E090, # .. 0x1E0FF ; Unknown
0x1E100, # .. 0x1E12C ; Nyiakeng_Puachue_Hmong
0x1E12D, # .. 0x1E12F ; Unknown
0x1E130, # .. 0x1E13D ; Nyiakeng_Puachue_Hmong
@@ -1468,7 +1487,9 @@ RANGES = [
0x1E2C0, # .. 0x1E2F9 ; Wancho
0x1E2FA, # .. 0x1E2FE ; Unknown
0x1E2FF, # .. 0x1E2FF ; Wancho
- 0x1E300, # .. 0x1E7DF ; Unknown
+ 0x1E300, # .. 0x1E4CF ; Unknown
+ 0x1E4D0, # .. 0x1E4F9 ; Nag_Mundari
+ 0x1E4FA, # .. 0x1E7DF ; Unknown
0x1E7E0, # .. 0x1E7E6 ; Ethiopic
0x1E7E7, # .. 0x1E7E7 ; Unknown
0x1E7E8, # .. 0x1E7EB ; Ethiopic
@@ -1586,15 +1607,15 @@ RANGES = [
0x1F260, # .. 0x1F265 ; Common
0x1F266, # .. 0x1F2FF ; Unknown
0x1F300, # .. 0x1F6D7 ; Common
- 0x1F6D8, # .. 0x1F6DC ; Unknown
- 0x1F6DD, # .. 0x1F6EC ; Common
+ 0x1F6D8, # .. 0x1F6DB ; Unknown
+ 0x1F6DC, # .. 0x1F6EC ; Common
0x1F6ED, # .. 0x1F6EF ; Unknown
0x1F6F0, # .. 0x1F6FC ; Common
0x1F6FD, # .. 0x1F6FF ; Unknown
- 0x1F700, # .. 0x1F773 ; Common
- 0x1F774, # .. 0x1F77F ; Unknown
- 0x1F780, # .. 0x1F7D8 ; Common
- 0x1F7D9, # .. 0x1F7DF ; Unknown
+ 0x1F700, # .. 0x1F776 ; Common
+ 0x1F777, # .. 0x1F77A ; Unknown
+ 0x1F77B, # .. 0x1F7D9 ; Common
+ 0x1F7DA, # .. 0x1F7DF ; Unknown
0x1F7E0, # .. 0x1F7EB ; Common
0x1F7EC, # .. 0x1F7EF ; Unknown
0x1F7F0, # .. 0x1F7F0 ; Common
@@ -1615,24 +1636,20 @@ RANGES = [
0x1FA54, # .. 0x1FA5F ; Unknown
0x1FA60, # .. 0x1FA6D ; Common
0x1FA6E, # .. 0x1FA6F ; Unknown
- 0x1FA70, # .. 0x1FA74 ; Common
- 0x1FA75, # .. 0x1FA77 ; Unknown
- 0x1FA78, # .. 0x1FA7C ; Common
+ 0x1FA70, # .. 0x1FA7C ; Common
0x1FA7D, # .. 0x1FA7F ; Unknown
- 0x1FA80, # .. 0x1FA86 ; Common
- 0x1FA87, # .. 0x1FA8F ; Unknown
- 0x1FA90, # .. 0x1FAAC ; Common
- 0x1FAAD, # .. 0x1FAAF ; Unknown
- 0x1FAB0, # .. 0x1FABA ; Common
- 0x1FABB, # .. 0x1FABF ; Unknown
- 0x1FAC0, # .. 0x1FAC5 ; Common
- 0x1FAC6, # .. 0x1FACF ; Unknown
- 0x1FAD0, # .. 0x1FAD9 ; Common
- 0x1FADA, # .. 0x1FADF ; Unknown
- 0x1FAE0, # .. 0x1FAE7 ; Common
- 0x1FAE8, # .. 0x1FAEF ; Unknown
- 0x1FAF0, # .. 0x1FAF6 ; Common
- 0x1FAF7, # .. 0x1FAFF ; Unknown
+ 0x1FA80, # .. 0x1FA88 ; Common
+ 0x1FA89, # .. 0x1FA8F ; Unknown
+ 0x1FA90, # .. 0x1FABD ; Common
+ 0x1FABE, # .. 0x1FABE ; Unknown
+ 0x1FABF, # .. 0x1FAC5 ; Common
+ 0x1FAC6, # .. 0x1FACD ; Unknown
+ 0x1FACE, # .. 0x1FADB ; Common
+ 0x1FADC, # .. 0x1FADF ; Unknown
+ 0x1FAE0, # .. 0x1FAE8 ; Common
+ 0x1FAE9, # .. 0x1FAEF ; Unknown
+ 0x1FAF0, # .. 0x1FAF8 ; Common
+ 0x1FAF9, # .. 0x1FAFF ; Unknown
0x1FB00, # .. 0x1FB92 ; Common
0x1FB93, # .. 0x1FB93 ; Unknown
0x1FB94, # .. 0x1FBCA ; Common
@@ -1641,8 +1658,8 @@ RANGES = [
0x1FBFA, # .. 0x1FFFF ; Unknown
0x20000, # .. 0x2A6DF ; Han
0x2A6E0, # .. 0x2A6FF ; Unknown
- 0x2A700, # .. 0x2B738 ; Han
- 0x2B739, # .. 0x2B73F ; Unknown
+ 0x2A700, # .. 0x2B739 ; Han
+ 0x2B73A, # .. 0x2B73F ; Unknown
0x2B740, # .. 0x2B81D ; Han
0x2B81E, # .. 0x2B81F ; Unknown
0x2B820, # .. 0x2CEA1 ; Han
@@ -1652,7 +1669,9 @@ RANGES = [
0x2F800, # .. 0x2FA1D ; Han
0x2FA1E, # .. 0x2FFFF ; Unknown
0x30000, # .. 0x3134A ; Han
- 0x3134B, # .. 0xE0000 ; Unknown
+ 0x3134B, # .. 0x3134F ; Unknown
+ 0x31350, # .. 0x323AF ; Han
+ 0x323B0, # .. 0xE0000 ; Unknown
0xE0001, # .. 0xE0001 ; Common
0xE0002, # .. 0xE001F ; Unknown
0xE0020, # .. 0xE007F ; Common
@@ -1662,1808 +1681,1829 @@ RANGES = [
]
VALUES = [
- 'Zyyy', # 0000..0040 ; Common
- 'Latn', # 0041..005A ; Latin
- 'Zyyy', # 005B..0060 ; Common
- 'Latn', # 0061..007A ; Latin
- 'Zyyy', # 007B..00A9 ; Common
- 'Latn', # 00AA..00AA ; Latin
- 'Zyyy', # 00AB..00B9 ; Common
- 'Latn', # 00BA..00BA ; Latin
- 'Zyyy', # 00BB..00BF ; Common
- 'Latn', # 00C0..00D6 ; Latin
- 'Zyyy', # 00D7..00D7 ; Common
- 'Latn', # 00D8..00F6 ; Latin
- 'Zyyy', # 00F7..00F7 ; Common
- 'Latn', # 00F8..02B8 ; Latin
- 'Zyyy', # 02B9..02DF ; Common
- 'Latn', # 02E0..02E4 ; Latin
- 'Zyyy', # 02E5..02E9 ; Common
- 'Bopo', # 02EA..02EB ; Bopomofo
- 'Zyyy', # 02EC..02FF ; Common
- 'Zinh', # 0300..036F ; Inherited
- 'Grek', # 0370..0373 ; Greek
- 'Zyyy', # 0374..0374 ; Common
- 'Grek', # 0375..0377 ; Greek
- 'Zzzz', # 0378..0379 ; Unknown
- 'Grek', # 037A..037D ; Greek
- 'Zyyy', # 037E..037E ; Common
- 'Grek', # 037F..037F ; Greek
- 'Zzzz', # 0380..0383 ; Unknown
- 'Grek', # 0384..0384 ; Greek
- 'Zyyy', # 0385..0385 ; Common
- 'Grek', # 0386..0386 ; Greek
- 'Zyyy', # 0387..0387 ; Common
- 'Grek', # 0388..038A ; Greek
- 'Zzzz', # 038B..038B ; Unknown
- 'Grek', # 038C..038C ; Greek
- 'Zzzz', # 038D..038D ; Unknown
- 'Grek', # 038E..03A1 ; Greek
- 'Zzzz', # 03A2..03A2 ; Unknown
- 'Grek', # 03A3..03E1 ; Greek
- 'Copt', # 03E2..03EF ; Coptic
- 'Grek', # 03F0..03FF ; Greek
- 'Cyrl', # 0400..0484 ; Cyrillic
- 'Zinh', # 0485..0486 ; Inherited
- 'Cyrl', # 0487..052F ; Cyrillic
- 'Zzzz', # 0530..0530 ; Unknown
- 'Armn', # 0531..0556 ; Armenian
- 'Zzzz', # 0557..0558 ; Unknown
- 'Armn', # 0559..058A ; Armenian
- 'Zzzz', # 058B..058C ; Unknown
- 'Armn', # 058D..058F ; Armenian
- 'Zzzz', # 0590..0590 ; Unknown
- 'Hebr', # 0591..05C7 ; Hebrew
- 'Zzzz', # 05C8..05CF ; Unknown
- 'Hebr', # 05D0..05EA ; Hebrew
- 'Zzzz', # 05EB..05EE ; Unknown
- 'Hebr', # 05EF..05F4 ; Hebrew
- 'Zzzz', # 05F5..05FF ; Unknown
- 'Arab', # 0600..0604 ; Arabic
- 'Zyyy', # 0605..0605 ; Common
- 'Arab', # 0606..060B ; Arabic
- 'Zyyy', # 060C..060C ; Common
- 'Arab', # 060D..061A ; Arabic
- 'Zyyy', # 061B..061B ; Common
- 'Arab', # 061C..061E ; Arabic
- 'Zyyy', # 061F..061F ; Common
- 'Arab', # 0620..063F ; Arabic
- 'Zyyy', # 0640..0640 ; Common
- 'Arab', # 0641..064A ; Arabic
- 'Zinh', # 064B..0655 ; Inherited
- 'Arab', # 0656..066F ; Arabic
- 'Zinh', # 0670..0670 ; Inherited
- 'Arab', # 0671..06DC ; Arabic
- 'Zyyy', # 06DD..06DD ; Common
- 'Arab', # 06DE..06FF ; Arabic
- 'Syrc', # 0700..070D ; Syriac
- 'Zzzz', # 070E..070E ; Unknown
- 'Syrc', # 070F..074A ; Syriac
- 'Zzzz', # 074B..074C ; Unknown
- 'Syrc', # 074D..074F ; Syriac
- 'Arab', # 0750..077F ; Arabic
- 'Thaa', # 0780..07B1 ; Thaana
- 'Zzzz', # 07B2..07BF ; Unknown
- 'Nkoo', # 07C0..07FA ; Nko
- 'Zzzz', # 07FB..07FC ; Unknown
- 'Nkoo', # 07FD..07FF ; Nko
- 'Samr', # 0800..082D ; Samaritan
- 'Zzzz', # 082E..082F ; Unknown
- 'Samr', # 0830..083E ; Samaritan
- 'Zzzz', # 083F..083F ; Unknown
- 'Mand', # 0840..085B ; Mandaic
- 'Zzzz', # 085C..085D ; Unknown
- 'Mand', # 085E..085E ; Mandaic
- 'Zzzz', # 085F..085F ; Unknown
- 'Syrc', # 0860..086A ; Syriac
- 'Zzzz', # 086B..086F ; Unknown
- 'Arab', # 0870..088E ; Arabic
- 'Zzzz', # 088F..088F ; Unknown
- 'Arab', # 0890..0891 ; Arabic
- 'Zzzz', # 0892..0897 ; Unknown
- 'Arab', # 0898..08E1 ; Arabic
- 'Zyyy', # 08E2..08E2 ; Common
- 'Arab', # 08E3..08FF ; Arabic
- 'Deva', # 0900..0950 ; Devanagari
- 'Zinh', # 0951..0954 ; Inherited
- 'Deva', # 0955..0963 ; Devanagari
- 'Zyyy', # 0964..0965 ; Common
- 'Deva', # 0966..097F ; Devanagari
- 'Beng', # 0980..0983 ; Bengali
- 'Zzzz', # 0984..0984 ; Unknown
- 'Beng', # 0985..098C ; Bengali
- 'Zzzz', # 098D..098E ; Unknown
- 'Beng', # 098F..0990 ; Bengali
- 'Zzzz', # 0991..0992 ; Unknown
- 'Beng', # 0993..09A8 ; Bengali
- 'Zzzz', # 09A9..09A9 ; Unknown
- 'Beng', # 09AA..09B0 ; Bengali
- 'Zzzz', # 09B1..09B1 ; Unknown
- 'Beng', # 09B2..09B2 ; Bengali
- 'Zzzz', # 09B3..09B5 ; Unknown
- 'Beng', # 09B6..09B9 ; Bengali
- 'Zzzz', # 09BA..09BB ; Unknown
- 'Beng', # 09BC..09C4 ; Bengali
- 'Zzzz', # 09C5..09C6 ; Unknown
- 'Beng', # 09C7..09C8 ; Bengali
- 'Zzzz', # 09C9..09CA ; Unknown
- 'Beng', # 09CB..09CE ; Bengali
- 'Zzzz', # 09CF..09D6 ; Unknown
- 'Beng', # 09D7..09D7 ; Bengali
- 'Zzzz', # 09D8..09DB ; Unknown
- 'Beng', # 09DC..09DD ; Bengali
- 'Zzzz', # 09DE..09DE ; Unknown
- 'Beng', # 09DF..09E3 ; Bengali
- 'Zzzz', # 09E4..09E5 ; Unknown
- 'Beng', # 09E6..09FE ; Bengali
- 'Zzzz', # 09FF..0A00 ; Unknown
- 'Guru', # 0A01..0A03 ; Gurmukhi
- 'Zzzz', # 0A04..0A04 ; Unknown
- 'Guru', # 0A05..0A0A ; Gurmukhi
- 'Zzzz', # 0A0B..0A0E ; Unknown
- 'Guru', # 0A0F..0A10 ; Gurmukhi
- 'Zzzz', # 0A11..0A12 ; Unknown
- 'Guru', # 0A13..0A28 ; Gurmukhi
- 'Zzzz', # 0A29..0A29 ; Unknown
- 'Guru', # 0A2A..0A30 ; Gurmukhi
- 'Zzzz', # 0A31..0A31 ; Unknown
- 'Guru', # 0A32..0A33 ; Gurmukhi
- 'Zzzz', # 0A34..0A34 ; Unknown
- 'Guru', # 0A35..0A36 ; Gurmukhi
- 'Zzzz', # 0A37..0A37 ; Unknown
- 'Guru', # 0A38..0A39 ; Gurmukhi
- 'Zzzz', # 0A3A..0A3B ; Unknown
- 'Guru', # 0A3C..0A3C ; Gurmukhi
- 'Zzzz', # 0A3D..0A3D ; Unknown
- 'Guru', # 0A3E..0A42 ; Gurmukhi
- 'Zzzz', # 0A43..0A46 ; Unknown
- 'Guru', # 0A47..0A48 ; Gurmukhi
- 'Zzzz', # 0A49..0A4A ; Unknown
- 'Guru', # 0A4B..0A4D ; Gurmukhi
- 'Zzzz', # 0A4E..0A50 ; Unknown
- 'Guru', # 0A51..0A51 ; Gurmukhi
- 'Zzzz', # 0A52..0A58 ; Unknown
- 'Guru', # 0A59..0A5C ; Gurmukhi
- 'Zzzz', # 0A5D..0A5D ; Unknown
- 'Guru', # 0A5E..0A5E ; Gurmukhi
- 'Zzzz', # 0A5F..0A65 ; Unknown
- 'Guru', # 0A66..0A76 ; Gurmukhi
- 'Zzzz', # 0A77..0A80 ; Unknown
- 'Gujr', # 0A81..0A83 ; Gujarati
- 'Zzzz', # 0A84..0A84 ; Unknown
- 'Gujr', # 0A85..0A8D ; Gujarati
- 'Zzzz', # 0A8E..0A8E ; Unknown
- 'Gujr', # 0A8F..0A91 ; Gujarati
- 'Zzzz', # 0A92..0A92 ; Unknown
- 'Gujr', # 0A93..0AA8 ; Gujarati
- 'Zzzz', # 0AA9..0AA9 ; Unknown
- 'Gujr', # 0AAA..0AB0 ; Gujarati
- 'Zzzz', # 0AB1..0AB1 ; Unknown
- 'Gujr', # 0AB2..0AB3 ; Gujarati
- 'Zzzz', # 0AB4..0AB4 ; Unknown
- 'Gujr', # 0AB5..0AB9 ; Gujarati
- 'Zzzz', # 0ABA..0ABB ; Unknown
- 'Gujr', # 0ABC..0AC5 ; Gujarati
- 'Zzzz', # 0AC6..0AC6 ; Unknown
- 'Gujr', # 0AC7..0AC9 ; Gujarati
- 'Zzzz', # 0ACA..0ACA ; Unknown
- 'Gujr', # 0ACB..0ACD ; Gujarati
- 'Zzzz', # 0ACE..0ACF ; Unknown
- 'Gujr', # 0AD0..0AD0 ; Gujarati
- 'Zzzz', # 0AD1..0ADF ; Unknown
- 'Gujr', # 0AE0..0AE3 ; Gujarati
- 'Zzzz', # 0AE4..0AE5 ; Unknown
- 'Gujr', # 0AE6..0AF1 ; Gujarati
- 'Zzzz', # 0AF2..0AF8 ; Unknown
- 'Gujr', # 0AF9..0AFF ; Gujarati
- 'Zzzz', # 0B00..0B00 ; Unknown
- 'Orya', # 0B01..0B03 ; Oriya
- 'Zzzz', # 0B04..0B04 ; Unknown
- 'Orya', # 0B05..0B0C ; Oriya
- 'Zzzz', # 0B0D..0B0E ; Unknown
- 'Orya', # 0B0F..0B10 ; Oriya
- 'Zzzz', # 0B11..0B12 ; Unknown
- 'Orya', # 0B13..0B28 ; Oriya
- 'Zzzz', # 0B29..0B29 ; Unknown
- 'Orya', # 0B2A..0B30 ; Oriya
- 'Zzzz', # 0B31..0B31 ; Unknown
- 'Orya', # 0B32..0B33 ; Oriya
- 'Zzzz', # 0B34..0B34 ; Unknown
- 'Orya', # 0B35..0B39 ; Oriya
- 'Zzzz', # 0B3A..0B3B ; Unknown
- 'Orya', # 0B3C..0B44 ; Oriya
- 'Zzzz', # 0B45..0B46 ; Unknown
- 'Orya', # 0B47..0B48 ; Oriya
- 'Zzzz', # 0B49..0B4A ; Unknown
- 'Orya', # 0B4B..0B4D ; Oriya
- 'Zzzz', # 0B4E..0B54 ; Unknown
- 'Orya', # 0B55..0B57 ; Oriya
- 'Zzzz', # 0B58..0B5B ; Unknown
- 'Orya', # 0B5C..0B5D ; Oriya
- 'Zzzz', # 0B5E..0B5E ; Unknown
- 'Orya', # 0B5F..0B63 ; Oriya
- 'Zzzz', # 0B64..0B65 ; Unknown
- 'Orya', # 0B66..0B77 ; Oriya
- 'Zzzz', # 0B78..0B81 ; Unknown
- 'Taml', # 0B82..0B83 ; Tamil
- 'Zzzz', # 0B84..0B84 ; Unknown
- 'Taml', # 0B85..0B8A ; Tamil
- 'Zzzz', # 0B8B..0B8D ; Unknown
- 'Taml', # 0B8E..0B90 ; Tamil
- 'Zzzz', # 0B91..0B91 ; Unknown
- 'Taml', # 0B92..0B95 ; Tamil
- 'Zzzz', # 0B96..0B98 ; Unknown
- 'Taml', # 0B99..0B9A ; Tamil
- 'Zzzz', # 0B9B..0B9B ; Unknown
- 'Taml', # 0B9C..0B9C ; Tamil
- 'Zzzz', # 0B9D..0B9D ; Unknown
- 'Taml', # 0B9E..0B9F ; Tamil
- 'Zzzz', # 0BA0..0BA2 ; Unknown
- 'Taml', # 0BA3..0BA4 ; Tamil
- 'Zzzz', # 0BA5..0BA7 ; Unknown
- 'Taml', # 0BA8..0BAA ; Tamil
- 'Zzzz', # 0BAB..0BAD ; Unknown
- 'Taml', # 0BAE..0BB9 ; Tamil
- 'Zzzz', # 0BBA..0BBD ; Unknown
- 'Taml', # 0BBE..0BC2 ; Tamil
- 'Zzzz', # 0BC3..0BC5 ; Unknown
- 'Taml', # 0BC6..0BC8 ; Tamil
- 'Zzzz', # 0BC9..0BC9 ; Unknown
- 'Taml', # 0BCA..0BCD ; Tamil
- 'Zzzz', # 0BCE..0BCF ; Unknown
- 'Taml', # 0BD0..0BD0 ; Tamil
- 'Zzzz', # 0BD1..0BD6 ; Unknown
- 'Taml', # 0BD7..0BD7 ; Tamil
- 'Zzzz', # 0BD8..0BE5 ; Unknown
- 'Taml', # 0BE6..0BFA ; Tamil
- 'Zzzz', # 0BFB..0BFF ; Unknown
- 'Telu', # 0C00..0C0C ; Telugu
- 'Zzzz', # 0C0D..0C0D ; Unknown
- 'Telu', # 0C0E..0C10 ; Telugu
- 'Zzzz', # 0C11..0C11 ; Unknown
- 'Telu', # 0C12..0C28 ; Telugu
- 'Zzzz', # 0C29..0C29 ; Unknown
- 'Telu', # 0C2A..0C39 ; Telugu
- 'Zzzz', # 0C3A..0C3B ; Unknown
- 'Telu', # 0C3C..0C44 ; Telugu
- 'Zzzz', # 0C45..0C45 ; Unknown
- 'Telu', # 0C46..0C48 ; Telugu
- 'Zzzz', # 0C49..0C49 ; Unknown
- 'Telu', # 0C4A..0C4D ; Telugu
- 'Zzzz', # 0C4E..0C54 ; Unknown
- 'Telu', # 0C55..0C56 ; Telugu
- 'Zzzz', # 0C57..0C57 ; Unknown
- 'Telu', # 0C58..0C5A ; Telugu
- 'Zzzz', # 0C5B..0C5C ; Unknown
- 'Telu', # 0C5D..0C5D ; Telugu
- 'Zzzz', # 0C5E..0C5F ; Unknown
- 'Telu', # 0C60..0C63 ; Telugu
- 'Zzzz', # 0C64..0C65 ; Unknown
- 'Telu', # 0C66..0C6F ; Telugu
- 'Zzzz', # 0C70..0C76 ; Unknown
- 'Telu', # 0C77..0C7F ; Telugu
- 'Knda', # 0C80..0C8C ; Kannada
- 'Zzzz', # 0C8D..0C8D ; Unknown
- 'Knda', # 0C8E..0C90 ; Kannada
- 'Zzzz', # 0C91..0C91 ; Unknown
- 'Knda', # 0C92..0CA8 ; Kannada
- 'Zzzz', # 0CA9..0CA9 ; Unknown
- 'Knda', # 0CAA..0CB3 ; Kannada
- 'Zzzz', # 0CB4..0CB4 ; Unknown
- 'Knda', # 0CB5..0CB9 ; Kannada
- 'Zzzz', # 0CBA..0CBB ; Unknown
- 'Knda', # 0CBC..0CC4 ; Kannada
- 'Zzzz', # 0CC5..0CC5 ; Unknown
- 'Knda', # 0CC6..0CC8 ; Kannada
- 'Zzzz', # 0CC9..0CC9 ; Unknown
- 'Knda', # 0CCA..0CCD ; Kannada
- 'Zzzz', # 0CCE..0CD4 ; Unknown
- 'Knda', # 0CD5..0CD6 ; Kannada
- 'Zzzz', # 0CD7..0CDC ; Unknown
- 'Knda', # 0CDD..0CDE ; Kannada
- 'Zzzz', # 0CDF..0CDF ; Unknown
- 'Knda', # 0CE0..0CE3 ; Kannada
- 'Zzzz', # 0CE4..0CE5 ; Unknown
- 'Knda', # 0CE6..0CEF ; Kannada
- 'Zzzz', # 0CF0..0CF0 ; Unknown
- 'Knda', # 0CF1..0CF2 ; Kannada
- 'Zzzz', # 0CF3..0CFF ; Unknown
- 'Mlym', # 0D00..0D0C ; Malayalam
- 'Zzzz', # 0D0D..0D0D ; Unknown
- 'Mlym', # 0D0E..0D10 ; Malayalam
- 'Zzzz', # 0D11..0D11 ; Unknown
- 'Mlym', # 0D12..0D44 ; Malayalam
- 'Zzzz', # 0D45..0D45 ; Unknown
- 'Mlym', # 0D46..0D48 ; Malayalam
- 'Zzzz', # 0D49..0D49 ; Unknown
- 'Mlym', # 0D4A..0D4F ; Malayalam
- 'Zzzz', # 0D50..0D53 ; Unknown
- 'Mlym', # 0D54..0D63 ; Malayalam
- 'Zzzz', # 0D64..0D65 ; Unknown
- 'Mlym', # 0D66..0D7F ; Malayalam
- 'Zzzz', # 0D80..0D80 ; Unknown
- 'Sinh', # 0D81..0D83 ; Sinhala
- 'Zzzz', # 0D84..0D84 ; Unknown
- 'Sinh', # 0D85..0D96 ; Sinhala
- 'Zzzz', # 0D97..0D99 ; Unknown
- 'Sinh', # 0D9A..0DB1 ; Sinhala
- 'Zzzz', # 0DB2..0DB2 ; Unknown
- 'Sinh', # 0DB3..0DBB ; Sinhala
- 'Zzzz', # 0DBC..0DBC ; Unknown
- 'Sinh', # 0DBD..0DBD ; Sinhala
- 'Zzzz', # 0DBE..0DBF ; Unknown
- 'Sinh', # 0DC0..0DC6 ; Sinhala
- 'Zzzz', # 0DC7..0DC9 ; Unknown
- 'Sinh', # 0DCA..0DCA ; Sinhala
- 'Zzzz', # 0DCB..0DCE ; Unknown
- 'Sinh', # 0DCF..0DD4 ; Sinhala
- 'Zzzz', # 0DD5..0DD5 ; Unknown
- 'Sinh', # 0DD6..0DD6 ; Sinhala
- 'Zzzz', # 0DD7..0DD7 ; Unknown
- 'Sinh', # 0DD8..0DDF ; Sinhala
- 'Zzzz', # 0DE0..0DE5 ; Unknown
- 'Sinh', # 0DE6..0DEF ; Sinhala
- 'Zzzz', # 0DF0..0DF1 ; Unknown
- 'Sinh', # 0DF2..0DF4 ; Sinhala
- 'Zzzz', # 0DF5..0E00 ; Unknown
- 'Thai', # 0E01..0E3A ; Thai
- 'Zzzz', # 0E3B..0E3E ; Unknown
- 'Zyyy', # 0E3F..0E3F ; Common
- 'Thai', # 0E40..0E5B ; Thai
- 'Zzzz', # 0E5C..0E80 ; Unknown
- 'Laoo', # 0E81..0E82 ; Lao
- 'Zzzz', # 0E83..0E83 ; Unknown
- 'Laoo', # 0E84..0E84 ; Lao
- 'Zzzz', # 0E85..0E85 ; Unknown
- 'Laoo', # 0E86..0E8A ; Lao
- 'Zzzz', # 0E8B..0E8B ; Unknown
- 'Laoo', # 0E8C..0EA3 ; Lao
- 'Zzzz', # 0EA4..0EA4 ; Unknown
- 'Laoo', # 0EA5..0EA5 ; Lao
- 'Zzzz', # 0EA6..0EA6 ; Unknown
- 'Laoo', # 0EA7..0EBD ; Lao
- 'Zzzz', # 0EBE..0EBF ; Unknown
- 'Laoo', # 0EC0..0EC4 ; Lao
- 'Zzzz', # 0EC5..0EC5 ; Unknown
- 'Laoo', # 0EC6..0EC6 ; Lao
- 'Zzzz', # 0EC7..0EC7 ; Unknown
- 'Laoo', # 0EC8..0ECD ; Lao
- 'Zzzz', # 0ECE..0ECF ; Unknown
- 'Laoo', # 0ED0..0ED9 ; Lao
- 'Zzzz', # 0EDA..0EDB ; Unknown
- 'Laoo', # 0EDC..0EDF ; Lao
- 'Zzzz', # 0EE0..0EFF ; Unknown
- 'Tibt', # 0F00..0F47 ; Tibetan
- 'Zzzz', # 0F48..0F48 ; Unknown
- 'Tibt', # 0F49..0F6C ; Tibetan
- 'Zzzz', # 0F6D..0F70 ; Unknown
- 'Tibt', # 0F71..0F97 ; Tibetan
- 'Zzzz', # 0F98..0F98 ; Unknown
- 'Tibt', # 0F99..0FBC ; Tibetan
- 'Zzzz', # 0FBD..0FBD ; Unknown
- 'Tibt', # 0FBE..0FCC ; Tibetan
- 'Zzzz', # 0FCD..0FCD ; Unknown
- 'Tibt', # 0FCE..0FD4 ; Tibetan
- 'Zyyy', # 0FD5..0FD8 ; Common
- 'Tibt', # 0FD9..0FDA ; Tibetan
- 'Zzzz', # 0FDB..0FFF ; Unknown
- 'Mymr', # 1000..109F ; Myanmar
- 'Geor', # 10A0..10C5 ; Georgian
- 'Zzzz', # 10C6..10C6 ; Unknown
- 'Geor', # 10C7..10C7 ; Georgian
- 'Zzzz', # 10C8..10CC ; Unknown
- 'Geor', # 10CD..10CD ; Georgian
- 'Zzzz', # 10CE..10CF ; Unknown
- 'Geor', # 10D0..10FA ; Georgian
- 'Zyyy', # 10FB..10FB ; Common
- 'Geor', # 10FC..10FF ; Georgian
- 'Hang', # 1100..11FF ; Hangul
- 'Ethi', # 1200..1248 ; Ethiopic
- 'Zzzz', # 1249..1249 ; Unknown
- 'Ethi', # 124A..124D ; Ethiopic
- 'Zzzz', # 124E..124F ; Unknown
- 'Ethi', # 1250..1256 ; Ethiopic
- 'Zzzz', # 1257..1257 ; Unknown
- 'Ethi', # 1258..1258 ; Ethiopic
- 'Zzzz', # 1259..1259 ; Unknown
- 'Ethi', # 125A..125D ; Ethiopic
- 'Zzzz', # 125E..125F ; Unknown
- 'Ethi', # 1260..1288 ; Ethiopic
- 'Zzzz', # 1289..1289 ; Unknown
- 'Ethi', # 128A..128D ; Ethiopic
- 'Zzzz', # 128E..128F ; Unknown
- 'Ethi', # 1290..12B0 ; Ethiopic
- 'Zzzz', # 12B1..12B1 ; Unknown
- 'Ethi', # 12B2..12B5 ; Ethiopic
- 'Zzzz', # 12B6..12B7 ; Unknown
- 'Ethi', # 12B8..12BE ; Ethiopic
- 'Zzzz', # 12BF..12BF ; Unknown
- 'Ethi', # 12C0..12C0 ; Ethiopic
- 'Zzzz', # 12C1..12C1 ; Unknown
- 'Ethi', # 12C2..12C5 ; Ethiopic
- 'Zzzz', # 12C6..12C7 ; Unknown
- 'Ethi', # 12C8..12D6 ; Ethiopic
- 'Zzzz', # 12D7..12D7 ; Unknown
- 'Ethi', # 12D8..1310 ; Ethiopic
- 'Zzzz', # 1311..1311 ; Unknown
- 'Ethi', # 1312..1315 ; Ethiopic
- 'Zzzz', # 1316..1317 ; Unknown
- 'Ethi', # 1318..135A ; Ethiopic
- 'Zzzz', # 135B..135C ; Unknown
- 'Ethi', # 135D..137C ; Ethiopic
- 'Zzzz', # 137D..137F ; Unknown
- 'Ethi', # 1380..1399 ; Ethiopic
- 'Zzzz', # 139A..139F ; Unknown
- 'Cher', # 13A0..13F5 ; Cherokee
- 'Zzzz', # 13F6..13F7 ; Unknown
- 'Cher', # 13F8..13FD ; Cherokee
- 'Zzzz', # 13FE..13FF ; Unknown
- 'Cans', # 1400..167F ; Canadian_Aboriginal
- 'Ogam', # 1680..169C ; Ogham
- 'Zzzz', # 169D..169F ; Unknown
- 'Runr', # 16A0..16EA ; Runic
- 'Zyyy', # 16EB..16ED ; Common
- 'Runr', # 16EE..16F8 ; Runic
- 'Zzzz', # 16F9..16FF ; Unknown
- 'Tglg', # 1700..1715 ; Tagalog
- 'Zzzz', # 1716..171E ; Unknown
- 'Tglg', # 171F..171F ; Tagalog
- 'Hano', # 1720..1734 ; Hanunoo
- 'Zyyy', # 1735..1736 ; Common
- 'Zzzz', # 1737..173F ; Unknown
- 'Buhd', # 1740..1753 ; Buhid
- 'Zzzz', # 1754..175F ; Unknown
- 'Tagb', # 1760..176C ; Tagbanwa
- 'Zzzz', # 176D..176D ; Unknown
- 'Tagb', # 176E..1770 ; Tagbanwa
- 'Zzzz', # 1771..1771 ; Unknown
- 'Tagb', # 1772..1773 ; Tagbanwa
- 'Zzzz', # 1774..177F ; Unknown
- 'Khmr', # 1780..17DD ; Khmer
- 'Zzzz', # 17DE..17DF ; Unknown
- 'Khmr', # 17E0..17E9 ; Khmer
- 'Zzzz', # 17EA..17EF ; Unknown
- 'Khmr', # 17F0..17F9 ; Khmer
- 'Zzzz', # 17FA..17FF ; Unknown
- 'Mong', # 1800..1801 ; Mongolian
- 'Zyyy', # 1802..1803 ; Common
- 'Mong', # 1804..1804 ; Mongolian
- 'Zyyy', # 1805..1805 ; Common
- 'Mong', # 1806..1819 ; Mongolian
- 'Zzzz', # 181A..181F ; Unknown
- 'Mong', # 1820..1878 ; Mongolian
- 'Zzzz', # 1879..187F ; Unknown
- 'Mong', # 1880..18AA ; Mongolian
- 'Zzzz', # 18AB..18AF ; Unknown
- 'Cans', # 18B0..18F5 ; Canadian_Aboriginal
- 'Zzzz', # 18F6..18FF ; Unknown
- 'Limb', # 1900..191E ; Limbu
- 'Zzzz', # 191F..191F ; Unknown
- 'Limb', # 1920..192B ; Limbu
- 'Zzzz', # 192C..192F ; Unknown
- 'Limb', # 1930..193B ; Limbu
- 'Zzzz', # 193C..193F ; Unknown
- 'Limb', # 1940..1940 ; Limbu
- 'Zzzz', # 1941..1943 ; Unknown
- 'Limb', # 1944..194F ; Limbu
- 'Tale', # 1950..196D ; Tai_Le
- 'Zzzz', # 196E..196F ; Unknown
- 'Tale', # 1970..1974 ; Tai_Le
- 'Zzzz', # 1975..197F ; Unknown
- 'Talu', # 1980..19AB ; New_Tai_Lue
- 'Zzzz', # 19AC..19AF ; Unknown
- 'Talu', # 19B0..19C9 ; New_Tai_Lue
- 'Zzzz', # 19CA..19CF ; Unknown
- 'Talu', # 19D0..19DA ; New_Tai_Lue
- 'Zzzz', # 19DB..19DD ; Unknown
- 'Talu', # 19DE..19DF ; New_Tai_Lue
- 'Khmr', # 19E0..19FF ; Khmer
- 'Bugi', # 1A00..1A1B ; Buginese
- 'Zzzz', # 1A1C..1A1D ; Unknown
- 'Bugi', # 1A1E..1A1F ; Buginese
- 'Lana', # 1A20..1A5E ; Tai_Tham
- 'Zzzz', # 1A5F..1A5F ; Unknown
- 'Lana', # 1A60..1A7C ; Tai_Tham
- 'Zzzz', # 1A7D..1A7E ; Unknown
- 'Lana', # 1A7F..1A89 ; Tai_Tham
- 'Zzzz', # 1A8A..1A8F ; Unknown
- 'Lana', # 1A90..1A99 ; Tai_Tham
- 'Zzzz', # 1A9A..1A9F ; Unknown
- 'Lana', # 1AA0..1AAD ; Tai_Tham
- 'Zzzz', # 1AAE..1AAF ; Unknown
- 'Zinh', # 1AB0..1ACE ; Inherited
- 'Zzzz', # 1ACF..1AFF ; Unknown
- 'Bali', # 1B00..1B4C ; Balinese
- 'Zzzz', # 1B4D..1B4F ; Unknown
- 'Bali', # 1B50..1B7E ; Balinese
- 'Zzzz', # 1B7F..1B7F ; Unknown
- 'Sund', # 1B80..1BBF ; Sundanese
- 'Batk', # 1BC0..1BF3 ; Batak
- 'Zzzz', # 1BF4..1BFB ; Unknown
- 'Batk', # 1BFC..1BFF ; Batak
- 'Lepc', # 1C00..1C37 ; Lepcha
- 'Zzzz', # 1C38..1C3A ; Unknown
- 'Lepc', # 1C3B..1C49 ; Lepcha
- 'Zzzz', # 1C4A..1C4C ; Unknown
- 'Lepc', # 1C4D..1C4F ; Lepcha
- 'Olck', # 1C50..1C7F ; Ol_Chiki
- 'Cyrl', # 1C80..1C88 ; Cyrillic
- 'Zzzz', # 1C89..1C8F ; Unknown
- 'Geor', # 1C90..1CBA ; Georgian
- 'Zzzz', # 1CBB..1CBC ; Unknown
- 'Geor', # 1CBD..1CBF ; Georgian
- 'Sund', # 1CC0..1CC7 ; Sundanese
- 'Zzzz', # 1CC8..1CCF ; Unknown
- 'Zinh', # 1CD0..1CD2 ; Inherited
- 'Zyyy', # 1CD3..1CD3 ; Common
- 'Zinh', # 1CD4..1CE0 ; Inherited
- 'Zyyy', # 1CE1..1CE1 ; Common
- 'Zinh', # 1CE2..1CE8 ; Inherited
- 'Zyyy', # 1CE9..1CEC ; Common
- 'Zinh', # 1CED..1CED ; Inherited
- 'Zyyy', # 1CEE..1CF3 ; Common
- 'Zinh', # 1CF4..1CF4 ; Inherited
- 'Zyyy', # 1CF5..1CF7 ; Common
- 'Zinh', # 1CF8..1CF9 ; Inherited
- 'Zyyy', # 1CFA..1CFA ; Common
- 'Zzzz', # 1CFB..1CFF ; Unknown
- 'Latn', # 1D00..1D25 ; Latin
- 'Grek', # 1D26..1D2A ; Greek
- 'Cyrl', # 1D2B..1D2B ; Cyrillic
- 'Latn', # 1D2C..1D5C ; Latin
- 'Grek', # 1D5D..1D61 ; Greek
- 'Latn', # 1D62..1D65 ; Latin
- 'Grek', # 1D66..1D6A ; Greek
- 'Latn', # 1D6B..1D77 ; Latin
- 'Cyrl', # 1D78..1D78 ; Cyrillic
- 'Latn', # 1D79..1DBE ; Latin
- 'Grek', # 1DBF..1DBF ; Greek
- 'Zinh', # 1DC0..1DFF ; Inherited
- 'Latn', # 1E00..1EFF ; Latin
- 'Grek', # 1F00..1F15 ; Greek
- 'Zzzz', # 1F16..1F17 ; Unknown
- 'Grek', # 1F18..1F1D ; Greek
- 'Zzzz', # 1F1E..1F1F ; Unknown
- 'Grek', # 1F20..1F45 ; Greek
- 'Zzzz', # 1F46..1F47 ; Unknown
- 'Grek', # 1F48..1F4D ; Greek
- 'Zzzz', # 1F4E..1F4F ; Unknown
- 'Grek', # 1F50..1F57 ; Greek
- 'Zzzz', # 1F58..1F58 ; Unknown
- 'Grek', # 1F59..1F59 ; Greek
- 'Zzzz', # 1F5A..1F5A ; Unknown
- 'Grek', # 1F5B..1F5B ; Greek
- 'Zzzz', # 1F5C..1F5C ; Unknown
- 'Grek', # 1F5D..1F5D ; Greek
- 'Zzzz', # 1F5E..1F5E ; Unknown
- 'Grek', # 1F5F..1F7D ; Greek
- 'Zzzz', # 1F7E..1F7F ; Unknown
- 'Grek', # 1F80..1FB4 ; Greek
- 'Zzzz', # 1FB5..1FB5 ; Unknown
- 'Grek', # 1FB6..1FC4 ; Greek
- 'Zzzz', # 1FC5..1FC5 ; Unknown
- 'Grek', # 1FC6..1FD3 ; Greek
- 'Zzzz', # 1FD4..1FD5 ; Unknown
- 'Grek', # 1FD6..1FDB ; Greek
- 'Zzzz', # 1FDC..1FDC ; Unknown
- 'Grek', # 1FDD..1FEF ; Greek
- 'Zzzz', # 1FF0..1FF1 ; Unknown
- 'Grek', # 1FF2..1FF4 ; Greek
- 'Zzzz', # 1FF5..1FF5 ; Unknown
- 'Grek', # 1FF6..1FFE ; Greek
- 'Zzzz', # 1FFF..1FFF ; Unknown
- 'Zyyy', # 2000..200B ; Common
- 'Zinh', # 200C..200D ; Inherited
- 'Zyyy', # 200E..2064 ; Common
- 'Zzzz', # 2065..2065 ; Unknown
- 'Zyyy', # 2066..2070 ; Common
- 'Latn', # 2071..2071 ; Latin
- 'Zzzz', # 2072..2073 ; Unknown
- 'Zyyy', # 2074..207E ; Common
- 'Latn', # 207F..207F ; Latin
- 'Zyyy', # 2080..208E ; Common
- 'Zzzz', # 208F..208F ; Unknown
- 'Latn', # 2090..209C ; Latin
- 'Zzzz', # 209D..209F ; Unknown
- 'Zyyy', # 20A0..20C0 ; Common
- 'Zzzz', # 20C1..20CF ; Unknown
- 'Zinh', # 20D0..20F0 ; Inherited
- 'Zzzz', # 20F1..20FF ; Unknown
- 'Zyyy', # 2100..2125 ; Common
- 'Grek', # 2126..2126 ; Greek
- 'Zyyy', # 2127..2129 ; Common
- 'Latn', # 212A..212B ; Latin
- 'Zyyy', # 212C..2131 ; Common
- 'Latn', # 2132..2132 ; Latin
- 'Zyyy', # 2133..214D ; Common
- 'Latn', # 214E..214E ; Latin
- 'Zyyy', # 214F..215F ; Common
- 'Latn', # 2160..2188 ; Latin
- 'Zyyy', # 2189..218B ; Common
- 'Zzzz', # 218C..218F ; Unknown
- 'Zyyy', # 2190..2426 ; Common
- 'Zzzz', # 2427..243F ; Unknown
- 'Zyyy', # 2440..244A ; Common
- 'Zzzz', # 244B..245F ; Unknown
- 'Zyyy', # 2460..27FF ; Common
- 'Brai', # 2800..28FF ; Braille
- 'Zyyy', # 2900..2B73 ; Common
- 'Zzzz', # 2B74..2B75 ; Unknown
- 'Zyyy', # 2B76..2B95 ; Common
- 'Zzzz', # 2B96..2B96 ; Unknown
- 'Zyyy', # 2B97..2BFF ; Common
- 'Glag', # 2C00..2C5F ; Glagolitic
- 'Latn', # 2C60..2C7F ; Latin
- 'Copt', # 2C80..2CF3 ; Coptic
- 'Zzzz', # 2CF4..2CF8 ; Unknown
- 'Copt', # 2CF9..2CFF ; Coptic
- 'Geor', # 2D00..2D25 ; Georgian
- 'Zzzz', # 2D26..2D26 ; Unknown
- 'Geor', # 2D27..2D27 ; Georgian
- 'Zzzz', # 2D28..2D2C ; Unknown
- 'Geor', # 2D2D..2D2D ; Georgian
- 'Zzzz', # 2D2E..2D2F ; Unknown
- 'Tfng', # 2D30..2D67 ; Tifinagh
- 'Zzzz', # 2D68..2D6E ; Unknown
- 'Tfng', # 2D6F..2D70 ; Tifinagh
- 'Zzzz', # 2D71..2D7E ; Unknown
- 'Tfng', # 2D7F..2D7F ; Tifinagh
- 'Ethi', # 2D80..2D96 ; Ethiopic
- 'Zzzz', # 2D97..2D9F ; Unknown
- 'Ethi', # 2DA0..2DA6 ; Ethiopic
- 'Zzzz', # 2DA7..2DA7 ; Unknown
- 'Ethi', # 2DA8..2DAE ; Ethiopic
- 'Zzzz', # 2DAF..2DAF ; Unknown
- 'Ethi', # 2DB0..2DB6 ; Ethiopic
- 'Zzzz', # 2DB7..2DB7 ; Unknown
- 'Ethi', # 2DB8..2DBE ; Ethiopic
- 'Zzzz', # 2DBF..2DBF ; Unknown
- 'Ethi', # 2DC0..2DC6 ; Ethiopic
- 'Zzzz', # 2DC7..2DC7 ; Unknown
- 'Ethi', # 2DC8..2DCE ; Ethiopic
- 'Zzzz', # 2DCF..2DCF ; Unknown
- 'Ethi', # 2DD0..2DD6 ; Ethiopic
- 'Zzzz', # 2DD7..2DD7 ; Unknown
- 'Ethi', # 2DD8..2DDE ; Ethiopic
- 'Zzzz', # 2DDF..2DDF ; Unknown
- 'Cyrl', # 2DE0..2DFF ; Cyrillic
- 'Zyyy', # 2E00..2E5D ; Common
- 'Zzzz', # 2E5E..2E7F ; Unknown
- 'Hani', # 2E80..2E99 ; Han
- 'Zzzz', # 2E9A..2E9A ; Unknown
- 'Hani', # 2E9B..2EF3 ; Han
- 'Zzzz', # 2EF4..2EFF ; Unknown
- 'Hani', # 2F00..2FD5 ; Han
- 'Zzzz', # 2FD6..2FEF ; Unknown
- 'Zyyy', # 2FF0..2FFB ; Common
- 'Zzzz', # 2FFC..2FFF ; Unknown
- 'Zyyy', # 3000..3004 ; Common
- 'Hani', # 3005..3005 ; Han
- 'Zyyy', # 3006..3006 ; Common
- 'Hani', # 3007..3007 ; Han
- 'Zyyy', # 3008..3020 ; Common
- 'Hani', # 3021..3029 ; Han
- 'Zinh', # 302A..302D ; Inherited
- 'Hang', # 302E..302F ; Hangul
- 'Zyyy', # 3030..3037 ; Common
- 'Hani', # 3038..303B ; Han
- 'Zyyy', # 303C..303F ; Common
- 'Zzzz', # 3040..3040 ; Unknown
- 'Hira', # 3041..3096 ; Hiragana
- 'Zzzz', # 3097..3098 ; Unknown
- 'Zinh', # 3099..309A ; Inherited
- 'Zyyy', # 309B..309C ; Common
- 'Hira', # 309D..309F ; Hiragana
- 'Zyyy', # 30A0..30A0 ; Common
- 'Kana', # 30A1..30FA ; Katakana
- 'Zyyy', # 30FB..30FC ; Common
- 'Kana', # 30FD..30FF ; Katakana
- 'Zzzz', # 3100..3104 ; Unknown
- 'Bopo', # 3105..312F ; Bopomofo
- 'Zzzz', # 3130..3130 ; Unknown
- 'Hang', # 3131..318E ; Hangul
- 'Zzzz', # 318F..318F ; Unknown
- 'Zyyy', # 3190..319F ; Common
- 'Bopo', # 31A0..31BF ; Bopomofo
- 'Zyyy', # 31C0..31E3 ; Common
- 'Zzzz', # 31E4..31EF ; Unknown
- 'Kana', # 31F0..31FF ; Katakana
- 'Hang', # 3200..321E ; Hangul
- 'Zzzz', # 321F..321F ; Unknown
- 'Zyyy', # 3220..325F ; Common
- 'Hang', # 3260..327E ; Hangul
- 'Zyyy', # 327F..32CF ; Common
- 'Kana', # 32D0..32FE ; Katakana
- 'Zyyy', # 32FF..32FF ; Common
- 'Kana', # 3300..3357 ; Katakana
- 'Zyyy', # 3358..33FF ; Common
- 'Hani', # 3400..4DBF ; Han
- 'Zyyy', # 4DC0..4DFF ; Common
- 'Hani', # 4E00..9FFF ; Han
- 'Yiii', # A000..A48C ; Yi
- 'Zzzz', # A48D..A48F ; Unknown
- 'Yiii', # A490..A4C6 ; Yi
- 'Zzzz', # A4C7..A4CF ; Unknown
- 'Lisu', # A4D0..A4FF ; Lisu
- 'Vaii', # A500..A62B ; Vai
- 'Zzzz', # A62C..A63F ; Unknown
- 'Cyrl', # A640..A69F ; Cyrillic
- 'Bamu', # A6A0..A6F7 ; Bamum
- 'Zzzz', # A6F8..A6FF ; Unknown
- 'Zyyy', # A700..A721 ; Common
- 'Latn', # A722..A787 ; Latin
- 'Zyyy', # A788..A78A ; Common
- 'Latn', # A78B..A7CA ; Latin
- 'Zzzz', # A7CB..A7CF ; Unknown
- 'Latn', # A7D0..A7D1 ; Latin
- 'Zzzz', # A7D2..A7D2 ; Unknown
- 'Latn', # A7D3..A7D3 ; Latin
- 'Zzzz', # A7D4..A7D4 ; Unknown
- 'Latn', # A7D5..A7D9 ; Latin
- 'Zzzz', # A7DA..A7F1 ; Unknown
- 'Latn', # A7F2..A7FF ; Latin
- 'Sylo', # A800..A82C ; Syloti_Nagri
- 'Zzzz', # A82D..A82F ; Unknown
- 'Zyyy', # A830..A839 ; Common
- 'Zzzz', # A83A..A83F ; Unknown
- 'Phag', # A840..A877 ; Phags_Pa
- 'Zzzz', # A878..A87F ; Unknown
- 'Saur', # A880..A8C5 ; Saurashtra
- 'Zzzz', # A8C6..A8CD ; Unknown
- 'Saur', # A8CE..A8D9 ; Saurashtra
- 'Zzzz', # A8DA..A8DF ; Unknown
- 'Deva', # A8E0..A8FF ; Devanagari
- 'Kali', # A900..A92D ; Kayah_Li
- 'Zyyy', # A92E..A92E ; Common
- 'Kali', # A92F..A92F ; Kayah_Li
- 'Rjng', # A930..A953 ; Rejang
- 'Zzzz', # A954..A95E ; Unknown
- 'Rjng', # A95F..A95F ; Rejang
- 'Hang', # A960..A97C ; Hangul
- 'Zzzz', # A97D..A97F ; Unknown
- 'Java', # A980..A9CD ; Javanese
- 'Zzzz', # A9CE..A9CE ; Unknown
- 'Zyyy', # A9CF..A9CF ; Common
- 'Java', # A9D0..A9D9 ; Javanese
- 'Zzzz', # A9DA..A9DD ; Unknown
- 'Java', # A9DE..A9DF ; Javanese
- 'Mymr', # A9E0..A9FE ; Myanmar
- 'Zzzz', # A9FF..A9FF ; Unknown
- 'Cham', # AA00..AA36 ; Cham
- 'Zzzz', # AA37..AA3F ; Unknown
- 'Cham', # AA40..AA4D ; Cham
- 'Zzzz', # AA4E..AA4F ; Unknown
- 'Cham', # AA50..AA59 ; Cham
- 'Zzzz', # AA5A..AA5B ; Unknown
- 'Cham', # AA5C..AA5F ; Cham
- 'Mymr', # AA60..AA7F ; Myanmar
- 'Tavt', # AA80..AAC2 ; Tai_Viet
- 'Zzzz', # AAC3..AADA ; Unknown
- 'Tavt', # AADB..AADF ; Tai_Viet
- 'Mtei', # AAE0..AAF6 ; Meetei_Mayek
- 'Zzzz', # AAF7..AB00 ; Unknown
- 'Ethi', # AB01..AB06 ; Ethiopic
- 'Zzzz', # AB07..AB08 ; Unknown
- 'Ethi', # AB09..AB0E ; Ethiopic
- 'Zzzz', # AB0F..AB10 ; Unknown
- 'Ethi', # AB11..AB16 ; Ethiopic
- 'Zzzz', # AB17..AB1F ; Unknown
- 'Ethi', # AB20..AB26 ; Ethiopic
- 'Zzzz', # AB27..AB27 ; Unknown
- 'Ethi', # AB28..AB2E ; Ethiopic
- 'Zzzz', # AB2F..AB2F ; Unknown
- 'Latn', # AB30..AB5A ; Latin
- 'Zyyy', # AB5B..AB5B ; Common
- 'Latn', # AB5C..AB64 ; Latin
- 'Grek', # AB65..AB65 ; Greek
- 'Latn', # AB66..AB69 ; Latin
- 'Zyyy', # AB6A..AB6B ; Common
- 'Zzzz', # AB6C..AB6F ; Unknown
- 'Cher', # AB70..ABBF ; Cherokee
- 'Mtei', # ABC0..ABED ; Meetei_Mayek
- 'Zzzz', # ABEE..ABEF ; Unknown
- 'Mtei', # ABF0..ABF9 ; Meetei_Mayek
- 'Zzzz', # ABFA..ABFF ; Unknown
- 'Hang', # AC00..D7A3 ; Hangul
- 'Zzzz', # D7A4..D7AF ; Unknown
- 'Hang', # D7B0..D7C6 ; Hangul
- 'Zzzz', # D7C7..D7CA ; Unknown
- 'Hang', # D7CB..D7FB ; Hangul
- 'Zzzz', # D7FC..F8FF ; Unknown
- 'Hani', # F900..FA6D ; Han
- 'Zzzz', # FA6E..FA6F ; Unknown
- 'Hani', # FA70..FAD9 ; Han
- 'Zzzz', # FADA..FAFF ; Unknown
- 'Latn', # FB00..FB06 ; Latin
- 'Zzzz', # FB07..FB12 ; Unknown
- 'Armn', # FB13..FB17 ; Armenian
- 'Zzzz', # FB18..FB1C ; Unknown
- 'Hebr', # FB1D..FB36 ; Hebrew
- 'Zzzz', # FB37..FB37 ; Unknown
- 'Hebr', # FB38..FB3C ; Hebrew
- 'Zzzz', # FB3D..FB3D ; Unknown
- 'Hebr', # FB3E..FB3E ; Hebrew
- 'Zzzz', # FB3F..FB3F ; Unknown
- 'Hebr', # FB40..FB41 ; Hebrew
- 'Zzzz', # FB42..FB42 ; Unknown
- 'Hebr', # FB43..FB44 ; Hebrew
- 'Zzzz', # FB45..FB45 ; Unknown
- 'Hebr', # FB46..FB4F ; Hebrew
- 'Arab', # FB50..FBC2 ; Arabic
- 'Zzzz', # FBC3..FBD2 ; Unknown
- 'Arab', # FBD3..FD3D ; Arabic
- 'Zyyy', # FD3E..FD3F ; Common
- 'Arab', # FD40..FD8F ; Arabic
- 'Zzzz', # FD90..FD91 ; Unknown
- 'Arab', # FD92..FDC7 ; Arabic
- 'Zzzz', # FDC8..FDCE ; Unknown
- 'Arab', # FDCF..FDCF ; Arabic
- 'Zzzz', # FDD0..FDEF ; Unknown
- 'Arab', # FDF0..FDFF ; Arabic
- 'Zinh', # FE00..FE0F ; Inherited
- 'Zyyy', # FE10..FE19 ; Common
- 'Zzzz', # FE1A..FE1F ; Unknown
- 'Zinh', # FE20..FE2D ; Inherited
- 'Cyrl', # FE2E..FE2F ; Cyrillic
- 'Zyyy', # FE30..FE52 ; Common
- 'Zzzz', # FE53..FE53 ; Unknown
- 'Zyyy', # FE54..FE66 ; Common
- 'Zzzz', # FE67..FE67 ; Unknown
- 'Zyyy', # FE68..FE6B ; Common
- 'Zzzz', # FE6C..FE6F ; Unknown
- 'Arab', # FE70..FE74 ; Arabic
- 'Zzzz', # FE75..FE75 ; Unknown
- 'Arab', # FE76..FEFC ; Arabic
- 'Zzzz', # FEFD..FEFE ; Unknown
- 'Zyyy', # FEFF..FEFF ; Common
- 'Zzzz', # FF00..FF00 ; Unknown
- 'Zyyy', # FF01..FF20 ; Common
- 'Latn', # FF21..FF3A ; Latin
- 'Zyyy', # FF3B..FF40 ; Common
- 'Latn', # FF41..FF5A ; Latin
- 'Zyyy', # FF5B..FF65 ; Common
- 'Kana', # FF66..FF6F ; Katakana
- 'Zyyy', # FF70..FF70 ; Common
- 'Kana', # FF71..FF9D ; Katakana
- 'Zyyy', # FF9E..FF9F ; Common
- 'Hang', # FFA0..FFBE ; Hangul
- 'Zzzz', # FFBF..FFC1 ; Unknown
- 'Hang', # FFC2..FFC7 ; Hangul
- 'Zzzz', # FFC8..FFC9 ; Unknown
- 'Hang', # FFCA..FFCF ; Hangul
- 'Zzzz', # FFD0..FFD1 ; Unknown
- 'Hang', # FFD2..FFD7 ; Hangul
- 'Zzzz', # FFD8..FFD9 ; Unknown
- 'Hang', # FFDA..FFDC ; Hangul
- 'Zzzz', # FFDD..FFDF ; Unknown
- 'Zyyy', # FFE0..FFE6 ; Common
- 'Zzzz', # FFE7..FFE7 ; Unknown
- 'Zyyy', # FFE8..FFEE ; Common
- 'Zzzz', # FFEF..FFF8 ; Unknown
- 'Zyyy', # FFF9..FFFD ; Common
- 'Zzzz', # FFFE..FFFF ; Unknown
- 'Linb', # 10000..1000B ; Linear_B
- 'Zzzz', # 1000C..1000C ; Unknown
- 'Linb', # 1000D..10026 ; Linear_B
- 'Zzzz', # 10027..10027 ; Unknown
- 'Linb', # 10028..1003A ; Linear_B
- 'Zzzz', # 1003B..1003B ; Unknown
- 'Linb', # 1003C..1003D ; Linear_B
- 'Zzzz', # 1003E..1003E ; Unknown
- 'Linb', # 1003F..1004D ; Linear_B
- 'Zzzz', # 1004E..1004F ; Unknown
- 'Linb', # 10050..1005D ; Linear_B
- 'Zzzz', # 1005E..1007F ; Unknown
- 'Linb', # 10080..100FA ; Linear_B
- 'Zzzz', # 100FB..100FF ; Unknown
- 'Zyyy', # 10100..10102 ; Common
- 'Zzzz', # 10103..10106 ; Unknown
- 'Zyyy', # 10107..10133 ; Common
- 'Zzzz', # 10134..10136 ; Unknown
- 'Zyyy', # 10137..1013F ; Common
- 'Grek', # 10140..1018E ; Greek
- 'Zzzz', # 1018F..1018F ; Unknown
- 'Zyyy', # 10190..1019C ; Common
- 'Zzzz', # 1019D..1019F ; Unknown
- 'Grek', # 101A0..101A0 ; Greek
- 'Zzzz', # 101A1..101CF ; Unknown
- 'Zyyy', # 101D0..101FC ; Common
- 'Zinh', # 101FD..101FD ; Inherited
- 'Zzzz', # 101FE..1027F ; Unknown
- 'Lyci', # 10280..1029C ; Lycian
- 'Zzzz', # 1029D..1029F ; Unknown
- 'Cari', # 102A0..102D0 ; Carian
- 'Zzzz', # 102D1..102DF ; Unknown
- 'Zinh', # 102E0..102E0 ; Inherited
- 'Zyyy', # 102E1..102FB ; Common
- 'Zzzz', # 102FC..102FF ; Unknown
- 'Ital', # 10300..10323 ; Old_Italic
- 'Zzzz', # 10324..1032C ; Unknown
- 'Ital', # 1032D..1032F ; Old_Italic
- 'Goth', # 10330..1034A ; Gothic
- 'Zzzz', # 1034B..1034F ; Unknown
- 'Perm', # 10350..1037A ; Old_Permic
- 'Zzzz', # 1037B..1037F ; Unknown
- 'Ugar', # 10380..1039D ; Ugaritic
- 'Zzzz', # 1039E..1039E ; Unknown
- 'Ugar', # 1039F..1039F ; Ugaritic
- 'Xpeo', # 103A0..103C3 ; Old_Persian
- 'Zzzz', # 103C4..103C7 ; Unknown
- 'Xpeo', # 103C8..103D5 ; Old_Persian
- 'Zzzz', # 103D6..103FF ; Unknown
- 'Dsrt', # 10400..1044F ; Deseret
- 'Shaw', # 10450..1047F ; Shavian
- 'Osma', # 10480..1049D ; Osmanya
- 'Zzzz', # 1049E..1049F ; Unknown
- 'Osma', # 104A0..104A9 ; Osmanya
- 'Zzzz', # 104AA..104AF ; Unknown
- 'Osge', # 104B0..104D3 ; Osage
- 'Zzzz', # 104D4..104D7 ; Unknown
- 'Osge', # 104D8..104FB ; Osage
- 'Zzzz', # 104FC..104FF ; Unknown
- 'Elba', # 10500..10527 ; Elbasan
- 'Zzzz', # 10528..1052F ; Unknown
- 'Aghb', # 10530..10563 ; Caucasian_Albanian
- 'Zzzz', # 10564..1056E ; Unknown
- 'Aghb', # 1056F..1056F ; Caucasian_Albanian
- 'Vith', # 10570..1057A ; Vithkuqi
- 'Zzzz', # 1057B..1057B ; Unknown
- 'Vith', # 1057C..1058A ; Vithkuqi
- 'Zzzz', # 1058B..1058B ; Unknown
- 'Vith', # 1058C..10592 ; Vithkuqi
- 'Zzzz', # 10593..10593 ; Unknown
- 'Vith', # 10594..10595 ; Vithkuqi
- 'Zzzz', # 10596..10596 ; Unknown
- 'Vith', # 10597..105A1 ; Vithkuqi
- 'Zzzz', # 105A2..105A2 ; Unknown
- 'Vith', # 105A3..105B1 ; Vithkuqi
- 'Zzzz', # 105B2..105B2 ; Unknown
- 'Vith', # 105B3..105B9 ; Vithkuqi
- 'Zzzz', # 105BA..105BA ; Unknown
- 'Vith', # 105BB..105BC ; Vithkuqi
- 'Zzzz', # 105BD..105FF ; Unknown
- 'Lina', # 10600..10736 ; Linear_A
- 'Zzzz', # 10737..1073F ; Unknown
- 'Lina', # 10740..10755 ; Linear_A
- 'Zzzz', # 10756..1075F ; Unknown
- 'Lina', # 10760..10767 ; Linear_A
- 'Zzzz', # 10768..1077F ; Unknown
- 'Latn', # 10780..10785 ; Latin
- 'Zzzz', # 10786..10786 ; Unknown
- 'Latn', # 10787..107B0 ; Latin
- 'Zzzz', # 107B1..107B1 ; Unknown
- 'Latn', # 107B2..107BA ; Latin
- 'Zzzz', # 107BB..107FF ; Unknown
- 'Cprt', # 10800..10805 ; Cypriot
- 'Zzzz', # 10806..10807 ; Unknown
- 'Cprt', # 10808..10808 ; Cypriot
- 'Zzzz', # 10809..10809 ; Unknown
- 'Cprt', # 1080A..10835 ; Cypriot
- 'Zzzz', # 10836..10836 ; Unknown
- 'Cprt', # 10837..10838 ; Cypriot
- 'Zzzz', # 10839..1083B ; Unknown
- 'Cprt', # 1083C..1083C ; Cypriot
- 'Zzzz', # 1083D..1083E ; Unknown
- 'Cprt', # 1083F..1083F ; Cypriot
- 'Armi', # 10840..10855 ; Imperial_Aramaic
- 'Zzzz', # 10856..10856 ; Unknown
- 'Armi', # 10857..1085F ; Imperial_Aramaic
- 'Palm', # 10860..1087F ; Palmyrene
- 'Nbat', # 10880..1089E ; Nabataean
- 'Zzzz', # 1089F..108A6 ; Unknown
- 'Nbat', # 108A7..108AF ; Nabataean
- 'Zzzz', # 108B0..108DF ; Unknown
- 'Hatr', # 108E0..108F2 ; Hatran
- 'Zzzz', # 108F3..108F3 ; Unknown
- 'Hatr', # 108F4..108F5 ; Hatran
- 'Zzzz', # 108F6..108FA ; Unknown
- 'Hatr', # 108FB..108FF ; Hatran
- 'Phnx', # 10900..1091B ; Phoenician
- 'Zzzz', # 1091C..1091E ; Unknown
- 'Phnx', # 1091F..1091F ; Phoenician
- 'Lydi', # 10920..10939 ; Lydian
- 'Zzzz', # 1093A..1093E ; Unknown
- 'Lydi', # 1093F..1093F ; Lydian
- 'Zzzz', # 10940..1097F ; Unknown
- 'Mero', # 10980..1099F ; Meroitic_Hieroglyphs
- 'Merc', # 109A0..109B7 ; Meroitic_Cursive
- 'Zzzz', # 109B8..109BB ; Unknown
- 'Merc', # 109BC..109CF ; Meroitic_Cursive
- 'Zzzz', # 109D0..109D1 ; Unknown
- 'Merc', # 109D2..109FF ; Meroitic_Cursive
- 'Khar', # 10A00..10A03 ; Kharoshthi
- 'Zzzz', # 10A04..10A04 ; Unknown
- 'Khar', # 10A05..10A06 ; Kharoshthi
- 'Zzzz', # 10A07..10A0B ; Unknown
- 'Khar', # 10A0C..10A13 ; Kharoshthi
- 'Zzzz', # 10A14..10A14 ; Unknown
- 'Khar', # 10A15..10A17 ; Kharoshthi
- 'Zzzz', # 10A18..10A18 ; Unknown
- 'Khar', # 10A19..10A35 ; Kharoshthi
- 'Zzzz', # 10A36..10A37 ; Unknown
- 'Khar', # 10A38..10A3A ; Kharoshthi
- 'Zzzz', # 10A3B..10A3E ; Unknown
- 'Khar', # 10A3F..10A48 ; Kharoshthi
- 'Zzzz', # 10A49..10A4F ; Unknown
- 'Khar', # 10A50..10A58 ; Kharoshthi
- 'Zzzz', # 10A59..10A5F ; Unknown
- 'Sarb', # 10A60..10A7F ; Old_South_Arabian
- 'Narb', # 10A80..10A9F ; Old_North_Arabian
- 'Zzzz', # 10AA0..10ABF ; Unknown
- 'Mani', # 10AC0..10AE6 ; Manichaean
- 'Zzzz', # 10AE7..10AEA ; Unknown
- 'Mani', # 10AEB..10AF6 ; Manichaean
- 'Zzzz', # 10AF7..10AFF ; Unknown
- 'Avst', # 10B00..10B35 ; Avestan
- 'Zzzz', # 10B36..10B38 ; Unknown
- 'Avst', # 10B39..10B3F ; Avestan
- 'Prti', # 10B40..10B55 ; Inscriptional_Parthian
- 'Zzzz', # 10B56..10B57 ; Unknown
- 'Prti', # 10B58..10B5F ; Inscriptional_Parthian
- 'Phli', # 10B60..10B72 ; Inscriptional_Pahlavi
- 'Zzzz', # 10B73..10B77 ; Unknown
- 'Phli', # 10B78..10B7F ; Inscriptional_Pahlavi
- 'Phlp', # 10B80..10B91 ; Psalter_Pahlavi
- 'Zzzz', # 10B92..10B98 ; Unknown
- 'Phlp', # 10B99..10B9C ; Psalter_Pahlavi
- 'Zzzz', # 10B9D..10BA8 ; Unknown
- 'Phlp', # 10BA9..10BAF ; Psalter_Pahlavi
- 'Zzzz', # 10BB0..10BFF ; Unknown
- 'Orkh', # 10C00..10C48 ; Old_Turkic
- 'Zzzz', # 10C49..10C7F ; Unknown
- 'Hung', # 10C80..10CB2 ; Old_Hungarian
- 'Zzzz', # 10CB3..10CBF ; Unknown
- 'Hung', # 10CC0..10CF2 ; Old_Hungarian
- 'Zzzz', # 10CF3..10CF9 ; Unknown
- 'Hung', # 10CFA..10CFF ; Old_Hungarian
- 'Rohg', # 10D00..10D27 ; Hanifi_Rohingya
- 'Zzzz', # 10D28..10D2F ; Unknown
- 'Rohg', # 10D30..10D39 ; Hanifi_Rohingya
- 'Zzzz', # 10D3A..10E5F ; Unknown
- 'Arab', # 10E60..10E7E ; Arabic
- 'Zzzz', # 10E7F..10E7F ; Unknown
- 'Yezi', # 10E80..10EA9 ; Yezidi
- 'Zzzz', # 10EAA..10EAA ; Unknown
- 'Yezi', # 10EAB..10EAD ; Yezidi
- 'Zzzz', # 10EAE..10EAF ; Unknown
- 'Yezi', # 10EB0..10EB1 ; Yezidi
- 'Zzzz', # 10EB2..10EFF ; Unknown
- 'Sogo', # 10F00..10F27 ; Old_Sogdian
- 'Zzzz', # 10F28..10F2F ; Unknown
- 'Sogd', # 10F30..10F59 ; Sogdian
- 'Zzzz', # 10F5A..10F6F ; Unknown
- 'Ougr', # 10F70..10F89 ; Old_Uyghur
- 'Zzzz', # 10F8A..10FAF ; Unknown
- 'Chrs', # 10FB0..10FCB ; Chorasmian
- 'Zzzz', # 10FCC..10FDF ; Unknown
- 'Elym', # 10FE0..10FF6 ; Elymaic
- 'Zzzz', # 10FF7..10FFF ; Unknown
- 'Brah', # 11000..1104D ; Brahmi
- 'Zzzz', # 1104E..11051 ; Unknown
- 'Brah', # 11052..11075 ; Brahmi
- 'Zzzz', # 11076..1107E ; Unknown
- 'Brah', # 1107F..1107F ; Brahmi
- 'Kthi', # 11080..110C2 ; Kaithi
- 'Zzzz', # 110C3..110CC ; Unknown
- 'Kthi', # 110CD..110CD ; Kaithi
- 'Zzzz', # 110CE..110CF ; Unknown
- 'Sora', # 110D0..110E8 ; Sora_Sompeng
- 'Zzzz', # 110E9..110EF ; Unknown
- 'Sora', # 110F0..110F9 ; Sora_Sompeng
- 'Zzzz', # 110FA..110FF ; Unknown
- 'Cakm', # 11100..11134 ; Chakma
- 'Zzzz', # 11135..11135 ; Unknown
- 'Cakm', # 11136..11147 ; Chakma
- 'Zzzz', # 11148..1114F ; Unknown
- 'Mahj', # 11150..11176 ; Mahajani
- 'Zzzz', # 11177..1117F ; Unknown
- 'Shrd', # 11180..111DF ; Sharada
- 'Zzzz', # 111E0..111E0 ; Unknown
- 'Sinh', # 111E1..111F4 ; Sinhala
- 'Zzzz', # 111F5..111FF ; Unknown
- 'Khoj', # 11200..11211 ; Khojki
- 'Zzzz', # 11212..11212 ; Unknown
- 'Khoj', # 11213..1123E ; Khojki
- 'Zzzz', # 1123F..1127F ; Unknown
- 'Mult', # 11280..11286 ; Multani
- 'Zzzz', # 11287..11287 ; Unknown
- 'Mult', # 11288..11288 ; Multani
- 'Zzzz', # 11289..11289 ; Unknown
- 'Mult', # 1128A..1128D ; Multani
- 'Zzzz', # 1128E..1128E ; Unknown
- 'Mult', # 1128F..1129D ; Multani
- 'Zzzz', # 1129E..1129E ; Unknown
- 'Mult', # 1129F..112A9 ; Multani
- 'Zzzz', # 112AA..112AF ; Unknown
- 'Sind', # 112B0..112EA ; Khudawadi
- 'Zzzz', # 112EB..112EF ; Unknown
- 'Sind', # 112F0..112F9 ; Khudawadi
- 'Zzzz', # 112FA..112FF ; Unknown
- 'Gran', # 11300..11303 ; Grantha
- 'Zzzz', # 11304..11304 ; Unknown
- 'Gran', # 11305..1130C ; Grantha
- 'Zzzz', # 1130D..1130E ; Unknown
- 'Gran', # 1130F..11310 ; Grantha
- 'Zzzz', # 11311..11312 ; Unknown
- 'Gran', # 11313..11328 ; Grantha
- 'Zzzz', # 11329..11329 ; Unknown
- 'Gran', # 1132A..11330 ; Grantha
- 'Zzzz', # 11331..11331 ; Unknown
- 'Gran', # 11332..11333 ; Grantha
- 'Zzzz', # 11334..11334 ; Unknown
- 'Gran', # 11335..11339 ; Grantha
- 'Zzzz', # 1133A..1133A ; Unknown
- 'Zinh', # 1133B..1133B ; Inherited
- 'Gran', # 1133C..11344 ; Grantha
- 'Zzzz', # 11345..11346 ; Unknown
- 'Gran', # 11347..11348 ; Grantha
- 'Zzzz', # 11349..1134A ; Unknown
- 'Gran', # 1134B..1134D ; Grantha
- 'Zzzz', # 1134E..1134F ; Unknown
- 'Gran', # 11350..11350 ; Grantha
- 'Zzzz', # 11351..11356 ; Unknown
- 'Gran', # 11357..11357 ; Grantha
- 'Zzzz', # 11358..1135C ; Unknown
- 'Gran', # 1135D..11363 ; Grantha
- 'Zzzz', # 11364..11365 ; Unknown
- 'Gran', # 11366..1136C ; Grantha
- 'Zzzz', # 1136D..1136F ; Unknown
- 'Gran', # 11370..11374 ; Grantha
- 'Zzzz', # 11375..113FF ; Unknown
- 'Newa', # 11400..1145B ; Newa
- 'Zzzz', # 1145C..1145C ; Unknown
- 'Newa', # 1145D..11461 ; Newa
- 'Zzzz', # 11462..1147F ; Unknown
- 'Tirh', # 11480..114C7 ; Tirhuta
- 'Zzzz', # 114C8..114CF ; Unknown
- 'Tirh', # 114D0..114D9 ; Tirhuta
- 'Zzzz', # 114DA..1157F ; Unknown
- 'Sidd', # 11580..115B5 ; Siddham
- 'Zzzz', # 115B6..115B7 ; Unknown
- 'Sidd', # 115B8..115DD ; Siddham
- 'Zzzz', # 115DE..115FF ; Unknown
- 'Modi', # 11600..11644 ; Modi
- 'Zzzz', # 11645..1164F ; Unknown
- 'Modi', # 11650..11659 ; Modi
- 'Zzzz', # 1165A..1165F ; Unknown
- 'Mong', # 11660..1166C ; Mongolian
- 'Zzzz', # 1166D..1167F ; Unknown
- 'Takr', # 11680..116B9 ; Takri
- 'Zzzz', # 116BA..116BF ; Unknown
- 'Takr', # 116C0..116C9 ; Takri
- 'Zzzz', # 116CA..116FF ; Unknown
- 'Ahom', # 11700..1171A ; Ahom
- 'Zzzz', # 1171B..1171C ; Unknown
- 'Ahom', # 1171D..1172B ; Ahom
- 'Zzzz', # 1172C..1172F ; Unknown
- 'Ahom', # 11730..11746 ; Ahom
- 'Zzzz', # 11747..117FF ; Unknown
- 'Dogr', # 11800..1183B ; Dogra
- 'Zzzz', # 1183C..1189F ; Unknown
- 'Wara', # 118A0..118F2 ; Warang_Citi
- 'Zzzz', # 118F3..118FE ; Unknown
- 'Wara', # 118FF..118FF ; Warang_Citi
- 'Diak', # 11900..11906 ; Dives_Akuru
- 'Zzzz', # 11907..11908 ; Unknown
- 'Diak', # 11909..11909 ; Dives_Akuru
- 'Zzzz', # 1190A..1190B ; Unknown
- 'Diak', # 1190C..11913 ; Dives_Akuru
- 'Zzzz', # 11914..11914 ; Unknown
- 'Diak', # 11915..11916 ; Dives_Akuru
- 'Zzzz', # 11917..11917 ; Unknown
- 'Diak', # 11918..11935 ; Dives_Akuru
- 'Zzzz', # 11936..11936 ; Unknown
- 'Diak', # 11937..11938 ; Dives_Akuru
- 'Zzzz', # 11939..1193A ; Unknown
- 'Diak', # 1193B..11946 ; Dives_Akuru
- 'Zzzz', # 11947..1194F ; Unknown
- 'Diak', # 11950..11959 ; Dives_Akuru
- 'Zzzz', # 1195A..1199F ; Unknown
- 'Nand', # 119A0..119A7 ; Nandinagari
- 'Zzzz', # 119A8..119A9 ; Unknown
- 'Nand', # 119AA..119D7 ; Nandinagari
- 'Zzzz', # 119D8..119D9 ; Unknown
- 'Nand', # 119DA..119E4 ; Nandinagari
- 'Zzzz', # 119E5..119FF ; Unknown
- 'Zanb', # 11A00..11A47 ; Zanabazar_Square
- 'Zzzz', # 11A48..11A4F ; Unknown
- 'Soyo', # 11A50..11AA2 ; Soyombo
- 'Zzzz', # 11AA3..11AAF ; Unknown
- 'Cans', # 11AB0..11ABF ; Canadian_Aboriginal
- 'Pauc', # 11AC0..11AF8 ; Pau_Cin_Hau
- 'Zzzz', # 11AF9..11BFF ; Unknown
- 'Bhks', # 11C00..11C08 ; Bhaiksuki
- 'Zzzz', # 11C09..11C09 ; Unknown
- 'Bhks', # 11C0A..11C36 ; Bhaiksuki
- 'Zzzz', # 11C37..11C37 ; Unknown
- 'Bhks', # 11C38..11C45 ; Bhaiksuki
- 'Zzzz', # 11C46..11C4F ; Unknown
- 'Bhks', # 11C50..11C6C ; Bhaiksuki
- 'Zzzz', # 11C6D..11C6F ; Unknown
- 'Marc', # 11C70..11C8F ; Marchen
- 'Zzzz', # 11C90..11C91 ; Unknown
- 'Marc', # 11C92..11CA7 ; Marchen
- 'Zzzz', # 11CA8..11CA8 ; Unknown
- 'Marc', # 11CA9..11CB6 ; Marchen
- 'Zzzz', # 11CB7..11CFF ; Unknown
- 'Gonm', # 11D00..11D06 ; Masaram_Gondi
- 'Zzzz', # 11D07..11D07 ; Unknown
- 'Gonm', # 11D08..11D09 ; Masaram_Gondi
- 'Zzzz', # 11D0A..11D0A ; Unknown
- 'Gonm', # 11D0B..11D36 ; Masaram_Gondi
- 'Zzzz', # 11D37..11D39 ; Unknown
- 'Gonm', # 11D3A..11D3A ; Masaram_Gondi
- 'Zzzz', # 11D3B..11D3B ; Unknown
- 'Gonm', # 11D3C..11D3D ; Masaram_Gondi
- 'Zzzz', # 11D3E..11D3E ; Unknown
- 'Gonm', # 11D3F..11D47 ; Masaram_Gondi
- 'Zzzz', # 11D48..11D4F ; Unknown
- 'Gonm', # 11D50..11D59 ; Masaram_Gondi
- 'Zzzz', # 11D5A..11D5F ; Unknown
- 'Gong', # 11D60..11D65 ; Gunjala_Gondi
- 'Zzzz', # 11D66..11D66 ; Unknown
- 'Gong', # 11D67..11D68 ; Gunjala_Gondi
- 'Zzzz', # 11D69..11D69 ; Unknown
- 'Gong', # 11D6A..11D8E ; Gunjala_Gondi
- 'Zzzz', # 11D8F..11D8F ; Unknown
- 'Gong', # 11D90..11D91 ; Gunjala_Gondi
- 'Zzzz', # 11D92..11D92 ; Unknown
- 'Gong', # 11D93..11D98 ; Gunjala_Gondi
- 'Zzzz', # 11D99..11D9F ; Unknown
- 'Gong', # 11DA0..11DA9 ; Gunjala_Gondi
- 'Zzzz', # 11DAA..11EDF ; Unknown
- 'Maka', # 11EE0..11EF8 ; Makasar
- 'Zzzz', # 11EF9..11FAF ; Unknown
- 'Lisu', # 11FB0..11FB0 ; Lisu
- 'Zzzz', # 11FB1..11FBF ; Unknown
- 'Taml', # 11FC0..11FF1 ; Tamil
- 'Zzzz', # 11FF2..11FFE ; Unknown
- 'Taml', # 11FFF..11FFF ; Tamil
- 'Xsux', # 12000..12399 ; Cuneiform
- 'Zzzz', # 1239A..123FF ; Unknown
- 'Xsux', # 12400..1246E ; Cuneiform
- 'Zzzz', # 1246F..1246F ; Unknown
- 'Xsux', # 12470..12474 ; Cuneiform
- 'Zzzz', # 12475..1247F ; Unknown
- 'Xsux', # 12480..12543 ; Cuneiform
- 'Zzzz', # 12544..12F8F ; Unknown
- 'Cpmn', # 12F90..12FF2 ; Cypro_Minoan
- 'Zzzz', # 12FF3..12FFF ; Unknown
- 'Egyp', # 13000..1342E ; Egyptian_Hieroglyphs
- 'Zzzz', # 1342F..1342F ; Unknown
- 'Egyp', # 13430..13438 ; Egyptian_Hieroglyphs
- 'Zzzz', # 13439..143FF ; Unknown
- 'Hluw', # 14400..14646 ; Anatolian_Hieroglyphs
- 'Zzzz', # 14647..167FF ; Unknown
- 'Bamu', # 16800..16A38 ; Bamum
- 'Zzzz', # 16A39..16A3F ; Unknown
- 'Mroo', # 16A40..16A5E ; Mro
- 'Zzzz', # 16A5F..16A5F ; Unknown
- 'Mroo', # 16A60..16A69 ; Mro
- 'Zzzz', # 16A6A..16A6D ; Unknown
- 'Mroo', # 16A6E..16A6F ; Mro
- 'Tnsa', # 16A70..16ABE ; Tangsa
- 'Zzzz', # 16ABF..16ABF ; Unknown
- 'Tnsa', # 16AC0..16AC9 ; Tangsa
- 'Zzzz', # 16ACA..16ACF ; Unknown
- 'Bass', # 16AD0..16AED ; Bassa_Vah
- 'Zzzz', # 16AEE..16AEF ; Unknown
- 'Bass', # 16AF0..16AF5 ; Bassa_Vah
- 'Zzzz', # 16AF6..16AFF ; Unknown
- 'Hmng', # 16B00..16B45 ; Pahawh_Hmong
- 'Zzzz', # 16B46..16B4F ; Unknown
- 'Hmng', # 16B50..16B59 ; Pahawh_Hmong
- 'Zzzz', # 16B5A..16B5A ; Unknown
- 'Hmng', # 16B5B..16B61 ; Pahawh_Hmong
- 'Zzzz', # 16B62..16B62 ; Unknown
- 'Hmng', # 16B63..16B77 ; Pahawh_Hmong
- 'Zzzz', # 16B78..16B7C ; Unknown
- 'Hmng', # 16B7D..16B8F ; Pahawh_Hmong
- 'Zzzz', # 16B90..16E3F ; Unknown
- 'Medf', # 16E40..16E9A ; Medefaidrin
- 'Zzzz', # 16E9B..16EFF ; Unknown
- 'Plrd', # 16F00..16F4A ; Miao
- 'Zzzz', # 16F4B..16F4E ; Unknown
- 'Plrd', # 16F4F..16F87 ; Miao
- 'Zzzz', # 16F88..16F8E ; Unknown
- 'Plrd', # 16F8F..16F9F ; Miao
- 'Zzzz', # 16FA0..16FDF ; Unknown
- 'Tang', # 16FE0..16FE0 ; Tangut
- 'Nshu', # 16FE1..16FE1 ; Nushu
- 'Hani', # 16FE2..16FE3 ; Han
- 'Kits', # 16FE4..16FE4 ; Khitan_Small_Script
- 'Zzzz', # 16FE5..16FEF ; Unknown
- 'Hani', # 16FF0..16FF1 ; Han
- 'Zzzz', # 16FF2..16FFF ; Unknown
- 'Tang', # 17000..187F7 ; Tangut
- 'Zzzz', # 187F8..187FF ; Unknown
- 'Tang', # 18800..18AFF ; Tangut
- 'Kits', # 18B00..18CD5 ; Khitan_Small_Script
- 'Zzzz', # 18CD6..18CFF ; Unknown
- 'Tang', # 18D00..18D08 ; Tangut
- 'Zzzz', # 18D09..1AFEF ; Unknown
- 'Kana', # 1AFF0..1AFF3 ; Katakana
- 'Zzzz', # 1AFF4..1AFF4 ; Unknown
- 'Kana', # 1AFF5..1AFFB ; Katakana
- 'Zzzz', # 1AFFC..1AFFC ; Unknown
- 'Kana', # 1AFFD..1AFFE ; Katakana
- 'Zzzz', # 1AFFF..1AFFF ; Unknown
- 'Kana', # 1B000..1B000 ; Katakana
- 'Hira', # 1B001..1B11F ; Hiragana
- 'Kana', # 1B120..1B122 ; Katakana
- 'Zzzz', # 1B123..1B14F ; Unknown
- 'Hira', # 1B150..1B152 ; Hiragana
- 'Zzzz', # 1B153..1B163 ; Unknown
- 'Kana', # 1B164..1B167 ; Katakana
- 'Zzzz', # 1B168..1B16F ; Unknown
- 'Nshu', # 1B170..1B2FB ; Nushu
- 'Zzzz', # 1B2FC..1BBFF ; Unknown
- 'Dupl', # 1BC00..1BC6A ; Duployan
- 'Zzzz', # 1BC6B..1BC6F ; Unknown
- 'Dupl', # 1BC70..1BC7C ; Duployan
- 'Zzzz', # 1BC7D..1BC7F ; Unknown
- 'Dupl', # 1BC80..1BC88 ; Duployan
- 'Zzzz', # 1BC89..1BC8F ; Unknown
- 'Dupl', # 1BC90..1BC99 ; Duployan
- 'Zzzz', # 1BC9A..1BC9B ; Unknown
- 'Dupl', # 1BC9C..1BC9F ; Duployan
- 'Zyyy', # 1BCA0..1BCA3 ; Common
- 'Zzzz', # 1BCA4..1CEFF ; Unknown
- 'Zinh', # 1CF00..1CF2D ; Inherited
- 'Zzzz', # 1CF2E..1CF2F ; Unknown
- 'Zinh', # 1CF30..1CF46 ; Inherited
- 'Zzzz', # 1CF47..1CF4F ; Unknown
- 'Zyyy', # 1CF50..1CFC3 ; Common
- 'Zzzz', # 1CFC4..1CFFF ; Unknown
- 'Zyyy', # 1D000..1D0F5 ; Common
- 'Zzzz', # 1D0F6..1D0FF ; Unknown
- 'Zyyy', # 1D100..1D126 ; Common
- 'Zzzz', # 1D127..1D128 ; Unknown
- 'Zyyy', # 1D129..1D166 ; Common
- 'Zinh', # 1D167..1D169 ; Inherited
- 'Zyyy', # 1D16A..1D17A ; Common
- 'Zinh', # 1D17B..1D182 ; Inherited
- 'Zyyy', # 1D183..1D184 ; Common
- 'Zinh', # 1D185..1D18B ; Inherited
- 'Zyyy', # 1D18C..1D1A9 ; Common
- 'Zinh', # 1D1AA..1D1AD ; Inherited
- 'Zyyy', # 1D1AE..1D1EA ; Common
- 'Zzzz', # 1D1EB..1D1FF ; Unknown
- 'Grek', # 1D200..1D245 ; Greek
- 'Zzzz', # 1D246..1D2DF ; Unknown
- 'Zyyy', # 1D2E0..1D2F3 ; Common
- 'Zzzz', # 1D2F4..1D2FF ; Unknown
- 'Zyyy', # 1D300..1D356 ; Common
- 'Zzzz', # 1D357..1D35F ; Unknown
- 'Zyyy', # 1D360..1D378 ; Common
- 'Zzzz', # 1D379..1D3FF ; Unknown
- 'Zyyy', # 1D400..1D454 ; Common
- 'Zzzz', # 1D455..1D455 ; Unknown
- 'Zyyy', # 1D456..1D49C ; Common
- 'Zzzz', # 1D49D..1D49D ; Unknown
- 'Zyyy', # 1D49E..1D49F ; Common
- 'Zzzz', # 1D4A0..1D4A1 ; Unknown
- 'Zyyy', # 1D4A2..1D4A2 ; Common
- 'Zzzz', # 1D4A3..1D4A4 ; Unknown
- 'Zyyy', # 1D4A5..1D4A6 ; Common
- 'Zzzz', # 1D4A7..1D4A8 ; Unknown
- 'Zyyy', # 1D4A9..1D4AC ; Common
- 'Zzzz', # 1D4AD..1D4AD ; Unknown
- 'Zyyy', # 1D4AE..1D4B9 ; Common
- 'Zzzz', # 1D4BA..1D4BA ; Unknown
- 'Zyyy', # 1D4BB..1D4BB ; Common
- 'Zzzz', # 1D4BC..1D4BC ; Unknown
- 'Zyyy', # 1D4BD..1D4C3 ; Common
- 'Zzzz', # 1D4C4..1D4C4 ; Unknown
- 'Zyyy', # 1D4C5..1D505 ; Common
- 'Zzzz', # 1D506..1D506 ; Unknown
- 'Zyyy', # 1D507..1D50A ; Common
- 'Zzzz', # 1D50B..1D50C ; Unknown
- 'Zyyy', # 1D50D..1D514 ; Common
- 'Zzzz', # 1D515..1D515 ; Unknown
- 'Zyyy', # 1D516..1D51C ; Common
- 'Zzzz', # 1D51D..1D51D ; Unknown
- 'Zyyy', # 1D51E..1D539 ; Common
- 'Zzzz', # 1D53A..1D53A ; Unknown
- 'Zyyy', # 1D53B..1D53E ; Common
- 'Zzzz', # 1D53F..1D53F ; Unknown
- 'Zyyy', # 1D540..1D544 ; Common
- 'Zzzz', # 1D545..1D545 ; Unknown
- 'Zyyy', # 1D546..1D546 ; Common
- 'Zzzz', # 1D547..1D549 ; Unknown
- 'Zyyy', # 1D54A..1D550 ; Common
- 'Zzzz', # 1D551..1D551 ; Unknown
- 'Zyyy', # 1D552..1D6A5 ; Common
- 'Zzzz', # 1D6A6..1D6A7 ; Unknown
- 'Zyyy', # 1D6A8..1D7CB ; Common
- 'Zzzz', # 1D7CC..1D7CD ; Unknown
- 'Zyyy', # 1D7CE..1D7FF ; Common
- 'Sgnw', # 1D800..1DA8B ; SignWriting
- 'Zzzz', # 1DA8C..1DA9A ; Unknown
- 'Sgnw', # 1DA9B..1DA9F ; SignWriting
- 'Zzzz', # 1DAA0..1DAA0 ; Unknown
- 'Sgnw', # 1DAA1..1DAAF ; SignWriting
- 'Zzzz', # 1DAB0..1DEFF ; Unknown
- 'Latn', # 1DF00..1DF1E ; Latin
- 'Zzzz', # 1DF1F..1DFFF ; Unknown
- 'Glag', # 1E000..1E006 ; Glagolitic
- 'Zzzz', # 1E007..1E007 ; Unknown
- 'Glag', # 1E008..1E018 ; Glagolitic
- 'Zzzz', # 1E019..1E01A ; Unknown
- 'Glag', # 1E01B..1E021 ; Glagolitic
- 'Zzzz', # 1E022..1E022 ; Unknown
- 'Glag', # 1E023..1E024 ; Glagolitic
- 'Zzzz', # 1E025..1E025 ; Unknown
- 'Glag', # 1E026..1E02A ; Glagolitic
- 'Zzzz', # 1E02B..1E0FF ; Unknown
- 'Hmnp', # 1E100..1E12C ; Nyiakeng_Puachue_Hmong
- 'Zzzz', # 1E12D..1E12F ; Unknown
- 'Hmnp', # 1E130..1E13D ; Nyiakeng_Puachue_Hmong
- 'Zzzz', # 1E13E..1E13F ; Unknown
- 'Hmnp', # 1E140..1E149 ; Nyiakeng_Puachue_Hmong
- 'Zzzz', # 1E14A..1E14D ; Unknown
- 'Hmnp', # 1E14E..1E14F ; Nyiakeng_Puachue_Hmong
- 'Zzzz', # 1E150..1E28F ; Unknown
- 'Toto', # 1E290..1E2AE ; Toto
- 'Zzzz', # 1E2AF..1E2BF ; Unknown
- 'Wcho', # 1E2C0..1E2F9 ; Wancho
- 'Zzzz', # 1E2FA..1E2FE ; Unknown
- 'Wcho', # 1E2FF..1E2FF ; Wancho
- 'Zzzz', # 1E300..1E7DF ; Unknown
- 'Ethi', # 1E7E0..1E7E6 ; Ethiopic
- 'Zzzz', # 1E7E7..1E7E7 ; Unknown
- 'Ethi', # 1E7E8..1E7EB ; Ethiopic
- 'Zzzz', # 1E7EC..1E7EC ; Unknown
- 'Ethi', # 1E7ED..1E7EE ; Ethiopic
- 'Zzzz', # 1E7EF..1E7EF ; Unknown
- 'Ethi', # 1E7F0..1E7FE ; Ethiopic
- 'Zzzz', # 1E7FF..1E7FF ; Unknown
- 'Mend', # 1E800..1E8C4 ; Mende_Kikakui
- 'Zzzz', # 1E8C5..1E8C6 ; Unknown
- 'Mend', # 1E8C7..1E8D6 ; Mende_Kikakui
- 'Zzzz', # 1E8D7..1E8FF ; Unknown
- 'Adlm', # 1E900..1E94B ; Adlam
- 'Zzzz', # 1E94C..1E94F ; Unknown
- 'Adlm', # 1E950..1E959 ; Adlam
- 'Zzzz', # 1E95A..1E95D ; Unknown
- 'Adlm', # 1E95E..1E95F ; Adlam
- 'Zzzz', # 1E960..1EC70 ; Unknown
- 'Zyyy', # 1EC71..1ECB4 ; Common
- 'Zzzz', # 1ECB5..1ED00 ; Unknown
- 'Zyyy', # 1ED01..1ED3D ; Common
- 'Zzzz', # 1ED3E..1EDFF ; Unknown
- 'Arab', # 1EE00..1EE03 ; Arabic
- 'Zzzz', # 1EE04..1EE04 ; Unknown
- 'Arab', # 1EE05..1EE1F ; Arabic
- 'Zzzz', # 1EE20..1EE20 ; Unknown
- 'Arab', # 1EE21..1EE22 ; Arabic
- 'Zzzz', # 1EE23..1EE23 ; Unknown
- 'Arab', # 1EE24..1EE24 ; Arabic
- 'Zzzz', # 1EE25..1EE26 ; Unknown
- 'Arab', # 1EE27..1EE27 ; Arabic
- 'Zzzz', # 1EE28..1EE28 ; Unknown
- 'Arab', # 1EE29..1EE32 ; Arabic
- 'Zzzz', # 1EE33..1EE33 ; Unknown
- 'Arab', # 1EE34..1EE37 ; Arabic
- 'Zzzz', # 1EE38..1EE38 ; Unknown
- 'Arab', # 1EE39..1EE39 ; Arabic
- 'Zzzz', # 1EE3A..1EE3A ; Unknown
- 'Arab', # 1EE3B..1EE3B ; Arabic
- 'Zzzz', # 1EE3C..1EE41 ; Unknown
- 'Arab', # 1EE42..1EE42 ; Arabic
- 'Zzzz', # 1EE43..1EE46 ; Unknown
- 'Arab', # 1EE47..1EE47 ; Arabic
- 'Zzzz', # 1EE48..1EE48 ; Unknown
- 'Arab', # 1EE49..1EE49 ; Arabic
- 'Zzzz', # 1EE4A..1EE4A ; Unknown
- 'Arab', # 1EE4B..1EE4B ; Arabic
- 'Zzzz', # 1EE4C..1EE4C ; Unknown
- 'Arab', # 1EE4D..1EE4F ; Arabic
- 'Zzzz', # 1EE50..1EE50 ; Unknown
- 'Arab', # 1EE51..1EE52 ; Arabic
- 'Zzzz', # 1EE53..1EE53 ; Unknown
- 'Arab', # 1EE54..1EE54 ; Arabic
- 'Zzzz', # 1EE55..1EE56 ; Unknown
- 'Arab', # 1EE57..1EE57 ; Arabic
- 'Zzzz', # 1EE58..1EE58 ; Unknown
- 'Arab', # 1EE59..1EE59 ; Arabic
- 'Zzzz', # 1EE5A..1EE5A ; Unknown
- 'Arab', # 1EE5B..1EE5B ; Arabic
- 'Zzzz', # 1EE5C..1EE5C ; Unknown
- 'Arab', # 1EE5D..1EE5D ; Arabic
- 'Zzzz', # 1EE5E..1EE5E ; Unknown
- 'Arab', # 1EE5F..1EE5F ; Arabic
- 'Zzzz', # 1EE60..1EE60 ; Unknown
- 'Arab', # 1EE61..1EE62 ; Arabic
- 'Zzzz', # 1EE63..1EE63 ; Unknown
- 'Arab', # 1EE64..1EE64 ; Arabic
- 'Zzzz', # 1EE65..1EE66 ; Unknown
- 'Arab', # 1EE67..1EE6A ; Arabic
- 'Zzzz', # 1EE6B..1EE6B ; Unknown
- 'Arab', # 1EE6C..1EE72 ; Arabic
- 'Zzzz', # 1EE73..1EE73 ; Unknown
- 'Arab', # 1EE74..1EE77 ; Arabic
- 'Zzzz', # 1EE78..1EE78 ; Unknown
- 'Arab', # 1EE79..1EE7C ; Arabic
- 'Zzzz', # 1EE7D..1EE7D ; Unknown
- 'Arab', # 1EE7E..1EE7E ; Arabic
- 'Zzzz', # 1EE7F..1EE7F ; Unknown
- 'Arab', # 1EE80..1EE89 ; Arabic
- 'Zzzz', # 1EE8A..1EE8A ; Unknown
- 'Arab', # 1EE8B..1EE9B ; Arabic
- 'Zzzz', # 1EE9C..1EEA0 ; Unknown
- 'Arab', # 1EEA1..1EEA3 ; Arabic
- 'Zzzz', # 1EEA4..1EEA4 ; Unknown
- 'Arab', # 1EEA5..1EEA9 ; Arabic
- 'Zzzz', # 1EEAA..1EEAA ; Unknown
- 'Arab', # 1EEAB..1EEBB ; Arabic
- 'Zzzz', # 1EEBC..1EEEF ; Unknown
- 'Arab', # 1EEF0..1EEF1 ; Arabic
- 'Zzzz', # 1EEF2..1EFFF ; Unknown
- 'Zyyy', # 1F000..1F02B ; Common
- 'Zzzz', # 1F02C..1F02F ; Unknown
- 'Zyyy', # 1F030..1F093 ; Common
- 'Zzzz', # 1F094..1F09F ; Unknown
- 'Zyyy', # 1F0A0..1F0AE ; Common
- 'Zzzz', # 1F0AF..1F0B0 ; Unknown
- 'Zyyy', # 1F0B1..1F0BF ; Common
- 'Zzzz', # 1F0C0..1F0C0 ; Unknown
- 'Zyyy', # 1F0C1..1F0CF ; Common
- 'Zzzz', # 1F0D0..1F0D0 ; Unknown
- 'Zyyy', # 1F0D1..1F0F5 ; Common
- 'Zzzz', # 1F0F6..1F0FF ; Unknown
- 'Zyyy', # 1F100..1F1AD ; Common
- 'Zzzz', # 1F1AE..1F1E5 ; Unknown
- 'Zyyy', # 1F1E6..1F1FF ; Common
- 'Hira', # 1F200..1F200 ; Hiragana
- 'Zyyy', # 1F201..1F202 ; Common
- 'Zzzz', # 1F203..1F20F ; Unknown
- 'Zyyy', # 1F210..1F23B ; Common
- 'Zzzz', # 1F23C..1F23F ; Unknown
- 'Zyyy', # 1F240..1F248 ; Common
- 'Zzzz', # 1F249..1F24F ; Unknown
- 'Zyyy', # 1F250..1F251 ; Common
- 'Zzzz', # 1F252..1F25F ; Unknown
- 'Zyyy', # 1F260..1F265 ; Common
- 'Zzzz', # 1F266..1F2FF ; Unknown
- 'Zyyy', # 1F300..1F6D7 ; Common
- 'Zzzz', # 1F6D8..1F6DC ; Unknown
- 'Zyyy', # 1F6DD..1F6EC ; Common
- 'Zzzz', # 1F6ED..1F6EF ; Unknown
- 'Zyyy', # 1F6F0..1F6FC ; Common
- 'Zzzz', # 1F6FD..1F6FF ; Unknown
- 'Zyyy', # 1F700..1F773 ; Common
- 'Zzzz', # 1F774..1F77F ; Unknown
- 'Zyyy', # 1F780..1F7D8 ; Common
- 'Zzzz', # 1F7D9..1F7DF ; Unknown
- 'Zyyy', # 1F7E0..1F7EB ; Common
- 'Zzzz', # 1F7EC..1F7EF ; Unknown
- 'Zyyy', # 1F7F0..1F7F0 ; Common
- 'Zzzz', # 1F7F1..1F7FF ; Unknown
- 'Zyyy', # 1F800..1F80B ; Common
- 'Zzzz', # 1F80C..1F80F ; Unknown
- 'Zyyy', # 1F810..1F847 ; Common
- 'Zzzz', # 1F848..1F84F ; Unknown
- 'Zyyy', # 1F850..1F859 ; Common
- 'Zzzz', # 1F85A..1F85F ; Unknown
- 'Zyyy', # 1F860..1F887 ; Common
- 'Zzzz', # 1F888..1F88F ; Unknown
- 'Zyyy', # 1F890..1F8AD ; Common
- 'Zzzz', # 1F8AE..1F8AF ; Unknown
- 'Zyyy', # 1F8B0..1F8B1 ; Common
- 'Zzzz', # 1F8B2..1F8FF ; Unknown
- 'Zyyy', # 1F900..1FA53 ; Common
- 'Zzzz', # 1FA54..1FA5F ; Unknown
- 'Zyyy', # 1FA60..1FA6D ; Common
- 'Zzzz', # 1FA6E..1FA6F ; Unknown
- 'Zyyy', # 1FA70..1FA74 ; Common
- 'Zzzz', # 1FA75..1FA77 ; Unknown
- 'Zyyy', # 1FA78..1FA7C ; Common
- 'Zzzz', # 1FA7D..1FA7F ; Unknown
- 'Zyyy', # 1FA80..1FA86 ; Common
- 'Zzzz', # 1FA87..1FA8F ; Unknown
- 'Zyyy', # 1FA90..1FAAC ; Common
- 'Zzzz', # 1FAAD..1FAAF ; Unknown
- 'Zyyy', # 1FAB0..1FABA ; Common
- 'Zzzz', # 1FABB..1FABF ; Unknown
- 'Zyyy', # 1FAC0..1FAC5 ; Common
- 'Zzzz', # 1FAC6..1FACF ; Unknown
- 'Zyyy', # 1FAD0..1FAD9 ; Common
- 'Zzzz', # 1FADA..1FADF ; Unknown
- 'Zyyy', # 1FAE0..1FAE7 ; Common
- 'Zzzz', # 1FAE8..1FAEF ; Unknown
- 'Zyyy', # 1FAF0..1FAF6 ; Common
- 'Zzzz', # 1FAF7..1FAFF ; Unknown
- 'Zyyy', # 1FB00..1FB92 ; Common
- 'Zzzz', # 1FB93..1FB93 ; Unknown
- 'Zyyy', # 1FB94..1FBCA ; Common
- 'Zzzz', # 1FBCB..1FBEF ; Unknown
- 'Zyyy', # 1FBF0..1FBF9 ; Common
- 'Zzzz', # 1FBFA..1FFFF ; Unknown
- 'Hani', # 20000..2A6DF ; Han
- 'Zzzz', # 2A6E0..2A6FF ; Unknown
- 'Hani', # 2A700..2B738 ; Han
- 'Zzzz', # 2B739..2B73F ; Unknown
- 'Hani', # 2B740..2B81D ; Han
- 'Zzzz', # 2B81E..2B81F ; Unknown
- 'Hani', # 2B820..2CEA1 ; Han
- 'Zzzz', # 2CEA2..2CEAF ; Unknown
- 'Hani', # 2CEB0..2EBE0 ; Han
- 'Zzzz', # 2EBE1..2F7FF ; Unknown
- 'Hani', # 2F800..2FA1D ; Han
- 'Zzzz', # 2FA1E..2FFFF ; Unknown
- 'Hani', # 30000..3134A ; Han
- 'Zzzz', # 3134B..E0000 ; Unknown
- 'Zyyy', # E0001..E0001 ; Common
- 'Zzzz', # E0002..E001F ; Unknown
- 'Zyyy', # E0020..E007F ; Common
- 'Zzzz', # E0080..E00FF ; Unknown
- 'Zinh', # E0100..E01EF ; Inherited
- 'Zzzz', # E01F0..10FFFF ; Unknown
+ "Zyyy", # 0000..0040 ; Common
+ "Latn", # 0041..005A ; Latin
+ "Zyyy", # 005B..0060 ; Common
+ "Latn", # 0061..007A ; Latin
+ "Zyyy", # 007B..00A9 ; Common
+ "Latn", # 00AA..00AA ; Latin
+ "Zyyy", # 00AB..00B9 ; Common
+ "Latn", # 00BA..00BA ; Latin
+ "Zyyy", # 00BB..00BF ; Common
+ "Latn", # 00C0..00D6 ; Latin
+ "Zyyy", # 00D7..00D7 ; Common
+ "Latn", # 00D8..00F6 ; Latin
+ "Zyyy", # 00F7..00F7 ; Common
+ "Latn", # 00F8..02B8 ; Latin
+ "Zyyy", # 02B9..02DF ; Common
+ "Latn", # 02E0..02E4 ; Latin
+ "Zyyy", # 02E5..02E9 ; Common
+ "Bopo", # 02EA..02EB ; Bopomofo
+ "Zyyy", # 02EC..02FF ; Common
+ "Zinh", # 0300..036F ; Inherited
+ "Grek", # 0370..0373 ; Greek
+ "Zyyy", # 0374..0374 ; Common
+ "Grek", # 0375..0377 ; Greek
+ "Zzzz", # 0378..0379 ; Unknown
+ "Grek", # 037A..037D ; Greek
+ "Zyyy", # 037E..037E ; Common
+ "Grek", # 037F..037F ; Greek
+ "Zzzz", # 0380..0383 ; Unknown
+ "Grek", # 0384..0384 ; Greek
+ "Zyyy", # 0385..0385 ; Common
+ "Grek", # 0386..0386 ; Greek
+ "Zyyy", # 0387..0387 ; Common
+ "Grek", # 0388..038A ; Greek
+ "Zzzz", # 038B..038B ; Unknown
+ "Grek", # 038C..038C ; Greek
+ "Zzzz", # 038D..038D ; Unknown
+ "Grek", # 038E..03A1 ; Greek
+ "Zzzz", # 03A2..03A2 ; Unknown
+ "Grek", # 03A3..03E1 ; Greek
+ "Copt", # 03E2..03EF ; Coptic
+ "Grek", # 03F0..03FF ; Greek
+ "Cyrl", # 0400..0484 ; Cyrillic
+ "Zinh", # 0485..0486 ; Inherited
+ "Cyrl", # 0487..052F ; Cyrillic
+ "Zzzz", # 0530..0530 ; Unknown
+ "Armn", # 0531..0556 ; Armenian
+ "Zzzz", # 0557..0558 ; Unknown
+ "Armn", # 0559..058A ; Armenian
+ "Zzzz", # 058B..058C ; Unknown
+ "Armn", # 058D..058F ; Armenian
+ "Zzzz", # 0590..0590 ; Unknown
+ "Hebr", # 0591..05C7 ; Hebrew
+ "Zzzz", # 05C8..05CF ; Unknown
+ "Hebr", # 05D0..05EA ; Hebrew
+ "Zzzz", # 05EB..05EE ; Unknown
+ "Hebr", # 05EF..05F4 ; Hebrew
+ "Zzzz", # 05F5..05FF ; Unknown
+ "Arab", # 0600..0604 ; Arabic
+ "Zyyy", # 0605..0605 ; Common
+ "Arab", # 0606..060B ; Arabic
+ "Zyyy", # 060C..060C ; Common
+ "Arab", # 060D..061A ; Arabic
+ "Zyyy", # 061B..061B ; Common
+ "Arab", # 061C..061E ; Arabic
+ "Zyyy", # 061F..061F ; Common
+ "Arab", # 0620..063F ; Arabic
+ "Zyyy", # 0640..0640 ; Common
+ "Arab", # 0641..064A ; Arabic
+ "Zinh", # 064B..0655 ; Inherited
+ "Arab", # 0656..066F ; Arabic
+ "Zinh", # 0670..0670 ; Inherited
+ "Arab", # 0671..06DC ; Arabic
+ "Zyyy", # 06DD..06DD ; Common
+ "Arab", # 06DE..06FF ; Arabic
+ "Syrc", # 0700..070D ; Syriac
+ "Zzzz", # 070E..070E ; Unknown
+ "Syrc", # 070F..074A ; Syriac
+ "Zzzz", # 074B..074C ; Unknown
+ "Syrc", # 074D..074F ; Syriac
+ "Arab", # 0750..077F ; Arabic
+ "Thaa", # 0780..07B1 ; Thaana
+ "Zzzz", # 07B2..07BF ; Unknown
+ "Nkoo", # 07C0..07FA ; Nko
+ "Zzzz", # 07FB..07FC ; Unknown
+ "Nkoo", # 07FD..07FF ; Nko
+ "Samr", # 0800..082D ; Samaritan
+ "Zzzz", # 082E..082F ; Unknown
+ "Samr", # 0830..083E ; Samaritan
+ "Zzzz", # 083F..083F ; Unknown
+ "Mand", # 0840..085B ; Mandaic
+ "Zzzz", # 085C..085D ; Unknown
+ "Mand", # 085E..085E ; Mandaic
+ "Zzzz", # 085F..085F ; Unknown
+ "Syrc", # 0860..086A ; Syriac
+ "Zzzz", # 086B..086F ; Unknown
+ "Arab", # 0870..088E ; Arabic
+ "Zzzz", # 088F..088F ; Unknown
+ "Arab", # 0890..0891 ; Arabic
+ "Zzzz", # 0892..0897 ; Unknown
+ "Arab", # 0898..08E1 ; Arabic
+ "Zyyy", # 08E2..08E2 ; Common
+ "Arab", # 08E3..08FF ; Arabic
+ "Deva", # 0900..0950 ; Devanagari
+ "Zinh", # 0951..0954 ; Inherited
+ "Deva", # 0955..0963 ; Devanagari
+ "Zyyy", # 0964..0965 ; Common
+ "Deva", # 0966..097F ; Devanagari
+ "Beng", # 0980..0983 ; Bengali
+ "Zzzz", # 0984..0984 ; Unknown
+ "Beng", # 0985..098C ; Bengali
+ "Zzzz", # 098D..098E ; Unknown
+ "Beng", # 098F..0990 ; Bengali
+ "Zzzz", # 0991..0992 ; Unknown
+ "Beng", # 0993..09A8 ; Bengali
+ "Zzzz", # 09A9..09A9 ; Unknown
+ "Beng", # 09AA..09B0 ; Bengali
+ "Zzzz", # 09B1..09B1 ; Unknown
+ "Beng", # 09B2..09B2 ; Bengali
+ "Zzzz", # 09B3..09B5 ; Unknown
+ "Beng", # 09B6..09B9 ; Bengali
+ "Zzzz", # 09BA..09BB ; Unknown
+ "Beng", # 09BC..09C4 ; Bengali
+ "Zzzz", # 09C5..09C6 ; Unknown
+ "Beng", # 09C7..09C8 ; Bengali
+ "Zzzz", # 09C9..09CA ; Unknown
+ "Beng", # 09CB..09CE ; Bengali
+ "Zzzz", # 09CF..09D6 ; Unknown
+ "Beng", # 09D7..09D7 ; Bengali
+ "Zzzz", # 09D8..09DB ; Unknown
+ "Beng", # 09DC..09DD ; Bengali
+ "Zzzz", # 09DE..09DE ; Unknown
+ "Beng", # 09DF..09E3 ; Bengali
+ "Zzzz", # 09E4..09E5 ; Unknown
+ "Beng", # 09E6..09FE ; Bengali
+ "Zzzz", # 09FF..0A00 ; Unknown
+ "Guru", # 0A01..0A03 ; Gurmukhi
+ "Zzzz", # 0A04..0A04 ; Unknown
+ "Guru", # 0A05..0A0A ; Gurmukhi
+ "Zzzz", # 0A0B..0A0E ; Unknown
+ "Guru", # 0A0F..0A10 ; Gurmukhi
+ "Zzzz", # 0A11..0A12 ; Unknown
+ "Guru", # 0A13..0A28 ; Gurmukhi
+ "Zzzz", # 0A29..0A29 ; Unknown
+ "Guru", # 0A2A..0A30 ; Gurmukhi
+ "Zzzz", # 0A31..0A31 ; Unknown
+ "Guru", # 0A32..0A33 ; Gurmukhi
+ "Zzzz", # 0A34..0A34 ; Unknown
+ "Guru", # 0A35..0A36 ; Gurmukhi
+ "Zzzz", # 0A37..0A37 ; Unknown
+ "Guru", # 0A38..0A39 ; Gurmukhi
+ "Zzzz", # 0A3A..0A3B ; Unknown
+ "Guru", # 0A3C..0A3C ; Gurmukhi
+ "Zzzz", # 0A3D..0A3D ; Unknown
+ "Guru", # 0A3E..0A42 ; Gurmukhi
+ "Zzzz", # 0A43..0A46 ; Unknown
+ "Guru", # 0A47..0A48 ; Gurmukhi
+ "Zzzz", # 0A49..0A4A ; Unknown
+ "Guru", # 0A4B..0A4D ; Gurmukhi
+ "Zzzz", # 0A4E..0A50 ; Unknown
+ "Guru", # 0A51..0A51 ; Gurmukhi
+ "Zzzz", # 0A52..0A58 ; Unknown
+ "Guru", # 0A59..0A5C ; Gurmukhi
+ "Zzzz", # 0A5D..0A5D ; Unknown
+ "Guru", # 0A5E..0A5E ; Gurmukhi
+ "Zzzz", # 0A5F..0A65 ; Unknown
+ "Guru", # 0A66..0A76 ; Gurmukhi
+ "Zzzz", # 0A77..0A80 ; Unknown
+ "Gujr", # 0A81..0A83 ; Gujarati
+ "Zzzz", # 0A84..0A84 ; Unknown
+ "Gujr", # 0A85..0A8D ; Gujarati
+ "Zzzz", # 0A8E..0A8E ; Unknown
+ "Gujr", # 0A8F..0A91 ; Gujarati
+ "Zzzz", # 0A92..0A92 ; Unknown
+ "Gujr", # 0A93..0AA8 ; Gujarati
+ "Zzzz", # 0AA9..0AA9 ; Unknown
+ "Gujr", # 0AAA..0AB0 ; Gujarati
+ "Zzzz", # 0AB1..0AB1 ; Unknown
+ "Gujr", # 0AB2..0AB3 ; Gujarati
+ "Zzzz", # 0AB4..0AB4 ; Unknown
+ "Gujr", # 0AB5..0AB9 ; Gujarati
+ "Zzzz", # 0ABA..0ABB ; Unknown
+ "Gujr", # 0ABC..0AC5 ; Gujarati
+ "Zzzz", # 0AC6..0AC6 ; Unknown
+ "Gujr", # 0AC7..0AC9 ; Gujarati
+ "Zzzz", # 0ACA..0ACA ; Unknown
+ "Gujr", # 0ACB..0ACD ; Gujarati
+ "Zzzz", # 0ACE..0ACF ; Unknown
+ "Gujr", # 0AD0..0AD0 ; Gujarati
+ "Zzzz", # 0AD1..0ADF ; Unknown
+ "Gujr", # 0AE0..0AE3 ; Gujarati
+ "Zzzz", # 0AE4..0AE5 ; Unknown
+ "Gujr", # 0AE6..0AF1 ; Gujarati
+ "Zzzz", # 0AF2..0AF8 ; Unknown
+ "Gujr", # 0AF9..0AFF ; Gujarati
+ "Zzzz", # 0B00..0B00 ; Unknown
+ "Orya", # 0B01..0B03 ; Oriya
+ "Zzzz", # 0B04..0B04 ; Unknown
+ "Orya", # 0B05..0B0C ; Oriya
+ "Zzzz", # 0B0D..0B0E ; Unknown
+ "Orya", # 0B0F..0B10 ; Oriya
+ "Zzzz", # 0B11..0B12 ; Unknown
+ "Orya", # 0B13..0B28 ; Oriya
+ "Zzzz", # 0B29..0B29 ; Unknown
+ "Orya", # 0B2A..0B30 ; Oriya
+ "Zzzz", # 0B31..0B31 ; Unknown
+ "Orya", # 0B32..0B33 ; Oriya
+ "Zzzz", # 0B34..0B34 ; Unknown
+ "Orya", # 0B35..0B39 ; Oriya
+ "Zzzz", # 0B3A..0B3B ; Unknown
+ "Orya", # 0B3C..0B44 ; Oriya
+ "Zzzz", # 0B45..0B46 ; Unknown
+ "Orya", # 0B47..0B48 ; Oriya
+ "Zzzz", # 0B49..0B4A ; Unknown
+ "Orya", # 0B4B..0B4D ; Oriya
+ "Zzzz", # 0B4E..0B54 ; Unknown
+ "Orya", # 0B55..0B57 ; Oriya
+ "Zzzz", # 0B58..0B5B ; Unknown
+ "Orya", # 0B5C..0B5D ; Oriya
+ "Zzzz", # 0B5E..0B5E ; Unknown
+ "Orya", # 0B5F..0B63 ; Oriya
+ "Zzzz", # 0B64..0B65 ; Unknown
+ "Orya", # 0B66..0B77 ; Oriya
+ "Zzzz", # 0B78..0B81 ; Unknown
+ "Taml", # 0B82..0B83 ; Tamil
+ "Zzzz", # 0B84..0B84 ; Unknown
+ "Taml", # 0B85..0B8A ; Tamil
+ "Zzzz", # 0B8B..0B8D ; Unknown
+ "Taml", # 0B8E..0B90 ; Tamil
+ "Zzzz", # 0B91..0B91 ; Unknown
+ "Taml", # 0B92..0B95 ; Tamil
+ "Zzzz", # 0B96..0B98 ; Unknown
+ "Taml", # 0B99..0B9A ; Tamil
+ "Zzzz", # 0B9B..0B9B ; Unknown
+ "Taml", # 0B9C..0B9C ; Tamil
+ "Zzzz", # 0B9D..0B9D ; Unknown
+ "Taml", # 0B9E..0B9F ; Tamil
+ "Zzzz", # 0BA0..0BA2 ; Unknown
+ "Taml", # 0BA3..0BA4 ; Tamil
+ "Zzzz", # 0BA5..0BA7 ; Unknown
+ "Taml", # 0BA8..0BAA ; Tamil
+ "Zzzz", # 0BAB..0BAD ; Unknown
+ "Taml", # 0BAE..0BB9 ; Tamil
+ "Zzzz", # 0BBA..0BBD ; Unknown
+ "Taml", # 0BBE..0BC2 ; Tamil
+ "Zzzz", # 0BC3..0BC5 ; Unknown
+ "Taml", # 0BC6..0BC8 ; Tamil
+ "Zzzz", # 0BC9..0BC9 ; Unknown
+ "Taml", # 0BCA..0BCD ; Tamil
+ "Zzzz", # 0BCE..0BCF ; Unknown
+ "Taml", # 0BD0..0BD0 ; Tamil
+ "Zzzz", # 0BD1..0BD6 ; Unknown
+ "Taml", # 0BD7..0BD7 ; Tamil
+ "Zzzz", # 0BD8..0BE5 ; Unknown
+ "Taml", # 0BE6..0BFA ; Tamil
+ "Zzzz", # 0BFB..0BFF ; Unknown
+ "Telu", # 0C00..0C0C ; Telugu
+ "Zzzz", # 0C0D..0C0D ; Unknown
+ "Telu", # 0C0E..0C10 ; Telugu
+ "Zzzz", # 0C11..0C11 ; Unknown
+ "Telu", # 0C12..0C28 ; Telugu
+ "Zzzz", # 0C29..0C29 ; Unknown
+ "Telu", # 0C2A..0C39 ; Telugu
+ "Zzzz", # 0C3A..0C3B ; Unknown
+ "Telu", # 0C3C..0C44 ; Telugu
+ "Zzzz", # 0C45..0C45 ; Unknown
+ "Telu", # 0C46..0C48 ; Telugu
+ "Zzzz", # 0C49..0C49 ; Unknown
+ "Telu", # 0C4A..0C4D ; Telugu
+ "Zzzz", # 0C4E..0C54 ; Unknown
+ "Telu", # 0C55..0C56 ; Telugu
+ "Zzzz", # 0C57..0C57 ; Unknown
+ "Telu", # 0C58..0C5A ; Telugu
+ "Zzzz", # 0C5B..0C5C ; Unknown
+ "Telu", # 0C5D..0C5D ; Telugu
+ "Zzzz", # 0C5E..0C5F ; Unknown
+ "Telu", # 0C60..0C63 ; Telugu
+ "Zzzz", # 0C64..0C65 ; Unknown
+ "Telu", # 0C66..0C6F ; Telugu
+ "Zzzz", # 0C70..0C76 ; Unknown
+ "Telu", # 0C77..0C7F ; Telugu
+ "Knda", # 0C80..0C8C ; Kannada
+ "Zzzz", # 0C8D..0C8D ; Unknown
+ "Knda", # 0C8E..0C90 ; Kannada
+ "Zzzz", # 0C91..0C91 ; Unknown
+ "Knda", # 0C92..0CA8 ; Kannada
+ "Zzzz", # 0CA9..0CA9 ; Unknown
+ "Knda", # 0CAA..0CB3 ; Kannada
+ "Zzzz", # 0CB4..0CB4 ; Unknown
+ "Knda", # 0CB5..0CB9 ; Kannada
+ "Zzzz", # 0CBA..0CBB ; Unknown
+ "Knda", # 0CBC..0CC4 ; Kannada
+ "Zzzz", # 0CC5..0CC5 ; Unknown
+ "Knda", # 0CC6..0CC8 ; Kannada
+ "Zzzz", # 0CC9..0CC9 ; Unknown
+ "Knda", # 0CCA..0CCD ; Kannada
+ "Zzzz", # 0CCE..0CD4 ; Unknown
+ "Knda", # 0CD5..0CD6 ; Kannada
+ "Zzzz", # 0CD7..0CDC ; Unknown
+ "Knda", # 0CDD..0CDE ; Kannada
+ "Zzzz", # 0CDF..0CDF ; Unknown
+ "Knda", # 0CE0..0CE3 ; Kannada
+ "Zzzz", # 0CE4..0CE5 ; Unknown
+ "Knda", # 0CE6..0CEF ; Kannada
+ "Zzzz", # 0CF0..0CF0 ; Unknown
+ "Knda", # 0CF1..0CF3 ; Kannada
+ "Zzzz", # 0CF4..0CFF ; Unknown
+ "Mlym", # 0D00..0D0C ; Malayalam
+ "Zzzz", # 0D0D..0D0D ; Unknown
+ "Mlym", # 0D0E..0D10 ; Malayalam
+ "Zzzz", # 0D11..0D11 ; Unknown
+ "Mlym", # 0D12..0D44 ; Malayalam
+ "Zzzz", # 0D45..0D45 ; Unknown
+ "Mlym", # 0D46..0D48 ; Malayalam
+ "Zzzz", # 0D49..0D49 ; Unknown
+ "Mlym", # 0D4A..0D4F ; Malayalam
+ "Zzzz", # 0D50..0D53 ; Unknown
+ "Mlym", # 0D54..0D63 ; Malayalam
+ "Zzzz", # 0D64..0D65 ; Unknown
+ "Mlym", # 0D66..0D7F ; Malayalam
+ "Zzzz", # 0D80..0D80 ; Unknown
+ "Sinh", # 0D81..0D83 ; Sinhala
+ "Zzzz", # 0D84..0D84 ; Unknown
+ "Sinh", # 0D85..0D96 ; Sinhala
+ "Zzzz", # 0D97..0D99 ; Unknown
+ "Sinh", # 0D9A..0DB1 ; Sinhala
+ "Zzzz", # 0DB2..0DB2 ; Unknown
+ "Sinh", # 0DB3..0DBB ; Sinhala
+ "Zzzz", # 0DBC..0DBC ; Unknown
+ "Sinh", # 0DBD..0DBD ; Sinhala
+ "Zzzz", # 0DBE..0DBF ; Unknown
+ "Sinh", # 0DC0..0DC6 ; Sinhala
+ "Zzzz", # 0DC7..0DC9 ; Unknown
+ "Sinh", # 0DCA..0DCA ; Sinhala
+ "Zzzz", # 0DCB..0DCE ; Unknown
+ "Sinh", # 0DCF..0DD4 ; Sinhala
+ "Zzzz", # 0DD5..0DD5 ; Unknown
+ "Sinh", # 0DD6..0DD6 ; Sinhala
+ "Zzzz", # 0DD7..0DD7 ; Unknown
+ "Sinh", # 0DD8..0DDF ; Sinhala
+ "Zzzz", # 0DE0..0DE5 ; Unknown
+ "Sinh", # 0DE6..0DEF ; Sinhala
+ "Zzzz", # 0DF0..0DF1 ; Unknown
+ "Sinh", # 0DF2..0DF4 ; Sinhala
+ "Zzzz", # 0DF5..0E00 ; Unknown
+ "Thai", # 0E01..0E3A ; Thai
+ "Zzzz", # 0E3B..0E3E ; Unknown
+ "Zyyy", # 0E3F..0E3F ; Common
+ "Thai", # 0E40..0E5B ; Thai
+ "Zzzz", # 0E5C..0E80 ; Unknown
+ "Laoo", # 0E81..0E82 ; Lao
+ "Zzzz", # 0E83..0E83 ; Unknown
+ "Laoo", # 0E84..0E84 ; Lao
+ "Zzzz", # 0E85..0E85 ; Unknown
+ "Laoo", # 0E86..0E8A ; Lao
+ "Zzzz", # 0E8B..0E8B ; Unknown
+ "Laoo", # 0E8C..0EA3 ; Lao
+ "Zzzz", # 0EA4..0EA4 ; Unknown
+ "Laoo", # 0EA5..0EA5 ; Lao
+ "Zzzz", # 0EA6..0EA6 ; Unknown
+ "Laoo", # 0EA7..0EBD ; Lao
+ "Zzzz", # 0EBE..0EBF ; Unknown
+ "Laoo", # 0EC0..0EC4 ; Lao
+ "Zzzz", # 0EC5..0EC5 ; Unknown
+ "Laoo", # 0EC6..0EC6 ; Lao
+ "Zzzz", # 0EC7..0EC7 ; Unknown
+ "Laoo", # 0EC8..0ECE ; Lao
+ "Zzzz", # 0ECF..0ECF ; Unknown
+ "Laoo", # 0ED0..0ED9 ; Lao
+ "Zzzz", # 0EDA..0EDB ; Unknown
+ "Laoo", # 0EDC..0EDF ; Lao
+ "Zzzz", # 0EE0..0EFF ; Unknown
+ "Tibt", # 0F00..0F47 ; Tibetan
+ "Zzzz", # 0F48..0F48 ; Unknown
+ "Tibt", # 0F49..0F6C ; Tibetan
+ "Zzzz", # 0F6D..0F70 ; Unknown
+ "Tibt", # 0F71..0F97 ; Tibetan
+ "Zzzz", # 0F98..0F98 ; Unknown
+ "Tibt", # 0F99..0FBC ; Tibetan
+ "Zzzz", # 0FBD..0FBD ; Unknown
+ "Tibt", # 0FBE..0FCC ; Tibetan
+ "Zzzz", # 0FCD..0FCD ; Unknown
+ "Tibt", # 0FCE..0FD4 ; Tibetan
+ "Zyyy", # 0FD5..0FD8 ; Common
+ "Tibt", # 0FD9..0FDA ; Tibetan
+ "Zzzz", # 0FDB..0FFF ; Unknown
+ "Mymr", # 1000..109F ; Myanmar
+ "Geor", # 10A0..10C5 ; Georgian
+ "Zzzz", # 10C6..10C6 ; Unknown
+ "Geor", # 10C7..10C7 ; Georgian
+ "Zzzz", # 10C8..10CC ; Unknown
+ "Geor", # 10CD..10CD ; Georgian
+ "Zzzz", # 10CE..10CF ; Unknown
+ "Geor", # 10D0..10FA ; Georgian
+ "Zyyy", # 10FB..10FB ; Common
+ "Geor", # 10FC..10FF ; Georgian
+ "Hang", # 1100..11FF ; Hangul
+ "Ethi", # 1200..1248 ; Ethiopic
+ "Zzzz", # 1249..1249 ; Unknown
+ "Ethi", # 124A..124D ; Ethiopic
+ "Zzzz", # 124E..124F ; Unknown
+ "Ethi", # 1250..1256 ; Ethiopic
+ "Zzzz", # 1257..1257 ; Unknown
+ "Ethi", # 1258..1258 ; Ethiopic
+ "Zzzz", # 1259..1259 ; Unknown
+ "Ethi", # 125A..125D ; Ethiopic
+ "Zzzz", # 125E..125F ; Unknown
+ "Ethi", # 1260..1288 ; Ethiopic
+ "Zzzz", # 1289..1289 ; Unknown
+ "Ethi", # 128A..128D ; Ethiopic
+ "Zzzz", # 128E..128F ; Unknown
+ "Ethi", # 1290..12B0 ; Ethiopic
+ "Zzzz", # 12B1..12B1 ; Unknown
+ "Ethi", # 12B2..12B5 ; Ethiopic
+ "Zzzz", # 12B6..12B7 ; Unknown
+ "Ethi", # 12B8..12BE ; Ethiopic
+ "Zzzz", # 12BF..12BF ; Unknown
+ "Ethi", # 12C0..12C0 ; Ethiopic
+ "Zzzz", # 12C1..12C1 ; Unknown
+ "Ethi", # 12C2..12C5 ; Ethiopic
+ "Zzzz", # 12C6..12C7 ; Unknown
+ "Ethi", # 12C8..12D6 ; Ethiopic
+ "Zzzz", # 12D7..12D7 ; Unknown
+ "Ethi", # 12D8..1310 ; Ethiopic
+ "Zzzz", # 1311..1311 ; Unknown
+ "Ethi", # 1312..1315 ; Ethiopic
+ "Zzzz", # 1316..1317 ; Unknown
+ "Ethi", # 1318..135A ; Ethiopic
+ "Zzzz", # 135B..135C ; Unknown
+ "Ethi", # 135D..137C ; Ethiopic
+ "Zzzz", # 137D..137F ; Unknown
+ "Ethi", # 1380..1399 ; Ethiopic
+ "Zzzz", # 139A..139F ; Unknown
+ "Cher", # 13A0..13F5 ; Cherokee
+ "Zzzz", # 13F6..13F7 ; Unknown
+ "Cher", # 13F8..13FD ; Cherokee
+ "Zzzz", # 13FE..13FF ; Unknown
+ "Cans", # 1400..167F ; Canadian_Aboriginal
+ "Ogam", # 1680..169C ; Ogham
+ "Zzzz", # 169D..169F ; Unknown
+ "Runr", # 16A0..16EA ; Runic
+ "Zyyy", # 16EB..16ED ; Common
+ "Runr", # 16EE..16F8 ; Runic
+ "Zzzz", # 16F9..16FF ; Unknown
+ "Tglg", # 1700..1715 ; Tagalog
+ "Zzzz", # 1716..171E ; Unknown
+ "Tglg", # 171F..171F ; Tagalog
+ "Hano", # 1720..1734 ; Hanunoo
+ "Zyyy", # 1735..1736 ; Common
+ "Zzzz", # 1737..173F ; Unknown
+ "Buhd", # 1740..1753 ; Buhid
+ "Zzzz", # 1754..175F ; Unknown
+ "Tagb", # 1760..176C ; Tagbanwa
+ "Zzzz", # 176D..176D ; Unknown
+ "Tagb", # 176E..1770 ; Tagbanwa
+ "Zzzz", # 1771..1771 ; Unknown
+ "Tagb", # 1772..1773 ; Tagbanwa
+ "Zzzz", # 1774..177F ; Unknown
+ "Khmr", # 1780..17DD ; Khmer
+ "Zzzz", # 17DE..17DF ; Unknown
+ "Khmr", # 17E0..17E9 ; Khmer
+ "Zzzz", # 17EA..17EF ; Unknown
+ "Khmr", # 17F0..17F9 ; Khmer
+ "Zzzz", # 17FA..17FF ; Unknown
+ "Mong", # 1800..1801 ; Mongolian
+ "Zyyy", # 1802..1803 ; Common
+ "Mong", # 1804..1804 ; Mongolian
+ "Zyyy", # 1805..1805 ; Common
+ "Mong", # 1806..1819 ; Mongolian
+ "Zzzz", # 181A..181F ; Unknown
+ "Mong", # 1820..1878 ; Mongolian
+ "Zzzz", # 1879..187F ; Unknown
+ "Mong", # 1880..18AA ; Mongolian
+ "Zzzz", # 18AB..18AF ; Unknown
+ "Cans", # 18B0..18F5 ; Canadian_Aboriginal
+ "Zzzz", # 18F6..18FF ; Unknown
+ "Limb", # 1900..191E ; Limbu
+ "Zzzz", # 191F..191F ; Unknown
+ "Limb", # 1920..192B ; Limbu
+ "Zzzz", # 192C..192F ; Unknown
+ "Limb", # 1930..193B ; Limbu
+ "Zzzz", # 193C..193F ; Unknown
+ "Limb", # 1940..1940 ; Limbu
+ "Zzzz", # 1941..1943 ; Unknown
+ "Limb", # 1944..194F ; Limbu
+ "Tale", # 1950..196D ; Tai_Le
+ "Zzzz", # 196E..196F ; Unknown
+ "Tale", # 1970..1974 ; Tai_Le
+ "Zzzz", # 1975..197F ; Unknown
+ "Talu", # 1980..19AB ; New_Tai_Lue
+ "Zzzz", # 19AC..19AF ; Unknown
+ "Talu", # 19B0..19C9 ; New_Tai_Lue
+ "Zzzz", # 19CA..19CF ; Unknown
+ "Talu", # 19D0..19DA ; New_Tai_Lue
+ "Zzzz", # 19DB..19DD ; Unknown
+ "Talu", # 19DE..19DF ; New_Tai_Lue
+ "Khmr", # 19E0..19FF ; Khmer
+ "Bugi", # 1A00..1A1B ; Buginese
+ "Zzzz", # 1A1C..1A1D ; Unknown
+ "Bugi", # 1A1E..1A1F ; Buginese
+ "Lana", # 1A20..1A5E ; Tai_Tham
+ "Zzzz", # 1A5F..1A5F ; Unknown
+ "Lana", # 1A60..1A7C ; Tai_Tham
+ "Zzzz", # 1A7D..1A7E ; Unknown
+ "Lana", # 1A7F..1A89 ; Tai_Tham
+ "Zzzz", # 1A8A..1A8F ; Unknown
+ "Lana", # 1A90..1A99 ; Tai_Tham
+ "Zzzz", # 1A9A..1A9F ; Unknown
+ "Lana", # 1AA0..1AAD ; Tai_Tham
+ "Zzzz", # 1AAE..1AAF ; Unknown
+ "Zinh", # 1AB0..1ACE ; Inherited
+ "Zzzz", # 1ACF..1AFF ; Unknown
+ "Bali", # 1B00..1B4C ; Balinese
+ "Zzzz", # 1B4D..1B4F ; Unknown
+ "Bali", # 1B50..1B7E ; Balinese
+ "Zzzz", # 1B7F..1B7F ; Unknown
+ "Sund", # 1B80..1BBF ; Sundanese
+ "Batk", # 1BC0..1BF3 ; Batak
+ "Zzzz", # 1BF4..1BFB ; Unknown
+ "Batk", # 1BFC..1BFF ; Batak
+ "Lepc", # 1C00..1C37 ; Lepcha
+ "Zzzz", # 1C38..1C3A ; Unknown
+ "Lepc", # 1C3B..1C49 ; Lepcha
+ "Zzzz", # 1C4A..1C4C ; Unknown
+ "Lepc", # 1C4D..1C4F ; Lepcha
+ "Olck", # 1C50..1C7F ; Ol_Chiki
+ "Cyrl", # 1C80..1C88 ; Cyrillic
+ "Zzzz", # 1C89..1C8F ; Unknown
+ "Geor", # 1C90..1CBA ; Georgian
+ "Zzzz", # 1CBB..1CBC ; Unknown
+ "Geor", # 1CBD..1CBF ; Georgian
+ "Sund", # 1CC0..1CC7 ; Sundanese
+ "Zzzz", # 1CC8..1CCF ; Unknown
+ "Zinh", # 1CD0..1CD2 ; Inherited
+ "Zyyy", # 1CD3..1CD3 ; Common
+ "Zinh", # 1CD4..1CE0 ; Inherited
+ "Zyyy", # 1CE1..1CE1 ; Common
+ "Zinh", # 1CE2..1CE8 ; Inherited
+ "Zyyy", # 1CE9..1CEC ; Common
+ "Zinh", # 1CED..1CED ; Inherited
+ "Zyyy", # 1CEE..1CF3 ; Common
+ "Zinh", # 1CF4..1CF4 ; Inherited
+ "Zyyy", # 1CF5..1CF7 ; Common
+ "Zinh", # 1CF8..1CF9 ; Inherited
+ "Zyyy", # 1CFA..1CFA ; Common
+ "Zzzz", # 1CFB..1CFF ; Unknown
+ "Latn", # 1D00..1D25 ; Latin
+ "Grek", # 1D26..1D2A ; Greek
+ "Cyrl", # 1D2B..1D2B ; Cyrillic
+ "Latn", # 1D2C..1D5C ; Latin
+ "Grek", # 1D5D..1D61 ; Greek
+ "Latn", # 1D62..1D65 ; Latin
+ "Grek", # 1D66..1D6A ; Greek
+ "Latn", # 1D6B..1D77 ; Latin
+ "Cyrl", # 1D78..1D78 ; Cyrillic
+ "Latn", # 1D79..1DBE ; Latin
+ "Grek", # 1DBF..1DBF ; Greek
+ "Zinh", # 1DC0..1DFF ; Inherited
+ "Latn", # 1E00..1EFF ; Latin
+ "Grek", # 1F00..1F15 ; Greek
+ "Zzzz", # 1F16..1F17 ; Unknown
+ "Grek", # 1F18..1F1D ; Greek
+ "Zzzz", # 1F1E..1F1F ; Unknown
+ "Grek", # 1F20..1F45 ; Greek
+ "Zzzz", # 1F46..1F47 ; Unknown
+ "Grek", # 1F48..1F4D ; Greek
+ "Zzzz", # 1F4E..1F4F ; Unknown
+ "Grek", # 1F50..1F57 ; Greek
+ "Zzzz", # 1F58..1F58 ; Unknown
+ "Grek", # 1F59..1F59 ; Greek
+ "Zzzz", # 1F5A..1F5A ; Unknown
+ "Grek", # 1F5B..1F5B ; Greek
+ "Zzzz", # 1F5C..1F5C ; Unknown
+ "Grek", # 1F5D..1F5D ; Greek
+ "Zzzz", # 1F5E..1F5E ; Unknown
+ "Grek", # 1F5F..1F7D ; Greek
+ "Zzzz", # 1F7E..1F7F ; Unknown
+ "Grek", # 1F80..1FB4 ; Greek
+ "Zzzz", # 1FB5..1FB5 ; Unknown
+ "Grek", # 1FB6..1FC4 ; Greek
+ "Zzzz", # 1FC5..1FC5 ; Unknown
+ "Grek", # 1FC6..1FD3 ; Greek
+ "Zzzz", # 1FD4..1FD5 ; Unknown
+ "Grek", # 1FD6..1FDB ; Greek
+ "Zzzz", # 1FDC..1FDC ; Unknown
+ "Grek", # 1FDD..1FEF ; Greek
+ "Zzzz", # 1FF0..1FF1 ; Unknown
+ "Grek", # 1FF2..1FF4 ; Greek
+ "Zzzz", # 1FF5..1FF5 ; Unknown
+ "Grek", # 1FF6..1FFE ; Greek
+ "Zzzz", # 1FFF..1FFF ; Unknown
+ "Zyyy", # 2000..200B ; Common
+ "Zinh", # 200C..200D ; Inherited
+ "Zyyy", # 200E..2064 ; Common
+ "Zzzz", # 2065..2065 ; Unknown
+ "Zyyy", # 2066..2070 ; Common
+ "Latn", # 2071..2071 ; Latin
+ "Zzzz", # 2072..2073 ; Unknown
+ "Zyyy", # 2074..207E ; Common
+ "Latn", # 207F..207F ; Latin
+ "Zyyy", # 2080..208E ; Common
+ "Zzzz", # 208F..208F ; Unknown
+ "Latn", # 2090..209C ; Latin
+ "Zzzz", # 209D..209F ; Unknown
+ "Zyyy", # 20A0..20C0 ; Common
+ "Zzzz", # 20C1..20CF ; Unknown
+ "Zinh", # 20D0..20F0 ; Inherited
+ "Zzzz", # 20F1..20FF ; Unknown
+ "Zyyy", # 2100..2125 ; Common
+ "Grek", # 2126..2126 ; Greek
+ "Zyyy", # 2127..2129 ; Common
+ "Latn", # 212A..212B ; Latin
+ "Zyyy", # 212C..2131 ; Common
+ "Latn", # 2132..2132 ; Latin
+ "Zyyy", # 2133..214D ; Common
+ "Latn", # 214E..214E ; Latin
+ "Zyyy", # 214F..215F ; Common
+ "Latn", # 2160..2188 ; Latin
+ "Zyyy", # 2189..218B ; Common
+ "Zzzz", # 218C..218F ; Unknown
+ "Zyyy", # 2190..2426 ; Common
+ "Zzzz", # 2427..243F ; Unknown
+ "Zyyy", # 2440..244A ; Common
+ "Zzzz", # 244B..245F ; Unknown
+ "Zyyy", # 2460..27FF ; Common
+ "Brai", # 2800..28FF ; Braille
+ "Zyyy", # 2900..2B73 ; Common
+ "Zzzz", # 2B74..2B75 ; Unknown
+ "Zyyy", # 2B76..2B95 ; Common
+ "Zzzz", # 2B96..2B96 ; Unknown
+ "Zyyy", # 2B97..2BFF ; Common
+ "Glag", # 2C00..2C5F ; Glagolitic
+ "Latn", # 2C60..2C7F ; Latin
+ "Copt", # 2C80..2CF3 ; Coptic
+ "Zzzz", # 2CF4..2CF8 ; Unknown
+ "Copt", # 2CF9..2CFF ; Coptic
+ "Geor", # 2D00..2D25 ; Georgian
+ "Zzzz", # 2D26..2D26 ; Unknown
+ "Geor", # 2D27..2D27 ; Georgian
+ "Zzzz", # 2D28..2D2C ; Unknown
+ "Geor", # 2D2D..2D2D ; Georgian
+ "Zzzz", # 2D2E..2D2F ; Unknown
+ "Tfng", # 2D30..2D67 ; Tifinagh
+ "Zzzz", # 2D68..2D6E ; Unknown
+ "Tfng", # 2D6F..2D70 ; Tifinagh
+ "Zzzz", # 2D71..2D7E ; Unknown
+ "Tfng", # 2D7F..2D7F ; Tifinagh
+ "Ethi", # 2D80..2D96 ; Ethiopic
+ "Zzzz", # 2D97..2D9F ; Unknown
+ "Ethi", # 2DA0..2DA6 ; Ethiopic
+ "Zzzz", # 2DA7..2DA7 ; Unknown
+ "Ethi", # 2DA8..2DAE ; Ethiopic
+ "Zzzz", # 2DAF..2DAF ; Unknown
+ "Ethi", # 2DB0..2DB6 ; Ethiopic
+ "Zzzz", # 2DB7..2DB7 ; Unknown
+ "Ethi", # 2DB8..2DBE ; Ethiopic
+ "Zzzz", # 2DBF..2DBF ; Unknown
+ "Ethi", # 2DC0..2DC6 ; Ethiopic
+ "Zzzz", # 2DC7..2DC7 ; Unknown
+ "Ethi", # 2DC8..2DCE ; Ethiopic
+ "Zzzz", # 2DCF..2DCF ; Unknown
+ "Ethi", # 2DD0..2DD6 ; Ethiopic
+ "Zzzz", # 2DD7..2DD7 ; Unknown
+ "Ethi", # 2DD8..2DDE ; Ethiopic
+ "Zzzz", # 2DDF..2DDF ; Unknown
+ "Cyrl", # 2DE0..2DFF ; Cyrillic
+ "Zyyy", # 2E00..2E5D ; Common
+ "Zzzz", # 2E5E..2E7F ; Unknown
+ "Hani", # 2E80..2E99 ; Han
+ "Zzzz", # 2E9A..2E9A ; Unknown
+ "Hani", # 2E9B..2EF3 ; Han
+ "Zzzz", # 2EF4..2EFF ; Unknown
+ "Hani", # 2F00..2FD5 ; Han
+ "Zzzz", # 2FD6..2FEF ; Unknown
+ "Zyyy", # 2FF0..2FFB ; Common
+ "Zzzz", # 2FFC..2FFF ; Unknown
+ "Zyyy", # 3000..3004 ; Common
+ "Hani", # 3005..3005 ; Han
+ "Zyyy", # 3006..3006 ; Common
+ "Hani", # 3007..3007 ; Han
+ "Zyyy", # 3008..3020 ; Common
+ "Hani", # 3021..3029 ; Han
+ "Zinh", # 302A..302D ; Inherited
+ "Hang", # 302E..302F ; Hangul
+ "Zyyy", # 3030..3037 ; Common
+ "Hani", # 3038..303B ; Han
+ "Zyyy", # 303C..303F ; Common
+ "Zzzz", # 3040..3040 ; Unknown
+ "Hira", # 3041..3096 ; Hiragana
+ "Zzzz", # 3097..3098 ; Unknown
+ "Zinh", # 3099..309A ; Inherited
+ "Zyyy", # 309B..309C ; Common
+ "Hira", # 309D..309F ; Hiragana
+ "Zyyy", # 30A0..30A0 ; Common
+ "Kana", # 30A1..30FA ; Katakana
+ "Zyyy", # 30FB..30FC ; Common
+ "Kana", # 30FD..30FF ; Katakana
+ "Zzzz", # 3100..3104 ; Unknown
+ "Bopo", # 3105..312F ; Bopomofo
+ "Zzzz", # 3130..3130 ; Unknown
+ "Hang", # 3131..318E ; Hangul
+ "Zzzz", # 318F..318F ; Unknown
+ "Zyyy", # 3190..319F ; Common
+ "Bopo", # 31A0..31BF ; Bopomofo
+ "Zyyy", # 31C0..31E3 ; Common
+ "Zzzz", # 31E4..31EF ; Unknown
+ "Kana", # 31F0..31FF ; Katakana
+ "Hang", # 3200..321E ; Hangul
+ "Zzzz", # 321F..321F ; Unknown
+ "Zyyy", # 3220..325F ; Common
+ "Hang", # 3260..327E ; Hangul
+ "Zyyy", # 327F..32CF ; Common
+ "Kana", # 32D0..32FE ; Katakana
+ "Zyyy", # 32FF..32FF ; Common
+ "Kana", # 3300..3357 ; Katakana
+ "Zyyy", # 3358..33FF ; Common
+ "Hani", # 3400..4DBF ; Han
+ "Zyyy", # 4DC0..4DFF ; Common
+ "Hani", # 4E00..9FFF ; Han
+ "Yiii", # A000..A48C ; Yi
+ "Zzzz", # A48D..A48F ; Unknown
+ "Yiii", # A490..A4C6 ; Yi
+ "Zzzz", # A4C7..A4CF ; Unknown
+ "Lisu", # A4D0..A4FF ; Lisu
+ "Vaii", # A500..A62B ; Vai
+ "Zzzz", # A62C..A63F ; Unknown
+ "Cyrl", # A640..A69F ; Cyrillic
+ "Bamu", # A6A0..A6F7 ; Bamum
+ "Zzzz", # A6F8..A6FF ; Unknown
+ "Zyyy", # A700..A721 ; Common
+ "Latn", # A722..A787 ; Latin
+ "Zyyy", # A788..A78A ; Common
+ "Latn", # A78B..A7CA ; Latin
+ "Zzzz", # A7CB..A7CF ; Unknown
+ "Latn", # A7D0..A7D1 ; Latin
+ "Zzzz", # A7D2..A7D2 ; Unknown
+ "Latn", # A7D3..A7D3 ; Latin
+ "Zzzz", # A7D4..A7D4 ; Unknown
+ "Latn", # A7D5..A7D9 ; Latin
+ "Zzzz", # A7DA..A7F1 ; Unknown
+ "Latn", # A7F2..A7FF ; Latin
+ "Sylo", # A800..A82C ; Syloti_Nagri
+ "Zzzz", # A82D..A82F ; Unknown
+ "Zyyy", # A830..A839 ; Common
+ "Zzzz", # A83A..A83F ; Unknown
+ "Phag", # A840..A877 ; Phags_Pa
+ "Zzzz", # A878..A87F ; Unknown
+ "Saur", # A880..A8C5 ; Saurashtra
+ "Zzzz", # A8C6..A8CD ; Unknown
+ "Saur", # A8CE..A8D9 ; Saurashtra
+ "Zzzz", # A8DA..A8DF ; Unknown
+ "Deva", # A8E0..A8FF ; Devanagari
+ "Kali", # A900..A92D ; Kayah_Li
+ "Zyyy", # A92E..A92E ; Common
+ "Kali", # A92F..A92F ; Kayah_Li
+ "Rjng", # A930..A953 ; Rejang
+ "Zzzz", # A954..A95E ; Unknown
+ "Rjng", # A95F..A95F ; Rejang
+ "Hang", # A960..A97C ; Hangul
+ "Zzzz", # A97D..A97F ; Unknown
+ "Java", # A980..A9CD ; Javanese
+ "Zzzz", # A9CE..A9CE ; Unknown
+ "Zyyy", # A9CF..A9CF ; Common
+ "Java", # A9D0..A9D9 ; Javanese
+ "Zzzz", # A9DA..A9DD ; Unknown
+ "Java", # A9DE..A9DF ; Javanese
+ "Mymr", # A9E0..A9FE ; Myanmar
+ "Zzzz", # A9FF..A9FF ; Unknown
+ "Cham", # AA00..AA36 ; Cham
+ "Zzzz", # AA37..AA3F ; Unknown
+ "Cham", # AA40..AA4D ; Cham
+ "Zzzz", # AA4E..AA4F ; Unknown
+ "Cham", # AA50..AA59 ; Cham
+ "Zzzz", # AA5A..AA5B ; Unknown
+ "Cham", # AA5C..AA5F ; Cham
+ "Mymr", # AA60..AA7F ; Myanmar
+ "Tavt", # AA80..AAC2 ; Tai_Viet
+ "Zzzz", # AAC3..AADA ; Unknown
+ "Tavt", # AADB..AADF ; Tai_Viet
+ "Mtei", # AAE0..AAF6 ; Meetei_Mayek
+ "Zzzz", # AAF7..AB00 ; Unknown
+ "Ethi", # AB01..AB06 ; Ethiopic
+ "Zzzz", # AB07..AB08 ; Unknown
+ "Ethi", # AB09..AB0E ; Ethiopic
+ "Zzzz", # AB0F..AB10 ; Unknown
+ "Ethi", # AB11..AB16 ; Ethiopic
+ "Zzzz", # AB17..AB1F ; Unknown
+ "Ethi", # AB20..AB26 ; Ethiopic
+ "Zzzz", # AB27..AB27 ; Unknown
+ "Ethi", # AB28..AB2E ; Ethiopic
+ "Zzzz", # AB2F..AB2F ; Unknown
+ "Latn", # AB30..AB5A ; Latin
+ "Zyyy", # AB5B..AB5B ; Common
+ "Latn", # AB5C..AB64 ; Latin
+ "Grek", # AB65..AB65 ; Greek
+ "Latn", # AB66..AB69 ; Latin
+ "Zyyy", # AB6A..AB6B ; Common
+ "Zzzz", # AB6C..AB6F ; Unknown
+ "Cher", # AB70..ABBF ; Cherokee
+ "Mtei", # ABC0..ABED ; Meetei_Mayek
+ "Zzzz", # ABEE..ABEF ; Unknown
+ "Mtei", # ABF0..ABF9 ; Meetei_Mayek
+ "Zzzz", # ABFA..ABFF ; Unknown
+ "Hang", # AC00..D7A3 ; Hangul
+ "Zzzz", # D7A4..D7AF ; Unknown
+ "Hang", # D7B0..D7C6 ; Hangul
+ "Zzzz", # D7C7..D7CA ; Unknown
+ "Hang", # D7CB..D7FB ; Hangul
+ "Zzzz", # D7FC..F8FF ; Unknown
+ "Hani", # F900..FA6D ; Han
+ "Zzzz", # FA6E..FA6F ; Unknown
+ "Hani", # FA70..FAD9 ; Han
+ "Zzzz", # FADA..FAFF ; Unknown
+ "Latn", # FB00..FB06 ; Latin
+ "Zzzz", # FB07..FB12 ; Unknown
+ "Armn", # FB13..FB17 ; Armenian
+ "Zzzz", # FB18..FB1C ; Unknown
+ "Hebr", # FB1D..FB36 ; Hebrew
+ "Zzzz", # FB37..FB37 ; Unknown
+ "Hebr", # FB38..FB3C ; Hebrew
+ "Zzzz", # FB3D..FB3D ; Unknown
+ "Hebr", # FB3E..FB3E ; Hebrew
+ "Zzzz", # FB3F..FB3F ; Unknown
+ "Hebr", # FB40..FB41 ; Hebrew
+ "Zzzz", # FB42..FB42 ; Unknown
+ "Hebr", # FB43..FB44 ; Hebrew
+ "Zzzz", # FB45..FB45 ; Unknown
+ "Hebr", # FB46..FB4F ; Hebrew
+ "Arab", # FB50..FBC2 ; Arabic
+ "Zzzz", # FBC3..FBD2 ; Unknown
+ "Arab", # FBD3..FD3D ; Arabic
+ "Zyyy", # FD3E..FD3F ; Common
+ "Arab", # FD40..FD8F ; Arabic
+ "Zzzz", # FD90..FD91 ; Unknown
+ "Arab", # FD92..FDC7 ; Arabic
+ "Zzzz", # FDC8..FDCE ; Unknown
+ "Arab", # FDCF..FDCF ; Arabic
+ "Zzzz", # FDD0..FDEF ; Unknown
+ "Arab", # FDF0..FDFF ; Arabic
+ "Zinh", # FE00..FE0F ; Inherited
+ "Zyyy", # FE10..FE19 ; Common
+ "Zzzz", # FE1A..FE1F ; Unknown
+ "Zinh", # FE20..FE2D ; Inherited
+ "Cyrl", # FE2E..FE2F ; Cyrillic
+ "Zyyy", # FE30..FE52 ; Common
+ "Zzzz", # FE53..FE53 ; Unknown
+ "Zyyy", # FE54..FE66 ; Common
+ "Zzzz", # FE67..FE67 ; Unknown
+ "Zyyy", # FE68..FE6B ; Common
+ "Zzzz", # FE6C..FE6F ; Unknown
+ "Arab", # FE70..FE74 ; Arabic
+ "Zzzz", # FE75..FE75 ; Unknown
+ "Arab", # FE76..FEFC ; Arabic
+ "Zzzz", # FEFD..FEFE ; Unknown
+ "Zyyy", # FEFF..FEFF ; Common
+ "Zzzz", # FF00..FF00 ; Unknown
+ "Zyyy", # FF01..FF20 ; Common
+ "Latn", # FF21..FF3A ; Latin
+ "Zyyy", # FF3B..FF40 ; Common
+ "Latn", # FF41..FF5A ; Latin
+ "Zyyy", # FF5B..FF65 ; Common
+ "Kana", # FF66..FF6F ; Katakana
+ "Zyyy", # FF70..FF70 ; Common
+ "Kana", # FF71..FF9D ; Katakana
+ "Zyyy", # FF9E..FF9F ; Common
+ "Hang", # FFA0..FFBE ; Hangul
+ "Zzzz", # FFBF..FFC1 ; Unknown
+ "Hang", # FFC2..FFC7 ; Hangul
+ "Zzzz", # FFC8..FFC9 ; Unknown
+ "Hang", # FFCA..FFCF ; Hangul
+ "Zzzz", # FFD0..FFD1 ; Unknown
+ "Hang", # FFD2..FFD7 ; Hangul
+ "Zzzz", # FFD8..FFD9 ; Unknown
+ "Hang", # FFDA..FFDC ; Hangul
+ "Zzzz", # FFDD..FFDF ; Unknown
+ "Zyyy", # FFE0..FFE6 ; Common
+ "Zzzz", # FFE7..FFE7 ; Unknown
+ "Zyyy", # FFE8..FFEE ; Common
+ "Zzzz", # FFEF..FFF8 ; Unknown
+ "Zyyy", # FFF9..FFFD ; Common
+ "Zzzz", # FFFE..FFFF ; Unknown
+ "Linb", # 10000..1000B ; Linear_B
+ "Zzzz", # 1000C..1000C ; Unknown
+ "Linb", # 1000D..10026 ; Linear_B
+ "Zzzz", # 10027..10027 ; Unknown
+ "Linb", # 10028..1003A ; Linear_B
+ "Zzzz", # 1003B..1003B ; Unknown
+ "Linb", # 1003C..1003D ; Linear_B
+ "Zzzz", # 1003E..1003E ; Unknown
+ "Linb", # 1003F..1004D ; Linear_B
+ "Zzzz", # 1004E..1004F ; Unknown
+ "Linb", # 10050..1005D ; Linear_B
+ "Zzzz", # 1005E..1007F ; Unknown
+ "Linb", # 10080..100FA ; Linear_B
+ "Zzzz", # 100FB..100FF ; Unknown
+ "Zyyy", # 10100..10102 ; Common
+ "Zzzz", # 10103..10106 ; Unknown
+ "Zyyy", # 10107..10133 ; Common
+ "Zzzz", # 10134..10136 ; Unknown
+ "Zyyy", # 10137..1013F ; Common
+ "Grek", # 10140..1018E ; Greek
+ "Zzzz", # 1018F..1018F ; Unknown
+ "Zyyy", # 10190..1019C ; Common
+ "Zzzz", # 1019D..1019F ; Unknown
+ "Grek", # 101A0..101A0 ; Greek
+ "Zzzz", # 101A1..101CF ; Unknown
+ "Zyyy", # 101D0..101FC ; Common
+ "Zinh", # 101FD..101FD ; Inherited
+ "Zzzz", # 101FE..1027F ; Unknown
+ "Lyci", # 10280..1029C ; Lycian
+ "Zzzz", # 1029D..1029F ; Unknown
+ "Cari", # 102A0..102D0 ; Carian
+ "Zzzz", # 102D1..102DF ; Unknown
+ "Zinh", # 102E0..102E0 ; Inherited
+ "Zyyy", # 102E1..102FB ; Common
+ "Zzzz", # 102FC..102FF ; Unknown
+ "Ital", # 10300..10323 ; Old_Italic
+ "Zzzz", # 10324..1032C ; Unknown
+ "Ital", # 1032D..1032F ; Old_Italic
+ "Goth", # 10330..1034A ; Gothic
+ "Zzzz", # 1034B..1034F ; Unknown
+ "Perm", # 10350..1037A ; Old_Permic
+ "Zzzz", # 1037B..1037F ; Unknown
+ "Ugar", # 10380..1039D ; Ugaritic
+ "Zzzz", # 1039E..1039E ; Unknown
+ "Ugar", # 1039F..1039F ; Ugaritic
+ "Xpeo", # 103A0..103C3 ; Old_Persian
+ "Zzzz", # 103C4..103C7 ; Unknown
+ "Xpeo", # 103C8..103D5 ; Old_Persian
+ "Zzzz", # 103D6..103FF ; Unknown
+ "Dsrt", # 10400..1044F ; Deseret
+ "Shaw", # 10450..1047F ; Shavian
+ "Osma", # 10480..1049D ; Osmanya
+ "Zzzz", # 1049E..1049F ; Unknown
+ "Osma", # 104A0..104A9 ; Osmanya
+ "Zzzz", # 104AA..104AF ; Unknown
+ "Osge", # 104B0..104D3 ; Osage
+ "Zzzz", # 104D4..104D7 ; Unknown
+ "Osge", # 104D8..104FB ; Osage
+ "Zzzz", # 104FC..104FF ; Unknown
+ "Elba", # 10500..10527 ; Elbasan
+ "Zzzz", # 10528..1052F ; Unknown
+ "Aghb", # 10530..10563 ; Caucasian_Albanian
+ "Zzzz", # 10564..1056E ; Unknown
+ "Aghb", # 1056F..1056F ; Caucasian_Albanian
+ "Vith", # 10570..1057A ; Vithkuqi
+ "Zzzz", # 1057B..1057B ; Unknown
+ "Vith", # 1057C..1058A ; Vithkuqi
+ "Zzzz", # 1058B..1058B ; Unknown
+ "Vith", # 1058C..10592 ; Vithkuqi
+ "Zzzz", # 10593..10593 ; Unknown
+ "Vith", # 10594..10595 ; Vithkuqi
+ "Zzzz", # 10596..10596 ; Unknown
+ "Vith", # 10597..105A1 ; Vithkuqi
+ "Zzzz", # 105A2..105A2 ; Unknown
+ "Vith", # 105A3..105B1 ; Vithkuqi
+ "Zzzz", # 105B2..105B2 ; Unknown
+ "Vith", # 105B3..105B9 ; Vithkuqi
+ "Zzzz", # 105BA..105BA ; Unknown
+ "Vith", # 105BB..105BC ; Vithkuqi
+ "Zzzz", # 105BD..105FF ; Unknown
+ "Lina", # 10600..10736 ; Linear_A
+ "Zzzz", # 10737..1073F ; Unknown
+ "Lina", # 10740..10755 ; Linear_A
+ "Zzzz", # 10756..1075F ; Unknown
+ "Lina", # 10760..10767 ; Linear_A
+ "Zzzz", # 10768..1077F ; Unknown
+ "Latn", # 10780..10785 ; Latin
+ "Zzzz", # 10786..10786 ; Unknown
+ "Latn", # 10787..107B0 ; Latin
+ "Zzzz", # 107B1..107B1 ; Unknown
+ "Latn", # 107B2..107BA ; Latin
+ "Zzzz", # 107BB..107FF ; Unknown
+ "Cprt", # 10800..10805 ; Cypriot
+ "Zzzz", # 10806..10807 ; Unknown
+ "Cprt", # 10808..10808 ; Cypriot
+ "Zzzz", # 10809..10809 ; Unknown
+ "Cprt", # 1080A..10835 ; Cypriot
+ "Zzzz", # 10836..10836 ; Unknown
+ "Cprt", # 10837..10838 ; Cypriot
+ "Zzzz", # 10839..1083B ; Unknown
+ "Cprt", # 1083C..1083C ; Cypriot
+ "Zzzz", # 1083D..1083E ; Unknown
+ "Cprt", # 1083F..1083F ; Cypriot
+ "Armi", # 10840..10855 ; Imperial_Aramaic
+ "Zzzz", # 10856..10856 ; Unknown
+ "Armi", # 10857..1085F ; Imperial_Aramaic
+ "Palm", # 10860..1087F ; Palmyrene
+ "Nbat", # 10880..1089E ; Nabataean
+ "Zzzz", # 1089F..108A6 ; Unknown
+ "Nbat", # 108A7..108AF ; Nabataean
+ "Zzzz", # 108B0..108DF ; Unknown
+ "Hatr", # 108E0..108F2 ; Hatran
+ "Zzzz", # 108F3..108F3 ; Unknown
+ "Hatr", # 108F4..108F5 ; Hatran
+ "Zzzz", # 108F6..108FA ; Unknown
+ "Hatr", # 108FB..108FF ; Hatran
+ "Phnx", # 10900..1091B ; Phoenician
+ "Zzzz", # 1091C..1091E ; Unknown
+ "Phnx", # 1091F..1091F ; Phoenician
+ "Lydi", # 10920..10939 ; Lydian
+ "Zzzz", # 1093A..1093E ; Unknown
+ "Lydi", # 1093F..1093F ; Lydian
+ "Zzzz", # 10940..1097F ; Unknown
+ "Mero", # 10980..1099F ; Meroitic_Hieroglyphs
+ "Merc", # 109A0..109B7 ; Meroitic_Cursive
+ "Zzzz", # 109B8..109BB ; Unknown
+ "Merc", # 109BC..109CF ; Meroitic_Cursive
+ "Zzzz", # 109D0..109D1 ; Unknown
+ "Merc", # 109D2..109FF ; Meroitic_Cursive
+ "Khar", # 10A00..10A03 ; Kharoshthi
+ "Zzzz", # 10A04..10A04 ; Unknown
+ "Khar", # 10A05..10A06 ; Kharoshthi
+ "Zzzz", # 10A07..10A0B ; Unknown
+ "Khar", # 10A0C..10A13 ; Kharoshthi
+ "Zzzz", # 10A14..10A14 ; Unknown
+ "Khar", # 10A15..10A17 ; Kharoshthi
+ "Zzzz", # 10A18..10A18 ; Unknown
+ "Khar", # 10A19..10A35 ; Kharoshthi
+ "Zzzz", # 10A36..10A37 ; Unknown
+ "Khar", # 10A38..10A3A ; Kharoshthi
+ "Zzzz", # 10A3B..10A3E ; Unknown
+ "Khar", # 10A3F..10A48 ; Kharoshthi
+ "Zzzz", # 10A49..10A4F ; Unknown
+ "Khar", # 10A50..10A58 ; Kharoshthi
+ "Zzzz", # 10A59..10A5F ; Unknown
+ "Sarb", # 10A60..10A7F ; Old_South_Arabian
+ "Narb", # 10A80..10A9F ; Old_North_Arabian
+ "Zzzz", # 10AA0..10ABF ; Unknown
+ "Mani", # 10AC0..10AE6 ; Manichaean
+ "Zzzz", # 10AE7..10AEA ; Unknown
+ "Mani", # 10AEB..10AF6 ; Manichaean
+ "Zzzz", # 10AF7..10AFF ; Unknown
+ "Avst", # 10B00..10B35 ; Avestan
+ "Zzzz", # 10B36..10B38 ; Unknown
+ "Avst", # 10B39..10B3F ; Avestan
+ "Prti", # 10B40..10B55 ; Inscriptional_Parthian
+ "Zzzz", # 10B56..10B57 ; Unknown
+ "Prti", # 10B58..10B5F ; Inscriptional_Parthian
+ "Phli", # 10B60..10B72 ; Inscriptional_Pahlavi
+ "Zzzz", # 10B73..10B77 ; Unknown
+ "Phli", # 10B78..10B7F ; Inscriptional_Pahlavi
+ "Phlp", # 10B80..10B91 ; Psalter_Pahlavi
+ "Zzzz", # 10B92..10B98 ; Unknown
+ "Phlp", # 10B99..10B9C ; Psalter_Pahlavi
+ "Zzzz", # 10B9D..10BA8 ; Unknown
+ "Phlp", # 10BA9..10BAF ; Psalter_Pahlavi
+ "Zzzz", # 10BB0..10BFF ; Unknown
+ "Orkh", # 10C00..10C48 ; Old_Turkic
+ "Zzzz", # 10C49..10C7F ; Unknown
+ "Hung", # 10C80..10CB2 ; Old_Hungarian
+ "Zzzz", # 10CB3..10CBF ; Unknown
+ "Hung", # 10CC0..10CF2 ; Old_Hungarian
+ "Zzzz", # 10CF3..10CF9 ; Unknown
+ "Hung", # 10CFA..10CFF ; Old_Hungarian
+ "Rohg", # 10D00..10D27 ; Hanifi_Rohingya
+ "Zzzz", # 10D28..10D2F ; Unknown
+ "Rohg", # 10D30..10D39 ; Hanifi_Rohingya
+ "Zzzz", # 10D3A..10E5F ; Unknown
+ "Arab", # 10E60..10E7E ; Arabic
+ "Zzzz", # 10E7F..10E7F ; Unknown
+ "Yezi", # 10E80..10EA9 ; Yezidi
+ "Zzzz", # 10EAA..10EAA ; Unknown
+ "Yezi", # 10EAB..10EAD ; Yezidi
+ "Zzzz", # 10EAE..10EAF ; Unknown
+ "Yezi", # 10EB0..10EB1 ; Yezidi
+ "Zzzz", # 10EB2..10EFC ; Unknown
+ "Arab", # 10EFD..10EFF ; Arabic
+ "Sogo", # 10F00..10F27 ; Old_Sogdian
+ "Zzzz", # 10F28..10F2F ; Unknown
+ "Sogd", # 10F30..10F59 ; Sogdian
+ "Zzzz", # 10F5A..10F6F ; Unknown
+ "Ougr", # 10F70..10F89 ; Old_Uyghur
+ "Zzzz", # 10F8A..10FAF ; Unknown
+ "Chrs", # 10FB0..10FCB ; Chorasmian
+ "Zzzz", # 10FCC..10FDF ; Unknown
+ "Elym", # 10FE0..10FF6 ; Elymaic
+ "Zzzz", # 10FF7..10FFF ; Unknown
+ "Brah", # 11000..1104D ; Brahmi
+ "Zzzz", # 1104E..11051 ; Unknown
+ "Brah", # 11052..11075 ; Brahmi
+ "Zzzz", # 11076..1107E ; Unknown
+ "Brah", # 1107F..1107F ; Brahmi
+ "Kthi", # 11080..110C2 ; Kaithi
+ "Zzzz", # 110C3..110CC ; Unknown
+ "Kthi", # 110CD..110CD ; Kaithi
+ "Zzzz", # 110CE..110CF ; Unknown
+ "Sora", # 110D0..110E8 ; Sora_Sompeng
+ "Zzzz", # 110E9..110EF ; Unknown
+ "Sora", # 110F0..110F9 ; Sora_Sompeng
+ "Zzzz", # 110FA..110FF ; Unknown
+ "Cakm", # 11100..11134 ; Chakma
+ "Zzzz", # 11135..11135 ; Unknown
+ "Cakm", # 11136..11147 ; Chakma
+ "Zzzz", # 11148..1114F ; Unknown
+ "Mahj", # 11150..11176 ; Mahajani
+ "Zzzz", # 11177..1117F ; Unknown
+ "Shrd", # 11180..111DF ; Sharada
+ "Zzzz", # 111E0..111E0 ; Unknown
+ "Sinh", # 111E1..111F4 ; Sinhala
+ "Zzzz", # 111F5..111FF ; Unknown
+ "Khoj", # 11200..11211 ; Khojki
+ "Zzzz", # 11212..11212 ; Unknown
+ "Khoj", # 11213..11241 ; Khojki
+ "Zzzz", # 11242..1127F ; Unknown
+ "Mult", # 11280..11286 ; Multani
+ "Zzzz", # 11287..11287 ; Unknown
+ "Mult", # 11288..11288 ; Multani
+ "Zzzz", # 11289..11289 ; Unknown
+ "Mult", # 1128A..1128D ; Multani
+ "Zzzz", # 1128E..1128E ; Unknown
+ "Mult", # 1128F..1129D ; Multani
+ "Zzzz", # 1129E..1129E ; Unknown
+ "Mult", # 1129F..112A9 ; Multani
+ "Zzzz", # 112AA..112AF ; Unknown
+ "Sind", # 112B0..112EA ; Khudawadi
+ "Zzzz", # 112EB..112EF ; Unknown
+ "Sind", # 112F0..112F9 ; Khudawadi
+ "Zzzz", # 112FA..112FF ; Unknown
+ "Gran", # 11300..11303 ; Grantha
+ "Zzzz", # 11304..11304 ; Unknown
+ "Gran", # 11305..1130C ; Grantha
+ "Zzzz", # 1130D..1130E ; Unknown
+ "Gran", # 1130F..11310 ; Grantha
+ "Zzzz", # 11311..11312 ; Unknown
+ "Gran", # 11313..11328 ; Grantha
+ "Zzzz", # 11329..11329 ; Unknown
+ "Gran", # 1132A..11330 ; Grantha
+ "Zzzz", # 11331..11331 ; Unknown
+ "Gran", # 11332..11333 ; Grantha
+ "Zzzz", # 11334..11334 ; Unknown
+ "Gran", # 11335..11339 ; Grantha
+ "Zzzz", # 1133A..1133A ; Unknown
+ "Zinh", # 1133B..1133B ; Inherited
+ "Gran", # 1133C..11344 ; Grantha
+ "Zzzz", # 11345..11346 ; Unknown
+ "Gran", # 11347..11348 ; Grantha
+ "Zzzz", # 11349..1134A ; Unknown
+ "Gran", # 1134B..1134D ; Grantha
+ "Zzzz", # 1134E..1134F ; Unknown
+ "Gran", # 11350..11350 ; Grantha
+ "Zzzz", # 11351..11356 ; Unknown
+ "Gran", # 11357..11357 ; Grantha
+ "Zzzz", # 11358..1135C ; Unknown
+ "Gran", # 1135D..11363 ; Grantha
+ "Zzzz", # 11364..11365 ; Unknown
+ "Gran", # 11366..1136C ; Grantha
+ "Zzzz", # 1136D..1136F ; Unknown
+ "Gran", # 11370..11374 ; Grantha
+ "Zzzz", # 11375..113FF ; Unknown
+ "Newa", # 11400..1145B ; Newa
+ "Zzzz", # 1145C..1145C ; Unknown
+ "Newa", # 1145D..11461 ; Newa
+ "Zzzz", # 11462..1147F ; Unknown
+ "Tirh", # 11480..114C7 ; Tirhuta
+ "Zzzz", # 114C8..114CF ; Unknown
+ "Tirh", # 114D0..114D9 ; Tirhuta
+ "Zzzz", # 114DA..1157F ; Unknown
+ "Sidd", # 11580..115B5 ; Siddham
+ "Zzzz", # 115B6..115B7 ; Unknown
+ "Sidd", # 115B8..115DD ; Siddham
+ "Zzzz", # 115DE..115FF ; Unknown
+ "Modi", # 11600..11644 ; Modi
+ "Zzzz", # 11645..1164F ; Unknown
+ "Modi", # 11650..11659 ; Modi
+ "Zzzz", # 1165A..1165F ; Unknown
+ "Mong", # 11660..1166C ; Mongolian
+ "Zzzz", # 1166D..1167F ; Unknown
+ "Takr", # 11680..116B9 ; Takri
+ "Zzzz", # 116BA..116BF ; Unknown
+ "Takr", # 116C0..116C9 ; Takri
+ "Zzzz", # 116CA..116FF ; Unknown
+ "Ahom", # 11700..1171A ; Ahom
+ "Zzzz", # 1171B..1171C ; Unknown
+ "Ahom", # 1171D..1172B ; Ahom
+ "Zzzz", # 1172C..1172F ; Unknown
+ "Ahom", # 11730..11746 ; Ahom
+ "Zzzz", # 11747..117FF ; Unknown
+ "Dogr", # 11800..1183B ; Dogra
+ "Zzzz", # 1183C..1189F ; Unknown
+ "Wara", # 118A0..118F2 ; Warang_Citi
+ "Zzzz", # 118F3..118FE ; Unknown
+ "Wara", # 118FF..118FF ; Warang_Citi
+ "Diak", # 11900..11906 ; Dives_Akuru
+ "Zzzz", # 11907..11908 ; Unknown
+ "Diak", # 11909..11909 ; Dives_Akuru
+ "Zzzz", # 1190A..1190B ; Unknown
+ "Diak", # 1190C..11913 ; Dives_Akuru
+ "Zzzz", # 11914..11914 ; Unknown
+ "Diak", # 11915..11916 ; Dives_Akuru
+ "Zzzz", # 11917..11917 ; Unknown
+ "Diak", # 11918..11935 ; Dives_Akuru
+ "Zzzz", # 11936..11936 ; Unknown
+ "Diak", # 11937..11938 ; Dives_Akuru
+ "Zzzz", # 11939..1193A ; Unknown
+ "Diak", # 1193B..11946 ; Dives_Akuru
+ "Zzzz", # 11947..1194F ; Unknown
+ "Diak", # 11950..11959 ; Dives_Akuru
+ "Zzzz", # 1195A..1199F ; Unknown
+ "Nand", # 119A0..119A7 ; Nandinagari
+ "Zzzz", # 119A8..119A9 ; Unknown
+ "Nand", # 119AA..119D7 ; Nandinagari
+ "Zzzz", # 119D8..119D9 ; Unknown
+ "Nand", # 119DA..119E4 ; Nandinagari
+ "Zzzz", # 119E5..119FF ; Unknown
+ "Zanb", # 11A00..11A47 ; Zanabazar_Square
+ "Zzzz", # 11A48..11A4F ; Unknown
+ "Soyo", # 11A50..11AA2 ; Soyombo
+ "Zzzz", # 11AA3..11AAF ; Unknown
+ "Cans", # 11AB0..11ABF ; Canadian_Aboriginal
+ "Pauc", # 11AC0..11AF8 ; Pau_Cin_Hau
+ "Zzzz", # 11AF9..11AFF ; Unknown
+ "Deva", # 11B00..11B09 ; Devanagari
+ "Zzzz", # 11B0A..11BFF ; Unknown
+ "Bhks", # 11C00..11C08 ; Bhaiksuki
+ "Zzzz", # 11C09..11C09 ; Unknown
+ "Bhks", # 11C0A..11C36 ; Bhaiksuki
+ "Zzzz", # 11C37..11C37 ; Unknown
+ "Bhks", # 11C38..11C45 ; Bhaiksuki
+ "Zzzz", # 11C46..11C4F ; Unknown
+ "Bhks", # 11C50..11C6C ; Bhaiksuki
+ "Zzzz", # 11C6D..11C6F ; Unknown
+ "Marc", # 11C70..11C8F ; Marchen
+ "Zzzz", # 11C90..11C91 ; Unknown
+ "Marc", # 11C92..11CA7 ; Marchen
+ "Zzzz", # 11CA8..11CA8 ; Unknown
+ "Marc", # 11CA9..11CB6 ; Marchen
+ "Zzzz", # 11CB7..11CFF ; Unknown
+ "Gonm", # 11D00..11D06 ; Masaram_Gondi
+ "Zzzz", # 11D07..11D07 ; Unknown
+ "Gonm", # 11D08..11D09 ; Masaram_Gondi
+ "Zzzz", # 11D0A..11D0A ; Unknown
+ "Gonm", # 11D0B..11D36 ; Masaram_Gondi
+ "Zzzz", # 11D37..11D39 ; Unknown
+ "Gonm", # 11D3A..11D3A ; Masaram_Gondi
+ "Zzzz", # 11D3B..11D3B ; Unknown
+ "Gonm", # 11D3C..11D3D ; Masaram_Gondi
+ "Zzzz", # 11D3E..11D3E ; Unknown
+ "Gonm", # 11D3F..11D47 ; Masaram_Gondi
+ "Zzzz", # 11D48..11D4F ; Unknown
+ "Gonm", # 11D50..11D59 ; Masaram_Gondi
+ "Zzzz", # 11D5A..11D5F ; Unknown
+ "Gong", # 11D60..11D65 ; Gunjala_Gondi
+ "Zzzz", # 11D66..11D66 ; Unknown
+ "Gong", # 11D67..11D68 ; Gunjala_Gondi
+ "Zzzz", # 11D69..11D69 ; Unknown
+ "Gong", # 11D6A..11D8E ; Gunjala_Gondi
+ "Zzzz", # 11D8F..11D8F ; Unknown
+ "Gong", # 11D90..11D91 ; Gunjala_Gondi
+ "Zzzz", # 11D92..11D92 ; Unknown
+ "Gong", # 11D93..11D98 ; Gunjala_Gondi
+ "Zzzz", # 11D99..11D9F ; Unknown
+ "Gong", # 11DA0..11DA9 ; Gunjala_Gondi
+ "Zzzz", # 11DAA..11EDF ; Unknown
+ "Maka", # 11EE0..11EF8 ; Makasar
+ "Zzzz", # 11EF9..11EFF ; Unknown
+ "Kawi", # 11F00..11F10 ; Kawi
+ "Zzzz", # 11F11..11F11 ; Unknown
+ "Kawi", # 11F12..11F3A ; Kawi
+ "Zzzz", # 11F3B..11F3D ; Unknown
+ "Kawi", # 11F3E..11F59 ; Kawi
+ "Zzzz", # 11F5A..11FAF ; Unknown
+ "Lisu", # 11FB0..11FB0 ; Lisu
+ "Zzzz", # 11FB1..11FBF ; Unknown
+ "Taml", # 11FC0..11FF1 ; Tamil
+ "Zzzz", # 11FF2..11FFE ; Unknown
+ "Taml", # 11FFF..11FFF ; Tamil
+ "Xsux", # 12000..12399 ; Cuneiform
+ "Zzzz", # 1239A..123FF ; Unknown
+ "Xsux", # 12400..1246E ; Cuneiform
+ "Zzzz", # 1246F..1246F ; Unknown
+ "Xsux", # 12470..12474 ; Cuneiform
+ "Zzzz", # 12475..1247F ; Unknown
+ "Xsux", # 12480..12543 ; Cuneiform
+ "Zzzz", # 12544..12F8F ; Unknown
+ "Cpmn", # 12F90..12FF2 ; Cypro_Minoan
+ "Zzzz", # 12FF3..12FFF ; Unknown
+ "Egyp", # 13000..13455 ; Egyptian_Hieroglyphs
+ "Zzzz", # 13456..143FF ; Unknown
+ "Hluw", # 14400..14646 ; Anatolian_Hieroglyphs
+ "Zzzz", # 14647..167FF ; Unknown
+ "Bamu", # 16800..16A38 ; Bamum
+ "Zzzz", # 16A39..16A3F ; Unknown
+ "Mroo", # 16A40..16A5E ; Mro
+ "Zzzz", # 16A5F..16A5F ; Unknown
+ "Mroo", # 16A60..16A69 ; Mro
+ "Zzzz", # 16A6A..16A6D ; Unknown
+ "Mroo", # 16A6E..16A6F ; Mro
+ "Tnsa", # 16A70..16ABE ; Tangsa
+ "Zzzz", # 16ABF..16ABF ; Unknown
+ "Tnsa", # 16AC0..16AC9 ; Tangsa
+ "Zzzz", # 16ACA..16ACF ; Unknown
+ "Bass", # 16AD0..16AED ; Bassa_Vah
+ "Zzzz", # 16AEE..16AEF ; Unknown
+ "Bass", # 16AF0..16AF5 ; Bassa_Vah
+ "Zzzz", # 16AF6..16AFF ; Unknown
+ "Hmng", # 16B00..16B45 ; Pahawh_Hmong
+ "Zzzz", # 16B46..16B4F ; Unknown
+ "Hmng", # 16B50..16B59 ; Pahawh_Hmong
+ "Zzzz", # 16B5A..16B5A ; Unknown
+ "Hmng", # 16B5B..16B61 ; Pahawh_Hmong
+ "Zzzz", # 16B62..16B62 ; Unknown
+ "Hmng", # 16B63..16B77 ; Pahawh_Hmong
+ "Zzzz", # 16B78..16B7C ; Unknown
+ "Hmng", # 16B7D..16B8F ; Pahawh_Hmong
+ "Zzzz", # 16B90..16E3F ; Unknown
+ "Medf", # 16E40..16E9A ; Medefaidrin
+ "Zzzz", # 16E9B..16EFF ; Unknown
+ "Plrd", # 16F00..16F4A ; Miao
+ "Zzzz", # 16F4B..16F4E ; Unknown
+ "Plrd", # 16F4F..16F87 ; Miao
+ "Zzzz", # 16F88..16F8E ; Unknown
+ "Plrd", # 16F8F..16F9F ; Miao
+ "Zzzz", # 16FA0..16FDF ; Unknown
+ "Tang", # 16FE0..16FE0 ; Tangut
+ "Nshu", # 16FE1..16FE1 ; Nushu
+ "Hani", # 16FE2..16FE3 ; Han
+ "Kits", # 16FE4..16FE4 ; Khitan_Small_Script
+ "Zzzz", # 16FE5..16FEF ; Unknown
+ "Hani", # 16FF0..16FF1 ; Han
+ "Zzzz", # 16FF2..16FFF ; Unknown
+ "Tang", # 17000..187F7 ; Tangut
+ "Zzzz", # 187F8..187FF ; Unknown
+ "Tang", # 18800..18AFF ; Tangut
+ "Kits", # 18B00..18CD5 ; Khitan_Small_Script
+ "Zzzz", # 18CD6..18CFF ; Unknown
+ "Tang", # 18D00..18D08 ; Tangut
+ "Zzzz", # 18D09..1AFEF ; Unknown
+ "Kana", # 1AFF0..1AFF3 ; Katakana
+ "Zzzz", # 1AFF4..1AFF4 ; Unknown
+ "Kana", # 1AFF5..1AFFB ; Katakana
+ "Zzzz", # 1AFFC..1AFFC ; Unknown
+ "Kana", # 1AFFD..1AFFE ; Katakana
+ "Zzzz", # 1AFFF..1AFFF ; Unknown
+ "Kana", # 1B000..1B000 ; Katakana
+ "Hira", # 1B001..1B11F ; Hiragana
+ "Kana", # 1B120..1B122 ; Katakana
+ "Zzzz", # 1B123..1B131 ; Unknown
+ "Hira", # 1B132..1B132 ; Hiragana
+ "Zzzz", # 1B133..1B14F ; Unknown
+ "Hira", # 1B150..1B152 ; Hiragana
+ "Zzzz", # 1B153..1B154 ; Unknown
+ "Kana", # 1B155..1B155 ; Katakana
+ "Zzzz", # 1B156..1B163 ; Unknown
+ "Kana", # 1B164..1B167 ; Katakana
+ "Zzzz", # 1B168..1B16F ; Unknown
+ "Nshu", # 1B170..1B2FB ; Nushu
+ "Zzzz", # 1B2FC..1BBFF ; Unknown
+ "Dupl", # 1BC00..1BC6A ; Duployan
+ "Zzzz", # 1BC6B..1BC6F ; Unknown
+ "Dupl", # 1BC70..1BC7C ; Duployan
+ "Zzzz", # 1BC7D..1BC7F ; Unknown
+ "Dupl", # 1BC80..1BC88 ; Duployan
+ "Zzzz", # 1BC89..1BC8F ; Unknown
+ "Dupl", # 1BC90..1BC99 ; Duployan
+ "Zzzz", # 1BC9A..1BC9B ; Unknown
+ "Dupl", # 1BC9C..1BC9F ; Duployan
+ "Zyyy", # 1BCA0..1BCA3 ; Common
+ "Zzzz", # 1BCA4..1CEFF ; Unknown
+ "Zinh", # 1CF00..1CF2D ; Inherited
+ "Zzzz", # 1CF2E..1CF2F ; Unknown
+ "Zinh", # 1CF30..1CF46 ; Inherited
+ "Zzzz", # 1CF47..1CF4F ; Unknown
+ "Zyyy", # 1CF50..1CFC3 ; Common
+ "Zzzz", # 1CFC4..1CFFF ; Unknown
+ "Zyyy", # 1D000..1D0F5 ; Common
+ "Zzzz", # 1D0F6..1D0FF ; Unknown
+ "Zyyy", # 1D100..1D126 ; Common
+ "Zzzz", # 1D127..1D128 ; Unknown
+ "Zyyy", # 1D129..1D166 ; Common
+ "Zinh", # 1D167..1D169 ; Inherited
+ "Zyyy", # 1D16A..1D17A ; Common
+ "Zinh", # 1D17B..1D182 ; Inherited
+ "Zyyy", # 1D183..1D184 ; Common
+ "Zinh", # 1D185..1D18B ; Inherited
+ "Zyyy", # 1D18C..1D1A9 ; Common
+ "Zinh", # 1D1AA..1D1AD ; Inherited
+ "Zyyy", # 1D1AE..1D1EA ; Common
+ "Zzzz", # 1D1EB..1D1FF ; Unknown
+ "Grek", # 1D200..1D245 ; Greek
+ "Zzzz", # 1D246..1D2BF ; Unknown
+ "Zyyy", # 1D2C0..1D2D3 ; Common
+ "Zzzz", # 1D2D4..1D2DF ; Unknown
+ "Zyyy", # 1D2E0..1D2F3 ; Common
+ "Zzzz", # 1D2F4..1D2FF ; Unknown
+ "Zyyy", # 1D300..1D356 ; Common
+ "Zzzz", # 1D357..1D35F ; Unknown
+ "Zyyy", # 1D360..1D378 ; Common
+ "Zzzz", # 1D379..1D3FF ; Unknown
+ "Zyyy", # 1D400..1D454 ; Common
+ "Zzzz", # 1D455..1D455 ; Unknown
+ "Zyyy", # 1D456..1D49C ; Common
+ "Zzzz", # 1D49D..1D49D ; Unknown
+ "Zyyy", # 1D49E..1D49F ; Common
+ "Zzzz", # 1D4A0..1D4A1 ; Unknown
+ "Zyyy", # 1D4A2..1D4A2 ; Common
+ "Zzzz", # 1D4A3..1D4A4 ; Unknown
+ "Zyyy", # 1D4A5..1D4A6 ; Common
+ "Zzzz", # 1D4A7..1D4A8 ; Unknown
+ "Zyyy", # 1D4A9..1D4AC ; Common
+ "Zzzz", # 1D4AD..1D4AD ; Unknown
+ "Zyyy", # 1D4AE..1D4B9 ; Common
+ "Zzzz", # 1D4BA..1D4BA ; Unknown
+ "Zyyy", # 1D4BB..1D4BB ; Common
+ "Zzzz", # 1D4BC..1D4BC ; Unknown
+ "Zyyy", # 1D4BD..1D4C3 ; Common
+ "Zzzz", # 1D4C4..1D4C4 ; Unknown
+ "Zyyy", # 1D4C5..1D505 ; Common
+ "Zzzz", # 1D506..1D506 ; Unknown
+ "Zyyy", # 1D507..1D50A ; Common
+ "Zzzz", # 1D50B..1D50C ; Unknown
+ "Zyyy", # 1D50D..1D514 ; Common
+ "Zzzz", # 1D515..1D515 ; Unknown
+ "Zyyy", # 1D516..1D51C ; Common
+ "Zzzz", # 1D51D..1D51D ; Unknown
+ "Zyyy", # 1D51E..1D539 ; Common
+ "Zzzz", # 1D53A..1D53A ; Unknown
+ "Zyyy", # 1D53B..1D53E ; Common
+ "Zzzz", # 1D53F..1D53F ; Unknown
+ "Zyyy", # 1D540..1D544 ; Common
+ "Zzzz", # 1D545..1D545 ; Unknown
+ "Zyyy", # 1D546..1D546 ; Common
+ "Zzzz", # 1D547..1D549 ; Unknown
+ "Zyyy", # 1D54A..1D550 ; Common
+ "Zzzz", # 1D551..1D551 ; Unknown
+ "Zyyy", # 1D552..1D6A5 ; Common
+ "Zzzz", # 1D6A6..1D6A7 ; Unknown
+ "Zyyy", # 1D6A8..1D7CB ; Common
+ "Zzzz", # 1D7CC..1D7CD ; Unknown
+ "Zyyy", # 1D7CE..1D7FF ; Common
+ "Sgnw", # 1D800..1DA8B ; SignWriting
+ "Zzzz", # 1DA8C..1DA9A ; Unknown
+ "Sgnw", # 1DA9B..1DA9F ; SignWriting
+ "Zzzz", # 1DAA0..1DAA0 ; Unknown
+ "Sgnw", # 1DAA1..1DAAF ; SignWriting
+ "Zzzz", # 1DAB0..1DEFF ; Unknown
+ "Latn", # 1DF00..1DF1E ; Latin
+ "Zzzz", # 1DF1F..1DF24 ; Unknown
+ "Latn", # 1DF25..1DF2A ; Latin
+ "Zzzz", # 1DF2B..1DFFF ; Unknown
+ "Glag", # 1E000..1E006 ; Glagolitic
+ "Zzzz", # 1E007..1E007 ; Unknown
+ "Glag", # 1E008..1E018 ; Glagolitic
+ "Zzzz", # 1E019..1E01A ; Unknown
+ "Glag", # 1E01B..1E021 ; Glagolitic
+ "Zzzz", # 1E022..1E022 ; Unknown
+ "Glag", # 1E023..1E024 ; Glagolitic
+ "Zzzz", # 1E025..1E025 ; Unknown
+ "Glag", # 1E026..1E02A ; Glagolitic
+ "Zzzz", # 1E02B..1E02F ; Unknown
+ "Cyrl", # 1E030..1E06D ; Cyrillic
+ "Zzzz", # 1E06E..1E08E ; Unknown
+ "Cyrl", # 1E08F..1E08F ; Cyrillic
+ "Zzzz", # 1E090..1E0FF ; Unknown
+ "Hmnp", # 1E100..1E12C ; Nyiakeng_Puachue_Hmong
+ "Zzzz", # 1E12D..1E12F ; Unknown
+ "Hmnp", # 1E130..1E13D ; Nyiakeng_Puachue_Hmong
+ "Zzzz", # 1E13E..1E13F ; Unknown
+ "Hmnp", # 1E140..1E149 ; Nyiakeng_Puachue_Hmong
+ "Zzzz", # 1E14A..1E14D ; Unknown
+ "Hmnp", # 1E14E..1E14F ; Nyiakeng_Puachue_Hmong
+ "Zzzz", # 1E150..1E28F ; Unknown
+ "Toto", # 1E290..1E2AE ; Toto
+ "Zzzz", # 1E2AF..1E2BF ; Unknown
+ "Wcho", # 1E2C0..1E2F9 ; Wancho
+ "Zzzz", # 1E2FA..1E2FE ; Unknown
+ "Wcho", # 1E2FF..1E2FF ; Wancho
+ "Zzzz", # 1E300..1E4CF ; Unknown
+ "Nagm", # 1E4D0..1E4F9 ; Nag_Mundari
+ "Zzzz", # 1E4FA..1E7DF ; Unknown
+ "Ethi", # 1E7E0..1E7E6 ; Ethiopic
+ "Zzzz", # 1E7E7..1E7E7 ; Unknown
+ "Ethi", # 1E7E8..1E7EB ; Ethiopic
+ "Zzzz", # 1E7EC..1E7EC ; Unknown
+ "Ethi", # 1E7ED..1E7EE ; Ethiopic
+ "Zzzz", # 1E7EF..1E7EF ; Unknown
+ "Ethi", # 1E7F0..1E7FE ; Ethiopic
+ "Zzzz", # 1E7FF..1E7FF ; Unknown
+ "Mend", # 1E800..1E8C4 ; Mende_Kikakui
+ "Zzzz", # 1E8C5..1E8C6 ; Unknown
+ "Mend", # 1E8C7..1E8D6 ; Mende_Kikakui
+ "Zzzz", # 1E8D7..1E8FF ; Unknown
+ "Adlm", # 1E900..1E94B ; Adlam
+ "Zzzz", # 1E94C..1E94F ; Unknown
+ "Adlm", # 1E950..1E959 ; Adlam
+ "Zzzz", # 1E95A..1E95D ; Unknown
+ "Adlm", # 1E95E..1E95F ; Adlam
+ "Zzzz", # 1E960..1EC70 ; Unknown
+ "Zyyy", # 1EC71..1ECB4 ; Common
+ "Zzzz", # 1ECB5..1ED00 ; Unknown
+ "Zyyy", # 1ED01..1ED3D ; Common
+ "Zzzz", # 1ED3E..1EDFF ; Unknown
+ "Arab", # 1EE00..1EE03 ; Arabic
+ "Zzzz", # 1EE04..1EE04 ; Unknown
+ "Arab", # 1EE05..1EE1F ; Arabic
+ "Zzzz", # 1EE20..1EE20 ; Unknown
+ "Arab", # 1EE21..1EE22 ; Arabic
+ "Zzzz", # 1EE23..1EE23 ; Unknown
+ "Arab", # 1EE24..1EE24 ; Arabic
+ "Zzzz", # 1EE25..1EE26 ; Unknown
+ "Arab", # 1EE27..1EE27 ; Arabic
+ "Zzzz", # 1EE28..1EE28 ; Unknown
+ "Arab", # 1EE29..1EE32 ; Arabic
+ "Zzzz", # 1EE33..1EE33 ; Unknown
+ "Arab", # 1EE34..1EE37 ; Arabic
+ "Zzzz", # 1EE38..1EE38 ; Unknown
+ "Arab", # 1EE39..1EE39 ; Arabic
+ "Zzzz", # 1EE3A..1EE3A ; Unknown
+ "Arab", # 1EE3B..1EE3B ; Arabic
+ "Zzzz", # 1EE3C..1EE41 ; Unknown
+ "Arab", # 1EE42..1EE42 ; Arabic
+ "Zzzz", # 1EE43..1EE46 ; Unknown
+ "Arab", # 1EE47..1EE47 ; Arabic
+ "Zzzz", # 1EE48..1EE48 ; Unknown
+ "Arab", # 1EE49..1EE49 ; Arabic
+ "Zzzz", # 1EE4A..1EE4A ; Unknown
+ "Arab", # 1EE4B..1EE4B ; Arabic
+ "Zzzz", # 1EE4C..1EE4C ; Unknown
+ "Arab", # 1EE4D..1EE4F ; Arabic
+ "Zzzz", # 1EE50..1EE50 ; Unknown
+ "Arab", # 1EE51..1EE52 ; Arabic
+ "Zzzz", # 1EE53..1EE53 ; Unknown
+ "Arab", # 1EE54..1EE54 ; Arabic
+ "Zzzz", # 1EE55..1EE56 ; Unknown
+ "Arab", # 1EE57..1EE57 ; Arabic
+ "Zzzz", # 1EE58..1EE58 ; Unknown
+ "Arab", # 1EE59..1EE59 ; Arabic
+ "Zzzz", # 1EE5A..1EE5A ; Unknown
+ "Arab", # 1EE5B..1EE5B ; Arabic
+ "Zzzz", # 1EE5C..1EE5C ; Unknown
+ "Arab", # 1EE5D..1EE5D ; Arabic
+ "Zzzz", # 1EE5E..1EE5E ; Unknown
+ "Arab", # 1EE5F..1EE5F ; Arabic
+ "Zzzz", # 1EE60..1EE60 ; Unknown
+ "Arab", # 1EE61..1EE62 ; Arabic
+ "Zzzz", # 1EE63..1EE63 ; Unknown
+ "Arab", # 1EE64..1EE64 ; Arabic
+ "Zzzz", # 1EE65..1EE66 ; Unknown
+ "Arab", # 1EE67..1EE6A ; Arabic
+ "Zzzz", # 1EE6B..1EE6B ; Unknown
+ "Arab", # 1EE6C..1EE72 ; Arabic
+ "Zzzz", # 1EE73..1EE73 ; Unknown
+ "Arab", # 1EE74..1EE77 ; Arabic
+ "Zzzz", # 1EE78..1EE78 ; Unknown
+ "Arab", # 1EE79..1EE7C ; Arabic
+ "Zzzz", # 1EE7D..1EE7D ; Unknown
+ "Arab", # 1EE7E..1EE7E ; Arabic
+ "Zzzz", # 1EE7F..1EE7F ; Unknown
+ "Arab", # 1EE80..1EE89 ; Arabic
+ "Zzzz", # 1EE8A..1EE8A ; Unknown
+ "Arab", # 1EE8B..1EE9B ; Arabic
+ "Zzzz", # 1EE9C..1EEA0 ; Unknown
+ "Arab", # 1EEA1..1EEA3 ; Arabic
+ "Zzzz", # 1EEA4..1EEA4 ; Unknown
+ "Arab", # 1EEA5..1EEA9 ; Arabic
+ "Zzzz", # 1EEAA..1EEAA ; Unknown
+ "Arab", # 1EEAB..1EEBB ; Arabic
+ "Zzzz", # 1EEBC..1EEEF ; Unknown
+ "Arab", # 1EEF0..1EEF1 ; Arabic
+ "Zzzz", # 1EEF2..1EFFF ; Unknown
+ "Zyyy", # 1F000..1F02B ; Common
+ "Zzzz", # 1F02C..1F02F ; Unknown
+ "Zyyy", # 1F030..1F093 ; Common
+ "Zzzz", # 1F094..1F09F ; Unknown
+ "Zyyy", # 1F0A0..1F0AE ; Common
+ "Zzzz", # 1F0AF..1F0B0 ; Unknown
+ "Zyyy", # 1F0B1..1F0BF ; Common
+ "Zzzz", # 1F0C0..1F0C0 ; Unknown
+ "Zyyy", # 1F0C1..1F0CF ; Common
+ "Zzzz", # 1F0D0..1F0D0 ; Unknown
+ "Zyyy", # 1F0D1..1F0F5 ; Common
+ "Zzzz", # 1F0F6..1F0FF ; Unknown
+ "Zyyy", # 1F100..1F1AD ; Common
+ "Zzzz", # 1F1AE..1F1E5 ; Unknown
+ "Zyyy", # 1F1E6..1F1FF ; Common
+ "Hira", # 1F200..1F200 ; Hiragana
+ "Zyyy", # 1F201..1F202 ; Common
+ "Zzzz", # 1F203..1F20F ; Unknown
+ "Zyyy", # 1F210..1F23B ; Common
+ "Zzzz", # 1F23C..1F23F ; Unknown
+ "Zyyy", # 1F240..1F248 ; Common
+ "Zzzz", # 1F249..1F24F ; Unknown
+ "Zyyy", # 1F250..1F251 ; Common
+ "Zzzz", # 1F252..1F25F ; Unknown
+ "Zyyy", # 1F260..1F265 ; Common
+ "Zzzz", # 1F266..1F2FF ; Unknown
+ "Zyyy", # 1F300..1F6D7 ; Common
+ "Zzzz", # 1F6D8..1F6DB ; Unknown
+ "Zyyy", # 1F6DC..1F6EC ; Common
+ "Zzzz", # 1F6ED..1F6EF ; Unknown
+ "Zyyy", # 1F6F0..1F6FC ; Common
+ "Zzzz", # 1F6FD..1F6FF ; Unknown
+ "Zyyy", # 1F700..1F776 ; Common
+ "Zzzz", # 1F777..1F77A ; Unknown
+ "Zyyy", # 1F77B..1F7D9 ; Common
+ "Zzzz", # 1F7DA..1F7DF ; Unknown
+ "Zyyy", # 1F7E0..1F7EB ; Common
+ "Zzzz", # 1F7EC..1F7EF ; Unknown
+ "Zyyy", # 1F7F0..1F7F0 ; Common
+ "Zzzz", # 1F7F1..1F7FF ; Unknown
+ "Zyyy", # 1F800..1F80B ; Common
+ "Zzzz", # 1F80C..1F80F ; Unknown
+ "Zyyy", # 1F810..1F847 ; Common
+ "Zzzz", # 1F848..1F84F ; Unknown
+ "Zyyy", # 1F850..1F859 ; Common
+ "Zzzz", # 1F85A..1F85F ; Unknown
+ "Zyyy", # 1F860..1F887 ; Common
+ "Zzzz", # 1F888..1F88F ; Unknown
+ "Zyyy", # 1F890..1F8AD ; Common
+ "Zzzz", # 1F8AE..1F8AF ; Unknown
+ "Zyyy", # 1F8B0..1F8B1 ; Common
+ "Zzzz", # 1F8B2..1F8FF ; Unknown
+ "Zyyy", # 1F900..1FA53 ; Common
+ "Zzzz", # 1FA54..1FA5F ; Unknown
+ "Zyyy", # 1FA60..1FA6D ; Common
+ "Zzzz", # 1FA6E..1FA6F ; Unknown
+ "Zyyy", # 1FA70..1FA7C ; Common
+ "Zzzz", # 1FA7D..1FA7F ; Unknown
+ "Zyyy", # 1FA80..1FA88 ; Common
+ "Zzzz", # 1FA89..1FA8F ; Unknown
+ "Zyyy", # 1FA90..1FABD ; Common
+ "Zzzz", # 1FABE..1FABE ; Unknown
+ "Zyyy", # 1FABF..1FAC5 ; Common
+ "Zzzz", # 1FAC6..1FACD ; Unknown
+ "Zyyy", # 1FACE..1FADB ; Common
+ "Zzzz", # 1FADC..1FADF ; Unknown
+ "Zyyy", # 1FAE0..1FAE8 ; Common
+ "Zzzz", # 1FAE9..1FAEF ; Unknown
+ "Zyyy", # 1FAF0..1FAF8 ; Common
+ "Zzzz", # 1FAF9..1FAFF ; Unknown
+ "Zyyy", # 1FB00..1FB92 ; Common
+ "Zzzz", # 1FB93..1FB93 ; Unknown
+ "Zyyy", # 1FB94..1FBCA ; Common
+ "Zzzz", # 1FBCB..1FBEF ; Unknown
+ "Zyyy", # 1FBF0..1FBF9 ; Common
+ "Zzzz", # 1FBFA..1FFFF ; Unknown
+ "Hani", # 20000..2A6DF ; Han
+ "Zzzz", # 2A6E0..2A6FF ; Unknown
+ "Hani", # 2A700..2B739 ; Han
+ "Zzzz", # 2B73A..2B73F ; Unknown
+ "Hani", # 2B740..2B81D ; Han
+ "Zzzz", # 2B81E..2B81F ; Unknown
+ "Hani", # 2B820..2CEA1 ; Han
+ "Zzzz", # 2CEA2..2CEAF ; Unknown
+ "Hani", # 2CEB0..2EBE0 ; Han
+ "Zzzz", # 2EBE1..2F7FF ; Unknown
+ "Hani", # 2F800..2FA1D ; Han
+ "Zzzz", # 2FA1E..2FFFF ; Unknown
+ "Hani", # 30000..3134A ; Han
+ "Zzzz", # 3134B..3134F ; Unknown
+ "Hani", # 31350..323AF ; Han
+ "Zzzz", # 323B0..E0000 ; Unknown
+ "Zyyy", # E0001..E0001 ; Common
+ "Zzzz", # E0002..E001F ; Unknown
+ "Zyyy", # E0020..E007F ; Common
+ "Zzzz", # E0080..E00FF ; Unknown
+ "Zinh", # E0100..E01EF ; Inherited
+ "Zzzz", # E01F0..10FFFF ; Unknown
]
NAMES = {
- 'Adlm': 'Adlam',
- 'Aghb': 'Caucasian_Albanian',
- 'Ahom': 'Ahom',
- 'Arab': 'Arabic',
- 'Armi': 'Imperial_Aramaic',
- 'Armn': 'Armenian',
- 'Avst': 'Avestan',
- 'Bali': 'Balinese',
- 'Bamu': 'Bamum',
- 'Bass': 'Bassa_Vah',
- 'Batk': 'Batak',
- 'Beng': 'Bengali',
- 'Bhks': 'Bhaiksuki',
- 'Bopo': 'Bopomofo',
- 'Brah': 'Brahmi',
- 'Brai': 'Braille',
- 'Bugi': 'Buginese',
- 'Buhd': 'Buhid',
- 'Cakm': 'Chakma',
- 'Cans': 'Canadian_Aboriginal',
- 'Cari': 'Carian',
- 'Cham': 'Cham',
- 'Cher': 'Cherokee',
- 'Chrs': 'Chorasmian',
- 'Copt': 'Coptic',
- 'Cpmn': 'Cypro_Minoan',
- 'Cprt': 'Cypriot',
- 'Cyrl': 'Cyrillic',
- 'Deva': 'Devanagari',
- 'Diak': 'Dives_Akuru',
- 'Dogr': 'Dogra',
- 'Dsrt': 'Deseret',
- 'Dupl': 'Duployan',
- 'Egyp': 'Egyptian_Hieroglyphs',
- 'Elba': 'Elbasan',
- 'Elym': 'Elymaic',
- 'Ethi': 'Ethiopic',
- 'Geor': 'Georgian',
- 'Glag': 'Glagolitic',
- 'Gong': 'Gunjala_Gondi',
- 'Gonm': 'Masaram_Gondi',
- 'Goth': 'Gothic',
- 'Gran': 'Grantha',
- 'Grek': 'Greek',
- 'Gujr': 'Gujarati',
- 'Guru': 'Gurmukhi',
- 'Hang': 'Hangul',
- 'Hani': 'Han',
- 'Hano': 'Hanunoo',
- 'Hatr': 'Hatran',
- 'Hebr': 'Hebrew',
- 'Hira': 'Hiragana',
- 'Hluw': 'Anatolian_Hieroglyphs',
- 'Hmng': 'Pahawh_Hmong',
- 'Hmnp': 'Nyiakeng_Puachue_Hmong',
- 'Hrkt': 'Katakana_Or_Hiragana',
- 'Hung': 'Old_Hungarian',
- 'Ital': 'Old_Italic',
- 'Java': 'Javanese',
- 'Kali': 'Kayah_Li',
- 'Kana': 'Katakana',
- 'Khar': 'Kharoshthi',
- 'Khmr': 'Khmer',
- 'Khoj': 'Khojki',
- 'Kits': 'Khitan_Small_Script',
- 'Knda': 'Kannada',
- 'Kthi': 'Kaithi',
- 'Lana': 'Tai_Tham',
- 'Laoo': 'Lao',
- 'Latn': 'Latin',
- 'Lepc': 'Lepcha',
- 'Limb': 'Limbu',
- 'Lina': 'Linear_A',
- 'Linb': 'Linear_B',
- 'Lisu': 'Lisu',
- 'Lyci': 'Lycian',
- 'Lydi': 'Lydian',
- 'Mahj': 'Mahajani',
- 'Maka': 'Makasar',
- 'Mand': 'Mandaic',
- 'Mani': 'Manichaean',
- 'Marc': 'Marchen',
- 'Medf': 'Medefaidrin',
- 'Mend': 'Mende_Kikakui',
- 'Merc': 'Meroitic_Cursive',
- 'Mero': 'Meroitic_Hieroglyphs',
- 'Mlym': 'Malayalam',
- 'Modi': 'Modi',
- 'Mong': 'Mongolian',
- 'Mroo': 'Mro',
- 'Mtei': 'Meetei_Mayek',
- 'Mult': 'Multani',
- 'Mymr': 'Myanmar',
- 'Nand': 'Nandinagari',
- 'Narb': 'Old_North_Arabian',
- 'Nbat': 'Nabataean',
- 'Newa': 'Newa',
- 'Nkoo': 'Nko',
- 'Nshu': 'Nushu',
- 'Ogam': 'Ogham',
- 'Olck': 'Ol_Chiki',
- 'Orkh': 'Old_Turkic',
- 'Orya': 'Oriya',
- 'Osge': 'Osage',
- 'Osma': 'Osmanya',
- 'Ougr': 'Old_Uyghur',
- 'Palm': 'Palmyrene',
- 'Pauc': 'Pau_Cin_Hau',
- 'Perm': 'Old_Permic',
- 'Phag': 'Phags_Pa',
- 'Phli': 'Inscriptional_Pahlavi',
- 'Phlp': 'Psalter_Pahlavi',
- 'Phnx': 'Phoenician',
- 'Plrd': 'Miao',
- 'Prti': 'Inscriptional_Parthian',
- 'Rjng': 'Rejang',
- 'Rohg': 'Hanifi_Rohingya',
- 'Runr': 'Runic',
- 'Samr': 'Samaritan',
- 'Sarb': 'Old_South_Arabian',
- 'Saur': 'Saurashtra',
- 'Sgnw': 'SignWriting',
- 'Shaw': 'Shavian',
- 'Shrd': 'Sharada',
- 'Sidd': 'Siddham',
- 'Sind': 'Khudawadi',
- 'Sinh': 'Sinhala',
- 'Sogd': 'Sogdian',
- 'Sogo': 'Old_Sogdian',
- 'Sora': 'Sora_Sompeng',
- 'Soyo': 'Soyombo',
- 'Sund': 'Sundanese',
- 'Sylo': 'Syloti_Nagri',
- 'Syrc': 'Syriac',
- 'Tagb': 'Tagbanwa',
- 'Takr': 'Takri',
- 'Tale': 'Tai_Le',
- 'Talu': 'New_Tai_Lue',
- 'Taml': 'Tamil',
- 'Tang': 'Tangut',
- 'Tavt': 'Tai_Viet',
- 'Telu': 'Telugu',
- 'Tfng': 'Tifinagh',
- 'Tglg': 'Tagalog',
- 'Thaa': 'Thaana',
- 'Thai': 'Thai',
- 'Tibt': 'Tibetan',
- 'Tirh': 'Tirhuta',
- 'Tnsa': 'Tangsa',
- 'Toto': 'Toto',
- 'Ugar': 'Ugaritic',
- 'Vaii': 'Vai',
- 'Vith': 'Vithkuqi',
- 'Wara': 'Warang_Citi',
- 'Wcho': 'Wancho',
- 'Xpeo': 'Old_Persian',
- 'Xsux': 'Cuneiform',
- 'Yezi': 'Yezidi',
- 'Yiii': 'Yi',
- 'Zanb': 'Zanabazar_Square',
- 'Zinh': 'Inherited',
- 'Zyyy': 'Common',
- 'Zzzz': 'Unknown',
+ "Adlm": "Adlam",
+ "Aghb": "Caucasian_Albanian",
+ "Ahom": "Ahom",
+ "Arab": "Arabic",
+ "Armi": "Imperial_Aramaic",
+ "Armn": "Armenian",
+ "Avst": "Avestan",
+ "Bali": "Balinese",
+ "Bamu": "Bamum",
+ "Bass": "Bassa_Vah",
+ "Batk": "Batak",
+ "Beng": "Bengali",
+ "Bhks": "Bhaiksuki",
+ "Bopo": "Bopomofo",
+ "Brah": "Brahmi",
+ "Brai": "Braille",
+ "Bugi": "Buginese",
+ "Buhd": "Buhid",
+ "Cakm": "Chakma",
+ "Cans": "Canadian_Aboriginal",
+ "Cari": "Carian",
+ "Cham": "Cham",
+ "Cher": "Cherokee",
+ "Chrs": "Chorasmian",
+ "Copt": "Coptic",
+ "Cpmn": "Cypro_Minoan",
+ "Cprt": "Cypriot",
+ "Cyrl": "Cyrillic",
+ "Deva": "Devanagari",
+ "Diak": "Dives_Akuru",
+ "Dogr": "Dogra",
+ "Dsrt": "Deseret",
+ "Dupl": "Duployan",
+ "Egyp": "Egyptian_Hieroglyphs",
+ "Elba": "Elbasan",
+ "Elym": "Elymaic",
+ "Ethi": "Ethiopic",
+ "Geor": "Georgian",
+ "Glag": "Glagolitic",
+ "Gong": "Gunjala_Gondi",
+ "Gonm": "Masaram_Gondi",
+ "Goth": "Gothic",
+ "Gran": "Grantha",
+ "Grek": "Greek",
+ "Gujr": "Gujarati",
+ "Guru": "Gurmukhi",
+ "Hang": "Hangul",
+ "Hani": "Han",
+ "Hano": "Hanunoo",
+ "Hatr": "Hatran",
+ "Hebr": "Hebrew",
+ "Hira": "Hiragana",
+ "Hluw": "Anatolian_Hieroglyphs",
+ "Hmng": "Pahawh_Hmong",
+ "Hmnp": "Nyiakeng_Puachue_Hmong",
+ "Hrkt": "Katakana_Or_Hiragana",
+ "Hung": "Old_Hungarian",
+ "Ital": "Old_Italic",
+ "Java": "Javanese",
+ "Kali": "Kayah_Li",
+ "Kana": "Katakana",
+ "Kawi": "Kawi",
+ "Khar": "Kharoshthi",
+ "Khmr": "Khmer",
+ "Khoj": "Khojki",
+ "Kits": "Khitan_Small_Script",
+ "Knda": "Kannada",
+ "Kthi": "Kaithi",
+ "Lana": "Tai_Tham",
+ "Laoo": "Lao",
+ "Latn": "Latin",
+ "Lepc": "Lepcha",
+ "Limb": "Limbu",
+ "Lina": "Linear_A",
+ "Linb": "Linear_B",
+ "Lisu": "Lisu",
+ "Lyci": "Lycian",
+ "Lydi": "Lydian",
+ "Mahj": "Mahajani",
+ "Maka": "Makasar",
+ "Mand": "Mandaic",
+ "Mani": "Manichaean",
+ "Marc": "Marchen",
+ "Medf": "Medefaidrin",
+ "Mend": "Mende_Kikakui",
+ "Merc": "Meroitic_Cursive",
+ "Mero": "Meroitic_Hieroglyphs",
+ "Mlym": "Malayalam",
+ "Modi": "Modi",
+ "Mong": "Mongolian",
+ "Mroo": "Mro",
+ "Mtei": "Meetei_Mayek",
+ "Mult": "Multani",
+ "Mymr": "Myanmar",
+ "Nagm": "Nag_Mundari",
+ "Nand": "Nandinagari",
+ "Narb": "Old_North_Arabian",
+ "Nbat": "Nabataean",
+ "Newa": "Newa",
+ "Nkoo": "Nko",
+ "Nshu": "Nushu",
+ "Ogam": "Ogham",
+ "Olck": "Ol_Chiki",
+ "Orkh": "Old_Turkic",
+ "Orya": "Oriya",
+ "Osge": "Osage",
+ "Osma": "Osmanya",
+ "Ougr": "Old_Uyghur",
+ "Palm": "Palmyrene",
+ "Pauc": "Pau_Cin_Hau",
+ "Perm": "Old_Permic",
+ "Phag": "Phags_Pa",
+ "Phli": "Inscriptional_Pahlavi",
+ "Phlp": "Psalter_Pahlavi",
+ "Phnx": "Phoenician",
+ "Plrd": "Miao",
+ "Prti": "Inscriptional_Parthian",
+ "Rjng": "Rejang",
+ "Rohg": "Hanifi_Rohingya",
+ "Runr": "Runic",
+ "Samr": "Samaritan",
+ "Sarb": "Old_South_Arabian",
+ "Saur": "Saurashtra",
+ "Sgnw": "SignWriting",
+ "Shaw": "Shavian",
+ "Shrd": "Sharada",
+ "Sidd": "Siddham",
+ "Sind": "Khudawadi",
+ "Sinh": "Sinhala",
+ "Sogd": "Sogdian",
+ "Sogo": "Old_Sogdian",
+ "Sora": "Sora_Sompeng",
+ "Soyo": "Soyombo",
+ "Sund": "Sundanese",
+ "Sylo": "Syloti_Nagri",
+ "Syrc": "Syriac",
+ "Tagb": "Tagbanwa",
+ "Takr": "Takri",
+ "Tale": "Tai_Le",
+ "Talu": "New_Tai_Lue",
+ "Taml": "Tamil",
+ "Tang": "Tangut",
+ "Tavt": "Tai_Viet",
+ "Telu": "Telugu",
+ "Tfng": "Tifinagh",
+ "Tglg": "Tagalog",
+ "Thaa": "Thaana",
+ "Thai": "Thai",
+ "Tibt": "Tibetan",
+ "Tirh": "Tirhuta",
+ "Tnsa": "Tangsa",
+ "Toto": "Toto",
+ "Ugar": "Ugaritic",
+ "Vaii": "Vai",
+ "Vith": "Vithkuqi",
+ "Wara": "Warang_Citi",
+ "Wcho": "Wancho",
+ "Xpeo": "Old_Persian",
+ "Xsux": "Cuneiform",
+ "Yezi": "Yezidi",
+ "Yiii": "Yi",
+ "Zanb": "Zanabazar_Square",
+ "Zinh": "Inherited",
+ "Zyyy": "Common",
+ "Zzzz": "Unknown",
}
diff --git a/Lib/fontTools/unicodedata/__init__.py b/Lib/fontTools/unicodedata/__init__.py
index 4546ef3f..808c9c72 100644
--- a/Lib/fontTools/unicodedata/__init__.py
+++ b/Lib/fontTools/unicodedata/__init__.py
@@ -1,7 +1,11 @@
+from __future__ import annotations
+
from fontTools.misc.textTools import byteord, tostr
import re
from bisect import bisect_right
+from typing import Literal, TypeVar, overload
+
try:
# use unicodedata backport compatible with python2:
@@ -14,7 +18,7 @@ except ImportError: # pragma: no cover
from . import Blocks, Scripts, ScriptExtensions, OTTags
-__all__ = [tostr(s) for s in (
+__all__ = [
# names from built-in unicodedata module
"lookup",
"name",
@@ -39,11 +43,11 @@ __all__ = [tostr(s) for s in (
"script_horizontal_direction",
"ot_tags_from_script",
"ot_tag_to_script",
-)]
+]
def script(char):
- """ Return the four-letter script code assigned to the Unicode character
+ """Return the four-letter script code assigned to the Unicode character
'char' as string.
>>> script("a")
@@ -64,11 +68,11 @@ def script(char):
# contains the given codepoint: i.e. whose start is less than or equal
# to the codepoint. Thus, we subtract -1 from the index returned.
i = bisect_right(Scripts.RANGES, code)
- return Scripts.VALUES[i-1]
+ return Scripts.VALUES[i - 1]
def script_extension(char):
- """ Return the script extension property assigned to the Unicode character
+ """Return the script extension property assigned to the Unicode character
'char' as a set of string.
>>> script_extension("a") == {'Latn'}
@@ -80,7 +84,7 @@ def script_extension(char):
"""
code = byteord(char)
i = bisect_right(ScriptExtensions.RANGES, code)
- value = ScriptExtensions.VALUES[i-1]
+ value = ScriptExtensions.VALUES[i - 1]
if value is None:
# code points not explicitly listed for Script Extensions
# have as their value the corresponding Script property value
@@ -89,7 +93,7 @@ def script_extension(char):
def script_name(code, default=KeyError):
- """ Return the long, human-readable script name given a four-letter
+ """Return the long, human-readable script name given a four-letter
Unicode script code.
If no matching name is found, a KeyError is raised by default.
@@ -113,8 +117,7 @@ def _normalize_property_name(string):
return _normalize_re.sub("", string).lower()
-_SCRIPT_CODES = {_normalize_property_name(v): k
- for k, v in Scripts.NAMES.items()}
+_SCRIPT_CODES = {_normalize_property_name(v): k for k, v in Scripts.NAMES.items()}
def script_code(script_name, default=KeyError):
@@ -140,86 +143,90 @@ def script_code(script_name, default=KeyError):
# https://docs.google.com/spreadsheets/d/1Y90M0Ie3MUJ6UVCRDOypOtijlMDLNNyyLk36T6iMu0o
RTL_SCRIPTS = {
# Unicode-1.1 additions
- 'Arab', # Arabic
- 'Hebr', # Hebrew
-
+ "Arab", # Arabic
+ "Hebr", # Hebrew
# Unicode-3.0 additions
- 'Syrc', # Syriac
- 'Thaa', # Thaana
-
+ "Syrc", # Syriac
+ "Thaa", # Thaana
# Unicode-4.0 additions
- 'Cprt', # Cypriot
-
+ "Cprt", # Cypriot
# Unicode-4.1 additions
- 'Khar', # Kharoshthi
-
+ "Khar", # Kharoshthi
# Unicode-5.0 additions
- 'Phnx', # Phoenician
- 'Nkoo', # Nko
-
+ "Phnx", # Phoenician
+ "Nkoo", # Nko
# Unicode-5.1 additions
- 'Lydi', # Lydian
-
+ "Lydi", # Lydian
# Unicode-5.2 additions
- 'Avst', # Avestan
- 'Armi', # Imperial Aramaic
- 'Phli', # Inscriptional Pahlavi
- 'Prti', # Inscriptional Parthian
- 'Sarb', # Old South Arabian
- 'Orkh', # Old Turkic
- 'Samr', # Samaritan
-
+ "Avst", # Avestan
+ "Armi", # Imperial Aramaic
+ "Phli", # Inscriptional Pahlavi
+ "Prti", # Inscriptional Parthian
+ "Sarb", # Old South Arabian
+ "Orkh", # Old Turkic
+ "Samr", # Samaritan
# Unicode-6.0 additions
- 'Mand', # Mandaic
-
+ "Mand", # Mandaic
# Unicode-6.1 additions
- 'Merc', # Meroitic Cursive
- 'Mero', # Meroitic Hieroglyphs
-
+ "Merc", # Meroitic Cursive
+ "Mero", # Meroitic Hieroglyphs
# Unicode-7.0 additions
- 'Mani', # Manichaean
- 'Mend', # Mende Kikakui
- 'Nbat', # Nabataean
- 'Narb', # Old North Arabian
- 'Palm', # Palmyrene
- 'Phlp', # Psalter Pahlavi
-
+ "Mani", # Manichaean
+ "Mend", # Mende Kikakui
+ "Nbat", # Nabataean
+ "Narb", # Old North Arabian
+ "Palm", # Palmyrene
+ "Phlp", # Psalter Pahlavi
# Unicode-8.0 additions
- 'Hatr', # Hatran
- 'Hung', # Old Hungarian
-
+ "Hatr", # Hatran
+ "Hung", # Old Hungarian
# Unicode-9.0 additions
- 'Adlm', # Adlam
-
+ "Adlm", # Adlam
# Unicode-11.0 additions
- 'Rohg', # Hanifi Rohingya
- 'Sogo', # Old Sogdian
- 'Sogd', # Sogdian
-
+ "Rohg", # Hanifi Rohingya
+ "Sogo", # Old Sogdian
+ "Sogd", # Sogdian
# Unicode-12.0 additions
- 'Elym', # Elymaic
-
+ "Elym", # Elymaic
# Unicode-13.0 additions
- 'Chrs', # Chorasmian
- 'Yezi', # Yezidi
-
+ "Chrs", # Chorasmian
+ "Yezi", # Yezidi
# Unicode-14.0 additions
- 'Ougr', # Old Uyghur
+ "Ougr", # Old Uyghur
}
-def script_horizontal_direction(script_code, default=KeyError):
- """ Return "RTL" for scripts that contain right-to-left characters
+
+HorizDirection = Literal["RTL", "LTR"]
+T = TypeVar("T")
+
+
+@overload
+def script_horizontal_direction(script_code: str, default: T) -> HorizDirection | T:
+ ...
+
+
+@overload
+def script_horizontal_direction(
+ script_code: str, default: type[KeyError] = KeyError
+) -> HorizDirection:
+ ...
+
+
+def script_horizontal_direction(
+ script_code: str, default: T | type[KeyError] = KeyError
+) -> HorizDirection | T:
+ """Return "RTL" for scripts that contain right-to-left characters
according to the Bidi_Class property. Otherwise return "LTR".
"""
if script_code not in Scripts.NAMES:
if isinstance(default, type) and issubclass(default, KeyError):
raise default(script_code)
return default
- return str("RTL") if script_code in RTL_SCRIPTS else str("LTR")
+ return "RTL" if script_code in RTL_SCRIPTS else "LTR"
def block(char):
- """ Return the block property assigned to the Unicode character 'char'
+ """Return the block property assigned to the Unicode character 'char'
as a string.
>>> block("a")
@@ -231,23 +238,21 @@ def block(char):
"""
code = byteord(char)
i = bisect_right(Blocks.RANGES, code)
- return Blocks.VALUES[i-1]
+ return Blocks.VALUES[i - 1]
def ot_tags_from_script(script_code):
- """ Return a list of OpenType script tags associated with a given
+ """Return a list of OpenType script tags associated with a given
Unicode script code.
Return ['DFLT'] script tag for invalid/unknown script codes.
"""
+ if script_code in OTTags.SCRIPT_EXCEPTIONS:
+ return [OTTags.SCRIPT_EXCEPTIONS[script_code]]
+
if script_code not in Scripts.NAMES:
return [OTTags.DEFAULT_SCRIPT]
- script_tags = [
- OTTags.SCRIPT_EXCEPTIONS.get(
- script_code,
- script_code[0].lower() + script_code[1:]
- )
- ]
+ script_tags = [script_code[0].lower() + script_code[1:]]
if script_code in OTTags.NEW_SCRIPT_TAGS:
script_tags.extend(OTTags.NEW_SCRIPT_TAGS[script_code])
script_tags.reverse() # last in, first out
@@ -256,7 +261,7 @@ def ot_tags_from_script(script_code):
def ot_tag_to_script(tag):
- """ Return the Unicode script code for the given OpenType script tag, or
+ """Return the Unicode script code for the given OpenType script tag, or
None for "DFLT" tag or if there is no Unicode script associated with it.
Raises ValueError if the tag is invalid.
"""
@@ -278,6 +283,9 @@ def ot_tag_to_script(tag):
if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED:
return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag]
+ if tag in OTTags.SCRIPT_EXCEPTIONS_REVERSED:
+ return OTTags.SCRIPT_EXCEPTIONS_REVERSED[tag]
+
# This side of the conversion is fully algorithmic
# Any spaces at the end of the tag are replaced by repeating the last
@@ -285,7 +293,7 @@ def ot_tag_to_script(tag):
# Change first char to uppercase
script_code = tag[0].upper() + tag[1]
for i in range(2, 4):
- script_code += (script_code[i-1] if tag[i] == " " else tag[i])
+ script_code += script_code[i - 1] if tag[i] == " " else tag[i]
if script_code not in Scripts.NAMES:
return None
diff --git a/Lib/fontTools/varLib/__init__.py b/Lib/fontTools/varLib/__init__.py
index f1ca99ff..b130d5b2 100644
--- a/Lib/fontTools/varLib/__init__.py
+++ b/Lib/fontTools/varLib/__init__.py
@@ -21,10 +21,11 @@ API *will* change in near future.
from typing import List
from fontTools.misc.vector import Vector
from fontTools.misc.roundTools import noRound, otRound
+from fontTools.misc.fixedTools import floatToFixed as fl2fi
from fontTools.misc.textTools import Tag, tostr
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance
-from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
+from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates, dropImpliedOnCurvePoints
from fontTools.ttLib.tables.ttProgram import Program
from fontTools.ttLib.tables.TupleVariation import TupleVariation
from fontTools.ttLib.tables import otTables as ot
@@ -40,11 +41,12 @@ from fontTools.varLib.stat import buildVFStatTable
from fontTools.colorLib.builder import buildColrV1
from fontTools.colorLib.unbuilder import unbuildColrV1
from functools import partial
-from collections import OrderedDict, namedtuple
+from collections import OrderedDict, defaultdict, namedtuple
import os.path
import logging
from copy import deepcopy
from pprint import pformat
+from re import fullmatch
from .errors import VarLibError, VarLibValidationError
log = logging.getLogger("fontTools.varLib")
@@ -58,1121 +60,1393 @@ FEAVAR_FEATURETAG_LIB_KEY = "com.github.fonttools.varLib.featureVarsFeatureTag"
# Creation routines
#
+
def _add_fvar(font, axes, instances: List[InstanceDescriptor]):
- """
- Add 'fvar' table to font.
-
- axes is an ordered dictionary of DesignspaceAxis objects.
-
- instances is list of dictionary objects with 'location', 'stylename',
- and possibly 'postscriptfontname' entries.
- """
-
- assert axes
- assert isinstance(axes, OrderedDict)
-
- log.info("Generating fvar")
-
- fvar = newTable('fvar')
- nameTable = font['name']
-
- for a in axes.values():
- axis = Axis()
- axis.axisTag = Tag(a.tag)
- # TODO Skip axes that have no variation.
- axis.minValue, axis.defaultValue, axis.maxValue = a.minimum, a.default, a.maximum
- axis.axisNameID = nameTable.addMultilingualName(a.labelNames, font, minNameID=256)
- axis.flags = int(a.hidden)
- fvar.axes.append(axis)
-
- for instance in instances:
- # Filter out discrete axis locations
- coordinates = {name: value for name, value in instance.location.items() if name in axes}
-
- if "en" not in instance.localisedStyleName:
- if not instance.styleName:
- raise VarLibValidationError(
- f"Instance at location '{coordinates}' must have a default English "
- "style name ('stylename' attribute on the instance element or a "
- "stylename element with an 'xml:lang=\"en\"' attribute)."
- )
- localisedStyleName = dict(instance.localisedStyleName)
- localisedStyleName["en"] = tostr(instance.styleName)
- else:
- localisedStyleName = instance.localisedStyleName
-
- psname = instance.postScriptFontName
-
- inst = NamedInstance()
- inst.subfamilyNameID = nameTable.addMultilingualName(localisedStyleName)
- if psname is not None:
- psname = tostr(psname)
- inst.postscriptNameID = nameTable.addName(psname)
- inst.coordinates = {axes[k].tag:axes[k].map_backward(v) for k,v in coordinates.items()}
- #inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()}
- fvar.instances.append(inst)
-
- assert "fvar" not in font
- font['fvar'] = fvar
-
- return fvar
-
-def _add_avar(font, axes):
- """
- Add 'avar' table to font.
-
- axes is an ordered dictionary of AxisDescriptor objects.
- """
-
- assert axes
- assert isinstance(axes, OrderedDict)
-
- log.info("Generating avar")
-
- avar = newTable('avar')
-
- interesting = False
- for axis in axes.values():
- # Currently, some rasterizers require that the default value maps
- # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment
- # maps, even when the default normalization mapping for the axis
- # was not modified.
- # https://github.com/googlei18n/fontmake/issues/295
- # https://github.com/fonttools/fonttools/issues/1011
- # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed
- curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}
- if not axis.map:
- continue
-
- items = sorted(axis.map)
- keys = [item[0] for item in items]
- vals = [item[1] for item in items]
-
- # Current avar requirements. We don't have to enforce
- # these on the designer and can deduce some ourselves,
- # but for now just enforce them.
- if axis.minimum != min(keys):
- raise VarLibValidationError(
- f"Axis '{axis.name}': there must be a mapping for the axis minimum "
- f"value {axis.minimum} and it must be the lowest input mapping value."
- )
- if axis.maximum != max(keys):
- raise VarLibValidationError(
- f"Axis '{axis.name}': there must be a mapping for the axis maximum "
- f"value {axis.maximum} and it must be the highest input mapping value."
- )
- if axis.default not in keys:
- raise VarLibValidationError(
- f"Axis '{axis.name}': there must be a mapping for the axis default "
- f"value {axis.default}."
- )
- # No duplicate input values (output values can be >= their preceeding value).
- if len(set(keys)) != len(keys):
- raise VarLibValidationError(
- f"Axis '{axis.name}': All axis mapping input='...' values must be "
- "unique, but we found duplicates."
- )
- # Ascending values
- if sorted(vals) != vals:
- raise VarLibValidationError(
- f"Axis '{axis.name}': mapping output values must be in ascending order."
- )
-
- keys_triple = (axis.minimum, axis.default, axis.maximum)
- vals_triple = tuple(axis.map_forward(v) for v in keys_triple)
-
- keys = [models.normalizeValue(v, keys_triple) for v in keys]
- vals = [models.normalizeValue(v, vals_triple) for v in vals]
-
- if all(k == v for k, v in zip(keys, vals)):
- continue
- interesting = True
-
- curve.update(zip(keys, vals))
-
- assert 0.0 in curve and curve[0.0] == 0.0
- assert -1.0 not in curve or curve[-1.0] == -1.0
- assert +1.0 not in curve or curve[+1.0] == +1.0
- # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0})
-
- assert "avar" not in font
- if not interesting:
- log.info("No need for avar")
- avar = None
- else:
- font['avar'] = avar
-
- return avar
+ """
+ Add 'fvar' table to font.
+
+ axes is an ordered dictionary of DesignspaceAxis objects.
+
+ instances is list of dictionary objects with 'location', 'stylename',
+ and possibly 'postscriptfontname' entries.
+ """
+
+ assert axes
+ assert isinstance(axes, OrderedDict)
+
+ log.info("Generating fvar")
+
+ fvar = newTable("fvar")
+ nameTable = font["name"]
+
+ for a in axes.values():
+ axis = Axis()
+ axis.axisTag = Tag(a.tag)
+ # TODO Skip axes that have no variation.
+ axis.minValue, axis.defaultValue, axis.maxValue = (
+ a.minimum,
+ a.default,
+ a.maximum,
+ )
+ axis.axisNameID = nameTable.addMultilingualName(
+ a.labelNames, font, minNameID=256
+ )
+ axis.flags = int(a.hidden)
+ fvar.axes.append(axis)
+
+ for instance in instances:
+ # Filter out discrete axis locations
+ coordinates = {
+ name: value for name, value in instance.location.items() if name in axes
+ }
+
+ if "en" not in instance.localisedStyleName:
+ if not instance.styleName:
+ raise VarLibValidationError(
+ f"Instance at location '{coordinates}' must have a default English "
+ "style name ('stylename' attribute on the instance element or a "
+ "stylename element with an 'xml:lang=\"en\"' attribute)."
+ )
+ localisedStyleName = dict(instance.localisedStyleName)
+ localisedStyleName["en"] = tostr(instance.styleName)
+ else:
+ localisedStyleName = instance.localisedStyleName
+
+ psname = instance.postScriptFontName
+
+ inst = NamedInstance()
+ inst.subfamilyNameID = nameTable.addMultilingualName(localisedStyleName)
+ if psname is not None:
+ psname = tostr(psname)
+ inst.postscriptNameID = nameTable.addName(psname)
+ inst.coordinates = {
+ axes[k].tag: axes[k].map_backward(v) for k, v in coordinates.items()
+ }
+ # inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()}
+ fvar.instances.append(inst)
+
+ assert "fvar" not in font
+ font["fvar"] = fvar
+
+ return fvar
+
+
+def _add_avar(font, axes, mappings, axisTags):
+ """
+ Add 'avar' table to font.
+
+ axes is an ordered dictionary of AxisDescriptor objects.
+ """
+
+ assert axes
+ assert isinstance(axes, OrderedDict)
+
+ log.info("Generating avar")
+
+ avar = newTable("avar")
+
+ interesting = False
+ vals_triples = {}
+ for axis in axes.values():
+ # Currently, some rasterizers require that the default value maps
+ # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment
+ # maps, even when the default normalization mapping for the axis
+ # was not modified.
+ # https://github.com/googlei18n/fontmake/issues/295
+ # https://github.com/fonttools/fonttools/issues/1011
+ # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed
+ curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}
+
+ keys_triple = (axis.minimum, axis.default, axis.maximum)
+ vals_triple = tuple(axis.map_forward(v) for v in keys_triple)
+ vals_triples[axis.tag] = vals_triple
+
+ if not axis.map:
+ continue
+
+ items = sorted(axis.map)
+ keys = [item[0] for item in items]
+ vals = [item[1] for item in items]
+
+ # Current avar requirements. We don't have to enforce
+ # these on the designer and can deduce some ourselves,
+ # but for now just enforce them.
+ if axis.minimum != min(keys):
+ raise VarLibValidationError(
+ f"Axis '{axis.name}': there must be a mapping for the axis minimum "
+ f"value {axis.minimum} and it must be the lowest input mapping value."
+ )
+ if axis.maximum != max(keys):
+ raise VarLibValidationError(
+ f"Axis '{axis.name}': there must be a mapping for the axis maximum "
+ f"value {axis.maximum} and it must be the highest input mapping value."
+ )
+ if axis.default not in keys:
+ raise VarLibValidationError(
+ f"Axis '{axis.name}': there must be a mapping for the axis default "
+ f"value {axis.default}."
+ )
+ # No duplicate input values (output values can be >= their preceeding value).
+ if len(set(keys)) != len(keys):
+ raise VarLibValidationError(
+ f"Axis '{axis.name}': All axis mapping input='...' values must be "
+ "unique, but we found duplicates."
+ )
+ # Ascending values
+ if sorted(vals) != vals:
+ raise VarLibValidationError(
+ f"Axis '{axis.name}': mapping output values must be in ascending order."
+ )
+
+ keys = [models.normalizeValue(v, keys_triple) for v in keys]
+ vals = [models.normalizeValue(v, vals_triple) for v in vals]
+
+ if all(k == v for k, v in zip(keys, vals)):
+ continue
+ interesting = True
+
+ curve.update(zip(keys, vals))
+
+ assert 0.0 in curve and curve[0.0] == 0.0
+ assert -1.0 not in curve or curve[-1.0] == -1.0
+ assert +1.0 not in curve or curve[+1.0] == +1.0
+ # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0})
+
+ if mappings:
+ interesting = True
+
+ hiddenAxes = [axis for axis in axes.values() if axis.hidden]
+
+ inputLocations = [
+ {
+ axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag])
+ for name, v in mapping.inputLocation.items()
+ }
+ for mapping in mappings
+ ]
+ outputLocations = [
+ {
+ axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag])
+ for name, v in mapping.outputLocation.items()
+ }
+ for mapping in mappings
+ ]
+ assert len(inputLocations) == len(outputLocations)
+
+ # If base-master is missing, insert it at zero location.
+ if not any(all(v == 0 for k, v in loc.items()) for loc in inputLocations):
+ inputLocations.insert(0, {})
+ outputLocations.insert(0, {})
+
+ model = models.VariationModel(inputLocations, axisTags)
+ storeBuilder = varStore.OnlineVarStoreBuilder(axisTags)
+ storeBuilder.setModel(model)
+ varIdxes = {}
+ for tag in axisTags:
+ masterValues = []
+ for vo, vi in zip(outputLocations, inputLocations):
+ if tag not in vo:
+ masterValues.append(0)
+ continue
+ v = vo[tag] - vi.get(tag, 0)
+ masterValues.append(fl2fi(v, 14))
+ varIdxes[tag] = storeBuilder.storeMasters(masterValues)[1]
+
+ store = storeBuilder.finish()
+ optimized = store.optimize()
+ varIdxes = {axis: optimized[value] for axis, value in varIdxes.items()}
+
+ varIdxMap = builder.buildDeltaSetIndexMap(varIdxes[t] for t in axisTags)
+
+ avar.majorVersion = 2
+ avar.table = ot.avar()
+ avar.table.VarIdxMap = varIdxMap
+ avar.table.VarStore = store
+
+ assert "avar" not in font
+ if not interesting:
+ log.info("No need for avar")
+ avar = None
+ else:
+ font["avar"] = avar
+
+ return avar
+
def _add_stat(font):
- # Note: this function only gets called by old code that calls `build()`
- # directly. Newer code that wants to benefit from STAT data from the
- # designspace should call `build_many()`
+ # Note: this function only gets called by old code that calls `build()`
+ # directly. Newer code that wants to benefit from STAT data from the
+ # designspace should call `build_many()`
+
+ if "STAT" in font:
+ return
+
+ from ..otlLib.builder import buildStatTable
- if "STAT" in font:
- return
+ fvarTable = font["fvar"]
+ axes = [dict(tag=a.axisTag, name=a.axisNameID) for a in fvarTable.axes]
+ buildStatTable(font, axes)
- from ..otlLib.builder import buildStatTable
- fvarTable = font['fvar']
- axes = [dict(tag=a.axisTag, name=a.axisNameID) for a in fvarTable.axes]
- buildStatTable(font, axes)
-_MasterData = namedtuple('_MasterData', ['glyf', 'hMetrics', 'vMetrics'])
+_MasterData = namedtuple("_MasterData", ["glyf", "hMetrics", "vMetrics"])
+
def _add_gvar(font, masterModel, master_ttfs, tolerance=0.5, optimize=True):
- if tolerance < 0:
- raise ValueError("`tolerance` must be a positive number.")
-
- log.info("Generating gvar")
- assert "gvar" not in font
- gvar = font["gvar"] = newTable('gvar')
- glyf = font['glyf']
- defaultMasterIndex = masterModel.reverseMapping[0]
-
- master_datas = [_MasterData(m['glyf'],
- m['hmtx'].metrics,
- getattr(m.get('vmtx'), 'metrics', None))
- for m in master_ttfs]
-
- for glyph in font.getGlyphOrder():
- log.debug("building gvar for glyph '%s'", glyph)
- isComposite = glyf[glyph].isComposite()
-
- allData = [
- m.glyf._getCoordinatesAndControls(glyph, m.hMetrics, m.vMetrics)
- for m in master_datas
- ]
-
- if allData[defaultMasterIndex][1].numberOfContours != 0:
- # If the default master is not empty, interpret empty non-default masters
- # as missing glyphs from a sparse master
- allData = [
- d if d is not None and d[1].numberOfContours != 0 else None
- for d in allData
- ]
-
- model, allData = masterModel.getSubModel(allData)
-
- allCoords = [d[0] for d in allData]
- allControls = [d[1] for d in allData]
- control = allControls[0]
- if not models.allEqual(allControls):
- log.warning("glyph %s has incompatible masters; skipping" % glyph)
- continue
- del allControls
-
- # Update gvar
- gvar.variations[glyph] = []
- deltas = model.getDeltas(allCoords, round=partial(GlyphCoordinates.__round__, round=round))
- supports = model.supports
- assert len(deltas) == len(supports)
-
- # Prepare for IUP optimization
- origCoords = deltas[0]
- endPts = control.endPts
-
- for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])):
- if all(v == 0 for v in delta.array) and not isComposite:
- continue
- var = TupleVariation(support, delta)
- if optimize:
- delta_opt = iup_delta_optimize(delta, origCoords, endPts, tolerance=tolerance)
-
- if None in delta_opt:
- """In composite glyphs, there should be one 0 entry
- to make sure the gvar entry is written to the font.
-
- This is to work around an issue with macOS 10.14 and can be
- removed once the behaviour of macOS is changed.
-
- https://github.com/fonttools/fonttools/issues/1381
- """
- if all(d is None for d in delta_opt):
- delta_opt = [(0, 0)] + [None] * (len(delta_opt) - 1)
- # Use "optimized" version only if smaller...
- var_opt = TupleVariation(support, delta_opt)
-
- axis_tags = sorted(support.keys()) # Shouldn't matter that this is different from fvar...?
- tupleData, auxData = var.compile(axis_tags)
- unoptimized_len = len(tupleData) + len(auxData)
- tupleData, auxData = var_opt.compile(axis_tags)
- optimized_len = len(tupleData) + len(auxData)
-
- if optimized_len < unoptimized_len:
- var = var_opt
-
- gvar.variations[glyph].append(var)
+ if tolerance < 0:
+ raise ValueError("`tolerance` must be a positive number.")
+
+ log.info("Generating gvar")
+ assert "gvar" not in font
+ gvar = font["gvar"] = newTable("gvar")
+ glyf = font["glyf"]
+ defaultMasterIndex = masterModel.reverseMapping[0]
+
+ master_datas = [
+ _MasterData(
+ m["glyf"], m["hmtx"].metrics, getattr(m.get("vmtx"), "metrics", None)
+ )
+ for m in master_ttfs
+ ]
+
+ for glyph in font.getGlyphOrder():
+ log.debug("building gvar for glyph '%s'", glyph)
+ isComposite = glyf[glyph].isComposite()
+
+ allData = [
+ m.glyf._getCoordinatesAndControls(glyph, m.hMetrics, m.vMetrics)
+ for m in master_datas
+ ]
+
+ if allData[defaultMasterIndex][1].numberOfContours != 0:
+ # If the default master is not empty, interpret empty non-default masters
+ # as missing glyphs from a sparse master
+ allData = [
+ d if d is not None and d[1].numberOfContours != 0 else None
+ for d in allData
+ ]
+
+ model, allData = masterModel.getSubModel(allData)
+
+ allCoords = [d[0] for d in allData]
+ allControls = [d[1] for d in allData]
+ control = allControls[0]
+ if not models.allEqual(allControls):
+ log.warning("glyph %s has incompatible masters; skipping" % glyph)
+ continue
+ del allControls
+
+ # Update gvar
+ gvar.variations[glyph] = []
+ deltas = model.getDeltas(
+ allCoords, round=partial(GlyphCoordinates.__round__, round=round)
+ )
+ supports = model.supports
+ assert len(deltas) == len(supports)
+
+ # Prepare for IUP optimization
+ origCoords = deltas[0]
+ endPts = control.endPts
+
+ for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])):
+ if all(v == 0 for v in delta.array) and not isComposite:
+ continue
+ var = TupleVariation(support, delta)
+ if optimize:
+ delta_opt = iup_delta_optimize(
+ delta, origCoords, endPts, tolerance=tolerance
+ )
+
+ if None in delta_opt:
+ """In composite glyphs, there should be one 0 entry
+ to make sure the gvar entry is written to the font.
+
+ This is to work around an issue with macOS 10.14 and can be
+ removed once the behaviour of macOS is changed.
+
+ https://github.com/fonttools/fonttools/issues/1381
+ """
+ if all(d is None for d in delta_opt):
+ delta_opt = [(0, 0)] + [None] * (len(delta_opt) - 1)
+ # Use "optimized" version only if smaller...
+ var_opt = TupleVariation(support, delta_opt)
+
+ axis_tags = sorted(
+ support.keys()
+ ) # Shouldn't matter that this is different from fvar...?
+ tupleData, auxData = var.compile(axis_tags)
+ unoptimized_len = len(tupleData) + len(auxData)
+ tupleData, auxData = var_opt.compile(axis_tags)
+ optimized_len = len(tupleData) + len(auxData)
+
+ if optimized_len < unoptimized_len:
+ var = var_opt
+
+ gvar.variations[glyph].append(var)
def _remove_TTHinting(font):
- for tag in ("cvar", "cvt ", "fpgm", "prep"):
- if tag in font:
- del font[tag]
- maxp = font['maxp']
- for attr in ("maxTwilightPoints", "maxStorage", "maxFunctionDefs", "maxInstructionDefs", "maxStackElements", "maxSizeOfInstructions"):
- setattr(maxp, attr, 0)
- maxp.maxZones = 1
- font["glyf"].removeHinting()
- # TODO: Modify gasp table to deactivate gridfitting for all ranges?
+ for tag in ("cvar", "cvt ", "fpgm", "prep"):
+ if tag in font:
+ del font[tag]
+ maxp = font["maxp"]
+ for attr in (
+ "maxTwilightPoints",
+ "maxStorage",
+ "maxFunctionDefs",
+ "maxInstructionDefs",
+ "maxStackElements",
+ "maxSizeOfInstructions",
+ ):
+ setattr(maxp, attr, 0)
+ maxp.maxZones = 1
+ font["glyf"].removeHinting()
+ # TODO: Modify gasp table to deactivate gridfitting for all ranges?
+
def _merge_TTHinting(font, masterModel, master_ttfs):
+ log.info("Merging TT hinting")
+ assert "cvar" not in font
+
+ # Check that the existing hinting is compatible
+
+ # fpgm and prep table
+
+ for tag in ("fpgm", "prep"):
+ all_pgms = [m[tag].program for m in master_ttfs if tag in m]
+ if not all_pgms:
+ continue
+ font_pgm = getattr(font.get(tag), "program", None)
+ if any(pgm != font_pgm for pgm in all_pgms):
+ log.warning(
+ "Masters have incompatible %s tables, hinting is discarded." % tag
+ )
+ _remove_TTHinting(font)
+ return
+
+ # glyf table
+
+ font_glyf = font["glyf"]
+ master_glyfs = [m["glyf"] for m in master_ttfs]
+ for name, glyph in font_glyf.glyphs.items():
+ all_pgms = [getattr(glyf.get(name), "program", None) for glyf in master_glyfs]
+ if not any(all_pgms):
+ continue
+ glyph.expand(font_glyf)
+ font_pgm = getattr(glyph, "program", None)
+ if any(pgm != font_pgm for pgm in all_pgms if pgm):
+ log.warning(
+ "Masters have incompatible glyph programs in glyph '%s', hinting is discarded."
+ % name
+ )
+ # TODO Only drop hinting from this glyph.
+ _remove_TTHinting(font)
+ return
+
+ # cvt table
+
+ all_cvs = [Vector(m["cvt "].values) if "cvt " in m else None for m in master_ttfs]
+
+ nonNone_cvs = models.nonNone(all_cvs)
+ if not nonNone_cvs:
+ # There is no cvt table to make a cvar table from, we're done here.
+ return
+
+ if not models.allEqual(len(c) for c in nonNone_cvs):
+ log.warning("Masters have incompatible cvt tables, hinting is discarded.")
+ _remove_TTHinting(font)
+ return
+
+ variations = []
+ deltas, supports = masterModel.getDeltasAndSupports(
+ all_cvs, round=round
+ ) # builtin round calls into Vector.__round__, which uses builtin round as we like
+ for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])):
+ if all(v == 0 for v in delta):
+ continue
+ var = TupleVariation(support, delta)
+ variations.append(var)
+
+ # We can build the cvar table now.
+ if variations:
+ cvar = font["cvar"] = newTable("cvar")
+ cvar.version = 1
+ cvar.variations = variations
+
+
+_MetricsFields = namedtuple(
+ "_MetricsFields",
+ ["tableTag", "metricsTag", "sb1", "sb2", "advMapping", "vOrigMapping"],
+)
+
+HVAR_FIELDS = _MetricsFields(
+ tableTag="HVAR",
+ metricsTag="hmtx",
+ sb1="LsbMap",
+ sb2="RsbMap",
+ advMapping="AdvWidthMap",
+ vOrigMapping=None,
+)
+
+VVAR_FIELDS = _MetricsFields(
+ tableTag="VVAR",
+ metricsTag="vmtx",
+ sb1="TsbMap",
+ sb2="BsbMap",
+ advMapping="AdvHeightMap",
+ vOrigMapping="VOrgMap",
+)
- log.info("Merging TT hinting")
- assert "cvar" not in font
-
- # Check that the existing hinting is compatible
-
- # fpgm and prep table
-
- for tag in ("fpgm", "prep"):
- all_pgms = [m[tag].program for m in master_ttfs if tag in m]
- if not all_pgms:
- continue
- font_pgm = getattr(font.get(tag), 'program', None)
- if any(pgm != font_pgm for pgm in all_pgms):
- log.warning("Masters have incompatible %s tables, hinting is discarded." % tag)
- _remove_TTHinting(font)
- return
-
- # glyf table
-
- font_glyf = font['glyf']
- master_glyfs = [m['glyf'] for m in master_ttfs]
- for name, glyph in font_glyf.glyphs.items():
- all_pgms = [
- getattr(glyf.get(name), 'program', None)
- for glyf in master_glyfs
- ]
- if not any(all_pgms):
- continue
- glyph.expand(font_glyf)
- font_pgm = getattr(glyph, 'program', None)
- if any(pgm != font_pgm for pgm in all_pgms if pgm):
- log.warning("Masters have incompatible glyph programs in glyph '%s', hinting is discarded." % name)
- # TODO Only drop hinting from this glyph.
- _remove_TTHinting(font)
- return
-
- # cvt table
-
- all_cvs = [Vector(m["cvt "].values) if 'cvt ' in m else None
- for m in master_ttfs]
-
- nonNone_cvs = models.nonNone(all_cvs)
- if not nonNone_cvs:
- # There is no cvt table to make a cvar table from, we're done here.
- return
-
- if not models.allEqual(len(c) for c in nonNone_cvs):
- log.warning("Masters have incompatible cvt tables, hinting is discarded.")
- _remove_TTHinting(font)
- return
-
- variations = []
- deltas, supports = masterModel.getDeltasAndSupports(all_cvs, round=round) # builtin round calls into Vector.__round__, which uses builtin round as we like
- for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])):
- if all(v == 0 for v in delta):
- continue
- var = TupleVariation(support, delta)
- variations.append(var)
-
- # We can build the cvar table now.
- if variations:
- cvar = font["cvar"] = newTable('cvar')
- cvar.version = 1
- cvar.variations = variations
-
-
-_MetricsFields = namedtuple('_MetricsFields',
- ['tableTag', 'metricsTag', 'sb1', 'sb2', 'advMapping', 'vOrigMapping'])
-
-HVAR_FIELDS = _MetricsFields(tableTag='HVAR', metricsTag='hmtx', sb1='LsbMap',
- sb2='RsbMap', advMapping='AdvWidthMap', vOrigMapping=None)
-
-VVAR_FIELDS = _MetricsFields(tableTag='VVAR', metricsTag='vmtx', sb1='TsbMap',
- sb2='BsbMap', advMapping='AdvHeightMap', vOrigMapping='VOrgMap')
def _add_HVAR(font, masterModel, master_ttfs, axisTags):
- _add_VHVAR(font, masterModel, master_ttfs, axisTags, HVAR_FIELDS)
+ _add_VHVAR(font, masterModel, master_ttfs, axisTags, HVAR_FIELDS)
+
def _add_VVAR(font, masterModel, master_ttfs, axisTags):
- _add_VHVAR(font, masterModel, master_ttfs, axisTags, VVAR_FIELDS)
+ _add_VHVAR(font, masterModel, master_ttfs, axisTags, VVAR_FIELDS)
+
def _add_VHVAR(font, masterModel, master_ttfs, axisTags, tableFields):
+ tableTag = tableFields.tableTag
+ assert tableTag not in font
+ log.info("Generating " + tableTag)
+ VHVAR = newTable(tableTag)
+ tableClass = getattr(ot, tableTag)
+ vhvar = VHVAR.table = tableClass()
+ vhvar.Version = 0x00010000
+
+ glyphOrder = font.getGlyphOrder()
+
+ # Build list of source font advance widths for each glyph
+ metricsTag = tableFields.metricsTag
+ advMetricses = [m[metricsTag].metrics for m in master_ttfs]
+
+ # Build list of source font vertical origin coords for each glyph
+ if tableTag == "VVAR" and "VORG" in master_ttfs[0]:
+ vOrigMetricses = [m["VORG"].VOriginRecords for m in master_ttfs]
+ defaultYOrigs = [m["VORG"].defaultVertOriginY for m in master_ttfs]
+ vOrigMetricses = list(zip(vOrigMetricses, defaultYOrigs))
+ else:
+ vOrigMetricses = None
+
+ metricsStore, advanceMapping, vOrigMapping = _get_advance_metrics(
+ font,
+ masterModel,
+ master_ttfs,
+ axisTags,
+ glyphOrder,
+ advMetricses,
+ vOrigMetricses,
+ )
+
+ vhvar.VarStore = metricsStore
+ if advanceMapping is None:
+ setattr(vhvar, tableFields.advMapping, None)
+ else:
+ setattr(vhvar, tableFields.advMapping, advanceMapping)
+ if vOrigMapping is not None:
+ setattr(vhvar, tableFields.vOrigMapping, vOrigMapping)
+ setattr(vhvar, tableFields.sb1, None)
+ setattr(vhvar, tableFields.sb2, None)
+
+ font[tableTag] = VHVAR
+ return
+
+
+def _get_advance_metrics(
+ font,
+ masterModel,
+ master_ttfs,
+ axisTags,
+ glyphOrder,
+ advMetricses,
+ vOrigMetricses=None,
+):
+ vhAdvanceDeltasAndSupports = {}
+ vOrigDeltasAndSupports = {}
+ # HACK: we treat width 65535 as a sentinel value to signal that a glyph
+ # from a non-default master should not participate in computing {H,V}VAR,
+ # as if it were missing. Allows to variate other glyph-related data independently
+ # from glyph metrics
+ sparse_advance = 0xFFFF
+ for glyph in glyphOrder:
+ vhAdvances = [
+ metrics[glyph][0]
+ if glyph in metrics and metrics[glyph][0] != sparse_advance
+ else None
+ for metrics in advMetricses
+ ]
+ vhAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(
+ vhAdvances, round=round
+ )
+
+ singleModel = models.allEqual(id(v[1]) for v in vhAdvanceDeltasAndSupports.values())
+
+ if vOrigMetricses:
+ singleModel = False
+ for glyph in glyphOrder:
+ # We need to supply a vOrigs tuple with non-None default values
+ # for each glyph. vOrigMetricses contains values only for those
+ # glyphs which have a non-default vOrig.
+ vOrigs = [
+ metrics[glyph] if glyph in metrics else defaultVOrig
+ for metrics, defaultVOrig in vOrigMetricses
+ ]
+ vOrigDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(
+ vOrigs, round=round
+ )
+
+ directStore = None
+ if singleModel:
+ # Build direct mapping
+ supports = next(iter(vhAdvanceDeltasAndSupports.values()))[1][1:]
+ varTupleList = builder.buildVarRegionList(supports, axisTags)
+ varTupleIndexes = list(range(len(supports)))
+ varData = builder.buildVarData(varTupleIndexes, [], optimize=False)
+ for glyphName in glyphOrder:
+ varData.addItem(vhAdvanceDeltasAndSupports[glyphName][0], round=noRound)
+ varData.optimize()
+ directStore = builder.buildVarStore(varTupleList, [varData])
+
+ # Build optimized indirect mapping
+ storeBuilder = varStore.OnlineVarStoreBuilder(axisTags)
+ advMapping = {}
+ for glyphName in glyphOrder:
+ deltas, supports = vhAdvanceDeltasAndSupports[glyphName]
+ storeBuilder.setSupports(supports)
+ advMapping[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound)
+
+ if vOrigMetricses:
+ vOrigMap = {}
+ for glyphName in glyphOrder:
+ deltas, supports = vOrigDeltasAndSupports[glyphName]
+ storeBuilder.setSupports(supports)
+ vOrigMap[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound)
+
+ indirectStore = storeBuilder.finish()
+ mapping2 = indirectStore.optimize(use_NO_VARIATION_INDEX=False)
+ advMapping = [mapping2[advMapping[g]] for g in glyphOrder]
+ advanceMapping = builder.buildVarIdxMap(advMapping, glyphOrder)
+
+ if vOrigMetricses:
+ vOrigMap = [mapping2[vOrigMap[g]] for g in glyphOrder]
+
+ useDirect = False
+ vOrigMapping = None
+ if directStore:
+ # Compile both, see which is more compact
+
+ writer = OTTableWriter()
+ directStore.compile(writer, font)
+ directSize = len(writer.getAllData())
+
+ writer = OTTableWriter()
+ indirectStore.compile(writer, font)
+ advanceMapping.compile(writer, font)
+ indirectSize = len(writer.getAllData())
+
+ useDirect = directSize < indirectSize
+
+ if useDirect:
+ metricsStore = directStore
+ advanceMapping = None
+ else:
+ metricsStore = indirectStore
+ if vOrigMetricses:
+ vOrigMapping = builder.buildVarIdxMap(vOrigMap, glyphOrder)
+
+ return metricsStore, advanceMapping, vOrigMapping
- tableTag = tableFields.tableTag
- assert tableTag not in font
- log.info("Generating " + tableTag)
- VHVAR = newTable(tableTag)
- tableClass = getattr(ot, tableTag)
- vhvar = VHVAR.table = tableClass()
- vhvar.Version = 0x00010000
-
- glyphOrder = font.getGlyphOrder()
-
- # Build list of source font advance widths for each glyph
- metricsTag = tableFields.metricsTag
- advMetricses = [m[metricsTag].metrics for m in master_ttfs]
-
- # Build list of source font vertical origin coords for each glyph
- if tableTag == 'VVAR' and 'VORG' in master_ttfs[0]:
- vOrigMetricses = [m['VORG'].VOriginRecords for m in master_ttfs]
- defaultYOrigs = [m['VORG'].defaultVertOriginY for m in master_ttfs]
- vOrigMetricses = list(zip(vOrigMetricses, defaultYOrigs))
- else:
- vOrigMetricses = None
-
- metricsStore, advanceMapping, vOrigMapping = _get_advance_metrics(font,
- masterModel, master_ttfs, axisTags, glyphOrder, advMetricses,
- vOrigMetricses)
-
- vhvar.VarStore = metricsStore
- if advanceMapping is None:
- setattr(vhvar, tableFields.advMapping, None)
- else:
- setattr(vhvar, tableFields.advMapping, advanceMapping)
- if vOrigMapping is not None:
- setattr(vhvar, tableFields.vOrigMapping, vOrigMapping)
- setattr(vhvar, tableFields.sb1, None)
- setattr(vhvar, tableFields.sb2, None)
-
- font[tableTag] = VHVAR
- return
-
-def _get_advance_metrics(font, masterModel, master_ttfs,
- axisTags, glyphOrder, advMetricses, vOrigMetricses=None):
-
- vhAdvanceDeltasAndSupports = {}
- vOrigDeltasAndSupports = {}
- for glyph in glyphOrder:
- vhAdvances = [metrics[glyph][0] if glyph in metrics else None for metrics in advMetricses]
- vhAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(vhAdvances, round=round)
-
- singleModel = models.allEqual(id(v[1]) for v in vhAdvanceDeltasAndSupports.values())
-
- if vOrigMetricses:
- singleModel = False
- for glyph in glyphOrder:
- # We need to supply a vOrigs tuple with non-None default values
- # for each glyph. vOrigMetricses contains values only for those
- # glyphs which have a non-default vOrig.
- vOrigs = [metrics[glyph] if glyph in metrics else defaultVOrig
- for metrics, defaultVOrig in vOrigMetricses]
- vOrigDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(vOrigs, round=round)
-
- directStore = None
- if singleModel:
- # Build direct mapping
- supports = next(iter(vhAdvanceDeltasAndSupports.values()))[1][1:]
- varTupleList = builder.buildVarRegionList(supports, axisTags)
- varTupleIndexes = list(range(len(supports)))
- varData = builder.buildVarData(varTupleIndexes, [], optimize=False)
- for glyphName in glyphOrder:
- varData.addItem(vhAdvanceDeltasAndSupports[glyphName][0], round=noRound)
- varData.optimize()
- directStore = builder.buildVarStore(varTupleList, [varData])
-
- # Build optimized indirect mapping
- storeBuilder = varStore.OnlineVarStoreBuilder(axisTags)
- advMapping = {}
- for glyphName in glyphOrder:
- deltas, supports = vhAdvanceDeltasAndSupports[glyphName]
- storeBuilder.setSupports(supports)
- advMapping[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound)
-
- if vOrigMetricses:
- vOrigMap = {}
- for glyphName in glyphOrder:
- deltas, supports = vOrigDeltasAndSupports[glyphName]
- storeBuilder.setSupports(supports)
- vOrigMap[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound)
-
- indirectStore = storeBuilder.finish()
- mapping2 = indirectStore.optimize(use_NO_VARIATION_INDEX=False)
- advMapping = [mapping2[advMapping[g]] for g in glyphOrder]
- advanceMapping = builder.buildVarIdxMap(advMapping, glyphOrder)
-
- if vOrigMetricses:
- vOrigMap = [mapping2[vOrigMap[g]] for g in glyphOrder]
-
- useDirect = False
- vOrigMapping = None
- if directStore:
- # Compile both, see which is more compact
-
- writer = OTTableWriter()
- directStore.compile(writer, font)
- directSize = len(writer.getAllData())
-
- writer = OTTableWriter()
- indirectStore.compile(writer, font)
- advanceMapping.compile(writer, font)
- indirectSize = len(writer.getAllData())
-
- useDirect = directSize < indirectSize
-
- if useDirect:
- metricsStore = directStore
- advanceMapping = None
- else:
- metricsStore = indirectStore
- if vOrigMetricses:
- vOrigMapping = builder.buildVarIdxMap(vOrigMap, glyphOrder)
-
- return metricsStore, advanceMapping, vOrigMapping
def _add_MVAR(font, masterModel, master_ttfs, axisTags):
-
- log.info("Generating MVAR")
-
- store_builder = varStore.OnlineVarStoreBuilder(axisTags)
-
- records = []
- lastTableTag = None
- fontTable = None
- tables = None
- # HACK: we need to special-case post.underlineThickness and .underlinePosition
- # and unilaterally/arbitrarily define a sentinel value to distinguish the case
- # when a post table is present in a given master simply because that's where
- # the glyph names in TrueType must be stored, but the underline values are not
- # meant to be used for building MVAR's deltas. The value of -0x8000 (-36768)
- # the minimum FWord (int16) value, was chosen for its unlikelyhood to appear
- # in real-world underline position/thickness values.
- specialTags = {"unds": -0x8000, "undo": -0x8000}
-
- for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]):
- # For each tag, fetch the associated table from all fonts (or not when we are
- # still looking at a tag from the same tables) and set up the variation model
- # for them.
- if tableTag != lastTableTag:
- tables = fontTable = None
- if tableTag in font:
- fontTable = font[tableTag]
- tables = []
- for master in master_ttfs:
- if tableTag not in master or (
- tag in specialTags
- and getattr(master[tableTag], itemName) == specialTags[tag]
- ):
- tables.append(None)
- else:
- tables.append(master[tableTag])
- model, tables = masterModel.getSubModel(tables)
- store_builder.setModel(model)
- lastTableTag = tableTag
-
- if tables is None: # Tag not applicable to the master font.
- continue
-
- # TODO support gasp entries
-
- master_values = [getattr(table, itemName) for table in tables]
- if models.allEqual(master_values):
- base, varIdx = master_values[0], None
- else:
- base, varIdx = store_builder.storeMasters(master_values)
- setattr(fontTable, itemName, base)
-
- if varIdx is None:
- continue
- log.info(' %s: %s.%s %s', tag, tableTag, itemName, master_values)
- rec = ot.MetricsValueRecord()
- rec.ValueTag = tag
- rec.VarIdx = varIdx
- records.append(rec)
-
- assert "MVAR" not in font
- if records:
- store = store_builder.finish()
- # Optimize
- mapping = store.optimize()
- for rec in records:
- rec.VarIdx = mapping[rec.VarIdx]
-
- MVAR = font["MVAR"] = newTable('MVAR')
- mvar = MVAR.table = ot.MVAR()
- mvar.Version = 0x00010000
- mvar.Reserved = 0
- mvar.VarStore = store
- # XXX these should not be hard-coded but computed automatically
- mvar.ValueRecordSize = 8
- mvar.ValueRecordCount = len(records)
- mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag)
+ log.info("Generating MVAR")
+
+ store_builder = varStore.OnlineVarStoreBuilder(axisTags)
+
+ records = []
+ lastTableTag = None
+ fontTable = None
+ tables = None
+ # HACK: we need to special-case post.underlineThickness and .underlinePosition
+ # and unilaterally/arbitrarily define a sentinel value to distinguish the case
+ # when a post table is present in a given master simply because that's where
+ # the glyph names in TrueType must be stored, but the underline values are not
+ # meant to be used for building MVAR's deltas. The value of -0x8000 (-36768)
+ # the minimum FWord (int16) value, was chosen for its unlikelyhood to appear
+ # in real-world underline position/thickness values.
+ specialTags = {"unds": -0x8000, "undo": -0x8000}
+
+ for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]):
+ # For each tag, fetch the associated table from all fonts (or not when we are
+ # still looking at a tag from the same tables) and set up the variation model
+ # for them.
+ if tableTag != lastTableTag:
+ tables = fontTable = None
+ if tableTag in font:
+ fontTable = font[tableTag]
+ tables = []
+ for master in master_ttfs:
+ if tableTag not in master or (
+ tag in specialTags
+ and getattr(master[tableTag], itemName) == specialTags[tag]
+ ):
+ tables.append(None)
+ else:
+ tables.append(master[tableTag])
+ model, tables = masterModel.getSubModel(tables)
+ store_builder.setModel(model)
+ lastTableTag = tableTag
+
+ if tables is None: # Tag not applicable to the master font.
+ continue
+
+ # TODO support gasp entries
+
+ master_values = [getattr(table, itemName) for table in tables]
+ if models.allEqual(master_values):
+ base, varIdx = master_values[0], None
+ else:
+ base, varIdx = store_builder.storeMasters(master_values)
+ setattr(fontTable, itemName, base)
+
+ if varIdx is None:
+ continue
+ log.info(" %s: %s.%s %s", tag, tableTag, itemName, master_values)
+ rec = ot.MetricsValueRecord()
+ rec.ValueTag = tag
+ rec.VarIdx = varIdx
+ records.append(rec)
+
+ assert "MVAR" not in font
+ if records:
+ store = store_builder.finish()
+ # Optimize
+ mapping = store.optimize()
+ for rec in records:
+ rec.VarIdx = mapping[rec.VarIdx]
+
+ MVAR = font["MVAR"] = newTable("MVAR")
+ mvar = MVAR.table = ot.MVAR()
+ mvar.Version = 0x00010000
+ mvar.Reserved = 0
+ mvar.VarStore = store
+ # XXX these should not be hard-coded but computed automatically
+ mvar.ValueRecordSize = 8
+ mvar.ValueRecordCount = len(records)
+ mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag)
def _add_BASE(font, masterModel, master_ttfs, axisTags):
+ log.info("Generating BASE")
- log.info("Generating BASE")
+ merger = VariationMerger(masterModel, axisTags, font)
+ merger.mergeTables(font, master_ttfs, ["BASE"])
+ store = merger.store_builder.finish()
- merger = VariationMerger(masterModel, axisTags, font)
- merger.mergeTables(font, master_ttfs, ['BASE'])
- store = merger.store_builder.finish()
-
- if not store:
- return
- base = font['BASE'].table
- assert base.Version == 0x00010000
- base.Version = 0x00010001
- base.VarStore = store
+ if not store:
+ return
+ base = font["BASE"].table
+ assert base.Version == 0x00010000
+ base.Version = 0x00010001
+ base.VarStore = store
def _merge_OTL(font, model, master_fonts, axisTags):
-
- log.info("Merging OpenType Layout tables")
- merger = VariationMerger(model, axisTags, font)
-
- merger.mergeTables(font, master_fonts, ['GSUB', 'GDEF', 'GPOS'])
- store = merger.store_builder.finish()
- if not store:
- return
- try:
- GDEF = font['GDEF'].table
- assert GDEF.Version <= 0x00010002
- except KeyError:
- font['GDEF'] = newTable('GDEF')
- GDEFTable = font["GDEF"] = newTable('GDEF')
- GDEF = GDEFTable.table = ot.GDEF()
- GDEF.GlyphClassDef = None
- GDEF.AttachList = None
- GDEF.LigCaretList = None
- GDEF.MarkAttachClassDef = None
- GDEF.MarkGlyphSetsDef = None
-
- GDEF.Version = 0x00010003
- GDEF.VarStore = store
-
- # Optimize
- varidx_map = store.optimize()
- GDEF.remap_device_varidxes(varidx_map)
- if 'GPOS' in font:
- font['GPOS'].table.remap_device_varidxes(varidx_map)
+ log.info("Merging OpenType Layout tables")
+ merger = VariationMerger(model, axisTags, font)
+
+ merger.mergeTables(font, master_fonts, ["GSUB", "GDEF", "GPOS"])
+ store = merger.store_builder.finish()
+ if not store:
+ return
+ try:
+ GDEF = font["GDEF"].table
+ assert GDEF.Version <= 0x00010002
+ except KeyError:
+ font["GDEF"] = newTable("GDEF")
+ GDEFTable = font["GDEF"] = newTable("GDEF")
+ GDEF = GDEFTable.table = ot.GDEF()
+ GDEF.GlyphClassDef = None
+ GDEF.AttachList = None
+ GDEF.LigCaretList = None
+ GDEF.MarkAttachClassDef = None
+ GDEF.MarkGlyphSetsDef = None
+
+ GDEF.Version = 0x00010003
+ GDEF.VarStore = store
+
+ # Optimize
+ varidx_map = store.optimize()
+ GDEF.remap_device_varidxes(varidx_map)
+ if "GPOS" in font:
+ font["GPOS"].table.remap_device_varidxes(varidx_map)
def _add_GSUB_feature_variations(font, axes, internal_axis_supports, rules, featureTag):
+ def normalize(name, value):
+ return models.normalizeLocation({name: value}, internal_axis_supports)[name]
- def normalize(name, value):
- return models.normalizeLocation(
- {name: value}, internal_axis_supports
- )[name]
+ log.info("Generating GSUB FeatureVariations")
- log.info("Generating GSUB FeatureVariations")
+ axis_tags = {name: axis.tag for name, axis in axes.items()}
- axis_tags = {name: axis.tag for name, axis in axes.items()}
+ conditional_subs = []
+ for rule in rules:
+ region = []
+ for conditions in rule.conditionSets:
+ space = {}
+ for condition in conditions:
+ axis_name = condition["name"]
+ if condition["minimum"] is not None:
+ minimum = normalize(axis_name, condition["minimum"])
+ else:
+ minimum = -1.0
+ if condition["maximum"] is not None:
+ maximum = normalize(axis_name, condition["maximum"])
+ else:
+ maximum = 1.0
+ tag = axis_tags[axis_name]
+ space[tag] = (minimum, maximum)
+ region.append(space)
- conditional_subs = []
- for rule in rules:
+ subs = {k: v for k, v in rule.subs}
- region = []
- for conditions in rule.conditionSets:
- space = {}
- for condition in conditions:
- axis_name = condition["name"]
- if condition["minimum"] is not None:
- minimum = normalize(axis_name, condition["minimum"])
- else:
- minimum = -1.0
- if condition["maximum"] is not None:
- maximum = normalize(axis_name, condition["maximum"])
- else:
- maximum = 1.0
- tag = axis_tags[axis_name]
- space[tag] = (minimum, maximum)
- region.append(space)
+ conditional_subs.append((region, subs))
- subs = {k: v for k, v in rule.subs}
-
- conditional_subs.append((region, subs))
-
- addFeatureVariations(font, conditional_subs, featureTag)
+ addFeatureVariations(font, conditional_subs, featureTag)
_DesignSpaceData = namedtuple(
- "_DesignSpaceData",
- [
- "axes",
- "internal_axis_supports",
- "base_idx",
- "normalized_master_locs",
- "masters",
- "instances",
- "rules",
- "rulesProcessingLast",
- "lib",
- ],
+ "_DesignSpaceData",
+ [
+ "axes",
+ "axisMappings",
+ "internal_axis_supports",
+ "base_idx",
+ "normalized_master_locs",
+ "masters",
+ "instances",
+ "rules",
+ "rulesProcessingLast",
+ "lib",
+ ],
)
def _add_CFF2(varFont, model, master_fonts):
- from .cff import merge_region_fonts
- glyphOrder = varFont.getGlyphOrder()
- if "CFF2" not in varFont:
- from .cff import convertCFFtoCFF2
- convertCFFtoCFF2(varFont)
- ordered_fonts_list = model.reorderMasters(master_fonts, model.reverseMapping)
- # re-ordering the master list simplifies building the CFF2 data item lists.
- merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder)
+ from .cff import merge_region_fonts
+
+ glyphOrder = varFont.getGlyphOrder()
+ if "CFF2" not in varFont:
+ from .cff import convertCFFtoCFF2
+
+ convertCFFtoCFF2(varFont)
+ ordered_fonts_list = model.reorderMasters(master_fonts, model.reverseMapping)
+ # re-ordering the master list simplifies building the CFF2 data item lists.
+ merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder)
def _add_COLR(font, model, master_fonts, axisTags, colr_layer_reuse=True):
- merger = COLRVariationMerger(model, axisTags, font, allowLayerReuse=colr_layer_reuse)
- merger.mergeTables(font, master_fonts)
- store = merger.store_builder.finish()
+ merger = COLRVariationMerger(
+ model, axisTags, font, allowLayerReuse=colr_layer_reuse
+ )
+ merger.mergeTables(font, master_fonts)
+ store = merger.store_builder.finish()
- colr = font["COLR"].table
- if store:
- mapping = store.optimize()
- colr.VarStore = store
- varIdxes = [mapping[v] for v in merger.varIdxes]
- colr.VarIndexMap = builder.buildDeltaSetIndexMap(varIdxes)
+ colr = font["COLR"].table
+ if store:
+ mapping = store.optimize()
+ colr.VarStore = store
+ varIdxes = [mapping[v] for v in merger.varIdxes]
+ colr.VarIndexMap = builder.buildDeltaSetIndexMap(varIdxes)
def load_designspace(designspace):
- # TODO: remove this and always assume 'designspace' is a DesignSpaceDocument,
- # never a file path, as that's already handled by caller
- if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
- ds = designspace
- else: # Assume a file path
- ds = DesignSpaceDocument.fromfile(designspace)
-
- masters = ds.sources
- if not masters:
- raise VarLibValidationError("Designspace must have at least one source.")
- instances = ds.instances
-
- # TODO: Use fontTools.designspaceLib.tagForAxisName instead.
- standard_axis_map = OrderedDict([
- ('weight', ('wght', {'en': u'Weight'})),
- ('width', ('wdth', {'en': u'Width'})),
- ('slant', ('slnt', {'en': u'Slant'})),
- ('optical', ('opsz', {'en': u'Optical Size'})),
- ('italic', ('ital', {'en': u'Italic'})),
- ])
-
- # Setup axes
- if not ds.axes:
- raise VarLibValidationError(f"Designspace must have at least one axis.")
-
- axes = OrderedDict()
- for axis_index, axis in enumerate(ds.axes):
- axis_name = axis.name
- if not axis_name:
- if not axis.tag:
- raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.")
- axis_name = axis.name = axis.tag
-
- if axis_name in standard_axis_map:
- if axis.tag is None:
- axis.tag = standard_axis_map[axis_name][0]
- if not axis.labelNames:
- axis.labelNames.update(standard_axis_map[axis_name][1])
- else:
- if not axis.tag:
- raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.")
- if not axis.labelNames:
- axis.labelNames["en"] = tostr(axis_name)
-
- axes[axis_name] = axis
- log.info("Axes:\n%s", pformat([axis.asdict() for axis in axes.values()]))
-
- # Check all master and instance locations are valid and fill in defaults
- for obj in masters+instances:
- obj_name = obj.name or obj.styleName or ''
- loc = obj.getFullDesignLocation(ds)
- obj.designLocation = loc
- if loc is None:
- raise VarLibValidationError(
- f"Source or instance '{obj_name}' has no location."
- )
- for axis_name in loc.keys():
- if axis_name not in axes:
- raise VarLibValidationError(
- f"Location axis '{axis_name}' unknown for '{obj_name}'."
- )
- for axis_name,axis in axes.items():
- v = axis.map_backward(loc[axis_name])
- if not (axis.minimum <= v <= axis.maximum):
- raise VarLibValidationError(
- f"Source or instance '{obj_name}' has out-of-range location "
- f"for axis '{axis_name}': is mapped to {v} but must be in "
- f"mapped range [{axis.minimum}..{axis.maximum}] (NOTE: all "
- "values are in user-space)."
- )
-
- # Normalize master locations
-
- internal_master_locs = [o.getFullDesignLocation(ds) for o in masters]
- log.info("Internal master locations:\n%s", pformat(internal_master_locs))
-
- # TODO This mapping should ideally be moved closer to logic in _add_fvar/avar
- internal_axis_supports = {}
- for axis in axes.values():
- triple = (axis.minimum, axis.default, axis.maximum)
- internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple]
- log.info("Internal axis supports:\n%s", pformat(internal_axis_supports))
-
- normalized_master_locs = [models.normalizeLocation(m, internal_axis_supports) for m in internal_master_locs]
- log.info("Normalized master locations:\n%s", pformat(normalized_master_locs))
-
- # Find base master
- base_idx = None
- for i,m in enumerate(normalized_master_locs):
- if all(v == 0 for v in m.values()):
- if base_idx is not None:
- raise VarLibValidationError(
- "More than one base master found in Designspace."
- )
- base_idx = i
- if base_idx is None:
- raise VarLibValidationError(
- "Base master not found; no master at default location?"
- )
- log.info("Index of base master: %s", base_idx)
-
- return _DesignSpaceData(
- axes,
- internal_axis_supports,
- base_idx,
- normalized_master_locs,
- masters,
- instances,
- ds.rules,
- ds.rulesProcessingLast,
- ds.lib,
- )
+ # TODO: remove this and always assume 'designspace' is a DesignSpaceDocument,
+ # never a file path, as that's already handled by caller
+ if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
+ ds = designspace
+ else: # Assume a file path
+ ds = DesignSpaceDocument.fromfile(designspace)
+
+ masters = ds.sources
+ if not masters:
+ raise VarLibValidationError("Designspace must have at least one source.")
+ instances = ds.instances
+
+ # TODO: Use fontTools.designspaceLib.tagForAxisName instead.
+ standard_axis_map = OrderedDict(
+ [
+ ("weight", ("wght", {"en": "Weight"})),
+ ("width", ("wdth", {"en": "Width"})),
+ ("slant", ("slnt", {"en": "Slant"})),
+ ("optical", ("opsz", {"en": "Optical Size"})),
+ ("italic", ("ital", {"en": "Italic"})),
+ ]
+ )
+
+ # Setup axes
+ if not ds.axes:
+ raise VarLibValidationError(f"Designspace must have at least one axis.")
+
+ axes = OrderedDict()
+ for axis_index, axis in enumerate(ds.axes):
+ axis_name = axis.name
+ if not axis_name:
+ if not axis.tag:
+ raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.")
+ axis_name = axis.name = axis.tag
+
+ if axis_name in standard_axis_map:
+ if axis.tag is None:
+ axis.tag = standard_axis_map[axis_name][0]
+ if not axis.labelNames:
+ axis.labelNames.update(standard_axis_map[axis_name][1])
+ else:
+ if not axis.tag:
+ raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.")
+ if not axis.labelNames:
+ axis.labelNames["en"] = tostr(axis_name)
+
+ axes[axis_name] = axis
+ log.info("Axes:\n%s", pformat([axis.asdict() for axis in axes.values()]))
+
+ axisMappings = ds.axisMappings
+ if axisMappings:
+ log.info("Mappings:\n%s", pformat(axisMappings))
+
+ # Check all master and instance locations are valid and fill in defaults
+ for obj in masters + instances:
+ obj_name = obj.name or obj.styleName or ""
+ loc = obj.getFullDesignLocation(ds)
+ obj.designLocation = loc
+ if loc is None:
+ raise VarLibValidationError(
+ f"Source or instance '{obj_name}' has no location."
+ )
+ for axis_name in loc.keys():
+ if axis_name not in axes:
+ raise VarLibValidationError(
+ f"Location axis '{axis_name}' unknown for '{obj_name}'."
+ )
+ for axis_name, axis in axes.items():
+ v = axis.map_backward(loc[axis_name])
+ if not (axis.minimum <= v <= axis.maximum):
+ raise VarLibValidationError(
+ f"Source or instance '{obj_name}' has out-of-range location "
+ f"for axis '{axis_name}': is mapped to {v} but must be in "
+ f"mapped range [{axis.minimum}..{axis.maximum}] (NOTE: all "
+ "values are in user-space)."
+ )
+
+ # Normalize master locations
+
+ internal_master_locs = [o.getFullDesignLocation(ds) for o in masters]
+ log.info("Internal master locations:\n%s", pformat(internal_master_locs))
+
+ # TODO This mapping should ideally be moved closer to logic in _add_fvar/avar
+ internal_axis_supports = {}
+ for axis in axes.values():
+ triple = (axis.minimum, axis.default, axis.maximum)
+ internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple]
+ log.info("Internal axis supports:\n%s", pformat(internal_axis_supports))
+
+ normalized_master_locs = [
+ models.normalizeLocation(m, internal_axis_supports)
+ for m in internal_master_locs
+ ]
+ log.info("Normalized master locations:\n%s", pformat(normalized_master_locs))
+
+ # Find base master
+ base_idx = None
+ for i, m in enumerate(normalized_master_locs):
+ if all(v == 0 for v in m.values()):
+ if base_idx is not None:
+ raise VarLibValidationError(
+ "More than one base master found in Designspace."
+ )
+ base_idx = i
+ if base_idx is None:
+ raise VarLibValidationError(
+ "Base master not found; no master at default location?"
+ )
+ log.info("Index of base master: %s", base_idx)
+
+ return _DesignSpaceData(
+ axes,
+ axisMappings,
+ internal_axis_supports,
+ base_idx,
+ normalized_master_locs,
+ masters,
+ instances,
+ ds.rules,
+ ds.rulesProcessingLast,
+ ds.lib,
+ )
# https://docs.microsoft.com/en-us/typography/opentype/spec/os2#uswidthclass
WDTH_VALUE_TO_OS2_WIDTH_CLASS = {
- 50: 1,
- 62.5: 2,
- 75: 3,
- 87.5: 4,
- 100: 5,
- 112.5: 6,
- 125: 7,
- 150: 8,
- 200: 9,
+ 50: 1,
+ 62.5: 2,
+ 75: 3,
+ 87.5: 4,
+ 100: 5,
+ 112.5: 6,
+ 125: 7,
+ 150: 8,
+ 200: 9,
}
def set_default_weight_width_slant(font, location):
- if "OS/2" in font:
- if "wght" in location:
- weight_class = otRound(max(1, min(location["wght"], 1000)))
- if font["OS/2"].usWeightClass != weight_class:
- log.info("Setting OS/2.usWeightClass = %s", weight_class)
- font["OS/2"].usWeightClass = weight_class
-
- if "wdth" in location:
- # map 'wdth' axis (50..200) to OS/2.usWidthClass (1..9), rounding to closest
- widthValue = min(max(location["wdth"], 50), 200)
- widthClass = otRound(
- models.piecewiseLinearMap(widthValue, WDTH_VALUE_TO_OS2_WIDTH_CLASS)
- )
- if font["OS/2"].usWidthClass != widthClass:
- log.info("Setting OS/2.usWidthClass = %s", widthClass)
- font["OS/2"].usWidthClass = widthClass
-
- if "slnt" in location and "post" in font:
- italicAngle = max(-90, min(location["slnt"], 90))
- if font["post"].italicAngle != italicAngle:
- log.info("Setting post.italicAngle = %s", italicAngle)
- font["post"].italicAngle = italicAngle
+ if "OS/2" in font:
+ if "wght" in location:
+ weight_class = otRound(max(1, min(location["wght"], 1000)))
+ if font["OS/2"].usWeightClass != weight_class:
+ log.info("Setting OS/2.usWeightClass = %s", weight_class)
+ font["OS/2"].usWeightClass = weight_class
+
+ if "wdth" in location:
+ # map 'wdth' axis (50..200) to OS/2.usWidthClass (1..9), rounding to closest
+ widthValue = min(max(location["wdth"], 50), 200)
+ widthClass = otRound(
+ models.piecewiseLinearMap(widthValue, WDTH_VALUE_TO_OS2_WIDTH_CLASS)
+ )
+ if font["OS/2"].usWidthClass != widthClass:
+ log.info("Setting OS/2.usWidthClass = %s", widthClass)
+ font["OS/2"].usWidthClass = widthClass
+
+ if "slnt" in location and "post" in font:
+ italicAngle = max(-90, min(location["slnt"], 90))
+ if font["post"].italicAngle != italicAngle:
+ log.info("Setting post.italicAngle = %s", italicAngle)
+ font["post"].italicAngle = italicAngle
+
+
+def drop_implied_oncurve_points(*masters: TTFont) -> int:
+ """Drop impliable on-curve points from all the simple glyphs in masters.
+
+ In TrueType glyf outlines, on-curve points can be implied when they are located
+ exactly at the midpoint of the line connecting two consecutive off-curve points.
+
+ The input masters' glyf tables are assumed to contain same-named glyphs that are
+ interpolatable. Oncurve points are only dropped if they can be implied for all
+ the masters. The fonts are modified in-place.
+
+ Args:
+ masters: The TTFont(s) to modify
+
+ Returns:
+ The total number of points that were dropped if any.
+
+ Reference:
+ https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
+ """
+
+ count = 0
+ glyph_masters = defaultdict(list)
+ # multiple DS source may point to the same TTFont object and we want to
+ # avoid processing the same glyph twice as they are modified in-place
+ for font in {id(m): m for m in masters}.values():
+ glyf = font["glyf"]
+ for glyphName in glyf.keys():
+ glyph_masters[glyphName].append(glyf[glyphName])
+ count = 0
+ for glyphName, glyphs in glyph_masters.items():
+ try:
+ dropped = dropImpliedOnCurvePoints(*glyphs)
+ except ValueError as e:
+ # we don't fail for incompatible glyphs in _add_gvar so we shouldn't here
+ log.warning("Failed to drop implied oncurves for %r: %s", glyphName, e)
+ else:
+ count += len(dropped)
+ return count
def build_many(
- designspace: DesignSpaceDocument,
- master_finder=lambda s:s,
- exclude=[],
- optimize=True,
- skip_vf=lambda vf_name: False,
- colr_layer_reuse=True,
+ designspace: DesignSpaceDocument,
+ master_finder=lambda s: s,
+ exclude=[],
+ optimize=True,
+ skip_vf=lambda vf_name: False,
+ colr_layer_reuse=True,
+ drop_implied_oncurves=False,
):
- """
- Build variable fonts from a designspace file, version 5 which can define
- several VFs, or version 4 which has implicitly one VF covering the whole doc.
-
- If master_finder is set, it should be a callable that takes master
- filename as found in designspace file and map it to master font
- binary as to be opened (eg. .ttf or .otf).
-
- skip_vf can be used to skip building some of the variable fonts defined in
- the input designspace. It's a predicate that takes as argument the name
- of the variable font and returns `bool`.
-
- Always returns a Dict[str, TTFont] keyed by VariableFontDescriptor.name
- """
- res = {}
- for _location, subDoc in splitInterpolable(designspace):
- for name, vfDoc in splitVariableFonts(subDoc):
- if skip_vf(name):
- log.debug(f"Skipping variable TTF font: {name}")
- continue
- vf = build(
- vfDoc,
- master_finder,
- exclude=list(exclude) + ["STAT"],
- optimize=optimize,
- colr_layer_reuse=colr_layer_reuse,
- )[0]
- if "STAT" not in exclude:
- buildVFStatTable(vf, designspace, name)
- res[name] = vf
- return res
+ """
+ Build variable fonts from a designspace file, version 5 which can define
+ several VFs, or version 4 which has implicitly one VF covering the whole doc.
+
+ If master_finder is set, it should be a callable that takes master
+ filename as found in designspace file and map it to master font
+ binary as to be opened (eg. .ttf or .otf).
+
+ skip_vf can be used to skip building some of the variable fonts defined in
+ the input designspace. It's a predicate that takes as argument the name
+ of the variable font and returns `bool`.
+
+ Always returns a Dict[str, TTFont] keyed by VariableFontDescriptor.name
+ """
+ res = {}
+ # varLib.build (used further below) by default only builds an incomplete 'STAT'
+ # with an empty AxisValueArray--unless the VF inherited 'STAT' from its base master.
+ # Designspace version 5 can also be used to define 'STAT' labels or customize
+ # axes ordering, etc. To avoid overwriting a pre-existing 'STAT' or redoing the
+ # same work twice, here we check if designspace contains any 'STAT' info before
+ # proceeding to call buildVFStatTable for each VF.
+ # https://github.com/fonttools/fonttools/pull/3024
+ # https://github.com/fonttools/fonttools/issues/3045
+ doBuildStatFromDSv5 = (
+ "STAT" not in exclude
+ and designspace.formatTuple >= (5, 0)
+ and (
+ any(a.axisLabels or a.axisOrdering is not None for a in designspace.axes)
+ or designspace.locationLabels
+ )
+ )
+ for _location, subDoc in splitInterpolable(designspace):
+ for name, vfDoc in splitVariableFonts(subDoc):
+ if skip_vf(name):
+ log.debug(f"Skipping variable TTF font: {name}")
+ continue
+ vf = build(
+ vfDoc,
+ master_finder,
+ exclude=exclude,
+ optimize=optimize,
+ colr_layer_reuse=colr_layer_reuse,
+ drop_implied_oncurves=drop_implied_oncurves,
+ )[0]
+ if doBuildStatFromDSv5:
+ buildVFStatTable(vf, designspace, name)
+ res[name] = vf
+ return res
+
def build(
- designspace,
- master_finder=lambda s:s,
- exclude=[],
- optimize=True,
- colr_layer_reuse=True,
+ designspace,
+ master_finder=lambda s: s,
+ exclude=[],
+ optimize=True,
+ colr_layer_reuse=True,
+ drop_implied_oncurves=False,
):
- """
- Build variation font from a designspace file.
-
- If master_finder is set, it should be a callable that takes master
- filename as found in designspace file and map it to master font
- binary as to be opened (eg. .ttf or .otf).
- """
- if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
- pass
- else: # Assume a file path
- designspace = DesignSpaceDocument.fromfile(designspace)
-
- ds = load_designspace(designspace)
- log.info("Building variable font")
-
- log.info("Loading master fonts")
- master_fonts = load_masters(designspace, master_finder)
-
- # TODO: 'master_ttfs' is unused except for return value, remove later
- master_ttfs = []
- for master in master_fonts:
- try:
- master_ttfs.append(master.reader.file.name)
- except AttributeError:
- master_ttfs.append(None) # in-memory fonts have no path
-
- # Copy the base master to work from it
- vf = deepcopy(master_fonts[ds.base_idx])
-
- # TODO append masters as named-instances as well; needs .designspace change.
- fvar = _add_fvar(vf, ds.axes, ds.instances)
- if 'STAT' not in exclude:
- _add_stat(vf)
- if 'avar' not in exclude:
- _add_avar(vf, ds.axes)
-
- # Map from axis names to axis tags...
- normalized_master_locs = [
- {ds.axes[k].tag: v for k,v in loc.items()} for loc in ds.normalized_master_locs
- ]
- # From here on, we use fvar axes only
- axisTags = [axis.axisTag for axis in fvar.axes]
-
- # Assume single-model for now.
- model = models.VariationModel(normalized_master_locs, axisOrder=axisTags)
- assert 0 == model.mapping[ds.base_idx]
-
- log.info("Building variations tables")
- if 'BASE' not in exclude and 'BASE' in vf:
- _add_BASE(vf, model, master_fonts, axisTags)
- if 'MVAR' not in exclude:
- _add_MVAR(vf, model, master_fonts, axisTags)
- if 'HVAR' not in exclude:
- _add_HVAR(vf, model, master_fonts, axisTags)
- if 'VVAR' not in exclude and 'vmtx' in vf:
- _add_VVAR(vf, model, master_fonts, axisTags)
- if 'GDEF' not in exclude or 'GPOS' not in exclude:
- _merge_OTL(vf, model, master_fonts, axisTags)
- if 'gvar' not in exclude and 'glyf' in vf:
- _add_gvar(vf, model, master_fonts, optimize=optimize)
- if 'cvar' not in exclude and 'glyf' in vf:
- _merge_TTHinting(vf, model, master_fonts)
- if 'GSUB' not in exclude and ds.rules:
- featureTag = ds.lib.get(
- FEAVAR_FEATURETAG_LIB_KEY,
- "rclt" if ds.rulesProcessingLast else "rvrn"
- )
- _add_GSUB_feature_variations(vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTag)
- if 'CFF2' not in exclude and ('CFF ' in vf or 'CFF2' in vf):
- _add_CFF2(vf, model, master_fonts)
- if "post" in vf:
- # set 'post' to format 2 to keep the glyph names dropped from CFF2
- post = vf["post"]
- if post.formatType != 2.0:
- post.formatType = 2.0
- post.extraNames = []
- post.mapping = {}
- if 'COLR' not in exclude and 'COLR' in vf and vf['COLR'].version > 0:
- _add_COLR(vf, model, master_fonts, axisTags, colr_layer_reuse)
-
- set_default_weight_width_slant(
- vf, location={axis.axisTag: axis.defaultValue for axis in vf["fvar"].axes}
- )
-
- for tag in exclude:
- if tag in vf:
- del vf[tag]
-
- # TODO: Only return vf for 4.0+, the rest is unused.
- return vf, model, master_ttfs
+ """
+ Build variation font from a designspace file.
+
+ If master_finder is set, it should be a callable that takes master
+ filename as found in designspace file and map it to master font
+ binary as to be opened (eg. .ttf or .otf).
+ """
+ if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
+ pass
+ else: # Assume a file path
+ designspace = DesignSpaceDocument.fromfile(designspace)
+
+ ds = load_designspace(designspace)
+ log.info("Building variable font")
+
+ log.info("Loading master fonts")
+ master_fonts = load_masters(designspace, master_finder)
+
+ # TODO: 'master_ttfs' is unused except for return value, remove later
+ master_ttfs = []
+ for master in master_fonts:
+ try:
+ master_ttfs.append(master.reader.file.name)
+ except AttributeError:
+ master_ttfs.append(None) # in-memory fonts have no path
+
+ if drop_implied_oncurves and "glyf" in master_fonts[ds.base_idx]:
+ drop_count = drop_implied_oncurve_points(*master_fonts)
+ log.info(
+ "Dropped %s on-curve points from simple glyphs in the 'glyf' table",
+ drop_count,
+ )
+
+ # Copy the base master to work from it
+ vf = deepcopy(master_fonts[ds.base_idx])
+
+ if "DSIG" in vf:
+ del vf["DSIG"]
+
+ # TODO append masters as named-instances as well; needs .designspace change.
+ fvar = _add_fvar(vf, ds.axes, ds.instances)
+ if "STAT" not in exclude:
+ _add_stat(vf)
+
+ # Map from axis names to axis tags...
+ normalized_master_locs = [
+ {ds.axes[k].tag: v for k, v in loc.items()} for loc in ds.normalized_master_locs
+ ]
+ # From here on, we use fvar axes only
+ axisTags = [axis.axisTag for axis in fvar.axes]
+
+ # Assume single-model for now.
+ model = models.VariationModel(normalized_master_locs, axisOrder=axisTags)
+ assert 0 == model.mapping[ds.base_idx]
+
+ log.info("Building variations tables")
+ if "avar" not in exclude:
+ _add_avar(vf, ds.axes, ds.axisMappings, axisTags)
+ if "BASE" not in exclude and "BASE" in vf:
+ _add_BASE(vf, model, master_fonts, axisTags)
+ if "MVAR" not in exclude:
+ _add_MVAR(vf, model, master_fonts, axisTags)
+ if "HVAR" not in exclude:
+ _add_HVAR(vf, model, master_fonts, axisTags)
+ if "VVAR" not in exclude and "vmtx" in vf:
+ _add_VVAR(vf, model, master_fonts, axisTags)
+ if "GDEF" not in exclude or "GPOS" not in exclude:
+ _merge_OTL(vf, model, master_fonts, axisTags)
+ if "gvar" not in exclude and "glyf" in vf:
+ _add_gvar(vf, model, master_fonts, optimize=optimize)
+ if "cvar" not in exclude and "glyf" in vf:
+ _merge_TTHinting(vf, model, master_fonts)
+ if "GSUB" not in exclude and ds.rules:
+ featureTag = ds.lib.get(
+ FEAVAR_FEATURETAG_LIB_KEY, "rclt" if ds.rulesProcessingLast else "rvrn"
+ )
+ _add_GSUB_feature_variations(
+ vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTag
+ )
+ if "CFF2" not in exclude and ("CFF " in vf or "CFF2" in vf):
+ _add_CFF2(vf, model, master_fonts)
+ if "post" in vf:
+ # set 'post' to format 2 to keep the glyph names dropped from CFF2
+ post = vf["post"]
+ if post.formatType != 2.0:
+ post.formatType = 2.0
+ post.extraNames = []
+ post.mapping = {}
+ if "COLR" not in exclude and "COLR" in vf and vf["COLR"].version > 0:
+ _add_COLR(vf, model, master_fonts, axisTags, colr_layer_reuse)
+
+ set_default_weight_width_slant(
+ vf, location={axis.axisTag: axis.defaultValue for axis in vf["fvar"].axes}
+ )
+
+ for tag in exclude:
+ if tag in vf:
+ del vf[tag]
+
+ # TODO: Only return vf for 4.0+, the rest is unused.
+ return vf, model, master_ttfs
def _open_font(path, master_finder=lambda s: s):
- # load TTFont masters from given 'path': this can be either a .TTX or an
- # OpenType binary font; or if neither of these, try use the 'master_finder'
- # callable to resolve the path to a valid .TTX or OpenType font binary.
- from fontTools.ttx import guessFileType
-
- master_path = os.path.normpath(path)
- tp = guessFileType(master_path)
- if tp is None:
- # not an OpenType binary/ttx, fall back to the master finder.
- master_path = master_finder(master_path)
- tp = guessFileType(master_path)
- if tp in ("TTX", "OTX"):
- font = TTFont()
- font.importXML(master_path)
- elif tp in ("TTF", "OTF", "WOFF", "WOFF2"):
- font = TTFont(master_path)
- else:
- raise VarLibValidationError("Invalid master path: %r" % master_path)
- return font
+ # load TTFont masters from given 'path': this can be either a .TTX or an
+ # OpenType binary font; or if neither of these, try use the 'master_finder'
+ # callable to resolve the path to a valid .TTX or OpenType font binary.
+ from fontTools.ttx import guessFileType
+
+ master_path = os.path.normpath(path)
+ tp = guessFileType(master_path)
+ if tp is None:
+ # not an OpenType binary/ttx, fall back to the master finder.
+ master_path = master_finder(master_path)
+ tp = guessFileType(master_path)
+ if tp in ("TTX", "OTX"):
+ font = TTFont()
+ font.importXML(master_path)
+ elif tp in ("TTF", "OTF", "WOFF", "WOFF2"):
+ font = TTFont(master_path)
+ else:
+ raise VarLibValidationError("Invalid master path: %r" % master_path)
+ return font
def load_masters(designspace, master_finder=lambda s: s):
- """Ensure that all SourceDescriptor.font attributes have an appropriate TTFont
- object loaded, or else open TTFont objects from the SourceDescriptor.path
- attributes.
-
- The paths can point to either an OpenType font, a TTX file, or a UFO. In the
- latter case, use the provided master_finder callable to map from UFO paths to
- the respective master font binaries (e.g. .ttf, .otf or .ttx).
-
- Return list of master TTFont objects in the same order they are listed in the
- DesignSpaceDocument.
- """
- for master in designspace.sources:
- # If a SourceDescriptor has a layer name, demand that the compiled TTFont
- # be supplied by the caller. This spares us from modifying MasterFinder.
- if master.layerName and master.font is None:
- raise VarLibValidationError(
- f"Designspace source '{master.name or '<Unknown>'}' specified a "
- "layer name but lacks the required TTFont object in the 'font' "
- "attribute."
- )
-
- return designspace.loadSourceFonts(_open_font, master_finder=master_finder)
+ """Ensure that all SourceDescriptor.font attributes have an appropriate TTFont
+ object loaded, or else open TTFont objects from the SourceDescriptor.path
+ attributes.
+
+ The paths can point to either an OpenType font, a TTX file, or a UFO. In the
+ latter case, use the provided master_finder callable to map from UFO paths to
+ the respective master font binaries (e.g. .ttf, .otf or .ttx).
+
+ Return list of master TTFont objects in the same order they are listed in the
+ DesignSpaceDocument.
+ """
+ for master in designspace.sources:
+ # If a SourceDescriptor has a layer name, demand that the compiled TTFont
+ # be supplied by the caller. This spares us from modifying MasterFinder.
+ if master.layerName and master.font is None:
+ raise VarLibValidationError(
+ f"Designspace source '{master.name or '<Unknown>'}' specified a "
+ "layer name but lacks the required TTFont object in the 'font' "
+ "attribute."
+ )
+
+ return designspace.loadSourceFonts(_open_font, master_finder=master_finder)
class MasterFinder(object):
-
- def __init__(self, template):
- self.template = template
-
- def __call__(self, src_path):
- fullname = os.path.abspath(src_path)
- dirname, basename = os.path.split(fullname)
- stem, ext = os.path.splitext(basename)
- path = self.template.format(
- fullname=fullname,
- dirname=dirname,
- basename=basename,
- stem=stem,
- ext=ext,
- )
- return os.path.normpath(path)
+ def __init__(self, template):
+ self.template = template
+
+ def __call__(self, src_path):
+ fullname = os.path.abspath(src_path)
+ dirname, basename = os.path.split(fullname)
+ stem, ext = os.path.splitext(basename)
+ path = self.template.format(
+ fullname=fullname,
+ dirname=dirname,
+ basename=basename,
+ stem=stem,
+ ext=ext,
+ )
+ return os.path.normpath(path)
def main(args=None):
- """Build a variable font from a designspace file and masters"""
- from argparse import ArgumentParser
- from fontTools import configLogger
-
- parser = ArgumentParser(prog='varLib', description = main.__doc__)
- parser.add_argument('designspace')
- parser.add_argument(
- '-o',
- metavar='OUTPUTFILE',
- dest='outfile',
- default=None,
- help='output file'
- )
- parser.add_argument(
- '-x',
- metavar='TAG',
- dest='exclude',
- action='append',
- default=[],
- help='exclude table'
- )
- parser.add_argument(
- '--disable-iup',
- dest='optimize',
- action='store_false',
- help='do not perform IUP optimization'
- )
- parser.add_argument(
- '--no-colr-layer-reuse',
- dest='colr_layer_reuse',
- action='store_false',
- help='do not rebuild variable COLR table to optimize COLR layer reuse',
- )
- parser.add_argument(
- '--master-finder',
- default='master_ttf_interpolatable/{stem}.ttf',
- help=(
- 'templated string used for finding binary font '
- 'files given the source file names defined in the '
- 'designspace document. The following special strings '
- 'are defined: {fullname} is the absolute source file '
- 'name; {basename} is the file name without its '
- 'directory; {stem} is the basename without the file '
- 'extension; {ext} is the source file extension; '
- '{dirname} is the directory of the absolute file '
- 'name. The default value is "%(default)s".'
- )
- )
- logging_group = parser.add_mutually_exclusive_group(required=False)
- logging_group.add_argument(
- "-v", "--verbose",
- action="store_true",
- help="Run more verbosely.")
- logging_group.add_argument(
- "-q", "--quiet",
- action="store_true",
- help="Turn verbosity off.")
- options = parser.parse_args(args)
-
- configLogger(level=(
- "DEBUG" if options.verbose else
- "ERROR" if options.quiet else
- "INFO"))
-
- designspace_filename = options.designspace
- finder = MasterFinder(options.master_finder)
-
- vf, _, _ = build(
- designspace_filename,
- finder,
- exclude=options.exclude,
- optimize=options.optimize,
- colr_layer_reuse=options.colr_layer_reuse,
- )
-
- outfile = options.outfile
- if outfile is None:
- ext = "otf" if vf.sfntVersion == "OTTO" else "ttf"
- outfile = os.path.splitext(designspace_filename)[0] + '-VF.' + ext
-
- log.info("Saving variation font %s", outfile)
- vf.save(outfile)
+ """Build variable fonts from a designspace file and masters"""
+ from argparse import ArgumentParser
+ from fontTools import configLogger
+
+ parser = ArgumentParser(prog="varLib", description=main.__doc__)
+ parser.add_argument("designspace")
+ output_group = parser.add_mutually_exclusive_group()
+ output_group.add_argument(
+ "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file"
+ )
+ output_group.add_argument(
+ "-d",
+ "--output-dir",
+ metavar="OUTPUTDIR",
+ default=None,
+ help="output dir (default: same as input designspace file)",
+ )
+ parser.add_argument(
+ "-x",
+ metavar="TAG",
+ dest="exclude",
+ action="append",
+ default=[],
+ help="exclude table",
+ )
+ parser.add_argument(
+ "--disable-iup",
+ dest="optimize",
+ action="store_false",
+ help="do not perform IUP optimization",
+ )
+ parser.add_argument(
+ "--no-colr-layer-reuse",
+ dest="colr_layer_reuse",
+ action="store_false",
+ help="do not rebuild variable COLR table to optimize COLR layer reuse",
+ )
+ parser.add_argument(
+ "--drop-implied-oncurves",
+ action="store_true",
+ help=(
+ "drop on-curve points that can be implied when exactly in the middle of "
+ "two off-curve points (only applies to TrueType fonts)"
+ ),
+ )
+ parser.add_argument(
+ "--master-finder",
+ default="master_ttf_interpolatable/{stem}.ttf",
+ help=(
+ "templated string used for finding binary font "
+ "files given the source file names defined in the "
+ "designspace document. The following special strings "
+ "are defined: {fullname} is the absolute source file "
+ "name; {basename} is the file name without its "
+ "directory; {stem} is the basename without the file "
+ "extension; {ext} is the source file extension; "
+ "{dirname} is the directory of the absolute file "
+ 'name. The default value is "%(default)s".'
+ ),
+ )
+ parser.add_argument(
+ "--variable-fonts",
+ default=".*",
+ metavar="VF_NAME",
+ help=(
+ "Filter the list of variable fonts produced from the input "
+ "Designspace v5 file. By default all listed variable fonts are "
+ "generated. To generate a specific variable font (or variable fonts) "
+ 'that match a given "name" attribute, you can pass as argument '
+ "the full name or a regular expression. E.g.: --variable-fonts "
+ '"MyFontVF_WeightOnly"; or --variable-fonts "MyFontVFItalic_.*".'
+ ),
+ )
+ logging_group = parser.add_mutually_exclusive_group(required=False)
+ logging_group.add_argument(
+ "-v", "--verbose", action="store_true", help="Run more verbosely."
+ )
+ logging_group.add_argument(
+ "-q", "--quiet", action="store_true", help="Turn verbosity off."
+ )
+ options = parser.parse_args(args)
+
+ configLogger(
+ level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
+ )
+
+ designspace_filename = options.designspace
+ designspace = DesignSpaceDocument.fromfile(designspace_filename)
+
+ vf_descriptors = designspace.getVariableFonts()
+ if not vf_descriptors:
+ parser.error(f"No variable fonts in given designspace {designspace.path!r}")
+
+ vfs_to_build = []
+ for vf in vf_descriptors:
+ # Skip variable fonts that do not match the user's inclusion regex if given.
+ if not fullmatch(options.variable_fonts, vf.name):
+ continue
+ vfs_to_build.append(vf)
+
+ if not vfs_to_build:
+ parser.error(f"No variable fonts matching {options.variable_fonts!r}")
+
+ if options.outfile is not None and len(vfs_to_build) > 1:
+ parser.error(
+ "can't specify -o because there are multiple VFs to build; "
+ "use --output-dir, or select a single VF with --variable-fonts"
+ )
+
+ output_dir = options.output_dir
+ if output_dir is None:
+ output_dir = os.path.dirname(designspace_filename)
+
+ vf_name_to_output_path = {}
+ if len(vfs_to_build) == 1 and options.outfile is not None:
+ vf_name_to_output_path[vfs_to_build[0].name] = options.outfile
+ else:
+ for vf in vfs_to_build:
+ filename = vf.filename if vf.filename is not None else vf.name + ".{ext}"
+ vf_name_to_output_path[vf.name] = os.path.join(output_dir, filename)
+
+ finder = MasterFinder(options.master_finder)
+
+ vfs = build_many(
+ designspace,
+ finder,
+ exclude=options.exclude,
+ optimize=options.optimize,
+ colr_layer_reuse=options.colr_layer_reuse,
+ drop_implied_oncurves=options.drop_implied_oncurves,
+ )
+
+ for vf_name, vf in vfs.items():
+ ext = "otf" if vf.sfntVersion == "OTTO" else "ttf"
+ output_path = vf_name_to_output_path[vf_name].format(ext=ext)
+ output_dir = os.path.dirname(output_path)
+ if output_dir:
+ os.makedirs(output_dir, exist_ok=True)
+ log.info("Saving variation font %s", output_path)
+ vf.save(output_path)
if __name__ == "__main__":
- import sys
- if len(sys.argv) > 1:
- sys.exit(main())
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+
+ if len(sys.argv) > 1:
+ sys.exit(main())
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/varLib/__main__.py b/Lib/fontTools/varLib/__main__.py
index 4b3a0f53..56fab06e 100644
--- a/Lib/fontTools/varLib/__main__.py
+++ b/Lib/fontTools/varLib/__main__.py
@@ -2,5 +2,5 @@ import sys
from fontTools.varLib import main
-if __name__ == '__main__':
- sys.exit(main())
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Lib/fontTools/varLib/avar.py b/Lib/fontTools/varLib/avar.py
new file mode 100644
index 00000000..60f0d7e7
--- /dev/null
+++ b/Lib/fontTools/varLib/avar.py
@@ -0,0 +1,70 @@
+from fontTools.varLib import _add_avar, load_designspace
+from fontTools.misc.cliTools import makeOutputFileName
+import logging
+
+log = logging.getLogger("fontTools.varLib.avar")
+
+
+def main(args=None):
+    """Add `avar` table from designspace file to variable font."""
+
+    if args is None:
+        import sys
+
+        args = sys.argv[1:]
+
+    from fontTools import configLogger
+    from fontTools.ttLib import TTFont
+    from fontTools.designspaceLib import DesignSpaceDocument
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        "fonttools varLib.avar",
+        description="Add `avar` table from designspace file to variable font.",
+    )
+    parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")
+    parser.add_argument(
+        "designspace", metavar="family.designspace", help="Designspace file."
+    )
+    parser.add_argument(
+        "-o",
+        "--output-file",
+        type=str,
+        help="Output font file name.",
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="store_true", help="Run more verbosely."
+    )
+
+    options = parser.parse_args(args)
+
+    configLogger(level=("INFO" if options.verbose else "WARNING"))
+
+    font = TTFont(options.font)
+    if "fvar" not in font:
+        log.error("Not a variable font.")
+        return 1
+
+    axisTags = [a.axisTag for a in font["fvar"].axes]
+
+    ds = load_designspace(options.designspace)
+
+    if "avar" in font:
+        log.warning("avar table already present, overwriting.")
+        del font["avar"]
+
+    _add_avar(font, ds.axes, ds.axisMappings, axisTags)
+
+    if options.output_file is None:
+        outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar")
+    else:
+        outfile = options.output_file
+    if outfile:
+        log.info("Saving %s", outfile)
+        font.save(outfile)
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(main())
diff --git a/Lib/fontTools/varLib/avarPlanner.py b/Lib/fontTools/varLib/avarPlanner.py
new file mode 100644
index 00000000..2e173443
--- /dev/null
+++ b/Lib/fontTools/varLib/avarPlanner.py
@@ -0,0 +1,1004 @@
+from fontTools.ttLib import newTable
+from fontTools.ttLib.tables._f_v_a_r import Axis as fvarAxis
+from fontTools.pens.areaPen import AreaPen
+from fontTools.pens.basePen import NullPen
+from fontTools.pens.statisticsPen import StatisticsPen
+from fontTools.varLib.models import piecewiseLinearMap, normalizeValue
+from fontTools.misc.cliTools import makeOutputFileName
+import math
+import logging
+from pprint import pformat
+
+__all__ = [
+ "planWeightAxis",
+ "planWidthAxis",
+ "planSlantAxis",
+ "planOpticalSizeAxis",
+ "planAxis",
+ "sanitizeWeight",
+ "sanitizeWidth",
+ "sanitizeSlant",
+ "measureWeight",
+ "measureWidth",
+ "measureSlant",
+ "normalizeLinear",
+ "normalizeLog",
+ "normalizeDegrees",
+ "interpolateLinear",
+ "interpolateLog",
+ "processAxis",
+ "makeDesignspaceSnippet",
+ "addEmptyAvar",
+ "main",
+]
+
+log = logging.getLogger("fontTools.varLib.avarPlanner")
+
+WEIGHTS = [
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 300,
+ 350,
+ 400,
+ 450,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 750,
+ 800,
+ 850,
+ 900,
+ 950,
+]
+
+WIDTHS = [
+ 25.0,
+ 37.5,
+ 50.0,
+ 62.5,
+ 75.0,
+ 87.5,
+ 100.0,
+ 112.5,
+ 125.0,
+ 137.5,
+ 150.0,
+ 162.5,
+ 175.0,
+ 187.5,
+ 200.0,
+]
+
+SLANTS = list(math.degrees(math.atan(d / 20.0)) for d in range(-20, 21))
+
+SIZES = [
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 14,
+ 18,
+ 24,
+ 30,
+ 36,
+ 48,
+ 60,
+ 72,
+ 96,
+ 120,
+ 144,
+ 192,
+ 240,
+ 288,
+]
+
+
+SAMPLES = 8
+
+
+def normalizeLinear(value, rangeMin, rangeMax):
+ """Linearly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""
+ return (value - rangeMin) / (rangeMax - rangeMin)
+
+
+def interpolateLinear(t, a, b):
+ """Linear interpolation between a and b, with t typically in [0, 1]."""
+ return a + t * (b - a)
+
+
+def normalizeLog(value, rangeMin, rangeMax):
+ """Logarithmically normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""
+ logMin = math.log(rangeMin)
+ logMax = math.log(rangeMax)
+ return (math.log(value) - logMin) / (logMax - logMin)
+
+
+def interpolateLog(t, a, b):
+ """Logarithmic interpolation between a and b, with t typically in [0, 1]."""
+ logA = math.log(a)
+ logB = math.log(b)
+ return math.exp(logA + t * (logB - logA))
+
+
+def normalizeDegrees(value, rangeMin, rangeMax):
+ """Angularly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""
+ tanMin = math.tan(math.radians(rangeMin))
+ tanMax = math.tan(math.radians(rangeMax))
+ return (math.tan(math.radians(value)) - tanMin) / (tanMax - tanMin)
+
+
+def measureWeight(glyphset, glyphs=None):
+    """Measure the perceptual average weight of the given glyphs."""
+    if isinstance(glyphs, dict):
+        frequencies = glyphs
+    else:
+        frequencies = {g: 1 for g in glyphs}  # NOTE(review): raises TypeError when glyphs is None, despite the default
+
+    wght_sum = wdth_sum = 0
+    for glyph_name in glyphs:
+        if frequencies is not None:  # always true: frequencies is assigned in both branches above
+            frequency = frequencies.get(glyph_name, 0)
+            if frequency == 0:
+                continue
+        else:
+            frequency = 1
+
+        glyph = glyphset[glyph_name]
+
+        pen = AreaPen(glyphset=glyphset)
+        glyph.draw(pen)
+
+        mult = glyph.width * frequency
+        wght_sum += mult * abs(pen.value)  # |signed outline area| from AreaPen, frequency- and advance-weighted
+        wdth_sum += mult
+
+    return wght_sum / wdth_sum
+
+
+def measureWidth(glyphset, glyphs=None):
+    """Measure the average width of the given glyphs."""
+    if isinstance(glyphs, dict):
+        frequencies = glyphs
+    else:
+        frequencies = {g: 1 for g in glyphs}  # NOTE(review): raises TypeError when glyphs is None, despite the default
+
+    wdth_sum = 0
+    freq_sum = 0
+    for glyph_name in glyphs:
+        if frequencies is not None:  # always true: frequencies is assigned in both branches above
+            frequency = frequencies.get(glyph_name, 0)
+            if frequency == 0:
+                continue
+        else:
+            frequency = 1
+
+        glyph = glyphset[glyph_name]
+
+        pen = NullPen()
+        glyph.draw(pen)  # NullPen records nothing; presumably drawn only to force glyph loading — TODO confirm
+
+        wdth_sum += glyph.width * frequency
+        freq_sum += frequency
+
+    return wdth_sum / freq_sum
+
+
+def measureSlant(glyphset, glyphs=None):
+    """Measure the perceptual average slant angle of the given glyphs."""
+    if isinstance(glyphs, dict):
+        frequencies = glyphs
+    else:
+        frequencies = {g: 1 for g in glyphs}  # NOTE(review): raises TypeError when glyphs is None, despite the default
+
+    slnt_sum = 0
+    freq_sum = 0
+    for glyph_name in glyphs:
+        if frequencies is not None:  # always true: frequencies is assigned in both branches above
+            frequency = frequencies.get(glyph_name, 0)
+            if frequency == 0:
+                continue
+        else:
+            frequency = 1
+
+        glyph = glyphset[glyph_name]
+
+        pen = StatisticsPen(glyphset=glyphset)
+        glyph.draw(pen)
+
+        mult = glyph.width * frequency
+        slnt_sum += mult * pen.slant
+        freq_sum += mult
+
+    return -math.degrees(math.atan(slnt_sum / freq_sum))  # slope -> degrees; negated, presumably to match `slnt` axis sign convention — TODO confirm
+
+
+def sanitizeWidth(userTriple, designTriple, pins, measurements):
+ """Sanitize the width axis limits."""
+
+ minVal, defaultVal, maxVal = (
+ measurements[designTriple[0]],
+ measurements[designTriple[1]],
+ measurements[designTriple[2]],
+ )
+
+ calculatedMinVal = userTriple[1] * (minVal / defaultVal)
+ calculatedMaxVal = userTriple[1] * (maxVal / defaultVal)
+
+ log.info("Original width axis limits: %g:%g:%g", *userTriple)
+ log.info(
+ "Calculated width axis limits: %g:%g:%g",
+ calculatedMinVal,
+ userTriple[1],
+ calculatedMaxVal,
+ )
+
+ if (
+ abs(calculatedMinVal - userTriple[0]) / userTriple[1] > 0.05
+ or abs(calculatedMaxVal - userTriple[2]) / userTriple[1] > 0.05
+ ):
+ log.warning("Calculated width axis min/max do not match user input.")
+ log.warning(
+ " Current width axis limits: %g:%g:%g",
+ *userTriple,
+ )
+ log.warning(
+ " Suggested width axis limits: %g:%g:%g",
+ calculatedMinVal,
+ userTriple[1],
+ calculatedMaxVal,
+ )
+
+ return False
+
+ return True
+
+
+def sanitizeWeight(userTriple, designTriple, pins, measurements):
+ """Sanitize the weight axis limits."""
+
+ if len(set(userTriple)) < 3:
+ return True
+
+ minVal, defaultVal, maxVal = (
+ measurements[designTriple[0]],
+ measurements[designTriple[1]],
+ measurements[designTriple[2]],
+ )
+
+ logMin = math.log(minVal)
+ logDefault = math.log(defaultVal)
+ logMax = math.log(maxVal)
+
+ t = (userTriple[1] - userTriple[0]) / (userTriple[2] - userTriple[0])
+ y = math.exp(logMin + t * (logMax - logMin))
+ t = (y - minVal) / (maxVal - minVal)
+ calculatedDefaultVal = userTriple[0] + t * (userTriple[2] - userTriple[0])
+
+ log.info("Original weight axis limits: %g:%g:%g", *userTriple)
+ log.info(
+ "Calculated weight axis limits: %g:%g:%g",
+ userTriple[0],
+ calculatedDefaultVal,
+ userTriple[2],
+ )
+
+ if abs(calculatedDefaultVal - userTriple[1]) / userTriple[1] > 0.05:
+ log.warning("Calculated weight axis default does not match user input.")
+
+ log.warning(
+ " Current weight axis limits: %g:%g:%g",
+ *userTriple,
+ )
+
+ log.warning(
+ " Suggested weight axis limits, changing default: %g:%g:%g",
+ userTriple[0],
+ calculatedDefaultVal,
+ userTriple[2],
+ )
+
+ t = (userTriple[2] - userTriple[0]) / (userTriple[1] - userTriple[0])
+ y = math.exp(logMin + t * (logDefault - logMin))
+ t = (y - minVal) / (defaultVal - minVal)
+ calculatedMaxVal = userTriple[0] + t * (userTriple[1] - userTriple[0])
+ log.warning(
+ " Suggested weight axis limits, changing maximum: %g:%g:%g",
+ userTriple[0],
+ userTriple[1],
+ calculatedMaxVal,
+ )
+
+ t = (userTriple[0] - userTriple[2]) / (userTriple[1] - userTriple[2])
+ y = math.exp(logMax + t * (logDefault - logMax))
+ t = (y - maxVal) / (defaultVal - maxVal)
+ calculatedMinVal = userTriple[2] + t * (userTriple[1] - userTriple[2])
+ log.warning(
+ " Suggested weight axis limits, changing minimum: %g:%g:%g",
+ calculatedMinVal,
+ userTriple[1],
+ userTriple[2],
+ )
+
+ return False
+
+ return True
+
+
+def sanitizeSlant(userTriple, designTriple, pins, measurements):
+ """Sanitize the slant axis limits."""
+
+ log.info("Original slant axis limits: %g:%g:%g", *userTriple)
+ log.info(
+ "Calculated slant axis limits: %g:%g:%g",
+ measurements[designTriple[0]],
+ measurements[designTriple[1]],
+ measurements[designTriple[2]],
+ )
+
+ if (
+ abs(measurements[designTriple[0]] - userTriple[0]) > 1
+ or abs(measurements[designTriple[1]] - userTriple[1]) > 1
+ or abs(measurements[designTriple[2]] - userTriple[2]) > 1
+ ):
+ log.warning("Calculated slant axis min/default/max do not match user input.")
+ log.warning(
+ " Current slant axis limits: %g:%g:%g",
+ *userTriple,
+ )
+ log.warning(
+ " Suggested slant axis limits: %g:%g:%g",
+ measurements[designTriple[0]],
+ measurements[designTriple[1]],
+ measurements[designTriple[2]],
+ )
+
+ return False
+
+ return True
+
+
+def planAxis(
+ measureFunc,
+ normalizeFunc,
+ interpolateFunc,
+ glyphSetFunc,
+ axisTag,
+ axisLimits,
+ values,
+ samples=None,
+ glyphs=None,
+ designLimits=None,
+ pins=None,
+ sanitizeFunc=None,
+):
+ """Plan an axis.
+
+ measureFunc: callable that takes a glyphset and an optional
+ list of glyphnames, and returns the glyphset-wide measurement
+ to be used for the axis.
+
+ normalizeFunc: callable that takes a measurement and a minimum
+ and maximum, and normalizes the measurement into the range 0..1,
+ possibly extrapolating too.
+
+ interpolateFunc: callable that takes a normalized t value, and a
+ minimum and maximum, and returns the interpolated value,
+ possibly extrapolating too.
+
+ glyphSetFunc: callable that takes a variations "location" dictionary,
+ and returns a glyphset.
+
+ axisTag: the axis tag string.
+
+ axisLimits: a triple of minimum, default, and maximum values for
+ the axis. Or an `fvar` Axis object.
+
+ values: a list of output values to map for this axis.
+
+ samples: the number of samples to use when sampling. Default 8.
+
+ glyphs: a list of glyph names to use when sampling. Defaults to None,
+ which will process all glyphs.
+
+ designLimits: an optional triple of minimum, default, and maximum values
+    representing the "design" limits for the axis. If not provided, the
+ axisLimits will be used.
+
+ pins: an optional dictionary of before/after mapping entries to pin in
+ the output.
+
+ sanitizeFunc: an optional callable to call to sanitize the axis limits.
+ """
+
+ if isinstance(axisLimits, fvarAxis):
+ axisLimits = (axisLimits.minValue, axisLimits.defaultValue, axisLimits.maxValue)
+ minValue, defaultValue, maxValue = axisLimits
+
+ if samples is None:
+ samples = SAMPLES
+ if glyphs is None:
+ glyphs = glyphSetFunc({}).keys()
+ if pins is None:
+ pins = {}
+ else:
+ pins = pins.copy()
+
+ log.info(
+ "Axis limits min %g / default %g / max %g", minValue, defaultValue, maxValue
+ )
+ triple = (minValue, defaultValue, maxValue)
+
+ if designLimits is not None:
+ log.info("Axis design-limits min %g / default %g / max %g", *designLimits)
+ else:
+ designLimits = triple
+
+ if pins:
+ log.info("Pins %s", sorted(pins.items()))
+ pins.update(
+ {
+ minValue: designLimits[0],
+ defaultValue: designLimits[1],
+ maxValue: designLimits[2],
+ }
+ )
+
+ out = {}
+ outNormalized = {}
+
+ axisMeasurements = {}
+ for value in sorted({minValue, defaultValue, maxValue} | set(pins.keys())):
+ glyphset = glyphSetFunc(location={axisTag: value})
+ designValue = pins[value]
+ axisMeasurements[designValue] = measureFunc(glyphset, glyphs)
+
+ if sanitizeFunc is not None:
+ log.info("Sanitizing axis limit values for the `%s` axis.", axisTag)
+ sanitizeFunc(triple, designLimits, pins, axisMeasurements)
+
+ log.debug("Calculated average value:\n%s", pformat(axisMeasurements))
+
+ for (rangeMin, targetMin), (rangeMax, targetMax) in zip(
+ list(sorted(pins.items()))[:-1],
+ list(sorted(pins.items()))[1:],
+ ):
+ targetValues = {w for w in values if rangeMin < w < rangeMax}
+ if not targetValues:
+ continue
+
+ normalizedMin = normalizeValue(rangeMin, triple)
+ normalizedMax = normalizeValue(rangeMax, triple)
+ normalizedTargetMin = normalizeValue(targetMin, designLimits)
+ normalizedTargetMax = normalizeValue(targetMax, designLimits)
+
+ log.info("Planning target values %s.", sorted(targetValues))
+ log.info("Sampling %u points in range %g,%g.", samples, rangeMin, rangeMax)
+ valueMeasurements = axisMeasurements.copy()
+ for sample in range(1, samples + 1):
+ value = rangeMin + (rangeMax - rangeMin) * sample / (samples + 1)
+ log.debug("Sampling value %g.", value)
+ glyphset = glyphSetFunc(location={axisTag: value})
+ designValue = piecewiseLinearMap(value, pins)
+ valueMeasurements[designValue] = measureFunc(glyphset, glyphs)
+ log.debug("Sampled average value:\n%s", pformat(valueMeasurements))
+
+ measurementValue = {}
+ for value in sorted(valueMeasurements):
+ measurementValue[valueMeasurements[value]] = value
+
+ out[rangeMin] = targetMin
+ outNormalized[normalizedMin] = normalizedTargetMin
+ for value in sorted(targetValues):
+ t = normalizeFunc(value, rangeMin, rangeMax)
+ targetMeasurement = interpolateFunc(
+ t, valueMeasurements[targetMin], valueMeasurements[targetMax]
+ )
+ targetValue = piecewiseLinearMap(targetMeasurement, measurementValue)
+ log.debug("Planned mapping value %g to %g." % (value, targetValue))
+ out[value] = targetValue
+ valueNormalized = normalizedMin + (value - rangeMin) / (
+ rangeMax - rangeMin
+ ) * (normalizedMax - normalizedMin)
+ outNormalized[valueNormalized] = normalizedTargetMin + (
+ targetValue - targetMin
+ ) / (targetMax - targetMin) * (normalizedTargetMax - normalizedTargetMin)
+ out[rangeMax] = targetMax
+ outNormalized[normalizedMax] = normalizedTargetMax
+
+ log.info("Planned mapping for the `%s` axis:\n%s", axisTag, pformat(out))
+ log.info(
+ "Planned normalized mapping for the `%s` axis:\n%s",
+ axisTag,
+ pformat(outNormalized),
+ )
+
+ if all(abs(k - v) < 0.01 for k, v in outNormalized.items()):
+ log.info("Detected identity mapping for the `%s` axis. Dropping.", axisTag)
+ out = {}
+ outNormalized = {}
+
+ return out, outNormalized
+
+
+def planWeightAxis(
+    glyphSetFunc,
+    axisLimits,
+    weights=None,
+    samples=None,
+    glyphs=None,
+    designLimits=None,
+    pins=None,
+    sanitize=False,
+):
+    """Plan a weight (`wght`) axis.
+
+    weights: A list of weight values to plan for. If None, the default
+    values are used.
+
+    This function simply calls planAxis with values=weights, and the appropriate
+    arguments. See documentation for planAxis for more information.
+    """
+
+    if weights is None:
+        weights = WEIGHTS
+
+    return planAxis(
+        measureWeight,
+        normalizeLinear,
+        interpolateLog,
+        glyphSetFunc,
+        "wght",
+        axisLimits,
+        values=weights,
+        samples=samples,
+        glyphs=glyphs,
+        designLimits=designLimits,
+        pins=pins,
+        sanitizeFunc=sanitizeWeight if sanitize else None,
+    )
+
+
+def planWidthAxis(
+    glyphSetFunc,
+    axisLimits,
+    widths=None,
+    samples=None,
+    glyphs=None,
+    designLimits=None,
+    pins=None,
+    sanitize=False,
+):
+    """Plan a width (`wdth`) axis.
+
+    widths: A list of width values (percentages) to plan for. If None, the default
+    values are used.
+
+    This function simply calls planAxis with values=widths, and the appropriate
+    arguments. See documentation for planAxis for more information.
+    """
+
+    if widths is None:
+        widths = WIDTHS
+
+    return planAxis(
+        measureWidth,
+        normalizeLinear,
+        interpolateLinear,
+        glyphSetFunc,
+        "wdth",
+        axisLimits,
+        values=widths,
+        samples=samples,
+        glyphs=glyphs,
+        designLimits=designLimits,
+        pins=pins,
+        sanitizeFunc=sanitizeWidth if sanitize else None,
+    )
+
+
+def planSlantAxis(
+    glyphSetFunc,
+    axisLimits,
+    slants=None,
+    samples=None,
+    glyphs=None,
+    designLimits=None,
+    pins=None,
+    sanitize=False,
+):
+    """Plan a slant (`slnt`) axis.
+
+    slants: A list of slant angles to plan for. If None, the default
+    values are used.
+
+    This function simply calls planAxis with values=slants, and the appropriate
+    arguments. See documentation for planAxis for more information.
+    """
+
+    if slants is None:
+        slants = SLANTS
+
+    return planAxis(
+        measureSlant,
+        normalizeDegrees,
+        interpolateLinear,
+        glyphSetFunc,
+        "slnt",
+        axisLimits,
+        values=slants,
+        samples=samples,
+        glyphs=glyphs,
+        designLimits=designLimits,
+        pins=pins,
+        sanitizeFunc=sanitizeSlant if sanitize else None,
+    )
+
+
+def planOpticalSizeAxis(
+    glyphSetFunc,
+    axisLimits,
+    sizes=None,
+    samples=None,
+    glyphs=None,
+    designLimits=None,
+    pins=None,
+    sanitize=False,  # NOTE(review): accepted but ignored — no sanitizeFunc is passed below; confirm intended
+):
+    """Plan an optical-size (`opsz`) axis.
+
+    sizes: A list of optical size values to plan for. If None, the default
+    values are used.
+
+    This function simply calls planAxis with values=sizes, and the appropriate
+    arguments. See documentation for planAxis for more information.
+    """
+
+    if sizes is None:
+        sizes = SIZES
+
+    return planAxis(
+        measureWeight,
+        normalizeLog,
+        interpolateLog,
+        glyphSetFunc,
+        "opsz",
+        axisLimits,
+        values=sizes,
+        samples=samples,
+        glyphs=glyphs,
+        designLimits=designLimits,
+        pins=pins,
+    )
+
+
+def makeDesignspaceSnippet(axisTag, axisName, axisLimit, mapping):
+ """Make a designspace snippet for a single axis."""
+
+ designspaceSnippet = (
+ ' <axis tag="%s" name="%s" minimum="%g" default="%g" maximum="%g"'
+ % ((axisTag, axisName) + axisLimit)
+ )
+ if mapping:
+ designspaceSnippet += ">\n"
+ else:
+ designspaceSnippet += "/>"
+
+ for key, value in mapping.items():
+ designspaceSnippet += ' <map input="%g" output="%g"/>\n' % (key, value)
+
+ if mapping:
+ designspaceSnippet += " </axis>"
+
+ return designspaceSnippet
+
+
+def addEmptyAvar(font):
+    """Add an `avar` table with an empty segment map for every `fvar` axis."""
+    font["avar"] = avar = newTable("avar")
+    for axis in font["fvar"].axes:  # was `fvar.axes`: NameError — `fvar` is not defined in this scope
+        avar.segments[axis.axisTag] = {}
+
+
+def processAxis(
+    font,
+    planFunc,
+    axisTag,
+    axisName,
+    values,
+    samples=None,
+    glyphs=None,
+    designLimits=None,
+    pins=None,
+    sanitize=False,
+    plot=False,
+):
+    """Plan a single axis, update the font's `avar`, and return a designspace snippet."""
+
+    axisLimits = None
+    for axis in font["fvar"].axes:
+        if axis.axisTag == axisTag:
+            axisLimits = axis
+            break
+    if axisLimits is None:
+        return ""
+    axisLimits = (axisLimits.minValue, axisLimits.defaultValue, axisLimits.maxValue)
+
+    log.info("Planning %s axis.", axisName)
+
+    if "avar" in font:
+        existingMapping = font["avar"].segments[axisTag]
+        font["avar"].segments[axisTag] = {}
+    else:
+        existingMapping = None
+
+    if values is not None and isinstance(values, str):
+        values = [float(w) for w in values.split()]
+
+    if designLimits is not None and isinstance(designLimits, str):
+        designLimits = [float(d) for d in designLimits.split(":")]  # was `options.designLimits`: NameError — `options` is not defined here
+        assert (
+            len(designLimits) == 3
+            and designLimits[0] <= designLimits[1] <= designLimits[2]
+        )
+    # Non-string values (None, or an already-parsed triple) pass through
+    # unchanged; previously an `else` branch reset them to None, discarding caller input.
+
+    if pins is not None and isinstance(pins, str):
+        newPins = {}
+        for pin in pins.split():
+            before, after = pin.split(":")
+            newPins[float(before)] = float(after)
+        pins = newPins
+        del newPins
+
+    mapping, mappingNormalized = planFunc(
+        font.getGlyphSet,
+        axisLimits,
+        values,
+        samples=samples,
+        glyphs=glyphs,
+        designLimits=designLimits,
+        pins=pins,
+        sanitize=sanitize,
+    )
+
+    if plot:
+        from matplotlib import pyplot
+
+        pyplot.plot(
+            sorted(mappingNormalized),
+            [mappingNormalized[k] for k in sorted(mappingNormalized)],
+        )
+        pyplot.show()
+
+    if existingMapping is not None:
+        log.info("Existing %s mapping:\n%s", axisName, pformat(existingMapping))
+
+    if mapping:
+        if "avar" not in font:
+            addEmptyAvar(font)
+        font["avar"].segments[axisTag] = mappingNormalized
+    else:
+        if "avar" in font:
+            font["avar"].segments[axisTag] = {}
+
+    designspaceSnippet = makeDesignspaceSnippet(
+        axisTag,
+        axisName,
+        axisLimits,
+        mapping,
+    )
+    return designspaceSnippet
+
+
+def main(args=None):
+ """Plan the standard axis mappings for a variable font"""
+
+ if args is None:
+ import sys
+
+ args = sys.argv[1:]
+
+ from fontTools import configLogger
+ from fontTools.ttLib import TTFont
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools varLib.avarPlanner",
+ description="Plan `avar` table for variable font",
+ )
+ parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")
+ parser.add_argument(
+ "-o",
+ "--output-file",
+ type=str,
+ help="Output font file name.",
+ )
+ parser.add_argument(
+ "--weights", type=str, help="Space-separate list of weights to generate."
+ )
+ parser.add_argument(
+ "--widths", type=str, help="Space-separate list of widths to generate."
+ )
+ parser.add_argument(
+ "--slants", type=str, help="Space-separate list of slants to generate."
+ )
+ parser.add_argument(
+ "--sizes", type=str, help="Space-separate list of optical-sizes to generate."
+ )
+ parser.add_argument("--samples", type=int, help="Number of samples.")
+ parser.add_argument(
+ "-s", "--sanitize", action="store_true", help="Sanitize axis limits"
+ )
+ parser.add_argument(
+ "-g",
+ "--glyphs",
+ type=str,
+ help="Space-separate list of glyphs to use for sampling.",
+ )
+ parser.add_argument(
+ "--weight-design-limits",
+ type=str,
+ help="min:default:max in design units for the `wght` axis.",
+ )
+ parser.add_argument(
+ "--width-design-limits",
+ type=str,
+ help="min:default:max in design units for the `wdth` axis.",
+ )
+ parser.add_argument(
+ "--slant-design-limits",
+ type=str,
+ help="min:default:max in design units for the `slnt` axis.",
+ )
+ parser.add_argument(
+ "--optical-size-design-limits",
+ type=str,
+ help="min:default:max in design units for the `opsz` axis.",
+ )
+ parser.add_argument(
+ "--weight-pins",
+ type=str,
+ help="Space-separate list of before:after pins for the `wght` axis.",
+ )
+ parser.add_argument(
+ "--width-pins",
+ type=str,
+ help="Space-separate list of before:after pins for the `wdth` axis.",
+ )
+ parser.add_argument(
+ "--slant-pins",
+ type=str,
+ help="Space-separate list of before:after pins for the `slnt` axis.",
+ )
+ parser.add_argument(
+ "--optical-size-pins",
+ type=str,
+ help="Space-separate list of before:after pins for the `opsz` axis.",
+ )
+ parser.add_argument(
+ "-p", "--plot", action="store_true", help="Plot the resulting mapping."
+ )
+
+ logging_group = parser.add_mutually_exclusive_group(required=False)
+ logging_group.add_argument(
+ "-v", "--verbose", action="store_true", help="Run more verbosely."
+ )
+ logging_group.add_argument(
+ "-q", "--quiet", action="store_true", help="Turn verbosity off."
+ )
+
+ options = parser.parse_args(args)
+
+ configLogger(
+ level=("DEBUG" if options.verbose else "WARNING" if options.quiet else "INFO")
+ )
+
+ font = TTFont(options.font)
+ if not "fvar" in font:
+ log.error("Not a variable font.")
+ return 1
+
+ if options.glyphs is not None:
+ glyphs = options.glyphs.split()
+ if ":" in options.glyphs:
+ glyphs = {}
+ for g in options.glyphs.split():
+ if ":" in g:
+ glyph, frequency = g.split(":")
+ glyphs[glyph] = float(frequency)
+ else:
+ glyphs[g] = 1.0
+ else:
+ glyphs = None
+
+ designspaceSnippets = []
+
+ designspaceSnippets.append(
+ processAxis(
+ font,
+ planWeightAxis,
+ "wght",
+ "Weight",
+ values=options.weights,
+ samples=options.samples,
+ glyphs=glyphs,
+ designLimits=options.weight_design_limits,
+ pins=options.weight_pins,
+ sanitize=options.sanitize,
+ plot=options.plot,
+ )
+ )
+ designspaceSnippets.append(
+ processAxis(
+ font,
+ planWidthAxis,
+ "wdth",
+ "Width",
+ values=options.widths,
+ samples=options.samples,
+ glyphs=glyphs,
+ designLimits=options.width_design_limits,
+ pins=options.width_pins,
+ sanitize=options.sanitize,
+ plot=options.plot,
+ )
+ )
+ designspaceSnippets.append(
+ processAxis(
+ font,
+ planSlantAxis,
+ "slnt",
+ "Slant",
+ values=options.slants,
+ samples=options.samples,
+ glyphs=glyphs,
+ designLimits=options.slant_design_limits,
+ pins=options.slant_pins,
+ sanitize=options.sanitize,
+ plot=options.plot,
+ )
+ )
+ designspaceSnippets.append(
+ processAxis(
+ font,
+ planOpticalSizeAxis,
+ "opsz",
+ "OpticalSize",
+ values=options.sizes,
+ samples=options.samples,
+ glyphs=glyphs,
+ designLimits=options.optical_size_design_limits,
+ pins=options.optical_size_pins,
+ sanitize=options.sanitize,
+ plot=options.plot,
+ )
+ )
+
+ log.info("Designspace snippet:")
+ for snippet in designspaceSnippets:
+ if snippet:
+ print(snippet)
+
+ if options.output_file is None:
+ outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar")
+ else:
+ outfile = options.output_file
+ if outfile:
+ log.info("Saving %s", outfile)
+ font.save(outfile)
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(main())
diff --git a/Lib/fontTools/varLib/builder.py b/Lib/fontTools/varLib/builder.py
index 60d7172e..94cc5bf0 100644
--- a/Lib/fontTools/varLib/builder.py
+++ b/Lib/fontTools/varLib/builder.py
@@ -3,135 +3,155 @@ from fontTools.ttLib.tables import otTables as ot
# VariationStore
+
def buildVarRegionAxis(axisSupport):
- self = ot.VarRegionAxis()
- self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]
- return self
+ self = ot.VarRegionAxis()
+ self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]
+ return self
+
def buildVarRegion(support, axisTags):
- assert all(tag in axisTags for tag in support.keys()), ("Unknown axis tag found.", support, axisTags)
- self = ot.VarRegion()
- self.VarRegionAxis = []
- for tag in axisTags:
- self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0,0,0))))
- return self
+ assert all(tag in axisTags for tag in support.keys()), (
+ "Unknown axis tag found.",
+ support,
+ axisTags,
+ )
+ self = ot.VarRegion()
+ self.VarRegionAxis = []
+ for tag in axisTags:
+ self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0, 0, 0))))
+ return self
+
def buildVarRegionList(supports, axisTags):
- self = ot.VarRegionList()
- self.RegionAxisCount = len(axisTags)
- self.Region = []
- for support in supports:
- self.Region.append(buildVarRegion(support, axisTags))
- self.RegionCount = len(self.Region)
- return self
+ self = ot.VarRegionList()
+ self.RegionAxisCount = len(axisTags)
+ self.Region = []
+ for support in supports:
+ self.Region.append(buildVarRegion(support, axisTags))
+ self.RegionCount = len(self.Region)
+ return self
def _reorderItem(lst, mapping):
- return [lst[i] for i in mapping]
+ return [lst[i] for i in mapping]
+
def VarData_calculateNumShorts(self, optimize=False):
- count = self.VarRegionCount
- items = self.Item
- bit_lengths = [0] * count
- for item in items:
- # The "+ (i < -1)" magic is to handle two's-compliment.
- # That is, we want to get back 7 for -128, whereas
- # bit_length() returns 8. Similarly for -65536.
- # The reason "i < -1" is used instead of "i < 0" is that
- # the latter would make it return 0 for "-1" instead of 1.
- bl = [(i + (i < -1)).bit_length() for i in item]
- bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]
- # The addition of 8, instead of seven, is to account for the sign bit.
- # This "((b + 8) >> 3) if b else 0" when combined with the above
- # "(i + (i < -1)).bit_length()" is a faster way to compute byte-lengths
- # conforming to:
- #
- # byte_length = (0 if i == 0 else
- # 1 if -128 <= i < 128 else
- # 2 if -65536 <= i < 65536 else
- # ...)
- byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]
-
- # https://github.com/fonttools/fonttools/issues/2279
- longWords = any(b > 2 for b in byte_lengths)
-
- if optimize:
- # Reorder columns such that wider columns come before narrower columns
- mapping = []
- mapping.extend(i for i,b in enumerate(byte_lengths) if b > 2)
- mapping.extend(i for i,b in enumerate(byte_lengths) if b == 2)
- mapping.extend(i for i,b in enumerate(byte_lengths) if b == 1)
-
- byte_lengths = _reorderItem(byte_lengths, mapping)
- self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)
- self.VarRegionCount = len(self.VarRegionIndex)
- for i in range(len(items)):
- items[i] = _reorderItem(items[i], mapping)
-
- if longWords:
- self.NumShorts = max((i for i,b in enumerate(byte_lengths) if b > 2), default=-1) + 1
- self.NumShorts |= 0x8000
- else:
- self.NumShorts = max((i for i,b in enumerate(byte_lengths) if b > 1), default=-1) + 1
-
- self.VarRegionCount = len(self.VarRegionIndex)
- return self
+ count = self.VarRegionCount
+ items = self.Item
+ bit_lengths = [0] * count
+ for item in items:
+        # The "+ (i < -1)" magic is to handle two's complement.
+ # That is, we want to get back 7 for -128, whereas
+ # bit_length() returns 8. Similarly for -65536.
+ # The reason "i < -1" is used instead of "i < 0" is that
+ # the latter would make it return 0 for "-1" instead of 1.
+ bl = [(i + (i < -1)).bit_length() for i in item]
+ bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]
+ # The addition of 8, instead of seven, is to account for the sign bit.
+ # This "((b + 8) >> 3) if b else 0" when combined with the above
+ # "(i + (i < -1)).bit_length()" is a faster way to compute byte-lengths
+ # conforming to:
+ #
+ # byte_length = (0 if i == 0 else
+ # 1 if -128 <= i < 128 else
+ # 2 if -65536 <= i < 65536 else
+ # ...)
+ byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]
+
+ # https://github.com/fonttools/fonttools/issues/2279
+ longWords = any(b > 2 for b in byte_lengths)
+
+ if optimize:
+ # Reorder columns such that wider columns come before narrower columns
+ mapping = []
+ mapping.extend(i for i, b in enumerate(byte_lengths) if b > 2)
+ mapping.extend(i for i, b in enumerate(byte_lengths) if b == 2)
+ mapping.extend(i for i, b in enumerate(byte_lengths) if b == 1)
+
+ byte_lengths = _reorderItem(byte_lengths, mapping)
+ self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)
+ self.VarRegionCount = len(self.VarRegionIndex)
+ for i in range(len(items)):
+ items[i] = _reorderItem(items[i], mapping)
+
+ if longWords:
+ self.NumShorts = (
+ max((i for i, b in enumerate(byte_lengths) if b > 2), default=-1) + 1
+ )
+ self.NumShorts |= 0x8000
+ else:
+ self.NumShorts = (
+ max((i for i, b in enumerate(byte_lengths) if b > 1), default=-1) + 1
+ )
+
+ self.VarRegionCount = len(self.VarRegionIndex)
+ return self
+
ot.VarData.calculateNumShorts = VarData_calculateNumShorts
+
def VarData_CalculateNumShorts(self, optimize=True):
- """Deprecated name for VarData_calculateNumShorts() which
- defaults to optimize=True. Use varData.calculateNumShorts()
- or varData.optimize()."""
- return VarData_calculateNumShorts(self, optimize=optimize)
+ """Deprecated name for VarData_calculateNumShorts() which
+ defaults to optimize=True. Use varData.calculateNumShorts()
+ or varData.optimize()."""
+ return VarData_calculateNumShorts(self, optimize=optimize)
+
def VarData_optimize(self):
- return VarData_calculateNumShorts(self, optimize=True)
+ return VarData_calculateNumShorts(self, optimize=True)
+
ot.VarData.optimize = VarData_optimize
def buildVarData(varRegionIndices, items, optimize=True):
- self = ot.VarData()
- self.VarRegionIndex = list(varRegionIndices)
- regionCount = self.VarRegionCount = len(self.VarRegionIndex)
- records = self.Item = []
- if items:
- for item in items:
- assert len(item) == regionCount
- records.append(list(item))
- self.ItemCount = len(self.Item)
- self.calculateNumShorts(optimize=optimize)
- return self
+ self = ot.VarData()
+ self.VarRegionIndex = list(varRegionIndices)
+ regionCount = self.VarRegionCount = len(self.VarRegionIndex)
+ records = self.Item = []
+ if items:
+ for item in items:
+ assert len(item) == regionCount
+ records.append(list(item))
+ self.ItemCount = len(self.Item)
+ self.calculateNumShorts(optimize=optimize)
+ return self
def buildVarStore(varRegionList, varDataList):
- self = ot.VarStore()
- self.Format = 1
- self.VarRegionList = varRegionList
- self.VarData = list(varDataList)
- self.VarDataCount = len(self.VarData)
- return self
+ self = ot.VarStore()
+ self.Format = 1
+ self.VarRegionList = varRegionList
+ self.VarData = list(varDataList)
+ self.VarDataCount = len(self.VarData)
+ return self
# Variation helpers
+
def buildVarIdxMap(varIdxes, glyphOrder):
- self = ot.VarIdxMap()
- self.mapping = {g:v for g,v in zip(glyphOrder, varIdxes)}
- return self
+ self = ot.VarIdxMap()
+ self.mapping = {g: v for g, v in zip(glyphOrder, varIdxes)}
+ return self
def buildDeltaSetIndexMap(varIdxes):
- self = ot.DeltaSetIndexMap()
- self.mapping = list(varIdxes)
- self.Format = 1 if len(varIdxes) > 0xFFFF else 0
- return self
+ mapping = list(varIdxes)
+ if all(i == v for i, v in enumerate(mapping)):
+ return None
+ self = ot.DeltaSetIndexMap()
+ self.mapping = mapping
+ self.Format = 1 if len(mapping) > 0xFFFF else 0
+ return self
def buildVarDevTable(varIdx):
- self = ot.Device()
- self.DeltaFormat = 0x8000
- self.StartSize = varIdx >> 16
- self.EndSize = varIdx & 0xFFFF
- return self
+ self = ot.Device()
+ self.DeltaFormat = 0x8000
+ self.StartSize = varIdx >> 16
+ self.EndSize = varIdx & 0xFFFF
+ return self
diff --git a/Lib/fontTools/varLib/cff.py b/Lib/fontTools/varLib/cff.py
index 727efa70..52e6a884 100644
--- a/Lib/fontTools/varLib/cff.py
+++ b/Lib/fontTools/varLib/cff.py
@@ -1,19 +1,18 @@
from collections import namedtuple
from fontTools.cffLib import (
- maxStackLimit,
- TopDictIndex,
- buildOrder,
- topDictOperators,
- topDictOperators2,
- privateDictOperators,
- privateDictOperators2,
- FDArrayIndex,
- FontDict,
- VarStoreData
+ maxStackLimit,
+ TopDictIndex,
+ buildOrder,
+ topDictOperators,
+ topDictOperators2,
+ privateDictOperators,
+ privateDictOperators2,
+ FDArrayIndex,
+ FontDict,
+ VarStoreData,
)
from io import BytesIO
-from fontTools.cffLib.specializer import (
- specializeCommands, commandsToProgram)
+from fontTools.cffLib.specializer import specializeCommands, commandsToProgram
from fontTools.ttLib import newTable
from fontTools import varLib
from fontTools.varLib.models import allEqual
@@ -23,8 +22,11 @@ from fontTools.pens.t2CharStringPen import T2CharStringPen
from functools import partial
from .errors import (
- VarLibCFFDictMergeError, VarLibCFFPointTypeMergeError,
- VarLibCFFHintTypeMergeError,VarLibMergeError)
+ VarLibCFFDictMergeError,
+ VarLibCFFPointTypeMergeError,
+ VarLibCFFHintTypeMergeError,
+ VarLibMergeError,
+)
# Backwards compatibility
@@ -33,196 +35,206 @@ MergeTypeError = VarLibCFFPointTypeMergeError
def addCFFVarStore(varFont, varModel, varDataList, masterSupports):
- fvarTable = varFont['fvar']
- axisKeys = [axis.axisTag for axis in fvarTable.axes]
- varTupleList = varLib.builder.buildVarRegionList(masterSupports, axisKeys)
- varStoreCFFV = varLib.builder.buildVarStore(varTupleList, varDataList)
+ fvarTable = varFont["fvar"]
+ axisKeys = [axis.axisTag for axis in fvarTable.axes]
+ varTupleList = varLib.builder.buildVarRegionList(masterSupports, axisKeys)
+ varStoreCFFV = varLib.builder.buildVarStore(varTupleList, varDataList)
- topDict = varFont['CFF2'].cff.topDictIndex[0]
- topDict.VarStore = VarStoreData(otVarStore=varStoreCFFV)
- if topDict.FDArray[0].vstore is None:
- fdArray = topDict.FDArray
- for fontDict in fdArray:
- if hasattr(fontDict, "Private"):
- fontDict.Private.vstore = topDict.VarStore
+ topDict = varFont["CFF2"].cff.topDictIndex[0]
+ topDict.VarStore = VarStoreData(otVarStore=varStoreCFFV)
+ if topDict.FDArray[0].vstore is None:
+ fdArray = topDict.FDArray
+ for fontDict in fdArray:
+ if hasattr(fontDict, "Private"):
+ fontDict.Private.vstore = topDict.VarStore
def lib_convertCFFToCFF2(cff, otFont):
- # This assumes a decompiled CFF table.
- cff2GetGlyphOrder = cff.otFont.getGlyphOrder
- topDictData = TopDictIndex(None, cff2GetGlyphOrder, None)
- topDictData.items = cff.topDictIndex.items
- cff.topDictIndex = topDictData
- topDict = topDictData[0]
- if hasattr(topDict, 'Private'):
- privateDict = topDict.Private
- else:
- privateDict = None
- opOrder = buildOrder(topDictOperators2)
- topDict.order = opOrder
- topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
- if not hasattr(topDict, "FDArray"):
- fdArray = topDict.FDArray = FDArrayIndex()
- fdArray.strings = None
- fdArray.GlobalSubrs = topDict.GlobalSubrs
- topDict.GlobalSubrs.fdArray = fdArray
- charStrings = topDict.CharStrings
- if charStrings.charStringsAreIndexed:
- charStrings.charStringsIndex.fdArray = fdArray
- else:
- charStrings.fdArray = fdArray
- fontDict = FontDict()
- fontDict.setCFF2(True)
- fdArray.append(fontDict)
- fontDict.Private = privateDict
- privateOpOrder = buildOrder(privateDictOperators2)
- if privateDict is not None:
- for entry in privateDictOperators:
- key = entry[1]
- if key not in privateOpOrder:
- if key in privateDict.rawDict:
- # print "Removing private dict", key
- del privateDict.rawDict[key]
- if hasattr(privateDict, key):
- delattr(privateDict, key)
- # print "Removing privateDict attr", key
- else:
- # clean up the PrivateDicts in the fdArray
- fdArray = topDict.FDArray
- privateOpOrder = buildOrder(privateDictOperators2)
- for fontDict in fdArray:
- fontDict.setCFF2(True)
- for key in list(fontDict.rawDict.keys()):
- if key not in fontDict.order:
- del fontDict.rawDict[key]
- if hasattr(fontDict, key):
- delattr(fontDict, key)
-
- privateDict = fontDict.Private
- for entry in privateDictOperators:
- key = entry[1]
- if key not in privateOpOrder:
- if key in privateDict.rawDict:
- # print "Removing private dict", key
- del privateDict.rawDict[key]
- if hasattr(privateDict, key):
- delattr(privateDict, key)
- # print "Removing privateDict attr", key
- # Now delete up the decrecated topDict operators from CFF 1.0
- for entry in topDictOperators:
- key = entry[1]
- if key not in opOrder:
- if key in topDict.rawDict:
- del topDict.rawDict[key]
- if hasattr(topDict, key):
- delattr(topDict, key)
-
- # At this point, the Subrs and Charstrings are all still T2Charstring class
- # easiest to fix this by compiling, then decompiling again
- cff.major = 2
- file = BytesIO()
- cff.compile(file, otFont, isCFF2=True)
- file.seek(0)
- cff.decompile(file, otFont, isCFF2=True)
+ # This assumes a decompiled CFF table.
+ cff2GetGlyphOrder = cff.otFont.getGlyphOrder
+ topDictData = TopDictIndex(None, cff2GetGlyphOrder, None)
+ topDictData.items = cff.topDictIndex.items
+ cff.topDictIndex = topDictData
+ topDict = topDictData[0]
+ if hasattr(topDict, "Private"):
+ privateDict = topDict.Private
+ else:
+ privateDict = None
+ opOrder = buildOrder(topDictOperators2)
+ topDict.order = opOrder
+ topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
+ if not hasattr(topDict, "FDArray"):
+ fdArray = topDict.FDArray = FDArrayIndex()
+ fdArray.strings = None
+ fdArray.GlobalSubrs = topDict.GlobalSubrs
+ topDict.GlobalSubrs.fdArray = fdArray
+ charStrings = topDict.CharStrings
+ if charStrings.charStringsAreIndexed:
+ charStrings.charStringsIndex.fdArray = fdArray
+ else:
+ charStrings.fdArray = fdArray
+ fontDict = FontDict()
+ fontDict.setCFF2(True)
+ fdArray.append(fontDict)
+ fontDict.Private = privateDict
+ privateOpOrder = buildOrder(privateDictOperators2)
+ if privateDict is not None:
+ for entry in privateDictOperators:
+ key = entry[1]
+ if key not in privateOpOrder:
+ if key in privateDict.rawDict:
+ # print "Removing private dict", key
+ del privateDict.rawDict[key]
+ if hasattr(privateDict, key):
+ delattr(privateDict, key)
+ # print "Removing privateDict attr", key
+ else:
+ # clean up the PrivateDicts in the fdArray
+ fdArray = topDict.FDArray
+ privateOpOrder = buildOrder(privateDictOperators2)
+ for fontDict in fdArray:
+ fontDict.setCFF2(True)
+ for key in list(fontDict.rawDict.keys()):
+ if key not in fontDict.order:
+ del fontDict.rawDict[key]
+ if hasattr(fontDict, key):
+ delattr(fontDict, key)
+
+ privateDict = fontDict.Private
+ for entry in privateDictOperators:
+ key = entry[1]
+ if key not in privateOpOrder:
+ if key in privateDict.rawDict:
+ # print "Removing private dict", key
+ del privateDict.rawDict[key]
+ if hasattr(privateDict, key):
+ delattr(privateDict, key)
+ # print "Removing privateDict attr", key
+ # Now delete up the deprecated topDict operators from CFF 1.0
+ for entry in topDictOperators:
+ key = entry[1]
+ if key not in opOrder:
+ if key in topDict.rawDict:
+ del topDict.rawDict[key]
+ if hasattr(topDict, key):
+ delattr(topDict, key)
+
+ # At this point, the Subrs and Charstrings are all still T2Charstring class
+ # easiest to fix this by compiling, then decompiling again
+ cff.major = 2
+ file = BytesIO()
+ cff.compile(file, otFont, isCFF2=True)
+ file.seek(0)
+ cff.decompile(file, otFont, isCFF2=True)
def convertCFFtoCFF2(varFont):
- # Convert base font to a single master CFF2 font.
- cffTable = varFont['CFF ']
- lib_convertCFFToCFF2(cffTable.cff, varFont)
- newCFF2 = newTable("CFF2")
- newCFF2.cff = cffTable.cff
- varFont['CFF2'] = newCFF2
- del varFont['CFF ']
+ # Convert base font to a single master CFF2 font.
+ cffTable = varFont["CFF "]
+ lib_convertCFFToCFF2(cffTable.cff, varFont)
+ newCFF2 = newTable("CFF2")
+ newCFF2.cff = cffTable.cff
+ varFont["CFF2"] = newCFF2
+ del varFont["CFF "]
def conv_to_int(num):
- if isinstance(num, float) and num.is_integer():
- return int(num)
- return num
-
-
-pd_blend_fields = ("BlueValues", "OtherBlues", "FamilyBlues",
- "FamilyOtherBlues", "BlueScale", "BlueShift",
- "BlueFuzz", "StdHW", "StdVW", "StemSnapH",
- "StemSnapV")
+ if isinstance(num, float) and num.is_integer():
+ return int(num)
+ return num
+
+
+pd_blend_fields = (
+ "BlueValues",
+ "OtherBlues",
+ "FamilyBlues",
+ "FamilyOtherBlues",
+ "BlueScale",
+ "BlueShift",
+ "BlueFuzz",
+ "StdHW",
+ "StdVW",
+ "StemSnapH",
+ "StemSnapV",
+)
def get_private(regionFDArrays, fd_index, ri, fd_map):
- region_fdArray = regionFDArrays[ri]
- region_fd_map = fd_map[fd_index]
- if ri in region_fd_map:
- region_fdIndex = region_fd_map[ri]
- private = region_fdArray[region_fdIndex].Private
- else:
- private = None
- return private
+ region_fdArray = regionFDArrays[ri]
+ region_fd_map = fd_map[fd_index]
+ if ri in region_fd_map:
+ region_fdIndex = region_fd_map[ri]
+ private = region_fdArray[region_fdIndex].Private
+ else:
+ private = None
+ return private
def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):
- """
- I step through the FontDicts in the FDArray of the varfont TopDict.
- For each varfont FontDict:
-
- * step through each key in FontDict.Private.
- * For each key, step through each relevant source font Private dict, and
- build a list of values to blend.
-
- The 'relevant' source fonts are selected by first getting the right
- submodel using ``vsindex_dict[vsindex]``. The indices of the
- ``subModel.locations`` are mapped to source font list indices by
- assuming the latter order is the same as the order of the
- ``var_model.locations``. I can then get the index of each subModel
- location in the list of ``var_model.locations``.
- """
-
- topDict = top_dicts[0]
- region_top_dicts = top_dicts[1:]
- if hasattr(region_top_dicts[0], 'FDArray'):
- regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts]
- else:
- regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts]
- for fd_index, font_dict in enumerate(topDict.FDArray):
- private_dict = font_dict.Private
- vsindex = getattr(private_dict, 'vsindex', 0)
- # At the moment, no PrivateDict has a vsindex key, but let's support
- # how it should work. See comment at end of
- # merge_charstrings() - still need to optimize use of vsindex.
- sub_model, _ = vsindex_dict[vsindex]
- master_indices = []
- for loc in sub_model.locations[1:]:
- i = var_model.locations.index(loc) - 1
- master_indices.append(i)
- pds = [private_dict]
- last_pd = private_dict
- for ri in master_indices:
- pd = get_private(regionFDArrays, fd_index, ri, fd_map)
- # If the region font doesn't have this FontDict, just reference
- # the last one used.
- if pd is None:
- pd = last_pd
- else:
- last_pd = pd
- pds.append(pd)
- num_masters = len(pds)
- for key, value in private_dict.rawDict.items():
- dataList = []
- if key not in pd_blend_fields:
- continue
- if isinstance(value, list):
- try:
- values = [pd.rawDict[key] for pd in pds]
- except KeyError:
- print(
- "Warning: {key} in default font Private dict is "
- "missing from another font, and was "
- "discarded.".format(key=key))
- continue
- try:
- values = zip(*values)
- except IndexError:
- raise VarLibCFFDictMergeError(key, value, values)
- """
+ """
+ I step through the FontDicts in the FDArray of the varfont TopDict.
+ For each varfont FontDict:
+
+ * step through each key in FontDict.Private.
+ * For each key, step through each relevant source font Private dict, and
+ build a list of values to blend.
+
+ The 'relevant' source fonts are selected by first getting the right
+ submodel using ``vsindex_dict[vsindex]``. The indices of the
+ ``subModel.locations`` are mapped to source font list indices by
+ assuming the latter order is the same as the order of the
+ ``var_model.locations``. I can then get the index of each subModel
+ location in the list of ``var_model.locations``.
+ """
+
+ topDict = top_dicts[0]
+ region_top_dicts = top_dicts[1:]
+ if hasattr(region_top_dicts[0], "FDArray"):
+ regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts]
+ else:
+ regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts]
+ for fd_index, font_dict in enumerate(topDict.FDArray):
+ private_dict = font_dict.Private
+ vsindex = getattr(private_dict, "vsindex", 0)
+ # At the moment, no PrivateDict has a vsindex key, but let's support
+ # how it should work. See comment at end of
+ # merge_charstrings() - still need to optimize use of vsindex.
+ sub_model, _ = vsindex_dict[vsindex]
+ master_indices = []
+ for loc in sub_model.locations[1:]:
+ i = var_model.locations.index(loc) - 1
+ master_indices.append(i)
+ pds = [private_dict]
+ last_pd = private_dict
+ for ri in master_indices:
+ pd = get_private(regionFDArrays, fd_index, ri, fd_map)
+ # If the region font doesn't have this FontDict, just reference
+ # the last one used.
+ if pd is None:
+ pd = last_pd
+ else:
+ last_pd = pd
+ pds.append(pd)
+ num_masters = len(pds)
+ for key, value in private_dict.rawDict.items():
+ dataList = []
+ if key not in pd_blend_fields:
+ continue
+ if isinstance(value, list):
+ try:
+ values = [pd.rawDict[key] for pd in pds]
+ except KeyError:
+ print(
+ "Warning: {key} in default font Private dict is "
+ "missing from another font, and was "
+ "discarded.".format(key=key)
+ )
+ continue
+ try:
+ values = zip(*values)
+ except IndexError:
+ raise VarLibCFFDictMergeError(key, value, values)
+ """
Row 0 contains the first value from each master.
Convert each row from absolute values to relative
values from the previous row.
@@ -235,427 +247,466 @@ def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):
and is converted finally to:
OtherBlues = [[-217, 17.0, 46.0], [-205, 0.0, 0.0]]
"""
- prev_val_list = [0] * num_masters
- any_points_differ = False
- for val_list in values:
- rel_list = [(val - prev_val_list[i]) for (
- i, val) in enumerate(val_list)]
- if (not any_points_differ) and not allEqual(rel_list):
- any_points_differ = True
- prev_val_list = val_list
- deltas = sub_model.getDeltas(rel_list)
- # For PrivateDict BlueValues, the default font
- # values are absolute, not relative to the prior value.
- deltas[0] = val_list[0]
- dataList.append(deltas)
- # If there are no blend values,then
- # we can collapse the blend lists.
- if not any_points_differ:
- dataList = [data[0] for data in dataList]
- else:
- values = [pd.rawDict[key] for pd in pds]
- if not allEqual(values):
- dataList = sub_model.getDeltas(values)
- else:
- dataList = values[0]
-
- # Convert numbers with no decimal part to an int
- if isinstance(dataList, list):
- for i, item in enumerate(dataList):
- if isinstance(item, list):
- for j, jtem in enumerate(item):
- dataList[i][j] = conv_to_int(jtem)
- else:
- dataList[i] = conv_to_int(item)
- else:
- dataList = conv_to_int(dataList)
-
- private_dict.rawDict[key] = dataList
+ prev_val_list = [0] * num_masters
+ any_points_differ = False
+ for val_list in values:
+ rel_list = [
+ (val - prev_val_list[i]) for (i, val) in enumerate(val_list)
+ ]
+ if (not any_points_differ) and not allEqual(rel_list):
+ any_points_differ = True
+ prev_val_list = val_list
+ deltas = sub_model.getDeltas(rel_list)
+ # For PrivateDict BlueValues, the default font
+ # values are absolute, not relative to the prior value.
+ deltas[0] = val_list[0]
+ dataList.append(deltas)
+ # If there are no blend values,then
+ # we can collapse the blend lists.
+ if not any_points_differ:
+ dataList = [data[0] for data in dataList]
+ else:
+ values = [pd.rawDict[key] for pd in pds]
+ if not allEqual(values):
+ dataList = sub_model.getDeltas(values)
+ else:
+ dataList = values[0]
+
+ # Convert numbers with no decimal part to an int
+ if isinstance(dataList, list):
+ for i, item in enumerate(dataList):
+ if isinstance(item, list):
+ for j, jtem in enumerate(item):
+ dataList[i][j] = conv_to_int(jtem)
+ else:
+ dataList[i] = conv_to_int(item)
+ else:
+ dataList = conv_to_int(dataList)
+
+ private_dict.rawDict[key] = dataList
def _cff_or_cff2(font):
- if "CFF " in font:
- return font["CFF "]
- return font["CFF2"]
+ if "CFF " in font:
+ return font["CFF "]
+ return font["CFF2"]
def getfd_map(varFont, fonts_list):
- """ Since a subset source font may have fewer FontDicts in their
- FDArray than the default font, we have to match up the FontDicts in
- the different fonts . We do this with the FDSelect array, and by
- assuming that the same glyph will reference matching FontDicts in
- each source font. We return a mapping from fdIndex in the default
- font to a dictionary which maps each master list index of each
- region font to the equivalent fdIndex in the region font."""
- fd_map = {}
- default_font = fonts_list[0]
- region_fonts = fonts_list[1:]
- num_regions = len(region_fonts)
- topDict = _cff_or_cff2(default_font).cff.topDictIndex[0]
- if not hasattr(topDict, 'FDSelect'):
- # All glyphs reference only one FontDict.
- # Map the FD index for regions to index 0.
- fd_map[0] = {ri:0 for ri in range(num_regions)}
- return fd_map
-
- gname_mapping = {}
- default_fdSelect = topDict.FDSelect
- glyphOrder = default_font.getGlyphOrder()
- for gid, fdIndex in enumerate(default_fdSelect):
- gname_mapping[glyphOrder[gid]] = fdIndex
- if fdIndex not in fd_map:
- fd_map[fdIndex] = {}
- for ri, region_font in enumerate(region_fonts):
- region_glyphOrder = region_font.getGlyphOrder()
- region_topDict = _cff_or_cff2(region_font).cff.topDictIndex[0]
- if not hasattr(region_topDict, 'FDSelect'):
- # All the glyphs share the same FontDict. Pick any glyph.
- default_fdIndex = gname_mapping[region_glyphOrder[0]]
- fd_map[default_fdIndex][ri] = 0
- else:
- region_fdSelect = region_topDict.FDSelect
- for gid, fdIndex in enumerate(region_fdSelect):
- default_fdIndex = gname_mapping[region_glyphOrder[gid]]
- region_map = fd_map[default_fdIndex]
- if ri not in region_map:
- region_map[ri] = fdIndex
- return fd_map
-
-
-CVarData = namedtuple('CVarData', 'varDataList masterSupports vsindex_dict')
+ """Since a subset source font may have fewer FontDicts in their
+ FDArray than the default font, we have to match up the FontDicts in
+ the different fonts . We do this with the FDSelect array, and by
+ assuming that the same glyph will reference matching FontDicts in
+ each source font. We return a mapping from fdIndex in the default
+ font to a dictionary which maps each master list index of each
+ region font to the equivalent fdIndex in the region font."""
+ fd_map = {}
+ default_font = fonts_list[0]
+ region_fonts = fonts_list[1:]
+ num_regions = len(region_fonts)
+ topDict = _cff_or_cff2(default_font).cff.topDictIndex[0]
+ if not hasattr(topDict, "FDSelect"):
+ # All glyphs reference only one FontDict.
+ # Map the FD index for regions to index 0.
+ fd_map[0] = {ri: 0 for ri in range(num_regions)}
+ return fd_map
+
+ gname_mapping = {}
+ default_fdSelect = topDict.FDSelect
+ glyphOrder = default_font.getGlyphOrder()
+ for gid, fdIndex in enumerate(default_fdSelect):
+ gname_mapping[glyphOrder[gid]] = fdIndex
+ if fdIndex not in fd_map:
+ fd_map[fdIndex] = {}
+ for ri, region_font in enumerate(region_fonts):
+ region_glyphOrder = region_font.getGlyphOrder()
+ region_topDict = _cff_or_cff2(region_font).cff.topDictIndex[0]
+ if not hasattr(region_topDict, "FDSelect"):
+ # All the glyphs share the same FontDict. Pick any glyph.
+ default_fdIndex = gname_mapping[region_glyphOrder[0]]
+ fd_map[default_fdIndex][ri] = 0
+ else:
+ region_fdSelect = region_topDict.FDSelect
+ for gid, fdIndex in enumerate(region_fdSelect):
+ default_fdIndex = gname_mapping[region_glyphOrder[gid]]
+ region_map = fd_map[default_fdIndex]
+ if ri not in region_map:
+ region_map[ri] = fdIndex
+ return fd_map
+
+
+CVarData = namedtuple("CVarData", "varDataList masterSupports vsindex_dict")
+
+
def merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder):
- topDict = varFont['CFF2'].cff.topDictIndex[0]
- top_dicts = [topDict] + [
- _cff_or_cff2(ttFont).cff.topDictIndex[0]
- for ttFont in ordered_fonts_list[1:]
- ]
- num_masters = len(model.mapping)
- cvData = merge_charstrings(glyphOrder, num_masters, top_dicts, model)
- fd_map = getfd_map(varFont, ordered_fonts_list)
- merge_PrivateDicts(top_dicts, cvData.vsindex_dict, model, fd_map)
- addCFFVarStore(varFont, model, cvData.varDataList,
- cvData.masterSupports)
-
-
-def _get_cs(charstrings, glyphName):
- if glyphName not in charstrings:
- return None
- return charstrings[glyphName]
-
-def _add_new_vsindex(model, key, masterSupports, vsindex_dict,
- vsindex_by_key, varDataList):
- varTupleIndexes = []
- for support in model.supports[1:]:
- if support not in masterSupports:
- masterSupports.append(support)
- varTupleIndexes.append(masterSupports.index(support))
- var_data = varLib.builder.buildVarData(varTupleIndexes, None, False)
- vsindex = len(vsindex_dict)
- vsindex_by_key[key] = vsindex
- vsindex_dict[vsindex] = (model, [key])
- varDataList.append(var_data)
- return vsindex
+ topDict = varFont["CFF2"].cff.topDictIndex[0]
+ top_dicts = [topDict] + [
+ _cff_or_cff2(ttFont).cff.topDictIndex[0] for ttFont in ordered_fonts_list[1:]
+ ]
+ num_masters = len(model.mapping)
+ cvData = merge_charstrings(glyphOrder, num_masters, top_dicts, model)
+ fd_map = getfd_map(varFont, ordered_fonts_list)
+ merge_PrivateDicts(top_dicts, cvData.vsindex_dict, model, fd_map)
+ addCFFVarStore(varFont, model, cvData.varDataList, cvData.masterSupports)
+
+
+def _get_cs(charstrings, glyphName, filterEmpty=False):
+ if glyphName not in charstrings:
+ return None
+ cs = charstrings[glyphName]
+
+ if filterEmpty:
+ cs.decompile()
+ if cs.program == []: # CFF2 empty charstring
+ return None
+ elif (
+ len(cs.program) <= 2
+ and cs.program[-1] == "endchar"
+ and (len(cs.program) == 1 or type(cs.program[0]) in (int, float))
+ ): # CFF1 empty charstring
+ return None
+
+ return cs
+
+
+def _add_new_vsindex(
+ model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList
+):
+ varTupleIndexes = []
+ for support in model.supports[1:]:
+ if support not in masterSupports:
+ masterSupports.append(support)
+ varTupleIndexes.append(masterSupports.index(support))
+ var_data = varLib.builder.buildVarData(varTupleIndexes, None, False)
+ vsindex = len(vsindex_dict)
+ vsindex_by_key[key] = vsindex
+ vsindex_dict[vsindex] = (model, [key])
+ varDataList.append(var_data)
+ return vsindex
-def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):
- vsindex_dict = {}
- vsindex_by_key = {}
- varDataList = []
- masterSupports = []
- default_charstrings = top_dicts[0].CharStrings
- for gid, gname in enumerate(glyphOrder):
- all_cs = [
- _get_cs(td.CharStrings, gname)
- for td in top_dicts]
- if len([gs for gs in all_cs if gs is not None]) == 1:
- continue
- model, model_cs = masterModel.getSubModel(all_cs)
- # create the first pass CFF2 charstring, from
- # the default charstring.
- default_charstring = model_cs[0]
- var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)
- # We need to override outlineExtractor because these
- # charstrings do have widths in the 'program'; we need to drop these
- # values rather than post assertion error for them.
- default_charstring.outlineExtractor = MergeOutlineExtractor
- default_charstring.draw(var_pen)
-
- # Add the coordinates from all the other regions to the
- # blend lists in the CFF2 charstring.
- region_cs = model_cs[1:]
- for region_idx, region_charstring in enumerate(region_cs, start=1):
- var_pen.restart(region_idx)
- region_charstring.outlineExtractor = MergeOutlineExtractor
- region_charstring.draw(var_pen)
-
- # Collapse each coordinate list to a blend operator and its args.
- new_cs = var_pen.getCharString(
- private=default_charstring.private,
- globalSubrs=default_charstring.globalSubrs,
- var_model=model, optimize=True)
- default_charstrings[gname] = new_cs
-
- if (not var_pen.seen_moveto) or ('blend' not in new_cs.program):
- # If this is not a marking glyph, or if there are no blend
- # arguments, then we can use vsindex 0. No need to
- # check if we need a new vsindex.
- continue
-
- # If the charstring required a new model, create
- # a VarData table to go with, and set vsindex.
- key = tuple(v is not None for v in all_cs)
- try:
- vsindex = vsindex_by_key[key]
- except KeyError:
- vsindex = _add_new_vsindex(model, key, masterSupports, vsindex_dict,
- vsindex_by_key, varDataList)
- # We do not need to check for an existing new_cs.private.vsindex,
- # as we know it doesn't exist yet.
- if vsindex != 0:
- new_cs.program[:0] = [vsindex, 'vsindex']
-
- # If there is no variation in any of the charstrings, then vsindex_dict
- # never gets built. This could still be needed if there is variation
- # in the PrivatDict, so we will build the default data for vsindex = 0.
- if not vsindex_dict:
- key = (True,) * num_masters
- _add_new_vsindex(masterModel, key, masterSupports, vsindex_dict,
- vsindex_by_key, varDataList)
- cvData = CVarData(varDataList=varDataList, masterSupports=masterSupports,
- vsindex_dict=vsindex_dict)
- # XXX To do: optimize use of vsindex between the PrivateDicts and
- # charstrings
- return cvData
+def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):
+ vsindex_dict = {}
+ vsindex_by_key = {}
+ varDataList = []
+ masterSupports = []
+ default_charstrings = top_dicts[0].CharStrings
+ for gid, gname in enumerate(glyphOrder):
+ # interpret empty non-default masters as missing glyphs from a sparse master
+ all_cs = [
+ _get_cs(td.CharStrings, gname, i != 0) for i, td in enumerate(top_dicts)
+ ]
+ model, model_cs = masterModel.getSubModel(all_cs)
+ # create the first pass CFF2 charstring, from
+ # the default charstring.
+ default_charstring = model_cs[0]
+ var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)
+ # We need to override outlineExtractor because these
+ # charstrings do have widths in the 'program'; we need to drop these
+ # values rather than post assertion error for them.
+ default_charstring.outlineExtractor = MergeOutlineExtractor
+ default_charstring.draw(var_pen)
+
+ # Add the coordinates from all the other regions to the
+ # blend lists in the CFF2 charstring.
+ region_cs = model_cs[1:]
+ for region_idx, region_charstring in enumerate(region_cs, start=1):
+ var_pen.restart(region_idx)
+ region_charstring.outlineExtractor = MergeOutlineExtractor
+ region_charstring.draw(var_pen)
+
+ # Collapse each coordinate list to a blend operator and its args.
+ new_cs = var_pen.getCharString(
+ private=default_charstring.private,
+ globalSubrs=default_charstring.globalSubrs,
+ var_model=model,
+ optimize=True,
+ )
+ default_charstrings[gname] = new_cs
+
+ if not region_cs:
+ continue
+
+ if (not var_pen.seen_moveto) or ("blend" not in new_cs.program):
+ # If this is not a marking glyph, or if there are no blend
+ # arguments, then we can use vsindex 0. No need to
+ # check if we need a new vsindex.
+ continue
+
+ # If the charstring required a new model, create
+ # a VarData table to go with, and set vsindex.
+ key = tuple(v is not None for v in all_cs)
+ try:
+ vsindex = vsindex_by_key[key]
+ except KeyError:
+ vsindex = _add_new_vsindex(
+ model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList
+ )
+ # We do not need to check for an existing new_cs.private.vsindex,
+ # as we know it doesn't exist yet.
+ if vsindex != 0:
+ new_cs.program[:0] = [vsindex, "vsindex"]
+
+ # If there is no variation in any of the charstrings, then vsindex_dict
+ # never gets built. This could still be needed if there is variation
+ # in the PrivatDict, so we will build the default data for vsindex = 0.
+ if not vsindex_dict:
+ key = (True,) * num_masters
+ _add_new_vsindex(
+ masterModel, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList
+ )
+ cvData = CVarData(
+ varDataList=varDataList,
+ masterSupports=masterSupports,
+ vsindex_dict=vsindex_dict,
+ )
+ # XXX To do: optimize use of vsindex between the PrivateDicts and
+ # charstrings
+ return cvData
class CFFToCFF2OutlineExtractor(T2OutlineExtractor):
- """ This class is used to remove the initial width from the CFF
- charstring without trying to add the width to self.nominalWidthX,
- which is None. """
- def popallWidth(self, evenOdd=0):
- args = self.popall()
- if not self.gotWidth:
- if evenOdd ^ (len(args) % 2):
- args = args[1:]
- self.width = self.defaultWidthX
- self.gotWidth = 1
- return args
+ """This class is used to remove the initial width from the CFF
+ charstring without trying to add the width to self.nominalWidthX,
+ which is None."""
+
+ def popallWidth(self, evenOdd=0):
+ args = self.popall()
+ if not self.gotWidth:
+ if evenOdd ^ (len(args) % 2):
+ args = args[1:]
+ self.width = self.defaultWidthX
+ self.gotWidth = 1
+ return args
class MergeOutlineExtractor(CFFToCFF2OutlineExtractor):
- """ Used to extract the charstring commands - including hints - from a
- CFF charstring in order to merge it as another set of region data
- into a CFF2 variable font charstring."""
-
- def __init__(self, pen, localSubrs, globalSubrs,
- nominalWidthX, defaultWidthX, private=None):
- super().__init__(pen, localSubrs,
- globalSubrs, nominalWidthX, defaultWidthX, private)
-
- def countHints(self):
- args = self.popallWidth()
- self.hintCount = self.hintCount + len(args) // 2
- return args
-
- def _hint_op(self, type, args):
- self.pen.add_hint(type, args)
-
- def op_hstem(self, index):
- args = self.countHints()
- self._hint_op('hstem', args)
-
- def op_vstem(self, index):
- args = self.countHints()
- self._hint_op('vstem', args)
-
- def op_hstemhm(self, index):
- args = self.countHints()
- self._hint_op('hstemhm', args)
-
- def op_vstemhm(self, index):
- args = self.countHints()
- self._hint_op('vstemhm', args)
-
- def _get_hintmask(self, index):
- if not self.hintMaskBytes:
- args = self.countHints()
- if args:
- self._hint_op('vstemhm', args)
- self.hintMaskBytes = (self.hintCount + 7) // 8
- hintMaskBytes, index = self.callingStack[-1].getBytes(index,
- self.hintMaskBytes)
- return index, hintMaskBytes
-
- def op_hintmask(self, index):
- index, hintMaskBytes = self._get_hintmask(index)
- self.pen.add_hintmask('hintmask', [hintMaskBytes])
- return hintMaskBytes, index
-
- def op_cntrmask(self, index):
- index, hintMaskBytes = self._get_hintmask(index)
- self.pen.add_hintmask('cntrmask', [hintMaskBytes])
- return hintMaskBytes, index
+ """Used to extract the charstring commands - including hints - from a
+ CFF charstring in order to merge it as another set of region data
+ into a CFF2 variable font charstring."""
+
+ def __init__(
+ self,
+ pen,
+ localSubrs,
+ globalSubrs,
+ nominalWidthX,
+ defaultWidthX,
+ private=None,
+ blender=None,
+ ):
+ super().__init__(
+ pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private, blender
+ )
+
+ def countHints(self):
+ args = self.popallWidth()
+ self.hintCount = self.hintCount + len(args) // 2
+ return args
+
+ def _hint_op(self, type, args):
+ self.pen.add_hint(type, args)
+
+ def op_hstem(self, index):
+ args = self.countHints()
+ self._hint_op("hstem", args)
+
+ def op_vstem(self, index):
+ args = self.countHints()
+ self._hint_op("vstem", args)
+
+ def op_hstemhm(self, index):
+ args = self.countHints()
+ self._hint_op("hstemhm", args)
+
+ def op_vstemhm(self, index):
+ args = self.countHints()
+ self._hint_op("vstemhm", args)
+
+ def _get_hintmask(self, index):
+ if not self.hintMaskBytes:
+ args = self.countHints()
+ if args:
+ self._hint_op("vstemhm", args)
+ self.hintMaskBytes = (self.hintCount + 7) // 8
+ hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)
+ return index, hintMaskBytes
+
+ def op_hintmask(self, index):
+ index, hintMaskBytes = self._get_hintmask(index)
+ self.pen.add_hintmask("hintmask", [hintMaskBytes])
+ return hintMaskBytes, index
+
+ def op_cntrmask(self, index):
+ index, hintMaskBytes = self._get_hintmask(index)
+ self.pen.add_hintmask("cntrmask", [hintMaskBytes])
+ return hintMaskBytes, index
class CFF2CharStringMergePen(T2CharStringPen):
- """Pen to merge Type 2 CharStrings.
- """
- def __init__(
- self, default_commands, glyphName, num_masters, master_idx,
- roundTolerance=0.5):
- super().__init__(
- width=None,
- glyphSet=None, CFF2=True,
- roundTolerance=roundTolerance)
- self.pt_index = 0
- self._commands = default_commands
- self.m_index = master_idx
- self.num_masters = num_masters
- self.prev_move_idx = 0
- self.seen_moveto = False
- self.glyphName = glyphName
- self.round = roundFunc(roundTolerance, round=round)
-
- def add_point(self, point_type, pt_coords):
- if self.m_index == 0:
- self._commands.append([point_type, [pt_coords]])
- else:
- cmd = self._commands[self.pt_index]
- if cmd[0] != point_type:
- raise VarLibCFFPointTypeMergeError(
- point_type,
- self.pt_index, len(cmd[1]),
- cmd[0], self.glyphName)
- cmd[1].append(pt_coords)
- self.pt_index += 1
-
- def add_hint(self, hint_type, args):
- if self.m_index == 0:
- self._commands.append([hint_type, [args]])
- else:
- cmd = self._commands[self.pt_index]
- if cmd[0] != hint_type:
- raise VarLibCFFHintTypeMergeError(hint_type, self.pt_index, len(cmd[1]),
- cmd[0], self.glyphName)
- cmd[1].append(args)
- self.pt_index += 1
-
- def add_hintmask(self, hint_type, abs_args):
- # For hintmask, fonttools.cffLib.specializer.py expects
- # each of these to be represented by two sequential commands:
- # first holding only the operator name, with an empty arg list,
- # second with an empty string as the op name, and the mask arg list.
- if self.m_index == 0:
- self._commands.append([hint_type, []])
- self._commands.append(["", [abs_args]])
- else:
- cmd = self._commands[self.pt_index]
- if cmd[0] != hint_type:
- raise VarLibCFFHintTypeMergeError(hint_type, self.pt_index, len(cmd[1]),
- cmd[0], self.glyphName)
- self.pt_index += 1
- cmd = self._commands[self.pt_index]
- cmd[1].append(abs_args)
- self.pt_index += 1
-
- def _moveTo(self, pt):
- if not self.seen_moveto:
- self.seen_moveto = True
- pt_coords = self._p(pt)
- self.add_point('rmoveto', pt_coords)
- # I set prev_move_idx here because add_point()
- # can change self.pt_index.
- self.prev_move_idx = self.pt_index - 1
-
- def _lineTo(self, pt):
- pt_coords = self._p(pt)
- self.add_point('rlineto', pt_coords)
-
- def _curveToOne(self, pt1, pt2, pt3):
- _p = self._p
- pt_coords = _p(pt1)+_p(pt2)+_p(pt3)
- self.add_point('rrcurveto', pt_coords)
-
- def _closePath(self):
- pass
-
- def _endPath(self):
- pass
-
- def restart(self, region_idx):
- self.pt_index = 0
- self.m_index = region_idx
- self._p0 = (0, 0)
-
- def getCommands(self):
- return self._commands
-
- def reorder_blend_args(self, commands, get_delta_func):
- """
- We first re-order the master coordinate values.
- For a moveto to lineto, the args are now arranged as::
-
- [ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]
-
- We re-arrange this to::
-
- [ [master_0 x, master_1 x, master_2 x],
- [master_0 y, master_1 y, master_2 y]
- ]
-
- If the master values are all the same, we collapse the list to
- as single value instead of a list.
-
- We then convert this to::
-
- [ [master_0 x] + [x delta tuple] + [numBlends=1]
- [master_0 y] + [y delta tuple] + [numBlends=1]
- ]
- """
- for cmd in commands:
- # arg[i] is the set of arguments for this operator from master i.
- args = cmd[1]
- m_args = zip(*args)
- # m_args[n] is now all num_master args for the i'th argument
- # for this operation.
- cmd[1] = list(m_args)
- lastOp = None
- for cmd in commands:
- op = cmd[0]
- # masks are represented by two cmd's: first has only op names,
- # second has only args.
- if lastOp in ['hintmask', 'cntrmask']:
- coord = list(cmd[1])
- if not allEqual(coord):
- raise VarLibMergeError("Hintmask values cannot differ between source fonts.")
- cmd[1] = [coord[0][0]]
- else:
- coords = cmd[1]
- new_coords = []
- for coord in coords:
- if allEqual(coord):
- new_coords.append(coord[0])
- else:
- # convert to deltas
- deltas = get_delta_func(coord)[1:]
- coord = [coord[0]] + deltas
- coord.append(1)
- new_coords.append(coord)
- cmd[1] = new_coords
- lastOp = op
- return commands
-
- def getCharString(
- self, private=None, globalSubrs=None,
- var_model=None, optimize=True):
- commands = self._commands
- commands = self.reorder_blend_args(commands, partial (var_model.getDeltas, round=self.round))
- if optimize:
- commands = specializeCommands(
- commands, generalizeFirst=False,
- maxstack=maxStackLimit)
- program = commandsToProgram(commands)
- charString = T2CharString(
- program=program, private=private,
- globalSubrs=globalSubrs)
- return charString
+ """Pen to merge Type 2 CharStrings."""
+
+ def __init__(
+ self, default_commands, glyphName, num_masters, master_idx, roundTolerance=0.01
+ ):
+ # For roundTolerance see https://github.com/fonttools/fonttools/issues/2838
+ super().__init__(
+ width=None, glyphSet=None, CFF2=True, roundTolerance=roundTolerance
+ )
+ self.pt_index = 0
+ self._commands = default_commands
+ self.m_index = master_idx
+ self.num_masters = num_masters
+ self.prev_move_idx = 0
+ self.seen_moveto = False
+ self.glyphName = glyphName
+ self.round = roundFunc(roundTolerance, round=round)
+
+ def add_point(self, point_type, pt_coords):
+ if self.m_index == 0:
+ self._commands.append([point_type, [pt_coords]])
+ else:
+ cmd = self._commands[self.pt_index]
+ if cmd[0] != point_type:
+ raise VarLibCFFPointTypeMergeError(
+ point_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName
+ )
+ cmd[1].append(pt_coords)
+ self.pt_index += 1
+
+ def add_hint(self, hint_type, args):
+ if self.m_index == 0:
+ self._commands.append([hint_type, [args]])
+ else:
+ cmd = self._commands[self.pt_index]
+ if cmd[0] != hint_type:
+ raise VarLibCFFHintTypeMergeError(
+ hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName
+ )
+ cmd[1].append(args)
+ self.pt_index += 1
+
+ def add_hintmask(self, hint_type, abs_args):
+ # For hintmask, fonttools.cffLib.specializer.py expects
+ # each of these to be represented by two sequential commands:
+ # first holding only the operator name, with an empty arg list,
+ # second with an empty string as the op name, and the mask arg list.
+ if self.m_index == 0:
+ self._commands.append([hint_type, []])
+ self._commands.append(["", [abs_args]])
+ else:
+ cmd = self._commands[self.pt_index]
+ if cmd[0] != hint_type:
+ raise VarLibCFFHintTypeMergeError(
+ hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName
+ )
+ self.pt_index += 1
+ cmd = self._commands[self.pt_index]
+ cmd[1].append(abs_args)
+ self.pt_index += 1
+
+ def _moveTo(self, pt):
+ if not self.seen_moveto:
+ self.seen_moveto = True
+ pt_coords = self._p(pt)
+ self.add_point("rmoveto", pt_coords)
+ # I set prev_move_idx here because add_point()
+ # can change self.pt_index.
+ self.prev_move_idx = self.pt_index - 1
+
+ def _lineTo(self, pt):
+ pt_coords = self._p(pt)
+ self.add_point("rlineto", pt_coords)
+
+ def _curveToOne(self, pt1, pt2, pt3):
+ _p = self._p
+ pt_coords = _p(pt1) + _p(pt2) + _p(pt3)
+ self.add_point("rrcurveto", pt_coords)
+
+ def _closePath(self):
+ pass
+
+ def _endPath(self):
+ pass
+
+ def restart(self, region_idx):
+ self.pt_index = 0
+ self.m_index = region_idx
+ self._p0 = (0, 0)
+
+ def getCommands(self):
+ return self._commands
+
+ def reorder_blend_args(self, commands, get_delta_func):
+ """
+ We first re-order the master coordinate values.
+ For a moveto to lineto, the args are now arranged as::
+
+ [ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]
+
+ We re-arrange this to::
+
+ [ [master_0 x, master_1 x, master_2 x],
+ [master_0 y, master_1 y, master_2 y]
+ ]
+
+ If the master values are all the same, we collapse the list to
+ as single value instead of a list.
+
+ We then convert this to::
+
+ [ [master_0 x] + [x delta tuple] + [numBlends=1]
+ [master_0 y] + [y delta tuple] + [numBlends=1]
+ ]
+ """
+ for cmd in commands:
+ # arg[i] is the set of arguments for this operator from master i.
+ args = cmd[1]
+ m_args = zip(*args)
+ # m_args[n] is now all num_master args for the i'th argument
+ # for this operation.
+ cmd[1] = list(m_args)
+ lastOp = None
+ for cmd in commands:
+ op = cmd[0]
+ # masks are represented by two cmd's: first has only op names,
+ # second has only args.
+ if lastOp in ["hintmask", "cntrmask"]:
+ coord = list(cmd[1])
+ if not allEqual(coord):
+ raise VarLibMergeError(
+ "Hintmask values cannot differ between source fonts."
+ )
+ cmd[1] = [coord[0][0]]
+ else:
+ coords = cmd[1]
+ new_coords = []
+ for coord in coords:
+ if allEqual(coord):
+ new_coords.append(coord[0])
+ else:
+ # convert to deltas
+ deltas = get_delta_func(coord)[1:]
+ coord = [coord[0]] + deltas
+ coord.append(1)
+ new_coords.append(coord)
+ cmd[1] = new_coords
+ lastOp = op
+ return commands
+
+ def getCharString(
+ self, private=None, globalSubrs=None, var_model=None, optimize=True
+ ):
+ commands = self._commands
+ commands = self.reorder_blend_args(
+ commands, partial(var_model.getDeltas, round=self.round)
+ )
+ if optimize:
+ commands = specializeCommands(
+ commands, generalizeFirst=False, maxstack=maxStackLimit
+ )
+ program = commandsToProgram(commands)
+ charString = T2CharString(
+ program=program, private=private, globalSubrs=globalSubrs
+ )
+ return charString
diff --git a/Lib/fontTools/varLib/featureVars.py b/Lib/fontTools/varLib/featureVars.py
index ad47ab8e..f0403d76 100644
--- a/Lib/fontTools/varLib/featureVars.py
+++ b/Lib/fontTools/varLib/featureVars.py
@@ -4,16 +4,17 @@ https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#featurevariat
NOTE: The API is experimental and subject to change.
"""
from fontTools.misc.dictTools import hashdict
-from fontTools.misc.intTools import popCount
+from fontTools.misc.intTools import bit_count
from fontTools.ttLib import newTable
from fontTools.ttLib.tables import otTables as ot
+from fontTools.ttLib.ttVisitor import TTVisitor
from fontTools.otlLib.builder import buildLookup, buildSingleSubstSubtable
from collections import OrderedDict
from .errors import VarLibError, VarLibValidationError
-def addFeatureVariations(font, conditionalSubstitutions, featureTag='rvrn'):
+def addFeatureVariations(font, conditionalSubstitutions, featureTag="rvrn"):
"""Add conditional substitutions to a Variable Font.
The `conditionalSubstitutions` argument is a list of (Region, Substitutions)
@@ -44,6 +45,8 @@ def addFeatureVariations(font, conditionalSubstitutions, featureTag='rvrn'):
# >>> f.save(dstPath)
"""
+ processLast = featureTag != "rvrn"
+
_checkSubstitutionGlyphsExist(
glyphNames=set(font.getGlyphOrder()),
substitutions=conditionalSubstitutions,
@@ -52,36 +55,42 @@ def addFeatureVariations(font, conditionalSubstitutions, featureTag='rvrn'):
substitutions = overlayFeatureVariations(conditionalSubstitutions)
# turn substitution dicts into tuples of tuples, so they are hashable
- conditionalSubstitutions, allSubstitutions = makeSubstitutionsHashable(substitutions)
+ conditionalSubstitutions, allSubstitutions = makeSubstitutionsHashable(
+ substitutions
+ )
if "GSUB" not in font:
font["GSUB"] = buildGSUB()
# setup lookups
- lookupMap = buildSubstitutionLookups(font["GSUB"].table, allSubstitutions)
+ lookupMap = buildSubstitutionLookups(
+ font["GSUB"].table, allSubstitutions, processLast
+ )
# addFeatureVariationsRaw takes a list of
# ( {condition}, [ lookup indices ] )
# so rearrange our lookups to match
conditionsAndLookups = []
for conditionSet, substitutions in conditionalSubstitutions:
- conditionsAndLookups.append((conditionSet, [lookupMap[s] for s in substitutions]))
+ conditionsAndLookups.append(
+ (conditionSet, [lookupMap[s] for s in substitutions])
+ )
+
+ addFeatureVariationsRaw(font, font["GSUB"].table, conditionsAndLookups, featureTag)
- addFeatureVariationsRaw(font, font["GSUB"].table,
- conditionsAndLookups,
- featureTag)
def _checkSubstitutionGlyphsExist(glyphNames, substitutions):
referencedGlyphNames = set()
for _, substitution in substitutions:
- referencedGlyphNames |= substitution.keys()
- referencedGlyphNames |= set(substitution.values())
+ referencedGlyphNames |= substitution.keys()
+ referencedGlyphNames |= set(substitution.values())
missing = referencedGlyphNames - glyphNames
if missing:
- raise VarLibValidationError(
+ raise VarLibValidationError(
"Missing glyphs are referenced in conditional substitution rules:"
f" {', '.join(missing)}"
)
+
def overlayFeatureVariations(conditionalSubstitutions):
"""Compute overlaps between all conditional substitutions.
@@ -130,13 +139,13 @@ def overlayFeatureVariations(conditionalSubstitutions):
# Merge same-substitutions rules, as this creates fewer number oflookups.
merged = OrderedDict()
- for value,key in conditionalSubstitutions:
+ for value, key in conditionalSubstitutions:
key = hashdict(key)
if key in merged:
merged[key].extend(value)
else:
merged[key] = value
- conditionalSubstitutions = [(v,dict(k)) for k,v in merged.items()]
+ conditionalSubstitutions = [(v, dict(k)) for k, v in merged.items()]
del merged
# Merge same-region rules, as this is cheaper.
@@ -145,9 +154,13 @@ def overlayFeatureVariations(conditionalSubstitutions):
# Reversing is such that earlier entries win in case of conflicting substitution
# rules for the same region.
merged = OrderedDict()
- for key,value in reversed(conditionalSubstitutions):
- key = tuple(sorted((hashdict(cleanupBox(k)) for k in key),
- key=lambda d: tuple(sorted(d.items()))))
+ for key, value in reversed(conditionalSubstitutions):
+ key = tuple(
+ sorted(
+ (hashdict(cleanupBox(k)) for k in key),
+ key=lambda d: tuple(sorted(d.items())),
+ )
+ )
if key in merged:
merged[key].update(value)
else:
@@ -158,17 +171,17 @@ def overlayFeatureVariations(conditionalSubstitutions):
# Overlay
#
# Rank is the bit-set of the index of all contributing layers.
- initMapInit = ((hashdict(),0),) # Initializer representing the entire space
- boxMap = OrderedDict(initMapInit) # Map from Box to Rank
- for i,(currRegion,_) in enumerate(conditionalSubstitutions):
+ initMapInit = ((hashdict(), 0),) # Initializer representing the entire space
+ boxMap = OrderedDict(initMapInit) # Map from Box to Rank
+ for i, (currRegion, _) in enumerate(conditionalSubstitutions):
newMap = OrderedDict(initMapInit)
- currRank = 1<<i
- for box,rank in boxMap.items():
+ currRank = 1 << i
+ for box, rank in boxMap.items():
for currBox in currRegion:
intersection, remainder = overlayBox(currBox, box)
if intersection is not None:
intersection = hashdict(intersection)
- newMap[intersection] = newMap.get(intersection, 0) | rank|currRank
+ newMap[intersection] = newMap.get(intersection, 0) | rank | currRank
if remainder is not None:
remainder = hashdict(remainder)
newMap[remainder] = newMap.get(remainder, 0) | rank
@@ -176,19 +189,20 @@ def overlayFeatureVariations(conditionalSubstitutions):
# Generate output
items = []
- for box,rank in sorted(boxMap.items(),
- key=(lambda BoxAndRank: -popCount(BoxAndRank[1]))):
+ for box, rank in sorted(
+ boxMap.items(), key=(lambda BoxAndRank: -bit_count(BoxAndRank[1]))
+ ):
# Skip any box that doesn't have any substitution.
if rank == 0:
continue
substsList = []
i = 0
while rank:
- if rank & 1:
- substsList.append(conditionalSubstitutions[i][1])
- rank >>= 1
- i += 1
- items.append((dict(box),substsList))
+ if rank & 1:
+ substsList.append(conditionalSubstitutions[i][1])
+ rank >>= 1
+ i += 1
+ items.append((dict(box), substsList))
return items
@@ -201,6 +215,7 @@ def overlayFeatureVariations(conditionalSubstitutions):
# from the corresponding axes.
#
+
def overlayBox(top, bot):
"""Overlays ``top`` box on top of ``bot`` box.
@@ -222,31 +237,36 @@ def overlayBox(top, bot):
minimum = max(min1, min2)
maximum = min(max1, max2)
if not minimum < maximum:
- return None, bot # Do not intersect
- intersection[axisTag] = minimum,maximum
+ return None, bot # Do not intersect
+ intersection[axisTag] = minimum, maximum
# Remainder
#
# Remainder is empty if bot's each axis range lies within that of intersection.
#
# Remainder is shrank if bot's each, except for exactly one, axis range lies
- # within that of intersection, and that one axis, it spills out of the
+ # within that of intersection, and that one axis, it extrudes out of the
# intersection only on one side.
#
# Bot is returned in full as remainder otherwise, as true remainder is not
# representable as a single box.
remainder = dict(bot)
- exactlyOne = False
- fullyInside = False
+ extruding = False
+ fullyInside = True
+ for axisTag in top:
+ if axisTag in bot:
+ continue
+ extruding = True
+ fullyInside = False
+ break
for axisTag in bot:
- if axisTag not in intersection:
- fullyInside = False
- continue # Axis range lies fully within
+ if axisTag not in top:
+ continue # Axis range lies fully within
min1, max1 = intersection[axisTag]
min2, max2 = bot[axisTag]
if min1 <= min2 and max2 <= max1:
- continue # Axis range lies fully within
+ continue # Axis range lies fully within
# Bot's range doesn't fully lie within that of top's for this axis.
# We know they intersect, so it cannot lie fully without either; so they
@@ -254,9 +274,9 @@ def overlayBox(top, bot):
# If we have had an overlapping axis before, remainder is not
# representable as a box, so return full bottom and go home.
- if exactlyOne:
+ if extruding:
return intersection, bot
- exactlyOne = True
+ extruding = True
fullyInside = False
# Otherwise, cut remainder on this axis and continue.
@@ -272,7 +292,7 @@ def overlayBox(top, bot):
# Remainder leaks out from both sides. Can't cut either.
return intersection, bot
- remainder[axisTag] = minimum,maximum
+ remainder[axisTag] = minimum, maximum
if fullyInside:
# bot is fully within intersection. Remainder is empty.
@@ -280,15 +300,16 @@ def overlayBox(top, bot):
return intersection, remainder
+
def cleanupBox(box):
"""Return a sparse copy of `box`, without redundant (default) values.
- >>> cleanupBox({})
- {}
- >>> cleanupBox({'wdth': (0.0, 1.0)})
- {'wdth': (0.0, 1.0)}
- >>> cleanupBox({'wdth': (-1.0, 1.0)})
- {}
+ >>> cleanupBox({})
+ {}
+ >>> cleanupBox({'wdth': (0.0, 1.0)})
+ {'wdth': (0.0, 1.0)}
+ >>> cleanupBox({'wdth': (-1.0, 1.0)})
+ {}
"""
return {tag: limit for tag, limit in box.items() if limit != (-1.0, 1.0)}
@@ -298,10 +319,13 @@ def cleanupBox(box):
# Low level implementation
#
-def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag='rvrn'):
+
+def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag="rvrn"):
"""Low level implementation of addFeatureVariations that directly
models the possibilities of the FeatureVariations table."""
+ processLast = featureTag != "rvrn"
+
#
# if there is no <featureTag> feature:
# make empty <featureTag> feature
@@ -337,10 +361,13 @@ def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag='r
langSystems = [lsr.LangSys for lsr in scriptRecord.Script.LangSysRecord]
for langSys in [scriptRecord.Script.DefaultLangSys] + langSystems:
langSys.FeatureIndex.append(varFeatureIndex)
+ langSys.FeatureCount = len(langSys.FeatureIndex)
varFeatureIndices = [varFeatureIndex]
- axisIndices = {axis.axisTag: axisIndex for axisIndex, axis in enumerate(font["fvar"].axes)}
+ axisIndices = {
+ axis.axisTag: axisIndex for axisIndex, axis in enumerate(font["fvar"].axes)
+ }
featureVariationRecords = []
for conditionSet, lookupIndices in conditionalSubstitutions:
@@ -354,9 +381,23 @@ def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag='r
conditionTable.append(ct)
records = []
for varFeatureIndex in varFeatureIndices:
- existingLookupIndices = table.FeatureList.FeatureRecord[varFeatureIndex].Feature.LookupListIndex
- records.append(buildFeatureTableSubstitutionRecord(varFeatureIndex, existingLookupIndices + lookupIndices))
- featureVariationRecords.append(buildFeatureVariationRecord(conditionTable, records))
+ existingLookupIndices = table.FeatureList.FeatureRecord[
+ varFeatureIndex
+ ].Feature.LookupListIndex
+ combinedLookupIndices = (
+ existingLookupIndices + lookupIndices
+ if processLast
+ else lookupIndices + existingLookupIndices
+ )
+
+ records.append(
+ buildFeatureTableSubstitutionRecord(
+ varFeatureIndex, combinedLookupIndices
+ )
+ )
+ featureVariationRecords.append(
+ buildFeatureVariationRecord(conditionTable, records)
+ )
table.FeatureVariations = buildFeatureVariations(featureVariationRecords)
@@ -365,6 +406,7 @@ def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag='r
# Building GSUB/FeatureVariations internals
#
+
def buildGSUB():
"""Build a GSUB table from scratch."""
fontTable = newTable("GSUB")
@@ -379,10 +421,11 @@ def buildGSUB():
gsub.LookupList.Lookup = []
srec = ot.ScriptRecord()
- srec.ScriptTag = 'DFLT'
+ srec.ScriptTag = "DFLT"
srec.Script = ot.Script()
srec.Script.DefaultLangSys = None
srec.Script.LangSysRecord = []
+ srec.Script.LangSysCount = 0
langrec = ot.LangSysRecord()
langrec.LangSys = ot.LangSys()
@@ -413,18 +456,51 @@ def makeSubstitutionsHashable(conditionalSubstitutions):
return condSubst, sorted(allSubstitutions)
-def buildSubstitutionLookups(gsub, allSubstitutions):
+class ShifterVisitor(TTVisitor):
+ def __init__(self, shift):
+ self.shift = shift
+
+
+@ShifterVisitor.register_attr(ot.Feature, "LookupListIndex") # GSUB/GPOS
+def visit(visitor, obj, attr, value):
+ shift = visitor.shift
+ value = [l + shift for l in value]
+ setattr(obj, attr, value)
+
+
+@ShifterVisitor.register_attr(
+ (ot.SubstLookupRecord, ot.PosLookupRecord), "LookupListIndex"
+)
+def visit(visitor, obj, attr, value):
+ setattr(obj, attr, visitor.shift + value)
+
+
+def buildSubstitutionLookups(gsub, allSubstitutions, processLast=False):
"""Build the lookups for the glyph substitutions, return a dict mapping
the substitution to lookup indices."""
- firstIndex = len(gsub.LookupList.Lookup)
+
+ # Insert lookups at the beginning of the lookup vector
+ # https://github.com/googlefonts/fontmake/issues/950
+
+ firstIndex = len(gsub.LookupList.Lookup) if processLast else 0
lookupMap = {}
for i, substitutionMap in enumerate(allSubstitutions):
- lookupMap[substitutionMap] = i + firstIndex
+ lookupMap[substitutionMap] = firstIndex + i
+
+ if not processLast:
+ # Shift all lookup indices in gsub by len(allSubstitutions)
+ shift = len(allSubstitutions)
+ visitor = ShifterVisitor(shift)
+ visitor.visit(gsub.FeatureList.FeatureRecord)
+ visitor.visit(gsub.LookupList.Lookup)
- for subst in allSubstitutions:
+ for i, subst in enumerate(allSubstitutions):
substMap = dict(subst)
lookup = buildLookup([buildSingleSubstSubtable(substMap)])
- gsub.LookupList.Lookup.append(lookup)
+ if processLast:
+ gsub.LookupList.Lookup.append(lookup)
+ else:
+ gsub.LookupList.Lookup.insert(i, lookup)
assert gsub.LookupList.Lookup[lookupMap[subst]] is lookup
gsub.LookupList.LookupCount = len(gsub.LookupList.Lookup)
return lookupMap
@@ -487,10 +563,15 @@ def sortFeatureList(table):
elsewhere. This is needed after the feature list has been modified.
"""
# decorate, sort, undecorate, because we need to make an index remapping table
- tagIndexFea = [(fea.FeatureTag, index, fea) for index, fea in enumerate(table.FeatureList.FeatureRecord)]
+ tagIndexFea = [
+ (fea.FeatureTag, index, fea)
+ for index, fea in enumerate(table.FeatureList.FeatureRecord)
+ ]
tagIndexFea.sort()
table.FeatureList.FeatureRecord = [fea for tag, index, fea in tagIndexFea]
- featureRemap = dict(zip([index for tag, index, fea in tagIndexFea], range(len(tagIndexFea))))
+ featureRemap = dict(
+ zip([index for tag, index, fea in tagIndexFea], range(len(tagIndexFea)))
+ )
# Remap the feature indices
remapFeatures(table, featureRemap)
@@ -513,11 +594,12 @@ def remapFeatures(table, featureRemap):
def _remapLangSys(langSys, featureRemap):
- if langSys.ReqFeatureIndex != 0xffff:
+ if langSys.ReqFeatureIndex != 0xFFFF:
langSys.ReqFeatureIndex = featureRemap[langSys.ReqFeatureIndex]
langSys.FeatureIndex = [featureRemap[index] for index in langSys.FeatureIndex]
if __name__ == "__main__":
import doctest, sys
+
sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/varLib/instancer/__init__.py b/Lib/fontTools/varLib/instancer/__init__.py
index 8f976123..cde1d39f 100644
--- a/Lib/fontTools/varLib/instancer/__init__.py
+++ b/Lib/fontTools/varLib/instancer/__init__.py
@@ -51,19 +51,30 @@ Note that, unlike varLib.mutator, when an axis is not mentioned in the input
location, the varLib.instancer will keep the axis and the corresponding deltas,
whereas mutator implicitly drops the axis at its default coordinate.
-The module currently supports only the first three "levels" of partial instancing,
-with the rest planned to be implemented in the future, namely:
+The module supports all the following "levels" of instancing, which can of
+course be combined:
L1
dropping one or more axes while leaving the default tables unmodified;
+
+ | >>> font = instancer.instantiateVariableFont(varfont, {"wght": None})
+
L2
dropping one or more axes while pinning them at non-default locations;
+
+ | >>> font = instancer.instantiateVariableFont(varfont, {"wght": 700})
+
L3
restricting the range of variation of one or more axes, by setting either
a new minimum or maximum, potentially -- though not necessarily -- dropping
entire regions of variations that fall completely outside this new range.
+
+ | >>> font = instancer.instantiateVariableFont(varfont, {"wght": (100, 300)})
+
L4
- moving the default location of an axis.
+ moving the default location of an axis, by specifying (min,defalt,max) values:
+
+ | >>> font = instancer.instantiateVariableFont(varfont, {"wght": (100, 300, 700)})
Currently only TrueType-flavored variable fonts (i.e. containing 'glyf' table)
are supported, but support for CFF2 variable fonts will be added soon.
@@ -75,9 +86,8 @@ from fontTools.misc.fixedTools import (
floatToFixedToFloat,
strToFixedToFloat,
otRound,
- MAX_F2DOT14,
)
-from fontTools.varLib.models import supportScalar, normalizeValue, piecewiseLinearMap
+from fontTools.varLib.models import normalizeValue, piecewiseLinearMap
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.TupleVariation import TupleVariation
from fontTools.ttLib.tables import _g_l_y_f
@@ -90,41 +100,353 @@ from fontTools.varLib import builder
from fontTools.varLib.mvar import MVAR_ENTRIES
from fontTools.varLib.merger import MutatorMerger
from fontTools.varLib.instancer import names
+from .featureVars import instantiateFeatureVariations
from fontTools.misc.cliTools import makeOutputFileName
+from fontTools.varLib.instancer import solver
import collections
+import dataclasses
from copy import deepcopy
from enum import IntEnum
import logging
import os
import re
+from typing import Dict, Iterable, Mapping, Optional, Sequence, Tuple, Union
+import warnings
log = logging.getLogger("fontTools.varLib.instancer")
-class AxisRange(collections.namedtuple("AxisRange", "minimum maximum")):
- def __new__(cls, *args, **kwargs):
- self = super().__new__(cls, *args, **kwargs)
- if self.minimum > self.maximum:
+def AxisRange(minimum, maximum):
+ warnings.warn(
+ "AxisRange is deprecated; use AxisTriple instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return AxisTriple(minimum, None, maximum)
+
+
+def NormalizedAxisRange(minimum, maximum):
+ warnings.warn(
+ "NormalizedAxisRange is deprecated; use AxisTriple instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return NormalizedAxisTriple(minimum, None, maximum)
+
+
+@dataclasses.dataclass(frozen=True, order=True, repr=False)
+class AxisTriple(Sequence):
+ """A triple of (min, default, max) axis values.
+
+ Any of the values can be None, in which case the limitRangeAndPopulateDefaults()
+ method can be used to fill in the missing values based on the fvar axis values.
+ """
+
+ minimum: Optional[float]
+ default: Optional[float]
+ maximum: Optional[float]
+
+ def __post_init__(self):
+ if self.default is None and self.minimum == self.maximum:
+ object.__setattr__(self, "default", self.minimum)
+ if (
+ (
+ self.minimum is not None
+ and self.default is not None
+ and self.minimum > self.default
+ )
+ or (
+ self.default is not None
+ and self.maximum is not None
+ and self.default > self.maximum
+ )
+ or (
+ self.minimum is not None
+ and self.maximum is not None
+ and self.minimum > self.maximum
+ )
+ ):
raise ValueError(
- f"Range minimum ({self.minimum:g}) must be <= maximum ({self.maximum:g})"
+ f"{type(self).__name__} minimum ({self.minimum}), default ({self.default}), maximum ({self.maximum}) must be in sorted order"
)
- return self
+
+ def __getitem__(self, i):
+ fields = dataclasses.fields(self)
+ return getattr(self, fields[i].name)
+
+ def __len__(self):
+ return len(dataclasses.fields(self))
+
+ def _replace(self, **kwargs):
+ return dataclasses.replace(self, **kwargs)
def __repr__(self):
- return f"{type(self).__name__}({self.minimum:g}, {self.maximum:g})"
+ return (
+ f"({', '.join(format(v, 'g') if v is not None else 'None' for v in self)})"
+ )
+
+ @classmethod
+ def expand(
+ cls,
+ v: Union[
+ "AxisTriple",
+ float, # pin axis at single value, same as min==default==max
+ Tuple[float, float], # (min, max), restrict axis and keep default
+ Tuple[float, float, float], # (min, default, max)
+ ],
+ ) -> "AxisTriple":
+ """Convert a single value or a tuple into an AxisTriple.
+
+ If the input is a single value, it is interpreted as a pin at that value.
+ If the input is a tuple, it is interpreted as (min, max) or (min, default, max).
+ """
+ if isinstance(v, cls):
+ return v
+ if isinstance(v, (int, float)):
+ return cls(v, v, v)
+ try:
+ n = len(v)
+ except TypeError as e:
+ raise ValueError(
+ f"expected float, 2- or 3-tuple of floats; got {type(v)}: {v!r}"
+ ) from e
+ default = None
+ if n == 2:
+ minimum, maximum = v
+ elif n >= 3:
+ return cls(*v)
+ else:
+ raise ValueError(f"expected sequence of 2 or 3; got {n}: {v!r}")
+ return cls(minimum, default, maximum)
+
+ def limitRangeAndPopulateDefaults(self, fvarTriple) -> "AxisTriple":
+ """Return a new AxisTriple with the default value filled in.
+
+ Set default to fvar axis default if the latter is within the min/max range,
+ otherwise set default to the min or max value, whichever is closer to the
+ fvar axis default.
+    A new AxisTriple is always returned; self is never mutated (the dataclass is frozen).
+ """
+ minimum = self.minimum
+ if minimum is None:
+ minimum = fvarTriple[0]
+ default = self.default
+ if default is None:
+ default = fvarTriple[1]
+ maximum = self.maximum
+ if maximum is None:
+ maximum = fvarTriple[2]
+
+ minimum = max(minimum, fvarTriple[0])
+ maximum = max(maximum, fvarTriple[0])
+ minimum = min(minimum, fvarTriple[2])
+ maximum = min(maximum, fvarTriple[2])
+ default = max(minimum, min(maximum, default))
+
+ return AxisTriple(minimum, default, maximum)
+
+
+@dataclasses.dataclass(frozen=True, order=True, repr=False)
+class NormalizedAxisTriple(AxisTriple):
+ """A triple of (min, default, max) normalized axis values."""
+
+ minimum: float
+ default: float
+ maximum: float
+
+ def __post_init__(self):
+ if self.default is None:
+ object.__setattr__(self, "default", max(self.minimum, min(self.maximum, 0)))
+ if not (-1.0 <= self.minimum <= self.default <= self.maximum <= 1.0):
+ raise ValueError(
+ "Normalized axis values not in -1..+1 range; got "
+ f"minimum={self.minimum:g}, default={self.default:g}, maximum={self.maximum:g})"
+ )
+
+
+@dataclasses.dataclass(frozen=True, order=True, repr=False)
+class NormalizedAxisTripleAndDistances(AxisTriple):
+ """A triple of (min, default, max) normalized axis values,
+ with distances between min and default, and default and max,
+ in the *pre-normalized* space."""
+
+ minimum: float
+ default: float
+ maximum: float
+ distanceNegative: Optional[float] = 1
+ distancePositive: Optional[float] = 1
+
+ def __post_init__(self):
+ if self.default is None:
+ object.__setattr__(self, "default", max(self.minimum, min(self.maximum, 0)))
+ if not (-1.0 <= self.minimum <= self.default <= self.maximum <= 1.0):
+ raise ValueError(
+ "Normalized axis values not in -1..+1 range; got "
+ f"minimum={self.minimum:g}, default={self.default:g}, maximum={self.maximum:g})"
+ )
+
+ def reverse_negate(self):
+ v = self
+ return self.__class__(-v[2], -v[1], -v[0], v[4], v[3])
+
+ def renormalizeValue(self, v, extrapolate=True):
+ """Renormalizes a normalized value v to the range of this axis,
+ considering the pre-normalized distances as well as the new
+ axis limits."""
+
+ lower, default, upper, distanceNegative, distancePositive = self
+ assert lower <= default <= upper
+
+ if not extrapolate:
+ v = max(lower, min(upper, v))
+
+ if v == default:
+ return 0
+
+ if default < 0:
+ return -self.reverse_negate().renormalizeValue(-v, extrapolate=extrapolate)
+
+ # default >= 0 and v != default
+
+ if v > default:
+ return (v - default) / (upper - default)
+
+ # v < default
+ if lower >= 0:
+ return (v - default) / (default - lower)
-class NormalizedAxisRange(AxisRange):
- def __new__(cls, *args, **kwargs):
- self = super().__new__(cls, *args, **kwargs)
- if self.minimum < -1.0 or self.maximum > 1.0:
- raise ValueError("Axis range values must be normalized to -1..+1 range")
- if self.minimum > 0:
- raise ValueError(f"Expected axis range minimum <= 0; got {self.minimum}")
- if self.maximum < 0:
- raise ValueError(f"Expected axis range maximum >= 0; got {self.maximum}")
- return self
+ # lower < 0 and v < default
+
+ totalDistance = distanceNegative * -lower + distancePositive * default
+
+ if v >= 0:
+ vDistance = (default - v) * distancePositive
+ else:
+ vDistance = -v * distanceNegative + distancePositive * default
+
+ return -vDistance / totalDistance
+
+
+class _BaseAxisLimits(Mapping[str, AxisTriple]):
+ def __getitem__(self, key: str) -> AxisTriple:
+ return self._data[key]
+
+ def __iter__(self) -> Iterable[str]:
+ return iter(self._data)
+
+ def __len__(self) -> int:
+ return len(self._data)
+
+ def __repr__(self) -> str:
+ return f"{type(self).__name__}({self._data!r})"
+
+ def __str__(self) -> str:
+ return str(self._data)
+
+ def defaultLocation(self) -> Dict[str, float]:
+ """Return a dict of default axis values."""
+ return {k: v.default for k, v in self.items()}
+
+ def pinnedLocation(self) -> Dict[str, float]:
+ """Return a location dict with only the pinned axes."""
+ return {k: v.default for k, v in self.items() if v.minimum == v.maximum}
+
+
+class AxisLimits(_BaseAxisLimits):
+ """Maps axis tags (str) to AxisTriple values."""
+
+ def __init__(self, *args, **kwargs):
+ self._data = data = {}
+ for k, v in dict(*args, **kwargs).items():
+ if v is None:
+ # will be filled in by limitAxesAndPopulateDefaults
+ data[k] = v
+ else:
+ try:
+ triple = AxisTriple.expand(v)
+ except ValueError as e:
+ raise ValueError(f"Invalid axis limits for {k!r}: {v!r}") from e
+ data[k] = triple
+
+ def limitAxesAndPopulateDefaults(self, varfont) -> "AxisLimits":
+ """Return a new AxisLimits with defaults filled in from fvar table.
+
+        A new AxisLimits is always returned. Axes whose limit is None are
+        pinned to the fvar default value.
+ """
+ fvar = varfont["fvar"]
+ fvarTriples = {
+ a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes
+ }
+ newLimits = {}
+ for axisTag, triple in self.items():
+ fvarTriple = fvarTriples[axisTag]
+ default = fvarTriple[1]
+ if triple is None:
+ newLimits[axisTag] = AxisTriple(default, default, default)
+ else:
+ newLimits[axisTag] = triple.limitRangeAndPopulateDefaults(fvarTriple)
+ return type(self)(newLimits)
+
+ def normalize(self, varfont, usingAvar=True) -> "NormalizedAxisLimits":
+ """Return a new NormalizedAxisLimits with normalized -1..0..+1 values.
+
+ If usingAvar is True, the avar table is used to warp the default normalization.
+ """
+ fvar = varfont["fvar"]
+ badLimits = set(self.keys()).difference(a.axisTag for a in fvar.axes)
+ if badLimits:
+ raise ValueError("Cannot limit: {} not present in fvar".format(badLimits))
+
+ axes = {
+ a.axisTag: (a.minValue, a.defaultValue, a.maxValue)
+ for a in fvar.axes
+ if a.axisTag in self
+ }
+
+ avarSegments = {}
+ if usingAvar and "avar" in varfont:
+ avarSegments = varfont["avar"].segments
+
+ normalizedLimits = {}
+
+ for axis_tag, triple in axes.items():
+ distanceNegative = triple[1] - triple[0]
+ distancePositive = triple[2] - triple[1]
+
+ if self[axis_tag] is None:
+ normalizedLimits[axis_tag] = NormalizedAxisTripleAndDistances(
+ 0, 0, 0, distanceNegative, distancePositive
+ )
+ continue
+
+ minV, defaultV, maxV = self[axis_tag]
+
+ if defaultV is None:
+ defaultV = triple[1]
+
+ avarMapping = avarSegments.get(axis_tag, None)
+ normalizedLimits[axis_tag] = NormalizedAxisTripleAndDistances(
+ *(normalize(v, triple, avarMapping) for v in (minV, defaultV, maxV)),
+ distanceNegative,
+ distancePositive,
+ )
+
+ return NormalizedAxisLimits(normalizedLimits)
+
+
+class NormalizedAxisLimits(_BaseAxisLimits):
+ """Maps axis tags (str) to NormalizedAxisTriple values."""
+
+ def __init__(self, *args, **kwargs):
+ self._data = data = {}
+ for k, v in dict(*args, **kwargs).items():
+ try:
+ triple = NormalizedAxisTripleAndDistances.expand(v)
+ except ValueError as e:
+ raise ValueError(f"Invalid axis limits for {k!r}: {v!r}") from e
+ data[k] = triple
class OverlapMode(IntEnum):
@@ -140,8 +462,9 @@ def instantiateTupleVariationStore(
"""Instantiate TupleVariation list at the given location, or limit axes' min/max.
The 'variations' list of TupleVariation objects is modified in-place.
- The 'axisLimits' (dict) maps axis tags (str) to either a single coordinate along the
- axis (float), or to minimum/maximum coordinates (NormalizedAxisRange).
+    The 'axisLimits' (dict) maps axis tags (str) to NormalizedAxisTriple values
+ specifying (minimum, default, maximum) in the -1,0,+1 normalized space. Pinned axes
+ have minimum == default == maximum.
A 'full' instance (i.e. static font) is produced when all the axes are pinned to
single coordinates; a 'partial' instance (i.e. a less variable font) is produced
@@ -158,8 +481,8 @@ def instantiateTupleVariationStore(
Args:
variations: List[TupleVariation] from either 'gvar' or 'cvar'.
- axisLimits: Dict[str, Union[float, NormalizedAxisRange]]: axes' coordinates for
- the full or partial instance, or ranges for restricting an axis' min/max.
+ axisLimits: NormalizedAxisLimits: map from axis tags to (min, default, max)
+ normalized coordinates for the full or partial instance.
origCoords: GlyphCoordinates: default instance's coordinates for computing 'gvar'
inferred points (cf. table__g_l_y_f._getCoordinatesAndControls).
endPts: List[int]: indices of contour end points, for inferring 'gvar' deltas.
@@ -167,17 +490,8 @@ def instantiateTupleVariationStore(
Returns:
List[float]: the overall delta adjustment after applicable deltas were summed.
"""
- pinnedLocation, axisRanges = splitAxisLocationAndRanges(
- axisLimits, rangeType=NormalizedAxisRange
- )
-
- newVariations = variations
- if pinnedLocation:
- newVariations = pinTupleVariationAxes(variations, pinnedLocation)
-
- if axisRanges:
- newVariations = limitTupleVariationAxisRanges(newVariations, axisRanges)
+ newVariations = changeTupleVariationsAxisLimits(variations, axisLimits)
mergedVariations = collections.OrderedDict()
for var in newVariations:
@@ -203,129 +517,47 @@ def instantiateTupleVariationStore(
return defaultVar.coordinates if defaultVar is not None else []
-def pinTupleVariationAxes(variations, location):
- newVariations = []
- for var in variations:
- # Compute the scalar support of the axes to be pinned at the desired location,
- # excluding any axes that we are not pinning.
- # If a TupleVariation doesn't mention an axis, it implies that the axis peak
- # is 0 (i.e. the axis does not participate).
- support = {axis: var.axes.pop(axis, (-1, 0, +1)) for axis in location}
- scalar = supportScalar(location, support)
- if scalar == 0.0:
- # no influence, drop the TupleVariation
- continue
-
- var.scaleDeltas(scalar)
- newVariations.append(var)
- return newVariations
-
-
-def limitTupleVariationAxisRanges(variations, axisRanges):
- for axisTag, axisRange in sorted(axisRanges.items()):
+def changeTupleVariationsAxisLimits(variations, axisLimits):
+ for axisTag, axisLimit in sorted(axisLimits.items()):
newVariations = []
for var in variations:
- newVariations.extend(limitTupleVariationAxisRange(var, axisTag, axisRange))
+ newVariations.extend(changeTupleVariationAxisLimit(var, axisTag, axisLimit))
variations = newVariations
return variations
-def _negate(*values):
- yield from (-1 * v for v in values)
+def changeTupleVariationAxisLimit(var, axisTag, axisLimit):
+ assert isinstance(axisLimit, NormalizedAxisTripleAndDistances)
-
-def limitTupleVariationAxisRange(var, axisTag, axisRange):
- if not isinstance(axisRange, NormalizedAxisRange):
- axisRange = NormalizedAxisRange(*axisRange)
-
- # skip when current axis is missing (i.e. doesn't participate), or when the
- # 'tent' isn't fully on either the negative or positive side
+    # Skip when the current axis is missing (i.e. doesn't participate).
lower, peak, upper = var.axes.get(axisTag, (-1, 0, 1))
- if peak == 0 or lower > peak or peak > upper or (lower < 0 and upper > 0):
- return [var]
-
- negative = lower < 0
- if negative:
- if axisRange.minimum == -1.0:
- return [var]
- elif axisRange.minimum == 0.0:
- return []
- else:
- if axisRange.maximum == 1.0:
- return [var]
- elif axisRange.maximum == 0.0:
- return []
-
- limit = axisRange.minimum if negative else axisRange.maximum
-
- # Rebase axis bounds onto the new limit, which then becomes the new -1.0 or +1.0.
- # The results are always positive, because both dividend and divisor are either
- # all positive or all negative.
- newLower = lower / limit
- newPeak = peak / limit
- newUpper = upper / limit
- # for negative TupleVariation, swap lower and upper to simplify procedure
- if negative:
- newLower, newUpper = newUpper, newLower
-
- # special case when innermost bound == peak == limit
- if newLower == newPeak == 1.0:
- var.axes[axisTag] = (-1.0, -1.0, -1.0) if negative else (1.0, 1.0, 1.0)
+ if peak == 0:
return [var]
-
- # case 1: the whole deltaset falls outside the new limit; we can drop it
- elif newLower >= 1.0:
+ # Drop if the var 'tent' isn't well-formed
+ if not (lower <= peak <= upper) or (lower < 0 and upper > 0):
return []
- # case 2: only the peak and outermost bound fall outside the new limit;
- # we keep the deltaset, update peak and outermost bound and and scale deltas
- # by the scalar value for the restricted axis at the new limit.
- elif newPeak >= 1.0:
- scalar = supportScalar({axisTag: limit}, {axisTag: (lower, peak, upper)})
- var.scaleDeltas(scalar)
- newPeak = 1.0
- newUpper = 1.0
- if negative:
- newLower, newPeak, newUpper = _negate(newUpper, newPeak, newLower)
- var.axes[axisTag] = (newLower, newPeak, newUpper)
+ if axisTag not in var.axes:
return [var]
- # case 3: peak falls inside but outermost limit still fits within F2Dot14 bounds;
- # we keep deltas as is and only scale the axes bounds. Deltas beyond -1.0
- # or +1.0 will never be applied as implementations must clamp to that range.
- elif newUpper <= 2.0:
- if negative:
- newLower, newPeak, newUpper = _negate(newUpper, newPeak, newLower)
- elif MAX_F2DOT14 < newUpper <= 2.0:
- # we clamp +2.0 to the max F2Dot14 (~1.99994) for convenience
- newUpper = MAX_F2DOT14
- var.axes[axisTag] = (newLower, newPeak, newUpper)
- return [var]
+ tent = var.axes[axisTag]
- # case 4: new limit doesn't fit; we need to chop the deltaset into two 'tents',
- # because the shape of a triangle with part of one side cut off cannot be
- # represented as a triangle itself. It can be represented as sum of two triangles.
- # NOTE: This increases the file size!
- else:
- # duplicate the tent, then adjust lower/peak/upper so that the outermost limit
- # of the original tent is +/-2.0, whereas the new tent's starts as the old
- # one peaks and maxes out at +/-1.0.
- newVar = TupleVariation(var.axes, var.coordinates)
- if negative:
- var.axes[axisTag] = (-2.0, -1 * newPeak, -1 * newLower)
- newVar.axes[axisTag] = (-1.0, -1.0, -1 * newPeak)
+ solutions = solver.rebaseTent(tent, axisLimit)
+
+ out = []
+ for scalar, tent in solutions:
+ newVar = (
+ TupleVariation(var.axes, var.coordinates) if len(solutions) > 1 else var
+ )
+ if tent is None:
+ newVar.axes.pop(axisTag)
else:
- var.axes[axisTag] = (newLower, newPeak, MAX_F2DOT14)
- newVar.axes[axisTag] = (newPeak, 1.0, 1.0)
- # the new tent's deltas are scaled by the difference between the scalar value
- # for the old tent at the desired limit...
- scalar1 = supportScalar({axisTag: limit}, {axisTag: (lower, peak, upper)})
- # ... and the scalar value for the clamped tent (with outer limit +/-2.0),
- # which can be simplified like this:
- scalar2 = 1 / (2 - newPeak)
- newVar.scaleDeltas(scalar1 - scalar2)
+ assert tent[1] != 0, tent
+ newVar.axes[axisTag] = tent
+ newVar *= scalar
+ out.append(newVar)
- return [var, newVar]
+ return out
def _instantiateGvarGlyph(
@@ -345,6 +577,23 @@ def _instantiateGvarGlyph(
if defaultDeltas:
coordinates += _g_l_y_f.GlyphCoordinates(defaultDeltas)
+ glyph = glyf[glyphname]
+ if glyph.isVarComposite():
+ for component in glyph.components:
+ newLocation = {}
+ for tag, loc in component.location.items():
+ if tag not in axisLimits:
+ newLocation[tag] = loc
+ continue
+ if component.flags & _g_l_y_f.VarComponentFlags.AXES_HAVE_VARIATION:
+ raise NotImplementedError(
+ "Instancing accross VarComposite axes with variation is not supported."
+ )
+ limits = axisLimits[tag]
+ loc = limits.renormalizeValue(loc, extrapolate=False)
+ newLocation[tag] = loc
+ component.location = newLocation
+
# _setCoordinates also sets the hmtx/vmtx advance widths and sidebearings from
# the four phantom points and glyph bounding boxes.
# We call it unconditionally even if a glyph has no variations or no deltas are
@@ -394,7 +643,7 @@ def instantiateGvar(varfont, axisLimits, optimize=True):
glyf.glyphOrder,
key=lambda name: (
glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
- if glyf[name].isComposite()
+ if glyf[name].isComposite() or glyf[name].isVarComposite()
else 0,
name,
),
@@ -470,13 +719,12 @@ def _remapVarIdxMap(table, attrName, varIndexMapping, glyphOrder):
# TODO(anthrotype) Add support for HVAR/VVAR in CFF2
def _instantiateVHVAR(varfont, axisLimits, tableFields):
+ location = axisLimits.pinnedLocation()
tableTag = tableFields.tableTag
fvarAxes = varfont["fvar"].axes
# Deltas from gvar table have already been applied to the hmtx/vmtx. For full
# instances (i.e. all axes pinned), we can simply drop HVAR/VVAR and return
- if set(
- axisTag for axisTag, value in axisLimits.items() if not isinstance(value, tuple)
- ).issuperset(axis.axisTag for axis in fvarAxes):
+ if set(location).issuperset(axis.axisTag for axis in fvarAxes):
log.info("Dropping %s table", tableTag)
del varfont[tableTag]
return
@@ -570,11 +818,7 @@ class _TupleVarStoreAdapter(object):
# rebuild regions whose axes were dropped or limited
self.rebuildRegions()
- pinnedAxes = {
- axisTag
- for axisTag, value in axisLimits.items()
- if not isinstance(value, tuple)
- }
+ pinnedAxes = set(axisLimits.pinnedLocation())
self.axisOrder = [
axisTag for axisTag in self.axisOrder if axisTag not in pinnedAxes
]
@@ -619,9 +863,9 @@ def instantiateItemVariationStore(itemVarStore, fvarAxes, axisLimits):
Args:
varStore: An otTables.VarStore object (Item Variation Store)
fvarAxes: list of fvar's Axis objects
- axisLimits: Dict[str, float] mapping axis tags to normalized axis coordinates
- (float) or ranges for restricting an axis' min/max (NormalizedAxisRange).
- May not specify coordinates/ranges for all the fvar axes.
+ axisLimits: NormalizedAxisLimits: mapping axis tags to normalized
+ min/default/max axis coordinates. May not specify coordinates/ranges for
+ all the fvar axes.
Returns:
defaultDeltas: to be added to the default instance, of type dict of floats
@@ -703,169 +947,6 @@ def instantiateOTL(varfont, axisLimits):
del varfont["GDEF"]
-def instantiateFeatureVariations(varfont, axisLimits):
- for tableTag in ("GPOS", "GSUB"):
- if tableTag not in varfont or not getattr(
- varfont[tableTag].table, "FeatureVariations", None
- ):
- continue
- log.info("Instantiating FeatureVariations of %s table", tableTag)
- _instantiateFeatureVariations(
- varfont[tableTag].table, varfont["fvar"].axes, axisLimits
- )
- # remove unreferenced lookups
- varfont[tableTag].prune_lookups()
-
-
-def _featureVariationRecordIsUnique(rec, seen):
- conditionSet = []
- for cond in rec.ConditionSet.ConditionTable:
- if cond.Format != 1:
- # can't tell whether this is duplicate, assume is unique
- return True
- conditionSet.append(
- (cond.AxisIndex, cond.FilterRangeMinValue, cond.FilterRangeMaxValue)
- )
- # besides the set of conditions, we also include the FeatureTableSubstitution
- # version to identify unique FeatureVariationRecords, even though only one
- # version is currently defined. It's theoretically possible that multiple
- # records with same conditions but different substitution table version be
- # present in the same font for backward compatibility.
- recordKey = frozenset([rec.FeatureTableSubstitution.Version] + conditionSet)
- if recordKey in seen:
- return False
- else:
- seen.add(recordKey) # side effect
- return True
-
-
-def _limitFeatureVariationConditionRange(condition, axisRange):
- minValue = condition.FilterRangeMinValue
- maxValue = condition.FilterRangeMaxValue
-
- if (
- minValue > maxValue
- or minValue > axisRange.maximum
- or maxValue < axisRange.minimum
- ):
- # condition invalid or out of range
- return
-
- values = [minValue, maxValue]
- for i, value in enumerate(values):
- values[i] = normalizeValue(value, (axisRange.minimum, 0, axisRange.maximum))
-
- return AxisRange(*values)
-
-
-def _instantiateFeatureVariationRecord(
- record, recIdx, location, fvarAxes, axisIndexMap
-):
- applies = True
- newConditions = []
- for i, condition in enumerate(record.ConditionSet.ConditionTable):
- if condition.Format == 1:
- axisIdx = condition.AxisIndex
- axisTag = fvarAxes[axisIdx].axisTag
- if axisTag in location:
- minValue = condition.FilterRangeMinValue
- maxValue = condition.FilterRangeMaxValue
- v = location[axisTag]
- if not (minValue <= v <= maxValue):
- # condition not met so remove entire record
- applies = False
- newConditions = None
- break
- else:
- # axis not pinned, keep condition with remapped axis index
- applies = False
- condition.AxisIndex = axisIndexMap[axisTag]
- newConditions.append(condition)
- else:
- log.warning(
- "Condition table {0} of FeatureVariationRecord {1} has "
- "unsupported format ({2}); ignored".format(i, recIdx, condition.Format)
- )
- applies = False
- newConditions.append(condition)
-
- if newConditions:
- record.ConditionSet.ConditionTable = newConditions
- shouldKeep = True
- else:
- shouldKeep = False
-
- return applies, shouldKeep
-
-
-def _limitFeatureVariationRecord(record, axisRanges, axisOrder):
- newConditions = []
- for i, condition in enumerate(record.ConditionSet.ConditionTable):
- if condition.Format == 1:
- axisIdx = condition.AxisIndex
- axisTag = axisOrder[axisIdx]
- if axisTag in axisRanges:
- axisRange = axisRanges[axisTag]
- newRange = _limitFeatureVariationConditionRange(condition, axisRange)
- if newRange:
- # keep condition with updated limits and remapped axis index
- condition.FilterRangeMinValue = newRange.minimum
- condition.FilterRangeMaxValue = newRange.maximum
- newConditions.append(condition)
- else:
- # condition out of range, remove entire record
- newConditions = None
- break
- else:
- newConditions.append(condition)
- else:
- newConditions.append(condition)
-
- if newConditions:
- record.ConditionSet.ConditionTable = newConditions
- shouldKeep = True
- else:
- shouldKeep = False
-
- return shouldKeep
-
-
-def _instantiateFeatureVariations(table, fvarAxes, axisLimits):
- location, axisRanges = splitAxisLocationAndRanges(
- axisLimits, rangeType=NormalizedAxisRange
- )
- pinnedAxes = set(location.keys())
- axisOrder = [axis.axisTag for axis in fvarAxes if axis.axisTag not in pinnedAxes]
- axisIndexMap = {axisTag: axisOrder.index(axisTag) for axisTag in axisOrder}
-
- featureVariationApplied = False
- uniqueRecords = set()
- newRecords = []
-
- for i, record in enumerate(table.FeatureVariations.FeatureVariationRecord):
- applies, shouldKeep = _instantiateFeatureVariationRecord(
- record, i, location, fvarAxes, axisIndexMap
- )
- if shouldKeep:
- shouldKeep = _limitFeatureVariationRecord(record, axisRanges, axisOrder)
-
- if shouldKeep and _featureVariationRecordIsUnique(record, uniqueRecords):
- newRecords.append(record)
-
- if applies and not featureVariationApplied:
- assert record.FeatureTableSubstitution.Version == 0x00010000
- for rec in record.FeatureTableSubstitution.SubstitutionRecord:
- table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = rec.Feature
- # Set variations only once
- featureVariationApplied = True
-
- if newRecords:
- table.FeatureVariations.FeatureVariationRecord = newRecords
- table.FeatureVariations.FeatureVariationCount = len(newRecords)
- else:
- del table.FeatureVariations
-
-
def _isValidAvarSegmentMap(axisTag, segmentMap):
if not segmentMap:
return True
@@ -891,12 +972,10 @@ def _isValidAvarSegmentMap(axisTag, segmentMap):
def instantiateAvar(varfont, axisLimits):
# 'axisLimits' dict must contain user-space (non-normalized) coordinates.
- location, axisRanges = splitAxisLocationAndRanges(axisLimits)
-
segments = varfont["avar"].segments
# drop table if we instantiate all the axes
- pinnedAxes = set(location.keys())
+ pinnedAxes = set(axisLimits.pinnedLocation())
if pinnedAxes.issuperset(segments):
log.info("Dropping avar table")
del varfont["avar"]
@@ -907,7 +986,7 @@ def instantiateAvar(varfont, axisLimits):
if axis in segments:
del segments[axis]
- # First compute the default normalization for axisRanges coordinates: i.e.
+ # First compute the default normalization for axisLimits coordinates: i.e.
# min = -1.0, default = 0, max = +1.0, and in between values interpolated linearly,
# without using the avar table's mappings.
# Then, for each SegmentMap, if we are restricting its axis, compute the new
@@ -915,7 +994,7 @@ def instantiateAvar(varfont, axisLimits):
# dropping any mappings that fall outside the restricted range.
# The keys ('fromCoord') are specified in default normalized coordinate space,
# whereas the values ('toCoord') are "mapped forward" using the SegmentMap.
- normalizedRanges = normalizeAxisLimits(varfont, axisRanges, usingAvar=False)
+ normalizedRanges = axisLimits.normalize(varfont, usingAvar=False)
newSegments = {}
for axisTag, mapping in segments.items():
if not _isValidAvarSegmentMap(axisTag, mapping):
@@ -925,25 +1004,32 @@ def instantiateAvar(varfont, axisLimits):
mappedMin = floatToFixedToFloat(
piecewiseLinearMap(axisRange.minimum, mapping), 14
)
+ mappedDef = floatToFixedToFloat(
+ piecewiseLinearMap(axisRange.default, mapping), 14
+ )
mappedMax = floatToFixedToFloat(
piecewiseLinearMap(axisRange.maximum, mapping), 14
)
+ mappedAxisLimit = NormalizedAxisTripleAndDistances(
+ mappedMin,
+ mappedDef,
+ mappedMax,
+ axisRange.distanceNegative,
+ axisRange.distancePositive,
+ )
newMapping = {}
for fromCoord, toCoord in mapping.items():
-
if fromCoord < axisRange.minimum or fromCoord > axisRange.maximum:
continue
- fromCoord = normalizeValue(
- fromCoord, (axisRange.minimum, 0, axisRange.maximum)
- )
+ fromCoord = axisRange.renormalizeValue(fromCoord)
assert mappedMin <= toCoord <= mappedMax
- toCoord = normalizeValue(toCoord, (mappedMin, 0, mappedMax))
+ toCoord = mappedAxisLimit.renormalizeValue(toCoord)
fromCoord = floatToFixedToFloat(fromCoord, 14)
toCoord = floatToFixedToFloat(toCoord, 14)
newMapping[fromCoord] = toCoord
- newMapping.update({-1.0: -1.0, 1.0: 1.0})
+ newMapping.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0})
newSegments[axisTag] = newMapping
else:
newSegments[axisTag] = mapping
@@ -962,7 +1048,7 @@ def isInstanceWithinAxisRanges(location, axisRanges):
def instantiateFvar(varfont, axisLimits):
# 'axisLimits' dict must contain user-space (non-normalized) coordinates
- location, axisRanges = splitAxisLocationAndRanges(axisLimits, rangeType=AxisRange)
+ location = axisLimits.pinnedLocation()
fvar = varfont["fvar"]
@@ -979,8 +1065,11 @@ def instantiateFvar(varfont, axisLimits):
axisTag = axis.axisTag
if axisTag in location:
continue
- if axisTag in axisRanges:
- axis.minValue, axis.maxValue = axisRanges[axisTag]
+ if axisTag in axisLimits:
+ triple = axisLimits[axisTag]
+ if triple.default is None:
+ triple = (triple.minimum, axis.defaultValue, triple.maximum)
+ axis.minValue, axis.defaultValue, axis.maxValue = triple
axes.append(axis)
fvar.axes = axes
@@ -991,7 +1080,7 @@ def instantiateFvar(varfont, axisLimits):
continue
for axisTag in location:
del instance.coordinates[axisTag]
- if not isInstanceWithinAxisRanges(instance.coordinates, axisRanges):
+ if not isInstanceWithinAxisRanges(instance.coordinates, axisLimits):
continue
instances.append(instance)
fvar.instances = instances
@@ -1016,14 +1105,10 @@ def instantiateSTAT(varfont, axisLimits):
def axisValuesFromAxisLimits(stat, axisLimits):
- location, axisRanges = splitAxisLocationAndRanges(axisLimits, rangeType=AxisRange)
-
def isAxisValueOutsideLimits(axisTag, axisValue):
- if axisTag in location and axisValue != location[axisTag]:
- return True
- elif axisTag in axisRanges:
- axisRange = axisRanges[axisTag]
- if axisValue < axisRange.minimum or axisValue > axisRange.maximum:
+ if axisTag in axisLimits:
+ triple = axisLimits[axisTag]
+ if axisValue < triple.minimum or axisValue > triple.maximum:
return True
return False
@@ -1080,45 +1165,6 @@ def normalize(value, triple, avarMapping):
return floatToFixedToFloat(value, 14)
-def normalizeAxisLimits(varfont, axisLimits, usingAvar=True):
- fvar = varfont["fvar"]
- badLimits = set(axisLimits.keys()).difference(a.axisTag for a in fvar.axes)
- if badLimits:
- raise ValueError("Cannot limit: {} not present in fvar".format(badLimits))
-
- axes = {
- a.axisTag: (a.minValue, a.defaultValue, a.maxValue)
- for a in fvar.axes
- if a.axisTag in axisLimits
- }
-
- avarSegments = {}
- if usingAvar and "avar" in varfont:
- avarSegments = varfont["avar"].segments
-
- for axis_tag, (_, default, _) in axes.items():
- value = axisLimits[axis_tag]
- if isinstance(value, tuple):
- minV, maxV = value
- if minV > default or maxV < default:
- raise NotImplementedError(
- f"Unsupported range {axis_tag}={minV:g}:{maxV:g}; "
- f"can't change default position ({axis_tag}={default:g})"
- )
-
- normalizedLimits = {}
- for axis_tag, triple in axes.items():
- avarMapping = avarSegments.get(axis_tag, None)
- value = axisLimits[axis_tag]
- if isinstance(value, tuple):
- normalizedLimits[axis_tag] = NormalizedAxisRange(
- *(normalize(v, triple, avarMapping) for v in value)
- )
- else:
- normalizedLimits[axis_tag] = normalize(value, triple, avarMapping)
- return normalizedLimits
-
-
def sanityCheckVariableTables(varfont):
if "fvar" not in varfont:
raise ValueError("Missing required table fvar")
@@ -1130,17 +1176,6 @@ def sanityCheckVariableTables(varfont):
raise NotImplementedError("Instancing CFF2 variable fonts is not supported yet")
-def populateAxisDefaults(varfont, axisLimits):
- if any(value is None for value in axisLimits.values()):
- fvar = varfont["fvar"]
- defaultValues = {a.axisTag: a.defaultValue for a in fvar.axes}
- return {
- axisTag: defaultValues[axisTag] if value is None else value
- for axisTag, value in axisLimits.items()
- }
- return axisLimits
-
-
def instantiateVariableFont(
varfont,
axisLimits,
@@ -1193,15 +1228,20 @@ def instantiateVariableFont(
sanityCheckVariableTables(varfont)
- axisLimits = populateAxisDefaults(varfont, axisLimits)
+ axisLimits = AxisLimits(axisLimits).limitAxesAndPopulateDefaults(varfont)
+
+ log.info("Restricted limits: %s", axisLimits)
- normalizedLimits = normalizeAxisLimits(varfont, axisLimits)
+ normalizedLimits = axisLimits.normalize(varfont)
log.info("Normalized limits: %s", normalizedLimits)
if not inplace:
varfont = deepcopy(varfont)
+ if "DSIG" in varfont:
+ del varfont["DSIG"]
+
if updateFontNames:
log.info("Updating name table")
names.updateNameTable(varfont, axisLimits)
@@ -1247,13 +1287,11 @@ def instantiateVariableFont(
ignoreErrors=(overlap == OverlapMode.REMOVE_AND_IGNORE_ERRORS),
)
+ if "OS/2" in varfont:
+ varfont["OS/2"].recalcAvgCharWidth(varfont)
+
varLib.set_default_weight_width_slant(
- varfont,
- location={
- axisTag: limit
- for axisTag, limit in axisLimits.items()
- if not isinstance(limit, tuple)
- },
+ varfont, location=axisLimits.defaultLocation()
)
if updateFontNames:
@@ -1301,41 +1339,32 @@ def setRibbiBits(font):
font["OS/2"].fsSelection = selection
-def splitAxisLocationAndRanges(axisLimits, rangeType=AxisRange):
- location, axisRanges = {}, {}
- for axisTag, value in axisLimits.items():
- if isinstance(value, rangeType):
- axisRanges[axisTag] = value
- elif isinstance(value, (int, float)):
- location[axisTag] = value
- elif isinstance(value, tuple):
- axisRanges[axisTag] = rangeType(*value)
- else:
- raise TypeError(
- f"Expected number or {rangeType.__name__}, "
- f"got {type(value).__name__}: {value!r}"
- )
- return location, axisRanges
-
-
-def parseLimits(limits):
+def parseLimits(limits: Iterable[str]) -> Dict[str, Optional[AxisTriple]]:
result = {}
for limitString in limits:
- match = re.match(r"^(\w{1,4})=(?:(drop)|(?:([^:]+)(?:[:](.+))?))$", limitString)
+ match = re.match(
+ r"^(\w{1,4})=(?:(drop)|(?:([^:]*)(?:[:]([^:]*))?(?:[:]([^:]*))?))$",
+ limitString,
+ )
if not match:
raise ValueError("invalid location format: %r" % limitString)
tag = match.group(1).ljust(4)
+
if match.group(2): # 'drop'
- lbound = None
- else:
- lbound = strToFixedToFloat(match.group(3), precisionBits=16)
- ubound = lbound
- if match.group(4):
- ubound = strToFixedToFloat(match.group(4), precisionBits=16)
- if lbound != ubound:
- result[tag] = AxisRange(lbound, ubound)
- else:
- result[tag] = lbound
+ result[tag] = None
+ continue
+
+ triple = match.group(3, 4, 5)
+
+ if triple[1] is None: # "value" syntax
+ triple = (triple[0], triple[0], triple[0])
+ elif triple[2] is None: # "min:max" syntax
+ triple = (triple[0], None, triple[1])
+
+ triple = tuple(float(v) if v else None for v in triple)
+
+ result[tag] = AxisTriple(*triple)
+
return result
@@ -1363,9 +1392,11 @@ def parseArgs(args):
metavar="AXIS=LOC",
nargs="*",
help="List of space separated locations. A location consists of "
- "the tag of a variation axis, followed by '=' and one of number, "
- "number:number or the literal string 'drop'. "
- "E.g.: wdth=100 or wght=75.0:125.0 or wght=drop",
+ "the tag of a variation axis, followed by '=' and the literal, "
+ "string 'drop', or comma-separate list of one to three values, "
+ "each of which is the empty string, or a number. "
+ "E.g.: wdth=100 or wght=75.0:125.0 or wght=100:400:700 or wght=:500: "
+ "or wght=drop",
)
parser.add_argument(
"-o",
diff --git a/Lib/fontTools/varLib/instancer/featureVars.py b/Lib/fontTools/varLib/instancer/featureVars.py
new file mode 100644
index 00000000..d9370d9d
--- /dev/null
+++ b/Lib/fontTools/varLib/instancer/featureVars.py
@@ -0,0 +1,190 @@
+from fontTools.ttLib.tables import otTables as ot
+from copy import deepcopy
+import logging
+
+
+log = logging.getLogger("fontTools.varLib.instancer")
+
+
+def _featureVariationRecordIsUnique(rec, seen):
+ conditionSet = []
+ conditionSets = (
+ rec.ConditionSet.ConditionTable if rec.ConditionSet is not None else []
+ )
+ for cond in conditionSets:
+ if cond.Format != 1:
+ # can't tell whether this is duplicate, assume is unique
+ return True
+ conditionSet.append(
+ (cond.AxisIndex, cond.FilterRangeMinValue, cond.FilterRangeMaxValue)
+ )
+ # besides the set of conditions, we also include the FeatureTableSubstitution
+ # version to identify unique FeatureVariationRecords, even though only one
+ # version is currently defined. It's theoretically possible that multiple
+ # records with same conditions but different substitution table version be
+ # present in the same font for backward compatibility.
+ recordKey = frozenset([rec.FeatureTableSubstitution.Version] + conditionSet)
+ if recordKey in seen:
+ return False
+ else:
+ seen.add(recordKey) # side effect
+ return True
+
+
+def _limitFeatureVariationConditionRange(condition, axisLimit):
+ minValue = condition.FilterRangeMinValue
+ maxValue = condition.FilterRangeMaxValue
+
+ if (
+ minValue > maxValue
+ or minValue > axisLimit.maximum
+ or maxValue < axisLimit.minimum
+ ):
+ # condition invalid or out of range
+ return
+
+ return tuple(
+ axisLimit.renormalizeValue(v, extrapolate=False) for v in (minValue, maxValue)
+ )
+
+
+def _instantiateFeatureVariationRecord(
+ record, recIdx, axisLimits, fvarAxes, axisIndexMap
+):
+ applies = True
+ shouldKeep = False
+ newConditions = []
+ from fontTools.varLib.instancer import NormalizedAxisTripleAndDistances
+
+ default_triple = NormalizedAxisTripleAndDistances(-1, 0, +1)
+ if record.ConditionSet is None:
+ record.ConditionSet = ot.ConditionSet()
+ record.ConditionSet.ConditionTable = []
+ record.ConditionSet.ConditionCount = 0
+ for i, condition in enumerate(record.ConditionSet.ConditionTable):
+ if condition.Format == 1:
+ axisIdx = condition.AxisIndex
+ axisTag = fvarAxes[axisIdx].axisTag
+
+ minValue = condition.FilterRangeMinValue
+ maxValue = condition.FilterRangeMaxValue
+ triple = axisLimits.get(axisTag, default_triple)
+
+ if not (minValue <= triple.default <= maxValue):
+ applies = False
+
+ # if condition not met, remove entire record
+ if triple.minimum > maxValue or triple.maximum < minValue:
+ newConditions = None
+ break
+
+ if axisTag in axisIndexMap:
+ # remap axis index
+ condition.AxisIndex = axisIndexMap[axisTag]
+
+ # remap condition limits
+ newRange = _limitFeatureVariationConditionRange(condition, triple)
+ if newRange:
+ # keep condition with updated limits
+ minimum, maximum = newRange
+ condition.FilterRangeMinValue = minimum
+ condition.FilterRangeMaxValue = maximum
+ shouldKeep = True
+ if minimum != -1 or maximum != +1:
+ newConditions.append(condition)
+ else:
+ # condition out of range, remove entire record
+ newConditions = None
+ break
+
+ else:
+ log.warning(
+ "Condition table {0} of FeatureVariationRecord {1} has "
+ "unsupported format ({2}); ignored".format(i, recIdx, condition.Format)
+ )
+ applies = False
+ newConditions.append(condition)
+
+ if newConditions is not None and shouldKeep:
+ record.ConditionSet.ConditionTable = newConditions
+ if not newConditions:
+ record.ConditionSet = None
+ shouldKeep = True
+ else:
+ shouldKeep = False
+
+ # Does this *always* apply?
+ universal = shouldKeep and not newConditions
+
+ return applies, shouldKeep, universal
+
+
+def _instantiateFeatureVariations(table, fvarAxes, axisLimits):
+ pinnedAxes = set(axisLimits.pinnedLocation())
+ axisOrder = [axis.axisTag for axis in fvarAxes if axis.axisTag not in pinnedAxes]
+ axisIndexMap = {axisTag: axisOrder.index(axisTag) for axisTag in axisOrder}
+
+ featureVariationApplied = False
+ uniqueRecords = set()
+ newRecords = []
+ defaultsSubsts = None
+
+ for i, record in enumerate(table.FeatureVariations.FeatureVariationRecord):
+ applies, shouldKeep, universal = _instantiateFeatureVariationRecord(
+ record, i, axisLimits, fvarAxes, axisIndexMap
+ )
+
+ if shouldKeep and _featureVariationRecordIsUnique(record, uniqueRecords):
+ newRecords.append(record)
+
+ if applies and not featureVariationApplied:
+ assert record.FeatureTableSubstitution.Version == 0x00010000
+ defaultsSubsts = deepcopy(record.FeatureTableSubstitution)
+ for default, rec in zip(
+ defaultsSubsts.SubstitutionRecord,
+ record.FeatureTableSubstitution.SubstitutionRecord,
+ ):
+ default.Feature = deepcopy(
+ table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature
+ )
+ table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = deepcopy(
+ rec.Feature
+ )
+ # Set variations only once
+ featureVariationApplied = True
+
+ # Further records don't have a chance to apply after a universal record
+ if universal:
+ break
+
+ # Insert a catch-all record to reinstate the old features if necessary
+ if featureVariationApplied and newRecords and not universal:
+ defaultRecord = ot.FeatureVariationRecord()
+ defaultRecord.ConditionSet = ot.ConditionSet()
+ defaultRecord.ConditionSet.ConditionTable = []
+ defaultRecord.ConditionSet.ConditionCount = 0
+ defaultRecord.FeatureTableSubstitution = defaultsSubsts
+
+ newRecords.append(defaultRecord)
+
+ if newRecords:
+ table.FeatureVariations.FeatureVariationRecord = newRecords
+ table.FeatureVariations.FeatureVariationCount = len(newRecords)
+ else:
+ del table.FeatureVariations
+ # downgrade table version if there are no FeatureVariations left
+ table.Version = 0x00010000
+
+
+def instantiateFeatureVariations(varfont, axisLimits):
+ for tableTag in ("GPOS", "GSUB"):
+ if tableTag not in varfont or not getattr(
+ varfont[tableTag].table, "FeatureVariations", None
+ ):
+ continue
+ log.info("Instantiating FeatureVariations of %s table", tableTag)
+ _instantiateFeatureVariations(
+ varfont[tableTag].table, varfont["fvar"].axes, axisLimits
+ )
+ # remove unreferenced lookups
+ varfont[tableTag].prune_lookups()
diff --git a/Lib/fontTools/varLib/instancer/names.py b/Lib/fontTools/varLib/instancer/names.py
index cfe12a94..f9454688 100644
--- a/Lib/fontTools/varLib/instancer/names.py
+++ b/Lib/fontTools/varLib/instancer/names.py
@@ -37,6 +37,9 @@ def getVariationNameIDs(varfont):
used.append(axis.AxisNameID)
for value in stat.AxisValueArray.AxisValue if stat.AxisValueArray else ():
used.append(value.ValueNameID)
+ elidedFallbackNameID = getattr(stat, "ElidedFallbackNameID", None)
+ if elidedFallbackNameID is not None:
+ used.append(elidedFallbackNameID)
# nameIDs <= 255 are reserved by OT spec so we don't touch them
return {nameID for nameID in used if nameID > 255}
@@ -83,7 +86,7 @@ def updateNameTable(varfont, axisLimits):
Example: Updating a partial variable font:
| >>> ttFont = TTFont("OpenSans[wdth,wght].ttf")
- | >>> updateNameTable(ttFont, {"wght": AxisRange(400, 900), "wdth": 75})
+ | >>> updateNameTable(ttFont, {"wght": (400, 900), "wdth": 75})
The name table records will be updated in the following manner:
NameID 1 familyName: "Open Sans" --> "Open Sans Condensed"
@@ -99,7 +102,7 @@ def updateNameTable(varfont, axisLimits):
https://docs.microsoft.com/en-us/typography/opentype/spec/stat
https://docs.microsoft.com/en-us/typography/opentype/spec/name#name-ids
"""
- from . import AxisRange, axisValuesFromAxisLimits
+ from . import AxisLimits, axisValuesFromAxisLimits
if "STAT" not in varfont:
raise ValueError("Cannot update name table since there is no STAT table.")
@@ -110,17 +113,15 @@ def updateNameTable(varfont, axisLimits):
# The updated name table will reflect the new 'zero origin' of the font.
# If we're instantiating a partial font, we will populate the unpinned
- # axes with their default axis values.
+ # axes with their default axis values from fvar.
+ axisLimits = AxisLimits(axisLimits).limitAxesAndPopulateDefaults(varfont)
+ partialDefaults = axisLimits.defaultLocation()
fvarDefaults = {a.axisTag: a.defaultValue for a in fvar.axes}
- defaultAxisCoords = deepcopy(axisLimits)
- for axisTag, val in fvarDefaults.items():
- if axisTag not in defaultAxisCoords or isinstance(
- defaultAxisCoords[axisTag], AxisRange
- ):
- defaultAxisCoords[axisTag] = val
+ defaultAxisCoords = AxisLimits({**fvarDefaults, **partialDefaults})
+ assert all(v.minimum == v.maximum for v in defaultAxisCoords.values())
axisValueTables = axisValuesFromAxisLimits(stat, defaultAxisCoords)
- checkAxisValuesExist(stat, axisValueTables, defaultAxisCoords)
+ checkAxisValuesExist(stat, axisValueTables, defaultAxisCoords.pinnedLocation())
# ignore "elidable" axis values, should be omitted in application font menus.
axisValueTables = [
@@ -133,6 +134,14 @@ def updateNameTable(varfont, axisLimits):
def checkAxisValuesExist(stat, axisValues, axisCoords):
seen = set()
designAxes = stat.DesignAxisRecord.Axis
+ hasValues = set()
+ for value in stat.AxisValueArray.AxisValue:
+ if value.Format in (1, 2, 3):
+ hasValues.add(designAxes[value.AxisIndex].AxisTag)
+ elif value.Format == 4:
+ for rec in value.AxisValueRecord:
+ hasValues.add(designAxes[rec.AxisIndex].AxisTag)
+
for axisValueTable in axisValues:
axisValueFormat = axisValueTable.Format
if axisValueTable.Format in (1, 2, 3):
@@ -149,10 +158,10 @@ def checkAxisValuesExist(stat, axisValues, axisCoords):
if axisTag in axisCoords and rec.Value == axisCoords[axisTag]:
seen.add(axisTag)
- missingAxes = set(axisCoords) - seen
+ missingAxes = (set(axisCoords) - seen) & hasValues
if missingAxes:
- missing = ", ".join(f"'{i}={axisCoords[i]}'" for i in missingAxes)
- raise ValueError(f"Cannot find Axis Values [{missing}]")
+ missing = ", ".join(f"'{i}': {axisCoords[i]}" for i in missingAxes)
+ raise ValueError(f"Cannot find Axis Values {{{missing}}}")
def _sortAxisValues(axisValues):
diff --git a/Lib/fontTools/varLib/instancer/solver.py b/Lib/fontTools/varLib/instancer/solver.py
new file mode 100644
index 00000000..9c568fe9
--- /dev/null
+++ b/Lib/fontTools/varLib/instancer/solver.py
@@ -0,0 +1,307 @@
+from fontTools.varLib.models import supportScalar
+from fontTools.misc.fixedTools import MAX_F2DOT14
+from functools import lru_cache
+
+__all__ = ["rebaseTent"]
+
+EPSILON = 1 / (1 << 14)
+
+
+def _reverse_negate(v):
+ return (-v[2], -v[1], -v[0])
+
+
+def _solve(tent, axisLimit, negative=False):
+ axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit
+ lower, peak, upper = tent
+
+ # Mirror the problem such that axisDef <= peak
+ if axisDef > peak:
+ return [
+ (scalar, _reverse_negate(t) if t is not None else None)
+ for scalar, t in _solve(
+ _reverse_negate(tent),
+ axisLimit.reverse_negate(),
+ not negative,
+ )
+ ]
+ # axisDef <= peak
+
+ # case 1: The whole deltaset falls outside the new limit; we can drop it
+ #
+ # peak
+ # 1.........................................o..........
+ # / \
+ # / \
+ # / \
+ # / \
+ # 0---|-----------|----------|-------- o o----1
+ # axisMin axisDef axisMax lower upper
+ #
+ if axisMax <= lower and axisMax < peak:
+ return [] # No overlap
+
+ # case 2: Only the peak and outermost bound fall outside the new limit;
+ # we keep the deltaset, update peak and outermost bound and and scale deltas
+ # by the scalar value for the restricted axis at the new limit, and solve
+ # recursively.
+ #
+ # |peak
+ # 1...............................|.o..........
+ # |/ \
+ # / \
+ # /| \
+ # / | \
+ # 0--------------------------- o | o----1
+ # lower | upper
+ # |
+ # axisMax
+ #
+ # Convert to:
+ #
+ # 1............................................
+ # |
+ # o peak
+ # /|
+ # /x|
+ # 0--------------------------- o o upper ----1
+ # lower |
+ # |
+ # axisMax
+ if axisMax < peak:
+ mult = supportScalar({"tag": axisMax}, {"tag": tent})
+ tent = (lower, axisMax, axisMax)
+ return [(scalar * mult, t) for scalar, t in _solve(tent, axisLimit)]
+
+ # lower <= axisDef <= peak <= axisMax
+
+ gain = supportScalar({"tag": axisDef}, {"tag": tent})
+ out = [(gain, None)]
+
+ # First, the positive side
+
+ # outGain is the scalar of axisMax at the tent.
+ outGain = supportScalar({"tag": axisMax}, {"tag": tent})
+
+ # Case 3a: Gain is more than outGain. The tent down-slope crosses
+ # the axis into negative. We have to split it into multiples.
+ #
+ # | peak |
+ # 1...................|.o.....|..............
+ # |/x\_ |
+ # gain................+....+_.|..............
+ # /| |y\|
+ # ................../.|....|..+_......outGain
+ # / | | | \
+ # 0---|-----------o | | | o----------1
+ # axisMin lower | | | upper
+ # | | |
+ # axisDef | axisMax
+ # |
+ # crossing
+ if gain >= outGain:
+ # Note that this is the branch taken if both gain and outGain are 0.
+
+ # Crossing point on the axis.
+ crossing = peak + (1 - gain) * (upper - peak)
+
+ loc = (max(lower, axisDef), peak, crossing)
+ scalar = 1
+
+ # The part before the crossing point.
+ out.append((scalar - gain, loc))
+
+ # The part after the crossing point may use one or two tents,
+ # depending on whether upper is before axisMax or not, in one
+ # case we need to keep it down to eternity.
+
+ # Case 3a1, similar to case 1neg; just one tent needed, as in
+ # the drawing above.
+ if upper >= axisMax:
+ loc = (crossing, axisMax, axisMax)
+ scalar = outGain
+
+ out.append((scalar - gain, loc))
+
+ # Case 3a2: Similar to case 2neg; two tents needed, to keep
+ # down to eternity.
+ #
+ # | peak |
+ # 1...................|.o................|...
+ # |/ \_ |
+ # gain................+....+_............|...
+ # /| | \xxxxxxxxxxy|
+ # / | | \_xxxxxyyyy|
+ # / | | \xxyyyyyy|
+ # 0---|-----------o | | o-------|--1
+ # axisMin lower | | upper |
+ # | | |
+ # axisDef | axisMax
+ # |
+ # crossing
+ else:
+ # A tent's peak cannot fall on axis default. Nudge it.
+ if upper == axisDef:
+ upper += EPSILON
+
+ # Downslope.
+ loc1 = (crossing, upper, axisMax)
+ scalar1 = 0
+
+ # Eternity justify.
+ loc2 = (upper, axisMax, axisMax)
+ scalar2 = 0
+
+ out.append((scalar1 - gain, loc1))
+ out.append((scalar2 - gain, loc2))
+
+ else:
+ # Special-case if peak is at axisMax.
+ if axisMax == peak:
+ upper = peak
+
+ # Case 3:
+ # We keep delta as is and only scale the axis upper to achieve
+ # the desired new tent if feasible.
+ #
+ # peak
+ # 1.....................o....................
+ # / \_|
+ # ..................../....+_.........outGain
+ # / | \
+ # gain..............+......|..+_.............
+ # /| | | \
+ # 0---|-----------o | | | o----------1
+ # axisMin lower| | | upper
+ # | | newUpper
+ # axisDef axisMax
+ #
+ newUpper = peak + (1 - gain) * (upper - peak)
+ assert axisMax <= newUpper # Because outGain > gain
+ if newUpper <= axisDef + (axisMax - axisDef) * 2:
+ upper = newUpper
+ if not negative and axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper:
+ # we clamp +2.0 to the max F2Dot14 (~1.99994) for convenience
+ upper = axisDef + (axisMax - axisDef) * MAX_F2DOT14
+ assert peak < upper
+
+ loc = (max(axisDef, lower), peak, upper)
+ scalar = 1
+
+ out.append((scalar - gain, loc))
+
+ # Case 4: New limit doesn't fit; we need to chop into two tents,
+ # because the shape of a triangle with part of one side cut off
+ # cannot be represented as a triangle itself.
+ #
+ # | peak |
+ # 1.........|......o.|....................
+ # ..........|...../x\|.............outGain
+ # | |xxy|\_
+ # | /xxxy| \_
+ # | |xxxxy| \_
+ # | /xxxxy| \_
+ # 0---|-----|-oxxxxxx| o----------1
+ # axisMin | lower | upper
+ # | |
+ # axisDef axisMax
+ #
+ else:
+ loc1 = (max(axisDef, lower), peak, axisMax)
+ scalar1 = 1
+
+ loc2 = (peak, axisMax, axisMax)
+ scalar2 = outGain
+
+ out.append((scalar1 - gain, loc1))
+ # Don't add a dirac delta!
+ if peak < axisMax:
+ out.append((scalar2 - gain, loc2))
+
+ # Now, the negative side
+
+ # Case 1neg: Lower extends beyond axisMin: we chop. Simple.
+ #
+ # | |peak
+ # 1..................|...|.o.................
+ # | |/ \
+ # gain...............|...+...\...............
+ # |x_/| \
+ # |/ | \
+ # _/| | \
+ # 0---------------o | | o----------1
+ # lower | | upper
+ # | |
+ # axisMin axisDef
+ #
+ if lower <= axisMin:
+ loc = (axisMin, axisMin, axisDef)
+ scalar = supportScalar({"tag": axisMin}, {"tag": tent})
+
+ out.append((scalar - gain, loc))
+
+ # Case 2neg: Lower is betwen axisMin and axisDef: we add two
+ # tents to keep it down all the way to eternity.
+ #
+ # | |peak
+ # 1...|...............|.o.................
+ # | |/ \
+ # gain|...............+...\...............
+ # |yxxxxxxxxxxxxx/| \
+ # |yyyyyyxxxxxxx/ | \
+ # |yyyyyyyyyyyx/ | \
+ # 0---|-----------o | o----------1
+ # axisMin lower | upper
+ # |
+ # axisDef
+ #
+ else:
+ # A tent's peak cannot fall on axis default. Nudge it.
+ if lower == axisDef:
+ lower -= EPSILON
+
+ # Downslope.
+ loc1 = (axisMin, lower, axisDef)
+ scalar1 = 0
+
+ # Eternity justify.
+ loc2 = (axisMin, axisMin, lower)
+ scalar2 = 0
+
+ out.append((scalar1 - gain, loc1))
+ out.append((scalar2 - gain, loc2))
+
+ return out
+
+
+@lru_cache(128)
+def rebaseTent(tent, axisLimit):
+ """Given a tuple (lower,peak,upper) "tent" and new axis limits
+ (axisMin,axisDefault,axisMax), solves how to represent the tent
+ under the new axis configuration. All values are in normalized
+ -1,0,+1 coordinate system. Tent values can be outside this range.
+
+ Return value is a list of tuples. Each tuple is of the form
+ (scalar,tent), where scalar is a multipler to multiply any
+ delta-sets by, and tent is a new tent for that output delta-set.
+ If tent value is None, that is a special deltaset that should
+ be always-enabled (called "gain")."""
+
+ axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit
+ assert -1 <= axisMin <= axisDef <= axisMax <= +1
+
+ lower, peak, upper = tent
+ assert -2 <= lower <= peak <= upper <= +2
+
+ assert peak != 0
+
+ sols = _solve(tent, axisLimit)
+
+ n = lambda v: axisLimit.renormalizeValue(v)
+ sols = [
+ (scalar, (n(v[0]), n(v[1]), n(v[2])) if v is not None else None)
+ for scalar, v in sols
+ if scalar
+ ]
+
+ return sols
diff --git a/Lib/fontTools/varLib/interpolatable.py b/Lib/fontTools/varLib/interpolatable.py
index f86b6f9b..c3f01f46 100644
--- a/Lib/fontTools/varLib/interpolatable.py
+++ b/Lib/fontTools/varLib/interpolatable.py
@@ -7,21 +7,20 @@ $ fonttools varLib.interpolatable font1 font2 ...
"""
from fontTools.pens.basePen import AbstractPen, BasePen
-from fontTools.pens.pointPen import SegmentToPointPen
+from fontTools.pens.pointPen import AbstractPointPen, SegmentToPointPen
from fontTools.pens.recordingPen import RecordingPen
from fontTools.pens.statisticsPen import StatisticsPen
from fontTools.pens.momentsPen import OpenContourError
-from collections import OrderedDict
+from collections import defaultdict
+import math
import itertools
import sys
+
def _rot_list(l, k):
"""Rotate list by k items forward. Ie. item at position 0 will be
at position k in returned list. Negative k is allowed."""
- n = len(l)
- k %= n
- if not k: return l
- return l[n-k:] + l[:n-k]
+ return l[-k:] + l[:-k]
class PerContourPen(BasePen):
@@ -64,12 +63,11 @@ class PerContourOrComponentPen(PerContourPen):
self.value[-1].addComponent(glyphName, transformation)
-class RecordingPointPen(BasePen):
-
+class RecordingPointPen(AbstractPointPen):
def __init__(self):
self.value = []
- def beginPath(self, identifier = None, **kwargs):
+ def beginPath(self, identifier=None, **kwargs):
pass
def endPath(self) -> None:
@@ -79,47 +77,43 @@ class RecordingPointPen(BasePen):
self.value.append((pt, False if segmentType is None else True))
-def _vdiff(v0, v1):
- return tuple(b - a for a, b in zip(v0, v1))
-
+def _vdiff_hypot2(v0, v1):
+ s = 0
+ for x0, x1 in zip(v0, v1):
+ d = x1 - x0
+ s += d * d
+ return s
-def _vlen(vec):
- v = 0
- for x in vec:
- v += x * x
- return v
-def _complex_vlen(vec):
- v = 0
- for x in vec:
- v += abs(x) * abs(x)
- return v
+def _vdiff_hypot2_complex(v0, v1):
+ s = 0
+ for x0, x1 in zip(v0, v1):
+ d = x1 - x0
+ s += d.real * d.real + d.imag * d.imag
+ return s
def _matching_cost(G, matching):
return sum(G[i][j] for i, j in enumerate(matching))
-def min_cost_perfect_bipartite_matching(G):
+def min_cost_perfect_bipartite_matching_scipy(G):
n = len(G)
- try:
- from scipy.optimize import linear_sum_assignment
+ rows, cols = linear_sum_assignment(G)
+ assert (rows == list(range(n))).all()
+ return list(cols), _matching_cost(G, cols)
- rows, cols = linear_sum_assignment(G)
- assert (rows == list(range(n))).all()
- return list(cols), _matching_cost(G, cols)
- except ImportError:
- pass
- try:
- from munkres import Munkres
+def min_cost_perfect_bipartite_matching_munkres(G):
+ n = len(G)
+ cols = [None] * n
+ for row, col in Munkres().compute(G):
+ cols[row] = col
+ return cols, _matching_cost(G, cols)
- cols = [None] * n
- for row, col in Munkres().compute(G):
- cols[row] = col
- return cols, _matching_cost(G, cols)
- except ImportError:
- pass
+
+def min_cost_perfect_bipartite_matching_bruteforce(G):
+ n = len(G)
if n > 6:
raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'")
@@ -135,33 +129,50 @@ def min_cost_perfect_bipartite_matching(G):
return best, best_cost
-def test(glyphsets, glyphs=None, names=None):
+try:
+ from scipy.optimize import linear_sum_assignment
+
+ min_cost_perfect_bipartite_matching = min_cost_perfect_bipartite_matching_scipy
+except ImportError:
+ try:
+ from munkres import Munkres
+
+ min_cost_perfect_bipartite_matching = (
+ min_cost_perfect_bipartite_matching_munkres
+ )
+ except ImportError:
+ min_cost_perfect_bipartite_matching = (
+ min_cost_perfect_bipartite_matching_bruteforce
+ )
+
+def test_gen(glyphsets, glyphs=None, names=None, ignore_missing=False):
if names is None:
names = glyphsets
if glyphs is None:
- glyphs = glyphsets[0].keys()
+ # `glyphs = glyphsets[0].keys()` is faster, certainly, but doesn't allow for sparse TTFs/OTFs given out of order
+ # ... risks the sparse master being the first one, and only processing a subset of the glyphs
+ glyphs = {g for glyphset in glyphsets for g in glyphset.keys()}
hist = []
- problems = OrderedDict()
-
- def add_problem(glyphname, problem):
- problems.setdefault(glyphname, []).append(problem)
for glyph_name in glyphs:
- # print()
- # print(glyph_name)
-
try:
+ m0idx = 0
allVectors = []
allNodeTypes = []
allContourIsomorphisms = []
- for glyphset, name in zip(glyphsets, names):
- # print('.', end='')
- if glyph_name not in glyphset:
- add_problem(glyph_name, {"type": "missing", "master": name})
+ allGlyphs = [glyphset[glyph_name] for glyphset in glyphsets]
+ if len([1 for glyph in allGlyphs if glyph is not None]) <= 1:
+ continue
+ for glyph, glyphset, name in zip(allGlyphs, glyphsets, names):
+ if glyph is None:
+ if not ignore_missing:
+ yield (glyph_name, {"type": "missing", "master": name})
+ allNodeTypes.append(None)
+ allVectors.append(None)
+ allContourIsomorphisms.append(None)
continue
- glyph = glyphset[glyph_name]
perContourPen = PerContourOrComponentPen(
RecordingPen, glyphset=glyphset
@@ -180,7 +191,6 @@ def test(glyphsets, glyphs=None, names=None):
allVectors.append(contourVectors)
allContourIsomorphisms.append(contourIsomorphisms)
for ix, contour in enumerate(contourPens):
-
nodeVecs = tuple(instruction[0] for instruction in contour.value)
nodeTypes.append(nodeVecs)
@@ -188,12 +198,12 @@ def test(glyphsets, glyphs=None, names=None):
try:
contour.replay(stats)
except OpenContourError as e:
- add_problem(
+ yield (
glyph_name,
{"master": name, "contour": ix, "type": "open_path"},
)
continue
- size = abs(stats.area) ** 0.5 * 0.5
+ size = math.sqrt(abs(stats.area)) * 0.5
vector = (
int(size),
int(stats.meanX),
@@ -206,10 +216,10 @@ def test(glyphsets, glyphs=None, names=None):
# print(vector)
# Check starting point
- if nodeVecs[0] == 'addComponent':
+ if nodeVecs[0] == "addComponent":
continue
- assert nodeVecs[0] == 'moveTo'
- assert nodeVecs[-1] in ('closePath', 'endPath')
+ assert nodeVecs[0] == "moveTo"
+ assert nodeVecs[-1] in ("closePath", "endPath")
points = RecordingPointPen()
converter = SegmentToPointPen(points, False)
contour.replay(converter)
@@ -217,35 +227,46 @@ def test(glyphsets, glyphs=None, names=None):
# now check all rotations and mirror-rotations of the contour and build list of isomorphic
# possible starting points.
bits = 0
- for pt,b in points.value:
+ for pt, b in points.value:
bits = (bits << 1) | b
n = len(points.value)
- mask = (1 << n ) - 1
+ mask = (1 << n) - 1
isomorphisms = []
contourIsomorphisms.append(isomorphisms)
+ complexPoints = [complex(*pt) for pt, bl in points.value]
for i in range(n):
b = ((bits << i) & mask) | ((bits >> (n - i)))
if b == bits:
- isomorphisms.append(_rot_list ([complex(*pt) for pt,bl in points.value], i))
+ isomorphisms.append(_rot_list(complexPoints, i))
# Add mirrored rotations
mirrored = list(reversed(points.value))
reversed_bits = 0
- for pt,b in mirrored:
+ for pt, b in mirrored:
reversed_bits = (reversed_bits << 1) | b
+ complexPoints = list(reversed(complexPoints))
for i in range(n):
b = ((reversed_bits << i) & mask) | ((reversed_bits >> (n - i)))
if b == bits:
- isomorphisms.append(_rot_list ([complex(*pt) for pt,bl in mirrored], i))
+ isomorphisms.append(_rot_list(complexPoints, i))
- # Check each master against the next one in the list.
- for i, (m0, m1) in enumerate(zip(allNodeTypes[:-1], allNodeTypes[1:])):
+ # m0idx should be the index of the first non-None item in allNodeTypes,
+ # else give it the last item.
+ m0idx = next(
+ (i for i, x in enumerate(allNodeTypes) if x is not None),
+ len(allNodeTypes) - 1,
+ )
+ # m0 is the first non-None item in allNodeTypes, or last one if all None
+ m0 = allNodeTypes[m0idx]
+ for i, m1 in enumerate(allNodeTypes[m0idx + 1 :]):
+ if m1 is None:
+ continue
if len(m0) != len(m1):
- add_problem(
+ yield (
glyph_name,
{
"type": "path_count",
- "master_1": names[i],
- "master_2": names[i + 1],
+ "master_1": names[m0idx],
+ "master_2": names[m0idx + i + 1],
"value_1": len(m0),
"value_2": len(m1),
},
@@ -256,13 +277,13 @@ def test(glyphsets, glyphs=None, names=None):
if nodes1 == nodes2:
continue
if len(nodes1) != len(nodes2):
- add_problem(
+ yield (
glyph_name,
{
"type": "node_count",
"path": pathIx,
- "master_1": names[i],
- "master_2": names[i + 1],
+ "master_1": names[m0idx],
+ "master_2": names[m0idx + i + 1],
"value_1": len(nodes1),
"value_2": len(nodes2),
},
@@ -270,72 +291,109 @@ def test(glyphsets, glyphs=None, names=None):
continue
for nodeIx, (n1, n2) in enumerate(zip(nodes1, nodes2)):
if n1 != n2:
- add_problem(
+ yield (
glyph_name,
{
"type": "node_incompatibility",
"path": pathIx,
"node": nodeIx,
- "master_1": names[i],
- "master_2": names[i + 1],
+ "master_1": names[m0idx],
+ "master_2": names[m0idx + i + 1],
"value_1": n1,
"value_2": n2,
},
)
continue
- for i, (m0, m1) in enumerate(zip(allVectors[:-1], allVectors[1:])):
- if len(m0) != len(m1):
- # We already reported this
- continue
- if not m0:
- continue
- costs = [[_vlen(_vdiff(v0, v1)) for v1 in m1] for v0 in m0]
- matching, matching_cost = min_cost_perfect_bipartite_matching(costs)
- identity_matching = list(range(len(m0)))
- identity_cost = sum(costs[i][i] for i in range(len(m0)))
- if matching != identity_matching and matching_cost < identity_cost * .95:
- add_problem(
- glyph_name,
- {
- "type": "contour_order",
- "master_1": names[i],
- "master_2": names[i + 1],
- "value_1": list(range(len(m0))),
- "value_2": matching,
- },
- )
- break
-
- for i, (m0, m1) in enumerate(zip(allContourIsomorphisms[:-1], allContourIsomorphisms[1:])):
- if len(m0) != len(m1):
- # We already reported this
- continue
- if not m0:
- continue
- for contour0,contour1 in zip(m0,m1):
- c0 = contour0[0]
- costs = [v for v in (_complex_vlen(_vdiff(c0, c1)) for c1 in contour1)]
- min_cost = min(costs)
- first_cost = costs[0]
- if min_cost < first_cost * .95:
- add_problem(
+ # m0idx should be the index of the first non-None item in allVectors,
+ # else give it the last item.
+ m0idx = next(
+ (i for i, x in enumerate(allVectors) if x is not None),
+ len(allVectors) - 1,
+ )
+ # m0 is the first non-None item in allVectors, or last one if all None
+ m0 = allVectors[m0idx]
+ if m0 is not None and len(m0) > 1:
+ for i, m1 in enumerate(allVectors[m0idx + 1 :]):
+ if m1 is None:
+ continue
+ if len(m0) != len(m1):
+ # We already reported this
+ continue
+ costs = [[_vdiff_hypot2(v0, v1) for v1 in m1] for v0 in m0]
+ matching, matching_cost = min_cost_perfect_bipartite_matching(costs)
+ identity_matching = list(range(len(m0)))
+ identity_cost = sum(costs[i][i] for i in range(len(m0)))
+ if (
+ matching != identity_matching
+ and matching_cost < identity_cost * 0.95
+ ):
+ yield (
glyph_name,
{
- "type": "wrong_start_point",
- "master_1": names[i],
- "master_2": names[i + 1],
+ "type": "contour_order",
+ "master_1": names[m0idx],
+ "master_2": names[m0idx + i + 1],
+ "value_1": list(range(len(m0))),
+ "value_2": matching,
},
)
+ break
+
+ # m0idx should be the index of the first non-None item in allContourIsomorphisms,
+ # else give it the last item.
+ m0idx = next(
+ (i for i, x in enumerate(allContourIsomorphisms) if x is not None),
+ len(allVectors) - 1,
+ )
+ # m0 is the first non-None item in allContourIsomorphisms, or last one if all None
+ m0 = allContourIsomorphisms[m0idx]
+ if m0:
+ for i, m1 in enumerate(allContourIsomorphisms[m0idx + 1 :]):
+ if m1 is None:
+ continue
+ if len(m0) != len(m1):
+ # We already reported this
+ continue
+ for ix, (contour0, contour1) in enumerate(zip(m0, m1)):
+ c0 = contour0[0]
+ costs = [_vdiff_hypot2_complex(c0, c1) for c1 in contour1]
+ min_cost = min(costs)
+ first_cost = costs[0]
+ if min_cost < first_cost * 0.95:
+ yield (
+ glyph_name,
+ {
+ "type": "wrong_start_point",
+ "contour": ix,
+ "master_1": names[m0idx],
+ "master_2": names[m0idx + i + 1],
+ },
+ )
except ValueError as e:
- add_problem(
+ yield (
glyph_name,
{"type": "math_error", "master": name, "error": e},
)
+
+
+def test(glyphsets, glyphs=None, names=None, ignore_missing=False):
+ problems = defaultdict(list)
+ for glyphname, problem in test_gen(glyphsets, glyphs, names, ignore_missing):
+ problems[glyphname].append(problem)
return problems
+def recursivelyAddGlyph(glyphname, glyphset, ttGlyphSet, glyf):
+ if glyphname in glyphset:
+ return
+ glyphset[glyphname] = ttGlyphSet[glyphname]
+
+ for component in getattr(glyf[glyphname], "components", []):
+ recursivelyAddGlyph(component.glyphName, glyphset, ttGlyphSet, glyf)
+
+
def main(args=None):
"""Test for interpolatability issues between fonts"""
import argparse
@@ -345,19 +403,36 @@ def main(args=None):
description=main.__doc__,
)
parser.add_argument(
+ "--glyphs",
+ action="store",
+ help="Space-separate name of glyphs to check",
+ )
+ parser.add_argument(
"--json",
action="store_true",
help="Output report in JSON format",
)
parser.add_argument(
- "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF/UFO files"
+ "--quiet",
+ action="store_true",
+ help="Only exit with code 1 or 0, no output",
+ )
+ parser.add_argument(
+ "--ignore-missing",
+ action="store_true",
+ help="Will not report glyphs missing from sparse masters as errors",
+ )
+ parser.add_argument(
+ "inputs",
+ metavar="FILE",
+ type=str,
+ nargs="+",
+ help="Input a single variable font / DesignSpace / Glyphs file, or multiple TTF/UFO files",
)
args = parser.parse_args(args)
- glyphs = None
- # glyphs = ['uni08DB', 'uniFD76']
- # glyphs = ['uni08DE', 'uni0034']
- # glyphs = ['uni08DE', 'uni0034', 'uni0751', 'uni0753', 'uni0754', 'uni08A4', 'uni08A4.fina', 'uni08A5.fina']
+
+ glyphs = args.glyphs.split() if args.glyphs else None
from os.path import basename
@@ -365,74 +440,113 @@ def main(args=None):
names = []
if len(args.inputs) == 1:
- if args.inputs[0].endswith('.designspace'):
+ if args.inputs[0].endswith(".designspace"):
from fontTools.designspaceLib import DesignSpaceDocument
+
designspace = DesignSpaceDocument.fromfile(args.inputs[0])
args.inputs = [master.path for master in designspace.sources]
- elif args.inputs[0].endswith('.glyphs'):
+ elif args.inputs[0].endswith(".glyphs"):
from glyphsLib import GSFont, to_ufos
+
gsfont = GSFont(args.inputs[0])
fonts.extend(to_ufos(gsfont))
- names = ['%s-%s' % (f.info.familyName, f.info.styleName) for f in fonts]
+ names = ["%s-%s" % (f.info.familyName, f.info.styleName) for f in fonts]
args.inputs = []
- elif args.inputs[0].endswith('.ttf'):
+ elif args.inputs[0].endswith(".ttf"):
from fontTools.ttLib import TTFont
+
font = TTFont(args.inputs[0])
- if 'gvar' in font:
+ if "gvar" in font:
# Is variable font
- gvar = font['gvar']
- # Gather all "master" locations
- locs = set()
- for variations in gvar.variations.values():
- for var in variations:
+ gvar = font["gvar"]
+ glyf = font["glyf"]
+ # Gather all glyphs at their "master" locations
+ ttGlyphSets = {}
+ glyphsets = defaultdict(dict)
+
+ if glyphs is None:
+ glyphs = sorted(gvar.variations.keys())
+ for glyphname in glyphs:
+ for var in gvar.variations[glyphname]:
+ locDict = {}
loc = []
- for tag,val in sorted(var.axes.items()):
- loc.append((tag,val[1]))
- locs.add(tuple(loc))
- # Rebuild locs as dictionaries
- new_locs = [{}]
- for loc in sorted(locs, key=lambda v: (len(v), v)):
- names.append(str(loc))
- l = {}
- for tag,val in loc:
- l[tag] = val
- new_locs.append(l)
- locs = new_locs
- del new_locs
- # locs is all master locations now
-
- for loc in locs:
- fonts.append(font.getGlyphSet(location=loc, normalized=True))
+ for tag, val in sorted(var.axes.items()):
+ locDict[tag] = val[1]
+ loc.append((tag, val[1]))
+
+ locTuple = tuple(loc)
+ if locTuple not in ttGlyphSets:
+ ttGlyphSets[locTuple] = font.getGlyphSet(
+ location=locDict, normalized=True
+ )
- args.inputs = []
+ recursivelyAddGlyph(
+ glyphname, glyphsets[locTuple], ttGlyphSets[locTuple], glyf
+ )
+ names = ["()"]
+ fonts = [font.getGlyphSet()]
+ for locTuple in sorted(glyphsets.keys(), key=lambda v: (len(v), v)):
+ names.append(str(locTuple))
+ fonts.append(glyphsets[locTuple])
+ args.ignore_missing = True
+ args.inputs = []
for filename in args.inputs:
if filename.endswith(".ufo"):
from fontTools.ufoLib import UFOReader
+
fonts.append(UFOReader(filename))
else:
from fontTools.ttLib import TTFont
+
fonts.append(TTFont(filename))
names.append(basename(filename).rsplit(".", 1)[0])
- if hasattr(fonts[0], 'getGlyphSet'):
- glyphsets = [font.getGlyphSet() for font in fonts]
- else:
- glyphsets = fonts
+ glyphsets = []
+ for font in fonts:
+ if hasattr(font, "getGlyphSet"):
+ glyphset = font.getGlyphSet()
+ else:
+ glyphset = font
+ glyphsets.append({k: glyphset[k] for k in glyphset.keys()})
+
+ if not glyphs:
+ glyphs = sorted(set([gn for glyphset in glyphsets for gn in glyphset.keys()]))
+
+ glyphsSet = set(glyphs)
+ for glyphset in glyphsets:
+ glyphSetGlyphNames = set(glyphset.keys())
+ diff = glyphsSet - glyphSetGlyphNames
+ if diff:
+ for gn in diff:
+ glyphset[gn] = None
+
+ problems_gen = test_gen(
+ glyphsets, glyphs=glyphs, names=names, ignore_missing=args.ignore_missing
+ )
+ problems = defaultdict(list)
- problems = test(glyphsets, glyphs=glyphs, names=names)
- if args.json:
- import json
+ if not args.quiet:
+ if args.json:
+ import json
+
+ for glyphname, problem in problems_gen:
+ problems[glyphname].append(problem)
+
+ print(json.dumps(problems))
+ else:
+ last_glyphname = None
+ for glyphname, p in problems_gen:
+ problems[glyphname].append(p)
+
+ if glyphname != last_glyphname:
+ print(f"Glyph {glyphname} was not compatible: ")
+ last_glyphname = glyphname
- print(json.dumps(problems))
- else:
- for glyph, glyph_problems in problems.items():
- print(f"Glyph {glyph} was not compatible: ")
- for p in glyph_problems:
if p["type"] == "missing":
print(" Glyph was missing in master %s" % p["master"])
if p["type"] == "open_path":
@@ -477,12 +591,25 @@ def main(args=None):
)
if p["type"] == "wrong_start_point":
print(
- " Contour start point differs: %s, %s"
+ " Contour %d start point differs: %s, %s"
% (
+ p["contour"],
p["master_1"],
p["master_2"],
)
)
+ if p["type"] == "math_error":
+ print(
+ " Miscellaneous error in %s: %s"
+ % (
+ p["master"],
+ p["error"],
+ )
+ )
+ else:
+ for glyphname, problem in problems_gen:
+ problems[glyphname].append(problem)
+
if problems:
return problems
diff --git a/Lib/fontTools/varLib/interpolate_layout.py b/Lib/fontTools/varLib/interpolate_layout.py
index 6d0385dd..aa3f49c6 100644
--- a/Lib/fontTools/varLib/interpolate_layout.py
+++ b/Lib/fontTools/varLib/interpolate_layout.py
@@ -12,93 +12,112 @@ from pprint import pformat
log = logging.getLogger("fontTools.varLib.interpolate_layout")
-def interpolate_layout(designspace, loc, master_finder=lambda s:s, mapped=False):
- """
- Interpolate GPOS from a designspace file and location.
-
- If master_finder is set, it should be a callable that takes master
- filename as found in designspace file and map it to master font
- binary as to be opened (eg. .ttf or .otf).
-
- If mapped is False (default), then location is mapped using the
- map element of the axes in designspace file. If mapped is True,
- it is assumed that location is in designspace's internal space and
- no mapping is performed.
- """
- if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
- pass
- else: # Assume a file path
- from fontTools.designspaceLib import DesignSpaceDocument
- designspace = DesignSpaceDocument.fromfile(designspace)
-
- ds = load_designspace(designspace)
- log.info("Building interpolated font")
-
- log.info("Loading master fonts")
- master_fonts = load_masters(designspace, master_finder)
- font = deepcopy(master_fonts[ds.base_idx])
-
- log.info("Location: %s", pformat(loc))
- if not mapped:
- loc = {name: ds.axes[name].map_forward(v) for name,v in loc.items()}
- log.info("Internal location: %s", pformat(loc))
- loc = models.normalizeLocation(loc, ds.internal_axis_supports)
- log.info("Normalized location: %s", pformat(loc))
-
- # Assume single-model for now.
- model = models.VariationModel(ds.normalized_master_locs)
- assert 0 == model.mapping[ds.base_idx]
-
- merger = InstancerMerger(font, model, loc)
-
- log.info("Building interpolated tables")
- # TODO GSUB/GDEF
- merger.mergeTables(font, master_fonts, ['GPOS'])
- return font
+def interpolate_layout(designspace, loc, master_finder=lambda s: s, mapped=False):
+ """
+ Interpolate GPOS from a designspace file and location.
+ If master_finder is set, it should be a callable that takes master
+ filename as found in designspace file and map it to master font
+ binary as to be opened (eg. .ttf or .otf).
-def main(args=None):
- """Interpolate GDEF/GPOS/GSUB tables for a point on a designspace"""
- from fontTools import configLogger
- import argparse
- import sys
+ If mapped is False (default), then location is mapped using the
+ map element of the axes in designspace file. If mapped is True,
+ it is assumed that location is in designspace's internal space and
+ no mapping is performed.
+ """
+ if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
+ pass
+ else: # Assume a file path
+ from fontTools.designspaceLib import DesignSpaceDocument
+
+ designspace = DesignSpaceDocument.fromfile(designspace)
- parser = argparse.ArgumentParser(
- "fonttools varLib.interpolate_layout",
- description=main.__doc__,
- )
- parser.add_argument('designspace_filename', metavar='DESIGNSPACE',
- help="Input TTF files")
- parser.add_argument('locations', metavar='LOCATION', type=str, nargs='+',
- help="Axis locations (e.g. wdth=120")
- parser.add_argument('-o', '--output', metavar='OUTPUT',
- help="Output font file (defaults to <designspacename>-instance.ttf)")
- parser.add_argument('-l', '--loglevel', metavar='LEVEL', default="INFO",
- help="Logging level (defaults to INFO)")
+ ds = load_designspace(designspace)
+ log.info("Building interpolated font")
+ log.info("Loading master fonts")
+ master_fonts = load_masters(designspace, master_finder)
+ font = deepcopy(master_fonts[ds.base_idx])
- args = parser.parse_args(args)
+ log.info("Location: %s", pformat(loc))
+ if not mapped:
+ loc = {name: ds.axes[name].map_forward(v) for name, v in loc.items()}
+ log.info("Internal location: %s", pformat(loc))
+ loc = models.normalizeLocation(loc, ds.internal_axis_supports)
+ log.info("Normalized location: %s", pformat(loc))
- if not args.output:
- args.output = os.path.splitext(args.designspace_filename)[0] + '-instance.ttf'
+ # Assume single-model for now.
+ model = models.VariationModel(ds.normalized_master_locs)
+ assert 0 == model.mapping[ds.base_idx]
- configLogger(level=args.loglevel)
+ merger = InstancerMerger(font, model, loc)
- finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf')
+ log.info("Building interpolated tables")
+ # TODO GSUB/GDEF
+ merger.mergeTables(font, master_fonts, ["GPOS"])
+ return font
- loc = {}
- for arg in args.locations:
- tag,val = arg.split('=')
- loc[tag] = float(val)
- font = interpolate_layout(args.designspace_filename, loc, finder)
- log.info("Saving font %s", args.output)
- font.save(args.output)
+def main(args=None):
+ """Interpolate GDEF/GPOS/GSUB tables for a point on a designspace"""
+ from fontTools import configLogger
+ import argparse
+ import sys
+
+ parser = argparse.ArgumentParser(
+ "fonttools varLib.interpolate_layout",
+ description=main.__doc__,
+ )
+ parser.add_argument(
+ "designspace_filename", metavar="DESIGNSPACE", help="Input TTF files"
+ )
+ parser.add_argument(
+ "locations",
+ metavar="LOCATION",
+ type=str,
+ nargs="+",
+ help="Axis locations (e.g. wdth=120",
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ metavar="OUTPUT",
+ help="Output font file (defaults to <designspacename>-instance.ttf)",
+ )
+ parser.add_argument(
+ "-l",
+ "--loglevel",
+ metavar="LEVEL",
+ default="INFO",
+ help="Logging level (defaults to INFO)",
+ )
+
+ args = parser.parse_args(args)
+
+ if not args.output:
+ args.output = os.path.splitext(args.designspace_filename)[0] + "-instance.ttf"
+
+ configLogger(level=args.loglevel)
+
+ finder = lambda s: s.replace("master_ufo", "master_ttf_interpolatable").replace(
+ ".ufo", ".ttf"
+ )
+
+ loc = {}
+ for arg in args.locations:
+ tag, val = arg.split("=")
+ loc[tag] = float(val)
+
+ font = interpolate_layout(args.designspace_filename, loc, finder)
+ log.info("Saving font %s", args.output)
+ font.save(args.output)
if __name__ == "__main__":
- import sys
- if len(sys.argv) > 1:
- sys.exit(main())
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+
+ if len(sys.argv) > 1:
+ sys.exit(main())
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/varLib/iup.py b/Lib/fontTools/varLib/iup.py
index 9c5bc35b..76555f35 100644
--- a/Lib/fontTools/varLib/iup.py
+++ b/Lib/fontTools/varLib/iup.py
@@ -1,25 +1,19 @@
-from typing import (
- Sequence,
- Tuple,
- Union,
-)
-from numbers import (
- Integral,
- Real
-)
-
try:
- import cython
-except ImportError:
- # if cython not installed, use mock module with no-op decorators and types
- from fontTools.misc import cython
+ import cython
-if cython.compiled:
- # Yep, I'm compiled.
- COMPILED = True
-else:
- # Just a lowly interpreted script.
- COMPILED = False
+ COMPILED = cython.compiled
+except (AttributeError, ImportError):
+ # if cython not installed, use mock module with no-op decorators and types
+ from fontTools.misc import cython
+
+ COMPILED = False
+
+from typing import (
+ Sequence,
+ Tuple,
+ Union,
+)
+from numbers import Integral, Real
_Point = Tuple[Real, Real]
@@ -33,378 +27,460 @@ _Endpoints = Sequence[Integral]
MAX_LOOKBACK = 8
-def iup_segment(coords : _PointSegment,
- rc1 : _Point,
- rd1 : _Delta,
- rc2 : _Point,
- rd2 : _Delta) -> _DeltaSegment:
- """Given two reference coordinates `rc1` & `rc2` and their respective
- delta vectors `rd1` & `rd2`, returns interpolated deltas for the set of
- coordinates `coords`. """
-
- # rc1 = reference coord 1
- # rd1 = reference delta 1
- out_arrays = [None, None]
- for j in 0,1:
- out_arrays[j] = out = []
- x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j]
-
- if x1 == x2:
- n = len(coords)
- if d1 == d2:
- out.extend([d1]*n)
- else:
- out.extend([0]*n)
- continue
-
- if x1 > x2:
- x1, x2 = x2, x1
- d1, d2 = d2, d1
-
- # x1 < x2
- scale = (d2 - d1) / (x2 - x1)
- for pair in coords:
- x = pair[j]
-
- if x <= x1:
- d = d1
- elif x >= x2:
- d = d2
- else:
- # Interpolate
- d = d1 + (x - x1) * scale
-
- out.append(d)
-
- return zip(*out_arrays)
-
-def iup_contour(deltas : _DeltaOrNoneSegment,
- coords : _PointSegment) -> _DeltaSegment:
- """For the contour given in `coords`, interpolate any missing
- delta values in delta vector `deltas`.
-
- Returns fully filled-out delta vector."""
-
- assert len(deltas) == len(coords)
- if None not in deltas:
- return deltas
-
- n = len(deltas)
- # indices of points with explicit deltas
- indices = [i for i,v in enumerate(deltas) if v is not None]
- if not indices:
- # All deltas are None. Return 0,0 for all.
- return [(0,0)]*n
-
- out = []
- it = iter(indices)
- start = next(it)
- if start != 0:
- # Initial segment that wraps around
- i1, i2, ri1, ri2 = 0, start, start, indices[-1]
- out.extend(iup_segment(coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]))
- out.append(deltas[start])
- for end in it:
- if end - start > 1:
- i1, i2, ri1, ri2 = start+1, end, start, end
- out.extend(iup_segment(coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]))
- out.append(deltas[end])
- start = end
- if start != n-1:
- # Final segment that wraps around
- i1, i2, ri1, ri2 = start+1, n, start, indices[0]
- out.extend(iup_segment(coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]))
-
- assert len(deltas) == len(out), (len(deltas), len(out))
- return out
-
-def iup_delta(deltas : _DeltaOrNoneSegment,
- coords : _PointSegment,
- ends: _Endpoints) -> _DeltaSegment:
- """For the outline given in `coords`, with contour endpoints given
- in sorted increasing order in `ends`, interpolate any missing
- delta values in delta vector `deltas`.
-
- Returns fully filled-out delta vector."""
-
- assert sorted(ends) == ends and len(coords) == (ends[-1]+1 if ends else 0) + 4
- n = len(coords)
- ends = ends + [n-4, n-3, n-2, n-1]
- out = []
- start = 0
- for end in ends:
- end += 1
- contour = iup_contour(deltas[start:end], coords[start:end])
- out.extend(contour)
- start = end
-
- return out
+
+@cython.cfunc
+@cython.locals(
+ j=cython.int,
+ n=cython.int,
+ x1=cython.double,
+ x2=cython.double,
+ d1=cython.double,
+ d2=cython.double,
+ scale=cython.double,
+ x=cython.double,
+ d=cython.double,
+)
+def iup_segment(
+ coords: _PointSegment, rc1: _Point, rd1: _Delta, rc2: _Point, rd2: _Delta
+): # -> _DeltaSegment:
+ """Given two reference coordinates `rc1` & `rc2` and their respective
+ delta vectors `rd1` & `rd2`, returns interpolated deltas for the set of
+ coordinates `coords`."""
+
+ # rc1 = reference coord 1
+ # rd1 = reference delta 1
+ out_arrays = [None, None]
+ for j in 0, 1:
+ out_arrays[j] = out = []
+ x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j]
+
+ if x1 == x2:
+ n = len(coords)
+ if d1 == d2:
+ out.extend([d1] * n)
+ else:
+ out.extend([0] * n)
+ continue
+
+ if x1 > x2:
+ x1, x2 = x2, x1
+ d1, d2 = d2, d1
+
+ # x1 < x2
+ scale = (d2 - d1) / (x2 - x1)
+ for pair in coords:
+ x = pair[j]
+
+ if x <= x1:
+ d = d1
+ elif x >= x2:
+ d = d2
+ else:
+ # Interpolate
+ d = d1 + (x - x1) * scale
+
+ out.append(d)
+
+ return zip(*out_arrays)
+
+
+def iup_contour(deltas: _DeltaOrNoneSegment, coords: _PointSegment) -> _DeltaSegment:
+ """For the contour given in `coords`, interpolate any missing
+ delta values in delta vector `deltas`.
+
+ Returns fully filled-out delta vector."""
+
+ assert len(deltas) == len(coords)
+ if None not in deltas:
+ return deltas
+
+ n = len(deltas)
+ # indices of points with explicit deltas
+ indices = [i for i, v in enumerate(deltas) if v is not None]
+ if not indices:
+ # All deltas are None. Return 0,0 for all.
+ return [(0, 0)] * n
+
+ out = []
+ it = iter(indices)
+ start = next(it)
+ if start != 0:
+ # Initial segment that wraps around
+ i1, i2, ri1, ri2 = 0, start, start, indices[-1]
+ out.extend(
+ iup_segment(
+ coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
+ )
+ )
+ out.append(deltas[start])
+ for end in it:
+ if end - start > 1:
+ i1, i2, ri1, ri2 = start + 1, end, start, end
+ out.extend(
+ iup_segment(
+ coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
+ )
+ )
+ out.append(deltas[end])
+ start = end
+ if start != n - 1:
+ # Final segment that wraps around
+ i1, i2, ri1, ri2 = start + 1, n, start, indices[0]
+ out.extend(
+ iup_segment(
+ coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
+ )
+ )
+
+ assert len(deltas) == len(out), (len(deltas), len(out))
+ return out
+
+
+def iup_delta(
+ deltas: _DeltaOrNoneSegment, coords: _PointSegment, ends: _Endpoints
+) -> _DeltaSegment:
+ """For the outline given in `coords`, with contour endpoints given
+ in sorted increasing order in `ends`, interpolate any missing
+ delta values in delta vector `deltas`.
+
+ Returns fully filled-out delta vector."""
+
+ assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4
+ n = len(coords)
+ ends = ends + [n - 4, n - 3, n - 2, n - 1]
+ out = []
+ start = 0
+ for end in ends:
+ end += 1
+ contour = iup_contour(deltas[start:end], coords[start:end])
+ out.extend(contour)
+ start = end
+
+ return out
+
# Optimizer
-def can_iup_in_between(deltas : _DeltaSegment,
- coords : _PointSegment,
- i : Integral,
- j : Integral,
- tolerance : Real) -> bool:
- """Return true if the deltas for points at `i` and `j` (`i < j`) can be
- successfully used to interpolate deltas for points in between them within
- provided error tolerance."""
-
- assert j - i >= 2
- interp = list(iup_segment(coords[i+1:j], coords[i], deltas[i], coords[j], deltas[j]))
- deltas = deltas[i+1:j]
-
- assert len(deltas) == len(interp)
-
- return all(abs(complex(x-p, y-q)) <= tolerance for (x,y),(p,q) in zip(deltas, interp))
-
-def _iup_contour_bound_forced_set(deltas : _DeltaSegment,
- coords : _PointSegment,
- tolerance : Real = 0) -> set:
- """The forced set is a conservative set of points on the contour that must be encoded
- explicitly (ie. cannot be interpolated). Calculating this set allows for significantly
- speeding up the dynamic-programming, as well as resolve circularity in DP.
-
- The set is precise; that is, if an index is in the returned set, then there is no way
- that IUP can generate delta for that point, given `coords` and `deltas`.
- """
- assert len(deltas) == len(coords)
-
- n = len(deltas)
- forced = set()
- # Track "last" and "next" points on the contour as we sweep.
- for i in range(len(deltas)-1, -1, -1):
- ld, lc = deltas[i-1], coords[i-1]
- d, c = deltas[i], coords[i]
- nd, nc = deltas[i-n+1], coords[i-n+1]
-
- for j in (0,1): # For X and for Y
- cj = c[j]
- dj = d[j]
- lcj = lc[j]
- ldj = ld[j]
- ncj = nc[j]
- ndj = nd[j]
-
- if lcj <= ncj:
- c1, c2 = lcj, ncj
- d1, d2 = ldj, ndj
- else:
- c1, c2 = ncj, lcj
- d1, d2 = ndj, ldj
-
- force = False
-
- # If the two coordinates are the same, then the interpolation
- # algorithm produces the same delta if both deltas are equal,
- # and zero if they differ.
- #
- # This test has to be before the next one.
- if c1 == c2:
- if abs(d1 - d2) > tolerance and abs(dj) > tolerance:
- force = True
-
- # If coordinate for current point is between coordinate of adjacent
- # points on the two sides, but the delta for current point is NOT
- # between delta for those adjacent points (considering tolerance
- # allowance), then there is no way that current point can be IUP-ed.
- # Mark it forced.
- elif c1 <= cj <= c2: # and c1 != c2
- if not (min(d1,d2)-tolerance <= dj <= max(d1,d2)+tolerance):
- force = True
-
- # Otherwise, the delta should either match the closest, or have the
- # same sign as the interpolation of the two deltas.
- else: # cj < c1 or c2 < cj
- if d1 != d2:
- if cj < c1:
- if abs(dj) > tolerance and abs(dj - d1) > tolerance and ((dj-tolerance < d1) != (d1 < d2)):
- force = True
- else: # c2 < cj
- if abs(dj) > tolerance and abs(dj - d2) > tolerance and ((d2 < dj+tolerance) != (d1 < d2)):
- force = True
-
- if force:
- forced.add(i)
- break
-
- return forced
-
-def _iup_contour_optimize_dp(deltas : _DeltaSegment,
- coords : _PointSegment,
- forced={},
- tolerance : Real = 0,
- lookback : Integral =None):
- """Straightforward Dynamic-Programming. For each index i, find least-costly encoding of
- points 0 to i where i is explicitly encoded. We find this by considering all previous
- explicit points j and check whether interpolation can fill points between j and i.
-
- Note that solution always encodes last point explicitly. Higher-level is responsible
- for removing that restriction.
-
- As major speedup, we stop looking further whenever we see a "forced" point."""
-
- n = len(deltas)
- if lookback is None:
- lookback = n
- lookback = min(lookback, MAX_LOOKBACK)
- costs = {-1:0}
- chain = {-1:None}
- for i in range(0, n):
- best_cost = costs[i-1] + 1
-
- costs[i] = best_cost
- chain[i] = i - 1
-
- if i - 1 in forced:
- continue
-
- for j in range(i-2, max(i-lookback, -2), -1):
-
- cost = costs[j] + 1
-
- if cost < best_cost and can_iup_in_between(deltas, coords, j, i, tolerance):
- costs[i] = best_cost = cost
- chain[i] = j
-
- if j in forced:
- break
-
- return chain, costs
-
-def _rot_list(l : list, k : int):
- """Rotate list by k items forward. Ie. item at position 0 will be
- at position k in returned list. Negative k is allowed."""
- n = len(l)
- k %= n
- if not k: return l
- return l[n-k:] + l[:n-k]
-
-def _rot_set(s : set, k : int, n : int):
- k %= n
- if not k: return s
- return {(v + k) % n for v in s}
-
-def iup_contour_optimize(deltas : _DeltaSegment,
- coords : _PointSegment,
- tolerance : Real = 0.) -> _DeltaOrNoneSegment:
- """For contour with coordinates `coords`, optimize a set of delta
- values `deltas` within error `tolerance`.
-
- Returns delta vector that has most number of None items instead of
- the input delta.
- """
-
- n = len(deltas)
-
- # Get the easy cases out of the way:
-
- # If all are within tolerance distance of 0, encode nothing:
- if all(abs(complex(*p)) <= tolerance for p in deltas):
- return [None] * n
-
- # If there's exactly one point, return it:
- if n == 1:
- return deltas
-
- # If all deltas are exactly the same, return just one (the first one):
- d0 = deltas[0]
- if all(d0 == d for d in deltas):
- return [d0] + [None] * (n-1)
-
- # Else, solve the general problem using Dynamic Programming.
-
- forced = _iup_contour_bound_forced_set(deltas, coords, tolerance)
- # The _iup_contour_optimize_dp() routine returns the optimal encoding
- # solution given the constraint that the last point is always encoded.
- # To remove this constraint, we use two different methods, depending on
- # whether forced set is non-empty or not:
-
- # Debugging: Make the next if always take the second branch and observe
- # if the font size changes (reduced); that would mean the forced-set
- # has members it should not have.
- if forced:
- # Forced set is non-empty: rotate the contour start point
- # such that the last point in the list is a forced point.
- k = (n-1) - max(forced)
- assert k >= 0
-
- deltas = _rot_list(deltas, k)
- coords = _rot_list(coords, k)
- forced = _rot_set(forced, k, n)
-
- # Debugging: Pass a set() instead of forced variable to the next call
- # to exercise forced-set computation for under-counting.
- chain, costs = _iup_contour_optimize_dp(deltas, coords, forced, tolerance)
-
- # Assemble solution.
- solution = set()
- i = n - 1
- while i is not None:
- solution.add(i)
- i = chain[i]
- solution.remove(-1)
-
- #if not forced <= solution:
- # print("coord", coords)
- # print("deltas", deltas)
- # print("len", len(deltas))
- assert forced <= solution, (forced, solution)
-
- deltas = [deltas[i] if i in solution else None for i in range(n)]
-
- deltas = _rot_list(deltas, -k)
- else:
- # Repeat the contour an extra time, solve the new case, then look for solutions of the
- # circular n-length problem in the solution for new linear case. I cannot prove that
- # this always produces the optimal solution...
- chain, costs = _iup_contour_optimize_dp(deltas+deltas, coords+coords, forced, tolerance, n)
- best_sol, best_cost = None, n+1
-
- for start in range(n-1, len(costs) - 1):
- # Assemble solution.
- solution = set()
- i = start
- while i > start - n:
- solution.add(i % n)
- i = chain[i]
- if i == start - n:
- cost = costs[start] - costs[start - n]
- if cost <= best_cost:
- best_sol, best_cost = solution, cost
-
- #if not forced <= best_sol:
- # print("coord", coords)
- # print("deltas", deltas)
- # print("len", len(deltas))
- assert forced <= best_sol, (forced, best_sol)
-
- deltas = [deltas[i] if i in best_sol else None for i in range(n)]
-
-
- return deltas
-
-def iup_delta_optimize(deltas : _DeltaSegment,
- coords : _PointSegment,
- ends : _Endpoints,
- tolerance : Real = 0.) -> _DeltaOrNoneSegment:
- """For the outline given in `coords`, with contour endpoints given
- in sorted increasing order in `ends`, optimize a set of delta
- values `deltas` within error `tolerance`.
-
- Returns delta vector that has most number of None items instead of
- the input delta.
- """
- assert sorted(ends) == ends and len(coords) == (ends[-1]+1 if ends else 0) + 4
- n = len(coords)
- ends = ends + [n-4, n-3, n-2, n-1]
- out = []
- start = 0
- for end in ends:
- contour = iup_contour_optimize(deltas[start:end+1], coords[start:end+1], tolerance)
- assert len(contour) == end - start + 1
- out.extend(contour)
- start = end+1
-
- return out
+
+@cython.cfunc
+@cython.inline
+@cython.locals(
+ i=cython.int,
+ j=cython.int,
+ # tolerance=cython.double, # https://github.com/fonttools/fonttools/issues/3282
+ x=cython.double,
+ y=cython.double,
+ p=cython.double,
+ q=cython.double,
+)
+@cython.returns(int)
+def can_iup_in_between(
+ deltas: _DeltaSegment,
+ coords: _PointSegment,
+ i: Integral,
+ j: Integral,
+ tolerance: Real,
+): # -> bool:
+ """Return true if the deltas for points at `i` and `j` (`i < j`) can be
+ successfully used to interpolate deltas for points in between them within
+ provided error tolerance."""
+
+ assert j - i >= 2
+ interp = iup_segment(coords[i + 1 : j], coords[i], deltas[i], coords[j], deltas[j])
+ deltas = deltas[i + 1 : j]
+
+ return all(
+ abs(complex(x - p, y - q)) <= tolerance
+ for (x, y), (p, q) in zip(deltas, interp)
+ )
+
+
+@cython.locals(
+ cj=cython.double,
+ dj=cython.double,
+ lcj=cython.double,
+ ldj=cython.double,
+ ncj=cython.double,
+ ndj=cython.double,
+ force=cython.int,
+ forced=set,
+)
+def _iup_contour_bound_forced_set(
+ deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0
+) -> set:
+ """The forced set is a conservative set of points on the contour that must be encoded
+ explicitly (ie. cannot be interpolated). Calculating this set allows for significantly
+ speeding up the dynamic-programming, as well as resolve circularity in DP.
+
+ The set is precise; that is, if an index is in the returned set, then there is no way
+ that IUP can generate delta for that point, given `coords` and `deltas`.
+ """
+ assert len(deltas) == len(coords)
+
+ n = len(deltas)
+ forced = set()
+ # Track "last" and "next" points on the contour as we sweep.
+ for i in range(len(deltas) - 1, -1, -1):
+ ld, lc = deltas[i - 1], coords[i - 1]
+ d, c = deltas[i], coords[i]
+ nd, nc = deltas[i - n + 1], coords[i - n + 1]
+
+ for j in (0, 1): # For X and for Y
+ cj = c[j]
+ dj = d[j]
+ lcj = lc[j]
+ ldj = ld[j]
+ ncj = nc[j]
+ ndj = nd[j]
+
+ if lcj <= ncj:
+ c1, c2 = lcj, ncj
+ d1, d2 = ldj, ndj
+ else:
+ c1, c2 = ncj, lcj
+ d1, d2 = ndj, ldj
+
+ force = False
+
+ # If the two coordinates are the same, then the interpolation
+ # algorithm produces the same delta if both deltas are equal,
+ # and zero if they differ.
+ #
+ # This test has to be before the next one.
+ if c1 == c2:
+ if abs(d1 - d2) > tolerance and abs(dj) > tolerance:
+ force = True
+
+ # If coordinate for current point is between coordinate of adjacent
+ # points on the two sides, but the delta for current point is NOT
+ # between delta for those adjacent points (considering tolerance
+ # allowance), then there is no way that current point can be IUP-ed.
+ # Mark it forced.
+ elif c1 <= cj <= c2: # and c1 != c2
+ if not (min(d1, d2) - tolerance <= dj <= max(d1, d2) + tolerance):
+ force = True
+
+ # Otherwise, the delta should either match the closest, or have the
+ # same sign as the interpolation of the two deltas.
+ else: # cj < c1 or c2 < cj
+ if d1 != d2:
+ if cj < c1:
+ if (
+ abs(dj) > tolerance
+ and abs(dj - d1) > tolerance
+ and ((dj - tolerance < d1) != (d1 < d2))
+ ):
+ force = True
+ else: # c2 < cj
+ if (
+ abs(dj) > tolerance
+ and abs(dj - d2) > tolerance
+ and ((d2 < dj + tolerance) != (d1 < d2))
+ ):
+ force = True
+
+ if force:
+ forced.add(i)
+ break
+
+ return forced
+
+
+@cython.locals(
+ i=cython.int,
+ j=cython.int,
+ best_cost=cython.double,
+ best_j=cython.int,
+ cost=cython.double,
+ forced=set,
+ tolerance=cython.double,
+)
+def _iup_contour_optimize_dp(
+ deltas: _DeltaSegment,
+ coords: _PointSegment,
+ forced=set(),
+ tolerance: Real = 0,
+ lookback: Integral = None,
+):
+ """Straightforward Dynamic-Programming. For each index i, find least-costly encoding of
+ points 0 to i where i is explicitly encoded. We find this by considering all previous
+ explicit points j and check whether interpolation can fill points between j and i.
+
+ Note that solution always encodes last point explicitly. Higher-level is responsible
+ for removing that restriction.
+
+ As major speedup, we stop looking further whenever we see a "forced" point."""
+
+ n = len(deltas)
+ if lookback is None:
+ lookback = n
+ lookback = min(lookback, MAX_LOOKBACK)
+ costs = {-1: 0}
+ chain = {-1: None}
+ for i in range(0, n):
+ best_cost = costs[i - 1] + 1
+
+ costs[i] = best_cost
+ chain[i] = i - 1
+
+ if i - 1 in forced:
+ continue
+
+ for j in range(i - 2, max(i - lookback, -2), -1):
+ cost = costs[j] + 1
+
+ if cost < best_cost and can_iup_in_between(deltas, coords, j, i, tolerance):
+ costs[i] = best_cost = cost
+ chain[i] = j
+
+ if j in forced:
+ break
+
+ return chain, costs
+
+
+def _rot_list(l: list, k: int):
+ """Rotate list by k items forward. Ie. item at position 0 will be
+ at position k in returned list. Negative k is allowed."""
+ n = len(l)
+ k %= n
+ if not k:
+ return l
+ return l[n - k :] + l[: n - k]
+
+
+def _rot_set(s: set, k: int, n: int):
+ k %= n
+ if not k:
+ return s
+ return {(v + k) % n for v in s}
+
+
+def iup_contour_optimize(
+ deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0.0
+) -> _DeltaOrNoneSegment:
+ """For contour with coordinates `coords`, optimize a set of delta
+ values `deltas` within error `tolerance`.
+
+ Returns delta vector that has most number of None items instead of
+ the input delta.
+ """
+
+ n = len(deltas)
+
+ # Get the easy cases out of the way:
+
+ # If all are within tolerance distance of 0, encode nothing:
+ if all(abs(complex(*p)) <= tolerance for p in deltas):
+ return [None] * n
+
+ # If there's exactly one point, return it:
+ if n == 1:
+ return deltas
+
+ # If all deltas are exactly the same, return just one (the first one):
+ d0 = deltas[0]
+ if all(d0 == d for d in deltas):
+ return [d0] + [None] * (n - 1)
+
+ # Else, solve the general problem using Dynamic Programming.
+
+ forced = _iup_contour_bound_forced_set(deltas, coords, tolerance)
+ # The _iup_contour_optimize_dp() routine returns the optimal encoding
+ # solution given the constraint that the last point is always encoded.
+ # To remove this constraint, we use two different methods, depending on
+ # whether forced set is non-empty or not:
+
+ # Debugging: Make the next if always take the second branch and observe
+ # if the font size changes (reduced); that would mean the forced-set
+ # has members it should not have.
+ if forced:
+ # Forced set is non-empty: rotate the contour start point
+ # such that the last point in the list is a forced point.
+ k = (n - 1) - max(forced)
+ assert k >= 0
+
+ deltas = _rot_list(deltas, k)
+ coords = _rot_list(coords, k)
+ forced = _rot_set(forced, k, n)
+
+ # Debugging: Pass a set() instead of forced variable to the next call
+ # to exercise forced-set computation for under-counting.
+ chain, costs = _iup_contour_optimize_dp(deltas, coords, forced, tolerance)
+
+ # Assemble solution.
+ solution = set()
+ i = n - 1
+ while i is not None:
+ solution.add(i)
+ i = chain[i]
+ solution.remove(-1)
+
+ # if not forced <= solution:
+ # print("coord", coords)
+ # print("deltas", deltas)
+ # print("len", len(deltas))
+ assert forced <= solution, (forced, solution)
+
+ deltas = [deltas[i] if i in solution else None for i in range(n)]
+
+ deltas = _rot_list(deltas, -k)
+ else:
+ # Repeat the contour an extra time, solve the new case, then look for solutions of the
+ # circular n-length problem in the solution for new linear case. I cannot prove that
+ # this always produces the optimal solution...
+ chain, costs = _iup_contour_optimize_dp(
+ deltas + deltas, coords + coords, forced, tolerance, n
+ )
+ best_sol, best_cost = None, n + 1
+
+ for start in range(n - 1, len(costs) - 1):
+ # Assemble solution.
+ solution = set()
+ i = start
+ while i > start - n:
+ solution.add(i % n)
+ i = chain[i]
+ if i == start - n:
+ cost = costs[start] - costs[start - n]
+ if cost <= best_cost:
+ best_sol, best_cost = solution, cost
+
+ # if not forced <= best_sol:
+ # print("coord", coords)
+ # print("deltas", deltas)
+ # print("len", len(deltas))
+ assert forced <= best_sol, (forced, best_sol)
+
+ deltas = [deltas[i] if i in best_sol else None for i in range(n)]
+
+ return deltas
+
+
+def iup_delta_optimize(
+ deltas: _DeltaSegment,
+ coords: _PointSegment,
+ ends: _Endpoints,
+ tolerance: Real = 0.0,
+) -> _DeltaOrNoneSegment:
+ """For the outline given in `coords`, with contour endpoints given
+ in sorted increasing order in `ends`, optimize a set of delta
+ values `deltas` within error `tolerance`.
+
+ Returns delta vector that has most number of None items instead of
+ the input delta.
+ """
+ assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4
+ n = len(coords)
+ ends = ends + [n - 4, n - 3, n - 2, n - 1]
+ out = []
+ start = 0
+ for end in ends:
+ contour = iup_contour_optimize(
+ deltas[start : end + 1], coords[start : end + 1], tolerance
+ )
+ assert len(contour) == end - start + 1
+ out.extend(contour)
+ start = end + 1
+
+ return out
diff --git a/Lib/fontTools/varLib/merger.py b/Lib/fontTools/varLib/merger.py
index c9a1d3e3..b2c34016 100644
--- a/Lib/fontTools/varLib/merger.py
+++ b/Lib/fontTools/varLib/merger.py
@@ -21,8 +21,8 @@ from fontTools.varLib.varStore import VarStoreInstancer
from functools import reduce
from fontTools.otlLib.builder import buildSinglePos
from fontTools.otlLib.optimize.gpos import (
- _compression_level_from_env,
- compact_pair_pos,
+ _compression_level_from_env,
+ compact_pair_pos,
)
log = logging.getLogger("fontTools.varLib.merger")
@@ -41,1502 +41,1668 @@ from .errors import (
VarLibMergeError,
)
+
class Merger(object):
+ def __init__(self, font=None):
+ self.font = font
+ # mergeTables populates this from the parent's master ttfs
+ self.ttfs = None
+
+ @classmethod
+ def merger(celf, clazzes, attrs=(None,)):
+ assert celf != Merger, "Subclass Merger instead."
+ if "mergers" not in celf.__dict__:
+ celf.mergers = {}
+ if type(clazzes) in (type, enum.EnumMeta):
+ clazzes = (clazzes,)
+ if type(attrs) == str:
+ attrs = (attrs,)
+
+ def wrapper(method):
+ assert method.__name__ == "merge"
+ done = []
+ for clazz in clazzes:
+ if clazz in done:
+ continue # Support multiple names of a clazz
+ done.append(clazz)
+ mergers = celf.mergers.setdefault(clazz, {})
+ for attr in attrs:
+ assert attr not in mergers, (
+ "Oops, class '%s' has merge function for '%s' defined already."
+ % (clazz.__name__, attr)
+ )
+ mergers[attr] = method
+ return None
+
+ return wrapper
+
+ @classmethod
+ def mergersFor(celf, thing, _default={}):
+ typ = type(thing)
+
+ for celf in celf.mro():
+ mergers = getattr(celf, "mergers", None)
+ if mergers is None:
+ break
+
+ m = celf.mergers.get(typ, None)
+ if m is not None:
+ return m
+
+ return _default
+
+ def mergeObjects(self, out, lst, exclude=()):
+ if hasattr(out, "ensureDecompiled"):
+ out.ensureDecompiled(recurse=False)
+ for item in lst:
+ if hasattr(item, "ensureDecompiled"):
+ item.ensureDecompiled(recurse=False)
+ keys = sorted(vars(out).keys())
+ if not all(keys == sorted(vars(v).keys()) for v in lst):
+ raise KeysDiffer(
+ self, expected=keys, got=[sorted(vars(v).keys()) for v in lst]
+ )
+ mergers = self.mergersFor(out)
+ defaultMerger = mergers.get("*", self.__class__.mergeThings)
+ try:
+ for key in keys:
+ if key in exclude:
+ continue
+ value = getattr(out, key)
+ values = [getattr(table, key) for table in lst]
+ mergerFunc = mergers.get(key, defaultMerger)
+ mergerFunc(self, value, values)
+ except VarLibMergeError as e:
+ e.stack.append("." + key)
+ raise
+
+ def mergeLists(self, out, lst):
+ if not allEqualTo(out, lst, len):
+ raise LengthsDiffer(self, expected=len(out), got=[len(x) for x in lst])
+ for i, (value, values) in enumerate(zip(out, zip(*lst))):
+ try:
+ self.mergeThings(value, values)
+ except VarLibMergeError as e:
+ e.stack.append("[%d]" % i)
+ raise
+
+ def mergeThings(self, out, lst):
+ if not allEqualTo(out, lst, type):
+ raise MismatchedTypes(
+ self, expected=type(out).__name__, got=[type(x).__name__ for x in lst]
+ )
+ mergerFunc = self.mergersFor(out).get(None, None)
+ if mergerFunc is not None:
+ mergerFunc(self, out, lst)
+ elif isinstance(out, enum.Enum):
+ # need to special-case Enums as have __dict__ but are not regular 'objects',
+ # otherwise mergeObjects/mergeThings get trapped in a RecursionError
+ if not allEqualTo(out, lst):
+ raise ShouldBeConstant(self, expected=out, got=lst)
+ elif hasattr(out, "__dict__"):
+ self.mergeObjects(out, lst)
+ elif isinstance(out, list):
+ self.mergeLists(out, lst)
+ else:
+ if not allEqualTo(out, lst):
+ raise ShouldBeConstant(self, expected=out, got=lst)
+
+ def mergeTables(self, font, master_ttfs, tableTags):
+ for tag in tableTags:
+ if tag not in font:
+ continue
+ try:
+ self.ttfs = master_ttfs
+ self.mergeThings(font[tag], [m.get(tag) for m in master_ttfs])
+ except VarLibMergeError as e:
+ e.stack.append(tag)
+ raise
- def __init__(self, font=None):
- self.font = font
- # mergeTables populates this from the parent's master ttfs
- self.ttfs = None
-
- @classmethod
- def merger(celf, clazzes, attrs=(None,)):
- assert celf != Merger, 'Subclass Merger instead.'
- if 'mergers' not in celf.__dict__:
- celf.mergers = {}
- if type(clazzes) in (type, enum.EnumMeta):
- clazzes = (clazzes,)
- if type(attrs) == str:
- attrs = (attrs,)
- def wrapper(method):
- assert method.__name__ == 'merge'
- done = []
- for clazz in clazzes:
- if clazz in done: continue # Support multiple names of a clazz
- done.append(clazz)
- mergers = celf.mergers.setdefault(clazz, {})
- for attr in attrs:
- assert attr not in mergers, \
- "Oops, class '%s' has merge function for '%s' defined already." % (clazz.__name__, attr)
- mergers[attr] = method
- return None
- return wrapper
-
- @classmethod
- def mergersFor(celf, thing, _default={}):
- typ = type(thing)
-
- for celf in celf.mro():
-
- mergers = getattr(celf, 'mergers', None)
- if mergers is None:
- break;
-
- m = celf.mergers.get(typ, None)
- if m is not None:
- return m
-
- return _default
-
- def mergeObjects(self, out, lst, exclude=()):
- if hasattr(out, "ensureDecompiled"):
- out.ensureDecompiled(recurse=False)
- for item in lst:
- if hasattr(item, "ensureDecompiled"):
- item.ensureDecompiled(recurse=False)
- keys = sorted(vars(out).keys())
- if not all(keys == sorted(vars(v).keys()) for v in lst):
- raise KeysDiffer(self, expected=keys,
- got=[sorted(vars(v).keys()) for v in lst]
- )
- mergers = self.mergersFor(out)
- defaultMerger = mergers.get('*', self.__class__.mergeThings)
- try:
- for key in keys:
- if key in exclude: continue
- value = getattr(out, key)
- values = [getattr(table, key) for table in lst]
- mergerFunc = mergers.get(key, defaultMerger)
- mergerFunc(self, value, values)
- except VarLibMergeError as e:
- e.stack.append('.'+key)
- raise
-
- def mergeLists(self, out, lst):
- if not allEqualTo(out, lst, len):
- raise LengthsDiffer(self, expected=len(out), got=[len(x) for x in lst])
- for i,(value,values) in enumerate(zip(out, zip(*lst))):
- try:
- self.mergeThings(value, values)
- except VarLibMergeError as e:
- e.stack.append('[%d]' % i)
- raise
-
- def mergeThings(self, out, lst):
- if not allEqualTo(out, lst, type):
- raise MismatchedTypes(self,
- expected=type(out).__name__,
- got=[type(x).__name__ for x in lst]
- )
- mergerFunc = self.mergersFor(out).get(None, None)
- if mergerFunc is not None:
- mergerFunc(self, out, lst)
- elif isinstance(out, enum.Enum):
- # need to special-case Enums as have __dict__ but are not regular 'objects',
- # otherwise mergeObjects/mergeThings get trapped in a RecursionError
- if not allEqualTo(out, lst):
- raise ShouldBeConstant(self, expected=out, got=lst)
- elif hasattr(out, '__dict__'):
- self.mergeObjects(out, lst)
- elif isinstance(out, list):
- self.mergeLists(out, lst)
- else:
- if not allEqualTo(out, lst):
- raise ShouldBeConstant(self, expected=out, got=lst)
-
- def mergeTables(self, font, master_ttfs, tableTags):
- for tag in tableTags:
- if tag not in font: continue
- try:
- self.ttfs = master_ttfs
- self.mergeThings(font[tag], [m.get(tag) for m in master_ttfs])
- except VarLibMergeError as e:
- e.stack.append(tag)
- raise
#
# Aligning merger
#
class AligningMerger(Merger):
- pass
+ pass
+
@AligningMerger.merger(ot.GDEF, "GlyphClassDef")
def merge(merger, self, lst):
- if self is None:
- if not allNone(lst):
- raise NotANone(merger, expected=None, got=lst)
- return
-
- lst = [l.classDefs for l in lst]
- self.classDefs = {}
- # We only care about the .classDefs
- self = self.classDefs
-
- allKeys = set()
- allKeys.update(*[l.keys() for l in lst])
- for k in allKeys:
- allValues = nonNone(l.get(k) for l in lst)
- if not allEqual(allValues):
- raise ShouldBeConstant(merger, expected=allValues[0], got=lst, stack=["." + k])
- if not allValues:
- self[k] = None
- else:
- self[k] = allValues[0]
+ if self is None:
+ if not allNone(lst):
+ raise NotANone(merger, expected=None, got=lst)
+ return
+
+ lst = [l.classDefs for l in lst]
+ self.classDefs = {}
+ # We only care about the .classDefs
+ self = self.classDefs
+
+ allKeys = set()
+ allKeys.update(*[l.keys() for l in lst])
+ for k in allKeys:
+ allValues = nonNone(l.get(k) for l in lst)
+ if not allEqual(allValues):
+ raise ShouldBeConstant(
+ merger, expected=allValues[0], got=lst, stack=["." + k]
+ )
+ if not allValues:
+ self[k] = None
+ else:
+ self[k] = allValues[0]
+
def _SinglePosUpgradeToFormat2(self):
- if self.Format == 2: return self
+ if self.Format == 2:
+ return self
+
+ ret = ot.SinglePos()
+ ret.Format = 2
+ ret.Coverage = self.Coverage
+ ret.ValueFormat = self.ValueFormat
+ ret.Value = [self.Value for _ in ret.Coverage.glyphs]
+ ret.ValueCount = len(ret.Value)
- ret = ot.SinglePos()
- ret.Format = 2
- ret.Coverage = self.Coverage
- ret.ValueFormat = self.ValueFormat
- ret.Value = [self.Value for _ in ret.Coverage.glyphs]
- ret.ValueCount = len(ret.Value)
+ return ret
- return ret
def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
- """Takes font and list of glyph lists (must be sorted by glyph id), and returns
- two things:
- - Combined glyph list,
- - If values_lst is None, return input glyph lists, but padded with None when a glyph
- was missing in a list. Otherwise, return values_lst list-of-list, padded with None
- to match combined glyph lists.
- """
- if values_lst is None:
- dict_sets = [set(l) for l in lst]
- else:
- dict_sets = [{g:v for g,v in zip(l,vs)} for l,vs in zip(lst,values_lst)]
- combined = set()
- combined.update(*dict_sets)
-
- sortKey = font.getReverseGlyphMap().__getitem__
- order = sorted(combined, key=sortKey)
- # Make sure all input glyphsets were in proper order
- if not all(sorted(vs, key=sortKey) == vs for vs in lst):
- raise InconsistentGlyphOrder()
- del combined
-
- paddedValues = None
- if values_lst is None:
- padded = [[glyph if glyph in dict_set else default
- for glyph in order]
- for dict_set in dict_sets]
- else:
- assert len(lst) == len(values_lst)
- padded = [[dict_set[glyph] if glyph in dict_set else default
- for glyph in order]
- for dict_set in dict_sets]
- return order, padded
+ """Takes font and list of glyph lists (must be sorted by glyph id), and returns
+ two things:
+ - Combined glyph list,
+ - If values_lst is None, return input glyph lists, but padded with None when a glyph
+ was missing in a list. Otherwise, return values_lst list-of-list, padded with None
+ to match combined glyph lists.
+ """
+ if values_lst is None:
+ dict_sets = [set(l) for l in lst]
+ else:
+ dict_sets = [{g: v for g, v in zip(l, vs)} for l, vs in zip(lst, values_lst)]
+ combined = set()
+ combined.update(*dict_sets)
+
+ sortKey = font.getReverseGlyphMap().__getitem__
+ order = sorted(combined, key=sortKey)
+ # Make sure all input glyphsets were in proper order
+ if not all(sorted(vs, key=sortKey) == vs for vs in lst):
+ raise InconsistentGlyphOrder()
+ del combined
+
+ paddedValues = None
+ if values_lst is None:
+ padded = [
+ [glyph if glyph in dict_set else default for glyph in order]
+ for dict_set in dict_sets
+ ]
+ else:
+ assert len(lst) == len(values_lst)
+ padded = [
+ [dict_set[glyph] if glyph in dict_set else default for glyph in order]
+ for dict_set in dict_sets
+ ]
+ return order, padded
+
@AligningMerger.merger(otBase.ValueRecord)
def merge(merger, self, lst):
- # Code below sometimes calls us with self being
- # a new object. Copy it from lst and recurse.
- self.__dict__ = lst[0].__dict__.copy()
- merger.mergeObjects(self, lst)
+ # Code below sometimes calls us with self being
+ # a new object. Copy it from lst and recurse.
+ self.__dict__ = lst[0].__dict__.copy()
+ merger.mergeObjects(self, lst)
+
@AligningMerger.merger(ot.Anchor)
def merge(merger, self, lst):
- # Code below sometimes calls us with self being
- # a new object. Copy it from lst and recurse.
- self.__dict__ = lst[0].__dict__.copy()
- merger.mergeObjects(self, lst)
+ # Code below sometimes calls us with self being
+ # a new object. Copy it from lst and recurse.
+ self.__dict__ = lst[0].__dict__.copy()
+ merger.mergeObjects(self, lst)
+
def _Lookup_SinglePos_get_effective_value(merger, subtables, glyph):
- for self in subtables:
- if self is None or \
- type(self) != ot.SinglePos or \
- self.Coverage is None or \
- glyph not in self.Coverage.glyphs:
- continue
- if self.Format == 1:
- return self.Value
- elif self.Format == 2:
- return self.Value[self.Coverage.glyphs.index(glyph)]
- else:
- raise UnsupportedFormat(merger, subtable="single positioning lookup")
- return None
-
-def _Lookup_PairPos_get_effective_value_pair(merger, subtables, firstGlyph, secondGlyph):
- for self in subtables:
- if self is None or \
- type(self) != ot.PairPos or \
- self.Coverage is None or \
- firstGlyph not in self.Coverage.glyphs:
- continue
- if self.Format == 1:
- ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)]
- pvr = ps.PairValueRecord
- for rec in pvr: # TODO Speed up
- if rec.SecondGlyph == secondGlyph:
- return rec
- continue
- elif self.Format == 2:
- klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0)
- klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0)
- return self.Class1Record[klass1].Class2Record[klass2]
- else:
- raise UnsupportedFormat(merger, subtable="pair positioning lookup")
- return None
+ for self in subtables:
+ if (
+ self is None
+ or type(self) != ot.SinglePos
+ or self.Coverage is None
+ or glyph not in self.Coverage.glyphs
+ ):
+ continue
+ if self.Format == 1:
+ return self.Value
+ elif self.Format == 2:
+ return self.Value[self.Coverage.glyphs.index(glyph)]
+ else:
+ raise UnsupportedFormat(merger, subtable="single positioning lookup")
+ return None
+
+
+def _Lookup_PairPos_get_effective_value_pair(
+ merger, subtables, firstGlyph, secondGlyph
+):
+ for self in subtables:
+ if (
+ self is None
+ or type(self) != ot.PairPos
+ or self.Coverage is None
+ or firstGlyph not in self.Coverage.glyphs
+ ):
+ continue
+ if self.Format == 1:
+ ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)]
+ pvr = ps.PairValueRecord
+ for rec in pvr: # TODO Speed up
+ if rec.SecondGlyph == secondGlyph:
+ return rec
+ continue
+ elif self.Format == 2:
+ klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0)
+ klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0)
+ return self.Class1Record[klass1].Class2Record[klass2]
+ else:
+ raise UnsupportedFormat(merger, subtable="pair positioning lookup")
+ return None
+
@AligningMerger.merger(ot.SinglePos)
def merge(merger, self, lst):
- self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0)
- if not (len(lst) == 1 or (valueFormat & ~0xF == 0)):
- raise UnsupportedFormat(merger, subtable="single positioning lookup")
-
- # If all have same coverage table and all are format 1,
- coverageGlyphs = self.Coverage.glyphs
- if all(v.Format == 1 for v in lst) and all(coverageGlyphs == v.Coverage.glyphs for v in lst):
- self.Value = otBase.ValueRecord(valueFormat, self.Value)
- if valueFormat != 0:
- merger.mergeThings(self.Value, [v.Value for v in lst])
- self.ValueFormat = self.Value.getFormat()
- return
-
- # Upgrade everything to Format=2
- self.Format = 2
- lst = [_SinglePosUpgradeToFormat2(v) for v in lst]
-
- # Align them
- glyphs, padded = _merge_GlyphOrders(merger.font,
- [v.Coverage.glyphs for v in lst],
- [v.Value for v in lst])
-
- self.Coverage.glyphs = glyphs
- self.Value = [otBase.ValueRecord(valueFormat) for _ in glyphs]
- self.ValueCount = len(self.Value)
-
- for i,values in enumerate(padded):
- for j,glyph in enumerate(glyphs):
- if values[j] is not None: continue
- # Fill in value from other subtables
- # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness
- # is different between used subtable and current subtable!
- # TODO(behdad) Check and warn if that happens?
- v = _Lookup_SinglePos_get_effective_value(merger, merger.lookup_subtables[i], glyph)
- if v is None:
- v = otBase.ValueRecord(valueFormat)
- values[j] = v
-
- merger.mergeLists(self.Value, padded)
-
- # Merge everything else; though, there shouldn't be anything else. :)
- merger.mergeObjects(self, lst,
- exclude=('Format', 'Coverage', 'Value', 'ValueCount', 'ValueFormat'))
- self.ValueFormat = reduce(int.__or__, [v.getEffectiveFormat() for v in self.Value], 0)
+ self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0)
+ if not (len(lst) == 1 or (valueFormat & ~0xF == 0)):
+ raise UnsupportedFormat(merger, subtable="single positioning lookup")
+
+ # If all have same coverage table and all are format 1,
+ coverageGlyphs = self.Coverage.glyphs
+ if all(v.Format == 1 for v in lst) and all(
+ coverageGlyphs == v.Coverage.glyphs for v in lst
+ ):
+ self.Value = otBase.ValueRecord(valueFormat, self.Value)
+ if valueFormat != 0:
+ # If v.Value is None, it means a kerning of 0; we want
+ # it to participate in the model still.
+ # https://github.com/fonttools/fonttools/issues/3111
+ merger.mergeThings(
+ self.Value,
+ [v.Value if v.Value is not None else otBase.ValueRecord() for v in lst],
+ )
+ self.ValueFormat = self.Value.getFormat()
+ return
+
+ # Upgrade everything to Format=2
+ self.Format = 2
+ lst = [_SinglePosUpgradeToFormat2(v) for v in lst]
+
+ # Align them
+ glyphs, padded = _merge_GlyphOrders(
+ merger.font, [v.Coverage.glyphs for v in lst], [v.Value for v in lst]
+ )
+
+ self.Coverage.glyphs = glyphs
+ self.Value = [otBase.ValueRecord(valueFormat) for _ in glyphs]
+ self.ValueCount = len(self.Value)
+
+ for i, values in enumerate(padded):
+ for j, glyph in enumerate(glyphs):
+ if values[j] is not None:
+ continue
+ # Fill in value from other subtables
+ # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness
+ # is different between used subtable and current subtable!
+ # TODO(behdad) Check and warn if that happens?
+ v = _Lookup_SinglePos_get_effective_value(
+ merger, merger.lookup_subtables[i], glyph
+ )
+ if v is None:
+ v = otBase.ValueRecord(valueFormat)
+ values[j] = v
+
+ merger.mergeLists(self.Value, padded)
+
+ # Merge everything else; though, there shouldn't be anything else. :)
+ merger.mergeObjects(
+ self, lst, exclude=("Format", "Coverage", "Value", "ValueCount", "ValueFormat")
+ )
+ self.ValueFormat = reduce(
+ int.__or__, [v.getEffectiveFormat() for v in self.Value], 0
+ )
+
@AligningMerger.merger(ot.PairSet)
def merge(merger, self, lst):
- # Align them
- glyphs, padded = _merge_GlyphOrders(merger.font,
- [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
- [vs.PairValueRecord for vs in lst])
-
- self.PairValueRecord = pvrs = []
- for glyph in glyphs:
- pvr = ot.PairValueRecord()
- pvr.SecondGlyph = glyph
- pvr.Value1 = otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None
- pvr.Value2 = otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None
- pvrs.append(pvr)
- self.PairValueCount = len(self.PairValueRecord)
-
- for i,values in enumerate(padded):
- for j,glyph in enumerate(glyphs):
- # Fill in value from other subtables
- v = ot.PairValueRecord()
- v.SecondGlyph = glyph
- if values[j] is not None:
- vpair = values[j]
- else:
- vpair = _Lookup_PairPos_get_effective_value_pair(
- merger, merger.lookup_subtables[i], self._firstGlyph, glyph
- )
- if vpair is None:
- v1, v2 = None, None
- else:
- v1 = getattr(vpair, "Value1", None)
- v2 = getattr(vpair, "Value2", None)
- v.Value1 = otBase.ValueRecord(merger.valueFormat1, src=v1) if merger.valueFormat1 else None
- v.Value2 = otBase.ValueRecord(merger.valueFormat2, src=v2) if merger.valueFormat2 else None
- values[j] = v
- del self._firstGlyph
-
- merger.mergeLists(self.PairValueRecord, padded)
+ # Align them
+ glyphs, padded = _merge_GlyphOrders(
+ merger.font,
+ [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
+ [vs.PairValueRecord for vs in lst],
+ )
+
+ self.PairValueRecord = pvrs = []
+ for glyph in glyphs:
+ pvr = ot.PairValueRecord()
+ pvr.SecondGlyph = glyph
+ pvr.Value1 = (
+ otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None
+ )
+ pvr.Value2 = (
+ otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None
+ )
+ pvrs.append(pvr)
+ self.PairValueCount = len(self.PairValueRecord)
+
+ for i, values in enumerate(padded):
+ for j, glyph in enumerate(glyphs):
+ # Fill in value from other subtables
+ v = ot.PairValueRecord()
+ v.SecondGlyph = glyph
+ if values[j] is not None:
+ vpair = values[j]
+ else:
+ vpair = _Lookup_PairPos_get_effective_value_pair(
+ merger, merger.lookup_subtables[i], self._firstGlyph, glyph
+ )
+ if vpair is None:
+ v1, v2 = None, None
+ else:
+ v1 = getattr(vpair, "Value1", None)
+ v2 = getattr(vpair, "Value2", None)
+ v.Value1 = (
+ otBase.ValueRecord(merger.valueFormat1, src=v1)
+ if merger.valueFormat1
+ else None
+ )
+ v.Value2 = (
+ otBase.ValueRecord(merger.valueFormat2, src=v2)
+ if merger.valueFormat2
+ else None
+ )
+ values[j] = v
+ del self._firstGlyph
+
+ merger.mergeLists(self.PairValueRecord, padded)
+
def _PairPosFormat1_merge(self, lst, merger):
- assert allEqual([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools."
+ assert allEqual(
+ [l.ValueFormat2 == 0 for l in lst if l.PairSet]
+ ), "Report bug against fonttools."
+
+ # Merge everything else; makes sure Format is the same.
+ merger.mergeObjects(
+ self,
+ lst,
+ exclude=("Coverage", "PairSet", "PairSetCount", "ValueFormat1", "ValueFormat2"),
+ )
+
+ empty = ot.PairSet()
+ empty.PairValueRecord = []
+ empty.PairValueCount = 0
+
+ # Align them
+ glyphs, padded = _merge_GlyphOrders(
+ merger.font,
+ [v.Coverage.glyphs for v in lst],
+ [v.PairSet for v in lst],
+ default=empty,
+ )
+
+ self.Coverage.glyphs = glyphs
+ self.PairSet = [ot.PairSet() for _ in glyphs]
+ self.PairSetCount = len(self.PairSet)
+ for glyph, ps in zip(glyphs, self.PairSet):
+ ps._firstGlyph = glyph
+
+ merger.mergeLists(self.PairSet, padded)
- # Merge everything else; makes sure Format is the same.
- merger.mergeObjects(self, lst,
- exclude=('Coverage',
- 'PairSet', 'PairSetCount',
- 'ValueFormat1', 'ValueFormat2'))
- empty = ot.PairSet()
- empty.PairValueRecord = []
- empty.PairValueCount = 0
+def _ClassDef_invert(self, allGlyphs=None):
+ if isinstance(self, dict):
+ classDefs = self
+ else:
+ classDefs = self.classDefs if self and self.classDefs else {}
+ m = max(classDefs.values()) if classDefs else 0
- # Align them
- glyphs, padded = _merge_GlyphOrders(merger.font,
- [v.Coverage.glyphs for v in lst],
- [v.PairSet for v in lst],
- default=empty)
+ ret = []
+ for _ in range(m + 1):
+ ret.append(set())
- self.Coverage.glyphs = glyphs
- self.PairSet = [ot.PairSet() for _ in glyphs]
- self.PairSetCount = len(self.PairSet)
- for glyph, ps in zip(glyphs, self.PairSet):
- ps._firstGlyph = glyph
+ for k, v in classDefs.items():
+ ret[v].add(k)
- merger.mergeLists(self.PairSet, padded)
+ # Class-0 is special. It's "everything else".
+ if allGlyphs is None:
+ ret[0] = None
+ else:
+ # Limit all classes to glyphs in allGlyphs.
+ # Collect anything without a non-zero class into class=zero.
+ ret[0] = class0 = set(allGlyphs)
+ for s in ret[1:]:
+ s.intersection_update(class0)
+ class0.difference_update(s)
-def _ClassDef_invert(self, allGlyphs=None):
+ return ret
- if isinstance(self, dict):
- classDefs = self
- else:
- classDefs = self.classDefs if self and self.classDefs else {}
- m = max(classDefs.values()) if classDefs else 0
- ret = []
- for _ in range(m + 1):
- ret.append(set())
+def _ClassDef_merge_classify(lst, allGlyphses=None):
+ self = ot.ClassDef()
+ self.classDefs = classDefs = {}
+ allGlyphsesWasNone = allGlyphses is None
+ if allGlyphsesWasNone:
+ allGlyphses = [None] * len(lst)
- for k,v in classDefs.items():
- ret[v].add(k)
+ classifier = classifyTools.Classifier()
+ for classDef, allGlyphs in zip(lst, allGlyphses):
+ sets = _ClassDef_invert(classDef, allGlyphs)
+ if allGlyphs is None:
+ sets = sets[1:]
+ classifier.update(sets)
+ classes = classifier.getClasses()
- # Class-0 is special. It's "everything else".
- if allGlyphs is None:
- ret[0] = None
- else:
- # Limit all classes to glyphs in allGlyphs.
- # Collect anything without a non-zero class into class=zero.
- ret[0] = class0 = set(allGlyphs)
- for s in ret[1:]:
- s.intersection_update(class0)
- class0.difference_update(s)
+ if allGlyphsesWasNone:
+ classes.insert(0, set())
- return ret
+ for i, classSet in enumerate(classes):
+ if i == 0:
+ continue
+ for g in classSet:
+ classDefs[g] = i
+
+ return self, classes
-def _ClassDef_merge_classify(lst, allGlyphses=None):
- self = ot.ClassDef()
- self.classDefs = classDefs = {}
- allGlyphsesWasNone = allGlyphses is None
- if allGlyphsesWasNone:
- allGlyphses = [None] * len(lst)
-
- classifier = classifyTools.Classifier()
- for classDef,allGlyphs in zip(lst, allGlyphses):
- sets = _ClassDef_invert(classDef, allGlyphs)
- if allGlyphs is None:
- sets = sets[1:]
- classifier.update(sets)
- classes = classifier.getClasses()
-
- if allGlyphsesWasNone:
- classes.insert(0, set())
-
- for i,classSet in enumerate(classes):
- if i == 0:
- continue
- for g in classSet:
- classDefs[g] = i
-
- return self, classes
def _PairPosFormat2_align_matrices(self, lst, font, transparent=False):
+ matrices = [l.Class1Record for l in lst]
+
+ # Align first classes
+ self.ClassDef1, classes = _ClassDef_merge_classify(
+ [l.ClassDef1 for l in lst], [l.Coverage.glyphs for l in lst]
+ )
+ self.Class1Count = len(classes)
+ new_matrices = []
+ for l, matrix in zip(lst, matrices):
+ nullRow = None
+ coverage = set(l.Coverage.glyphs)
+ classDef1 = l.ClassDef1.classDefs
+ class1Records = []
+ for classSet in classes:
+ exemplarGlyph = next(iter(classSet))
+ if exemplarGlyph not in coverage:
+ # Follow-up to e6125b353e1f54a0280ded5434b8e40d042de69f,
+ # Fixes https://github.com/googlei18n/fontmake/issues/470
+ # Again, revert 8d441779e5afc664960d848f62c7acdbfc71d7b9
+ # when merger becomes selfless.
+ nullRow = None
+ if nullRow is None:
+ nullRow = ot.Class1Record()
+ class2records = nullRow.Class2Record = []
+ # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f
+ for _ in range(l.Class2Count):
+ if transparent:
+ rec2 = None
+ else:
+ rec2 = ot.Class2Record()
+ rec2.Value1 = (
+ otBase.ValueRecord(self.ValueFormat1)
+ if self.ValueFormat1
+ else None
+ )
+ rec2.Value2 = (
+ otBase.ValueRecord(self.ValueFormat2)
+ if self.ValueFormat2
+ else None
+ )
+ class2records.append(rec2)
+ rec1 = nullRow
+ else:
+ klass = classDef1.get(exemplarGlyph, 0)
+ rec1 = matrix[klass] # TODO handle out-of-range?
+ class1Records.append(rec1)
+ new_matrices.append(class1Records)
+ matrices = new_matrices
+ del new_matrices
+
+ # Align second classes
+ self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst])
+ self.Class2Count = len(classes)
+ new_matrices = []
+ for l, matrix in zip(lst, matrices):
+ classDef2 = l.ClassDef2.classDefs
+ class1Records = []
+ for rec1old in matrix:
+ oldClass2Records = rec1old.Class2Record
+ rec1new = ot.Class1Record()
+ class2Records = rec1new.Class2Record = []
+ for classSet in classes:
+ if not classSet: # class=0
+ rec2 = oldClass2Records[0]
+ else:
+ exemplarGlyph = next(iter(classSet))
+ klass = classDef2.get(exemplarGlyph, 0)
+ rec2 = oldClass2Records[klass]
+ class2Records.append(copy.deepcopy(rec2))
+ class1Records.append(rec1new)
+ new_matrices.append(class1Records)
+ matrices = new_matrices
+ del new_matrices
+
+ return matrices
- matrices = [l.Class1Record for l in lst]
-
- # Align first classes
- self.ClassDef1, classes = _ClassDef_merge_classify([l.ClassDef1 for l in lst], [l.Coverage.glyphs for l in lst])
- self.Class1Count = len(classes)
- new_matrices = []
- for l,matrix in zip(lst, matrices):
- nullRow = None
- coverage = set(l.Coverage.glyphs)
- classDef1 = l.ClassDef1.classDefs
- class1Records = []
- for classSet in classes:
- exemplarGlyph = next(iter(classSet))
- if exemplarGlyph not in coverage:
- # Follow-up to e6125b353e1f54a0280ded5434b8e40d042de69f,
- # Fixes https://github.com/googlei18n/fontmake/issues/470
- # Again, revert 8d441779e5afc664960d848f62c7acdbfc71d7b9
- # when merger becomes selfless.
- nullRow = None
- if nullRow is None:
- nullRow = ot.Class1Record()
- class2records = nullRow.Class2Record = []
- # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f
- for _ in range(l.Class2Count):
- if transparent:
- rec2 = None
- else:
- rec2 = ot.Class2Record()
- rec2.Value1 = otBase.ValueRecord(self.ValueFormat1) if self.ValueFormat1 else None
- rec2.Value2 = otBase.ValueRecord(self.ValueFormat2) if self.ValueFormat2 else None
- class2records.append(rec2)
- rec1 = nullRow
- else:
- klass = classDef1.get(exemplarGlyph, 0)
- rec1 = matrix[klass] # TODO handle out-of-range?
- class1Records.append(rec1)
- new_matrices.append(class1Records)
- matrices = new_matrices
- del new_matrices
-
- # Align second classes
- self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst])
- self.Class2Count = len(classes)
- new_matrices = []
- for l,matrix in zip(lst, matrices):
- classDef2 = l.ClassDef2.classDefs
- class1Records = []
- for rec1old in matrix:
- oldClass2Records = rec1old.Class2Record
- rec1new = ot.Class1Record()
- class2Records = rec1new.Class2Record = []
- for classSet in classes:
- if not classSet: # class=0
- rec2 = oldClass2Records[0]
- else:
- exemplarGlyph = next(iter(classSet))
- klass = classDef2.get(exemplarGlyph, 0)
- rec2 = oldClass2Records[klass]
- class2Records.append(copy.deepcopy(rec2))
- class1Records.append(rec1new)
- new_matrices.append(class1Records)
- matrices = new_matrices
- del new_matrices
-
- return matrices
def _PairPosFormat2_merge(self, lst, merger):
- assert allEqual([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools."
-
- merger.mergeObjects(self, lst,
- exclude=('Coverage',
- 'ClassDef1', 'Class1Count',
- 'ClassDef2', 'Class2Count',
- 'Class1Record',
- 'ValueFormat1', 'ValueFormat2'))
-
- # Align coverages
- glyphs, _ = _merge_GlyphOrders(merger.font,
- [v.Coverage.glyphs for v in lst])
- self.Coverage.glyphs = glyphs
-
- # Currently, if the coverage of PairPosFormat2 subtables are different,
- # we do NOT bother walking down the subtable list when filling in new
- # rows for alignment. As such, this is only correct if current subtable
- # is the last subtable in the lookup. Ensure that.
- #
- # Note that our canonicalization process merges trailing PairPosFormat2's,
- # so in reality this is rare.
- for l,subtables in zip(lst,merger.lookup_subtables):
- if l.Coverage.glyphs != glyphs:
- assert l == subtables[-1]
-
- matrices = _PairPosFormat2_align_matrices(self, lst, merger.font)
-
- self.Class1Record = list(matrices[0]) # TODO move merger to be selfless
- merger.mergeLists(self.Class1Record, matrices)
+ assert allEqual(
+ [l.ValueFormat2 == 0 for l in lst if l.Class1Record]
+ ), "Report bug against fonttools."
+
+ merger.mergeObjects(
+ self,
+ lst,
+ exclude=(
+ "Coverage",
+ "ClassDef1",
+ "Class1Count",
+ "ClassDef2",
+ "Class2Count",
+ "Class1Record",
+ "ValueFormat1",
+ "ValueFormat2",
+ ),
+ )
+
+ # Align coverages
+ glyphs, _ = _merge_GlyphOrders(merger.font, [v.Coverage.glyphs for v in lst])
+ self.Coverage.glyphs = glyphs
+
+ # Currently, if the coverage of PairPosFormat2 subtables are different,
+ # we do NOT bother walking down the subtable list when filling in new
+ # rows for alignment. As such, this is only correct if current subtable
+ # is the last subtable in the lookup. Ensure that.
+ #
+ # Note that our canonicalization process merges trailing PairPosFormat2's,
+ # so in reality this is rare.
+ for l, subtables in zip(lst, merger.lookup_subtables):
+ if l.Coverage.glyphs != glyphs:
+ assert l == subtables[-1]
+
+ matrices = _PairPosFormat2_align_matrices(self, lst, merger.font)
+
+ self.Class1Record = list(matrices[0]) # TODO move merger to be selfless
+ merger.mergeLists(self.Class1Record, matrices)
+
@AligningMerger.merger(ot.PairPos)
def merge(merger, self, lst):
- merger.valueFormat1 = self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
- merger.valueFormat2 = self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)
-
- if self.Format == 1:
- _PairPosFormat1_merge(self, lst, merger)
- elif self.Format == 2:
- _PairPosFormat2_merge(self, lst, merger)
- else:
- raise UnsupportedFormat(merger, subtable="pair positioning lookup")
-
- del merger.valueFormat1, merger.valueFormat2
-
- # Now examine the list of value records, and update to the union of format values,
- # as merge might have created new values.
- vf1 = 0
- vf2 = 0
- if self.Format == 1:
- for pairSet in self.PairSet:
- for pairValueRecord in pairSet.PairValueRecord:
- pv1 = getattr(pairValueRecord, "Value1", None)
- if pv1 is not None:
- vf1 |= pv1.getFormat()
- pv2 = getattr(pairValueRecord, "Value2", None)
- if pv2 is not None:
- vf2 |= pv2.getFormat()
- elif self.Format == 2:
- for class1Record in self.Class1Record:
- for class2Record in class1Record.Class2Record:
- pv1 = getattr(class2Record, "Value1", None)
- if pv1 is not None:
- vf1 |= pv1.getFormat()
- pv2 = getattr(class2Record, "Value2", None)
- if pv2 is not None:
- vf2 |= pv2.getFormat()
- self.ValueFormat1 = vf1
- self.ValueFormat2 = vf2
-
-def _MarkBasePosFormat1_merge(self, lst, merger, Mark='Mark', Base='Base'):
- self.ClassCount = max(l.ClassCount for l in lst)
-
- MarkCoverageGlyphs, MarkRecords = \
- _merge_GlyphOrders(merger.font,
- [getattr(l, Mark+'Coverage').glyphs for l in lst],
- [getattr(l, Mark+'Array').MarkRecord for l in lst])
- getattr(self, Mark+'Coverage').glyphs = MarkCoverageGlyphs
-
- BaseCoverageGlyphs, BaseRecords = \
- _merge_GlyphOrders(merger.font,
- [getattr(l, Base+'Coverage').glyphs for l in lst],
- [getattr(getattr(l, Base+'Array'), Base+'Record') for l in lst])
- getattr(self, Base+'Coverage').glyphs = BaseCoverageGlyphs
-
- # MarkArray
- records = []
- for g,glyphRecords in zip(MarkCoverageGlyphs, zip(*MarkRecords)):
- allClasses = [r.Class for r in glyphRecords if r is not None]
-
- # TODO Right now we require that all marks have same class in
- # all masters that cover them. This is not required.
- #
- # We can relax that by just requiring that all marks that have
- # the same class in a master, have the same class in every other
- # master. Indeed, if, say, a sparse master only covers one mark,
- # that mark probably will get class 0, which would possibly be
- # different from its class in other masters.
- #
- # We can even go further and reclassify marks to support any
- # input. But, since, it's unlikely that two marks being both,
- # say, "top" in one master, and one being "top" and other being
- # "top-right" in another master, we shouldn't do that, as any
- # failures in that case will probably signify mistakes in the
- # input masters.
-
- if not allEqual(allClasses):
- raise ShouldBeConstant(merger, expected=allClasses[0], got=allClasses)
- else:
- rec = ot.MarkRecord()
- rec.Class = allClasses[0]
- allAnchors = [None if r is None else r.MarkAnchor for r in glyphRecords]
- if allNone(allAnchors):
- anchor = None
- else:
- anchor = ot.Anchor()
- anchor.Format = 1
- merger.mergeThings(anchor, allAnchors)
- rec.MarkAnchor = anchor
- records.append(rec)
- array = ot.MarkArray()
- array.MarkRecord = records
- array.MarkCount = len(records)
- setattr(self, Mark+"Array", array)
-
- # BaseArray
- records = []
- for g,glyphRecords in zip(BaseCoverageGlyphs, zip(*BaseRecords)):
- if allNone(glyphRecords):
- rec = None
- else:
- rec = getattr(ot, Base+'Record')()
- anchors = []
- setattr(rec, Base+'Anchor', anchors)
- glyphAnchors = [[] if r is None else getattr(r, Base+'Anchor')
- for r in glyphRecords]
- for l in glyphAnchors:
- l.extend([None] * (self.ClassCount - len(l)))
- for allAnchors in zip(*glyphAnchors):
- if allNone(allAnchors):
- anchor = None
- else:
- anchor = ot.Anchor()
- anchor.Format = 1
- merger.mergeThings(anchor, allAnchors)
- anchors.append(anchor)
- records.append(rec)
- array = getattr(ot, Base+'Array')()
- setattr(array, Base+'Record', records)
- setattr(array, Base+'Count', len(records))
- setattr(self, Base+'Array', array)
+ merger.valueFormat1 = self.ValueFormat1 = reduce(
+ int.__or__, [l.ValueFormat1 for l in lst], 0
+ )
+ merger.valueFormat2 = self.ValueFormat2 = reduce(
+ int.__or__, [l.ValueFormat2 for l in lst], 0
+ )
+
+ if self.Format == 1:
+ _PairPosFormat1_merge(self, lst, merger)
+ elif self.Format == 2:
+ _PairPosFormat2_merge(self, lst, merger)
+ else:
+ raise UnsupportedFormat(merger, subtable="pair positioning lookup")
+
+ del merger.valueFormat1, merger.valueFormat2
+
+ # Now examine the list of value records, and update to the union of format values,
+ # as merge might have created new values.
+ vf1 = 0
+ vf2 = 0
+ if self.Format == 1:
+ for pairSet in self.PairSet:
+ for pairValueRecord in pairSet.PairValueRecord:
+ pv1 = getattr(pairValueRecord, "Value1", None)
+ if pv1 is not None:
+ vf1 |= pv1.getFormat()
+ pv2 = getattr(pairValueRecord, "Value2", None)
+ if pv2 is not None:
+ vf2 |= pv2.getFormat()
+ elif self.Format == 2:
+ for class1Record in self.Class1Record:
+ for class2Record in class1Record.Class2Record:
+ pv1 = getattr(class2Record, "Value1", None)
+ if pv1 is not None:
+ vf1 |= pv1.getFormat()
+ pv2 = getattr(class2Record, "Value2", None)
+ if pv2 is not None:
+ vf2 |= pv2.getFormat()
+ self.ValueFormat1 = vf1
+ self.ValueFormat2 = vf2
+
+
+def _MarkBasePosFormat1_merge(self, lst, merger, Mark="Mark", Base="Base"):
+ self.ClassCount = max(l.ClassCount for l in lst)
+
+ MarkCoverageGlyphs, MarkRecords = _merge_GlyphOrders(
+ merger.font,
+ [getattr(l, Mark + "Coverage").glyphs for l in lst],
+ [getattr(l, Mark + "Array").MarkRecord for l in lst],
+ )
+ getattr(self, Mark + "Coverage").glyphs = MarkCoverageGlyphs
+
+ BaseCoverageGlyphs, BaseRecords = _merge_GlyphOrders(
+ merger.font,
+ [getattr(l, Base + "Coverage").glyphs for l in lst],
+ [getattr(getattr(l, Base + "Array"), Base + "Record") for l in lst],
+ )
+ getattr(self, Base + "Coverage").glyphs = BaseCoverageGlyphs
+
+ # MarkArray
+ records = []
+ for g, glyphRecords in zip(MarkCoverageGlyphs, zip(*MarkRecords)):
+ allClasses = [r.Class for r in glyphRecords if r is not None]
+
+ # TODO Right now we require that all marks have same class in
+ # all masters that cover them. This is not required.
+ #
+ # We can relax that by just requiring that all marks that have
+ # the same class in a master, have the same class in every other
+ # master. Indeed, if, say, a sparse master only covers one mark,
+ # that mark probably will get class 0, which would possibly be
+ # different from its class in other masters.
+ #
+ # We can even go further and reclassify marks to support any
+ # input. But, since, it's unlikely that two marks being both,
+ # say, "top" in one master, and one being "top" and other being
+ # "top-right" in another master, we shouldn't do that, as any
+ # failures in that case will probably signify mistakes in the
+ # input masters.
+
+ if not allEqual(allClasses):
+ raise ShouldBeConstant(merger, expected=allClasses[0], got=allClasses)
+ else:
+ rec = ot.MarkRecord()
+ rec.Class = allClasses[0]
+ allAnchors = [None if r is None else r.MarkAnchor for r in glyphRecords]
+ if allNone(allAnchors):
+ anchor = None
+ else:
+ anchor = ot.Anchor()
+ anchor.Format = 1
+ merger.mergeThings(anchor, allAnchors)
+ rec.MarkAnchor = anchor
+ records.append(rec)
+ array = ot.MarkArray()
+ array.MarkRecord = records
+ array.MarkCount = len(records)
+ setattr(self, Mark + "Array", array)
+
+ # BaseArray
+ records = []
+ for g, glyphRecords in zip(BaseCoverageGlyphs, zip(*BaseRecords)):
+ if allNone(glyphRecords):
+ rec = None
+ else:
+ rec = getattr(ot, Base + "Record")()
+ anchors = []
+ setattr(rec, Base + "Anchor", anchors)
+ glyphAnchors = [
+ [] if r is None else getattr(r, Base + "Anchor") for r in glyphRecords
+ ]
+ for l in glyphAnchors:
+ l.extend([None] * (self.ClassCount - len(l)))
+ for allAnchors in zip(*glyphAnchors):
+ if allNone(allAnchors):
+ anchor = None
+ else:
+ anchor = ot.Anchor()
+ anchor.Format = 1
+ merger.mergeThings(anchor, allAnchors)
+ anchors.append(anchor)
+ records.append(rec)
+ array = getattr(ot, Base + "Array")()
+ setattr(array, Base + "Record", records)
+ setattr(array, Base + "Count", len(records))
+ setattr(self, Base + "Array", array)
+
@AligningMerger.merger(ot.MarkBasePos)
def merge(merger, self, lst):
- if not allEqualTo(self.Format, (l.Format for l in lst)):
- raise InconsistentFormats(
- merger,
- subtable="mark-to-base positioning lookup",
- expected=self.Format,
- got=[l.Format for l in lst]
- )
- if self.Format == 1:
- _MarkBasePosFormat1_merge(self, lst, merger)
- else:
- raise UnsupportedFormat(merger, subtable="mark-to-base positioning lookup")
+ if not allEqualTo(self.Format, (l.Format for l in lst)):
+ raise InconsistentFormats(
+ merger,
+ subtable="mark-to-base positioning lookup",
+ expected=self.Format,
+ got=[l.Format for l in lst],
+ )
+ if self.Format == 1:
+ _MarkBasePosFormat1_merge(self, lst, merger)
+ else:
+ raise UnsupportedFormat(merger, subtable="mark-to-base positioning lookup")
+
@AligningMerger.merger(ot.MarkMarkPos)
def merge(merger, self, lst):
- if not allEqualTo(self.Format, (l.Format for l in lst)):
- raise InconsistentFormats(
- merger,
- subtable="mark-to-mark positioning lookup",
- expected=self.Format,
- got=[l.Format for l in lst]
- )
- if self.Format == 1:
- _MarkBasePosFormat1_merge(self, lst, merger, 'Mark1', 'Mark2')
- else:
- raise UnsupportedFormat(merger, subtable="mark-to-mark positioning lookup")
+ if not allEqualTo(self.Format, (l.Format for l in lst)):
+ raise InconsistentFormats(
+ merger,
+ subtable="mark-to-mark positioning lookup",
+ expected=self.Format,
+ got=[l.Format for l in lst],
+ )
+ if self.Format == 1:
+ _MarkBasePosFormat1_merge(self, lst, merger, "Mark1", "Mark2")
+ else:
+ raise UnsupportedFormat(merger, subtable="mark-to-mark positioning lookup")
-def _PairSet_flatten(lst, font):
- self = ot.PairSet()
- self.Coverage = ot.Coverage()
-
- # Align them
- glyphs, padded = _merge_GlyphOrders(font,
- [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
- [vs.PairValueRecord for vs in lst])
-
- self.Coverage.glyphs = glyphs
- self.PairValueRecord = pvrs = []
- for values in zip(*padded):
- for v in values:
- if v is not None:
- pvrs.append(v)
- break
- else:
- assert False
- self.PairValueCount = len(self.PairValueRecord)
-
- return self
-def _Lookup_PairPosFormat1_subtables_flatten(lst, font):
- assert allEqual([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools."
+def _PairSet_flatten(lst, font):
+ self = ot.PairSet()
+ self.Coverage = ot.Coverage()
+
+ # Align them
+ glyphs, padded = _merge_GlyphOrders(
+ font,
+ [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
+ [vs.PairValueRecord for vs in lst],
+ )
+
+ self.Coverage.glyphs = glyphs
+ self.PairValueRecord = pvrs = []
+ for values in zip(*padded):
+ for v in values:
+ if v is not None:
+ pvrs.append(v)
+ break
+ else:
+ assert False
+ self.PairValueCount = len(self.PairValueRecord)
+
+ return self
- self = ot.PairPos()
- self.Format = 1
- self.Coverage = ot.Coverage()
- self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
- self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)
- # Align them
- glyphs, padded = _merge_GlyphOrders(font,
- [v.Coverage.glyphs for v in lst],
- [v.PairSet for v in lst])
+def _Lookup_PairPosFormat1_subtables_flatten(lst, font):
+ assert allEqual(
+ [l.ValueFormat2 == 0 for l in lst if l.PairSet]
+ ), "Report bug against fonttools."
+
+ self = ot.PairPos()
+ self.Format = 1
+ self.Coverage = ot.Coverage()
+ self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
+ self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)
+
+ # Align them
+ glyphs, padded = _merge_GlyphOrders(
+ font, [v.Coverage.glyphs for v in lst], [v.PairSet for v in lst]
+ )
+
+ self.Coverage.glyphs = glyphs
+ self.PairSet = [
+ _PairSet_flatten([v for v in values if v is not None], font)
+ for values in zip(*padded)
+ ]
+ self.PairSetCount = len(self.PairSet)
+ return self
- self.Coverage.glyphs = glyphs
- self.PairSet = [_PairSet_flatten([v for v in values if v is not None], font)
- for values in zip(*padded)]
- self.PairSetCount = len(self.PairSet)
- return self
def _Lookup_PairPosFormat2_subtables_flatten(lst, font):
- assert allEqual([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools."
+ assert allEqual(
+ [l.ValueFormat2 == 0 for l in lst if l.Class1Record]
+ ), "Report bug against fonttools."
+
+ self = ot.PairPos()
+ self.Format = 2
+ self.Coverage = ot.Coverage()
+ self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
+ self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)
- self = ot.PairPos()
- self.Format = 2
- self.Coverage = ot.Coverage()
- self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
- self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)
+ # Align them
+ glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst])
+ self.Coverage.glyphs = glyphs
- # Align them
- glyphs, _ = _merge_GlyphOrders(font,
- [v.Coverage.glyphs for v in lst])
- self.Coverage.glyphs = glyphs
+ matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True)
- matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True)
+ matrix = self.Class1Record = []
+ for rows in zip(*matrices):
+ row = ot.Class1Record()
+ matrix.append(row)
+ row.Class2Record = []
+ row = row.Class2Record
+ for cols in zip(*list(r.Class2Record for r in rows)):
+ col = next(iter(c for c in cols if c is not None))
+ row.append(col)
- matrix = self.Class1Record = []
- for rows in zip(*matrices):
- row = ot.Class1Record()
- matrix.append(row)
- row.Class2Record = []
- row = row.Class2Record
- for cols in zip(*list(r.Class2Record for r in rows)):
- col = next(iter(c for c in cols if c is not None))
- row.append(col)
+ return self
- return self
def _Lookup_PairPos_subtables_canonicalize(lst, font):
- """Merge multiple Format1 subtables at the beginning of lst,
- and merge multiple consecutive Format2 subtables that have the same
- Class2 (ie. were split because of offset overflows). Returns new list."""
- lst = list(lst)
+ """Merge multiple Format1 subtables at the beginning of lst,
+ and merge multiple consecutive Format2 subtables that have the same
+ Class2 (ie. were split because of offset overflows). Returns new list."""
+ lst = list(lst)
- l = len(lst)
- i = 0
- while i < l and lst[i].Format == 1:
- i += 1
- lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)]
+ l = len(lst)
+ i = 0
+ while i < l and lst[i].Format == 1:
+ i += 1
+ lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)]
- l = len(lst)
- i = l
- while i > 0 and lst[i - 1].Format == 2:
- i -= 1
- lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)]
+ l = len(lst)
+ i = l
+ while i > 0 and lst[i - 1].Format == 2:
+ i -= 1
+ lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)]
+
+ return lst
- return lst
def _Lookup_SinglePos_subtables_flatten(lst, font, min_inclusive_rec_format):
- glyphs, _ = _merge_GlyphOrders(font,
- [v.Coverage.glyphs for v in lst], None)
- num_glyphs = len(glyphs)
- new = ot.SinglePos()
- new.Format = 2
- new.ValueFormat = min_inclusive_rec_format
- new.Coverage = ot.Coverage()
- new.Coverage.glyphs = glyphs
- new.ValueCount = num_glyphs
- new.Value = [None] * num_glyphs
- for singlePos in lst:
- if singlePos.Format == 1:
- val_rec = singlePos.Value
- for gname in singlePos.Coverage.glyphs:
- i = glyphs.index(gname)
- new.Value[i] = copy.deepcopy(val_rec)
- elif singlePos.Format == 2:
- for j, gname in enumerate(singlePos.Coverage.glyphs):
- val_rec = singlePos.Value[j]
- i = glyphs.index(gname)
- new.Value[i] = copy.deepcopy(val_rec)
- return [new]
+ glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst], None)
+ num_glyphs = len(glyphs)
+ new = ot.SinglePos()
+ new.Format = 2
+ new.ValueFormat = min_inclusive_rec_format
+ new.Coverage = ot.Coverage()
+ new.Coverage.glyphs = glyphs
+ new.ValueCount = num_glyphs
+ new.Value = [None] * num_glyphs
+ for singlePos in lst:
+ if singlePos.Format == 1:
+ val_rec = singlePos.Value
+ for gname in singlePos.Coverage.glyphs:
+ i = glyphs.index(gname)
+ new.Value[i] = copy.deepcopy(val_rec)
+ elif singlePos.Format == 2:
+ for j, gname in enumerate(singlePos.Coverage.glyphs):
+ val_rec = singlePos.Value[j]
+ i = glyphs.index(gname)
+ new.Value[i] = copy.deepcopy(val_rec)
+ return [new]
+
+
+@AligningMerger.merger(ot.CursivePos)
+def merge(merger, self, lst):
+ # Align them
+ glyphs, padded = _merge_GlyphOrders(
+ merger.font,
+ [l.Coverage.glyphs for l in lst],
+ [l.EntryExitRecord for l in lst],
+ )
+
+ self.Format = 1
+ self.Coverage = ot.Coverage()
+ self.Coverage.glyphs = glyphs
+ self.EntryExitRecord = []
+ for _ in glyphs:
+ rec = ot.EntryExitRecord()
+ rec.EntryAnchor = ot.Anchor()
+ rec.EntryAnchor.Format = 1
+ rec.ExitAnchor = ot.Anchor()
+ rec.ExitAnchor.Format = 1
+ self.EntryExitRecord.append(rec)
+ merger.mergeLists(self.EntryExitRecord, padded)
+ self.EntryExitCount = len(self.EntryExitRecord)
+
+
+@AligningMerger.merger(ot.EntryExitRecord)
+def merge(merger, self, lst):
+ if all(master.EntryAnchor is None for master in lst):
+ self.EntryAnchor = None
+ if all(master.ExitAnchor is None for master in lst):
+ self.ExitAnchor = None
+ merger.mergeObjects(self, lst)
+
@AligningMerger.merger(ot.Lookup)
def merge(merger, self, lst):
- subtables = merger.lookup_subtables = [l.SubTable for l in lst]
-
- # Remove Extension subtables
- for l,sts in list(zip(lst,subtables))+[(self,self.SubTable)]:
- if not sts:
- continue
- if sts[0].__class__.__name__.startswith('Extension'):
- if not allEqual([st.__class__ for st in sts]):
- raise InconsistentExtensions(
- merger,
- expected="Extension",
- got=[st.__class__.__name__ for st in sts]
- )
- if not allEqual([st.ExtensionLookupType for st in sts]):
- raise InconsistentExtensions(merger)
- l.LookupType = sts[0].ExtensionLookupType
- new_sts = [st.ExtSubTable for st in sts]
- del sts[:]
- sts.extend(new_sts)
-
- isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos)
-
- if isPairPos:
- # AFDKO and feaLib sometimes generate two Format1 subtables instead of one.
- # Merge those before continuing.
- # https://github.com/fonttools/fonttools/issues/719
- self.SubTable = _Lookup_PairPos_subtables_canonicalize(self.SubTable, merger.font)
- subtables = merger.lookup_subtables = [_Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables]
- else:
- isSinglePos = self.SubTable and isinstance(self.SubTable[0], ot.SinglePos)
- if isSinglePos:
- numSubtables = [len(st) for st in subtables]
- if not all([nums == numSubtables[0] for nums in numSubtables]):
- # Flatten list of SinglePos subtables to single Format 2 subtable,
- # with all value records set to the rec format type.
- # We use buildSinglePos() to optimize the lookup after merging.
- valueFormatList = [t.ValueFormat for st in subtables for t in st]
- # Find the minimum value record that can accomodate all the singlePos subtables.
- mirf = reduce(ior, valueFormatList)
- self.SubTable = _Lookup_SinglePos_subtables_flatten(self.SubTable, merger.font, mirf)
- subtables = merger.lookup_subtables = [
- _Lookup_SinglePos_subtables_flatten(st, merger.font, mirf) for st in subtables]
- flattened = True
- else:
- flattened = False
-
- merger.mergeLists(self.SubTable, subtables)
- self.SubTableCount = len(self.SubTable)
-
- if isPairPos:
- # If format-1 subtable created during canonicalization is empty, remove it.
- assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1
- if not self.SubTable[0].Coverage.glyphs:
- self.SubTable.pop(0)
- self.SubTableCount -= 1
-
- # If format-2 subtable created during canonicalization is empty, remove it.
- assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2
- if not self.SubTable[-1].Coverage.glyphs:
- self.SubTable.pop(-1)
- self.SubTableCount -= 1
-
- # Compact the merged subtables
- # This is a good moment to do it because the compaction should create
- # smaller subtables, which may prevent overflows from happening.
- # Keep reading the value from the ENV until ufo2ft switches to the config system
- level = merger.font.cfg.get(
- "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
- default=_compression_level_from_env(),
+ subtables = merger.lookup_subtables = [l.SubTable for l in lst]
+
+ # Remove Extension subtables
+ for l, sts in list(zip(lst, subtables)) + [(self, self.SubTable)]:
+ if not sts:
+ continue
+ if sts[0].__class__.__name__.startswith("Extension"):
+ if not allEqual([st.__class__ for st in sts]):
+ raise InconsistentExtensions(
+ merger,
+ expected="Extension",
+ got=[st.__class__.__name__ for st in sts],
+ )
+ if not allEqual([st.ExtensionLookupType for st in sts]):
+ raise InconsistentExtensions(merger)
+ l.LookupType = sts[0].ExtensionLookupType
+ new_sts = [st.ExtSubTable for st in sts]
+ del sts[:]
+ sts.extend(new_sts)
+
+ isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos)
+
+ if isPairPos:
+ # AFDKO and feaLib sometimes generate two Format1 subtables instead of one.
+ # Merge those before continuing.
+ # https://github.com/fonttools/fonttools/issues/719
+ self.SubTable = _Lookup_PairPos_subtables_canonicalize(
+ self.SubTable, merger.font
)
- if level != 0:
- log.info("Compacting GPOS...")
- self.SubTable = compact_pair_pos(merger.font, level, self.SubTable)
- self.SubTableCount = len(self.SubTable)
-
- elif isSinglePos and flattened:
- singlePosTable = self.SubTable[0]
- glyphs = singlePosTable.Coverage.glyphs
- # We know that singlePosTable is Format 2, as this is set
- # in _Lookup_SinglePos_subtables_flatten.
- singlePosMapping = {
- gname: valRecord
- for gname, valRecord in zip(glyphs, singlePosTable.Value)
- }
- self.SubTable = buildSinglePos(singlePosMapping, merger.font.getReverseGlyphMap())
- merger.mergeObjects(self, lst, exclude=['SubTable', 'SubTableCount'])
-
- del merger.lookup_subtables
+ subtables = merger.lookup_subtables = [
+ _Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables
+ ]
+ else:
+ isSinglePos = self.SubTable and isinstance(self.SubTable[0], ot.SinglePos)
+ if isSinglePos:
+ numSubtables = [len(st) for st in subtables]
+ if not all([nums == numSubtables[0] for nums in numSubtables]):
+ # Flatten list of SinglePos subtables to single Format 2 subtable,
+ # with all value records set to the rec format type.
+ # We use buildSinglePos() to optimize the lookup after merging.
+ valueFormatList = [t.ValueFormat for st in subtables for t in st]
+ # Find the minimum value record that can accomodate all the singlePos subtables.
+ mirf = reduce(ior, valueFormatList)
+ self.SubTable = _Lookup_SinglePos_subtables_flatten(
+ self.SubTable, merger.font, mirf
+ )
+ subtables = merger.lookup_subtables = [
+ _Lookup_SinglePos_subtables_flatten(st, merger.font, mirf)
+ for st in subtables
+ ]
+ flattened = True
+ else:
+ flattened = False
+
+ merger.mergeLists(self.SubTable, subtables)
+ self.SubTableCount = len(self.SubTable)
+
+ if isPairPos:
+ # If format-1 subtable created during canonicalization is empty, remove it.
+ assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1
+ if not self.SubTable[0].Coverage.glyphs:
+ self.SubTable.pop(0)
+ self.SubTableCount -= 1
+
+ # If format-2 subtable created during canonicalization is empty, remove it.
+ assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2
+ if not self.SubTable[-1].Coverage.glyphs:
+ self.SubTable.pop(-1)
+ self.SubTableCount -= 1
+
+ # Compact the merged subtables
+ # This is a good moment to do it because the compaction should create
+ # smaller subtables, which may prevent overflows from happening.
+ # Keep reading the value from the ENV until ufo2ft switches to the config system
+ level = merger.font.cfg.get(
+ "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
+ default=_compression_level_from_env(),
+ )
+ if level != 0:
+ log.info("Compacting GPOS...")
+ self.SubTable = compact_pair_pos(merger.font, level, self.SubTable)
+ self.SubTableCount = len(self.SubTable)
+
+ elif isSinglePos and flattened:
+ singlePosTable = self.SubTable[0]
+ glyphs = singlePosTable.Coverage.glyphs
+ # We know that singlePosTable is Format 2, as this is set
+ # in _Lookup_SinglePos_subtables_flatten.
+ singlePosMapping = {
+ gname: valRecord for gname, valRecord in zip(glyphs, singlePosTable.Value)
+ }
+ self.SubTable = buildSinglePos(
+ singlePosMapping, merger.font.getReverseGlyphMap()
+ )
+ merger.mergeObjects(self, lst, exclude=["SubTable", "SubTableCount"])
+
+ del merger.lookup_subtables
+
#
# InstancerMerger
#
+
class InstancerMerger(AligningMerger):
- """A merger that takes multiple master fonts, and instantiates
- an instance."""
+ """A merger that takes multiple master fonts, and instantiates
+ an instance."""
+
+ def __init__(self, font, model, location):
+ Merger.__init__(self, font)
+ self.model = model
+ self.location = location
+ self.scalars = model.getScalars(location)
- def __init__(self, font, model, location):
- Merger.__init__(self, font)
- self.model = model
- self.location = location
- self.scalars = model.getScalars(location)
@InstancerMerger.merger(ot.CaretValue)
def merge(merger, self, lst):
- assert self.Format == 1
- Coords = [a.Coordinate for a in lst]
- model = merger.model
- scalars = merger.scalars
- self.Coordinate = otRound(model.interpolateFromMastersAndScalars(Coords, scalars))
+ assert self.Format == 1
+ Coords = [a.Coordinate for a in lst]
+ model = merger.model
+ scalars = merger.scalars
+ self.Coordinate = otRound(model.interpolateFromMastersAndScalars(Coords, scalars))
+
@InstancerMerger.merger(ot.Anchor)
def merge(merger, self, lst):
- assert self.Format == 1
- XCoords = [a.XCoordinate for a in lst]
- YCoords = [a.YCoordinate for a in lst]
- model = merger.model
- scalars = merger.scalars
- self.XCoordinate = otRound(model.interpolateFromMastersAndScalars(XCoords, scalars))
- self.YCoordinate = otRound(model.interpolateFromMastersAndScalars(YCoords, scalars))
+ assert self.Format == 1
+ XCoords = [a.XCoordinate for a in lst]
+ YCoords = [a.YCoordinate for a in lst]
+ model = merger.model
+ scalars = merger.scalars
+ self.XCoordinate = otRound(model.interpolateFromMastersAndScalars(XCoords, scalars))
+ self.YCoordinate = otRound(model.interpolateFromMastersAndScalars(YCoords, scalars))
+
@InstancerMerger.merger(otBase.ValueRecord)
def merge(merger, self, lst):
- model = merger.model
- scalars = merger.scalars
- # TODO Handle differing valueformats
- for name, tableName in [('XAdvance','XAdvDevice'),
- ('YAdvance','YAdvDevice'),
- ('XPlacement','XPlaDevice'),
- ('YPlacement','YPlaDevice')]:
-
- assert not hasattr(self, tableName)
-
- if hasattr(self, name):
- values = [getattr(a, name, 0) for a in lst]
- value = otRound(model.interpolateFromMastersAndScalars(values, scalars))
- setattr(self, name, value)
+ model = merger.model
+ scalars = merger.scalars
+ # TODO Handle differing valueformats
+ for name, tableName in [
+ ("XAdvance", "XAdvDevice"),
+ ("YAdvance", "YAdvDevice"),
+ ("XPlacement", "XPlaDevice"),
+ ("YPlacement", "YPlaDevice"),
+ ]:
+ assert not hasattr(self, tableName)
+
+ if hasattr(self, name):
+ values = [getattr(a, name, 0) for a in lst]
+ value = otRound(model.interpolateFromMastersAndScalars(values, scalars))
+ setattr(self, name, value)
#
# MutatorMerger
#
+
class MutatorMerger(AligningMerger):
- """A merger that takes a variable font, and instantiates
- an instance. While there's no "merging" to be done per se,
- the operation can benefit from many operations that the
- aligning merger does."""
+ """A merger that takes a variable font, and instantiates
+ an instance. While there's no "merging" to be done per se,
+ the operation can benefit from many operations that the
+ aligning merger does."""
+
+ def __init__(self, font, instancer, deleteVariations=True):
+ Merger.__init__(self, font)
+ self.instancer = instancer
+ self.deleteVariations = deleteVariations
- def __init__(self, font, instancer, deleteVariations=True):
- Merger.__init__(self, font)
- self.instancer = instancer
- self.deleteVariations = deleteVariations
@MutatorMerger.merger(ot.CaretValue)
def merge(merger, self, lst):
+ # Hack till we become selfless.
+ self.__dict__ = lst[0].__dict__.copy()
- # Hack till we become selfless.
- self.__dict__ = lst[0].__dict__.copy()
+ if self.Format != 3:
+ return
- if self.Format != 3:
- return
+ instancer = merger.instancer
+ dev = self.DeviceTable
+ if merger.deleteVariations:
+ del self.DeviceTable
+ if dev:
+ assert dev.DeltaFormat == 0x8000
+ varidx = (dev.StartSize << 16) + dev.EndSize
+ delta = otRound(instancer[varidx])
+ self.Coordinate += delta
- instancer = merger.instancer
- dev = self.DeviceTable
- if merger.deleteVariations:
- del self.DeviceTable
- if dev:
- assert dev.DeltaFormat == 0x8000
- varidx = (dev.StartSize << 16) + dev.EndSize
- delta = otRound(instancer[varidx])
- self.Coordinate += delta
+ if merger.deleteVariations:
+ self.Format = 1
- if merger.deleteVariations:
- self.Format = 1
@MutatorMerger.merger(ot.Anchor)
def merge(merger, self, lst):
+ # Hack till we become selfless.
+ self.__dict__ = lst[0].__dict__.copy()
- # Hack till we become selfless.
- self.__dict__ = lst[0].__dict__.copy()
+ if self.Format != 3:
+ return
- if self.Format != 3:
- return
+ instancer = merger.instancer
+ for v in "XY":
+ tableName = v + "DeviceTable"
+ if not hasattr(self, tableName):
+ continue
+ dev = getattr(self, tableName)
+ if merger.deleteVariations:
+ delattr(self, tableName)
+ if dev is None:
+ continue
- instancer = merger.instancer
- for v in "XY":
- tableName = v+'DeviceTable'
- if not hasattr(self, tableName):
- continue
- dev = getattr(self, tableName)
- if merger.deleteVariations:
- delattr(self, tableName)
- if dev is None:
- continue
+ assert dev.DeltaFormat == 0x8000
+ varidx = (dev.StartSize << 16) + dev.EndSize
+ delta = otRound(instancer[varidx])
- assert dev.DeltaFormat == 0x8000
- varidx = (dev.StartSize << 16) + dev.EndSize
- delta = otRound(instancer[varidx])
+ attr = v + "Coordinate"
+ setattr(self, attr, getattr(self, attr) + delta)
- attr = v+'Coordinate'
- setattr(self, attr, getattr(self, attr) + delta)
+ if merger.deleteVariations:
+ self.Format = 1
- if merger.deleteVariations:
- self.Format = 1
@MutatorMerger.merger(otBase.ValueRecord)
def merge(merger, self, lst):
-
- # Hack till we become selfless.
- self.__dict__ = lst[0].__dict__.copy()
-
- instancer = merger.instancer
- for name, tableName in [('XAdvance','XAdvDevice'),
- ('YAdvance','YAdvDevice'),
- ('XPlacement','XPlaDevice'),
- ('YPlacement','YPlaDevice')]:
-
- if not hasattr(self, tableName):
- continue
- dev = getattr(self, tableName)
- if merger.deleteVariations:
- delattr(self, tableName)
- if dev is None:
- continue
-
- assert dev.DeltaFormat == 0x8000
- varidx = (dev.StartSize << 16) + dev.EndSize
- delta = otRound(instancer[varidx])
-
- setattr(self, name, getattr(self, name, 0) + delta)
+ # Hack till we become selfless.
+ self.__dict__ = lst[0].__dict__.copy()
+
+ instancer = merger.instancer
+ for name, tableName in [
+ ("XAdvance", "XAdvDevice"),
+ ("YAdvance", "YAdvDevice"),
+ ("XPlacement", "XPlaDevice"),
+ ("YPlacement", "YPlaDevice"),
+ ]:
+ if not hasattr(self, tableName):
+ continue
+ dev = getattr(self, tableName)
+ if merger.deleteVariations:
+ delattr(self, tableName)
+ if dev is None:
+ continue
+
+ assert dev.DeltaFormat == 0x8000
+ varidx = (dev.StartSize << 16) + dev.EndSize
+ delta = otRound(instancer[varidx])
+
+ setattr(self, name, getattr(self, name, 0) + delta)
#
# VariationMerger
#
+
class VariationMerger(AligningMerger):
- """A merger that takes multiple master fonts, and builds a
- variable font."""
+ """A merger that takes multiple master fonts, and builds a
+ variable font."""
- def __init__(self, model, axisTags, font):
- Merger.__init__(self, font)
- self.store_builder = varStore.OnlineVarStoreBuilder(axisTags)
- self.setModel(model)
+ def __init__(self, model, axisTags, font):
+ Merger.__init__(self, font)
+ self.store_builder = varStore.OnlineVarStoreBuilder(axisTags)
+ self.setModel(model)
- def setModel(self, model):
- self.model = model
- self.store_builder.setModel(model)
+ def setModel(self, model):
+ self.model = model
+ self.store_builder.setModel(model)
- def mergeThings(self, out, lst):
- masterModel = None
- origTTFs = None
- if None in lst:
- if allNone(lst):
- if out is not None:
- raise FoundANone(self, got=lst)
- return
+ def mergeThings(self, out, lst):
+ masterModel = None
+ origTTFs = None
+ if None in lst:
+ if allNone(lst):
+ if out is not None:
+ raise FoundANone(self, got=lst)
+ return
- # temporarily subset the list of master ttfs to the ones for which
- # master values are not None
- origTTFs = self.ttfs
- if self.ttfs:
- self.ttfs = subList([v is not None for v in lst], self.ttfs)
+ # temporarily subset the list of master ttfs to the ones for which
+ # master values are not None
+ origTTFs = self.ttfs
+ if self.ttfs:
+ self.ttfs = subList([v is not None for v in lst], self.ttfs)
- masterModel = self.model
- model, lst = masterModel.getSubModel(lst)
- self.setModel(model)
+ masterModel = self.model
+ model, lst = masterModel.getSubModel(lst)
+ self.setModel(model)
- super(VariationMerger, self).mergeThings(out, lst)
+ super(VariationMerger, self).mergeThings(out, lst)
- if masterModel:
- self.setModel(masterModel)
- if origTTFs:
- self.ttfs = origTTFs
+ if masterModel:
+ self.setModel(masterModel)
+ if origTTFs:
+ self.ttfs = origTTFs
def buildVarDevTable(store_builder, master_values):
- if allEqual(master_values):
- return master_values[0], None
- base, varIdx = store_builder.storeMasters(master_values)
- return base, builder.buildVarDevTable(varIdx)
+ if allEqual(master_values):
+ return master_values[0], None
+ base, varIdx = store_builder.storeMasters(master_values)
+ return base, builder.buildVarDevTable(varIdx)
+
@VariationMerger.merger(ot.BaseCoord)
def merge(merger, self, lst):
- if self.Format != 1:
- raise UnsupportedFormat(merger, subtable="a baseline coordinate")
- self.Coordinate, DeviceTable = buildVarDevTable(merger.store_builder, [a.Coordinate for a in lst])
- if DeviceTable:
- self.Format = 3
- self.DeviceTable = DeviceTable
+ if self.Format != 1:
+ raise UnsupportedFormat(merger, subtable="a baseline coordinate")
+ self.Coordinate, DeviceTable = buildVarDevTable(
+ merger.store_builder, [a.Coordinate for a in lst]
+ )
+ if DeviceTable:
+ self.Format = 3
+ self.DeviceTable = DeviceTable
+
@VariationMerger.merger(ot.CaretValue)
def merge(merger, self, lst):
- if self.Format != 1:
- raise UnsupportedFormat(merger, subtable="a caret")
- self.Coordinate, DeviceTable = buildVarDevTable(merger.store_builder, [a.Coordinate for a in lst])
- if DeviceTable:
- self.Format = 3
- self.DeviceTable = DeviceTable
+ if self.Format != 1:
+ raise UnsupportedFormat(merger, subtable="a caret")
+ self.Coordinate, DeviceTable = buildVarDevTable(
+ merger.store_builder, [a.Coordinate for a in lst]
+ )
+ if DeviceTable:
+ self.Format = 3
+ self.DeviceTable = DeviceTable
+
@VariationMerger.merger(ot.Anchor)
def merge(merger, self, lst):
- if self.Format != 1:
- raise UnsupportedFormat(merger, subtable="an anchor")
- self.XCoordinate, XDeviceTable = buildVarDevTable(merger.store_builder, [a.XCoordinate for a in lst])
- self.YCoordinate, YDeviceTable = buildVarDevTable(merger.store_builder, [a.YCoordinate for a in lst])
- if XDeviceTable or YDeviceTable:
- self.Format = 3
- self.XDeviceTable = XDeviceTable
- self.YDeviceTable = YDeviceTable
+ if self.Format != 1:
+ raise UnsupportedFormat(merger, subtable="an anchor")
+ self.XCoordinate, XDeviceTable = buildVarDevTable(
+ merger.store_builder, [a.XCoordinate for a in lst]
+ )
+ self.YCoordinate, YDeviceTable = buildVarDevTable(
+ merger.store_builder, [a.YCoordinate for a in lst]
+ )
+ if XDeviceTable or YDeviceTable:
+ self.Format = 3
+ self.XDeviceTable = XDeviceTable
+ self.YDeviceTable = YDeviceTable
+
@VariationMerger.merger(otBase.ValueRecord)
def merge(merger, self, lst):
- for name, tableName in [('XAdvance','XAdvDevice'),
- ('YAdvance','YAdvDevice'),
- ('XPlacement','XPlaDevice'),
- ('YPlacement','YPlaDevice')]:
-
- if hasattr(self, name):
- value, deviceTable = buildVarDevTable(merger.store_builder,
- [getattr(a, name, 0) for a in lst])
- setattr(self, name, value)
- if deviceTable:
- setattr(self, tableName, deviceTable)
+ for name, tableName in [
+ ("XAdvance", "XAdvDevice"),
+ ("YAdvance", "YAdvDevice"),
+ ("XPlacement", "XPlaDevice"),
+ ("YPlacement", "YPlaDevice"),
+ ]:
+ if hasattr(self, name):
+ value, deviceTable = buildVarDevTable(
+ merger.store_builder, [getattr(a, name, 0) for a in lst]
+ )
+ setattr(self, name, value)
+ if deviceTable:
+ setattr(self, tableName, deviceTable)
class COLRVariationMerger(VariationMerger):
- """A specialized VariationMerger that takes multiple master fonts containing
- COLRv1 tables, and builds a variable COLR font.
-
- COLR tables are special in that variable subtables can be associated with
- multiple delta-set indices (via VarIndexBase).
- They also contain tables that must change their type (not simply the Format)
- as they become variable (e.g. Affine2x3 -> VarAffine2x3) so this merger takes
- care of that too.
- """
-
- def __init__(self, model, axisTags, font, allowLayerReuse=True):
- VariationMerger.__init__(self, model, axisTags, font)
- # maps {tuple(varIdxes): VarIndexBase} to facilitate reuse of VarIndexBase
- # between variable tables with same varIdxes.
- self.varIndexCache = {}
- # flat list of all the varIdxes generated while merging
- self.varIdxes = []
- # set of id()s of the subtables that contain variations after merging
- # and need to be upgraded to the associated VarType.
- self.varTableIds = set()
- # we keep these around for rebuilding a LayerList while merging PaintColrLayers
- self.layers = []
- self.layerReuseCache = None
- if allowLayerReuse:
- self.layerReuseCache = LayerReuseCache()
- # flag to ensure BaseGlyphList is fully merged before LayerList gets processed
- self._doneBaseGlyphs = False
-
- def mergeTables(self, font, master_ttfs, tableTags=("COLR",)):
- if "COLR" in tableTags and "COLR" in font:
- # The merger modifies the destination COLR table in-place. If this contains
- # multiple PaintColrLayers referencing the same layers from LayerList, it's
- # a problem because we may risk modifying the same paint more than once, or
- # worse, fail while attempting to do that.
- # We don't know whether the master COLR table was built with layer reuse
- # disabled, thus to be safe we rebuild its LayerList so that it contains only
- # unique layers referenced from non-overlapping PaintColrLayers throughout
- # the base paint graphs.
- self.expandPaintColrLayers(font["COLR"].table)
- VariationMerger.mergeTables(self, font, master_ttfs, tableTags)
-
- def checkFormatEnum(self, out, lst, validate=lambda _: True):
- fmt = out.Format
- formatEnum = out.formatEnum
- ok = False
- try:
- fmt = formatEnum(fmt)
- except ValueError:
- pass
- else:
- ok = validate(fmt)
- if not ok:
- raise UnsupportedFormat(
- self, subtable=type(out).__name__, value=fmt
- )
- expected = fmt
- got = []
- for v in lst:
- fmt = getattr(v, "Format", None)
- try:
- fmt = formatEnum(fmt)
- except ValueError:
- pass
- got.append(fmt)
- if not allEqualTo(expected, got):
- raise InconsistentFormats(
- self,
- subtable=type(out).__name__,
- expected=expected,
- got=got,
- )
- return expected
-
- def mergeSparseDict(self, out, lst):
- for k in out.keys():
- try:
- self.mergeThings(out[k], [v.get(k) for v in lst])
- except VarLibMergeError as e:
- e.stack.append(f"[{k!r}]")
- raise
-
- def mergeAttrs(self, out, lst, attrs):
- for attr in attrs:
- value = getattr(out, attr)
- values = [getattr(item, attr) for item in lst]
- try:
- self.mergeThings(value, values)
- except VarLibMergeError as e:
- e.stack.append(f".{attr}")
- raise
-
- def storeMastersForAttr(self, out, lst, attr):
- master_values = [getattr(item, attr) for item in lst]
-
- # VarStore treats deltas for fixed-size floats as integers, so we
- # must convert master values to int before storing them in the builder
- # then back to float.
- is_fixed_size_float = False
- conv = out.getConverterByName(attr)
- if isinstance(conv, BaseFixedValue):
- is_fixed_size_float = True
- master_values = [conv.toInt(v) for v in master_values]
-
- baseValue = master_values[0]
- varIdx = ot.NO_VARIATION_INDEX
- if not allEqual(master_values):
- baseValue, varIdx = self.store_builder.storeMasters(master_values)
-
- if is_fixed_size_float:
- baseValue = conv.fromInt(baseValue)
-
- return baseValue, varIdx
-
- def storeVariationIndices(self, varIdxes) -> int:
- # try to reuse an existing VarIndexBase for the same varIdxes, or else
- # create a new one
- key = tuple(varIdxes)
- varIndexBase = self.varIndexCache.get(key)
-
- if varIndexBase is None:
- # scan for a full match anywhere in the self.varIdxes
- for i in range(len(self.varIdxes) - len(varIdxes) + 1):
- if self.varIdxes[i:i+len(varIdxes)] == varIdxes:
- self.varIndexCache[key] = varIndexBase = i
- break
-
- if varIndexBase is None:
- # try find a partial match at the end of the self.varIdxes
- for n in range(len(varIdxes)-1, 0, -1):
- if self.varIdxes[-n:] == varIdxes[:n]:
- varIndexBase = len(self.varIdxes) - n
- self.varIndexCache[key] = varIndexBase
- self.varIdxes.extend(varIdxes[n:])
- break
-
- if varIndexBase is None:
- # no match found, append at the end
- self.varIndexCache[key] = varIndexBase = len(self.varIdxes)
- self.varIdxes.extend(varIdxes)
-
- return varIndexBase
-
- def mergeVariableAttrs(self, out, lst, attrs) -> int:
- varIndexBase = ot.NO_VARIATION_INDEX
- varIdxes = []
- for attr in attrs:
- baseValue, varIdx = self.storeMastersForAttr(out, lst, attr)
- setattr(out, attr, baseValue)
- varIdxes.append(varIdx)
-
- if any(v != ot.NO_VARIATION_INDEX for v in varIdxes):
- varIndexBase = self.storeVariationIndices(varIdxes)
-
- return varIndexBase
-
- @classmethod
- def convertSubTablesToVarType(cls, table):
- for path in dfs_base_table(
- table,
- skip_root=True,
- predicate=lambda path: (
- getattr(type(path[-1].value), "VarType", None) is not None
- )
- ):
- st = path[-1]
- subTable = st.value
- varType = type(subTable).VarType
- newSubTable = varType()
- newSubTable.__dict__.update(subTable.__dict__)
- newSubTable.populateDefaults()
- parent = path[-2].value
- if st.index is not None:
- getattr(parent, st.name)[st.index] = newSubTable
- else:
- setattr(parent, st.name, newSubTable)
-
- @staticmethod
- def expandPaintColrLayers(colr):
- """Rebuild LayerList without PaintColrLayers reuse.
-
- Each base paint graph is fully DFS-traversed (with exception of PaintColrGlyph
- which are irrelevant for this); any layers referenced via PaintColrLayers are
- collected into a new LayerList and duplicated when reuse is detected, to ensure
- that all paints are distinct objects at the end of the process.
- PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap
- is left. Also, any consecutively nested PaintColrLayers are flattened.
- The COLR table's LayerList is replaced with the new unique layers.
- A side effect is also that any layer from the old LayerList which is not
- referenced by any PaintColrLayers is dropped.
- """
- if not colr.LayerList:
- # if no LayerList, there's nothing to expand
- return
- uniqueLayerIDs = set()
- newLayerList = []
- for rec in colr.BaseGlyphList.BaseGlyphPaintRecord:
- frontier = [rec.Paint]
- while frontier:
- paint = frontier.pop()
- if paint.Format == ot.PaintFormat.PaintColrGlyph:
- # don't traverse these, we treat them as constant for merging
- continue
- elif paint.Format == ot.PaintFormat.PaintColrLayers:
- # de-treeify any nested PaintColrLayers, append unique copies to
- # the new layer list and update PaintColrLayers index/count
- children = list(_flatten_layers(paint, colr))
- first_layer_index = len(newLayerList)
- for layer in children:
- if id(layer) in uniqueLayerIDs:
- layer = copy.deepcopy(layer)
- assert id(layer) not in uniqueLayerIDs
- newLayerList.append(layer)
- uniqueLayerIDs.add(id(layer))
- paint.FirstLayerIndex = first_layer_index
- paint.NumLayers = len(children)
- else:
- children = paint.getChildren(colr)
- frontier.extend(reversed(children))
- # sanity check all the new layers are distinct objects
- assert len(newLayerList) == len(uniqueLayerIDs)
- colr.LayerList.Paint = newLayerList
- colr.LayerList.LayerCount = len(newLayerList)
+ """A specialized VariationMerger that takes multiple master fonts containing
+ COLRv1 tables, and builds a variable COLR font.
+
+ COLR tables are special in that variable subtables can be associated with
+ multiple delta-set indices (via VarIndexBase).
+ They also contain tables that must change their type (not simply the Format)
+ as they become variable (e.g. Affine2x3 -> VarAffine2x3) so this merger takes
+ care of that too.
+ """
+
+ def __init__(self, model, axisTags, font, allowLayerReuse=True):
+ VariationMerger.__init__(self, model, axisTags, font)
+ # maps {tuple(varIdxes): VarIndexBase} to facilitate reuse of VarIndexBase
+ # between variable tables with same varIdxes.
+ self.varIndexCache = {}
+ # flat list of all the varIdxes generated while merging
+ self.varIdxes = []
+ # set of id()s of the subtables that contain variations after merging
+ # and need to be upgraded to the associated VarType.
+ self.varTableIds = set()
+ # we keep these around for rebuilding a LayerList while merging PaintColrLayers
+ self.layers = []
+ self.layerReuseCache = None
+ if allowLayerReuse:
+ self.layerReuseCache = LayerReuseCache()
+ # flag to ensure BaseGlyphList is fully merged before LayerList gets processed
+ self._doneBaseGlyphs = False
+
+ def mergeTables(self, font, master_ttfs, tableTags=("COLR",)):
+ if "COLR" in tableTags and "COLR" in font:
+ # The merger modifies the destination COLR table in-place. If this contains
+ # multiple PaintColrLayers referencing the same layers from LayerList, it's
+ # a problem because we may risk modifying the same paint more than once, or
+ # worse, fail while attempting to do that.
+ # We don't know whether the master COLR table was built with layer reuse
+ # disabled, thus to be safe we rebuild its LayerList so that it contains only
+ # unique layers referenced from non-overlapping PaintColrLayers throughout
+ # the base paint graphs.
+ self.expandPaintColrLayers(font["COLR"].table)
+ VariationMerger.mergeTables(self, font, master_ttfs, tableTags)
+
+ def checkFormatEnum(self, out, lst, validate=lambda _: True):
+ fmt = out.Format
+ formatEnum = out.formatEnum
+ ok = False
+ try:
+ fmt = formatEnum(fmt)
+ except ValueError:
+ pass
+ else:
+ ok = validate(fmt)
+ if not ok:
+ raise UnsupportedFormat(self, subtable=type(out).__name__, value=fmt)
+ expected = fmt
+ got = []
+ for v in lst:
+ fmt = getattr(v, "Format", None)
+ try:
+ fmt = formatEnum(fmt)
+ except ValueError:
+ pass
+ got.append(fmt)
+ if not allEqualTo(expected, got):
+ raise InconsistentFormats(
+ self,
+ subtable=type(out).__name__,
+ expected=expected,
+ got=got,
+ )
+ return expected
+
+ def mergeSparseDict(self, out, lst):
+ for k in out.keys():
+ try:
+ self.mergeThings(out[k], [v.get(k) for v in lst])
+ except VarLibMergeError as e:
+ e.stack.append(f"[{k!r}]")
+ raise
+
+ def mergeAttrs(self, out, lst, attrs):
+ for attr in attrs:
+ value = getattr(out, attr)
+ values = [getattr(item, attr) for item in lst]
+ try:
+ self.mergeThings(value, values)
+ except VarLibMergeError as e:
+ e.stack.append(f".{attr}")
+ raise
+
+ def storeMastersForAttr(self, out, lst, attr):
+ master_values = [getattr(item, attr) for item in lst]
+
+ # VarStore treats deltas for fixed-size floats as integers, so we
+ # must convert master values to int before storing them in the builder
+ # then back to float.
+ is_fixed_size_float = False
+ conv = out.getConverterByName(attr)
+ if isinstance(conv, BaseFixedValue):
+ is_fixed_size_float = True
+ master_values = [conv.toInt(v) for v in master_values]
+
+ baseValue = master_values[0]
+ varIdx = ot.NO_VARIATION_INDEX
+ if not allEqual(master_values):
+ baseValue, varIdx = self.store_builder.storeMasters(master_values)
+
+ if is_fixed_size_float:
+ baseValue = conv.fromInt(baseValue)
+
+ return baseValue, varIdx
+
+ def storeVariationIndices(self, varIdxes) -> int:
+ # try to reuse an existing VarIndexBase for the same varIdxes, or else
+ # create a new one
+ key = tuple(varIdxes)
+ varIndexBase = self.varIndexCache.get(key)
+
+ if varIndexBase is None:
+ # scan for a full match anywhere in the self.varIdxes
+ for i in range(len(self.varIdxes) - len(varIdxes) + 1):
+ if self.varIdxes[i : i + len(varIdxes)] == varIdxes:
+ self.varIndexCache[key] = varIndexBase = i
+ break
+
+ if varIndexBase is None:
+ # try find a partial match at the end of the self.varIdxes
+ for n in range(len(varIdxes) - 1, 0, -1):
+ if self.varIdxes[-n:] == varIdxes[:n]:
+ varIndexBase = len(self.varIdxes) - n
+ self.varIndexCache[key] = varIndexBase
+ self.varIdxes.extend(varIdxes[n:])
+ break
+
+ if varIndexBase is None:
+ # no match found, append at the end
+ self.varIndexCache[key] = varIndexBase = len(self.varIdxes)
+ self.varIdxes.extend(varIdxes)
+
+ return varIndexBase
+
+ def mergeVariableAttrs(self, out, lst, attrs) -> int:
+ varIndexBase = ot.NO_VARIATION_INDEX
+ varIdxes = []
+ for attr in attrs:
+ baseValue, varIdx = self.storeMastersForAttr(out, lst, attr)
+ setattr(out, attr, baseValue)
+ varIdxes.append(varIdx)
+
+ if any(v != ot.NO_VARIATION_INDEX for v in varIdxes):
+ varIndexBase = self.storeVariationIndices(varIdxes)
+
+ return varIndexBase
+
+ @classmethod
+ def convertSubTablesToVarType(cls, table):
+ for path in dfs_base_table(
+ table,
+ skip_root=True,
+ predicate=lambda path: (
+ getattr(type(path[-1].value), "VarType", None) is not None
+ ),
+ ):
+ st = path[-1]
+ subTable = st.value
+ varType = type(subTable).VarType
+ newSubTable = varType()
+ newSubTable.__dict__.update(subTable.__dict__)
+ newSubTable.populateDefaults()
+ parent = path[-2].value
+ if st.index is not None:
+ getattr(parent, st.name)[st.index] = newSubTable
+ else:
+ setattr(parent, st.name, newSubTable)
+
+ @staticmethod
+ def expandPaintColrLayers(colr):
+ """Rebuild LayerList without PaintColrLayers reuse.
+
+ Each base paint graph is fully DFS-traversed (with exception of PaintColrGlyph
+ which are irrelevant for this); any layers referenced via PaintColrLayers are
+ collected into a new LayerList and duplicated when reuse is detected, to ensure
+ that all paints are distinct objects at the end of the process.
+ PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap
+ is left. Also, any consecutively nested PaintColrLayers are flattened.
+ The COLR table's LayerList is replaced with the new unique layers.
+ A side effect is also that any layer from the old LayerList which is not
+ referenced by any PaintColrLayers is dropped.
+ """
+ if not colr.LayerList:
+ # if no LayerList, there's nothing to expand
+ return
+ uniqueLayerIDs = set()
+ newLayerList = []
+ for rec in colr.BaseGlyphList.BaseGlyphPaintRecord:
+ frontier = [rec.Paint]
+ while frontier:
+ paint = frontier.pop()
+ if paint.Format == ot.PaintFormat.PaintColrGlyph:
+ # don't traverse these, we treat them as constant for merging
+ continue
+ elif paint.Format == ot.PaintFormat.PaintColrLayers:
+ # de-treeify any nested PaintColrLayers, append unique copies to
+ # the new layer list and update PaintColrLayers index/count
+ children = list(_flatten_layers(paint, colr))
+ first_layer_index = len(newLayerList)
+ for layer in children:
+ if id(layer) in uniqueLayerIDs:
+ layer = copy.deepcopy(layer)
+ assert id(layer) not in uniqueLayerIDs
+ newLayerList.append(layer)
+ uniqueLayerIDs.add(id(layer))
+ paint.FirstLayerIndex = first_layer_index
+ paint.NumLayers = len(children)
+ else:
+ children = paint.getChildren(colr)
+ frontier.extend(reversed(children))
+ # sanity check all the new layers are distinct objects
+ assert len(newLayerList) == len(uniqueLayerIDs)
+ colr.LayerList.Paint = newLayerList
+ colr.LayerList.LayerCount = len(newLayerList)
@COLRVariationMerger.merger(ot.BaseGlyphList)
def merge(merger, self, lst):
- # ignore BaseGlyphCount, allow sparse glyph sets across masters
- out = {rec.BaseGlyph: rec for rec in self.BaseGlyphPaintRecord}
- masters = [{rec.BaseGlyph: rec for rec in m.BaseGlyphPaintRecord} for m in lst]
+ # ignore BaseGlyphCount, allow sparse glyph sets across masters
+ out = {rec.BaseGlyph: rec for rec in self.BaseGlyphPaintRecord}
+ masters = [{rec.BaseGlyph: rec for rec in m.BaseGlyphPaintRecord} for m in lst]
- for i, g in enumerate(out.keys()):
- try:
- # missing base glyphs don't participate in the merge
- merger.mergeThings(out[g], [v.get(g) for v in masters])
- except VarLibMergeError as e:
- e.stack.append(f".BaseGlyphPaintRecord[{i}]")
- e.cause["location"] = f"base glyph {g!r}"
- raise
+ for i, g in enumerate(out.keys()):
+ try:
+ # missing base glyphs don't participate in the merge
+ merger.mergeThings(out[g], [v.get(g) for v in masters])
+ except VarLibMergeError as e:
+ e.stack.append(f".BaseGlyphPaintRecord[{i}]")
+ e.cause["location"] = f"base glyph {g!r}"
+ raise
- merger._doneBaseGlyphs = True
+ merger._doneBaseGlyphs = True
@COLRVariationMerger.merger(ot.LayerList)
def merge(merger, self, lst):
- # nothing to merge for LayerList, assuming we have already merged all PaintColrLayers
- # found while traversing the paint graphs rooted at BaseGlyphPaintRecords.
- assert merger._doneBaseGlyphs, "BaseGlyphList must be merged before LayerList"
- # Simply flush the final list of layers and go home.
- self.LayerCount = len(merger.layers)
- self.Paint = merger.layers
+ # nothing to merge for LayerList, assuming we have already merged all PaintColrLayers
+ # found while traversing the paint graphs rooted at BaseGlyphPaintRecords.
+ assert merger._doneBaseGlyphs, "BaseGlyphList must be merged before LayerList"
+ # Simply flush the final list of layers and go home.
+ self.LayerCount = len(merger.layers)
+ self.Paint = merger.layers
def _flatten_layers(root, colr):
- assert root.Format == ot.PaintFormat.PaintColrLayers
- for paint in root.getChildren(colr):
- if paint.Format == ot.PaintFormat.PaintColrLayers:
- yield from _flatten_layers(paint, colr)
- else:
- yield paint
+ assert root.Format == ot.PaintFormat.PaintColrLayers
+ for paint in root.getChildren(colr):
+ if paint.Format == ot.PaintFormat.PaintColrLayers:
+ yield from _flatten_layers(paint, colr)
+ else:
+ yield paint
def _merge_PaintColrLayers(self, out, lst):
- # we only enforce that the (flat) number of layers is the same across all masters
- # but we allow FirstLayerIndex to differ to acommodate for sparse glyph sets.
-
- out_layers = list(_flatten_layers(out, self.font["COLR"].table))
-
- # sanity check ttfs are subset to current values (see VariationMerger.mergeThings)
- # before matching each master PaintColrLayers to its respective COLR by position
- assert len(self.ttfs) == len(lst)
- master_layerses = [
- list(_flatten_layers(lst[i], self.ttfs[i]["COLR"].table))
- for i in range(len(lst))
- ]
-
- try:
- self.mergeLists(out_layers, master_layerses)
- except VarLibMergeError as e:
- # NOTE: This attribute doesn't actually exist in PaintColrLayers but it's
- # handy to have it in the stack trace for debugging.
- e.stack.append(".Layers")
- raise
-
- # following block is very similar to LayerListBuilder._beforeBuildPaintColrLayers
- # but I couldn't find a nice way to share the code between the two...
-
- if self.layerReuseCache is not None:
- # successful reuse can make the list smaller
- out_layers = self.layerReuseCache.try_reuse(out_layers)
-
- # if the list is still too big we need to tree-fy it
- is_tree = len(out_layers) > MAX_PAINT_COLR_LAYER_COUNT
- out_layers = build_n_ary_tree(out_layers, n=MAX_PAINT_COLR_LAYER_COUNT)
-
- # We now have a tree of sequences with Paint leaves.
- # Convert the sequences into PaintColrLayers.
- def listToColrLayers(paint):
- if isinstance(paint, list):
- layers = [listToColrLayers(l) for l in paint]
- paint = ot.Paint()
- paint.Format = int(ot.PaintFormat.PaintColrLayers)
- paint.NumLayers = len(layers)
- paint.FirstLayerIndex = len(self.layers)
- self.layers.extend(layers)
- if self.layerReuseCache is not None:
- self.layerReuseCache.add(layers, paint.FirstLayerIndex)
- return paint
-
- out_layers = [listToColrLayers(l) for l in out_layers]
-
- if len(out_layers) == 1 and out_layers[0].Format == ot.PaintFormat.PaintColrLayers:
- # special case when the reuse cache finds a single perfect PaintColrLayers match
- # (it can only come from a successful reuse, _flatten_layers has gotten rid of
- # all nested PaintColrLayers already); we assign it directly and avoid creating
- # an extra table
- out.NumLayers = out_layers[0].NumLayers
- out.FirstLayerIndex = out_layers[0].FirstLayerIndex
- else:
- out.NumLayers = len(out_layers)
- out.FirstLayerIndex = len(self.layers)
-
- self.layers.extend(out_layers)
-
- # Register our parts for reuse provided we aren't a tree
- # If we are a tree the leaves registered for reuse and that will suffice
- if self.layerReuseCache is not None and not is_tree:
- self.layerReuseCache.add(out_layers, out.FirstLayerIndex)
+ # we only enforce that the (flat) number of layers is the same across all masters
+ # but we allow FirstLayerIndex to differ to acommodate for sparse glyph sets.
+
+ out_layers = list(_flatten_layers(out, self.font["COLR"].table))
+
+ # sanity check ttfs are subset to current values (see VariationMerger.mergeThings)
+ # before matching each master PaintColrLayers to its respective COLR by position
+ assert len(self.ttfs) == len(lst)
+ master_layerses = [
+ list(_flatten_layers(lst[i], self.ttfs[i]["COLR"].table))
+ for i in range(len(lst))
+ ]
+
+ try:
+ self.mergeLists(out_layers, master_layerses)
+ except VarLibMergeError as e:
+ # NOTE: This attribute doesn't actually exist in PaintColrLayers but it's
+ # handy to have it in the stack trace for debugging.
+ e.stack.append(".Layers")
+ raise
+
+ # following block is very similar to LayerListBuilder._beforeBuildPaintColrLayers
+ # but I couldn't find a nice way to share the code between the two...
+
+ if self.layerReuseCache is not None:
+ # successful reuse can make the list smaller
+ out_layers = self.layerReuseCache.try_reuse(out_layers)
+
+ # if the list is still too big we need to tree-fy it
+ is_tree = len(out_layers) > MAX_PAINT_COLR_LAYER_COUNT
+ out_layers = build_n_ary_tree(out_layers, n=MAX_PAINT_COLR_LAYER_COUNT)
+
+ # We now have a tree of sequences with Paint leaves.
+ # Convert the sequences into PaintColrLayers.
+ def listToColrLayers(paint):
+ if isinstance(paint, list):
+ layers = [listToColrLayers(l) for l in paint]
+ paint = ot.Paint()
+ paint.Format = int(ot.PaintFormat.PaintColrLayers)
+ paint.NumLayers = len(layers)
+ paint.FirstLayerIndex = len(self.layers)
+ self.layers.extend(layers)
+ if self.layerReuseCache is not None:
+ self.layerReuseCache.add(layers, paint.FirstLayerIndex)
+ return paint
+
+ out_layers = [listToColrLayers(l) for l in out_layers]
+
+ if len(out_layers) == 1 and out_layers[0].Format == ot.PaintFormat.PaintColrLayers:
+ # special case when the reuse cache finds a single perfect PaintColrLayers match
+ # (it can only come from a successful reuse, _flatten_layers has gotten rid of
+ # all nested PaintColrLayers already); we assign it directly and avoid creating
+ # an extra table
+ out.NumLayers = out_layers[0].NumLayers
+ out.FirstLayerIndex = out_layers[0].FirstLayerIndex
+ else:
+ out.NumLayers = len(out_layers)
+ out.FirstLayerIndex = len(self.layers)
+
+ self.layers.extend(out_layers)
+
+ # Register our parts for reuse provided we aren't a tree
+ # If we are a tree the leaves registered for reuse and that will suffice
+ if self.layerReuseCache is not None and not is_tree:
+ self.layerReuseCache.add(out_layers, out.FirstLayerIndex)
@COLRVariationMerger.merger((ot.Paint, ot.ClipBox))
def merge(merger, self, lst):
- fmt = merger.checkFormatEnum(self, lst, lambda fmt: not fmt.is_variable())
+ fmt = merger.checkFormatEnum(self, lst, lambda fmt: not fmt.is_variable())
- if fmt is ot.PaintFormat.PaintColrLayers:
- _merge_PaintColrLayers(merger, self, lst)
- return
+ if fmt is ot.PaintFormat.PaintColrLayers:
+ _merge_PaintColrLayers(merger, self, lst)
+ return
- varFormat = fmt.as_variable()
+ varFormat = fmt.as_variable()
- varAttrs = ()
- if varFormat is not None:
- varAttrs = otBase.getVariableAttrs(type(self), varFormat)
- staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)
+ varAttrs = ()
+ if varFormat is not None:
+ varAttrs = otBase.getVariableAttrs(type(self), varFormat)
+ staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)
- merger.mergeAttrs(self, lst, staticAttrs)
+ merger.mergeAttrs(self, lst, staticAttrs)
- varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)
+ varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)
- subTables = [st.value for st in self.iterSubTables()]
+ subTables = [st.value for st in self.iterSubTables()]
- # Convert table to variable if itself has variations or any subtables have
- isVariable = (
- varIndexBase != ot.NO_VARIATION_INDEX
- or any(id(table) in merger.varTableIds for table in subTables)
- )
+ # Convert table to variable if itself has variations or any subtables have
+ isVariable = varIndexBase != ot.NO_VARIATION_INDEX or any(
+ id(table) in merger.varTableIds for table in subTables
+ )
- if isVariable:
- if varAttrs:
- # Some PaintVar* don't have any scalar attributes that can vary,
- # only indirect offsets to other variable subtables, thus have
- # no VarIndexBase of their own (e.g. PaintVarTransform)
- self.VarIndexBase = varIndexBase
+ if isVariable:
+ if varAttrs:
+ # Some PaintVar* don't have any scalar attributes that can vary,
+ # only indirect offsets to other variable subtables, thus have
+ # no VarIndexBase of their own (e.g. PaintVarTransform)
+ self.VarIndexBase = varIndexBase
- if subTables:
- # Convert Affine2x3 -> VarAffine2x3, ColorLine -> VarColorLine, etc.
- merger.convertSubTablesToVarType(self)
+ if subTables:
+ # Convert Affine2x3 -> VarAffine2x3, ColorLine -> VarColorLine, etc.
+ merger.convertSubTablesToVarType(self)
- assert varFormat is not None
- self.Format = int(varFormat)
+ assert varFormat is not None
+ self.Format = int(varFormat)
@COLRVariationMerger.merger((ot.Affine2x3, ot.ColorStop))
def merge(merger, self, lst):
- varType = type(self).VarType
+ varType = type(self).VarType
- varAttrs = otBase.getVariableAttrs(varType)
- staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)
+ varAttrs = otBase.getVariableAttrs(varType)
+ staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)
- merger.mergeAttrs(self, lst, staticAttrs)
+ merger.mergeAttrs(self, lst, staticAttrs)
- varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)
+ varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)
- if varIndexBase != ot.NO_VARIATION_INDEX:
- self.VarIndexBase = varIndexBase
- # mark as having variations so the parent table will convert to Var{Type}
- merger.varTableIds.add(id(self))
+ if varIndexBase != ot.NO_VARIATION_INDEX:
+ self.VarIndexBase = varIndexBase
+ # mark as having variations so the parent table will convert to Var{Type}
+ merger.varTableIds.add(id(self))
@COLRVariationMerger.merger(ot.ColorLine)
def merge(merger, self, lst):
- merger.mergeAttrs(self, lst, (c.name for c in self.getConverters()))
+ merger.mergeAttrs(self, lst, (c.name for c in self.getConverters()))
- if any(id(stop) in merger.varTableIds for stop in self.ColorStop):
- merger.convertSubTablesToVarType(self)
- merger.varTableIds.add(id(self))
+ if any(id(stop) in merger.varTableIds for stop in self.ColorStop):
+ merger.convertSubTablesToVarType(self)
+ merger.varTableIds.add(id(self))
@COLRVariationMerger.merger(ot.ClipList, "clips")
def merge(merger, self, lst):
- # 'sparse' in that we allow non-default masters to omit ClipBox entries
- # for some/all glyphs (i.e. they don't participate)
- merger.mergeSparseDict(self, lst)
+ # 'sparse' in that we allow non-default masters to omit ClipBox entries
+ # for some/all glyphs (i.e. they don't participate)
+ merger.mergeSparseDict(self, lst)
diff --git a/Lib/fontTools/varLib/models.py b/Lib/fontTools/varLib/models.py
index a7e020b0..5bd66dba 100644
--- a/Lib/fontTools/varLib/models.py
+++ b/Lib/fontTools/varLib/models.py
@@ -43,15 +43,15 @@ def subList(truth, lst):
return [l for l, t in zip(lst, truth) if t]
-def normalizeValue(v, triple):
+def normalizeValue(v, triple, extrapolate=False):
"""Normalizes value based on a min/default/max triple.
- >>> normalizeValue(400, (100, 400, 900))
- 0.0
- >>> normalizeValue(100, (100, 400, 900))
- -1.0
- >>> normalizeValue(650, (100, 400, 900))
- 0.5
+ >>> normalizeValue(400, (100, 400, 900))
+ 0.0
+ >>> normalizeValue(100, (100, 400, 900))
+ -1.0
+ >>> normalizeValue(650, (100, 400, 900))
+ 0.5
"""
lower, default, upper = triple
if not (lower <= default <= upper):
@@ -59,68 +59,76 @@ def normalizeValue(v, triple):
f"Invalid axis values, must be minimum, default, maximum: "
f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
)
- v = max(min(v, upper), lower)
- if v == default:
- v = 0.0
- elif v < default:
- v = (v - default) / (default - lower)
+ if not extrapolate:
+ v = max(min(v, upper), lower)
+
+ if v == default or lower == upper:
+ return 0.0
+
+ if (v < default and lower != default) or (v > default and upper == default):
+ return (v - default) / (default - lower)
else:
- v = (v - default) / (upper - default)
- return v
+ assert (v > default and upper != default) or (
+ v < default and lower == default
+ ), f"Ooops... v={v}, triple=({lower}, {default}, {upper})"
+ return (v - default) / (upper - default)
-def normalizeLocation(location, axes):
+def normalizeLocation(location, axes, extrapolate=False):
"""Normalizes location based on axis min/default/max values from axes.
- >>> axes = {"wght": (100, 400, 900)}
- >>> normalizeLocation({"wght": 400}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 100}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": 900}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 650}, axes)
- {'wght': 0.5}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': -1.0}
- >>> axes = {"wght": (0, 0, 1000)}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": -1}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 500}, axes)
- {'wght': 0.5}
- >>> normalizeLocation({"wght": 1001}, axes)
- {'wght': 1.0}
- >>> axes = {"wght": (0, 1000, 1000)}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": -1}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": 500}, axes)
- {'wght': -0.5}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 1001}, axes)
- {'wght': 0.0}
+ >>> axes = {"wght": (100, 400, 900)}
+ >>> normalizeLocation({"wght": 400}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": 100}, axes)
+ {'wght': -1.0}
+ >>> normalizeLocation({"wght": 900}, axes)
+ {'wght': 1.0}
+ >>> normalizeLocation({"wght": 650}, axes)
+ {'wght': 0.5}
+ >>> normalizeLocation({"wght": 1000}, axes)
+ {'wght': 1.0}
+ >>> normalizeLocation({"wght": 0}, axes)
+ {'wght': -1.0}
+ >>> axes = {"wght": (0, 0, 1000)}
+ >>> normalizeLocation({"wght": 0}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": -1}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": 1000}, axes)
+ {'wght': 1.0}
+ >>> normalizeLocation({"wght": 500}, axes)
+ {'wght': 0.5}
+ >>> normalizeLocation({"wght": 1001}, axes)
+ {'wght': 1.0}
+ >>> axes = {"wght": (0, 1000, 1000)}
+ >>> normalizeLocation({"wght": 0}, axes)
+ {'wght': -1.0}
+ >>> normalizeLocation({"wght": -1}, axes)
+ {'wght': -1.0}
+ >>> normalizeLocation({"wght": 500}, axes)
+ {'wght': -0.5}
+ >>> normalizeLocation({"wght": 1000}, axes)
+ {'wght': 0.0}
+ >>> normalizeLocation({"wght": 1001}, axes)
+ {'wght': 0.0}
"""
out = {}
for tag, triple in axes.items():
v = location.get(tag, triple[1])
- out[tag] = normalizeValue(v, triple)
+ out[tag] = normalizeValue(v, triple, extrapolate=extrapolate)
return out
-def supportScalar(location, support, ot=True, extrapolate=False):
+def supportScalar(location, support, ot=True, extrapolate=False, axisRanges=None):
"""Returns the scalar multiplier at location, for a master
with support. If ot is True, then a peak value of zero
for support of an axis means "axis does not participate". That
is how OpenType Variation Font technology works.
+ If extrapolate is True, axisRanges must be a dict that maps axis
+ names to (axisMin, axisMax) tuples.
+
>>> supportScalar({}, {})
1.0
>>> supportScalar({'wght':.2}, {})
@@ -137,11 +145,17 @@ def supportScalar(location, support, ot=True, extrapolate=False):
0.75
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
0.75
- >>> supportScalar({'wght':4}, {'wght':(0,2,3)}, extrapolate=True)
- 2.0
- >>> supportScalar({'wght':4}, {'wght':(0,2,2)}, extrapolate=True)
- 2.0
+ >>> supportScalar({'wght':3}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
+ -1.0
+ >>> supportScalar({'wght':-1}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
+ -1.0
+ >>> supportScalar({'wght':3}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
+ 1.5
+ >>> supportScalar({'wght':-1}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
+ -0.5
"""
+ if extrapolate and axisRanges is None:
+ raise TypeError("axisRanges must be passed when extrapolate is True")
scalar = 1.0
for axis, (lower, peak, upper) in support.items():
if ot:
@@ -160,18 +174,19 @@ def supportScalar(location, support, ot=True, extrapolate=False):
continue
if extrapolate:
- if v < -1 and lower <= -1:
- if peak <= -1 and peak < upper:
+ axisMin, axisMax = axisRanges[axis]
+ if v < axisMin and lower <= axisMin:
+ if peak <= axisMin and peak < upper:
scalar *= (v - upper) / (peak - upper)
continue
- elif -1 < peak:
+ elif axisMin < peak:
scalar *= (v - lower) / (peak - lower)
continue
- elif +1 < v and +1 <= upper:
- if +1 <= peak and lower < peak:
+ elif axisMax < v and axisMax <= upper:
+ if axisMax <= peak and lower < peak:
scalar *= (v - lower) / (peak - lower)
continue
- elif peak < +1:
+ elif peak < axisMax:
scalar *= (v - upper) / (peak - upper)
continue
@@ -189,9 +204,8 @@ def supportScalar(location, support, ot=True, extrapolate=False):
class VariationModel(object):
"""Locations must have the base master at the origin (ie. 0).
- If the extrapolate argument is set to True, then location values are
- interpretted in the normalized space, ie. in the [-1,+1] range, and
- values are extrapolated outside this range.
+ If the extrapolate argument is set to True, then values are extrapolated
+ outside the axis range.
>>> from pprint import pprint
>>> locations = [ \
@@ -234,13 +248,13 @@ class VariationModel(object):
"""
def __init__(self, locations, axisOrder=None, extrapolate=False):
-
if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):
raise VariationModelError("Locations must be unique.")
self.origLocations = locations
self.axisOrder = axisOrder if axisOrder is not None else []
self.extrapolate = extrapolate
+ self.axisRanges = self.computeAxisRanges(locations) if extrapolate else None
locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations]
keyFunc = self.getMasterLocationsSortKeyFunc(
@@ -266,6 +280,17 @@ class VariationModel(object):
return subModel, subList(key, items)
@staticmethod
+ def computeAxisRanges(locations):
+ axisRanges = {}
+ allAxes = {axis for loc in locations for axis in loc.keys()}
+ for loc in locations:
+ for axis in allAxes:
+ value = loc.get(axis, 0)
+ axisMin, axisMax = axisRanges.get(axis, (value, value))
+ axisRanges[axis] = min(value, axisMin), max(value, axisMax)
+ return axisRanges
+
+ @staticmethod
def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):
if {} not in locations:
raise VariationModelError("Base master not found.")
@@ -339,12 +364,12 @@ class VariationModel(object):
# Walk over previous masters now
for prev_region in regions[:i]:
# Master with extra axes do not participte
- if not set(prev_region.keys()).issubset(locAxes):
+ if set(prev_region.keys()) != locAxes:
continue
# If it's NOT in the current box, it does not participate
relevant = True
for axis, (lower, peak, upper) in region.items():
- if axis not in prev_region or not (
+ if not (
prev_region[axis][1] == peak
or lower < prev_region[axis][1] < upper
):
@@ -439,8 +464,12 @@ class VariationModel(object):
return model.getDeltas(items, round=round), model.supports
def getScalars(self, loc):
- return [supportScalar(loc, support, extrapolate=self.extrapolate)
- for support in self.supports]
+ return [
+ supportScalar(
+ loc, support, extrapolate=self.extrapolate, axisRanges=self.axisRanges
+ )
+ for support in self.supports
+ ]
@staticmethod
def interpolateFromDeltasAndScalars(deltas, scalars):
diff --git a/Lib/fontTools/varLib/mutator.py b/Lib/fontTools/varLib/mutator.py
index 2e674798..d1d123ab 100644
--- a/Lib/fontTools/varLib/mutator.py
+++ b/Lib/fontTools/varLib/mutator.py
@@ -8,11 +8,15 @@ from fontTools.misc.roundTools import otRound
from fontTools.pens.boundsPen import BoundsPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import ttProgram
-from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates, flagOverlapSimple, OVERLAP_COMPOUND
+from fontTools.ttLib.tables._g_l_y_f import (
+ GlyphCoordinates,
+ flagOverlapSimple,
+ OVERLAP_COMPOUND,
+)
from fontTools.varLib.models import (
- supportScalar,
- normalizeLocation,
- piecewiseLinearMap,
+ supportScalar,
+ normalizeLocation,
+ piecewiseLinearMap,
)
from fontTools.varLib.merger import MutatorMerger
from fontTools.varLib.varStore import VarStoreInstancer
@@ -30,435 +34,476 @@ log = logging.getLogger("fontTools.varlib.mutator")
OS2_WIDTH_CLASS_VALUES = {}
percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0]
for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1):
- half = (prev + curr) / 2
- OS2_WIDTH_CLASS_VALUES[half] = i
+ half = (prev + curr) / 2
+ OS2_WIDTH_CLASS_VALUES[half] = i
def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
- pd_blend_lists = ("BlueValues", "OtherBlues", "FamilyBlues",
- "FamilyOtherBlues", "StemSnapH",
- "StemSnapV")
- pd_blend_values = ("BlueScale", "BlueShift",
- "BlueFuzz", "StdHW", "StdVW")
- for fontDict in topDict.FDArray:
- pd = fontDict.Private
- vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
- for key, value in pd.rawDict.items():
- if (key in pd_blend_values) and isinstance(value, list):
- delta = interpolateFromDeltas(vsindex, value[1:])
- pd.rawDict[key] = otRound(value[0] + delta)
- elif (key in pd_blend_lists) and isinstance(value[0], list):
- """If any argument in a BlueValues list is a blend list,
- then they all are. The first value of each list is an
- absolute value. The delta tuples are calculated from
- relative master values, hence we need to append all the
- deltas to date to each successive absolute value."""
- delta = 0
- for i, val_list in enumerate(value):
- delta += otRound(interpolateFromDeltas(vsindex,
- val_list[1:]))
- value[i] = val_list[0] + delta
+ pd_blend_lists = (
+ "BlueValues",
+ "OtherBlues",
+ "FamilyBlues",
+ "FamilyOtherBlues",
+ "StemSnapH",
+ "StemSnapV",
+ )
+ pd_blend_values = ("BlueScale", "BlueShift", "BlueFuzz", "StdHW", "StdVW")
+ for fontDict in topDict.FDArray:
+ pd = fontDict.Private
+ vsindex = pd.vsindex if (hasattr(pd, "vsindex")) else 0
+ for key, value in pd.rawDict.items():
+ if (key in pd_blend_values) and isinstance(value, list):
+ delta = interpolateFromDeltas(vsindex, value[1:])
+ pd.rawDict[key] = otRound(value[0] + delta)
+ elif (key in pd_blend_lists) and isinstance(value[0], list):
+ """If any argument in a BlueValues list is a blend list,
+ then they all are. The first value of each list is an
+ absolute value. The delta tuples are calculated from
+ relative master values, hence we need to append all the
+ deltas to date to each successive absolute value."""
+ delta = 0
+ for i, val_list in enumerate(value):
+ delta += otRound(interpolateFromDeltas(vsindex, val_list[1:]))
+ value[i] = val_list[0] + delta
def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
- charstrings = topDict.CharStrings
- for gname in glyphOrder:
- # Interpolate charstring
- # e.g replace blend op args with regular args,
- # and use and discard vsindex op.
- charstring = charstrings[gname]
- new_program = []
- vsindex = 0
- last_i = 0
- for i, token in enumerate(charstring.program):
- if token == 'vsindex':
- vsindex = charstring.program[i - 1]
- if last_i != 0:
- new_program.extend(charstring.program[last_i:i - 1])
- last_i = i + 1
- elif token == 'blend':
- num_regions = charstring.getNumRegions(vsindex)
- numMasters = 1 + num_regions
- num_args = charstring.program[i - 1]
- # The program list starting at program[i] is now:
- # ..args for following operations
- # num_args values from the default font
- # num_args tuples, each with numMasters-1 delta values
- # num_blend_args
- # 'blend'
- argi = i - (num_args * numMasters + 1)
- end_args = tuplei = argi + num_args
- while argi < end_args:
- next_ti = tuplei + num_regions
- deltas = charstring.program[tuplei:next_ti]
- delta = interpolateFromDeltas(vsindex, deltas)
- charstring.program[argi] += otRound(delta)
- tuplei = next_ti
- argi += 1
- new_program.extend(charstring.program[last_i:end_args])
- last_i = i + 1
- if last_i != 0:
- new_program.extend(charstring.program[last_i:])
- charstring.program = new_program
+ charstrings = topDict.CharStrings
+ for gname in glyphOrder:
+ # Interpolate charstring
+ # e.g replace blend op args with regular args,
+ # and use and discard vsindex op.
+ charstring = charstrings[gname]
+ new_program = []
+ vsindex = 0
+ last_i = 0
+ for i, token in enumerate(charstring.program):
+ if token == "vsindex":
+ vsindex = charstring.program[i - 1]
+ if last_i != 0:
+ new_program.extend(charstring.program[last_i : i - 1])
+ last_i = i + 1
+ elif token == "blend":
+ num_regions = charstring.getNumRegions(vsindex)
+ numMasters = 1 + num_regions
+ num_args = charstring.program[i - 1]
+ # The program list starting at program[i] is now:
+ # ..args for following operations
+ # num_args values from the default font
+ # num_args tuples, each with numMasters-1 delta values
+ # num_blend_args
+ # 'blend'
+ argi = i - (num_args * numMasters + 1)
+ end_args = tuplei = argi + num_args
+ while argi < end_args:
+ next_ti = tuplei + num_regions
+ deltas = charstring.program[tuplei:next_ti]
+ delta = interpolateFromDeltas(vsindex, deltas)
+ charstring.program[argi] += otRound(delta)
+ tuplei = next_ti
+ argi += 1
+ new_program.extend(charstring.program[last_i:end_args])
+ last_i = i + 1
+ if last_i != 0:
+ new_program.extend(charstring.program[last_i:])
+ charstring.program = new_program
def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
- """Unlike TrueType glyphs, neither advance width nor bounding box
- info is stored in a CFF2 charstring. The width data exists only in
- the hmtx and HVAR tables. Since LSB data cannot be interpolated
- reliably from the master LSB values in the hmtx table, we traverse
- the charstring to determine the actual bound box. """
-
- charstrings = topDict.CharStrings
- boundsPen = BoundsPen(glyphOrder)
- hmtx = varfont['hmtx']
- hvar_table = None
- if 'HVAR' in varfont:
- hvar_table = varfont['HVAR'].table
- fvar = varfont['fvar']
- varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)
-
- for gid, gname in enumerate(glyphOrder):
- entry = list(hmtx[gname])
- # get width delta.
- if hvar_table:
- if hvar_table.AdvWidthMap:
- width_idx = hvar_table.AdvWidthMap.mapping[gname]
- else:
- width_idx = gid
- width_delta = otRound(varStoreInstancer[width_idx])
- else:
- width_delta = 0
-
- # get LSB.
- boundsPen.init()
- charstring = charstrings[gname]
- charstring.draw(boundsPen)
- if boundsPen.bounds is None:
- # Happens with non-marking glyphs
- lsb_delta = 0
- else:
- lsb = otRound(boundsPen.bounds[0])
- lsb_delta = entry[1] - lsb
-
- if lsb_delta or width_delta:
- if width_delta:
- entry[0] += width_delta
- if lsb_delta:
- entry[1] = lsb
- hmtx[gname] = tuple(entry)
+ """Unlike TrueType glyphs, neither advance width nor bounding box
+ info is stored in a CFF2 charstring. The width data exists only in
+ the hmtx and HVAR tables. Since LSB data cannot be interpolated
+ reliably from the master LSB values in the hmtx table, we traverse
+ the charstring to determine the actual bound box."""
+
+ charstrings = topDict.CharStrings
+ boundsPen = BoundsPen(glyphOrder)
+ hmtx = varfont["hmtx"]
+ hvar_table = None
+ if "HVAR" in varfont:
+ hvar_table = varfont["HVAR"].table
+ fvar = varfont["fvar"]
+ varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)
+
+ for gid, gname in enumerate(glyphOrder):
+ entry = list(hmtx[gname])
+ # get width delta.
+ if hvar_table:
+ if hvar_table.AdvWidthMap:
+ width_idx = hvar_table.AdvWidthMap.mapping[gname]
+ else:
+ width_idx = gid
+ width_delta = otRound(varStoreInstancer[width_idx])
+ else:
+ width_delta = 0
+
+ # get LSB.
+ boundsPen.init()
+ charstring = charstrings[gname]
+ charstring.draw(boundsPen)
+ if boundsPen.bounds is None:
+ # Happens with non-marking glyphs
+ lsb_delta = 0
+ else:
+ lsb = otRound(boundsPen.bounds[0])
+ lsb_delta = entry[1] - lsb
+
+ if lsb_delta or width_delta:
+ if width_delta:
+ entry[0] = max(0, entry[0] + width_delta)
+ if lsb_delta:
+ entry[1] = lsb
+ hmtx[gname] = tuple(entry)
def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
- """ Generate a static instance from a variable TTFont and a dictionary
- defining the desired location along the variable font's axes.
- The location values must be specified as user-space coordinates, e.g.:
-
- {'wght': 400, 'wdth': 100}
-
- By default, a new TTFont object is returned. If ``inplace`` is True, the
- input varfont is modified and reduced to a static font.
-
- When the overlap parameter is defined as True,
- OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1. See
- https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
- """
- if not inplace:
- # make a copy to leave input varfont unmodified
- stream = BytesIO()
- varfont.save(stream)
- stream.seek(0)
- varfont = TTFont(stream)
-
- fvar = varfont['fvar']
- axes = {a.axisTag:(a.minValue,a.defaultValue,a.maxValue) for a in fvar.axes}
- loc = normalizeLocation(location, axes)
- if 'avar' in varfont:
- maps = varfont['avar'].segments
- loc = {k: piecewiseLinearMap(v, maps[k]) for k,v in loc.items()}
- # Quantize to F2Dot14, to avoid surprise interpolations.
- loc = {k:floatToFixedToFloat(v, 14) for k,v in loc.items()}
- # Location is normalized now
- log.info("Normalized location: %s", loc)
-
- if 'gvar' in varfont:
- log.info("Mutating glyf/gvar tables")
- gvar = varfont['gvar']
- glyf = varfont['glyf']
- hMetrics = varfont['hmtx'].metrics
- vMetrics = getattr(varfont.get('vmtx'), 'metrics', None)
- # get list of glyph names in gvar sorted by component depth
- glyphnames = sorted(
- gvar.variations.keys(),
- key=lambda name: (
- glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
- if glyf[name].isComposite() else 0,
- name))
- for glyphname in glyphnames:
- variations = gvar.variations[glyphname]
- coordinates, _ = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
- origCoords, endPts = None, None
- for var in variations:
- scalar = supportScalar(loc, var.axes)
- if not scalar: continue
- delta = var.coordinates
- if None in delta:
- if origCoords is None:
- origCoords, g = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
- delta = iup_delta(delta, origCoords, g.endPts)
- coordinates += GlyphCoordinates(delta) * scalar
- glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
- else:
- glyf = None
-
- if 'cvar' in varfont:
- log.info("Mutating cvt/cvar tables")
- cvar = varfont['cvar']
- cvt = varfont['cvt ']
- deltas = {}
- for var in cvar.variations:
- scalar = supportScalar(loc, var.axes)
- if not scalar: continue
- for i, c in enumerate(var.coordinates):
- if c is not None:
- deltas[i] = deltas.get(i, 0) + scalar * c
- for i, delta in deltas.items():
- cvt[i] += otRound(delta)
-
- if 'CFF2' in varfont:
- log.info("Mutating CFF2 table")
- glyphOrder = varfont.getGlyphOrder()
- CFF2 = varfont['CFF2']
- topDict = CFF2.cff.topDictIndex[0]
- vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
- interpolateFromDeltas = vsInstancer.interpolateFromDeltas
- interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
- CFF2.desubroutinize()
- interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
- interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
- del topDict.rawDict['VarStore']
- del topDict.VarStore
-
- if 'MVAR' in varfont:
- log.info("Mutating MVAR table")
- mvar = varfont['MVAR'].table
- varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
- records = mvar.ValueRecord
- for rec in records:
- mvarTag = rec.ValueTag
- if mvarTag not in MVAR_ENTRIES:
- continue
- tableTag, itemName = MVAR_ENTRIES[mvarTag]
- delta = otRound(varStoreInstancer[rec.VarIdx])
- if not delta:
- continue
- setattr(varfont[tableTag], itemName,
- getattr(varfont[tableTag], itemName) + delta)
-
- log.info("Mutating FeatureVariations")
- for tableTag in 'GSUB','GPOS':
- if not tableTag in varfont:
- continue
- table = varfont[tableTag].table
- if not getattr(table, 'FeatureVariations', None):
- continue
- variations = table.FeatureVariations
- for record in variations.FeatureVariationRecord:
- applies = True
- for condition in record.ConditionSet.ConditionTable:
- if condition.Format == 1:
- axisIdx = condition.AxisIndex
- axisTag = fvar.axes[axisIdx].axisTag
- Min = condition.FilterRangeMinValue
- Max = condition.FilterRangeMaxValue
- v = loc[axisTag]
- if not (Min <= v <= Max):
- applies = False
- else:
- applies = False
- if not applies:
- break
-
- if applies:
- assert record.FeatureTableSubstitution.Version == 0x00010000
- for rec in record.FeatureTableSubstitution.SubstitutionRecord:
- table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = rec.Feature
- break
- del table.FeatureVariations
-
- if 'GDEF' in varfont and varfont['GDEF'].table.Version >= 0x00010003:
- log.info("Mutating GDEF/GPOS/GSUB tables")
- gdef = varfont['GDEF'].table
- instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)
-
- merger = MutatorMerger(varfont, instancer)
- merger.mergeTables(varfont, [varfont], ['GDEF', 'GPOS'])
-
- # Downgrade GDEF.
- del gdef.VarStore
- gdef.Version = 0x00010002
- if gdef.MarkGlyphSetsDef is None:
- del gdef.MarkGlyphSetsDef
- gdef.Version = 0x00010000
-
- if not (gdef.LigCaretList or
- gdef.MarkAttachClassDef or
- gdef.GlyphClassDef or
- gdef.AttachList or
- (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)):
- del varfont['GDEF']
-
- addidef = False
- if glyf:
- for glyph in glyf.glyphs.values():
- if hasattr(glyph, "program"):
- instructions = glyph.program.getAssembly()
- # If GETVARIATION opcode is used in bytecode of any glyph add IDEF
- addidef = any(op.startswith("GETVARIATION") for op in instructions)
- if addidef:
- break
- if overlap:
- for glyph_name in glyf.keys():
- glyph = glyf[glyph_name]
- # Set OVERLAP_COMPOUND bit for compound glyphs
- if glyph.isComposite():
- glyph.components[0].flags |= OVERLAP_COMPOUND
- # Set OVERLAP_SIMPLE bit for simple glyphs
- elif glyph.numberOfContours > 0:
- glyph.flags[0] |= flagOverlapSimple
- if addidef:
- log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
- asm = []
- if 'fpgm' in varfont:
- fpgm = varfont['fpgm']
- asm = fpgm.program.getAssembly()
- else:
- fpgm = newTable('fpgm')
- fpgm.program = ttProgram.Program()
- varfont['fpgm'] = fpgm
- asm.append("PUSHB[000] 145")
- asm.append("IDEF[ ]")
- args = [str(len(loc))]
- for a in fvar.axes:
- args.append(str(floatToFixed(loc[a.axisTag], 14)))
- asm.append("NPUSHW[ ] " + ' '.join(args))
- asm.append("ENDF[ ]")
- fpgm.program.fromAssembly(asm)
-
- # Change maxp attributes as IDEF is added
- if 'maxp' in varfont:
- maxp = varfont['maxp']
- setattr(maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0))
- setattr(maxp, "maxStackElements", max(len(loc), getattr(maxp, "maxStackElements", 0)))
-
- if 'name' in varfont:
- log.info("Pruning name table")
- exclude = {a.axisNameID for a in fvar.axes}
- for i in fvar.instances:
- exclude.add(i.subfamilyNameID)
- exclude.add(i.postscriptNameID)
- if 'ltag' in varfont:
- # Drop the whole 'ltag' table if all its language tags are referenced by
- # name records to be pruned.
- # TODO: prune unused ltag tags and re-enumerate langIDs accordingly
- excludedUnicodeLangIDs = [
- n.langID for n in varfont['name'].names
- if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF
- ]
- if set(excludedUnicodeLangIDs) == set(range(len((varfont['ltag'].tags)))):
- del varfont['ltag']
- varfont['name'].names[:] = [
- n for n in varfont['name'].names
- if n.nameID not in exclude
- ]
-
- if "wght" in location and "OS/2" in varfont:
- varfont["OS/2"].usWeightClass = otRound(
- max(1, min(location["wght"], 1000))
- )
- if "wdth" in location:
- wdth = location["wdth"]
- for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
- if wdth < percent:
- varfont["OS/2"].usWidthClass = widthClass
- break
- else:
- varfont["OS/2"].usWidthClass = 9
- if "slnt" in location and "post" in varfont:
- varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))
-
- log.info("Removing variable tables")
- for tag in ('avar','cvar','fvar','gvar','HVAR','MVAR','VVAR','STAT'):
- if tag in varfont:
- del varfont[tag]
-
- return varfont
+ """Generate a static instance from a variable TTFont and a dictionary
+ defining the desired location along the variable font's axes.
+ The location values must be specified as user-space coordinates, e.g.:
+
+ {'wght': 400, 'wdth': 100}
+
+ By default, a new TTFont object is returned. If ``inplace`` is True, the
+ input varfont is modified and reduced to a static font.
+
+ When the overlap parameter is defined as True,
+ OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1. See
+ https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
+ """
+ if not inplace:
+ # make a copy to leave input varfont unmodified
+ stream = BytesIO()
+ varfont.save(stream)
+ stream.seek(0)
+ varfont = TTFont(stream)
+
+ fvar = varfont["fvar"]
+ axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes}
+ loc = normalizeLocation(location, axes)
+ if "avar" in varfont:
+ maps = varfont["avar"].segments
+ loc = {k: piecewiseLinearMap(v, maps[k]) for k, v in loc.items()}
+ # Quantize to F2Dot14, to avoid surprise interpolations.
+ loc = {k: floatToFixedToFloat(v, 14) for k, v in loc.items()}
+ # Location is normalized now
+ log.info("Normalized location: %s", loc)
+
+ if "gvar" in varfont:
+ log.info("Mutating glyf/gvar tables")
+ gvar = varfont["gvar"]
+ glyf = varfont["glyf"]
+ hMetrics = varfont["hmtx"].metrics
+ vMetrics = getattr(varfont.get("vmtx"), "metrics", None)
+ # get list of glyph names in gvar sorted by component depth
+ glyphnames = sorted(
+ gvar.variations.keys(),
+ key=lambda name: (
+ glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
+ if glyf[name].isComposite() or glyf[name].isVarComposite()
+ else 0,
+ name,
+ ),
+ )
+ for glyphname in glyphnames:
+ variations = gvar.variations[glyphname]
+ coordinates, _ = glyf._getCoordinatesAndControls(
+ glyphname, hMetrics, vMetrics
+ )
+ origCoords, endPts = None, None
+ for var in variations:
+ scalar = supportScalar(loc, var.axes)
+ if not scalar:
+ continue
+ delta = var.coordinates
+ if None in delta:
+ if origCoords is None:
+ origCoords, g = glyf._getCoordinatesAndControls(
+ glyphname, hMetrics, vMetrics
+ )
+ delta = iup_delta(delta, origCoords, g.endPts)
+ coordinates += GlyphCoordinates(delta) * scalar
+ glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
+ else:
+ glyf = None
+
+ if "DSIG" in varfont:
+ del varfont["DSIG"]
+
+ if "cvar" in varfont:
+ log.info("Mutating cvt/cvar tables")
+ cvar = varfont["cvar"]
+ cvt = varfont["cvt "]
+ deltas = {}
+ for var in cvar.variations:
+ scalar = supportScalar(loc, var.axes)
+ if not scalar:
+ continue
+ for i, c in enumerate(var.coordinates):
+ if c is not None:
+ deltas[i] = deltas.get(i, 0) + scalar * c
+ for i, delta in deltas.items():
+ cvt[i] += otRound(delta)
+
+ if "CFF2" in varfont:
+ log.info("Mutating CFF2 table")
+ glyphOrder = varfont.getGlyphOrder()
+ CFF2 = varfont["CFF2"]
+ topDict = CFF2.cff.topDictIndex[0]
+ vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
+ interpolateFromDeltas = vsInstancer.interpolateFromDeltas
+ interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
+ CFF2.desubroutinize()
+ interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
+ interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
+ del topDict.rawDict["VarStore"]
+ del topDict.VarStore
+
+ if "MVAR" in varfont:
+ log.info("Mutating MVAR table")
+ mvar = varfont["MVAR"].table
+ varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
+ records = mvar.ValueRecord
+ for rec in records:
+ mvarTag = rec.ValueTag
+ if mvarTag not in MVAR_ENTRIES:
+ continue
+ tableTag, itemName = MVAR_ENTRIES[mvarTag]
+ delta = otRound(varStoreInstancer[rec.VarIdx])
+ if not delta:
+ continue
+ setattr(
+ varfont[tableTag],
+ itemName,
+ getattr(varfont[tableTag], itemName) + delta,
+ )
+
+ log.info("Mutating FeatureVariations")
+ for tableTag in "GSUB", "GPOS":
+ if not tableTag in varfont:
+ continue
+ table = varfont[tableTag].table
+ if not getattr(table, "FeatureVariations", None):
+ continue
+ variations = table.FeatureVariations
+ for record in variations.FeatureVariationRecord:
+ applies = True
+ for condition in record.ConditionSet.ConditionTable:
+ if condition.Format == 1:
+ axisIdx = condition.AxisIndex
+ axisTag = fvar.axes[axisIdx].axisTag
+ Min = condition.FilterRangeMinValue
+ Max = condition.FilterRangeMaxValue
+ v = loc[axisTag]
+ if not (Min <= v <= Max):
+ applies = False
+ else:
+ applies = False
+ if not applies:
+ break
+
+ if applies:
+ assert record.FeatureTableSubstitution.Version == 0x00010000
+ for rec in record.FeatureTableSubstitution.SubstitutionRecord:
+ table.FeatureList.FeatureRecord[
+ rec.FeatureIndex
+ ].Feature = rec.Feature
+ break
+ del table.FeatureVariations
+
+ if "GDEF" in varfont and varfont["GDEF"].table.Version >= 0x00010003:
+ log.info("Mutating GDEF/GPOS/GSUB tables")
+ gdef = varfont["GDEF"].table
+ instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)
+
+ merger = MutatorMerger(varfont, instancer)
+ merger.mergeTables(varfont, [varfont], ["GDEF", "GPOS"])
+
+ # Downgrade GDEF.
+ del gdef.VarStore
+ gdef.Version = 0x00010002
+ if gdef.MarkGlyphSetsDef is None:
+ del gdef.MarkGlyphSetsDef
+ gdef.Version = 0x00010000
+
+ if not (
+ gdef.LigCaretList
+ or gdef.MarkAttachClassDef
+ or gdef.GlyphClassDef
+ or gdef.AttachList
+ or (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)
+ ):
+ del varfont["GDEF"]
+
+ addidef = False
+ if glyf:
+ for glyph in glyf.glyphs.values():
+ if hasattr(glyph, "program"):
+ instructions = glyph.program.getAssembly()
+ # If the GETVARIATION opcode is used in the bytecode of any glyph, add an IDEF
+ addidef = any(op.startswith("GETVARIATION") for op in instructions)
+ if addidef:
+ break
+ if overlap:
+ for glyph_name in glyf.keys():
+ glyph = glyf[glyph_name]
+ # Set OVERLAP_COMPOUND bit for compound glyphs
+ if glyph.isComposite():
+ glyph.components[0].flags |= OVERLAP_COMPOUND
+ # Set OVERLAP_SIMPLE bit for simple glyphs
+ elif glyph.numberOfContours > 0:
+ glyph.flags[0] |= flagOverlapSimple
+ if addidef:
+ log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
+ asm = []
+ if "fpgm" in varfont:
+ fpgm = varfont["fpgm"]
+ asm = fpgm.program.getAssembly()
+ else:
+ fpgm = newTable("fpgm")
+ fpgm.program = ttProgram.Program()
+ varfont["fpgm"] = fpgm
+ asm.append("PUSHB[000] 145")
+ asm.append("IDEF[ ]")
+ args = [str(len(loc))]
+ for a in fvar.axes:
+ args.append(str(floatToFixed(loc[a.axisTag], 14)))
+ asm.append("NPUSHW[ ] " + " ".join(args))
+ asm.append("ENDF[ ]")
+ fpgm.program.fromAssembly(asm)
+
+ # Change maxp attributes as IDEF is added
+ if "maxp" in varfont:
+ maxp = varfont["maxp"]
+ setattr(
+ maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0)
+ )
+ setattr(
+ maxp,
+ "maxStackElements",
+ max(len(loc), getattr(maxp, "maxStackElements", 0)),
+ )
+
+ if "name" in varfont:
+ log.info("Pruning name table")
+ exclude = {a.axisNameID for a in fvar.axes}
+ for i in fvar.instances:
+ exclude.add(i.subfamilyNameID)
+ exclude.add(i.postscriptNameID)
+ if "ltag" in varfont:
+ # Drop the whole 'ltag' table if all its language tags are referenced by
+ # name records to be pruned.
+ # TODO: prune unused ltag tags and re-enumerate langIDs accordingly
+ excludedUnicodeLangIDs = [
+ n.langID
+ for n in varfont["name"].names
+ if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF
+ ]
+ if set(excludedUnicodeLangIDs) == set(range(len((varfont["ltag"].tags)))):
+ del varfont["ltag"]
+ varfont["name"].names[:] = [
+ n for n in varfont["name"].names if n.nameID not in exclude
+ ]
+
+ if "wght" in location and "OS/2" in varfont:
+ varfont["OS/2"].usWeightClass = otRound(max(1, min(location["wght"], 1000)))
+ if "wdth" in location:
+ wdth = location["wdth"]
+ for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
+ if wdth < percent:
+ varfont["OS/2"].usWidthClass = widthClass
+ break
+ else:
+ varfont["OS/2"].usWidthClass = 9
+ if "slnt" in location and "post" in varfont:
+ varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))
+
+ log.info("Removing variable tables")
+ for tag in ("avar", "cvar", "fvar", "gvar", "HVAR", "MVAR", "VVAR", "STAT"):
+ if tag in varfont:
+ del varfont[tag]
+
+ return varfont
def main(args=None):
- """Instantiate a variation font"""
- from fontTools import configLogger
- import argparse
-
- parser = argparse.ArgumentParser(
- "fonttools varLib.mutator", description="Instantiate a variable font")
- parser.add_argument(
- "input", metavar="INPUT.ttf", help="Input variable TTF file.")
- parser.add_argument(
- "locargs", metavar="AXIS=LOC", nargs="*",
- help="List of space separated locations. A location consist in "
- "the name of a variation axis, followed by '=' and a number. E.g.: "
- " wght=700 wdth=80. The default is the location of the base master.")
- parser.add_argument(
- "-o", "--output", metavar="OUTPUT.ttf", default=None,
- help="Output instance TTF file (default: INPUT-instance.ttf).")
- parser.add_argument(
- "--no-recalc-timestamp", dest="recalc_timestamp", action='store_false',
- help="Don't set the output font's timestamp to the current time.")
- logging_group = parser.add_mutually_exclusive_group(required=False)
- logging_group.add_argument(
- "-v", "--verbose", action="store_true", help="Run more verbosely.")
- logging_group.add_argument(
- "-q", "--quiet", action="store_true", help="Turn verbosity off.")
- parser.add_argument(
- "--no-overlap",
- dest="overlap",
- action="store_false",
- help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags."
- )
- options = parser.parse_args(args)
-
- varfilename = options.input
- outfile = (
- os.path.splitext(varfilename)[0] + '-instance.ttf'
- if not options.output else options.output)
- configLogger(level=(
- "DEBUG" if options.verbose else
- "ERROR" if options.quiet else
- "INFO"))
-
- loc = {}
- for arg in options.locargs:
- try:
- tag, val = arg.split('=')
- assert len(tag) <= 4
- loc[tag.ljust(4)] = float(val)
- except (ValueError, AssertionError):
- parser.error("invalid location argument format: %r" % arg)
- log.info("Location: %s", loc)
-
- log.info("Loading variable font")
- varfont = TTFont(varfilename, recalcTimestamp=options.recalc_timestamp)
-
- instantiateVariableFont(varfont, loc, inplace=True, overlap=options.overlap)
-
- log.info("Saving instance font %s", outfile)
- varfont.save(outfile)
+ """Instantiate a variation font"""
+ from fontTools import configLogger
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ "fonttools varLib.mutator", description="Instantiate a variable font"
+ )
+ parser.add_argument("input", metavar="INPUT.ttf", help="Input variable TTF file.")
+ parser.add_argument(
+ "locargs",
+ metavar="AXIS=LOC",
+ nargs="*",
+ help="List of space separated locations. A location consist in "
+ "the name of a variation axis, followed by '=' and a number. E.g.: "
+ " wght=700 wdth=80. The default is the location of the base master.",
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ metavar="OUTPUT.ttf",
+ default=None,
+ help="Output instance TTF file (default: INPUT-instance.ttf).",
+ )
+ parser.add_argument(
+ "--no-recalc-timestamp",
+ dest="recalc_timestamp",
+ action="store_false",
+ help="Don't set the output font's timestamp to the current time.",
+ )
+ logging_group = parser.add_mutually_exclusive_group(required=False)
+ logging_group.add_argument(
+ "-v", "--verbose", action="store_true", help="Run more verbosely."
+ )
+ logging_group.add_argument(
+ "-q", "--quiet", action="store_true", help="Turn verbosity off."
+ )
+ parser.add_argument(
+ "--no-overlap",
+ dest="overlap",
+ action="store_false",
+ help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags.",
+ )
+ options = parser.parse_args(args)
+
+ varfilename = options.input
+ outfile = (
+ os.path.splitext(varfilename)[0] + "-instance.ttf"
+ if not options.output
+ else options.output
+ )
+ configLogger(
+ level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
+ )
+
+ loc = {}
+ for arg in options.locargs:
+ try:
+ tag, val = arg.split("=")
+ assert len(tag) <= 4
+ loc[tag.ljust(4)] = float(val)
+ except (ValueError, AssertionError):
+ parser.error("invalid location argument format: %r" % arg)
+ log.info("Location: %s", loc)
+
+ log.info("Loading variable font")
+ varfont = TTFont(varfilename, recalcTimestamp=options.recalc_timestamp)
+
+ instantiateVariableFont(varfont, loc, inplace=True, overlap=options.overlap)
+
+ log.info("Saving instance font %s", outfile)
+ varfont.save(outfile)
if __name__ == "__main__":
- import sys
- if len(sys.argv) > 1:
- sys.exit(main())
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+
+ if len(sys.argv) > 1:
+ sys.exit(main())
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/varLib/mvar.py b/Lib/fontTools/varLib/mvar.py
index 8b1355ba..653aeb45 100644
--- a/Lib/fontTools/varLib/mvar.py
+++ b/Lib/fontTools/varLib/mvar.py
@@ -1,40 +1,40 @@
MVAR_ENTRIES = {
- 'hasc': ('OS/2', 'sTypoAscender'), # horizontal ascender
- 'hdsc': ('OS/2', 'sTypoDescender'), # horizontal descender
- 'hlgp': ('OS/2', 'sTypoLineGap'), # horizontal line gap
- 'hcla': ('OS/2', 'usWinAscent'), # horizontal clipping ascent
- 'hcld': ('OS/2', 'usWinDescent'), # horizontal clipping descent
- 'vasc': ('vhea', 'ascent'), # vertical ascender
- 'vdsc': ('vhea', 'descent'), # vertical descender
- 'vlgp': ('vhea', 'lineGap'), # vertical line gap
- 'hcrs': ('hhea', 'caretSlopeRise'), # horizontal caret rise
- 'hcrn': ('hhea', 'caretSlopeRun'), # horizontal caret run
- 'hcof': ('hhea', 'caretOffset'), # horizontal caret offset
- 'vcrs': ('vhea', 'caretSlopeRise'), # vertical caret rise
- 'vcrn': ('vhea', 'caretSlopeRun'), # vertical caret run
- 'vcof': ('vhea', 'caretOffset'), # vertical caret offset
- 'xhgt': ('OS/2', 'sxHeight'), # x height
- 'cpht': ('OS/2', 'sCapHeight'), # cap height
- 'sbxs': ('OS/2', 'ySubscriptXSize'), # subscript em x size
- 'sbys': ('OS/2', 'ySubscriptYSize'), # subscript em y size
- 'sbxo': ('OS/2', 'ySubscriptXOffset'), # subscript em x offset
- 'sbyo': ('OS/2', 'ySubscriptYOffset'), # subscript em y offset
- 'spxs': ('OS/2', 'ySuperscriptXSize'), # superscript em x size
- 'spys': ('OS/2', 'ySuperscriptYSize'), # superscript em y size
- 'spxo': ('OS/2', 'ySuperscriptXOffset'), # superscript em x offset
- 'spyo': ('OS/2', 'ySuperscriptYOffset'), # superscript em y offset
- 'strs': ('OS/2', 'yStrikeoutSize'), # strikeout size
- 'stro': ('OS/2', 'yStrikeoutPosition'), # strikeout offset
- 'unds': ('post', 'underlineThickness'), # underline size
- 'undo': ('post', 'underlinePosition'), # underline offset
- #'gsp0': ('gasp', 'gaspRange[0].rangeMaxPPEM'), # gaspRange[0]
- #'gsp1': ('gasp', 'gaspRange[1].rangeMaxPPEM'), # gaspRange[1]
- #'gsp2': ('gasp', 'gaspRange[2].rangeMaxPPEM'), # gaspRange[2]
- #'gsp3': ('gasp', 'gaspRange[3].rangeMaxPPEM'), # gaspRange[3]
- #'gsp4': ('gasp', 'gaspRange[4].rangeMaxPPEM'), # gaspRange[4]
- #'gsp5': ('gasp', 'gaspRange[5].rangeMaxPPEM'), # gaspRange[5]
- #'gsp6': ('gasp', 'gaspRange[6].rangeMaxPPEM'), # gaspRange[6]
- #'gsp7': ('gasp', 'gaspRange[7].rangeMaxPPEM'), # gaspRange[7]
- #'gsp8': ('gasp', 'gaspRange[8].rangeMaxPPEM'), # gaspRange[8]
- #'gsp9': ('gasp', 'gaspRange[9].rangeMaxPPEM'), # gaspRange[9]
+ "hasc": ("OS/2", "sTypoAscender"), # horizontal ascender
+ "hdsc": ("OS/2", "sTypoDescender"), # horizontal descender
+ "hlgp": ("OS/2", "sTypoLineGap"), # horizontal line gap
+ "hcla": ("OS/2", "usWinAscent"), # horizontal clipping ascent
+ "hcld": ("OS/2", "usWinDescent"), # horizontal clipping descent
+ "vasc": ("vhea", "ascent"), # vertical ascender
+ "vdsc": ("vhea", "descent"), # vertical descender
+ "vlgp": ("vhea", "lineGap"), # vertical line gap
+ "hcrs": ("hhea", "caretSlopeRise"), # horizontal caret rise
+ "hcrn": ("hhea", "caretSlopeRun"), # horizontal caret run
+ "hcof": ("hhea", "caretOffset"), # horizontal caret offset
+ "vcrs": ("vhea", "caretSlopeRise"), # vertical caret rise
+ "vcrn": ("vhea", "caretSlopeRun"), # vertical caret run
+ "vcof": ("vhea", "caretOffset"), # vertical caret offset
+ "xhgt": ("OS/2", "sxHeight"), # x height
+ "cpht": ("OS/2", "sCapHeight"), # cap height
+ "sbxs": ("OS/2", "ySubscriptXSize"), # subscript em x size
+ "sbys": ("OS/2", "ySubscriptYSize"), # subscript em y size
+ "sbxo": ("OS/2", "ySubscriptXOffset"), # subscript em x offset
+ "sbyo": ("OS/2", "ySubscriptYOffset"), # subscript em y offset
+ "spxs": ("OS/2", "ySuperscriptXSize"), # superscript em x size
+ "spys": ("OS/2", "ySuperscriptYSize"), # superscript em y size
+ "spxo": ("OS/2", "ySuperscriptXOffset"), # superscript em x offset
+ "spyo": ("OS/2", "ySuperscriptYOffset"), # superscript em y offset
+ "strs": ("OS/2", "yStrikeoutSize"), # strikeout size
+ "stro": ("OS/2", "yStrikeoutPosition"), # strikeout offset
+ "unds": ("post", "underlineThickness"), # underline size
+ "undo": ("post", "underlinePosition"), # underline offset
+ #'gsp0': ('gasp', 'gaspRange[0].rangeMaxPPEM'), # gaspRange[0]
+ #'gsp1': ('gasp', 'gaspRange[1].rangeMaxPPEM'), # gaspRange[1]
+ #'gsp2': ('gasp', 'gaspRange[2].rangeMaxPPEM'), # gaspRange[2]
+ #'gsp3': ('gasp', 'gaspRange[3].rangeMaxPPEM'), # gaspRange[3]
+ #'gsp4': ('gasp', 'gaspRange[4].rangeMaxPPEM'), # gaspRange[4]
+ #'gsp5': ('gasp', 'gaspRange[5].rangeMaxPPEM'), # gaspRange[5]
+ #'gsp6': ('gasp', 'gaspRange[6].rangeMaxPPEM'), # gaspRange[6]
+ #'gsp7': ('gasp', 'gaspRange[7].rangeMaxPPEM'), # gaspRange[7]
+ #'gsp8': ('gasp', 'gaspRange[8].rangeMaxPPEM'), # gaspRange[8]
+ #'gsp9': ('gasp', 'gaspRange[9].rangeMaxPPEM'), # gaspRange[9]
}
diff --git a/Lib/fontTools/varLib/plot.py b/Lib/fontTools/varLib/plot.py
index 811559fa..e0a7ca50 100644
--- a/Lib/fontTools/varLib/plot.py
+++ b/Lib/fontTools/varLib/plot.py
@@ -13,155 +13,226 @@ log = logging.getLogger(__name__)
def stops(support, count=10):
- a,b,c = support
+ a, b, c = support
- return [a + (b - a) * i / count for i in range(count)] + \
- [b + (c - b) * i / count for i in range(count)] + \
- [c]
+ return (
+ [a + (b - a) * i / count for i in range(count)]
+ + [b + (c - b) * i / count for i in range(count)]
+ + [c]
+ )
def _plotLocationsDots(locations, axes, subplot, **kwargs):
- for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):
- if len(axes) == 1:
- subplot.plot(
- [loc.get(axes[0], 0)],
- [1.],
- 'o',
- color=color,
- **kwargs
- )
- elif len(axes) == 2:
- subplot.plot(
- [loc.get(axes[0], 0)],
- [loc.get(axes[1], 0)],
- [1.],
- 'o',
- color=color,
- **kwargs
- )
- else:
- raise AssertionError(len(axes))
+ for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):
+ if len(axes) == 1:
+ subplot.plot([loc.get(axes[0], 0)], [1.0], "o", color=color, **kwargs)
+ elif len(axes) == 2:
+ subplot.plot(
+ [loc.get(axes[0], 0)],
+ [loc.get(axes[1], 0)],
+ [1.0],
+ "o",
+ color=color,
+ **kwargs,
+ )
+ else:
+ raise AssertionError(len(axes))
def plotLocations(locations, fig, names=None, **kwargs):
- n = len(locations)
- cols = math.ceil(n**.5)
- rows = math.ceil(n / cols)
+ n = len(locations)
+ cols = math.ceil(n**0.5)
+ rows = math.ceil(n / cols)
- if names is None:
- names = [None] * len(locations)
+ if names is None:
+ names = [None] * len(locations)
- model = VariationModel(locations)
- names = [names[model.reverseMapping[i]] for i in range(len(names))]
+ model = VariationModel(locations)
+ names = [names[model.reverseMapping[i]] for i in range(len(names))]
- axes = sorted(locations[0].keys())
- if len(axes) == 1:
- _plotLocations2D(
- model, axes[0], fig, cols, rows, names=names, **kwargs
- )
- elif len(axes) == 2:
- _plotLocations3D(
- model, axes, fig, cols, rows, names=names, **kwargs
- )
- else:
- raise ValueError("Only 1 or 2 axes are supported")
+ axes = sorted(locations[0].keys())
+ if len(axes) == 1:
+ _plotLocations2D(model, axes[0], fig, cols, rows, names=names, **kwargs)
+ elif len(axes) == 2:
+ _plotLocations3D(model, axes, fig, cols, rows, names=names, **kwargs)
+ else:
+ raise ValueError("Only 1 or 2 axes are supported")
def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs):
- subplot = fig.add_subplot(111)
- for i, (support, color, name) in enumerate(
- zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
- ):
- if name is not None:
- subplot.set_title(name)
- subplot.set_xlabel(axis)
- pyplot.xlim(-1.,+1.)
-
- Xs = support.get(axis, (-1.,0.,+1.))
- X, Y = [], []
- for x in stops(Xs):
- y = supportScalar({axis:x}, support)
- X.append(x)
- Y.append(y)
- subplot.plot(X, Y, color=color, **kwargs)
-
- _plotLocationsDots(model.locations, [axis], subplot)
+ subplot = fig.add_subplot(111)
+ for i, (support, color, name) in enumerate(
+ zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
+ ):
+ if name is not None:
+ subplot.set_title(name)
+ subplot.set_xlabel(axis)
+ pyplot.xlim(-1.0, +1.0)
+
+ Xs = support.get(axis, (-1.0, 0.0, +1.0))
+ X, Y = [], []
+ for x in stops(Xs):
+ y = supportScalar({axis: x}, support)
+ X.append(x)
+ Y.append(y)
+ subplot.plot(X, Y, color=color, **kwargs)
+
+ _plotLocationsDots(model.locations, [axis], subplot)
def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs):
- ax1, ax2 = axes
-
- axis3D = fig.add_subplot(111, projection='3d')
- for i, (support, color, name) in enumerate(
- zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
- ):
- if name is not None:
- axis3D.set_title(name)
- axis3D.set_xlabel(ax1)
- axis3D.set_ylabel(ax2)
- pyplot.xlim(-1.,+1.)
- pyplot.ylim(-1.,+1.)
-
- Xs = support.get(ax1, (-1.,0.,+1.))
- Ys = support.get(ax2, (-1.,0.,+1.))
- for x in stops(Xs):
- X, Y, Z = [], [], []
- for y in Ys:
- z = supportScalar({ax1:x, ax2:y}, support)
- X.append(x)
- Y.append(y)
- Z.append(z)
- axis3D.plot(X, Y, Z, color=color, **kwargs)
- for y in stops(Ys):
- X, Y, Z = [], [], []
- for x in Xs:
- z = supportScalar({ax1:x, ax2:y}, support)
- X.append(x)
- Y.append(y)
- Z.append(z)
- axis3D.plot(X, Y, Z, color=color, **kwargs)
-
- _plotLocationsDots(model.locations, [ax1, ax2], axis3D)
+ ax1, ax2 = axes
+
+ axis3D = fig.add_subplot(111, projection="3d")
+ for i, (support, color, name) in enumerate(
+ zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
+ ):
+ if name is not None:
+ axis3D.set_title(name)
+ axis3D.set_xlabel(ax1)
+ axis3D.set_ylabel(ax2)
+ pyplot.xlim(-1.0, +1.0)
+ pyplot.ylim(-1.0, +1.0)
+
+ Xs = support.get(ax1, (-1.0, 0.0, +1.0))
+ Ys = support.get(ax2, (-1.0, 0.0, +1.0))
+ for x in stops(Xs):
+ X, Y, Z = [], [], []
+ for y in Ys:
+ z = supportScalar({ax1: x, ax2: y}, support)
+ X.append(x)
+ Y.append(y)
+ Z.append(z)
+ axis3D.plot(X, Y, Z, color=color, **kwargs)
+ for y in stops(Ys):
+ X, Y, Z = [], [], []
+ for x in Xs:
+ z = supportScalar({ax1: x, ax2: y}, support)
+ X.append(x)
+ Y.append(y)
+ Z.append(z)
+ axis3D.plot(X, Y, Z, color=color, **kwargs)
+
+ _plotLocationsDots(model.locations, [ax1, ax2], axis3D)
def plotDocument(doc, fig, **kwargs):
- doc.normalize()
- locations = [s.location for s in doc.sources]
- names = [s.name for s in doc.sources]
- plotLocations(locations, fig, names, **kwargs)
+ doc.normalize()
+ locations = [s.location for s in doc.sources]
+ names = [s.name for s in doc.sources]
+ plotLocations(locations, fig, names, **kwargs)
+
+
+def _plotModelFromMasters2D(model, masterValues, fig, **kwargs):
+ assert len(model.axisOrder) == 1
+ axis = model.axisOrder[0]
+
+ axis_min = min(loc.get(axis, 0) for loc in model.locations)
+ axis_max = max(loc.get(axis, 0) for loc in model.locations)
+
+ import numpy as np
+
+ X = np.arange(axis_min, axis_max, (axis_max - axis_min) / 100)
+ Y = []
+
+ for x in X:
+ loc = {axis: x}
+ v = model.interpolateFromMasters(loc, masterValues)
+ Y.append(v)
+
+ subplot = fig.add_subplot(111)
+ subplot.plot(X, Y, "-", **kwargs)
+
+
+def _plotModelFromMasters3D(model, masterValues, fig, **kwargs):
+ assert len(model.axisOrder) == 2
+ axis1, axis2 = model.axisOrder[0], model.axisOrder[1]
+
+ axis1_min = min(loc.get(axis1, 0) for loc in model.locations)
+ axis1_max = max(loc.get(axis1, 0) for loc in model.locations)
+ axis2_min = min(loc.get(axis2, 0) for loc in model.locations)
+ axis2_max = max(loc.get(axis2, 0) for loc in model.locations)
+
+ import numpy as np
+
+ X = np.arange(axis1_min, axis1_max, (axis1_max - axis1_min) / 100)
+ Y = np.arange(axis2_min, axis2_max, (axis2_max - axis2_min) / 100)
+ X, Y = np.meshgrid(X, Y)
+ Z = []
+
+ for row_x, row_y in zip(X, Y):
+ z_row = []
+ Z.append(z_row)
+ for x, y in zip(row_x, row_y):
+ loc = {axis1: x, axis2: y}
+ v = model.interpolateFromMasters(loc, masterValues)
+ z_row.append(v)
+ Z = np.array(Z)
+
+ axis3D = fig.add_subplot(111, projection="3d")
+ axis3D.plot_surface(X, Y, Z, **kwargs)
+
+
+def plotModelFromMasters(model, masterValues, fig, **kwargs):
+ """Plot a variation model and set of master values corresponding
+ to the locations to the model into a pyplot figure. Variation
+ model must have axisOrder of size 1 or 2."""
+ if len(model.axisOrder) == 1:
+ _plotModelFromMasters2D(model, masterValues, fig, **kwargs)
+ elif len(model.axisOrder) == 2:
+ _plotModelFromMasters3D(model, masterValues, fig, **kwargs)
+ else:
+ raise ValueError("Only 1 or 2 axes are supported")
def main(args=None):
- from fontTools import configLogger
-
- if args is None:
- args = sys.argv[1:]
-
- # configure the library logger (for >= WARNING)
- configLogger()
- # comment this out to enable debug messages from logger
- # log.setLevel(logging.DEBUG)
-
- if len(args) < 1:
- print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)
- print(" or")
- print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)
- sys.exit(1)
-
- fig = pyplot.figure()
- fig.set_tight_layout(True)
-
- if len(args) == 1 and args[0].endswith('.designspace'):
- doc = DesignSpaceDocument()
- doc.read(args[0])
- plotDocument(doc, fig)
- else:
- axes = [chr(c) for c in range(ord('A'), ord('Z')+1)]
- locs = [dict(zip(axes, (float(v) for v in s.split(',')))) for s in args]
- plotLocations(locs, fig)
-
- pyplot.show()
-
-if __name__ == '__main__':
- import sys
- sys.exit(main())
+ from fontTools import configLogger
+
+ if args is None:
+ args = sys.argv[1:]
+
+ # configure the library logger (for >= WARNING)
+ configLogger()
+ # comment this out to enable debug messages from logger
+ # log.setLevel(logging.DEBUG)
+
+ if len(args) < 1:
+ print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)
+ print(" or")
+ print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)
+ print(" or")
+ print(
+ "usage: fonttools varLib.plot location1=value1 location2=value2 ...",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ fig = pyplot.figure()
+ fig.set_tight_layout(True)
+
+ if len(args) == 1 and args[0].endswith(".designspace"):
+ doc = DesignSpaceDocument()
+ doc.read(args[0])
+ plotDocument(doc, fig)
+ else:
+ axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
+ if "=" not in args[0]:
+ locs = [dict(zip(axes, (float(v) for v in s.split(",")))) for s in args]
+ plotLocations(locs, fig)
+ else:
+ locations = []
+ masterValues = []
+ for arg in args:
+ loc, v = arg.split("=")
+ locations.append(dict(zip(axes, (float(v) for v in loc.split(",")))))
+ masterValues.append(float(v))
+ model = VariationModel(locations, axes[: len(locations[0])])
+ plotModelFromMasters(model, masterValues, fig)
+
+ pyplot.show()
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(main())
diff --git a/Lib/fontTools/varLib/varStore.py b/Lib/fontTools/varLib/varStore.py
index 2ffc6b13..78057690 100644
--- a/Lib/fontTools/varLib/varStore.py
+++ b/Lib/fontTools/varLib/varStore.py
@@ -1,10 +1,16 @@
from fontTools.misc.roundTools import noRound, otRound
+from fontTools.misc.intTools import bit_count
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib.models import supportScalar
-from fontTools.varLib.builder import (buildVarRegionList, buildVarStore,
- buildVarRegion, buildVarData)
+from fontTools.varLib.builder import (
+ buildVarRegionList,
+ buildVarStore,
+ buildVarRegion,
+ buildVarData,
+)
from functools import partial
from collections import defaultdict
+from heapq import heappush, heappop
NO_VARIATION_INDEX = ot.NO_VARIATION_INDEX
@@ -12,183 +18,188 @@ ot.VarStore.NO_VARIATION_INDEX = NO_VARIATION_INDEX
def _getLocationKey(loc):
- return tuple(sorted(loc.items(), key=lambda kv: kv[0]))
+ return tuple(sorted(loc.items(), key=lambda kv: kv[0]))
class OnlineVarStoreBuilder(object):
+ def __init__(self, axisTags):
+ self._axisTags = axisTags
+ self._regionMap = {}
+ self._regionList = buildVarRegionList([], axisTags)
+ self._store = buildVarStore(self._regionList, [])
+ self._data = None
+ self._model = None
+ self._supports = None
+ self._varDataIndices = {}
+ self._varDataCaches = {}
+ self._cache = {}
+
+ def setModel(self, model):
+ self.setSupports(model.supports)
+ self._model = model
+
+ def setSupports(self, supports):
+ self._model = None
+ self._supports = list(supports)
+ if not self._supports[0]:
+ del self._supports[0] # Drop base master support
+ self._cache = {}
+ self._data = None
+
+ def finish(self, optimize=True):
+ self._regionList.RegionCount = len(self._regionList.Region)
+ self._store.VarDataCount = len(self._store.VarData)
+ for data in self._store.VarData:
+ data.ItemCount = len(data.Item)
+ data.calculateNumShorts(optimize=optimize)
+ return self._store
+
+ def _add_VarData(self):
+ regionMap = self._regionMap
+ regionList = self._regionList
+
+ regions = self._supports
+ regionIndices = []
+ for region in regions:
+ key = _getLocationKey(region)
+ idx = regionMap.get(key)
+ if idx is None:
+ varRegion = buildVarRegion(region, self._axisTags)
+ idx = regionMap[key] = len(regionList.Region)
+ regionList.Region.append(varRegion)
+ regionIndices.append(idx)
+
+ # Check if we have one already...
+ key = tuple(regionIndices)
+ varDataIdx = self._varDataIndices.get(key)
+ if varDataIdx is not None:
+ self._outer = varDataIdx
+ self._data = self._store.VarData[varDataIdx]
+ self._cache = self._varDataCaches[key]
+ if len(self._data.Item) == 0xFFFF:
+ # This is full. Need new one.
+ varDataIdx = None
+
+ if varDataIdx is None:
+ self._data = buildVarData(regionIndices, [], optimize=False)
+ self._outer = len(self._store.VarData)
+ self._store.VarData.append(self._data)
+ self._varDataIndices[key] = self._outer
+ if key not in self._varDataCaches:
+ self._varDataCaches[key] = {}
+ self._cache = self._varDataCaches[key]
+
+ def storeMasters(self, master_values, *, round=round):
+ deltas = self._model.getDeltas(master_values, round=round)
+ base = deltas.pop(0)
+ return base, self.storeDeltas(deltas, round=noRound)
+
+ def storeDeltas(self, deltas, *, round=round):
+ deltas = [round(d) for d in deltas]
+ if len(deltas) == len(self._supports) + 1:
+ deltas = tuple(deltas[1:])
+ else:
+ assert len(deltas) == len(self._supports)
+ deltas = tuple(deltas)
+
+ varIdx = self._cache.get(deltas)
+ if varIdx is not None:
+ return varIdx
+
+ if not self._data:
+ self._add_VarData()
+ inner = len(self._data.Item)
+ if inner == 0xFFFF:
+ # Full array. Start new one.
+ self._add_VarData()
+ return self.storeDeltas(deltas)
+ self._data.addItem(deltas, round=noRound)
+
+ varIdx = (self._outer << 16) + inner
+ self._cache[deltas] = varIdx
+ return varIdx
- def __init__(self, axisTags):
- self._axisTags = axisTags
- self._regionMap = {}
- self._regionList = buildVarRegionList([], axisTags)
- self._store = buildVarStore(self._regionList, [])
- self._data = None
- self._model = None
- self._supports = None
- self._varDataIndices = {}
- self._varDataCaches = {}
- self._cache = {}
-
- def setModel(self, model):
- self.setSupports(model.supports)
- self._model = model
-
- def setSupports(self, supports):
- self._model = None
- self._supports = list(supports)
- if not self._supports[0]:
- del self._supports[0] # Drop base master support
- self._cache = {}
- self._data = None
-
- def finish(self, optimize=True):
- self._regionList.RegionCount = len(self._regionList.Region)
- self._store.VarDataCount = len(self._store.VarData)
- for data in self._store.VarData:
- data.ItemCount = len(data.Item)
- data.calculateNumShorts(optimize=optimize)
- return self._store
-
- def _add_VarData(self):
- regionMap = self._regionMap
- regionList = self._regionList
-
- regions = self._supports
- regionIndices = []
- for region in regions:
- key = _getLocationKey(region)
- idx = regionMap.get(key)
- if idx is None:
- varRegion = buildVarRegion(region, self._axisTags)
- idx = regionMap[key] = len(regionList.Region)
- regionList.Region.append(varRegion)
- regionIndices.append(idx)
-
- # Check if we have one already...
- key = tuple(regionIndices)
- varDataIdx = self._varDataIndices.get(key)
- if varDataIdx is not None:
- self._outer = varDataIdx
- self._data = self._store.VarData[varDataIdx]
- self._cache = self._varDataCaches[key]
- if len(self._data.Item) == 0xFFFF:
- # This is full. Need new one.
- varDataIdx = None
-
- if varDataIdx is None:
- self._data = buildVarData(regionIndices, [], optimize=False)
- self._outer = len(self._store.VarData)
- self._store.VarData.append(self._data)
- self._varDataIndices[key] = self._outer
- if key not in self._varDataCaches:
- self._varDataCaches[key] = {}
- self._cache = self._varDataCaches[key]
-
-
- def storeMasters(self, master_values):
- deltas = self._model.getDeltas(master_values, round=round)
- base = deltas.pop(0)
- return base, self.storeDeltas(deltas, round=noRound)
-
- def storeDeltas(self, deltas, *, round=round):
- deltas = [round(d) for d in deltas]
- if len(deltas) == len(self._supports) + 1:
- deltas = tuple(deltas[1:])
- else:
- assert len(deltas) == len(self._supports)
- deltas = tuple(deltas)
-
- varIdx = self._cache.get(deltas)
- if varIdx is not None:
- return varIdx
-
- if not self._data:
- self._add_VarData()
- inner = len(self._data.Item)
- if inner == 0xFFFF:
- # Full array. Start new one.
- self._add_VarData()
- return self.storeDeltas(deltas)
- self._data.addItem(deltas, round=noRound)
-
- varIdx = (self._outer << 16) + inner
- self._cache[deltas] = varIdx
- return varIdx
def VarData_addItem(self, deltas, *, round=round):
- deltas = [round(d) for d in deltas]
-
- countUs = self.VarRegionCount
- countThem = len(deltas)
- if countUs + 1 == countThem:
- deltas = tuple(deltas[1:])
- else:
- assert countUs == countThem, (countUs, countThem)
- deltas = tuple(deltas)
- self.Item.append(list(deltas))
- self.ItemCount = len(self.Item)
+ deltas = [round(d) for d in deltas]
+
+ countUs = self.VarRegionCount
+ countThem = len(deltas)
+ if countUs + 1 == countThem:
+ deltas = list(deltas[1:])
+ else:
+ assert countUs == countThem, (countUs, countThem)
+ deltas = list(deltas)
+ self.Item.append(deltas)
+ self.ItemCount = len(self.Item)
+
ot.VarData.addItem = VarData_addItem
+
def VarRegion_get_support(self, fvar_axes):
- return {
- fvar_axes[i].axisTag: (reg.StartCoord,reg.PeakCoord,reg.EndCoord)
- for i, reg in enumerate(self.VarRegionAxis)
- if reg.PeakCoord != 0
- }
+ return {
+ fvar_axes[i].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord)
+ for i, reg in enumerate(self.VarRegionAxis)
+ if reg.PeakCoord != 0
+ }
+
ot.VarRegion.get_support = VarRegion_get_support
+
def VarStore___bool__(self):
return bool(self.VarData)
+
ot.VarStore.__bool__ = VarStore___bool__
-class VarStoreInstancer(object):
- def __init__(self, varstore, fvar_axes, location={}):
- self.fvar_axes = fvar_axes
- assert varstore is None or varstore.Format == 1
- self._varData = varstore.VarData if varstore else []
- self._regions = varstore.VarRegionList.Region if varstore else []
- self.setLocation(location)
-
- def setLocation(self, location):
- self.location = dict(location)
- self._clearCaches()
-
- def _clearCaches(self):
- self._scalars = {}
-
- def _getScalar(self, regionIdx):
- scalar = self._scalars.get(regionIdx)
- if scalar is None:
- support = self._regions[regionIdx].get_support(self.fvar_axes)
- scalar = supportScalar(self.location, support)
- self._scalars[regionIdx] = scalar
- return scalar
-
- @staticmethod
- def interpolateFromDeltasAndScalars(deltas, scalars):
- delta = 0.
- for d,s in zip(deltas, scalars):
- if not s: continue
- delta += d * s
- return delta
-
- def __getitem__(self, varidx):
- major, minor = varidx >> 16, varidx & 0xFFFF
- if varidx == NO_VARIATION_INDEX: return 0.
- varData = self._varData
- scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]
- deltas = varData[major].Item[minor]
- return self.interpolateFromDeltasAndScalars(deltas, scalars)
-
- def interpolateFromDeltas(self, varDataIndex, deltas):
- varData = self._varData
- scalars = [self._getScalar(ri) for ri in
- varData[varDataIndex].VarRegionIndex]
- return self.interpolateFromDeltasAndScalars(deltas, scalars)
+class VarStoreInstancer(object):
+ def __init__(self, varstore, fvar_axes, location={}):
+ self.fvar_axes = fvar_axes
+ assert varstore is None or varstore.Format == 1
+ self._varData = varstore.VarData if varstore else []
+ self._regions = varstore.VarRegionList.Region if varstore else []
+ self.setLocation(location)
+
+ def setLocation(self, location):
+ self.location = dict(location)
+ self._clearCaches()
+
+ def _clearCaches(self):
+ self._scalars = {}
+
+ def _getScalar(self, regionIdx):
+ scalar = self._scalars.get(regionIdx)
+ if scalar is None:
+ support = self._regions[regionIdx].get_support(self.fvar_axes)
+ scalar = supportScalar(self.location, support)
+ self._scalars[regionIdx] = scalar
+ return scalar
+
+ @staticmethod
+ def interpolateFromDeltasAndScalars(deltas, scalars):
+ delta = 0.0
+ for d, s in zip(deltas, scalars):
+ if not s:
+ continue
+ delta += d * s
+ return delta
+
+ def __getitem__(self, varidx):
+ major, minor = varidx >> 16, varidx & 0xFFFF
+ if varidx == NO_VARIATION_INDEX:
+ return 0.0
+ varData = self._varData
+ scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]
+ deltas = varData[major].Item[minor]
+ return self.interpolateFromDeltasAndScalars(deltas, scalars)
+
+ def interpolateFromDeltas(self, varDataIndex, deltas):
+ varData = self._varData
+ scalars = [self._getScalar(ri) for ri in varData[varDataIndex].VarRegionIndex]
+ return self.interpolateFromDeltasAndScalars(deltas, scalars)
#
@@ -197,426 +208,520 @@ class VarStoreInstancer(object):
# retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed
# advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow.
-def VarStore_subset_varidxes(self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set()):
-
- # Sort out used varIdxes by major/minor.
- used = {}
- for varIdx in varIdxes:
- if varIdx == NO_VARIATION_INDEX:
- continue
- major = varIdx >> 16
- minor = varIdx & 0xFFFF
- d = used.get(major)
- if d is None:
- d = used[major] = set()
- d.add(minor)
- del varIdxes
-
- #
- # Subset VarData
- #
-
- varData = self.VarData
- newVarData = []
- varDataMap = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}
- for major,data in enumerate(varData):
- usedMinors = used.get(major)
- if usedMinors is None:
- continue
- newMajor = len(newVarData)
- newVarData.append(data)
-
- items = data.Item
- newItems = []
- if major == 0 and retainFirstMap:
- for minor in range(len(items)):
- newItems.append(items[minor] if minor in usedMinors else [0] * len(items[minor]))
- varDataMap[minor] = minor
- else:
- if major == 0:
- minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)
- else:
- minors = sorted(usedMinors)
- for minor in minors:
- newMinor = len(newItems)
- newItems.append(items[minor])
- varDataMap[(major<<16)+minor] = (newMajor<<16)+newMinor
-
- data.Item = newItems
- data.ItemCount = len(data.Item)
-
- data.calculateNumShorts(optimize=optimize)
-
- self.VarData = newVarData
- self.VarDataCount = len(self.VarData)
-
- self.prune_regions()
-
- return varDataMap
+
+def VarStore_subset_varidxes(
+ self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set()
+):
+ # Sort out used varIdxes by major/minor.
+ used = {}
+ for varIdx in varIdxes:
+ if varIdx == NO_VARIATION_INDEX:
+ continue
+ major = varIdx >> 16
+ minor = varIdx & 0xFFFF
+ d = used.get(major)
+ if d is None:
+ d = used[major] = set()
+ d.add(minor)
+ del varIdxes
+
+ #
+ # Subset VarData
+ #
+
+ varData = self.VarData
+ newVarData = []
+ varDataMap = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}
+ for major, data in enumerate(varData):
+ usedMinors = used.get(major)
+ if usedMinors is None:
+ continue
+ newMajor = len(newVarData)
+ newVarData.append(data)
+
+ items = data.Item
+ newItems = []
+ if major == 0 and retainFirstMap:
+ for minor in range(len(items)):
+ newItems.append(
+ items[minor] if minor in usedMinors else [0] * len(items[minor])
+ )
+ varDataMap[minor] = minor
+ else:
+ if major == 0:
+ minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)
+ else:
+ minors = sorted(usedMinors)
+ for minor in minors:
+ newMinor = len(newItems)
+ newItems.append(items[minor])
+ varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor
+
+ data.Item = newItems
+ data.ItemCount = len(data.Item)
+
+ data.calculateNumShorts(optimize=optimize)
+
+ self.VarData = newVarData
+ self.VarDataCount = len(self.VarData)
+
+ self.prune_regions()
+
+ return varDataMap
+
ot.VarStore.subset_varidxes = VarStore_subset_varidxes
+
def VarStore_prune_regions(self):
- """Remove unused VarRegions."""
- #
- # Subset VarRegionList
- #
-
- # Collect.
- usedRegions = set()
- for data in self.VarData:
- usedRegions.update(data.VarRegionIndex)
- # Subset.
- regionList = self.VarRegionList
- regions = regionList.Region
- newRegions = []
- regionMap = {}
- for i in sorted(usedRegions):
- regionMap[i] = len(newRegions)
- newRegions.append(regions[i])
- regionList.Region = newRegions
- regionList.RegionCount = len(regionList.Region)
- # Map.
- for data in self.VarData:
- data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex]
+ """Remove unused VarRegions."""
+ #
+ # Subset VarRegionList
+ #
+
+ # Collect.
+ usedRegions = set()
+ for data in self.VarData:
+ usedRegions.update(data.VarRegionIndex)
+ # Subset.
+ regionList = self.VarRegionList
+ regions = regionList.Region
+ newRegions = []
+ regionMap = {}
+ for i in sorted(usedRegions):
+ regionMap[i] = len(newRegions)
+ newRegions.append(regions[i])
+ regionList.Region = newRegions
+ regionList.RegionCount = len(regionList.Region)
+ # Map.
+ for data in self.VarData:
+ data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex]
+
ot.VarStore.prune_regions = VarStore_prune_regions
def _visit(self, func):
- """Recurse down from self, if type of an object is ot.Device,
- call func() on it. Works on otData-style classes."""
+ """Recurse down from self, if type of an object is ot.Device,
+ call func() on it. Works on otData-style classes."""
+
+ if type(self) == ot.Device:
+ func(self)
- if type(self) == ot.Device:
- func(self)
+ elif isinstance(self, list):
+ for that in self:
+ _visit(that, func)
- elif isinstance(self, list):
- for that in self:
- _visit(that, func)
+ elif hasattr(self, "getConverters") and not hasattr(self, "postRead"):
+ for conv in self.getConverters():
+ that = getattr(self, conv.name, None)
+ if that is not None:
+ _visit(that, func)
- elif hasattr(self, 'getConverters') and not hasattr(self, 'postRead'):
- for conv in self.getConverters():
- that = getattr(self, conv.name, None)
- if that is not None:
- _visit(that, func)
+ elif isinstance(self, ot.ValueRecord):
+ for that in self.__dict__.values():
+ _visit(that, func)
- elif isinstance(self, ot.ValueRecord):
- for that in self.__dict__.values():
- _visit(that, func)
def _Device_recordVarIdx(self, s):
- """Add VarIdx in this Device table (if any) to the set s."""
- if self.DeltaFormat == 0x8000:
- s.add((self.StartSize<<16)+self.EndSize)
+ """Add VarIdx in this Device table (if any) to the set s."""
+ if self.DeltaFormat == 0x8000:
+ s.add((self.StartSize << 16) + self.EndSize)
+
def Object_collect_device_varidxes(self, varidxes):
- adder = partial(_Device_recordVarIdx, s=varidxes)
- _visit(self, adder)
+ adder = partial(_Device_recordVarIdx, s=varidxes)
+ _visit(self, adder)
+
ot.GDEF.collect_device_varidxes = Object_collect_device_varidxes
ot.GPOS.collect_device_varidxes = Object_collect_device_varidxes
+
def _Device_mapVarIdx(self, mapping, done):
- """Map VarIdx in this Device table (if any) through mapping."""
- if id(self) in done:
- return
- done.add(id(self))
- if self.DeltaFormat == 0x8000:
- varIdx = mapping[(self.StartSize<<16)+self.EndSize]
- self.StartSize = varIdx >> 16
- self.EndSize = varIdx & 0xFFFF
+ """Map VarIdx in this Device table (if any) through mapping."""
+ if id(self) in done:
+ return
+ done.add(id(self))
+ if self.DeltaFormat == 0x8000:
+ varIdx = mapping[(self.StartSize << 16) + self.EndSize]
+ self.StartSize = varIdx >> 16
+ self.EndSize = varIdx & 0xFFFF
+
def Object_remap_device_varidxes(self, varidxes_map):
- mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set())
- _visit(self, mapper)
+ mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set())
+ _visit(self, mapper)
+
ot.GDEF.remap_device_varidxes = Object_remap_device_varidxes
ot.GPOS.remap_device_varidxes = Object_remap_device_varidxes
class _Encoding(object):
-
- def __init__(self, chars):
- self.chars = chars
- self.width = self._popcount(chars)
- self.overhead = self._characteristic_overhead(chars)
- self.items = set()
-
- def append(self, row):
- self.items.add(row)
-
- def extend(self, lst):
- self.items.update(lst)
-
- def get_room(self):
- """Maximum number of bytes that can be added to characteristic
- while still being beneficial to merge it into another one."""
- count = len(self.items)
- return max(0, (self.overhead - 1) // count - self.width)
- room = property(get_room)
-
- @property
- def gain(self):
- """Maximum possible byte gain from merging this into another
- characteristic."""
- count = len(self.items)
- return max(0, self.overhead - count * (self.width + 1))
-
- def sort_key(self):
- return self.width, self.chars
-
- def __len__(self):
- return len(self.items)
-
- def can_encode(self, chars):
- return not (chars & ~self.chars)
-
- def __sub__(self, other):
- return self._popcount(self.chars & ~other.chars)
-
- @staticmethod
- def _popcount(n):
- # Apparently this is the fastest native way to do it...
- # https://stackoverflow.com/a/9831671
- return bin(n).count('1')
-
- @staticmethod
- def _characteristic_overhead(chars):
- """Returns overhead in bytes of encoding this characteristic
- as a VarData."""
- c = 6
- while chars:
- if chars & 0b1111:
- c += 2
- chars >>= 4
- return c
-
- def _find_yourself_best_new_encoding(self, done_by_width):
- self.best_new_encoding = None
- for new_width in range(self.width+1, self.width+self.room+1):
- for new_encoding in done_by_width[new_width]:
- if new_encoding.can_encode(self.chars):
- break
- else:
- new_encoding = None
- self.best_new_encoding = new_encoding
+ def __init__(self, chars):
+ self.chars = chars
+ self.width = bit_count(chars)
+ self.columns = self._columns(chars)
+ self.overhead = self._characteristic_overhead(self.columns)
+ self.items = set()
+
+ def append(self, row):
+ self.items.add(row)
+
+ def extend(self, lst):
+ self.items.update(lst)
+
+ def get_room(self):
+ """Maximum number of bytes that can be added to characteristic
+ while still being beneficial to merge it into another one."""
+ count = len(self.items)
+ return max(0, (self.overhead - 1) // count - self.width)
+
+ room = property(get_room)
+
+ def get_gain(self):
+ """Maximum possible byte gain from merging this into another
+ characteristic."""
+ count = len(self.items)
+ return max(0, self.overhead - count)
+
+ gain = property(get_gain)
+
+ def gain_sort_key(self):
+ return self.gain, self.chars
+
+ def width_sort_key(self):
+ return self.width, self.chars
+
+ @staticmethod
+ def _characteristic_overhead(columns):
+ """Returns overhead in bytes of encoding this characteristic
+ as a VarData."""
+ c = 4 + 6 # 4 bytes for LOffset, 6 bytes for VarData header
+ c += bit_count(columns) * 2
+ return c
+
+ @staticmethod
+ def _columns(chars):
+ cols = 0
+ i = 1
+ while chars:
+ if chars & 0b1111:
+ cols |= i
+ chars >>= 4
+ i <<= 1
+ return cols
+
+ def gain_from_merging(self, other_encoding):
+ combined_chars = other_encoding.chars | self.chars
+ combined_width = bit_count(combined_chars)
+ combined_columns = self.columns | other_encoding.columns
+ combined_overhead = _Encoding._characteristic_overhead(combined_columns)
+ combined_gain = (
+ +self.overhead
+ + other_encoding.overhead
+ - combined_overhead
+ - (combined_width - self.width) * len(self.items)
+ - (combined_width - other_encoding.width) * len(other_encoding.items)
+ )
+ return combined_gain
class _EncodingDict(dict):
+ def __missing__(self, chars):
+ r = self[chars] = _Encoding(chars)
+ return r
+
+ def add_row(self, row):
+ chars = self._row_characteristics(row)
+ self[chars].append(row)
+
+ @staticmethod
+ def _row_characteristics(row):
+ """Returns encoding characteristics for a row."""
+ longWords = False
+
+ chars = 0
+ i = 1
+ for v in row:
+ if v:
+ chars += i
+ if not (-128 <= v <= 127):
+ chars += i * 0b0010
+ if not (-32768 <= v <= 32767):
+ longWords = True
+ break
+ i <<= 4
+
+ if longWords:
+ # Redo; only allow 2byte/4byte encoding
+ chars = 0
+ i = 1
+ for v in row:
+ if v:
+ chars += i * 0b0011
+ if not (-32768 <= v <= 32767):
+ chars += i * 0b1100
+ i <<= 4
+
+ return chars
+
+
+def VarStore_optimize(self, use_NO_VARIATION_INDEX=True, quantization=1):
+ """Optimize storage. Returns mapping from old VarIdxes to new ones."""
+
+ # Overview:
+ #
+ # For each VarData row, we first extend it with zeroes to have
+ # one column per region in VarRegionList. We then group the
+ # rows into _Encoding objects, by their "characteristic" bitmap.
+ # The characteristic bitmap is a binary number representing how
+ # many bytes each column of the data takes up to encode. Each
+ # column is encoded in four bits. For example, if a column has
+ # only values in the range -128..127, it would only have a single
+ # bit set in the characteristic bitmap for that column. If it has
+ # values in the range -32768..32767, it would have two bits set.
+ # The number of ones in the characteristic bitmap is the "width"
+ # of the encoding.
+ #
+ # Each encoding as such has a number of "active" (ie. non-zero)
+ # columns. The overhead of encoding the characteristic bitmap
+ # is 10 bytes, plus 2 bytes per active column.
+ #
+ # When an encoding is merged into another one, if the characteristic
+ # of the old encoding is a subset of the new one, then the overhead
+ # of the old encoding is completely eliminated. However, each row
+ # now would require more bytes to encode, to the tune of one byte
+ # per characteristic bit that is active in the new encoding but not
+ # in the old one. The number of bits that can be added to an encoding
+ # while still beneficial to merge it into another encoding is called
+ # the "room" for that encoding.
+ #
+ # The "gain" of an encodings is the maximum number of bytes we can
+ # save by merging it into another encoding. The "gain" of merging
+ # two encodings is how many bytes we save by doing so.
+ #
+ # High-level algorithm:
+ #
+ # - Each encoding has a minimal way to encode it. However, because
+ # of the overhead of encoding the characteristic bitmap, it may
+ # be beneficial to merge two encodings together, if there is
+ # gain in doing so. As such, we need to search for the best
+ # such successive merges.
+ #
+ # Algorithm:
+ #
+ # - Put all encodings into a "todo" list.
+ #
+ # - Sort todo list by decreasing gain (for stability).
+ #
+ # - Make a priority-queue of the gain from combining each two
+ # encodings in the todo list. The priority queue is sorted by
+ # decreasing gain. Only positive gains are included.
+ #
+ # - While priority queue is not empty:
+ # - Pop the first item from the priority queue,
+ # - Merge the two encodings it represents,
+ # - Remove the two encodings from the todo list,
+ # - Insert positive gains from combining the new encoding with
+ # all existing todo list items into the priority queue,
+ # - If a todo list item with the same characteristic bitmap as
+ # the new encoding exists, remove it from the todo list and
+ # merge it into the new encoding.
+ # - Insert the new encoding into the todo list,
+ #
+ # - Encode all remaining items in the todo list.
+ #
+ # The output is then sorted for stability, in the following way:
+ # - The VarRegionList of the input is kept intact.
+ # - All encodings are sorted before the main algorithm, by
+ # gain_key_sort(), which is a tuple of the following items:
+ # * The gain of the encoding.
+ # * The characteristic bitmap of the encoding, with higher-numbered
+ # columns compared first.
+ # - The VarData is sorted by width_sort_key(), which is a tuple
+ # of the following items:
+ # * The "width" of the encoding.
+ # * The characteristic bitmap of the encoding, with higher-numbered
+ # columns compared first.
+ # - Within each VarData, the items are sorted as vectors of numbers.
+ #
+ # Finally, each VarData is optimized to remove the empty columns and
+ # reorder columns as needed.
+
+ # TODO
+ # Check that no two VarRegions are the same; if they are, fold them.
+
+ n = len(self.VarRegionList.Region) # Number of columns
+ zeroes = [0] * n
+
+ front_mapping = {} # Map from old VarIdxes to full row tuples
+
+ encodings = _EncodingDict()
+
+ # Collect all items into a set of full rows (with lots of zeroes.)
+ for major, data in enumerate(self.VarData):
+ regionIndices = data.VarRegionIndex
+
+ for minor, item in enumerate(data.Item):
+ row = list(zeroes)
+
+ if quantization == 1:
+ for regionIdx, v in zip(regionIndices, item):
+ row[regionIdx] += v
+ else:
+ for regionIdx, v in zip(regionIndices, item):
+ row[regionIdx] += (
+ round(v / quantization) * quantization
+ ) # TODO https://github.com/fonttools/fonttools/pull/3126#discussion_r1205439785
+
+ row = tuple(row)
+
+ if use_NO_VARIATION_INDEX and not any(row):
+ front_mapping[(major << 16) + minor] = None
+ continue
+
+ encodings.add_row(row)
+ front_mapping[(major << 16) + minor] = row
+
+ # Prepare for the main algorithm.
+ todo = sorted(encodings.values(), key=_Encoding.gain_sort_key)
+ del encodings
+
+ # Repeatedly pick two best encodings to combine, and combine them.
+
+ heap = []
+ for i, encoding in enumerate(todo):
+ for j in range(i + 1, len(todo)):
+ other_encoding = todo[j]
+ combining_gain = encoding.gain_from_merging(other_encoding)
+ if combining_gain > 0:
+ heappush(heap, (-combining_gain, i, j))
+
+ while heap:
+ _, i, j = heappop(heap)
+ if todo[i] is None or todo[j] is None:
+ continue
+
+ encoding, other_encoding = todo[i], todo[j]
+ todo[i], todo[j] = None, None
+
+ # Combine the two encodings
+ combined_chars = other_encoding.chars | encoding.chars
+ combined_encoding = _Encoding(combined_chars)
+ combined_encoding.extend(encoding.items)
+ combined_encoding.extend(other_encoding.items)
+
+ for k, enc in enumerate(todo):
+ if enc is None:
+ continue
+
+ # In the unlikely event that the same encoding exists already,
+ # combine it.
+ if enc.chars == combined_chars:
+ combined_encoding.extend(enc.items)
+ todo[k] = None
+ continue
+
+ combining_gain = combined_encoding.gain_from_merging(enc)
+ if combining_gain > 0:
+ heappush(heap, (-combining_gain, k, len(todo)))
+
+ todo.append(combined_encoding)
+
+ encodings = [encoding for encoding in todo if encoding is not None]
+
+ # Assemble final store.
+ back_mapping = {} # Mapping from full rows to new VarIdxes
+ encodings.sort(key=_Encoding.width_sort_key)
+ self.VarData = []
+ for encoding in encodings:
+ items = sorted(encoding.items)
+
+ while items:
+ major = len(self.VarData)
+ data = ot.VarData()
+ self.VarData.append(data)
+ data.VarRegionIndex = range(n)
+ data.VarRegionCount = len(data.VarRegionIndex)
+
+ # Each major can only encode up to 0xFFFF entries.
+ data.Item, items = items[:0xFFFF], items[0xFFFF:]
+
+ for minor, item in enumerate(data.Item):
+ back_mapping[item] = (major << 16) + minor
+
+ # Compile final mapping.
+ varidx_map = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}
+ for k, v in front_mapping.items():
+ varidx_map[k] = back_mapping[v] if v is not None else NO_VARIATION_INDEX
+
+ # Recalculate things and go home.
+ self.VarRegionList.RegionCount = len(self.VarRegionList.Region)
+ self.VarDataCount = len(self.VarData)
+ for data in self.VarData:
+ data.ItemCount = len(data.Item)
+ data.optimize()
+
+ # Remove unused regions.
+ self.prune_regions()
+
+ return varidx_map
- def __missing__(self, chars):
- r = self[chars] = _Encoding(chars)
- return r
-
- def add_row(self, row):
- chars = self._row_characteristics(row)
- self[chars].append(row)
-
- @staticmethod
- def _row_characteristics(row):
- """Returns encoding characteristics for a row."""
- longWords = False
-
- chars = 0
- i = 1
- for v in row:
- if v:
- chars += i
- if not (-128 <= v <= 127):
- chars += i * 0b0010
- if not (-32768 <= v <= 32767):
- longWords = True
- break
- i <<= 4
-
- if longWords:
- # Redo; only allow 2byte/4byte encoding
- chars = 0
- i = 1
- for v in row:
- if v:
- chars += i * 0b0011
- if not (-32768 <= v <= 32767):
- chars += i * 0b1100
- i <<= 4
-
- return chars
-
-
-def VarStore_optimize(self, use_NO_VARIATION_INDEX=True):
- """Optimize storage. Returns mapping from old VarIdxes to new ones."""
-
- # TODO
- # Check that no two VarRegions are the same; if they are, fold them.
-
- n = len(self.VarRegionList.Region) # Number of columns
- zeroes = [0] * n
-
- front_mapping = {} # Map from old VarIdxes to full row tuples
-
- encodings = _EncodingDict()
-
- # Collect all items into a set of full rows (with lots of zeroes.)
- for major,data in enumerate(self.VarData):
- regionIndices = data.VarRegionIndex
-
- for minor,item in enumerate(data.Item):
-
- row = list(zeroes)
- for regionIdx,v in zip(regionIndices, item):
- row[regionIdx] += v
- row = tuple(row)
-
- if use_NO_VARIATION_INDEX and not any(row):
- front_mapping[(major<<16)+minor] = None
- continue
-
- encodings.add_row(row)
- front_mapping[(major<<16)+minor] = row
-
- # Separate encodings that have no gain (are decided) and those having
- # possible gain (possibly to be merged into others.)
- encodings = sorted(encodings.values(), key=_Encoding.__len__, reverse=True)
- done_by_width = defaultdict(list)
- todo = []
- for encoding in encodings:
- if not encoding.gain:
- done_by_width[encoding.width].append(encoding)
- else:
- todo.append(encoding)
-
- # For each encoding that is possibly to be merged, find the best match
- # in the decided encodings, and record that.
- todo.sort(key=_Encoding.get_room)
- for encoding in todo:
- encoding._find_yourself_best_new_encoding(done_by_width)
-
- # Walk through todo encodings, for each, see if merging it with
- # another todo encoding gains more than each of them merging with
- # their best decided encoding. If yes, merge them and add resulting
- # encoding back to todo queue. If not, move the enconding to decided
- # list. Repeat till done.
- while todo:
- encoding = todo.pop()
- best_idx = None
- best_gain = 0
- for i,other_encoding in enumerate(todo):
- combined_chars = other_encoding.chars | encoding.chars
- combined_width = _Encoding._popcount(combined_chars)
- combined_overhead = _Encoding._characteristic_overhead(combined_chars)
- combined_gain = (
- + encoding.overhead
- + other_encoding.overhead
- - combined_overhead
- - (combined_width - encoding.width) * len(encoding)
- - (combined_width - other_encoding.width) * len(other_encoding)
- )
- this_gain = 0 if encoding.best_new_encoding is None else (
- + encoding.overhead
- - (encoding.best_new_encoding.width - encoding.width) * len(encoding)
- )
- other_gain = 0 if other_encoding.best_new_encoding is None else (
- + other_encoding.overhead
- - (other_encoding.best_new_encoding.width - other_encoding.width) * len(other_encoding)
- )
- separate_gain = this_gain + other_gain
-
- if combined_gain > separate_gain:
- best_idx = i
- best_gain = combined_gain - separate_gain
-
- if best_idx is None:
- # Encoding is decided as is
- done_by_width[encoding.width].append(encoding)
- else:
- other_encoding = todo[best_idx]
- combined_chars = other_encoding.chars | encoding.chars
- combined_encoding = _Encoding(combined_chars)
- combined_encoding.extend(encoding.items)
- combined_encoding.extend(other_encoding.items)
- combined_encoding._find_yourself_best_new_encoding(done_by_width)
- del todo[best_idx]
- todo.append(combined_encoding)
-
- # Assemble final store.
- back_mapping = {} # Mapping from full rows to new VarIdxes
- encodings = sum(done_by_width.values(), [])
- encodings.sort(key=_Encoding.sort_key)
- self.VarData = []
- for major,encoding in enumerate(encodings):
- data = ot.VarData()
- self.VarData.append(data)
- data.VarRegionIndex = range(n)
- data.VarRegionCount = len(data.VarRegionIndex)
- data.Item = sorted(encoding.items)
- for minor,item in enumerate(data.Item):
- back_mapping[item] = (major<<16)+minor
-
- # Compile final mapping.
- varidx_map = {NO_VARIATION_INDEX:NO_VARIATION_INDEX}
- for k,v in front_mapping.items():
- varidx_map[k] = back_mapping[v] if v is not None else NO_VARIATION_INDEX
-
- # Remove unused regions.
- self.prune_regions()
-
- # Recalculate things and go home.
- self.VarRegionList.RegionCount = len(self.VarRegionList.Region)
- self.VarDataCount = len(self.VarData)
- for data in self.VarData:
- data.ItemCount = len(data.Item)
- data.optimize()
-
- return varidx_map
ot.VarStore.optimize = VarStore_optimize
def main(args=None):
- """Optimize a font's GDEF variation store"""
- from argparse import ArgumentParser
- from fontTools import configLogger
- from fontTools.ttLib import TTFont
- from fontTools.ttLib.tables.otBase import OTTableWriter
+ """Optimize a font's GDEF variation store"""
+ from argparse import ArgumentParser
+ from fontTools import configLogger
+ from fontTools.ttLib import TTFont
+ from fontTools.ttLib.tables.otBase import OTTableWriter
- parser = ArgumentParser(prog='varLib.varStore', description= main.__doc__)
- parser.add_argument('fontfile')
- parser.add_argument('outfile', nargs='?')
- options = parser.parse_args(args)
+ parser = ArgumentParser(prog="varLib.varStore", description=main.__doc__)
+ parser.add_argument("--quantization", type=int, default=1)
+ parser.add_argument("fontfile")
+ parser.add_argument("outfile", nargs="?")
+ options = parser.parse_args(args)
- # TODO: allow user to configure logging via command-line options
- configLogger(level="INFO")
+ # TODO: allow user to configure logging via command-line options
+ configLogger(level="INFO")
- fontfile = options.fontfile
- outfile = options.outfile
+ quantization = options.quantization
+ fontfile = options.fontfile
+ outfile = options.outfile
- font = TTFont(fontfile)
- gdef = font['GDEF']
- store = gdef.table.VarStore
+ font = TTFont(fontfile)
+ gdef = font["GDEF"]
+ store = gdef.table.VarStore
- writer = OTTableWriter()
- store.compile(writer, font)
- size = len(writer.getAllData())
- print("Before: %7d bytes" % size)
+ writer = OTTableWriter()
+ store.compile(writer, font)
+ size = len(writer.getAllData())
+ print("Before: %7d bytes" % size)
- varidx_map = store.optimize()
+ varidx_map = store.optimize(quantization=quantization)
- gdef.table.remap_device_varidxes(varidx_map)
- if 'GPOS' in font:
- font['GPOS'].table.remap_device_varidxes(varidx_map)
+ writer = OTTableWriter()
+ store.compile(writer, font)
+ size = len(writer.getAllData())
+ print("After: %7d bytes" % size)
- writer = OTTableWriter()
- store.compile(writer, font)
- size = len(writer.getAllData())
- print("After: %7d bytes" % size)
+ if outfile is not None:
+ gdef.table.remap_device_varidxes(varidx_map)
+ if "GPOS" in font:
+ font["GPOS"].table.remap_device_varidxes(varidx_map)
- if outfile is not None:
- font.save(outfile)
+ font.save(outfile)
if __name__ == "__main__":
- import sys
- if len(sys.argv) > 1:
- sys.exit(main())
- import doctest
- sys.exit(doctest.testmod().failed)
+ import sys
+
+ if len(sys.argv) > 1:
+ sys.exit(main())
+ import doctest
+
+ sys.exit(doctest.testmod().failed)
diff --git a/Lib/fontTools/voltLib/ast.py b/Lib/fontTools/voltLib/ast.py
index 3a1f4a07..82c2cca8 100644
--- a/Lib/fontTools/voltLib/ast.py
+++ b/Lib/fontTools/voltLib/ast.py
@@ -11,15 +11,15 @@ class Pos(NamedTuple):
dy_adjust_by: dict
def __str__(self):
- res = ' POS'
- for attr in ('adv', 'dx', 'dy'):
+ res = " POS"
+ for attr in ("adv", "dx", "dy"):
value = getattr(self, attr)
if value is not None:
- res += f' {attr.upper()} {value}'
- adjust_by = getattr(self, f'{attr}_adjust_by', {})
+ res += f" {attr.upper()} {value}"
+ adjust_by = getattr(self, f"{attr}_adjust_by", {})
for size, adjustment in adjust_by.items():
- res += f' ADJUST_BY {adjustment} AT {size}'
- res += ' END_POS'
+ res += f" ADJUST_BY {adjustment} AT {size}"
+ res += " END_POS"
return res
@@ -52,7 +52,7 @@ class VoltFile(Statement):
s.build(builder)
def __str__(self):
- return '\n' + '\n'.join(str(s) for s in self.statements) + ' END\n'
+ return "\n" + "\n".join(str(s) for s in self.statements) + " END\n"
class GlyphDefinition(Statement):
@@ -68,15 +68,15 @@ class GlyphDefinition(Statement):
res = f'DEF_GLYPH "{self.name}" ID {self.id}'
if self.unicode is not None:
if len(self.unicode) > 1:
- unicodes = ','.join(f'U+{u:04X}' for u in self.unicode)
+ unicodes = ",".join(f"U+{u:04X}" for u in self.unicode)
res += f' UNICODEVALUES "{unicodes}"'
else:
- res += f' UNICODE {self.unicode[0]}'
+ res += f" UNICODE {self.unicode[0]}"
if self.type is not None:
- res += f' TYPE {self.type}'
+ res += f" TYPE {self.type}"
if self.components is not None:
- res += f' COMPONENTS {self.components}'
- res += ' END_GLYPH'
+ res += f" COMPONENTS {self.components}"
+ res += " END_GLYPH"
return res
@@ -90,8 +90,8 @@ class GroupDefinition(Statement):
def glyphSet(self, groups=None):
if groups is not None and self.name in groups:
raise VoltLibError(
- 'Group "%s" contains itself.' % (self.name),
- self.location)
+ 'Group "%s" contains itself.' % (self.name), self.location
+ )
if self.glyphs_ is None:
if groups is None:
groups = set({self.name})
@@ -101,12 +101,13 @@ class GroupDefinition(Statement):
return self.glyphs_
def __str__(self):
- enum = self.enum and str(self.enum) or ''
+ enum = self.enum and str(self.enum) or ""
return f'DEF_GROUP "{self.name}"\n{enum}\nEND_GROUP'
class GlyphName(Expression):
"""A single glyph name, such as cedilla."""
+
def __init__(self, glyph, location=None):
Expression.__init__(self, location)
self.glyph = glyph
@@ -120,6 +121,7 @@ class GlyphName(Expression):
class Enum(Expression):
"""An enum"""
+
def __init__(self, enum, location=None):
Expression.__init__(self, location)
self.enum = enum
@@ -138,12 +140,13 @@ class Enum(Expression):
return tuple(glyphs)
def __str__(self):
- enum = ''.join(str(e) for e in self.enum)
- return f' ENUM{enum} END_ENUM'
+ enum = "".join(str(e) for e in self.enum)
+ return f" ENUM{enum} END_ENUM"
class GroupName(Expression):
"""A glyph group"""
+
def __init__(self, group, parser, location=None):
Expression.__init__(self, location)
self.group = group
@@ -156,8 +159,8 @@ class GroupName(Expression):
return self.glyphs_
else:
raise VoltLibError(
- 'Group "%s" is used but undefined.' % (self.group),
- self.location)
+ 'Group "%s" is used but undefined.' % (self.group), self.location
+ )
def __str__(self):
return f' GROUP "{self.group}"'
@@ -165,6 +168,7 @@ class GroupName(Expression):
class Range(Expression):
"""A glyph range"""
+
def __init__(self, start, end, parser, location=None):
Expression.__init__(self, location)
self.start = start
@@ -186,13 +190,13 @@ class ScriptDefinition(Statement):
self.langs = langs
def __str__(self):
- res = 'DEF_SCRIPT'
+ res = "DEF_SCRIPT"
if self.name is not None:
res += f' NAME "{self.name}"'
res += f' TAG "{self.tag}"\n\n'
for lang in self.langs:
- res += f'{lang}'
- res += 'END_SCRIPT'
+ res += f"{lang}"
+ res += "END_SCRIPT"
return res
@@ -204,13 +208,13 @@ class LangSysDefinition(Statement):
self.features = features
def __str__(self):
- res = 'DEF_LANGSYS'
+ res = "DEF_LANGSYS"
if self.name is not None:
res += f' NAME "{self.name}"'
res += f' TAG "{self.tag}"\n\n'
for feature in self.features:
- res += f'{feature}'
- res += 'END_LANGSYS\n'
+ res += f"{feature}"
+ res += "END_LANGSYS\n"
return res
@@ -223,15 +227,26 @@ class FeatureDefinition(Statement):
def __str__(self):
res = f'DEF_FEATURE NAME "{self.name}" TAG "{self.tag}"\n'
- res += ' ' + ' '.join(f'LOOKUP "{l}"' for l in self.lookups) + '\n'
- res += 'END_FEATURE\n'
+ res += " " + " ".join(f'LOOKUP "{l}"' for l in self.lookups) + "\n"
+ res += "END_FEATURE\n"
return res
class LookupDefinition(Statement):
- def __init__(self, name, process_base, process_marks, mark_glyph_set,
- direction, reversal, comments, context, sub, pos,
- location=None):
+ def __init__(
+ self,
+ name,
+ process_base,
+ process_marks,
+ mark_glyph_set,
+ direction,
+ reversal,
+ comments,
+ context,
+ sub,
+ pos,
+ location=None,
+ ):
Statement.__init__(self, location)
self.name = name
self.process_base = process_base
@@ -248,30 +263,30 @@ class LookupDefinition(Statement):
res = f'DEF_LOOKUP "{self.name}"'
res += f' {self.process_base and "PROCESS_BASE" or "SKIP_BASE"}'
if self.process_marks:
- res += ' PROCESS_MARKS '
+ res += " PROCESS_MARKS "
if self.mark_glyph_set:
res += f'MARK_GLYPH_SET "{self.mark_glyph_set}"'
elif isinstance(self.process_marks, str):
res += f'"{self.process_marks}"'
else:
- res += 'ALL'
+ res += "ALL"
else:
- res += ' SKIP_MARKS'
+ res += " SKIP_MARKS"
if self.direction is not None:
- res += f' DIRECTION {self.direction}'
+ res += f" DIRECTION {self.direction}"
if self.reversal:
- res += ' REVERSAL'
+ res += " REVERSAL"
if self.comments is not None:
- comments = self.comments.replace('\n', r'\n')
+ comments = self.comments.replace("\n", r"\n")
res += f'\nCOMMENTS "{comments}"'
if self.context:
- res += '\n' + '\n'.join(str(c) for c in self.context)
+ res += "\n" + "\n".join(str(c) for c in self.context)
else:
- res += '\nIN_CONTEXT\nEND_CONTEXT'
+ res += "\nIN_CONTEXT\nEND_CONTEXT"
if self.sub:
- res += f'\n{self.sub}'
+ res += f"\n{self.sub}"
if self.pos:
- res += f'\n{self.pos}'
+ res += f"\n{self.pos}"
return res
@@ -281,12 +296,12 @@ class SubstitutionDefinition(Statement):
self.mapping = mapping
def __str__(self):
- res = 'AS_SUBSTITUTION\n'
+ res = "AS_SUBSTITUTION\n"
for src, dst in self.mapping.items():
- src = ''.join(str(s) for s in src)
- dst = ''.join(str(d) for d in dst)
- res += f'SUB{src}\nWITH{dst}\nEND_SUB\n'
- res += 'END_SUBSTITUTION'
+ src = "".join(str(s) for s in src)
+ dst = "".join(str(d) for d in dst)
+ res += f"SUB{src}\nWITH{dst}\nEND_SUB\n"
+ res += "END_SUBSTITUTION"
return res
@@ -313,12 +328,12 @@ class PositionAttachDefinition(Statement):
self.coverage_to = coverage_to
def __str__(self):
- coverage = ''.join(str(c) for c in self.coverage)
- res = f'AS_POSITION\nATTACH{coverage}\nTO'
+ coverage = "".join(str(c) for c in self.coverage)
+ res = f"AS_POSITION\nATTACH{coverage}\nTO"
for coverage, anchor in self.coverage_to:
- coverage = ''.join(str(c) for c in coverage)
+ coverage = "".join(str(c) for c in coverage)
res += f'{coverage} AT ANCHOR "{anchor}"'
- res += '\nEND_ATTACH\nEND_POSITION'
+ res += "\nEND_ATTACH\nEND_POSITION"
return res
@@ -329,14 +344,14 @@ class PositionAttachCursiveDefinition(Statement):
self.coverages_enter = coverages_enter
def __str__(self):
- res = 'AS_POSITION\nATTACH_CURSIVE'
+ res = "AS_POSITION\nATTACH_CURSIVE"
for coverage in self.coverages_exit:
- coverage = ''.join(str(c) for c in coverage)
- res += f'\nEXIT {coverage}'
+ coverage = "".join(str(c) for c in coverage)
+ res += f"\nEXIT {coverage}"
for coverage in self.coverages_enter:
- coverage = ''.join(str(c) for c in coverage)
- res += f'\nENTER {coverage}'
- res += '\nEND_ATTACH\nEND_POSITION'
+ coverage = "".join(str(c) for c in coverage)
+ res += f"\nENTER {coverage}"
+ res += "\nEND_ATTACH\nEND_POSITION"
return res
@@ -348,18 +363,18 @@ class PositionAdjustPairDefinition(Statement):
self.adjust_pair = adjust_pair
def __str__(self):
- res = 'AS_POSITION\nADJUST_PAIR\n'
+ res = "AS_POSITION\nADJUST_PAIR\n"
for coverage in self.coverages_1:
- coverage = ' '.join(str(c) for c in coverage)
- res += f' FIRST {coverage}'
- res += '\n'
+ coverage = " ".join(str(c) for c in coverage)
+ res += f" FIRST {coverage}"
+ res += "\n"
for coverage in self.coverages_2:
- coverage = ' '.join(str(c) for c in coverage)
- res += f' SECOND {coverage}'
- res += '\n'
+ coverage = " ".join(str(c) for c in coverage)
+ res += f" SECOND {coverage}"
+ res += "\n"
for (id_1, id_2), (pos_1, pos_2) in self.adjust_pair.items():
- res += f' {id_1} {id_2} BY{pos_1}{pos_2}\n'
- res += '\nEND_ADJUST\nEND_POSITION'
+ res += f" {id_1} {id_2} BY{pos_1}{pos_2}\n"
+ res += "\nEND_ADJUST\nEND_POSITION"
return res
@@ -369,15 +384,14 @@ class PositionAdjustSingleDefinition(Statement):
self.adjust_single = adjust_single
def __str__(self):
- res = 'AS_POSITION\nADJUST_SINGLE'
+ res = "AS_POSITION\nADJUST_SINGLE"
for coverage, pos in self.adjust_single:
- coverage = ''.join(str(c) for c in coverage)
- res += f'{coverage} BY{pos}'
- res += '\nEND_ADJUST\nEND_POSITION'
+ coverage = "".join(str(c) for c in coverage)
+ res += f"{coverage} BY{pos}"
+ res += "\nEND_ADJUST\nEND_POSITION"
return res
-
class ContextDefinition(Statement):
def __init__(self, ex_or_in, left=None, right=None, location=None):
Statement.__init__(self, location)
@@ -386,20 +400,19 @@ class ContextDefinition(Statement):
self.right = right if right is not None else []
def __str__(self):
- res = self.ex_or_in + '\n'
+ res = self.ex_or_in + "\n"
for coverage in self.left:
- coverage = ''.join(str(c) for c in coverage)
- res += f' LEFT{coverage}\n'
+ coverage = "".join(str(c) for c in coverage)
+ res += f" LEFT{coverage}\n"
for coverage in self.right:
- coverage = ''.join(str(c) for c in coverage)
- res += f' RIGHT{coverage}\n'
- res += 'END_CONTEXT'
+ coverage = "".join(str(c) for c in coverage)
+ res += f" RIGHT{coverage}\n"
+ res += "END_CONTEXT"
return res
class AnchorDefinition(Statement):
- def __init__(self, name, gid, glyph_name, component, locked,
- pos, location=None):
+ def __init__(self, name, gid, glyph_name, component, locked, pos, location=None):
Statement.__init__(self, location)
self.name = name
self.gid = gid
@@ -409,13 +422,15 @@ class AnchorDefinition(Statement):
self.pos = pos
def __str__(self):
- locked = self.locked and ' LOCKED' or ''
- return (f'DEF_ANCHOR "{self.name}"'
- f' ON {self.gid}'
- f' GLYPH {self.glyph_name}'
- f' COMPONENT {self.component}'
- f'{locked}'
- f' AT {self.pos} END_ANCHOR')
+ locked = self.locked and " LOCKED" or ""
+ return (
+ f'DEF_ANCHOR "{self.name}"'
+ f" ON {self.gid}"
+ f" GLYPH {self.glyph_name}"
+ f" COMPONENT {self.component}"
+ f"{locked}"
+ f" AT {self.pos} END_ANCHOR"
+ )
class SettingDefinition(Statement):
@@ -426,8 +441,8 @@ class SettingDefinition(Statement):
def __str__(self):
if self.value is True:
- return f'{self.name}'
+ return f"{self.name}"
if isinstance(self.value, (tuple, list)):
value = " ".join(str(v) for v in self.value)
- return f'{self.name} {value}'
- return f'{self.name} {self.value}'
+ return f"{self.name} {value}"
+ return f"{self.name} {self.value}"
diff --git a/Lib/fontTools/voltLib/error.py b/Lib/fontTools/voltLib/error.py
index a905de1e..c51d3b8f 100644
--- a/Lib/fontTools/voltLib/error.py
+++ b/Lib/fontTools/voltLib/error.py
@@ -1,5 +1,3 @@
-
-
class VoltLibError(Exception):
def __init__(self, message, location):
Exception.__init__(self, message)
diff --git a/Lib/fontTools/voltLib/lexer.py b/Lib/fontTools/voltLib/lexer.py
index bc982a7a..706b21bb 100644
--- a/Lib/fontTools/voltLib/lexer.py
+++ b/Lib/fontTools/voltLib/lexer.py
@@ -1,5 +1,6 @@
from fontTools.voltLib.error import VoltLibError
+
class Lexer(object):
NUMBER = "NUMBER"
STRING = "STRING"
@@ -13,8 +14,9 @@ class Lexer(object):
CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz"
CHAR_UNDERSCORE_ = "_"
CHAR_PERIOD_ = "."
- CHAR_NAME_START_ = CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + \
- CHAR_UNDERSCORE_
+ CHAR_NAME_START_ = (
+ CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + CHAR_UNDERSCORE_
+ )
CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_
def __init__(self, text, filename):
@@ -58,7 +60,7 @@ class Lexer(object):
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
if cur_char == "\r":
- self.pos_ += (2 if next_char == "\n" else 1)
+ self.pos_ += 2 if next_char == "\n" else 1
self.line_ += 1
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
@@ -67,24 +69,22 @@ class Lexer(object):
self.scan_until_('"\r\n')
if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
self.pos_ += 1
- return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)
+ return (Lexer.STRING, text[start + 1 : self.pos_ - 1], location)
else:
- raise VoltLibError("Expected '\"' to terminate string",
- location)
+ raise VoltLibError("Expected '\"' to terminate string", location)
if cur_char in Lexer.CHAR_NAME_START_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
- token = text[start:self.pos_]
+ token = text[start : self.pos_]
return (Lexer.NAME, token, location)
if cur_char in Lexer.CHAR_DIGIT_:
self.scan_over_(Lexer.CHAR_DIGIT_)
- return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
+ return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_DIGIT_)
- return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
- raise VoltLibError("Unexpected character: '%s'" % cur_char,
- location)
+ return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
+ raise VoltLibError("Unexpected character: '%s'" % cur_char, location)
def scan_over_(self, valid):
p = self.pos_
diff --git a/Lib/fontTools/voltLib/parser.py b/Lib/fontTools/voltLib/parser.py
index 0e68d539..1fa6b11d 100644
--- a/Lib/fontTools/voltLib/parser.py
+++ b/Lib/fontTools/voltLib/parser.py
@@ -55,7 +55,8 @@ class Parser(object):
else:
raise VoltLibError(
"Expected " + ", ".join(sorted(PARSE_FUNCS.keys())),
- self.cur_token_location_)
+ self.cur_token_location_,
+ )
return self.doc_
def parse_def_glyph_(self):
@@ -71,8 +72,7 @@ class Parser(object):
self.expect_keyword_("UNICODE")
gunicode = [self.expect_number_()]
if gunicode[0] < 0:
- raise VoltLibError("Invalid glyph UNICODE",
- self.cur_token_location_)
+ raise VoltLibError("Invalid glyph UNICODE", self.cur_token_location_)
elif self.next_token_ == "UNICODEVALUES":
self.expect_keyword_("UNICODEVALUES")
gunicode = self.parse_unicode_values_()
@@ -88,12 +88,11 @@ class Parser(object):
self.expect_keyword_("END_GLYPH")
if self.glyphs_.resolve(name) is not None:
raise VoltLibError(
- 'Glyph "%s" (gid %i) already defined' % (name, gid),
- location
+ 'Glyph "%s" (gid %i) already defined' % (name, gid), location
)
- def_glyph = ast.GlyphDefinition(name, gid,
- gunicode, gtype, components,
- location=location)
+ def_glyph = ast.GlyphDefinition(
+ name, gid, gunicode, gtype, components, location=location
+ )
self.glyphs_.define(name, def_glyph)
return def_glyph
@@ -108,11 +107,10 @@ class Parser(object):
if self.groups_.resolve(name) is not None:
raise VoltLibError(
'Glyph group "%s" already defined, '
- 'group names are case insensitive' % name,
- location
+ "group names are case insensitive" % name,
+ location,
)
- def_group = ast.GroupDefinition(name, enum,
- location=location)
+ def_group = ast.GroupDefinition(name, enum, location=location)
self.groups_.define(name, def_group)
return def_group
@@ -128,8 +126,8 @@ class Parser(object):
if self.scripts_.resolve(tag) is not None:
raise VoltLibError(
'Script "%s" already defined, '
- 'script tags are case insensitive' % tag,
- location
+ "script tags are case insensitive" % tag,
+ location,
)
self.langs_.enter_scope()
langs = []
@@ -140,8 +138,8 @@ class Parser(object):
if self.langs_.resolve(lang.tag) is not None:
raise VoltLibError(
'Language "%s" already defined in script "%s", '
- 'language tags are case insensitive' % (lang.tag, tag),
- location
+ "language tags are case insensitive" % (lang.tag, tag),
+ location,
)
self.langs_.define(lang.tag, lang)
langs.append(lang)
@@ -166,8 +164,7 @@ class Parser(object):
feature = self.parse_feature_()
self.expect_keyword_("END_FEATURE")
features.append(feature)
- def_langsys = ast.LangSysDefinition(name, tag, features,
- location=location)
+ def_langsys = ast.LangSysDefinition(name, tag, features, location=location)
return def_langsys
def parse_feature_(self):
@@ -183,8 +180,7 @@ class Parser(object):
self.expect_keyword_("LOOKUP")
lookup = self.expect_string_()
lookups.append(lookup)
- feature = ast.FeatureDefinition(name, tag, lookups,
- location=location)
+ feature = ast.FeatureDefinition(name, tag, lookups, location=location)
return feature
def parse_def_lookup_(self):
@@ -193,14 +189,13 @@ class Parser(object):
name = self.expect_string_()
if not name[0].isalpha():
raise VoltLibError(
- 'Lookup name "%s" must start with a letter' % name,
- location
+ 'Lookup name "%s" must start with a letter' % name, location
)
if self.lookups_.resolve(name) is not None:
raise VoltLibError(
'Lookup "%s" already defined, '
- 'lookup names are case insensitive' % name,
- location
+ "lookup names are case insensitive" % name,
+ location,
)
process_base = True
if self.next_token_ == "PROCESS_BASE":
@@ -226,7 +221,8 @@ class Parser(object):
raise VoltLibError(
"Expected ALL, NONE, MARK_GLYPH_SET or an ID. "
"Got %s" % (self.next_token_type_),
- location)
+ location,
+ )
elif self.next_token_ == "SKIP_MARKS":
self.advance_lexer_()
process_marks = False
@@ -242,7 +238,7 @@ class Parser(object):
comments = None
if self.next_token_ == "COMMENTS":
self.expect_keyword_("COMMENTS")
- comments = self.expect_string_().replace(r'\n', '\n')
+ comments = self.expect_string_().replace(r"\n", "\n")
context = []
while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"):
context = self.parse_context_()
@@ -255,12 +251,22 @@ class Parser(object):
pos = self.parse_position_()
else:
raise VoltLibError(
- "Expected AS_SUBSTITUTION or AS_POSITION. "
- "Got %s" % (as_pos_or_sub),
- location)
+ "Expected AS_SUBSTITUTION or AS_POSITION. " "Got %s" % (as_pos_or_sub),
+ location,
+ )
def_lookup = ast.LookupDefinition(
- name, process_base, process_marks, mark_glyph_set, direction,
- reversal, comments, context, sub, pos, location=location)
+ name,
+ process_base,
+ process_marks,
+ mark_glyph_set,
+ direction,
+ reversal,
+ comments,
+ context,
+ sub,
+ pos,
+ location=location,
+ )
self.lookups_.define(name, def_lookup)
return def_lookup
@@ -283,8 +289,9 @@ class Parser(object):
else:
right.append(coverage)
self.expect_keyword_("END_CONTEXT")
- context = ast.ContextDefinition(ex_or_in, left,
- right, location=location)
+ context = ast.ContextDefinition(
+ ex_or_in, left, right, location=location
+ )
contexts.append(context)
else:
self.expect_keyword_("END_CONTEXT")
@@ -307,36 +314,32 @@ class Parser(object):
max_src = max([len(cov) for cov in src])
max_dest = max([len(cov) for cov in dest])
# many to many or mixed is invalid
- if ((max_src > 1 and max_dest > 1) or
- (reversal and (max_src > 1 or max_dest > 1))):
- raise VoltLibError(
- "Invalid substitution type",
- location)
+ if (max_src > 1 and max_dest > 1) or (
+ reversal and (max_src > 1 or max_dest > 1)
+ ):
+ raise VoltLibError("Invalid substitution type", location)
mapping = dict(zip(tuple(src), tuple(dest)))
if max_src == 1 and max_dest == 1:
if reversal:
sub = ast.SubstitutionReverseChainingSingleDefinition(
- mapping, location=location)
+ mapping, location=location
+ )
else:
- sub = ast.SubstitutionSingleDefinition(mapping,
- location=location)
+ sub = ast.SubstitutionSingleDefinition(mapping, location=location)
elif max_src == 1 and max_dest > 1:
- sub = ast.SubstitutionMultipleDefinition(mapping,
- location=location)
+ sub = ast.SubstitutionMultipleDefinition(mapping, location=location)
elif max_src > 1 and max_dest == 1:
- sub = ast.SubstitutionLigatureDefinition(mapping,
- location=location)
+ sub = ast.SubstitutionLigatureDefinition(mapping, location=location)
return sub
def parse_position_(self):
assert self.is_cur_keyword_("AS_POSITION")
location = self.cur_token_location_
pos_type = self.expect_name_()
- if pos_type not in (
- "ATTACH", "ATTACH_CURSIVE", "ADJUST_PAIR", "ADJUST_SINGLE"):
+ if pos_type not in ("ATTACH", "ATTACH_CURSIVE", "ADJUST_PAIR", "ADJUST_SINGLE"):
raise VoltLibError(
- "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE",
- location)
+ "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE", location
+ )
if pos_type == "ATTACH":
position = self.parse_attach_()
elif pos_type == "ATTACH_CURSIVE":
@@ -362,7 +365,8 @@ class Parser(object):
coverage_to.append((cov, anchor_name))
self.expect_keyword_("END_ATTACH")
position = ast.PositionAttachDefinition(
- coverage, coverage_to, location=location)
+ coverage, coverage_to, location=location
+ )
return position
def parse_attach_cursive_(self):
@@ -378,7 +382,8 @@ class Parser(object):
coverages_enter.append(self.parse_coverage_())
self.expect_keyword_("END_ATTACH")
position = ast.PositionAttachCursiveDefinition(
- coverages_exit, coverages_enter, location=location)
+ coverages_exit, coverages_enter, location=location
+ )
return position
def parse_adjust_pair_(self):
@@ -404,7 +409,8 @@ class Parser(object):
adjust_pair[(id_1, id_2)] = (pos_1, pos_2)
self.expect_keyword_("END_ADJUST")
position = ast.PositionAdjustPairDefinition(
- coverages_1, coverages_2, adjust_pair, location=location)
+ coverages_1, coverages_2, adjust_pair, location=location
+ )
return position
def parse_adjust_single_(self):
@@ -417,8 +423,7 @@ class Parser(object):
pos = self.parse_pos_()
adjust_single.append((coverages, pos))
self.expect_keyword_("END_ADJUST")
- position = ast.PositionAdjustSingleDefinition(
- adjust_single, location=location)
+ position = ast.PositionAdjustSingleDefinition(adjust_single, location=location)
return position
def parse_def_anchor_(self):
@@ -437,8 +442,8 @@ class Parser(object):
if anchor is not None and anchor.component == component:
raise VoltLibError(
'Anchor "%s" already defined, '
- 'anchor names are case insensitive' % name,
- location
+ "anchor names are case insensitive" % name,
+ location,
)
if self.next_token_ == "LOCKED":
locked = True
@@ -448,9 +453,9 @@ class Parser(object):
self.expect_keyword_("AT")
pos = self.parse_pos_()
self.expect_keyword_("END_ANCHOR")
- anchor = ast.AnchorDefinition(name, gid, glyph_name,
- component, locked, pos,
- location=location)
+ anchor = ast.AnchorDefinition(
+ name, gid, glyph_name, component, locked, pos, location=location
+ )
if glyph_name not in self.anchors_:
self.anchors_[glyph_name] = SymbolTable()
self.anchors_[glyph_name].define(name, anchor)
@@ -500,9 +505,7 @@ class Parser(object):
location = self.cur_token_location_
try:
unicode_values = self.expect_string_().split(",")
- unicode_values = [
- int(uni[2:], 16)
- for uni in unicode_values if uni != ""]
+ unicode_values = [int(uni[2:], 16) for uni in unicode_values if uni != ""]
except ValueError as err:
raise VoltLibError(str(err), location)
return unicode_values if unicode_values != [] else None
@@ -560,8 +563,7 @@ class Parser(object):
def parse_cmap_format(self):
location = self.cur_token_location_
name = self.cur_token_
- value = (self.expect_number_(), self.expect_number_(),
- self.expect_number_())
+ value = (self.expect_number_(), self.expect_number_(), self.expect_number_())
setting = ast.SettingDefinition(name, value, location=location)
return setting
@@ -578,8 +580,7 @@ class Parser(object):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword:
return self.cur_token_
- raise VoltLibError("Expected \"%s\"" % keyword,
- self.cur_token_location_)
+ raise VoltLibError('Expected "%s"' % keyword, self.cur_token_location_)
def expect_name_(self):
self.advance_lexer_()
@@ -595,12 +596,18 @@ class Parser(object):
def advance_lexer_(self):
self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
- self.next_token_type_, self.next_token_, self.next_token_location_)
+ self.next_token_type_,
+ self.next_token_,
+ self.next_token_location_,
+ )
try:
if self.is_cur_keyword_("END"):
raise StopIteration
- (self.next_token_type_, self.next_token_,
- self.next_token_location_) = self.lexer_.next()
+ (
+ self.next_token_type_,
+ self.next_token_,
+ self.next_token_location_,
+ ) = self.lexer_.next()
except StopIteration:
self.next_token_type_, self.next_token_ = (None, None)
@@ -645,5 +652,5 @@ class OrderedSymbolTable(SymbolTable):
if start in scope and end in scope:
start_idx = list(scope.keys()).index(start)
end_idx = list(scope.keys()).index(end)
- return list(scope.keys())[start_idx:end_idx + 1]
+ return list(scope.keys())[start_idx : end_idx + 1]
return None
diff --git a/Lib/fontTools/voltLib/voltToFea.py b/Lib/fontTools/voltLib/voltToFea.py
new file mode 100644
index 00000000..2265d502
--- /dev/null
+++ b/Lib/fontTools/voltLib/voltToFea.py
@@ -0,0 +1,726 @@
+"""\
+MS VOLT ``.vtp`` to AFDKO ``.fea`` OpenType Layout converter.
+
+Usage
+-----
+
+To convert a VTP project file:
+
+
+ $ fonttools voltLib.voltToFea input.vtp output.fea
+
+It is also possible to convert font files with a `TSIV` table (as saved from Volt),
+in this case the glyph names used in the Volt project will be mapped to the
+actual glyph names in the font files when written to the feature file:
+
+ $ fonttools voltLib.voltToFea input.ttf output.fea
+
+The ``--quiet`` option can be used to suppress warnings.
+
+The ``--traceback`` option can be used to get a Python traceback in case of exceptions,
+instead of suppressing the traceback.
+
+
+Limitations
+-----------
+
+* Not all VOLT features are supported; the script will error if it
+ encounters something it does not understand. Please report an issue if this
+ happens.
+* AFDKO feature file syntax for mark positioning is awkward and does not allow
+  setting the mark coverage. It also defines mark anchors globally; as a result,
+  some mark positioning lookups might cover more marks than were in the VOLT
+ file. This should not be an issue in practice, but if it is then the only way
+ is to modify the VOLT file or the generated feature file manually to use unique
+ mark anchors for each lookup.
+* VOLT allows subtable breaks in any lookup type, but AFDKO feature file
+ implementations vary in their support; currently AFDKO’s makeOTF supports
+ subtable breaks in pair positioning lookups only, while FontTools’ feaLib
+ support it for most substitution lookups and only some positioning lookups.
+"""
+
+import logging
+import re
+from io import StringIO
+
+from fontTools.feaLib import ast
+from fontTools.ttLib import TTFont, TTLibError
+from fontTools.voltLib import ast as VAst
+from fontTools.voltLib.parser import Parser as VoltParser
+
+log = logging.getLogger("fontTools.voltLib.voltToFea")
+
+TABLES = ["GDEF", "GSUB", "GPOS"]
+
+
+class MarkClassDefinition(ast.MarkClassDefinition):
+ def asFea(self, indent=""):
+ res = ""
+ if not getattr(self, "used", False):
+ res += "#"
+ res += ast.MarkClassDefinition.asFea(self, indent)
+ return res
+
+
+# For sorting voltLib.ast.GlyphDefinition, see its use below.
+class Group:
+ def __init__(self, group):
+ self.name = group.name.lower()
+ self.groups = [
+ x.group.lower() for x in group.enum.enum if isinstance(x, VAst.GroupName)
+ ]
+
+ def __lt__(self, other):
+ if self.name in other.groups:
+ return True
+ if other.name in self.groups:
+ return False
+ if self.groups and not other.groups:
+ return False
+ if not self.groups and other.groups:
+ return True
+
+
+class VoltToFea:
+ _NOT_LOOKUP_NAME_RE = re.compile(r"[^A-Za-z_0-9.]")
+ _NOT_CLASS_NAME_RE = re.compile(r"[^A-Za-z_0-9.\-]")
+
+ def __init__(self, file_or_path, font=None):
+ self._file_or_path = file_or_path
+ self._font = font
+
+ self._glyph_map = {}
+ self._glyph_order = None
+
+ self._gdef = {}
+ self._glyphclasses = {}
+ self._features = {}
+ self._lookups = {}
+
+ self._marks = set()
+ self._ligatures = {}
+
+ self._markclasses = {}
+ self._anchors = {}
+
+ self._settings = {}
+
+ self._lookup_names = {}
+ self._class_names = {}
+
+ def _lookupName(self, name):
+ if name not in self._lookup_names:
+ res = self._NOT_LOOKUP_NAME_RE.sub("_", name)
+ while res in self._lookup_names.values():
+ res += "_"
+ self._lookup_names[name] = res
+ return self._lookup_names[name]
+
+ def _className(self, name):
+ if name not in self._class_names:
+ res = self._NOT_CLASS_NAME_RE.sub("_", name)
+ while res in self._class_names.values():
+ res += "_"
+ self._class_names[name] = res
+ return self._class_names[name]
+
+ def _collectStatements(self, doc, tables):
+ # Collect and sort group definitions first, to make sure a group
+ # definition that references other groups comes after them since VOLT
+ # does not enforce such ordering, and feature file require it.
+ groups = [s for s in doc.statements if isinstance(s, VAst.GroupDefinition)]
+ for statement in sorted(groups, key=lambda x: Group(x)):
+ self._groupDefinition(statement)
+
+ for statement in doc.statements:
+ if isinstance(statement, VAst.GlyphDefinition):
+ self._glyphDefinition(statement)
+ elif isinstance(statement, VAst.AnchorDefinition):
+ if "GPOS" in tables:
+ self._anchorDefinition(statement)
+ elif isinstance(statement, VAst.SettingDefinition):
+ self._settingDefinition(statement)
+ elif isinstance(statement, VAst.GroupDefinition):
+ pass # Handled above
+ elif isinstance(statement, VAst.ScriptDefinition):
+ self._scriptDefinition(statement)
+ elif not isinstance(statement, VAst.LookupDefinition):
+ raise NotImplementedError(statement)
+
+ # Lookup definitions need to be handled last as they reference glyph
+ # and mark classes that might be defined after them.
+ for statement in doc.statements:
+ if isinstance(statement, VAst.LookupDefinition):
+ if statement.pos and "GPOS" not in tables:
+ continue
+ if statement.sub and "GSUB" not in tables:
+ continue
+ self._lookupDefinition(statement)
+
+ def _buildFeatureFile(self, tables):
+ doc = ast.FeatureFile()
+ statements = doc.statements
+
+ if self._glyphclasses:
+ statements.append(ast.Comment("# Glyph classes"))
+ statements.extend(self._glyphclasses.values())
+
+ if self._markclasses:
+ statements.append(ast.Comment("\n# Mark classes"))
+ statements.extend(c[1] for c in sorted(self._markclasses.items()))
+
+ if self._lookups:
+ statements.append(ast.Comment("\n# Lookups"))
+ for lookup in self._lookups.values():
+ statements.extend(getattr(lookup, "targets", []))
+ statements.append(lookup)
+
+ # Prune features
+ features = self._features.copy()
+ for ftag in features:
+ scripts = features[ftag]
+ for stag in scripts:
+ langs = scripts[stag]
+ for ltag in langs:
+ langs[ltag] = [l for l in langs[ltag] if l.lower() in self._lookups]
+ scripts[stag] = {t: l for t, l in langs.items() if l}
+ features[ftag] = {t: s for t, s in scripts.items() if s}
+ features = {t: f for t, f in features.items() if f}
+
+ if features:
+ statements.append(ast.Comment("# Features"))
+ for ftag, scripts in features.items():
+ feature = ast.FeatureBlock(ftag)
+ stags = sorted(scripts, key=lambda k: 0 if k == "DFLT" else 1)
+ for stag in stags:
+ feature.statements.append(ast.ScriptStatement(stag))
+ ltags = sorted(scripts[stag], key=lambda k: 0 if k == "dflt" else 1)
+ for ltag in ltags:
+ include_default = True if ltag == "dflt" else False
+ feature.statements.append(
+ ast.LanguageStatement(ltag, include_default=include_default)
+ )
+ for name in scripts[stag][ltag]:
+ lookup = self._lookups[name.lower()]
+ lookupref = ast.LookupReferenceStatement(lookup)
+ feature.statements.append(lookupref)
+ statements.append(feature)
+
+ if self._gdef and "GDEF" in tables:
+ classes = []
+ for name in ("BASE", "MARK", "LIGATURE", "COMPONENT"):
+ if name in self._gdef:
+ classname = "GDEF_" + name.lower()
+ glyphclass = ast.GlyphClassDefinition(classname, self._gdef[name])
+ statements.append(glyphclass)
+ classes.append(ast.GlyphClassName(glyphclass))
+ else:
+ classes.append(None)
+
+ gdef = ast.TableBlock("GDEF")
+ gdef.statements.append(ast.GlyphClassDefStatement(*classes))
+ statements.append(gdef)
+
+ return doc
+
+ def convert(self, tables=None):
+ doc = VoltParser(self._file_or_path).parse()
+
+ if tables is None:
+ tables = TABLES
+ if self._font is not None:
+ self._glyph_order = self._font.getGlyphOrder()
+
+ self._collectStatements(doc, tables)
+ fea = self._buildFeatureFile(tables)
+ return fea.asFea()
+
+ def _glyphName(self, glyph):
+ try:
+ name = glyph.glyph
+ except AttributeError:
+ name = glyph
+ return ast.GlyphName(self._glyph_map.get(name, name))
+
+ def _groupName(self, group):
+ try:
+ name = group.group
+ except AttributeError:
+ name = group
+ return ast.GlyphClassName(self._glyphclasses[name.lower()])
+
+ def _coverage(self, coverage):
+ items = []
+ for item in coverage:
+ if isinstance(item, VAst.GlyphName):
+ items.append(self._glyphName(item))
+ elif isinstance(item, VAst.GroupName):
+ items.append(self._groupName(item))
+ elif isinstance(item, VAst.Enum):
+ items.append(self._enum(item))
+ elif isinstance(item, VAst.Range):
+ items.append((item.start, item.end))
+ else:
+ raise NotImplementedError(item)
+ return items
+
+ def _enum(self, enum):
+ return ast.GlyphClass(self._coverage(enum.enum))
+
+ def _context(self, context):
+ out = []
+ for item in context:
+ coverage = self._coverage(item)
+ if not isinstance(coverage, (tuple, list)):
+ coverage = [coverage]
+ out.extend(coverage)
+ return out
+
+ def _groupDefinition(self, group):
+ name = self._className(group.name)
+ glyphs = self._enum(group.enum)
+ glyphclass = ast.GlyphClassDefinition(name, glyphs)
+
+ self._glyphclasses[group.name.lower()] = glyphclass
+
+ def _glyphDefinition(self, glyph):
+ try:
+ self._glyph_map[glyph.name] = self._glyph_order[glyph.id]
+ except TypeError:
+ pass
+
+ if glyph.type in ("BASE", "MARK", "LIGATURE", "COMPONENT"):
+ if glyph.type not in self._gdef:
+ self._gdef[glyph.type] = ast.GlyphClass()
+ self._gdef[glyph.type].glyphs.append(self._glyphName(glyph.name))
+
+ if glyph.type == "MARK":
+ self._marks.add(glyph.name)
+ elif glyph.type == "LIGATURE":
+ self._ligatures[glyph.name] = glyph.components
+
+ def _scriptDefinition(self, script):
+ stag = script.tag
+ for lang in script.langs:
+ ltag = lang.tag
+ for feature in lang.features:
+ lookups = {l.split("\\")[0]: True for l in feature.lookups}
+ ftag = feature.tag
+ if ftag not in self._features:
+ self._features[ftag] = {}
+ if stag not in self._features[ftag]:
+ self._features[ftag][stag] = {}
+ assert ltag not in self._features[ftag][stag]
+ self._features[ftag][stag][ltag] = lookups.keys()
+
+ def _settingDefinition(self, setting):
+ if setting.name.startswith("COMPILER_"):
+ self._settings[setting.name] = setting.value
+ else:
+ log.warning(f"Unsupported setting ignored: {setting.name}")
+
+ def _adjustment(self, adjustment):
+ adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment
+
+ adv_device = adv_adjust_by and adv_adjust_by.items() or None
+ dx_device = dx_adjust_by and dx_adjust_by.items() or None
+ dy_device = dy_adjust_by and dy_adjust_by.items() or None
+
+ return ast.ValueRecord(
+ xPlacement=dx,
+ yPlacement=dy,
+ xAdvance=adv,
+ xPlaDevice=dx_device,
+ yPlaDevice=dy_device,
+ xAdvDevice=adv_device,
+ )
+
+ def _anchor(self, adjustment):
+ adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment
+
+ assert not adv_adjust_by
+ dx_device = dx_adjust_by and dx_adjust_by.items() or None
+ dy_device = dy_adjust_by and dy_adjust_by.items() or None
+
+ return ast.Anchor(
+ dx or 0,
+ dy or 0,
+ xDeviceTable=dx_device or None,
+ yDeviceTable=dy_device or None,
+ )
+
+ def _anchorDefinition(self, anchordef):
+ anchorname = anchordef.name
+ glyphname = anchordef.glyph_name
+ anchor = self._anchor(anchordef.pos)
+
+ if anchorname.startswith("MARK_"):
+ name = "_".join(anchorname.split("_")[1:])
+ markclass = ast.MarkClass(self._className(name))
+ glyph = self._glyphName(glyphname)
+ markdef = MarkClassDefinition(markclass, anchor, glyph)
+ self._markclasses[(glyphname, anchorname)] = markdef
+ else:
+ if glyphname not in self._anchors:
+ self._anchors[glyphname] = {}
+ if anchorname not in self._anchors[glyphname]:
+ self._anchors[glyphname][anchorname] = {}
+ self._anchors[glyphname][anchorname][anchordef.component] = anchor
+
+ def _gposLookup(self, lookup, fealookup):
+ statements = fealookup.statements
+
+ pos = lookup.pos
+ if isinstance(pos, VAst.PositionAdjustPairDefinition):
+ for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items():
+ coverage_1 = pos.coverages_1[idx1 - 1]
+ coverage_2 = pos.coverages_2[idx2 - 1]
+
+ # If not both are groups, use “enum pos” otherwise makeotf will
+ # fail.
+ enumerated = False
+ for item in coverage_1 + coverage_2:
+ if not isinstance(item, VAst.GroupName):
+ enumerated = True
+
+ glyphs1 = self._coverage(coverage_1)
+ glyphs2 = self._coverage(coverage_2)
+ record1 = self._adjustment(pos1)
+ record2 = self._adjustment(pos2)
+ assert len(glyphs1) == 1
+ assert len(glyphs2) == 1
+ statements.append(
+ ast.PairPosStatement(
+ glyphs1[0], record1, glyphs2[0], record2, enumerated=enumerated
+ )
+ )
+ elif isinstance(pos, VAst.PositionAdjustSingleDefinition):
+ for a, b in pos.adjust_single:
+ glyphs = self._coverage(a)
+ record = self._adjustment(b)
+ assert len(glyphs) == 1
+ statements.append(
+ ast.SinglePosStatement([(glyphs[0], record)], [], [], False)
+ )
+ elif isinstance(pos, VAst.PositionAttachDefinition):
+ anchors = {}
+ for marks, classname in pos.coverage_to:
+ for mark in marks:
+ # Set actually used mark classes. Basically a hack to get
+ # around the feature file syntax limitation of making mark
+ # classes global and not allowing mark positioning to
+ # specify mark coverage.
+ for name in mark.glyphSet():
+ key = (name, "MARK_" + classname)
+ self._markclasses[key].used = True
+ markclass = ast.MarkClass(self._className(classname))
+ for base in pos.coverage:
+ for name in base.glyphSet():
+ if name not in anchors:
+ anchors[name] = []
+ if classname not in anchors[name]:
+ anchors[name].append(classname)
+
+ for name in anchors:
+ components = 1
+ if name in self._ligatures:
+ components = self._ligatures[name]
+
+ marks = []
+ for mark in anchors[name]:
+ markclass = ast.MarkClass(self._className(mark))
+ for component in range(1, components + 1):
+ if len(marks) < component:
+ marks.append([])
+ anchor = None
+ if component in self._anchors[name][mark]:
+ anchor = self._anchors[name][mark][component]
+ marks[component - 1].append((anchor, markclass))
+
+ base = self._glyphName(name)
+ if name in self._marks:
+ mark = ast.MarkMarkPosStatement(base, marks[0])
+ elif name in self._ligatures:
+ mark = ast.MarkLigPosStatement(base, marks)
+ else:
+ mark = ast.MarkBasePosStatement(base, marks[0])
+ statements.append(mark)
+ elif isinstance(pos, VAst.PositionAttachCursiveDefinition):
+ # Collect enter and exit glyphs
+ enter_coverage = []
+ for coverage in pos.coverages_enter:
+ for base in coverage:
+ for name in base.glyphSet():
+ enter_coverage.append(name)
+ exit_coverage = []
+ for coverage in pos.coverages_exit:
+ for base in coverage:
+ for name in base.glyphSet():
+ exit_coverage.append(name)
+
+ # Write enter anchors, also check if the glyph has exit anchor and
+ # write it, too.
+ for name in enter_coverage:
+ glyph = self._glyphName(name)
+ entry = self._anchors[name]["entry"][1]
+ exit = None
+ if name in exit_coverage:
+ exit = self._anchors[name]["exit"][1]
+ exit_coverage.pop(exit_coverage.index(name))
+ statements.append(ast.CursivePosStatement(glyph, entry, exit))
+
+ # Write any remaining exit anchors.
+ for name in exit_coverage:
+ glyph = self._glyphName(name)
+ exit = self._anchors[name]["exit"][1]
+ statements.append(ast.CursivePosStatement(glyph, None, exit))
+ else:
+ raise NotImplementedError(pos)
+
+ def _gposContextLookup(
+ self, lookup, prefix, suffix, ignore, fealookup, targetlookup
+ ):
+ statements = fealookup.statements
+
+ assert not lookup.reversal
+
+ pos = lookup.pos
+ if isinstance(pos, VAst.PositionAdjustPairDefinition):
+ for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items():
+ glyphs1 = self._coverage(pos.coverages_1[idx1 - 1])
+ glyphs2 = self._coverage(pos.coverages_2[idx2 - 1])
+ assert len(glyphs1) == 1
+ assert len(glyphs2) == 1
+ glyphs = (glyphs1[0], glyphs2[0])
+
+ if ignore:
+ statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
+ else:
+ lookups = (targetlookup, targetlookup)
+ statement = ast.ChainContextPosStatement(
+ prefix, glyphs, suffix, lookups
+ )
+ statements.append(statement)
+ elif isinstance(pos, VAst.PositionAdjustSingleDefinition):
+ glyphs = [ast.GlyphClass()]
+ for a, b in pos.adjust_single:
+ glyph = self._coverage(a)
+ glyphs[0].extend(glyph)
+
+ if ignore:
+ statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
+ else:
+ statement = ast.ChainContextPosStatement(
+ prefix, glyphs, suffix, [targetlookup]
+ )
+ statements.append(statement)
+ elif isinstance(pos, VAst.PositionAttachDefinition):
+ glyphs = [ast.GlyphClass()]
+ for coverage, _ in pos.coverage_to:
+ glyphs[0].extend(self._coverage(coverage))
+
+ if ignore:
+ statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
+ else:
+ statement = ast.ChainContextPosStatement(
+ prefix, glyphs, suffix, [targetlookup]
+ )
+ statements.append(statement)
+ else:
+ raise NotImplementedError(pos)
+
+ def _gsubLookup(self, lookup, prefix, suffix, ignore, chain, fealookup):
+ statements = fealookup.statements
+
+ sub = lookup.sub
+ for key, val in sub.mapping.items():
+ if not key or not val:
+ path, line, column = sub.location
+ log.warning(f"{path}:{line}:{column}: Ignoring empty substitution")
+ continue
+ statement = None
+ glyphs = self._coverage(key)
+ replacements = self._coverage(val)
+ if ignore:
+ chain_context = (prefix, glyphs, suffix)
+ statement = ast.IgnoreSubstStatement([chain_context])
+ elif isinstance(sub, VAst.SubstitutionSingleDefinition):
+ assert len(glyphs) == 1
+ assert len(replacements) == 1
+ statement = ast.SingleSubstStatement(
+ glyphs, replacements, prefix, suffix, chain
+ )
+ elif isinstance(sub, VAst.SubstitutionReverseChainingSingleDefinition):
+ assert len(glyphs) == 1
+ assert len(replacements) == 1
+ statement = ast.ReverseChainSingleSubstStatement(
+ prefix, suffix, glyphs, replacements
+ )
+ elif isinstance(sub, VAst.SubstitutionMultipleDefinition):
+ assert len(glyphs) == 1
+ statement = ast.MultipleSubstStatement(
+ prefix, glyphs[0], suffix, replacements, chain
+ )
+ elif isinstance(sub, VAst.SubstitutionLigatureDefinition):
+ assert len(replacements) == 1
+ statement = ast.LigatureSubstStatement(
+ prefix, glyphs, suffix, replacements[0], chain
+ )
+ else:
+ raise NotImplementedError(sub)
+ statements.append(statement)
+
+ def _lookupDefinition(self, lookup):
+ mark_attachement = None
+ mark_filtering = None
+
+ flags = 0
+ if lookup.direction == "RTL":
+ flags |= 1
+ if not lookup.process_base:
+ flags |= 2
+ # FIXME: Does VOLT support this?
+ # if not lookup.process_ligatures:
+ # flags |= 4
+ if not lookup.process_marks:
+ flags |= 8
+ elif isinstance(lookup.process_marks, str):
+ mark_attachement = self._groupName(lookup.process_marks)
+ elif lookup.mark_glyph_set is not None:
+ mark_filtering = self._groupName(lookup.mark_glyph_set)
+
+ lookupflags = None
+ if flags or mark_attachement is not None or mark_filtering is not None:
+ lookupflags = ast.LookupFlagStatement(
+ flags, mark_attachement, mark_filtering
+ )
+ if "\\" in lookup.name:
+ # Merge sub lookups as subtables (lookups named “base\sub”),
+ # makeotf/feaLib will issue a warning and ignore the subtable
+ # statement if it is not a pairpos lookup, though.
+ name = lookup.name.split("\\")[0]
+ if name.lower() not in self._lookups:
+ fealookup = ast.LookupBlock(self._lookupName(name))
+ if lookupflags is not None:
+ fealookup.statements.append(lookupflags)
+ fealookup.statements.append(ast.Comment("# " + lookup.name))
+ else:
+ fealookup = self._lookups[name.lower()]
+ fealookup.statements.append(ast.SubtableStatement())
+ fealookup.statements.append(ast.Comment("# " + lookup.name))
+ self._lookups[name.lower()] = fealookup
+ else:
+ fealookup = ast.LookupBlock(self._lookupName(lookup.name))
+ if lookupflags is not None:
+ fealookup.statements.append(lookupflags)
+ self._lookups[lookup.name.lower()] = fealookup
+
+ if lookup.comments is not None:
+ fealookup.statements.append(ast.Comment("# " + lookup.comments))
+
+ contexts = []
+ if lookup.context:
+ for context in lookup.context:
+ prefix = self._context(context.left)
+ suffix = self._context(context.right)
+ ignore = context.ex_or_in == "EXCEPT_CONTEXT"
+ contexts.append([prefix, suffix, ignore, False])
+ # It seems that VOLT will create contextual substitution using
+ # only the input if there is no other contexts in this lookup.
+ if ignore and len(lookup.context) == 1:
+ contexts.append([[], [], False, True])
+ else:
+ contexts.append([[], [], False, False])
+
+ targetlookup = None
+ for prefix, suffix, ignore, chain in contexts:
+ if lookup.sub is not None:
+ self._gsubLookup(lookup, prefix, suffix, ignore, chain, fealookup)
+
+ if lookup.pos is not None:
+ if self._settings.get("COMPILER_USEEXTENSIONLOOKUPS"):
+ fealookup.use_extension = True
+ if prefix or suffix or chain or ignore:
+ if not ignore and targetlookup is None:
+ targetname = self._lookupName(lookup.name + " target")
+ targetlookup = ast.LookupBlock(targetname)
+ fealookup.targets = getattr(fealookup, "targets", [])
+ fealookup.targets.append(targetlookup)
+ self._gposLookup(lookup, targetlookup)
+ self._gposContextLookup(
+ lookup, prefix, suffix, ignore, fealookup, targetlookup
+ )
+ else:
+ self._gposLookup(lookup, fealookup)
+
+
+def main(args=None):
+ """Convert MS VOLT to AFDKO feature files."""
+
+ import argparse
+ from pathlib import Path
+
+ from fontTools import configLogger
+
+ parser = argparse.ArgumentParser(
+ "fonttools voltLib.voltToFea", description=main.__doc__
+ )
+ parser.add_argument(
+ "input", metavar="INPUT", type=Path, help="input font/VTP file to process"
+ )
+ parser.add_argument(
+ "featurefile", metavar="OUTPUT", type=Path, help="output feature file"
+ )
+ parser.add_argument(
+ "-t",
+ "--table",
+ action="append",
+ choices=TABLES,
+ dest="tables",
+ help="List of tables to write, by default all tables are written",
+ )
+ parser.add_argument(
+ "-q", "--quiet", action="store_true", help="Suppress non-error messages"
+ )
+ parser.add_argument(
+ "--traceback", action="store_true", help="Don’t catch exceptions"
+ )
+
+ options = parser.parse_args(args)
+
+ configLogger(level=("ERROR" if options.quiet else "INFO"))
+
+ file_or_path = options.input
+ font = None
+ try:
+ font = TTFont(file_or_path)
+ if "TSIV" in font:
+ file_or_path = StringIO(font["TSIV"].data.decode("utf-8"))
+ else:
+ log.error('"TSIV" table is missing, font was not saved from VOLT?')
+ return 1
+ except TTLibError:
+ pass
+
+ converter = VoltToFea(file_or_path, font)
+ try:
+ fea = converter.convert(options.tables)
+ except NotImplementedError as e:
+ if options.traceback:
+ raise
+ location = getattr(e.args[0], "location", None)
+ message = f'"{e}" is not supported'
+ if location:
+ path, line, column = location
+ log.error(f"{path}:{line}:{column}: {message}")
+ else:
+ log.error(message)
+ return 1
+ with open(options.featurefile, "w") as feafile:
+ feafile.write(fea)
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(main())
diff --git a/METADATA b/METADATA
index 96cf81bf..50e856e1 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update fonttools
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
name: "fonttools"
description: "fontTools is a library for manipulating fonts, written in Python."
third_party {
@@ -7,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://github.com/fonttools/fonttools/archive/4.37.1.zip"
+ value: "https://github.com/fonttools/fonttools/archive/4.44.0.zip"
}
- version: "4.37.1"
+ version: "4.44.0"
license_type: NOTICE
last_upgrade_date {
- year: 2022
- month: 8
- day: 24
+ year: 2023
+ month: 11
+ day: 10
}
}
diff --git a/MetaTools/buildTableList.py b/MetaTools/buildTableList.py
index c3766b98..c0a6453e 100755
--- a/MetaTools/buildTableList.py
+++ b/MetaTools/buildTableList.py
@@ -8,9 +8,8 @@ import textwrap
fontToolsDir = os.path.dirname(os.path.dirname(os.path.join(os.getcwd(), sys.argv[0])))
-fontToolsDir= os.path.normpath(fontToolsDir)
-tablesDir = os.path.join(fontToolsDir,
- "Lib", "fontTools", "ttLib", "tables")
+fontToolsDir = os.path.normpath(fontToolsDir)
+tablesDir = os.path.join(fontToolsDir, "Lib", "fontTools", "ttLib", "tables")
docFile = os.path.join(fontToolsDir, "Doc/source/ttx.rst")
names = glob.glob1(tablesDir, "*.py")
@@ -18,21 +17,21 @@ names = glob.glob1(tablesDir, "*.py")
modules = []
tables = []
for name in names:
- try:
- tag = identifierToTag(name[:-3])
- except:
- pass
- else:
- modules.append(name[:-3])
- tables.append(tag.strip())
+ try:
+ tag = identifierToTag(name[:-3])
+ except:
+ pass
+ else:
+ modules.append(name[:-3])
+ tables.append(tag.strip())
modules.sort()
tables.sort()
with open(os.path.join(tablesDir, "__init__.py"), "w") as file:
-
- file.write('''
+ file.write(
+ '''
# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
def _moduleFinderHint():
"""Dummy function to let modulefinder know what tables may be
@@ -40,22 +39,25 @@ def _moduleFinderHint():
>>> _moduleFinderHint()
"""
-''')
+'''
+ )
- for module in modules:
- file.write("\tfrom . import %s\n" % module)
+ for module in modules:
+ file.write("\tfrom . import %s\n" % module)
- file.write('''
+ file.write(
+ """
if __name__ == "__main__":
import doctest, sys
sys.exit(doctest.testmod().failed)
-''')
+"""
+ )
begin = ".. begin table list\n"
end = ".. end table list"
with open(docFile) as f:
- doc = f.read()
+ doc = f.read()
beginPos = doc.find(begin)
assert beginPos > 0
beginPos = beginPos + len(begin) + 1
@@ -63,9 +65,9 @@ endPos = doc.find(end)
lines = textwrap.wrap(", ".join(tables[:-1]) + " and " + tables[-1], 66)
intro = "The following tables are currently supported::\n\n"
-blockquote = "\n".join(" "*4 + line for line in lines) + "\n"
+blockquote = "\n".join(" " * 4 + line for line in lines) + "\n"
doc = doc[:beginPos] + intro + blockquote + "\n" + doc[endPos:]
with open(docFile, "w") as f:
- f.write(doc)
+ f.write(doc)
diff --git a/MetaTools/buildUCD.py b/MetaTools/buildUCD.py
index 16ae150a..9eba747d 100755
--- a/MetaTools/buildUCD.py
+++ b/MetaTools/buildUCD.py
@@ -27,8 +27,9 @@ UNIDATA_URL = "https://unicode.org/Public/UNIDATA/"
UNIDATA_LICENSE_URL = "http://unicode.org/copyright.html#License"
# by default save output files to ../Lib/fontTools/unicodedata/
-UNIDATA_PATH = pjoin(abspath(dirname(__file__)), pardir,
- "Lib", "fontTools", "unicodedata") + sep
+UNIDATA_PATH = (
+ pjoin(abspath(dirname(__file__)), pardir, "Lib", "fontTools", "unicodedata") + sep
+)
SRC_ENCODING = "# -*- coding: utf-8 -*-\n"
@@ -75,7 +76,8 @@ def parse_range_properties(infile, default=None, is_set=False):
r"([0-9A-F]{4,6})" # first character code
r"(?:\.\.([0-9A-F]{4,6}))?" # optional second character code
r"\s*;\s*"
- r"([^#]+)") # everything up to the potential comment
+ r"([^#]+)"
+ ) # everything up to the potential comment
for line in infile:
match = line_regex.match(line)
if not match:
@@ -103,13 +105,13 @@ def parse_range_properties(infile, default=None, is_set=False):
assert last_end < start
assert start <= end
if start - last_end > 1:
- full_ranges.append((last_end+1, start-1, default))
+ full_ranges.append((last_end + 1, start - 1, default))
if is_set:
value = set(value.split())
full_ranges.append((start, end, value))
last_start, last_end = start, end
if last_end != MAX_UNICODE:
- full_ranges.append((last_end+1, MAX_UNICODE, default))
+ full_ranges.append((last_end + 1, MAX_UNICODE, default))
# reduce total number of ranges by combining continuous ones
last_start, last_end, last_value = full_ranges.pop(0)
@@ -118,14 +120,14 @@ def parse_range_properties(infile, default=None, is_set=False):
if value == last_value:
continue
else:
- merged_ranges.append((last_start, start-1, last_value))
+ merged_ranges.append((last_start, start - 1, last_value))
last_start, line_end, last_value = start, end, value
merged_ranges.append((last_start, MAX_UNICODE, last_value))
# make sure that the ranges cover the full unicode repertoire
assert merged_ranges[0][0] == 0
for (cs, ce, cv), (ns, ne, nv) in zip(merged_ranges, merged_ranges[1:]):
- assert ce+1 == ns
+ assert ce + 1 == ns
assert merged_ranges[-1][1] == MAX_UNICODE
return merged_ranges
@@ -140,21 +142,25 @@ def parse_semicolon_separated_data(infile):
"""
data = []
for line in infile:
- line = line.split('#', 1)[0].strip() # remove the comment
+ line = line.split("#", 1)[0].strip() # remove the comment
if not line:
continue
- fields = [str(field.strip()) for field in line.split(';')]
+ fields = [str(field.strip()) for field in line.split(";")]
data.append(fields)
return data
def _set_repr(value):
- return 'None' if value is None else "{{{}}}".format(
- ", ".join(repr(v) for v in sorted(value)))
+ return (
+ "None"
+ if value is None
+ else "{{{}}}".format(", ".join(repr(v) for v in sorted(value)))
+ )
-def build_ranges(filename, local_ucd=None, output_path=None,
- default=None, is_set=False, aliases=None):
+def build_ranges(
+ filename, local_ucd=None, output_path=None, default=None, is_set=False, aliases=None
+):
"""Fetch 'filename' UCD data file from Unicode official website, parse
the property ranges and values and write them as two Python lists
to 'fontTools.unicodedata.<filename>.py'.
@@ -196,12 +202,15 @@ def build_ranges(filename, local_ucd=None, output_path=None,
f.write("# Source: {}{}\n".format(UNIDATA_URL, filename))
f.write("# License: {}\n".format(UNIDATA_LICENSE_URL))
f.write("#\n")
- f.write(header+"\n\n")
+ f.write(header + "\n\n")
f.write("RANGES = [\n")
for first, last, value in ranges:
- f.write(" 0x{:0>4X}, # .. 0x{:0>4X} ; {}\n".format(
- first, last, _set_repr(value) if is_set else value))
+ f.write(
+ " 0x{:0>4X}, # .. 0x{:0>4X} ; {}\n".format(
+ first, last, _set_repr(value) if is_set else value
+ )
+ )
f.write("]\n")
f.write("\n")
@@ -216,8 +225,9 @@ def build_ranges(filename, local_ucd=None, output_path=None,
comment += " ; {}".format(value)
value = reversed_aliases[normalize(value)]
value_repr = "{!r},".format(value)
- f.write(" {} {}\n".format(
- value_repr.ljust(max_value_length+1), comment))
+ f.write(
+ " {} {}\n".format(value_repr.ljust(max_value_length + 1), comment)
+ )
f.write("]\n")
if aliases:
@@ -233,6 +243,7 @@ def build_ranges(filename, local_ucd=None, output_path=None,
_normalize_re = re.compile(r"[-_ ]+")
+
def normalize(string):
"""Remove case, strip space, '-' and '_' for loose matching."""
return _normalize_re.sub("", string).lower()
@@ -258,8 +269,7 @@ def parse_property_value_aliases(property_tag, local_ucd=None):
header = parse_unidata_header(f)
data = parse_semicolon_separated_data(f)
- aliases = {item[1]: item[2:] for item in data
- if item[0] == property_tag}
+ aliases = {item[1]: item[2:] for item in data if item[0] == property_tag}
return aliases
@@ -268,10 +278,12 @@ def main():
import argparse
parser = argparse.ArgumentParser(
- description="Generate fontTools.unicodedata from UCD data files")
+ description="Generate fontTools.unicodedata from UCD data files"
+ )
parser.add_argument(
- '--ucd-path', help="Path to local folder containing UCD data files")
- parser.add_argument('-q', '--quiet', action="store_true")
+ "--ucd-path", help="Path to local folder containing UCD data files"
+ )
+ parser.add_argument("-q", "--quiet", action="store_true")
options = parser.parse_args()
level = "WARNING" if options.quiet else "INFO"
@@ -280,12 +292,16 @@ def main():
build_ranges("Blocks.txt", local_ucd=options.ucd_path, default="No_Block")
script_aliases = parse_property_value_aliases("sc", options.ucd_path)
- build_ranges("Scripts.txt", local_ucd=options.ucd_path, default="Unknown",
- aliases=script_aliases)
- build_ranges("ScriptExtensions.txt", local_ucd=options.ucd_path,
- is_set=True)
+ build_ranges(
+ "Scripts.txt",
+ local_ucd=options.ucd_path,
+ default="Unknown",
+ aliases=script_aliases,
+ )
+ build_ranges("ScriptExtensions.txt", local_ucd=options.ucd_path, is_set=True)
if __name__ == "__main__":
import sys
+
sys.exit(main())
diff --git a/MetaTools/roundTrip.py b/MetaTools/roundTrip.py
index f9094ab0..e6df0ab6 100755
--- a/MetaTools/roundTrip.py
+++ b/MetaTools/roundTrip.py
@@ -21,75 +21,88 @@ import getopt
import traceback
from fontTools import ttx
-class Error(Exception): pass
+
+class Error(Exception):
+ pass
def usage():
- print(__doc__)
- sys.exit(2)
+ print(__doc__)
+ sys.exit(2)
def roundTrip(ttFile1, options, report):
- fn = os.path.basename(ttFile1)
- xmlFile1 = tempfile.mkstemp(".%s.ttx1" % fn)
- ttFile2 = tempfile.mkstemp(".%s" % fn)
- xmlFile2 = tempfile.mkstemp(".%s.ttx2" % fn)
-
- try:
- ttx.ttDump(ttFile1, xmlFile1, options)
- if options.onlyTables or options.skipTables:
- options.mergeFile = ttFile1
- ttx.ttCompile(xmlFile1, ttFile2, options)
- options.mergeFile = None
- ttx.ttDump(ttFile2, xmlFile2, options)
-
- diffcmd = 'diff -U2 -I ".*modified value\|checkSumAdjustment.*" "%s" "%s"' % (xmlFile1, xmlFile2)
- output = os.popen(diffcmd, "r", 1)
- lines = []
- while True:
- line = output.readline()
- if not line:
- break
- sys.stdout.write(line)
- lines.append(line)
- if lines:
- report.write("=============================================================\n")
- report.write(" \"%s\" differs after round tripping\n" % ttFile1)
- report.write("-------------------------------------------------------------\n")
- report.writelines(lines)
- else:
- print("(TTX files are the same)")
- finally:
- for tmpFile in (xmlFile1, ttFile2, xmlFile2):
- if os.path.exists(tmpFile):
- os.remove(tmpFile)
+ fn = os.path.basename(ttFile1)
+ xmlFile1 = tempfile.mkstemp(".%s.ttx1" % fn)
+ ttFile2 = tempfile.mkstemp(".%s" % fn)
+ xmlFile2 = tempfile.mkstemp(".%s.ttx2" % fn)
+
+ try:
+ ttx.ttDump(ttFile1, xmlFile1, options)
+ if options.onlyTables or options.skipTables:
+ options.mergeFile = ttFile1
+ ttx.ttCompile(xmlFile1, ttFile2, options)
+ options.mergeFile = None
+ ttx.ttDump(ttFile2, xmlFile2, options)
+
+ diffcmd = 'diff -U2 -I ".*modified value\|checkSumAdjustment.*" "%s" "%s"' % (
+ xmlFile1,
+ xmlFile2,
+ )
+ output = os.popen(diffcmd, "r", 1)
+ lines = []
+ while True:
+ line = output.readline()
+ if not line:
+ break
+ sys.stdout.write(line)
+ lines.append(line)
+ if lines:
+ report.write(
+ "=============================================================\n"
+ )
+ report.write(' "%s" differs after round tripping\n' % ttFile1)
+ report.write(
+ "-------------------------------------------------------------\n"
+ )
+ report.writelines(lines)
+ else:
+ print("(TTX files are the same)")
+ finally:
+ for tmpFile in (xmlFile1, ttFile2, xmlFile2):
+ if os.path.exists(tmpFile):
+ os.remove(tmpFile)
def main(args):
- try:
- rawOptions, files = getopt.getopt(args, "it:x:")
- except getopt.GetoptError:
- usage()
-
- if not files:
- usage()
-
- with open("report.txt", "a+") as report:
- options = ttx.Options(rawOptions, len(files))
- for ttFile in files:
- try:
- roundTrip(ttFile, options, report)
- except KeyboardInterrupt:
- print("(Cancelled)")
- break
- except:
- print("*** round tripping aborted ***")
- traceback.print_exc()
- report.write("=============================================================\n")
- report.write(" An exception occurred while round tripping")
- report.write(" \"%s\"\n" % ttFile)
- traceback.print_exc(file=report)
- report.write("-------------------------------------------------------------\n")
-
-
+ try:
+ rawOptions, files = getopt.getopt(args, "it:x:")
+ except getopt.GetoptError:
+ usage()
+
+ if not files:
+ usage()
+
+ with open("report.txt", "a+") as report:
+ options = ttx.Options(rawOptions, len(files))
+ for ttFile in files:
+ try:
+ roundTrip(ttFile, options, report)
+ except KeyboardInterrupt:
+ print("(Cancelled)")
+ break
+ except:
+ print("*** round tripping aborted ***")
+ traceback.print_exc()
+ report.write(
+ "=============================================================\n"
+ )
+ report.write(" An exception occurred while round tripping")
+ report.write(' "%s"\n' % ttFile)
+ traceback.print_exc(file=report)
+ report.write(
+ "-------------------------------------------------------------\n"
+ )
+
+
main(sys.argv[1:])
diff --git a/NEWS.rst b/NEWS.rst
index f022ad23..cddd851f 100644
--- a/NEWS.rst
+++ b/NEWS.rst
@@ -1,3 +1,316 @@
+4.44.0 (released 2023-11-03)
+----------------------------
+
+- [instancer] Recalc OS/2 AvgCharWidth after instancing if default changes (#3317).
+- [otlLib] Make ClassDefBuilder class order match varLib.merger's, i.e. large
+ classes first, then glyph lexicographic order (#3321, #3324).
+- [instancer] Allow not specifying any of min:default:max values and let be filled
+ up with fvar's values (#3322, #3323).
+- [instancer] When running --update-name-table ignore axes that have no STAT axis
+ values (#3318, #3319).
+- [Debg] When dumping to ttx, write the embedded JSON as multi-line string with
+ indentation (92cbfee0d).
+- [varStore] Handle > 65535 items per encoding by splitting VarData subtable (#3310).
+- [subset] Handle null-offsets in MarkLigPos subtables.
+- [subset] Keep East Asian spacing features vhal, halt, chws, vchw by default (#3305).
+- [instancer.solver] Fixed case where axisDef < lower and upper < axisMax (#3304).
+- [glyf] Speed up compilation, mostly around ``recalcBounds`` (#3301).
+- [varLib.interpolatable] Speed it up when working on variable fonts, plus various
+ micro-optimizations (#3300).
+- Require unicodedata2 >= 15.1.0 when installed with 'unicode' extra, contains UCD 15.1.
+
+4.43.1 (released 2023-10-06)
+----------------------------
+
+- [EBDT] Fixed TypeError exception in `_reverseBytes` method triggered when dumping
+ some bitmap fonts with `ttx -z bitwise` option (#3162).
+- [v/hhea] Fixed UnboundLocalError exception in ``recalc`` method when no vmtx or hmtx
+ tables are present (#3290).
+- [bezierTools] Fixed incorrectly typed cython local variable leading to TypeError when
+ calling ``calcQuadraticArcLength`` (#3288).
+- [feaLib/otlLib] Better error message when building Coverage table with missing glyph (#3286).
+
+4.43.0 (released 2023-09-29)
+----------------------------
+
+- [subset] Set up lxml ``XMLParser(resolve_entities=False)`` when parsing OT-SVG documents
+ to prevent XML External Entity (XXE) attacks (9f61271dc):
+ https://codeql.github.com/codeql-query-help/python/py-xxe/
+- [varLib.iup] Added workaround for a Cython bug in ``iup_delta_optimize`` that was
+ leading to IUP tolerance being incorrectly initialised, resulting in sub-optimal deltas
+ (60126435d, cython/cython#5732).
+- [varLib] Added new command-line entry point ``fonttools varLib.avar`` to add an
+ ``avar`` table to an existing VF from axes mappings in a .designspace file (0a3360e52).
+- [instancer] Fixed bug whereby no longer used variation regions were not correctly pruned
+ after VarData optimization (#3268).
+- Added support for Python 3.12 (#3283).
+
+4.42.1 (released 2023-08-20)
+----------------------------
+
+- [t1Lib] Fixed several Type 1 issues (#3238, #3240).
+- [otBase/packer] Allow sharing tables reached by different offset sizes (#3241, #3236).
+- [varLib/merger] Fix Cursive attachment merging error when all anchors are NULL (#3248, #3247).
+- [ttLib] Fixed warning when calling ``addMultilingualName`` and ``ttFont`` parameter was not
+ passed on to ``findMultilingualName`` (#3253).
+
+4.42.0 (released 2023-08-02)
+----------------------------
+
+- [varLib] Use sentinel value 0xFFFF to mark a glyph advance in hmtx/vmtx as non
+ participating, allowing sparse masters to contain glyphs for variation purposes other
+ than {H,V}VAR (#3235).
+- [varLib/cff] Treat empty glyphs in non-default masters as missing, thus not participating
+ in CFF2 delta computation, similarly to how varLib already treats them for gvar (#3234).
+- Added varLib.avarPlanner script to deduce 'correct' avar v1 axis mappings based on
+ glyph average weights (#3223).
+
+4.41.1 (released 2023-07-21)
+----------------------------
+
+- [subset] Fixed perf regression in v4.41.0 by making ``NameRecordVisitor`` only visit
+ tables that do contain nameID references (#3213, #3214).
+- [varLib.instancer] Support instancing fonts containing null ConditionSet offsets in
+ FeatureVariationRecords (#3211, #3212).
+- [statisticsPen] Report font glyph-average weight/width and font-wide slant.
+- [fontBuilder] Fixed head.created date incorrectly set to 0 instead of the current
+ timestamp, regression introduced in v4.40.0 (#3210).
+- [varLib.merger] Support sparse ``CursivePos`` masters (#3209).
+
+4.41.0 (released 2023-07-12)
+----------------------------
+
+- [fontBuilder] Fixed bug in setupOS2 with default panose attribute incorrectly being
+ set to a dict instead of a Panose object (#3201).
+- [name] Added method to ``removeUnusedNameRecords`` in the user range (#3185).
+- [varLib.instancer] Fixed issue with L4 instancing (moving default) (#3179).
+- [cffLib] Use latin1 so we can roundtrip non-ASCII in {Full,Font,Family}Name (#3202).
+- [designspaceLib] Mark <source name="..."> as optional in docs (as it is in the code).
+- [glyf-1] Fixed drawPoints() bug whereby last cubic segment becomes quadratic (#3189, #3190).
+- [fontBuilder] Propagate the 'hidden' flag to the fvar Axis instance (#3184).
+- [fontBuilder] Update setupAvar() to also support avar 2, fixing ``_add_avar()`` call
+ site (#3183).
+- Added new ``voltLib.voltToFea`` submodule (originally Tiro Typeworks' "Volto") for
+ converting VOLT OpenType Layout sources to FEA format (#3164).
+
+4.40.0 (released 2023-06-12)
+----------------------------
+
+- Published native binary wheels to PyPI for all the python minor versions and platform
+ and architectures currently supported that would benefit from this. They will include
+ precompiled Cython-accelerated modules (e.g. cu2qu) without requiring to compile them
+ from source. The pure-python wheel and source distribution will continue to be
+ published as always (pip will automatically choose them when no binary wheel is
+ available for the given platform, e.g. pypy). Use ``pip install --no-binary=fonttools fonttools``
+ to explicitly request pip to install from the pure-python source.
+- [designspaceLib|varLib] Add initial support for specifying axis mappings and build
+ ``avar2`` table from those (#3123).
+- [feaLib] Support variable ligature caret position (#3130).
+- [varLib|glyf] Added option to --drop-implied-oncurves; test for impliable oncurve
+ points either before or after rounding (#3146, #3147, #3155, #3156).
+- [TTGlyphPointPen] Don't error with empty contours, simply ignore them (#3145).
+- [sfnt] Fixed str vs bytes remnant of py3 transition in code dealing with de/compiling
+ WOFF metadata (#3129).
+- [instancer-solver] Fixed bug when moving default instance with sparse masters (#3139, #3140).
+- [feaLib] Simplify variable scalars that don’t vary (#3132).
+- [pens] Added filter pen that explicitly emits closing line when lastPt != movePt (#3100).
+- [varStore] Improve optimize algorithm and better document the algorithm (#3124, #3127).
+ Added ``quantization`` option (#3126).
+- Added CI workflow config file for building native binary wheels (#3121).
+- [fontBuilder] Added glyphDataFormat=0 option; raise error when glyphs contain cubic
+ outlines but glyphDataFormat was not explicitly set to 1 (#3113, #3119).
+- [subset] Prune emptied GDEF.MarkGlyphSetsDef and remap indices; ensure GDEF is
+ subsetted before GSUB and GPOS (#3114, #3118).
+- [xmlReader] Fixed issue whereby DSIG table data was incorrectly parsed (#3115, #2614).
+- [varLib/merger] Fixed merging of SinglePos with pos=0 (#3111, #3112).
+- [feaLib] Demote "Feature has not been defined" error to a warning when building aalt
+ and referenced feature is empty (#3110).
+- [feaLib] Dedupe multiple substitutions with classes (#3105).
+
+4.39.4 (released 2023-05-10)
+----------------------------
+
+- [varLib.interpolatable] Allow for sparse masters (#3075)
+- [merge] Handle differing default/nominalWidthX in CFF (#3070)
+- [ttLib] Add missing main.py file to ttLib package (#3088)
+- [ttx] Fix missing composite instructions in XML (#3092)
+- [ttx] Fix split tables option to work on filenames containing '%' (#3096)
+- [featureVars] Process lookups for features other than rvrn last (#3099)
+- [feaLib] support multiple substitution with classes (#3103)
+
+4.39.3 (released 2023-03-28)
+----------------------------
+
+- [sbix] Fixed TypeError when compiling empty glyphs whose imageData is None, regression
+ was introduced in v4.39 (#3059).
+- [ttFont] Fixed AttributeError on python <= 3.10 when opening a TTFont from a tempfile
+ SpooledTemporaryFile, seekable method only added on python 3.11 (#3052).
+
+4.39.2 (released 2023-03-16)
+----------------------------
+
+- [varLib] Fixed regression introduced in 4.39.1 whereby an incomplete 'STAT' table
+ would be built even though a DesignSpace v5 did contain 'STAT' definitions (#3045, #3046).
+
+4.39.1 (released 2023-03-16)
+----------------------------
+
+- [avar2] Added experimental support for reading/writing avar version 2 as specified in
+ this draft proposal: https://github.com/harfbuzz/boring-expansion-spec/blob/main/avar2.md
+- [glifLib] Wrap underlying XML library exceptions with GlifLibError when parsing GLIFs,
+ and also print the name and path of the glyph that fails to be parsed (#3042).
+- [feaLib] Consult avar for normalizing user-space values in ConditionSets and in
+ VariableScalars (#3042, #3043).
+- [ttProgram] Handle string input to Program.fromAssembly() (#3038).
+- [otlLib] Added a config option to emit GPOS 7 lookups, currently disabled by default
+ because of a macOS bug (#3034).
+- [COLRv1] Added method to automatically compute ClipBoxes (#3027).
+- [ttFont] Fixed getGlyphID to raise KeyError on missing glyphs instead of returning
+ None. The regression was introduced in v4.27.0 (#3032).
+- [sbix] Fixed UnboundLocalError: cannot access local variable 'rawdata' (#3031).
+- [varLib] When building VF, do not overwrite a pre-existing ``STAT`` table that was built
+ with feaLib from FEA feature file. Also, added support for building multiple VFs
+ defined in Designspace v5 from ``fonttools varLib`` script (#3024).
+- [mtiLib] Only add ``Debg`` table with lookup names when ``FONTTOOLS_LOOKUP_DEBUGGING``
+ env variable is set (#3023).
+
+4.39.0 (released 2023-03-06)
+----------------------------
+
+- [mtiLib] Optionally add `Debg` debug info for MTI feature builds (#3018).
+- [ttx] Support reading input file from standard input using special `-` character,
+ similar to existing `-o -` option to write output to standard output (#3020).
+- [cython] Prevent ``cython.compiled`` raise AttributeError if cython not installed
+ properly (#3017).
+- [OS/2] Guard against ZeroDivisionError when calculating xAvgCharWidth in the unlikely
+ scenario no glyph has non-zero advance (#3015).
+- [subset] Recompute xAvgCharWidth independently of --no-prune-unicode-ranges,
+ previously the two options were involuntarily bundled together (#3012).
+- [fontBuilder] Add ``debug`` parameter to addOpenTypeFeatures method to add source
+ debugging information to the font in the ``Debg`` private table (#3008).
+- [name] Make NameRecord `__lt__` comparison not fail on Unicode encoding errors (#3006).
+- [featureVars] Fixed bug in ``overlayBox`` (#3003, #3005).
+- [glyf] Added experimental support for cubic bezier curves in TrueType glyf table, as
+ outlined in glyf v1 proposal (#2988):
+ https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1-cubicOutlines.md
+- Added new qu2cu module and related qu2cuPen, the reverse of cu2qu for converting
+ TrueType quadratic splines to cubic bezier curves (#2993).
+- [glyf] Added experimental support for reading and writing Variable Composites/Components
+ as defined in glyf v1 spec proposal (#2958):
+ https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1-varComposites.md.
+- [pens]: Added `addVarComponent` method to pen protocols' base classes, which pens can implement
+ to handle varcomponents (by default they get decomposed) (#2958).
+- [misc.transform] Added DecomposedTransform class which implements an affine transformation
+ with separate translate, rotation, scale, skew, and transformation-center components (#2598)
+- [sbix] Ensure Glyph.referenceGlyphName is set; fixes error after dumping and
+ re-compiling sbix table with 'dupe' glyphs (#2984).
+- [feaLib] Be cleverer when merging chained single substitutions into same lookup
+ when they are specified using the inline notation (#2150, #2974).
+- [instancer] Clamp user-inputted axis ranges to those of fvar (#2959).
+- [otBase/subset] Define ``__getstate__`` for BaseTable so that a copied/pickled 'lazy'
+ object gets its own OTTableReader to read from; incidentally fixes a bug while
+ subsetting COLRv1 table containing ClipBoxes on python 3.11 (#2965, #2968).
+- [sbix] Handle glyphs with "dupe" graphic type on compile correctly (#2963).
+- [glyf] ``endPointsOfContours`` field should be unsigned! Kudos to behdad for
+ spotting one of the oldest bugs in FT. Probably nobody has ever dared to make
+ glyphs with more than 32767 points... (#2957).
+- [feaLib] Fixed handling of ``ignore`` statements with unmarked glyphs to match
+ makeotf behavior, which assumes the first glyph is marked (#2950).
+- Reformatted code with ``black`` and enforce new code style via CI check (#2925).
+- [feaLib] Sort name table entries following OT spec prescribed order in the builder (#2927).
+- [cu2quPen] Add Cu2QuMultiPen that converts multiple outlines at a time in
+ interpolation compatible way; its methods take a list of tuples arguments
+ that would normally be passed to individual segment pens, and at the end it
+ dispatches the converted outlines to each pen (#2912).
+- [reverseContourPen/ttGlyphPen] Add outputImpliedClosingLine option (#2913, #2914,
+ #2921, #2922, #2995).
+- [gvar] Avoid expanding all glyphs unnecessarily upon compile (#2918).
+- [scaleUpem] Fixed bug whereby CFF2 vsindex was scaled; it should not (#2893, #2894).
+- [designspaceLib] Add DS.getAxisByTag and refactor getAxis (#2891).
+- [unicodedata] map Zmth<->math in ot_tag_{to,from}_script (#1737, #2889).
+- [woff2] Support encoding/decoding OVERLAP_SIMPLE glyf flags (#2576, #2884).
+- [instancer] Update OS/2 class and post.italicAngle when default moved (L4)
+- Dropped support for Python 3.7 which reached EOL, fontTools requires 3.8+.
+- [instancer] Fixed instantiateFeatureVariations logic when a rule range becomes
+ default-applicable (#2737, #2880).
+- [ttLib] Add main to ttFont and ttCollection that just decompile and re-compile the
+ input font (#2869).
+- [featureVars] Insert 'rvrn' lookup at the beginning of LookupList, to work around bug
+ in Apple implementation of 'rvrn' feature which the spec says it should be processed
+ early whereas on macOS 10.15 it follows lookup order (#2140, #2867).
+- [instancer/mutator] Remove 'DSIG' table if present.
+- [svgPathPen] Don't close path in endPath(), assume open unless closePath() (#2089, #2865).
+
+4.38.0 (released 2022-10-21)
+----------------------------
+
+- [varLib.instancer] Added support for L4 instancing, i.e. moving the default value of
+ an axis while keeping it variable. Thanks Behdad! (#2728, #2861).
+ It's now also possible to restrict an axis min/max values beyond the current default
+ value, e.g. a font wght has min=100, def=400, max=900 and you want a partial VF that
+ only varies between 500 and 700, you can now do that.
+ You can either specify two min/max values (wght=500:700), and the new default will be
+ set to either the minimum or maximum, depending on which one is closer to the current
+ default (e.g. 500 in this case). Or you can specify three values (e.g. wght=500:600:700)
+ to specify the new default value explicitly.
+- [otlLib/featureVars] Set a few Count values so one doesn't need to compile the font
+ to update them (#2860).
+- [varLib.models] Make extrapolation work for 2-master models as well where one master
+ is at the default location (#2843, #2846).
+ Add optional extrapolate=False to normalizeLocation() (#2847, #2849).
+- [varLib.cff] Fixed sub-optimal packing of CFF2 deltas by no longer rounding them to
+ integer (#2838).
+- [scaleUpem] Calculate numShorts in VarData after scale; handle CFF hintmasks (#2840).
+
+4.37.4 (released 2022-09-30)
+----------------------------
+
+- [subset] Keep nameIDs used by CPAL palette entry labels (#2837).
+- [varLib] Avoid negative hmtx values when creating font from variable CFF2 font (#2827).
+- [instancer] Don't prune stat.ElidedFallbackNameID (#2828).
+- [unicodedata] Update Scripts/Blocks to Unicode 15.0 (#2833).
+
+4.37.3 (released 2022-09-20)
+----------------------------
+
+- Fix arguments in calls to (glyf) glyph.draw() and drawPoints(), whereby offset wasn't
+ correctly passed down; this fix also exposed a second bug, where lsb and tsb were not
+ set (#2824, #2825, adobe-type-tools/afdko#1560).
+
+4.37.2 (released 2022-09-15)
+----------------------------
+
+- [subset] Keep CPAL table and don't attempt to prune unused color indices if OT-SVG
+ table is present even if COLR table was subsetted away; OT-SVG may be referencing the
+ CPAL table; for now we assume that's the case (#2814, #2815).
+- [varLib.instancer] Downgrade GPOS/GSUB version if there are no more FeatureVariations
+ after instancing (#2812).
+- [subset] Added ``--no-lazy`` to optionally load fonts eagerly (mostly to ease
+ debugging of table lazy loading, no practical effects) (#2807).
+- [varLib] Avoid building empty COLR.DeltaSetIndexMap with only identity mappings (#2803).
+- [feaLib] Allow multiple value record types (by promoting to the most general format)
+ within the same PairPos subtable; e.g. this allows variable and non variable kerning
+ rules to share the same subtable. This also fixes a bug whereby some kerning pairs
+ would become unreachable while shaping because of premature subtable splitting (#2772, #2776).
+- [feaLib] Speed up ``VarScalar`` by caching models for recurring master locations (#2798).
+- [feaLib] Optionally cythonize ``feaLib.lexer``, speeds up parsing FEA a bit (#2799).
+- [designspaceLib] Avoid crash when handling unbounded rule conditions (#2797).
+- [post] Don't crash if ``post`` legacy format 1 is malformed/improperly used (#2786)
+- [gvar] Don't be "lazy" (load all glyph variations up front) when TTFont.lazy=False (#2771).
+- [TTFont] Added ``normalizeLocation`` method to normalize a location dict from the
+ font's defined axes space (also known as "user space") into the normalized (-1..+1)
+ space. It applies ``avar`` mapping if the font contains an ``avar`` table (#2789).
+- [TTVarGlyphSet] Support drawing glyph instances from CFF2 variable glyph set (#2784).
+- [fontBuilder] Do not error when building cmap if there are zero code points (#2785).
+- [varLib.plot] Added ability to plot a variation model and set of accompanying master
+ values corresponding to the model's master locations into a pyplot figure (#2767).
+- [Snippets] Added ``statShape.py`` script to draw statistical shape of a glyph as an
+ ellipse (requires pycairo) (baecd88).
+- [TTVarGlyphSet] implement drawPoints natively, avoiding going through
+ SegmentToPointPen (#2778).
+- [TTVarGlyphSet] Fixed bug whereby drawing a composite glyph multiple times, its
+ components would shift; needed an extra copy (#2774).
+
4.37.1 (released 2022-08-24)
----------------------------
@@ -177,30 +490,30 @@
- [OS/2 / merge] Automatically recalculate ``OS/2.xAvgCharWidth`` after merging
fonts with ``fontTools.merge`` (#2591, #2538).
- [misc/config] Added ``fontTools.misc.configTools`` module, a generic configuration
- system (#2416, #2439).
+ system (#2416, #2439).
Added ``fontTools.config`` module, a fontTools-specific configuration
- system using ``configTools`` above.
+ system using ``configTools`` above.
Attached a ``Config`` object to ``TTFont``.
- [otlLib] Replaced environment variable for GPOS compression level with an
equivalent option using the new config system.
-- [designspaceLib] Incremented format version to 5.0 (#2436).
+- [designspaceLib] Incremented format version to 5.0 (#2436).
Added discrete axes, variable fonts, STAT information, either design- or
- user-space location on instances.
+ user-space location on instances.
Added ``fontTools.designspaceLib.split`` module to split a designspace
into sub-spaces that interpolate and that represent the variable fonts
- listed in the document.
+ listed in the document.
Made instance names optional and allow computing them from STAT data instead.
- Added ``fontTools.designspaceLib.statNames`` module.
- Allow instances to have the same location as a previously defined STAT label.
- Deprecated some attributes:
- ``SourceDescriptor``: ``copyLib``, ``copyInfo``, ``copyGroups``, ``copyFeatures``.
+ Added ``fontTools.designspaceLib.statNames`` module.
+ Allow instances to have the same location as a previously defined STAT label.
+ Deprecated some attributes:
+ ``SourceDescriptor``: ``copyLib``, ``copyInfo``, ``copyGroups``, ``copyFeatures``.
``InstanceDescriptor``: ``kerning``, ``info``; ``glyphs``: use rules or sparse
- sources.
- For both, ``location``: use the more explicit designLocation.
- Note: all are soft deprecations and existing code should keep working.
+ sources.
+ For both, ``location``: use the more explicit designLocation.
+ Note: all are soft deprecations and existing code should keep working.
Updated documentation for Python methods and the XML format.
- [varLib] Added ``build_many`` to build several variable fonts from a single
- designspace document (#2436).
+ designspace document (#2436).
Added ``fontTools.varLib.stat`` module to build STAT tables from a designspace
document.
- [otBase] Try to use the Harfbuzz Repacker for packing GSUB/GPOS tables when
@@ -391,12 +704,12 @@
4.25.2 (released 2021-07-26)
----------------------------
-- [COLRv1] Various changes to sync with the latest CORLv1 draft spec. In particular:
- define COLR.VarIndexMap, remove/inline ColorIndex struct, add VarIndexBase to ``PaintVar*`` tables (#2372);
- add reduced-precicion specialized transform Paints;
- define Angle as fraction of half circle encoded as F2Dot14;
- use FWORD (int16) for all Paint center coordinates;
- change PaintTransform to have an offset to Affine2x3;
+- [COLRv1] Various changes to sync with the latest COLRv1 draft spec. In particular:
+ define COLR.VarIndexMap, remove/inline ColorIndex struct, add VarIndexBase to ``PaintVar*`` tables (#2372);
+ add reduced-precision specialized transform Paints;
+ define Angle as fraction of half circle encoded as F2Dot14;
+ use FWORD (int16) for all Paint center coordinates;
+ change PaintTransform to have an offset to Affine2x3;
- [ttLib] when importing XML, only set sfntVersion if the font has no reader and is empty (#2376)
4.25.1 (released 2021-07-16)
@@ -840,7 +1153,7 @@
(#1872).
- [Snippets/otf2ttf] In otf2ttf.py script update LSB in hmtx to match xMin (#1873).
- [colorLib] Added experimental support for building ``COLR`` v1 tables as per
- the `colr-gradients-spec <https://github.com/googlefonts/colr-gradients-spec/blob/master/colr-gradients-spec.md>`__
+ the `colr-gradients-spec <https://github.com/googlefonts/colr-gradients-spec/blob/main/colr-gradients-spec.md>`__
draft proposal. **NOTE**: both the API and the XML dump of ``COLR`` v1 are
susceptible to change while the proposal is being discussed and formalized (#1822).
diff --git a/README.rst b/README.rst
index fdff2433..bcb7f0d4 100644
--- a/README.rst
+++ b/README.rst
@@ -18,8 +18,9 @@ are available at `Read the Docs <https://fonttools.readthedocs.io/>`_.
Installation
~~~~~~~~~~~~
-FontTools requires `Python <http://www.python.org/download/>`__ 3.7
-or later.
+FontTools requires `Python <http://www.python.org/download/>`__ 3.8
+or later. We try to follow the same schedule of minimum Python version support as
+NumPy (see `NEP 29 <https://numpy.org/neps/nep-0029-deprecation_policy.html>`__).
The package is listed in the Python Package Index (PyPI), so you can
install it with `pip <https://pip.pypa.io>`__:
@@ -120,8 +121,7 @@ are required to unlock the extra features named "ufo", etc.
* `unicodedata2 <https://pypi.python.org/pypi/unicodedata2>`__:
``unicodedata`` backport for Python 3.x updated to the latest Unicode
- version 14.0. Note this is not necessary if you use Python 3.11
- as the latter already comes with an up-to-date ``unicodedata``.
+ version 15.0.
*Extra:* ``unicode``
@@ -203,15 +203,25 @@ are required to unlock the extra features named "ufo", etc.
Pen to drawing glyphs with FreeType as raster images, requires:
- * `freetype-py <https://pypi.python.org/pypi/freetype-py>`__: Python binding
+ * `freetype-py <https://pypi.python.org/pypi/freetype-py>`__: Python binding
for the FreeType library.
+
+- ``Lib/fontTools/ttLib/tables/otBase.py``
+
+ Use the Harfbuzz library to serialize GPOS/GSUB using ``hb_repack`` method, requires:
+
+ * `uharfbuzz <https://pypi.python.org/pypi/uharfbuzz>`__: Streamlined Cython
+ bindings for the harfbuzz shaping engine
+
+ *Extra:* ``repacker``
How to make a new release
~~~~~~~~~~~~~~~~~~~~~~~~~
1) Update ``NEWS.rst`` with all the changes since the last release. Write a
changelog entry for each PR, with one or two short sentences summarizing it,
- as well as links to the PR and relevant issues addressed by the PR.
+ as well as links to the PR and relevant issues addressed by the PR. Do not
+ put a new title, the next command will do it for you.
2) Use semantic versioning to decide whether the new release will be a 'major',
'minor' or 'patch' release. It's usually one of the latter two, depending on
whether new backward compatible APIs were added, or simply some bugs were fixed.
@@ -226,7 +236,8 @@ How to make a new release
It also commits an additional version bump which opens the main branch for
the subsequent developmental cycle
4) Push both the tag and commit to the upstream repository, by running the command
- ``git push --follow-tags``.
+ ``git push --follow-tags``. Note: it may push other local tags as well, be
+ careful.
5) Let the CI build the wheel and source distribution packages and verify both
get uploaded to the Python Package Index (PyPI).
6) [Optional] Go to fonttools `Github Releases <https://github.com/fonttools/fonttools/releases>`__
@@ -242,17 +253,16 @@ Acknowledgements
In alphabetical order:
aschmitz, Olivier Berten, Samyak Bhuta, Erik van Blokland, Petr van Blokland,
-Jelle Bosma, Sascha Brawer, Tom Byrer, Antonio Cavedoni, Frédéric
-Coiffier, Vincent Connare, David Corbett, Simon Cozens, Dave Crossland,
-Simon Daniels, Peter Dekkers, Behdad Esfahbod, Behnam Esfahbod, Hannes
-Famira, Sam Fishman, Matt Fontaine, Takaaki Fuji, Yannis Haralambous, Greg
-Hitchcock, Jeremie Hornus, Khaled Hosny, John Hudson, Denis Moyogo Jacquerye,
-Jack Jansen, Tom Kacvinsky, Jens Kutilek, Antoine Leca, Werner Lemberg, Tal
-Leming, Peter Lofting, Cosimo Lupo, Olli Meier, Masaya Nakamura, Dave Opstad,
-Laurence Penney, Roozbeh Pournader, Garret Rieger, Read Roberts, Guido
-van Rossum, Just van Rossum, Andreas Seidel, Georg Seifert, Chris
-Simpkins, Miguel Sousa, Adam Twardoch, Adrien Tétar, Vitaly Volkov,
-Paul Wise.
+Jelle Bosma, Sascha Brawer, Tom Byrer, Antonio Cavedoni, Frédéric Coiffier,
+Vincent Connare, David Corbett, Simon Cozens, Dave Crossland, Simon Daniels,
+Peter Dekkers, Behdad Esfahbod, Behnam Esfahbod, Hannes Famira, Sam Fishman,
+Matt Fontaine, Takaaki Fuji, Rob Hagemans, Yannis Haralambous, Greg Hitchcock,
+Jeremie Hornus, Khaled Hosny, John Hudson, Denis Moyogo Jacquerye, Jack Jansen,
+Tom Kacvinsky, Jens Kutilek, Antoine Leca, Werner Lemberg, Tal Leming, Peter
+Lofting, Cosimo Lupo, Olli Meier, Masaya Nakamura, Dave Opstad, Laurence Penney,
+Roozbeh Pournader, Garret Rieger, Read Roberts, Colin Rofls, Guido van Rossum,
+Just van Rossum, Andreas Seidel, Georg Seifert, Chris Simpkins, Miguel Sousa,
+Adam Twardoch, Adrien Tétar, Vitaly Volkov, Paul Wise.
Copyrights
~~~~~~~~~~
@@ -273,7 +283,7 @@ Have fun!
.. |CI Build Status| image:: https://github.com/fonttools/fonttools/workflows/Test/badge.svg
:target: https://github.com/fonttools/fonttools/actions?query=workflow%3ATest
-.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/master/graph/badge.svg
+.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/main/graph/badge.svg
:target: https://codecov.io/gh/fonttools/fonttools
.. |PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg
:target: https://pypi.org/project/FontTools
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000..46eb4871
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to cosimo@anthrotype.com, behdad@behdad.org and fonttools-admin@googlegroups.com; and/or
+- send us a [private vulnerability report](https://github.com/fonttools/fonttools/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+Please allow us 90 days to work on a fix before public disclosure.
diff --git a/Snippets/checksum.py b/Snippets/checksum.py
index b965a357..097ddd53 100644
--- a/Snippets/checksum.py
+++ b/Snippets/checksum.py
@@ -11,11 +11,23 @@ from os.path import basename
from fontTools.ttLib import TTFont
-def write_checksum(filepaths, stdout_write=False, use_ttx=False, include_tables=None, exclude_tables=None, do_not_cleanup=False):
+def write_checksum(
+ filepaths,
+ stdout_write=False,
+ use_ttx=False,
+ include_tables=None,
+ exclude_tables=None,
+ do_not_cleanup=False,
+):
checksum_dict = {}
for path in filepaths:
if not os.path.exists(path):
- sys.stderr.write("[checksum.py] ERROR: " + path + " is not a valid file path" + os.linesep)
+ sys.stderr.write(
+ "[checksum.py] ERROR: "
+ + path
+ + " is not a valid file path"
+ + os.linesep
+ )
sys.exit(1)
if use_ttx:
@@ -33,12 +45,16 @@ def write_checksum(filepaths, stdout_write=False, use_ttx=False, include_tables=
checksum_path = temp_ttx_path
else:
if include_tables is not None:
- sys.stderr.write("[checksum.py] -i and --include are not supported for font binary filepaths. \
- Use these flags for checksums with the --ttx flag.")
+ sys.stderr.write(
+ "[checksum.py] -i and --include are not supported for font binary filepaths. \
+ Use these flags for checksums with the --ttx flag."
+ )
sys.exit(1)
if exclude_tables is not None:
- sys.stderr.write("[checksum.py] -e and --exclude are not supported for font binary filepaths. \
- Use these flags for checksums with the --ttx flag.")
+ sys.stderr.write(
+ "[checksum.py] -e and --exclude are not supported for font binary filepaths. \
+ Use these flags for checksums with the --ttx flag."
+ )
sys.exit(1)
checksum_path = path
@@ -69,10 +85,12 @@ def check_checksum(filepaths):
check_failed = False
for path in filepaths:
if not os.path.exists(path):
- sys.stderr.write("[checksum.py] ERROR: " + path + " is not a valid filepath" + os.linesep)
+ sys.stderr.write(
+ "[checksum.py] ERROR: " + path + " is not a valid filepath" + os.linesep
+ )
sys.exit(1)
- with open(path, mode='r') as file:
+ with open(path, mode="r") as file:
for line in file.readlines():
cleaned_line = line.rstrip()
line_list = cleaned_line.split(" ")
@@ -82,7 +100,10 @@ def check_checksum(filepaths):
expected_sha1 = line_list[0]
test_path = line_list[1]
else:
- sys.stderr.write("[checksum.py] ERROR: failed to parse checksum file values" + os.linesep)
+ sys.stderr.write(
+ "[checksum.py] ERROR: failed to parse checksum file values"
+ + os.linesep
+ )
sys.exit(1)
if not os.path.exists(test_path):
@@ -107,25 +128,60 @@ def check_checksum(filepaths):
def _read_binary(filepath):
- with open(filepath, mode='rb') as file:
+ with open(filepath, mode="rb") as file:
return file.read()
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(prog="checksum.py", description="A SHA1 hash checksum list generator and checksum testing script")
- parser.add_argument("-t", "--ttx", help="Calculate from ttx file", action="store_true")
- parser.add_argument("-s", "--stdout", help="Write output to stdout stream", action="store_true")
- parser.add_argument("-n", "--noclean", help="Do not discard *.ttx files used to calculate SHA1 hashes", action="store_true")
- parser.add_argument("-c", "--check", help="Verify checksum values vs. files", action="store_true")
- parser.add_argument("filepaths", nargs="+", help="One or more file paths. Use checksum file path for -c/--check. Use paths\
- to font files for all other commands.")
-
- parser.add_argument("-i", "--include", action="append", help="Included OpenType tables for ttx data dump")
- parser.add_argument("-e", "--exclude", action="append", help="Excluded OpenType tables for ttx data dump")
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ prog="checksum.py",
+ description="A SHA1 hash checksum list generator and checksum testing script",
+ )
+ parser.add_argument(
+ "-t", "--ttx", help="Calculate from ttx file", action="store_true"
+ )
+ parser.add_argument(
+ "-s", "--stdout", help="Write output to stdout stream", action="store_true"
+ )
+ parser.add_argument(
+ "-n",
+ "--noclean",
+ help="Do not discard *.ttx files used to calculate SHA1 hashes",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-c", "--check", help="Verify checksum values vs. files", action="store_true"
+ )
+ parser.add_argument(
+ "filepaths",
+ nargs="+",
+ help="One or more file paths. Use checksum file path for -c/--check. Use paths\
+ to font files for all other commands.",
+ )
+
+ parser.add_argument(
+ "-i",
+ "--include",
+ action="append",
+ help="Included OpenType tables for ttx data dump",
+ )
+ parser.add_argument(
+ "-e",
+ "--exclude",
+ action="append",
+ help="Excluded OpenType tables for ttx data dump",
+ )
args = parser.parse_args(sys.argv[1:])
if args.check is True:
check_checksum(args.filepaths)
else:
- write_checksum(args.filepaths, stdout_write=args.stdout, use_ttx=args.ttx, do_not_cleanup=args.noclean, include_tables=args.include, exclude_tables=args.exclude)
+ write_checksum(
+ args.filepaths,
+ stdout_write=args.stdout,
+ use_ttx=args.ttx,
+ do_not_cleanup=args.noclean,
+ include_tables=args.include,
+ exclude_tables=args.exclude,
+ )
diff --git a/Snippets/cmap-format.py b/Snippets/cmap-format.py
index 0a78670f..735fcc07 100755
--- a/Snippets/cmap-format.py
+++ b/Snippets/cmap-format.py
@@ -15,24 +15,24 @@ from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
import sys
if len(sys.argv) != 3:
- print("usage: cmap-format.py fontfile.ttf outfile.ttf")
- sys.exit(1)
+ print("usage: cmap-format.py fontfile.ttf outfile.ttf")
+ sys.exit(1)
fontfile = sys.argv[1]
outfile = sys.argv[2]
font = TTFont(fontfile)
-cmap = font['cmap']
+cmap = font["cmap"]
outtables = []
for table in cmap.tables:
- if table.format in [4, 12, 13, 14]:
- outtables.append(table)
- # Convert ot format4
- newtable = CmapSubtable.newSubtable(4)
- newtable.platformID = table.platformID
- newtable.platEncID = table.platEncID
- newtable.language = table.language
- newtable.cmap = table.cmap
- outtables.append(newtable)
+ if table.format in [4, 12, 13, 14]:
+ outtables.append(table)
+ # Convert ot format4
+ newtable = CmapSubtable.newSubtable(4)
+ newtable.platformID = table.platformID
+ newtable.platEncID = table.platEncID
+ newtable.language = table.language
+ newtable.cmap = table.cmap
+ outtables.append(newtable)
cmap.tables = outtables
font.save(outfile)
diff --git a/Snippets/dump_woff_metadata.py b/Snippets/dump_woff_metadata.py
index c9ea574f..e6539e99 100644
--- a/Snippets/dump_woff_metadata.py
+++ b/Snippets/dump_woff_metadata.py
@@ -8,8 +8,9 @@ def main(args=None):
args = sys.argv[1:]
if len(args) < 1:
- print("usage: dump_woff_metadata.py "
- "INPUT.woff [OUTPUT.xml]", file=sys.stderr)
+ print(
+ "usage: dump_woff_metadata.py " "INPUT.woff [OUTPUT.xml]", file=sys.stderr
+ )
return 1
infile = args[0]
diff --git a/Snippets/fix-dflt-langsys.py b/Snippets/fix-dflt-langsys.py
index c072117a..a9cc8696 100644
--- a/Snippets/fix-dflt-langsys.py
+++ b/Snippets/fix-dflt-langsys.py
@@ -14,8 +14,11 @@ def ProcessTable(table):
for rec in table.ScriptList.ScriptRecord:
if rec.ScriptTag == "DFLT" and rec.Script.LangSysCount != 0:
tags = [r.LangSysTag for r in rec.Script.LangSysRecord]
- logging.info("Removing %d extraneous LangSys records: %s",
- rec.Script.LangSysCount, " ".join(tags))
+ logging.info(
+ "Removing %d extraneous LangSys records: %s",
+ rec.Script.LangSysCount,
+ " ".join(tags),
+ )
rec.Script.LangSysRecord = []
rec.Script.LangSysCount = 0
found.update(tags)
@@ -29,8 +32,9 @@ def ProcessTable(table):
found -= tags
if found:
- logging.warning("Records are missing from non-DFLT scripts: %s",
- " ".join(found))
+ logging.warning(
+ "Records are missing from non-DFLT scripts: %s", " ".join(found)
+ )
return True
@@ -64,12 +68,13 @@ def ProcessFiles(filenames):
def main():
- parser = argparse.ArgumentParser(
- description="Fix LangSys records for DFLT script")
- parser.add_argument("files", metavar="FILE", type=str, nargs="+",
- help="input font to process")
- parser.add_argument("-s", "--silent", action='store_true',
- help="suppress normal messages")
+ parser = argparse.ArgumentParser(description="Fix LangSys records for DFLT script")
+ parser.add_argument(
+ "files", metavar="FILE", type=str, nargs="+", help="input font to process"
+ )
+ parser.add_argument(
+ "-s", "--silent", action="store_true", help="suppress normal messages"
+ )
args = parser.parse_args()
@@ -81,5 +86,6 @@ def main():
ProcessFiles(args.files)
+
if __name__ == "__main__":
sys.exit(main())
diff --git a/Snippets/interpolate.py b/Snippets/interpolate.py
index 063046c9..89300095 100755
--- a/Snippets/interpolate.py
+++ b/Snippets/interpolate.py
@@ -40,11 +40,12 @@ def AddFontVariations(font):
# https://www.microsoft.com/typography/otspec/os2.htm#wtc
for name, wght in (
- ("Thin", 100),
- ("Light", 300),
- ("Regular", 400),
- ("Bold", 700),
- ("Black", 900)):
+ ("Thin", 100),
+ ("Light", 300),
+ ("Regular", 400),
+ ("Bold", 700),
+ ("Black", 900),
+ ):
inst = NamedInstance()
inst.nameID = AddName(font, name).nameID
inst.coordinates = {"wght": wght}
@@ -72,19 +73,21 @@ def AddGlyphVariations(font, thin, regular, black):
regularCoord = GetCoordinates(regular, glyphName)
thinCoord = GetCoordinates(thin, glyphName)
blackCoord = GetCoordinates(black, glyphName)
- if not regularCoord or not blackCoord or not thinCoord:
- logging.warning("glyph %s not present in all input fonts",
- glyphName)
+ if not regularCoord or not blackCoord or not thinCoord:
+ logging.warning("glyph %s not present in all input fonts", glyphName)
continue
- if (len(regularCoord) != len(blackCoord) or
- len(regularCoord) != len(thinCoord)):
- logging.warning("glyph %s has not the same number of "
- "control points in all input fonts", glyphName)
+ if len(regularCoord) != len(blackCoord) or len(regularCoord) != len(thinCoord):
+ logging.warning(
+ "glyph %s has not the same number of "
+ "control points in all input fonts",
+ glyphName,
+ )
continue
thinDelta = []
blackDelta = []
- for ((regX, regY), (blackX, blackY), (thinX, thinY)) in \
- zip(regularCoord, blackCoord, thinCoord):
+ for (regX, regY), (blackX, blackY), (thinX, thinY) in zip(
+ regularCoord, blackCoord, thinCoord
+ ):
thinDelta.append(((thinX - regX, thinY - regY)))
blackDelta.append((blackX - regX, blackY - regY))
thinVar = TupleVariation({"wght": (-1.0, -1.0, 0.0)}, thinDelta)
@@ -111,7 +114,6 @@ def GetCoordinates(font, glyphName):
# Add phantom points for (left, right, top, bottom) positions.
horizontalAdvanceWidth, leftSideBearing = font["hmtx"].metrics[glyphName]
-
leftSideX = glyph.xMin - leftSideBearing
rightSideX = leftSideX + horizontalAdvanceWidth
@@ -119,10 +121,7 @@ def GetCoordinates(font, glyphName):
topSideY = glyph.yMax
bottomSideY = -glyph.yMin
- coord.extend([(leftSideX, 0),
- (rightSideX, 0),
- (0, topSideY),
- (0, bottomSideY)])
+ coord.extend([(leftSideX, 0), (rightSideX, 0), (0, topSideY), (0, bottomSideY)])
return coord
@@ -139,4 +138,5 @@ def main():
if __name__ == "__main__":
import sys
+
sys.exit(main())
diff --git a/Snippets/layout-features.py b/Snippets/layout-features.py
index 53e97355..4368332b 100755
--- a/Snippets/layout-features.py
+++ b/Snippets/layout-features.py
@@ -5,45 +5,47 @@ from fontTools.ttLib.tables import otTables
import sys
if len(sys.argv) != 2:
- print("usage: layout-features.py fontfile.ttf")
- sys.exit(1)
+ print("usage: layout-features.py fontfile.ttf")
+ sys.exit(1)
fontfile = sys.argv[1]
if fontfile.rsplit(".", 1)[-1] == "ttx":
- font = TTFont()
- font.importXML(fontfile)
+ font = TTFont()
+ font.importXML(fontfile)
else:
- font = TTFont(fontfile)
+ font = TTFont(fontfile)
-for tag in ('GSUB', 'GPOS'):
- if not tag in font: continue
- print("Table:", tag)
- table = font[tag].table
- if not table.ScriptList or not table.FeatureList: continue
- featureRecords = table.FeatureList.FeatureRecord
- for script in table.ScriptList.ScriptRecord:
- print(" Script:", script.ScriptTag)
- if not script.Script:
- print (" Null script.")
- continue
- languages = list(script.Script.LangSysRecord)
- if script.Script.DefaultLangSys:
- defaultlangsys = otTables.LangSysRecord()
- defaultlangsys.LangSysTag = "default"
- defaultlangsys.LangSys = script.Script.DefaultLangSys
- languages.insert(0, defaultlangsys)
- for langsys in languages:
- print(" Language:", langsys.LangSysTag)
- if not langsys.LangSys:
- print (" Null language.")
- continue
- features = [featureRecords[index] for index in langsys.LangSys.FeatureIndex]
- if langsys.LangSys.ReqFeatureIndex != 0xFFFF:
- record = featureRecords[langsys.LangSys.ReqFeatureIndex]
- requiredfeature = otTables.FeatureRecord()
- requiredfeature.FeatureTag = 'required(%s)' % record.FeatureTag
- requiredfeature.Feature = record.Feature
- features.insert(0, requiredfeature)
- for feature in features:
- print(" Feature:", feature.FeatureTag)
- lookups = feature.Feature.LookupListIndex
- print(" Lookups:", ','.join(str(l) for l in lookups))
+for tag in ("GSUB", "GPOS"):
+ if not tag in font:
+ continue
+ print("Table:", tag)
+ table = font[tag].table
+ if not table.ScriptList or not table.FeatureList:
+ continue
+ featureRecords = table.FeatureList.FeatureRecord
+ for script in table.ScriptList.ScriptRecord:
+ print(" Script:", script.ScriptTag)
+ if not script.Script:
+ print(" Null script.")
+ continue
+ languages = list(script.Script.LangSysRecord)
+ if script.Script.DefaultLangSys:
+ defaultlangsys = otTables.LangSysRecord()
+ defaultlangsys.LangSysTag = "default"
+ defaultlangsys.LangSys = script.Script.DefaultLangSys
+ languages.insert(0, defaultlangsys)
+ for langsys in languages:
+ print(" Language:", langsys.LangSysTag)
+ if not langsys.LangSys:
+ print(" Null language.")
+ continue
+ features = [featureRecords[index] for index in langsys.LangSys.FeatureIndex]
+ if langsys.LangSys.ReqFeatureIndex != 0xFFFF:
+ record = featureRecords[langsys.LangSys.ReqFeatureIndex]
+ requiredfeature = otTables.FeatureRecord()
+ requiredfeature.FeatureTag = "required(%s)" % record.FeatureTag
+ requiredfeature.Feature = record.Feature
+ features.insert(0, requiredfeature)
+ for feature in features:
+ print(" Feature:", feature.FeatureTag)
+ lookups = feature.Feature.LookupListIndex
+ print(" Lookups:", ",".join(str(l) for l in lookups))
diff --git a/Snippets/merge_woff_metadata.py b/Snippets/merge_woff_metadata.py
index d6e858f2..ac283a23 100644
--- a/Snippets/merge_woff_metadata.py
+++ b/Snippets/merge_woff_metadata.py
@@ -9,12 +9,14 @@ def main(args=None):
args = sys.argv[1:]
if len(args) < 2:
- print("usage: merge_woff_metadata.py METADATA.xml "
- "INPUT.woff [OUTPUT.woff]", file=sys.stderr)
+ print(
+ "usage: merge_woff_metadata.py METADATA.xml " "INPUT.woff [OUTPUT.woff]",
+ file=sys.stderr,
+ )
return 1
metadata_file = args[0]
- with open(metadata_file, 'rb') as f:
+ with open(metadata_file, "rb") as f:
metadata = f.read()
infile = args[1]
diff --git a/Snippets/otf2ttf.py b/Snippets/otf2ttf.py
index b925b33c..6682458f 100755
--- a/Snippets/otf2ttf.py
+++ b/Snippets/otf2ttf.py
@@ -25,14 +25,12 @@ POST_FORMAT = 2.0
REVERSE_DIRECTION = True
-def glyphs_to_quadratic(
- glyphs, max_err=MAX_ERR, reverse_direction=REVERSE_DIRECTION):
+def glyphs_to_quadratic(glyphs, max_err=MAX_ERR, reverse_direction=REVERSE_DIRECTION):
quadGlyphs = {}
for gname in glyphs.keys():
glyph = glyphs[gname]
ttPen = TTGlyphPen(glyphs)
- cu2quPen = Cu2QuPen(ttPen, max_err,
- reverse_direction=reverse_direction)
+ cu2quPen = Cu2QuPen(ttPen, max_err, reverse_direction=reverse_direction)
glyph.draw(cu2quPen)
quadGlyphs[gname] = ttPen.glyph()
return quadGlyphs
@@ -41,7 +39,7 @@ def glyphs_to_quadratic(
def update_hmtx(ttFont, glyf):
hmtx = ttFont["hmtx"]
for glyphName, glyph in glyf.glyphs.items():
- if hasattr(glyph, 'xMin'):
+ if hasattr(glyph, "xMin"):
hmtx[glyphName] = (hmtx[glyphName][0], glyph.xMin)
@@ -69,8 +67,9 @@ def otf_to_ttf(ttFont, post_format=POST_FORMAT, **kwargs):
maxp.maxStackElements = 0
maxp.maxSizeOfInstructions = 0
maxp.maxComponentElements = max(
- len(g.components if hasattr(g, 'components') else [])
- for g in glyf.glyphs.values())
+ len(g.components if hasattr(g, "components") else [])
+ for g in glyf.glyphs.values()
+ )
maxp.compile(ttFont)
post = ttFont["post"]
@@ -91,34 +90,42 @@ def main(args=None):
configLogger(logger=log)
parser = argparse.ArgumentParser()
- parser.add_argument("input", nargs='+', metavar="INPUT")
+ parser.add_argument("input", nargs="+", metavar="INPUT")
parser.add_argument("-o", "--output")
parser.add_argument("-e", "--max-error", type=float, default=MAX_ERR)
parser.add_argument("--post-format", type=float, default=POST_FORMAT)
parser.add_argument(
- "--keep-direction", dest='reverse_direction', action='store_false')
+ "--keep-direction", dest="reverse_direction", action="store_false"
+ )
parser.add_argument("--face-index", type=int, default=0)
- parser.add_argument("--overwrite", action='store_true')
+ parser.add_argument("--overwrite", action="store_true")
options = parser.parse_args(args)
if options.output and len(options.input) > 1:
if not os.path.isdir(options.output):
- parser.error("-o/--output option must be a directory when "
- "processing multiple fonts")
+ parser.error(
+ "-o/--output option must be a directory when "
+ "processing multiple fonts"
+ )
for path in options.input:
if options.output and not os.path.isdir(options.output):
output = options.output
else:
- output = makeOutputFileName(path, outputDir=options.output,
- extension='.ttf',
- overWrite=options.overwrite)
+ output = makeOutputFileName(
+ path,
+ outputDir=options.output,
+ extension=".ttf",
+ overWrite=options.overwrite,
+ )
font = TTFont(path, fontNumber=options.face_index)
- otf_to_ttf(font,
- post_format=options.post_format,
- max_err=options.max_error,
- reverse_direction=options.reverse_direction)
+ otf_to_ttf(
+ font,
+ post_format=options.post_format,
+ max_err=options.max_error,
+ reverse_direction=options.reverse_direction,
+ )
font.save(output)
diff --git a/Snippets/print-json.py b/Snippets/print-json.py
index bcd255ee..18d44c0a 100644
--- a/Snippets/print-json.py
+++ b/Snippets/print-json.py
@@ -139,7 +139,6 @@ def visit(self, obj):
if __name__ == "__main__":
-
from fontTools.ttLib import TTFont
import sys
diff --git a/Snippets/rename-fonts.py b/Snippets/rename-fonts.py
index 0a43dc2a..3ebd12aa 100755
--- a/Snippets/rename-fonts.py
+++ b/Snippets/rename-fonts.py
@@ -133,9 +133,7 @@ def main(args=None):
logging.basicConfig(level=level, format="%(message)s")
if options.output_file and len(options.input_fonts) > 1:
- parser.error(
- "argument -o/--output-file can't be used with multiple inputs"
- )
+ parser.error("argument -o/--output-file can't be used with multiple inputs")
if options.rename_files and (options.inplace or options.output_file):
parser.error("argument -R not allowed with arguments -i or -o")
@@ -151,9 +149,7 @@ def main(args=None):
output_name = options.output_file
else:
if options.rename_files:
- input_name = rename_file(
- input_name, family_name, options.suffix
- )
+ input_name = rename_file(input_name, family_name, options.suffix)
output_name = makeOutputFileName(input_name, options.output_dir)
font.save(output_name)
diff --git a/Snippets/statShape.py b/Snippets/statShape.py
new file mode 100644
index 00000000..e0b0d69e
--- /dev/null
+++ b/Snippets/statShape.py
@@ -0,0 +1,85 @@
+"""Draw statistical shape of a glyph as an ellipse."""
+
+from fontTools.ttLib import TTFont
+from fontTools.pens.recordingPen import RecordingPen
+from fontTools.pens.cairoPen import CairoPen
+from fontTools.pens.statisticsPen import StatisticsPen
+import cairo
+import math
+import sys
+
+
+font = TTFont(sys.argv[1])
+unicode = sys.argv[2]
+
+cmap = font["cmap"].getBestCmap()
+gid = cmap[ord(unicode)]
+
+hhea = font["hhea"]
+glyphset = font.getGlyphSet()
+with cairo.SVGSurface(
+ "example.svg", hhea.advanceWidthMax, hhea.ascent - hhea.descent
+) as surface:
+ context = cairo.Context(surface)
+ context.translate(0, +font["hhea"].ascent)
+ context.scale(1, -1)
+
+ glyph = glyphset[gid]
+
+ recording = RecordingPen()
+ glyph.draw(recording)
+
+ context.translate((hhea.advanceWidthMax - glyph.width) * 0.5, 0)
+
+ pen = CairoPen(glyphset, context)
+ glyph.draw(pen)
+ context.fill()
+
+ stats = StatisticsPen(glyphset)
+ glyph.draw(stats)
+
+ # https://cookierobotics.com/007/
+ a = stats.varianceX
+ b = stats.covariance
+ c = stats.varianceY
+ delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5
+ lambda1 = (a + c) * 0.5 + delta # Major eigenvalue
+ lambda2 = (a + c) * 0.5 - delta # Minor eigenvalue
+ theta = math.atan2(lambda1 - a, b) if b != 0 else (math.pi * 0.5 if a < c else 0)
+ mult = 4 # Empirical by drawing '.'
+ transform = cairo.Matrix()
+ transform.translate(stats.meanX, stats.meanY)
+ transform.rotate(theta)
+ transform.scale(math.sqrt(lambda1), math.sqrt(lambda2))
+ transform.scale(mult, mult)
+
+ ellipse_area = math.sqrt(lambda1) * math.sqrt(lambda2) * math.pi / 4 * mult * mult
+
+ if stats.area:
+ context.save()
+ context.set_line_cap(cairo.LINE_CAP_ROUND)
+ context.transform(transform)
+ context.move_to(0, 0)
+ context.line_to(0, 0)
+ context.set_line_width(1)
+ context.set_source_rgba(1, 0, 0, abs(stats.area / ellipse_area))
+ context.stroke()
+ context.restore()
+
+ context.save()
+ context.set_line_cap(cairo.LINE_CAP_ROUND)
+ context.set_source_rgb(0.8, 0, 0)
+ context.translate(stats.meanX, stats.meanY)
+
+ context.move_to(0, 0)
+ context.line_to(0, 0)
+ context.set_line_width(15)
+ context.stroke()
+
+ context.transform(cairo.Matrix(1, 0, stats.slant, 1, 0, 0))
+ context.move_to(0, -stats.meanY + font["hhea"].ascent)
+ context.line_to(0, -stats.meanY + font["hhea"].descent)
+ context.set_line_width(5)
+ context.stroke()
+
+ context.restore()
diff --git a/Snippets/subset-fpgm.py b/Snippets/subset-fpgm.py
index d06c3f5f..636e2155 100755
--- a/Snippets/subset-fpgm.py
+++ b/Snippets/subset-fpgm.py
@@ -4,13 +4,13 @@ from fontTools.ttLib import TTFont
import sys
if len(sys.argv) < 2:
- print("usage: subset-fpgm.py fontfile.ttf func-number...")
- sys.exit(1)
+ print("usage: subset-fpgm.py fontfile.ttf func-number...")
+ sys.exit(1)
fontfile = sys.argv[1]
func_nums = [int(x) for x in sys.argv[2:]]
font = TTFont(fontfile)
-fpgm = font['fpgm']
+fpgm = font["fpgm"]
# Parse fpgm
asm = fpgm.program.getAssembly()
@@ -18,39 +18,40 @@ funcs = {}
stack = []
tokens = iter(asm)
for token in tokens:
- if token.startswith("PUSH") or token.startswith("NPUSH"):
- for token in tokens:
- try:
- num = int(token)
- stack.append(num)
- except ValueError:
- break
- if token.startswith("FDEF"):
- num = stack.pop()
- body = []
- for token in tokens:
- if token.startswith("ENDF"):
- break
- body.append(token)
- funcs[num] = body
- continue
- assert 0, "Unexpected token in fpgm: %s" % token
+ if token.startswith("PUSH") or token.startswith("NPUSH"):
+ for token in tokens:
+ try:
+ num = int(token)
+ stack.append(num)
+ except ValueError:
+ break
+ if token.startswith("FDEF"):
+ num = stack.pop()
+ body = []
+ for token in tokens:
+ if token.startswith("ENDF"):
+ break
+ body.append(token)
+ funcs[num] = body
+ continue
+ assert 0, "Unexpected token in fpgm: %s" % token
# Subset!
-funcs = {i:funcs[i] for i in func_nums}
+funcs = {i: funcs[i] for i in func_nums}
# Put it back together:
asm = []
if funcs:
- asm.append("PUSH[ ]")
+ asm.append("PUSH[ ]")
nums = sorted(funcs.keys())
asm.extend(str(i) for i in nums)
for i in nums:
- asm.append("FDEF[ ]")
- asm.extend(funcs[i])
- asm.append("ENDF[ ]")
+ asm.append("FDEF[ ]")
+ asm.extend(funcs[i])
+ asm.append("ENDF[ ]")
import pprint
+
pprint.pprint(asm)
fpgm.program.fromAssembly(asm)
diff --git a/Snippets/svg2glif.py b/Snippets/svg2glif.py
index b28cb25d..c0aa822b 100755
--- a/Snippets/svg2glif.py
+++ b/Snippets/svg2glif.py
@@ -14,9 +14,8 @@ from fontTools.ufoLib.glifLib import writeGlyphToString
__all__ = ["svg2glif"]
-def svg2glif(svg, name, width=0, height=0, unicodes=None, transform=None,
- version=2):
- """ Convert an SVG outline to a UFO glyph with given 'name', advance
+def svg2glif(svg, name, width=0, height=0, unicodes=None, transform=None, version=2):
+ """Convert an SVG outline to a UFO glyph with given 'name', advance
'width' and 'height' (int), and 'unicodes' (list of int).
Return the resulting string in GLIF format (default: version 2).
If 'transform' is provided, apply a transformation matrix before the
@@ -33,10 +32,9 @@ def svg2glif(svg, name, width=0, height=0, unicodes=None, transform=None,
pen = SegmentToPointPen(pointPen)
outline.draw(pen)
- return writeGlyphToString(name,
- glyphObject=glyph,
- drawPointsFunc=drawPoints,
- formatVersion=version)
+ return writeGlyphToString(
+ name, glyphObject=glyph, drawPointsFunc=drawPoints, formatVersion=version
+ )
def parse_args(args):
@@ -60,32 +58,61 @@ def parse_args(args):
raise argparse.ArgumentTypeError(msg)
parser = argparse.ArgumentParser(
- description="Convert SVG outlines to UFO glyphs (.glif)")
+ description="Convert SVG outlines to UFO glyphs (.glif)"
+ )
parser.add_argument(
- "infile", metavar="INPUT.svg", help="Input SVG file containing "
- '<path> elements with "d" attributes.')
+ "infile",
+ metavar="INPUT.svg",
+ help="Input SVG file containing " '<path> elements with "d" attributes.',
+ )
parser.add_argument(
- "outfile", metavar="OUTPUT.glif", help="Output GLIF file (default: "
- "print to stdout)", nargs='?')
+ "outfile",
+ metavar="OUTPUT.glif",
+ help="Output GLIF file (default: " "print to stdout)",
+ nargs="?",
+ )
parser.add_argument(
- "-n", "--name", help="The glyph name (default: input SVG file "
- "basename, without the .svg extension)")
+ "-n",
+ "--name",
+ help="The glyph name (default: input SVG file "
+ "basename, without the .svg extension)",
+ )
parser.add_argument(
- "-w", "--width", help="The glyph advance width (default: 0)",
- type=int, default=0)
+ "-w",
+ "--width",
+ help="The glyph advance width (default: 0)",
+ type=int,
+ default=0,
+ )
parser.add_argument(
- "-H", "--height", help="The glyph vertical advance (optional if "
- '"width" is defined)', type=int, default=0)
+ "-H",
+ "--height",
+ help="The glyph vertical advance (optional if " '"width" is defined)',
+ type=int,
+ default=0,
+ )
parser.add_argument(
- "-u", "--unicodes", help="List of Unicode code points as hexadecimal "
+ "-u",
+ "--unicodes",
+ help="List of Unicode code points as hexadecimal "
'numbers (e.g. -u "0041 0042")',
- type=unicode_hex_list)
+ type=unicode_hex_list,
+ )
parser.add_argument(
- "-t", "--transform", help="Transformation matrix as a list of six "
- 'float values (e.g. -t "0.1 0 0 -0.1 -50 200")', type=transform_list)
+ "-t",
+ "--transform",
+ help="Transformation matrix as a list of six "
+ 'float values (e.g. -t "0.1 0 0 -0.1 -50 200")',
+ type=transform_list,
+ )
parser.add_argument(
- "-f", "--format", help="UFO GLIF format version (default: 2)",
- type=int, choices=(1, 2), default=2)
+ "-f",
+ "--format",
+ help="UFO GLIF format version (default: 2)",
+ type=int,
+ choices=(1, 2),
+ default=2,
+ )
return parser.parse_args(args)
@@ -101,25 +128,30 @@ def main(args=None):
name = options.name
else:
import os
+
name = os.path.splitext(os.path.basename(svg_file))[0]
with open(svg_file, "r", encoding="utf-8") as f:
svg = f.read()
- glif = svg2glif(svg, name,
- width=options.width,
- height=options.height,
- unicodes=options.unicodes,
- transform=options.transform,
- version=options.format)
+ glif = svg2glif(
+ svg,
+ name,
+ width=options.width,
+ height=options.height,
+ unicodes=options.unicodes,
+ transform=options.transform,
+ version=options.format,
+ )
if options.outfile is None:
print(glif)
else:
- with open(options.outfile, 'w', encoding='utf-8') as f:
+ with open(options.outfile, "w", encoding="utf-8") as f:
f.write(glif)
if __name__ == "__main__":
import sys
+
sys.exit(main())
diff --git a/Tests/afmLib/afmLib_test.py b/Tests/afmLib/afmLib_test.py
index 3e9d9d88..e3640819 100644
--- a/Tests/afmLib/afmLib_test.py
+++ b/Tests/afmLib/afmLib_test.py
@@ -4,50 +4,56 @@ from fontTools import afmLib
CWD = os.path.abspath(os.path.dirname(__file__))
-DATADIR = os.path.join(CWD, 'data')
-AFM = os.path.join(DATADIR, 'TestAFM.afm')
+DATADIR = os.path.join(CWD, "data")
+AFM = os.path.join(DATADIR, "TestAFM.afm")
class AFMTest(unittest.TestCase):
-
- def test_read_afm(self):
- afm = afmLib.AFM(AFM)
- self.assertEqual(sorted(afm.kernpairs()),
- sorted([('V', 'A'), ('T', 'comma'), ('V', 'd'), ('T', 'c'), ('T', 'period')]))
- self.assertEqual(afm['V', 'A'], -60)
- self.assertEqual(afm['V', 'd'], 30)
- self.assertEqual(afm['A'], (65, 668, (8, -25, 660, 666)))
-
- def test_write_afm(self):
- afm = afmLib.AFM(AFM)
- newAfm, afmData = self.write(afm)
- self.assertEqual(afm.kernpairs(), newAfm.kernpairs())
- self.assertEqual(afm.chars(), newAfm.chars())
- self.assertEqual(afm.comments(), newAfm.comments()[1:]) # skip the "generated by afmLib" comment
- for pair in afm.kernpairs():
- self.assertEqual(afm[pair], newAfm[pair])
- for char in afm.chars():
- self.assertEqual(afm[char], newAfm[char])
- with open(AFM, 'r') as f:
- originalLines = f.read().splitlines()
- newLines = afmData.splitlines()
- del newLines[1] # remove the "generated by afmLib" comment
- self.assertEqual(originalLines, newLines)
-
- @staticmethod
- def write(afm, sep='\r'):
- temp = os.path.join(DATADIR, 'temp.afm')
- try:
- afm.write(temp, sep)
- with open(temp, 'r') as f:
- afmData = f.read()
- afm = afmLib.AFM(temp)
- finally:
- if os.path.exists(temp):
- os.remove(temp)
- return afm, afmData
-
-
-if __name__ == '__main__':
- import sys
- sys.exit(unittest.main())
+ def test_read_afm(self):
+ afm = afmLib.AFM(AFM)
+ self.assertEqual(
+ sorted(afm.kernpairs()),
+ sorted(
+ [("V", "A"), ("T", "comma"), ("V", "d"), ("T", "c"), ("T", "period")]
+ ),
+ )
+ self.assertEqual(afm["V", "A"], -60)
+ self.assertEqual(afm["V", "d"], 30)
+ self.assertEqual(afm["A"], (65, 668, (8, -25, 660, 666)))
+
+ def test_write_afm(self):
+ afm = afmLib.AFM(AFM)
+ newAfm, afmData = self.write(afm)
+ self.assertEqual(afm.kernpairs(), newAfm.kernpairs())
+ self.assertEqual(afm.chars(), newAfm.chars())
+ self.assertEqual(
+ afm.comments(), newAfm.comments()[1:]
+ ) # skip the "generated by afmLib" comment
+ for pair in afm.kernpairs():
+ self.assertEqual(afm[pair], newAfm[pair])
+ for char in afm.chars():
+ self.assertEqual(afm[char], newAfm[char])
+ with open(AFM, "r") as f:
+ originalLines = f.read().splitlines()
+ newLines = afmData.splitlines()
+ del newLines[1] # remove the "generated by afmLib" comment
+ self.assertEqual(originalLines, newLines)
+
+ @staticmethod
+ def write(afm, sep="\r"):
+ temp = os.path.join(DATADIR, "temp.afm")
+ try:
+ afm.write(temp, sep)
+ with open(temp, "r") as f:
+ afmData = f.read()
+ afm = afmLib.AFM(temp)
+ finally:
+ if os.path.exists(temp):
+ os.remove(temp)
+ return afm, afmData
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/agl_test.py b/Tests/agl_test.py
index f2fb72d0..d48c2b64 100644
--- a/Tests/agl_test.py
+++ b/Tests/agl_test.py
@@ -12,7 +12,8 @@ class AglToUnicodeTest(unittest.TestCase):
self.assertEqual(agl.toUnicode("uni20ac"), "")
self.assertEqual(
agl.toUnicode("Lcommaaccent_uni20AC0308_u1040C.alternate"),
- "\u013B\u20AC\u0308\U0001040C")
+ "\u013B\u20AC\u0308\U0001040C",
+ )
self.assertEqual(agl.toUnicode("Lcommaaccent_uni013B_u013B"), "ĻĻĻ")
self.assertEqual(agl.toUnicode("foo"), "")
self.assertEqual(agl.toUnicode(".notdef"), "")
@@ -55,4 +56,5 @@ class AglToUnicodeTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/cffLib/cffLib_test.py b/Tests/cffLib/cffLib_test.py
index 7a6e9216..2d4d3023 100644
--- a/Tests/cffLib/cffLib_test.py
+++ b/Tests/cffLib/cffLib_test.py
@@ -8,11 +8,14 @@ import unittest
class CffLibTest(DataFilesHandler):
-
def test_topDict_recalcFontBBox(self):
topDict = TopDict()
topDict.CharStrings = CharStrings(None, None, None, PrivateDict(), None, None)
- topDict.CharStrings.fromXML(None, None, parseXML("""
+ topDict.CharStrings.fromXML(
+ None,
+ None,
+ parseXML(
+ """
<CharString name=".notdef">
endchar
</CharString>
@@ -25,7 +28,9 @@ class CffLibTest(DataFilesHandler):
<CharString name="baz"><!-- [-55.1, -55.1, 55.1, 55.1] -->
-55.1 -55.1 rmoveto 110.2 hlineto 110.2 vlineto -110.2 hlineto endchar
</CharString>
- """))
+ """
+ ),
+ )
topDict.recalcFontBBox()
self.assertEqual(topDict.FontBBox, [-56, -100, 300, 200])
@@ -33,20 +38,26 @@ class CffLibTest(DataFilesHandler):
def test_topDict_recalcFontBBox_empty(self):
topDict = TopDict()
topDict.CharStrings = CharStrings(None, None, None, PrivateDict(), None, None)
- topDict.CharStrings.fromXML(None, None, parseXML("""
+ topDict.CharStrings.fromXML(
+ None,
+ None,
+ parseXML(
+ """
<CharString name=".notdef">
endchar
</CharString>
<CharString name="space">
123 endchar
</CharString>
- """))
+ """
+ ),
+ )
topDict.recalcFontBBox()
self.assertEqual(topDict.FontBBox, [0, 0, 0, 0])
def test_topDict_set_Encoding(self):
- ttx_path = self.getpath('TestOTF.ttx')
+ ttx_path = self.getpath("TestOTF.ttx")
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(ttx_path)
@@ -54,9 +65,9 @@ class CffLibTest(DataFilesHandler):
encoding = [".notdef"] * 256
encoding[0x20] = "space"
topDict.Encoding = encoding
-
+
self.temp_dir()
- save_path = os.path.join(self.tempdir, 'TestOTF.otf')
+ save_path = os.path.join(self.tempdir, "TestOTF.otf")
font.save(save_path)
font2 = TTFont(save_path)
@@ -79,12 +90,12 @@ class CffLibTest(DataFilesHandler):
copy.deepcopy(font)
def test_FDSelect_format_4(self):
- ttx_path = self.getpath('TestFDSelect4.ttx')
+ ttx_path = self.getpath("TestFDSelect4.ttx")
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(ttx_path)
self.temp_dir()
- save_path = os.path.join(self.tempdir, 'TestOTF.otf')
+ save_path = os.path.join(self.tempdir, "TestOTF.otf")
font.save(save_path)
font2 = TTFont(save_path)
@@ -93,14 +104,14 @@ class CffLibTest(DataFilesHandler):
self.assertEqual(topDict2.FDSelect.gidArray, [0, 0, 1])
def test_unique_glyph_names(self):
- font_path = self.getpath('LinLibertine_RBI.otf')
+ font_path = self.getpath("LinLibertine_RBI.otf")
font = TTFont(font_path, recalcBBoxes=False, recalcTimestamp=False)
glyphOrder = font.getGlyphOrder()
self.assertEqual(len(glyphOrder), len(set(glyphOrder)))
self.temp_dir()
- save_path = os.path.join(self.tempdir, 'TestOTF.otf')
+ save_path = os.path.join(self.tempdir, "TestOTF.otf")
font.save(save_path)
font2 = TTFont(save_path)
diff --git a/Tests/cffLib/data/TestCFF2Widths.ttx b/Tests/cffLib/data/TestCFF2Widths.ttx
index e3a3c9c1..eba2c20c 100644
--- a/Tests/cffLib/data/TestCFF2Widths.ttx
+++ b/Tests/cffLib/data/TestCFF2Widths.ttx
@@ -637,6 +637,7 @@
</STAT>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/cffLib/data/TestSparseCFF2VF.ttx b/Tests/cffLib/data/TestSparseCFF2VF.ttx
index f1ae063b..3dbf014a 100644
--- a/Tests/cffLib/data/TestSparseCFF2VF.ttx
+++ b/Tests/cffLib/data/TestSparseCFF2VF.ttx
@@ -1809,6 +1809,7 @@
</VORG>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/cffLib/specializer_test.py b/Tests/cffLib/specializer_test.py
index a9b778c0..6a8e0190 100644
--- a/Tests/cffLib/specializer_test.py
+++ b/Tests/cffLib/specializer_test.py
@@ -1,8 +1,13 @@
-from fontTools.cffLib.specializer import (programToString, stringToProgram,
- generalizeProgram, specializeProgram,
- programToCommands, commandsToProgram,
- generalizeCommands,
- specializeCommands)
+from fontTools.cffLib.specializer import (
+ programToString,
+ stringToProgram,
+ generalizeProgram,
+ specializeProgram,
+ programToCommands,
+ commandsToProgram,
+ generalizeCommands,
+ specializeCommands,
+)
from fontTools.ttLib import TTFont
import os
import unittest
@@ -26,7 +31,6 @@ def get_specialized_charstr(charstr, **kwargs):
class CFFGeneralizeProgramTest(unittest.TestCase):
-
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@@ -34,468 +38,503 @@ class CFFGeneralizeProgramTest(unittest.TestCase):
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
-# no arguments/operands
+ # no arguments/operands
def test_rmoveto_none(self):
- test_charstr = 'rmoveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rmoveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_hmoveto_none(self):
- test_charstr = 'hmoveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hmoveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_vmoveto_none(self):
- test_charstr = 'vmoveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vmoveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_rlineto_none(self):
- test_charstr = 'rlineto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rlineto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_hlineto_none(self):
- test_charstr = 'hlineto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hlineto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_vlineto_none(self):
- test_charstr = 'vlineto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vlineto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_rrcurveto_none(self):
- test_charstr = 'rrcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rrcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_hhcurveto_none(self):
- test_charstr = 'hhcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hhcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_vvcurveto_none(self):
- test_charstr = 'vvcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vvcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_hvcurveto_none(self):
- test_charstr = 'hvcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hvcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_vhcurveto_none(self):
- test_charstr = 'vhcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vhcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_rcurveline_none(self):
- test_charstr = 'rcurveline'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rcurveline"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
def test_rlinecurve_none(self):
- test_charstr = 'rlinecurve'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rlinecurve"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_generalized_charstr(test_charstr)
-# rmoveto
+ # rmoveto
def test_rmoveto_zero(self):
- test_charstr = '0 0 rmoveto'
+ test_charstr = "0 0 rmoveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rmoveto_zero_width(self):
- test_charstr = '100 0 0 rmoveto'
+ test_charstr = "100 0 0 rmoveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rmoveto(self):
- test_charstr = '.55 -.8 rmoveto'
- xpct_charstr = '0.55 -0.8 rmoveto'
+ test_charstr = ".55 -.8 rmoveto"
+ xpct_charstr = "0.55 -0.8 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rmoveto_width(self):
- test_charstr = '100.5 50 -5.8 rmoveto'
+ test_charstr = "100.5 50 -5.8 rmoveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# hmoveto
+ # hmoveto
def test_hmoveto_zero(self):
- test_charstr = '0 hmoveto'
- xpct_charstr = '0 0 rmoveto'
+ test_charstr = "0 hmoveto"
+ xpct_charstr = "0 0 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hmoveto_zero_width(self):
- test_charstr = '100 0 hmoveto'
- xpct_charstr = '100 0 0 rmoveto'
+ test_charstr = "100 0 hmoveto"
+ xpct_charstr = "100 0 0 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hmoveto(self):
- test_charstr = '.67 hmoveto'
- xpct_charstr = '0.67 0 rmoveto'
+ test_charstr = ".67 hmoveto"
+ xpct_charstr = "0.67 0 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hmoveto_width(self):
- test_charstr = '100 -70 hmoveto'
- xpct_charstr = '100 -70 0 rmoveto'
+ test_charstr = "100 -70 hmoveto"
+ xpct_charstr = "100 -70 0 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# vmoveto
+ # vmoveto
def test_vmoveto_zero(self):
- test_charstr = '0 vmoveto'
- xpct_charstr = '0 0 rmoveto'
+ test_charstr = "0 vmoveto"
+ xpct_charstr = "0 0 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vmoveto_zero_width(self):
- test_charstr = '100 0 vmoveto'
- xpct_charstr = '100 0 0 rmoveto'
+ test_charstr = "100 0 vmoveto"
+ xpct_charstr = "100 0 0 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vmoveto(self):
- test_charstr = '-.24 vmoveto'
- xpct_charstr = '0 -0.24 rmoveto'
+ test_charstr = "-.24 vmoveto"
+ xpct_charstr = "0 -0.24 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vmoveto_width(self):
- test_charstr = '100 44 vmoveto'
- xpct_charstr = '100 0 44 rmoveto'
+ test_charstr = "100 44 vmoveto"
+ xpct_charstr = "100 0 44 rmoveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# rlineto
+ # rlineto
def test_rlineto_zero(self):
- test_charstr = '0 0 rlineto'
+ test_charstr = "0 0 rlineto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rlineto_zero_mult(self):
- test_charstr = '0 0 0 0 0 0 rlineto'
- xpct_charstr = ('0 0 rlineto '*3).rstrip()
+ test_charstr = "0 0 0 0 0 0 rlineto"
+ xpct_charstr = ("0 0 rlineto " * 3).rstrip()
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rlineto(self):
- test_charstr = '.55 -.8 rlineto'
- xpct_charstr = '0.55 -0.8 rlineto'
+ test_charstr = ".55 -.8 rlineto"
+ xpct_charstr = "0.55 -0.8 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rlineto_mult(self):
- test_charstr = '.55 -.8 .55 -.8 .55 -.8 rlineto'
- xpct_charstr = ('0.55 -0.8 rlineto '*3).rstrip()
+ test_charstr = ".55 -.8 .55 -.8 .55 -.8 rlineto"
+ xpct_charstr = ("0.55 -0.8 rlineto " * 3).rstrip()
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# hlineto
+ # hlineto
def test_hlineto_zero(self):
- test_charstr = '0 hlineto'
- xpct_charstr = '0 0 rlineto'
+ test_charstr = "0 hlineto"
+ xpct_charstr = "0 0 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hlineto_zero_mult(self):
- test_charstr = '0 0 0 0 hlineto'
- xpct_charstr = ('0 0 rlineto '*4).rstrip()
+ test_charstr = "0 0 0 0 hlineto"
+ xpct_charstr = ("0 0 rlineto " * 4).rstrip()
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hlineto(self):
- test_charstr = '.67 hlineto'
- xpct_charstr = '0.67 0 rlineto'
+ test_charstr = ".67 hlineto"
+ xpct_charstr = "0.67 0 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hlineto_mult(self):
- test_charstr = '.67 -6.0 .67 hlineto'
- xpct_charstr = '0.67 0 rlineto 0 -6.0 rlineto 0.67 0 rlineto'
+ test_charstr = ".67 -6.0 .67 hlineto"
+ xpct_charstr = "0.67 0 rlineto 0 -6.0 rlineto 0.67 0 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# vlineto
+ # vlineto
def test_vlineto_zero(self):
- test_charstr = '0 vlineto'
- xpct_charstr = '0 0 rlineto'
+ test_charstr = "0 vlineto"
+ xpct_charstr = "0 0 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vlineto_zero_mult(self):
- test_charstr = '0 0 0 vlineto'
- xpct_charstr = ('0 0 rlineto '*3).rstrip()
+ test_charstr = "0 0 0 vlineto"
+ xpct_charstr = ("0 0 rlineto " * 3).rstrip()
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vlineto(self):
- test_charstr = '-.24 vlineto'
- xpct_charstr = '0 -0.24 rlineto'
+ test_charstr = "-.24 vlineto"
+ xpct_charstr = "0 -0.24 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vlineto_mult(self):
- test_charstr = '-.24 +50 30 -4 vlineto'
- xpct_charstr = '0 -0.24 rlineto 50 0 rlineto 0 30 rlineto -4 0 rlineto'
+ test_charstr = "-.24 +50 30 -4 vlineto"
+ xpct_charstr = "0 -0.24 rlineto 50 0 rlineto 0 30 rlineto -4 0 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# rrcurveto
+ # rrcurveto
def test_rrcurveto(self):
- test_charstr = '-1 56 -2 57 -1 57 rrcurveto'
+ test_charstr = "-1 56 -2 57 -1 57 rrcurveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_mult(self):
- test_charstr = '-30 8 -36 15 -37 22 44 54 31 61 22 68 rrcurveto'
- xpct_charstr = '-30 8 -36 15 -37 22 rrcurveto 44 54 31 61 22 68 rrcurveto'
+ test_charstr = "-30 8 -36 15 -37 22 44 54 31 61 22 68 rrcurveto"
+ xpct_charstr = "-30 8 -36 15 -37 22 rrcurveto 44 54 31 61 22 68 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_d3947b8(self):
- test_charstr = '1 2 3 4 5 0 rrcurveto'
+ test_charstr = "1 2 3 4 5 0 rrcurveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_v0_0h_h0(self):
- test_charstr = '0 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '0 10 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto'
+ test_charstr = "0 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "0 10 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_h0_0h_h0(self):
- test_charstr = '10 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '10 0 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto'
+ test_charstr = "10 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "10 0 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_00_0h_h0(self):
- test_charstr = '0 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '0 0 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto'
+ test_charstr = "0 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "0 0 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_r0_0h_h0(self):
- test_charstr = '10 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '10 10 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto'
+ test_charstr = "10 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "10 10 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_v0_0v_v0(self):
- test_charstr = '0 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '0 10 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto'
+ test_charstr = "0 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "0 10 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_h0_0v_v0(self):
- test_charstr = '10 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '10 0 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto'
+ test_charstr = "10 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "10 0 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_00_0v_v0(self):
- test_charstr = '0 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '0 0 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto'
+ test_charstr = "0 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "0 0 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_r0_0v_v0(self):
- test_charstr = '10 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '10 10 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto'
+ test_charstr = "10 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = (
+ "10 10 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# hhcurveto
+ # hhcurveto
def test_hhcurveto_4(self):
- test_charstr = '10 30 0 10 hhcurveto'
- xpct_charstr = '10 0 30 0 10 0 rrcurveto'
+ test_charstr = "10 30 0 10 hhcurveto"
+ xpct_charstr = "10 0 30 0 10 0 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_5(self):
- test_charstr = '40 -38 -60 41 -91 hhcurveto'
- xpct_charstr = '-38 40 -60 41 -91 0 rrcurveto'
+ test_charstr = "40 -38 -60 41 -91 hhcurveto"
+ xpct_charstr = "-38 40 -60 41 -91 0 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_4_4(self):
- test_charstr = '43 23 25 18 29 56 42 -84 hhcurveto'
- xpct_charstr = '43 0 23 25 18 0 rrcurveto 29 0 56 42 -84 0 rrcurveto'
+ test_charstr = "43 23 25 18 29 56 42 -84 hhcurveto"
+ xpct_charstr = "43 0 23 25 18 0 rrcurveto 29 0 56 42 -84 0 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_5_4(self):
- test_charstr = '43 23 25 18 29 56 42 -84 79 hhcurveto'
- xpct_charstr = '23 43 25 18 29 0 rrcurveto 56 0 42 -84 79 0 rrcurveto'
+ test_charstr = "43 23 25 18 29 56 42 -84 79 hhcurveto"
+ xpct_charstr = "23 43 25 18 29 0 rrcurveto 56 0 42 -84 79 0 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_4_4_4(self):
- test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hhcurveto'
- xpct_charstr = '1 0 2 3 4 0 rrcurveto 5 0 6 7 8 0 rrcurveto 9 0 10 11 12 0 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 hhcurveto"
+ xpct_charstr = (
+ "1 0 2 3 4 0 rrcurveto 5 0 6 7 8 0 rrcurveto 9 0 10 11 12 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_5_4_4(self):
- test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 hhcurveto'
- xpct_charstr = '2 1 3 4 5 0 rrcurveto 6 0 7 8 9 0 rrcurveto 10 0 11 12 13 0 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 13 hhcurveto"
+ xpct_charstr = (
+ "2 1 3 4 5 0 rrcurveto 6 0 7 8 9 0 rrcurveto 10 0 11 12 13 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# vvcurveto
+ # vvcurveto
def test_vvcurveto_4(self):
- test_charstr = '61 6 52 68 vvcurveto'
- xpct_charstr = '0 61 6 52 0 68 rrcurveto'
+ test_charstr = "61 6 52 68 vvcurveto"
+ xpct_charstr = "0 61 6 52 0 68 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_5(self):
- test_charstr = '61 38 35 56 72 vvcurveto'
- xpct_charstr = '61 38 35 56 0 72 rrcurveto'
+ test_charstr = "61 38 35 56 72 vvcurveto"
+ xpct_charstr = "61 38 35 56 0 72 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_4_4(self):
- test_charstr = '-84 -88 -30 -90 -13 19 23 -11 vvcurveto'
- xpct_charstr = '0 -84 -88 -30 0 -90 rrcurveto 0 -13 19 23 0 -11 rrcurveto'
+ test_charstr = "-84 -88 -30 -90 -13 19 23 -11 vvcurveto"
+ xpct_charstr = "0 -84 -88 -30 0 -90 rrcurveto 0 -13 19 23 0 -11 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_5_4(self):
- test_charstr = '43 12 17 32 65 68 -6 52 61 vvcurveto'
- xpct_charstr = '43 12 17 32 0 65 rrcurveto 0 68 -6 52 0 61 rrcurveto'
+ test_charstr = "43 12 17 32 65 68 -6 52 61 vvcurveto"
+ xpct_charstr = "43 12 17 32 0 65 rrcurveto 0 68 -6 52 0 61 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_4_4_4(self):
- test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vvcurveto'
- xpct_charstr = '0 1 2 3 0 4 rrcurveto 0 5 6 7 0 8 rrcurveto 0 9 10 11 0 12 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 vvcurveto"
+ xpct_charstr = (
+ "0 1 2 3 0 4 rrcurveto 0 5 6 7 0 8 rrcurveto 0 9 10 11 0 12 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_5_4_4(self):
- test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 vvcurveto'
- xpct_charstr = '1 2 3 4 0 5 rrcurveto 0 6 7 8 0 9 rrcurveto 0 10 11 12 0 13 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 13 vvcurveto"
+ xpct_charstr = (
+ "1 2 3 4 0 5 rrcurveto 0 6 7 8 0 9 rrcurveto 0 10 11 12 0 13 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# hvcurveto
+ # hvcurveto
def test_hvcurveto_4(self):
- test_charstr = '1 2 3 4 hvcurveto'
- xpct_charstr = '1 0 2 3 0 4 rrcurveto'
+ test_charstr = "1 2 3 4 hvcurveto"
+ xpct_charstr = "1 0 2 3 0 4 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_5(self):
- test_charstr = '57 44 22 40 34 hvcurveto'
- xpct_charstr = '57 0 44 22 34 40 rrcurveto'
+ test_charstr = "57 44 22 40 34 hvcurveto"
+ xpct_charstr = "57 0 44 22 34 40 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4(self):
- test_charstr = '65 33 -19 -45 -45 -29 -25 -71 hvcurveto'
- xpct_charstr = '65 0 33 -19 0 -45 rrcurveto 0 -45 -29 -25 -71 0 rrcurveto'
+ test_charstr = "65 33 -19 -45 -45 -29 -25 -71 hvcurveto"
+ xpct_charstr = "65 0 33 -19 0 -45 rrcurveto 0 -45 -29 -25 -71 0 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_5(self):
- test_charstr = '97 69 41 86 58 -36 34 -64 11 hvcurveto'
- xpct_charstr = '97 0 69 41 0 86 rrcurveto 0 58 -36 34 -64 11 rrcurveto'
+ test_charstr = "97 69 41 86 58 -36 34 -64 11 hvcurveto"
+ xpct_charstr = "97 0 69 41 0 86 rrcurveto 0 58 -36 34 -64 11 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_4(self):
- test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hvcurveto'
- xpct_charstr = '1 0 2 3 0 4 rrcurveto 0 5 6 7 8 0 rrcurveto 9 0 10 11 0 12 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 hvcurveto"
+ xpct_charstr = (
+ "1 0 2 3 0 4 rrcurveto 0 5 6 7 8 0 rrcurveto 9 0 10 11 0 12 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_5(self):
- test_charstr = '-124 -79 104 165 163 82 102 124 56 43 -25 -37 35 hvcurveto'
- xpct_charstr = '-124 0 -79 104 0 165 rrcurveto 0 163 82 102 124 0 rrcurveto 56 0 43 -25 35 -37 rrcurveto'
+ test_charstr = "-124 -79 104 165 163 82 102 124 56 43 -25 -37 35 hvcurveto"
+ xpct_charstr = "-124 0 -79 104 0 165 rrcurveto 0 163 82 102 124 0 rrcurveto 56 0 43 -25 35 -37 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_4_4(self):
- test_charstr = '32 25 22 32 31 -25 22 -32 -32 -25 -22 -31 -32 25 -22 32 hvcurveto'
- xpct_charstr = '32 0 25 22 0 32 rrcurveto 0 31 -25 22 -32 0 rrcurveto -32 0 -25 -22 0 -31 rrcurveto 0 -32 25 -22 32 0 rrcurveto'
+ test_charstr = (
+ "32 25 22 32 31 -25 22 -32 -32 -25 -22 -31 -32 25 -22 32 hvcurveto"
+ )
+ xpct_charstr = "32 0 25 22 0 32 rrcurveto 0 31 -25 22 -32 0 rrcurveto -32 0 -25 -22 0 -31 rrcurveto 0 -32 25 -22 32 0 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_4_4_5(self):
- test_charstr = '-170 -128 111 195 234 172 151 178 182 95 -118 -161 -130 -71 -77 -63 -55 -19 38 79 20 hvcurveto'
- xpct_charstr = '-170 0 -128 111 0 195 rrcurveto 0 234 172 151 178 0 rrcurveto 182 0 95 -118 0 -161 rrcurveto 0 -130 -71 -77 -63 0 rrcurveto -55 0 -19 38 20 79 rrcurveto'
+ test_charstr = "-170 -128 111 195 234 172 151 178 182 95 -118 -161 -130 -71 -77 -63 -55 -19 38 79 20 hvcurveto"
+ xpct_charstr = "-170 0 -128 111 0 195 rrcurveto 0 234 172 151 178 0 rrcurveto 182 0 95 -118 0 -161 rrcurveto 0 -130 -71 -77 -63 0 rrcurveto -55 0 -19 38 20 79 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# vhcurveto
+ # vhcurveto
def test_vhcurveto_4(self):
- test_charstr = '-57 43 -30 53 vhcurveto'
- xpct_charstr = '0 -57 43 -30 53 0 rrcurveto'
+ test_charstr = "-57 43 -30 53 vhcurveto"
+ xpct_charstr = "0 -57 43 -30 53 0 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_5(self):
- test_charstr = '41 -27 19 -46 11 vhcurveto'
- xpct_charstr = '0 41 -27 19 -46 11 rrcurveto'
+ test_charstr = "41 -27 19 -46 11 vhcurveto"
+ xpct_charstr = "0 41 -27 19 -46 11 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4(self):
- test_charstr = '1 2 3 4 5 6 7 8 vhcurveto'
- xpct_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 7 8 vhcurveto"
+ xpct_charstr = "0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_5(self):
- test_charstr = '-64 -23 -25 -45 -30 -24 14 33 -19 vhcurveto'
- xpct_charstr = '0 -64 -23 -25 -45 0 rrcurveto -30 0 -24 14 -19 33 rrcurveto'
+ test_charstr = "-64 -23 -25 -45 -30 -24 14 33 -19 vhcurveto"
+ xpct_charstr = "0 -64 -23 -25 -45 0 rrcurveto -30 0 -24 14 -19 33 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4_4(self):
- test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vhcurveto'
- xpct_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto 0 9 10 11 12 0 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 vhcurveto"
+ xpct_charstr = (
+ "0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto 0 9 10 11 12 0 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4_5(self):
- test_charstr = '108 59 81 98 99 59 -81 -108 -100 -46 -66 -63 -47 vhcurveto'
- xpct_charstr = '0 108 59 81 98 0 rrcurveto 99 0 59 -81 0 -108 rrcurveto 0 -100 -46 -66 -63 -47 rrcurveto'
+ test_charstr = "108 59 81 98 99 59 -81 -108 -100 -46 -66 -63 -47 vhcurveto"
+ xpct_charstr = "0 108 59 81 98 0 rrcurveto 99 0 59 -81 0 -108 rrcurveto 0 -100 -46 -66 -63 -47 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4_4_5(self):
- test_charstr = '60 -26 37 -43 -33 -28 -22 -36 -37 27 -20 32 3 4 0 1 3 vhcurveto'
- xpct_charstr = '0 60 -26 37 -43 0 rrcurveto -33 0 -28 -22 0 -36 rrcurveto 0 -37 27 -20 32 0 rrcurveto 3 0 4 0 3 1 rrcurveto'
+ test_charstr = "60 -26 37 -43 -33 -28 -22 -36 -37 27 -20 32 3 4 0 1 3 vhcurveto"
+ xpct_charstr = "0 60 -26 37 -43 0 rrcurveto -33 0 -28 -22 0 -36 rrcurveto 0 -37 27 -20 32 0 rrcurveto 3 0 4 0 3 1 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# rcurveline
+ # rcurveline
def test_rcurveline_6_2(self):
- test_charstr = '21 -76 21 -72 24 -73 31 -100 rcurveline'
- xpct_charstr = '21 -76 21 -72 24 -73 rrcurveto 31 -100 rlineto'
+ test_charstr = "21 -76 21 -72 24 -73 31 -100 rcurveline"
+ xpct_charstr = "21 -76 21 -72 24 -73 rrcurveto 31 -100 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rcurveline_6_6_2(self):
- test_charstr = '-73 80 -80 121 -49 96 60 65 55 41 54 17 -8 78 rcurveline'
- xpct_charstr = '-73 80 -80 121 -49 96 rrcurveto 60 65 55 41 54 17 rrcurveto -8 78 rlineto'
+ test_charstr = "-73 80 -80 121 -49 96 60 65 55 41 54 17 -8 78 rcurveline"
+ xpct_charstr = (
+ "-73 80 -80 121 -49 96 rrcurveto 60 65 55 41 54 17 rrcurveto -8 78 rlineto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rcurveline_6_6_6_2(self):
- test_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 47 -89 63 -98 52 -59 91 8 rcurveline'
- xpct_charstr = '1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 47 -89 63 -98 52 -59 rrcurveto 91 8 rlineto'
+ test_charstr = (
+ "1 64 10 51 29 39 15 21 15 20 15 18 47 -89 63 -98 52 -59 91 8 rcurveline"
+ )
+ xpct_charstr = "1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 47 -89 63 -98 52 -59 rrcurveto 91 8 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rcurveline_6_6_6_6_2(self):
- test_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 46 -88 63 -97 52 -59 -38 -57 -49 -62 -52 -54 96 -8 rcurveline'
- xpct_charstr = '1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 46 -88 63 -97 52 -59 rrcurveto -38 -57 -49 -62 -52 -54 rrcurveto 96 -8 rlineto'
+ test_charstr = "1 64 10 51 29 39 15 21 15 20 15 18 46 -88 63 -97 52 -59 -38 -57 -49 -62 -52 -54 96 -8 rcurveline"
+ xpct_charstr = "1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 46 -88 63 -97 52 -59 rrcurveto -38 -57 -49 -62 -52 -54 rrcurveto 96 -8 rlineto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# rlinecurve
+ # rlinecurve
def test_rlinecurve_2_6(self):
- test_charstr = '21 -76 21 -72 24 -73 31 -100 rlinecurve'
- xpct_charstr = '21 -76 rlineto 21 -72 24 -73 31 -100 rrcurveto'
+ test_charstr = "21 -76 21 -72 24 -73 31 -100 rlinecurve"
+ xpct_charstr = "21 -76 rlineto 21 -72 24 -73 31 -100 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rlinecurve_2_2_6(self):
- test_charstr = '-73 80 -80 121 -49 96 60 65 55 41 rlinecurve'
- xpct_charstr = '-73 80 rlineto -80 121 rlineto -49 96 60 65 55 41 rrcurveto'
+ test_charstr = "-73 80 -80 121 -49 96 60 65 55 41 rlinecurve"
+ xpct_charstr = "-73 80 rlineto -80 121 rlineto -49 96 60 65 55 41 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rlinecurve_2_2_2_6(self):
- test_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 rlinecurve'
- xpct_charstr = '1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 15 20 15 18 rrcurveto'
+ test_charstr = "1 64 10 51 29 39 15 21 15 20 15 18 rlinecurve"
+ xpct_charstr = (
+ "1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 15 20 15 18 rrcurveto"
+ )
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
def test_rlinecurve_2_2_2_2_6(self):
- test_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 46 -88 rlinecurve'
- xpct_charstr = '1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 rlineto 15 20 15 18 46 -88 rrcurveto'
+ test_charstr = "1 64 10 51 29 39 15 21 15 20 15 18 46 -88 rlinecurve"
+ xpct_charstr = "1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 rlineto 15 20 15 18 46 -88 rrcurveto"
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# hstem/vstem
+ # hstem/vstem
def test_hstem_vstem(self):
- test_charstr = '95 0 58 542 60 hstem 89 65 344 67 vstem 89 45 rmoveto'
+ test_charstr = "95 0 58 542 60 hstem 89 65 344 67 vstem 89 45 rmoveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# hstemhm/vstemhm
+ # hstemhm/vstemhm
def test_hstemhm_vstemhm(self):
- test_charstr = '-16 577 60 24 60 hstemhm 98 55 236 55 vstemhm 343 577 rmoveto'
+ test_charstr = "-16 577 60 24 60 hstemhm 98 55 236 55 vstemhm 343 577 rmoveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# hintmask/cntrmask
+ # hintmask/cntrmask
def test_hintmask_cntrmask(self):
- test_charstr = '52 80 153 61 4 83 -71.5 71.5 hintmask 11011100 94 119 216 119 216 119 cntrmask 1110000 154 -12 rmoveto'
+ test_charstr = "52 80 153 61 4 83 -71.5 71.5 hintmask 11011100 94 119 216 119 216 119 cntrmask 1110000 154 -12 rmoveto"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# endchar
+ # endchar
def test_endchar(self):
- test_charstr = '-255 319 rmoveto 266 57 rlineto endchar'
+ test_charstr = "-255 319 rmoveto 266 57 rlineto endchar"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
-# xtra
+ # xtra
def test_xtra(self):
- test_charstr = '-255 319 rmoveto 266 57 rlineto xtra 90 34'
+ test_charstr = "-255 319 rmoveto 266 57 rlineto xtra 90 34"
xpct_charstr = test_charstr
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr)
class CFFSpecializeProgramTest(unittest.TestCase):
-
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@@ -503,429 +542,450 @@ class CFFSpecializeProgramTest(unittest.TestCase):
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
-# no arguments/operands
+ # no arguments/operands
def test_rmoveto_none(self):
- test_charstr = 'rmoveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rmoveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_hmoveto_none(self):
- test_charstr = 'hmoveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hmoveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_vmoveto_none(self):
- test_charstr = 'vmoveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vmoveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_rlineto_none(self):
- test_charstr = 'rlineto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rlineto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_hlineto_none(self):
- test_charstr = 'hlineto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hlineto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_vlineto_none(self):
- test_charstr = 'vlineto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vlineto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_rrcurveto_none(self):
- test_charstr = 'rrcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rrcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_hhcurveto_none(self):
- test_charstr = 'hhcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hhcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_vvcurveto_none(self):
- test_charstr = 'vvcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vvcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_hvcurveto_none(self):
- test_charstr = 'hvcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "hvcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_vhcurveto_none(self):
- test_charstr = 'vhcurveto'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "vhcurveto"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_rcurveline_none(self):
- test_charstr = 'rcurveline'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rcurveline"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
def test_rlinecurve_none(self):
- test_charstr = 'rlinecurve'
- with self.assertRaisesRegex(ValueError, r'\[\]'):
+ test_charstr = "rlinecurve"
+ with self.assertRaisesRegex(ValueError, r"\[\]"):
get_specialized_charstr(test_charstr)
-# rmoveto
+ # rmoveto
def test_rmoveto_zero(self):
- test_charstr = '0 0 rmoveto'
- xpct_charstr = '0 hmoveto'
- self.assertEqual(get_specialized_charstr(test_charstr,
- generalizeFirst=False), xpct_charstr)
+ test_charstr = "0 0 rmoveto"
+ xpct_charstr = "0 hmoveto"
+ self.assertEqual(
+ get_specialized_charstr(test_charstr, generalizeFirst=False), xpct_charstr
+ )
def test_rmoveto_zero_mult(self):
- test_charstr = '0 0 rmoveto '*3
- xpct_charstr = '0 hmoveto'
- self.assertEqual(get_specialized_charstr(test_charstr,
- generalizeFirst=False), xpct_charstr)
+ test_charstr = "0 0 rmoveto " * 3
+ xpct_charstr = "0 hmoveto"
+ self.assertEqual(
+ get_specialized_charstr(test_charstr, generalizeFirst=False), xpct_charstr
+ )
def test_rmoveto_zero_width(self):
- test_charstr = '100 0 0 rmoveto'
- xpct_charstr = '100 0 hmoveto'
+ test_charstr = "100 0 0 rmoveto"
+ xpct_charstr = "100 0 hmoveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rmoveto(self):
- test_charstr = '.55 -.8 rmoveto'
- xpct_charstr = '0.55 -0.8 rmoveto'
+ test_charstr = ".55 -.8 rmoveto"
+ xpct_charstr = "0.55 -0.8 rmoveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rmoveto_mult(self):
- test_charstr = '55 -8 rmoveto '*3
- xpct_charstr = '165 -24 rmoveto'
+ test_charstr = "55 -8 rmoveto " * 3
+ xpct_charstr = "165 -24 rmoveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rmoveto_width(self):
- test_charstr = '100.5 50 -5.8 rmoveto'
+ test_charstr = "100.5 50 -5.8 rmoveto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
-# rlineto
+ # rlineto
def test_rlineto_zero(self):
- test_charstr = '0 0 rlineto'
- xpct_charstr = ''
+ test_charstr = "0 0 rlineto"
+ xpct_charstr = ""
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rlineto_zero_mult(self):
- test_charstr = '0 0 rlineto '*3
- xpct_charstr = ''
+ test_charstr = "0 0 rlineto " * 3
+ xpct_charstr = ""
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rlineto(self):
- test_charstr = '.55 -.8 rlineto'
- xpct_charstr = '0.55 -0.8 rlineto'
+ test_charstr = ".55 -.8 rlineto"
+ xpct_charstr = "0.55 -0.8 rlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rlineto_mult(self):
- test_charstr = '.55 -.8 rlineto '*3
- xpct_charstr = '0.55 -0.8 0.55 -0.8 0.55 -0.8 rlineto'
+ test_charstr = ".55 -.8 rlineto " * 3
+ xpct_charstr = "0.55 -0.8 0.55 -0.8 0.55 -0.8 rlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hlineto(self):
- test_charstr = '.67 0 rlineto'
- xpct_charstr = '0.67 hlineto'
+ test_charstr = ".67 0 rlineto"
+ xpct_charstr = "0.67 hlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hlineto_zero_mult(self):
- test_charstr = '62 0 rlineto '*3
- xpct_charstr = '186 hlineto'
+ test_charstr = "62 0 rlineto " * 3
+ xpct_charstr = "186 hlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hlineto_mult(self):
- test_charstr = '.67 0 rlineto 0 -6.0 rlineto .67 0 rlineto'
- xpct_charstr = '0.67 -6.0 0.67 hlineto'
+ test_charstr = ".67 0 rlineto 0 -6.0 rlineto .67 0 rlineto"
+ xpct_charstr = "0.67 -6.0 0.67 hlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vlineto(self):
- test_charstr = '0 -.24 rlineto'
- xpct_charstr = '-0.24 vlineto'
+ test_charstr = "0 -.24 rlineto"
+ xpct_charstr = "-0.24 vlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vlineto_zero_mult(self):
- test_charstr = '0 -24 rlineto '*3
- xpct_charstr = '-72 vlineto'
+ test_charstr = "0 -24 rlineto " * 3
+ xpct_charstr = "-72 vlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vlineto_mult(self):
- test_charstr = '0 -.24 rlineto +50 0 rlineto 0 30 rlineto -4 0 rlineto'
- xpct_charstr = '-0.24 50 30 -4 vlineto'
+ test_charstr = "0 -.24 rlineto +50 0 rlineto 0 30 rlineto -4 0 rlineto"
+ xpct_charstr = "-0.24 50 30 -4 vlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_0lineto_peephole(self):
- test_charstr = '1 2 0 0 3 4 rlineto'
- xpct_charstr = '1 2 3 4 rlineto'
+ test_charstr = "1 2 0 0 3 4 rlineto"
+ xpct_charstr = "1 2 3 4 rlineto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hlineto_peephole(self):
- test_charstr = '1 2 5 0 3 4 rlineto'
+ test_charstr = "1 2 5 0 3 4 rlineto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vlineto_peephole(self):
- test_charstr = '1 2 0 5 3 4 rlineto'
+ test_charstr = "1 2 0 5 3 4 rlineto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
-# rrcurveto
+ # rrcurveto
def test_rrcurveto(self):
- test_charstr = '-1 56 -2 57 -1 57 rrcurveto'
+ test_charstr = "-1 56 -2 57 -1 57 rrcurveto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_mult(self):
- test_charstr = '-30 8 -36 15 -37 22 rrcurveto 44 54 31 61 22 68 rrcurveto'
- xpct_charstr = '-30 8 -36 15 -37 22 44 54 31 61 22 68 rrcurveto'
+ test_charstr = "-30 8 -36 15 -37 22 rrcurveto 44 54 31 61 22 68 rrcurveto"
+ xpct_charstr = "-30 8 -36 15 -37 22 44 54 31 61 22 68 rrcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_d3947b8(self):
- test_charstr = '1 2 3 4 5 0 rrcurveto'
- xpct_charstr = '2 1 3 4 5 hhcurveto'
+ test_charstr = "1 2 3 4 5 0 rrcurveto"
+ xpct_charstr = "2 1 3 4 5 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_4(self):
- test_charstr = '10 0 30 0 10 0 rrcurveto'
- xpct_charstr = '10 30 0 10 hhcurveto'
+ test_charstr = "10 0 30 0 10 0 rrcurveto"
+ xpct_charstr = "10 30 0 10 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_5(self):
- test_charstr = '-38 40 -60 41 -91 0 rrcurveto'
- xpct_charstr = '40 -38 -60 41 -91 hhcurveto'
+ test_charstr = "-38 40 -60 41 -91 0 rrcurveto"
+ xpct_charstr = "40 -38 -60 41 -91 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_4_4(self):
- test_charstr = '43 0 23 25 18 0 rrcurveto 29 0 56 42 -84 0 rrcurveto'
- xpct_charstr = '43 23 25 18 29 56 42 -84 hhcurveto'
+ test_charstr = "43 0 23 25 18 0 rrcurveto 29 0 56 42 -84 0 rrcurveto"
+ xpct_charstr = "43 23 25 18 29 56 42 -84 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_5_4(self):
- test_charstr = '23 43 25 18 29 0 rrcurveto 56 0 42 -84 79 0 rrcurveto'
- xpct_charstr = '43 23 25 18 29 56 42 -84 79 hhcurveto'
+ test_charstr = "23 43 25 18 29 0 rrcurveto 56 0 42 -84 79 0 rrcurveto"
+ xpct_charstr = "43 23 25 18 29 56 42 -84 79 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_4_4_4(self):
- test_charstr = '1 0 2 3 4 0 rrcurveto 5 0 6 7 8 0 rrcurveto 9 0 10 11 12 0 rrcurveto'
- xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hhcurveto'
+ test_charstr = (
+ "1 0 2 3 4 0 rrcurveto 5 0 6 7 8 0 rrcurveto 9 0 10 11 12 0 rrcurveto"
+ )
+ xpct_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_mult_5_4_4(self):
- test_charstr = '2 1 3 4 5 0 rrcurveto 6 0 7 8 9 0 rrcurveto 10 0 11 12 13 0 rrcurveto'
- xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 hhcurveto'
+ test_charstr = (
+ "2 1 3 4 5 0 rrcurveto 6 0 7 8 9 0 rrcurveto 10 0 11 12 13 0 rrcurveto"
+ )
+ xpct_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 13 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_4(self):
- test_charstr = '0 61 6 52 0 68 rrcurveto'
- xpct_charstr = '61 6 52 68 vvcurveto'
+ test_charstr = "0 61 6 52 0 68 rrcurveto"
+ xpct_charstr = "61 6 52 68 vvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_5(self):
- test_charstr = '61 38 35 56 0 72 rrcurveto'
- xpct_charstr = '61 38 35 56 72 vvcurveto'
+ test_charstr = "61 38 35 56 0 72 rrcurveto"
+ xpct_charstr = "61 38 35 56 72 vvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_4_4(self):
- test_charstr = '0 -84 -88 -30 0 -90 rrcurveto 0 -13 19 23 0 -11 rrcurveto'
- xpct_charstr = '-84 -88 -30 -90 -13 19 23 -11 vvcurveto'
+ test_charstr = "0 -84 -88 -30 0 -90 rrcurveto 0 -13 19 23 0 -11 rrcurveto"
+ xpct_charstr = "-84 -88 -30 -90 -13 19 23 -11 vvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_5_4(self):
- test_charstr = '43 12 17 32 0 65 rrcurveto 0 68 -6 52 0 61 rrcurveto'
- xpct_charstr = '43 12 17 32 65 68 -6 52 61 vvcurveto'
+ test_charstr = "43 12 17 32 0 65 rrcurveto 0 68 -6 52 0 61 rrcurveto"
+ xpct_charstr = "43 12 17 32 65 68 -6 52 61 vvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_4_4_4(self):
- test_charstr = '0 1 2 3 0 4 rrcurveto 0 5 6 7 0 8 rrcurveto 0 9 10 11 0 12 rrcurveto'
- xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vvcurveto'
+ test_charstr = (
+ "0 1 2 3 0 4 rrcurveto 0 5 6 7 0 8 rrcurveto 0 9 10 11 0 12 rrcurveto"
+ )
+ xpct_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 vvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_mult_5_4_4(self):
- test_charstr = '1 2 3 4 0 5 rrcurveto 0 6 7 8 0 9 rrcurveto 0 10 11 12 0 13 rrcurveto'
- xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 vvcurveto'
+ test_charstr = (
+ "1 2 3 4 0 5 rrcurveto 0 6 7 8 0 9 rrcurveto 0 10 11 12 0 13 rrcurveto"
+ )
+ xpct_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 13 vvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4(self):
- test_charstr = '1 0 2 3 0 4 rrcurveto'
- xpct_charstr = '1 2 3 4 hvcurveto'
+ test_charstr = "1 0 2 3 0 4 rrcurveto"
+ xpct_charstr = "1 2 3 4 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_5(self):
- test_charstr = '57 0 44 22 34 40 rrcurveto'
- xpct_charstr = '57 44 22 40 34 hvcurveto'
+ test_charstr = "57 0 44 22 34 40 rrcurveto"
+ xpct_charstr = "57 44 22 40 34 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4(self):
- test_charstr = '65 0 33 -19 0 -45 rrcurveto 0 -45 -29 -25 -71 0 rrcurveto'
- xpct_charstr = '65 33 -19 -45 -45 -29 -25 -71 hvcurveto'
+ test_charstr = "65 0 33 -19 0 -45 rrcurveto 0 -45 -29 -25 -71 0 rrcurveto"
+ xpct_charstr = "65 33 -19 -45 -45 -29 -25 -71 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_5(self):
- test_charstr = '97 0 69 41 0 86 rrcurveto 0 58 -36 34 -64 11 rrcurveto'
- xpct_charstr = '97 69 41 86 58 -36 34 -64 11 hvcurveto'
+ test_charstr = "97 0 69 41 0 86 rrcurveto 0 58 -36 34 -64 11 rrcurveto"
+ xpct_charstr = "97 69 41 86 58 -36 34 -64 11 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_4(self):
- test_charstr = '1 0 2 3 0 4 rrcurveto 0 5 6 7 8 0 rrcurveto 9 0 10 11 0 12 rrcurveto'
- xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hvcurveto'
+ test_charstr = (
+ "1 0 2 3 0 4 rrcurveto 0 5 6 7 8 0 rrcurveto 9 0 10 11 0 12 rrcurveto"
+ )
+ xpct_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_5(self):
- test_charstr = '-124 0 -79 104 0 165 rrcurveto 0 163 82 102 124 0 rrcurveto 56 0 43 -25 35 -37 rrcurveto'
- xpct_charstr = '-124 -79 104 165 163 82 102 124 56 43 -25 -37 35 hvcurveto'
+ test_charstr = "-124 0 -79 104 0 165 rrcurveto 0 163 82 102 124 0 rrcurveto 56 0 43 -25 35 -37 rrcurveto"
+ xpct_charstr = "-124 -79 104 165 163 82 102 124 56 43 -25 -37 35 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_4_4(self):
- test_charstr = '32 0 25 22 0 32 rrcurveto 0 31 -25 22 -32 0 rrcurveto -32 0 -25 -22 0 -31 rrcurveto 0 -32 25 -22 32 0 rrcurveto'
- xpct_charstr = '32 25 22 32 31 -25 22 -32 -32 -25 -22 -31 -32 25 -22 32 hvcurveto'
+ test_charstr = "32 0 25 22 0 32 rrcurveto 0 31 -25 22 -32 0 rrcurveto -32 0 -25 -22 0 -31 rrcurveto 0 -32 25 -22 32 0 rrcurveto"
+ xpct_charstr = (
+ "32 25 22 32 31 -25 22 -32 -32 -25 -22 -31 -32 25 -22 32 hvcurveto"
+ )
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_4_4_4_4_5(self):
- test_charstr = '-170 0 -128 111 0 195 rrcurveto 0 234 172 151 178 0 rrcurveto 182 0 95 -118 0 -161 rrcurveto 0 -130 -71 -77 -63 0 rrcurveto -55 0 -19 38 20 79 rrcurveto'
- xpct_charstr = '-170 -128 111 195 234 172 151 178 182 95 -118 -161 -130 -71 -77 -63 -55 -19 38 79 20 hvcurveto'
+ test_charstr = "-170 0 -128 111 0 195 rrcurveto 0 234 172 151 178 0 rrcurveto 182 0 95 -118 0 -161 rrcurveto 0 -130 -71 -77 -63 0 rrcurveto -55 0 -19 38 20 79 rrcurveto"
+ xpct_charstr = "-170 -128 111 195 234 172 151 178 182 95 -118 -161 -130 -71 -77 -63 -55 -19 38 79 20 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4(self):
- test_charstr = '0 -57 43 -30 53 0 rrcurveto'
- xpct_charstr = '-57 43 -30 53 vhcurveto'
+ test_charstr = "0 -57 43 -30 53 0 rrcurveto"
+ xpct_charstr = "-57 43 -30 53 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_5(self):
- test_charstr = '0 41 -27 19 -46 11 rrcurveto'
- xpct_charstr = '41 -27 19 -46 11 vhcurveto'
+ test_charstr = "0 41 -27 19 -46 11 rrcurveto"
+ xpct_charstr = "41 -27 19 -46 11 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4(self):
- test_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto'
- xpct_charstr = '1 2 3 4 5 6 7 8 vhcurveto'
+ test_charstr = "0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto"
+ xpct_charstr = "1 2 3 4 5 6 7 8 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_5(self):
- test_charstr = '0 -64 -23 -25 -45 0 rrcurveto -30 0 -24 14 -19 33 rrcurveto'
- xpct_charstr = '-64 -23 -25 -45 -30 -24 14 33 -19 vhcurveto'
+ test_charstr = "0 -64 -23 -25 -45 0 rrcurveto -30 0 -24 14 -19 33 rrcurveto"
+ xpct_charstr = "-64 -23 -25 -45 -30 -24 14 33 -19 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4_4(self):
- test_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto 0 9 10 11 12 0 rrcurveto'
- xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vhcurveto'
+ test_charstr = (
+ "0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto 0 9 10 11 12 0 rrcurveto"
+ )
+ xpct_charstr = "1 2 3 4 5 6 7 8 9 10 11 12 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4_5(self):
- test_charstr = '0 108 59 81 98 0 rrcurveto 99 0 59 -81 0 -108 rrcurveto 0 -100 -46 -66 -63 -47 rrcurveto'
- xpct_charstr = '108 59 81 98 99 59 -81 -108 -100 -46 -66 -63 -47 vhcurveto'
+ test_charstr = "0 108 59 81 98 0 rrcurveto 99 0 59 -81 0 -108 rrcurveto 0 -100 -46 -66 -63 -47 rrcurveto"
+ xpct_charstr = "108 59 81 98 99 59 -81 -108 -100 -46 -66 -63 -47 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_4_4_4_5(self):
- test_charstr = '0 60 -26 37 -43 0 rrcurveto -33 0 -28 -22 0 -36 rrcurveto 0 -37 27 -20 32 0 rrcurveto 3 0 4 0 3 1 rrcurveto'
- xpct_charstr = '60 -26 37 -43 -33 -28 -22 -36 -37 27 -20 32 3 4 0 1 3 vhcurveto'
+ test_charstr = "0 60 -26 37 -43 0 rrcurveto -33 0 -28 -22 0 -36 rrcurveto 0 -37 27 -20 32 0 rrcurveto 3 0 4 0 3 1 rrcurveto"
+ xpct_charstr = "60 -26 37 -43 -33 -28 -22 -36 -37 27 -20 32 3 4 0 1 3 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_v0_0h_h0(self):
- test_charstr = '0 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '10 1 2 0 0 1 2 1 1 3 4 0 vhcurveto'
+ test_charstr = "0 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = "10 1 2 0 0 1 2 1 1 3 4 0 vhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_h0_0h_h0(self):
- test_charstr = '10 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '10 1 2 0 hhcurveto 0 1 2 1 1 3 4 0 hvcurveto'
+ test_charstr = "10 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = "10 1 2 0 hhcurveto 0 1 2 1 1 3 4 0 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_00_0h_h0(self):
- test_charstr = '0 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '1 2 rlineto 0 1 2 1 1 3 4 0 hvcurveto'
+ test_charstr = "0 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = "1 2 rlineto 0 1 2 1 1 3 4 0 hvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_r0_0h_h0(self):
- test_charstr = '10 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto'
- xpct_charstr = '10 10 1 2 0 0 1 2 1 1 3 4 0 vvcurveto'
+ test_charstr = "10 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto"
+ xpct_charstr = "10 10 1 2 0 0 1 2 1 1 3 4 0 vvcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_v0_0v_v0(self):
- test_charstr = '0 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '10 1 2 0 vhcurveto 0 1 2 1 1 3 4 0 hhcurveto'
+ test_charstr = "0 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = "10 1 2 0 vhcurveto 0 1 2 1 1 3 4 0 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_h0_0v_v0(self):
- test_charstr = '10 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '10 1 2 0 0 1 2 1 1 3 4 0 hhcurveto'
+ test_charstr = "10 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = "10 1 2 0 0 1 2 1 1 3 4 0 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_00_0v_v0(self):
- test_charstr = '0 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '1 2 rlineto 0 1 2 1 1 3 4 0 hhcurveto'
+ test_charstr = "0 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = "1 2 rlineto 0 1 2 1 1 3 4 0 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rrcurveto_r0_0v_v0(self):
- test_charstr = '10 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto'
- xpct_charstr = '10 10 1 2 0 0 1 2 1 1 3 4 0 hhcurveto'
+ test_charstr = "10 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto"
+ xpct_charstr = "10 10 1 2 0 0 1 2 1 1 3 4 0 hhcurveto"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hhcurveto_peephole(self):
- test_charstr = '1 2 3 4 5 6 1 2 3 4 5 0 1 2 3 4 5 6 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 1 2 3 4 5 0 1 2 3 4 5 6 rrcurveto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vvcurveto_peephole(self):
- test_charstr = '1 2 3 4 5 6 1 2 3 4 0 6 1 2 3 4 5 6 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 1 2 3 4 0 6 1 2 3 4 5 6 rrcurveto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_hvcurveto_peephole(self):
- test_charstr = '1 2 3 4 5 6 1 0 3 4 5 6 1 2 3 4 5 6 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 1 0 3 4 5 6 1 2 3 4 5 6 rrcurveto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_vhcurveto_peephole(self):
- test_charstr = '1 2 3 4 5 6 0 2 3 4 5 6 1 2 3 4 5 6 rrcurveto'
+ test_charstr = "1 2 3 4 5 6 0 2 3 4 5 6 1 2 3 4 5 6 rrcurveto"
xpct_charstr = test_charstr
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rcurveline_6_2(self):
- test_charstr = '21 -76 21 -72 24 -73 rrcurveto 31 -100 rlineto'
- xpct_charstr = '21 -76 21 -72 24 -73 31 -100 rcurveline'
+ test_charstr = "21 -76 21 -72 24 -73 rrcurveto 31 -100 rlineto"
+ xpct_charstr = "21 -76 21 -72 24 -73 31 -100 rcurveline"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rcurveline_6_6_2(self):
- test_charstr = '-73 80 -80 121 -49 96 rrcurveto 60 65 55 41 54 17 rrcurveto -8 78 rlineto'
- xpct_charstr = '-73 80 -80 121 -49 96 60 65 55 41 54 17 -8 78 rcurveline'
+ test_charstr = (
+ "-73 80 -80 121 -49 96 rrcurveto 60 65 55 41 54 17 rrcurveto -8 78 rlineto"
+ )
+ xpct_charstr = "-73 80 -80 121 -49 96 60 65 55 41 54 17 -8 78 rcurveline"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rcurveline_6_6_6_2(self):
- test_charstr = '1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 47 -89 63 -98 52 -59 rrcurveto 91 8 rlineto'
- xpct_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 47 -89 63 -98 52 -59 91 8 rcurveline'
+ test_charstr = "1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 47 -89 63 -98 52 -59 rrcurveto 91 8 rlineto"
+ xpct_charstr = (
+ "1 64 10 51 29 39 15 21 15 20 15 18 47 -89 63 -98 52 -59 91 8 rcurveline"
+ )
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rlinecurve_2_6(self):
- test_charstr = '21 -76 rlineto 21 -72 24 -73 31 -100 rrcurveto'
- xpct_charstr = '21 -76 21 -72 24 -73 31 -100 rlinecurve'
+ test_charstr = "21 -76 rlineto 21 -72 24 -73 31 -100 rrcurveto"
+ xpct_charstr = "21 -76 21 -72 24 -73 31 -100 rlinecurve"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rlinecurve_2_2_6(self):
- test_charstr = '-73 80 rlineto -80 121 rlineto -49 96 60 65 55 41 rrcurveto'
- xpct_charstr = '-73 80 -80 121 -49 96 60 65 55 41 rlinecurve'
+ test_charstr = "-73 80 rlineto -80 121 rlineto -49 96 60 65 55 41 rrcurveto"
+ xpct_charstr = "-73 80 -80 121 -49 96 60 65 55 41 rlinecurve"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
def test_rlinecurve_2_2_2_6(self):
- test_charstr = '1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 15 20 15 18 rrcurveto'
- xpct_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 rlinecurve'
+ test_charstr = (
+ "1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 15 20 15 18 rrcurveto"
+ )
+ xpct_charstr = "1 64 10 51 29 39 15 21 15 20 15 18 rlinecurve"
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
-# maxstack CFF=48, specializer uses up to 47
+ # maxstack CFF=48, specializer uses up to 47
def test_maxstack(self):
- operands = '1 2 3 4 5 6 '
- operator = 'rrcurveto '
- test_charstr = (operands + operator)*9
- xpct_charstr = (operands*2 + operator + operands*7 + operator).rstrip()
+ operands = "1 2 3 4 5 6 "
+ operator = "rrcurveto "
+ test_charstr = (operands + operator) * 9
+ xpct_charstr = (operands * 2 + operator + operands * 7 + operator).rstrip()
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr)
class CFF2VFTestSpecialize(DataFilesHandler):
-
def test_blend_round_trip(self):
- ttx_path = self.getpath('TestSparseCFF2VF.ttx')
+ ttx_path = self.getpath("TestSparseCFF2VF.ttx")
ttf_font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
ttf_font.importXML(ttx_path)
fontGlyphList = ttf_font.getGlyphOrder()
- topDict = ttf_font['CFF2'].cff.topDictIndex[0]
+ topDict = ttf_font["CFF2"].cff.topDictIndex[0]
charstrings = topDict.CharStrings
for glyphName in fontGlyphList:
cs = charstrings[glyphName]
@@ -942,11 +1002,11 @@ class CFF2VFTestSpecialize(DataFilesHandler):
self.assertEqual(program, program_g)
def test_blend_programToCommands(self):
- ttx_path = self.getpath('TestCFF2Widths.ttx')
+ ttx_path = self.getpath("TestCFF2Widths.ttx")
ttf_font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
ttf_font.importXML(ttx_path)
fontGlyphList = ttf_font.getGlyphOrder()
- topDict = ttf_font['CFF2'].cff.topDictIndex[0]
+ topDict = ttf_font["CFF2"].cff.topDictIndex[0]
charstrings = topDict.CharStrings
for glyphName in fontGlyphList:
cs = charstrings[glyphName]
@@ -958,4 +1018,5 @@ class CFF2VFTestSpecialize(DataFilesHandler):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/cu2qu/cli_test.py b/Tests/cu2qu/cli_test.py
index f6798a63..be646847 100644
--- a/Tests/cu2qu/cli_test.py
+++ b/Tests/cu2qu/cli_test.py
@@ -9,7 +9,7 @@ from fontTools.cu2qu.ufo import CURVE_TYPE_LIB_KEY
from fontTools.cu2qu.cli import main
-DATADIR = os.path.join(os.path.dirname(__file__), 'data')
+DATADIR = os.path.join(os.path.dirname(__file__), "data")
TEST_UFOS = [
py.path.local(DATADIR).join("RobotoSubset-Regular.ufo"),
@@ -28,7 +28,6 @@ def test_paths(tmpdir):
class MainTest(object):
-
@staticmethod
def run_main(*args):
main([str(p) for p in args if p])
@@ -44,13 +43,13 @@ class MainTest(object):
def test_single_input_output_file(self, tmpdir):
input_path = TEST_UFOS[0]
output_path = tmpdir / input_path.basename
- self.run_main('-o', output_path, input_path)
+ self.run_main("-o", output_path, input_path)
assert output_path.check(dir=1)
def test_multiple_inputs_output_dir(self, tmpdir):
output_dir = tmpdir / "output_dir"
- self.run_main('-d', output_dir, *TEST_UFOS)
+ self.run_main("-d", output_dir, *TEST_UFOS)
assert output_dir.check(dir=1)
outputs = set(p.basename for p in output_dir.listdir())
@@ -58,29 +57,28 @@ class MainTest(object):
assert "RobotoSubset-Bold.ufo" in outputs
def test_interpolatable_inplace(self, test_paths):
- self.run_main('-i', *test_paths)
- self.run_main('-i', *test_paths) # idempotent
+ self.run_main("-i", *test_paths)
+ self.run_main("-i", *test_paths) # idempotent
- @pytest.mark.parametrize(
- "mode", ["", "-i"], ids=["normal", "interpolatable"])
+ @pytest.mark.parametrize("mode", ["", "-i"], ids=["normal", "interpolatable"])
def test_copytree(self, mode, tmpdir):
output_dir = tmpdir / "output_dir"
- self.run_main(mode, '-d', output_dir, *TEST_UFOS)
+ self.run_main(mode, "-d", output_dir, *TEST_UFOS)
output_dir_2 = tmpdir / "output_dir_2"
# no conversion when curves are already quadratic, just copy
- self.run_main(mode, '-d', output_dir_2, *output_dir.listdir())
+ self.run_main(mode, "-d", output_dir_2, *output_dir.listdir())
# running again overwrites existing with the copy
- self.run_main(mode, '-d', output_dir_2, *output_dir.listdir())
+ self.run_main(mode, "-d", output_dir_2, *output_dir.listdir())
def test_multiprocessing(self, tmpdir, test_paths):
self.run_main(*(test_paths + ["-j"]))
def test_keep_direction(self, test_paths):
- self.run_main('--keep-direction', *test_paths)
+ self.run_main("--keep-direction", *test_paths)
def test_conversion_error(self, test_paths):
- self.run_main('--conversion-error', 0.002, *test_paths)
+ self.run_main("--conversion-error", 0.002, *test_paths)
def test_conversion_error_short(self, test_paths):
- self.run_main('-e', 0.003, test_paths[0])
+ self.run_main("-e", 0.003, test_paths[0])
diff --git a/Tests/cu2qu/cu2qu_test.py b/Tests/cu2qu/cu2qu_test.py
index 456d2103..b125f865 100644
--- a/Tests/cu2qu/cu2qu_test.py
+++ b/Tests/cu2qu/cu2qu_test.py
@@ -21,31 +21,31 @@ import json
from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic
-DATADIR = os.path.join(os.path.dirname(__file__), 'data')
+DATADIR = os.path.join(os.path.dirname(__file__), "data")
MAX_ERR = 5
class CurveToQuadraticTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
"""Do the curve conversion ahead of time, and run tests on results."""
with open(os.path.join(DATADIR, "curves.json"), "r") as fp:
curves = json.load(fp)
- cls.single_splines = [
- curve_to_quadratic(c, MAX_ERR) for c in curves]
+ cls.single_splines = [curve_to_quadratic(c, MAX_ERR) for c in curves]
cls.single_errors = [
- cls.curve_spline_dist(c, s)
- for c, s in zip(curves, cls.single_splines)]
+ cls.curve_spline_dist(c, s) for c, s in zip(curves, cls.single_splines)
+ ]
- curve_groups = [curves[i:i + 3] for i in range(0, 300, 3)]
+ curve_groups = [curves[i : i + 3] for i in range(0, 300, 3)]
cls.compat_splines = [
- curves_to_quadratic(c, [MAX_ERR] * 3) for c in curve_groups]
+ curves_to_quadratic(c, [MAX_ERR] * 3) for c in curve_groups
+ ]
cls.compat_errors = [
[cls.curve_spline_dist(c, s) for c, s in zip(curve_group, splines)]
- for curve_group, splines in zip(curve_groups, cls.compat_splines)]
+ for curve_group, splines in zip(curve_groups, cls.compat_splines)
+ ]
cls.results = []
@@ -54,10 +54,16 @@ class CurveToQuadraticTest(unittest.TestCase):
"""Print stats from conversion, as determined during tests."""
for tag, results in cls.results:
- print('\n%s\n%s' % (
- tag, '\n'.join(
- '%s: %s (%d)' % (k, '#' * (v // 10 + 1), v)
- for k, v in sorted(results.items()))))
+ print(
+ "\n%s\n%s"
+ % (
+ tag,
+ "\n".join(
+ "%s: %s (%d)" % (k, "#" * (v // 10 + 1), v)
+ for k, v in sorted(results.items())
+ ),
+ )
+ )
def test_results_unchanged(self):
"""Tests that the results of conversion haven't changed since the time
@@ -65,40 +71,30 @@ class CurveToQuadraticTest(unittest.TestCase):
the conversion algorithm.
"""
- expected = {
- 2: 6,
- 3: 26,
- 4: 82,
- 5: 232,
- 6: 360,
- 7: 266,
- 8: 28}
+ expected = {2: 6, 3: 26, 4: 82, 5: 232, 6: 360, 7: 266, 8: 28}
results = collections.defaultdict(int)
for spline in self.single_splines:
n = len(spline) - 2
results[n] += 1
self.assertEqual(results, expected)
- self.results.append(('single spline lengths', results))
+ self.results.append(("single spline lengths", results))
def test_results_unchanged_multiple(self):
"""Test that conversion results are unchanged for multiple curves."""
- expected = {
- 5: 11,
- 6: 35,
- 7: 49,
- 8: 5}
+ expected = {5: 11, 6: 35, 7: 49, 8: 5}
results = collections.defaultdict(int)
for splines in self.compat_splines:
n = len(splines[0]) - 2
for spline in splines[1:]:
- self.assertEqual(len(spline) - 2, n,
- 'Got incompatible conversion results')
+ self.assertEqual(
+ len(spline) - 2, n, "Got incompatible conversion results"
+ )
results[n] += 1
self.assertEqual(results, expected)
- self.results.append(('compatible spline lengths', results))
+ self.results.append(("compatible spline lengths", results))
def test_does_not_exceed_tolerance(self):
"""Test that conversion results do not exceed given error tolerance."""
@@ -107,7 +103,7 @@ class CurveToQuadraticTest(unittest.TestCase):
for error in self.single_errors:
results[round(error, 1)] += 1
self.assertLessEqual(error, MAX_ERR)
- self.results.append(('single errors', results))
+ self.results.append(("single errors", results))
def test_does_not_exceed_tolerance_multiple(self):
"""Test that error tolerance isn't exceeded for multiple curves."""
@@ -117,7 +113,7 @@ class CurveToQuadraticTest(unittest.TestCase):
for error in errors:
results[round(error, 1)] += 1
self.assertLessEqual(error, MAX_ERR)
- self.results.append(('compatible errors', results))
+ self.results.append(("compatible errors", results))
@classmethod
def curve_spline_dist(cls, bezier, spline, total_steps=20):
@@ -135,9 +131,13 @@ class CurveToQuadraticTest(unittest.TestCase):
p3 = spline[n + 2]
segment = p1, p2, p3
for j in range(steps):
- error = max(error, cls.dist(
- cls.cubic_bezier_at(bezier, (j / steps + i) / n),
- cls.quadratic_bezier_at(segment, j / steps)))
+ error = max(
+ error,
+ cls.dist(
+ cls.cubic_bezier_at(bezier, (j / steps + i) / n),
+ cls.quadratic_bezier_at(segment, j / steps),
+ ),
+ )
return error
@classmethod
@@ -157,8 +157,7 @@ class CurveToQuadraticTest(unittest.TestCase):
t2 = t * t
_t2 = _t * _t
_2_t_t = 2 * t * _t
- return (_t2 * x1 + _2_t_t * x2 + t2 * x3,
- _t2 * y1 + _2_t_t * y2 + t2 * y3)
+ return (_t2 * x1 + _2_t_t * x2 + t2 * x3, _t2 * y1 + _2_t_t * y2 + t2 * y3)
@classmethod
def cubic_bezier_at(cls, b, t):
@@ -170,9 +169,24 @@ class CurveToQuadraticTest(unittest.TestCase):
_t3 = _t * _t2
_3_t2_t = 3 * t2 * _t
_3_t_t2 = 3 * t * _t2
- return (_t3 * x1 + _3_t_t2 * x2 + _3_t2_t * x3 + t3 * x4,
- _t3 * y1 + _3_t_t2 * y2 + _3_t2_t * y3 + t3 * y4)
+ return (
+ _t3 * x1 + _3_t_t2 * x2 + _3_t2_t * x3 + t3 * x4,
+ _t3 * y1 + _3_t_t2 * y2 + _3_t2_t * y3 + t3 * y4,
+ )
+
+
+class AllQuadraticFalseTest(unittest.TestCase):
+ def test_cubic(self):
+ cubic = [(0, 0), (0, 1), (2, 1), (2, 0)]
+ result = curve_to_quadratic(cubic, 0.1, all_quadratic=False)
+ assert result == cubic
+
+ def test_quadratic(self):
+ cubic = [(0, 0), (2, 2), (4, 2), (6, 0)]
+ result = curve_to_quadratic(cubic, 0.1, all_quadratic=False)
+ quadratic = [(0, 0), (3, 3), (6, 0)]
+ assert result == quadratic
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/Tests/cu2qu/ufo_test.py b/Tests/cu2qu/ufo_test.py
index b678ae3d..aa9765e6 100644
--- a/Tests/cu2qu/ufo_test.py
+++ b/Tests/cu2qu/ufo_test.py
@@ -20,7 +20,7 @@ import pytest
ufoLib2 = pytest.importorskip("ufoLib2")
-DATADIR = os.path.join(os.path.dirname(__file__), 'data')
+DATADIR = os.path.join(os.path.dirname(__file__), "data")
TEST_UFOS = [
os.path.join(DATADIR, "RobotoSubset-Regular.ufo"),
@@ -34,7 +34,6 @@ def fonts():
class FontsToQuadraticTest(object):
-
def test_modified(self, fonts):
modified = fonts_to_quadratic(fonts)
assert modified
@@ -42,67 +41,74 @@ class FontsToQuadraticTest(object):
def test_stats(self, fonts):
stats = {}
fonts_to_quadratic(fonts, stats=stats)
- assert stats == {'1': 1, '2': 79, '3': 130, '4': 2}
+ assert stats == {"1": 1, "2": 79, "3": 130, "4": 2}
def test_dump_stats(self, fonts):
with CapturingLogHandler(logger, "INFO") as captor:
fonts_to_quadratic(fonts, dump_stats=True)
assert captor.assertRegex("New spline lengths:")
- def test_remember_curve_type(self, fonts):
+ def test_remember_curve_type_quadratic(self, fonts):
fonts_to_quadratic(fonts, remember_curve_type=True)
assert fonts[0].lib[CURVE_TYPE_LIB_KEY] == "quadratic"
with CapturingLogHandler(logger, "INFO") as captor:
fonts_to_quadratic(fonts, remember_curve_type=True)
assert captor.assertRegex("already converted")
+ def test_remember_curve_type_mixed(self, fonts):
+ fonts_to_quadratic(fonts, remember_curve_type=True, all_quadratic=False)
+ assert fonts[0].lib[CURVE_TYPE_LIB_KEY] == "mixed"
+ with CapturingLogHandler(logger, "INFO") as captor:
+ fonts_to_quadratic(fonts, remember_curve_type=True)
+ assert captor.assertRegex("already converted")
+
def test_no_remember_curve_type(self, fonts):
assert CURVE_TYPE_LIB_KEY not in fonts[0].lib
fonts_to_quadratic(fonts, remember_curve_type=False)
assert CURVE_TYPE_LIB_KEY not in fonts[0].lib
def test_different_glyphsets(self, fonts):
- del fonts[0]['a']
- assert 'a' not in fonts[0]
- assert 'a' in fonts[1]
+ del fonts[0]["a"]
+ assert "a" not in fonts[0]
+ assert "a" in fonts[1]
assert fonts_to_quadratic(fonts)
def test_max_err_em_float(self, fonts):
stats = {}
fonts_to_quadratic(fonts, max_err_em=0.002, stats=stats)
- assert stats == {'1': 5, '2': 193, '3': 14}
+ assert stats == {"1": 5, "2": 193, "3": 14}
def test_max_err_em_list(self, fonts):
stats = {}
fonts_to_quadratic(fonts, max_err_em=[0.002, 0.002], stats=stats)
- assert stats == {'1': 5, '2': 193, '3': 14}
+ assert stats == {"1": 5, "2": 193, "3": 14}
def test_max_err_float(self, fonts):
stats = {}
fonts_to_quadratic(fonts, max_err=4.096, stats=stats)
- assert stats == {'1': 5, '2': 193, '3': 14}
+ assert stats == {"1": 5, "2": 193, "3": 14}
def test_max_err_list(self, fonts):
stats = {}
fonts_to_quadratic(fonts, max_err=[4.096, 4.096], stats=stats)
- assert stats == {'1': 5, '2': 193, '3': 14}
+ assert stats == {"1": 5, "2": 193, "3": 14}
def test_both_max_err_and_max_err_em(self, fonts):
with pytest.raises(TypeError, match="Only one .* can be specified"):
fonts_to_quadratic(fonts, max_err=1.000, max_err_em=0.001)
def test_single_font(self, fonts):
- assert font_to_quadratic(fonts[0], max_err_em=0.002,
- reverse_direction=True)
+ assert font_to_quadratic(fonts[0], max_err_em=0.002, reverse_direction=True)
+ assert font_to_quadratic(
+ fonts[1], max_err_em=0.002, reverse_direction=True, all_quadratic=False
+ )
class GlyphsToQuadraticTest(object):
-
@pytest.mark.parametrize(
["glyph", "expected"],
- [('A', False), # contains no curves, it is not modified
- ('a', True)],
- ids=['lines-only', 'has-curves']
+ [("A", False), ("a", True)], # contains no curves, it is not modified
+ ids=["lines-only", "has-curves"],
)
def test_modified(self, fonts, glyph, expected):
glyphs = [f[glyph] for f in fonts]
@@ -110,28 +116,27 @@ class GlyphsToQuadraticTest(object):
def test_stats(self, fonts):
stats = {}
- glyphs_to_quadratic([f['a'] for f in fonts], stats=stats)
- assert stats == {'2': 1, '3': 7, '4': 3, '5': 1}
+ glyphs_to_quadratic([f["a"] for f in fonts], stats=stats)
+ assert stats == {"2": 1, "3": 7, "4": 3, "5": 1}
def test_max_err_float(self, fonts):
- glyphs = [f['a'] for f in fonts]
+ glyphs = [f["a"] for f in fonts]
stats = {}
glyphs_to_quadratic(glyphs, max_err=4.096, stats=stats)
- assert stats == {'2': 11, '3': 1}
+ assert stats == {"2": 11, "3": 1}
def test_max_err_list(self, fonts):
- glyphs = [f['a'] for f in fonts]
+ glyphs = [f["a"] for f in fonts]
stats = {}
glyphs_to_quadratic(glyphs, max_err=[4.096, 4.096], stats=stats)
- assert stats == {'2': 11, '3': 1}
+ assert stats == {"2": 11, "3": 1}
def test_reverse_direction(self, fonts):
- glyphs = [f['A'] for f in fonts]
+ glyphs = [f["A"] for f in fonts]
assert glyphs_to_quadratic(glyphs, reverse_direction=True)
def test_single_glyph(self, fonts):
- assert glyph_to_quadratic(fonts[0]['a'], max_err=4.096,
- reverse_direction=True)
+ assert glyph_to_quadratic(fonts[0]["a"], max_err=4.096, reverse_direction=True)
@pytest.mark.parametrize(
["outlines", "exception", "message"],
@@ -139,32 +144,31 @@ class GlyphsToQuadraticTest(object):
[
[
[
- ('moveTo', ((0, 0),)),
- ('curveTo', ((1, 1), (2, 2), (3, 3))),
- ('curveTo', ((4, 4), (5, 5), (6, 6))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("curveTo", ((4, 4), (5, 5), (6, 6))),
+ ("closePath", ()),
],
[
- ('moveTo', ((7, 7),)),
- ('curveTo', ((8, 8), (9, 9), (10, 10))),
- ('closePath', ()),
- ]
+ ("moveTo", ((7, 7),)),
+ ("curveTo", ((8, 8), (9, 9), (10, 10))),
+ ("closePath", ()),
+ ],
],
IncompatibleSegmentNumberError,
"have different number of segments",
],
[
[
-
[
- ('moveTo', ((0, 0),)),
- ('curveTo', ((1, 1), (2, 2), (3, 3))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("closePath", ()),
],
[
- ('moveTo', ((4, 4),)),
- ('lineTo', ((5, 5),)),
- ('closePath', ()),
+ ("moveTo", ((4, 4),)),
+ ("lineTo", ((5, 5),)),
+ ("closePath", ()),
],
],
IncompatibleSegmentTypesError,
@@ -174,7 +178,7 @@ class GlyphsToQuadraticTest(object):
ids=[
"unequal-length",
"different-segment-types",
- ]
+ ],
)
def test_incompatible_glyphs(self, outlines, exception, message):
glyphs = []
@@ -193,18 +197,22 @@ class GlyphsToQuadraticTest(object):
font1.info.unitsPerEm = 1000
glyph1 = font1.newGlyph("a")
pen1 = glyph1.getPen()
- for operator, args in [("moveTo", ((0, 0),)),
- ("lineTo", ((1, 1),)),
- ("endPath", ())]:
+ for operator, args in [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("endPath", ()),
+ ]:
getattr(pen1, operator)(*args)
font2 = ufoLib2.Font()
font2.info.unitsPerEm = 1000
glyph2 = font2.newGlyph("a")
pen2 = glyph2.getPen()
- for operator, args in [("moveTo", ((0, 0),)),
- ("curveTo", ((1, 1), (2, 2), (3, 3))),
- ("endPath", ())]:
+ for operator, args in [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("endPath", ()),
+ ]:
getattr(pen2, operator)(*args)
with pytest.raises(IncompatibleFontsError) as excinfo:
@@ -212,7 +220,7 @@ class GlyphsToQuadraticTest(object):
assert excinfo.match("fonts contains incompatible glyphs: 'a'")
assert hasattr(excinfo.value, "glyph_errors")
- error = excinfo.value.glyph_errors['a']
+ error = excinfo.value.glyph_errors["a"]
assert isinstance(error, IncompatibleSegmentTypesError)
assert error.segments == {1: ["line", "curve"]}
@@ -238,7 +246,7 @@ class GlyphsToQuadraticTest(object):
def test_ignore_components(self):
glyph = ufoLib2.objects.Glyph()
pen = glyph.getPen()
- pen.addComponent('a', (1, 0, 0, 1, 0, 0))
+ pen.addComponent("a", (1, 0, 0, 1, 0, 0))
pen.moveTo((0, 0))
pen.curveTo((1, 1), (2, 2), (3, 3))
pen.closePath()
@@ -276,10 +284,5 @@ class GlyphsToQuadraticTest(object):
(0, 101),
(0, 101),
],
- [
- (1, 651),
- (4, 651),
- (3, 101),
- (2, 101)
- ],
+ [(1, 651), (4, 651), (3, 101), (2, 101)],
]
diff --git a/Tests/designspaceLib/data/test_avar2.designspace b/Tests/designspaceLib/data/test_avar2.designspace
new file mode 100644
index 00000000..d54588a6
--- /dev/null
+++ b/Tests/designspaceLib/data/test_avar2.designspace
@@ -0,0 +1,117 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<designspace format="5.0">
+ <axes>
+ <axis tag="JSTF" name="Justify" minimum="-100" maximum="100" default="0"/>
+ <axis tag="wght" name="Weight" minimum="100" maximum="900" default="400">
+ <map input="100" output="26"/>
+ <map input="200" output="39"/>
+ <map input="300" output="58"/>
+ <map input="400" output="90"/>
+ <map input="500" output="108"/>
+ <map input="600" output="128"/>
+ <map input="700" output="151"/>
+ <map input="800" output="169"/>
+ <map input="900" output="190"/>
+ </axis>
+ <axis tag="wdth" name="Width" minimum="62.5" maximum="100" default="100">
+ <map input="62.5" output="70"/>
+ <map input="75" output="79"/>
+ <map input="87.5" output="89"/>
+ <map input="100" output="100"/>
+ </axis>
+ <mappings>
+ <mapping>
+ <input>
+ <dimension name="Justify" xvalue="-100"/>
+ <dimension name="Width" xvalue="100"/>
+ </input>
+ <output>
+ <dimension name="Width" xvalue="70"/>
+ </output>
+ </mapping>
+ </mappings>
+ </axes>
+ <variable-fonts>
+ <variable-font name="NotoSansArabic_Justify_Width">
+ <axis-subsets>
+ <axis-subset name="Justify"/>
+ <axis-subset name="Width"/>
+ </axis-subsets>
+ </variable-font>
+ <variable-font name="NotoSansArabic_Weight_Width">
+ <axis-subsets>
+ <axis-subset name="Weight"/>
+ <axis-subset name="Width"/>
+ </axis-subsets>
+ </variable-font>
+ <variable-font name="NotoSansArabic_Weight">
+ <axis-subsets>
+ <axis-subset name="Weight"/>
+ </axis-subsets>
+ </variable-font>
+ <variable-font name="NotoSansArabic_Width">
+ <axis-subsets>
+ <axis-subset name="Width"/>
+ </axis-subsets>
+ </variable-font>
+ <variable-font name="NotoSansArabic_Justify">
+ <axis-subsets>
+ <axis-subset name="Justify"/>
+ </axis-subsets>
+ </variable-font>
+ </variable-fonts>
+ <sources>
+ <source filename="NotoSansArabic-Light.ufo" name="Noto Sans Arabic Light" familyname="Noto Sans Arabic" stylename="Light">
+ <location>
+ <dimension name="Weight" xvalue="26"/>
+ <dimension name="Width" xvalue="100"/>
+ </location>
+ </source>
+ <source filename="NotoSansArabic-Regular.ufo" name="Noto Sans Arabic Regular" familyname="Noto Sans Arabic" stylename="Regular">
+ <lib copy="1"/>
+ <groups copy="1"/>
+ <features copy="1"/>
+ <info copy="1"/>
+ <location>
+ <dimension name="Weight" xvalue="90"/>
+ <dimension name="Width" xvalue="100"/>
+ </location>
+ </source>
+ <source filename="NotoSansArabic-SemiBold.ufo" name="Noto Sans Arabic SemiBold" familyname="Noto Sans Arabic" stylename="SemiBold">
+ <location>
+ <dimension name="Weight" xvalue="151"/>
+ <dimension name="Width" xvalue="100"/>
+ </location>
+ </source>
+ <source filename="NotoSansArabic-Bold.ufo" name="Noto Sans Arabic Bold" familyname="Noto Sans Arabic" stylename="Bold">
+ <location>
+ <dimension name="Weight" xvalue="190"/>
+ <dimension name="Width" xvalue="100"/>
+ </location>
+ </source>
+ <source filename="NotoSansArabic-CondensedLight.ufo" name="Noto Sans Arabic Condensed Light" familyname="Noto Sans Arabic" stylename="Condensed Light">
+ <location>
+ <dimension name="Weight" xvalue="26"/>
+ <dimension name="Width" xvalue="70"/>
+ </location>
+ </source>
+ <source filename="NotoSansArabic-Condensed.ufo" name="Noto Sans Arabic Condensed" familyname="Noto Sans Arabic" stylename="Condensed">
+ <location>
+ <dimension name="Weight" xvalue="90"/>
+ <dimension name="Width" xvalue="70"/>
+ </location>
+ </source>
+ <source filename="NotoSansArabic-CondensedSemiBold.ufo" name="Noto Sans Arabic Condensed SemiBold" familyname="Noto Sans Arabic" stylename="Condensed SemiBold">
+ <location>
+ <dimension name="Weight" xvalue="151"/>
+ <dimension name="Width" xvalue="70"/>
+ </location>
+ </source>
+ <source filename="NotoSansArabic-CondensedBold.ufo" name="Noto Sans Arabic Condensed Bold" familyname="Noto Sans Arabic" stylename="Condensed Bold">
+ <location>
+ <dimension name="Weight" xvalue="190"/>
+ <dimension name="Width" xvalue="70"/>
+ </location>
+ </source>
+ </sources>
+</designspace>
diff --git a/Tests/designspaceLib/data/test_v5.designspace b/Tests/designspaceLib/data/test_v5.designspace
index d2b3cdae..498956cb 100644
--- a/Tests/designspaceLib/data/test_v5.designspace
+++ b/Tests/designspaceLib/data/test_v5.designspace
@@ -21,6 +21,13 @@
<label uservalue="600" userminimum="450" usermaximum="650" name="Semi Bold"/>
<label uservalue="700" userminimum="650" usermaximum="850" name="Bold"/>
<label uservalue="900" userminimum="850" usermaximum="900" name="Black"/>
+ <!--
+ Add "recursive" linked user values, see:
+ https://github.com/fonttools/fonttools/issues/2852
+ https://github.com/fonttools/fonttools/discussions/2790
+ -->
+ <label uservalue="400" name="Regular" elidable="true" linkeduservalue="700"/>
+ <label uservalue="700" name="Bold" linkeduservalue="400"/>
</labels>
</axis>
diff --git a/Tests/designspaceLib/designspace_test.py b/Tests/designspaceLib/designspace_test.py
index ee2d19e6..ceddfd10 100644
--- a/Tests/designspaceLib/designspace_test.py
+++ b/Tests/designspaceLib/designspace_test.py
@@ -1,12 +1,15 @@
# coding=utf-8
import os
+from pathlib import Path
import re
+import shutil
import pytest
from fontTools import ttLib
from fontTools.designspaceLib import (
AxisDescriptor,
+ AxisMappingDescriptor,
AxisLabelDescriptor,
DesignSpaceDocument,
DesignSpaceDocumentError,
@@ -21,20 +24,22 @@ from fontTools.designspaceLib import (
from fontTools.designspaceLib.types import Range
from fontTools.misc import plistlib
+from .fixtures import datadir
+
def _axesAsDict(axes):
"""
- Make the axis data we have available in
+ Make the axis data we have available in
"""
axesDict = {}
for axisDescriptor in axes:
d = {
- 'name': axisDescriptor.name,
- 'tag': axisDescriptor.tag,
- 'minimum': axisDescriptor.minimum,
- 'maximum': axisDescriptor.maximum,
- 'default': axisDescriptor.default,
- 'map': axisDescriptor.map,
+ "name": axisDescriptor.name,
+ "tag": axisDescriptor.tag,
+ "minimum": axisDescriptor.minimum,
+ "maximum": axisDescriptor.maximum,
+ "default": axisDescriptor.default,
+ "map": axisDescriptor.map,
}
axesDict[axisDescriptor.name] = d
return axesDict
@@ -72,8 +77,8 @@ def test_fill_document(tmpdir):
a1.name = "weight"
a1.tag = "wght"
# note: just to test the element language, not an actual label name recommendations.
- a1.labelNames[u'fa-IR'] = u"قطر"
- a1.labelNames[u'en'] = u"Wéíght"
+ a1.labelNames["fa-IR"] = "قطر"
+ a1.labelNames["en"] = "Wéíght"
doc.addAxis(a1)
a2 = AxisDescriptor()
a2.minimum = 0
@@ -83,7 +88,7 @@ def test_fill_document(tmpdir):
a2.tag = "wdth"
a2.map = [(0.0, 10.0), (15.0, 20.0), (401.0, 66.0), (1000.0, 990.0)]
a2.hidden = True
- a2.labelNames[u'fr'] = u"Chasse"
+ a2.labelNames["fr"] = "Chasse"
doc.addAxis(a2)
# add master 1
@@ -131,18 +136,22 @@ def test_fill_document(tmpdir):
i1.familyName = "InstanceFamilyName"
i1.styleName = "InstanceStyleName"
i1.name = "instance.ufo1"
- i1.location = dict(weight=500, spooky=666) # this adds a dimension that is not defined.
+ i1.location = dict(
+ weight=500, spooky=666
+ ) # this adds a dimension that is not defined.
i1.postScriptFontName = "InstancePostscriptName"
i1.styleMapFamilyName = "InstanceStyleMapFamilyName"
i1.styleMapStyleName = "InstanceStyleMapStyleName"
i1.localisedStyleName = dict(fr="Demigras", ja="半ば")
i1.localisedFamilyName = dict(fr="Montserrat", ja="モンセラート")
i1.localisedStyleMapStyleName = dict(de="Standard")
- i1.localisedStyleMapFamilyName = dict(de="Montserrat Halbfett", ja="モンセラート SemiBold")
+ i1.localisedStyleMapFamilyName = dict(
+ de="Montserrat Halbfett", ja="モンセラート SemiBold"
+ )
glyphData = dict(name="arrow", mute=True, unicodes=[0x123, 0x124, 0x125])
- i1.glyphs['arrow'] = glyphData
- i1.lib['com.coolDesignspaceApp.binaryData'] = plistlib.Data(b'<binary gunk>')
- i1.lib['com.coolDesignspaceApp.specimenText'] = "Hamburgerwhatever"
+ i1.glyphs["arrow"] = glyphData
+ i1.lib["com.coolDesignspaceApp.binaryData"] = plistlib.Data(b"<binary gunk>")
+ i1.lib["com.coolDesignspaceApp.specimenText"] = "Hamburgerwhatever"
doc.addInstance(i1)
# add instance 2
i2 = InstanceDescriptor()
@@ -151,46 +160,51 @@ def test_fill_document(tmpdir):
i2.styleName = "InstanceStyleName"
i2.name = "instance.ufo2"
# anisotropic location
- i2.location = dict(weight=500, width=(400,300))
+ i2.location = dict(weight=500, width=(400, 300))
i2.postScriptFontName = "InstancePostscriptName"
i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
i2.styleMapStyleName = "InstanceStyleMapStyleName"
- glyphMasters = [dict(font="master.ufo1", glyphName="BB", location=dict(width=20,weight=20)), dict(font="master.ufo2", glyphName="CC", location=dict(width=900,weight=900))]
+ glyphMasters = [
+ dict(font="master.ufo1", glyphName="BB", location=dict(width=20, weight=20)),
+ dict(font="master.ufo2", glyphName="CC", location=dict(width=900, weight=900)),
+ ]
glyphData = dict(name="arrow", unicodes=[101, 201, 301])
- glyphData['masters'] = glyphMasters
- glyphData['note'] = "A note about this glyph"
- glyphData['instanceLocation'] = dict(width=100, weight=120)
- i2.glyphs['arrow'] = glyphData
- i2.glyphs['arrow2'] = dict(mute=False)
+ glyphData["masters"] = glyphMasters
+ glyphData["note"] = "A note about this glyph"
+ glyphData["instanceLocation"] = dict(width=100, weight=120)
+ i2.glyphs["arrow"] = glyphData
+ i2.glyphs["arrow2"] = dict(mute=False)
doc.addInstance(i2)
doc.filename = "suggestedFileName.designspace"
- doc.lib['com.coolDesignspaceApp.previewSize'] = 30
+ doc.lib["com.coolDesignspaceApp.previewSize"] = 30
# write some rules
r1 = RuleDescriptor()
r1.name = "named.rule.1"
- r1.conditionSets.append([
- dict(name='axisName_a', minimum=0, maximum=1),
- dict(name='axisName_b', minimum=2, maximum=3)
- ])
+ r1.conditionSets.append(
+ [
+ dict(name="axisName_a", minimum=0, maximum=1),
+ dict(name="axisName_b", minimum=2, maximum=3),
+ ]
+ )
r1.subs.append(("a", "a.alt"))
doc.addRule(r1)
# write the document; without an explicit format it will be 5.0 by default
doc.write(testDocPath5)
assert os.path.exists(testDocPath5)
- assert_equals_test_file(testDocPath5, 'data/test_v5_original.designspace')
+ assert_equals_test_file(testDocPath5, "data/test_v5_original.designspace")
# write again with an explicit format = 4.1
doc.formatVersion = "4.1"
doc.write(testDocPath)
assert os.path.exists(testDocPath)
- assert_equals_test_file(testDocPath, 'data/test_v4_original.designspace')
+ assert_equals_test_file(testDocPath, "data/test_v4_original.designspace")
# import it again
new = DesignSpaceDocument()
new.read(testDocPath)
- assert new.default.location == {'width': 20.0, 'weight': 0.0}
- assert new.filename == 'test_v4.designspace'
+ assert new.default.location == {"width": 20.0, "weight": 0.0}
+ assert new.filename == "test_v4.designspace"
assert new.lib == doc.lib
assert new.instances[0].lib == doc.instances[0].lib
@@ -240,10 +254,10 @@ def test_unicodes(tmpdir):
i1.name = "instance.ufo1"
i1.location = dict(weight=500)
glyphData = dict(name="arrow", mute=True, unicodes=[100, 200, 300])
- i1.glyphs['arrow'] = glyphData
+ i1.glyphs["arrow"] = glyphData
doc.addInstance(i1)
# now we have sources and instances, but no axes yet.
- doc.axes = [] # clear the axes
+ doc.axes = [] # clear the axes
# write some axes
a1 = AxisDescriptor()
a1.minimum = 0
@@ -260,13 +274,13 @@ def test_unicodes(tmpdir):
new.read(testDocPath)
new.write(testDocPath2)
# compare the file contents
- with open(testDocPath, 'r', encoding='utf-8') as f1:
+ with open(testDocPath, "r", encoding="utf-8") as f1:
t1 = f1.read()
- with open(testDocPath2, 'r', encoding='utf-8') as f2:
+ with open(testDocPath2, "r", encoding="utf-8") as f2:
t2 = f2.read()
assert t1 == t2
# check the unicode values read from the document
- assert new.instances[0].glyphs['arrow']['unicodes'] == [100,200,300]
+ assert new.instances[0].glyphs["arrow"]["unicodes"] == [100, 200, 300]
def test_localisedNames(tmpdir):
@@ -299,20 +313,22 @@ def test_localisedNames(tmpdir):
i1.styleMapFamilyName = "Montserrat SemiBold"
i1.styleMapStyleName = "Regular"
i1.setFamilyName("Montserrat", "fr")
- i1.setFamilyName(u"モンセラート", "ja")
+ i1.setFamilyName("モンセラート", "ja")
i1.setStyleName("Demigras", "fr")
- i1.setStyleName(u"半ば", "ja")
- i1.setStyleMapStyleName(u"Standard", "de")
+ i1.setStyleName("半ば", "ja")
+ i1.setStyleMapStyleName("Standard", "de")
i1.setStyleMapFamilyName("Montserrat Halbfett", "de")
- i1.setStyleMapFamilyName(u"モンセラート SemiBold", "ja")
+ i1.setStyleMapFamilyName("モンセラート SemiBold", "ja")
i1.name = "instance.ufo1"
- i1.location = dict(weight=500, spooky=666) # this adds a dimension that is not defined.
+ i1.location = dict(
+ weight=500, spooky=666
+ ) # this adds a dimension that is not defined.
i1.postScriptFontName = "InstancePostscriptName"
glyphData = dict(name="arrow", mute=True, unicodes=[0x123])
- i1.glyphs['arrow'] = glyphData
+ i1.glyphs["arrow"] = glyphData
doc.addInstance(i1)
# now we have sources and instances, but no axes yet.
- doc.axes = [] # clear the axes
+ doc.axes = [] # clear the axes
# write some axes
a1 = AxisDescriptor()
a1.minimum = 0
@@ -321,8 +337,8 @@ def test_localisedNames(tmpdir):
a1.name = "weight"
a1.tag = "wght"
# note: just to test the element language, not an actual label name recommendations.
- a1.labelNames[u'fa-IR'] = u"قطر"
- a1.labelNames[u'en'] = u"Wéíght"
+ a1.labelNames["fa-IR"] = "قطر"
+ a1.labelNames["en"] = "Wéíght"
doc.addAxis(a1)
a2 = AxisDescriptor()
a2.minimum = 0
@@ -331,7 +347,7 @@ def test_localisedNames(tmpdir):
a2.name = "width"
a2.tag = "wdth"
a2.map = [(0.0, 10.0), (401.0, 66.0), (1000.0, 990.0)]
- a2.labelNames[u'fr'] = u"Poids"
+ a2.labelNames["fr"] = "Poids"
doc.addAxis(a2)
# add an axis that is not part of any location to see if that works
a3 = AxisDescriptor()
@@ -341,14 +357,16 @@ def test_localisedNames(tmpdir):
a3.name = "spooky"
a3.tag = "spok"
a3.map = [(0.0, 10.0), (401.0, 66.0), (1000.0, 990.0)]
- #doc.addAxis(a3) # uncomment this line to test the effects of default axes values
+ # doc.addAxis(a3) # uncomment this line to test the effects of default axes values
# write some rules
r1 = RuleDescriptor()
r1.name = "named.rule.1"
- r1.conditionSets.append([
- dict(name='weight', minimum=200, maximum=500),
- dict(name='width', minimum=0, maximum=150)
- ])
+ r1.conditionSets.append(
+ [
+ dict(name="weight", minimum=200, maximum=500),
+ dict(name="width", minimum=0, maximum=150),
+ ]
+ )
r1.subs.append(("a", "a.alt"))
doc.addRule(r1)
# write the document
@@ -358,9 +376,9 @@ def test_localisedNames(tmpdir):
new = DesignSpaceDocument()
new.read(testDocPath)
new.write(testDocPath2)
- with open(testDocPath, 'r', encoding='utf-8') as f1:
+ with open(testDocPath, "r", encoding="utf-8") as f1:
t1 = f1.read()
- with open(testDocPath2, 'r', encoding='utf-8') as f2:
+ with open(testDocPath2, "r", encoding="utf-8") as f2:
t2 = f2.read()
assert t1 == t2
@@ -378,7 +396,7 @@ def test_handleNoAxes(tmpdir):
# Case 1: No axes element in the document, but there are sources and instances
doc = DesignSpaceDocument()
- for name, value in [('One', 1),('Two', 2),('Three', 3)]:
+ for name, value in [("One", 1), ("Two", 2), ("Three", 3)]:
a = AxisDescriptor()
a.minimum = 0
a.maximum = 1000
@@ -417,7 +435,7 @@ def test_handleNoAxes(tmpdir):
i1.familyName = "InstanceFamilyName"
i1.styleName = "InstanceStyleName"
i1.name = "instance.ufo1"
- i1.location = dict(axisNameOne=(-1000,500), axisNameTwo=100)
+ i1.location = dict(axisNameOne=(-1000, 500), axisNameTwo=100)
i1.postScriptFontName = "InstancePostscriptName"
i1.styleMapFamilyName = "InstanceStyleMapFamilyName"
i1.styleMapStyleName = "InstanceStyleMapStyleName"
@@ -428,6 +446,7 @@ def test_handleNoAxes(tmpdir):
verify.read(testDocPath)
verify.write(testDocPath2)
+
def test_pathNameResolve(tmpdir):
tmpdir = str(tmpdir)
# test how descriptor.path and descriptor.filename are resolved
@@ -499,7 +518,9 @@ def test_pathNameResolve(tmpdir):
verify.read(testDocPath3)
assert verify.sources[0].filename == "../somewhere/over/the/rainbow.ufo"
# make the absolute path for filename so we can see if it matches the path
- p = os.path.abspath(os.path.join(os.path.dirname(testDocPath3), verify.sources[0].filename))
+ p = os.path.abspath(
+ os.path.join(os.path.dirname(testDocPath3), verify.sources[0].filename)
+ )
assert verify.sources[0].path == posix(p)
# Case 4: the filename points to one file, the path points to another. The path takes precedence.
@@ -529,7 +550,7 @@ def test_pathNameResolve(tmpdir):
s.familyName = "MasterFamilyName"
s.styleName = "MasterStyleNameOne"
doc.addSource(s)
- doc.write(testDocPath5) # so that the document has a path
+ doc.write(testDocPath5) # so that the document has a path
doc.updateFilenameFromPath()
assert doc.sources[0].filename == "masters/masterTest1.ufo"
@@ -543,7 +564,7 @@ def test_pathNameResolve(tmpdir):
s.location = dict(weight=0)
s.familyName = "MasterFamilyName"
s.styleName = "MasterStyleNameOne"
- doc.write(testDocPath5) # so that the document has a path
+ doc.write(testDocPath5) # so that the document has a path
doc.addSource(s)
assert doc.sources[0].filename == "../somewhere/over/the/rainbow.ufo"
doc.updateFilenameFromPath(force=True)
@@ -561,21 +582,22 @@ def test_normalise1():
a1.name = "axisName_a"
a1.tag = "TAGA"
doc.addAxis(a1)
- assert doc.normalizeLocation(dict(axisName_a=0)) == {'axisName_a': 0.0}
- assert doc.normalizeLocation(dict(axisName_a=1000)) == {'axisName_a': 1.0}
+ assert doc.normalizeLocation(dict(axisName_a=0)) == {"axisName_a": 0.0}
+ assert doc.normalizeLocation(dict(axisName_a=1000)) == {"axisName_a": 1.0}
# clipping beyond max values:
- assert doc.normalizeLocation(dict(axisName_a=1001)) == {'axisName_a': 1.0}
- assert doc.normalizeLocation(dict(axisName_a=500)) == {'axisName_a': 0.5}
- assert doc.normalizeLocation(dict(axisName_a=-1000)) == {'axisName_a': -1.0}
- assert doc.normalizeLocation(dict(axisName_a=-1001)) == {'axisName_a': -1.0}
+ assert doc.normalizeLocation(dict(axisName_a=1001)) == {"axisName_a": 1.0}
+ assert doc.normalizeLocation(dict(axisName_a=500)) == {"axisName_a": 0.5}
+ assert doc.normalizeLocation(dict(axisName_a=-1000)) == {"axisName_a": -1.0}
+ assert doc.normalizeLocation(dict(axisName_a=-1001)) == {"axisName_a": -1.0}
# anisotropic coordinates normalise to isotropic
- assert doc.normalizeLocation(dict(axisName_a=(1000, -1000))) == {'axisName_a': 1.0}
+ assert doc.normalizeLocation(dict(axisName_a=(1000, -1000))) == {"axisName_a": 1.0}
doc.normalize()
r = []
for axis in doc.axes:
r.append((axis.name, axis.minimum, axis.default, axis.maximum))
r.sort()
- assert r == [('axisName_a', -1.0, 0.0, 1.0)]
+ assert r == [("axisName_a", -1.0, 0.0, 1.0)]
+
def test_normalise2():
# normalisation with minimum > 0
@@ -587,22 +609,25 @@ def test_normalise2():
a2.default = 100
a2.name = "axisName_b"
doc.addAxis(a2)
- assert doc.normalizeLocation(dict(axisName_b=0)) == {'axisName_b': 0.0}
- assert doc.normalizeLocation(dict(axisName_b=1000)) == {'axisName_b': 1.0}
+ assert doc.normalizeLocation(dict(axisName_b=0)) == {"axisName_b": 0.0}
+ assert doc.normalizeLocation(dict(axisName_b=1000)) == {"axisName_b": 1.0}
# clipping beyond max values:
- assert doc.normalizeLocation(dict(axisName_b=1001)) == {'axisName_b': 1.0}
- assert doc.normalizeLocation(dict(axisName_b=500)) == {'axisName_b': 0.4444444444444444}
- assert doc.normalizeLocation(dict(axisName_b=-1000)) == {'axisName_b': 0.0}
- assert doc.normalizeLocation(dict(axisName_b=-1001)) == {'axisName_b': 0.0}
+ assert doc.normalizeLocation(dict(axisName_b=1001)) == {"axisName_b": 1.0}
+ assert doc.normalizeLocation(dict(axisName_b=500)) == {
+ "axisName_b": 0.4444444444444444
+ }
+ assert doc.normalizeLocation(dict(axisName_b=-1000)) == {"axisName_b": 0.0}
+ assert doc.normalizeLocation(dict(axisName_b=-1001)) == {"axisName_b": 0.0}
# anisotropic coordinates normalise to isotropic
- assert doc.normalizeLocation(dict(axisName_b=(1000,-1000))) == {'axisName_b': 1.0}
- assert doc.normalizeLocation(dict(axisName_b=1001)) == {'axisName_b': 1.0}
+ assert doc.normalizeLocation(dict(axisName_b=(1000, -1000))) == {"axisName_b": 1.0}
+ assert doc.normalizeLocation(dict(axisName_b=1001)) == {"axisName_b": 1.0}
doc.normalize()
r = []
for axis in doc.axes:
r.append((axis.name, axis.minimum, axis.default, axis.maximum))
r.sort()
- assert r == [('axisName_b', 0.0, 0.0, 1.0)]
+ assert r == [("axisName_b", 0.0, 0.0, 1.0)]
+
def test_normalise3():
# normalisation of negative values, with default == maximum
@@ -614,16 +639,17 @@ def test_normalise3():
a3.default = 0
a3.name = "ccc"
doc.addAxis(a3)
- assert doc.normalizeLocation(dict(ccc=0)) == {'ccc': 0.0}
- assert doc.normalizeLocation(dict(ccc=1)) == {'ccc': 0.0}
- assert doc.normalizeLocation(dict(ccc=-1000)) == {'ccc': -1.0}
- assert doc.normalizeLocation(dict(ccc=-1001)) == {'ccc': -1.0}
+ assert doc.normalizeLocation(dict(ccc=0)) == {"ccc": 0.0}
+ assert doc.normalizeLocation(dict(ccc=1)) == {"ccc": 0.0}
+ assert doc.normalizeLocation(dict(ccc=-1000)) == {"ccc": -1.0}
+ assert doc.normalizeLocation(dict(ccc=-1001)) == {"ccc": -1.0}
doc.normalize()
r = []
for axis in doc.axes:
r.append((axis.name, axis.minimum, axis.default, axis.maximum))
r.sort()
- assert r == [('ccc', -1.0, 0.0, 0.0)]
+ assert r == [("ccc", -1.0, 0.0, 0.0)]
+
def test_normalise4():
# normalisation with a map
@@ -634,14 +660,15 @@ def test_normalise4():
a4.maximum = 1000
a4.default = 0
a4.name = "ddd"
- a4.map = [(0,100), (300, 500), (600, 500), (1000,900)]
+ a4.map = [(0, 100), (300, 500), (600, 500), (1000, 900)]
doc.addAxis(a4)
doc.normalize()
r = []
for axis in doc.axes:
r.append((axis.name, axis.map))
r.sort()
- assert r == [('ddd', [(0, 0.0), (300, 0.5), (600, 0.5), (1000, 1.0)])]
+ assert r == [("ddd", [(0, 0.0), (300, 0.5), (600, 0.5), (1000, 1.0)])]
+
def test_axisMapping():
# note: because designspance lib does not do any actual
@@ -653,68 +680,113 @@ def test_axisMapping():
a4.maximum = 1000
a4.default = 0
a4.name = "ddd"
- a4.map = [(0,100), (300, 500), (600, 500), (1000,900)]
+ a4.map = [(0, 100), (300, 500), (600, 500), (1000, 900)]
doc.addAxis(a4)
doc.normalize()
r = []
for axis in doc.axes:
r.append((axis.name, axis.map))
r.sort()
- assert r == [('ddd', [(0, 0.0), (300, 0.5), (600, 0.5), (1000, 1.0)])]
+ assert r == [("ddd", [(0, 0.0), (300, 0.5), (600, 0.5), (1000, 1.0)])]
+
+
+def test_axisMappingsRoundtrip(tmpdir):
+ # tests of axisMappings in a document, roundtripping.
+
+ tmpdir = str(tmpdir)
+ srcDocPath = (Path(__file__) / "../data/test_avar2.designspace").resolve()
+ testDocPath = os.path.join(tmpdir, "test_avar2.designspace")
+ shutil.copy(srcDocPath, testDocPath)
+ testDocPath2 = os.path.join(tmpdir, "test_avar2_roundtrip.designspace")
+ doc = DesignSpaceDocument()
+ doc.read(testDocPath)
+ assert doc.axisMappings
+ assert len(doc.axisMappings) == 1
+ assert doc.axisMappings[0].inputLocation == {"Justify": -100.0, "Width": 100.0}
+
+ # This is a bit of a hack, but it's the only way to make sure
+ # that the save works on Windows if the tempdir and the data
+ # dir are on different drives.
+ for descriptor in doc.sources + doc.instances:
+ descriptor.path = None
+
+ doc.write(testDocPath2)
+ # verify these results
+ doc2 = DesignSpaceDocument()
+ doc2.read(testDocPath2)
+ assert [mapping.inputLocation for mapping in doc.axisMappings] == [
+ mapping.inputLocation for mapping in doc2.axisMappings
+ ]
+ assert [mapping.outputLocation for mapping in doc.axisMappings] == [
+ mapping.outputLocation for mapping in doc2.axisMappings
+ ]
+
def test_rulesConditions(tmpdir):
# tests of rules, conditionsets and conditions
r1 = RuleDescriptor()
r1.name = "named.rule.1"
- r1.conditionSets.append([
- dict(name='axisName_a', minimum=0, maximum=1000),
- dict(name='axisName_b', minimum=0, maximum=3000)
- ])
+ r1.conditionSets.append(
+ [
+ dict(name="axisName_a", minimum=0, maximum=1000),
+ dict(name="axisName_b", minimum=0, maximum=3000),
+ ]
+ )
r1.subs.append(("a", "a.alt"))
- assert evaluateRule(r1, dict(axisName_a = 500, axisName_b = 0)) == True
- assert evaluateRule(r1, dict(axisName_a = 0, axisName_b = 0)) == True
- assert evaluateRule(r1, dict(axisName_a = 1000, axisName_b = 0)) == True
- assert evaluateRule(r1, dict(axisName_a = 1000, axisName_b = -100)) == False
- assert evaluateRule(r1, dict(axisName_a = 1000.0001, axisName_b = 0)) == False
- assert evaluateRule(r1, dict(axisName_a = -0.0001, axisName_b = 0)) == False
- assert evaluateRule(r1, dict(axisName_a = -100, axisName_b = 0)) == False
- assert processRules([r1], dict(axisName_a = 500, axisName_b = 0), ["a", "b", "c"]) == ['a.alt', 'b', 'c']
- assert processRules([r1], dict(axisName_a = 500, axisName_b = 0), ["a.alt", "b", "c"]) == ['a.alt', 'b', 'c']
- assert processRules([r1], dict(axisName_a = 2000, axisName_b = 0), ["a", "b", "c"]) == ['a', 'b', 'c']
+ assert evaluateRule(r1, dict(axisName_a=500, axisName_b=0)) == True
+ assert evaluateRule(r1, dict(axisName_a=0, axisName_b=0)) == True
+ assert evaluateRule(r1, dict(axisName_a=1000, axisName_b=0)) == True
+ assert evaluateRule(r1, dict(axisName_a=1000, axisName_b=-100)) == False
+ assert evaluateRule(r1, dict(axisName_a=1000.0001, axisName_b=0)) == False
+ assert evaluateRule(r1, dict(axisName_a=-0.0001, axisName_b=0)) == False
+ assert evaluateRule(r1, dict(axisName_a=-100, axisName_b=0)) == False
+ assert processRules([r1], dict(axisName_a=500, axisName_b=0), ["a", "b", "c"]) == [
+ "a.alt",
+ "b",
+ "c",
+ ]
+ assert processRules(
+ [r1], dict(axisName_a=500, axisName_b=0), ["a.alt", "b", "c"]
+ ) == ["a.alt", "b", "c"]
+ assert processRules([r1], dict(axisName_a=2000, axisName_b=0), ["a", "b", "c"]) == [
+ "a",
+ "b",
+ "c",
+ ]
# rule with only a maximum
r2 = RuleDescriptor()
r2.name = "named.rule.2"
- r2.conditionSets.append([dict(name='axisName_a', maximum=500)])
+ r2.conditionSets.append([dict(name="axisName_a", maximum=500)])
r2.subs.append(("b", "b.alt"))
- assert evaluateRule(r2, dict(axisName_a = 0)) == True
- assert evaluateRule(r2, dict(axisName_a = -500)) == True
- assert evaluateRule(r2, dict(axisName_a = 1000)) == False
+ assert evaluateRule(r2, dict(axisName_a=0)) == True
+ assert evaluateRule(r2, dict(axisName_a=-500)) == True
+ assert evaluateRule(r2, dict(axisName_a=1000)) == False
# rule with only a minimum
r3 = RuleDescriptor()
r3.name = "named.rule.3"
- r3.conditionSets.append([dict(name='axisName_a', minimum=500)])
+ r3.conditionSets.append([dict(name="axisName_a", minimum=500)])
r3.subs.append(("c", "c.alt"))
- assert evaluateRule(r3, dict(axisName_a = 0)) == False
- assert evaluateRule(r3, dict(axisName_a = 1000)) == True
- assert evaluateRule(r3, dict(axisName_a = 1000)) == True
+ assert evaluateRule(r3, dict(axisName_a=0)) == False
+ assert evaluateRule(r3, dict(axisName_a=1000)) == True
+ assert evaluateRule(r3, dict(axisName_a=1000)) == True
# rule with only a minimum, maximum in separate conditions
r4 = RuleDescriptor()
r4.name = "named.rule.4"
- r4.conditionSets.append([
- dict(name='axisName_a', minimum=500),
- dict(name='axisName_b', maximum=500)
- ])
+ r4.conditionSets.append(
+ [dict(name="axisName_a", minimum=500), dict(name="axisName_b", maximum=500)]
+ )
r4.subs.append(("c", "c.alt"))
- assert evaluateRule(r4, dict(axisName_a = 1000, axisName_b = 0)) == True
- assert evaluateRule(r4, dict(axisName_a = 0, axisName_b = 0)) == False
- assert evaluateRule(r4, dict(axisName_a = 1000, axisName_b = 1000)) == False
+ assert evaluateRule(r4, dict(axisName_a=1000, axisName_b=0)) == True
+ assert evaluateRule(r4, dict(axisName_a=0, axisName_b=0)) == False
+ assert evaluateRule(r4, dict(axisName_a=1000, axisName_b=1000)) == False
+
def test_rulesDocument(tmpdir):
# tests of rules in a document, roundtripping.
@@ -739,26 +811,51 @@ def test_rulesDocument(tmpdir):
doc.addAxis(b1)
r1 = RuleDescriptor()
r1.name = "named.rule.1"
- r1.conditionSets.append([
- dict(name='axisName_a', minimum=0, maximum=1000),
- dict(name='axisName_b', minimum=0, maximum=3000)
- ])
+ r1.conditionSets.append(
+ [
+ dict(name="axisName_a", minimum=0, maximum=1000),
+ dict(name="axisName_b", minimum=0, maximum=3000),
+ ]
+ )
r1.subs.append(("a", "a.alt"))
# rule with minium and maximum
doc.addRule(r1)
assert len(doc.rules) == 1
assert len(doc.rules[0].conditionSets) == 1
assert len(doc.rules[0].conditionSets[0]) == 2
- assert _axesAsDict(doc.axes) == {'axisName_a': {'map': [], 'name': 'axisName_a', 'default': 0, 'minimum': 0, 'maximum': 1000, 'tag': 'TAGA'}, 'axisName_b': {'map': [], 'name': 'axisName_b', 'default': 2000, 'minimum': 2000, 'maximum': 3000, 'tag': 'TAGB'}}
- assert doc.rules[0].conditionSets == [[
- {'minimum': 0, 'maximum': 1000, 'name': 'axisName_a'},
- {'minimum': 0, 'maximum': 3000, 'name': 'axisName_b'}]]
- assert doc.rules[0].subs == [('a', 'a.alt')]
+ assert _axesAsDict(doc.axes) == {
+ "axisName_a": {
+ "map": [],
+ "name": "axisName_a",
+ "default": 0,
+ "minimum": 0,
+ "maximum": 1000,
+ "tag": "TAGA",
+ },
+ "axisName_b": {
+ "map": [],
+ "name": "axisName_b",
+ "default": 2000,
+ "minimum": 2000,
+ "maximum": 3000,
+ "tag": "TAGB",
+ },
+ }
+ assert doc.rules[0].conditionSets == [
+ [
+ {"minimum": 0, "maximum": 1000, "name": "axisName_a"},
+ {"minimum": 0, "maximum": 3000, "name": "axisName_b"},
+ ]
+ ]
+ assert doc.rules[0].subs == [("a", "a.alt")]
doc.normalize()
- assert doc.rules[0].name == 'named.rule.1'
- assert doc.rules[0].conditionSets == [[
- {'minimum': 0.0, 'maximum': 1.0, 'name': 'axisName_a'},
- {'minimum': 0.0, 'maximum': 1.0, 'name': 'axisName_b'}]]
+ assert doc.rules[0].name == "named.rule.1"
+ assert doc.rules[0].conditionSets == [
+ [
+ {"minimum": 0.0, "maximum": 1.0, "name": "axisName_a"},
+ {"minimum": 0.0, "maximum": 1.0, "name": "axisName_b"},
+ ]
+ ]
# still one conditionset
assert len(doc.rules[0].conditionSets) == 1
doc.write(testDocPath)
@@ -778,17 +875,22 @@ def test_rulesDocument(tmpdir):
assert len(doc3.rules) == 1
assert len(doc3.rules[0].conditionSets) == 2
+
def _addUnwrappedCondition(path):
# only for testing, so we can make an invalid designspace file
# older designspace files may have conditions that are not wrapped in a conditionset
# These can be read into a new conditionset.
- with open(path, 'r', encoding='utf-8') as f:
+ with open(path, "r", encoding="utf-8") as f:
d = f.read()
print(d)
- d = d.replace('<rule name="named.rule.1">', '<rule name="named.rule.1">\n\t<condition maximum="22" minimum="33" name="axisName_a" />')
- with open(path, 'w', encoding='utf-8') as f:
+ d = d.replace(
+ '<rule name="named.rule.1">',
+ '<rule name="named.rule.1">\n\t<condition maximum="22" minimum="33" name="axisName_a" />',
+ )
+ with open(path, "w", encoding="utf-8") as f:
f.write(d)
+
def test_documentLib(tmpdir):
# roundtrip test of the document lib with some nested data
tmpdir = str(tmpdir)
@@ -801,7 +903,7 @@ def test_documentLib(tmpdir):
a1.maximum = 1000
a1.default = 0
doc.addAxis(a1)
- dummyData = dict(a=123, b=u"äbc", c=[1,2,3], d={'a':123})
+ dummyData = dict(a=123, b="äbc", c=[1, 2, 3], d={"a": 123})
dummyKey = "org.fontTools.designspaceLib"
doc.lib = {dummyKey: dummyData}
doc.write(testDocPath1)
@@ -855,17 +957,15 @@ def test_updatePaths(tmpdir):
def test_read_with_path_object():
- import pathlib
- source = (pathlib.Path(__file__) / "../data/test_v4_original.designspace").resolve()
+ source = (Path(__file__) / "../data/test_v4_original.designspace").resolve()
assert source.exists()
doc = DesignSpaceDocument()
doc.read(source)
def test_with_with_path_object(tmpdir):
- import pathlib
tmpdir = str(tmpdir)
- dest = pathlib.Path(tmpdir) / "test_v4_original.designspace"
+ dest = Path(tmpdir) / "test_v4_original.designspace"
doc = DesignSpaceDocument()
doc.write(dest)
assert dest.exists()
@@ -934,7 +1034,6 @@ def test_findDefault_axis_mapping():
def test_loadSourceFonts():
-
def opener(path):
font = ttLib.TTFont()
font.importXML(path)
@@ -945,7 +1044,7 @@ def test_loadSourceFonts():
os.path.dirname(os.path.dirname(__file__)),
"varLib",
"data",
- "SparseMasters.designspace"
+ "SparseMasters.designspace",
)
designspace = DesignSpaceDocument.fromfile(path)
@@ -976,7 +1075,7 @@ def test_addAxisDescriptor():
ds = DesignSpaceDocument()
axis = ds.addAxisDescriptor(
- name="Weight", tag="wght", minimum=100, default=400, maximum=900
+ name="Weight", tag="wght", minimum=100, default=400, maximum=900
)
assert ds.axes[0] is axis
@@ -988,6 +1087,19 @@ def test_addAxisDescriptor():
assert axis.maximum == 900
+def test_addAxisDescriptor():
+ ds = DesignSpaceDocument()
+
+ mapping = ds.addAxisMappingDescriptor(
+ inputLocation={"weight": 900, "width": 150}, outputLocation={"weight": 870}
+ )
+
+ assert ds.axisMappings[0] is mapping
+ assert isinstance(mapping, AxisMappingDescriptor)
+ assert mapping.inputLocation == {"weight": 900, "width": 150}
+ assert mapping.outputLocation == {"weight": 870}
+
+
def test_addSourceDescriptor():
ds = DesignSpaceDocument()
@@ -1003,10 +1115,10 @@ def test_addInstanceDescriptor():
ds = DesignSpaceDocument()
instance = ds.addInstanceDescriptor(
- name="TestInstance",
- location={"Weight": 400},
- styleName="Regular",
- styleMapStyleName="regular",
+ name="TestInstance",
+ location={"Weight": 400},
+ styleName="Regular",
+ styleMapStyleName="regular",
)
assert ds.instances[0] is instance
@@ -1064,3 +1176,10 @@ def test_Range_post_init():
assert r.minimum == -1
assert r.maximum == 2
assert r.default == -1
+
+
+def test_get_axes(datadir: Path) -> None:
+ ds = DesignSpaceDocument.fromfile(datadir / "test_v5.designspace")
+
+ assert ds.getAxis("Width") is ds.getAxisByTag("wdth")
+ assert ds.getAxis("Italic") is ds.getAxisByTag("ital")
diff --git a/Tests/designspaceLib/designspace_v5_test.py b/Tests/designspaceLib/designspace_v5_test.py
index 35ad29b2..84c927a2 100644
--- a/Tests/designspaceLib/designspace_v5_test.py
+++ b/Tests/designspaceLib/designspace_v5_test.py
@@ -78,6 +78,15 @@ def test_read_v5_document_simple(datadir):
AxisLabelDescriptor(
name="Black", userMinimum=850, userValue=900, userMaximum=900
),
+ AxisLabelDescriptor(
+ name="Regular",
+ userValue=400,
+ linkedUserValue=700,
+ elidable=True,
+ ),
+ AxisLabelDescriptor(
+ name="Bold", userValue=700, linkedUserValue=400
+ ),
],
),
AxisDescriptor(
diff --git a/Tests/designspaceLib/split_test.py b/Tests/designspaceLib/split_test.py
index 8708f704..3364133f 100644
--- a/Tests/designspaceLib/split_test.py
+++ b/Tests/designspaceLib/split_test.py
@@ -1,9 +1,16 @@
+import math
import shutil
from pathlib import Path
import pytest
from fontTools.designspaceLib import DesignSpaceDocument
-from fontTools.designspaceLib.split import splitInterpolable, splitVariableFonts, convert5to4
+from fontTools.designspaceLib.split import (
+ _conditionSetFrom,
+ convert5to4,
+ splitInterpolable,
+ splitVariableFonts,
+)
+from fontTools.designspaceLib.types import ConditionSet, Range
from .fixtures import datadir
@@ -74,7 +81,9 @@ def test_split(datadir, tmpdir, test_ds, expected_interpolable_spaces):
vfs = list(splitVariableFonts(sub_doc))
assert expected_vf_names == set(vf[0] for vf in vfs)
- loc_str = "_".join(f"{name}_{value}"for name, value in sorted(location.items()))
+ loc_str = "_".join(
+ f"{name}_{value}" for name, value in sorted(location.items())
+ )
data_out = datadir / "split_output" / f"{temp_in.stem}_{loc_str}.designspace"
temp_out = Path(tmpdir) / "out" / f"{temp_in.stem}_{loc_str}.designspace"
temp_out.parent.mkdir(exist_ok=True)
@@ -103,8 +112,6 @@ def test_split(datadir, tmpdir, test_ds, expected_interpolable_spaces):
)
-
-
@pytest.mark.parametrize(
"test_ds,expected_vfs",
[
@@ -137,7 +144,9 @@ def test_convert5to4(datadir, tmpdir, test_ds, expected_vfs):
assert variable_fonts.keys() == expected_vfs
for vf_name, vf in variable_fonts.items():
- data_out = (datadir / "convert5to4_output" / vf_name).with_suffix(".designspace")
+ data_out = (datadir / "convert5to4_output" / vf_name).with_suffix(
+ ".designspace"
+ )
temp_out = (Path(tmpdir) / "out" / vf_name).with_suffix(".designspace")
temp_out.parent.mkdir(exist_ok=True)
vf.write(temp_out)
@@ -148,3 +157,73 @@ def test_convert5to4(datadir, tmpdir, test_ds, expected_vfs):
assert data_out.read_text(encoding="utf-8") == temp_out.read_text(
encoding="utf-8"
)
+
+
+@pytest.mark.parametrize(
+ ["unbounded_condition"],
+ [
+ ({"name": "Weight", "minimum": 500},),
+ ({"name": "Weight", "maximum": 500},),
+ ({"name": "Weight", "minimum": 500, "maximum": None},),
+ ({"name": "Weight", "minimum": None, "maximum": 500},),
+ ],
+)
+def test_optional_min_max(unbounded_condition):
+ """Check that split functions can handle conditions that are partially
+ unbounded without tripping over None values and missing keys."""
+ doc = DesignSpaceDocument()
+
+ doc.addAxisDescriptor(
+ name="Weight", tag="wght", minimum=400, maximum=1000, default=400
+ )
+
+ doc.addRuleDescriptor(
+ name="unbounded",
+ conditionSets=[[unbounded_condition]],
+ )
+
+ assert len(list(splitInterpolable(doc))) == 1
+ assert len(list(splitVariableFonts(doc))) == 1
+
+
+@pytest.mark.parametrize(
+ ["condition", "expected_set"],
+ [
+ (
+ {"name": "axis", "minimum": 0.5},
+ {"axis": Range(minimum=0.5, maximum=math.inf)},
+ ),
+ (
+ {"name": "axis", "maximum": 0.5},
+ {"axis": Range(minimum=-math.inf, maximum=0.5)},
+ ),
+ (
+ {"name": "axis", "minimum": 0.5, "maximum": None},
+ {"axis": Range(minimum=0.5, maximum=math.inf)},
+ ),
+ (
+ {"name": "axis", "minimum": None, "maximum": 0.5},
+ {"axis": Range(minimum=-math.inf, maximum=0.5)},
+ ),
+ ],
+)
+def test_optional_min_max_internal(condition, expected_set: ConditionSet):
+ """Check that split's internal helper functions produce the correct output
+ for conditions that are partially unbounded."""
+ assert _conditionSetFrom([condition]) == expected_set
+
+
+def test_avar2(datadir):
+ ds = DesignSpaceDocument()
+ ds.read(datadir / "test_avar2.designspace")
+ _, subDoc = next(splitInterpolable(ds))
+ assert len(subDoc.axisMappings) == 1
+
+ subDocs = list(splitVariableFonts(ds))
+ assert len(subDocs) == 5
+ for i, (_, subDoc) in enumerate(subDocs):
+ # Only the first one should have a mapping, according to the document
+ if i == 0:
+ assert len(subDoc.axisMappings) == 1
+ else:
+ assert len(subDoc.axisMappings) == 0
diff --git a/Tests/designspaceLib/statNames_test.py b/Tests/designspaceLib/statNames_test.py
index 99d1c7fa..dd5fb105 100644
--- a/Tests/designspaceLib/statNames_test.py
+++ b/Tests/designspaceLib/statNames_test.py
@@ -61,6 +61,28 @@ def test_detect_ribbi_aktiv(datadir):
)
+def test_detect_ribbi_recursive(datadir):
+ doc = DesignSpaceDocument.fromfile(datadir / "test_v5.designspace")
+
+ assert getStatNames(doc, {"Weight": 700, "Width": 125, "Italic": 1}) == StatNames(
+ familyNames={
+ "en": "MasterFamilyName",
+ "fr": "Montserrat",
+ "ja": "モンセラート",
+ },
+ styleNames={
+ "en": "Wide Bold Italic",
+ },
+ postScriptFontName="MasterFamilyName-WideBoldItalic",
+ styleMapFamilyNames={
+ "en": "MasterFamilyName Wide",
+ "fr": "Montserrat Wide",
+ "ja": "モンセラート Wide",
+ },
+ styleMapStyleName="bold italic",
+ )
+
+
def test_getStatNames_on_ds4_doesnt_make_up_bad_names(datadir):
"""See this issue on GitHub: https://github.com/googlefonts/ufo2ft/issues/630
diff --git a/Tests/encodings/codecs_test.py b/Tests/encodings/codecs_test.py
index 9dac416a..64237563 100644
--- a/Tests/encodings/codecs_test.py
+++ b/Tests/encodings/codecs_test.py
@@ -1,24 +1,30 @@
import unittest
-import fontTools.encodings.codecs # Not to be confused with "import codecs"
+import fontTools.encodings.codecs # Not to be confused with "import codecs"
+
class ExtendedCodecsTest(unittest.TestCase):
+ def test_decode_mac_japanese(self):
+ self.assertEqual(
+ b"x\xfe\xfdy".decode("x_mac_japanese_ttx"),
+ chr(0x78) + chr(0x2122) + chr(0x00A9) + chr(0x79),
+ )
+
+ def test_encode_mac_japanese(self):
+ self.assertEqual(
+ b"x\xfe\xfdy",
+ (chr(0x78) + chr(0x2122) + chr(0x00A9) + chr(0x79)).encode(
+ "x_mac_japanese_ttx"
+ ),
+ )
- def test_decode_mac_japanese(self):
- self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"),
- chr(0x78)+chr(0x2122)+chr(0x00A9)+chr(0x79))
+ def test_decode_mac_trad_chinese(self):
+ self.assertEqual(b"\x80".decode("x_mac_trad_chinese_ttx"), chr(0x5C))
- def test_encode_mac_japanese(self):
- self.assertEqual(b'x\xfe\xfdy',
- (chr(0x78)+chr(0x2122)+chr(0x00A9)+chr(0x79)).encode("x_mac_japanese_ttx"))
+ def test_decode_mac_romanian(self):
+ self.assertEqual(b"x\xfb".decode("mac_romanian"), chr(0x78) + chr(0x02DA))
- def test_decode_mac_trad_chinese(self):
- self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"),
- chr(0x5C))
- def test_decode_mac_romanian(self):
- self.assertEqual(b'x\xfb'.decode("mac_romanian"),
- chr(0x78)+chr(0x02DA))
+if __name__ == "__main__":
+ import sys
-if __name__ == '__main__':
- import sys
- sys.exit(unittest.main())
+ sys.exit(unittest.main())
diff --git a/Tests/feaLib/ast_test.py b/Tests/feaLib/ast_test.py
index 4462f052..ebae3dab 100644
--- a/Tests/feaLib/ast_test.py
+++ b/Tests/feaLib/ast_test.py
@@ -22,4 +22,5 @@ class AstTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/feaLib/builder_test.py b/Tests/feaLib/builder_test.py
index 5c298e85..adcb058f 100644
--- a/Tests/feaLib/builder_test.py
+++ b/Tests/feaLib/builder_test.py
@@ -19,6 +19,7 @@ import sys
import tempfile
import logging
import unittest
+import warnings
def makeTTFont():
@@ -69,7 +70,7 @@ class BuilderTest(unittest.TestCase):
spec9a spec9b spec9c1 spec9c2 spec9c3 spec9d spec9e spec9f spec9g
spec10
bug453 bug457 bug463 bug501 bug502 bug504 bug505 bug506 bug509
- bug512 bug514 bug568 bug633 bug1307 bug1459 bug2276
+ bug512 bug514 bug568 bug633 bug1307 bug1459 bug2276 variable_bug2772
name size size2 multiple_feature_blocks omitted_GlyphClassDef
ZeroValue_SinglePos_horizontal ZeroValue_SinglePos_vertical
ZeroValue_PairPos_horizontal ZeroValue_PairPos_vertical
@@ -225,6 +226,21 @@ class BuilderTest(unittest.TestCase):
output.append(l)
return output
+ def make_mock_vf(self):
+ font = makeTTFont()
+ font["name"] = newTable("name")
+ addFvar(font, self.VARFONT_AXES, [])
+ del font["name"]
+ return font
+
+ @staticmethod
+ def get_region(var_region_axis):
+ return (
+ var_region_axis.StartCoord,
+ var_region_axis.PeakCoord,
+ var_region_axis.EndCoord,
+ )
+
def test_alternateSubst_multipleSubstitutionsForSameGlyph(self):
self.assertRaisesRegex(
FeatureLibError,
@@ -330,12 +346,10 @@ class BuilderTest(unittest.TestCase):
)
def test_feature_undefinedReference(self):
- self.assertRaisesRegex(
- FeatureLibError,
- "Feature none has not been defined",
- self.build,
- "feature aalt { feature none; } aalt;",
- )
+ with warnings.catch_warnings(record=True) as w:
+ self.build("feature aalt { feature none; } aalt;")
+ assert len(w) == 1
+ assert "Feature none has not been defined" in str(w[0].message)
def test_GlyphClassDef_conflictingClasses(self):
self.assertRaisesRegex(
@@ -954,15 +968,153 @@ class BuilderTest(unittest.TestCase):
FeatureLibError,
"Empty glyph class in mark class definition",
self.build,
- "markClass [] <anchor 150 -10> @TOPMARKS;"
+ "markClass [] <anchor 150 -10> @TOPMARKS;",
)
self.assertRaisesRegex(
FeatureLibError,
'Expected a glyph class with 1 elements after "by", but found a glyph class with 0 elements',
self.build,
- "feature test { sub a by []; test};"
+ "feature test { sub a by []; test};",
)
+ def test_unmarked_ignore_statement(self):
+ name = "bug2949"
+ logger = logging.getLogger("fontTools.feaLib.parser")
+ with CapturingLogHandler(logger, level="WARNING") as captor:
+ self.check_feature_file(name)
+ self.check_fea2fea_file(name)
+
+ for line, sub in {(3, "sub"), (8, "pos"), (13, "sub")}:
+ captor.assertRegex(
+ f'{name}.fea:{line}:12: Ambiguous "ignore {sub}", there should be least one marked glyph'
+ )
+
+ def test_condition_set_avar(self):
+ """Test that the `avar` table is consulted when normalizing user-space
+ values."""
+
+ features = """
+ languagesystem DFLT dflt;
+
+ lookup conditional_sub {
+ sub e by a;
+ } conditional_sub;
+
+ conditionset test {
+ wght 600 1000;
+ wdth 150 200;
+ } test;
+
+ variation rlig test {
+ lookup conditional_sub;
+ } rlig;
+ """
+
+ def make_mock_vf():
+ font = makeTTFont()
+ font["name"] = newTable("name")
+ addFvar(
+ font,
+ [("wght", 0, 0, 1000, "Weight"), ("wdth", 100, 100, 200, "Width")],
+ [],
+ )
+ del font["name"]
+ return font
+
+ # Without `avar`:
+ font = make_mock_vf()
+ addOpenTypeFeaturesFromString(font, features)
+ condition_table = (
+ font.tables["GSUB"]
+ .table.FeatureVariations.FeatureVariationRecord[0]
+ .ConditionSet.ConditionTable
+ )
+ # user-space wdth=150 and wght=600:
+ assert condition_table[0].FilterRangeMinValue == 0.5
+ assert condition_table[1].FilterRangeMinValue == 0.6
+
+ # With `avar`, shifting the wght axis' positive midpoint 0.5 a bit to
+ # the right, but leaving the wdth axis alone:
+ font = make_mock_vf()
+ font["avar"] = newTable("avar")
+ font["avar"].segments = {"wght": {-1.0: -1.0, 0.0: 0.0, 0.5: 0.625, 1.0: 1.0}}
+ addOpenTypeFeaturesFromString(font, features)
+ condition_table = (
+ font.tables["GSUB"]
+ .table.FeatureVariations.FeatureVariationRecord[0]
+ .ConditionSet.ConditionTable
+ )
+ # user-space wdth=150 as before and wght=600 shifted to the right:
+ assert condition_table[0].FilterRangeMinValue == 0.5
+ assert condition_table[1].FilterRangeMinValue == 0.7
+
+ def test_variable_scalar_avar(self):
+ """Test that the `avar` table is consulted when normalizing user-space
+ values."""
+
+ features = """
+ languagesystem DFLT dflt;
+
+ feature kern {
+ pos cursive one <anchor 0 (wght=200:12 wght=900:22 wdth=150,wght=900:42)> <anchor NULL>;
+ pos two <0 (wght=200:12 wght=900:22 wdth=150,wght=900:42) 0 0>;
+ } kern;
+ """
+
+ # Without `avar` (wght=200, wdth=100 is the default location):
+ font = self.make_mock_vf()
+ addOpenTypeFeaturesFromString(font, features)
+
+ var_region_list = font.tables["GDEF"].table.VarStore.VarRegionList
+ var_region_axis_wght = var_region_list.Region[0].VarRegionAxis[0]
+ var_region_axis_wdth = var_region_list.Region[0].VarRegionAxis[1]
+ assert self.get_region(var_region_axis_wght) == (0.0, 0.875, 0.875)
+ assert self.get_region(var_region_axis_wdth) == (0.0, 0.0, 0.0)
+ var_region_axis_wght = var_region_list.Region[1].VarRegionAxis[0]
+ var_region_axis_wdth = var_region_list.Region[1].VarRegionAxis[1]
+ assert self.get_region(var_region_axis_wght) == (0.0, 0.875, 0.875)
+ assert self.get_region(var_region_axis_wdth) == (0.0, 0.5, 0.5)
+
+ # With `avar`, shifting the wght axis' positive midpoint 0.5 a bit to
+ # the right, but leaving the wdth axis alone:
+ font = self.make_mock_vf()
+ font["avar"] = newTable("avar")
+ font["avar"].segments = {"wght": {-1.0: -1.0, 0.0: 0.0, 0.5: 0.625, 1.0: 1.0}}
+ addOpenTypeFeaturesFromString(font, features)
+
+ var_region_list = font.tables["GDEF"].table.VarStore.VarRegionList
+ var_region_axis_wght = var_region_list.Region[0].VarRegionAxis[0]
+ var_region_axis_wdth = var_region_list.Region[0].VarRegionAxis[1]
+ assert self.get_region(var_region_axis_wght) == (0.0, 0.90625, 0.90625)
+ assert self.get_region(var_region_axis_wdth) == (0.0, 0.0, 0.0)
+ var_region_axis_wght = var_region_list.Region[1].VarRegionAxis[0]
+ var_region_axis_wdth = var_region_list.Region[1].VarRegionAxis[1]
+ assert self.get_region(var_region_axis_wght) == (0.0, 0.90625, 0.90625)
+ assert self.get_region(var_region_axis_wdth) == (0.0, 0.5, 0.5)
+
+ def test_ligatureCaretByPos_variable_scalar(self):
+ """Test that the `avar` table is consulted when normalizing user-space
+ values."""
+
+ features = """
+ table GDEF {
+ LigatureCaretByPos f_i (wght=200:400 wght=900:1000) 380;
+ } GDEF;
+ """
+
+ font = self.make_mock_vf()
+ addOpenTypeFeaturesFromString(font, features)
+
+ table = font["GDEF"].table
+ lig_glyph = table.LigCaretList.LigGlyph[0]
+ assert lig_glyph.CaretValue[0].Format == 1
+ assert lig_glyph.CaretValue[0].Coordinate == 380
+ assert lig_glyph.CaretValue[1].Format == 3
+ assert lig_glyph.CaretValue[1].Coordinate == 400
+
+ var_region_list = table.VarStore.VarRegionList
+ var_region_axis = var_region_list.Region[0].VarRegionAxis[0]
+ assert self.get_region(var_region_axis) == (0.0, 0.875, 0.875)
def generate_feature_file_test(name):
diff --git a/Tests/feaLib/data/GPOS_1_zero.ttx b/Tests/feaLib/data/GPOS_1_zero.ttx
index b02db67c..e3162c5d 100644
--- a/Tests/feaLib/data/GPOS_1_zero.ttx
+++ b/Tests/feaLib/data/GPOS_1_zero.ttx
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<ttFont>
-
+
<GPOS>
<Version value="0x00010000"/>
<ScriptList>
diff --git a/Tests/feaLib/data/GSUB_2.fea b/Tests/feaLib/data/GSUB_2.fea
index d2a3cb10..21db452b 100644
--- a/Tests/feaLib/data/GSUB_2.fea
+++ b/Tests/feaLib/data/GSUB_2.fea
@@ -12,3 +12,24 @@ feature f2 {
sub f_i by f i;
sub f_f_i by f f i;
} f2;
+
+feature f3 {
+ sub [f_i f_l f_f_i f_f_l] by f [i l f_i f_l];
+} f3;
+
+feature f4 {
+ sub [f_i f_l f_f_i f_f_l] by [f f f_f f_f] [i l i l];
+} f4;
+
+@class = [f_i f_l];
+lookup l1 {
+ sub @class by f [i l];
+} l1;
+
+feature f5 {
+ sub @class' lookup l1 [i l];
+} f5;
+
+feature f6 {
+ sub [f_i f_i]' j by [f f] [i i];
+} f6;
diff --git a/Tests/feaLib/data/GSUB_2.ttx b/Tests/feaLib/data/GSUB_2.ttx
index b91c20fe..fb87a059 100644
--- a/Tests/feaLib/data/GSUB_2.ttx
+++ b/Tests/feaLib/data/GSUB_2.ttx
@@ -10,16 +10,20 @@
<Script>
<DefaultLangSys>
<ReqFeatureIndex value="65535"/>
- <!-- FeatureCount=2 -->
+ <!-- FeatureCount=6 -->
<FeatureIndex index="0" value="0"/>
<FeatureIndex index="1" value="1"/>
+ <FeatureIndex index="2" value="2"/>
+ <FeatureIndex index="3" value="3"/>
+ <FeatureIndex index="4" value="4"/>
+ <FeatureIndex index="5" value="5"/>
</DefaultLangSys>
<!-- LangSysCount=0 -->
</Script>
</ScriptRecord>
</ScriptList>
<FeatureList>
- <!-- FeatureCount=2 -->
+ <!-- FeatureCount=6 -->
<FeatureRecord index="0">
<FeatureTag value="f1 "/>
<Feature>
@@ -34,9 +38,37 @@
<LookupListIndex index="0" value="1"/>
</Feature>
</FeatureRecord>
+ <FeatureRecord index="2">
+ <FeatureTag value="f3 "/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="2"/>
+ </Feature>
+ </FeatureRecord>
+ <FeatureRecord index="3">
+ <FeatureTag value="f4 "/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="3"/>
+ </Feature>
+ </FeatureRecord>
+ <FeatureRecord index="4">
+ <FeatureTag value="f5 "/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="5"/>
+ </Feature>
+ </FeatureRecord>
+ <FeatureRecord index="5">
+ <FeatureTag value="f6 "/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="6"/>
+ </Feature>
+ </FeatureRecord>
</FeatureList>
<LookupList>
- <!-- LookupCount=2 -->
+ <!-- LookupCount=8 -->
<Lookup index="0">
<LookupType value="2"/>
<LookupFlag value="0"/>
@@ -57,6 +89,89 @@
<Substitution in="f_i" out="f,i"/>
</MultipleSubst>
</Lookup>
+ <Lookup index="2">
+ <LookupType value="2"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <MultipleSubst index="0">
+ <Substitution in="f_f_i" out="f,f_i"/>
+ <Substitution in="f_f_l" out="f,f_l"/>
+ <Substitution in="f_i" out="f,i"/>
+ <Substitution in="f_l" out="f,l"/>
+ </MultipleSubst>
+ </Lookup>
+ <Lookup index="3">
+ <LookupType value="2"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <MultipleSubst index="0">
+ <Substitution in="f_f_i" out="f_f,i"/>
+ <Substitution in="f_f_l" out="f_f,l"/>
+ <Substitution in="f_i" out="f,i"/>
+ <Substitution in="f_l" out="f,l"/>
+ </MultipleSubst>
+ </Lookup>
+ <Lookup index="4">
+ <LookupType value="2"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <MultipleSubst index="0">
+ <Substitution in="f_i" out="f,i"/>
+ <Substitution in="f_l" out="f,l"/>
+ </MultipleSubst>
+ </Lookup>
+ <Lookup index="5">
+ <LookupType value="6"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <ChainContextSubst index="0" Format="3">
+ <!-- BacktrackGlyphCount=0 -->
+ <!-- InputGlyphCount=1 -->
+ <InputCoverage index="0">
+ <Glyph value="f_l"/>
+ <Glyph value="f_i"/>
+ </InputCoverage>
+ <!-- LookAheadGlyphCount=1 -->
+ <LookAheadCoverage index="0">
+ <Glyph value="i"/>
+ <Glyph value="l"/>
+ </LookAheadCoverage>
+ <!-- SubstCount=1 -->
+ <SubstLookupRecord index="0">
+ <SequenceIndex value="0"/>
+ <LookupListIndex value="4"/>
+ </SubstLookupRecord>
+ </ChainContextSubst>
+ </Lookup>
+ <Lookup index="6">
+ <LookupType value="6"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <ChainContextSubst index="0" Format="3">
+ <!-- BacktrackGlyphCount=0 -->
+ <!-- InputGlyphCount=1 -->
+ <InputCoverage index="0">
+ <Glyph value="f_i"/>
+ </InputCoverage>
+ <!-- LookAheadGlyphCount=1 -->
+ <LookAheadCoverage index="0">
+ <Glyph value="j"/>
+ </LookAheadCoverage>
+ <!-- SubstCount=1 -->
+ <SubstLookupRecord index="0">
+ <SequenceIndex value="0"/>
+ <LookupListIndex value="7"/>
+ </SubstLookupRecord>
+ </ChainContextSubst>
+ </Lookup>
+ <Lookup index="7">
+ <LookupType value="2"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <MultipleSubst index="0">
+ <Substitution in="f_i" out="f,i"/>
+ </MultipleSubst>
+ </Lookup>
</LookupList>
</GSUB>
diff --git a/Tests/feaLib/data/GSUB_5_formats.fea b/Tests/feaLib/data/GSUB_5_formats.fea
index 3acb7edf..e9b3ba8b 100644
--- a/Tests/feaLib/data/GSUB_5_formats.fea
+++ b/Tests/feaLib/data/GSUB_5_formats.fea
@@ -1,16 +1,16 @@
lookup GSUB5f1 {
- ignore sub three four;
- ignore sub four five;
+ ignore sub three' four';
+ ignore sub four' five';
} GSUB5f1;
lookup GSUB5f2 {
- ignore sub [a - z] [A - H] [I - Z];
- ignore sub [a - z] [A - H] [I - Z];
- ignore sub [a - z] [I - Z] [A - H];
+ ignore sub [a - z]' [A - H]' [I - Z]';
+ ignore sub [a - z]' [A - H]' [I - Z]';
+ ignore sub [a - z]' [I - Z]' [A - H]';
} GSUB5f2;
lookup GSUB5f3 {
- ignore sub e;
+ ignore sub e';
} GSUB5f3;
feature test {
diff --git a/Tests/feaLib/data/PairPosSubtable.ttx b/Tests/feaLib/data/PairPosSubtable.ttx
index 2d78f64f..d671537e 100644
--- a/Tests/feaLib/data/PairPosSubtable.ttx
+++ b/Tests/feaLib/data/PairPosSubtable.ttx
@@ -93,14 +93,14 @@
<ValueFormat1 value="4"/>
<ValueFormat2 value="0"/>
<ClassDef1>
- <ClassDef glyph="b" class="1"/>
- <ClassDef glyph="o" class="1"/>
- </ClassDef1>
- <ClassDef2>
- <ClassDef glyph="c" class="2"/>
- <ClassDef glyph="d" class="2"/>
<ClassDef glyph="v" class="1"/>
<ClassDef glyph="w" class="1"/>
+ </ClassDef1>
+ <ClassDef2>
+ <ClassDef glyph="c" class="1"/>
+ <ClassDef glyph="d" class="1"/>
+ <ClassDef glyph="v" class="2"/>
+ <ClassDef glyph="w" class="2"/>
</ClassDef2>
<!-- Class1Count=2 -->
<!-- Class2Count=3 -->
@@ -112,7 +112,7 @@
<Value1 XAdvance="0"/>
</Class2Record>
<Class2Record index="2">
- <Value1 XAdvance="-20"/>
+ <Value1 XAdvance="-10"/>
</Class2Record>
</Class1Record>
<Class1Record index="1">
@@ -120,7 +120,7 @@
<Value1 XAdvance="0"/>
</Class2Record>
<Class2Record index="1">
- <Value1 XAdvance="-10"/>
+ <Value1 XAdvance="-20"/>
</Class2Record>
<Class2Record index="2">
<Value1 XAdvance="0"/>
diff --git a/Tests/feaLib/data/STAT_test.ttx b/Tests/feaLib/data/STAT_test.ttx
index d1b2b697..bab9b8ea 100644
--- a/Tests/feaLib/data/STAT_test.ttx
+++ b/Tests/feaLib/data/STAT_test.ttx
@@ -8,9 +8,6 @@
<namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
Roman
</namerecord>
- <namerecord nameID="256" platformID="3" platEncID="1" langID="0x411">
- ローマン
- </namerecord>
<namerecord nameID="257" platformID="3" platEncID="1" langID="0x409">
Optical Size
</namerecord>
@@ -68,6 +65,9 @@
<namerecord nameID="275" platformID="3" platEncID="1" langID="0x409">
Caption
</namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x411">
+ ローマン
+ </namerecord>
</name>
<STAT>
diff --git a/Tests/feaLib/data/bug2949.fea b/Tests/feaLib/data/bug2949.fea
new file mode 100644
index 00000000..cd0b7a86
--- /dev/null
+++ b/Tests/feaLib/data/bug2949.fea
@@ -0,0 +1,20 @@
+lookup lookup1 {
+#test-fea2fea: ignore sub three' four;
+ ignore sub three four;
+} lookup1;
+
+lookup lookup2 {
+#test-fea2fea: ignore pos three' four;
+ ignore pos three four;
+} lookup2;
+
+lookup lookup3 {
+#test-fea2fea: ignore sub one' two, three' four;
+ ignore sub one two, three four;
+} lookup3;
+
+feature test {
+ lookup lookup1;
+ lookup lookup2;
+ lookup lookup3;
+} test;
diff --git a/Tests/feaLib/data/bug2949.ttx b/Tests/feaLib/data/bug2949.ttx
new file mode 100644
index 00000000..8ad9ccb4
--- /dev/null
+++ b/Tests/feaLib/data/bug2949.ttx
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont>
+
+ <GSUB>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=1 -->
+ <FeatureIndex index="0" value="0"/>
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=1 -->
+ <FeatureRecord index="0">
+ <FeatureTag value="test"/>
+ <Feature>
+ <!-- LookupCount=2 -->
+ <LookupListIndex index="0" value="0"/>
+ <LookupListIndex index="1" value="1"/>
+ </Feature>
+ </FeatureRecord>
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=2 -->
+ <Lookup index="0">
+ <LookupType value="6"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <ChainContextSubst index="0" Format="3">
+ <!-- BacktrackGlyphCount=0 -->
+ <!-- InputGlyphCount=1 -->
+ <InputCoverage index="0">
+ <Glyph value="three"/>
+ </InputCoverage>
+ <!-- LookAheadGlyphCount=1 -->
+ <LookAheadCoverage index="0">
+ <Glyph value="four"/>
+ </LookAheadCoverage>
+ <!-- SubstCount=0 -->
+ </ChainContextSubst>
+ </Lookup>
+ <Lookup index="1">
+ <LookupType value="6"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <ChainContextSubst index="0" Format="1">
+ <Coverage>
+ <Glyph value="one"/>
+ <Glyph value="three"/>
+ </Coverage>
+ <!-- ChainSubRuleSetCount=2 -->
+ <ChainSubRuleSet index="0">
+ <!-- ChainSubRuleCount=1 -->
+ <ChainSubRule index="0">
+ <!-- BacktrackGlyphCount=0 -->
+ <!-- InputGlyphCount=1 -->
+ <!-- LookAheadGlyphCount=1 -->
+ <LookAhead index="0" value="two"/>
+ <!-- SubstCount=0 -->
+ </ChainSubRule>
+ </ChainSubRuleSet>
+ <ChainSubRuleSet index="1">
+ <!-- ChainSubRuleCount=1 -->
+ <ChainSubRule index="0">
+ <!-- BacktrackGlyphCount=0 -->
+ <!-- InputGlyphCount=1 -->
+ <!-- LookAheadGlyphCount=1 -->
+ <LookAhead index="0" value="four"/>
+ <!-- SubstCount=0 -->
+ </ChainSubRule>
+ </ChainSubRuleSet>
+ </ChainContextSubst>
+ </Lookup>
+ </LookupList>
+ </GSUB>
+
+ <GPOS>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=1 -->
+ <FeatureIndex index="0" value="0"/>
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=1 -->
+ <FeatureRecord index="0">
+ <FeatureTag value="test"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="0"/>
+ </Feature>
+ </FeatureRecord>
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=1 -->
+ <Lookup index="0">
+ <LookupType value="8"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <ChainContextPos index="0" Format="3">
+ <!-- BacktrackGlyphCount=0 -->
+ <!-- InputGlyphCount=1 -->
+ <InputCoverage index="0">
+ <Glyph value="three"/>
+ </InputCoverage>
+ <!-- LookAheadGlyphCount=1 -->
+ <LookAheadCoverage index="0">
+ <Glyph value="four"/>
+ </LookAheadCoverage>
+ <!-- PosCount=0 -->
+ </ChainContextPos>
+ </Lookup>
+ </LookupList>
+ </GPOS>
+
+</ttFont>
diff --git a/Tests/feaLib/data/bug509.fea b/Tests/feaLib/data/bug509.fea
index b7af056f..488e769a 100644
--- a/Tests/feaLib/data/bug509.fea
+++ b/Tests/feaLib/data/bug509.fea
@@ -1,5 +1,5 @@
@LETTER_A = [A A.sc A.alt1];
feature test {
- ignore sub A;
+ ignore sub A';
sub @LETTER_A' by a;
} test;
diff --git a/Tests/feaLib/data/bug512.ttx b/Tests/feaLib/data/bug512.ttx
index 693ebeb7..4ad3af2f 100644
--- a/Tests/feaLib/data/bug512.ttx
+++ b/Tests/feaLib/data/bug512.ttx
@@ -73,7 +73,7 @@
<!-- SubstCount=1 -->
<SubstLookupRecord index="0">
<SequenceIndex value="0"/>
- <LookupListIndex value="2"/>
+ <LookupListIndex value="1"/>
</SubstLookupRecord>
</SubRule>
</SubRuleSet>
@@ -94,7 +94,6 @@
<!-- SubTableCount=1 -->
<SingleSubst index="0">
<Substitution in="G" out="g"/>
- <Substitution in="H" out="H.swash"/>
</SingleSubst>
</Lookup>
</LookupList>
diff --git a/Tests/feaLib/data/bug633.ttx b/Tests/feaLib/data/bug633.ttx
index 075c1777..8be745cd 100644
--- a/Tests/feaLib/data/bug633.ttx
+++ b/Tests/feaLib/data/bug633.ttx
@@ -43,10 +43,10 @@
<ClassDef1>
</ClassDef1>
<ClassDef2>
- <ClassDef glyph="C" class="2"/>
- <ClassDef glyph="O" class="2"/>
- <ClassDef glyph="V" class="1"/>
- <ClassDef glyph="W" class="1"/>
+ <ClassDef glyph="C" class="1"/>
+ <ClassDef glyph="O" class="1"/>
+ <ClassDef glyph="V" class="2"/>
+ <ClassDef glyph="W" class="2"/>
</ClassDef2>
<!-- Class1Count=1 -->
<!-- Class2Count=3 -->
@@ -55,10 +55,10 @@
<Value1 XAdvance="0"/>
</Class2Record>
<Class2Record index="1">
- <Value1 XAdvance="0"/>
+ <Value1 XAdvance="-20"/>
</Class2Record>
<Class2Record index="2">
- <Value1 XAdvance="-20"/>
+ <Value1 XAdvance="0"/>
</Class2Record>
</Class1Record>
</PairPos>
diff --git a/Tests/feaLib/data/name.ttx b/Tests/feaLib/data/name.ttx
index 5014b251..51ecf324 100644
--- a/Tests/feaLib/data/name.ttx
+++ b/Tests/feaLib/data/name.ttx
@@ -2,6 +2,15 @@
<ttFont>
<name>
+ <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ Test8
+ </namerecord>
+ <namerecord nameID="10" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ Test10
+ </namerecord>
+ <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ Test11
+ </namerecord>
<namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
Test1
</namerecord>
@@ -23,18 +32,9 @@
<namerecord nameID="7" platformID="3" platEncID="1" langID="0x409">
Test7
</namerecord>
- <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True">
- Test8
- </namerecord>
<namerecord nameID="9" platformID="3" platEncID="1" langID="0x409">
Test9
</namerecord>
- <namerecord nameID="10" platformID="1" platEncID="0" langID="0x0" unicode="True">
- Test10
- </namerecord>
- <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True">
- Test11
- </namerecord>
</name>
</ttFont>
diff --git a/Tests/feaLib/data/spec5f_ii_3.ttx b/Tests/feaLib/data/spec5f_ii_3.ttx
index a94efcea..c03a81fb 100644
--- a/Tests/feaLib/data/spec5f_ii_3.ttx
+++ b/Tests/feaLib/data/spec5f_ii_3.ttx
@@ -66,9 +66,9 @@
<ClassDef glyph="z" class="1"/>
</BacktrackClassDef>
<InputClassDef>
- <ClassDef glyph="a" class="3"/>
+ <ClassDef glyph="a" class="1"/>
<ClassDef glyph="d" class="2"/>
- <ClassDef glyph="n" class="1"/>
+ <ClassDef glyph="n" class="3"/>
</InputClassDef>
<LookAheadClassDef>
<ClassDef glyph="a" class="1"/>
@@ -103,18 +103,12 @@
<!-- ChainSubClassRuleCount=0 -->
</ChainSubClassSet>
<ChainSubClassSet index="1">
- <!-- ChainSubClassRuleCount=0 -->
- </ChainSubClassSet>
- <ChainSubClassSet index="2">
- <!-- ChainSubClassRuleCount=0 -->
- </ChainSubClassSet>
- <ChainSubClassSet index="3">
<!-- ChainSubClassRuleCount=3 -->
<ChainSubClassRule index="0">
<!-- BacktrackGlyphCount=1 -->
<Backtrack index="0" value="1"/>
<!-- InputGlyphCount=3 -->
- <Input index="0" value="1"/>
+ <Input index="0" value="3"/>
<Input index="1" value="2"/>
<!-- LookAheadGlyphCount=0 -->
<!-- SubstCount=0 -->
@@ -122,7 +116,7 @@
<ChainSubClassRule index="1">
<!-- BacktrackGlyphCount=0 -->
<!-- InputGlyphCount=3 -->
- <Input index="0" value="1"/>
+ <Input index="0" value="3"/>
<Input index="1" value="2"/>
<!-- LookAheadGlyphCount=1 -->
<LookAhead index="0" value="1"/>
@@ -131,7 +125,7 @@
<ChainSubClassRule index="2">
<!-- BacktrackGlyphCount=0 -->
<!-- InputGlyphCount=3 -->
- <Input index="0" value="1"/>
+ <Input index="0" value="3"/>
<Input index="1" value="2"/>
<!-- LookAheadGlyphCount=0 -->
<!-- SubstCount=1 -->
@@ -141,6 +135,12 @@
</SubstLookupRecord>
</ChainSubClassRule>
</ChainSubClassSet>
+ <ChainSubClassSet index="2">
+ <!-- ChainSubClassRuleCount=0 -->
+ </ChainSubClassSet>
+ <ChainSubClassSet index="3">
+ <!-- ChainSubClassRuleCount=0 -->
+ </ChainSubClassSet>
</ChainContextSubst>
</Lookup>
<Lookup index="1">
diff --git a/Tests/feaLib/data/spec8b.ttx b/Tests/feaLib/data/spec8b.ttx
index 6e66c16b..5c8cba27 100644
--- a/Tests/feaLib/data/spec8b.ttx
+++ b/Tests/feaLib/data/spec8b.ttx
@@ -2,15 +2,15 @@
<ttFont>
<name>
- <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
- Win MinionPro Size Name
- </namerecord>
<namerecord nameID="256" platformID="1" platEncID="0" langID="0x0" unicode="True">
Mac MinionPro Size Name
</namerecord>
<namerecord nameID="256" platformID="1" platEncID="0" langID="0x5" unicode="True">
Mac MinionPro Size Name
</namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
+ Win MinionPro Size Name
+ </namerecord>
</name>
<GPOS>
@@ -37,7 +37,7 @@
<FeatureParamsSize>
<DesignSize value="10.0"/>
<SubfamilyID value="3"/>
- <SubfamilyNameID value="256"/> <!-- Win MinionPro Size Name -->
+ <SubfamilyNameID value="256"/> <!-- Mac MinionPro Size Name -->
<RangeStart value="8.0"/>
<RangeEnd value="13.9"/>
</FeatureParamsSize>
diff --git a/Tests/feaLib/data/spec8c.ttx b/Tests/feaLib/data/spec8c.ttx
index a5b55176..f17898db 100644
--- a/Tests/feaLib/data/spec8c.ttx
+++ b/Tests/feaLib/data/spec8c.ttx
@@ -2,18 +2,18 @@
<ttFont>
<name>
- <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
- Feature description for MS Platform, script Unicode, language English
- </namerecord>
- <namerecord nameID="256" platformID="3" platEncID="1" langID="0x411">
- Feature description for MS Platform, script Unicode, language Japanese
- </namerecord>
<namerecord nameID="256" platformID="1" platEncID="0" langID="0x0" unicode="True">
Feature description for Apple Platform, script Roman, language unspecified
</namerecord>
<namerecord nameID="256" platformID="1" platEncID="1" langID="0xc" unicode="True">
Feature description for Apple Platform, script Japanese, language Japanese
</namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
+ Feature description for MS Platform, script Unicode, language English
+ </namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x411">
+ Feature description for MS Platform, script Unicode, language Japanese
+ </namerecord>
</name>
<GSUB>
@@ -39,7 +39,7 @@
<Feature>
<FeatureParamsStylisticSet>
<Version value="0"/>
- <UINameID value="256"/> <!-- Feature description for MS Platform, script Unicode, language English -->
+ <UINameID value="256"/> <!-- Feature description for Apple Platform, script Roman, language unspecified -->
</FeatureParamsStylisticSet>
<!-- LookupCount=1 -->
<LookupListIndex index="0" value="0"/>
diff --git a/Tests/feaLib/data/spec8d.ttx b/Tests/feaLib/data/spec8d.ttx
index 9848a691..5ff20ef7 100644
--- a/Tests/feaLib/data/spec8d.ttx
+++ b/Tests/feaLib/data/spec8d.ttx
@@ -2,34 +2,34 @@
<ttFont>
<name>
- <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
- uilabel simple a
- </namerecord>
<namerecord nameID="256" platformID="1" platEncID="0" langID="0x0" unicode="True">
uilabel simple a
</namerecord>
- <namerecord nameID="257" platformID="3" platEncID="1" langID="0x409">
- tool tip simple a
- </namerecord>
<namerecord nameID="257" platformID="1" platEncID="0" langID="0x0" unicode="True">
tool tip simple a
</namerecord>
- <namerecord nameID="258" platformID="3" platEncID="1" langID="0x409">
- sample text simple a
- </namerecord>
<namerecord nameID="258" platformID="1" platEncID="0" langID="0x0" unicode="True">
sample text simple a
</namerecord>
- <namerecord nameID="259" platformID="3" platEncID="1" langID="0x409">
- param1 text simple a
- </namerecord>
<namerecord nameID="259" platformID="1" platEncID="0" langID="0x0" unicode="True">
param1 text simple a
</namerecord>
- <namerecord nameID="260" platformID="3" platEncID="1" langID="0x409">
+ <namerecord nameID="260" platformID="1" platEncID="0" langID="0x0" unicode="True">
param2 text simple a
</namerecord>
- <namerecord nameID="260" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
+ uilabel simple a
+ </namerecord>
+ <namerecord nameID="257" platformID="3" platEncID="1" langID="0x409">
+ tool tip simple a
+ </namerecord>
+ <namerecord nameID="258" platformID="3" platEncID="1" langID="0x409">
+ sample text simple a
+ </namerecord>
+ <namerecord nameID="259" platformID="3" platEncID="1" langID="0x409">
+ param1 text simple a
+ </namerecord>
+ <namerecord nameID="260" platformID="3" platEncID="1" langID="0x409">
param2 text simple a
</namerecord>
</name>
diff --git a/Tests/feaLib/data/spec9e.ttx b/Tests/feaLib/data/spec9e.ttx
index 5119a5fc..4c63b47a 100644
--- a/Tests/feaLib/data/spec9e.ttx
+++ b/Tests/feaLib/data/spec9e.ttx
@@ -2,10 +2,10 @@
<ttFont>
<name>
- <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409">
+ <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True">
Joachim Müller-Lancé
</namerecord>
- <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409">
Joachim Müller-Lancé
</namerecord>
</name>
diff --git a/Tests/feaLib/data/variable_bug2772.fea b/Tests/feaLib/data/variable_bug2772.fea
new file mode 100644
index 00000000..08326902
--- /dev/null
+++ b/Tests/feaLib/data/variable_bug2772.fea
@@ -0,0 +1,4 @@
+feature kern {
+ pos [p] g -5;
+ pos [p] y (wght=1000:-100 wght=200:0);
+} kern;
diff --git a/Tests/feaLib/data/variable_bug2772.ttx b/Tests/feaLib/data/variable_bug2772.ttx
new file mode 100644
index 00000000..12fda609
--- /dev/null
+++ b/Tests/feaLib/data/variable_bug2772.ttx
@@ -0,0 +1,103 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.37">
+
+ <GDEF>
+ <Version value="0x00010003"/>
+ <VarStore Format="1">
+ <Format value="1"/>
+ <VarRegionList>
+ <!-- RegionAxisCount=2 -->
+ <!-- RegionCount=1 -->
+ <Region index="0">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ <VarRegionAxis index="1">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="0.0"/>
+ <EndCoord value="0.0"/>
+ </VarRegionAxis>
+ </Region>
+ </VarRegionList>
+ <!-- VarDataCount=1 -->
+ <VarData index="0">
+ <!-- ItemCount=1 -->
+ <NumShorts value="0"/>
+ <!-- VarRegionCount=1 -->
+ <VarRegionIndex index="0" value="0"/>
+ <Item index="0" value="[-100]"/>
+ </VarData>
+ </VarStore>
+ </GDEF>
+
+ <GPOS>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=1 -->
+ <FeatureIndex index="0" value="0"/>
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=1 -->
+ <FeatureRecord index="0">
+ <FeatureTag value="kern"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="0"/>
+ </Feature>
+ </FeatureRecord>
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=1 -->
+ <Lookup index="0">
+ <LookupType value="2"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <PairPos index="0" Format="2">
+ <Coverage>
+ <Glyph value="p"/>
+ </Coverage>
+ <ValueFormat1 value="68"/>
+ <ValueFormat2 value="0"/>
+ <ClassDef1>
+ </ClassDef1>
+ <ClassDef2>
+ <ClassDef glyph="g" class="1"/>
+ <ClassDef glyph="y" class="2"/>
+ </ClassDef2>
+ <!-- Class1Count=1 -->
+ <!-- Class2Count=3 -->
+ <Class1Record index="0">
+ <Class2Record index="0">
+ <Value1 XAdvance="0"/>
+ </Class2Record>
+ <Class2Record index="1">
+ <Value1 XAdvance="-5"/>
+ </Class2Record>
+ <Class2Record index="2">
+ <Value1 XAdvance="0">
+ <XAdvDevice>
+ <StartSize value="0"/>
+ <EndSize value="0"/>
+ <DeltaFormat value="32768"/>
+ </XAdvDevice>
+ </Value1>
+ </Class2Record>
+ </Class1Record>
+ </PairPos>
+ </Lookup>
+ </LookupList>
+ </GPOS>
+
+</ttFont>
diff --git a/Tests/feaLib/data/variable_scalar_valuerecord.fea b/Tests/feaLib/data/variable_scalar_valuerecord.fea
index bf9a26b7..0b402654 100644
--- a/Tests/feaLib/data/variable_scalar_valuerecord.fea
+++ b/Tests/feaLib/data/variable_scalar_valuerecord.fea
@@ -2,4 +2,5 @@ languagesystem DFLT dflt;
feature kern {
pos one 1;
pos two <0 (wght=200:12 wght=900:22 wdth=150,wght=900:42) 0 0>;
+ pos three <0 (wght=200:12 wght=900:12 wdth=150,wght=900:12) 0 0>;
} kern;
diff --git a/Tests/feaLib/data/variable_scalar_valuerecord.ttx b/Tests/feaLib/data/variable_scalar_valuerecord.ttx
index 338b7221..e3251f69 100644
--- a/Tests/feaLib/data/variable_scalar_valuerecord.ttx
+++ b/Tests/feaLib/data/variable_scalar_valuerecord.ttx
@@ -76,7 +76,7 @@
<Lookup index="0">
<LookupType value="1"/>
<LookupFlag value="0"/>
- <!-- SubTableCount=2 -->
+ <!-- SubTableCount=3 -->
<SinglePos index="0" Format="1">
<Coverage>
<Glyph value="one"/>
@@ -97,6 +97,13 @@
</YPlaDevice>
</Value>
</SinglePos>
+ <SinglePos index="2" Format="1">
+ <Coverage>
+ <Glyph value="three"/>
+ </Coverage>
+ <ValueFormat value="2"/>
+ <Value YPlacement="12"/>
+ </SinglePos>
</Lookup>
</LookupList>
</GPOS>
diff --git a/Tests/feaLib/error_test.py b/Tests/feaLib/error_test.py
index 2ebb3e4c..2972b5f3 100644
--- a/Tests/feaLib/error_test.py
+++ b/Tests/feaLib/error_test.py
@@ -15,4 +15,5 @@ class FeatureLibErrorTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/feaLib/lexer_test.py b/Tests/feaLib/lexer_test.py
index 3df67f70..317a9a8c 100644
--- a/Tests/feaLib/lexer_test.py
+++ b/Tests/feaLib/lexer_test.py
@@ -39,68 +39,77 @@ class LexerTest(unittest.TestCase):
def test_glyphclass(self):
self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")])
self.assertEqual(lex("@Vowel-sc"), [(Lexer.GLYPHCLASS, "Vowel-sc")])
- self.assertRaisesRegex(FeatureLibError,
- "Expected glyph class", lex, "@(a)")
- self.assertRaisesRegex(FeatureLibError,
- "Expected glyph class", lex, "@ A")
- self.assertRaisesRegex(FeatureLibError,
- "not be longer than 63 characters",
- lex, "@" + ("A" * 64))
- self.assertRaisesRegex(FeatureLibError,
- "Glyph class names must consist of",
- lex, "@Ab:c")
+ self.assertRaisesRegex(FeatureLibError, "Expected glyph class", lex, "@(a)")
+ self.assertRaisesRegex(FeatureLibError, "Expected glyph class", lex, "@ A")
+ self.assertRaisesRegex(
+ FeatureLibError, "not be longer than 63 characters", lex, "@" + ("A" * 64)
+ )
+ self.assertRaisesRegex(
+ FeatureLibError, "Glyph class names must consist of", lex, "@Ab:c"
+ )
def test_include(self):
- self.assertEqual(lex("include (~/foo/bar baz.fea);"), [
- (Lexer.NAME, "include"),
- (Lexer.FILENAME, "~/foo/bar baz.fea"),
- (Lexer.SYMBOL, ";")
- ])
- self.assertEqual(lex("include # Comment\n (foo) \n;"), [
- (Lexer.NAME, "include"),
- (Lexer.COMMENT, "# Comment"),
- (Lexer.FILENAME, "foo"),
- (Lexer.SYMBOL, ";")
- ])
+ self.assertEqual(
+ lex("include (~/foo/bar baz.fea);"),
+ [
+ (Lexer.NAME, "include"),
+ (Lexer.FILENAME, "~/foo/bar baz.fea"),
+ (Lexer.SYMBOL, ";"),
+ ],
+ )
+ self.assertEqual(
+ lex("include # Comment\n (foo) \n;"),
+ [
+ (Lexer.NAME, "include"),
+ (Lexer.COMMENT, "# Comment"),
+ (Lexer.FILENAME, "foo"),
+ (Lexer.SYMBOL, ";"),
+ ],
+ )
self.assertRaises(FeatureLibError, lex, "include blah")
self.assertRaises(FeatureLibError, lex, "include (blah")
def test_number(self):
- self.assertEqual(lex("123 -456"),
- [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)])
+ self.assertEqual(lex("123 -456"), [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)])
self.assertEqual(lex("0xCAFED00D"), [(Lexer.HEXADECIMAL, 0xCAFED00D)])
self.assertEqual(lex("0xcafed00d"), [(Lexer.HEXADECIMAL, 0xCAFED00D)])
self.assertEqual(lex("010"), [(Lexer.OCTAL, 0o10)])
def test_float(self):
- self.assertEqual(lex("1.23 -4.5"),
- [(Lexer.FLOAT, 1.23), (Lexer.FLOAT, -4.5)])
+ self.assertEqual(lex("1.23 -4.5"), [(Lexer.FLOAT, 1.23), (Lexer.FLOAT, -4.5)])
def test_symbol(self):
self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")])
- self.assertEqual(lex("-A-B"),
- [(Lexer.SYMBOL, "-"), (Lexer.NAME, "A-B")])
+ self.assertEqual(lex("-A-B"), [(Lexer.SYMBOL, "-"), (Lexer.NAME, "A-B")])
self.assertEqual(
lex("foo - -2"),
- [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)])
+ [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)],
+ )
def test_comment(self):
- self.assertEqual(lex("# Comment\n#"),
- [(Lexer.COMMENT, "# Comment"), (Lexer.COMMENT, "#")])
+ self.assertEqual(
+ lex("# Comment\n#"), [(Lexer.COMMENT, "# Comment"), (Lexer.COMMENT, "#")]
+ )
def test_string(self):
- self.assertEqual(lex('"foo" "bar"'),
- [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")])
- self.assertEqual(lex('"foo \nbar\r baz \r\nqux\n\n "'),
- [(Lexer.STRING, "foo bar baz qux ")])
+ self.assertEqual(
+ lex('"foo" "bar"'), [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]
+ )
+ self.assertEqual(
+ lex('"foo \nbar\r baz \r\nqux\n\n "'), [(Lexer.STRING, "foo bar baz qux ")]
+ )
# The lexer should preserve escape sequences because they have
# different interpretations depending on context. For better
# or for worse, that is how the OpenType Feature File Syntax
# has been specified; see section 9.e (name table) for examples.
- self.assertEqual(lex(r'"M\00fcller-Lanc\00e9"'), # 'nameid 9'
- [(Lexer.STRING, r"M\00fcller-Lanc\00e9")])
- self.assertEqual(lex(r'"M\9fller-Lanc\8e"'), # 'nameid 9 1'
- [(Lexer.STRING, r"M\9fller-Lanc\8e")])
+ self.assertEqual(
+ lex(r'"M\00fcller-Lanc\00e9"'), # 'nameid 9'
+ [(Lexer.STRING, r"M\00fcller-Lanc\00e9")],
+ )
+ self.assertEqual(
+ lex(r'"M\9fller-Lanc\8e"'), # 'nameid 9 1'
+ [(Lexer.STRING, r"M\9fller-Lanc\8e")],
+ )
self.assertRaises(FeatureLibError, lex, '"foo\n bar')
def test_bad_character(self):
@@ -109,6 +118,7 @@ class LexerTest(unittest.TestCase):
def test_newline(self):
def lines(s):
return [loc.line for (_, _, loc) in Lexer(s, "test.fea")]
+
self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix
self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh
self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows
@@ -117,10 +127,17 @@ class LexerTest(unittest.TestCase):
def test_location(self):
def locs(s):
return [str(loc) for (_, _, loc) in Lexer(s, "test.fea")]
- self.assertEqual(locs("a b # Comment\n12 @x"), [
- "test.fea:1:1", "test.fea:1:3", "test.fea:1:5", "test.fea:2:1",
- "test.fea:2:4"
- ])
+
+ self.assertEqual(
+ locs("a b # Comment\n12 @x"),
+ [
+ "test.fea:1:1",
+ "test.fea:1:3",
+ "test.fea:1:5",
+ "test.fea:2:1",
+ "test.fea:2:4",
+ ],
+ )
def test_scan_over_(self):
lexer = Lexer("abbacabba12", "test.fea")
@@ -151,22 +168,27 @@ class IncludingLexerTest(unittest.TestCase):
def test_include(self):
lexer = IncludingLexer(self.getpath("include/include4.fea"))
- result = ['%s %s:%d' % (token, os.path.split(loc.file)[1], loc.line)
- for _, token, loc in lexer]
- self.assertEqual(result, [
- "I4a include4.fea:1",
- "I3a include3.fea:1",
- "I2a include2.fea:1",
- "I1a include1.fea:1",
- "I0 include0.fea:1",
- "I1b include1.fea:3",
- "; include2.fea:2",
- "I2b include2.fea:3",
- "; include3.fea:2",
- "I3b include3.fea:3",
- "; include4.fea:2",
- "I4b include4.fea:3"
- ])
+ result = [
+ "%s %s:%d" % (token, os.path.split(loc.file)[1], loc.line)
+ for _, token, loc in lexer
+ ]
+ self.assertEqual(
+ result,
+ [
+ "I4a include4.fea:1",
+ "I3a include3.fea:1",
+ "I2a include2.fea:1",
+ "I1a include1.fea:1",
+ "I0 include0.fea:1",
+ "I1b include1.fea:3",
+ "; include2.fea:2",
+ "I2b include2.fea:3",
+ "; include3.fea:2",
+ "I3b include3.fea:3",
+ "; include4.fea:2",
+ "I4b include4.fea:3",
+ ],
+ )
def test_include_limit(self):
lexer = IncludingLexer(self.getpath("include/include6.fea"))
@@ -178,11 +200,13 @@ class IncludingLexerTest(unittest.TestCase):
def test_include_missing_file(self):
lexer = IncludingLexer(self.getpath("include/includemissingfile.fea"))
- self.assertRaisesRegex(IncludedFeaNotFound,
- "includemissingfile.fea:1:8: The following feature file "
- "should be included but cannot be found: "
- "missingfile.fea",
- lambda: list(lexer))
+ self.assertRaisesRegex(
+ IncludedFeaNotFound,
+ "includemissingfile.fea:1:8: The following feature file "
+ "should be included but cannot be found: "
+ "missingfile.fea",
+ lambda: list(lexer),
+ )
def test_featurefilepath_None(self):
lexer = IncludingLexer(StringIO("# foobar"))
@@ -192,11 +216,16 @@ class IncludingLexerTest(unittest.TestCase):
def test_include_absolute_path(self):
with tempfile.NamedTemporaryFile(delete=False) as included:
- included.write(tobytes("""
+ included.write(
+ tobytes(
+ """
feature kern {
pos A B -40;
} kern;
- """, encoding="utf-8"))
+ """,
+ encoding="utf-8",
+ )
+ )
including = StringIO("include(%s);" % included.name)
try:
lexer = IncludingLexer(including)
@@ -211,13 +240,16 @@ class IncludingLexerTest(unittest.TestCase):
tmpdir = tempfile.mkdtemp()
try:
# create new feature file in a temporary directory
- with open(os.path.join(tmpdir, "included.fea"), "w",
- encoding="utf-8") as included:
- included.write("""
+ with open(
+ os.path.join(tmpdir, "included.fea"), "w", encoding="utf-8"
+ ) as included:
+ included.write(
+ """
feature kern {
pos A B -40;
} kern;
- """)
+ """
+ )
# change current folder to the temporary dir
os.chdir(tmpdir)
# instantiate a new lexer that includes the above file
@@ -237,4 +269,5 @@ class IncludingLexerTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/feaLib/parser_test.py b/Tests/feaLib/parser_test.py
index b281e8ac..c140629a 100644
--- a/Tests/feaLib/parser_test.py
+++ b/Tests/feaLib/parser_test.py
@@ -44,7 +44,7 @@ GLYPHNAMES = (
a.swash b.swash x.swash y.swash z.swash
foobar foo.09 foo.1234 foo.9876
one two five six acute grave dieresis umlaut cedilla ogonek macron
- a_f_f_i o_f_f_i f_i f_f_i one.fitted one.oldstyle a.1 a.2 a.3 c_t
+ a_f_f_i o_f_f_i f_i f_l f_f_i one.fitted one.oldstyle a.1 a.2 a.3 c_t
PRE SUF FIX BACK TRACK LOOK AHEAD ampersand ampersand.1 ampersand.2
cid00001 cid00002 cid00003 cid00004 cid00005 cid00006 cid00007
cid12345 cid78987 cid00999 cid01000 cid01001 cid00998 cid00995
@@ -316,7 +316,9 @@ class ParserTest(unittest.TestCase):
def test_strict_glyph_name_check(self):
self.parse("@bad = [a b ccc];", glyphNames=("a", "b", "ccc"))
- with self.assertRaisesRegex(FeatureLibError, "(?s)missing from the glyph set:.*ccc"):
+ with self.assertRaisesRegex(
+ FeatureLibError, "(?s)missing from the glyph set:.*ccc"
+ ):
self.parse("@bad = [a b ccc];", glyphNames=("a", "b"))
def test_glyphclass(self):
@@ -705,6 +707,17 @@ class ParserTest(unittest.TestCase):
self.assertEqual(glyphstr([s.glyphs]), "f_i")
self.assertEqual(s.carets, [400, 380])
+ def test_ligatureCaretByPos_variable_scalar(self):
+ doc = self.parse(
+ "table GDEF {LigatureCaretByPos f_i (wght=200:400 wght=900:1000) 380;} GDEF;"
+ )
+ s = doc.statements[0].statements[0]
+ self.assertIsInstance(s, ast.LigatureCaretByPosStatement)
+ self.assertEqual(glyphstr([s.glyphs]), "f_i")
+ self.assertEqual(len(s.carets), 2)
+ self.assertEqual(str(s.carets[0]), "(wght=200:400 wght=900:1000)")
+ self.assertEqual(s.carets[1], 380)
+
def test_lookup_block(self):
[lookup] = self.parse("lookup Ligatures {} Ligatures;").statements
self.assertEqual(lookup.name, "Ligatures")
@@ -1608,24 +1621,54 @@ class ParserTest(unittest.TestCase):
doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;")
sub = doc.statements[0].statements[0]
self.assertIsInstance(sub, ast.MultipleSubstStatement)
- self.assertEqual(sub.glyph, "f_f_i")
- self.assertEqual(sub.replacement, ("f", "f", "i"))
+ self.assertEqual(glyphstr([sub.glyph]), "f_f_i")
+ self.assertEqual(glyphstr(sub.replacement), "f f i")
def test_substitute_multiple_chained(self): # chain to GSUB LookupType 2
doc = self.parse("lookup L {sub [A-C] f_f_i' [X-Z] by f f i;} L;")
sub = doc.statements[0].statements[0]
self.assertIsInstance(sub, ast.MultipleSubstStatement)
- self.assertEqual(sub.glyph, "f_f_i")
- self.assertEqual(sub.replacement, ("f", "f", "i"))
+ self.assertEqual(glyphstr([sub.glyph]), "f_f_i")
+ self.assertEqual(glyphstr(sub.replacement), "f f i")
def test_substitute_multiple_force_chained(self):
doc = self.parse("lookup L {sub f_f_i' by f f i;} L;")
sub = doc.statements[0].statements[0]
self.assertIsInstance(sub, ast.MultipleSubstStatement)
- self.assertEqual(sub.glyph, "f_f_i")
- self.assertEqual(sub.replacement, ("f", "f", "i"))
+ self.assertEqual(glyphstr([sub.glyph]), "f_f_i")
+ self.assertEqual(glyphstr(sub.replacement), "f f i")
self.assertEqual(sub.asFea(), "sub f_f_i' by f f i;")
+ def test_substitute_multiple_classes(self):
+ doc = self.parse("lookup Look {substitute [f_i f_l] by [f f] [i l];} Look;")
+ sub = doc.statements[0].statements[0]
+ self.assertIsInstance(sub, ast.MultipleSubstStatement)
+ self.assertEqual(glyphstr([sub.glyph]), "[f_i f_l]")
+ self.assertEqual(glyphstr(sub.replacement), "[f f] [i l]")
+
+ def test_substitute_multiple_classes_mixed(self):
+ doc = self.parse("lookup Look {substitute [f_i f_l] by f [i l];} Look;")
+ sub = doc.statements[0].statements[0]
+ self.assertIsInstance(sub, ast.MultipleSubstStatement)
+ self.assertEqual(glyphstr([sub.glyph]), "[f_i f_l]")
+ self.assertEqual(glyphstr(sub.replacement), "f [i l]")
+
+ def test_substitute_multiple_classes_mixed_singleton(self):
+ doc = self.parse("lookup Look {substitute [f_i f_l] by [f] [i l];} Look;")
+ sub = doc.statements[0].statements[0]
+ self.assertIsInstance(sub, ast.MultipleSubstStatement)
+ self.assertEqual(glyphstr([sub.glyph]), "[f_i f_l]")
+ self.assertEqual(glyphstr(sub.replacement), "f [i l]")
+
+ def test_substitute_multiple_classes_mismatch(self):
+ self.assertRaisesRegex(
+ FeatureLibError,
+ 'Expected a glyph class with 1 or 3 elements after "by", '
+ "but found a glyph class with 2 elements",
+ self.parse,
+ "lookup Look {substitute [f_i f_l f_f_i] by [f f_f] [i l i];} Look;",
+ )
+
def test_substitute_multiple_by_mutliple(self):
self.assertRaisesRegex(
FeatureLibError,
@@ -2081,6 +2124,15 @@ class ParserTest(unittest.TestCase):
doc = Parser(fea_path, includeDir=include_dir).parse()
assert len(doc.statements) == 1 and doc.statements[0].text == "# Nothing"
+ def test_unmarked_ignore_statement(self):
+ with CapturingLogHandler("fontTools.feaLib.parser", level="WARNING") as caplog:
+ doc = self.parse("lookup foo { ignore sub A; } foo;")
+ self.assertEqual(doc.statements[0].statements[0].asFea(), "ignore sub A';")
+ self.assertEqual(len(caplog.records), 1)
+ caplog.assertRegex(
+ 'Ambiguous "ignore sub", there should be least one marked glyph'
+ )
+
def parse(self, text, glyphNames=GLYPHNAMES, followIncludes=True):
featurefile = StringIO(text)
p = Parser(featurefile, glyphNames, followIncludes=followIncludes)
diff --git a/Tests/fontBuilder/data/test_var.otf.ttx b/Tests/fontBuilder/data/test_var.otf.ttx
index ff148682..a94bf12b 100644
--- a/Tests/fontBuilder/data/test_var.otf.ttx
+++ b/Tests/fontBuilder/data/test_var.otf.ttx
@@ -273,6 +273,16 @@
</GlobalSubrs>
</CFF2>
+ <avar>
+ <version major="1" minor="0"/>
+ <segment axis="TEST">
+ <mapping from="-1.0" to="-1.0"/>
+ <mapping from="0.0" to="0.0"/>
+ <mapping from="0.4" to="0.6"/>
+ <mapping from="1.0" to="1.0"/>
+ </segment>
+ </avar>
+
<fvar>
<!-- Test Axis -->
diff --git a/Tests/fontBuilder/fontBuilder_test.py b/Tests/fontBuilder/fontBuilder_test.py
index 775e94d9..c831d02e 100644
--- a/Tests/fontBuilder/fontBuilder_test.py
+++ b/Tests/fontBuilder/fontBuilder_test.py
@@ -1,6 +1,6 @@
-
import os
import pytest
+from fontTools.designspaceLib import AxisDescriptor
from fontTools.ttLib import TTFont
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.pens.t2CharStringPen import T2CharStringPen
@@ -33,45 +33,75 @@ def _setupFontBuilder(isTTF, unitsPerEm=1024):
familyName = "HelloTestFont"
styleName = "TotallyNormal"
- nameStrings = dict(familyName=dict(en="HelloTestFont", nl="HalloTestFont"),
- styleName=dict(en="TotallyNormal", nl="TotaalNormaal"))
- nameStrings['psName'] = familyName + "-" + styleName
+ nameStrings = dict(
+ familyName=dict(en="HelloTestFont", nl="HalloTestFont"),
+ styleName=dict(en="TotallyNormal", nl="TotaalNormaal"),
+ )
+ nameStrings["psName"] = familyName + "-" + styleName
return fb, advanceWidths, nameStrings
def _setupFontBuilderFvar(fb):
- assert 'name' in fb.font, 'Must run setupNameTable() first.'
-
- axes = [
- ('TEST', 0, 0, 100, "Test Axis"),
- ]
+ assert "name" in fb.font, "Must run setupNameTable() first."
+
+ testAxis = AxisDescriptor()
+ testAxis.name = "Test Axis"
+ testAxis.tag = "TEST"
+ testAxis.minimum = 0
+ testAxis.default = 0
+ testAxis.maximum = 100
+ testAxis.map = [(0, 0), (40, 60), (100, 100)]
+ axes = [testAxis]
instances = [
dict(location=dict(TEST=0), stylename="TotallyNormal"),
dict(location=dict(TEST=100), stylename="TotallyTested"),
]
fb.setupFvar(axes, instances)
+ fb.setupAvar(axes)
return fb
def _setupFontBuilderCFF2(fb):
- assert 'fvar' in fb.font, 'Must run _setupFontBuilderFvar() first.'
+ assert "fvar" in fb.font, "Must run _setupFontBuilderFvar() first."
pen = T2CharStringPen(None, None, CFF2=True)
drawTestGlyph(pen)
charString = pen.getCharString()
program = [
- 200, 200, -200, -200, 2, "blend", "rmoveto",
- 400, 400, 1, "blend", "hlineto",
- 400, 400, 1, "blend", "vlineto",
- -400, -400, 1, "blend", "hlineto"
+ 200,
+ 200,
+ -200,
+ -200,
+ 2,
+ "blend",
+ "rmoveto",
+ 400,
+ 400,
+ 1,
+ "blend",
+ "hlineto",
+ 400,
+ 400,
+ 1,
+ "blend",
+ "vlineto",
+ -400,
+ -400,
+ 1,
+ "blend",
+ "hlineto",
]
charStringVariable = T2CharString(program=program)
- charStrings = {".notdef": charString, "A": charString,
- "a": charStringVariable, ".null": charString}
+ charStrings = {
+ ".notdef": charString,
+ "A": charString,
+ "a": charStringVariable,
+ ".null": charString,
+ }
fb.setupCFF2(charStrings, regions=[{"TEST": (0, 1, 1)}])
return fb
@@ -114,6 +144,29 @@ def test_build_ttf(tmpdir):
_verifyOutput(outPath)
+def test_build_cubic_ttf(tmp_path):
+ pen = TTGlyphPen(None)
+ pen.moveTo((100, 100))
+ pen.curveTo((200, 200), (300, 300), (400, 400))
+ pen.closePath()
+ glyph = pen.glyph()
+ glyphs = {"A": glyph}
+
+ # cubic outlines are not allowed in glyf table format 0
+ fb = FontBuilder(1000, isTTF=True, glyphDataFormat=0)
+ with pytest.raises(
+ ValueError, match="Glyph 'A' has cubic Bezier outlines, but glyphDataFormat=0"
+ ):
+ fb.setupGlyf(glyphs)
+ # can skip check if feeling adventurous
+ fb.setupGlyf(glyphs, validateGlyphFormat=False)
+
+ # cubics are (will be) allowed in glyf table format 1
+ fb = FontBuilder(1000, isTTF=True, glyphDataFormat=1)
+ fb.setupGlyf(glyphs)
+ assert "A" in fb.font["glyf"].glyphs
+
+
def test_build_otf(tmpdir):
outPath = os.path.join(str(tmpdir), "test.otf")
@@ -122,8 +175,15 @@ def test_build_otf(tmpdir):
pen = T2CharStringPen(600, None)
drawTestGlyph(pen)
charString = pen.getCharString()
- charStrings = {".notdef": charString, "A": charString, "a": charString, ".null": charString}
- fb.setupCFF(nameStrings['psName'], {"FullName": nameStrings['psName']}, charStrings, {})
+ charStrings = {
+ ".notdef": charString,
+ "A": charString,
+ "a": charString,
+ ".null": charString,
+ }
+ fb.setupCFF(
+ nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {}
+ )
lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()}
metrics = {}
@@ -179,10 +239,10 @@ def test_build_var(tmpdir):
fb.setupNameTable(nameStrings)
axes = [
- ('LEFT', 0, 0, 100, "Left"),
- ('RGHT', 0, 0, 100, "Right"),
- ('UPPP', 0, 0, 100, "Up"),
- ('DOWN', 0, 0, 100, "Down"),
+ ("LEFT", 0, 0, 100, "Left"),
+ ("RGHT", 0, 0, 100, "Right"),
+ ("UPPP", 0, 0, 100, "Up"),
+ ("DOWN", 0, 0, 100, "Down"),
]
instances = [
dict(location=dict(LEFT=0, RGHT=0, UPPP=0, DOWN=0), stylename="TotallyNormal"),
@@ -195,7 +255,7 @@ def test_build_var(tmpdir):
rightDeltas = [(0, 0), (0, 0), (200, 0), (200, 0), None, None, None, None]
upDeltas = [(0, 0), (0, 200), (0, 200), (0, 0), None, None, None, None]
downDeltas = [(0, -200), (0, 0), (0, 0), (0, -200), None, None, None, None]
- variations['a'] = [
+ variations["a"] = [
TupleVariation(dict(RGHT=(0, 1, 1)), rightDeltas),
TupleVariation(dict(LEFT=(0, 1, 1)), leftDeltas),
TupleVariation(dict(UPPP=(0, 1, 1)), upDeltas),
@@ -209,8 +269,8 @@ def test_build_var(tmpdir):
[
{"LEFT": (0.8, 1), "DOWN": (0.8, 1)},
{"RGHT": (0.8, 1), "UPPP": (0.8, 1)},
- ],
- {"A": "a"}
+ ],
+ {"A": "a"},
)
],
featureTag="rclt",
@@ -218,8 +278,10 @@ def test_build_var(tmpdir):
statAxes = []
for tag, minVal, defaultVal, maxVal, name in axes:
- values = [dict(name="Neutral", value=defaultVal, flags=0x2),
- dict(name=name, value=maxVal)]
+ values = [
+ dict(name="Neutral", value=defaultVal, flags=0x2),
+ dict(name=name, value=maxVal),
+ ]
statAxes.append(dict(tag=tag, name=name, values=values))
fb.setupStat(statAxes)
@@ -244,7 +306,9 @@ def test_build_cff2(tmpdir):
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=200)
- fb.setupOS2(sTypoAscender=825, sTypoDescender=200, usWinAscent=824, usWinDescent=200)
+ fb.setupOS2(
+ sTypoAscender=825, sTypoDescender=200, usWinAscent=824, usWinDescent=200
+ )
fb.setupPost()
fb.save(outPath)
@@ -258,10 +322,16 @@ def test_build_cff_to_cff2(tmpdir):
pen = T2CharStringPen(600, None)
drawTestGlyph(pen)
charString = pen.getCharString()
- charStrings = {".notdef": charString, "A": charString, "a": charString, ".null": charString}
+ charStrings = {
+ ".notdef": charString,
+ "A": charString,
+ "a": charString,
+ ".null": charString,
+ }
fb.setupCFF("TestFont", {}, charStrings, {})
from fontTools.varLib.cff import convertCFFtoCFF2
+
convertCFFtoCFF2(fb.font)
@@ -281,14 +351,17 @@ def test_setupNameTable_no_windows():
assert not any(n for n in fb.font["name"].names if n.platformID == 3)
-@pytest.mark.parametrize('is_ttf, keep_glyph_names, make_cff2, post_format', [
- (True, True, False, 2), # TTF with post table format 2.0
- (True, False, False, 3), # TTF with post table format 3.0
- (False, True, False, 3), # CFF with post table format 3.0
- (False, False, False, 3), # CFF with post table format 3.0
- (False, True, True, 2), # CFF2 with post table format 2.0
- (False, False, True, 3), # CFF2 with post table format 3.0
-])
+@pytest.mark.parametrize(
+ "is_ttf, keep_glyph_names, make_cff2, post_format",
+ [
+ (True, True, False, 2), # TTF with post table format 2.0
+ (True, False, False, 3), # TTF with post table format 3.0
+ (False, True, False, 3), # CFF with post table format 3.0
+ (False, False, False, 3), # CFF with post table format 3.0
+ (False, True, True, 2), # CFF2 with post table format 2.0
+ (False, False, True, 3), # CFF2 with post table format 3.0
+ ],
+)
def test_setupPost(is_ttf, keep_glyph_names, make_cff2, post_format):
fb, _, nameStrings = _setupFontBuilder(is_ttf)
@@ -302,7 +375,7 @@ def test_setupPost(is_ttf, keep_glyph_names, make_cff2, post_format):
fb.setupPost(keepGlyphNames=keep_glyph_names)
assert fb.isTTF is is_ttf
- assert ('CFF2' in fb.font) is make_cff2
+ assert ("CFF2" in fb.font) is make_cff2
assert fb.font["post"].formatType == post_format
@@ -310,7 +383,7 @@ def test_unicodeVariationSequences(tmpdir):
familyName = "UVSTestFont"
styleName = "Regular"
nameStrings = dict(familyName=familyName, styleName=styleName)
- nameStrings['psName'] = familyName + "-" + styleName
+ nameStrings["psName"] = familyName + "-" + styleName
glyphOrder = [".notdef", "space", "zero", "zero.slash"]
cmap = {ord(" "): "space", ord("0"): "zero"}
uvs = [
@@ -338,8 +411,57 @@ def test_unicodeVariationSequences(tmpdir):
uvs = [
(0x0030, 0xFE00, "zero.slash"),
- (0x0030, 0xFE01, "zero"), # should result in the exact same subtable data, due to cmap[0x0030] == "zero"
+ (
+ 0x0030,
+ 0xFE01,
+ "zero",
+ ), # should result in the exact same subtable data, due to cmap[0x0030] == "zero"
]
fb.setupCharacterMap(cmap, uvs)
fb.save(outPath)
_verifyOutput(outPath, tables=["cmap"])
+
+
+def test_setupPanose():
+ from fontTools.ttLib.tables.O_S_2f_2 import Panose
+
+ fb, advanceWidths, nameStrings = _setupFontBuilder(True)
+
+ pen = TTGlyphPen(None)
+ drawTestGlyph(pen)
+ glyph = pen.glyph()
+ glyphs = {".notdef": glyph, "A": glyph, "a": glyph, ".null": glyph}
+ fb.setupGlyf(glyphs)
+ metrics = {}
+ glyphTable = fb.font["glyf"]
+ for gn, advanceWidth in advanceWidths.items():
+ metrics[gn] = (advanceWidth, glyphTable[gn].xMin)
+ fb.setupHorizontalMetrics(metrics)
+
+ fb.setupHorizontalHeader(ascent=824, descent=200)
+ fb.setupNameTable(nameStrings)
+ fb.setupOS2()
+ fb.setupPost()
+
+ panoseValues = { # sample value of Times New Roman from https://www.w3.org/Printing/stevahn.html
+ "bFamilyType": 2,
+ "bSerifStyle": 2,
+ "bWeight": 6,
+ "bProportion": 3,
+ "bContrast": 5,
+ "bStrokeVariation": 4,
+ "bArmStyle": 5,
+ "bLetterForm": 2,
+ "bMidline": 3,
+ "bXHeight": 4,
+ }
+ panoseObj = Panose(**panoseValues)
+
+ for name in panoseValues:
+ assert getattr(fb.font["OS/2"].panose, name) == 0
+
+ fb.setupOS2(panose=panoseObj)
+ fb.setupPost()
+
+ for name, value in panoseValues.items():
+ assert getattr(fb.font["OS/2"].panose, name) == value
diff --git a/Tests/merge/data/CFFFont_expected.ttx b/Tests/merge/data/CFFFont_expected.ttx
index 2c4cd33e..b3d46894 100644
--- a/Tests/merge/data/CFFFont_expected.ttx
+++ b/Tests/merge/data/CFFFont_expected.ttx
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ttFont sfntVersion="OTTO" ttLibVersion="4.34">
+<ttFont sfntVersion="OTTO" ttLibVersion="4.39">
<GlyphOrder>
<!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
@@ -788,12 +788,12 @@
<!-- Most of this table will be recalculated by the compiler -->
<tableVersion value="1.0"/>
<fontRevision value="1.003"/>
- <checkSumAdjustment value="0x9a87f91"/>
+ <checkSumAdjustment value="0x579debce"/>
<magicNumber value="0x5f0f3cf5"/>
<flags value="00000000 00000011"/>
<unitsPerEm value="1000"/>
- <created value="Sun Aug 14 18:30:31 2022"/>
- <modified value="Sun Aug 14 18:30:31 2022"/>
+ <created value="Wed Mar 29 18:41:25 2023"/>
+ <modified value="Wed Mar 29 18:41:25 2023"/>
<xMin value="-199"/>
<yMin value="-364"/>
<xMax value="1459"/>
@@ -1381,14 +1381,14 @@
endchar
</CharString>
<CharString name=".notdef.1">
- -45 50 -200 rmoveto
+ 136 50 -200 rmoveto
400 1000 -400 -1000 hlineto
50 50 rmoveto
900 300 -900 -300 vlineto
endchar
</CharString>
<CharString name="A">
- 177 572 -10 rmoveto
+ 358 572 -10 rmoveto
15 15 2 4 15 hvcurveto
104 30 -1 26 rlineto
-1 -14 -15 -2 -14 hhcurveto
@@ -1417,7 +1417,7 @@
endchar
</CharString>
<CharString name="AE">
- 252 366 66 rmoveto
+ 433 366 66 rmoveto
-27 6 -28 20 -11 vhcurveto
388 hlineto
13 53 -5 5 rlineto
@@ -1452,7 +1452,7 @@
endchar
</CharString>
<CharString name="Aacute">
- 177 517 882 rmoveto
+ 358 517 882 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
217 -738 rmoveto
15 15 2 4 15 hvcurveto
@@ -1483,7 +1483,7 @@
endchar
</CharString>
<CharString name="Acircumflex">
- 177 189 729 rmoveto
+ 358 189 729 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
214 -920 rmoveto
15 15 2 4 15 hvcurveto
@@ -1514,7 +1514,7 @@
endchar
</CharString>
<CharString name="Adieresis">
- 177 512 813 rmoveto
+ 358 512 813 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -1547,7 +1547,7 @@
endchar
</CharString>
<CharString name="Agrave">
- 177 222 883 rmoveto
+ 358 222 883 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
303 -929 rmoveto
15 15 2 4 15 hvcurveto
@@ -1578,7 +1578,7 @@
endchar
</CharString>
<CharString name="Aring">
- 177 572 -10 rmoveto
+ 358 572 -10 rmoveto
15 15 2 4 15 hvcurveto
104 30 -1 26 rlineto
-1 -14 -15 -2 -14 hhcurveto
@@ -1611,7 +1611,7 @@
endchar
</CharString>
<CharString name="Atilde">
- 177 350 838 rmoveto
+ 358 350 838 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -1645,7 +1645,7 @@
endchar
</CharString>
<CharString name="B">
- 14 335 378 rmoveto
+ 195 335 378 rmoveto
81 34 72 44 97 vvcurveto
109 -116 24 -113 vhcurveto
-247 hlineto
@@ -1665,7 +1665,7 @@
endchar
</CharString>
<CharString name="C">
- 88 624 586 rmoveto
+ 269 624 586 rmoveto
85 -52 -76 28 -88 hhcurveto
-104 -101 -36 -73 -73 hvcurveto
-70 -70 -21 -98 -96 vvcurveto
@@ -1681,7 +1681,7 @@
endchar
</CharString>
<CharString name="Ccedilla">
- 88 285 -112 rmoveto
+ 269 285 -112 rmoveto
37 -11 50 -31 -28 -38 -14 -18 -32 -14 -26 -10 -5 -10 rcurveline
6 -15 55 10 70 18 33 44 rlinecurve
26 36 -9 36 -23 28 -19 23 -40 18 -13 5 11 26 rcurveline
@@ -1699,7 +1699,7 @@
endchar
</CharString>
<CharString name="D">
- 192 295 2 rmoveto
+ 373 295 2 rmoveto
104 131 16 73 67 hvcurveto
77 83 22 84 127 vvcurveto
177 -57 124 -310 vhcurveto
@@ -1721,7 +1721,7 @@
endchar
</CharString>
<CharString name="Delta">
- 206 46 45 rmoveto
+ 387 46 45 rmoveto
2 -42 rlineto
5 73 74 2 74 hhcurveto
125 124 -6 -8 125 hvcurveto
@@ -1736,7 +1736,7 @@
endchar
</CharString>
<CharString name="E">
- -27 417 384 rmoveto
+ 154 417 384 rmoveto
-6 7 rlineto
-4 -53 -74 -3 -53 hhcurveto
-37 -29 -3 17 hvcurveto
@@ -1761,7 +1761,7 @@
endchar
</CharString>
<CharString name="Eacute">
- -27 385 885 rmoveto
+ 154 385 885 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
194 -347 rmoveto
-6 7 rlineto
@@ -1788,7 +1788,7 @@
endchar
</CharString>
<CharString name="Ecircumflex">
- -27 107 732 rmoveto
+ 154 107 732 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
141 -529 rmoveto
-6 7 rlineto
@@ -1815,7 +1815,7 @@
endchar
</CharString>
<CharString name="Edieresis">
- -27 430 816 rmoveto
+ 154 430 816 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -1844,7 +1844,7 @@
endchar
</CharString>
<CharString name="Egrave">
- -27 140 886 rmoveto
+ 154 140 886 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
230 -538 rmoveto
-6 7 rlineto
@@ -1871,7 +1871,7 @@
endchar
</CharString>
<CharString name="Eth">
- 201 304 2 rmoveto
+ 382 304 2 rmoveto
104 131 16 73 67 hvcurveto
77 83 22 84 127 vvcurveto
177 -57 124 -310 vhcurveto
@@ -1897,7 +1897,7 @@
endchar
</CharString>
<CharString name="F">
- -104 87 360 rmoveto
+ 77 87 360 rmoveto
-121 -3 -120 -4 -121 vhcurveto
92 15 rlineto
-4 101 -1 115 101 vvcurveto
@@ -1917,7 +1917,7 @@
endchar
</CharString>
<CharString name="G">
- 116 325 270 rmoveto
+ 297 325 270 rmoveto
6 -5 54 -2 64 -3 44 -4 rlinecurve
38 -4 -10 -98 -20 vvcurveto
-30 6 -54 -26 -5 vhcurveto
@@ -1935,7 +1935,7 @@
endchar
</CharString>
<CharString name="Gamma">
- -53 464 697 rmoveto
+ 128 464 697 rmoveto
-3 -60 -61 -1 -61 hhcurveto
-237 hlineto
-3 -20 26 -22 rlineto
@@ -1951,7 +1951,7 @@
endchar
</CharString>
<CharString name="H">
- 161 699 32 rmoveto
+ 342 699 32 rmoveto
-7 -70 11 36 hvcurveto
-3 81 -1 81 81 vvcurveto
121 2 120 9 121 vhcurveto
@@ -1976,7 +1976,7 @@
endchar
</CharString>
<CharString name="I">
- -265 275 23 rmoveto
+ -84 275 23 rmoveto
-1 -43 11 6 -21 hvcurveto
-42 11 11 75 -1 43 rrcurveto
-1 61 -1 62 67 vvcurveto
@@ -1995,7 +1995,7 @@
endchar
</CharString>
<CharString name="Iacute">
- -265 252 885 rmoveto
+ -84 252 885 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
185 -708 rmoveto
-1 -43 11 6 -21 hvcurveto
@@ -2016,7 +2016,7 @@
endchar
</CharString>
<CharString name="Icircumflex">
- -265 -26 732 rmoveto
+ -84 -26 732 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
132 -890 rmoveto
-1 -43 11 6 -21 hvcurveto
@@ -2037,7 +2037,7 @@
endchar
</CharString>
<CharString name="Idieresis">
- -265 297 816 rmoveto
+ -84 297 816 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -2060,7 +2060,7 @@
endchar
</CharString>
<CharString name="Igrave">
- -265 7 886 rmoveto
+ -84 7 886 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
221 -899 rmoveto
-1 -43 11 6 -21 hvcurveto
@@ -2081,7 +2081,7 @@
endchar
</CharString>
<CharString name="J">
- -280 175 608 rmoveto
+ -99 175 608 rmoveto
18 9 17 12 3 vhcurveto
68 14 2 24 -7 6 rlineto
-250 hlineto
@@ -2096,7 +2096,7 @@
endchar
</CharString>
<CharString name="K">
- 48 178 319 rmoveto
+ 229 178 319 rmoveto
108 -109 112 -105 105 -112 85 31 rcurveline
1 11 -61 54 -165 164 -107 107 rlinecurve
-4 4 -1 2 3 vvcurveto
@@ -2116,7 +2116,7 @@
endchar
</CharString>
<CharString name="L">
- -51 482 58 rmoveto
+ 130 482 58 rmoveto
-1 -6 -182 -14 -104 hhcurveto
-5 -7 7 6 -1 hvcurveto
-4 108 -3 116 113 vvcurveto
@@ -2137,7 +2137,7 @@
endchar
</CharString>
<CharString name="Lambda">
- 203 368 673 rmoveto
+ 384 368 673 rmoveto
-115 -227 -123 -222 -127 -220 13 -10 rcurveline
27 4 26 9 25 10 28 66 146 294 95 188 rrcurveto
12 hlineto
@@ -2148,7 +2148,7 @@
endchar
</CharString>
<CharString name="M">
- 417 475 147 rmoveto
+ 598 475 147 rmoveto
-4 -2 -6 -5 -4 hhcurveto
-4 -5 10 4 -2 hvcurveto
-83 177 -94 200 -55 161 rrcurveto
@@ -2177,7 +2177,7 @@
endchar
</CharString>
<CharString name="N">
- 187 577 678 rmoveto
+ 368 577 678 rmoveto
21 -304 3 -99 -102 vvcurveto
-2 -1 -6 -5 -4 -4 5 3 -3 vhcurveto
-117 126 -162 215 -133 176 rrcurveto
@@ -2196,7 +2196,7 @@
endchar
</CharString>
<CharString name="Ntilde">
- 187 393 841 rmoveto
+ 368 393 841 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -2220,7 +2220,7 @@
endchar
</CharString>
<CharString name="O">
- 192 320 -18 rmoveto
+ 373 320 -18 rmoveto
157 129 72 98 48 hvcurveto
40 81 1 65 66 vvcurveto
93 -22 104 -62 63 vhcurveto
@@ -2235,7 +2235,7 @@
endchar
</CharString>
<CharString name="OE">
- 452 896 384 rmoveto
+ 633 896 384 rmoveto
-6 7 rlineto
-4 -53 -74 -3 -53 hhcurveto
-37 -29 -3 17 hvcurveto
@@ -2267,7 +2267,7 @@
endchar
</CharString>
<CharString name="Oacute">
- 209 557 918 rmoveto
+ 390 557 918 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
-75 -782 rmoveto
157 129 72 98 48 hvcurveto
@@ -2284,7 +2284,7 @@
endchar
</CharString>
<CharString name="Ocircumflex">
- 192 218 742 rmoveto
+ 373 218 742 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
-67 -941 rmoveto
157 129 72 98 48 hvcurveto
@@ -2301,7 +2301,7 @@
endchar
</CharString>
<CharString name="Odieresis">
- 192 541 826 rmoveto
+ 373 541 826 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -2320,7 +2320,7 @@
endchar
</CharString>
<CharString name="Ograve">
- 192 251 896 rmoveto
+ 373 251 896 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
22 -950 rmoveto
157 129 72 98 48 hvcurveto
@@ -2337,7 +2337,7 @@
endchar
</CharString>
<CharString name="Omega">
- 360 853 60 rmoveto
+ 541 853 60 rmoveto
-70 -96 -5 -4 -85 hvcurveto
12 vlineto
135 55 50 138 129 vvcurveto
@@ -2358,7 +2358,7 @@
endchar
</CharString>
<CharString name="Oslash">
- 192 320 -18 rmoveto
+ 373 320 -18 rmoveto
157 129 72 98 48 hvcurveto
40 81 1 65 66 vvcurveto
93 -22 104 -62 63 vhcurveto
@@ -2382,7 +2382,7 @@
endchar
</CharString>
<CharString name="Otilde">
- 192 379 851 rmoveto
+ 373 379 851 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -2402,7 +2402,7 @@
endchar
</CharString>
<CharString name="P">
- -18 172 22 rmoveto
+ 163 172 22 rmoveto
-1 101 -6 126 25 vvcurveto
19 65 -2 29 2 vhcurveto
65 5 73 15 44 45 rrcurveto
@@ -2421,7 +2421,7 @@
endchar
</CharString>
<CharString name="Phi">
- 187 234 657 rmoveto
+ 368 234 657 rmoveto
63 -9 rlineto
13 -2 10 -12 -14 vvcurveto
-12 vlineto
@@ -2454,7 +2454,7 @@
endchar
</CharString>
<CharString name="Pi">
- 123 668 686 rmoveto
+ 304 668 686 rmoveto
-2 -81 -119 -3 -44 hhcurveto
-198 hlineto
-75 -75 4 5 -75 hvcurveto
@@ -2478,7 +2478,7 @@
endchar
</CharString>
<CharString name="Psi">
- 191 443 251 rmoveto
+ 372 443 251 rmoveto
117 2 74 32 31 203 16 90 1 55 44 28 -3 18 rcurveline
-35 -4 -35 -8 -34 -12 -12 -43 -1 -45 -6 -44 -18 -127 -29 -114 -112 -6 -7 6 rcurveline
-2 58 -1 59 59 vvcurveto
@@ -2504,7 +2504,7 @@
endchar
</CharString>
<CharString name="Q">
- 195 722 -174 rmoveto
+ 376 722 -174 rmoveto
-6 -23 -48 -9 -16 hhcurveto
-89 -42 46 21 -15 hvcurveto
-20 26 -26 49 -16 30 4 10 rcurveline
@@ -2525,7 +2525,7 @@
endchar
</CharString>
<CharString name="R">
- 3 275 331 rmoveto
+ 184 275 331 rmoveto
119 13 96 56 139 vvcurveto
93 -81 47 -122 vhcurveto
-272 hlineto
@@ -2547,7 +2547,7 @@
endchar
</CharString>
<CharString name="S">
- -61 451 606 rmoveto
+ 120 451 606 rmoveto
83 -44 -60 13 -73 hhcurveto
-109 -103 -65 -125 -98 62 -48 121 -52 hvcurveto
60 -26 61 -54 -56 vvcurveto
@@ -2561,7 +2561,7 @@
endchar
</CharString>
<CharString name="Sigma">
- 26 299 362 rmoveto
+ 207 299 362 rmoveto
4 7 7 9 6 vvcurveto
6 -2 5 -3 5 vhcurveto
-137 222 11 13 rlineto
@@ -2581,7 +2581,7 @@
endchar
</CharString>
<CharString name="T">
- 6 543 696 rmoveto
+ 187 543 696 rmoveto
-4 -88 -99 -6 -75 hhcurveto
-177 0 8 3 -95 hvcurveto
-7 -7 rlineto
@@ -2599,7 +2599,7 @@
endchar
</CharString>
<CharString name="Theta">
- 233 348 -12 rmoveto
+ 414 348 -12 rmoveto
196 194 83 310 87 -20 93 -63 63 hvcurveto
57 -57 -84 20 -79 hhcurveto
-152 -139 -58 -100 -58 hvcurveto
@@ -2628,7 +2628,7 @@
endchar
</CharString>
<CharString name="Thorn">
- -18 168 679 rmoveto
+ 163 168 679 rmoveto
-151 hlineto
-7 -7 rlineto
-18 vlineto
@@ -2649,7 +2649,7 @@
endchar
</CharString>
<CharString name="U">
- 167 183 674 rmoveto
+ 348 183 674 rmoveto
-7 6 rlineto
-169 hlineto
-8 -7 2 -20 rlineto
@@ -2673,7 +2673,7 @@
endchar
</CharString>
<CharString name="Uacute">
- 167 482 885 rmoveto
+ 348 482 885 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
-137 -57 rmoveto
-7 6 rlineto
@@ -2699,7 +2699,7 @@
endchar
</CharString>
<CharString name="Ucircumflex">
- 167 204 732 rmoveto
+ 348 204 732 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
-190 -239 rmoveto
-7 6 rlineto
@@ -2725,7 +2725,7 @@
endchar
</CharString>
<CharString name="Udieresis">
- 167 527 816 rmoveto
+ 348 527 816 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -2753,7 +2753,7 @@
endchar
</CharString>
<CharString name="Ugrave">
- 167 237 886 rmoveto
+ 348 237 886 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
-101 -248 rmoveto
-7 6 rlineto
@@ -2779,7 +2779,7 @@
endchar
</CharString>
<CharString name="Upsilon">
- 137 672 546 rmoveto
+ 318 672 546 rmoveto
75 -21 76 -85 -155 -48 -199 -123 -21 vhcurveto
-9 hlineto
130 -13 -32 192 -146 hhcurveto
@@ -2796,7 +2796,7 @@
endchar
</CharString>
<CharString name="V">
- 162 614 680 rmoveto
+ 343 614 680 rmoveto
-25 -43 -25 -44 -19 -45 -119 -274 rcurveline
-41 -89 -23 -52 -7 -2 -2 1 -4 2 -2 4 -71 182 -55 187 -39 191 -175 -10 rcurveline
-7 -6 1 -27 56 -8 23 -5 16 -33 rlinecurve
@@ -2806,7 +2806,7 @@
endchar
</CharString>
<CharString name="W">
- 505 965 675 rmoveto
+ 686 965 675 rmoveto
-17 -32 -22 -39 -21 -46 -109 -242 rcurveline
-80 -170 rlineto
-3 -2 -2 -4 -9 hhcurveto
@@ -2829,7 +2829,7 @@
endchar
</CharString>
<CharString name="X">
- 79 612 687 rmoveto
+ 260 612 687 rmoveto
-95 -13 -9 -21 -138 -198 -33 -33 rlinecurve
-9 hlineto
-147 266 -45 -3 -76 -4 -25 -1 rlinecurve
@@ -2850,7 +2850,7 @@
endchar
</CharString>
<CharString name="Xi">
- 68 49 695 rmoveto
+ 249 49 695 rmoveto
-5 -57 -1 -58 -7 -57 27 2 rcurveline
23 64 rlineto
20 7 24 9 20 hhcurveto
@@ -2876,7 +2876,7 @@
endchar
</CharString>
<CharString name="Y">
- 47 231 -2 rmoveto
+ 228 231 -2 rmoveto
11 -9 90 29 -9 288 83 128 74 96 111 145 rlinecurve
-4 18 -85 -17 -36 -57 -84 -145 -8 -14 rlinecurve
-54 -30 -35 -53 -5 hhcurveto
@@ -2890,7 +2890,7 @@
endchar
</CharString>
<CharString name="Yacute">
- 47 451 885 rmoveto
+ 228 451 885 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
-58 -733 rmoveto
11 -9 90 29 -9 288 83 128 74 96 111 145 rlinecurve
@@ -2906,7 +2906,7 @@
endchar
</CharString>
<CharString name="Z">
- 58 552 683 rmoveto
+ 239 552 683 rmoveto
-149 -148 3 9 -148 hvcurveto
-9 -8 rlineto
-54 vlineto
@@ -2920,7 +2920,7 @@
endchar
</CharString>
<CharString name="a">
- 3 278 466 rmoveto
+ 184 278 466 rmoveto
-170 -68 -140 -141 -90 36 -107 111 58 56 31 32 49 hvcurveto
15 -4 rlineto
-34 6 14 -25 37 hhcurveto
@@ -2941,7 +2941,7 @@
endchar
</CharString>
<CharString name="aacute">
- 3 389 702 rmoveto
+ 184 389 702 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
51 -82 rmoveto
-170 -68 -140 -141 -90 36 -107 111 58 56 31 32 49 hvcurveto
@@ -2964,7 +2964,7 @@
endchar
</CharString>
<CharString name="acircumflex">
- 3 111 549 rmoveto
+ 184 111 549 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
-2 -264 rmoveto
-170 -68 -140 -141 -90 36 -107 111 58 56 31 32 49 hvcurveto
@@ -2987,17 +2987,17 @@
endchar
</CharString>
<CharString name="acute">
- -25 405 664 rmoveto
+ 156 405 664 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
endchar
</CharString>
<CharString name="acutecomb">
- -545 115 664 rmoveto
+ 115 664 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
endchar
</CharString>
<CharString name="adieresis">
- 3 434 633 rmoveto
+ 184 434 633 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -3022,7 +3022,7 @@
endchar
</CharString>
<CharString name="ae">
- 207 707 319 rmoveto
+ 388 707 319 rmoveto
11 5 10 4 12 vvcurveto
77 -56 61 -86 -67 -64 -25 -46 -48 vhcurveto
39 -22 -65 20 -42 hhcurveto
@@ -3057,7 +3057,7 @@
endchar
</CharString>
<CharString name="agrave">
- 3 144 703 rmoveto
+ 184 144 703 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
87 -273 rmoveto
-170 -68 -140 -141 -90 36 -107 111 58 56 31 32 49 hvcurveto
@@ -3579,7 +3579,7 @@
endchar
</CharString>
<CharString name="alpha">
- 64 410 307 rmoveto
+ 245 410 307 rmoveto
-10 1 rlineto
55 -15 -34 105 -89 hhcurveto
-151 -81 -161 -138 -68 25 -113 81 60 60 50 44 36 hvcurveto
@@ -3597,7 +3597,7 @@
endchar
</CharString>
<CharString name="ampersand">
- 193 326 356 rmoveto
+ 374 326 356 rmoveto
3 23 23 2 23 hhcurveto
22 23 0 -16 16 hvcurveto
21 -21 15 -28 -30 vvcurveto
@@ -3632,7 +3632,7 @@
endchar
</CharString>
<CharString name="aring">
- 3 177 613 rmoveto
+ 184 177 613 rmoveto
-54 43 -43 54 54 43 43 54 54 -43 43 -54 -54 -43 -43 -54 vhcurveto
30 hmoveto
37 30 30 37 37 30 -30 -37 -37 -30 -30 -37 -37 -30 30 37 vhcurveto
@@ -3657,7 +3657,7 @@
endchar
</CharString>
<CharString name="asciitilde">
- 127 342 291 rmoveto
+ 308 342 291 rmoveto
25 -41 -50 31 -44 hhcurveto
-59 -36 -49 -58 -29 hvcurveto
22 -12 rlineto
@@ -3672,7 +3672,7 @@
endchar
</CharString>
<CharString name="asterisk">
- -244 175 448 rmoveto
+ -63 175 448 rmoveto
-8 36 -2 33 -1 35 26 -20 24 -22 17 -19 26 29 rcurveline
-28 13 -29 20 -31 23 28 20 28 17 32 15 -25 31 rcurveline
-20 -21 -23 -21 -25 -20 2 33 2 29 7 33 rrcurveto
@@ -3684,7 +3684,7 @@
endchar
</CharString>
<CharString name="at">
- 183 472 371 rmoveto
+ 364 472 371 rmoveto
16 -25 -35 8 -25 hhcurveto
-132 -56 -97 -99 -64 29 -74 88 46 44 22 22 39 hvcurveto
12 -3 rlineto
@@ -3708,7 +3708,7 @@
endchar
</CharString>
<CharString name="atilde">
- 3 272 658 rmoveto
+ 184 272 658 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -3734,7 +3734,7 @@
endchar
</CharString>
<CharString name="b">
- -23 -14 640 rmoveto
+ 158 -14 640 rmoveto
86 -13 8 7 -35 vvcurveto
-476 vlineto
-111 66 -22 51 114 117 80 107 35 vhcurveto
@@ -3752,12 +3752,12 @@
endchar
</CharString>
<CharString name="backslash">
- -155 404 -184 rmoveto
+ 26 404 -184 rmoveto
3 13 -388 893 -31 -7 -5 -11 389 -896 rlineto
endchar
</CharString>
<CharString name="bar">
- -329 128 738 rmoveto
+ -148 128 738 rmoveto
-40 -8 rlineto
-897 vlineto
40 8 rlineto
@@ -4080,7 +4080,7 @@
endchar
</CharString>
<CharString name="beta">
- -15 325 392 rmoveto
+ 166 325 392 rmoveto
65 25 58 71 75 vvcurveto
75 -61 60 -75 -106 -71 -57 -81 -38 vhcurveto
-22 -47 -8 -78 -2 -51 -8 -190 4 -191 -5 -190 12 -15 rcurveline
@@ -4100,7 +4100,7 @@
endchar
</CharString>
<CharString name="braceleft">
- -232 303 -154 rmoveto
+ -51 303 -154 rmoveto
-97 -36 2 182 91 7 120 -92 32 hvcurveto
1 vlineto
64 23 21 88 133 vvcurveto
@@ -4117,7 +4117,7 @@
endchar
</CharString>
<CharString name="braceright">
- -232 10 -175 rmoveto
+ -51 10 -175 rmoveto
149 34 -1 213 104 -3 101 61 15 hvcurveto
17 4 19 2 18 -1 rrcurveto
26 vlineto
@@ -4133,7 +4133,7 @@
endchar
</CharString>
<CharString name="bracketleft">
- -287 247 736 rmoveto
+ -106 247 736 rmoveto
-54 -66 0 5 -39 hvcurveto
-866 vlineto
5 52 53 0 54 hhcurveto
@@ -4144,7 +4144,7 @@
endchar
</CharString>
<CharString name="bracketright">
- -287 130 -83 rmoveto
+ -106 130 -83 rmoveto
-119 -13 rlineto
-24 vlineto
54 53 0 -5 52 hvcurveto
@@ -4155,7 +4155,7 @@
endchar
</CharString>
<CharString name="brokenbar">
- -337 124 738 rmoveto
+ -156 124 738 rmoveto
-40 -8 rlineto
-403 vlineto
40 8 rlineto
@@ -4166,12 +4166,12 @@
endchar
</CharString>
<CharString name="bullet">
- -213 69 265 rmoveto
+ -32 69 265 rmoveto
-54 43 -43 54 54 43 43 54 54 -43 43 -54 -54 -43 -43 -54 vhcurveto
endchar
</CharString>
<CharString name="c">
- -90 425 116 rmoveto
+ 91 425 116 rmoveto
-38 -44 -50 -31 -59 hhcurveto
-117 -24 115 86 88 45 100 85 43 33 -39 -67 9 hvcurveto
17 -6 54 53 rlineto
@@ -4181,7 +4181,7 @@
endchar
</CharString>
<CharString name="ccedilla">
- -90 180 -122 rmoveto
+ 91 180 -122 rmoveto
37 -11 50 -31 -28 -38 -14 -18 -32 -14 -26 -10 -5 -10 rcurveline
6 -15 55 10 70 18 33 44 rlinecurve
26 36 -9 36 -23 28 -19 23 -40 18 -13 5 15 36 rcurveline
@@ -4196,7 +4196,7 @@
endchar
</CharString>
<CharString name="cedilla">
- -25 188 -112 rmoveto
+ 156 188 -112 rmoveto
37 -11 50 -31 -28 -38 -14 -18 -32 -14 -26 -10 -5 -10 rcurveline
6 -15 55 10 70 18 33 44 rlinecurve
26 36 -9 36 -23 28 -19 23 -40 18 -13 5 20 47 rcurveline
@@ -4204,7 +4204,7 @@
endchar
</CharString>
<CharString name="cedillacomb">
- -545 -72 -112 rmoveto
+ -72 -112 rmoveto
37 -11 50 -31 -28 -38 -14 -18 -32 -14 -26 -10 -5 -10 rcurveline
6 -15 55 10 70 18 33 44 rlinecurve
26 36 -9 36 -23 28 -19 23 -40 18 -13 5 20 47 rcurveline
@@ -4212,7 +4212,7 @@
endchar
</CharString>
<CharString name="cent">
- -84 418 116 rmoveto
+ 97 418 116 rmoveto
-38 -44 -50 -31 -59 hhcurveto
-2 388 hlineto
39 -5 29 -38 8 -62 17 -6 rcurveline
@@ -4236,7 +4236,7 @@
endchar
</CharString>
<CharString name="chi">
- -20 284 206 rmoveto
+ 161 284 206 rmoveto
-90 192 rlineto
30 -14 -23 38 -37 hhcurveto
-25 -25 -8 -15 -19 hvcurveto
@@ -4257,24 +4257,24 @@
endchar
</CharString>
<CharString name="circumflex">
- -45 103 511 rmoveto
+ 136 103 511 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
endchar
</CharString>
<CharString name="circumflexcomb">
- -545 -163 511 rmoveto
+ -163 511 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
endchar
</CharString>
<CharString name="colon">
- -361 35 398 rmoveto
+ -180 35 398 rmoveto
-31 26 -26 32 31 25 26 31 31 -25 26 -31 -32 -26 -26 -31 vhcurveto
-353 vmoveto
-31 26 -26 32 31 25 26 31 31 -25 26 -31 -32 -26 -26 -31 vhcurveto
endchar
</CharString>
<CharString name="comma">
- -312 77 -210 rmoveto
+ -131 77 -210 rmoveto
71 82 33 51 50 vvcurveto
35 -35 130 -53 -25 -34 -22 -26 -10 3 -11 4 -5 vhcurveto
70 -83 rlineto
@@ -4301,7 +4301,7 @@
endchar
</CharString>
<CharString name="copyright">
- 87 45 437 rmoveto
+ 268 45 437 rmoveto
-150 121 -121 150 150 121 121 150 150 -121 121 -150 -150 -121 -121 -150 vhcurveto
25 hmoveto
136 110 110 136 136 110 -110 -136 -136 -110 -110 -136 -136 -110 110 136 vhcurveto
@@ -4323,7 +4323,7 @@
endchar
</CharString>
<CharString name="currency">
- 45 75 484 rmoveto
+ 226 75 484 rmoveto
49 -49 rlineto
-30 -34 -12 -36 -52 vvcurveto
-50 12 -35 30 -38 vhcurveto
@@ -4342,7 +4342,7 @@
endchar
</CharString>
<CharString name="d">
- 6 269 638 rmoveto
+ 187 269 638 rmoveto
33 47 -1 -35 11 hvcurveto
7 -26 -1 -72 1 -53 -7 -4 rcurveline
9 -27 -27 6 -29 hhcurveto
@@ -4485,14 +4485,14 @@
endchar
</CharString>
<CharString name="degree">
- -213 69 575 rmoveto
+ -32 69 575 rmoveto
-54 43 -43 54 54 43 43 54 54 -43 43 -54 -54 -43 -43 -54 vhcurveto
30 hmoveto
37 30 30 37 37 30 -30 -37 -37 -30 -30 -37 -37 -30 30 37 vhcurveto
endchar
</CharString>
<CharString name="delta">
- -122 349 687 rmoveto
+ 59 349 687 rmoveto
4 -29 -24 3 -27 hhcurveto
-73 -63 -21 -71 -35 hvcurveto
-6 -12 -5 -12 -13 vvcurveto
@@ -4509,21 +4509,21 @@
endchar
</CharString>
<CharString name="dieresis">
- -25 420 595 rmoveto
+ 156 420 595 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
endchar
</CharString>
<CharString name="dieresiscomb">
- -545 160 595 rmoveto
+ 160 595 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
endchar
</CharString>
<CharString name="divide">
- 279 471 514 rmoveto
+ 460 471 514 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
273 -238 rmoveto
-657 hlineto
@@ -4534,7 +4534,7 @@
endchar
</CharString>
<CharString name="dollar">
- -69 439 593 rmoveto
+ 112 439 593 rmoveto
79 -42 -57 12 -69 -1 -1 0 -1 hhcurveto
114 vlineto
-40 -8 rlineto
@@ -4578,7 +4578,7 @@
endchar
</CharString>
<CharString name="dotlessi">
- -222 192 392 rmoveto
+ -41 192 392 rmoveto
44 -17 35 -45 -39 -56 -39 -36 -45 vhcurveto
13 -18 rlineto
15 24 34 17 22 hhcurveto
@@ -4592,7 +4592,7 @@
endchar
</CharString>
<CharString name="dotlessj">
- -257 18 375 rmoveto
+ -76 18 375 rmoveto
13 25 35 20 16 hhcurveto
36 8 -12 -242 -44 -3 -124 -17 -62 hvcurveto
-14 -50 -40 -45 -74 -44 16 -16 rcurveline
@@ -4615,7 +4615,7 @@
endchar
</CharString>
<CharString name="e">
- -83 438 340 rmoveto
+ 98 438 340 rmoveto
77 -56 61 -86 -76 -72 -33 -57 -49 vhcurveto
-39 -45 -12 -59 -58 vvcurveto
-113 32 -125 134 86 80 58 58 62 vhcurveto
@@ -4633,7 +4633,7 @@
endchar
</CharString>
<CharString name="eacute">
- -107 354 714 rmoveto
+ 74 354 714 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
246 -220 rmoveto
77 -56 61 -86 -76 -72 -33 -57 -49 vhcurveto
@@ -4653,7 +4653,7 @@
endchar
</CharString>
<CharString name="ecircumflex">
- -107 76 561 rmoveto
+ 74 76 561 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
193 -402 rmoveto
77 -56 61 -86 -76 -72 -33 -57 -49 vhcurveto
@@ -4673,7 +4673,7 @@
endchar
</CharString>
<CharString name="edieresis">
- -107 399 615 rmoveto
+ 74 399 615 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -4695,7 +4695,7 @@
endchar
</CharString>
<CharString name="egrave">
- -107 109 716 rmoveto
+ 74 109 716 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
282 -412 rmoveto
77 -56 61 -86 -76 -72 -33 -57 -49 vhcurveto
@@ -4715,7 +4715,7 @@
endchar
</CharString>
<CharString name="eight">
- -45 312 396 rmoveto
+ 136 312 396 rmoveto
60 37 63 63 72 vvcurveto
82 -70 56 -102 vhcurveto
-9 hlineto
@@ -4757,7 +4757,7 @@
endchar
</CharString>
<CharString name="eight.dnom">
- 230 231 rmoveto
+ 12 230 231 rmoveto
7 vlineto
40 22 44 38 43 vvcurveto
50 -49 33 -68 vhcurveto
@@ -4779,7 +4779,7 @@
endchar
</CharString>
<CharString name="eight.numr">
- 230 506 rmoveto
+ 12 230 506 rmoveto
7 vlineto
40 22 44 38 43 vvcurveto
50 -49 33 -68 vhcurveto
@@ -4850,7 +4850,7 @@
endchar
</CharString>
<CharString name="epsilon">
- -175 335 430 rmoveto
+ 6 335 430 rmoveto
22 -31 -35 19 -39 hhcurveto
-85 -82 -55 -96 -30 20 -34 23 -14 hvcurveto
-7 vlineto
@@ -4866,7 +4866,7 @@
endchar
</CharString>
<CharString name="epsilon1">
- -125 213 258 rmoveto
+ 56 213 258 rmoveto
-2 -1 -64 1 -37 hhcurveto
-6 6 rlineto
78 2 35 87 96 hhcurveto
@@ -4886,7 +4886,7 @@
endchar
</CharString>
<CharString name="equal">
- 278 745 369 rmoveto
+ 459 745 369 rmoveto
-661 hlineto
-7 -40 rlineto
662 hlineto
@@ -4897,7 +4897,7 @@
endchar
</CharString>
<CharString name="eta">
- -5 498 -164 rmoveto
+ 176 498 -164 rmoveto
-15 122 -2 123 123 vvcurveto
177 11 85 -104 -64 -71 -38 -37 -56 vhcurveto
-11 2 rlineto
@@ -4916,7 +4916,7 @@
endchar
</CharString>
<CharString name="eth">
- -84 144 662 rmoveto
+ 97 144 662 rmoveto
27 -16 23 -17 20 -22 -108 -57 rcurveline
13 -39 122 65 22 -31 20 -36 18 -43 rlinecurve
-3 -5 -37 4 -23 -1 -21 -3 rlinecurve
@@ -4927,7 +4927,7 @@
endchar
</CharString>
<CharString name="euro">
- 93 425 270 rmoveto
+ 274 425 270 rmoveto
10 40 rlineto
-225 hlineto
-1 11 -1 12 11 vvcurveto
@@ -4963,7 +4963,7 @@
endchar
</CharString>
<CharString name="exclam.1">
- -329 112 186 rmoveto
+ -148 112 186 rmoveto
36 371 rlineto
3 29 2 29 29 vvcurveto
20 -7 26 -30 -32 -18 -14 -51 vhcurveto
@@ -4973,7 +4973,7 @@
endchar
</CharString>
<CharString name="exclamdown">
- -329 54 442 rmoveto
+ -148 54 442 rmoveto
-31 26 -26 32 31 25 26 31 31 -25 26 -31 -32 -26 -26 -31 vhcurveto
58 -140 rmoveto
-16 2 -27 -383 -1 -19 -3 -22 1 -17 rlinecurve
@@ -4981,7 +4981,7 @@
endchar
</CharString>
<CharString name="f">
- -216 3 428 rmoveto
+ -35 3 428 rmoveto
-4 -22 5 -7 rlineto
112 hlineto
6 -8 rlineto
@@ -5005,7 +5005,7 @@
endchar
</CharString>
<CharString name="f.alt">
- -228 314 641 rmoveto
+ -47 314 641 rmoveto
28 -22 vlineto
-63 -60 -37 -70 -30 hvcurveto
-16 -36 -2 -51 -35 vvcurveto
@@ -5411,7 +5411,7 @@
endchar
</CharString>
<CharString name="five">
- -45 125 594 rmoveto
+ 136 125 594 rmoveto
6 6 4 6 48 126 3 2 87 vhcurveto
27 78 rlineto
-335 hlineto
@@ -5469,7 +5469,7 @@
endchar
</CharString>
<CharString name="five.dnom">
- 103 356 rmoveto
+ 12 103 356 rmoveto
4 5 2 4 32 86 2 1 59 vhcurveto
18 47 rlineto
-227 hlineto
@@ -5485,7 +5485,7 @@
endchar
</CharString>
<CharString name="five.numr">
- 103 631 rmoveto
+ 12 103 631 rmoveto
4 5 2 4 32 86 2 1 59 vhcurveto
18 47 rlineto
-227 hlineto
@@ -5501,7 +5501,7 @@
endchar
</CharString>
<CharString name="four">
- -45 285 669 rmoveto
+ 136 285 669 rmoveto
-286 -434 rlineto
-44 284 vlineto
7 -9 rlineto
@@ -5594,7 +5594,7 @@
endchar
</CharString>
<CharString name="four.dnom">
- 263 422 rmoveto
+ 12 263 422 rmoveto
-51 -21 -194 -260 rlineto
-26 192 vlineto
5 -6 rlineto
@@ -5618,7 +5618,7 @@
endchar
</CharString>
<CharString name="four.numr">
- 263 697 rmoveto
+ 12 263 697 rmoveto
-51 -21 -194 -260 rlineto
-26 192 vlineto
5 -6 rlineto
@@ -5642,7 +5642,7 @@
endchar
</CharString>
<CharString name="foursuperior">
- -69 263 697 rmoveto
+ 112 263 697 rmoveto
-51 -21 -194 -260 rlineto
-26 192 vlineto
5 -6 rlineto
@@ -5666,7 +5666,7 @@
endchar
</CharString>
<CharString name="fraction">
- -475 245 704 rmoveto
+ -294 245 704 rmoveto
-11 -1 -433 -680 rlineto
-14 vlineto
31 -13 437 688 -3 11 rlineto
@@ -5678,7 +5678,7 @@
endchar
</CharString>
<CharString name="g">
- -34 365 45 rmoveto
+ 147 365 45 rmoveto
-130 -4 -45 -107 -106 hhcurveto
-36 -46 41 36 -18 hvcurveto
-14 hlineto
@@ -5715,7 +5715,7 @@
endchar
</CharString>
<CharString name="gamma">
- 44 525 465 rmoveto
+ 225 525 465 rmoveto
-37 -95 -82 -160 -65 -120 -8 2 rcurveline
146 -24 231 -149 -77 -42 -34 -77 -50 vhcurveto
16 -12 23 27 rlineto
@@ -5731,7 +5731,7 @@
endchar
</CharString>
<CharString name="germandbls">
- -92 3 428 rmoveto
+ 89 3 428 rmoveto
-4 -22 5 -7 rlineto
68 hlineto
6 -8 rlineto
@@ -5770,17 +5770,17 @@
endchar
</CharString>
<CharString name="grave">
- -25 70 665 rmoveto
+ 156 70 665 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
endchar
</CharString>
<CharString name="gravecomb">
- -545 -130 665 rmoveto
+ -130 665 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
endchar
</CharString>
<CharString name="greater">
- 166 574 254 rmoveto
+ 347 574 254 rmoveto
-9 vlineto
-518 -243 -4 -9 8 -24 10 -5 600 280 rlineto
12 vlineto
@@ -5788,31 +5788,31 @@
endchar
</CharString>
<CharString name="guillemotleft">
- -129 374 36 rmoveto
+ 52 374 36 rmoveto
14 28 -150 170 149 146 -31 46 -192 -188 rlineto
58 -202 rmoveto
14 28 -150 170 149 146 -31 46 -192 -188 rlineto
endchar
</CharString>
<CharString name="guillemotright">
- -129 42 36 rmoveto
+ 52 42 36 rmoveto
210 202 -192 188 -31 -46 149 -146 -150 -170 rlineto
166 -28 rmoveto
210 202 -192 188 -31 -46 149 -146 -150 -170 rlineto
endchar
</CharString>
<CharString name="guilsinglleft">
- -281 12 238 rmoveto
+ -100 12 238 rmoveto
210 -202 14 28 -150 170 149 146 -31 46 rlineto
endchar
</CharString>
<CharString name="guilsinglright">
- -281 252 238 rmoveto
+ -100 252 238 rmoveto
-192 188 -31 -46 149 -146 -150 -170 14 -28 rlineto
endchar
</CharString>
<CharString name="h">
- 47 499 62 rmoveto
+ 228 499 62 rmoveto
-53 9 83 41 hvcurveto
74 vlineto
80 6 70 -30 28 vhcurveto
@@ -7098,14 +7098,14 @@
endchar
</CharString>
<CharString name="hyphen">
- -192 297 276 rmoveto
+ -11 297 276 rmoveto
-235 hlineto
-7 -40 rlineto
236 hlineto
endchar
</CharString>
<CharString name="i">
- -222 206 625 rmoveto
+ -41 206 625 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-14 -233 rmoveto
44 -17 35 -45 -39 -56 -39 -36 -45 vhcurveto
@@ -7121,7 +7121,7 @@
endchar
</CharString>
<CharString name="iacute">
- -287 279 707 rmoveto
+ -106 279 707 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
75 -161 rmoveto
44 -17 35 -45 -39 -56 -39 -36 -45 vhcurveto
@@ -7137,7 +7137,7 @@
endchar
</CharString>
<CharString name="icircumflex">
- -287 -51 562 rmoveto
+ -106 -51 562 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
74 -351 rmoveto
44 -17 35 -45 -39 -56 -39 -36 -45 vhcurveto
@@ -7153,7 +7153,7 @@
endchar
</CharString>
<CharString name="idieresis">
- -287 272 607 rmoveto
+ -106 272 607 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -7171,7 +7171,7 @@
endchar
</CharString>
<CharString name="igrave">
- -287 -70 708 rmoveto
+ -106 -70 708 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
215 -352 rmoveto
44 -17 35 -45 -39 -56 -39 -36 -45 vhcurveto
@@ -7187,7 +7187,7 @@
endchar
</CharString>
<CharString name="iota">
- -267 266 108 rmoveto
+ -86 266 108 rmoveto
-20 -20 -27 -24 -28 hhcurveto
-61 2 86 45 90 2 90 15 89 hvcurveto
-11 10 -81 -25 rlineto
@@ -7197,7 +7197,7 @@
endchar
</CharString>
<CharString name="j">
- -254 159 683 rmoveto
+ -73 159 683 rmoveto
-32 -27 -26 -32 -32 23 -28 33 35 26 30 34 32 -28 22 -30 hvcurveto
-141 -308 rmoveto
13 25 35 20 16 hhcurveto
@@ -7231,7 +7231,7 @@
endchar
</CharString>
<CharString name="k">
- -29 154 266 rmoveto
+ 152 154 266 rmoveto
413 vlineto
3 7 -160 -23 -5 -4 rlineto
-21 vlineto
@@ -7664,7 +7664,7 @@
endchar
</CharString>
<CharString name="kappa">
- -86 399 472 rmoveto
+ 95 399 472 rmoveto
-94 -60 -88 -68 -76 -82 -6 2 rcurveline
1 31 4 106 2 58 -5 6 rcurveline
-79 -26 rlineto
@@ -7768,7 +7768,7 @@
endchar
</CharString>
<CharString name="l">
- -244 289 112 rmoveto
+ -63 289 112 rmoveto
-22 -25 -28 -25 -33 hhcurveto
-60 4 12 91 99 vvcurveto
140 10 140 4 140 vhcurveto
@@ -8077,7 +8077,7 @@
endchar
</CharString>
<CharString name="lambda">
- -64 408 -11 rmoveto
+ 117 408 -11 rmoveto
73 26 rlineto
0 2 4 4 2 -1 1 1 vvcurveto
-101 220 -35 97 -18 52 -24 65 -30 107 -22 44 rrcurveto
@@ -8093,7 +8093,7 @@
endchar
</CharString>
<CharString name="less">
- 165 653 528 rmoveto
+ 346 653 528 rmoveto
-11 3 -601 -275 rlineto
-12 vlineto
600 -280 10 5 11 23 -5 10 -519 241 rlineto
@@ -8102,12 +8102,12 @@
endchar
</CharString>
<CharString name="logicalnot">
- 232 57 409 rmoveto
+ 413 57 409 rmoveto
-40 563 -163 40 203 vlineto
endchar
</CharString>
<CharString name="lscript">
- -165 296 84 rmoveto
+ 16 296 84 rmoveto
-10 -8 -28 -23 -19 hhcurveto
-68 -11 108 91 -7 hvcurveto
80 101 93 133 124 vvcurveto
@@ -8125,7 +8125,7 @@
endchar
</CharString>
<CharString name="m">
- 328 393 -10 rmoveto
+ 509 393 -10 rmoveto
74 28 rlineto
-4 69 -2 71 70 vvcurveto
46 1 46 1 46 vhcurveto
@@ -8158,14 +8158,14 @@
endchar
</CharString>
<CharString name="macron">
- -25 381 595 rmoveto
+ 156 381 595 rmoveto
-235 hlineto
-7 -40 rlineto
236 hlineto
endchar
</CharString>
<CharString name="macroncomb">
- -545 121 595 rmoveto
+ 121 595 rmoveto
-235 hlineto
-7 -40 rlineto
236 hlineto
@@ -8377,14 +8377,14 @@
endchar
</CharString>
<CharString name="minus">
- 347 778 276 rmoveto
+ 528 778 276 rmoveto
-657 hlineto
-7 -40 rlineto
658 hlineto
endchar
</CharString>
<CharString name="mu">
- 137 415 434 rmoveto
+ 318 415 434 rmoveto
2 -57 2 -57 -58 vvcurveto
-21 -2 -73 -1 -48 vhcurveto
-39 -36 -56 -24 -36 hhcurveto
@@ -8411,12 +8411,12 @@
endchar
</CharString>
<CharString name="multiply">
- 58 81 502 rmoveto
+ 239 81 502 rmoveto
-24 -32 215 -215 -214 -215 23 -33 220 219 219 -219 23 33 -214 215 218 217 -25 32 -221 -221 rlineto
endchar
</CharString>
<CharString name="n">
- 86 195 18 rmoveto
+ 267 195 18 rmoveto
-2 40 -1 30 28 vvcurveto
73 3 88 3 72 vhcurveto
29 36 47 32 46 hhcurveto
@@ -8439,7 +8439,7 @@
endchar
</CharString>
<CharString name="nine">
- -45 214 11 rmoveto
+ 136 214 11 rmoveto
13 83 69 97 54 79 rrcurveto
52 77 60 96 85 vvcurveto
107 -71 67 -145 vhcurveto
@@ -8485,7 +8485,7 @@
endchar
</CharString>
<CharString name="nine.dnom">
- 164 7 rmoveto
+ 12 164 7 rmoveto
10 50 46 58 36 47 rrcurveto
36 46 40 59 50 vvcurveto
65 -49 39 -98 vhcurveto
@@ -8501,7 +8501,7 @@
endchar
</CharString>
<CharString name="nine.numr">
- 164 282 rmoveto
+ 12 164 282 rmoveto
10 50 46 58 36 47 rrcurveto
36 46 40 59 50 vvcurveto
65 -49 39 -98 vhcurveto
@@ -8618,7 +8618,7 @@
endchar
</CharString>
<CharString name="ntilde">
- 86 315 620 rmoveto
+ 267 315 620 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -8646,7 +8646,7 @@
endchar
</CharString>
<CharString name="nu">
- -16 531 452 rmoveto
+ 165 531 452 rmoveto
-57 16 -23 -10 -33 -129 -36 -110 -56 -118 rlinecurve
-5 hlineto
-26 119 -37 164 -55 55 rrcurveto
@@ -8661,7 +8661,7 @@
endchar
</CharString>
<CharString name="numbersign">
- 170 10 228 rmoveto
+ 351 10 228 rmoveto
6 -10 rlineto
165 hlineto
-54 -195 4 -14 33 -3 59 212 rlineto
@@ -8690,7 +8690,7 @@
endchar
</CharString>
<CharString name="o">
- -8 431 59 rmoveto
+ 173 431 59 rmoveto
44 49 14 66 65 vvcurveto
112 -51 114 -147 -70 -71 -22 -52 -47 vhcurveto
-48 -52 -7 -74 -69 vvcurveto
@@ -8702,7 +8702,7 @@
endchar
</CharString>
<CharString name="oacute">
- -44 366 694 rmoveto
+ 137 366 694 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
227 -481 rmoveto
44 49 14 66 65 vvcurveto
@@ -8716,7 +8716,7 @@
endchar
</CharString>
<CharString name="ocircumflex">
- -44 88 555 rmoveto
+ 137 88 555 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
174 -677 rmoveto
44 49 14 66 65 vvcurveto
@@ -8730,7 +8730,7 @@
endchar
</CharString>
<CharString name="odieresis">
- -8 429 595 rmoveto
+ 173 429 595 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -8746,7 +8746,7 @@
endchar
</CharString>
<CharString name="oe">
- 270 770 319 rmoveto
+ 451 770 319 rmoveto
11 5 10 4 12 vvcurveto
77 -56 61 -86 -76 -72 -33 -57 -49 vhcurveto
-2 -2 rlineto
@@ -8786,7 +8786,7 @@
endchar
</CharString>
<CharString name="ograve">
- -44 121 695 rmoveto
+ 137 121 695 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
263 -672 rmoveto
44 49 14 66 65 vvcurveto
@@ -8800,7 +8800,7 @@
endchar
</CharString>
<CharString name="omega">
- 226 609 409 rmoveto
+ 407 609 409 rmoveto
48 -41 20 -63 -61 vvcurveto
-87 -45 -96 -94 -94 -14 118 85 65 0 71 9 59 vhcurveto
-6 7 -75 -28 rlineto
@@ -8818,7 +8818,7 @@
endchar
</CharString>
<CharString name="one">
- -45 92 639 rmoveto
+ 136 92 639 rmoveto
-27 83 vlineto
62 -15 -82 -111 -140 -3 -140 -5 -139 hvcurveto
12 -5 84 27 rlineto
@@ -8849,7 +8849,7 @@
endchar
</CharString>
<CharString name="one.dnom">
- 227 427 rmoveto
+ 12 227 427 rmoveto
-146 -44 rlineto
-16 56 vlineto
42 -10 -49 -67 -84 -2 -84 -3 -83 hvcurveto
@@ -8859,7 +8859,7 @@
endchar
</CharString>
<CharString name="one.numr">
- 227 702 rmoveto
+ 12 227 702 rmoveto
-146 -44 rlineto
-16 56 vlineto
42 -10 -49 -67 -84 -2 -84 -3 -83 hvcurveto
@@ -8869,7 +8869,7 @@
endchar
</CharString>
<CharString name="onehalf">
- 245 589 704 rmoveto
+ 426 589 704 rmoveto
-11 -1 -433 -680 rlineto
-14 vlineto
31 -13 437 688 -3 11 rlineto
@@ -8894,7 +8894,7 @@
endchar
</CharString>
<CharString name="onequarter">
- 245 593 704 rmoveto
+ 426 593 704 rmoveto
-11 -1 -433 -680 rlineto
-14 vlineto
31 -13 437 688 -3 11 rlineto
@@ -8929,7 +8929,7 @@
endchar
</CharString>
<CharString name="onesuperior">
- -367 -28 685 rmoveto
+ -186 -28 685 rmoveto
-17 61 vlineto
46 -12 -53 -73 -91 -3 -91 -3 -90 hvcurveto
9 -3 62 17 rlineto
@@ -8974,7 +8974,7 @@
endchar
</CharString>
<CharString name="ordfeminine">
- -179 269 550 rmoveto
+ 2 269 550 rmoveto
15 -23 -33 8 -23 hhcurveto
-123 -52 -90 -92 -59 28 -70 81 43 41 20 21 36 hvcurveto
11 -2 rlineto
@@ -8996,7 +8996,7 @@
endchar
</CharString>
<CharString name="ordmasculine">
- -189 159 261 rmoveto
+ -8 159 261 rmoveto
51 53 13 34 35 hvcurveto
32 32 10 43 42 vvcurveto
43 -13 48 -37 30 vhcurveto
@@ -9010,7 +9010,7 @@
endchar
</CharString>
<CharString name="oslash">
- -25 431 59 rmoveto
+ 156 431 59 rmoveto
44 49 14 66 65 vvcurveto
88 -31 88 -84 35 vhcurveto
28 65 -6 11 -22 5 -10 -2 -29 -68 rlineto
@@ -9034,7 +9034,7 @@
endchar
</CharString>
<CharString name="otilde">
- -8 267 620 rmoveto
+ 173 267 620 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -9051,7 +9051,7 @@
endchar
</CharString>
<CharString name="p">
- 17 203 -206 rmoveto
+ 198 203 -206 rmoveto
-6 52 -3 97 -1 46 10 8 rcurveline
-5 19 20 -3 20 hhcurveto
155 105 162 147 75 -35 92 -102 -69 -61 -31 -43 -54 hvcurveto
@@ -9071,7 +9071,7 @@
endchar
</CharString>
<CharString name="paragraph">
- -92 294 13 rmoveto
+ 89 294 13 rmoveto
62 -19 7 7 rlineto
-2 140 -5 141 141 vvcurveto
59 2 59 3 59 vhcurveto
@@ -9084,7 +9084,7 @@
endchar
</CharString>
<CharString name="parenleft">
- -288 246 -163 rmoveto
+ -107 246 -163 rmoveto
-129 25 -7 243 169 vvcurveto
151 9 254 127 32 vhcurveto
27 vlineto
@@ -9167,7 +9167,7 @@
endchar
</CharString>
<CharString name="parenright">
- -286 11 709 rmoveto
+ -105 11 709 rmoveto
81 -29 23 -94 15 -75 rrcurveto
15 -77 4 -80 -78 vvcurveto
-114 2 -122 -36 -107 vhcurveto
@@ -9250,7 +9250,7 @@
endchar
</CharString>
<CharString name="partialdiff">
- 22 100 548 rmoveto
+ 203 100 548 rmoveto
48 21 34 46 64 hhcurveto
93 52 -94 -100 25 hvcurveto
-4 -4 rlineto
@@ -9282,7 +9282,7 @@
endchar
</CharString>
<CharString name="percent">
- 282 228 795 rmoveto
+ 463 228 795 rmoveto
-85 -43 -83 -71 -92 vvcurveto
-71 44 -77 88 91 94 101 90 76 -73 67 -76 20 vhcurveto
475 -18 rmoveto
@@ -9309,17 +9309,17 @@
endchar
</CharString>
<CharString name="period">
- -273 69 52 rmoveto
+ -92 69 52 rmoveto
-37 30 -30 37 37 30 30 37 37 -30 30 -37 -37 -30 -30 -37 vhcurveto
endchar
</CharString>
<CharString name="periodcentered">
- -273 69 257 rmoveto
+ -92 69 257 rmoveto
-37 30 -30 37 37 30 30 37 37 -30 30 -37 -37 -30 -30 -37 vhcurveto
endchar
</CharString>
<CharString name="phi">
- 99 365 -156 rmoveto
+ 280 365 -156 rmoveto
-7 49 -4 50 49 vvcurveto
9 10 70 2 67 21 50 50 rlinecurve
48 48 21 69 67 vvcurveto
@@ -9348,7 +9348,7 @@
endchar
</CharString>
<CharString name="phi1">
- 97 265 -197 rmoveto
+ 278 265 -197 rmoveto
69 21 rlineto
-5 54 -3 53 54 vvcurveto
7 8 81 11 77 28 58 58 rlinecurve
@@ -9372,7 +9372,7 @@
endchar
</CharString>
<CharString name="pi">
- 51 171 460 rmoveto
+ 232 171 460 rmoveto
-58 -61 -1 -58 -18 hvcurveto
-28 -113 21 -9 18 46 rlineto
59 23 26 10 49 hhcurveto
@@ -9394,7 +9394,7 @@
endchar
</CharString>
<CharString name="pi1">
- 230 356 302 rmoveto
+ 411 356 302 rmoveto
2 -22 3 -22 -22 vvcurveto
-81 -26 -108 -104 -83 -29 104 73 53 17 52 38 37 vhcurveto
-10 21 rlineto
@@ -9413,7 +9413,7 @@
endchar
</CharString>
<CharString name="plus">
- 202 392 586 rmoveto
+ 383 392 586 rmoveto
-40 -6 rlineto
-304 -303 vlineto
-7 -40 rlineto
@@ -9425,7 +9425,7 @@
endchar
</CharString>
<CharString name="plusminus">
- 218 399 586 rmoveto
+ 399 399 586 rmoveto
-40 -6 rlineto
-304 -303 vlineto
-7 -40 rlineto
@@ -9439,7 +9439,7 @@
endchar
</CharString>
<CharString name="psi">
- 159 358 -189 rmoveto
+ 340 358 -189 rmoveto
76 35 rlineto
-7 49 -2 51 50 vvcurveto
7 8 rlineto
@@ -9469,7 +9469,7 @@
endchar
</CharString>
<CharString name="q">
- -37 451 -214 rmoveto
+ 144 451 -214 rmoveto
-8 89 -4 116 122 vvcurveto
132 4 139 10 116 vhcurveto
-21 4 -44 -59 -9 -2 rlineto
@@ -9550,7 +9550,7 @@
endchar
</CharString>
<CharString name="question">
- -192 162 222 rmoveto
+ -11 162 222 rmoveto
-16 12 -11 20 20 vvcurveto
62 122 61 47 79 vhcurveto
17 29 13 27 33 vvcurveto
@@ -9577,7 +9577,7 @@
endchar
</CharString>
<CharString name="questiondown">
- -192 271 442 rmoveto
+ -11 271 442 rmoveto
31 -25 26 -31 -32 -26 -26 -31 -31 26 -26 32 31 25 26 31 vhcurveto
-80 -176 rmoveto
16 -12 11 -20 -20 vvcurveto
@@ -9593,7 +9593,7 @@
endchar
</CharString>
<CharString name="quotedbl">
- -212 118 432 rmoveto
+ -31 118 432 rmoveto
21 191 rlineto
52 vlineto
14 -19 6 -14 -25 -9 -17 -21 -24 2 -25 3 -24 vhcurveto
@@ -9688,7 +9688,7 @@
endchar
</CharString>
<CharString name="quoteleft.1">
- -345 115 532 rmoveto
+ -164 115 532 rmoveto
-9 14 -5 15 16 vvcurveto
35 28 47 21 36 vhcurveto
-14 14 rlineto
@@ -9710,7 +9710,7 @@
endchar
</CharString>
<CharString name="quoteright.1">
- -348 66 395 rmoveto
+ -167 66 395 rmoveto
35 53 54 54 62 vvcurveto
42 -43 89 -28 -16 -32 -26 -15 -7 8 -16 6 -10 vhcurveto
35 -57 rlineto
@@ -9732,7 +9732,7 @@
endchar
</CharString>
<CharString name="quotesingle">
- -335 110 436 rmoveto
+ -154 110 436 rmoveto
26 186 rlineto
2 13 2 16 11 vvcurveto
19 -9 14 -21 -19 -17 -10 -19 -13 1 -12 1 -12 vhcurveto
@@ -9740,7 +9740,7 @@
endchar
</CharString>
<CharString name="r">
- -129 406 388 rmoveto
+ 52 406 388 rmoveto
52 -5 -25 27 -49 hhcurveto
-36 -45 -42 -28 -29 hvcurveto
-10 4 rlineto
@@ -9758,7 +9758,7 @@
endchar
</CharString>
<CharString name="registered">
- 87 45 437 rmoveto
+ 268 45 437 rmoveto
-150 121 -121 150 150 121 121 150 150 -121 121 -150 -150 -121 -121 -150 vhcurveto
25 hmoveto
136 110 110 136 136 110 -110 -136 -136 -110 -110 -136 -136 -110 110 136 vhcurveto
@@ -9808,7 +9808,7 @@
endchar
</CharString>
<CharString name="rho">
- -61 133 -169 rmoveto
+ 120 133 -169 rmoveto
2 4 4 8 1 vvcurveto
-20 78 -4 72 -3 80 7 1 rcurveline
-50 33 45 -28 54 hhcurveto
@@ -9823,14 +9823,14 @@
endchar
</CharString>
<CharString name="ring">
- -45 153 575 rmoveto
+ 136 153 575 rmoveto
-54 43 -43 54 54 43 43 54 54 -43 43 -54 -54 -43 -43 -54 vhcurveto
30 hmoveto
37 30 30 37 37 30 -30 -37 -37 -30 -30 -37 -37 -30 30 37 vhcurveto
endchar
</CharString>
<CharString name="ringcomb">
- -545 -97 575 rmoveto
+ -97 575 rmoveto
-54 43 -43 54 54 43 43 54 54 -43 43 -54 -54 -43 -43 -54 vhcurveto
30 hmoveto
37 30 30 37 37 30 -30 -37 -37 -30 -30 -37 -37 -30 30 37 vhcurveto
@@ -9864,7 +9864,7 @@
endchar
</CharString>
<CharString name="s">
- -166 342 383 rmoveto
+ 15 342 383 rmoveto
61 -3 -58 22 -52 hhcurveto
-83 -90 -53 -90 -66 62 -36 63 -26 hvcurveto
47 -20 52 -27 -51 vvcurveto
@@ -10351,7 +10351,7 @@
endchar
</CharString>
<CharString name="section">
- -80 35 23 rmoveto
+ 101 35 23 rmoveto
-99 9 91 -31 81 hhcurveto
99 81 83 99 26 -5 27 -13 21 hvcurveto
33 37 19 48 51 vvcurveto
@@ -10756,7 +10756,7 @@
endchar
</CharString>
<CharString name="semicolon">
- -352 29 400 rmoveto
+ -171 29 400 rmoveto
-31 26 -26 32 31 25 26 31 31 -25 26 -31 -32 -26 -26 -31 vhcurveto
41 -590 rmoveto
40 49 50 58 61 vvcurveto
@@ -10798,7 +10798,7 @@
endchar
</CharString>
<CharString name="seven">
- -45 494 686 rmoveto
+ 136 494 686 rmoveto
-208 hlineto
-45 -153 2 6 -18 hvcurveto
-21 -79 7 -9 rlineto
@@ -10860,7 +10860,7 @@
endchar
</CharString>
<CharString name="seven.dnom">
- 353 404 rmoveto
+ 12 353 404 rmoveto
8 -140 vlineto
-31 -103 0 4 -13 hvcurveto
-14 -47 4 -5 rlineto
@@ -10872,7 +10872,7 @@
endchar
</CharString>
<CharString name="seven.numr">
- 353 679 rmoveto
+ 12 353 679 rmoveto
8 -140 vlineto
-31 -103 0 4 -13 hvcurveto
-14 -47 4 -5 rlineto
@@ -10918,7 +10918,7 @@
endchar
</CharString>
<CharString name="sigma">
- -2 210 462 rmoveto
+ 179 210 462 rmoveto
-126 -5 -53 -118 -118 vvcurveto
-109 46 -117 129 145 87 101 132 83 -35 70 -67 45 vhcurveto
1 6 66 -8 69 -8 64 -13 rlinecurve
@@ -10930,7 +10930,7 @@
endchar
</CharString>
<CharString name="six">
- -45 471 688 rmoveto
+ 136 471 688 rmoveto
6 -15 -25 6 -15 hhcurveto
-89 -86 -59 -65 -62 hvcurveto
-89 -89 -45 -124 -125 vvcurveto
@@ -10998,7 +10998,7 @@
endchar
</CharString>
<CharString name="six.dnom">
- 338 413 rmoveto
+ 12 338 413 rmoveto
4 -10 -17 3 -10 hhcurveto
-61 -57 -35 -39 -43 hvcurveto
-60 -55 -31 -72 -76 vvcurveto
@@ -11013,7 +11013,7 @@
endchar
</CharString>
<CharString name="six.numr">
- 338 688 rmoveto
+ 12 338 688 rmoveto
4 -10 -17 3 -10 hhcurveto
-61 -57 -35 -39 -43 hvcurveto
-60 -55 -31 -72 -76 vvcurveto
@@ -11028,21 +11028,21 @@
endchar
</CharString>
<CharString name="slash">
- -155 380 720 rmoveto
+ 26 380 720 rmoveto
-11 -2 -386 -888 3 -14 32 -8 389 896 -5 11 rlineto
endchar
</CharString>
<CharString name="softhyphen">
- -545 endchar
+ endchar
</CharString>
<CharString name="space">
-218 endchar
</CharString>
<CharString name="space.1">
- -212 endchar
+ -31 endchar
</CharString>
<CharString name="sterling">
- 104 301 134 rmoveto
+ 285 301 134 rmoveto
0 -50 36 -80 vhcurveto
-3 6 31 16 49 50 17 61 rlinecurve
2 5 1 4 2 5 59 -1 36 -1 89 -3 4 48 rcurveline
@@ -11091,7 +11091,7 @@
endchar
</CharString>
<CharString name="t">
- -160 121 582 rmoveto
+ 21 121 582 rmoveto
2 -26 0 -64 -51 vvcurveto
-6 -6 rlineto
-104 hlineto
@@ -11555,7 +11555,7 @@
endchar
</CharString>
<CharString name="tau">
- -59 130 461 rmoveto
+ 122 130 461 rmoveto
-29 -39 -5 -30 -20 hvcurveto
-13 -20 -22 -102 -4 -19 22 -3 rcurveline
13 34 17 46 13 14 rrcurveto
@@ -11634,7 +11634,7 @@
endchar
</CharString>
<CharString name="theta">
- -45 214 -11 rmoveto
+ 136 214 -11 rmoveto
109 91 99 103 32 hvcurveto
18 56 5 61 60 vvcurveto
134 -16 193 -153 -115 -89 -82 -108 -34 vhcurveto
@@ -11653,7 +11653,7 @@
endchar
</CharString>
<CharString name="theta1">
- -19 114 118 rmoveto
+ 162 114 118 rmoveto
-76 31 -56 73 104 91 94 100 35 vhcurveto
17 50 7 47 53 vvcurveto
4 4 rlineto
@@ -11676,7 +11676,7 @@
endchar
</CharString>
<CharString name="thorn">
- -2 199 -206 rmoveto
+ 179 199 -206 rmoveto
-5 52 0 13 -2 46 10 8 rcurveline
-5 19 17 -3 20 hhcurveto
155 105 162 147 75 -35 92 -102 -69 -59 -31 -43 -54 hvcurveto
@@ -11697,7 +11697,7 @@
endchar
</CharString>
<CharString name="three">
- -45 241 382 rmoveto
+ 136 241 382 rmoveto
87 25 71 65 96 vvcurveto
87 -65 47 -81 -62 -68 -51 -43 -51 vhcurveto
14 -22 rlineto
@@ -11738,7 +11738,7 @@
endchar
</CharString>
<CharString name="three.dnom">
- 182 229 rmoveto
+ 12 182 229 rmoveto
59 15 48 39 58 vvcurveto
53 -46 27 -53 -42 -46 -30 -26 -35 vhcurveto
10 -13 rlineto
@@ -11755,7 +11755,7 @@
endchar
</CharString>
<CharString name="three.numr">
- 182 504 rmoveto
+ 12 182 504 rmoveto
59 15 48 39 58 vvcurveto
53 -46 27 -53 -42 -46 -30 -26 -35 vhcurveto
10 -13 rlineto
@@ -11786,7 +11786,7 @@
endchar
</CharString>
<CharString name="threequarters">
- 245 633 704 rmoveto
+ 426 633 704 rmoveto
-11 -1 -433 -680 rlineto
-14 vlineto
31 -13 437 688 -3 11 rlineto
@@ -11828,7 +11828,7 @@
endchar
</CharString>
<CharString name="threesuperior">
- -246 144 518 rmoveto
+ -65 144 518 rmoveto
64 16 52 43 62 vvcurveto
58 -50 29 -57 -46 -49 -32 -29 -38 vhcurveto
10 -14 rlineto
@@ -11845,7 +11845,7 @@
endchar
</CharString>
<CharString name="tilde">
- -45 249 620 rmoveto
+ 136 249 620 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -11853,7 +11853,7 @@
endchar
</CharString>
<CharString name="tildecomb">
- -545 -2 620 rmoveto
+ -2 620 rmoveto
-26 12 -31 13 -26 -4 -35 -5 -19 -58 -8 -35 14 -5 rcurveline
12 33 10 12 24 3 17 2 18 -4 15 -7 63 -30 rcurveline
20 -9 22 -6 22 3 32 4 20 56 3 29 -13 5 rcurveline
@@ -11879,7 +11879,7 @@
endchar
</CharString>
<CharString name="two">
- -45 467 88 rmoveto
+ 136 467 88 rmoveto
-309 hlineto
-5 -8 1 6 2 1 1 1 1 hvcurveto
84 98 88 101 59 107 rrcurveto
@@ -11945,7 +11945,7 @@
endchar
</CharString>
<CharString name="two.dnom">
- 338 48 rmoveto
+ 12 338 48 rmoveto
-3 5 rlineto
-209 hlineto
-4 -5 0 4 1 0 0 1 1 hvcurveto
@@ -11959,7 +11959,7 @@
endchar
</CharString>
<CharString name="two.numr">
- 338 323 rmoveto
+ 12 338 323 rmoveto
-3 5 rlineto
-209 hlineto
-4 -5 0 4 1 0 0 1 1 hvcurveto
@@ -11985,7 +11985,7 @@
endchar
</CharString>
<CharString name="twosuperior">
- -241 308 327 rmoveto
+ -60 308 327 rmoveto
-227 hlineto
-5 -4 1 4 1 0 0 1 1 hvcurveto
88 90 117 114 91 vvcurveto
@@ -11999,7 +11999,7 @@
endchar
</CharString>
<CharString name="u">
- 93 627 107 rmoveto
+ 274 627 107 rmoveto
-19 -26 -34 -23 -27 hhcurveto
-49 0 55 61 92 4 93 8 93 hvcurveto
-13 11 -77 -35 rlineto
@@ -12018,7 +12018,7 @@
endchar
</CharString>
<CharString name="uacute">
- 88 442 704 rmoveto
+ 269 442 704 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
347 -443 rmoveto
-19 -26 -34 -23 -27 hhcurveto
@@ -12039,7 +12039,7 @@
endchar
</CharString>
<CharString name="ucircumflex">
- 88 154 561 rmoveto
+ 269 154 561 rmoveto
23 -12 143 131 121 -124 39 26 -157 160 rlineto
304 -635 rmoveto
-19 -26 -34 -23 -27 hhcurveto
@@ -12060,7 +12060,7 @@
endchar
</CharString>
<CharString name="udieresis">
- 88 477 606 rmoveto
+ 269 477 606 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -12083,7 +12083,7 @@
endchar
</CharString>
<CharString name="ugrave">
- 88 187 705 rmoveto
+ 269 187 705 rmoveto
162 -154 18 31 -120 157 -13 2 rlineto
393 -634 rmoveto
-19 -26 -34 -23 -27 hhcurveto
@@ -12104,12 +12104,12 @@
endchar
</CharString>
<CharString name="underscore">
- 211 756 -74 rmoveto
+ 392 756 -74 rmoveto
-756 -40 756 hlineto
endchar
</CharString>
<CharString name="upsilon">
- -2 387 398 rmoveto
+ 179 387 398 rmoveto
39 -38 18 -53 -54 vvcurveto
-76 -35 -126 -129 -67 -27 58 72 vhcurveto
69 5 60 66 vvcurveto
@@ -12123,7 +12123,7 @@
endchar
</CharString>
<CharString name="v">
- -52 363 425 rmoveto
+ 129 363 425 rmoveto
-2 -2 -2 -3 -3 vvcurveto
-2 0 -2 1 -1 vhcurveto
17 -32 20 -36 -32 vvcurveto
@@ -12174,7 +12174,7 @@
endchar
</CharString>
<CharString name="w">
- 256 668 416 rmoveto
+ 437 668 416 rmoveto
18 -31 20 -40 -33 vvcurveto
-81 -63 -76 -51 -64 vhcurveto
-4 -3 -8 -6 -4 hhcurveto
@@ -12241,7 +12241,7 @@
endchar
</CharString>
<CharString name="weierstrass">
- 46 182 464 rmoveto
+ 227 182 464 rmoveto
-51 -37 -42 -55 -66 vvcurveto
-43 21 -42 20 -37 vhcurveto
-28 -52 -58 -121 -76 vvcurveto
@@ -12265,7 +12265,7 @@
endchar
</CharString>
<CharString name="x">
- -55 410 462 rmoveto
+ 126 410 462 rmoveto
2 -2 -2 0 -2 hhcurveto
-3 -2 0 -2 -1 hvcurveto
-44 -51 -59 -70 -36 -48 -6 1 rcurveline
@@ -12291,7 +12291,7 @@
endchar
</CharString>
<CharString name="xi">
- -73 411 -137 rmoveto
+ 108 411 -137 rmoveto
32 43 42 54 47 vvcurveto
42 -55 9 -32 vhcurveto
-40 -84 -11 -56 hhcurveto
@@ -12315,7 +12315,7 @@
endchar
</CharString>
<CharString name="y">
- 27 104 -182 rmoveto
+ 208 104 -182 rmoveto
-28 27 53 -23 47 hhcurveto
107 103 54 130 36 hvcurveto
22 95 -15 212 14 204 -12 4 rcurveline
@@ -12340,7 +12340,7 @@
endchar
</CharString>
<CharString name="yacute">
- 7 441 699 rmoveto
+ 188 441 699 rmoveto
-47 36 -13 -2 -120 -157 18 -31 rlineto
-175 -727 rmoveto
-28 27 53 -23 47 hhcurveto
@@ -12367,7 +12367,7 @@
endchar
</CharString>
<CharString name="ydieresis">
- 7 456 605 rmoveto
+ 188 456 605 rmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
-202 hmoveto
30 -20 28 -34 -29 -35 -22 -34 -32 26 -26 32 32 26 23 33 2 vhcurveto
@@ -12471,7 +12471,7 @@
endchar
</CharString>
<CharString name="yen">
- 78 250 299 rmoveto
+ 259 250 299 rmoveto
-21 0 -20 -21 vvcurveto
-149 hlineto
-7 -40 rlineto
@@ -12500,7 +12500,7 @@
endchar
</CharString>
<CharString name="z">
- -82 402 462 rmoveto
+ 99 402 462 rmoveto
-17 -21 -23 -14 -29 -56 -68 31 -51 hhcurveto
-29 -13 -10 -31 -20 hvcurveto
-50 -89 20 -13 rlineto
@@ -12541,7 +12541,7 @@
endchar
</CharString>
<CharString name="zero">
- -45 290 704 rmoveto
+ 136 290 704 rmoveto
-44 -18 -49 -24 -33 -33 rrcurveto
-85 -85 -45 -121 -120 vvcurveto
-115 28 -202 145 184 69 211 159 133 -49 138 -121 77 vhcurveto
@@ -12599,7 +12599,7 @@
endchar
</CharString>
<CharString name="zero.dnom">
- 215 422 rmoveto
+ 12 215 422 rmoveto
-29 -10 -34 -15 -22 -20 rrcurveto
-58 -51 -31 -72 -72 vvcurveto
-71 21 -119 97 122 50 123 99 79 -33 84 -83 45 vhcurveto
@@ -12612,7 +12612,7 @@
endchar
</CharString>
<CharString name="zero.numr">
- 215 697 rmoveto
+ 12 215 697 rmoveto
-29 -10 -34 -15 -22 -20 rrcurveto
-58 -51 -31 -72 -72 vvcurveto
-71 21 -119 97 122 50 123 99 79 -33 84 -83 45 vhcurveto
@@ -12625,7 +12625,7 @@
endchar
</CharString>
<CharString name="zeta">
- -124 363 -136 rmoveto
+ 57 363 -136 rmoveto
32 40 40 54 48 vvcurveto
36 -39 11 -41 vhcurveto
-24 -37 -6 -40 hhcurveto
diff --git a/Tests/merge/merge_test.py b/Tests/merge/merge_test.py
index 5ff12d1c..5558a2e3 100644
--- a/Tests/merge/merge_test.py
+++ b/Tests/merge/merge_test.py
@@ -16,222 +16,237 @@ import pytest
class MergeIntegrationTest(unittest.TestCase):
- def setUp(self):
- self.tempdir = None
- self.num_tempfiles = 0
-
- def tearDown(self):
- if self.tempdir:
- shutil.rmtree(self.tempdir)
-
- @staticmethod
- def getpath(testfile):
- path, _ = os.path.split(__file__)
- return os.path.join(path, "data", testfile)
-
- def temp_path(self, suffix):
- if not self.tempdir:
- self.tempdir = tempfile.mkdtemp()
- self.num_tempfiles += 1
- return os.path.join(self.tempdir, "tmp%d%s" % (self.num_tempfiles, suffix))
-
- IGNORED_LINES_RE = re.compile(
- "^(<ttFont | <(checkSumAdjustment|created|modified) ).*"
- )
- def read_ttx(self, path):
- lines = []
- with open(path, "r", encoding="utf-8") as ttx:
- for line in ttx.readlines():
- # Elide lines with data that often change.
- if self.IGNORED_LINES_RE.match(line):
- lines.append("\n")
- else:
- lines.append(line.rstrip() + "\n")
- return lines
-
- def expect_ttx(self, font, expected_ttx, tables=None):
- path = self.temp_path(suffix=".ttx")
- font.saveXML(path, tables=tables)
- actual = self.read_ttx(path)
- expected = self.read_ttx(expected_ttx)
- if actual != expected:
- for line in difflib.unified_diff(
- expected, actual, fromfile=expected_ttx, tofile=path):
- sys.stdout.write(line)
- self.fail("TTX output is different from expected")
-
- def compile_font(self, path, suffix):
- savepath = self.temp_path(suffix=suffix)
- font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- font.importXML(path)
- font.save(savepath, reorderTables=None)
- return font, savepath
-
-# -----
-# Tests
-# -----
-
- def test_merge_cff(self):
- _, fontpath1 = self.compile_font(self.getpath("CFFFont1.ttx"), ".otf")
- _, fontpath2 = self.compile_font(self.getpath("CFFFont2.ttx"), ".otf")
- mergedpath = self.temp_path(".otf")
- merge_main([fontpath1, fontpath2, "--output-file=%s" % mergedpath])
- mergedfont = ttLib.TTFont(mergedpath)
- self.expect_ttx(mergedfont, self.getpath("CFFFont_expected.ttx"))
+ def setUp(self):
+ self.tempdir = None
+ self.num_tempfiles = 0
+
+ def tearDown(self):
+ if self.tempdir:
+ shutil.rmtree(self.tempdir)
+
+ @staticmethod
+ def getpath(testfile):
+ path, _ = os.path.split(__file__)
+ return os.path.join(path, "data", testfile)
+
+ def temp_path(self, suffix):
+ if not self.tempdir:
+ self.tempdir = tempfile.mkdtemp()
+ self.num_tempfiles += 1
+ return os.path.join(self.tempdir, "tmp%d%s" % (self.num_tempfiles, suffix))
+
+ IGNORED_LINES_RE = re.compile(
+ "^(<ttFont | <(checkSumAdjustment|created|modified) ).*"
+ )
+
+ def read_ttx(self, path):
+ lines = []
+ with open(path, "r", encoding="utf-8") as ttx:
+ for line in ttx.readlines():
+ # Elide lines with data that often change.
+ if self.IGNORED_LINES_RE.match(line):
+ lines.append("\n")
+ else:
+ lines.append(line.rstrip() + "\n")
+ return lines
+
+ def expect_ttx(self, font, expected_ttx, tables=None):
+ path = self.temp_path(suffix=".ttx")
+ font.saveXML(path, tables=tables)
+ actual = self.read_ttx(path)
+ expected = self.read_ttx(expected_ttx)
+ if actual != expected:
+ for line in difflib.unified_diff(
+ expected, actual, fromfile=expected_ttx, tofile=path
+ ):
+ sys.stdout.write(line)
+ self.fail("TTX output is different from expected")
+
+ def compile_font(self, path, suffix):
+ savepath = self.temp_path(suffix=suffix)
+ font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ font.importXML(path)
+ font.save(savepath, reorderTables=None)
+ return font, savepath
+
+ # -----
+ # Tests
+ # -----
+
+ def test_merge_cff(self):
+ _, fontpath1 = self.compile_font(self.getpath("CFFFont1.ttx"), ".otf")
+ _, fontpath2 = self.compile_font(self.getpath("CFFFont2.ttx"), ".otf")
+ mergedpath = self.temp_path(".otf")
+ merge_main([fontpath1, fontpath2, "--output-file=%s" % mergedpath])
+ mergedfont = ttLib.TTFont(mergedpath)
+ self.expect_ttx(mergedfont, self.getpath("CFFFont_expected.ttx"))
class gaspMergeUnitTest(unittest.TestCase):
- def setUp(self):
- self.merger = Merger()
+ def setUp(self):
+ self.merger = Merger()
- self.table1 = ttLib.newTable('gasp')
- self.table1.version = 1
- self.table1.gaspRange = {
- 0x8: 0xA ,
- 0x10: 0x5,
- }
+ self.table1 = ttLib.newTable("gasp")
+ self.table1.version = 1
+ self.table1.gaspRange = {
+ 0x8: 0xA,
+ 0x10: 0x5,
+ }
- self.table2 = ttLib.newTable('gasp')
- self.table2.version = 1
- self.table2.gaspRange = {
- 0x6: 0xB ,
- 0xFF: 0x4,
- }
+ self.table2 = ttLib.newTable("gasp")
+ self.table2.version = 1
+ self.table2.gaspRange = {
+ 0x6: 0xB,
+ 0xFF: 0x4,
+ }
- self.result = ttLib.newTable('gasp')
+ self.result = ttLib.newTable("gasp")
- def test_gasp_merge_basic(self):
- result = self.result.merge(self.merger, [self.table1, self.table2])
- self.assertEqual(result, self.table1)
+ def test_gasp_merge_basic(self):
+ result = self.result.merge(self.merger, [self.table1, self.table2])
+ self.assertEqual(result, self.table1)
- result = self.result.merge(self.merger, [self.table2, self.table1])
- self.assertEqual(result, self.table2)
+ result = self.result.merge(self.merger, [self.table2, self.table1])
+ self.assertEqual(result, self.table2)
- def test_gasp_merge_notImplemented(self):
- result = self.result.merge(self.merger, [NotImplemented, self.table1])
- self.assertEqual(result, NotImplemented)
+ def test_gasp_merge_notImplemented(self):
+ result = self.result.merge(self.merger, [NotImplemented, self.table1])
+ self.assertEqual(result, NotImplemented)
- result = self.result.merge(self.merger, [self.table1, NotImplemented])
- self.assertEqual(result, self.table1)
+ result = self.result.merge(self.merger, [self.table1, NotImplemented])
+ self.assertEqual(result, self.table1)
class CmapMergeUnitTest(unittest.TestCase):
- def setUp(self):
- self.merger = Merger()
- self.table1 = ttLib.newTable('cmap')
- self.table2 = ttLib.newTable('cmap')
- self.mergedTable = ttLib.newTable('cmap')
- pass
-
- def tearDown(self):
- pass
-
-
- def makeSubtable(self, format, platformID, platEncID, cmap):
- module = ttLib.getTableModule('cmap')
- subtable = module.cmap_classes[format](format)
- (subtable.platformID,
- subtable.platEncID,
- subtable.language,
- subtable.cmap) = (platformID, platEncID, 0, cmap)
- return subtable
-
- # 4-3-1 table merged with 12-3-10 table with no dupes with codepoints outside BMP
- def test_cmap_merge_no_dupes(self):
- table1 = self.table1
- table2 = self.table2
- mergedTable = self.mergedTable
-
- cmap1 = {0x2603: 'SNOWMAN'}
- table1.tables = [self.makeSubtable(4,3,1, cmap1)]
-
- cmap2 = {0x26C4: 'SNOWMAN WITHOUT SNOW'}
- cmap2Extended = {0x1F93C: 'WRESTLERS'}
- cmap2Extended.update(cmap2)
- table2.tables = [self.makeSubtable(4,3,1, cmap2), self.makeSubtable(12,3,10, cmap2Extended)]
-
- self.merger.alternateGlyphsPerFont = [{},{}]
- mergedTable.merge(self.merger, [table1, table2])
-
- expectedCmap = cmap2.copy()
- expectedCmap.update(cmap1)
- expectedCmapExtended = cmap2Extended.copy()
- expectedCmapExtended.update(cmap1)
- self.assertEqual(mergedTable.numSubTables, 2)
- self.assertEqual([(table.format, table.platformID, table.platEncID, table.language) for table in mergedTable.tables],
- [(4,3,1,0),(12,3,10,0)])
- self.assertEqual(mergedTable.tables[0].cmap, expectedCmap)
- self.assertEqual(mergedTable.tables[1].cmap, expectedCmapExtended)
-
- # Tests Issue #322
- def test_cmap_merge_three_dupes(self):
- table1 = self.table1
- table2 = self.table2
- mergedTable = self.mergedTable
-
- cmap1 = {0x20: 'space#0', 0xA0: 'space#0'}
- table1.tables = [self.makeSubtable(4,3,1,cmap1)]
- cmap2 = {0x20: 'space#1', 0xA0: 'uni00A0#1'}
- table2.tables = [self.makeSubtable(4,3,1,cmap2)]
-
- self.merger.duplicateGlyphsPerFont = [{},{}]
- mergedTable.merge(self.merger, [table1, table2])
-
- expectedCmap = cmap1.copy()
- self.assertEqual(mergedTable.numSubTables, 1)
- table = mergedTable.tables[0]
- self.assertEqual((table.format, table.platformID, table.platEncID, table.language), (4,3,1,0))
- self.assertEqual(table.cmap, expectedCmap)
- self.assertEqual(self.merger.duplicateGlyphsPerFont, [{}, {'space#0': 'space#1'}])
+ def setUp(self):
+ self.merger = Merger()
+ self.table1 = ttLib.newTable("cmap")
+ self.table2 = ttLib.newTable("cmap")
+ self.mergedTable = ttLib.newTable("cmap")
+ pass
+
+ def tearDown(self):
+ pass
+
+ def makeSubtable(self, format, platformID, platEncID, cmap):
+ module = ttLib.getTableModule("cmap")
+ subtable = module.cmap_classes[format](format)
+ (subtable.platformID, subtable.platEncID, subtable.language, subtable.cmap) = (
+ platformID,
+ platEncID,
+ 0,
+ cmap,
+ )
+ return subtable
+
+ # 4-3-1 table merged with 12-3-10 table with no dupes with codepoints outside BMP
+ def test_cmap_merge_no_dupes(self):
+ table1 = self.table1
+ table2 = self.table2
+ mergedTable = self.mergedTable
+
+ cmap1 = {0x2603: "SNOWMAN"}
+ table1.tables = [self.makeSubtable(4, 3, 1, cmap1)]
+
+ cmap2 = {0x26C4: "SNOWMAN WITHOUT SNOW"}
+ cmap2Extended = {0x1F93C: "WRESTLERS"}
+ cmap2Extended.update(cmap2)
+ table2.tables = [
+ self.makeSubtable(4, 3, 1, cmap2),
+ self.makeSubtable(12, 3, 10, cmap2Extended),
+ ]
+
+ self.merger.alternateGlyphsPerFont = [{}, {}]
+ mergedTable.merge(self.merger, [table1, table2])
+
+ expectedCmap = cmap2.copy()
+ expectedCmap.update(cmap1)
+ expectedCmapExtended = cmap2Extended.copy()
+ expectedCmapExtended.update(cmap1)
+ self.assertEqual(mergedTable.numSubTables, 2)
+ self.assertEqual(
+ [
+ (table.format, table.platformID, table.platEncID, table.language)
+ for table in mergedTable.tables
+ ],
+ [(4, 3, 1, 0), (12, 3, 10, 0)],
+ )
+ self.assertEqual(mergedTable.tables[0].cmap, expectedCmap)
+ self.assertEqual(mergedTable.tables[1].cmap, expectedCmapExtended)
+
+ # Tests Issue #322
+ def test_cmap_merge_three_dupes(self):
+ table1 = self.table1
+ table2 = self.table2
+ mergedTable = self.mergedTable
+
+ cmap1 = {0x20: "space#0", 0xA0: "space#0"}
+ table1.tables = [self.makeSubtable(4, 3, 1, cmap1)]
+ cmap2 = {0x20: "space#1", 0xA0: "uni00A0#1"}
+ table2.tables = [self.makeSubtable(4, 3, 1, cmap2)]
+
+ self.merger.duplicateGlyphsPerFont = [{}, {}]
+ mergedTable.merge(self.merger, [table1, table2])
+
+ expectedCmap = cmap1.copy()
+ self.assertEqual(mergedTable.numSubTables, 1)
+ table = mergedTable.tables[0]
+ self.assertEqual(
+ (table.format, table.platformID, table.platEncID, table.language),
+ (4, 3, 1, 0),
+ )
+ self.assertEqual(table.cmap, expectedCmap)
+ self.assertEqual(
+ self.merger.duplicateGlyphsPerFont, [{}, {"space#0": "space#1"}]
+ )
def _compile(ttFont):
- buf = io.BytesIO()
- ttFont.save(buf)
- buf.seek(0)
- return buf
+ buf = io.BytesIO()
+ ttFont.save(buf)
+ buf.seek(0)
+ return buf
def _make_fontfile_with_OS2(*, version, **kwargs):
- upem = 1000
- glyphOrder = [".notdef", "a"]
- cmap = {0x61: "a"}
- glyphs = {gn: Glyph() for gn in glyphOrder}
- hmtx = {gn: (500, 0) for gn in glyphOrder}
- names = {"familyName": "TestOS2", "styleName": "Regular"}
+ upem = 1000
+ glyphOrder = [".notdef", "a"]
+ cmap = {0x61: "a"}
+ glyphs = {gn: Glyph() for gn in glyphOrder}
+ hmtx = {gn: (500, 0) for gn in glyphOrder}
+ names = {"familyName": "TestOS2", "styleName": "Regular"}
- fb = FontBuilder(unitsPerEm=upem)
- fb.setupGlyphOrder(glyphOrder)
- fb.setupCharacterMap(cmap)
- fb.setupGlyf(glyphs)
- fb.setupHorizontalMetrics(hmtx)
- fb.setupHorizontalHeader()
- fb.setupNameTable(names)
- fb.setupOS2(version=version, **kwargs)
+ fb = FontBuilder(unitsPerEm=upem)
+ fb.setupGlyphOrder(glyphOrder)
+ fb.setupCharacterMap(cmap)
+ fb.setupGlyf(glyphs)
+ fb.setupHorizontalMetrics(hmtx)
+ fb.setupHorizontalHeader()
+ fb.setupNameTable(names)
+ fb.setupOS2(version=version, **kwargs)
- return _compile(fb.font)
+ return _compile(fb.font)
def _merge_and_recompile(fontfiles, options=None):
- merger = Merger(options)
- merged = merger.merge(fontfiles)
- buf = _compile(merged)
- return ttLib.TTFont(buf)
+ merger = Merger(options)
+ merged = merger.merge(fontfiles)
+ buf = _compile(merged)
+ return ttLib.TTFont(buf)
-@pytest.mark.parametrize(
- "v1, v2", list(itertools.permutations(range(5+1), 2))
-)
+@pytest.mark.parametrize("v1, v2", list(itertools.permutations(range(5 + 1), 2)))
def test_merge_OS2_mixed_versions(v1, v2):
- # https://github.com/fonttools/fonttools/issues/1865
- fontfiles = [
- _make_fontfile_with_OS2(version=v1),
- _make_fontfile_with_OS2(version=v2),
- ]
- merged = _merge_and_recompile(fontfiles)
- assert merged["OS/2"].version == max(v1, v2)
+ # https://github.com/fonttools/fonttools/issues/1865
+ fontfiles = [
+ _make_fontfile_with_OS2(version=v1),
+ _make_fontfile_with_OS2(version=v2),
+ ]
+ merged = _merge_and_recompile(fontfiles)
+ assert merged["OS/2"].version == max(v1, v2)
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/misc/arrayTools_test.py b/Tests/misc/arrayTools_test.py
index 45b186fe..c8de7bda 100644
--- a/Tests/misc/arrayTools_test.py
+++ b/Tests/misc/arrayTools_test.py
@@ -1,24 +1,38 @@
from fontTools.misc.arrayTools import (
- calcBounds, calcIntBounds, updateBounds, pointInRect, pointsInRect,
- vectorLength, asInt16, normRect, scaleRect, offsetRect, insetRect,
- sectRect, unionRect, rectCenter, intRect)
+ calcBounds,
+ calcIntBounds,
+ updateBounds,
+ pointInRect,
+ pointsInRect,
+ vectorLength,
+ asInt16,
+ normRect,
+ scaleRect,
+ offsetRect,
+ insetRect,
+ sectRect,
+ unionRect,
+ rectCenter,
+ intRect,
+)
import math
def test_calcBounds():
assert calcBounds([]) == (0, 0, 0, 0)
- assert calcBounds(
- [(0, 40), (0, 100), (50, 50), (80, 10)]) == (0, 10, 80, 100)
+ assert calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)]) == (0, 10, 80, 100)
def test_calcIntBounds():
- assert calcIntBounds(
- [(0.1, 40.1), (0.1, 100.1), (49.9, 49.9), (78.5, 9.5)]
- ) == (0, 10, 79, 100)
+ assert calcIntBounds([(0.1, 40.1), (0.1, 100.1), (49.9, 49.9), (78.5, 9.5)]) == (
+ 0,
+ 10,
+ 79,
+ 100,
+ )
assert calcIntBounds(
- [(0.1, 40.1), (0.1, 100.1), (49.9, 49.9), (78.5, 9.5)],
- round=round
+ [(0.1, 40.1), (0.1, 100.1), (49.9, 49.9), (78.5, 9.5)], round=round
) == (0, 10, 78, 100)
@@ -36,8 +50,8 @@ def test_pointInRect():
def test_pointsInRect():
assert pointsInRect([], (0, 0, 100, 100)) == []
assert pointsInRect(
- [(50, 50), (0, 0), (100, 100), (101, 100)],
- (0, 0, 100, 100)) == [True, True, True, False]
+ [(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100)
+ ) == [True, True, True, False]
def test_vectorLength():
diff --git a/Tests/misc/bezierTools_test.py b/Tests/misc/bezierTools_test.py
index da73375d..8a3e2ecd 100644
--- a/Tests/misc/bezierTools_test.py
+++ b/Tests/misc/bezierTools_test.py
@@ -1,50 +1,67 @@
import fontTools.misc.bezierTools as bezierTools
from fontTools.misc.bezierTools import (
- calcQuadraticBounds, calcCubicBounds, curveLineIntersections,
- segmentPointAtT, splitLine, splitQuadratic, splitCubic, splitQuadraticAtT,
- splitCubicAtT, solveCubic)
+ calcQuadraticBounds,
+ calcQuadraticArcLength,
+ calcCubicBounds,
+ curveLineIntersections,
+ segmentPointAtT,
+ splitLine,
+ splitQuadratic,
+ splitCubic,
+ splitQuadraticAtT,
+ splitCubicAtT,
+ solveCubic,
+)
import pytest
def test_calcQuadraticBounds():
- assert calcQuadraticBounds(
- (0, 0), (50, 100), (100, 0)) == (0, 0, 100, 50.0)
- assert calcQuadraticBounds(
- (0, 0), (100, 0), (100, 100)) == (0.0, 0.0, 100, 100)
+ assert calcQuadraticBounds((0, 0), (50, 100), (100, 0)) == (0, 0, 100, 50.0)
+ assert calcQuadraticBounds((0, 0), (100, 0), (100, 100)) == (0.0, 0.0, 100, 100)
def test_calcCubicBounds():
- assert calcCubicBounds(
- (0, 0), (25, 100), (75, 100), (100, 0)) == ((0, 0, 100, 75.0))
- assert calcCubicBounds(
- (0, 0), (50, 0), (100, 50), (100, 100)) == (0.0, 0.0, 100, 100)
- assert calcCubicBounds(
- (50, 0), (0, 100), (100, 100), (50, 0)
- ) == pytest.approx((35.566243, 0.000000, 64.433757, 75.000000))
+ assert calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0)) == (
+ (0, 0, 100, 75.0)
+ )
+ assert calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100)) == (
+ 0.0,
+ 0.0,
+ 100,
+ 100,
+ )
+ assert calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0)) == pytest.approx(
+ (35.566243, 0.000000, 64.433757, 75.000000)
+ )
def test_splitLine():
- assert splitLine(
- (0, 0), (100, 100), where=50, isHorizontal=True
- ) == [((0, 0), (50.0, 50.0)), ((50.0, 50.0), (100, 100))]
- assert splitLine(
- (0, 0), (100, 100), where=100, isHorizontal=True
- ) == [((0, 0), (100, 100))]
- assert splitLine(
- (0, 0), (100, 100), where=0, isHorizontal=True
- ) == [((0, 0), (0, 0)), ((0, 0), (100, 100))]
- assert splitLine(
- (0, 0), (100, 100), where=0, isHorizontal=False
- ) == [((0, 0), (0, 0)), ((0, 0), (100, 100))]
- assert splitLine(
- (100, 0), (0, 0), where=50, isHorizontal=False
- ) == [((100, 0), (50, 0)), ((50, 0), (0, 0))]
- assert splitLine(
- (0, 100), (0, 0), where=50, isHorizontal=True
- ) == [((0, 100), (0, 50)), ((0, 50), (0, 0))]
- assert splitLine(
- (0, 100), (100, 100), where=50, isHorizontal=True
- ) == [((0, 100), (100, 100))]
+ assert splitLine((0, 0), (100, 100), where=50, isHorizontal=True) == [
+ ((0, 0), (50.0, 50.0)),
+ ((50.0, 50.0), (100, 100)),
+ ]
+ assert splitLine((0, 0), (100, 100), where=100, isHorizontal=True) == [
+ ((0, 0), (100, 100))
+ ]
+ assert splitLine((0, 0), (100, 100), where=0, isHorizontal=True) == [
+ ((0, 0), (0, 0)),
+ ((0, 0), (100, 100)),
+ ]
+ assert splitLine((0, 0), (100, 100), where=0, isHorizontal=False) == [
+ ((0, 0), (0, 0)),
+ ((0, 0), (100, 100)),
+ ]
+ assert splitLine((100, 0), (0, 0), where=50, isHorizontal=False) == [
+ ((100, 0), (50, 0)),
+ ((50, 0), (0, 0)),
+ ]
+ assert splitLine((0, 100), (0, 0), where=50, isHorizontal=True) == [
+ ((0, 100), (0, 50)),
+ ((0, 50), (0, 0)),
+ ]
+ assert splitLine((0, 100), (100, 100), where=50, isHorizontal=True) == [
+ ((0, 100), (100, 100))
+ ]
def assert_curves_approx_equal(actual_curves, expected_curves):
@@ -61,24 +78,24 @@ def test_splitQuadratic():
) == [((0, 0), (50, 100), (100, 0))]
assert splitQuadratic(
(0, 0), (50, 100), (100, 0), where=50, isHorizontal=False
- ) == [((0, 0), (25, 50), (50, 50)),
- ((50, 50), (75, 50), (100, 0))]
+ ) == [((0, 0), (25, 50), (50, 50)), ((50, 50), (75, 50), (100, 0))]
assert splitQuadratic(
(0, 0), (50, 100), (100, 0), where=25, isHorizontal=False
- ) == [((0, 0), (12.5, 25), (25, 37.5)),
- ((25, 37.5), (62.5, 75), (100, 0))]
+ ) == [((0, 0), (12.5, 25), (25, 37.5)), ((25, 37.5), (62.5, 75), (100, 0))]
assert_curves_approx_equal(
- splitQuadratic(
- (0, 0), (50, 100), (100, 0), where=25, isHorizontal=True),
- [((0, 0), (7.32233, 14.64466), (14.64466, 25)),
- ((14.64466, 25), (50, 75), (85.3553, 25)),
- ((85.3553, 25), (92.6777, 14.64466), (100, -7.10543e-15))])
+ splitQuadratic((0, 0), (50, 100), (100, 0), where=25, isHorizontal=True),
+ [
+ ((0, 0), (7.32233, 14.64466), (14.64466, 25)),
+ ((14.64466, 25), (50, 75), (85.3553, 25)),
+ ((85.3553, 25), (92.6777, 14.64466), (100, -7.10543e-15)),
+ ],
+ )
# XXX I'm not at all sure if the following behavior is desirable
- assert splitQuadratic(
- (0, 0), (50, 100), (100, 0), where=50, isHorizontal=True
- ) == [((0, 0), (25, 50), (50, 50)),
- ((50, 50), (50, 50), (50, 50)),
- ((50, 50), (75, 50), (100, 0))]
+ assert splitQuadratic((0, 0), (50, 100), (100, 0), where=50, isHorizontal=True) == [
+ ((0, 0), (25, 50), (50, 50)),
+ ((50, 50), (50, 50), (50, 50)),
+ ((50, 50), (75, 50), (100, 0)),
+ ]
def test_splitCubic():
@@ -87,41 +104,42 @@ def test_splitCubic():
) == [((0, 0), (25, 100), (75, 100), (100, 0))]
assert splitCubic(
(0, 0), (25, 100), (75, 100), (100, 0), where=50, isHorizontal=False
- ) == [((0, 0), (12.5, 50), (31.25, 75), (50, 75)),
- ((50, 75), (68.75, 75), (87.5, 50), (100, 0))]
+ ) == [
+ ((0, 0), (12.5, 50), (31.25, 75), (50, 75)),
+ ((50, 75), (68.75, 75), (87.5, 50), (100, 0)),
+ ]
assert_curves_approx_equal(
- splitCubic(
- (0, 0), (25, 100), (75, 100), (100, 0), where=25,
- isHorizontal=True),
- [((0, 0), (2.293792, 9.17517), (4.798045, 17.5085), (7.47414, 25)),
- ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667),
- (92.5259, 25)),
- ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517),
- (100, 1.77636e-15))])
+ splitCubic((0, 0), (25, 100), (75, 100), (100, 0), where=25, isHorizontal=True),
+ [
+ ((0, 0), (2.293792, 9.17517), (4.798045, 17.5085), (7.47414, 25)),
+ ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), (92.5259, 25)),
+ ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), (100, 1.77636e-15)),
+ ],
+ )
def test_splitQuadraticAtT():
- assert splitQuadraticAtT(
- (0, 0), (50, 100), (100, 0), 0.5
- ) == [((0, 0), (25, 50), (50, 50)),
- ((50, 50), (75, 50), (100, 0))]
- assert splitQuadraticAtT(
- (0, 0), (50, 100), (100, 0), 0.5, 0.75
- ) == [((0, 0), (25, 50), (50, 50)),
- ((50, 50), (62.5, 50), (75, 37.5)),
- ((75, 37.5), (87.5, 25), (100, 0))]
+ assert splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5) == [
+ ((0, 0), (25, 50), (50, 50)),
+ ((50, 50), (75, 50), (100, 0)),
+ ]
+ assert splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75) == [
+ ((0, 0), (25, 50), (50, 50)),
+ ((50, 50), (62.5, 50), (75, 37.5)),
+ ((75, 37.5), (87.5, 25), (100, 0)),
+ ]
def test_splitCubicAtT():
- assert splitCubicAtT(
- (0, 0), (25, 100), (75, 100), (100, 0), 0.5
- ) == [((0, 0), (12.5, 50), (31.25, 75), (50, 75)),
- ((50, 75), (68.75, 75), (87.5, 50), (100, 0))]
- assert splitCubicAtT(
- (0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75
- ) == [((0, 0), (12.5, 50), (31.25, 75), (50, 75)),
- ((50, 75), (59.375, 75), (68.75, 68.75), (77.34375, 56.25)),
- ((77.34375, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0))]
+ assert splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5) == [
+ ((0, 0), (12.5, 50), (31.25, 75), (50, 75)),
+ ((50, 75), (68.75, 75), (87.5, 50), (100, 0)),
+ ]
+ assert splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75) == [
+ ((0, 0), (12.5, 50), (31.25, 75), (50, 75)),
+ ((50, 75), (59.375, 75), (68.75, 68.75), (77.34375, 56.25)),
+ ((77.34375, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0)),
+ ]
def test_solveCubic():
@@ -164,3 +182,10 @@ def test_intersections_straight_line():
e = (110, 0)
pt = (109.05194805194802, 0.0)
assert bezierTools._line_t_of_pt(s, e, pt) == pytest.approx(0.98958184)
+
+
+def test_calcQuadraticArcLength():
+ # https://github.com/fonttools/fonttools/issues/3287
+ assert calcQuadraticArcLength(
+ (210, 333), (289, 333), (326.5, 290.5)
+ ) == pytest.approx(127.9225)
diff --git a/Tests/misc/classifyTools_test.py b/Tests/misc/classifyTools_test.py
index 72a97523..8f2b9d61 100644
--- a/Tests/misc/classifyTools_test.py
+++ b/Tests/misc/classifyTools_test.py
@@ -6,23 +6,28 @@ def test_classify():
assert classify([[]]) == ([], {})
assert classify([[], []]) == ([], {})
assert classify([[1]]) == ([{1}], {1: {1}})
- assert classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
- assert classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
- assert classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
- assert classify([[1,2],[2,4]]) == (
- [{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
- assert classify([[1,2],[2,4,5]]) == (
- [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
- assert classify([[1,2],[2,4,5]], sort=False) == (
- [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
- assert classify([[1,2,9],[2,4,5]], sort=False) == (
+ assert classify([[1, 2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
+ assert classify([[1], [2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
+ assert classify([[1, 2], [2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
+ assert classify([[1, 2], [2, 4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
+ assert classify([[1, 2], [2, 4, 5]]) == (
+ [{4, 5}, {1}, {2}],
+ {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}},
+ )
+ assert classify([[1, 2], [2, 4, 5]], sort=False) == (
+ [{1}, {4, 5}, {2}],
+ {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}},
+ )
+ assert classify([[1, 2, 9], [2, 4, 5]], sort=False) == (
[{1, 9}, {4, 5}, {2}],
- {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, 9: {1, 9}})
- assert classify([[1,2,9,15],[2,4,5]], sort=False) == (
+ {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, 9: {1, 9}},
+ )
+ assert classify([[1, 2, 9, 15], [2, 4, 5]], sort=False) == (
[{1, 9, 15}, {4, 5}, {2}],
- {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, 5: {4, 5}, 9: {1, 9, 15},
- 15: {1, 9, 15}})
- classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)
+ {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}},
+ )
+ classes, mapping = classify([[1, 2, 9, 15], [2, 4, 5], [15, 5]], sort=False)
assert set([frozenset(c) for c in classes]) == set(
- [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])
+ [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]
+ )
assert mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}
diff --git a/Tests/misc/eexec_test.py b/Tests/misc/eexec_test.py
index f72760a7..b02bbfe3 100644
--- a/Tests/misc/eexec_test.py
+++ b/Tests/misc/eexec_test.py
@@ -4,12 +4,12 @@ from fontTools.misc.eexec import decrypt, encrypt
def test_decrypt():
testStr = b"\0\0asdadads asds\265"
decryptedStr, R = decrypt(testStr, 12321)
- assert decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
+ assert decryptedStr == b"0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1"
assert R == 36142
def test_encrypt():
- testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
+ testStr = b"0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1"
encryptedStr, R = encrypt(testStr, 12321)
assert encryptedStr == b"\0\0asdadads asds\265"
assert R == 36142
diff --git a/Tests/misc/encodingTools_test.py b/Tests/misc/encodingTools_test.py
index 1a131f61..7c4e1435 100644
--- a/Tests/misc/encodingTools_test.py
+++ b/Tests/misc/encodingTools_test.py
@@ -1,30 +1,33 @@
import unittest
from fontTools.misc.encodingTools import getEncoding
-class EncodingTest(unittest.TestCase):
- def test_encoding_unicode(self):
+class EncodingTest(unittest.TestCase):
+ def test_encoding_unicode(self):
+ self.assertEqual(
+ getEncoding(3, 0, None), "utf_16_be"
+ ) # MS Symbol is Unicode as well
+ self.assertEqual(getEncoding(3, 1, None), "utf_16_be")
+ self.assertEqual(getEncoding(3, 10, None), "utf_16_be")
+ self.assertEqual(getEncoding(0, 3, None), "utf_16_be")
- self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well
- self.assertEqual(getEncoding(3, 1, None), "utf_16_be")
- self.assertEqual(getEncoding(3, 10, None), "utf_16_be")
- self.assertEqual(getEncoding(0, 3, None), "utf_16_be")
+ def test_encoding_macroman_misc(self):
+ self.assertEqual(getEncoding(1, 0, 17), "mac_turkish")
+ self.assertEqual(getEncoding(1, 0, 37), "mac_romanian")
+ self.assertEqual(getEncoding(1, 0, 45), "mac_roman")
- def test_encoding_macroman_misc(self):
- self.assertEqual(getEncoding(1, 0, 17), "mac_turkish")
- self.assertEqual(getEncoding(1, 0, 37), "mac_romanian")
- self.assertEqual(getEncoding(1, 0, 45), "mac_roman")
+ def test_extended_mac_encodings(self):
+ encoding = getEncoding(1, 1, 0) # Mac Japanese
+ decoded = b"\xfe".decode(encoding)
+ self.assertEqual(decoded, chr(0x2122))
- def test_extended_mac_encodings(self):
- encoding = getEncoding(1, 1, 0) # Mac Japanese
- decoded = b'\xfe'.decode(encoding)
- self.assertEqual(decoded, chr(0x2122))
+ def test_extended_unknown(self):
+ self.assertEqual(getEncoding(10, 11, 12), None)
+ self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii")
+ self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii")
- def test_extended_unknown(self):
- self.assertEqual(getEncoding(10, 11, 12), None)
- self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii")
- self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii")
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/misc/filenames_test.py b/Tests/misc/filenames_test.py
index bb7b63c2..f96156c0 100644
--- a/Tests/misc/filenames_test.py
+++ b/Tests/misc/filenames_test.py
@@ -1,136 +1,123 @@
import unittest
-from fontTools.misc.filenames import (
- userNameToFileName, handleClash1, handleClash2)
+from fontTools.misc.filenames import userNameToFileName, handleClash1, handleClash2
class UserNameToFilenameTest(unittest.TestCase):
+ def test_names(self):
+ self.assertEqual(userNameToFileName("a"), "a")
+ self.assertEqual(userNameToFileName("A"), "A_")
+ self.assertEqual(userNameToFileName("AE"), "A_E_")
+ self.assertEqual(userNameToFileName("Ae"), "A_e")
+ self.assertEqual(userNameToFileName("ae"), "ae")
+ self.assertEqual(userNameToFileName("aE"), "aE_")
+ self.assertEqual(userNameToFileName("a.alt"), "a.alt")
+ self.assertEqual(userNameToFileName("A.alt"), "A_.alt")
+ self.assertEqual(userNameToFileName("A.Alt"), "A_.A_lt")
+ self.assertEqual(userNameToFileName("A.aLt"), "A_.aL_t")
+ self.assertEqual(userNameToFileName("A.alT"), "A_.alT_")
+ self.assertEqual(userNameToFileName("T_H"), "T__H_")
+ self.assertEqual(userNameToFileName("T_h"), "T__h")
+ self.assertEqual(userNameToFileName("t_h"), "t_h")
+ self.assertEqual(userNameToFileName("F_F_I"), "F__F__I_")
+ self.assertEqual(userNameToFileName("f_f_i"), "f_f_i")
+ self.assertEqual(userNameToFileName("Aacute_V.swash"), "A_acute_V_.swash")
+ self.assertEqual(userNameToFileName(".notdef"), "_notdef")
+ self.assertEqual(userNameToFileName("con"), "_con")
+ self.assertEqual(userNameToFileName("CON"), "C_O_N_")
+ self.assertEqual(userNameToFileName("con.alt"), "_con.alt")
+ self.assertEqual(userNameToFileName("alt.con"), "alt._con")
+
+ def test_prefix_suffix(self):
+ prefix = "TEST_PREFIX"
+ suffix = "TEST_SUFFIX"
+ name = "NAME"
+ name_file = "N_A_M_E_"
+ self.assertEqual(
+ userNameToFileName(name, prefix=prefix, suffix=suffix),
+ prefix + name_file + suffix,
+ )
+
+ def test_collide(self):
+ prefix = "TEST_PREFIX"
+ suffix = "TEST_SUFFIX"
+ name = "NAME"
+ name_file = "N_A_M_E_"
+ collision_avoidance1 = "000000000000001"
+ collision_avoidance2 = "000000000000002"
+ exist = set()
+ generated = userNameToFileName(name, exist, prefix=prefix, suffix=suffix)
+ exist.add(generated.lower())
+ self.assertEqual(generated, prefix + name_file + suffix)
+ generated = userNameToFileName(name, exist, prefix=prefix, suffix=suffix)
+ exist.add(generated.lower())
+ self.assertEqual(generated, prefix + name_file + collision_avoidance1 + suffix)
+ generated = userNameToFileName(name, exist, prefix=prefix, suffix=suffix)
+ self.assertEqual(generated, prefix + name_file + collision_avoidance2 + suffix)
+
+ def test_ValueError(self):
+ with self.assertRaises(ValueError):
+ userNameToFileName(b"a")
+ with self.assertRaises(ValueError):
+ userNameToFileName({"a"})
+ with self.assertRaises(ValueError):
+ userNameToFileName(("a",))
+ with self.assertRaises(ValueError):
+ userNameToFileName(["a"])
+ with self.assertRaises(ValueError):
+ userNameToFileName(["a"])
+ with self.assertRaises(ValueError):
+ userNameToFileName(b"\xd8\x00")
+
+ def test_handleClash1(self):
+ prefix = ("0" * 5) + "."
+ suffix = "." + ("0" * 10)
+ existing = ["a" * 5]
+
+ e = list(existing)
+ self.assertEqual(
+ handleClash1(userName="A" * 5, existing=e, prefix=prefix, suffix=suffix),
+ "00000.AAAAA000000000000001.0000000000",
+ )
+
+ e = list(existing)
+ e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
+ self.assertEqual(
+ handleClash1(userName="A" * 5, existing=e, prefix=prefix, suffix=suffix),
+ "00000.AAAAA000000000000002.0000000000",
+ )
+
+ e = list(existing)
+ e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
+ self.assertEqual(
+ handleClash1(userName="A" * 5, existing=e, prefix=prefix, suffix=suffix),
+ "00000.AAAAA000000000000001.0000000000",
+ )
+
+ def test_handleClash2(self):
+ prefix = ("0" * 5) + "."
+ suffix = "." + ("0" * 10)
+ existing = [prefix + str(i) + suffix for i in range(100)]
+
+ e = list(existing)
+ self.assertEqual(
+ handleClash2(existing=e, prefix=prefix, suffix=suffix),
+ "00000.100.0000000000",
+ )
+
+ e = list(existing)
+ e.remove(prefix + "1" + suffix)
+ self.assertEqual(
+ handleClash2(existing=e, prefix=prefix, suffix=suffix), "00000.1.0000000000"
+ )
+
+ e = list(existing)
+ e.remove(prefix + "2" + suffix)
+ self.assertEqual(
+ handleClash2(existing=e, prefix=prefix, suffix=suffix), "00000.2.0000000000"
+ )
- def test_names(self):
- self.assertEqual(userNameToFileName("a"),"a")
- self.assertEqual(userNameToFileName("A"), "A_")
- self.assertEqual(userNameToFileName("AE"), "A_E_")
- self.assertEqual(userNameToFileName("Ae"), "A_e")
- self.assertEqual(userNameToFileName("ae"), "ae")
- self.assertEqual(userNameToFileName("aE"), "aE_")
- self.assertEqual(userNameToFileName("a.alt"), "a.alt")
- self.assertEqual(userNameToFileName("A.alt"), "A_.alt")
- self.assertEqual(userNameToFileName("A.Alt"), "A_.A_lt")
- self.assertEqual(userNameToFileName("A.aLt"), "A_.aL_t")
- self.assertEqual(userNameToFileName(u"A.alT"), "A_.alT_")
- self.assertEqual(userNameToFileName("T_H"), "T__H_")
- self.assertEqual(userNameToFileName("T_h"), "T__h")
- self.assertEqual(userNameToFileName("t_h"), "t_h")
- self.assertEqual(userNameToFileName("F_F_I"), "F__F__I_")
- self.assertEqual(userNameToFileName("f_f_i"), "f_f_i")
- self.assertEqual(
- userNameToFileName("Aacute_V.swash"),
- "A_acute_V_.swash")
- self.assertEqual(userNameToFileName(".notdef"), "_notdef")
- self.assertEqual(userNameToFileName("con"), "_con")
- self.assertEqual(userNameToFileName("CON"), "C_O_N_")
- self.assertEqual(userNameToFileName("con.alt"), "_con.alt")
- self.assertEqual(userNameToFileName("alt.con"), "alt._con")
-
- def test_prefix_suffix(self):
- prefix = "TEST_PREFIX"
- suffix = "TEST_SUFFIX"
- name = "NAME"
- name_file = "N_A_M_E_"
- self.assertEqual(
- userNameToFileName(name, prefix=prefix, suffix=suffix),
- prefix + name_file + suffix)
-
- def test_collide(self):
- prefix = "TEST_PREFIX"
- suffix = "TEST_SUFFIX"
- name = "NAME"
- name_file = "N_A_M_E_"
- collision_avoidance1 = "000000000000001"
- collision_avoidance2 = "000000000000002"
- exist = set()
- generated = userNameToFileName(
- name, exist, prefix=prefix, suffix=suffix)
- exist.add(generated.lower())
- self.assertEqual(generated, prefix + name_file + suffix)
- generated = userNameToFileName(
- name, exist, prefix=prefix, suffix=suffix)
- exist.add(generated.lower())
- self.assertEqual(
- generated,
- prefix + name_file + collision_avoidance1 + suffix)
- generated = userNameToFileName(
- name, exist, prefix=prefix, suffix=suffix)
- self.assertEqual(
- generated,
- prefix + name_file + collision_avoidance2+ suffix)
-
- def test_ValueError(self):
- with self.assertRaises(ValueError):
- userNameToFileName(b"a")
- with self.assertRaises(ValueError):
- userNameToFileName({"a"})
- with self.assertRaises(ValueError):
- userNameToFileName(("a",))
- with self.assertRaises(ValueError):
- userNameToFileName(["a"])
- with self.assertRaises(ValueError):
- userNameToFileName(["a"])
- with self.assertRaises(ValueError):
- userNameToFileName(b"\xd8\x00")
-
- def test_handleClash1(self):
- prefix = ("0" * 5) + "."
- suffix = "." + ("0" * 10)
- existing = ["a" * 5]
-
- e = list(existing)
- self.assertEqual(
- handleClash1(userName="A" * 5, existing=e, prefix=prefix,
- suffix=suffix),
- '00000.AAAAA000000000000001.0000000000'
- )
-
- e = list(existing)
- e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
- self.assertEqual(
- handleClash1(userName="A" * 5, existing=e, prefix=prefix,
- suffix=suffix),
- '00000.AAAAA000000000000002.0000000000'
- )
-
- e = list(existing)
- e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
- self.assertEqual(
- handleClash1(userName="A" * 5, existing=e, prefix=prefix,
- suffix=suffix),
- '00000.AAAAA000000000000001.0000000000'
- )
-
- def test_handleClash2(self):
- prefix = ("0" * 5) + "."
- suffix = "." + ("0" * 10)
- existing = [prefix + str(i) + suffix for i in range(100)]
-
- e = list(existing)
- self.assertEqual(
- handleClash2(existing=e, prefix=prefix, suffix=suffix),
- '00000.100.0000000000'
- )
-
- e = list(existing)
- e.remove(prefix + "1" + suffix)
- self.assertEqual(
- handleClash2(existing=e, prefix=prefix, suffix=suffix),
- '00000.1.0000000000'
- )
-
- e = list(existing)
- e.remove(prefix + "2" + suffix)
- self.assertEqual(
- handleClash2(existing=e, prefix=prefix, suffix=suffix),
- '00000.2.0000000000'
- )
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/misc/fixedTools_test.py b/Tests/misc/fixedTools_test.py
index dea61b90..3cabf3ab 100644
--- a/Tests/misc/fixedTools_test.py
+++ b/Tests/misc/fixedTools_test.py
@@ -10,10 +10,9 @@ import unittest
class FixedToolsTest(unittest.TestCase):
-
def test_roundtrip(self):
for bits in range(0, 15):
- for value in range(-(2**(bits+1)), 2**(bits+1)):
+ for value in range(-(2 ** (bits + 1)), 2 ** (bits + 1)):
self.assertEqual(value, floatToFixed(fixedToFloat(value, bits), bits))
def test_fixedToFloat_precision14(self):
@@ -31,18 +30,18 @@ class FixedToolsTest(unittest.TestCase):
self.assertAlmostEqual(10.0, fixedToFloat(640, 6))
def test_fixedToStr_precision14(self):
- self.assertEqual('0.8', fixedToStr(13107, 14))
- self.assertEqual('0.0', fixedToStr(0, 14))
- self.assertEqual('1.0', fixedToStr(16384, 14))
- self.assertEqual('-1.0', fixedToStr(-16384, 14))
- self.assertEqual('0.99994', fixedToStr(16383, 14))
- self.assertEqual('-0.99994', fixedToStr(-16383, 14))
+ self.assertEqual("0.8", fixedToStr(13107, 14))
+ self.assertEqual("0.0", fixedToStr(0, 14))
+ self.assertEqual("1.0", fixedToStr(16384, 14))
+ self.assertEqual("-1.0", fixedToStr(-16384, 14))
+ self.assertEqual("0.99994", fixedToStr(16383, 14))
+ self.assertEqual("-0.99994", fixedToStr(-16383, 14))
def test_fixedToStr_precision6(self):
- self.assertAlmostEqual('-9.98', fixedToStr(-639, 6))
- self.assertAlmostEqual('-10.0', fixedToStr(-640, 6))
- self.assertAlmostEqual('9.98', fixedToStr(639, 6))
- self.assertAlmostEqual('10.0', fixedToStr(640, 6))
+ self.assertAlmostEqual("-9.98", fixedToStr(-639, 6))
+ self.assertAlmostEqual("-10.0", fixedToStr(-640, 6))
+ self.assertAlmostEqual("9.98", fixedToStr(639, 6))
+ self.assertAlmostEqual("10.0", fixedToStr(640, 6))
def test_floatToFixed_precision14(self):
self.assertEqual(13107, floatToFixed(0.8, 14))
@@ -53,28 +52,28 @@ class FixedToolsTest(unittest.TestCase):
self.assertEqual(0, floatToFixed(0, 14))
def test_strToFixed_precision14(self):
- self.assertEqual(13107, strToFixed('0.8', 14))
- self.assertEqual(16384, strToFixed('1.0', 14))
- self.assertEqual(16384, strToFixed('1', 14))
- self.assertEqual(-16384, strToFixed('-1.0', 14))
- self.assertEqual(-16384, strToFixed('-1', 14))
- self.assertEqual(0, strToFixed('0', 14))
+ self.assertEqual(13107, strToFixed("0.8", 14))
+ self.assertEqual(16384, strToFixed("1.0", 14))
+ self.assertEqual(16384, strToFixed("1", 14))
+ self.assertEqual(-16384, strToFixed("-1.0", 14))
+ self.assertEqual(-16384, strToFixed("-1", 14))
+ self.assertEqual(0, strToFixed("0", 14))
def test_strToFixedToFloat_precision14(self):
- self.assertAlmostEqual(0.7999878, strToFixedToFloat('0.8', 14))
- self.assertEqual(0.0, strToFixedToFloat('0', 14))
- self.assertEqual(1.0, strToFixedToFloat('1.0', 14))
- self.assertEqual(-1.0, strToFixedToFloat('-1.0', 14))
- self.assertAlmostEqual(0.999939, strToFixedToFloat('0.99994', 14))
- self.assertAlmostEqual(-0.999939, strToFixedToFloat('-0.99994', 14))
+ self.assertAlmostEqual(0.7999878, strToFixedToFloat("0.8", 14))
+ self.assertEqual(0.0, strToFixedToFloat("0", 14))
+ self.assertEqual(1.0, strToFixedToFloat("1.0", 14))
+ self.assertEqual(-1.0, strToFixedToFloat("-1.0", 14))
+ self.assertAlmostEqual(0.999939, strToFixedToFloat("0.99994", 14))
+ self.assertAlmostEqual(-0.999939, strToFixedToFloat("-0.99994", 14))
def test_floatToFixedToStr_precision14(self):
- self.assertEqual('0.8', floatToFixedToStr(0.7999878, 14))
- self.assertEqual('1.0', floatToFixedToStr(1.0, 14))
- self.assertEqual('1.0', floatToFixedToStr(1, 14))
- self.assertEqual('-1.0', floatToFixedToStr(-1.0, 14))
- self.assertEqual('-1.0', floatToFixedToStr(-1, 14))
- self.assertEqual('0.0', floatToFixedToStr(0, 14))
+ self.assertEqual("0.8", floatToFixedToStr(0.7999878, 14))
+ self.assertEqual("1.0", floatToFixedToStr(1.0, 14))
+ self.assertEqual("1.0", floatToFixedToStr(1, 14))
+ self.assertEqual("-1.0", floatToFixedToStr(-1.0, 14))
+ self.assertEqual("-1.0", floatToFixedToStr(-1, 14))
+ self.assertEqual("0.0", floatToFixedToStr(0, 14))
def test_fixedToFloat_return_float(self):
value = fixedToFloat(16384, 14)
@@ -83,4 +82,5 @@ class FixedToolsTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/misc/loggingTools_test.py b/Tests/misc/loggingTools_test.py
index fd13044c..feccd7b3 100644
--- a/Tests/misc/loggingTools_test.py
+++ b/Tests/misc/loggingTools_test.py
@@ -17,9 +17,10 @@ def logger_name_generator():
basename = "fontTools.test#"
num = 1
while True:
- yield basename+str(num)
+ yield basename + str(num)
num += 1
+
unique_logger_name = logger_name_generator()
@@ -35,10 +36,11 @@ def test_LevelFormatter():
handler = logging.StreamHandler(stream)
formatter = LevelFormatter(
fmt={
- '*': '[%(levelname)s] %(message)s',
- 'DEBUG': '%(name)s [%(levelname)s] %(message)s',
- 'INFO': '%(message)s',
- })
+ "*": "[%(levelname)s] %(message)s",
+ "DEBUG": "%(name)s [%(levelname)s] %(message)s",
+ "INFO": "%(message)s",
+ }
+ )
handler.setFormatter(formatter)
name = next(unique_logger_name)
log = logging.getLogger(name)
@@ -49,19 +51,21 @@ def test_LevelFormatter():
log.info("this also uses a custom format string")
log.warning("this one uses the default format string")
- assert stream.getvalue() == textwrap.dedent("""\
+ assert stream.getvalue() == textwrap.dedent(
+ """\
%s [DEBUG] this uses a custom format string
this also uses a custom format string
[WARNING] this one uses the default format string
- """ % name)
+ """
+ % name
+ )
class TimerTest(object):
-
def test_split(self):
timer = Timer()
time.sleep(0.01)
- fist_lap = timer.split()
+ fist_lap = timer.split()
assert timer.elapsed == fist_lap
time.sleep(0.1)
second_lap = timer.split()
@@ -80,12 +84,13 @@ class TimerTest(object):
assert t.elapsed > 0
def test_using_logger(self, logger):
- with Timer(logger, 'do something'):
+ with Timer(logger, "do something"):
time.sleep(0.01)
assert re.match(
r"Took [0-9]\.[0-9]{3}s to do something",
- logger.handlers[0].stream.getvalue())
+ logger.handlers[0].stream.getvalue(),
+ )
def test_using_logger_calling_instance(self, logger):
timer = Timer(logger)
@@ -93,16 +98,17 @@ class TimerTest(object):
time.sleep(0.01)
assert re.match(
- r"elapsed time: [0-9]\.[0-9]{3}s",
- logger.handlers[0].stream.getvalue())
+ r"elapsed time: [0-9]\.[0-9]{3}s", logger.handlers[0].stream.getvalue()
+ )
# do it again but with custom level
- with timer('redo it', level=logging.WARNING):
+ with timer("redo it", level=logging.WARNING):
time.sleep(0.02)
assert re.search(
r"WARNING: Took [0-9]\.[0-9]{3}s to redo it",
- logger.handlers[0].stream.getvalue())
+ logger.handlers[0].stream.getvalue(),
+ )
def test_function_decorator(self, logger):
timer = Timer(logger)
@@ -110,7 +116,8 @@ class TimerTest(object):
@timer()
def test1():
time.sleep(0.01)
- @timer('run test 2', level=logging.INFO)
+
+ @timer("run test 2", level=logging.INFO)
def test2():
time.sleep(0.02)
@@ -118,44 +125,44 @@ class TimerTest(object):
assert re.match(
r"Took [0-9]\.[0-9]{3}s to run 'test1'",
- logger.handlers[0].stream.getvalue())
+ logger.handlers[0].stream.getvalue(),
+ )
test2()
assert re.search(
- r"Took [0-9]\.[0-9]{3}s to run test 2",
- logger.handlers[0].stream.getvalue())
+ r"Took [0-9]\.[0-9]{3}s to run test 2", logger.handlers[0].stream.getvalue()
+ )
def test_ChannelsFilter(logger):
n = logger.name
- filtr = ChannelsFilter(n+".A.B", n+".C.D")
+ filtr = ChannelsFilter(n + ".A.B", n + ".C.D")
handler = logger.handlers[0]
handler.addFilter(filtr)
stream = handler.stream
- logging.getLogger(n+".A.B").debug('this record passes through')
- assert 'this record passes through' in stream.getvalue()
+ logging.getLogger(n + ".A.B").debug("this record passes through")
+ assert "this record passes through" in stream.getvalue()
- logging.getLogger(n+'.A.B.C').debug('records from children also pass')
- assert 'records from children also pass' in stream.getvalue()
+ logging.getLogger(n + ".A.B.C").debug("records from children also pass")
+ assert "records from children also pass" in stream.getvalue()
- logging.getLogger(n+'.C.D').debug('this one as well')
- assert 'this one as well' in stream.getvalue()
+ logging.getLogger(n + ".C.D").debug("this one as well")
+ assert "this one as well" in stream.getvalue()
- logging.getLogger(n+'.A.B.').debug('also this one')
- assert 'also this one' in stream.getvalue()
+ logging.getLogger(n + ".A.B.").debug("also this one")
+ assert "also this one" in stream.getvalue()
before = stream.getvalue()
- logging.getLogger(n+'.A.F').debug('but this one does not!')
+ logging.getLogger(n + ".A.F").debug("but this one does not!")
assert before == stream.getvalue()
- logging.getLogger(n+'.C.DE').debug('neither this one!')
+ logging.getLogger(n + ".C.DE").debug("neither this one!")
assert before == stream.getvalue()
def test_LogMixin():
-
class Base(object):
pass
@@ -168,8 +175,8 @@ def test_LogMixin():
a = A()
b = B()
- assert hasattr(a, 'log')
- assert hasattr(b, 'log')
+ assert hasattr(a, "log")
+ assert hasattr(b, "log")
assert isinstance(a.log, logging.Logger)
assert isinstance(b.log, logging.Logger)
assert a.log.name == "loggingTools_test.A"
diff --git a/Tests/misc/macRes_test.py b/Tests/misc/macRes_test.py
index a6a8e9d4..deac29bf 100644
--- a/Tests/misc/macRes_test.py
+++ b/Tests/misc/macRes_test.py
@@ -17,80 +17,78 @@ data 'test' (130, "name3") { $"486F 7720 6172 6520 796F 753F" }; /* How are you
# $ /usr/bin/Rez testdata.rez -o compiled
# $ hexdump -v compiled/..namedfork/rsrc
TEST_RSRC_FORK = deHexStr(
- "00 00 01 00 00 00 01 22 00 00 00 22 00 00 00 64 " # 0x00000000
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000010
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000020
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000030
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000040
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000050
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000060
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000070
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000080
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000090
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000A0
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000B0
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000C0
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000D0
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000E0
- "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000F0
- "00 00 00 05 48 65 6c 6c 6f 00 00 00 05 57 6f 72 " # 0x00000100
- "6c 64 00 00 00 0c 48 6f 77 20 61 72 65 20 79 6f " # 0x00000110
- "75 3f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000120
- "00 00 00 00 00 00 00 00 00 00 00 1c 00 52 00 01 " # 0x00000130
- "54 45 53 54 00 01 00 12 74 65 73 74 00 00 00 2a " # 0x00000140
- "00 80 00 00 00 00 00 00 00 00 00 00 00 81 00 06 " # 0x00000150
- "00 00 00 09 00 00 00 00 00 82 00 0c 00 00 00 12 " # 0x00000160
- "00 00 00 00 05 6e 61 6d 65 31 05 6e 61 6d 65 32 " # 0x00000170
- "05 6e 61 6d 65 33 " # 0x00000180
+ "00 00 01 00 00 00 01 22 00 00 00 22 00 00 00 64 " # 0x00000000
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000010
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000020
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000030
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000040
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000050
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000060
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000070
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000080
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000090
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000A0
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000B0
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000C0
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000D0
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000E0
+ "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000F0
+ "00 00 00 05 48 65 6c 6c 6f 00 00 00 05 57 6f 72 " # 0x00000100
+ "6c 64 00 00 00 0c 48 6f 77 20 61 72 65 20 79 6f " # 0x00000110
+ "75 3f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000120
+ "00 00 00 00 00 00 00 00 00 00 00 1c 00 52 00 01 " # 0x00000130
+ "54 45 53 54 00 01 00 12 74 65 73 74 00 00 00 2a " # 0x00000140
+ "00 80 00 00 00 00 00 00 00 00 00 00 00 81 00 06 " # 0x00000150
+ "00 00 00 09 00 00 00 00 00 82 00 0c 00 00 00 12 " # 0x00000160
+ "00 00 00 00 05 6e 61 6d 65 31 05 6e 61 6d 65 32 " # 0x00000170
+ "05 6e 61 6d 65 33 " # 0x00000180
)
class ResourceReaderTest(unittest.TestCase):
+ def test_read_file(self):
+ infile = BytesIO(TEST_RSRC_FORK)
+ reader = ResourceReader(infile)
+ resources = [res for typ in reader.keys() for res in reader[typ]]
+ self.assertExpected(resources)
- def test_read_file(self):
- infile = BytesIO(TEST_RSRC_FORK)
- reader = ResourceReader(infile)
- resources = [res for typ in reader.keys() for res in reader[typ]]
- self.assertExpected(resources)
+ def test_read_datafork(self):
+ with tempfile.NamedTemporaryFile(delete=False) as tmp:
+ tmp.write(TEST_RSRC_FORK)
+ try:
+ reader = ResourceReader(tmp.name)
+ resources = [res for typ in reader.keys() for res in reader[typ]]
+ reader.close()
+ self.assertExpected(resources)
+ finally:
+ os.remove(tmp.name)
- def test_read_datafork(self):
- with tempfile.NamedTemporaryFile(delete=False) as tmp:
- tmp.write(TEST_RSRC_FORK)
- try:
- reader = ResourceReader(tmp.name)
- resources = [res for typ in reader.keys() for res in reader[typ]]
- reader.close()
- self.assertExpected(resources)
- finally:
- os.remove(tmp.name)
+ def test_read_namedfork_rsrc(self):
+ if sys.platform != "darwin":
+ self.skipTest('Not supported on "%s"' % sys.platform)
+ tmp = tempfile.NamedTemporaryFile(delete=False)
+ tmp.close()
+ try:
+ with open(tmp.name + "/..namedfork/rsrc", "wb") as fork:
+ fork.write(TEST_RSRC_FORK)
+ reader = ResourceReader(tmp.name)
+ resources = [res for typ in reader.keys() for res in reader[typ]]
+ reader.close()
+ self.assertExpected(resources)
+ finally:
+ os.remove(tmp.name)
- def test_read_namedfork_rsrc(self):
- if sys.platform != 'darwin':
- self.skipTest('Not supported on "%s"' % sys.platform)
- tmp = tempfile.NamedTemporaryFile(delete=False)
- tmp.close()
- try:
- with open(tmp.name + '/..namedfork/rsrc', 'wb') as fork:
- fork.write(TEST_RSRC_FORK)
- reader = ResourceReader(tmp.name)
- resources = [res for typ in reader.keys() for res in reader[typ]]
- reader.close()
- self.assertExpected(resources)
- finally:
- os.remove(tmp.name)
+ def assertExpected(self, resources):
+ self.assertRezEqual(resources[0], "TEST", b"Hello", 128, "name1")
+ self.assertRezEqual(resources[1], "TEST", b"World", 129, "name2")
+ self.assertRezEqual(resources[2], "test", b"How are you?", 130, "name3")
- def assertExpected(self, resources):
- self.assertRezEqual(resources[0], 'TEST', b'Hello', 128, 'name1')
- self.assertRezEqual(resources[1], 'TEST', b'World', 129, 'name2')
- self.assertRezEqual(
- resources[2], 'test', b'How are you?', 130, 'name3')
+ def assertRezEqual(self, res, type_, data, id, name):
+ self.assertEqual(res.type, type_)
+ self.assertEqual(res.data, data)
+ self.assertEqual(res.id, id)
+ self.assertEqual(res.name, name)
- def assertRezEqual(self, res, type_, data, id, name):
- self.assertEqual(res.type, type_)
- self.assertEqual(res.data, data)
- self.assertEqual(res.id, id)
- self.assertEqual(res.name, name)
-
-if __name__ == '__main__':
- sys.exit(unittest.main())
+if __name__ == "__main__":
+ sys.exit(unittest.main())
diff --git a/Tests/misc/plistlib_test.py b/Tests/misc/plistlib_test.py
index 5659d690..057df64a 100644
--- a/Tests/misc/plistlib_test.py
+++ b/Tests/misc/plistlib_test.py
@@ -8,9 +8,6 @@ from numbers import Integral
from fontTools.misc import etree
from fontTools.misc import plistlib
from fontTools.misc.textTools import tostr
-from fontTools.ufoLib.plistlib import (
- readPlist, readPlistFromString, writePlist, writePlistToString,
-)
import pytest
from collections.abc import Mapping
@@ -30,8 +27,8 @@ def _test_pl(use_builtin_types):
aList=["A", "B", 12, 32.5, [1, 2, 3]],
aFloat=0.5,
anInt=728,
- aBigInt=2 ** 63 - 44,
- aBigInt2=2 ** 63 + 44,
+ aBigInt=2**63 - 44,
+ aBigInt2=2**63 + 44,
aNegativeInt=-5,
aNegativeBigInt=-80000000000,
aDict=dict(
@@ -112,16 +109,16 @@ def test_invalid_type():
"pl",
[
0,
- 2 ** 8 - 1,
- 2 ** 8,
- 2 ** 16 - 1,
- 2 ** 16,
- 2 ** 32 - 1,
- 2 ** 32,
- 2 ** 63 - 1,
- 2 ** 64 - 1,
+ 2**8 - 1,
+ 2**8,
+ 2**16 - 1,
+ 2**16,
+ 2**32 - 1,
+ 2**32,
+ 2**63 - 1,
+ 2**64 - 1,
1,
- -2 ** 63,
+ -(2**63),
],
)
def test_int(pl):
@@ -133,9 +130,7 @@ def test_int(pl):
assert data == data2
-@pytest.mark.parametrize(
- "pl", [2 ** 64 + 1, 2 ** 127 - 1, -2 ** 64, -2 ** 127]
-)
+@pytest.mark.parametrize("pl", [2**64 + 1, 2**127 - 1, -(2**64), -(2**127)])
def test_int_overflow(pl):
with pytest.raises(OverflowError):
plistlib.dumps(pl)
@@ -186,9 +181,7 @@ def test_indentation_array():
def test_indentation_dict():
- data = {
- "1": {"2": {"3": {"4": {"5": {"6": {"7": {"8": {"9": "aaaaaa"}}}}}}}}
- }
+ data = {"1": {"2": {"3": {"4": {"5": {"6": {"7": {"8": {"9": "aaaaaa"}}}}}}}}}
assert plistlib.loads(plistlib.dumps(data)) == data
@@ -226,9 +219,7 @@ def test_bytesio(parametrized_pl):
pl, use_builtin_types = parametrized_pl
b = BytesIO()
plistlib.dump(pl, b, use_builtin_types=use_builtin_types)
- pl2 = plistlib.load(
- BytesIO(b.getvalue()), use_builtin_types=use_builtin_types
- )
+ pl2 = plistlib.load(BytesIO(b.getvalue()), use_builtin_types=use_builtin_types)
assert pl == pl2
@@ -242,9 +233,7 @@ def test_keysort_bytesio(sort_keys):
b = BytesIO()
plistlib.dump(pl, b, sort_keys=sort_keys)
- pl2 = plistlib.load(
- BytesIO(b.getvalue()), dict_type=collections.OrderedDict
- )
+ pl2 = plistlib.load(BytesIO(b.getvalue()), dict_type=collections.OrderedDict)
assert dict(pl) == dict(pl2)
if sort_keys:
@@ -362,9 +351,7 @@ def test_invalidarray():
"<true/><key>key inside an array3</key>",
]:
with pytest.raises(ValueError):
- plistlib.loads(
- ("<plist><array>%s</array></plist>" % i).encode("utf-8")
- )
+ plistlib.loads(("<plist><array>%s</array></plist>" % i).encode("utf-8"))
def test_invaliddict():
@@ -447,9 +434,7 @@ def test_no_pretty_print(use_builtin_types):
use_builtin_types=use_builtin_types,
)
assert data == (
- plistlib.XML_DECLARATION
- + plistlib.PLIST_DOCTYPE
- + b'<plist version="1.0">'
+ plistlib.XML_DECLARATION + plistlib.PLIST_DOCTYPE + b'<plist version="1.0">'
b"<dict>"
b"<key>data</key>"
b"<data>aGVsbG8=</data>"
@@ -459,45 +444,51 @@ def test_no_pretty_print(use_builtin_types):
def test_readPlist_from_path(pl):
+ old_plistlib = pytest.importorskip("fontTools.ufoLib.plistlib")
path = os.path.join(datadir, "test.plist")
- pl2 = readPlist(path)
+ pl2 = old_plistlib.readPlist(path)
assert isinstance(pl2["someData"], plistlib.Data)
assert pl2 == pl
def test_readPlist_from_file(pl):
+ old_plistlib = pytest.importorskip("fontTools.ufoLib.plistlib")
with open(os.path.join(datadir, "test.plist"), "rb") as f:
- pl2 = readPlist(f)
+ pl2 = old_plistlib.readPlist(f)
assert isinstance(pl2["someData"], plistlib.Data)
assert pl2 == pl
assert not f.closed
def test_readPlistFromString(pl):
- pl2 = readPlistFromString(TESTDATA)
+ old_plistlib = pytest.importorskip("fontTools.ufoLib.plistlib")
+ pl2 = old_plistlib.readPlistFromString(TESTDATA)
assert isinstance(pl2["someData"], plistlib.Data)
assert pl2 == pl
def test_writePlist_to_path(tmpdir, pl_no_builtin_types):
+ old_plistlib = pytest.importorskip("fontTools.ufoLib.plistlib")
testpath = tmpdir / "test.plist"
- writePlist(pl_no_builtin_types, str(testpath))
+ old_plistlib.writePlist(pl_no_builtin_types, str(testpath))
with testpath.open("rb") as fp:
pl2 = plistlib.load(fp, use_builtin_types=False)
assert pl2 == pl_no_builtin_types
def test_writePlist_to_file(tmpdir, pl_no_builtin_types):
+ old_plistlib = pytest.importorskip("fontTools.ufoLib.plistlib")
testpath = tmpdir / "test.plist"
with testpath.open("wb") as fp:
- writePlist(pl_no_builtin_types, fp)
+ old_plistlib.writePlist(pl_no_builtin_types, fp)
with testpath.open("rb") as fp:
pl2 = plistlib.load(fp, use_builtin_types=False)
assert pl2 == pl_no_builtin_types
def test_writePlistToString(pl_no_builtin_types):
- data = writePlistToString(pl_no_builtin_types)
+ old_plistlib = pytest.importorskip("fontTools.ufoLib.plistlib")
+ data = old_plistlib.writePlistToString(pl_no_builtin_types)
pl2 = plistlib.loads(data)
assert pl2 == pl_no_builtin_types
diff --git a/Tests/misc/psCharStrings_test.py b/Tests/misc/psCharStrings_test.py
index 5e36fe73..5eb2f774 100644
--- a/Tests/misc/psCharStrings_test.py
+++ b/Tests/misc/psCharStrings_test.py
@@ -13,11 +13,10 @@ import unittest
def hexenc(s):
- return ' '.join('%02x' % x for x in s)
+ return " ".join("%02x" % x for x in s)
class T2CharStringTest(unittest.TestCase):
-
@classmethod
def stringToT2CharString(cls, string):
return T2CharString(program=stringToProgram(string), private=PrivateDict())
@@ -28,50 +27,69 @@ class T2CharStringTest(unittest.TestCase):
self.assertEqual(bounds, None)
def test_calcBounds_line(self):
- cs = self.stringToT2CharString("100 100 rmoveto 40 10 rlineto -20 50 rlineto endchar")
+ cs = self.stringToT2CharString(
+ "100 100 rmoveto 40 10 rlineto -20 50 rlineto endchar"
+ )
bounds = cs.calcBounds(None)
self.assertEqual(bounds, (100, 100, 140, 160))
def test_calcBounds_curve(self):
- cs = self.stringToT2CharString("100 100 rmoveto -50 -150 200 0 -50 150 rrcurveto endchar")
+ cs = self.stringToT2CharString(
+ "100 100 rmoveto -50 -150 200 0 -50 150 rrcurveto endchar"
+ )
bounds = cs.calcBounds(None)
self.assertEqual(bounds, (91.90524980688875, -12.5, 208.09475019311125, 100))
def test_charstring_bytecode_optimization(self):
cs = self.stringToT2CharString(
- "100.0 100 rmoveto -50.0 -150 200.5 0.0 -50 150 rrcurveto endchar")
+ "100.0 100 rmoveto -50.0 -150 200.5 0.0 -50 150 rrcurveto endchar"
+ )
cs.isCFF2 = False
cs.private._isCFF2 = False
cs.compile()
cs.decompile()
self.assertEqual(
- cs.program, [100, 100, 'rmoveto', -50, -150, 200.5, 0, -50, 150,
- 'rrcurveto', 'endchar'])
+ cs.program,
+ [
+ 100,
+ 100,
+ "rmoveto",
+ -50,
+ -150,
+ 200.5,
+ 0,
+ -50,
+ 150,
+ "rrcurveto",
+ "endchar",
+ ],
+ )
cs2 = self.stringToT2CharString(
- "100.0 rmoveto -50.0 -150 200.5 0.0 -50 150 rrcurveto")
+ "100.0 rmoveto -50.0 -150 200.5 0.0 -50 150 rrcurveto"
+ )
cs2.isCFF2 = True
cs2.private._isCFF2 = True
cs2.compile(isCFF2=True)
cs2.decompile()
self.assertEqual(
- cs2.program, [100, 'rmoveto', -50, -150, 200.5, 0, -50, 150,
- 'rrcurveto'])
+ cs2.program, [100, "rmoveto", -50, -150, 200.5, 0, -50, 150, "rrcurveto"]
+ )
def test_encodeFloat(self):
testNums = [
# value expected result
- (-9.399999999999999, '1e e9 a4 ff'), # -9.4
- (9.399999999999999999, '1e 9a 4f'), # 9.4
- (456.8, '1e 45 6a 8f'), # 456.8
- (0.0, '1e 0f'), # 0
- (-0.0, '1e 0f'), # 0
- (1.0, '1e 1f'), # 1
- (-1.0, '1e e1 ff'), # -1
- (98765.37e2, '1e 98 76 53 7f'), # 9876537
- (1234567890.0, '1e 1a 23 45 67 9b 09 ff'), # 1234567890
- (9.876537e-4, '1e a0 00 98 76 53 7f'), # 9.876537e-24
- (9.876537e+4, '1e 98 76 5a 37 ff'), # 9.876537e+24
+ (-9.399999999999999, "1e e9 a4 ff"), # -9.4
+ (9.399999999999999999, "1e 9a 4f"), # 9.4
+ (456.8, "1e 45 6a 8f"), # 456.8
+ (0.0, "1e 0f"), # 0
+ (-0.0, "1e 0f"), # 0
+ (1.0, "1e 1f"), # 1
+ (-1.0, "1e e1 ff"), # -1
+ (98765.37e2, "1e 98 76 53 7f"), # 9876537
+ (1234567890.0, "1e 1a 23 45 67 9b 09 ff"), # 1234567890
+ (9.876537e-4, "1e a0 00 98 76 53 7f"), # 9.876537e-24
+ (9.876537e4, "1e 98 76 5a 37 ff"), # 9.876537e+24
]
for sample in testNums:
@@ -87,22 +105,22 @@ class T2CharStringTest(unittest.TestCase):
encoded_result,
1,
)
- self.assertEqual(decoded_result[0], float('%.8g' % sample[0]))
+ self.assertEqual(decoded_result[0], float("%.8g" % sample[0]))
# We limit to 8 digits of precision to match the implementation
# of encodeFloat.
def test_encode_decode_fixed(self):
testNums = [
# value expected hex expected float
- (-9.399999999999999, 'ff ff f6 99 9a', -9.3999939),
- (-9.4, 'ff ff f6 99 9a', -9.3999939),
- (9.399999999999999999, 'ff 00 09 66 66', 9.3999939),
- (9.4, 'ff 00 09 66 66', 9.3999939),
- (456.8, 'ff 01 c8 cc cd', 456.8000031),
- (-456.8, 'ff fe 37 33 33', -456.8000031),
+ (-9.399999999999999, "ff ff f6 99 9a", -9.3999939),
+ (-9.4, "ff ff f6 99 9a", -9.3999939),
+ (9.399999999999999999, "ff 00 09 66 66", 9.3999939),
+ (9.4, "ff 00 09 66 66", 9.3999939),
+ (456.8, "ff 01 c8 cc cd", 456.8000031),
+ (-456.8, "ff fe 37 33 33", -456.8000031),
]
- for (value, expected_hex, expected_float) in testNums:
+ for value, expected_hex, expected_float in testNums:
encoded_result = encodeFixed(value)
# check to see if we got the expected bytes
@@ -119,11 +137,11 @@ class T2CharStringTest(unittest.TestCase):
def test_toXML(self):
program = [
- '107 53.4004 166.199 hstem',
- '174.6 163.801 vstem',
- '338.4 142.8 rmoveto',
- '28 0 21.9 9 15.8 18 15.8 18 7.9 20.79959 0 23.6 rrcurveto',
- 'endchar'
+ "107 53.4004 166.199 hstem",
+ "174.6 163.801 vstem",
+ "338.4 142.8 rmoveto",
+ "28 0 21.9 9 15.8 18 15.8 18 7.9 20.79959 0 23.6 rrcurveto",
+ "endchar",
]
cs = self.stringToT2CharString(" ".join(program))
@@ -133,21 +151,31 @@ class T2CharStringTest(unittest.TestCase):
cs = T2CharString()
for name, attrs, content in parseXML(
[
- '<CharString name="period">'
- ' 338.4 142.8 rmoveto',
- ' 28 0 21.9 9 15.8 18 15.8 18 7.9 20.79959 0 23.6 rrcurveto',
- ' endchar'
- '</CharString>'
+ '<CharString name="period">' " 338.4 142.8 rmoveto",
+ " 28 0 21.9 9 15.8 18 15.8 18 7.9 20.79959 0 23.6 rrcurveto",
+ " endchar" "</CharString>",
]
):
cs.fromXML(name, attrs, content)
expected_program = [
- 338.3999939, 142.8000031, 'rmoveto',
- 28, 0, 21.8999939, 9, 15.8000031,
- 18, 15.8000031, 18, 7.8999939,
- 20.7995911, 0, 23.6000061, 'rrcurveto',
- 'endchar'
+ 338.3999939,
+ 142.8000031,
+ "rmoveto",
+ 28,
+ 0,
+ 21.8999939,
+ 9,
+ 15.8000031,
+ 18,
+ 15.8000031,
+ 18,
+ 7.8999939,
+ 20.7995911,
+ 0,
+ 23.6000061,
+ "rrcurveto",
+ "endchar",
]
self.assertEqual(len(cs.program), len(expected_program))
@@ -162,12 +190,15 @@ class T2CharStringTest(unittest.TestCase):
def test_pen_closePath(self):
# Test CFF2/T2 charstring: it does NOT end in "endchar"
# https://github.com/fonttools/fonttools/issues/2455
- cs = self.stringToT2CharString("100 100 rmoveto -50 -150 200 0 -50 150 rrcurveto")
+ cs = self.stringToT2CharString(
+ "100 100 rmoveto -50 -150 200 0 -50 150 rrcurveto"
+ )
pen = RecordingPen()
cs.draw(pen)
- self.assertEqual(pen.value[-1], ('closePath', ()))
+ self.assertEqual(pen.value[-1], ("closePath", ()))
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/misc/py23_test.py b/Tests/misc/py23_test.py
index 61274cc2..30382455 100644
--- a/Tests/misc/py23_test.py
+++ b/Tests/misc/py23_test.py
@@ -9,7 +9,12 @@ import os
import unittest
from fontTools.misc.py23 import (
- round2, round3, isclose, redirect_stdout, redirect_stderr)
+ round2,
+ round3,
+ isclose,
+ redirect_stdout,
+ redirect_stderr,
+)
PIPE_SCRIPT = """\
@@ -21,377 +26,374 @@ binary_stdout.write(binary_stdin.read())
# the string contains a mix of line endings, plus the Win "EOF" charater (0x1A)
# 'hello\rworld\r\n\x1a\r\n'
-TEST_BIN_DATA = deHexStr(
- "68 65 6c 6c 6f 0d 77 6f 72 6c 64 0d 0a 1a 0d 0a"
-)
+TEST_BIN_DATA = deHexStr("68 65 6c 6c 6f 0d 77 6f 72 6c 64 0d 0a 1a 0d 0a")
-class OpenFuncWrapperTest(unittest.TestCase):
- @staticmethod
- def make_temp(data):
- with tempfile.NamedTemporaryFile(delete=False) as f:
- f.write(tobytes(data))
- return f.name
-
- def diff_piped(self, data, import_statement):
- script = self.make_temp("\n".join([import_statement, PIPE_SCRIPT]))
- datafile = self.make_temp(data)
- try:
- with open(datafile, 'rb') as infile, \
- tempfile.NamedTemporaryFile(delete=False) as outfile:
- env = dict(os.environ)
- env["PYTHONPATH"] = os.pathsep.join(sys.path)
- check_call(
- [sys.executable, script], stdin=infile, stdout=outfile,
- env=env)
- result = not filecmp.cmp(infile.name, outfile.name, shallow=False)
- finally:
- os.remove(script)
- os.remove(datafile)
- os.remove(outfile.name)
- return result
-
- def test_binary_pipe_py23_open_wrapper(self):
- if self.diff_piped(
- TEST_BIN_DATA, "from fontTools.misc.py23 import open"):
- self.fail("Input and output data differ!")
-
- def test_binary_pipe_built_in_io_open(self):
- if sys.version_info.major < 3 and sys.platform == 'win32':
- # On Windows Python 2.x, the piped input and output data are
- # expected to be different when using io.open, because of issue
- # https://bugs.python.org/issue10841.
- expected = True
- else:
- expected = False
- result = self.diff_piped(TEST_BIN_DATA, "from io import open")
- self.assertEqual(result, expected)
+class OpenFuncWrapperTest(unittest.TestCase):
+ @staticmethod
+ def make_temp(data):
+ with tempfile.NamedTemporaryFile(delete=False) as f:
+ f.write(tobytes(data))
+ return f.name
+
+ def diff_piped(self, data, import_statement):
+ script = self.make_temp("\n".join([import_statement, PIPE_SCRIPT]))
+ datafile = self.make_temp(data)
+ try:
+ with open(datafile, "rb") as infile, tempfile.NamedTemporaryFile(
+ delete=False
+ ) as outfile:
+ env = dict(os.environ)
+ env["PYTHONPATH"] = os.pathsep.join(sys.path)
+ check_call(
+ [sys.executable, script], stdin=infile, stdout=outfile, env=env
+ )
+ result = not filecmp.cmp(infile.name, outfile.name, shallow=False)
+ finally:
+ os.remove(script)
+ os.remove(datafile)
+ os.remove(outfile.name)
+ return result
+
+ def test_binary_pipe_py23_open_wrapper(self):
+ if self.diff_piped(TEST_BIN_DATA, "from fontTools.misc.py23 import open"):
+ self.fail("Input and output data differ!")
+
+ def test_binary_pipe_built_in_io_open(self):
+ if sys.version_info.major < 3 and sys.platform == "win32":
+ # On Windows Python 2.x, the piped input and output data are
+ # expected to be different when using io.open, because of issue
+ # https://bugs.python.org/issue10841.
+ expected = True
+ else:
+ expected = False
+ result = self.diff_piped(TEST_BIN_DATA, "from io import open")
+ self.assertEqual(result, expected)
class Round2Test(unittest.TestCase):
- """
- Test cases taken from cpython 2.7 test suite:
-
- https://github.com/python/cpython/blob/2.7/Lib/test/test_float.py#L748
-
- Excludes the test cases that are not supported when using the `decimal`
- module's `quantize` method.
- """
-
- def test_second_argument_type(self):
- # floats should be illegal
- self.assertRaises(TypeError, round2, 3.14159, 2.0)
-
- def test_halfway_cases(self):
- # Halfway cases need special attention, since the current
- # implementation has to deal with them specially. Note that
- # 2.x rounds halfway values up (i.e., away from zero) while
- # 3.x does round-half-to-even.
- self.assertAlmostEqual(round2(0.125, 2), 0.13)
- self.assertAlmostEqual(round2(0.375, 2), 0.38)
- self.assertAlmostEqual(round2(0.625, 2), 0.63)
- self.assertAlmostEqual(round2(0.875, 2), 0.88)
- self.assertAlmostEqual(round2(-0.125, 2), -0.13)
- self.assertAlmostEqual(round2(-0.375, 2), -0.38)
- self.assertAlmostEqual(round2(-0.625, 2), -0.63)
- self.assertAlmostEqual(round2(-0.875, 2), -0.88)
-
- self.assertAlmostEqual(round2(0.25, 1), 0.3)
- self.assertAlmostEqual(round2(0.75, 1), 0.8)
- self.assertAlmostEqual(round2(-0.25, 1), -0.3)
- self.assertAlmostEqual(round2(-0.75, 1), -0.8)
-
- self.assertEqual(round2(-6.5, 0), -7.0)
- self.assertEqual(round2(-5.5, 0), -6.0)
- self.assertEqual(round2(-1.5, 0), -2.0)
- self.assertEqual(round2(-0.5, 0), -1.0)
- self.assertEqual(round2(0.5, 0), 1.0)
- self.assertEqual(round2(1.5, 0), 2.0)
- self.assertEqual(round2(2.5, 0), 3.0)
- self.assertEqual(round2(3.5, 0), 4.0)
- self.assertEqual(round2(4.5, 0), 5.0)
- self.assertEqual(round2(5.5, 0), 6.0)
- self.assertEqual(round2(6.5, 0), 7.0)
-
- # same but without an explicit second argument; in 3.x these
- # will give integers
- self.assertEqual(round2(-6.5), -7.0)
- self.assertEqual(round2(-5.5), -6.0)
- self.assertEqual(round2(-1.5), -2.0)
- self.assertEqual(round2(-0.5), -1.0)
- self.assertEqual(round2(0.5), 1.0)
- self.assertEqual(round2(1.5), 2.0)
- self.assertEqual(round2(2.5), 3.0)
- self.assertEqual(round2(3.5), 4.0)
- self.assertEqual(round2(4.5), 5.0)
- self.assertEqual(round2(5.5), 6.0)
- self.assertEqual(round2(6.5), 7.0)
-
- self.assertEqual(round2(-25.0, -1), -30.0)
- self.assertEqual(round2(-15.0, -1), -20.0)
- self.assertEqual(round2(-5.0, -1), -10.0)
- self.assertEqual(round2(5.0, -1), 10.0)
- self.assertEqual(round2(15.0, -1), 20.0)
- self.assertEqual(round2(25.0, -1), 30.0)
- self.assertEqual(round2(35.0, -1), 40.0)
- self.assertEqual(round2(45.0, -1), 50.0)
- self.assertEqual(round2(55.0, -1), 60.0)
- self.assertEqual(round2(65.0, -1), 70.0)
- self.assertEqual(round2(75.0, -1), 80.0)
- self.assertEqual(round2(85.0, -1), 90.0)
- self.assertEqual(round2(95.0, -1), 100.0)
- self.assertEqual(round2(12325.0, -1), 12330.0)
- self.assertEqual(round2(0, -1), 0.0)
-
- self.assertEqual(round2(350.0, -2), 400.0)
- self.assertEqual(round2(450.0, -2), 500.0)
-
- self.assertAlmostEqual(round2(0.5e21, -21), 1e21)
- self.assertAlmostEqual(round2(1.5e21, -21), 2e21)
- self.assertAlmostEqual(round2(2.5e21, -21), 3e21)
- self.assertAlmostEqual(round2(5.5e21, -21), 6e21)
- self.assertAlmostEqual(round2(8.5e21, -21), 9e21)
-
- self.assertAlmostEqual(round2(-1.5e22, -22), -2e22)
- self.assertAlmostEqual(round2(-0.5e22, -22), -1e22)
- self.assertAlmostEqual(round2(0.5e22, -22), 1e22)
- self.assertAlmostEqual(round2(1.5e22, -22), 2e22)
+ """
+ Test cases taken from cpython 2.7 test suite:
+
+ https://github.com/python/cpython/blob/2.7/Lib/test/test_float.py#L748
+
+ Excludes the test cases that are not supported when using the `decimal`
+ module's `quantize` method.
+ """
+
+ def test_second_argument_type(self):
+ # floats should be illegal
+ self.assertRaises(TypeError, round2, 3.14159, 2.0)
+
+ def test_halfway_cases(self):
+ # Halfway cases need special attention, since the current
+ # implementation has to deal with them specially. Note that
+ # 2.x rounds halfway values up (i.e., away from zero) while
+ # 3.x does round-half-to-even.
+ self.assertAlmostEqual(round2(0.125, 2), 0.13)
+ self.assertAlmostEqual(round2(0.375, 2), 0.38)
+ self.assertAlmostEqual(round2(0.625, 2), 0.63)
+ self.assertAlmostEqual(round2(0.875, 2), 0.88)
+ self.assertAlmostEqual(round2(-0.125, 2), -0.13)
+ self.assertAlmostEqual(round2(-0.375, 2), -0.38)
+ self.assertAlmostEqual(round2(-0.625, 2), -0.63)
+ self.assertAlmostEqual(round2(-0.875, 2), -0.88)
+
+ self.assertAlmostEqual(round2(0.25, 1), 0.3)
+ self.assertAlmostEqual(round2(0.75, 1), 0.8)
+ self.assertAlmostEqual(round2(-0.25, 1), -0.3)
+ self.assertAlmostEqual(round2(-0.75, 1), -0.8)
+
+ self.assertEqual(round2(-6.5, 0), -7.0)
+ self.assertEqual(round2(-5.5, 0), -6.0)
+ self.assertEqual(round2(-1.5, 0), -2.0)
+ self.assertEqual(round2(-0.5, 0), -1.0)
+ self.assertEqual(round2(0.5, 0), 1.0)
+ self.assertEqual(round2(1.5, 0), 2.0)
+ self.assertEqual(round2(2.5, 0), 3.0)
+ self.assertEqual(round2(3.5, 0), 4.0)
+ self.assertEqual(round2(4.5, 0), 5.0)
+ self.assertEqual(round2(5.5, 0), 6.0)
+ self.assertEqual(round2(6.5, 0), 7.0)
+
+ # same but without an explicit second argument; in 3.x these
+ # will give integers
+ self.assertEqual(round2(-6.5), -7.0)
+ self.assertEqual(round2(-5.5), -6.0)
+ self.assertEqual(round2(-1.5), -2.0)
+ self.assertEqual(round2(-0.5), -1.0)
+ self.assertEqual(round2(0.5), 1.0)
+ self.assertEqual(round2(1.5), 2.0)
+ self.assertEqual(round2(2.5), 3.0)
+ self.assertEqual(round2(3.5), 4.0)
+ self.assertEqual(round2(4.5), 5.0)
+ self.assertEqual(round2(5.5), 6.0)
+ self.assertEqual(round2(6.5), 7.0)
+
+ self.assertEqual(round2(-25.0, -1), -30.0)
+ self.assertEqual(round2(-15.0, -1), -20.0)
+ self.assertEqual(round2(-5.0, -1), -10.0)
+ self.assertEqual(round2(5.0, -1), 10.0)
+ self.assertEqual(round2(15.0, -1), 20.0)
+ self.assertEqual(round2(25.0, -1), 30.0)
+ self.assertEqual(round2(35.0, -1), 40.0)
+ self.assertEqual(round2(45.0, -1), 50.0)
+ self.assertEqual(round2(55.0, -1), 60.0)
+ self.assertEqual(round2(65.0, -1), 70.0)
+ self.assertEqual(round2(75.0, -1), 80.0)
+ self.assertEqual(round2(85.0, -1), 90.0)
+ self.assertEqual(round2(95.0, -1), 100.0)
+ self.assertEqual(round2(12325.0, -1), 12330.0)
+ self.assertEqual(round2(0, -1), 0.0)
+
+ self.assertEqual(round2(350.0, -2), 400.0)
+ self.assertEqual(round2(450.0, -2), 500.0)
+
+ self.assertAlmostEqual(round2(0.5e21, -21), 1e21)
+ self.assertAlmostEqual(round2(1.5e21, -21), 2e21)
+ self.assertAlmostEqual(round2(2.5e21, -21), 3e21)
+ self.assertAlmostEqual(round2(5.5e21, -21), 6e21)
+ self.assertAlmostEqual(round2(8.5e21, -21), 9e21)
+
+ self.assertAlmostEqual(round2(-1.5e22, -22), -2e22)
+ self.assertAlmostEqual(round2(-0.5e22, -22), -1e22)
+ self.assertAlmostEqual(round2(0.5e22, -22), 1e22)
+ self.assertAlmostEqual(round2(1.5e22, -22), 2e22)
class Round3Test(unittest.TestCase):
- """ Same as above but results adapted for Python 3 round() """
-
- def test_second_argument_type(self):
- # floats should be illegal
- self.assertRaises(TypeError, round3, 3.14159, 2.0)
-
- # None should be allowed
- self.assertEqual(round3(1.0, None), 1)
- # the following would raise an error with the built-in Python3.5 round:
- # TypeError: 'NoneType' object cannot be interpreted as an integer
- self.assertEqual(round3(1, None), 1)
-
- def test_halfway_cases(self):
- self.assertAlmostEqual(round3(0.125, 2), 0.12)
- self.assertAlmostEqual(round3(0.375, 2), 0.38)
- self.assertAlmostEqual(round3(0.625, 2), 0.62)
- self.assertAlmostEqual(round3(0.875, 2), 0.88)
- self.assertAlmostEqual(round3(-0.125, 2), -0.12)
- self.assertAlmostEqual(round3(-0.375, 2), -0.38)
- self.assertAlmostEqual(round3(-0.625, 2), -0.62)
- self.assertAlmostEqual(round3(-0.875, 2), -0.88)
-
- self.assertAlmostEqual(round3(0.25, 1), 0.2)
- self.assertAlmostEqual(round3(0.75, 1), 0.8)
- self.assertAlmostEqual(round3(-0.25, 1), -0.2)
- self.assertAlmostEqual(round3(-0.75, 1), -0.8)
-
- self.assertEqual(round3(-6.5, 0), -6.0)
- self.assertEqual(round3(-5.5, 0), -6.0)
- self.assertEqual(round3(-1.5, 0), -2.0)
- self.assertEqual(round3(-0.5, 0), 0.0)
- self.assertEqual(round3(0.5, 0), 0.0)
- self.assertEqual(round3(1.5, 0), 2.0)
- self.assertEqual(round3(2.5, 0), 2.0)
- self.assertEqual(round3(3.5, 0), 4.0)
- self.assertEqual(round3(4.5, 0), 4.0)
- self.assertEqual(round3(5.5, 0), 6.0)
- self.assertEqual(round3(6.5, 0), 6.0)
-
- # same but without an explicit second argument; in 2.x these
- # will give floats
- self.assertEqual(round3(-6.5), -6)
- self.assertEqual(round3(-5.5), -6)
- self.assertEqual(round3(-1.5), -2.0)
- self.assertEqual(round3(-0.5), 0)
- self.assertEqual(round3(0.5), 0)
- self.assertEqual(round3(1.5), 2)
- self.assertEqual(round3(2.5), 2)
- self.assertEqual(round3(3.5), 4)
- self.assertEqual(round3(4.5), 4)
- self.assertEqual(round3(5.5), 6)
- self.assertEqual(round3(6.5), 6)
-
- # no ndigits and input is already an integer: output == input
- rv = round3(1)
- self.assertEqual(rv, 1)
- self.assertTrue(isinstance(rv, int))
- rv = round3(1.0)
- self.assertEqual(rv, 1)
- self.assertTrue(isinstance(rv, int))
-
- self.assertEqual(round3(-25.0, -1), -20.0)
- self.assertEqual(round3(-15.0, -1), -20.0)
- self.assertEqual(round3(-5.0, -1), 0.0)
- self.assertEqual(round3(5.0, -1), 0.0)
- self.assertEqual(round3(15.0, -1), 20.0)
- self.assertEqual(round3(25.0, -1), 20.0)
- self.assertEqual(round3(35.0, -1), 40.0)
- self.assertEqual(round3(45.0, -1), 40.0)
- self.assertEqual(round3(55.0, -1), 60.0)
- self.assertEqual(round3(65.0, -1), 60.0)
- self.assertEqual(round3(75.0, -1), 80.0)
- self.assertEqual(round3(85.0, -1), 80.0)
- self.assertEqual(round3(95.0, -1), 100.0)
- self.assertEqual(round3(12325.0, -1), 12320.0)
- self.assertEqual(round3(0, -1), 0.0)
-
- self.assertEqual(round3(350.0, -2), 400.0)
- self.assertEqual(round3(450.0, -2), 400.0)
-
- self.assertAlmostEqual(round3(0.5e21, -21), 0.0)
- self.assertAlmostEqual(round3(1.5e21, -21), 2e21)
- self.assertAlmostEqual(round3(2.5e21, -21), 2e21)
- self.assertAlmostEqual(round3(5.5e21, -21), 6e21)
- self.assertAlmostEqual(round3(8.5e21, -21), 8e21)
-
- self.assertAlmostEqual(round3(-1.5e22, -22), -2e22)
- self.assertAlmostEqual(round3(-0.5e22, -22), 0.0)
- self.assertAlmostEqual(round3(0.5e22, -22), 0.0)
- self.assertAlmostEqual(round3(1.5e22, -22), 2e22)
-
-
-NAN = float('nan')
-INF = float('inf')
-NINF = float('-inf')
+ """Same as above but results adapted for Python 3 round()"""
+
+ def test_second_argument_type(self):
+ # floats should be illegal
+ self.assertRaises(TypeError, round3, 3.14159, 2.0)
+
+ # None should be allowed
+ self.assertEqual(round3(1.0, None), 1)
+ # the following would raise an error with the built-in Python3.5 round:
+ # TypeError: 'NoneType' object cannot be interpreted as an integer
+ self.assertEqual(round3(1, None), 1)
+
+ def test_halfway_cases(self):
+ self.assertAlmostEqual(round3(0.125, 2), 0.12)
+ self.assertAlmostEqual(round3(0.375, 2), 0.38)
+ self.assertAlmostEqual(round3(0.625, 2), 0.62)
+ self.assertAlmostEqual(round3(0.875, 2), 0.88)
+ self.assertAlmostEqual(round3(-0.125, 2), -0.12)
+ self.assertAlmostEqual(round3(-0.375, 2), -0.38)
+ self.assertAlmostEqual(round3(-0.625, 2), -0.62)
+ self.assertAlmostEqual(round3(-0.875, 2), -0.88)
+
+ self.assertAlmostEqual(round3(0.25, 1), 0.2)
+ self.assertAlmostEqual(round3(0.75, 1), 0.8)
+ self.assertAlmostEqual(round3(-0.25, 1), -0.2)
+ self.assertAlmostEqual(round3(-0.75, 1), -0.8)
+
+ self.assertEqual(round3(-6.5, 0), -6.0)
+ self.assertEqual(round3(-5.5, 0), -6.0)
+ self.assertEqual(round3(-1.5, 0), -2.0)
+ self.assertEqual(round3(-0.5, 0), 0.0)
+ self.assertEqual(round3(0.5, 0), 0.0)
+ self.assertEqual(round3(1.5, 0), 2.0)
+ self.assertEqual(round3(2.5, 0), 2.0)
+ self.assertEqual(round3(3.5, 0), 4.0)
+ self.assertEqual(round3(4.5, 0), 4.0)
+ self.assertEqual(round3(5.5, 0), 6.0)
+ self.assertEqual(round3(6.5, 0), 6.0)
+
+ # same but without an explicit second argument; in 2.x these
+ # will give floats
+ self.assertEqual(round3(-6.5), -6)
+ self.assertEqual(round3(-5.5), -6)
+ self.assertEqual(round3(-1.5), -2.0)
+ self.assertEqual(round3(-0.5), 0)
+ self.assertEqual(round3(0.5), 0)
+ self.assertEqual(round3(1.5), 2)
+ self.assertEqual(round3(2.5), 2)
+ self.assertEqual(round3(3.5), 4)
+ self.assertEqual(round3(4.5), 4)
+ self.assertEqual(round3(5.5), 6)
+ self.assertEqual(round3(6.5), 6)
+
+ # no ndigits and input is already an integer: output == input
+ rv = round3(1)
+ self.assertEqual(rv, 1)
+ self.assertTrue(isinstance(rv, int))
+ rv = round3(1.0)
+ self.assertEqual(rv, 1)
+ self.assertTrue(isinstance(rv, int))
+
+ self.assertEqual(round3(-25.0, -1), -20.0)
+ self.assertEqual(round3(-15.0, -1), -20.0)
+ self.assertEqual(round3(-5.0, -1), 0.0)
+ self.assertEqual(round3(5.0, -1), 0.0)
+ self.assertEqual(round3(15.0, -1), 20.0)
+ self.assertEqual(round3(25.0, -1), 20.0)
+ self.assertEqual(round3(35.0, -1), 40.0)
+ self.assertEqual(round3(45.0, -1), 40.0)
+ self.assertEqual(round3(55.0, -1), 60.0)
+ self.assertEqual(round3(65.0, -1), 60.0)
+ self.assertEqual(round3(75.0, -1), 80.0)
+ self.assertEqual(round3(85.0, -1), 80.0)
+ self.assertEqual(round3(95.0, -1), 100.0)
+ self.assertEqual(round3(12325.0, -1), 12320.0)
+ self.assertEqual(round3(0, -1), 0.0)
+
+ self.assertEqual(round3(350.0, -2), 400.0)
+ self.assertEqual(round3(450.0, -2), 400.0)
+
+ self.assertAlmostEqual(round3(0.5e21, -21), 0.0)
+ self.assertAlmostEqual(round3(1.5e21, -21), 2e21)
+ self.assertAlmostEqual(round3(2.5e21, -21), 2e21)
+ self.assertAlmostEqual(round3(5.5e21, -21), 6e21)
+ self.assertAlmostEqual(round3(8.5e21, -21), 8e21)
+
+ self.assertAlmostEqual(round3(-1.5e22, -22), -2e22)
+ self.assertAlmostEqual(round3(-0.5e22, -22), 0.0)
+ self.assertAlmostEqual(round3(0.5e22, -22), 0.0)
+ self.assertAlmostEqual(round3(1.5e22, -22), 2e22)
+
+
+NAN = float("nan")
+INF = float("inf")
+NINF = float("-inf")
class IsCloseTests(unittest.TestCase):
- """
- Tests taken from Python 3.5 test_math.py:
- https://hg.python.org/cpython/file/v3.5.2/Lib/test/test_math.py
- """
- isclose = staticmethod(isclose)
-
- def assertIsClose(self, a, b, *args, **kwargs):
- self.assertTrue(
- self.isclose(a, b, *args, **kwargs),
- msg="%s and %s should be close!" % (a, b))
-
- def assertIsNotClose(self, a, b, *args, **kwargs):
- self.assertFalse(
- self.isclose(a, b, *args, **kwargs),
- msg="%s and %s should not be close!" % (a, b))
-
- def assertAllClose(self, examples, *args, **kwargs):
- for a, b in examples:
- self.assertIsClose(a, b, *args, **kwargs)
-
- def assertAllNotClose(self, examples, *args, **kwargs):
- for a, b in examples:
- self.assertIsNotClose(a, b, *args, **kwargs)
-
- def test_negative_tolerances(self):
- # ValueError should be raised if either tolerance is less than zero
- with self.assertRaises(ValueError):
- self.assertIsClose(1, 1, rel_tol=-1e-100)
- with self.assertRaises(ValueError):
- self.assertIsClose(1, 1, rel_tol=1e-100, abs_tol=-1e10)
-
- def test_identical(self):
- # identical values must test as close
- identical_examples = [
- (2.0, 2.0),
- (0.1e200, 0.1e200),
- (1.123e-300, 1.123e-300),
- (12345, 12345.0),
- (0.0, -0.0),
- (345678, 345678)]
- self.assertAllClose(identical_examples, rel_tol=0.0, abs_tol=0.0)
-
- def test_eight_decimal_places(self):
- # examples that are close to 1e-8, but not 1e-9
- eight_decimal_places_examples = [
- (1e8, 1e8 + 1),
- (-1e-8, -1.000000009e-8),
- (1.12345678, 1.12345679)]
- self.assertAllClose(eight_decimal_places_examples, rel_tol=1e-8)
- self.assertAllNotClose(eight_decimal_places_examples, rel_tol=1e-9)
-
- def test_near_zero(self):
- # values close to zero
- near_zero_examples = [
- (1e-9, 0.0),
- (-1e-9, 0.0),
- (-1e-150, 0.0)]
- # these should not be close to any rel_tol
- self.assertAllNotClose(near_zero_examples, rel_tol=0.9)
- # these should be close to abs_tol=1e-8
- self.assertAllClose(near_zero_examples, abs_tol=1e-8)
-
- def test_identical_infinite(self):
- # these are close regardless of tolerance -- i.e. they are equal
- self.assertIsClose(INF, INF)
- self.assertIsClose(INF, INF, abs_tol=0.0)
- self.assertIsClose(NINF, NINF)
- self.assertIsClose(NINF, NINF, abs_tol=0.0)
-
- def test_inf_ninf_nan(self):
- # these should never be close (following IEEE 754 rules for equality)
- not_close_examples = [
- (NAN, NAN),
- (NAN, 1e-100),
- (1e-100, NAN),
- (INF, NAN),
- (NAN, INF),
- (INF, NINF),
- (INF, 1.0),
- (1.0, INF),
- (INF, 1e308),
- (1e308, INF)]
- # use largest reasonable tolerance
- self.assertAllNotClose(not_close_examples, abs_tol=0.999999999999999)
-
- def test_zero_tolerance(self):
- # test with zero tolerance
- zero_tolerance_close_examples = [
- (1.0, 1.0),
- (-3.4, -3.4),
- (-1e-300, -1e-300)]
- self.assertAllClose(zero_tolerance_close_examples, rel_tol=0.0)
-
- zero_tolerance_not_close_examples = [
- (1.0, 1.000000000000001),
- (0.99999999999999, 1.0),
- (1.0e200, .999999999999999e200)]
- self.assertAllNotClose(zero_tolerance_not_close_examples, rel_tol=0.0)
-
- def test_assymetry(self):
- # test the assymetry example from PEP 485
- self.assertAllClose([(9, 10), (10, 9)], rel_tol=0.1)
-
- def test_integers(self):
- # test with integer values
- integer_examples = [
- (100000001, 100000000),
- (123456789, 123456788)]
-
- self.assertAllClose(integer_examples, rel_tol=1e-8)
- self.assertAllNotClose(integer_examples, rel_tol=1e-9)
-
- def test_decimals(self):
- # test with Decimal values
- from decimal import Decimal
-
- decimal_examples = [
- (Decimal('1.00000001'), Decimal('1.0')),
- (Decimal('1.00000001e-20'), Decimal('1.0e-20')),
- (Decimal('1.00000001e-100'), Decimal('1.0e-100'))]
- self.assertAllClose(decimal_examples, rel_tol=1e-8)
- self.assertAllNotClose(decimal_examples, rel_tol=1e-9)
-
- def test_fractions(self):
- # test with Fraction values
- from fractions import Fraction
-
- # could use some more examples here!
- fraction_examples = [(Fraction(1, 100000000) + 1, Fraction(1))]
- self.assertAllClose(fraction_examples, rel_tol=1e-8)
- self.assertAllNotClose(fraction_examples, rel_tol=1e-9)
+ """
+ Tests taken from Python 3.5 test_math.py:
+ https://hg.python.org/cpython/file/v3.5.2/Lib/test/test_math.py
+ """
+
+ isclose = staticmethod(isclose)
+
+ def assertIsClose(self, a, b, *args, **kwargs):
+ self.assertTrue(
+ self.isclose(a, b, *args, **kwargs),
+ msg="%s and %s should be close!" % (a, b),
+ )
+
+ def assertIsNotClose(self, a, b, *args, **kwargs):
+ self.assertFalse(
+ self.isclose(a, b, *args, **kwargs),
+ msg="%s and %s should not be close!" % (a, b),
+ )
+
+ def assertAllClose(self, examples, *args, **kwargs):
+ for a, b in examples:
+ self.assertIsClose(a, b, *args, **kwargs)
+
+ def assertAllNotClose(self, examples, *args, **kwargs):
+ for a, b in examples:
+ self.assertIsNotClose(a, b, *args, **kwargs)
+
+ def test_negative_tolerances(self):
+ # ValueError should be raised if either tolerance is less than zero
+ with self.assertRaises(ValueError):
+ self.assertIsClose(1, 1, rel_tol=-1e-100)
+ with self.assertRaises(ValueError):
+ self.assertIsClose(1, 1, rel_tol=1e-100, abs_tol=-1e10)
+
+ def test_identical(self):
+ # identical values must test as close
+ identical_examples = [
+ (2.0, 2.0),
+ (0.1e200, 0.1e200),
+ (1.123e-300, 1.123e-300),
+ (12345, 12345.0),
+ (0.0, -0.0),
+ (345678, 345678),
+ ]
+ self.assertAllClose(identical_examples, rel_tol=0.0, abs_tol=0.0)
+
+ def test_eight_decimal_places(self):
+ # examples that are close to 1e-8, but not 1e-9
+ eight_decimal_places_examples = [
+ (1e8, 1e8 + 1),
+ (-1e-8, -1.000000009e-8),
+ (1.12345678, 1.12345679),
+ ]
+ self.assertAllClose(eight_decimal_places_examples, rel_tol=1e-8)
+ self.assertAllNotClose(eight_decimal_places_examples, rel_tol=1e-9)
+
+ def test_near_zero(self):
+ # values close to zero
+ near_zero_examples = [(1e-9, 0.0), (-1e-9, 0.0), (-1e-150, 0.0)]
+ # these should not be close to any rel_tol
+ self.assertAllNotClose(near_zero_examples, rel_tol=0.9)
+ # these should be close to abs_tol=1e-8
+ self.assertAllClose(near_zero_examples, abs_tol=1e-8)
+
+ def test_identical_infinite(self):
+ # these are close regardless of tolerance -- i.e. they are equal
+ self.assertIsClose(INF, INF)
+ self.assertIsClose(INF, INF, abs_tol=0.0)
+ self.assertIsClose(NINF, NINF)
+ self.assertIsClose(NINF, NINF, abs_tol=0.0)
+
+ def test_inf_ninf_nan(self):
+ # these should never be close (following IEEE 754 rules for equality)
+ not_close_examples = [
+ (NAN, NAN),
+ (NAN, 1e-100),
+ (1e-100, NAN),
+ (INF, NAN),
+ (NAN, INF),
+ (INF, NINF),
+ (INF, 1.0),
+ (1.0, INF),
+ (INF, 1e308),
+ (1e308, INF),
+ ]
+ # use largest reasonable tolerance
+ self.assertAllNotClose(not_close_examples, abs_tol=0.999999999999999)
+
+ def test_zero_tolerance(self):
+ # test with zero tolerance
+ zero_tolerance_close_examples = [(1.0, 1.0), (-3.4, -3.4), (-1e-300, -1e-300)]
+ self.assertAllClose(zero_tolerance_close_examples, rel_tol=0.0)
+
+ zero_tolerance_not_close_examples = [
+ (1.0, 1.000000000000001),
+ (0.99999999999999, 1.0),
+ (1.0e200, 0.999999999999999e200),
+ ]
+ self.assertAllNotClose(zero_tolerance_not_close_examples, rel_tol=0.0)
+
+ def test_assymetry(self):
+ # test the assymetry example from PEP 485
+ self.assertAllClose([(9, 10), (10, 9)], rel_tol=0.1)
+
+ def test_integers(self):
+ # test with integer values
+ integer_examples = [(100000001, 100000000), (123456789, 123456788)]
+
+ self.assertAllClose(integer_examples, rel_tol=1e-8)
+ self.assertAllNotClose(integer_examples, rel_tol=1e-9)
+
+ def test_decimals(self):
+ # test with Decimal values
+ from decimal import Decimal
+
+ decimal_examples = [
+ (Decimal("1.00000001"), Decimal("1.0")),
+ (Decimal("1.00000001e-20"), Decimal("1.0e-20")),
+ (Decimal("1.00000001e-100"), Decimal("1.0e-100")),
+ ]
+ self.assertAllClose(decimal_examples, rel_tol=1e-8)
+ self.assertAllNotClose(decimal_examples, rel_tol=1e-9)
+
+ def test_fractions(self):
+ # test with Fraction values
+ from fractions import Fraction
+
+ # could use some more examples here!
+ fraction_examples = [(Fraction(1, 100000000) + 1, Fraction(1))]
+ self.assertAllClose(fraction_examples, rel_tol=1e-8)
+ self.assertAllNotClose(fraction_examples, rel_tol=1e-9)
class TestRedirectStream:
-
redirect_stream = None
orig_stream = None
@@ -441,16 +443,14 @@ class TestRedirectStream:
class TestRedirectStdout(TestRedirectStream, unittest.TestCase):
-
redirect_stream = redirect_stdout
orig_stream = "stdout"
class TestRedirectStderr(TestRedirectStream, unittest.TestCase):
-
redirect_stream = redirect_stderr
orig_stream = "stderr"
if __name__ == "__main__":
- sys.exit(unittest.main())
+ sys.exit(unittest.main())
diff --git a/Tests/misc/testTools_test.py b/Tests/misc/testTools_test.py
index 80d4d2ba..22d79eb7 100644
--- a/Tests/misc/testTools_test.py
+++ b/Tests/misc/testTools_test.py
@@ -3,77 +3,88 @@ import unittest
class TestToolsTest(unittest.TestCase):
-
def test_parseXML_str(self):
- self.assertEqual(testTools.parseXML(
- '<Foo n="1"/>'
- '<Foo n="2">'
- ' some ünıcòðe text'
- ' <Bar color="red"/>'
- ' some more text'
- '</Foo>'
- '<Foo n="3"/>'), [
+ self.assertEqual(
+ testTools.parseXML(
+ '<Foo n="1"/>'
+ '<Foo n="2">'
+ " some ünıcòðe text"
+ ' <Bar color="red"/>'
+ " some more text"
+ "</Foo>"
+ '<Foo n="3"/>'
+ ),
+ [
("Foo", {"n": "1"}, []),
- ("Foo", {"n": "2"}, [
- " some ünıcòðe text ",
- ("Bar", {"color": "red"}, []),
- " some more text",
- ]),
- ("Foo", {"n": "3"}, [])
- ])
+ (
+ "Foo",
+ {"n": "2"},
+ [
+ " some ünıcòðe text ",
+ ("Bar", {"color": "red"}, []),
+ " some more text",
+ ],
+ ),
+ ("Foo", {"n": "3"}, []),
+ ],
+ )
def test_parseXML_bytes(self):
- self.assertEqual(testTools.parseXML(
- b'<Foo n="1"/>'
- b'<Foo n="2">'
- b' some \xc3\xbcn\xc4\xb1c\xc3\xb2\xc3\xb0e text'
- b' <Bar color="red"/>'
- b' some more text'
- b'</Foo>'
- b'<Foo n="3"/>'), [
+ self.assertEqual(
+ testTools.parseXML(
+ b'<Foo n="1"/>'
+ b'<Foo n="2">'
+ b" some \xc3\xbcn\xc4\xb1c\xc3\xb2\xc3\xb0e text"
+ b' <Bar color="red"/>'
+ b" some more text"
+ b"</Foo>"
+ b'<Foo n="3"/>'
+ ),
+ [
("Foo", {"n": "1"}, []),
- ("Foo", {"n": "2"}, [
- " some ünıcòðe text ",
- ("Bar", {"color": "red"}, []),
- " some more text",
- ]),
- ("Foo", {"n": "3"}, [])
- ])
+ (
+ "Foo",
+ {"n": "2"},
+ [
+ " some ünıcòðe text ",
+ ("Bar", {"color": "red"}, []),
+ " some more text",
+ ],
+ ),
+ ("Foo", {"n": "3"}, []),
+ ],
+ )
def test_parseXML_str_list(self):
- self.assertEqual(testTools.parseXML(
- ['<Foo n="1"/>'
- '<Foo n="2"/>']), [
- ("Foo", {"n": "1"}, []),
- ("Foo", {"n": "2"}, [])
- ])
+ self.assertEqual(
+ testTools.parseXML(['<Foo n="1"/>' '<Foo n="2"/>']),
+ [("Foo", {"n": "1"}, []), ("Foo", {"n": "2"}, [])],
+ )
def test_parseXML_bytes_list(self):
- self.assertEqual(testTools.parseXML(
- [b'<Foo n="1"/>'
- b'<Foo n="2"/>']), [
- ("Foo", {"n": "1"}, []),
- ("Foo", {"n": "2"}, [])
- ])
+ self.assertEqual(
+ testTools.parseXML([b'<Foo n="1"/>' b'<Foo n="2"/>']),
+ [("Foo", {"n": "1"}, []), ("Foo", {"n": "2"}, [])],
+ )
def test_getXML(self):
def toXML(writer, ttFont):
writer.simpletag("simple")
writer.newline()
- writer.begintag("tag", attr='value')
+ writer.begintag("tag", attr="value")
writer.newline()
writer.write("hello world")
writer.newline()
writer.endtag("tag")
writer.newline() # toXML always ends with a newline
- self.assertEqual(testTools.getXML(toXML),
- ['<simple/>',
- '<tag attr="value">',
- ' hello world',
- '</tag>'])
+ self.assertEqual(
+ testTools.getXML(toXML),
+ ["<simple/>", '<tag attr="value">', " hello world", "</tag>"],
+ )
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/misc/textTools_test.py b/Tests/misc/textTools_test.py
index f83abf91..f28ca211 100644
--- a/Tests/misc/textTools_test.py
+++ b/Tests/misc/textTools_test.py
@@ -2,8 +2,8 @@ from fontTools.misc.textTools import pad
def test_pad():
- assert len(pad(b'abcd', 4)) == 4
- assert len(pad(b'abcde', 2)) == 6
- assert len(pad(b'abcde', 4)) == 8
- assert pad(b'abcdef', 4) == b'abcdef\x00\x00'
- assert pad(b'abcdef', 1) == b'abcdef'
+ assert len(pad(b"abcd", 4)) == 4
+ assert len(pad(b"abcde", 2)) == 6
+ assert len(pad(b"abcde", 4)) == 8
+ assert pad(b"abcdef", 4) == b"abcdef\x00\x00"
+ assert pad(b"abcdef", 1) == b"abcdef"
diff --git a/Tests/misc/timeTools_test.py b/Tests/misc/timeTools_test.py
index 4d75ce4e..d37e3c6d 100644
--- a/Tests/misc/timeTools_test.py
+++ b/Tests/misc/timeTools_test.py
@@ -1,4 +1,10 @@
-from fontTools.misc.timeTools import asctime, timestampNow, timestampToString, timestampFromString, epoch_diff
+from fontTools.misc.timeTools import (
+ asctime,
+ timestampNow,
+ timestampToString,
+ timestampFromString,
+ epoch_diff,
+)
import os
import time
import locale
@@ -7,7 +13,7 @@ import pytest
def test_asctime():
assert isinstance(asctime(), str)
- assert asctime(time.gmtime(0)) == 'Thu Jan 1 00:00:00 1970'
+ assert asctime(time.gmtime(0)) == "Thu Jan 1 00:00:00 1970"
def test_source_date_epoch():
@@ -27,7 +33,7 @@ def test_source_date_epoch():
def test_date_parsing_with_locale():
l = locale.getlocale(locale.LC_TIME)
try:
- locale.setlocale(locale.LC_TIME, 'de_DE.utf8')
+ locale.setlocale(locale.LC_TIME, "de_DE.utf8")
except locale.Error:
pytest.skip("Locale de_DE not available")
diff --git a/Tests/misc/transform_test.py b/Tests/misc/transform_test.py
index 53d4a202..eaa16678 100644
--- a/Tests/misc/transform_test.py
+++ b/Tests/misc/transform_test.py
@@ -1,10 +1,15 @@
-from fontTools.misc.transform import Transform, Identity, Offset, Scale
+from fontTools.misc.transform import (
+ Transform,
+ Identity,
+ Offset,
+ Scale,
+ DecomposedTransform,
+)
import math
import pytest
class TransformTest(object):
-
def test_examples(self):
t = Transform()
assert repr(t) == "<Transform [1 0 0 1 0 0]>"
@@ -19,9 +24,12 @@ class TransformTest(object):
def test_transformPoints(self):
t = Transform(2, 0, 0, 3, 0, 0)
- assert t.transformPoints(
- [(0, 0), (0, 100), (100, 100), (100, 0)]
- ) == [(0, 0), (0, 300), (200, 300), (200, 0)]
+ assert t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) == [
+ (0, 0),
+ (0, 300),
+ (200, 300),
+ (200, 0),
+ ]
def test_transformVector(self):
t = Transform(2, 0, 0, 3, -10, 30)
@@ -47,7 +55,8 @@ class TransformTest(object):
assert t.rotate(-math.pi / 2) == Transform(0, -1, 1, 0, 0, 0)
t = Transform()
assert tuple(t.rotate(math.radians(30))) == pytest.approx(
- tuple(Transform(0.866025, 0.5, -0.5, 0.866025, 0, 0)))
+ tuple(Transform(0.866025, 0.5, -0.5, 0.866025, 0, 0))
+ )
def test_skew(self):
t = Transform().skew(math.pi / 4)
@@ -74,7 +83,7 @@ class TransformTest(object):
def test_toPS(self):
t = Transform().scale(2, 3).translate(4, 5)
- assert t.toPS() == '[2 0 0 3 8 15]'
+ assert t.toPS() == "[2 0 0 3 8 15]"
def test__ne__(self):
assert Transform() != Transform(2, 0, 0, 2, 0, 0)
@@ -90,7 +99,7 @@ class TransformTest(object):
assert Transform(1, 0, 0, 1, 1, 0)
def test__repr__(self):
- assert repr(Transform(1, 2, 3, 4, 5, 6)) == '<Transform [1 2 3 4 5 6]>'
+ assert repr(Transform(1, 2, 3, 4, 5, 6)) == "<Transform [1 2 3 4 5 6]>"
def test_Identity(self):
assert isinstance(Identity, Transform)
@@ -105,3 +114,85 @@ class TransformTest(object):
assert Scale(1) == Transform(1, 0, 0, 1, 0, 0)
assert Scale(2) == Transform(2, 0, 0, 2, 0, 0)
assert Scale(1, 2) == Transform(1, 0, 0, 2, 0, 0)
+
+ def test_decompose(self):
+ t = Transform(2, 0, 0, 3, 5, 7)
+ d = t.toDecomposed()
+ assert d.scaleX == 2
+ assert d.scaleY == 3
+ assert d.translateX == 5
+ assert d.translateY == 7
+
+    def test_decompose_flip(self):
+ t = Transform(-1, 0, 0, 1, 0, 0)
+ d = t.toDecomposed()
+ assert d.scaleX == -1
+ assert d.scaleY == 1
+ assert d.rotation == 0
+
+ t = Transform(1, 0, 0, -1, 0, 0)
+ d = t.toDecomposed()
+ assert d.scaleX == 1
+ assert d.scaleY == -1
+ assert d.rotation == 0
+
+
+class DecomposedTransformTest(object):
+ def test_identity(self):
+ t = DecomposedTransform()
+ assert (
+ repr(t)
+ == "DecomposedTransform(translateX=0, translateY=0, rotation=0, scaleX=1, scaleY=1, skewX=0, skewY=0, tCenterX=0, tCenterY=0)"
+ )
+ assert t == DecomposedTransform(scaleX=1.0)
+
+ def test_scale(self):
+ t = DecomposedTransform(scaleX=2, scaleY=3)
+ assert t.scaleX == 2
+ assert t.scaleY == 3
+
+ def test_toTransform(self):
+ t = DecomposedTransform(scaleX=2, scaleY=3)
+ assert t.toTransform() == (2, 0, 0, 3, 0, 0)
+
+ @pytest.mark.parametrize(
+ "decomposed",
+ [
+ DecomposedTransform(scaleX=1, scaleY=0),
+ DecomposedTransform(scaleX=0, scaleY=1),
+ DecomposedTransform(scaleX=1, scaleY=0, rotation=30),
+ DecomposedTransform(scaleX=0, scaleY=1, rotation=30),
+ DecomposedTransform(scaleX=1, scaleY=1),
+ DecomposedTransform(scaleX=-1, scaleY=1),
+ DecomposedTransform(scaleX=1, scaleY=-1),
+ DecomposedTransform(scaleX=-1, scaleY=-1),
+ DecomposedTransform(rotation=90),
+ DecomposedTransform(rotation=-90),
+ DecomposedTransform(skewX=45),
+ DecomposedTransform(skewY=45),
+ DecomposedTransform(scaleX=-1, skewX=45),
+ DecomposedTransform(scaleX=-1, skewY=45),
+ DecomposedTransform(scaleY=-1, skewX=45),
+ DecomposedTransform(scaleY=-1, skewY=45),
+ DecomposedTransform(scaleX=-1, skewX=45, rotation=30),
+ DecomposedTransform(scaleX=-1, skewY=45, rotation=30),
+ DecomposedTransform(scaleY=-1, skewX=45, rotation=30),
+ DecomposedTransform(scaleY=-1, skewY=45, rotation=30),
+ DecomposedTransform(scaleX=-1, skewX=45, rotation=-30),
+ DecomposedTransform(scaleX=-1, skewY=45, rotation=-30),
+ DecomposedTransform(scaleY=-1, skewX=45, rotation=-30),
+ DecomposedTransform(scaleY=-1, skewY=45, rotation=-30),
+ DecomposedTransform(scaleX=-2, skewX=45, rotation=30),
+ DecomposedTransform(scaleX=-2, skewY=45, rotation=30),
+ DecomposedTransform(scaleY=-2, skewX=45, rotation=30),
+ DecomposedTransform(scaleY=-2, skewY=45, rotation=30),
+ DecomposedTransform(scaleX=-2, skewX=45, rotation=-30),
+ DecomposedTransform(scaleX=-2, skewY=45, rotation=-30),
+ DecomposedTransform(scaleY=-2, skewX=45, rotation=-30),
+ DecomposedTransform(scaleY=-2, skewY=45, rotation=-30),
+ ],
+ )
+    def test_roundtrip(self, decomposed):
+ assert decomposed.toTransform().toDecomposed().toTransform() == pytest.approx(
+ tuple(decomposed.toTransform())
+ ), decomposed
diff --git a/Tests/misc/treeTools_test.py b/Tests/misc/treeTools_test.py
index 467a5c57..be8ffa99 100644
--- a/Tests/misc/treeTools_test.py
+++ b/Tests/misc/treeTools_test.py
@@ -70,7 +70,7 @@ import pytest
(list(range(512)), 256, [list(range(256)), list(range(256, 512))]),
(list(range(512 + 1)), 256, [list(range(256)), list(range(256, 512)), 512]),
(
- list(range(256 ** 2)),
+ list(range(256**2)),
256,
[list(range(k * 256, k * 256 + 256)) for k in range(256)],
),
diff --git a/Tests/misc/visitor_test.py b/Tests/misc/visitor_test.py
index fe71e08f..268cc716 100644
--- a/Tests/misc/visitor_test.py
+++ b/Tests/misc/visitor_test.py
@@ -8,6 +8,7 @@ class E(enum.Enum):
E2 = 2
E3 = 3
+
class A:
def __init__(self):
self.a = 1
diff --git a/Tests/misc/xmlReader_test.py b/Tests/misc/xmlReader_test.py
index ec4aff57..1f06e1ea 100644
--- a/Tests/misc/xmlReader_test.py
+++ b/Tests/misc/xmlReader_test.py
@@ -8,24 +8,21 @@ import tempfile
class TestXMLReader(unittest.TestCase):
-
- def test_decode_utf8(self):
-
- class DebugXMLReader(XMLReader):
-
- def __init__(self, fileOrPath, ttFont, progress=None):
- super(DebugXMLReader, self).__init__(
- fileOrPath, ttFont, progress)
- self.contents = []
-
- def _endElementHandler(self, name):
- if self.stackSize == 3:
- name, attrs, content = self.root
- self.contents.append(content)
- super(DebugXMLReader, self)._endElementHandler(name)
-
- expected = 'fôôbär'
- data = '''\
+ def test_decode_utf8(self):
+ class DebugXMLReader(XMLReader):
+ def __init__(self, fileOrPath, ttFont, progress=None):
+ super(DebugXMLReader, self).__init__(fileOrPath, ttFont, progress)
+ self.contents = []
+
+ def _endElementHandler(self, name):
+ if self.stackSize == 3:
+ name, attrs, content = self.root
+ self.contents.append(content)
+ super(DebugXMLReader, self)._endElementHandler(name)
+
+ expected = "fôôbär"
+ data = (
+ """\
<?xml version="1.0" encoding="UTF-8"?>
<ttFont>
<name>
@@ -34,155 +31,157 @@ class TestXMLReader(unittest.TestCase):
</namerecord>
</name>
</ttFont>
-''' % expected
-
- with BytesIO(data.encode('utf-8')) as tmp:
- reader = DebugXMLReader(tmp, TTFont())
- reader.read()
- content = strjoin(reader.contents[0]).strip()
- self.assertEqual(expected, content)
-
- def test_normalise_newlines(self):
-
- class DebugXMLReader(XMLReader):
-
- def __init__(self, fileOrPath, ttFont, progress=None):
- super(DebugXMLReader, self).__init__(
- fileOrPath, ttFont, progress)
- self.newlines = []
-
- def _characterDataHandler(self, data):
- self.newlines.extend([c for c in data if c in ('\r', '\n')])
-
- # notice how when CR is escaped, it is not normalised by the XML parser
- data = (
- '<ttFont>\r' # \r -> \n
- ' <test>\r\n' # \r\n -> \n
- ' a line of text\n' # \n
- ' escaped CR and unix newline &#13;\n' # &#13;\n -> \r\n
- ' escaped CR and macintosh newline &#13;\r' # &#13;\r -> \r\n
- ' escaped CR and windows newline &#13;\r\n' # &#13;\r\n -> \r\n
- ' </test>\n' # \n
- '</ttFont>')
-
- with BytesIO(data.encode('utf-8')) as tmp:
- reader = DebugXMLReader(tmp, TTFont())
- reader.read()
- expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n']
- self.assertEqual(expected, reader.newlines)
-
- def test_progress(self):
-
- class DummyProgressPrinter(ProgressPrinter):
-
- def __init__(self, title, maxval=100):
- self.label = title
- self.maxval = maxval
- self.pos = 0
-
- def set(self, val, maxval=None):
- if maxval is not None:
- self.maxval = maxval
- self.pos = val
-
- def increment(self, val=1):
- self.pos += val
-
- def setLabel(self, text):
- self.label = text
-
- data = (
- '<ttFont>\n'
- ' <test>\n'
- ' %s\n'
- ' </test>\n'
- '</ttFont>\n'
- % ("z" * 2 * BUFSIZE)
- ).encode('utf-8')
-
- dataSize = len(data)
- progressBar = DummyProgressPrinter('test')
- with BytesIO(data) as tmp:
- reader = XMLReader(tmp, TTFont(), progress=progressBar)
- self.assertEqual(progressBar.pos, 0)
- reader.read()
- self.assertEqual(progressBar.pos, dataSize // 100)
- self.assertEqual(progressBar.maxval, dataSize // 100)
- self.assertTrue('test' in progressBar.label)
- with BytesIO(b"<ttFont></ttFont>") as tmp:
- reader = XMLReader(tmp, TTFont(), progress=progressBar)
- reader.read()
- # when data size is less than 100 bytes, 'maxval' is 1
- self.assertEqual(progressBar.maxval, 1)
-
- def test_close_file_path(self):
- with tempfile.NamedTemporaryFile(delete=False) as tmp:
- tmp.write(b'<ttFont></ttFont>')
- reader = XMLReader(tmp.name, TTFont())
- reader.read()
- # when reading from path, the file is closed automatically at the end
- self.assertTrue(reader.file.closed)
- # this does nothing
- reader.close()
- self.assertTrue(reader.file.closed)
- os.remove(tmp.name)
-
- def test_close_file_obj(self):
- with tempfile.NamedTemporaryFile(delete=False) as tmp:
- tmp.write(b'<ttFont>"hello"</ttFont>')
- with open(tmp.name, "rb") as f:
- reader = XMLReader(f, TTFont())
- reader.read()
- # when reading from a file or file-like object, the latter is kept open
- self.assertFalse(reader.file.closed)
- # ... until the user explicitly closes it
- reader.close()
- self.assertTrue(reader.file.closed)
- os.remove(tmp.name)
-
- def test_read_sub_file(self):
- # Verifies that sub-file content is able to be read to a table.
- expectedContent = 'testContent'
- expectedNameID = '1'
- expectedPlatform = '3'
- expectedLangId = '0x409'
-
- with tempfile.NamedTemporaryFile(delete=False) as tmp:
- subFileData = (
- '<ttFont ttLibVersion="3.15">'
- '<name>'
- '<namerecord nameID="%s" platformID="%s" platEncID="1" langID="%s">'
- '%s'
- '</namerecord>'
- '</name>'
- '</ttFont>'
- ) % (expectedNameID, expectedPlatform, expectedLangId, expectedContent)
- tmp.write(subFileData.encode("utf-8"))
-
- with tempfile.NamedTemporaryFile(delete=False) as tmp2:
- fileData = (
- '<ttFont ttLibVersion="3.15">'
- '<name>'
- '<namerecord src="%s"/>'
- '</name>'
- '</ttFont>'
- ) % tmp.name
- tmp2.write(fileData.encode('utf-8'))
-
- ttf = TTFont()
- with open(tmp2.name, "rb") as f:
- reader = XMLReader(f, ttf)
- reader.read()
- reader.close()
- nameTable = ttf['name']
- self.assertTrue(int(expectedNameID) == nameTable.names[0].nameID)
- self.assertTrue(int(expectedLangId, 16) == nameTable.names[0].langID)
- self.assertTrue(int(expectedPlatform) == nameTable.names[0].platformID)
- self.assertEqual(expectedContent, nameTable.names[0].string.decode(nameTable.names[0].getEncoding()))
-
- os.remove(tmp.name)
- os.remove(tmp2.name)
-
-if __name__ == '__main__':
- import sys
- sys.exit(unittest.main())
+"""
+ % expected
+ )
+
+ with BytesIO(data.encode("utf-8")) as tmp:
+ reader = DebugXMLReader(tmp, TTFont())
+ reader.read()
+ content = strjoin(reader.contents[0]).strip()
+ self.assertEqual(expected, content)
+
+ def test_normalise_newlines(self):
+ class DebugXMLReader(XMLReader):
+ def __init__(self, fileOrPath, ttFont, progress=None):
+ super(DebugXMLReader, self).__init__(fileOrPath, ttFont, progress)
+ self.newlines = []
+
+ def _characterDataHandler(self, data):
+ self.newlines.extend([c for c in data if c in ("\r", "\n")])
+
+ # notice how when CR is escaped, it is not normalised by the XML parser
+ data = (
+ "<ttFont>\r" # \r -> \n
+ " <test>\r\n" # \r\n -> \n
+ " a line of text\n" # \n
+ " escaped CR and unix newline &#13;\n" # &#13;\n -> \r\n
+ " escaped CR and macintosh newline &#13;\r" # &#13;\r -> \r\n
+ " escaped CR and windows newline &#13;\r\n" # &#13;\r\n -> \r\n
+ " </test>\n" # \n
+ "</ttFont>"
+ )
+
+ with BytesIO(data.encode("utf-8")) as tmp:
+ reader = DebugXMLReader(tmp, TTFont())
+ reader.read()
+ expected = ["\n"] * 3 + ["\r", "\n"] * 3 + ["\n"]
+ self.assertEqual(expected, reader.newlines)
+
+ def test_progress(self):
+ class DummyProgressPrinter(ProgressPrinter):
+ def __init__(self, title, maxval=100):
+ self.label = title
+ self.maxval = maxval
+ self.pos = 0
+
+ def set(self, val, maxval=None):
+ if maxval is not None:
+ self.maxval = maxval
+ self.pos = val
+
+ def increment(self, val=1):
+ self.pos += val
+
+ def setLabel(self, text):
+ self.label = text
+
+ data = (
+ "<ttFont>\n"
+ " <test>\n"
+ " %s\n"
+ " </test>\n"
+ "</ttFont>\n" % ("z" * 2 * BUFSIZE)
+ ).encode("utf-8")
+
+ dataSize = len(data)
+ progressBar = DummyProgressPrinter("test")
+ with BytesIO(data) as tmp:
+ reader = XMLReader(tmp, TTFont(), progress=progressBar)
+ self.assertEqual(progressBar.pos, 0)
+ reader.read()
+ self.assertEqual(progressBar.pos, dataSize // 100)
+ self.assertEqual(progressBar.maxval, dataSize // 100)
+ self.assertTrue("test" in progressBar.label)
+ with BytesIO(b"<ttFont></ttFont>") as tmp:
+ reader = XMLReader(tmp, TTFont(), progress=progressBar)
+ reader.read()
+ # when data size is less than 100 bytes, 'maxval' is 1
+ self.assertEqual(progressBar.maxval, 1)
+
+ def test_close_file_path(self):
+ with tempfile.NamedTemporaryFile(delete=False) as tmp:
+ tmp.write(b"<ttFont></ttFont>")
+ reader = XMLReader(tmp.name, TTFont())
+ reader.read()
+ # when reading from path, the file is closed automatically at the end
+ self.assertTrue(reader.file.closed)
+ # this does nothing
+ reader.close()
+ self.assertTrue(reader.file.closed)
+ os.remove(tmp.name)
+
+ def test_close_file_obj(self):
+ with tempfile.NamedTemporaryFile(delete=False) as tmp:
+ tmp.write(b'<ttFont>"hello"</ttFont>')
+ with open(tmp.name, "rb") as f:
+ reader = XMLReader(f, TTFont())
+ reader.read()
+ # when reading from a file or file-like object, the latter is kept open
+ self.assertFalse(reader.file.closed)
+ # ... until the user explicitly closes it
+ reader.close()
+ self.assertTrue(reader.file.closed)
+ os.remove(tmp.name)
+
+ def test_read_sub_file(self):
+ # Verifies that sub-file content is able to be read to a table.
+ expectedContent = "testContent"
+ expectedNameID = "1"
+ expectedPlatform = "3"
+ expectedLangId = "0x409"
+
+ with tempfile.NamedTemporaryFile(delete=False) as tmp:
+ subFileData = (
+ '<ttFont ttLibVersion="3.15">'
+ "<name>"
+ '<namerecord nameID="%s" platformID="%s" platEncID="1" langID="%s">'
+ "%s"
+ "</namerecord>"
+ "</name>"
+ "</ttFont>"
+ ) % (expectedNameID, expectedPlatform, expectedLangId, expectedContent)
+ tmp.write(subFileData.encode("utf-8"))
+
+ with tempfile.NamedTemporaryFile(delete=False) as tmp2:
+ fileData = (
+ '<ttFont ttLibVersion="3.15">'
+ "<name>"
+ '<namerecord src="%s"/>'
+ "</name>"
+ "</ttFont>"
+ ) % tmp.name
+ tmp2.write(fileData.encode("utf-8"))
+
+ ttf = TTFont()
+ with open(tmp2.name, "rb") as f:
+ reader = XMLReader(f, ttf)
+ reader.read()
+ reader.close()
+ nameTable = ttf["name"]
+ self.assertTrue(int(expectedNameID) == nameTable.names[0].nameID)
+ self.assertTrue(int(expectedLangId, 16) == nameTable.names[0].langID)
+ self.assertTrue(int(expectedPlatform) == nameTable.names[0].platformID)
+ self.assertEqual(
+ expectedContent,
+ nameTable.names[0].string.decode(nameTable.names[0].getEncoding()),
+ )
+
+ os.remove(tmp.name)
+ os.remove(tmp2.name)
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/misc/xmlWriter_test.py b/Tests/misc/xmlWriter_test.py
index 69471543..c1e65171 100644
--- a/Tests/misc/xmlWriter_test.py
+++ b/Tests/misc/xmlWriter_test.py
@@ -6,122 +6,146 @@ from fontTools.misc.xmlWriter import XMLWriter
HEADER = b'<?xml version="1.0" encoding="UTF-8"?>\n'
-class TestXMLWriter(unittest.TestCase):
- def test_comment_escaped(self):
- writer = XMLWriter(BytesIO())
- writer.comment("This&that are <comments>")
- self.assertEqual(HEADER + b"<!-- This&amp;that are &lt;comments&gt; -->", writer.file.getvalue())
-
- def test_comment_multiline(self):
- writer = XMLWriter(BytesIO())
- writer.comment("Hello world\nHow are you?")
- self.assertEqual(HEADER + b"<!-- Hello world\n How are you? -->",
- writer.file.getvalue())
-
- def test_encoding_default(self):
- writer = XMLWriter(BytesIO())
- self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
- writer.file.getvalue())
-
- def test_encoding_utf8(self):
- # https://github.com/fonttools/fonttools/issues/246
- writer = XMLWriter(BytesIO(), encoding="utf8")
- self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
- writer.file.getvalue())
-
- def test_encoding_UTF_8(self):
- # https://github.com/fonttools/fonttools/issues/246
- writer = XMLWriter(BytesIO(), encoding="UTF-8")
- self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
- writer.file.getvalue())
-
- def test_encoding_UTF8(self):
- # https://github.com/fonttools/fonttools/issues/246
- writer = XMLWriter(BytesIO(), encoding="UTF8")
- self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
- writer.file.getvalue())
-
- def test_encoding_other(self):
- self.assertRaises(Exception, XMLWriter, BytesIO(),
- encoding="iso-8859-1")
-
- def test_write(self):
- writer = XMLWriter(BytesIO())
- writer.write("foo&bar")
- self.assertEqual(HEADER + b"foo&amp;bar", writer.file.getvalue())
-
- def test_indent_dedent(self):
- writer = XMLWriter(BytesIO())
- writer.write("foo")
- writer.newline()
- writer.indent()
- writer.write("bar")
- writer.newline()
- writer.dedent()
- writer.write("baz")
- self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], "\n"),
- writer.file.getvalue())
-
- def test_writecdata(self):
- writer = XMLWriter(BytesIO())
- writer.writecdata("foo&bar")
- self.assertEqual(HEADER + b"<![CDATA[foo&bar]]>", writer.file.getvalue())
-
- def test_simpletag(self):
- writer = XMLWriter(BytesIO())
- writer.simpletag("tag", a="1", b="2")
- self.assertEqual(HEADER + b'<tag a="1" b="2"/>', writer.file.getvalue())
-
- def test_begintag_endtag(self):
- writer = XMLWriter(BytesIO())
- writer.begintag("tag", attr="value")
- writer.write("content")
- writer.endtag("tag")
- self.assertEqual(HEADER + b'<tag attr="value">content</tag>', writer.file.getvalue())
-
- def test_dumphex(self):
- writer = XMLWriter(BytesIO())
- writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.")
- self.assertEqual(HEADER + bytesjoin([
- "54797065 20697320 61206265 61757469",
- "66756c20 67726f75 70206f66 206c6574",
- "74657273 2c206e6f 74206120 67726f75",
- "70206f66 20626561 75746966 756c206c",
- "65747465 72732e ", ""], joiner="\n"), writer.file.getvalue())
-
- def test_stringifyattrs(self):
- writer = XMLWriter(BytesIO())
- expected = ' attr="0"'
- self.assertEqual(expected, writer.stringifyattrs(attr=0))
- self.assertEqual(expected, writer.stringifyattrs(attr=b'0'))
- self.assertEqual(expected, writer.stringifyattrs(attr='0'))
- self.assertEqual(expected, writer.stringifyattrs(attr=u'0'))
-
- def test_carriage_return_escaped(self):
- writer = XMLWriter(BytesIO())
- writer.write("two lines\r\nseparated by Windows line endings")
- self.assertEqual(
- HEADER + b'two lines&#13;\nseparated by Windows line endings',
- writer.file.getvalue())
-
- def test_newlinestr(self):
- header = b'<?xml version="1.0" encoding="UTF-8"?>'
-
- for nls in (None, '\n', '\r\n', '\r', ''):
- writer = XMLWriter(BytesIO(), newlinestr=nls)
- writer.write("hello")
- writer.newline()
- writer.write("world")
- writer.newline()
-
- linesep = tobytes(os.linesep) if nls is None else tobytes(nls)
-
- self.assertEqual(
- header + linesep + b"hello" + linesep + b"world" + linesep,
- writer.file.getvalue())
-
-
-if __name__ == '__main__':
- import sys
- sys.exit(unittest.main())
+class TestXMLWriter(unittest.TestCase):
+ def test_comment_escaped(self):
+ writer = XMLWriter(BytesIO())
+ writer.comment("This&that are <comments>")
+ self.assertEqual(
+ HEADER + b"<!-- This&amp;that are &lt;comments&gt; -->",
+ writer.file.getvalue(),
+ )
+
+ def test_comment_multiline(self):
+ writer = XMLWriter(BytesIO())
+ writer.comment("Hello world\nHow are you?")
+ self.assertEqual(
+ HEADER + b"<!-- Hello world\n How are you? -->", writer.file.getvalue()
+ )
+
+ def test_encoding_default(self):
+ writer = XMLWriter(BytesIO())
+ self.assertEqual(
+ b'<?xml version="1.0" encoding="UTF-8"?>\n', writer.file.getvalue()
+ )
+
+ def test_encoding_utf8(self):
+ # https://github.com/fonttools/fonttools/issues/246
+ writer = XMLWriter(BytesIO(), encoding="utf8")
+ self.assertEqual(
+ b'<?xml version="1.0" encoding="UTF-8"?>\n', writer.file.getvalue()
+ )
+
+ def test_encoding_UTF_8(self):
+ # https://github.com/fonttools/fonttools/issues/246
+ writer = XMLWriter(BytesIO(), encoding="UTF-8")
+ self.assertEqual(
+ b'<?xml version="1.0" encoding="UTF-8"?>\n', writer.file.getvalue()
+ )
+
+ def test_encoding_UTF8(self):
+ # https://github.com/fonttools/fonttools/issues/246
+ writer = XMLWriter(BytesIO(), encoding="UTF8")
+ self.assertEqual(
+ b'<?xml version="1.0" encoding="UTF-8"?>\n', writer.file.getvalue()
+ )
+
+ def test_encoding_other(self):
+ self.assertRaises(Exception, XMLWriter, BytesIO(), encoding="iso-8859-1")
+
+ def test_write(self):
+ writer = XMLWriter(BytesIO())
+ writer.write("foo&bar")
+ self.assertEqual(HEADER + b"foo&amp;bar", writer.file.getvalue())
+
+ def test_indent_dedent(self):
+ writer = XMLWriter(BytesIO())
+ writer.write("foo")
+ writer.newline()
+ writer.indent()
+ writer.write("bar")
+ writer.newline()
+ writer.dedent()
+ writer.write("baz")
+ self.assertEqual(
+ HEADER + bytesjoin(["foo", " bar", "baz"], "\n"), writer.file.getvalue()
+ )
+
+ def test_writecdata(self):
+ writer = XMLWriter(BytesIO())
+ writer.writecdata("foo&bar")
+ self.assertEqual(HEADER + b"<![CDATA[foo&bar]]>", writer.file.getvalue())
+
+ def test_simpletag(self):
+ writer = XMLWriter(BytesIO())
+ writer.simpletag("tag", a="1", b="2")
+ self.assertEqual(HEADER + b'<tag a="1" b="2"/>', writer.file.getvalue())
+
+ def test_begintag_endtag(self):
+ writer = XMLWriter(BytesIO())
+ writer.begintag("tag", attr="value")
+ writer.write("content")
+ writer.endtag("tag")
+ self.assertEqual(
+ HEADER + b'<tag attr="value">content</tag>', writer.file.getvalue()
+ )
+
+ def test_dumphex(self):
+ writer = XMLWriter(BytesIO())
+ writer.dumphex(
+ "Type is a beautiful group of letters, not a group of beautiful letters."
+ )
+ self.assertEqual(
+ HEADER
+ + bytesjoin(
+ [
+ "54797065 20697320 61206265 61757469",
+ "66756c20 67726f75 70206f66 206c6574",
+ "74657273 2c206e6f 74206120 67726f75",
+ "70206f66 20626561 75746966 756c206c",
+ "65747465 72732e ",
+ "",
+ ],
+ joiner="\n",
+ ),
+ writer.file.getvalue(),
+ )
+
+ def test_stringifyattrs(self):
+ writer = XMLWriter(BytesIO())
+ expected = ' attr="0"'
+ self.assertEqual(expected, writer.stringifyattrs(attr=0))
+ self.assertEqual(expected, writer.stringifyattrs(attr=b"0"))
+ self.assertEqual(expected, writer.stringifyattrs(attr="0"))
+ self.assertEqual(expected, writer.stringifyattrs(attr="0"))
+
+ def test_carriage_return_escaped(self):
+ writer = XMLWriter(BytesIO())
+ writer.write("two lines\r\nseparated by Windows line endings")
+ self.assertEqual(
+ HEADER + b"two lines&#13;\nseparated by Windows line endings",
+ writer.file.getvalue(),
+ )
+
+ def test_newlinestr(self):
+ header = b'<?xml version="1.0" encoding="UTF-8"?>'
+
+ for nls in (None, "\n", "\r\n", "\r", ""):
+ writer = XMLWriter(BytesIO(), newlinestr=nls)
+ writer.write("hello")
+ writer.newline()
+ writer.write("world")
+ writer.newline()
+
+ linesep = tobytes(os.linesep) if nls is None else tobytes(nls)
+
+ self.assertEqual(
+ header + linesep + b"hello" + linesep + b"world" + linesep,
+ writer.file.getvalue(),
+ )
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/mtiLib/data/featurename-backward.ttx.GSUB b/Tests/mtiLib/data/featurename-backward.ttx.GSUB
index cc893cd9..0fbe51fd 100644
--- a/Tests/mtiLib/data/featurename-backward.ttx.GSUB
+++ b/Tests/mtiLib/data/featurename-backward.ttx.GSUB
@@ -47,6 +47,7 @@
</FeatureList>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- l1: -->
<Lookup index="0">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/featurename-forward.ttx.GSUB b/Tests/mtiLib/data/featurename-forward.ttx.GSUB
index cc893cd9..0fbe51fd 100644
--- a/Tests/mtiLib/data/featurename-forward.ttx.GSUB
+++ b/Tests/mtiLib/data/featurename-forward.ttx.GSUB
@@ -47,6 +47,7 @@
</FeatureList>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- l1: -->
<Lookup index="0">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB b/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB
index cb358d7c..811f79a0 100644
--- a/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB
+++ b/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB
@@ -35,6 +35,7 @@
</FeatureList>
<LookupList>
<!-- LookupCount=2 -->
+ <!-- l1: -->
<Lookup index="0">
<LookupType value="1"/>
<LookupFlag value="0"/>
@@ -44,6 +45,7 @@
<Substitution in="uvowelsignkannada" out="uvowelsignaltkannada"/>
</SingleSubst>
</Lookup>
+ <!-- l0: -->
<Lookup index="1">
<LookupType value="6"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB b/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB
index 249d605b..86b3148f 100644
--- a/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB
+++ b/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB
@@ -35,6 +35,7 @@
</FeatureList>
<LookupList>
<!-- LookupCount=2 -->
+ <!-- l0: -->
<Lookup index="0">
<LookupType value="6"/>
<LookupFlag value="0"/>
@@ -74,6 +75,7 @@
</ChainSubClassSet>
</ChainContextSubst>
</Lookup>
+ <!-- l1: -->
<Lookup index="1">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB b/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB
index 249d605b..74192a51 100644
--- a/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB
+++ b/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB
@@ -35,6 +35,7 @@
</FeatureList>
<LookupList>
<!-- LookupCount=2 -->
+ <!-- 0: -->
<Lookup index="0">
<LookupType value="6"/>
<LookupFlag value="0"/>
@@ -74,6 +75,7 @@
</ChainSubClassSet>
</ChainContextSubst>
</Lookup>
+ <!-- 1: -->
<Lookup index="1">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS b/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS
index b550c700..32fffc4b 100644
--- a/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=2 -->
+ <!-- raucontext-sinh: -->
<Lookup index="0">
<LookupType value="8"/>
<LookupFlag value="512"/><!-- markAttachmentType[2] -->
@@ -43,6 +44,7 @@
</ChainPosRuleSet>
</ChainContextPos>
</Lookup>
+ <!-- u2aelow-sinh: -->
<Lookup index="1" empty="1"/>
</LookupList>
</GPOS>
diff --git a/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB b/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB
index 7dfdb848..30a15304 100644
--- a/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=2 -->
+ <!-- raucontext-sinh: -->
<Lookup index="0">
<LookupType value="6"/>
<LookupFlag value="512"/><!-- markAttachmentType[2] -->
@@ -43,6 +44,7 @@
</ChainSubRuleSet>
</ChainContextSubst>
</Lookup>
+ <!-- u2aelow-sinh: -->
<Lookup index="1" empty="1"/>
</LookupList>
</GSUB>
diff --git a/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB b/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB
index fcd7569f..39691c44 100644
--- a/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=2 -->
+ <!-- swashes-knda: -->
<Lookup index="0">
<LookupType value="6"/>
<LookupFlag value="0"/>
@@ -42,6 +43,7 @@
</ChainSubClassSet>
</ChainContextSubst>
</Lookup>
+ <!-- u-swash-knda: -->
<Lookup index="1">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB b/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB
index 4f312c6e..bea53f50 100644
--- a/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=2 -->
+ <!-- slashcontext: -->
<Lookup index="0">
<LookupType value="6"/>
<LookupFlag value="0"/>
@@ -45,6 +46,7 @@
</SubstLookupRecord>
</ChainContextSubst>
</Lookup>
+ <!-- slashTofraction: -->
<Lookup index="1">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS b/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS
index 6c08c50c..6d92d083 100644
--- a/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- kernpairs: -->
<Lookup index="0">
<LookupType value="3"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS b/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS
index a8371233..e7a5ff78 100644
--- a/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- 0: -->
<Lookup index="0">
<LookupType value="2"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS b/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS
index e6e21028..b78d4ff0 100644
--- a/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- topmarktobase-guru: -->
<Lookup index="0">
<LookupType value="4"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS b/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS
index 32b35aee..9058eb0f 100644
--- a/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- 0: -->
<Lookup index="0">
<LookupType value="2"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS b/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS
index f03a90e3..58567a97 100644
--- a/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- 0: -->
<Lookup index="0">
<LookupType value="2"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS b/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS
index c3bdbf68..3a955f65 100644
--- a/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- supsToInferiors: -->
<Lookup index="0">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB b/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB
index 86b0b731..7762c621 100644
--- a/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- 27: -->
<Lookup index="0">
<LookupType value="3"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB b/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB
index 26c88c81..5ad20184 100644
--- a/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- latinLigatures: -->
<Lookup index="0">
<LookupType value="4"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB b/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB
index 5bedfba6..72eefb87 100644
--- a/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- replace-akhand-telugu: -->
<Lookup index="0">
<LookupType value="2"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB b/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB
index d705af53..87412eae 100644
--- a/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- arabicReverse: -->
<Lookup index="0">
<LookupType value="8"/>
<LookupFlag value="9"/><!-- rightToLeft ignoreMarks -->
diff --git a/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB b/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB
index dc6a2950..adc3ba52 100644
--- a/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB
+++ b/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- alt-fractions: -->
<Lookup index="0">
<LookupType value="1"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS b/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS
index b5f275eb..f723670c 100644
--- a/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS
+++ b/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS
@@ -3,6 +3,7 @@
<Version value="0x00010000"/>
<LookupList>
<!-- LookupCount=1 -->
+ <!-- LigMk0: -->
<Lookup index="0">
<LookupType value="5"/>
<LookupFlag value="0"/>
diff --git a/Tests/mtiLib/mti_test.py b/Tests/mtiLib/mti_test.py
index 8a80113c..a4cc098c 100644
--- a/Tests/mtiLib/mti_test.py
+++ b/Tests/mtiLib/mti_test.py
@@ -1,120 +1,396 @@
from fontTools.misc.xmlWriter import XMLWriter
from fontTools.ttLib import TTFont
+from fontTools.feaLib.lookupDebugInfo import LOOKUP_DEBUG_ENV_VAR
from fontTools import mtiLib
import difflib
from io import StringIO
import os
import sys
-import unittest
-
-
-class MtiTest(unittest.TestCase):
-
- GLYPH_ORDER = ['.notdef',
- 'a', 'b', 'pakannada', 'phakannada', 'vakannada', 'pevowelkannada',
- 'phevowelkannada', 'vevowelkannada', 'uvowelsignkannada', 'uuvowelsignkannada',
- 'uvowelsignaltkannada', 'uuvowelsignaltkannada', 'uuvowelsignsinh',
- 'uvowelsignsinh', 'rakarsinh', 'zero', 'one', 'two', 'three', 'four', 'five',
- 'six', 'seven', 'eight', 'nine', 'slash', 'fraction', 'A', 'B', 'C', 'fi',
- 'fl', 'breve', 'acute', 'uniFB01', 'ffi', 'grave', 'commaacent', 'dotbelow',
- 'dotabove', 'cedilla', 'commaaccent', 'Acircumflex', 'V', 'T', 'acircumflex',
- 'Aacute', 'Agrave', 'O', 'Oacute', 'Ograve', 'Ocircumflex', 'aacute', 'agrave',
- 'aimatrabindigurmukhi', 'aimatragurmukhi', 'aimatratippigurmukhi',
- 'aumatrabindigurmukhi', 'aumatragurmukhi', 'bindigurmukhi',
- 'eematrabindigurmukhi', 'eematragurmukhi', 'eematratippigurmukhi',
- 'oomatrabindigurmukhi', 'oomatragurmukhi', 'oomatratippigurmukhi',
- 'lagurmukhi', 'lanuktagurmukhi', 'nagurmukhi', 'nanuktagurmukhi',
- 'ngagurmukhi', 'nganuktagurmukhi', 'nnagurmukhi', 'nnanuktagurmukhi',
- 'tthagurmukhi', 'tthanuktagurmukhi', 'bsuperior', 'isuperior', 'vsuperior',
- 'wsuperior', 'periodsuperior', 'osuperior', 'tsuperior', 'dollarsuperior',
- 'fsuperior', 'gsuperior', 'zsuperior', 'dsuperior', 'psuperior', 'hsuperior',
- 'oesuperior', 'aesuperior', 'centsuperior', 'esuperior', 'lsuperior',
- 'qsuperior', 'csuperior', 'asuperior', 'commasuperior', 'xsuperior',
- 'egravesuperior', 'usuperior', 'rsuperior', 'nsuperior', 'ssuperior',
- 'msuperior', 'jsuperior', 'ysuperior', 'ksuperior', 'guilsinglright',
- 'guilsinglleft', 'uniF737', 'uniE11C', 'uniE11D', 'uniE11A', 'uni2077',
- 'uni2087', 'uniE11B', 'uniE119', 'uniE0DD', 'uniE0DE', 'uniF736', 'uniE121',
- 'uniE122', 'uniE11F', 'uni2076', 'uni2086', 'uniE120', 'uniE11E', 'uniE0DB',
- 'uniE0DC', 'uniF733', 'uniE12B', 'uniE12C', 'uniE129', 'uni00B3', 'uni2083',
- 'uniE12A', 'uniE128', 'uniF732', 'uniE133', 'uniE134', 'uniE131', 'uni00B2',
- 'uni2082', 'uniE132', 'uniE130', 'uniE0F9', 'uniF734', 'uniE0D4', 'uniE0D5',
- 'uniE0D2', 'uni2074', 'uni2084', 'uniE0D3', 'uniE0D1', 'uniF730', 'uniE13D',
- 'uniE13E', 'uniE13A', 'uni2070', 'uni2080', 'uniE13B', 'uniE139', 'uniE13C',
- 'uniF739', 'uniE0EC', 'uniE0ED', 'uniE0EA', 'uni2079', 'uni2089', 'uniE0EB',
- 'uniE0E9', 'uniF735', 'uniE0CD', 'uniE0CE', 'uniE0CB', 'uni2075', 'uni2085',
- 'uniE0CC', 'uniE0CA', 'uniF731', 'uniE0F3', 'uniE0F4', 'uniE0F1', 'uni00B9',
- 'uni2081', 'uniE0F2', 'uniE0F0', 'uniE0F8', 'uniF738', 'uniE0C0', 'uniE0C1',
- 'uniE0BE', 'uni2078', 'uni2088', 'uniE0BF', 'uniE0BD', 'I', 'Ismall', 't', 'i',
- 'f', 'IJ', 'J', 'IJsmall', 'Jsmall', 'tt', 'ij', 'j', 'ffb', 'ffh', 'h', 'ffk',
- 'k', 'ffl', 'l', 'fft', 'fb', 'ff', 'fh', 'fj', 'fk', 'ft', 'janyevoweltelugu',
- 'kassevoweltelugu', 'jaivoweltelugu', 'nyasubscripttelugu', 'kaivoweltelugu',
- 'ssasubscripttelugu', 'bayi1', 'jeemi1', 'kafi1', 'ghafi1', 'laami1', 'kafm1',
- 'ghafm1', 'laamm1', 'rayf2', 'reyf2', 'yayf2', 'zayf2', 'fayi1', 'ayehf2',
- 'hamzayeharabf2', 'hamzayehf2', 'yehf2', 'ray', 'rey', 'zay', 'yay', 'dal',
- 'del', 'zal', 'rayf1', 'reyf1', 'yayf1', 'zayf1', 'ayehf1', 'hamzayeharabf1',
- 'hamzayehf1', 'yehf1', 'dal1', 'del1', 'zal1', 'onehalf', 'onehalf.alt',
- 'onequarter', 'onequarter.alt', 'threequarters', 'threequarters.alt',
- 'AlefSuperiorNS', 'DammaNS', 'DammaRflxNS', 'DammatanNS', 'Fatha2dotsNS',
- 'FathaNS', 'FathatanNS', 'FourDotsAboveNS', 'HamzaAboveNS', 'MaddaNS',
- 'OneDotAbove2NS', 'OneDotAboveNS', 'ShaddaAlefNS', 'ShaddaDammaNS',
- 'ShaddaDammatanNS', 'ShaddaFathatanNS', 'ShaddaKasraNS', 'ShaddaKasratanNS',
- 'ShaddaNS', 'SharetKafNS', 'SukunNS', 'ThreeDotsDownAboveNS',
- 'ThreeDotsUpAboveNS', 'TwoDotsAboveNS', 'TwoDotsVerticalAboveNS', 'UltapeshNS',
- 'WaslaNS', 'AinIni.12m_MeemFin.02', 'AinIni_YehBarreeFin',
- 'AinMed_YehBarreeFin', 'BehxIni_MeemFin', 'BehxIni_NoonGhunnaFin',
- 'BehxIni_RehFin', 'BehxIni_RehFin.b', 'BehxMed_MeemFin.py',
- 'BehxMed_NoonGhunnaFin', 'BehxMed_NoonGhunnaFin.cup', 'BehxMed_RehFin',
- 'BehxMed_RehFin.cup', 'BehxMed_YehxFin', 'FehxMed_YehBarreeFin',
- 'HahIni_YehBarreeFin', 'KafIni_YehBarreeFin', 'KafMed.12_YehxFin.01',
- 'KafMed_MeemFin', 'KafMed_YehBarreeFin', 'LamAlefFin', 'LamAlefFin.cup',
- 'LamAlefFin.cut', 'LamAlefFin.short', 'LamAlefSep', 'LamIni_MeemFin',
- 'LamIni_YehBarreeFin', 'LamMed_MeemFin', 'LamMed_MeemFin.b', 'LamMed_YehxFin',
- 'LamMed_YehxFin.cup', 'TahIni_YehBarreeFin', 'null', 'CR', 'space',
- 'exclam', 'quotedbl', 'numbersign',
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def set_lookup_debug_env_var(monkeypatch):
+ monkeypatch.setenv(LOOKUP_DEBUG_ENV_VAR, "1")
+
+
+class MtiTest:
+ GLYPH_ORDER = [
+ ".notdef",
+ "a",
+ "b",
+ "pakannada",
+ "phakannada",
+ "vakannada",
+ "pevowelkannada",
+ "phevowelkannada",
+ "vevowelkannada",
+ "uvowelsignkannada",
+ "uuvowelsignkannada",
+ "uvowelsignaltkannada",
+ "uuvowelsignaltkannada",
+ "uuvowelsignsinh",
+ "uvowelsignsinh",
+ "rakarsinh",
+ "zero",
+ "one",
+ "two",
+ "three",
+ "four",
+ "five",
+ "six",
+ "seven",
+ "eight",
+ "nine",
+ "slash",
+ "fraction",
+ "A",
+ "B",
+ "C",
+ "fi",
+ "fl",
+ "breve",
+ "acute",
+ "uniFB01",
+ "ffi",
+ "grave",
+ "commaacent",
+ "dotbelow",
+ "dotabove",
+ "cedilla",
+ "commaaccent",
+ "Acircumflex",
+ "V",
+ "T",
+ "acircumflex",
+ "Aacute",
+ "Agrave",
+ "O",
+ "Oacute",
+ "Ograve",
+ "Ocircumflex",
+ "aacute",
+ "agrave",
+ "aimatrabindigurmukhi",
+ "aimatragurmukhi",
+ "aimatratippigurmukhi",
+ "aumatrabindigurmukhi",
+ "aumatragurmukhi",
+ "bindigurmukhi",
+ "eematrabindigurmukhi",
+ "eematragurmukhi",
+ "eematratippigurmukhi",
+ "oomatrabindigurmukhi",
+ "oomatragurmukhi",
+ "oomatratippigurmukhi",
+ "lagurmukhi",
+ "lanuktagurmukhi",
+ "nagurmukhi",
+ "nanuktagurmukhi",
+ "ngagurmukhi",
+ "nganuktagurmukhi",
+ "nnagurmukhi",
+ "nnanuktagurmukhi",
+ "tthagurmukhi",
+ "tthanuktagurmukhi",
+ "bsuperior",
+ "isuperior",
+ "vsuperior",
+ "wsuperior",
+ "periodsuperior",
+ "osuperior",
+ "tsuperior",
+ "dollarsuperior",
+ "fsuperior",
+ "gsuperior",
+ "zsuperior",
+ "dsuperior",
+ "psuperior",
+ "hsuperior",
+ "oesuperior",
+ "aesuperior",
+ "centsuperior",
+ "esuperior",
+ "lsuperior",
+ "qsuperior",
+ "csuperior",
+ "asuperior",
+ "commasuperior",
+ "xsuperior",
+ "egravesuperior",
+ "usuperior",
+ "rsuperior",
+ "nsuperior",
+ "ssuperior",
+ "msuperior",
+ "jsuperior",
+ "ysuperior",
+ "ksuperior",
+ "guilsinglright",
+ "guilsinglleft",
+ "uniF737",
+ "uniE11C",
+ "uniE11D",
+ "uniE11A",
+ "uni2077",
+ "uni2087",
+ "uniE11B",
+ "uniE119",
+ "uniE0DD",
+ "uniE0DE",
+ "uniF736",
+ "uniE121",
+ "uniE122",
+ "uniE11F",
+ "uni2076",
+ "uni2086",
+ "uniE120",
+ "uniE11E",
+ "uniE0DB",
+ "uniE0DC",
+ "uniF733",
+ "uniE12B",
+ "uniE12C",
+ "uniE129",
+ "uni00B3",
+ "uni2083",
+ "uniE12A",
+ "uniE128",
+ "uniF732",
+ "uniE133",
+ "uniE134",
+ "uniE131",
+ "uni00B2",
+ "uni2082",
+ "uniE132",
+ "uniE130",
+ "uniE0F9",
+ "uniF734",
+ "uniE0D4",
+ "uniE0D5",
+ "uniE0D2",
+ "uni2074",
+ "uni2084",
+ "uniE0D3",
+ "uniE0D1",
+ "uniF730",
+ "uniE13D",
+ "uniE13E",
+ "uniE13A",
+ "uni2070",
+ "uni2080",
+ "uniE13B",
+ "uniE139",
+ "uniE13C",
+ "uniF739",
+ "uniE0EC",
+ "uniE0ED",
+ "uniE0EA",
+ "uni2079",
+ "uni2089",
+ "uniE0EB",
+ "uniE0E9",
+ "uniF735",
+ "uniE0CD",
+ "uniE0CE",
+ "uniE0CB",
+ "uni2075",
+ "uni2085",
+ "uniE0CC",
+ "uniE0CA",
+ "uniF731",
+ "uniE0F3",
+ "uniE0F4",
+ "uniE0F1",
+ "uni00B9",
+ "uni2081",
+ "uniE0F2",
+ "uniE0F0",
+ "uniE0F8",
+ "uniF738",
+ "uniE0C0",
+ "uniE0C1",
+ "uniE0BE",
+ "uni2078",
+ "uni2088",
+ "uniE0BF",
+ "uniE0BD",
+ "I",
+ "Ismall",
+ "t",
+ "i",
+ "f",
+ "IJ",
+ "J",
+ "IJsmall",
+ "Jsmall",
+ "tt",
+ "ij",
+ "j",
+ "ffb",
+ "ffh",
+ "h",
+ "ffk",
+ "k",
+ "ffl",
+ "l",
+ "fft",
+ "fb",
+ "ff",
+ "fh",
+ "fj",
+ "fk",
+ "ft",
+ "janyevoweltelugu",
+ "kassevoweltelugu",
+ "jaivoweltelugu",
+ "nyasubscripttelugu",
+ "kaivoweltelugu",
+ "ssasubscripttelugu",
+ "bayi1",
+ "jeemi1",
+ "kafi1",
+ "ghafi1",
+ "laami1",
+ "kafm1",
+ "ghafm1",
+ "laamm1",
+ "rayf2",
+ "reyf2",
+ "yayf2",
+ "zayf2",
+ "fayi1",
+ "ayehf2",
+ "hamzayeharabf2",
+ "hamzayehf2",
+ "yehf2",
+ "ray",
+ "rey",
+ "zay",
+ "yay",
+ "dal",
+ "del",
+ "zal",
+ "rayf1",
+ "reyf1",
+ "yayf1",
+ "zayf1",
+ "ayehf1",
+ "hamzayeharabf1",
+ "hamzayehf1",
+ "yehf1",
+ "dal1",
+ "del1",
+ "zal1",
+ "onehalf",
+ "onehalf.alt",
+ "onequarter",
+ "onequarter.alt",
+ "threequarters",
+ "threequarters.alt",
+ "AlefSuperiorNS",
+ "DammaNS",
+ "DammaRflxNS",
+ "DammatanNS",
+ "Fatha2dotsNS",
+ "FathaNS",
+ "FathatanNS",
+ "FourDotsAboveNS",
+ "HamzaAboveNS",
+ "MaddaNS",
+ "OneDotAbove2NS",
+ "OneDotAboveNS",
+ "ShaddaAlefNS",
+ "ShaddaDammaNS",
+ "ShaddaDammatanNS",
+ "ShaddaFathatanNS",
+ "ShaddaKasraNS",
+ "ShaddaKasratanNS",
+ "ShaddaNS",
+ "SharetKafNS",
+ "SukunNS",
+ "ThreeDotsDownAboveNS",
+ "ThreeDotsUpAboveNS",
+ "TwoDotsAboveNS",
+ "TwoDotsVerticalAboveNS",
+ "UltapeshNS",
+ "WaslaNS",
+ "AinIni.12m_MeemFin.02",
+ "AinIni_YehBarreeFin",
+ "AinMed_YehBarreeFin",
+ "BehxIni_MeemFin",
+ "BehxIni_NoonGhunnaFin",
+ "BehxIni_RehFin",
+ "BehxIni_RehFin.b",
+ "BehxMed_MeemFin.py",
+ "BehxMed_NoonGhunnaFin",
+ "BehxMed_NoonGhunnaFin.cup",
+ "BehxMed_RehFin",
+ "BehxMed_RehFin.cup",
+ "BehxMed_YehxFin",
+ "FehxMed_YehBarreeFin",
+ "HahIni_YehBarreeFin",
+ "KafIni_YehBarreeFin",
+ "KafMed.12_YehxFin.01",
+ "KafMed_MeemFin",
+ "KafMed_YehBarreeFin",
+ "LamAlefFin",
+ "LamAlefFin.cup",
+ "LamAlefFin.cut",
+ "LamAlefFin.short",
+ "LamAlefSep",
+ "LamIni_MeemFin",
+ "LamIni_YehBarreeFin",
+ "LamMed_MeemFin",
+ "LamMed_MeemFin.b",
+ "LamMed_YehxFin",
+ "LamMed_YehxFin.cup",
+ "TahIni_YehBarreeFin",
+ "null",
+ "CR",
+ "space",
+ "exclam",
+ "quotedbl",
+ "numbersign",
]
# Feature files in data/*.txt; output gets compared to data/*.ttx.
TESTS = {
- None: (
- 'mti/cmap',
+ None: ("mti/cmap",),
+ "cmap": ("mti/cmap",),
+ "GSUB": (
+ "featurename-backward",
+ "featurename-forward",
+ "lookupnames-backward",
+ "lookupnames-forward",
+ "mixed-toplevels",
+ "mti/scripttable",
+ "mti/chainedclass",
+ "mti/chainedcoverage",
+ "mti/chained-glyph",
+ "mti/gsubalternate",
+ "mti/gsubligature",
+ "mti/gsubmultiple",
+ "mti/gsubreversechanined",
+ "mti/gsubsingle",
),
- 'cmap': (
- 'mti/cmap',
+ "GPOS": (
+ "mti/scripttable",
+ "mti/chained-glyph",
+ "mti/gposcursive",
+ "mti/gposkernset",
+ "mti/gposmarktobase",
+ "mti/gpospairclass",
+ "mti/gpospairglyph",
+ "mti/gpossingle",
+ "mti/mark-to-ligature",
),
- 'GSUB': (
- 'featurename-backward',
- 'featurename-forward',
- 'lookupnames-backward',
- 'lookupnames-forward',
- 'mixed-toplevels',
-
- 'mti/scripttable',
- 'mti/chainedclass',
- 'mti/chainedcoverage',
- 'mti/chained-glyph',
- 'mti/gsubalternate',
- 'mti/gsubligature',
- 'mti/gsubmultiple',
- 'mti/gsubreversechanined',
- 'mti/gsubsingle',
- ),
- 'GPOS': (
- 'mti/scripttable',
- 'mti/chained-glyph',
- 'mti/gposcursive',
- 'mti/gposkernset',
- 'mti/gposmarktobase',
- 'mti/gpospairclass',
- 'mti/gpospairglyph',
- 'mti/gpossingle',
- 'mti/mark-to-ligature',
- ),
- 'GDEF': (
- 'mti/gdefattach',
- 'mti/gdefclasses',
- 'mti/gdefligcaret',
- 'mti/gdefmarkattach',
- 'mti/gdefmarkfilter',
+ "GDEF": (
+ "mti/gdefattach",
+ "mti/gdefclasses",
+ "mti/gdefligcaret",
+ "mti/gdefmarkattach",
+ "mti/gdefmarkfilter",
),
}
# TODO:
@@ -125,33 +401,21 @@ class MtiTest(unittest.TestCase):
# 'mti/contextcoverage'
# 'mti/context-glyph'
- def __init__(self, methodName):
- unittest.TestCase.__init__(self, methodName)
- # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
- # and fires deprecation warnings if a program uses the old name.
- if not hasattr(self, "assertRaisesRegex"):
- self.assertRaisesRegex = self.assertRaisesRegexp
-
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
@staticmethod
def getpath(testfile):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", testfile)
def expect_ttx(self, expected_ttx, actual_ttx, fromfile=None, tofile=None):
- expected = [l+'\n' for l in expected_ttx.split('\n')]
- actual = [l+'\n' for l in actual_ttx.split('\n')]
+ expected = [l + "\n" for l in expected_ttx.split("\n")]
+ actual = [l + "\n" for l in actual_ttx.split("\n")]
if actual != expected:
- sys.stderr.write('\n')
+ sys.stderr.write("\n")
for line in difflib.unified_diff(
- expected, actual, fromfile=fromfile, tofile=tofile):
+ expected, actual, fromfile=fromfile, tofile=tofile
+ ):
sys.stderr.write(line)
- self.fail("TTX output is different from expected")
+ pytest.fail("TTX output is different from expected")
@classmethod
def create_font(celf):
@@ -160,18 +424,19 @@ class MtiTest(unittest.TestCase):
return font
def check_mti_file(self, name, tableTag=None):
-
- xml_expected_path = self.getpath("%s.ttx" % name + ('.'+tableTag if tableTag is not None else ''))
- with open(xml_expected_path, 'rt', encoding="utf-8") as xml_expected_file:
+ xml_expected_path = self.getpath(
+ "%s.ttx" % name + ("." + tableTag if tableTag is not None else "")
+ )
+ with open(xml_expected_path, "rt", encoding="utf-8") as xml_expected_file:
xml_expected = xml_expected_file.read()
font = self.create_font()
- with open(self.getpath("%s.txt" % name), 'rt', encoding="utf-8") as f:
+ with open(self.getpath("%s.txt" % name), "rt", encoding="utf-8") as f:
table = mtiLib.build(f, font, tableTag=tableTag)
if tableTag is not None:
- self.assertEqual(tableTag, table.tableTag)
+ assert tableTag == table.tableTag
tableTag = table.tableTag
# Make sure it compiles.
@@ -183,22 +448,29 @@ class MtiTest(unittest.TestCase):
# XML from built object.
writer = XMLWriter(StringIO())
- writer.begintag(tableTag); writer.newline()
+ writer.begintag(tableTag)
+ writer.newline()
table.toXML(writer, font)
- writer.endtag(tableTag); writer.newline()
+ writer.endtag(tableTag)
+ writer.newline()
xml_built = writer.file.getvalue()
# XML from decompiled object.
writer = XMLWriter(StringIO())
- writer.begintag(tableTag); writer.newline()
+ writer.begintag(tableTag)
+ writer.newline()
decompiled.toXML(writer, font)
- writer.endtag(tableTag); writer.newline()
+ writer.endtag(tableTag)
+ writer.newline()
xml_binary = writer.file.getvalue()
- self.expect_ttx(xml_binary, xml_built, fromfile='decompiled', tofile='built')
- self.expect_ttx(xml_expected, xml_built, fromfile=xml_expected_path, tofile='built')
+ self.expect_ttx(xml_binary, xml_built, fromfile="decompiled", tofile="built")
+ self.expect_ttx(
+ xml_expected, xml_built, fromfile=xml_expected_path, tofile="built"
+ )
from fontTools.misc import xmlReader
+
f = StringIO()
f.write(xml_expected)
f.seek(0)
@@ -209,26 +481,37 @@ class MtiTest(unittest.TestCase):
# XML from object read from XML.
writer = XMLWriter(StringIO())
- writer.begintag(tableTag); writer.newline()
+ writer.begintag(tableTag)
+ writer.newline()
font2[tableTag].toXML(writer, font)
- writer.endtag(tableTag); writer.newline()
+ writer.endtag(tableTag)
+ writer.newline()
xml_fromxml = writer.file.getvalue()
- self.expect_ttx(xml_expected, xml_fromxml, fromfile=xml_expected_path, tofile='fromxml')
+ self.expect_ttx(
+ xml_expected, xml_fromxml, fromfile=xml_expected_path, tofile="fromxml"
+ )
+
def generate_mti_file_test(name, tableTag=None):
- return lambda self: self.check_mti_file(os.path.join(*name.split('/')), tableTag=tableTag)
+ return lambda self: self.check_mti_file(
+ os.path.join(*name.split("/")), tableTag=tableTag
+ )
-for tableTag,tests in MtiTest.TESTS.items():
+for tableTag, tests in MtiTest.TESTS.items():
for name in tests:
- setattr(MtiTest, "test_MtiFile_%s%s" % (name, '_'+tableTag if tableTag else ''),
- generate_mti_file_test(name, tableTag=tableTag))
+ setattr(
+ MtiTest,
+ "test_MtiFile_%s%s" % (name, "_" + tableTag if tableTag else ""),
+ generate_mti_file_test(name, tableTag=tableTag),
+ )
if __name__ == "__main__":
if len(sys.argv) > 1:
from fontTools.mtiLib import main
+
font = MtiTest.create_font()
sys.exit(main(sys.argv[1:], font))
- sys.exit(unittest.main())
+ sys.exit(pytest.main(sys.argv))
diff --git a/Tests/otlLib/builder_test.py b/Tests/otlLib/builder_test.py
index 548a31e9..b7a6caa2 100644
--- a/Tests/otlLib/builder_test.py
+++ b/Tests/otlLib/builder_test.py
@@ -1080,7 +1080,7 @@ class ClassDefBuilderTest(object):
b.add({"e", "f", "g", "h"})
cdef = b.build()
assert isinstance(cdef, otTables.ClassDef)
- assert cdef.classDefs == {"a": 2, "b": 2, "c": 3, "aa": 1, "bb": 1}
+ assert cdef.classDefs == {"a": 1, "b": 1, "c": 3, "aa": 2, "bb": 2}
def test_build_notUsingClass0(self):
b = builder.ClassDefBuilder(useClass0=False)
diff --git a/Tests/otlLib/maxContextCalc_test.py b/Tests/otlLib/maxContextCalc_test.py
index dc169c60..f672052e 100644
--- a/Tests/otlLib/maxContextCalc_test.py
+++ b/Tests/otlLib/maxContextCalc_test.py
@@ -1,4 +1,3 @@
-
import os
import pytest
from fontTools.ttLib import TTFont
@@ -9,13 +8,13 @@ from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
def test_max_ctx_calc_no_features():
font = TTFont()
assert maxCtxFont(font) == 0
- font.setGlyphOrder(['.notdef'])
- addOpenTypeFeaturesFromString(font, '')
+ font.setGlyphOrder([".notdef"])
+ addOpenTypeFeaturesFromString(font, "")
assert maxCtxFont(font) == 0
def test_max_ctx_calc_features():
- glyphs = '.notdef space A B C a b c'.split()
+ glyphs = ".notdef space A B C a b c".split()
features = """
lookup GSUB_EXT useExtension {
sub a by b;
@@ -59,15 +58,19 @@ def test_max_ctx_calc_features():
assert maxCtxFont(font) == 3
-@pytest.mark.parametrize('file_name, max_context', [
- ('gsub_51', 2),
- ('gsub_52', 2),
- ('gsub_71', 1),
- ('gpos_91', 1),
-])
+@pytest.mark.parametrize(
+ "file_name, max_context",
+ [
+ ("gsub_51", 2),
+ ("gsub_52", 2),
+ ("gsub_71", 1),
+ ("gpos_91", 1),
+ ],
+)
def test_max_ctx_calc_features_ttx(file_name, max_context):
- ttx_path = os.path.join(os.path.dirname(__file__),
- 'data', '{}.ttx'.format(file_name))
+ ttx_path = os.path.join(
+ os.path.dirname(__file__), "data", "{}.ttx".format(file_name)
+ )
font = TTFont()
font.importXML(ttx_path)
diff --git a/Tests/otlLib/mock_builder_test.py b/Tests/otlLib/mock_builder_test.py
index b3fecd83..46f5f80b 100644
--- a/Tests/otlLib/mock_builder_test.py
+++ b/Tests/otlLib/mock_builder_test.py
@@ -13,7 +13,7 @@ from fontTools.otlLib.builder import (
ClassPairPosSubtableBuilder,
PairPosBuilder,
SinglePosBuilder,
- ChainContextualRule
+ ChainContextualRule,
)
from fontTools.otlLib.error import OpenTypeLibError
from fontTools.ttLib import TTFont
@@ -76,11 +76,15 @@ def test_unsupported_subtable_break_1(ttfont):
captor.assertRegex('5:beta: unsupported "subtable" statement for lookup type')
+
def test_chain_pos_references_GSUB_lookup(ttfont):
location = MockBuilderLocation((0, "alpha"))
builder = ChainContextPosBuilder(ttfont, location)
builder2 = SingleSubstBuilder(ttfont, location)
builder.rules.append(ChainContextualRule([], [], [], [[builder2]]))
- with pytest.raises(OpenTypeLibError, match="0:alpha: Missing index of the specified lookup, might be a substitution lookup"):
+ with pytest.raises(
+ OpenTypeLibError,
+ match="0:alpha: Missing index of the specified lookup, might be a substitution lookup",
+ ):
builder.build()
diff --git a/Tests/pens/__init__.py b/Tests/pens/__init__.py
index 187b9816..00e7b05c 100644
--- a/Tests/pens/__init__.py
+++ b/Tests/pens/__init__.py
@@ -1,13 +1,6 @@
-import os
-from fontTools.ufoLib.glifLib import GlyphSet
-import pkg_resources
-
-DATADIR = os.path.join(os.path.dirname(__file__), 'data')
-CUBIC_GLYPHS = GlyphSet(os.path.join(DATADIR, 'cubic'))
-QUAD_GLYPHS = GlyphSet(os.path.join(DATADIR, 'quadratic'))
-
import unittest
+
# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires
# deprecation warnings if a program uses the old name.
-if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
+if not hasattr(unittest.TestCase, "assertRaisesRegex"):
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
diff --git a/Tests/pens/areaPen_test.py b/Tests/pens/areaPen_test.py
index c3f3f80c..487c108e 100644
--- a/Tests/pens/areaPen_test.py
+++ b/Tests/pens/areaPen_test.py
@@ -3,120 +3,128 @@ import unittest
precision = 6
+
def draw1_(pen):
- pen.moveTo( (254, 360) )
- pen.lineTo( (771, 367) )
- pen.curveTo( (800, 393), (808, 399), (819, 412) )
- pen.curveTo( (818, 388), (774, 138), (489, 145) )
- pen.curveTo( (188, 145), (200, 398), (200, 421) )
- pen.curveTo( (209, 409), (220, 394), (254, 360) )
+ pen.moveTo((254, 360))
+ pen.lineTo((771, 367))
+ pen.curveTo((800, 393), (808, 399), (819, 412))
+ pen.curveTo((818, 388), (774, 138), (489, 145))
+ pen.curveTo((188, 145), (200, 398), (200, 421))
+ pen.curveTo((209, 409), (220, 394), (254, 360))
pen.closePath()
+
def draw2_(pen):
- pen.moveTo( (254, 360) )
- pen.curveTo( (220, 394), (209, 409), (200, 421) )
- pen.curveTo( (200, 398), (188, 145), (489, 145) )
- pen.curveTo( (774, 138), (818, 388), (819, 412) )
- pen.curveTo( (808, 399), (800, 393), (771, 367) )
+ pen.moveTo((254, 360))
+ pen.curveTo((220, 394), (209, 409), (200, 421))
+ pen.curveTo((200, 398), (188, 145), (489, 145))
+ pen.curveTo((774, 138), (818, 388), (819, 412))
+ pen.curveTo((808, 399), (800, 393), (771, 367))
pen.closePath()
+
def draw3_(pen):
- pen.moveTo( (771, 367) )
- pen.curveTo( (800, 393), (808, 399), (819, 412) )
- pen.curveTo( (818, 388), (774, 138), (489, 145) )
- pen.curveTo( (188, 145), (200, 398), (200, 421) )
- pen.curveTo( (209, 409), (220, 394), (254, 360) )
+ pen.moveTo((771, 367))
+ pen.curveTo((800, 393), (808, 399), (819, 412))
+ pen.curveTo((818, 388), (774, 138), (489, 145))
+ pen.curveTo((188, 145), (200, 398), (200, 421))
+ pen.curveTo((209, 409), (220, 394), (254, 360))
pen.closePath()
+
def draw4_(pen):
- pen.moveTo( (771, 367) )
- pen.lineTo( (254, 360) )
- pen.curveTo( (220, 394), (209, 409), (200, 421) )
- pen.curveTo( (200, 398), (188, 145), (489, 145) )
- pen.curveTo( (774, 138), (818, 388), (819, 412) )
- pen.curveTo( (808, 399), (800, 393), (771, 367) )
+ pen.moveTo((771, 367))
+ pen.lineTo((254, 360))
+ pen.curveTo((220, 394), (209, 409), (200, 421))
+ pen.curveTo((200, 398), (188, 145), (489, 145))
+ pen.curveTo((774, 138), (818, 388), (819, 412))
+ pen.curveTo((808, 399), (800, 393), (771, 367))
pen.closePath()
+
def draw5_(pen):
- pen.moveTo( (254, 360) )
- pen.lineTo( (771, 367) )
- pen.qCurveTo( (793, 386), (802, 394) )
- pen.qCurveTo( (811, 402), (819, 412) )
- pen.qCurveTo( (819, 406), (814, 383.5) )
- pen.qCurveTo( (809, 361), (796, 330.5) )
- pen.qCurveTo( (783, 300), (760.5, 266.5) )
- pen.qCurveTo( (738, 233), (701, 205.5) )
- pen.qCurveTo( (664, 178), (612, 160.5) )
- pen.qCurveTo( (560, 143), (489, 145) )
- pen.qCurveTo( (414, 145), (363, 164) )
- pen.qCurveTo( (312, 183), (280, 211.5) )
- pen.qCurveTo( (248, 240), (231.5, 274.5) )
- pen.qCurveTo( (215, 309), (208, 339.5) )
- pen.qCurveTo( (201, 370), (200.5, 392.5) )
- pen.qCurveTo( (200, 415), (200, 421) )
- pen.qCurveTo( (207, 412), (217.5, 399) )
- pen.qCurveTo( (228, 386), (254, 360) )
+ pen.moveTo((254, 360))
+ pen.lineTo((771, 367))
+ pen.qCurveTo((793, 386), (802, 394))
+ pen.qCurveTo((811, 402), (819, 412))
+ pen.qCurveTo((819, 406), (814, 383.5))
+ pen.qCurveTo((809, 361), (796, 330.5))
+ pen.qCurveTo((783, 300), (760.5, 266.5))
+ pen.qCurveTo((738, 233), (701, 205.5))
+ pen.qCurveTo((664, 178), (612, 160.5))
+ pen.qCurveTo((560, 143), (489, 145))
+ pen.qCurveTo((414, 145), (363, 164))
+ pen.qCurveTo((312, 183), (280, 211.5))
+ pen.qCurveTo((248, 240), (231.5, 274.5))
+ pen.qCurveTo((215, 309), (208, 339.5))
+ pen.qCurveTo((201, 370), (200.5, 392.5))
+ pen.qCurveTo((200, 415), (200, 421))
+ pen.qCurveTo((207, 412), (217.5, 399))
+ pen.qCurveTo((228, 386), (254, 360))
pen.closePath()
+
def draw6_(pen):
- pen.moveTo( (254, 360) )
- pen.qCurveTo( (228, 386), (217.5, 399) )
- pen.qCurveTo( (207, 412), (200, 421) )
- pen.qCurveTo( (200, 415), (200.5, 392.5) )
- pen.qCurveTo( (201, 370), (208, 339.5) )
- pen.qCurveTo( (215, 309), (231.5, 274.5) )
- pen.qCurveTo( (248, 240), (280, 211.5) )
- pen.qCurveTo( (312, 183), (363, 164) )
- pen.qCurveTo( (414, 145), (489, 145) )
- pen.qCurveTo( (560, 143), (612, 160.5) )
- pen.qCurveTo( (664, 178), (701, 205.5) )
- pen.qCurveTo( (738, 233), (760.5, 266.5) )
- pen.qCurveTo( (783, 300), (796, 330.5) )
- pen.qCurveTo( (809, 361), (814, 383.5) )
- pen.qCurveTo( (819, 406), (819, 412) )
- pen.qCurveTo( (811, 402), (802, 394) )
- pen.qCurveTo( (793, 386), (771, 367) )
+ pen.moveTo((254, 360))
+ pen.qCurveTo((228, 386), (217.5, 399))
+ pen.qCurveTo((207, 412), (200, 421))
+ pen.qCurveTo((200, 415), (200.5, 392.5))
+ pen.qCurveTo((201, 370), (208, 339.5))
+ pen.qCurveTo((215, 309), (231.5, 274.5))
+ pen.qCurveTo((248, 240), (280, 211.5))
+ pen.qCurveTo((312, 183), (363, 164))
+ pen.qCurveTo((414, 145), (489, 145))
+ pen.qCurveTo((560, 143), (612, 160.5))
+ pen.qCurveTo((664, 178), (701, 205.5))
+ pen.qCurveTo((738, 233), (760.5, 266.5))
+ pen.qCurveTo((783, 300), (796, 330.5))
+ pen.qCurveTo((809, 361), (814, 383.5))
+ pen.qCurveTo((819, 406), (819, 412))
+ pen.qCurveTo((811, 402), (802, 394))
+ pen.qCurveTo((793, 386), (771, 367))
pen.closePath()
+
def draw7_(pen):
- pen.moveTo( (771, 367) )
- pen.qCurveTo( (793, 386), (802, 394) )
- pen.qCurveTo( (811, 402), (819, 412) )
- pen.qCurveTo( (819, 406), (814, 383.5) )
- pen.qCurveTo( (809, 361), (796, 330.5) )
- pen.qCurveTo( (783, 300), (760.5, 266.5) )
- pen.qCurveTo( (738, 233), (701, 205.5) )
- pen.qCurveTo( (664, 178), (612, 160.5) )
- pen.qCurveTo( (560, 143), (489, 145) )
- pen.qCurveTo( (414, 145), (363, 164) )
- pen.qCurveTo( (312, 183), (280, 211.5) )
- pen.qCurveTo( (248, 240), (231.5, 274.5) )
- pen.qCurveTo( (215, 309), (208, 339.5) )
- pen.qCurveTo( (201, 370), (200.5, 392.5) )
- pen.qCurveTo( (200, 415), (200, 421) )
- pen.qCurveTo( (207, 412), (217.5, 399) )
- pen.qCurveTo( (228, 386), (254, 360) )
+ pen.moveTo((771, 367))
+ pen.qCurveTo((793, 386), (802, 394))
+ pen.qCurveTo((811, 402), (819, 412))
+ pen.qCurveTo((819, 406), (814, 383.5))
+ pen.qCurveTo((809, 361), (796, 330.5))
+ pen.qCurveTo((783, 300), (760.5, 266.5))
+ pen.qCurveTo((738, 233), (701, 205.5))
+ pen.qCurveTo((664, 178), (612, 160.5))
+ pen.qCurveTo((560, 143), (489, 145))
+ pen.qCurveTo((414, 145), (363, 164))
+ pen.qCurveTo((312, 183), (280, 211.5))
+ pen.qCurveTo((248, 240), (231.5, 274.5))
+ pen.qCurveTo((215, 309), (208, 339.5))
+ pen.qCurveTo((201, 370), (200.5, 392.5))
+ pen.qCurveTo((200, 415), (200, 421))
+ pen.qCurveTo((207, 412), (217.5, 399))
+ pen.qCurveTo((228, 386), (254, 360))
pen.closePath()
+
def draw8_(pen):
- pen.moveTo( (771, 367) )
- pen.lineTo( (254, 360) )
- pen.qCurveTo( (228, 386), (217.5, 399) )
- pen.qCurveTo( (207, 412), (200, 421) )
- pen.qCurveTo( (200, 415), (200.5, 392.5) )
- pen.qCurveTo( (201, 370), (208, 339.5) )
- pen.qCurveTo( (215, 309), (231.5, 274.5) )
- pen.qCurveTo( (248, 240), (280, 211.5) )
- pen.qCurveTo( (312, 183), (363, 164) )
- pen.qCurveTo( (414, 145), (489, 145) )
- pen.qCurveTo( (560, 143), (612, 160.5) )
- pen.qCurveTo( (664, 178), (701, 205.5) )
- pen.qCurveTo( (738, 233), (760.5, 266.5) )
- pen.qCurveTo( (783, 300), (796, 330.5) )
- pen.qCurveTo( (809, 361), (814, 383.5) )
- pen.qCurveTo( (819, 406), (819, 412) )
- pen.qCurveTo( (811, 402), (802, 394) )
- pen.qCurveTo( (793, 386), (771, 367) )
+ pen.moveTo((771, 367))
+ pen.lineTo((254, 360))
+ pen.qCurveTo((228, 386), (217.5, 399))
+ pen.qCurveTo((207, 412), (200, 421))
+ pen.qCurveTo((200, 415), (200.5, 392.5))
+ pen.qCurveTo((201, 370), (208, 339.5))
+ pen.qCurveTo((215, 309), (231.5, 274.5))
+ pen.qCurveTo((248, 240), (280, 211.5))
+ pen.qCurveTo((312, 183), (363, 164))
+ pen.qCurveTo((414, 145), (489, 145))
+ pen.qCurveTo((560, 143), (612, 160.5))
+ pen.qCurveTo((664, 178), (701, 205.5))
+ pen.qCurveTo((738, 233), (760.5, 266.5))
+ pen.qCurveTo((783, 300), (796, 330.5))
+ pen.qCurveTo((809, 361), (814, 383.5))
+ pen.qCurveTo((819, 406), (819, 412))
+ pen.qCurveTo((811, 402), (802, 394))
+ pen.qCurveTo((793, 386), (771, 367))
pen.closePath()
@@ -173,6 +181,7 @@ class AreaPenTest(unittest.TestCase):
pen.endPath()
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/pens/basePen_test.py b/Tests/pens/basePen_test.py
index db57e80e..d8508fd1 100644
--- a/Tests/pens/basePen_test.py
+++ b/Tests/pens/basePen_test.py
@@ -1,5 +1,9 @@
-from fontTools.pens.basePen import \
- AbstractPen, BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment
+from fontTools.pens.basePen import (
+ AbstractPen,
+ BasePen,
+ decomposeSuperBezierSegment,
+ decomposeQuadraticSegment,
+)
from fontTools.pens.pointPen import AbstractPointPen
from fontTools.misc.loggingTools import CapturingLogHandler
import unittest
@@ -23,10 +27,10 @@ class _TestPen(BasePen):
self._commands.append("%s %s lineto" % (pt[0], pt[1]))
def _curveToOne(self, bcp1, bcp2, pt):
- self._commands.append("%s %s %s %s %s %s curveto" %
- (bcp1[0], bcp1[1],
- bcp2[0], bcp2[1],
- pt[0], pt[1]))
+ self._commands.append(
+ "%s %s %s %s %s %s curveto"
+ % (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
+ )
def _closePath(self):
self._commands.append("closepath")
@@ -73,17 +77,19 @@ class BasePenTest(unittest.TestCase):
pen = _TestPen()
pen.moveTo((0.0, 0.0))
pen.curveTo((6.0, 3.0), (3.0, 6.0))
- self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto",
- repr(pen))
+ self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", repr(pen))
self.assertEqual((3.0, 6.0), pen.getCurrentPoint())
def test_curveTo_manyPoints(self):
pen = _TestPen()
pen.moveTo((0.0, 0.0))
pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1))
- self.assertEqual("0.0 0.0 moveto "
- "1.0 1.1 1.5 1.6 2.0 2.1 curveto "
- "2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen))
+ self.assertEqual(
+ "0.0 0.0 moveto "
+ "1.0 1.1 1.5 1.6 2.0 2.1 curveto "
+ "2.5 2.6 3.0 3.1 4.0 4.1 curveto",
+ repr(pen),
+ )
self.assertEqual((4.0, 4.1), pen.getCurrentPoint())
def test_qCurveTo_zeroPoints(self):
@@ -102,19 +108,21 @@ class BasePenTest(unittest.TestCase):
pen = _TestPen()
pen.moveTo((0.0, 0.0))
pen.qCurveTo((6.0, 3.0), (3.0, 6.0))
- self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto",
- repr(pen))
+ self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", repr(pen))
self.assertEqual((3.0, 6.0), pen.getCurrentPoint())
def test_qCurveTo_onlyOffCurvePoints(self):
pen = _TestPen()
pen.moveTo((0.0, 0.0))
pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None)
- self.assertEqual("0.0 0.0 moveto "
- "12.0 -12.0 moveto "
- "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto "
- "11.0 9.0 13.0 7.0 15.0 -3.0 curveto "
- "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen))
+ self.assertEqual(
+ "0.0 0.0 moveto "
+ "12.0 -12.0 moveto "
+ "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto "
+ "11.0 9.0 13.0 7.0 15.0 -3.0 curveto "
+ "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto",
+ repr(pen),
+ )
self.assertEqual((12.0, -12.0), pen.getCurrentPoint())
def test_closePath(self):
@@ -135,11 +143,14 @@ class BasePenTest(unittest.TestCase):
pen = _TestPen()
pen.glyphSet["oslash"] = _TestGlyph()
pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0))
- self.assertEqual("-10.0 0.0 moveto "
- "40.0 200.0 lineto "
- "127.5 300.0 131.25 290.0 125.0 265.0 curveto "
- "118.75 240.0 102.5 200.0 -10.0 0.0 curveto "
- "closepath", repr(pen))
+ self.assertEqual(
+ "-10.0 0.0 moveto "
+ "40.0 200.0 lineto "
+ "127.5 300.0 131.25 290.0 125.0 265.0 curveto "
+ "118.75 240.0 102.5 200.0 -10.0 0.0 curveto "
+ "closepath",
+ repr(pen),
+ )
self.assertEqual(None, pen.getCurrentPoint())
def test_addComponent_skip_missing(self):
@@ -155,24 +166,29 @@ class DecomposeSegmentTest(unittest.TestCase):
self.assertRaises(AssertionError, decompose, [])
self.assertRaises(AssertionError, decompose, [(0, 0)])
self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)])
- self.assertEqual([((0, 0), (1, 1), (2, 2))],
- decompose([(0, 0), (1, 1), (2, 2)]))
+ self.assertEqual(
+ [((0, 0), (1, 1), (2, 2))], decompose([(0, 0), (1, 1), (2, 2)])
+ )
self.assertEqual(
[((0, 0), (2, -2), (4, 0)), ((6, 2), (8, 8), (12, -12))],
- decompose([(0, 0), (4, -4), (8, 8), (12, -12)]))
+ decompose([(0, 0), (4, -4), (8, 8), (12, -12)]),
+ )
def test_decomposeQuadraticSegment(self):
decompose = decomposeQuadraticSegment
self.assertRaises(AssertionError, decompose, [])
self.assertRaises(AssertionError, decompose, [(0, 0)])
- self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)]))
- self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))],
- decompose([(0, 0), (4, 8), (9, -9)]))
+ self.assertEqual([((0, 0), (4, 8))], decompose([(0, 0), (4, 8)]))
+ self.assertEqual(
+ [((0, 0), (2, 4)), ((4, 8), (9, -9))], decompose([(0, 0), (4, 8), (9, -9)])
+ )
self.assertEqual(
[((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))],
- decompose([(0, 0), (4, 8), (9, -9), (10, 10)]))
+ decompose([(0, 0), (4, 8), (9, -9), (10, 10)]),
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/pens/boundsPen_test.py b/Tests/pens/boundsPen_test.py
index c0c56108..190161f3 100644
--- a/Tests/pens/boundsPen_test.py
+++ b/Tests/pens/boundsPen_test.py
@@ -70,6 +70,7 @@ class ControlBoundsPenTest(unittest.TestCase):
self.assertEqual(None, pen.bounds)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/pens/cocoaPen_test.py b/Tests/pens/cocoaPen_test.py
index 11077c0b..6222cc7e 100644
--- a/Tests/pens/cocoaPen_test.py
+++ b/Tests/pens/cocoaPen_test.py
@@ -7,10 +7,10 @@ try:
PATH_ELEMENTS = {
# NSBezierPathElement key desc
- NSBezierPathElementMoveTo: 'moveto',
- NSBezierPathElementLineTo: 'lineto',
- NSBezierPathElementCurveTo: 'curveto',
- NSBezierPathElementClosePath: 'close',
+ NSBezierPathElementMoveTo: "moveto",
+ NSBezierPathElementLineTo: "lineto",
+ NSBezierPathElementCurveTo: "curveto",
+ NSBezierPathElementClosePath: "close",
}
PYOBJC_AVAILABLE = True
@@ -45,7 +45,7 @@ class CocoaPenTest(unittest.TestCase):
draw(pen)
self.assertEqual(
"moveto 50.0 0.0 lineto 50.0 500.0 lineto 200.0 500.0 curveto 350.0 500.0 450.0 400.0 450.0 250.0 curveto 450.0 100.0 350.0 0.0 200.0 0.0 close ",
- cocoaPathToString(pen.path)
+ cocoaPathToString(pen.path),
)
def test_empty(self):
@@ -53,6 +53,7 @@ class CocoaPenTest(unittest.TestCase):
self.assertEqual("", cocoaPathToString(pen.path))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/pens/cu2quPen_test.py b/Tests/pens/cu2quPen_test.py
index 4ce5b512..779254c3 100644
--- a/Tests/pens/cu2quPen_test.py
+++ b/Tests/pens/cu2quPen_test.py
@@ -15,13 +15,19 @@
import sys
import unittest
-from fontTools.pens.cu2quPen import Cu2QuPen, Cu2QuPointPen
-from . import CUBIC_GLYPHS, QUAD_GLYPHS
-from .utils import DummyGlyph, DummyPointGlyph
-from .utils import DummyPen, DummyPointPen
+from fontTools.pens.cu2quPen import Cu2QuPen, Cu2QuPointPen, Cu2QuMultiPen
+from fontTools.pens.recordingPen import RecordingPen, RecordingPointPen
from fontTools.misc.loggingTools import CapturingLogHandler
from textwrap import dedent
import logging
+import pytest
+
+try:
+ from .utils import CUBIC_GLYPHS, QUAD_GLYPHS
+ from .utils import DummyGlyph, DummyPointGlyph
+ from .utils import DummyPen, DummyPointPen
+except ImportError as e:
+ pytest.skip(str(e), allow_module_level=True)
MAX_ERR = 1.0
@@ -36,10 +42,12 @@ class _TestPenMixin(object):
def diff(self, expected, actual):
import difflib
+
expected = str(self.Glyph(expected)).splitlines(True)
actual = str(self.Glyph(actual)).splitlines(True)
diff = difflib.unified_diff(
- expected, actual, fromfile='expected', tofile='actual')
+ expected, actual, fromfile="expected", tofile="actual"
+ )
return "".join(diff)
def convert_glyph(self, glyph, **kwargs):
@@ -58,28 +66,27 @@ class _TestPenMixin(object):
self.fail("converted glyph is different from expected")
def test_convert_simple_glyph(self):
- self.expect_glyph(CUBIC_GLYPHS['a'], QUAD_GLYPHS['a'])
- self.expect_glyph(CUBIC_GLYPHS['A'], QUAD_GLYPHS['A'])
+ self.expect_glyph(CUBIC_GLYPHS["a"], QUAD_GLYPHS["a"])
+ self.expect_glyph(CUBIC_GLYPHS["A"], QUAD_GLYPHS["A"])
def test_convert_composite_glyph(self):
- source = CUBIC_GLYPHS['Aacute']
+ source = CUBIC_GLYPHS["Aacute"]
converted = self.convert_glyph(source)
# components don't change after quadratic conversion
self.assertEqual(converted, source)
def test_convert_mixed_glyph(self):
# this contains a mix of contours and components
- self.expect_glyph(CUBIC_GLYPHS['Eacute'], QUAD_GLYPHS['Eacute'])
+ self.expect_glyph(CUBIC_GLYPHS["Eacute"], QUAD_GLYPHS["Eacute"])
def test_reverse_direction(self):
- for name in ('a', 'A', 'Eacute'):
+ for name in ("a", "A", "Eacute"):
source = CUBIC_GLYPHS[name]
normal_glyph = self.convert_glyph(source)
reversed_glyph = self.convert_glyph(source, reverse_direction=True)
# the number of commands is the same, just their order is iverted
- self.assertTrue(
- len(normal_glyph.outline), len(reversed_glyph.outline))
+ self.assertTrue(len(normal_glyph.outline), len(reversed_glyph.outline))
self.assertNotEqual(normal_glyph, reversed_glyph)
def test_stats(self):
@@ -89,8 +96,8 @@ class _TestPenMixin(object):
self.convert_glyph(source, stats=stats)
self.assertTrue(stats)
- self.assertTrue('1' in stats)
- self.assertEqual(type(stats['1']), int)
+ self.assertTrue("1" in stats)
+ self.assertEqual(type(stats["1"]), int)
def test_addComponent(self):
pen = self.Pen()
@@ -98,65 +105,22 @@ class _TestPenMixin(object):
quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
# components are passed through without changes
- self.assertEqual(str(pen).splitlines(), [
- "pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
- ])
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
+ ],
+ )
class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
-
def __init__(self, *args, **kwargs):
super(TestCu2QuPen, self).__init__(*args, **kwargs)
self.Glyph = DummyGlyph
self.Pen = DummyPen
self.Cu2QuPen = Cu2QuPen
- self.pen_getter_name = 'getPen'
- self.draw_method_name = 'draw'
-
- def test__check_contour_is_open(self):
- msg = "moveTo is required"
- quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
-
- with self.assertRaisesRegex(AssertionError, msg):
- quadpen.lineTo((0, 0))
- with self.assertRaisesRegex(AssertionError, msg):
- quadpen.qCurveTo((0, 0), (1, 1))
- with self.assertRaisesRegex(AssertionError, msg):
- quadpen.curveTo((0, 0), (1, 1), (2, 2))
- with self.assertRaisesRegex(AssertionError, msg):
- quadpen.closePath()
- with self.assertRaisesRegex(AssertionError, msg):
- quadpen.endPath()
-
- quadpen.moveTo((0, 0)) # now it works
- quadpen.lineTo((1, 1))
- quadpen.qCurveTo((2, 2), (3, 3))
- quadpen.curveTo((4, 4), (5, 5), (6, 6))
- quadpen.closePath()
-
- def test__check_contour_closed(self):
- msg = "closePath or endPath is required"
- quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
- quadpen.moveTo((0, 0))
-
- with self.assertRaisesRegex(AssertionError, msg):
- quadpen.moveTo((1, 1))
- with self.assertRaisesRegex(AssertionError, msg):
- quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
-
- # it works if contour is closed
- quadpen.closePath()
- quadpen.moveTo((1, 1))
- quadpen.endPath()
- quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
-
- def test_qCurveTo_no_points(self):
- quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
- quadpen.moveTo((0, 0))
-
- with self.assertRaisesRegex(
- AssertionError, "illegal qcurve segment point count: 0"):
- quadpen.qCurveTo()
+ self.pen_getter_name = "getPen"
+ self.draw_method_name = "draw"
def test_qCurveTo_1_point(self):
pen = DummyPen()
@@ -164,10 +128,13 @@ class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1))
- self.assertEqual(str(pen).splitlines(), [
- "pen.moveTo((0, 0))",
- "pen.lineTo((1, 1))",
- ])
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((1, 1))",
+ ],
+ )
def test_qCurveTo_more_than_1_point(self):
pen = DummyPen()
@@ -175,18 +142,13 @@ class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1), (2, 2))
- self.assertEqual(str(pen).splitlines(), [
- "pen.moveTo((0, 0))",
- "pen.qCurveTo((1, 1), (2, 2))",
- ])
-
- def test_curveTo_no_points(self):
- quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
- quadpen.moveTo((0, 0))
-
- with self.assertRaisesRegex(
- AssertionError, "illegal curve segment point count: 0"):
- quadpen.curveTo()
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((1, 1), (2, 2))",
+ ],
+ )
def test_curveTo_1_point(self):
pen = DummyPen()
@@ -194,10 +156,13 @@ class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1))
- self.assertEqual(str(pen).splitlines(), [
- "pen.moveTo((0, 0))",
- "pen.lineTo((1, 1))",
- ])
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((1, 1))",
+ ],
+ )
def test_curveTo_2_points(self):
pen = DummyPen()
@@ -205,10 +170,13 @@ class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2))
- self.assertEqual(str(pen).splitlines(), [
- "pen.moveTo((0, 0))",
- "pen.qCurveTo((1, 1), (2, 2))",
- ])
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((1, 1), (2, 2))",
+ ],
+ )
def test_curveTo_3_points(self):
pen = DummyPen()
@@ -216,10 +184,13 @@ class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3))
- self.assertEqual(str(pen).splitlines(), [
- "pen.moveTo((0, 0))",
- "pen.qCurveTo((0.75, 0.75), (2.25, 2.25), (3, 3))",
- ])
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((0.75, 0.75), (2.25, 2.25), (3, 3))",
+ ],
+ )
def test_curveTo_more_than_3_points(self):
# a 'SuperBezier' as described in fontTools.basePen.AbstractPen
@@ -228,71 +199,24 @@ class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3), (4, 4))
- self.assertEqual(str(pen).splitlines(), [
- "pen.moveTo((0, 0))",
- "pen.qCurveTo((0.75, 0.75), (1.625, 1.625), (2, 2))",
- "pen.qCurveTo((2.375, 2.375), (3.25, 3.25), (4, 4))",
- ])
-
- def test_addComponent(self):
- pen = DummyPen()
- quadpen = Cu2QuPen(pen, MAX_ERR)
- quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
-
- # components are passed through without changes
- self.assertEqual(str(pen).splitlines(), [
- "pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
- ])
-
- def test_ignore_single_points(self):
- pen = DummyPen()
- try:
- logging.captureWarnings(True)
- with CapturingLogHandler("py.warnings", level="WARNING") as log:
- quadpen = Cu2QuPen(pen, MAX_ERR, ignore_single_points=True)
- finally:
- logging.captureWarnings(False)
- quadpen.moveTo((0, 0))
- quadpen.endPath()
- quadpen.moveTo((1, 1))
- quadpen.closePath()
-
- self.assertGreaterEqual(len(log.records), 1)
- if sys.version_info < (3, 11):
- self.assertIn("ignore_single_points is deprecated",
- log.records[0].args[0])
- else:
- self.assertIn("ignore_single_points is deprecated",
- log.records[0].msg)
-
- # single-point contours were ignored, so the pen commands are empty
- self.assertFalse(pen.commands)
-
- # redraw without ignoring single points
- quadpen.ignore_single_points = False
- quadpen.moveTo((0, 0))
- quadpen.endPath()
- quadpen.moveTo((1, 1))
- quadpen.closePath()
-
- self.assertTrue(pen.commands)
- self.assertEqual(str(pen).splitlines(), [
- "pen.moveTo((0, 0))",
- "pen.endPath()",
- "pen.moveTo((1, 1))",
- "pen.closePath()"
- ])
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((0.75, 0.75), (1.625, 1.625), (2, 2))",
+ "pen.qCurveTo((2.375, 2.375), (3.25, 3.25), (4, 4))",
+ ],
+ )
class TestCu2QuPointPen(unittest.TestCase, _TestPenMixin):
-
def __init__(self, *args, **kwargs):
super(TestCu2QuPointPen, self).__init__(*args, **kwargs)
self.Glyph = DummyPointGlyph
self.Pen = DummyPointPen
self.Cu2QuPen = Cu2QuPointPen
- self.pen_getter_name = 'getPointPen'
- self.draw_method_name = 'drawPoints'
+ self.pen_getter_name = "getPointPen"
+ self.draw_method_name = "drawPoints"
def test_super_bezier_curve(self):
pen = DummyPointPen()
@@ -303,10 +227,13 @@ class TestCu2QuPointPen(unittest.TestCase, _TestPenMixin):
quadpen.addPoint((2, 2))
quadpen.addPoint((3, 3))
quadpen.addPoint(
- (4, 4), segmentType="curve", smooth=False, name="up", selected=1)
+ (4, 4), segmentType="curve", smooth=False, name="up", selected=1
+ )
quadpen.endPath()
- self.assertEqual(str(pen).splitlines(), """\
+ self.assertEqual(
+ str(pen).splitlines(),
+ """\
pen.beginPath()
pen.addPoint((0, 0), name=None, segmentType='move', smooth=False)
pen.addPoint((0.75, 0.75), name=None, segmentType=None, smooth=False)
@@ -315,7 +242,8 @@ pen.addPoint((2, 2), name=None, segmentType='qcurve', smooth=True)
pen.addPoint((2.375, 2.375), name=None, segmentType=None, smooth=False)
pen.addPoint((3.25, 3.25), name=None, segmentType=None, smooth=False)
pen.addPoint((4, 4), name='up', segmentType='qcurve', selected=1, smooth=False)
-pen.endPath()""".splitlines())
+pen.endPath()""".splitlines(),
+ )
def test__flushContour_restore_starting_point(self):
pen = DummyPointPen()
@@ -323,24 +251,34 @@ pen.endPath()""".splitlines())
# collect the output of _flushContour before it's sent to _drawPoints
new_segments = []
+
def _drawPoints(segments):
new_segments.extend(segments)
Cu2QuPointPen._drawPoints(quadpen, segments)
+
quadpen._drawPoints = _drawPoints
# a closed path (ie. no "move" segmentType)
- quadpen._flushContour([
- ("curve", [
- ((2, 2), False, None, {}),
- ((1, 1), False, None, {}),
- ((0, 0), False, None, {}),
- ]),
- ("curve", [
- ((1, 1), False, None, {}),
- ((2, 2), False, None, {}),
- ((3, 3), False, None, {}),
- ]),
- ])
+ quadpen._flushContour(
+ [
+ (
+ "curve",
+ [
+ ((2, 2), False, None, {}),
+ ((1, 1), False, None, {}),
+ ((0, 0), False, None, {}),
+ ],
+ ),
+ (
+ "curve",
+ [
+ ((1, 1), False, None, {}),
+ ((2, 2), False, None, {}),
+ ((3, 3), False, None, {}),
+ ],
+ ),
+ ]
+ )
# the original starting point is restored: the last segment has become
# the first
@@ -349,16 +287,24 @@ pen.endPath()""".splitlines())
new_segments = []
# an open path (ie. starting with "move")
- quadpen._flushContour([
- ("move", [
- ((0, 0), False, None, {}),
- ]),
- ("curve", [
- ((1, 1), False, None, {}),
- ((2, 2), False, None, {}),
- ((3, 3), False, None, {}),
- ]),
- ])
+ quadpen._flushContour(
+ [
+ (
+ "move",
+ [
+ ((0, 0), False, None, {}),
+ ],
+ ),
+ (
+ "curve",
+ [
+ ((1, 1), False, None, {}),
+ ((2, 2), False, None, {}),
+ ((3, 3), False, None, {}),
+ ],
+ ),
+ ]
+ )
# the segment order stays the same before and after _flushContour
self.assertEqual(new_segments[0][1][-1][0], (0, 0))
@@ -387,9 +333,97 @@ pen.endPath()""".splitlines())
pen.addPoint((2, 2), name=None, segmentType=None, smooth=False)
pen.addPoint((3, 3), name=None, segmentType=None, smooth=False)
pen.endPath()"""
- )
+ ),
)
+class TestCu2QuMultiPen(unittest.TestCase):
+ def test_multi_pen(self):
+ pens = [RecordingPen(), RecordingPen()]
+ pen = Cu2QuMultiPen(pens, 0.1)
+ pen.moveTo([((0, 0),), ((0, 0),)])
+ pen.lineTo([((0, 1),), ((0, 1),)])
+ pen.qCurveTo([((0, 2),), ((0, 2),)])
+ pen.qCurveTo([((0, 3), (1, 3)), ((0, 3), (1, 4))])
+ pen.curveTo([((2, 3), (0, 3), (0, 0)), ((1.1, 4), (0, 4), (0, 0))])
+ pen.closePath()
+
+ assert len(pens[0].value) == 6
+ assert len(pens[1].value) == 6
+
+ for op0, op1 in zip(pens[0].value, pens[1].value):
+ assert op0[0] == op0[0]
+ assert op0[0] != "curveTo"
+
+
+class TestAllQuadraticFalse(unittest.TestCase):
+ def test_segment_pen_cubic(self):
+ rpen = RecordingPen()
+ pen = Cu2QuPen(rpen, 0.1, all_quadratic=False)
+
+ pen.moveTo((0, 0))
+ pen.curveTo((0, 1), (2, 1), (2, 0))
+ pen.closePath()
+
+ assert rpen.value == [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((0, 1), (2, 1), (2, 0))),
+ ("closePath", ()),
+ ]
+
+ def test_segment_pen_quadratic(self):
+ rpen = RecordingPen()
+ pen = Cu2QuPen(rpen, 0.1, all_quadratic=False)
+
+ pen.moveTo((0, 0))
+ pen.curveTo((2, 2), (4, 2), (6, 0))
+ pen.closePath()
+
+ assert rpen.value == [
+ ("moveTo", ((0, 0),)),
+ ("qCurveTo", ((3, 3), (6, 0))),
+ ("closePath", ()),
+ ]
+
+ def test_point_pen_cubic(self):
+ rpen = RecordingPointPen()
+ pen = Cu2QuPointPen(rpen, 0.1, all_quadratic=False)
+
+ pen.beginPath()
+ pen.addPoint((0, 0), "move")
+ pen.addPoint((0, 1))
+ pen.addPoint((2, 1))
+ pen.addPoint((2, 0), "curve")
+ pen.endPath()
+
+ assert rpen.value == [
+ ("beginPath", (), {}),
+ ("addPoint", ((0, 0), "move", False, None), {}),
+ ("addPoint", ((0, 1), None, False, None), {}),
+ ("addPoint", ((2, 1), None, False, None), {}),
+ ("addPoint", ((2, 0), "curve", False, None), {}),
+ ("endPath", (), {}),
+ ]
+
+ def test_point_pen_quadratic(self):
+ rpen = RecordingPointPen()
+ pen = Cu2QuPointPen(rpen, 0.1, all_quadratic=False)
+
+ pen.beginPath()
+ pen.addPoint((0, 0), "move")
+ pen.addPoint((2, 2))
+ pen.addPoint((4, 2))
+ pen.addPoint((6, 0), "curve")
+ pen.endPath()
+
+ assert rpen.value == [
+ ("beginPath", (), {}),
+ ("addPoint", ((0, 0), "move", False, None), {}),
+ ("addPoint", ((3, 3), None, False, None), {}),
+ ("addPoint", ((6, 0), "qcurve", False, None), {}),
+ ("endPath", (), {}),
+ ]
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Tests/pens/perimeterPen_test.py b/Tests/pens/perimeterPen_test.py
index 1b645345..dff1a088 100644
--- a/Tests/pens/perimeterPen_test.py
+++ b/Tests/pens/perimeterPen_test.py
@@ -1,120 +1,128 @@
from fontTools.pens.perimeterPen import PerimeterPen
import unittest
+
def draw1_(pen):
- pen.moveTo( (254, 360) )
- pen.lineTo( (771, 367) )
- pen.curveTo( (800, 393), (808, 399), (819, 412) )
- pen.curveTo( (818, 388), (774, 138), (489, 145) )
- pen.curveTo( (188, 145), (200, 398), (200, 421) )
- pen.curveTo( (209, 409), (220, 394), (254, 360) )
+ pen.moveTo((254, 360))
+ pen.lineTo((771, 367))
+ pen.curveTo((800, 393), (808, 399), (819, 412))
+ pen.curveTo((818, 388), (774, 138), (489, 145))
+ pen.curveTo((188, 145), (200, 398), (200, 421))
+ pen.curveTo((209, 409), (220, 394), (254, 360))
pen.closePath()
+
def draw2_(pen):
- pen.moveTo( (254, 360) )
- pen.curveTo( (220, 394), (209, 409), (200, 421) )
- pen.curveTo( (200, 398), (188, 145), (489, 145) )
- pen.curveTo( (774, 138), (818, 388), (819, 412) )
- pen.curveTo( (808, 399), (800, 393), (771, 367) )
+ pen.moveTo((254, 360))
+ pen.curveTo((220, 394), (209, 409), (200, 421))
+ pen.curveTo((200, 398), (188, 145), (489, 145))
+ pen.curveTo((774, 138), (818, 388), (819, 412))
+ pen.curveTo((808, 399), (800, 393), (771, 367))
pen.closePath()
+
def draw3_(pen):
- pen.moveTo( (771, 367) )
- pen.curveTo( (800, 393), (808, 399), (819, 412) )
- pen.curveTo( (818, 388), (774, 138), (489, 145) )
- pen.curveTo( (188, 145), (200, 398), (200, 421) )
- pen.curveTo( (209, 409), (220, 394), (254, 360) )
+ pen.moveTo((771, 367))
+ pen.curveTo((800, 393), (808, 399), (819, 412))
+ pen.curveTo((818, 388), (774, 138), (489, 145))
+ pen.curveTo((188, 145), (200, 398), (200, 421))
+ pen.curveTo((209, 409), (220, 394), (254, 360))
pen.closePath()
+
def draw4_(pen):
- pen.moveTo( (771, 367) )
- pen.lineTo( (254, 360) )
- pen.curveTo( (220, 394), (209, 409), (200, 421) )
- pen.curveTo( (200, 398), (188, 145), (489, 145) )
- pen.curveTo( (774, 138), (818, 388), (819, 412) )
- pen.curveTo( (808, 399), (800, 393), (771, 367) )
+ pen.moveTo((771, 367))
+ pen.lineTo((254, 360))
+ pen.curveTo((220, 394), (209, 409), (200, 421))
+ pen.curveTo((200, 398), (188, 145), (489, 145))
+ pen.curveTo((774, 138), (818, 388), (819, 412))
+ pen.curveTo((808, 399), (800, 393), (771, 367))
pen.closePath()
+
def draw5_(pen):
- pen.moveTo( (254, 360) )
- pen.lineTo( (771, 367) )
- pen.qCurveTo( (793, 386), (802, 394) )
- pen.qCurveTo( (811, 402), (819, 412) )
- pen.qCurveTo( (819, 406), (814, 383.5) )
- pen.qCurveTo( (809, 361), (796, 330.5) )
- pen.qCurveTo( (783, 300), (760.5, 266.5) )
- pen.qCurveTo( (738, 233), (701, 205.5) )
- pen.qCurveTo( (664, 178), (612, 160.5) )
- pen.qCurveTo( (560, 143), (489, 145) )
- pen.qCurveTo( (414, 145), (363, 164) )
- pen.qCurveTo( (312, 183), (280, 211.5) )
- pen.qCurveTo( (248, 240), (231.5, 274.5) )
- pen.qCurveTo( (215, 309), (208, 339.5) )
- pen.qCurveTo( (201, 370), (200.5, 392.5) )
- pen.qCurveTo( (200, 415), (200, 421) )
- pen.qCurveTo( (207, 412), (217.5, 399) )
- pen.qCurveTo( (228, 386), (254, 360) )
+ pen.moveTo((254, 360))
+ pen.lineTo((771, 367))
+ pen.qCurveTo((793, 386), (802, 394))
+ pen.qCurveTo((811, 402), (819, 412))
+ pen.qCurveTo((819, 406), (814, 383.5))
+ pen.qCurveTo((809, 361), (796, 330.5))
+ pen.qCurveTo((783, 300), (760.5, 266.5))
+ pen.qCurveTo((738, 233), (701, 205.5))
+ pen.qCurveTo((664, 178), (612, 160.5))
+ pen.qCurveTo((560, 143), (489, 145))
+ pen.qCurveTo((414, 145), (363, 164))
+ pen.qCurveTo((312, 183), (280, 211.5))
+ pen.qCurveTo((248, 240), (231.5, 274.5))
+ pen.qCurveTo((215, 309), (208, 339.5))
+ pen.qCurveTo((201, 370), (200.5, 392.5))
+ pen.qCurveTo((200, 415), (200, 421))
+ pen.qCurveTo((207, 412), (217.5, 399))
+ pen.qCurveTo((228, 386), (254, 360))
pen.closePath()
+
def draw6_(pen):
- pen.moveTo( (254, 360) )
- pen.qCurveTo( (228, 386), (217.5, 399) )
- pen.qCurveTo( (207, 412), (200, 421) )
- pen.qCurveTo( (200, 415), (200.5, 392.5) )
- pen.qCurveTo( (201, 370), (208, 339.5) )
- pen.qCurveTo( (215, 309), (231.5, 274.5) )
- pen.qCurveTo( (248, 240), (280, 211.5) )
- pen.qCurveTo( (312, 183), (363, 164) )
- pen.qCurveTo( (414, 145), (489, 145) )
- pen.qCurveTo( (560, 143), (612, 160.5) )
- pen.qCurveTo( (664, 178), (701, 205.5) )
- pen.qCurveTo( (738, 233), (760.5, 266.5) )
- pen.qCurveTo( (783, 300), (796, 330.5) )
- pen.qCurveTo( (809, 361), (814, 383.5) )
- pen.qCurveTo( (819, 406), (819, 412) )
- pen.qCurveTo( (811, 402), (802, 394) )
- pen.qCurveTo( (793, 386), (771, 367) )
+ pen.moveTo((254, 360))
+ pen.qCurveTo((228, 386), (217.5, 399))
+ pen.qCurveTo((207, 412), (200, 421))
+ pen.qCurveTo((200, 415), (200.5, 392.5))
+ pen.qCurveTo((201, 370), (208, 339.5))
+ pen.qCurveTo((215, 309), (231.5, 274.5))
+ pen.qCurveTo((248, 240), (280, 211.5))
+ pen.qCurveTo((312, 183), (363, 164))
+ pen.qCurveTo((414, 145), (489, 145))
+ pen.qCurveTo((560, 143), (612, 160.5))
+ pen.qCurveTo((664, 178), (701, 205.5))
+ pen.qCurveTo((738, 233), (760.5, 266.5))
+ pen.qCurveTo((783, 300), (796, 330.5))
+ pen.qCurveTo((809, 361), (814, 383.5))
+ pen.qCurveTo((819, 406), (819, 412))
+ pen.qCurveTo((811, 402), (802, 394))
+ pen.qCurveTo((793, 386), (771, 367))
pen.closePath()
+
def draw7_(pen):
- pen.moveTo( (771, 367) )
- pen.qCurveTo( (793, 386), (802, 394) )
- pen.qCurveTo( (811, 402), (819, 412) )
- pen.qCurveTo( (819, 406), (814, 383.5) )
- pen.qCurveTo( (809, 361), (796, 330.5) )
- pen.qCurveTo( (783, 300), (760.5, 266.5) )
- pen.qCurveTo( (738, 233), (701, 205.5) )
- pen.qCurveTo( (664, 178), (612, 160.5) )
- pen.qCurveTo( (560, 143), (489, 145) )
- pen.qCurveTo( (414, 145), (363, 164) )
- pen.qCurveTo( (312, 183), (280, 211.5) )
- pen.qCurveTo( (248, 240), (231.5, 274.5) )
- pen.qCurveTo( (215, 309), (208, 339.5) )
- pen.qCurveTo( (201, 370), (200.5, 392.5) )
- pen.qCurveTo( (200, 415), (200, 421) )
- pen.qCurveTo( (207, 412), (217.5, 399) )
- pen.qCurveTo( (228, 386), (254, 360) )
+ pen.moveTo((771, 367))
+ pen.qCurveTo((793, 386), (802, 394))
+ pen.qCurveTo((811, 402), (819, 412))
+ pen.qCurveTo((819, 406), (814, 383.5))
+ pen.qCurveTo((809, 361), (796, 330.5))
+ pen.qCurveTo((783, 300), (760.5, 266.5))
+ pen.qCurveTo((738, 233), (701, 205.5))
+ pen.qCurveTo((664, 178), (612, 160.5))
+ pen.qCurveTo((560, 143), (489, 145))
+ pen.qCurveTo((414, 145), (363, 164))
+ pen.qCurveTo((312, 183), (280, 211.5))
+ pen.qCurveTo((248, 240), (231.5, 274.5))
+ pen.qCurveTo((215, 309), (208, 339.5))
+ pen.qCurveTo((201, 370), (200.5, 392.5))
+ pen.qCurveTo((200, 415), (200, 421))
+ pen.qCurveTo((207, 412), (217.5, 399))
+ pen.qCurveTo((228, 386), (254, 360))
pen.closePath()
+
def draw8_(pen):
- pen.moveTo( (771, 367) )
- pen.lineTo( (254, 360) )
- pen.qCurveTo( (228, 386), (217.5, 399) )
- pen.qCurveTo( (207, 412), (200, 421) )
- pen.qCurveTo( (200, 415), (200.5, 392.5) )
- pen.qCurveTo( (201, 370), (208, 339.5) )
- pen.qCurveTo( (215, 309), (231.5, 274.5) )
- pen.qCurveTo( (248, 240), (280, 211.5) )
- pen.qCurveTo( (312, 183), (363, 164) )
- pen.qCurveTo( (414, 145), (489, 145) )
- pen.qCurveTo( (560, 143), (612, 160.5) )
- pen.qCurveTo( (664, 178), (701, 205.5) )
- pen.qCurveTo( (738, 233), (760.5, 266.5) )
- pen.qCurveTo( (783, 300), (796, 330.5) )
- pen.qCurveTo( (809, 361), (814, 383.5) )
- pen.qCurveTo( (819, 406), (819, 412) )
- pen.qCurveTo( (811, 402), (802, 394) )
- pen.qCurveTo( (793, 386), (771, 367) )
+ pen.moveTo((771, 367))
+ pen.lineTo((254, 360))
+ pen.qCurveTo((228, 386), (217.5, 399))
+ pen.qCurveTo((207, 412), (200, 421))
+ pen.qCurveTo((200, 415), (200.5, 392.5))
+ pen.qCurveTo((201, 370), (208, 339.5))
+ pen.qCurveTo((215, 309), (231.5, 274.5))
+ pen.qCurveTo((248, 240), (280, 211.5))
+ pen.qCurveTo((312, 183), (363, 164))
+ pen.qCurveTo((414, 145), (489, 145))
+ pen.qCurveTo((560, 143), (612, 160.5))
+ pen.qCurveTo((664, 178), (701, 205.5))
+ pen.qCurveTo((738, 233), (760.5, 266.5))
+ pen.qCurveTo((783, 300), (796, 330.5))
+ pen.qCurveTo((809, 361), (814, 383.5))
+ pen.qCurveTo((819, 406), (819, 412))
+ pen.qCurveTo((811, 402), (802, 394))
+ pen.qCurveTo((793, 386), (771, 367))
pen.closePath()
@@ -160,6 +168,7 @@ class PerimeterPenTest(unittest.TestCase):
self.assertEqual(1589, round(pen.value))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/pens/pointInsidePen_test.py b/Tests/pens/pointInsidePen_test.py
index b561c43f..85936ff5 100644
--- a/Tests/pens/pointInsidePen_test.py
+++ b/Tests/pens/pointInsidePen_test.py
@@ -6,69 +6,59 @@ import unittest
class PointInsidePenTest(unittest.TestCase):
def test_line(self):
def draw_triangles(pen):
- pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0))
- pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4))
+ pen.moveTo((0, 0))
+ pen.lineTo((10, 5))
+ pen.lineTo((10, 0))
+ pen.moveTo((9, 1))
+ pen.lineTo((4, 1))
+ pen.lineTo((9, 4))
pen.closePath()
self.assertEqual(
- " *********"
- " ** *"
- " ** *"
- " * *"
- " *",
- self.render(draw_triangles, even_odd=True))
+ " *********" " ** *" " ** *" " * *" " *",
+ self.render(draw_triangles, even_odd=True),
+ )
self.assertEqual(
- " *********"
- " *******"
- " *****"
- " ***"
- " *",
- self.render(draw_triangles, even_odd=False))
+ " *********" " *******" " *****" " ***" " *",
+ self.render(draw_triangles, even_odd=False),
+ )
def test_curve(self):
def draw_curves(pen):
- pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5))
- pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0))
+ pen.moveTo((0, 0))
+ pen.curveTo((9, 1), (9, 4), (0, 5))
+ pen.moveTo((10, 5))
+ pen.curveTo((1, 4), (1, 1), (10, 0))
pen.closePath()
self.assertEqual(
- "*** ***"
- "**** ****"
- "*** ***"
- "**** ****"
- "*** ***",
- self.render(draw_curves, even_odd=True))
+ "*** ***" "**** ****" "*** ***" "**** ****" "*** ***",
+ self.render(draw_curves, even_odd=True),
+ )
self.assertEqual(
- "*** ***"
- "**********"
- "**********"
- "**********"
- "*** ***",
- self.render(draw_curves, even_odd=False))
+ "*** ***" "**********" "**********" "**********" "*** ***",
+ self.render(draw_curves, even_odd=False),
+ )
def test_qCurve(self):
def draw_qCurves(pen):
- pen.moveTo((0,0)); pen.qCurveTo((15,2), (0,5))
- pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0))
+ pen.moveTo((0, 0))
+ pen.qCurveTo((15, 2), (0, 5))
+ pen.moveTo((10, 5))
+ pen.qCurveTo((-5, 3), (10, 0))
pen.closePath()
self.assertEqual(
- "*** **"
- "**** ***"
- "*** ***"
- "*** ****"
- "** ***",
- self.render(draw_qCurves, even_odd=True))
+ "*** **" "**** ***" "*** ***" "*** ****" "** ***",
+ self.render(draw_qCurves, even_odd=True),
+ )
self.assertEqual(
- "*** **"
- "**********"
- "**********"
- "**********"
- "** ***",
- self.render(draw_qCurves, even_odd=False))
+ "*** **" "**********" "**********" "**********" "** ***",
+ self.render(draw_qCurves, even_odd=False),
+ )
@staticmethod
def render(draw_function, even_odd):
@@ -83,142 +73,148 @@ class PointInsidePenTest(unittest.TestCase):
result.write(" ")
return result.getvalue()
-
def test_contour_no_solutions(self):
def draw_contour(pen):
- pen.moveTo( (969, 230) )
- pen.curveTo( (825, 348) , (715, 184) , (614, 202) )
- pen.lineTo( (614, 160) )
- pen.lineTo( (969, 160) )
+ pen.moveTo((969, 230))
+ pen.curveTo((825, 348), (715, 184), (614, 202))
+ pen.lineTo((614, 160))
+ pen.lineTo((969, 160))
pen.closePath()
- piPen = PointInsidePen(None, (750, 295)) # this point is outside
+ piPen = PointInsidePen(None, (750, 295)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
self.assertEqual(piPen.getResult(), False)
- piPen = PointInsidePen(None, (835, 190)) # this point is inside
+ piPen = PointInsidePen(None, (835, 190)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 1)
self.assertEqual(piPen.getResult(), True)
def test_contour_square_closed(self):
def draw_contour(pen):
- pen.moveTo( (100, 100) )
- pen.lineTo( (-100, 100) )
- pen.lineTo( (-100, -100) )
- pen.lineTo( (100, -100) )
+ pen.moveTo((100, 100))
+ pen.lineTo((-100, 100))
+ pen.lineTo((-100, -100))
+ pen.lineTo((100, -100))
pen.closePath()
- piPen = PointInsidePen(None, (0, 0)) # this point is inside
+ piPen = PointInsidePen(None, (0, 0)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 1)
self.assertEqual(piPen.getResult(), True)
def test_contour_square_opened(self):
def draw_contour(pen):
- pen.moveTo( (100, 100) )
- pen.lineTo( (-100, 100) )
- pen.lineTo( (-100, -100) )
- pen.lineTo( (100, -100) )
+ pen.moveTo((100, 100))
+ pen.lineTo((-100, 100))
+ pen.lineTo((-100, -100))
+ pen.lineTo((100, -100))
# contour not explicitly closed
- piPen = PointInsidePen(None, (0, 0)) # this point is inside
+ piPen = PointInsidePen(None, (0, 0)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 1)
self.assertEqual(piPen.getResult(), True)
def test_contour_circle(self):
def draw_contour(pen):
- pen.moveTo( (0, 100) )
- pen.curveTo( (-55, 100) , (-100, 55) , (-100, 0) )
- pen.curveTo( (-100, -55) , (-55, -100) , (0, -100) )
- pen.curveTo( (55, -100) , (100, -55) , (100, 0) )
- pen.curveTo( (100, 55) , (55, 100) , (0, 100) )
+ pen.moveTo((0, 100))
+ pen.curveTo((-55, 100), (-100, 55), (-100, 0))
+ pen.curveTo((-100, -55), (-55, -100), (0, -100))
+ pen.curveTo((55, -100), (100, -55), (100, 0))
+ pen.curveTo((100, 55), (55, 100), (0, 100))
- piPen = PointInsidePen(None, (50, 50)) # this point is inside
+ piPen = PointInsidePen(None, (50, 50)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getResult(), True)
- piPen = PointInsidePen(None, (50, -50)) # this point is inside
+ piPen = PointInsidePen(None, (50, -50)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getResult(), True)
def test_contour_diamond(self):
def draw_contour(pen):
- pen.moveTo( (0, 100) )
- pen.lineTo( (100, 0) )
- pen.lineTo( (0, -100) )
- pen.lineTo( (-100, 0) )
+ pen.moveTo((0, 100))
+ pen.lineTo((100, 0))
+ pen.lineTo((0, -100))
+ pen.lineTo((-100, 0))
pen.closePath()
- piPen = PointInsidePen(None, (-200, 0)) # this point is outside
+ piPen = PointInsidePen(None, (-200, 0)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
- piPen = PointInsidePen(None, (-200, 100)) # this point is outside
+ piPen = PointInsidePen(None, (-200, 100)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
- piPen = PointInsidePen(None, (-200, -100)) # this point is outside
+ piPen = PointInsidePen(None, (-200, -100)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
- piPen = PointInsidePen(None, (-200, 50)) # this point is outside
+ piPen = PointInsidePen(None, (-200, 50)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour_integers(self):
def draw_contour(pen):
- pen.moveTo( (728, 697) )
- pen.lineTo( (504, 699) )
- pen.curveTo( (487, 719) , (508, 783) , (556, 783) )
- pen.lineTo( (718, 783) )
- pen.curveTo( (739, 783) , (749, 712) , (728, 697) )
+ pen.moveTo((728, 697))
+ pen.lineTo((504, 699))
+ pen.curveTo((487, 719), (508, 783), (556, 783))
+ pen.lineTo((718, 783))
+ pen.curveTo((739, 783), (749, 712), (728, 697))
pen.closePath()
- piPen = PointInsidePen(None, (416, 783)) # this point is outside
+ piPen = PointInsidePen(None, (416, 783)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour_decimals(self):
def draw_contour(pen):
- pen.moveTo( (727.546875, 697.0) )
- pen.lineTo( (504.375, 698.515625) )
- pen.curveTo( (487.328125, 719.359375), (507.84375, 783.140625), (555.796875, 783.140625) )
- pen.lineTo( (717.96875, 783.140625) )
- pen.curveTo( (738.890625, 783.140625), (748.796875, 711.5), (727.546875, 697.0) )
+ pen.moveTo((727.546875, 697.0))
+ pen.lineTo((504.375, 698.515625))
+ pen.curveTo(
+ (487.328125, 719.359375),
+ (507.84375, 783.140625),
+ (555.796875, 783.140625),
+ )
+ pen.lineTo((717.96875, 783.140625))
+ pen.curveTo(
+ (738.890625, 783.140625), (748.796875, 711.5), (727.546875, 697.0)
+ )
pen.closePath()
- piPen = PointInsidePen(None, (416.625, 783.140625)) # this point is outside
+ piPen = PointInsidePen(None, (416.625, 783.140625)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour2_integers(self):
def draw_contour(pen):
- pen.moveTo( (51, 22) )
- pen.lineTo( (51, 74) )
- pen.lineTo( (83, 50) )
- pen.curveTo( (83, 49) , (82, 48) , (82, 47) )
+ pen.moveTo((51, 22))
+ pen.lineTo((51, 74))
+ pen.lineTo((83, 50))
+ pen.curveTo((83, 49), (82, 48), (82, 47))
pen.closePath()
- piPen = PointInsidePen(None, (21, 50)) # this point is outside
+ piPen = PointInsidePen(None, (21, 50)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour2_decimals(self):
def draw_contour(pen):
- pen.moveTo( (51.25, 21.859375) )
- pen.lineTo( (51.25, 73.828125) )
- pen.lineTo( (82.5, 50.0) )
- pen.curveTo( (82.5, 49.09375) , (82.265625, 48.265625) , (82.234375, 47.375) )
+ pen.moveTo((51.25, 21.859375))
+ pen.lineTo((51.25, 73.828125))
+ pen.lineTo((82.5, 50.0))
+ pen.curveTo((82.5, 49.09375), (82.265625, 48.265625), (82.234375, 47.375))
pen.closePath()
- piPen = PointInsidePen(None, (21.25, 50.0)) # this point is outside
+ piPen = PointInsidePen(None, (21.25, 50.0)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
+
if __name__ == "__main__":
import sys
- sys.exit(unittest.main())
+ sys.exit(unittest.main())
diff --git a/Tests/pens/pointPen_test.py b/Tests/pens/pointPen_test.py
index a9201780..e811826f 100644
--- a/Tests/pens/pointPen_test.py
+++ b/Tests/pens/pointPen_test.py
@@ -1,12 +1,16 @@
import unittest
from fontTools.pens.basePen import AbstractPen
-from fontTools.pens.pointPen import AbstractPointPen, PointToSegmentPen, \
- SegmentToPointPen, GuessSmoothPointPen, ReverseContourPointPen
+from fontTools.pens.pointPen import (
+ AbstractPointPen,
+ PointToSegmentPen,
+ SegmentToPointPen,
+ GuessSmoothPointPen,
+ ReverseContourPointPen,
+)
class _TestSegmentPen(AbstractPen):
-
def __init__(self):
self._commands = []
@@ -49,7 +53,6 @@ def _reprKwargs(kwargs):
class _TestPointPen(AbstractPointPen):
-
def __init__(self):
self._commands = []
@@ -63,8 +66,9 @@ class _TestPointPen(AbstractPointPen):
items.extend(_reprKwargs(kwargs))
self._commands.append("beginPath(%s)" % ", ".join(items))
- def addPoint(self, pt, segmentType=None, smooth=False, name=None,
- identifier=None, **kwargs):
+ def addPoint(
+ self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
+ ):
items = ["%s" % (pt,)]
if segmentType is not None:
items.append("segmentType='%s'" % segmentType)
@@ -89,7 +93,6 @@ class _TestPointPen(AbstractPointPen):
class PointToSegmentPenTest(unittest.TestCase):
-
def test_open(self):
pen = _TestSegmentPen()
ppen = PointToSegmentPen(pen)
@@ -123,7 +126,7 @@ class PointToSegmentPenTest(unittest.TestCase):
def test_quad(self):
pen = _TestSegmentPen()
ppen = PointToSegmentPen(pen)
- ppen.beginPath(identifier='foo')
+ ppen.beginPath(identifier="foo")
ppen.addPoint((10, 10), "line")
ppen.addPoint((10, 40))
ppen.addPoint((40, 40))
@@ -150,9 +153,11 @@ class PointToSegmentPenTest(unittest.TestCase):
ppen.addPoint((20, 20))
ppen.addPoint((20, 40), "curve")
ppen.endPath()
- self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') addPoint((10, 20)) "
- "addPoint((20, 20)) addPoint((20, 40), segmentType='curve') endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((10, 10), segmentType='line') addPoint((10, 20)) "
+ "addPoint((20, 20)) addPoint((20, 40), segmentType='curve') endPath()",
+ repr(tpen),
+ )
def test_closed_outputImpliedClosingLine(self):
tpen = _TestSegmentPen()
@@ -168,7 +173,7 @@ class PointToSegmentPenTest(unittest.TestCase):
"20 20 lineto "
"10 10 lineto " # explicit closing line
"closepath",
- repr(tpen)
+ repr(tpen),
)
def test_closed_line_overlapping_start_end_points(self):
@@ -193,7 +198,7 @@ class PointToSegmentPenTest(unittest.TestCase):
"0 651 lineto "
"0 651 lineto "
"closepath",
- repr(tpen)
+ repr(tpen),
)
def test_roundTrip2(self):
@@ -212,19 +217,19 @@ class PointToSegmentPenTest(unittest.TestCase):
"addPoint((0, 101), segmentType='line') "
"addPoint((0, 651), segmentType='line') "
"endPath()",
- repr(tpen)
+ repr(tpen),
)
class TestSegmentToPointPen(unittest.TestCase):
-
def test_move(self):
tpen = _TestPointPen()
pen = SegmentToPointPen(tpen)
pen.moveTo((10, 10))
pen.endPath()
- self.assertEqual("beginPath() addPoint((10, 10), segmentType='move') endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((10, 10), segmentType='move') endPath()", repr(tpen)
+ )
def test_poly(self):
tpen = _TestPointPen()
@@ -233,10 +238,12 @@ class TestSegmentToPointPen(unittest.TestCase):
pen.lineTo((10, 20))
pen.lineTo((20, 20))
pen.closePath()
- self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') "
- "addPoint((10, 20), segmentType='line') "
- "addPoint((20, 20), segmentType='line') endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((10, 10), segmentType='line') "
+ "addPoint((10, 20), segmentType='line') "
+ "addPoint((20, 20), segmentType='line') endPath()",
+ repr(tpen),
+ )
def test_cubic(self):
tpen = _TestPointPen()
@@ -244,9 +251,12 @@ class TestSegmentToPointPen(unittest.TestCase):
pen.moveTo((10, 10))
pen.curveTo((10, 20), (20, 20), (20, 10))
pen.closePath()
- self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') "
- "addPoint((10, 20)) addPoint((20, 20)) addPoint((20, 10), "
- "segmentType='curve') endPath()", repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((10, 10), segmentType='line') "
+ "addPoint((10, 20)) addPoint((20, 20)) addPoint((20, 10), "
+ "segmentType='curve') endPath()",
+ repr(tpen),
+ )
def test_quad(self):
tpen = _TestPointPen()
@@ -254,19 +264,23 @@ class TestSegmentToPointPen(unittest.TestCase):
pen.moveTo((10, 10))
pen.qCurveTo((10, 20), (20, 20), (20, 10))
pen.closePath()
- self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') "
- "addPoint((10, 20)) addPoint((20, 20)) "
- "addPoint((20, 10), segmentType='qcurve') endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((10, 10), segmentType='line') "
+ "addPoint((10, 20)) addPoint((20, 20)) "
+ "addPoint((20, 10), segmentType='qcurve') endPath()",
+ repr(tpen),
+ )
def test_quad2(self):
tpen = _TestPointPen()
pen = SegmentToPointPen(tpen)
pen.qCurveTo((10, 20), (20, 20), (20, 10), (10, 10), None)
pen.closePath()
- self.assertEqual("beginPath() addPoint((10, 20)) addPoint((20, 20)) "
- "addPoint((20, 10)) addPoint((10, 10)) endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((10, 20)) addPoint((20, 20)) "
+ "addPoint((20, 10)) addPoint((10, 10)) endPath()",
+ repr(tpen),
+ )
def test_roundTrip1(self):
spen = _TestSegmentPen()
@@ -282,31 +296,34 @@ class TestSegmentToPointPen(unittest.TestCase):
pen = SegmentToPointPen(PointToSegmentPen(spen))
pen.qCurveTo((10, 20), (20, 20), (20, 10), (10, 10), None)
pen.closePath()
- pen.addComponent('base', [1, 0, 0, 1, 0, 0])
- self.assertEqual("10 20 20 20 20 10 10 10 None qcurveto closepath "
- "'base' [1, 0, 0, 1, 0, 0] addcomponent",
- repr(spen))
+ pen.addComponent("base", [1, 0, 0, 1, 0, 0])
+ self.assertEqual(
+ "10 20 20 20 20 10 10 10 None qcurveto closepath "
+ "'base' [1, 0, 0, 1, 0, 0] addcomponent",
+ repr(spen),
+ )
class TestGuessSmoothPointPen(unittest.TestCase):
-
def test_guessSmooth_exact(self):
tpen = _TestPointPen()
pen = GuessSmoothPointPen(tpen)
pen.beginPath(identifier="foo")
pen.addPoint((0, 100), segmentType="curve")
pen.addPoint((0, 200))
- pen.addPoint((400, 200), identifier='bar')
+ pen.addPoint((400, 200), identifier="bar")
pen.addPoint((400, 100), segmentType="curve")
pen.addPoint((400, 0))
pen.addPoint((0, 0))
pen.endPath()
- self.assertEqual("beginPath(identifier='foo') "
- "addPoint((0, 100), segmentType='curve', smooth=True) "
- "addPoint((0, 200)) addPoint((400, 200), identifier='bar') "
- "addPoint((400, 100), segmentType='curve', smooth=True) "
- "addPoint((400, 0)) addPoint((0, 0)) endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath(identifier='foo') "
+ "addPoint((0, 100), segmentType='curve', smooth=True) "
+ "addPoint((0, 200)) addPoint((400, 200), identifier='bar') "
+ "addPoint((400, 100), segmentType='curve', smooth=True) "
+ "addPoint((400, 0)) addPoint((0, 0)) endPath()",
+ repr(tpen),
+ )
def test_guessSmooth_almost(self):
tpen = _TestPointPen()
@@ -319,11 +336,13 @@ class TestGuessSmoothPointPen(unittest.TestCase):
pen.addPoint((400, 0))
pen.addPoint((0, 0))
pen.endPath()
- self.assertEqual("beginPath() addPoint((0, 100), segmentType='curve', smooth=True) "
- "addPoint((1, 200)) addPoint((395, 200)) "
- "addPoint((400, 100), segmentType='curve', smooth=True) "
- "addPoint((400, 0)) addPoint((0, 0)) endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((0, 100), segmentType='curve', smooth=True) "
+ "addPoint((1, 200)) addPoint((395, 200)) "
+ "addPoint((400, 100), segmentType='curve', smooth=True) "
+ "addPoint((400, 0)) addPoint((0, 0)) endPath()",
+ repr(tpen),
+ )
def test_guessSmooth_tangent(self):
tpen = _TestPointPen()
@@ -335,24 +354,26 @@ class TestGuessSmoothPointPen(unittest.TestCase):
pen.addPoint((300, 200))
pen.addPoint((400, 200), segmentType="curve")
pen.endPath()
- self.assertEqual("beginPath() addPoint((0, 0), segmentType='move') "
- "addPoint((0, 100), segmentType='line', smooth=True) "
- "addPoint((3, 200)) addPoint((300, 200)) "
- "addPoint((400, 200), segmentType='curve') endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() addPoint((0, 0), segmentType='move') "
+ "addPoint((0, 100), segmentType='line', smooth=True) "
+ "addPoint((3, 200)) addPoint((300, 200)) "
+ "addPoint((400, 200), segmentType='curve') endPath()",
+ repr(tpen),
+ )
-class TestReverseContourPointPen(unittest.TestCase):
+class TestReverseContourPointPen(unittest.TestCase):
def test_singlePoint(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="move")
pen.endPath()
- self.assertEqual("beginPath() "
- "addPoint((0, 0), segmentType='move') "
- "endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() " "addPoint((0, 0), segmentType='move') " "endPath()",
+ repr(tpen),
+ )
def test_line(self):
tpen = _TestPointPen()
@@ -361,11 +382,13 @@ class TestReverseContourPointPen(unittest.TestCase):
pen.addPoint((0, 0), segmentType="move")
pen.addPoint((0, 100), segmentType="line")
pen.endPath()
- self.assertEqual("beginPath() "
- "addPoint((0, 100), segmentType='move') "
- "addPoint((0, 0), segmentType='line') "
- "endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() "
+ "addPoint((0, 100), segmentType='move') "
+ "addPoint((0, 0), segmentType='line') "
+ "endPath()",
+ repr(tpen),
+ )
def test_triangle(self):
tpen = _TestPointPen()
@@ -375,12 +398,14 @@ class TestReverseContourPointPen(unittest.TestCase):
pen.addPoint((0, 100), segmentType="line")
pen.addPoint((100, 100), segmentType="line")
pen.endPath()
- self.assertEqual("beginPath() "
- "addPoint((0, 0), segmentType='line') "
- "addPoint((100, 100), segmentType='line') "
- "addPoint((0, 100), segmentType='line') "
- "endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() "
+ "addPoint((0, 0), segmentType='line') "
+ "addPoint((100, 100), segmentType='line') "
+ "addPoint((0, 100), segmentType='line') "
+ "endPath()",
+ repr(tpen),
+ )
def test_cubicOpen(self):
tpen = _TestPointPen()
@@ -391,13 +416,15 @@ class TestReverseContourPointPen(unittest.TestCase):
pen.addPoint((100, 200))
pen.addPoint((200, 200), segmentType="curve")
pen.endPath()
- self.assertEqual("beginPath() "
- "addPoint((200, 200), segmentType='move') "
- "addPoint((100, 200)) "
- "addPoint((0, 100)) "
- "addPoint((0, 0), segmentType='curve') "
- "endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() "
+ "addPoint((200, 200), segmentType='move') "
+ "addPoint((100, 200)) "
+ "addPoint((0, 100)) "
+ "addPoint((0, 0), segmentType='curve') "
+ "endPath()",
+ repr(tpen),
+ )
def test_quadOpen(self):
tpen = _TestPointPen()
@@ -408,13 +435,15 @@ class TestReverseContourPointPen(unittest.TestCase):
pen.addPoint((100, 200))
pen.addPoint((200, 200), segmentType="qcurve")
pen.endPath()
- self.assertEqual("beginPath() "
- "addPoint((200, 200), segmentType='move') "
- "addPoint((100, 200)) "
- "addPoint((0, 100)) "
- "addPoint((0, 0), segmentType='qcurve') "
- "endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() "
+ "addPoint((200, 200), segmentType='move') "
+ "addPoint((100, 200)) "
+ "addPoint((0, 100)) "
+ "addPoint((0, 0), segmentType='qcurve') "
+ "endPath()",
+ repr(tpen),
+ )
def test_cubicClosed(self):
tpen = _TestPointPen()
@@ -425,13 +454,15 @@ class TestReverseContourPointPen(unittest.TestCase):
pen.addPoint((100, 200))
pen.addPoint((200, 200), segmentType="curve")
pen.endPath()
- self.assertEqual("beginPath() "
- "addPoint((0, 0), segmentType='curve') "
- "addPoint((200, 200), segmentType='line') "
- "addPoint((100, 200)) "
- "addPoint((0, 100)) "
- "endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() "
+ "addPoint((0, 0), segmentType='curve') "
+ "addPoint((200, 200), segmentType='line') "
+ "addPoint((100, 200)) "
+ "addPoint((0, 100)) "
+ "endPath()",
+ repr(tpen),
+ )
def test_quadClosedOffCurveStart(self):
tpen = _TestPointPen()
@@ -442,32 +473,36 @@ class TestReverseContourPointPen(unittest.TestCase):
pen.addPoint((0, 0), segmentType="line")
pen.addPoint((0, 100))
pen.endPath()
- self.assertEqual("beginPath() "
- "addPoint((100, 200)) "
- "addPoint((0, 100)) "
- "addPoint((0, 0), segmentType='qcurve') "
- "addPoint((200, 200), segmentType='line') "
- "endPath()",
- repr(tpen))
+ self.assertEqual(
+ "beginPath() "
+ "addPoint((100, 200)) "
+ "addPoint((0, 100)) "
+ "addPoint((0, 0), segmentType='qcurve') "
+ "addPoint((200, 200), segmentType='line') "
+ "endPath()",
+ repr(tpen),
+ )
def test_quadNoOnCurve(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
- pen.beginPath(identifier='bar')
+ pen.beginPath(identifier="bar")
pen.addPoint((0, 0))
- pen.addPoint((0, 100), identifier='foo', arbitrary='foo')
+ pen.addPoint((0, 100), identifier="foo", arbitrary="foo")
pen.addPoint((100, 200), arbitrary=123)
pen.addPoint((200, 200))
pen.endPath()
- pen.addComponent("base", [1, 0, 0, 1, 0, 0], identifier='foo')
- self.assertEqual("beginPath(identifier='bar') "
- "addPoint((0, 0)) "
- "addPoint((200, 200)) "
- "addPoint((100, 200), arbitrary=123) "
- "addPoint((0, 100), identifier='foo', arbitrary='foo') "
- "endPath() "
- "addComponent('base', [1, 0, 0, 1, 0, 0], identifier='foo')",
- repr(tpen))
+ pen.addComponent("base", [1, 0, 0, 1, 0, 0], identifier="foo")
+ self.assertEqual(
+ "beginPath(identifier='bar') "
+ "addPoint((0, 0)) "
+ "addPoint((200, 200)) "
+ "addPoint((100, 200), arbitrary=123) "
+ "addPoint((0, 100), identifier='foo', arbitrary='foo') "
+ "endPath() "
+ "addComponent('base', [1, 0, 0, 1, 0, 0], identifier='foo')",
+ repr(tpen),
+ )
def test_closed_line_overlapping_start_end_points(self):
# Test case from https://github.com/googlefonts/fontmake/issues/572
@@ -486,5 +521,5 @@ class TestReverseContourPointPen(unittest.TestCase):
"addPoint((0, 101), segmentType='line') "
"addPoint((0, 101), segmentType='line') "
"endPath()",
- repr(tpen)
+ repr(tpen),
)
diff --git a/Tests/pens/qu2cuPen_test.py b/Tests/pens/qu2cuPen_test.py
new file mode 100644
index 00000000..94449194
--- /dev/null
+++ b/Tests/pens/qu2cuPen_test.py
@@ -0,0 +1,253 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+from fontTools.pens.qu2cuPen import Qu2CuPen
+from fontTools.pens.recordingPen import RecordingPen
+from textwrap import dedent
+import pytest
+
+try:
+ from .utils import CUBIC_GLYPHS, QUAD_GLYPHS
+ from .utils import DummyGlyph
+ from .utils import DummyPen
+except ImportError as e:
+ pytest.skip(str(e), allow_module_level=True)
+
+MAX_ERR = 1.0
+
+
+class _TestPenMixin(object):
+ """Collection of tests that are shared by both the SegmentPen and the
+ PointPen test cases, plus some helper methods.
+ Note: We currently don't have a PointPen.
+ """
+
+ maxDiff = None
+
+ def diff(self, expected, actual):
+ import difflib
+
+ expected = str(self.Glyph(expected)).splitlines(True)
+ actual = str(self.Glyph(actual)).splitlines(True)
+ diff = difflib.unified_diff(
+ expected, actual, fromfile="expected", tofile="actual"
+ )
+ return "".join(diff)
+
+ def convert_glyph(self, glyph, **kwargs):
+ # draw source glyph onto a new glyph using a Cu2Qu pen and return it
+ converted = self.Glyph()
+ pen = getattr(converted, self.pen_getter_name)()
+ cubicpen = self.Qu2CuPen(pen, MAX_ERR, all_cubic=True, **kwargs)
+ getattr(glyph, self.draw_method_name)(cubicpen)
+ return converted
+
+ def expect_glyph(self, source, expected):
+ converted = self.convert_glyph(source)
+ self.assertNotEqual(converted, source)
+ if not converted.approx(expected):
+ print(self.diff(expected, converted))
+ self.fail("converted glyph is different from expected")
+
+ def test_convert_simple_glyph(self):
+ self.expect_glyph(QUAD_GLYPHS["a"], CUBIC_GLYPHS["a"])
+ self.expect_glyph(QUAD_GLYPHS["A"], CUBIC_GLYPHS["A"])
+
+ def test_convert_composite_glyph(self):
+ source = CUBIC_GLYPHS["Aacute"]
+ converted = self.convert_glyph(source)
+ # components don't change after quadratic conversion
+ self.assertEqual(converted, source)
+
+ def test_reverse_direction(self):
+ for name in ("a", "A", "Eacute"):
+ source = QUAD_GLYPHS[name]
+ normal_glyph = self.convert_glyph(source)
+ reversed_glyph = self.convert_glyph(source, reverse_direction=True)
+
+ # the number of commands is the same, just their order is inverted
+ self.assertTrue(len(normal_glyph.outline), len(reversed_glyph.outline))
+ self.assertNotEqual(normal_glyph, reversed_glyph)
+
+ def test_stats(self):
+ stats = {}
+ for name in QUAD_GLYPHS.keys():
+ source = QUAD_GLYPHS[name]
+ self.convert_glyph(source, stats=stats)
+
+ self.assertTrue(stats)
+ self.assertTrue("2" in stats)
+ self.assertEqual(type(stats["2"]), int)
+
+ def test_addComponent(self):
+ pen = self.Pen()
+ cubicpen = self.Qu2CuPen(pen, MAX_ERR)
+ cubicpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
+
+ # components are passed through without changes
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
+ ],
+ )
+
+
+class TestQu2CuPen(unittest.TestCase, _TestPenMixin):
+ def __init__(self, *args, **kwargs):
+ super(TestQu2CuPen, self).__init__(*args, **kwargs)
+ self.Glyph = DummyGlyph
+ self.Pen = DummyPen
+ self.Qu2CuPen = Qu2CuPen
+ self.pen_getter_name = "getPen"
+ self.draw_method_name = "draw"
+
+ def test_qCurveTo_1_point(self):
+ pen = DummyPen()
+ cubicpen = Qu2CuPen(pen, MAX_ERR)
+ cubicpen.moveTo((0, 0))
+ cubicpen.qCurveTo((1, 1))
+ cubicpen.closePath()
+
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((1, 1))",
+ "pen.closePath()",
+ ],
+ )
+
+ def test_qCurveTo_2_points(self):
+ pen = DummyPen()
+ cubicpen = Qu2CuPen(pen, MAX_ERR)
+ cubicpen.moveTo((0, 0))
+ cubicpen.qCurveTo((1, 1), (2, 2))
+ cubicpen.closePath()
+
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((1, 1), (2, 2))",
+ "pen.closePath()",
+ ],
+ )
+
+ def test_qCurveTo_3_points_no_conversion(self):
+ pen = DummyPen()
+ cubicpen = Qu2CuPen(pen, MAX_ERR)
+ cubicpen.moveTo((0, 0))
+ cubicpen.qCurveTo((0, 3), (1, 3), (1, 0))
+ cubicpen.closePath()
+
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.qCurveTo((0, 3), (1, 3), (1, 0))",
+ "pen.closePath()",
+ ],
+ )
+
+ def test_qCurveTo_no_oncurve_points(self):
+ pen = DummyPen()
+ cubicpen = Qu2CuPen(pen, MAX_ERR)
+ cubicpen.qCurveTo((0, 0), (1, 0), (1, 1), (0, 1), None)
+ cubicpen.closePath()
+
+ self.assertEqual(
+ str(pen).splitlines(),
+ ["pen.qCurveTo((0, 0), (1, 0), (1, 1), (0, 1), None)", "pen.closePath()"],
+ )
+
+ def test_curveTo_1_point(self):
+ pen = DummyPen()
+ cubicpen = Qu2CuPen(pen, MAX_ERR)
+ cubicpen.moveTo((0, 0))
+ cubicpen.curveTo((1, 1))
+ cubicpen.closePath()
+
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.curveTo((1, 1))",
+ "pen.closePath()",
+ ],
+ )
+
+ def test_curveTo_2_points(self):
+ pen = DummyPen()
+ cubicpen = Qu2CuPen(pen, MAX_ERR)
+ cubicpen.moveTo((0, 0))
+ cubicpen.curveTo((1, 1), (2, 2))
+ cubicpen.closePath()
+
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.curveTo((1, 1), (2, 2))",
+ "pen.closePath()",
+ ],
+ )
+
+ def test_curveTo_3_points(self):
+ pen = DummyPen()
+ cubicpen = Qu2CuPen(pen, MAX_ERR)
+ cubicpen.moveTo((0, 0))
+ cubicpen.curveTo((1, 1), (2, 2), (3, 3))
+ cubicpen.closePath()
+
+ self.assertEqual(
+ str(pen).splitlines(),
+ [
+ "pen.moveTo((0, 0))",
+ "pen.curveTo((1, 1), (2, 2), (3, 3))",
+ "pen.closePath()",
+ ],
+ )
+
+ def test_all_cubic(self):
+ inPen = RecordingPen()
+ inPen.value = [
+ ("moveTo", ((1204, 347),)),
+ ("qCurveTo", ((1255, 347), (1323, 433), (1323, 467))),
+ ("qCurveTo", ((1323, 478), (1310, 492), (1302, 492))),
+ ("qCurveTo", ((1295, 492), (1289, 484))),
+ ("lineTo", ((1272, 461),)),
+ ("qCurveTo", ((1256, 439), (1221, 416), (1200, 416))),
+ ("qCurveTo", ((1181, 416), (1141, 440), (1141, 462))),
+ ("qCurveTo", ((1141, 484), (1190, 565), (1190, 594))),
+ ("qCurveTo", ((1190, 607), (1181, 634), (1168, 634))),
+ ("qCurveTo", ((1149, 634), (1146, 583), (1081, 496), (1081, 463))),
+ ("qCurveTo", ((1081, 417), (1164, 347), (1204, 347))),
+ ("closePath", ()),
+ ]
+
+ outPen = RecordingPen()
+ q2cPen = Qu2CuPen(outPen, 1.0, all_cubic=True)
+ inPen.replay(q2cPen)
+
+ print(outPen.value)
+
+ assert not any(typ == "qCurveTo" for typ, _ in outPen.value)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Tests/pens/quartzPen_test.py b/Tests/pens/quartzPen_test.py
index 3a81d97f..0caa379d 100644
--- a/Tests/pens/quartzPen_test.py
+++ b/Tests/pens/quartzPen_test.py
@@ -12,11 +12,11 @@ try:
PATH_ELEMENTS = {
# CG constant key desc num_points
- kCGPathElementMoveToPoint: ('moveto', 1),
- kCGPathElementAddLineToPoint: ('lineto', 1),
- kCGPathElementAddCurveToPoint: ('curveto', 3),
- kCGPathElementAddQuadCurveToPoint: ('qcurveto', 2),
- kCGPathElementCloseSubpath: ('close', 0),
+ kCGPathElementMoveToPoint: ("moveto", 1),
+ kCGPathElementAddLineToPoint: ("lineto", 1),
+ kCGPathElementAddCurveToPoint: ("curveto", 3),
+ kCGPathElementAddQuadCurveToPoint: ("qcurveto", 2),
+ kCGPathElementCloseSubpath: ("close", 0),
}
PYOBJC_AVAILABLE = True
@@ -65,7 +65,7 @@ class QuartzPenTest(unittest.TestCase):
draw(pen)
self.assertEqual(
"moveto 50.0 0.0 lineto 50.0 500.0 lineto 200.0 500.0 curveto 350.0 500.0 450.0 400.0 450.0 250.0 curveto 450.0 100.0 350.0 0.0 200.0 0.0 close ",
- quartzPathToString(pen.path)
+ quartzPathToString(pen.path),
)
def test_empty(self):
@@ -73,6 +73,7 @@ class QuartzPenTest(unittest.TestCase):
self.assertEqual("", quartzPathToString(pen.path))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/pens/reverseContourPen_test.py b/Tests/pens/reverseContourPen_test.py
index 9c715404..c250847e 100644
--- a/Tests/pens/reverseContourPen_test.py
+++ b/Tests/pens/reverseContourPen_test.py
@@ -6,331 +6,512 @@ import pytest
TEST_DATA = [
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('lineTo', ((2, 2),)),
- ('lineTo', ((3, 3),)), # last not on move, line is implied
- ('closePath', ()),
- ],
- [
- ('moveTo', ((0, 0),)),
- ('lineTo', ((3, 3),)),
- ('lineTo', ((2, 2),)),
- ('lineTo', ((1, 1),)),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((3, 3),)), # last not on move, line is implied
+ ("closePath", ()),
+ ],
+ False, # outputImpliedClosingLine
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((3, 3),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((1, 1),)),
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('lineTo', ((2, 2),)),
- ('lineTo', ((0, 0),)), # last on move, no implied line
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((3, 3),)), # last line does not overlap move...
+ ("closePath", ()),
],
+ True, # outputImpliedClosingLine
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((3, 3),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((0, 0),)), # ... but closing line is NOT implied
+ ("closePath", ()),
+ ],
+ ),
+ (
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((0, 0),)), # last line overlaps move, explicit line
+ ("closePath", ()),
+ ],
+ False,
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((2, 2),)),
- ('lineTo', ((1, 1),)),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((1, 1),)),
+ ("closePath", ()), # closing line implied
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('lineTo', ((2, 2),)),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((0, 0),)), # last line overlaps move...
+ ("closePath", ()),
],
+ True,
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((2, 2),)),
- ('lineTo', ((1, 1),)),
- ('lineTo', ((0, 0),)),
- ('lineTo', ((0, 0),)),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((0, 0),)), # ... but line is NOT implied
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((0, 0),)), # duplicate lineTo following moveTo
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((2, 2),)),
+ ("closePath", ()),
+ ],
+ False,
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((0, 0),)), # extra explicit lineTo is always emitted to
+ ("lineTo", ((0, 0),)), # disambiguate from an implicit closing line
+ ("closePath", ()),
],
+ ),
+ (
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((0, 0),)), # duplicate lineTo following moveTo
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((2, 2),)),
+ ("closePath", ()),
+ ],
+ True,
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((2, 2),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((0, 0),)), # duplicate lineTo is retained also in this case,
+ ("lineTo", ((0, 0),)), # same result as with outputImpliedClosingLine=False
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('curveTo', ((1, 1), (2, 2), (3, 3))),
- ('curveTo', ((4, 4), (5, 5), (0, 0))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("closePath", ()),
],
+ False,
[
- ('moveTo', ((0, 0),)),
- ('curveTo', ((5, 5), (4, 4), (3, 3))),
- ('curveTo', ((2, 2), (1, 1), (0, 0))),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('curveTo', ((1, 1), (2, 2), (3, 3))),
- ('curveTo', ((4, 4), (5, 5), (6, 6))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("closePath", ()),
],
+ True,
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((6, 6),)), # implied line
- ('curveTo', ((5, 5), (4, 4), (3, 3))),
- ('curveTo', ((2, 2), (1, 1), (0, 0))),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("lineTo", ((0, 0),)),
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)), # this line becomes implied
- ('curveTo', ((2, 2), (3, 3), (4, 4))),
- ('curveTo', ((5, 5), (6, 6), (7, 7))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("curveTo", ((4, 4), (5, 5), (0, 0))), # closed curveTo overlaps moveTo
+ ("closePath", ()),
],
+ False,
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((7, 7),)),
- ('curveTo', ((6, 6), (5, 5), (4, 4))),
- ('curveTo', ((3, 3), (2, 2), (1, 1))),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)), # no extra lineTo added here
+ ("curveTo", ((5, 5), (4, 4), (3, 3))),
+ ("curveTo", ((2, 2), (1, 1), (0, 0))),
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('qCurveTo', ((1, 1), (2, 2))),
- ('qCurveTo', ((3, 3), (0, 0))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("curveTo", ((4, 4), (5, 5), (0, 0))), # closed curveTo overlaps moveTo
+ ("closePath", ()),
],
+ True,
[
- ('moveTo', ((0, 0),)),
- ('qCurveTo', ((3, 3), (2, 2))),
- ('qCurveTo', ((1, 1), (0, 0))),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)), # no extra lineTo added here, same as preceding
+ ("curveTo", ((5, 5), (4, 4), (3, 3))),
+ ("curveTo", ((2, 2), (1, 1), (0, 0))),
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('qCurveTo', ((1, 1), (2, 2))),
- ('qCurveTo', ((3, 3), (4, 4))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("curveTo", ((4, 4), (5, 5), (6, 6))), # closed curve not overlapping move
+ ("closePath", ()),
],
+ False,
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((4, 4),)),
- ('qCurveTo', ((3, 3), (2, 2))),
- ('qCurveTo', ((1, 1), (0, 0))),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((6, 6),)), # the previously implied line
+ ("curveTo", ((5, 5), (4, 4), (3, 3))),
+ ("curveTo", ((2, 2), (1, 1), (0, 0))),
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('qCurveTo', ((2, 2), (3, 3))),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("curveTo", ((4, 4), (5, 5), (6, 6))), # closed curve not overlapping move
+ ("closePath", ()),
],
+ True,
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((3, 3),)),
- ('qCurveTo', ((2, 2), (1, 1))),
- ('closePath', ()),
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((6, 6),)), # the previously implied line (same as above)
+ ("curveTo", ((5, 5), (4, 4), (3, 3))),
+ ("curveTo", ((2, 2), (1, 1), (0, 0))),
+ ("closePath", ()),
+ ],
),
(
[
- ('addComponent', ('a', (1, 0, 0, 1, 0, 0)))
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)), # this line becomes implied
+ ("curveTo", ((2, 2), (3, 3), (4, 4))),
+ ("curveTo", ((5, 5), (6, 6), (7, 7))),
+ ("closePath", ()),
],
+ False,
[
- ('addComponent', ('a', (1, 0, 0, 1, 0, 0)))
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((7, 7),)),
+ ("curveTo", ((6, 6), (5, 5), (4, 4))),
+ ("curveTo", ((3, 3), (2, 2), (1, 1))),
+ ("closePath", ()),
+ ],
),
(
- [], []
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)), # this line...
+ ("curveTo", ((2, 2), (3, 3), (4, 4))),
+ ("curveTo", ((5, 5), (6, 6), (7, 7))),
+ ("closePath", ()),
+ ],
+ True,
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((7, 7),)),
+ ("curveTo", ((6, 6), (5, 5), (4, 4))),
+ ("curveTo", ((3, 3), (2, 2), (1, 1))),
+ ("lineTo", ((0, 0),)), # ... does NOT become implied
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('endPath', ()),
+ ("moveTo", ((0, 0),)),
+ ("qCurveTo", ((1, 1), (2, 2))),
+ ("qCurveTo", ((3, 3), (0, 0))), # closed qCurve overlaps move
+ ("closePath", ()),
],
+ False,
[
- ('moveTo', ((0, 0),)),
- ('endPath', ()),
+ ("moveTo", ((0, 0),)), # no extra lineTo added here
+ ("qCurveTo", ((3, 3), (2, 2))),
+ ("qCurveTo", ((1, 1), (0, 0))),
+ ("closePath", ()),
],
),
(
[
- ('moveTo', ((0, 0),)),
- ('closePath', ()),
+ ("moveTo", ((0, 0),)),
+ ("qCurveTo", ((1, 1), (2, 2))),
+ ("qCurveTo", ((3, 3), (0, 0))), # closed qCurve overlaps move
+ ("closePath", ()),
],
+ True, # <--
[
- ('moveTo', ((0, 0),)),
- ('endPath', ()), # single-point paths is always open
+ ("moveTo", ((0, 0),)), # no extra lineTo added here, same as above
+ ("qCurveTo", ((3, 3), (2, 2))),
+ ("qCurveTo", ((1, 1), (0, 0))),
+ ("closePath", ()),
],
),
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('endPath', ())
+ ("moveTo", ((0, 0),)),
+ ("qCurveTo", ((1, 1), (2, 2))),
+ ("qCurveTo", ((3, 3), (4, 4))), # closed qCurve not overlapping move
+ ("closePath", ()),
],
+ False,
[
- ('moveTo', ((1, 1),)),
- ('lineTo', ((0, 0),)),
- ('endPath', ())
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((4, 4),)), # the previously implied line
+ ("qCurveTo", ((3, 3), (2, 2))),
+ ("qCurveTo", ((1, 1), (0, 0))),
+ ("closePath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('curveTo', ((1, 1), (2, 2), (3, 3))),
- ('endPath', ())
+ ("moveTo", ((0, 0),)),
+ ("qCurveTo", ((1, 1), (2, 2))),
+ ("qCurveTo", ((3, 3), (4, 4))), # closed qCurve not overlapping move
+ ("closePath", ()),
],
+ True,
[
- ('moveTo', ((3, 3),)),
- ('curveTo', ((2, 2), (1, 1), (0, 0))),
- ('endPath', ())
- ]
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((4, 4),)), # the previously implied line (same as above)
+ ("qCurveTo", ((3, 3), (2, 2))),
+ ("qCurveTo", ((1, 1), (0, 0))),
+ ("closePath", ()),
+ ],
+ ),
+ (
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("qCurveTo", ((2, 2), (3, 3))),
+ ("closePath", ()),
+ ],
+ False,
+ [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((3, 3),)),
+ ("qCurveTo", ((2, 2), (1, 1))),
+ ("closePath", ()),
+ ],
+ ),
+ (
+ [("addComponent", ("a", (1, 0, 0, 1, 0, 0)))],
+ False,
+ [("addComponent", ("a", (1, 0, 0, 1, 0, 0)))],
),
+ ([], False, []),
(
[
- ('moveTo', ((0, 0),)),
- ('curveTo', ((1, 1), (2, 2), (3, 3))),
- ('lineTo', ((4, 4),)),
- ('endPath', ())
+ ("moveTo", ((0, 0),)),
+ ("endPath", ()),
],
+ False,
[
- ('moveTo', ((4, 4),)),
- ('lineTo', ((3, 3),)),
- ('curveTo', ((2, 2), (1, 1), (0, 0))),
- ('endPath', ())
- ]
+ ("moveTo", ((0, 0),)),
+ ("endPath", ()),
+ ],
),
(
[
- ('moveTo', ((0, 0),)),
- ('lineTo', ((1, 1),)),
- ('curveTo', ((2, 2), (3, 3), (4, 4))),
- ('endPath', ())
+ ("moveTo", ((0, 0),)),
+ ("closePath", ()),
],
+ False,
[
- ('moveTo', ((4, 4),)),
- ('curveTo', ((3, 3), (2, 2), (1, 1))),
- ('lineTo', ((0, 0),)),
- ('endPath', ())
- ]
+ ("moveTo", ((0, 0),)),
+ ("endPath", ()), # single-point paths is always open
+ ],
+ ),
+ (
+ [("moveTo", ((0, 0),)), ("lineTo", ((1, 1),)), ("endPath", ())],
+ False,
+ [("moveTo", ((1, 1),)), ("lineTo", ((0, 0),)), ("endPath", ())],
+ ),
+ (
+ [("moveTo", ((0, 0),)), ("curveTo", ((1, 1), (2, 2), (3, 3))), ("endPath", ())],
+ False,
+ [("moveTo", ((3, 3),)), ("curveTo", ((2, 2), (1, 1), (0, 0))), ("endPath", ())],
),
(
[
- ('qCurveTo', ((0, 0), (1, 1), (2, 2), None)),
- ('closePath', ())
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 1), (2, 2), (3, 3))),
+ ("lineTo", ((4, 4),)),
+ ("endPath", ()),
],
+ False,
[
- ('qCurveTo', ((0, 0), (2, 2), (1, 1), None)),
- ('closePath', ())
- ]
+ ("moveTo", ((4, 4),)),
+ ("lineTo", ((3, 3),)),
+ ("curveTo", ((2, 2), (1, 1), (0, 0))),
+ ("endPath", ()),
+ ],
),
(
[
- ('qCurveTo', ((0, 0), (1, 1), (2, 2), None)),
- ('endPath', ())
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((1, 1),)),
+ ("curveTo", ((2, 2), (3, 3), (4, 4))),
+ ("endPath", ()),
],
+ False,
[
- ('qCurveTo', ((0, 0), (2, 2), (1, 1), None)),
- ('closePath', ()) # this is always "closed"
- ]
+ ("moveTo", ((4, 4),)),
+ ("curveTo", ((3, 3), (2, 2), (1, 1))),
+ ("lineTo", ((0, 0),)),
+ ("endPath", ()),
+ ],
+ ),
+ (
+ [("qCurveTo", ((0, 0), (1, 1), (2, 2), None)), ("closePath", ())],
+ False,
+ [("qCurveTo", ((0, 0), (2, 2), (1, 1), None)), ("closePath", ())],
+ ),
+ (
+ [("qCurveTo", ((0, 0), (1, 1), (2, 2), None)), ("endPath", ())],
+ False,
+ [
+ ("qCurveTo", ((0, 0), (2, 2), (1, 1), None)),
+ ("closePath", ()), # this is always "closed"
+ ],
),
# Test case from:
# https://github.com/googlei18n/cu2qu/issues/51#issue-179370514
(
[
- ('moveTo', ((848, 348),)),
- ('lineTo', ((848, 348),)), # duplicate lineTo point after moveTo
- ('qCurveTo', ((848, 526), (649, 704), (449, 704))),
- ('qCurveTo', ((449, 704), (248, 704), (50, 526), (50, 348))),
- ('lineTo', ((50, 348),)),
- ('qCurveTo', ((50, 348), (50, 171), (248, -3), (449, -3))),
- ('qCurveTo', ((449, -3), (649, -3), (848, 171), (848, 348))),
- ('closePath', ())
- ],
- [
- ('moveTo', ((848, 348),)),
- ('qCurveTo', ((848, 171), (649, -3), (449, -3), (449, -3))),
- ('qCurveTo', ((248, -3), (50, 171), (50, 348), (50, 348))),
- ('lineTo', ((50, 348),)),
- ('qCurveTo', ((50, 526), (248, 704), (449, 704), (449, 704))),
- ('qCurveTo', ((649, 704), (848, 526), (848, 348))),
- ('lineTo', ((848, 348),)), # the duplicate point is kept
- ('closePath', ())
- ]
+ ("moveTo", ((848, 348),)),
+ ("lineTo", ((848, 348),)), # duplicate lineTo point after moveTo
+ ("qCurveTo", ((848, 526), (649, 704), (449, 704))),
+ ("qCurveTo", ((449, 704), (248, 704), (50, 526), (50, 348))),
+ ("lineTo", ((50, 348),)),
+ ("qCurveTo", ((50, 348), (50, 171), (248, -3), (449, -3))),
+ ("qCurveTo", ((449, -3), (649, -3), (848, 171), (848, 348))),
+ ("closePath", ()),
+ ],
+ False,
+ [
+ ("moveTo", ((848, 348),)),
+ ("qCurveTo", ((848, 171), (649, -3), (449, -3), (449, -3))),
+ ("qCurveTo", ((248, -3), (50, 171), (50, 348), (50, 348))),
+ ("lineTo", ((50, 348),)),
+ ("qCurveTo", ((50, 526), (248, 704), (449, 704), (449, 704))),
+ ("qCurveTo", ((649, 704), (848, 526), (848, 348))),
+ ("lineTo", ((848, 348),)), # the duplicate point is kept
+ ("closePath", ()),
+ ],
),
# Test case from https://github.com/googlefonts/fontmake/issues/572
# An additional closing lineTo is required to disambiguate a duplicate
# point at the end of a contour from the implied closing line.
(
[
- ('moveTo', ((0, 651),)),
- ('lineTo', ((0, 101),)),
- ('lineTo', ((0, 101),)),
- ('lineTo', ((0, 651),)),
- ('lineTo', ((0, 651),)),
- ('closePath', ())
+ ("moveTo", ((0, 651),)),
+ ("lineTo", ((0, 101),)),
+ ("lineTo", ((0, 101),)),
+ ("lineTo", ((0, 651),)),
+ ("lineTo", ((0, 651),)),
+ ("closePath", ()),
],
+ False,
[
- ('moveTo', ((0, 651),)),
- ('lineTo', ((0, 651),)),
- ('lineTo', ((0, 101),)),
- ('lineTo', ((0, 101),)),
- ('closePath', ())
- ]
- )
+ ("moveTo", ((0, 651),)),
+ ("lineTo", ((0, 651),)),
+ ("lineTo", ((0, 101),)),
+ ("lineTo", ((0, 101),)),
+ ("closePath", ()),
+ ],
+ ),
+ (
+ [
+ ("moveTo", ((0, 651),)),
+ ("lineTo", ((0, 101),)),
+ ("lineTo", ((0, 101),)),
+ ("lineTo", ((0, 651),)),
+ ("lineTo", ((0, 651),)),
+ ("closePath", ()),
+ ],
+ True,
+ [
+ ("moveTo", ((0, 651),)),
+ ("lineTo", ((0, 651),)),
+ ("lineTo", ((0, 101),)),
+ ("lineTo", ((0, 101),)),
+ ("lineTo", ((0, 651),)), # closing line not implied
+ ("closePath", ()),
+ ],
+ ),
]
-@pytest.mark.parametrize("contour, expected", TEST_DATA)
-def test_reverse_pen(contour, expected):
+@pytest.mark.parametrize("contour, outputImpliedClosingLine, expected", TEST_DATA)
+def test_reverse_pen(contour, outputImpliedClosingLine, expected):
recpen = RecordingPen()
- revpen = ReverseContourPen(recpen)
+ revpen = ReverseContourPen(recpen, outputImpliedClosingLine)
for operator, operands in contour:
getattr(revpen, operator)(*operands)
assert recpen.value == expected
-@pytest.mark.parametrize("contour, expected", TEST_DATA)
-def test_reverse_point_pen(contour, expected):
- from fontTools.ufoLib.pointPen import (
- ReverseContourPointPen, PointToSegmentPen, SegmentToPointPen)
+def test_reverse_pen_outputImpliedClosingLine():
+ recpen = RecordingPen()
+ revpen = ReverseContourPen(recpen)
+ revpen.moveTo((0, 0))
+ revpen.lineTo((10, 0))
+ revpen.lineTo((0, 10))
+ revpen.lineTo((0, 0))
+ revpen.closePath()
+ assert recpen.value == [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((0, 10),)),
+ ("lineTo", ((10, 0),)),
+ # ("lineTo", ((0, 0),)), # implied
+ ("closePath", ()),
+ ]
+
+ recpen = RecordingPen()
+ revpen = ReverseContourPen(recpen, outputImpliedClosingLine=True)
+ revpen.moveTo((0, 0))
+ revpen.lineTo((10, 0))
+ revpen.lineTo((0, 10))
+ revpen.lineTo((0, 0))
+ revpen.closePath()
+ assert recpen.value == [
+ ("moveTo", ((0, 0),)),
+ ("lineTo", ((0, 10),)),
+ ("lineTo", ((10, 0),)),
+ ("lineTo", ((0, 0),)), # not implied
+ ("closePath", ()),
+ ]
+
+
+@pytest.mark.parametrize("contour, outputImpliedClosingLine, expected", TEST_DATA)
+def test_reverse_point_pen(contour, outputImpliedClosingLine, expected):
+ from fontTools.pens.pointPen import (
+ ReverseContourPointPen,
+ PointToSegmentPen,
+ SegmentToPointPen,
+ )
recpen = RecordingPen()
- pt2seg = PointToSegmentPen(recpen, outputImpliedClosingLine=True)
+ pt2seg = PointToSegmentPen(recpen, outputImpliedClosingLine)
revpen = ReverseContourPointPen(pt2seg)
seg2pt = SegmentToPointPen(revpen)
for operator, operands in contour:
getattr(seg2pt, operator)(*operands)
- # for closed contours that have a lineTo following the moveTo,
- # and whose points don't overlap, our current implementation diverges
- # from the ReverseContourPointPen as wrapped by ufoLib's pen converters.
- # In the latter case, an extra lineTo is added because of
- # outputImpliedClosingLine=True. This is redundant but not incorrect,
- # as the number of points is the same in both.
- if (contour and contour[-1][0] == "closePath" and
- contour[1][0] == "lineTo" and contour[1][1] != contour[0][1]):
- expected = expected[:-1] + [("lineTo", contour[0][1])] + expected[-1:]
-
assert recpen.value == expected
diff --git a/Tests/pens/t2CharStringPen_test.py b/Tests/pens/t2CharStringPen_test.py
index b710df55..74db2411 100644
--- a/Tests/pens/t2CharStringPen_test.py
+++ b/Tests/pens/t2CharStringPen_test.py
@@ -3,7 +3,6 @@ import unittest
class T2CharStringPenTest(unittest.TestCase):
-
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
@@ -31,13 +30,24 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertEqual(
- [100,
- 0, 'hmoveto',
- 10, 10, -10, 'hlineto',
- 10, 'hmoveto',
- 10, -10, -10, 'vlineto',
- 'endchar'],
- charstring.program)
+ [
+ 100,
+ 0,
+ "hmoveto",
+ 10,
+ 10,
+ -10,
+ "hlineto",
+ 10,
+ "hmoveto",
+ 10,
+ -10,
+ -10,
+ "vlineto",
+ "endchar",
+ ],
+ charstring.program,
+ )
def test_draw_lines(self):
pen = T2CharStringPen(100, {})
@@ -49,11 +59,9 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertEqual(
- [100,
- 5, 5, 'rmoveto',
- 20, 10, 10, 20, -20, -10, 'rlineto',
- 'endchar'],
- charstring.program)
+ [100, 5, 5, "rmoveto", 20, 10, 10, 20, -20, -10, "rlineto", "endchar"],
+ charstring.program,
+ )
def test_draw_h_v_curves(self):
pen = T2CharStringPen(100, {})
@@ -64,11 +72,23 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertEqual(
- [100,
- 0, 'hmoveto',
- 10, 10, 10, 10, 10, -10, 10, -10, 'hvcurveto',
- 'endchar'],
- charstring.program)
+ [
+ 100,
+ 0,
+ "hmoveto",
+ 10,
+ 10,
+ 10,
+ 10,
+ 10,
+ -10,
+ 10,
+ -10,
+ "hvcurveto",
+ "endchar",
+ ],
+ charstring.program,
+ )
def test_draw_curves(self):
pen = T2CharStringPen(100, {})
@@ -79,11 +99,28 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertEqual(
- [100,
- 95, 25, 'rmoveto',
- 20, 19, 0, 32, -20, 19, -19, 19, -32, 1, -19, -20, 'rrcurveto',
- 'endchar'],
- charstring.program)
+ [
+ 100,
+ 95,
+ 25,
+ "rmoveto",
+ 20,
+ 19,
+ 0,
+ 32,
+ -20,
+ 19,
+ -19,
+ 19,
+ -32,
+ 1,
+ -19,
+ -20,
+ "rrcurveto",
+ "endchar",
+ ],
+ charstring.program,
+ )
def test_draw_more_curves(self):
pen = T2CharStringPen(100, {})
@@ -99,22 +136,61 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertEqual(
- [100,
- 10, 10, 'rmoveto',
- 10, 30, 0, 10, 'hhcurveto',
- 10, 0, 30, 10, 'vvcurveto',
- -10, -10, -10, 10, -10, 'hhcurveto',
- 10, -10, -10, -10, -10, 'vvcurveto',
- -5, -5, -6, -5, 1, 'vhcurveto',
- -5, -6, 5, 5, 1, 'hvcurveto',
- -3, -5, -1, -10, 4, -5, 'rrcurveto',
- 'endchar'],
- charstring.program)
+ [
+ 100,
+ 10,
+ 10,
+ "rmoveto",
+ 10,
+ 30,
+ 0,
+ 10,
+ "hhcurveto",
+ 10,
+ 0,
+ 30,
+ 10,
+ "vvcurveto",
+ -10,
+ -10,
+ -10,
+ 10,
+ -10,
+ "hhcurveto",
+ 10,
+ -10,
+ -10,
+ -10,
+ -10,
+ "vvcurveto",
+ -5,
+ -5,
+ -6,
+ -5,
+ 1,
+ "vhcurveto",
+ -5,
+ -6,
+ 5,
+ 5,
+ 1,
+ "hvcurveto",
+ -3,
+ -5,
+ -1,
+ -10,
+ 4,
+ -5,
+ "rrcurveto",
+ "endchar",
+ ],
+ charstring.program,
+ )
def test_default_width(self):
pen = T2CharStringPen(None, {})
charstring = pen.getCharString(None, None)
- self.assertEqual(['endchar'], charstring.program)
+ self.assertEqual(["endchar"], charstring.program)
def test_no_round(self):
pen = T2CharStringPen(100.1, {}, roundTolerance=0.0)
@@ -125,12 +201,27 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertAlmostEqualProgram(
- [100, # we always round the advance width
- 0, 'hmoveto',
- 10.1, 0.1, 9.8, 9.8, 0.59, 10.59, 'rrcurveto',
- 10, -10.59, 9.41, -9.8, 0.2, 'vhcurveto',
- 'endchar'],
- charstring.program)
+ [
+ 100, # we always round the advance width
+ 0,
+ "hmoveto",
+ 10.1,
+ 0.1,
+ 9.8,
+ 9.8,
+ 0.59,
+ 10.59,
+ "rrcurveto",
+ 10,
+ -10.59,
+ 9.41,
+ -9.8,
+ 0.2,
+ "vhcurveto",
+ "endchar",
+ ],
+ charstring.program,
+ )
def test_round_all(self):
pen = T2CharStringPen(100.1, {}, roundTolerance=0.5)
@@ -141,11 +232,23 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertEqual(
- [100,
- 0, 'hmoveto',
- 10, 10, 10, 10, 11, -10, 9, -10, 'hvcurveto',
- 'endchar'],
- charstring.program)
+ [
+ 100,
+ 0,
+ "hmoveto",
+ 10,
+ 10,
+ 10,
+ 10,
+ 11,
+ -10,
+ 9,
+ -10,
+ "hvcurveto",
+ "endchar",
+ ],
+ charstring.program,
+ )
def test_round_some(self):
pen = T2CharStringPen(100, {}, roundTolerance=0.2)
@@ -159,20 +262,34 @@ class T2CharStringPenTest(unittest.TestCase):
charstring = pen.getCharString(None, None)
self.assertAlmostEqualProgram(
- [100,
- 0, 'hmoveto',
- 10, 'hlineto',
- 10, 10, 0.49, 10.49, 'rlineto',
- 'endchar'],
- charstring.program)
+ [
+ 100,
+ 0,
+ "hmoveto",
+ 10,
+ "hlineto",
+ 10,
+ 10,
+ 0.49,
+ 10.49,
+ "rlineto",
+ "endchar",
+ ],
+ charstring.program,
+ )
def test_invalid_tolerance(self):
self.assertRaisesRegex(
ValueError,
"Rounding tolerance must be positive",
- T2CharStringPen, None, {}, roundTolerance=-0.1)
+ T2CharStringPen,
+ None,
+ {},
+ roundTolerance=-0.1,
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/pens/ttGlyphPen_test.py b/Tests/pens/ttGlyphPen_test.py
index 96d75a19..6a74cb25 100644
--- a/Tests/pens/ttGlyphPen_test.py
+++ b/Tests/pens/ttGlyphPen_test.py
@@ -4,6 +4,7 @@ import struct
from fontTools import ttLib
from fontTools.pens.basePen import PenError
+from fontTools.pens.recordingPen import RecordingPen, RecordingPointPen
from fontTools.pens.ttGlyphPen import TTGlyphPen, TTGlyphPointPen, MAX_F2DOT14
@@ -21,12 +22,11 @@ class TTGlyphPenTestBase:
glyphSet = font.getGlyphSet()
glyfTable = font["glyf"]
- pen = self.penClass(font.getGlyphSet())
+ pen = self.penClass(glyphSet)
for name in font.getGlyphOrder():
- oldGlyph = glyphSet[name]
- getattr(oldGlyph, self.drawMethod)(pen)
- oldGlyph = oldGlyph._glyph
+ getattr(glyphSet[name], self.drawMethod)(pen)
+ oldGlyph = glyfTable[name]
newGlyph = pen.glyph()
if hasattr(oldGlyph, "program"):
@@ -295,6 +295,27 @@ class TTGlyphPenTest(TTGlyphPenTestBase):
uni0302_uni0300.recalcBounds(glyphSet)
self.assertGlyphBoundsEqual(uni0302_uni0300, (-278, 745, 148, 1025))
+ def test_outputImpliedClosingLine(self):
+ glyphSet = {}
+
+ pen = TTGlyphPen(glyphSet)
+ pen.moveTo((0, 0))
+ pen.lineTo((10, 0))
+ pen.lineTo((0, 10))
+ pen.lineTo((0, 0))
+ pen.closePath()
+ glyph = pen.glyph()
+ assert len(glyph.coordinates) == 3
+
+ pen = TTGlyphPen(glyphSet, outputImpliedClosingLine=True)
+ pen.moveTo((0, 0))
+ pen.lineTo((10, 0))
+ pen.lineTo((0, 10))
+ pen.lineTo((0, 0))
+ pen.closePath()
+ glyph = pen.glyph()
+ assert len(glyph.coordinates) == 4
+
class TTGlyphPointPenTest(TTGlyphPenTestBase):
penClass = TTGlyphPointPen
@@ -312,11 +333,11 @@ class TTGlyphPointPenTest(TTGlyphPenTestBase):
assert glyph.numberOfContours == 1
assert glyph.endPtsOfContours == [3]
- def test_addPoint_errorOnCurve(self):
+ def test_addPoint_noErrorOnCurve(self):
pen = TTGlyphPointPen(None)
pen.beginPath()
- with pytest.raises(NotImplementedError):
- pen.addPoint((0, 0), "curve")
+ pen.addPoint((0, 0), "curve")
+ pen.endPath()
def test_beginPath_beginPathOnOpenPath(self):
pen = TTGlyphPointPen(None)
@@ -332,12 +353,6 @@ class TTGlyphPointPenTest(TTGlyphPenTestBase):
with pytest.raises(PenError):
pen.glyph()
- def test_glyph_errorOnEmptyContour(self):
- pen = TTGlyphPointPen(None)
- pen.beginPath()
- with pytest.raises(PenError):
- pen.endPath()
-
def test_glyph_decomposes(self):
componentName = "a"
glyphSet = {}
@@ -574,6 +589,221 @@ class TTGlyphPointPenTest(TTGlyphPenTestBase):
assert pen1.points == pen2.points == [(0, 0), (10, 10), (20, 20), (20, 0)]
assert pen1.types == pen2.types == [1, 1, 0, 1]
+ def test_skip_empty_contours(self):
+ pen = TTGlyphPointPen(None)
+ pen.beginPath()
+ pen.endPath()
+ pen.beginPath()
+ pen.endPath()
+ glyph = pen.glyph()
+ assert glyph.numberOfContours == 0
+
+
+class CubicGlyfTest:
+ def test_cubic_simple(self):
+ spen = TTGlyphPen(None)
+ spen.moveTo((0, 0))
+ spen.curveTo((0, 1), (1, 1), (1, 0))
+ spen.closePath()
+
+ ppen = TTGlyphPointPen(None)
+ ppen.beginPath()
+ ppen.addPoint((0, 0), "line")
+ ppen.addPoint((0, 1))
+ ppen.addPoint((1, 1))
+ ppen.addPoint((1, 0), "curve")
+ ppen.endPath()
+
+ for pen in (spen, ppen):
+ glyph = pen.glyph()
+
+ for i in range(2):
+ if i == 1:
+ glyph.compile(None)
+
+ assert list(glyph.coordinates) == [(0, 0), (0, 1), (1, 1), (1, 0)]
+ assert list(glyph.flags) == [0x01, 0x80, 0x80, 0x01]
+
+ rpen = RecordingPen()
+ glyph.draw(rpen, None)
+ assert rpen.value == [
+ ("moveTo", ((0, 0),)),
+ (
+ "curveTo",
+ (
+ (0, 1),
+ (1, 1),
+ (1, 0),
+ ),
+ ),
+ ("closePath", ()),
+ ]
+
+ @pytest.mark.parametrize(
+ "dropImpliedOnCurves, segment_pen_commands, point_pen_commands, expected_coordinates, expected_flags, expected_endPts",
+ [
+ ( # Two curves that do NOT merge; request merging
+ True,
+ [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((0, 1), (1, 2), (2, 2))),
+ ("curveTo", ((3, 3), (4, 1), (4, 0))),
+ ("closePath", ()),
+ ],
+ [
+ ("beginPath", (), {}),
+ ("addPoint", ((0, 0), "line", None, None), {}),
+ ("addPoint", ((0, 1), None, None, None), {}),
+ ("addPoint", ((1, 2), None, None, None), {}),
+ ("addPoint", ((2, 2), "curve", None, None), {}),
+ ("addPoint", ((3, 3), None, None, None), {}),
+ ("addPoint", ((4, 1), None, None, None), {}),
+ ("addPoint", ((4, 0), "curve", None, None), {}),
+ ("endPath", (), {}),
+ ],
+ [(0, 0), (0, 1), (1, 2), (2, 2), (3, 3), (4, 1), (4, 0)],
+ [0x01, 0x80, 0x80, 0x01, 0x80, 0x80, 0x01],
+ [6],
+ ),
+ ( # Two curves that merge; request merging
+ True,
+ [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((0, 1), (1, 2), (2, 2))),
+ ("curveTo", ((3, 2), (4, 1), (4, 0))),
+ ("closePath", ()),
+ ],
+ [
+ ("beginPath", (), {}),
+ ("addPoint", ((0, 0), "line", None, None), {}),
+ ("addPoint", ((0, 1), None, None, None), {}),
+ ("addPoint", ((1, 2), None, None, None), {}),
+ ("addPoint", ((2, 2), "curve", None, None), {}),
+ ("addPoint", ((3, 2), None, None, None), {}),
+ ("addPoint", ((4, 1), None, None, None), {}),
+ ("addPoint", ((4, 0), "curve", None, None), {}),
+ ("endPath", (), {}),
+ ],
+ [(0, 0), (0, 1), (1, 2), (3, 2), (4, 1), (4, 0)],
+ [0x01, 0x80, 0x80, 0x80, 0x80, 0x01],
+ [5],
+ ),
+ ( # Two curves that merge; request NOT merging
+ False,
+ [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((0, 1), (1, 2), (2, 2))),
+ ("curveTo", ((3, 2), (4, 1), (4, 0))),
+ ("closePath", ()),
+ ],
+ [
+ ("beginPath", (), {}),
+ ("addPoint", ((0, 0), "line", None, None), {}),
+ ("addPoint", ((0, 1), None, None, None), {}),
+ ("addPoint", ((1, 2), None, None, None), {}),
+ ("addPoint", ((2, 2), "curve", None, None), {}),
+ ("addPoint", ((3, 2), None, None, None), {}),
+ ("addPoint", ((4, 1), None, None, None), {}),
+ ("addPoint", ((4, 0), "curve", None, None), {}),
+ ("endPath", (), {}),
+ ],
+ [(0, 0), (0, 1), (1, 2), (2, 2), (3, 2), (4, 1), (4, 0)],
+ [0x01, 0x80, 0x80, 0x01, 0x80, 0x80, 0x01],
+ [6],
+ ),
+ ( # Two (duplicate) contours
+ True,
+ [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((0, 1), (1, 2), (2, 2))),
+ ("curveTo", ((3, 2), (4, 1), (4, 0))),
+ ("closePath", ()),
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((0, 1), (1, 2), (2, 2))),
+ ("curveTo", ((3, 2), (4, 1), (4, 0))),
+ ("closePath", ()),
+ ],
+ [
+ ("beginPath", (), {}),
+ ("addPoint", ((0, 0), "line", None, None), {}),
+ ("addPoint", ((0, 1), None, None, None), {}),
+ ("addPoint", ((1, 2), None, None, None), {}),
+ ("addPoint", ((2, 2), "curve", None, None), {}),
+ ("addPoint", ((3, 2), None, None, None), {}),
+ ("addPoint", ((4, 1), None, None, None), {}),
+ ("addPoint", ((4, 0), "curve", None, None), {}),
+ ("endPath", (), {}),
+ ("beginPath", (), {}),
+ ("addPoint", ((0, 0), "line", None, None), {}),
+ ("addPoint", ((0, 1), None, None, None), {}),
+ ("addPoint", ((1, 2), None, None, None), {}),
+ ("addPoint", ((2, 2), "curve", None, None), {}),
+ ("addPoint", ((3, 2), None, None, None), {}),
+ ("addPoint", ((4, 1), None, None, None), {}),
+ ("addPoint", ((4, 0), "curve", None, None), {}),
+ ("endPath", (), {}),
+ ],
+ [
+ (0, 0),
+ (0, 1),
+ (1, 2),
+ (3, 2),
+ (4, 1),
+ (4, 0),
+ (0, 0),
+ (0, 1),
+ (1, 2),
+ (3, 2),
+ (4, 1),
+ (4, 0),
+ ],
+ [
+ 0x01,
+ 0x80,
+ 0x80,
+ 0x80,
+ 0x80,
+ 0x01,
+ 0x01,
+ 0x80,
+ 0x80,
+ 0x80,
+ 0x80,
+ 0x01,
+ ],
+ [5, 11],
+ ),
+ ],
+ )
+ def test_cubic_topology(
+ self,
+ dropImpliedOnCurves,
+ segment_pen_commands,
+ point_pen_commands,
+ expected_coordinates,
+ expected_flags,
+ expected_endPts,
+ ):
+ spen = TTGlyphPen(None)
+ rpen = RecordingPen()
+ rpen.value = segment_pen_commands
+ rpen.replay(spen)
+
+ ppen = TTGlyphPointPen(None)
+ rpen = RecordingPointPen()
+ rpen.value = point_pen_commands
+ rpen.replay(ppen)
+
+ for pen in (spen, ppen):
+ glyph = pen.glyph(dropImpliedOnCurves=dropImpliedOnCurves)
+
+ assert list(glyph.coordinates) == expected_coordinates
+ assert list(glyph.flags) == expected_flags
+ assert list(glyph.endPtsOfContours) == expected_endPts
+
+ rpen = RecordingPen()
+ glyph.draw(rpen, None)
+ assert rpen.value == segment_pen_commands
class _TestGlyph(object):
diff --git a/Tests/pens/utils.py b/Tests/pens/utils.py
index dced3c1b..c52fddd4 100644
--- a/Tests/pens/utils.py
+++ b/Tests/pens/utils.py
@@ -12,12 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from . import CUBIC_GLYPHS
from fontTools.pens.pointPen import PointToSegmentPen, SegmentToPointPen
+from fontTools.ufoLib.glifLib import GlyphSet
from math import isclose
+import os
import unittest
+DATADIR = os.path.join(os.path.dirname(__file__), "data")
+CUBIC_GLYPHS = GlyphSet(os.path.join(DATADIR, "cubic"))
+QUAD_GLYPHS = GlyphSet(os.path.join(DATADIR, "quadratic"))
+
+
class BaseDummyPen(object):
"""Base class for pens that record the commands they are called with."""
@@ -29,45 +35,45 @@ class BaseDummyPen(object):
return _repr_pen_commands(self.commands)
def addComponent(self, glyphName, transformation, **kwargs):
- self.commands.append(('addComponent', (glyphName, transformation), kwargs))
+ self.commands.append(("addComponent", (glyphName, transformation), kwargs))
class DummyPen(BaseDummyPen):
"""A SegmentPen that records the commands it's called with."""
def moveTo(self, pt):
- self.commands.append(('moveTo', (pt,), {}))
+ self.commands.append(("moveTo", (pt,), {}))
def lineTo(self, pt):
- self.commands.append(('lineTo', (pt,), {}))
+ self.commands.append(("lineTo", (pt,), {}))
def curveTo(self, *points):
- self.commands.append(('curveTo', points, {}))
+ self.commands.append(("curveTo", points, {}))
def qCurveTo(self, *points):
- self.commands.append(('qCurveTo', points, {}))
+ self.commands.append(("qCurveTo", points, {}))
def closePath(self):
- self.commands.append(('closePath', tuple(), {}))
+ self.commands.append(("closePath", tuple(), {}))
def endPath(self):
- self.commands.append(('endPath', tuple(), {}))
+ self.commands.append(("endPath", tuple(), {}))
class DummyPointPen(BaseDummyPen):
"""A PointPen that records the commands it's called with."""
def beginPath(self, **kwargs):
- self.commands.append(('beginPath', tuple(), kwargs))
+ self.commands.append(("beginPath", tuple(), kwargs))
def endPath(self):
- self.commands.append(('endPath', tuple(), {}))
+ self.commands.append(("endPath", tuple(), {}))
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
- kwargs['segmentType'] = str(segmentType) if segmentType else None
- kwargs['smooth'] = smooth
- kwargs['name'] = name
- self.commands.append(('addPoint', (pt,), kwargs))
+ kwargs["segmentType"] = str(segmentType) if segmentType else None
+ kwargs["smooth"] = smooth
+ kwargs["name"] = name
+ self.commands.append(("addPoint", (pt,), kwargs))
class DummyGlyph(object):
@@ -115,9 +121,9 @@ class DummyGlyph(object):
def __eq__(self, other):
"""Return True if 'other' glyph's outline is the same as self."""
- if hasattr(other, 'outline'):
+ if hasattr(other, "outline"):
return self.outline == other.outline
- elif hasattr(other, 'draw'):
+ elif hasattr(other, "draw"):
return self.outline == self.__class__(other).outline
return NotImplemented
@@ -126,9 +132,9 @@ class DummyGlyph(object):
return not (self == other)
def approx(self, other, rel_tol=1e-12):
- if hasattr(other, 'outline'):
+ if hasattr(other, "outline"):
outline2 == other.outline
- elif hasattr(other, 'draw'):
+ elif hasattr(other, "draw"):
outline2 = self.__class__(other).outline
else:
raise TypeError(type(other).__name__)
@@ -145,9 +151,8 @@ class DummyGlyph(object):
if not arg2 or not isinstance(arg2[0], tuple):
return False
for (x1, y1), (x2, y2) in zip(arg1, arg2):
- if (
- not isclose(x1, x2, rel_tol=rel_tol) or
- not isclose(y1, y2, rel_tol=rel_tol)
+ if not isclose(x1, x2, rel_tol=rel_tol) or not isclose(
+ y1, y2, rel_tol=rel_tol
):
return False
elif arg1 != arg2:
@@ -227,13 +232,16 @@ def _repr_pen_commands(commands):
# cast float to int if there're no digits after decimal point,
# and round floats to 12 decimal digits (more than enough)
args = [
- tuple((int(v) if int(v) == v else round(v, 12)) for v in pt)
+ (
+ tuple((int(v) if int(v) == v else round(v, 12)) for v in pt)
+ if pt is not None
+ else None
+ )
for pt in args
]
args = ", ".join(repr(a) for a in args)
if kwargs:
- kwargs = ", ".join("%s=%r" % (k, v)
- for k, v in sorted(kwargs.items()))
+ kwargs = ", ".join("%s=%r" % (k, v) for k, v in sorted(kwargs.items()))
if args and kwargs:
s.append("pen.%s(%s, %s)" % (cmd, args, kwargs))
elif args:
@@ -246,11 +254,10 @@ def _repr_pen_commands(commands):
class TestDummyGlyph(unittest.TestCase):
-
def test_equal(self):
# verify that the copy and the copy of the copy are equal to
# the source glyph's outline, as well as to each other
- source = CUBIC_GLYPHS['a']
+ source = CUBIC_GLYPHS["a"]
copy = DummyGlyph(source)
copy2 = DummyGlyph(copy)
self.assertEqual(source, copy)
@@ -263,10 +270,9 @@ class TestDummyGlyph(unittest.TestCase):
class TestDummyPointGlyph(unittest.TestCase):
-
def test_equal(self):
# same as above but using the PointPen protocol
- source = CUBIC_GLYPHS['a']
+ source = CUBIC_GLYPHS["a"]
copy = DummyPointGlyph(source)
copy2 = DummyPointGlyph(copy)
self.assertEqual(source, copy)
diff --git a/Tests/qu2cu/data/NotoSansArabic-Regular.quadratic.subset.ttf b/Tests/qu2cu/data/NotoSansArabic-Regular.quadratic.subset.ttf
new file mode 100644
index 00000000..7a318161
--- /dev/null
+++ b/Tests/qu2cu/data/NotoSansArabic-Regular.quadratic.subset.ttf
Binary files differ
diff --git a/Tests/qu2cu/qu2cu_cli_test.py b/Tests/qu2cu/qu2cu_cli_test.py
new file mode 100644
index 00000000..55cd2718
--- /dev/null
+++ b/Tests/qu2cu/qu2cu_cli_test.py
@@ -0,0 +1,62 @@
+import os
+
+import pytest
+import py
+
+from fontTools.qu2cu.cli import main
+from fontTools.ttLib import TTFont
+
+
+DATADIR = os.path.join(os.path.dirname(__file__), "data")
+
+TEST_TTFS = [
+ py.path.local(DATADIR).join("NotoSansArabic-Regular.quadratic.subset.ttf"),
+]
+
+
+@pytest.fixture
+def test_paths(tmpdir):
+ result = []
+ for path in TEST_TTFS:
+ new_path = tmpdir / path.basename
+ path.copy(new_path)
+ result.append(new_path)
+ return result
+
+
+class MainTest(object):
+ @staticmethod
+ def run_main(*args):
+ main([str(p) for p in args if p])
+
+ def test_no_output(self, test_paths):
+ ttf_path = test_paths[0]
+
+ self.run_main(ttf_path)
+
+ output_path = str(ttf_path).replace(".ttf", ".cubic.ttf")
+ font = TTFont(output_path)
+ assert font["head"].glyphDataFormat == 1
+ assert os.stat(ttf_path).st_size > os.stat(output_path).st_size
+
+ def test_output_file(self, test_paths):
+ ttf_path = test_paths[0]
+ output_path = str(ttf_path) + ".cubic"
+
+ self.run_main(ttf_path, "-o", output_path)
+
+ font = TTFont(output_path)
+ assert font["head"].glyphDataFormat == 1
+
+ def test_stats(self, test_paths):
+ ttf_path = test_paths[0]
+ self.run_main(ttf_path, "--verbose")
+
+ def test_all_cubic(self, test_paths):
+ ttf_path = test_paths[0]
+
+ self.run_main(ttf_path, "-c")
+
+ output_path = str(ttf_path).replace(".ttf", ".cubic.ttf")
+ font = TTFont(output_path)
+ assert font["head"].glyphDataFormat == 1
diff --git a/Tests/qu2cu/qu2cu_test.py b/Tests/qu2cu/qu2cu_test.py
new file mode 100644
index 00000000..3ca7ab55
--- /dev/null
+++ b/Tests/qu2cu/qu2cu_test.py
@@ -0,0 +1,104 @@
+# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import pytest
+
+from fontTools.qu2cu import quadratic_to_curves
+from fontTools.qu2cu.qu2cu import main as qu2cu_main
+from fontTools.qu2cu.benchmark import main as benchmark_main
+
+import os
+import json
+from fontTools.cu2qu import curve_to_quadratic
+
+
+class Qu2CuTest:
+ @pytest.mark.parametrize(
+ "quadratics, expected, tolerance, cubic_only",
+ [
+ (
+ [
+ [(0, 0), (0, 1), (2, 1), (2, 0)],
+ ],
+ [
+ ((0, 0), (0, 4 / 3), (2, 4 / 3), (2, 0)),
+ ],
+ 0.1,
+ True,
+ ),
+ (
+ [
+ [(0, 0), (0, 1), (2, 1), (2, 2)],
+ ],
+ [
+ ((0, 0), (0, 4 / 3), (2, 2 / 3), (2, 2)),
+ ],
+ 0.2,
+ True,
+ ),
+ (
+ [
+ [(0, 0), (0, 1), (1, 1)],
+ [(1, 1), (3, 1), (3, 0)],
+ ],
+ [
+ ((0, 0), (0, 1), (1, 1)),
+ ((1, 1), (3, 1), (3, 0)),
+ ],
+ 0.2,
+ False,
+ ),
+ (
+ [
+ [(0, 0), (0, 1), (1, 1)],
+ [(1, 1), (3, 1), (3, 0)],
+ ],
+ [
+ ((0, 0), (0, 2 / 3), (1 / 3, 1), (1, 1)),
+ ((1, 1), (7 / 3, 1), (3, 2 / 3), (3, 0)),
+ ],
+ 0.2,
+ True,
+ ),
+ ],
+ )
+ def test_simple(self, quadratics, expected, tolerance, cubic_only):
+ expected = [
+ tuple((pytest.approx(p[0]), pytest.approx(p[1])) for p in curve)
+ for curve in expected
+ ]
+
+ c = quadratic_to_curves(quadratics, tolerance, cubic_only)
+ assert c == expected
+
+ def test_roundtrip(self):
+ DATADIR = os.path.join(os.path.dirname(__file__), "..", "cu2qu", "data")
+ with open(os.path.join(DATADIR, "curves.json"), "r") as fp:
+ curves = json.load(fp)
+
+ tolerance = 1
+
+ splines = [curve_to_quadratic(c, tolerance) for c in curves]
+ reconsts = [quadratic_to_curves([spline], tolerance) for spline in splines]
+
+ for curve, reconst in zip(curves, reconsts):
+ assert len(reconst) == 1
+ curve = tuple((pytest.approx(p[0]), pytest.approx(p[1])) for p in curve)
+ assert curve == reconst[0]
+
+ def test_main(self):
+ # Just for coverage
+ qu2cu_main()
+ benchmark_main()
diff --git a/Tests/subset/data/NotoSansCJKjp-Regular.subset.ttx b/Tests/subset/data/NotoSansCJKjp-Regular.subset.ttx
new file mode 100644
index 00000000..4dfc0b23
--- /dev/null
+++ b/Tests/subset/data/NotoSansCJKjp-Regular.subset.ttx
@@ -0,0 +1,417 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="OTTO" ttLibVersion="4.43">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="cid01404"/>
+ <GlyphID id="2" name="cid59004"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="2.004"/>
+ <checkSumAdjustment value="0x6e6d05f3"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Thu Apr 29 16:22:51 2021"/>
+ <modified value="Thu Oct 19 10:07:59 2023"/>
+ <xMin value="34"/>
+ <yMin value="-86"/>
+ <xMax value="966"/>
+ <yMax value="846"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="3"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="1160"/>
+ <descent value="-288"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="1000"/>
+ <minLeftSideBearing value="34"/>
+ <minRightSideBearing value="34"/>
+ <xMaxExtent value="966"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="1"/>
+ </hhea>
+
+ <maxp>
+ <tableVersion value="0x5000"/>
+ <numGlyphs value="3"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="3"/>
+ <xAvgCharWidth value="979"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000000"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="50"/>
+ <yStrikeoutPosition value="325"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="2"/>
+ <bSerifStyle value="11"/>
+ <bWeight value="5"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange2 value="00000000 00000001 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="GOOG"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="9001"/>
+ <usLastCharIndex value="12296"/>
+ <sTypoAscender value="880"/>
+ <sTypoDescender value="-120"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="1160"/>
+ <usWinDescent value="288"/>
+ <ulCodePageRange1 value="01100000 00101110 00000001 00000111"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="543"/>
+ <sCapHeight value="733"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="6"/>
+ </OS_2>
+
+ <name>
+ <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409">
+ © 2014-2021 Adobe (http://www.adobe.com/).
+ </namerecord>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Noto Sans CJK JP
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 2.004;GOOG;NotoSansCJKjp-Regular;ADOBE
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Noto Sans CJK JP
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 2.004;hotconv 1.0.118;makeotfexe 2.5.65603
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ NotoSansCJKjp-Regular
+ </namerecord>
+ </name>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x2329" name="cid01404"/><!-- LEFT-POINTING ANGLE BRACKET -->
+ <map code="0x3008" name="cid01404"/><!-- LEFT ANGLE BRACKET -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x2329" name="cid01404"/><!-- LEFT-POINTING ANGLE BRACKET -->
+ <map code="0x3008" name="cid01404"/><!-- LEFT ANGLE BRACKET -->
+ </cmap_format_4>
+ </cmap>
+
+ <post>
+ <formatType value="3.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-125"/>
+ <underlineThickness value="50"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ </post>
+
+ <CFF>
+ <major value="1"/>
+ <minor value="0"/>
+ <CFFFont name="NotoSansCJKjp-Regular">
+ <ROS Registry="Adobe" Order="Identity" Supplement="0"/>
+ <Notice value="Copyright 2014-2021 Adobe (http://www.adobe.com/). Noto is a trademark of Google Inc."/>
+ <FullName value="Noto Sans CJK JP Regular"/>
+ <FamilyName value="Noto Sans CJK JP"/>
+ <Weight value="Regular"/>
+ <isFixedPitch value="0"/>
+ <ItalicAngle value="0"/>
+ <UnderlinePosition value="-150"/>
+ <UnderlineThickness value="50"/>
+ <PaintType value="0"/>
+ <CharstringType value="2"/>
+ <FontMatrix value="0.001 0 0 0.001 0 0"/>
+ <FontBBox value="34 -86 966 846"/>
+ <StrokeWidth value="0"/>
+ <CIDFontVersion value="2.0039999"/>
+ <CIDFontRevision value="0"/>
+ <CIDFontType value="0"/>
+ <CIDCount value="65535"/>
+ <!-- charset is dumped separately as the 'GlyphOrder' element -->
+ <FDSelect format="0"/>
+ <FDArray>
+ <FontDict index="0">
+ <FontName value="NotoSansCJKjp-Regular-Dingbats"/>
+ <Private>
+ <BlueValues value="-1100 -1100 1900 1900"/>
+ <BlueScale value="0.039625"/>
+ <BlueShift value="7"/>
+ <BlueFuzz value="1"/>
+ <StdHW value="66"/>
+ <StdVW value="69"/>
+ <StemSnapH value="32 40 66"/>
+ <StemSnapV value="32 43 69"/>
+ <ForceBold value="0"/>
+ <LanguageGroup value="1"/>
+ <ExpansionFactor value="0.06"/>
+ <initialRandomSeed value="0"/>
+ <defaultWidthX value="1000"/>
+ <nominalWidthX value="107"/>
+ </Private>
+ </FontDict>
+ <FontDict index="1">
+ <FontName value="NotoSansCJKjp-Regular-Generic"/>
+ <Private>
+ <BlueValues value="-250 -250 1100 1100"/>
+ <BlueScale value="0.039625"/>
+ <BlueShift value="7"/>
+ <BlueFuzz value="1"/>
+ <StdHW value="40"/>
+ <StdVW value="40"/>
+ <StemSnapH value="40 120"/>
+ <StemSnapV value="40 120"/>
+ <ForceBold value="0"/>
+ <LanguageGroup value="1"/>
+ <ExpansionFactor value="0.06"/>
+ <initialRandomSeed value="0"/>
+ <defaultWidthX value="1000"/>
+ <nominalWidthX value="107"/>
+ </Private>
+ </FontDict>
+ </FDArray>
+ <CharStrings>
+ <CharString name=".notdef" fdSelectIndex="1">
+ endchar
+ </CharString>
+ <CharString name="cid01404" fdSelectIndex="0">
+ -86 932 hstem
+ 588 360 vstem
+ 948 -57 rmoveto
+ -280 437 280 437 -63 29 -297 -466 297 -466 rlineto
+ endchar
+ </CharString>
+ <CharString name="cid59004" fdSelectIndex="0">
+ -68 360 hstem
+ 34 932 vstem
+ 63 -68 rmoveto
+ 437 280 437 -280 29 63 -466 297 -466 -297 rlineto
+ endchar
+ </CharString>
+ </CharStrings>
+ </CFFFont>
+
+ <GlobalSubrs>
+ <!-- The 'index' attribute is only for humans; it is ignored when parsed. -->
+ </GlobalSubrs>
+ </CFF>
+
+ <GPOS>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=3 -->
+ <FeatureIndex index="0" value="0"/>
+ <FeatureIndex index="1" value="1"/>
+ <FeatureIndex index="2" value="2"/>
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=3 -->
+ <FeatureRecord index="0">
+ <FeatureTag value="halt"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="0"/>
+ </Feature>
+ </FeatureRecord>
+ <FeatureRecord index="1">
+ <FeatureTag value="vhal"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="1"/>
+ </Feature>
+ </FeatureRecord>
+ <FeatureRecord index="2">
+ <FeatureTag value="vpal"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="2"/>
+ </Feature>
+ </FeatureRecord>
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=3 -->
+ <Lookup index="0">
+ <LookupType value="1"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <SinglePos index="0" Format="1">
+ <Coverage>
+ <Glyph value="cid01404"/>
+ </Coverage>
+ <ValueFormat value="5"/>
+ <Value XPlacement="-500" XAdvance="-500"/>
+ </SinglePos>
+ </Lookup>
+ <Lookup index="1">
+ <LookupType value="1"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <SinglePos index="0" Format="1">
+ <Coverage>
+ <Glyph value="cid59004"/>
+ </Coverage>
+ <ValueFormat value="10"/>
+ <Value YPlacement="500" YAdvance="-500"/>
+ </SinglePos>
+ </Lookup>
+ <Lookup index="2">
+ <LookupType value="1"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <SinglePos index="0" Format="1">
+ <Coverage>
+ <Glyph value="cid59004"/>
+ </Coverage>
+ <ValueFormat value="10"/>
+ <Value YPlacement="475" YAdvance="-500"/>
+ </SinglePos>
+ </Lookup>
+ </LookupList>
+ </GPOS>
+
+ <GSUB>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=2 -->
+ <FeatureIndex index="0" value="0"/>
+ <FeatureIndex index="1" value="1"/>
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=2 -->
+ <FeatureRecord index="0">
+ <FeatureTag value="vert"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="0"/>
+ </Feature>
+ </FeatureRecord>
+ <FeatureRecord index="1">
+ <FeatureTag value="vrt2"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="0"/>
+ </Feature>
+ </FeatureRecord>
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=1 -->
+ <Lookup index="0">
+ <LookupType value="1"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <SingleSubst index="0">
+ <Substitution in="cid01404" out="cid59004"/>
+ </SingleSubst>
+ </Lookup>
+ </LookupList>
+ </GSUB>
+
+ <VORG>
+ <majorVersion value="1"/>
+ <minorVersion value="0"/>
+ <defaultVertOriginY value="880"/>
+ <numVertOriginYMetrics value="0"/>
+ </VORG>
+
+ <hmtx>
+ <mtx name=".notdef" width="1000" lsb="100"/>
+ <mtx name="cid01404" width="1000" lsb="588"/>
+ <mtx name="cid59004" width="1000" lsb="34"/>
+ </hmtx>
+
+ <vhea>
+ <tableVersion value="0x00011000"/>
+ <ascent value="500"/>
+ <descent value="-500"/>
+ <lineGap value="0"/>
+ <advanceHeightMax value="1000"/>
+ <minTopSideBearing value="34"/>
+ <minBottomSideBearing value="34"/>
+ <yMaxExtent value="966"/>
+ <caretSlopeRise value="0"/>
+ <caretSlopeRun value="1"/>
+ <caretOffset value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <reserved4 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfVMetrics value="1"/>
+ </vhea>
+
+ <vmtx>
+ <mtx name=".notdef" height="1000" tsb="0"/>
+ <mtx name="cid01404" height="1000" tsb="34"/>
+ <mtx name="cid59004" height="1000" tsb="588"/>
+ </vmtx>
+
+</ttFont>
diff --git a/Tests/subset/data/TestGVAR.ttx b/Tests/subset/data/TestGVAR.ttx
index b14466da..2d2ee1e7 100644
--- a/Tests/subset/data/TestGVAR.ttx
+++ b/Tests/subset/data/TestGVAR.ttx
@@ -378,6 +378,7 @@
</GSUB>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/subset/data/TestHVVAR.ttx b/Tests/subset/data/TestHVVAR.ttx
index 3e746527..5906988e 100644
--- a/Tests/subset/data/TestHVVAR.ttx
+++ b/Tests/subset/data/TestHVVAR.ttx
@@ -406,6 +406,7 @@
</VVAR>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/subset/data/expect_HVVAR.ttx b/Tests/subset/data/expect_HVVAR.ttx
index 5fbc1770..2bd86221 100644
--- a/Tests/subset/data/expect_HVVAR.ttx
+++ b/Tests/subset/data/expect_HVVAR.ttx
@@ -129,6 +129,7 @@
</VVAR>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/subset/data/expect_HVVAR_retain_gids.ttx b/Tests/subset/data/expect_HVVAR_retain_gids.ttx
index 8e51ca7f..e5476170 100644
--- a/Tests/subset/data/expect_HVVAR_retain_gids.ttx
+++ b/Tests/subset/data/expect_HVVAR_retain_gids.ttx
@@ -140,6 +140,7 @@
</VVAR>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/subset/data/expect_keep_gvar.ttx b/Tests/subset/data/expect_keep_gvar.ttx
index 43e4a34a..09c066be 100644
--- a/Tests/subset/data/expect_keep_gvar.ttx
+++ b/Tests/subset/data/expect_keep_gvar.ttx
@@ -9,6 +9,7 @@
</GlyphOrder>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx b/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx
index d4cc79f3..7811b1bb 100644
--- a/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx
+++ b/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx
@@ -8,6 +8,7 @@
</GlyphOrder>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/subset/subset_test.py b/Tests/subset/subset_test.py
index 7efcb698..2b57633c 100644
--- a/Tests/subset/subset_test.py
+++ b/Tests/subset/subset_test.py
@@ -32,17 +32,16 @@ class SubsetTest:
shutil.rmtree(cls.tempdir, ignore_errors=True)
@staticmethod
- def getpath(testfile):
+ def getpath(*testfile):
path, _ = os.path.split(__file__)
- return os.path.join(path, "data", testfile)
+ return os.path.join(path, "data", *testfile)
@classmethod
def temp_path(cls, suffix):
if not cls.tempdir:
cls.tempdir = tempfile.mkdtemp()
cls.num_tempfiles += 1
- return os.path.join(cls.tempdir,
- "tmp%d%s" % (cls.num_tempfiles, suffix))
+ return os.path.join(cls.tempdir, "tmp%d%s" % (cls.num_tempfiles, suffix))
@staticmethod
def read_ttx(path):
@@ -58,7 +57,8 @@ class SubsetTest:
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
- expected, actual, fromfile=expected_ttx, tofile=path):
+ expected, actual, fromfile=expected_ttx, tofile=path
+ ):
sys.stdout.write(line)
pytest.fail("TTX output is different from expected")
@@ -69,40 +69,76 @@ class SubsetTest:
font.save(savepath, reorderTables=None)
return savepath
-# -----
-# Tests
-# -----
+ # -----
+ # Tests
+ # -----
def test_layout_scripts(self):
fontpath = self.compile_font(self.getpath("layout_scripts.ttx"), ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--glyphs=*", "--layout-features=*",
- "--layout-scripts=latn,arab.URD,arab.dflt",
- "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--glyphs=*",
+ "--layout-features=*",
+ "--layout-scripts=latn,arab.URD,arab.dflt",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_layout_scripts.ttx"),
- ["GPOS", "GSUB"])
+ self.expect_ttx(
+ subsetfont, self.getpath("expect_layout_scripts.ttx"), ["GPOS", "GSUB"]
+ )
def test_no_notdef_outline_otf(self):
fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--no-notdef-outline",
+ "--gids=0",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_otf.ttx"), ["CFF "])
+ self.expect_ttx(
+ subsetfont, self.getpath("expect_no_notdef_outline_otf.ttx"), ["CFF "]
+ )
def test_no_notdef_outline_cid(self):
fontpath = self.compile_font(self.getpath("TestCID-Regular.ttx"), ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--no-notdef-outline",
+ "--gids=0",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_cid.ttx"), ["CFF "])
+ self.expect_ttx(
+ subsetfont, self.getpath("expect_no_notdef_outline_cid.ttx"), ["CFF "]
+ )
def test_no_notdef_outline_ttf(self):
fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--no-notdef-outline",
+ "--gids=0",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_ttf.ttx"), ["glyf", "hmtx"])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_no_notdef_outline_ttf.ttx"),
+ ["glyf", "hmtx"],
+ )
def test_subset_ankr(self):
fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf")
@@ -133,8 +169,9 @@ class SubsetTest:
# we expect a format 0 'bsln' table because it is the most compact.
fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0030-0031",
- "--output-file=%s" % subsetpath])
+ subset.main(
+ [fontpath, "--unicodes=U+0030-0031", "--output-file=%s" % subsetpath]
+ )
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"])
@@ -149,8 +186,9 @@ class SubsetTest:
# for uni2EA2.
fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
- "--output-file=%s" % subsetpath])
+ subset.main(
+ [fontpath, "--unicodes=U+0030-0031,U+2EA2", "--output-file=%s" % subsetpath]
+ )
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_1.ttx"), ["bsln"])
@@ -174,8 +212,7 @@ class SubsetTest:
# is the most compact encoding.
fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0030",
- "--output-file=%s" % subsetpath])
+ subset.main([fontpath, "--unicodes=U+0030", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"])
@@ -190,8 +227,9 @@ class SubsetTest:
# for uni2EA2.
fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
- "--output-file=%s" % subsetpath])
+ subset.main(
+ [fontpath, "--unicodes=U+0030-0031,U+2EA2", "--output-file=%s" % subsetpath]
+ )
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_3.ttx"), ["bsln"])
@@ -200,21 +238,42 @@ class SubsetTest:
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=smileface", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_keep_colr.ttx"), ["GlyphOrder", "hmtx", "glyf", "COLR", "CPAL"])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_keep_colr.ttx"),
+ ["GlyphOrder", "hmtx", "glyf", "COLR", "CPAL"],
+ )
def test_subset_gvar(self):
fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+002B,U+2212", "--output-file=%s" % subsetpath])
+ subset.main(
+ [fontpath, "--unicodes=U+002B,U+2212", "--output-file=%s" % subsetpath]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_keep_gvar.ttx"),
+ ["GlyphOrder", "avar", "fvar", "gvar", "name"],
+ )
def test_subset_gvar_notdef_outline(self):
fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0030", "--notdef_outline", "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--unicodes=U+0030",
+ "--notdef_outline",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar_notdef_outline.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_keep_gvar_notdef_outline.ttx"),
+ ["GlyphOrder", "avar", "fvar", "gvar", "name"],
+ )
def test_subset_lcar_remove(self):
fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
@@ -226,25 +285,33 @@ class SubsetTest:
def test_subset_lcar_format_0(self):
fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+FB01",
- "--output-file=%s" % subsetpath])
+ subset.main([fontpath, "--unicodes=U+FB01", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_lcar_0.ttx"), ["lcar"])
def test_subset_lcar_format_1(self):
fontpath = self.compile_font(self.getpath("TestLCAR-1.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+FB01",
- "--output-file=%s" % subsetpath])
+ subset.main([fontpath, "--unicodes=U+FB01", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_lcar_1.ttx"), ["lcar"])
def test_subset_math(self):
fontpath = self.compile_font(self.getpath("TestMATH-Regular.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0041,U+0028,U+0302,U+1D400,U+1D435", "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--unicodes=U+0041,U+0028,U+0302,U+1D400,U+1D435",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_keep_math.ttx"), ["GlyphOrder", "CFF ", "MATH", "hmtx"])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_keep_math.ttx"),
+ ["GlyphOrder", "CFF ", "MATH", "hmtx"],
+ )
def test_subset_math_partial(self):
fontpath = self.compile_font(self.getpath("test_math_partial.ttx"), ".ttf")
@@ -283,8 +350,7 @@ class SubsetTest:
# the "prop" table should be removed from the subsetted font.
fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0041",
- "--output-file=%s" % subsetpath])
+ subset.main([fontpath, "--unicodes=U+0041", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
assert "prop" not in subsetfont
@@ -297,8 +363,14 @@ class SubsetTest:
# tested above in test_subset_prop_remove_default_zero().
fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0030-0032", "--no-notdef-glyph",
- "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--unicodes=U+0030-0032",
+ "--no-notdef-glyph",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_prop_0.ttx"), ["prop"])
@@ -308,19 +380,25 @@ class SubsetTest:
# DefaultProperties should be set to the most frequent value.
fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--unicodes=U+0030-0032", "--notdef-outline",
- "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--unicodes=U+0030-0032",
+ "--notdef-outline",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_prop_1.ttx"), ["prop"])
def test_options(self):
# https://github.com/fonttools/fonttools/issues/413
opt1 = subset.Options()
- assert 'Xyz-' not in opt1.layout_features
+ assert "Xyz-" not in opt1.layout_features
opt2 = subset.Options()
- opt2.layout_features.append('Xyz-')
- assert 'Xyz-' in opt2.layout_features
- assert 'Xyz-' not in opt1.layout_features
+ opt2.layout_features.append("Xyz-")
+ assert "Xyz-" in opt2.layout_features
+ assert "Xyz-" not in opt1.layout_features
def test_google_color(self):
fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
@@ -329,24 +407,35 @@ class SubsetTest:
subsetfont = TTFont(subsetpath)
assert "CBDT" in subsetfont
assert "CBLC" in subsetfont
- assert "x" in subsetfont['CBDT'].strikeData[0]
- assert "y" not in subsetfont['CBDT'].strikeData[0]
+ assert "x" in subsetfont["CBDT"].strikeData[0]
+ assert "y" not in subsetfont["CBDT"].strikeData[0]
def test_google_color_all(self):
fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=*", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
- assert "x" in subsetfont['CBDT'].strikeData[0]
- assert "y" in subsetfont['CBDT'].strikeData[0]
+ assert "x" in subsetfont["CBDT"].strikeData[0]
+ assert "y" in subsetfont["CBDT"].strikeData[0]
def test_sbix(self):
fontpath = self.compile_font(self.getpath("sbix.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--gids=0,1", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath(
- "expect_sbix.ttx"), ["sbix"])
+ self.expect_ttx(subsetfont, self.getpath("expect_sbix.ttx"), ["sbix"])
+
+ def test_varComposite(self):
+ fontpath = self.getpath("..", "..", "ttLib", "data", "varc-ac00-ac01.ttf")
+ origfont = TTFont(fontpath)
+ assert len(origfont.getGlyphOrder()) == 6
+ subsetpath = self.temp_path(".ttf")
+ subset.main([fontpath, "--unicodes=ac00", "--output-file=%s" % subsetpath])
+ subsetfont = TTFont(subsetpath)
+ assert len(subsetfont.getGlyphOrder()) == 4
+ subset.main([fontpath, "--unicodes=ac01", "--output-file=%s" % subsetpath])
+ subsetfont = TTFont(subsetpath)
+ assert len(subsetfont.getGlyphOrder()) == 5
def test_timing_publishes_parts(self):
fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
@@ -354,25 +443,27 @@ class SubsetTest:
options = subset.Options()
options.timing = True
subsetter = subset.Subsetter(options)
- subsetter.populate(text='ABC')
+ subsetter.populate(text="ABC")
font = TTFont(fontpath)
- with CapturingLogHandler('fontTools.subset.timer', logging.DEBUG) as captor:
+ with CapturingLogHandler("fontTools.subset.timer", logging.DEBUG) as captor:
subsetter.subset(font)
logs = captor.records
assert len(logs) > 5
- assert len(logs) == len([l for l in logs if 'msg' in l.args and 'time' in l.args])
+ assert len(logs) == len(
+ [l for l in logs if "msg" in l.args and "time" in l.args]
+ )
# Look for a few things we know should happen
- assert filter(lambda l: l.args['msg'] == "load 'cmap'", logs)
- assert filter(lambda l: l.args['msg'] == "subset 'cmap'", logs)
- assert filter(lambda l: l.args['msg'] == "subset 'glyf'", logs)
+ assert filter(lambda l: l.args["msg"] == "load 'cmap'", logs)
+ assert filter(lambda l: l.args["msg"] == "subset 'cmap'", logs)
+ assert filter(lambda l: l.args["msg"] == "subset 'glyf'", logs)
def test_passthrough_tables(self):
fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
font = TTFont(fontpath)
- unknown_tag = 'ZZZZ'
+ unknown_tag = "ZZZZ"
unknown_table = newTable(unknown_tag)
- unknown_table.data = b'\0'*10
+ unknown_table.data = b"\0" * 10
font[unknown_tag] = unknown_table
font.save(fontpath)
@@ -392,92 +483,144 @@ class SubsetTest:
def test_non_BMP_text_arg_input(self):
fontpath = self.compile_font(
- self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
+ self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf"
+ )
subsetpath = self.temp_path(".ttf")
- text = tostr(u"A\U0001F6D2", encoding='utf-8')
+ text = tostr("A\U0001F6D2", encoding="utf-8")
subset.main([fontpath, "--text=%s" % text, "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
- assert subsetfont['maxp'].numGlyphs == 3
- assert subsetfont.getGlyphOrder() == ['.notdef', 'A', 'u1F6D2']
+ assert subsetfont["maxp"].numGlyphs == 3
+ assert subsetfont.getGlyphOrder() == [".notdef", "A", "u1F6D2"]
def test_non_BMP_text_file_input(self):
fontpath = self.compile_font(
- self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
+ self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf"
+ )
subsetpath = self.temp_path(".ttf")
- text = tobytes(u"A\U0001F6D2", encoding='utf-8')
+ text = tobytes("A\U0001F6D2", encoding="utf-8")
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.write(text)
try:
- subset.main([fontpath, "--text-file=%s" % tmp.name,
- "--output-file=%s" % subsetpath])
+ subset.main(
+ [fontpath, "--text-file=%s" % tmp.name, "--output-file=%s" % subsetpath]
+ )
subsetfont = TTFont(subsetpath)
finally:
os.remove(tmp.name)
- assert subsetfont['maxp'].numGlyphs == 3
- assert subsetfont.getGlyphOrder() == ['.notdef', 'A', 'u1F6D2']
+ assert subsetfont["maxp"].numGlyphs == 3
+ assert subsetfont.getGlyphOrder() == [".notdef", "A", "u1F6D2"]
def test_no_hinting_CFF(self):
ttxpath = self.getpath("Lobster.subset.ttx")
fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--no-hinting", "--notdef-outline",
- "--output-file=%s" % subsetpath, "*"])
+ subset.main(
+ [
+ fontpath,
+ "--no-hinting",
+ "--notdef-outline",
+ "--output-file=%s" % subsetpath,
+ "*",
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath(
- "expect_no_hinting_CFF.ttx"), ["CFF "])
+ self.expect_ttx(subsetfont, self.getpath("expect_no_hinting_CFF.ttx"), ["CFF "])
def test_desubroutinize_CFF(self):
ttxpath = self.getpath("Lobster.subset.ttx")
fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--desubroutinize", "--notdef-outline",
- "--output-file=%s" % subsetpath, "*"])
+ subset.main(
+ [
+ fontpath,
+ "--desubroutinize",
+ "--notdef-outline",
+ "--output-file=%s" % subsetpath,
+ "*",
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath(
- "expect_desubroutinize_CFF.ttx"), ["CFF "])
+ self.expect_ttx(
+ subsetfont, self.getpath("expect_desubroutinize_CFF.ttx"), ["CFF "]
+ )
def test_desubroutinize_hinted_subrs_CFF(self):
ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--desubroutinize", "--notdef-outline",
- "--output-file=%s" % subsetpath, "*"])
+ subset.main(
+ [
+ fontpath,
+ "--desubroutinize",
+ "--notdef-outline",
+ "--output-file=%s" % subsetpath,
+ "*",
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath(
- "test_hinted_subrs_CFF.desub.ttx"), ["CFF "])
+ self.expect_ttx(
+ subsetfont, self.getpath("test_hinted_subrs_CFF.desub.ttx"), ["CFF "]
+ )
def test_desubroutinize_cntrmask_CFF(self):
ttxpath = self.getpath("test_cntrmask_CFF.ttx")
fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--desubroutinize", "--notdef-outline",
- "--output-file=%s" % subsetpath, "*"])
+ subset.main(
+ [
+ fontpath,
+ "--desubroutinize",
+ "--notdef-outline",
+ "--output-file=%s" % subsetpath,
+ "*",
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath(
- "test_cntrmask_CFF.desub.ttx"), ["CFF "])
+ self.expect_ttx(
+ subsetfont, self.getpath("test_cntrmask_CFF.desub.ttx"), ["CFF "]
+ )
def test_no_hinting_desubroutinize_CFF(self):
ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--no-hinting", "--desubroutinize", "--notdef-outline",
- "--output-file=%s" % subsetpath, "*"])
+ subset.main(
+ [
+ fontpath,
+ "--no-hinting",
+ "--desubroutinize",
+ "--notdef-outline",
+ "--output-file=%s" % subsetpath,
+ "*",
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath(
- "expect_no_hinting_desubroutinize_CFF.ttx"), ["CFF "])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_no_hinting_desubroutinize_CFF.ttx"),
+ ["CFF "],
+ )
def test_no_hinting_TTF(self):
fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--no-hinting", "--notdef-outline",
- "--output-file=%s" % subsetpath, "*"])
+ subset.main(
+ [
+ fontpath,
+ "--no-hinting",
+ "--notdef-outline",
+ "--output-file=%s" % subsetpath,
+ "*",
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath(
- "expect_no_hinting_TTF.ttx"), ["glyf", "maxp"])
+ self.expect_ttx(
+ subsetfont, self.getpath("expect_no_hinting_TTF.ttx"), ["glyf", "maxp"]
+ )
for tag in subset.Options().hinting_tables:
assert tag not in subsetfont
@@ -485,15 +628,24 @@ class SubsetTest:
# https://github.com/fonttools/fonttools/pull/845
fontpath = self.compile_font(self.getpath("NotdefWidthCID-Regular.ttx"), ".otf")
subsetpath = self.temp_path(".otf")
- subset.main([fontpath, "--no-notdef-outline", "--gids=0,1", "--output-file=%s" % subsetpath])
+ subset.main(
+ [
+ fontpath,
+ "--no-notdef-outline",
+ "--gids=0,1",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_notdef_width_cid.ttx"), ["CFF "])
+ self.expect_ttx(
+ subsetfont, self.getpath("expect_notdef_width_cid.ttx"), ["CFF "]
+ )
def test_recalc_bounds_ttf(self):
ttxpath = self.getpath("TestTTF-Regular.ttx")
font = TTFont()
font.importXML(ttxpath)
- head = font['head']
+ head = font["head"]
bounds = [head.xMin, head.yMin, head.xMax, head.yMax]
fontpath = self.compile_font(ttxpath, ".ttf")
@@ -501,11 +653,11 @@ class SubsetTest:
# by default, the subsetter does not recalculate the bounding box
subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
- head = TTFont(subsetpath)['head']
+ head = TTFont(subsetpath)["head"]
assert bounds == [head.xMin, head.yMin, head.xMax, head.yMax]
subset.main([fontpath, "--recalc-bounds", "--output-file=%s" % subsetpath, "*"])
- head = TTFont(subsetpath)['head']
+ head = TTFont(subsetpath)["head"]
bounds = [132, 304, 365, 567]
assert bounds == [head.xMin, head.yMin, head.xMax, head.yMax]
@@ -513,7 +665,7 @@ class SubsetTest:
ttxpath = self.getpath("TestOTF-Regular.ttx")
font = TTFont()
font.importXML(ttxpath)
- head = font['head']
+ head = font["head"]
bounds = [head.xMin, head.yMin, head.xMax, head.yMax]
fontpath = self.compile_font(ttxpath, ".otf")
@@ -521,11 +673,11 @@ class SubsetTest:
# by default, the subsetter does not recalculate the bounding box
subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
- head = TTFont(subsetpath)['head']
+ head = TTFont(subsetpath)["head"]
assert bounds == [head.xMin, head.yMin, head.xMax, head.yMax]
subset.main([fontpath, "--recalc-bounds", "--output-file=%s" % subsetpath, "*"])
- head = TTFont(subsetpath)['head']
+ head = TTFont(subsetpath)["head"]
bounds = [132, 304, 365, 567]
assert bounds == [head.xMin, head.yMin, head.xMax, head.yMax]
@@ -533,49 +685,59 @@ class SubsetTest:
ttxpath = self.getpath("TestTTF-Regular.ttx")
font = TTFont()
font.importXML(ttxpath)
- modified = font['head'].modified
+ modified = font["head"].modified
fontpath = self.compile_font(ttxpath, ".ttf")
subsetpath = self.temp_path(".ttf")
# by default, the subsetter does not recalculate the modified timestamp
subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
- assert modified == TTFont(subsetpath)['head'].modified
+ assert modified == TTFont(subsetpath)["head"].modified
- subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
- assert modified < TTFont(subsetpath)['head'].modified
+ subset.main(
+ [fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"]
+ )
+ assert modified < TTFont(subsetpath)["head"].modified
def test_recalc_timestamp_otf(self):
ttxpath = self.getpath("TestOTF-Regular.ttx")
font = TTFont()
font.importXML(ttxpath)
- modified = font['head'].modified
+ modified = font["head"].modified
fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
# by default, the subsetter does not recalculate the modified timestamp
subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
- assert modified == TTFont(subsetpath)['head'].modified
+ assert modified == TTFont(subsetpath)["head"].modified
- subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
- assert modified < TTFont(subsetpath)['head'].modified
+ subset.main(
+ [fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"]
+ )
+ assert modified < TTFont(subsetpath)["head"].modified
def test_recalc_max_context(self):
ttxpath = self.getpath("Lobster.subset.ttx")
font = TTFont()
font.importXML(ttxpath)
- max_context = font['OS/2'].usMaxContext
+ max_context = font["OS/2"].usMaxContext
fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
# by default, the subsetter does not recalculate the usMaxContext
- subset.main([fontpath, "--drop-tables+=GSUB,GPOS",
- "--output-file=%s" % subsetpath])
- assert max_context == TTFont(subsetpath)['OS/2'].usMaxContext
+ subset.main(
+ [fontpath, "--drop-tables+=GSUB,GPOS", "--output-file=%s" % subsetpath]
+ )
+ assert max_context == TTFont(subsetpath)["OS/2"].usMaxContext
- subset.main([fontpath, "--recalc-max-context",
- "--drop-tables+=GSUB,GPOS",
- "--output-file=%s" % subsetpath])
- assert 0 == TTFont(subsetpath)['OS/2'].usMaxContext
+ subset.main(
+ [
+ fontpath,
+ "--recalc-max-context",
+ "--drop-tables+=GSUB,GPOS",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
+ assert 0 == TTFont(subsetpath)["OS/2"].usMaxContext
def test_retain_gids_ttf(self):
fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
@@ -648,7 +810,9 @@ class SubsetTest:
assert len(cs["B"].program) > 0
def test_retain_gids_cff2(self):
- ttx_path = self.getpath("../../varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx")
+ ttx_path = self.getpath(
+ "../../varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx"
+ )
fontpath = self.compile_font(ttx_path, ".otf")
font = TTFont(fontpath)
@@ -687,20 +851,29 @@ class SubsetTest:
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--text=BD", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_HVVAR.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_HVVAR.ttx"),
+ ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"],
+ )
def test_HVAR_VVAR_retain_gids(self):
fontpath = self.compile_font(self.getpath("TestHVVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
- subset.main([fontpath, "--text=BD", "--retain-gids", "--output-file=%s" % subsetpath])
+ subset.main(
+ [fontpath, "--text=BD", "--retain-gids", "--output-file=%s" % subsetpath]
+ )
subsetfont = TTFont(subsetpath)
- self.expect_ttx(subsetfont, self.getpath("expect_HVVAR_retain_gids.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])
+ self.expect_ttx(
+ subsetfont,
+ self.getpath("expect_HVVAR_retain_gids.ttx"),
+ ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"],
+ )
- def test_subset_flavor(self):
+ def test_subset_flavor_woff(self):
fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
- font = TTFont(fontpath)
-
woff_path = self.temp_path(".woff")
+
subset.main(
[
fontpath,
@@ -713,10 +886,16 @@ class SubsetTest:
assert woff.flavor == "woff"
+ def test_subset_flavor_woff2(self):
+ # skip if brotli is not importable, required for woff2
+ pytest.importorskip("brotli")
+
+ fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
woff2_path = self.temp_path(".woff2")
+
subset.main(
[
- woff_path,
+ fontpath,
"*",
"--flavor=woff2",
"--output-file=%s" % woff2_path,
@@ -726,10 +905,13 @@ class SubsetTest:
assert woff2.flavor == "woff2"
+ def test_subset_flavor_none(self):
+ fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
ttf_path = self.temp_path(".ttf")
+
subset.main(
[
- woff2_path,
+ fontpath,
"*",
"--output-file=%s" % ttf_path,
]
@@ -885,12 +1067,33 @@ class SubsetTest:
# test we emit a log.error if hb.repack fails (and we don't if successful)
assert (
- (
+ (
"hb.repack failed to serialize 'GSUB', attempting fonttools resolutions "
"; the error message was: RepackerError: mocking"
- ) in caplog.text
+ )
+ in caplog.text
) ^ ok
+ def test_retain_east_asian_spacing_features(self):
+ # This test font contains halt and vhal features, check that
+ # they are retained by default after subsetting.
+ ttx_path = self.getpath("NotoSansCJKjp-Regular.subset.ttx")
+ ttx = pathlib.Path(ttx_path).read_text()
+ assert 'FeatureTag value="halt"' in ttx
+ assert 'FeatureTag value="vhal"' in ttx
+
+ fontpath = self.compile_font(ttx_path, ".otf")
+ subsetpath = self.temp_path(".otf")
+ subset.main(
+ [
+ fontpath,
+ "--unicodes=*",
+ "--output-file=%s" % subsetpath,
+ ]
+ )
+ # subset output is the same as the input
+ self.expect_ttx(TTFont(subsetpath), ttx_path)
+
@pytest.fixture
def featureVarsTestFont():
@@ -900,14 +1103,15 @@ def featureVarsTestFont():
fb.setupNameTable({"familyName": "TestFeatureVars", "styleName": "Regular"})
fb.setupPost()
fb.setupFvar(axes=[("wght", 100, 400, 900, "Weight")], instances=[])
- fb.addOpenTypeFeatures("""\
+ fb.addOpenTypeFeatures(
+ """\
feature dlig {
sub f f by f_f;
} dlig;
- """)
+ """
+ )
fb.addFeatureVariations(
- [([{"wght": (0.20886, 1.0)}], {"dollar": "dollar.rvrn"})],
- featureTag="rvrn"
+ [([{"wght": (0.20886, 1.0)}], {"dollar": "dollar.rvrn"})], featureTag="rvrn"
)
buf = io.BytesIO()
fb.save(buf)
@@ -924,9 +1128,7 @@ def test_subset_feature_variations_keep_all(featureVarsTestFont):
subsetter.populate(unicodes=[ord("f"), ord("$")])
subsetter.subset(font)
- featureTags = {
- r.FeatureTag for r in font["GSUB"].table.FeatureList.FeatureRecord
- }
+ featureTags = {r.FeatureTag for r in font["GSUB"].table.FeatureList.FeatureRecord}
# 'dlig' is discretionary so it is dropped by default
assert "dlig" not in featureTags
assert "f_f" not in font.getGlyphOrder()
@@ -944,9 +1146,7 @@ def test_subset_feature_variations_drop_all(featureVarsTestFont):
subsetter.populate(unicodes=[ord("f"), ord("$")])
subsetter.subset(font)
- featureTags = {
- r.FeatureTag for r in font["GSUB"].table.FeatureList.FeatureRecord
- }
+ featureTags = {r.FeatureTag for r in font["GSUB"].table.FeatureList.FeatureRecord}
glyphs = set(font.getGlyphOrder())
assert "rvrn" not in featureTags
@@ -967,13 +1167,15 @@ def singlepos2_font():
fb.setupCharacterMap({ord("a"): "a", ord("b"): "b", ord("c"): "c"})
fb.setupNameTable({"familyName": "TestSingePosFormat", "styleName": "Regular"})
fb.setupPost()
- fb.addOpenTypeFeatures("""
+ fb.addOpenTypeFeatures(
+ """
feature kern {
pos a -50;
pos b -40;
pos c -50;
} kern;
- """)
+ """
+ )
buf = io.BytesIO()
fb.save(buf)
@@ -987,23 +1189,23 @@ def test_subset_single_pos_format(singlepos2_font):
# The input font has a SinglePos Format 2 subtable where each glyph has
# different ValueRecords
assert getXML(font["GPOS"].table.LookupList.Lookup[0].toXML, font) == [
- '<Lookup>',
+ "<Lookup>",
' <LookupType value="1"/>',
' <LookupFlag value="0"/>',
- ' <!-- SubTableCount=1 -->',
+ " <!-- SubTableCount=1 -->",
' <SinglePos index="0" Format="2">',
- ' <Coverage>',
+ " <Coverage>",
' <Glyph value="a"/>',
' <Glyph value="b"/>',
' <Glyph value="c"/>',
- ' </Coverage>',
+ " </Coverage>",
' <ValueFormat value="4"/>',
- ' <!-- ValueCount=3 -->',
+ " <!-- ValueCount=3 -->",
' <Value index="0" XAdvance="-50"/>',
' <Value index="1" XAdvance="-40"/>',
' <Value index="2" XAdvance="-50"/>',
- ' </SinglePos>',
- '</Lookup>',
+ " </SinglePos>",
+ "</Lookup>",
]
options = subset.Options()
@@ -1014,21 +1216,22 @@ def test_subset_single_pos_format(singlepos2_font):
# All the subsetted glyphs from the original SinglePos Format2 subtable
# now have the same ValueRecord, so we use a more compact Format 1 subtable.
assert getXML(font["GPOS"].table.LookupList.Lookup[0].toXML, font) == [
- '<Lookup>',
+ "<Lookup>",
' <LookupType value="1"/>',
' <LookupFlag value="0"/>',
- ' <!-- SubTableCount=1 -->',
+ " <!-- SubTableCount=1 -->",
' <SinglePos index="0" Format="1">',
- ' <Coverage>',
+ " <Coverage>",
' <Glyph value="a"/>',
' <Glyph value="c"/>',
- ' </Coverage>',
+ " </Coverage>",
' <ValueFormat value="4"/>',
' <Value XAdvance="-50"/>',
- ' </SinglePos>',
- '</Lookup>',
+ " </SinglePos>",
+ "</Lookup>",
]
+
def test_subset_single_pos_format2_all_None(singlepos2_font):
# https://github.com/fonttools/fonttools/issues/2602
font = singlepos2_font
@@ -1043,14 +1246,14 @@ def test_subset_single_pos_format2_all_None(singlepos2_font):
assert getXML(subtable.toXML, font) == [
'<SinglePos Format="2">',
- ' <Coverage>',
+ " <Coverage>",
' <Glyph value="a"/>',
' <Glyph value="b"/>',
' <Glyph value="c"/>',
- ' </Coverage>',
+ " </Coverage>",
' <ValueFormat value="0"/>',
- ' <!-- ValueCount=3 -->',
- '</SinglePos>',
+ " <!-- ValueCount=3 -->",
+ "</SinglePos>",
]
options = subset.Options()
@@ -1061,12 +1264,12 @@ def test_subset_single_pos_format2_all_None(singlepos2_font):
# Check it was downgraded to Format1 after subsetting
assert getXML(font["GPOS"].table.LookupList.Lookup[0].SubTable[0].toXML, font) == [
'<SinglePos Format="1">',
- ' <Coverage>',
+ " <Coverage>",
' <Glyph value="a"/>',
' <Glyph value="c"/>',
- ' </Coverage>',
+ " </Coverage>",
' <ValueFormat value="0"/>',
- '</SinglePos>',
+ "</SinglePos>",
]
@@ -1096,7 +1299,7 @@ def test_subset_empty_glyf(tmp_path, ttf_path):
subset_font = TTFont(subset_path)
assert subset_font.getGlyphOrder() == [".notdef", "space"]
- assert subset_font.reader['glyf'] == b"\x00"
+ assert subset_font.reader["glyf"] == b"\x00"
glyf = subset_font["glyf"]
assert all(glyf[g].numberOfContours == 0 for g in subset_font.getGlyphOrder())
@@ -1225,8 +1428,8 @@ def colrv1_path(tmp_path):
clipBoxes={
"uniE000": (0, 0, 200, 300),
"uniE001": (0, 0, 500, 500),
- "uniE002": (100, 100, 400, 400),
- "uniE003": (-50, -50, 350, 350),
+ "uniE002": (-50, -50, 400, 400),
+ "uniE003": (-50, -50, 400, 400),
},
)
fb.setupCPAL(
@@ -1245,6 +1448,54 @@ def colrv1_path(tmp_path):
return output_path
+@pytest.fixture
+def colrv1_cpalv1_path(colrv1_path):
+ # upgrade CPAL from v0 to v1 by adding labels
+ font = TTFont(colrv1_path)
+ fb = FontBuilder(font=font)
+ fb.setupCPAL(
+ [
+ [
+ (1.0, 0.0, 0.0, 1.0), # red
+ (0.0, 1.0, 0.0, 1.0), # green
+ (0.0, 0.0, 1.0, 1.0), # blue
+ ],
+ ],
+ paletteLabels=["test palette"],
+ paletteEntryLabels=["first color", "second color", "third color"],
+ )
+
+ output_path = colrv1_path.parent / "TestCOLRv1CPALv1.ttf"
+ fb.save(output_path)
+
+ return output_path
+
+
+@pytest.fixture
+def colrv1_cpalv1_share_nameID_path(colrv1_path):
+ font = TTFont(colrv1_path)
+ fb = FontBuilder(font=font)
+ fb.setupCPAL(
+ [
+ [
+ (1.0, 0.0, 0.0, 1.0), # red
+ (0.0, 1.0, 0.0, 1.0), # green
+ (0.0, 0.0, 1.0, 1.0), # blue
+ ],
+ ],
+ paletteLabels=["test palette"],
+ paletteEntryLabels=["first color", "second color", "third color"],
+ )
+
+ # Set the name ID of the first color to use nameID 1 = familyName = "TestCOLRv1"
+ fb.font["CPAL"].paletteEntryLabels[0] = 1
+
+ output_path = colrv1_path.parent / "TestCOLRv1CPALv1.ttf"
+ fb.save(output_path)
+
+ return output_path
+
+
def test_subset_COLRv1_and_CPAL(colrv1_path):
subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
@@ -1290,7 +1541,7 @@ def test_subset_COLRv1_and_CPAL(colrv1_path):
base = colr.BaseGlyphList.BaseGlyphPaintRecord[0]
assert base.BaseGlyph == "uniE001"
layers = colr.LayerList.Paint[
- base.Paint.FirstLayerIndex: base.Paint.FirstLayerIndex + base.Paint.NumLayers
+ base.Paint.FirstLayerIndex : base.Paint.FirstLayerIndex + base.Paint.NumLayers
]
assert len(layers) == 2
# check v1 palette indices were remapped
@@ -1309,6 +1560,7 @@ def test_subset_COLRv1_and_CPAL(colrv1_path):
clipBoxes = colr.ClipList.clips
assert {"uniE001", "uniE002", "uniE003"} == set(clipBoxes)
+ assert clipBoxes["uniE002"] == clipBoxes["uniE003"]
assert "CPAL" in subset_font
cpal = subset_font["CPAL"]
@@ -1322,6 +1574,110 @@ def test_subset_COLRv1_and_CPAL(colrv1_path):
]
+def test_subset_COLRv1_and_CPALv1(colrv1_cpalv1_path):
+ subset_path = colrv1_cpalv1_path.parent / (colrv1_cpalv1_path.name + ".subset")
+
+ subset.main(
+ [
+ str(colrv1_cpalv1_path),
+ "--glyph-names",
+ f"--output-file={subset_path}",
+ "--unicodes=E002,E003,E004",
+ ]
+ )
+ subset_font = TTFont(subset_path)
+
+ assert "CPAL" in subset_font
+ cpal = subset_font["CPAL"]
+ name_table = subset_font["name"]
+ assert [
+ name_table.getDebugName(name_id) for name_id in cpal.paletteEntryLabels
+ ] == [
+ # "first color", # The first color was pruned
+ "second color",
+ "third color",
+ ]
+ # check that the "first color" name is dropped from name table
+ font = TTFont(colrv1_cpalv1_path)
+
+ first_color_nameID = None
+ for n in font["name"].names:
+ if n.toUnicode() == "first color":
+ first_color_nameID = n.nameID
+ break
+ assert first_color_nameID is not None
+ assert all(n.nameID != first_color_nameID for n in name_table.names)
+
+
+def test_subset_COLRv1_and_CPALv1_keep_nameID(colrv1_cpalv1_path):
+ subset_path = colrv1_cpalv1_path.parent / (colrv1_cpalv1_path.name + ".subset")
+
+ # figure out the name ID of first color so we can keep it
+ font = TTFont(colrv1_cpalv1_path)
+
+ first_color_nameID = None
+ for n in font["name"].names:
+ if n.toUnicode() == "first color":
+ first_color_nameID = n.nameID
+ break
+ assert first_color_nameID is not None
+
+ subset.main(
+ [
+ str(colrv1_cpalv1_path),
+ "--glyph-names",
+ f"--output-file={subset_path}",
+ "--unicodes=E002,E003,E004",
+ f"--name-IDs={first_color_nameID}",
+ ]
+ )
+ subset_font = TTFont(subset_path)
+
+ assert "CPAL" in subset_font
+ cpal = subset_font["CPAL"]
+ name_table = subset_font["name"]
+ assert [
+ name_table.getDebugName(name_id) for name_id in cpal.paletteEntryLabels
+ ] == [
+ # "first color", # The first color was pruned
+ "second color",
+ "third color",
+ ]
+
+ # Check that the name ID is kept
+ assert any(n.nameID == first_color_nameID for n in name_table.names)
+
+
+def test_subset_COLRv1_and_CPALv1_share_nameID(colrv1_cpalv1_share_nameID_path):
+ subset_path = colrv1_cpalv1_share_nameID_path.parent / (
+ colrv1_cpalv1_share_nameID_path.name + ".subset"
+ )
+
+ subset.main(
+ [
+ str(colrv1_cpalv1_share_nameID_path),
+ "--glyph-names",
+ f"--output-file={subset_path}",
+ "--unicodes=E002,E003,E004",
+ ]
+ )
+ subset_font = TTFont(subset_path)
+
+ assert "CPAL" in subset_font
+ cpal = subset_font["CPAL"]
+ name_table = subset_font["name"]
+ assert [
+ name_table.getDebugName(name_id) for name_id in cpal.paletteEntryLabels
+ ] == [
+ # "first color", # The first color was pruned
+ "second color",
+ "third color",
+ ]
+
+ # Check that the name ID 1 is kept
+ assert any(n.nameID == 1 for n in name_table.names)
+
+
def test_subset_COLRv1_and_CPAL_drop_empty(colrv1_path):
subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
@@ -1427,7 +1783,8 @@ def test_subset_keep_size_drop_empty_stylistic_set():
fb.setupOS2()
fb.setupPost()
fb.setupNameTable({"familyName": "TestKeepSizeFeature", "styleName": "Regular"})
- fb.addOpenTypeFeatures("""
+ fb.addOpenTypeFeatures(
+ """
feature size {
parameters 10.0 0;
} size;
@@ -1437,7 +1794,8 @@ def test_subset_keep_size_drop_empty_stylistic_set():
};
sub b by b.ss01;
} ss01;
- """)
+ """
+ )
buf = io.BytesIO()
fb.save(buf)
@@ -1529,6 +1887,139 @@ def test_subset_COLR_glyph_closure(tmp_path):
assert "grave" not in color_layers
+def test_subset_recalc_xAvgCharWidth(ttf_path):
+ # Note that the font in in the *ttLib*/data/TestTTF-Regular.ttx file,
+ # not this subset/data folder.
+ font = TTFont(ttf_path)
+ xAvgCharWidth_before = font["OS/2"].xAvgCharWidth
+
+ subset_path = ttf_path.with_suffix(".subset.ttf")
+ subset.main(
+ [
+ str(ttf_path),
+ f"--output-file={subset_path}",
+ # Keep only the ellipsis, which is very wide, that ought to bump up the average
+ "--glyphs=ellipsis",
+ "--recalc-average-width",
+ "--no-prune-unicode-ranges",
+ ]
+ )
+ subset_font = TTFont(subset_path)
+ xAvgCharWidth_after = subset_font["OS/2"].xAvgCharWidth
+
+ # Check that the value gets updated
+ assert xAvgCharWidth_after != xAvgCharWidth_before
+
+ # Check that the value gets updated to the actual new value
+ subset_font["OS/2"].recalcAvgCharWidth(subset_font)
+ assert xAvgCharWidth_after == subset_font["OS/2"].xAvgCharWidth
+
if __name__ == "__main__":
sys.exit(unittest.main())
+
+
+def test_subset_prune_gdef_markglyphsetsdef():
+ # GDEF_MarkGlyphSetsDef
+ fb = FontBuilder(unitsPerEm=1000, isTTF=True)
+ glyph_order = [
+ ".notdef",
+ "A",
+ "Aacute",
+ "Acircumflex",
+ "Adieresis",
+ "a",
+ "aacute",
+ "acircumflex",
+ "adieresis",
+ "dieresiscomb",
+ "acutecomb",
+ "circumflexcomb",
+ ]
+ fb.setupGlyphOrder(glyph_order)
+ fb.setupGlyf({g: TTGlyphPen(None).glyph() for g in glyph_order})
+ fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
+ fb.setupHorizontalHeader()
+ fb.setupPost()
+ fb.setupNameTable(
+ {"familyName": "TestGDEFMarkGlyphSetsDef", "styleName": "Regular"}
+ )
+ fb.addOpenTypeFeatures(
+ """
+ feature ccmp {
+ lookup ccmp_1 {
+ lookupflag UseMarkFilteringSet [acutecomb];
+ sub a acutecomb by aacute;
+ sub A acutecomb by Aacute;
+ } ccmp_1;
+ lookup ccmp_2 {
+ lookupflag UseMarkFilteringSet [circumflexcomb];
+ sub a circumflexcomb by acircumflex;
+ sub A circumflexcomb by Acircumflex;
+ } ccmp_2;
+ lookup ccmp_3 {
+ lookupflag UseMarkFilteringSet [dieresiscomb];
+ sub a dieresiscomb by adieresis;
+ sub A dieresiscomb by Adieresis;
+ sub A acutecomb by Aacute;
+ } ccmp_3;
+ } ccmp;
+ """
+ )
+
+ buf = io.BytesIO()
+ fb.save(buf)
+ buf.seek(0)
+
+ font = TTFont(buf)
+
+ features = font["GSUB"].table.FeatureList.FeatureRecord
+ assert features[0].FeatureTag == "ccmp"
+ lookups = font["GSUB"].table.LookupList.Lookup
+ assert lookups[0].LookupFlag == 16
+ assert lookups[0].MarkFilteringSet == 0
+ assert lookups[1].LookupFlag == 16
+ assert lookups[1].MarkFilteringSet == 1
+ assert lookups[2].LookupFlag == 16
+ assert lookups[2].MarkFilteringSet == 2
+ marksets = font["GDEF"].table.MarkGlyphSetsDef.Coverage
+ assert marksets[0].glyphs == ["acutecomb"]
+ assert marksets[1].glyphs == ["circumflexcomb"]
+ assert marksets[2].glyphs == ["dieresiscomb"]
+
+ options = subset.Options(layout_features=["*"])
+ subsetter = subset.Subsetter(options)
+ subsetter.populate(glyphs=["A", "a", "acutecomb", "dieresiscomb"])
+ subsetter.subset(font)
+
+ features = font["GSUB"].table.FeatureList.FeatureRecord
+ assert features[0].FeatureTag == "ccmp"
+ lookups = font["GSUB"].table.LookupList.Lookup
+ assert lookups[0].LookupFlag == 16
+ assert lookups[0].MarkFilteringSet == 0
+ assert lookups[1].LookupFlag == 16
+ assert lookups[1].MarkFilteringSet == 1
+ marksets = font["GDEF"].table.MarkGlyphSetsDef.Coverage
+ assert marksets[0].glyphs == ["acutecomb"]
+ assert marksets[1].glyphs == ["dieresiscomb"]
+
+ buf = io.BytesIO()
+ fb.save(buf)
+ buf.seek(0)
+
+ font = TTFont(buf)
+
+ options = subset.Options(layout_features=["*"], layout_closure=False)
+ subsetter = subset.Subsetter(options)
+ subsetter.populate(glyphs=["A", "acutecomb", "Aacute"])
+ subsetter.subset(font)
+
+ features = font["GSUB"].table.FeatureList.FeatureRecord
+ assert features[0].FeatureTag == "ccmp"
+ lookups = font["GSUB"].table.LookupList.Lookup
+ assert lookups[0].LookupFlag == 16
+ assert lookups[0].MarkFilteringSet == 0
+ assert lookups[1].LookupFlag == 0
+ assert lookups[1].MarkFilteringSet == None
+ marksets = font["GDEF"].table.MarkGlyphSetsDef.Coverage
+ assert marksets[0].glyphs == ["acutecomb"]
diff --git a/Tests/svgLib/path/parser_test.py b/Tests/svgLib/path/parser_test.py
index d33043fc..4db64919 100644
--- a/Tests/svgLib/path/parser_test.py
+++ b/Tests/svgLib/path/parser_test.py
@@ -7,9 +7,7 @@ import pytest
@pytest.mark.parametrize(
"pathdef, expected",
[
-
# Examples from the SVG spec
-
(
"M 100 100 L 300 100 L 200 300 z",
[
@@ -18,7 +16,7 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
# for Z command behavior when there is multiple subpaths
(
@@ -32,97 +30,76 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
(
"M100,200 C100,100 250,100 250,200 S400,300 400,200",
[
("moveTo", ((100.0, 200.0),)),
- ("curveTo", ((100.0, 100.0),
- (250.0, 100.0),
- (250.0, 200.0))),
- ("curveTo", ((250.0, 300.0),
- (400.0, 300.0),
- (400.0, 200.0))),
+ ("curveTo", ((100.0, 100.0), (250.0, 100.0), (250.0, 200.0))),
+ ("curveTo", ((250.0, 300.0), (400.0, 300.0), (400.0, 200.0))),
("endPath", ()),
- ]
+ ],
),
(
"M100,200 C100,100 400,100 400,200",
[
("moveTo", ((100.0, 200.0),)),
- ("curveTo", ((100.0, 100.0),
- (400.0, 100.0),
- (400.0, 200.0))),
+ ("curveTo", ((100.0, 100.0), (400.0, 100.0), (400.0, 200.0))),
("endPath", ()),
- ]
+ ],
),
(
"M100,500 C25,400 475,400 400,500",
[
("moveTo", ((100.0, 500.0),)),
- ("curveTo", ((25.0, 400.0),
- (475.0, 400.0),
- (400.0, 500.0))),
+ ("curveTo", ((25.0, 400.0), (475.0, 400.0), (400.0, 500.0))),
("endPath", ()),
- ]
+ ],
),
(
"M100,800 C175,700 325,700 400,800",
[
("moveTo", ((100.0, 800.0),)),
- ("curveTo", ((175.0, 700.0),
- (325.0, 700.0),
- (400.0, 800.0))),
+ ("curveTo", ((175.0, 700.0), (325.0, 700.0), (400.0, 800.0))),
("endPath", ()),
- ]
+ ],
),
(
"M600,200 C675,100 975,100 900,200",
[
("moveTo", ((600.0, 200.0),)),
- ("curveTo", ((675.0, 100.0),
- (975.0, 100.0),
- (900.0, 200.0))),
+ ("curveTo", ((675.0, 100.0), (975.0, 100.0), (900.0, 200.0))),
("endPath", ()),
- ]
+ ],
),
(
"M600,500 C600,350 900,650 900,500",
[
("moveTo", ((600.0, 500.0),)),
- ("curveTo", ((600.0, 350.0),
- (900.0, 650.0),
- (900.0, 500.0))),
+ ("curveTo", ((600.0, 350.0), (900.0, 650.0), (900.0, 500.0))),
("endPath", ()),
- ]
+ ],
),
(
"M600,800 C625,700 725,700 750,800 S875,900 900,800",
[
("moveTo", ((600.0, 800.0),)),
- ("curveTo", ((625.0, 700.0),
- (725.0, 700.0),
- (750.0, 800.0))),
- ("curveTo", ((775.0, 900.0),
- (875.0, 900.0),
- (900.0, 800.0))),
+ ("curveTo", ((625.0, 700.0), (725.0, 700.0), (750.0, 800.0))),
+ ("curveTo", ((775.0, 900.0), (875.0, 900.0), (900.0, 800.0))),
("endPath", ()),
- ]
+ ],
),
(
"M200,300 Q400,50 600,300 T1000,300",
[
("moveTo", ((200.0, 300.0),)),
- ("qCurveTo", ((400.0, 50.0),
- (600.0, 300.0))),
- ("qCurveTo", ((800.0, 550.0),
- (1000.0, 300.0))),
+ ("qCurveTo", ((400.0, 50.0), (600.0, 300.0))),
+ ("qCurveTo", ((800.0, 550.0), (1000.0, 300.0))),
("endPath", ()),
- ]
+ ],
),
# End examples from SVG spec
-
# Relative moveto
(
"M 0 0 L 50 20 m 50 80 L 300 100 L 200 300 z",
@@ -135,28 +112,25 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
# Initial smooth and relative curveTo
(
"M100,200 s 150,-100 150,0",
[
("moveTo", ((100.0, 200.0),)),
- ("curveTo", ((100.0, 200.0),
- (250.0, 100.0),
- (250.0, 200.0))),
+ ("curveTo", ((100.0, 200.0), (250.0, 100.0), (250.0, 200.0))),
("endPath", ()),
- ]
+ ],
),
# Initial smooth and relative qCurveTo
(
"M100,200 t 150,0",
[
("moveTo", ((100.0, 200.0),)),
- ("qCurveTo", ((100.0, 200.0),
- (250.0, 200.0))),
+ ("qCurveTo", ((100.0, 200.0), (250.0, 200.0))),
("endPath", ()),
- ]
+ ],
),
# relative l command
(
@@ -167,17 +141,16 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
# relative q command
(
"M200,300 q200,-250 400,0",
[
("moveTo", ((200.0, 300.0),)),
- ("qCurveTo", ((400.0, 50.0),
- (600.0, 300.0))),
+ ("qCurveTo", ((400.0, 50.0), (600.0, 300.0))),
("endPath", ()),
- ]
+ ],
),
# absolute H command
(
@@ -188,7 +161,7 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
# relative h command
(
@@ -199,7 +172,7 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
# absolute V command
(
@@ -210,7 +183,7 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
# relative v command
(
@@ -221,9 +194,9 @@ import pytest
("lineTo", ((200.0, 300.0),)),
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
- ]
+ ],
),
- ]
+ ],
)
def test_parse_path(pathdef, expected):
pen = RecordingPen()
@@ -241,22 +214,16 @@ def test_parse_path(pathdef, expected):
"M100 100L200 200",
),
# repeated implicit command
- (
- "M 100 200 L 200 100 L -100 -200",
- "M 100 200 L 200 100 -100 -200"
- ),
+ ("M 100 200 L 200 100 L -100 -200", "M 100 200 L 200 100 -100 -200"),
# don't need spaces before a minus-sign
- (
- "M100,200c10-5,20-10,30-20",
- "M 100 200 c 10 -5 20 -10 30 -20"
- ),
+ ("M100,200c10-5,20-10,30-20", "M 100 200 c 10 -5 20 -10 30 -20"),
# closed paths have an implicit lineTo if they don't
# end on the same point as the initial moveTo
(
"M 100 100 L 300 100 L 200 300 z",
- "M 100 100 L 300 100 L 200 300 L 100 100 z"
- )
- ]
+ "M 100 100 L 300 100 L 200 300 L 100 100 z",
+ ),
+ ],
)
def test_equivalent_paths(pathdef1, pathdef2):
pen1 = RecordingPen()
@@ -273,7 +240,7 @@ def test_exponents():
pen = RecordingPen()
parse_path("M-3.4e38 3.4E+38L-3.4E-38,3.4e-38", pen)
expected = [
- ("moveTo", ((-3.4e+38, 3.4e+38),)),
+ ("moveTo", ((-3.4e38, 3.4e38),)),
("lineTo", ((-3.4e-38, 3.4e-38),)),
("endPath", ()),
]
@@ -283,13 +250,14 @@ def test_exponents():
pen = RecordingPen()
parse_path("M-3e38 3E+38L-3E-38,3e-38", pen)
expected = [
- ("moveTo", ((-3e+38, 3e+38),)),
+ ("moveTo", ((-3e38, 3e38),)),
("lineTo", ((-3e-38, 3e-38),)),
("endPath", ()),
]
assert pen.value == expected
+
def test_invalid_implicit_command():
with pytest.raises(ValueError) as exc_info:
parse_path("M 100 100 L 200 200 Z 100 200", RecordingPen())
@@ -300,34 +268,13 @@ def test_arc_to_cubic_bezier():
pen = RecordingPen()
parse_path("M300,200 h-150 a150,150 0 1,0 150,-150 z", pen)
expected = [
- ('moveTo', ((300.0, 200.0),)),
- ('lineTo', ((150.0, 200.0),)),
- (
- 'curveTo',
- (
- (150.0, 282.842),
- (217.157, 350.0),
- (300.0, 350.0)
- )
- ),
- (
- 'curveTo',
- (
- (382.842, 350.0),
- (450.0, 282.842),
- (450.0, 200.0)
- )
- ),
- (
- 'curveTo',
- (
- (450.0, 117.157),
- (382.842, 50.0),
- (300.0, 50.0)
- )
- ),
- ('lineTo', ((300.0, 200.0),)),
- ('closePath', ())
+ ("moveTo", ((300.0, 200.0),)),
+ ("lineTo", ((150.0, 200.0),)),
+ ("curveTo", ((150.0, 282.842), (217.157, 350.0), (300.0, 350.0))),
+ ("curveTo", ((382.842, 350.0), (450.0, 282.842), (450.0, 200.0))),
+ ("curveTo", ((450.0, 117.157), (382.842, 50.0), (300.0, 50.0))),
+ ("lineTo", ((300.0, 200.0),)),
+ ("closePath", ()),
]
result = list(pen.value)
@@ -339,9 +286,7 @@ def test_arc_to_cubic_bezier():
assert pt1 == pytest.approx(pt2, rel=1e-5)
-
class ArcRecordingPen(RecordingPen):
-
def arcTo(self, rx, ry, rotation, arc_large, arc_sweep, end_point):
self.value.append(
("arcTo", (rx, ry, rotation, arc_large, arc_sweep, end_point))
@@ -352,11 +297,11 @@ def test_arc_pen_with_arcTo():
pen = ArcRecordingPen()
parse_path("M300,200 h-150 a150,150 0 1,0 150,-150 z", pen)
expected = [
- ('moveTo', ((300.0, 200.0),)),
- ('lineTo', ((150.0, 200.0),)),
- ('arcTo', (150.0, 150.0, 0.0, True, False, (300.0, 50.0))),
- ('lineTo', ((300.0, 200.0),)),
- ('closePath', ())
+ ("moveTo", ((300.0, 200.0),)),
+ ("lineTo", ((150.0, 200.0),)),
+ ("arcTo", (150.0, 150.0, 0.0, True, False, (300.0, 50.0))),
+ ("lineTo", ((300.0, 200.0),)),
+ ("closePath", ()),
]
assert pen.value == expected
@@ -400,13 +345,12 @@ def test_arc_pen_with_arcTo():
(
"M12 7a5 5 0 105 5 5 5 0 00-5-5",
[
-
("moveTo", ((12.0, 7.0),)),
("arcTo", (5.0, 5.0, 0.0, True, False, (17.0, 12.0))),
("arcTo", (5.0, 5.0, 0.0, False, False, (12.0, 7.0))),
("endPath", ()),
],
- )
+ ),
],
)
def test_arc_flags_without_spaces(path, expected):
@@ -415,9 +359,7 @@ def test_arc_flags_without_spaces(path, expected):
assert pen.value == expected
-@pytest.mark.parametrize(
- "path", ["A", "A0,0,0,0,0,0", "A 0 0 0 0 0 0 0 0 0 0 0 0 0"]
-)
+@pytest.mark.parametrize("path", ["A", "A0,0,0,0,0,0", "A 0 0 0 0 0 0 0 0 0 0 0 0 0"])
def test_invalid_arc_not_enough_args(path):
pen = ArcRecordingPen()
with pytest.raises(ValueError, match="Invalid arc command") as e:
diff --git a/Tests/svgLib/path/path_test.py b/Tests/svgLib/path/path_test.py
index caf351d4..0b82193d 100644
--- a/Tests/svgLib/path/path_test.py
+++ b/Tests/svgLib/path/path_test.py
@@ -24,18 +24,13 @@ EXPECTED_PEN_COMMANDS = [
("lineTo", ((100.0, 100.0),)),
("closePath", ()),
("moveTo", ((100.0, 200.0),)),
- ("curveTo", ((100.0, 100.0),
- (250.0, 100.0),
- (250.0, 200.0))),
- ("curveTo", ((250.0, 300.0),
- (400.0, 300.0),
- (400.0, 200.0))),
- ("endPath", ())
+ ("curveTo", ((100.0, 100.0), (250.0, 100.0), (250.0, 200.0))),
+ ("curveTo", ((250.0, 300.0), (400.0, 300.0), (400.0, 200.0))),
+ ("endPath", ()),
]
class SVGPathTest(object):
-
def test_from_svg_file(self):
pen = RecordingPen()
with NamedTemporaryFile(delete=False) as tmp:
@@ -57,8 +52,7 @@ class SVGPathTest(object):
def test_transform(self):
pen = RecordingPen()
- svg = SVGPath.fromstring(SVG_DATA,
- transform=(1.0, 0, 0, -1.0, 0, 1000))
+ svg = SVGPath.fromstring(SVG_DATA, transform=(1.0, 0, 0, -1.0, 0, 1000))
svg.draw(pen)
assert pen.value == [
@@ -68,11 +62,7 @@ class SVGPathTest(object):
("lineTo", ((100.0, 900.0),)),
("closePath", ()),
("moveTo", ((100.0, 800.0),)),
- ("curveTo", ((100.0, 900.0),
- (250.0, 900.0),
- (250.0, 800.0))),
- ("curveTo", ((250.0, 700.0),
- (400.0, 700.0),
- (400.0, 800.0))),
- ("endPath", ())
+ ("curveTo", ((100.0, 900.0), (250.0, 900.0), (250.0, 800.0))),
+ ("curveTo", ((250.0, 700.0), (400.0, 700.0), (400.0, 800.0))),
+ ("endPath", ()),
]
diff --git a/Tests/svgLib/path/shapes_test.py b/Tests/svgLib/path/shapes_test.py
index 24e3dd2e..0d5be842 100644
--- a/Tests/svgLib/path/shapes_test.py
+++ b/Tests/svgLib/path/shapes_test.py
@@ -7,115 +7,87 @@ import pytest
"svg_xml, expected_path, expected_transform",
[
# path: direct passthrough
- (
- "<path d='I love kittens'/>",
- "I love kittens",
- None
- ),
+ ("<path d='I love kittens'/>", "I love kittens", None),
# path no @d
- (
- "<path duck='Mallard'/>",
- None,
- None
- ),
+ ("<path duck='Mallard'/>", None, None),
# line
- (
- '<line x1="10" x2="50" y1="110" y2="150"/>',
- 'M10,110 L50,150',
- None
- ),
+ ('<line x1="10" x2="50" y1="110" y2="150"/>', "M10,110 L50,150", None),
# line, decimal positioning
(
'<line x1="10.0" x2="50.5" y1="110.2" y2="150.7"/>',
- 'M10,110.2 L50.5,150.7',
- None
+ "M10,110.2 L50.5,150.7",
+ None,
),
# rect: minimal valid example
- (
- "<rect width='1' height='1'/>",
- "M0,0 H1 V1 H0 V0 z",
- None
- ),
+ ("<rect width='1' height='1'/>", "M0,0 H1 V1 H0 V0 z", None),
# rect: sharp corners
(
"<rect x='10' y='11' width='17' height='11'/>",
"M10,11 H27 V22 H10 V11 z",
- None
+ None,
),
# rect: round corners
(
"<rect x='9' y='9' width='11' height='7' rx='2'/>",
"M11,9 H18 A2,2 0 0 1 20,11 V14 A2,2 0 0 1 18,16 H11"
" A2,2 0 0 1 9,14 V11 A2,2 0 0 1 11,9 z",
- None
+ None,
),
# rect: simple
(
"<rect x='11.5' y='16' width='11' height='2'/>",
"M11.5,16 H22.5 V18 H11.5 V16 z",
- None
+ None,
),
# rect: the one above plus a rotation
(
"<rect x='11.5' y='16' transform='matrix(0.7071 -0.7071 0.7071 0.7071 -7.0416 16.9999)' width='11' height='2'/>",
"M11.5,16 H22.5 V18 H11.5 V16 z",
- (0.7071, -0.7071, 0.7071, 0.7071, -7.0416, 16.9999)
+ (0.7071, -0.7071, 0.7071, 0.7071, -7.0416, 16.9999),
),
# polygon
- (
- "<polygon points='30,10 50,30 10,30'/>",
- "M30,10 50,30 10,30 z",
- None
- ),
+ ("<polygon points='30,10 50,30 10,30'/>", "M30,10 50,30 10,30 z", None),
# polyline
- (
- "<polyline points='30,10 50,30 10,30'/>",
- "M30,10 50,30 10,30",
- None
- ),
+ ("<polyline points='30,10 50,30 10,30'/>", "M30,10 50,30 10,30", None),
# circle, minimal valid example
- (
- "<circle r='1'/>",
- "M-1,0 A1,1 0 1 1 1,0 A1,1 0 1 1 -1,0",
- None
- ),
+ ("<circle r='1'/>", "M-1,0 A1,1 0 1 1 1,0 A1,1 0 1 1 -1,0", None),
# circle
(
"<circle cx='600' cy='200' r='100'/>",
"M500,200 A100,100 0 1 1 700,200 A100,100 0 1 1 500,200",
- None
+ None,
),
# circle, decimal positioning
(
"<circle cx='12' cy='6.5' r='1.5'></circle>",
"M10.5,6.5 A1.5,1.5 0 1 1 13.5,6.5 A1.5,1.5 0 1 1 10.5,6.5",
- None
+ None,
),
# circle, with transform
(
'<circle transform="matrix(0.9871 -0.1602 0.1602 0.9871 -7.525 8.6516)" cx="49.9" cy="51" r="14.3"/>',
- 'M35.6,51 A14.3,14.3 0 1 1 64.2,51 A14.3,14.3 0 1 1 35.6,51',
- (0.9871, -0.1602, 0.1602, 0.9871, -7.525, 8.6516)
+ "M35.6,51 A14.3,14.3 0 1 1 64.2,51 A14.3,14.3 0 1 1 35.6,51",
+ (0.9871, -0.1602, 0.1602, 0.9871, -7.525, 8.6516),
),
# ellipse
(
'<ellipse cx="100" cy="50" rx="100" ry="50"/>',
- 'M0,50 A100,50 0 1 1 200,50 A100,50 0 1 1 0,50',
- None
+ "M0,50 A100,50 0 1 1 200,50 A100,50 0 1 1 0,50",
+ None,
),
# ellipse, decimal positioning
(
'<ellipse cx="100.5" cy="50" rx="10" ry="50.5"/>',
- 'M90.5,50 A10,50.5 0 1 1 110.5,50 A10,50.5 0 1 1 90.5,50',
- None
+ "M90.5,50 A10,50.5 0 1 1 110.5,50 A10,50.5 0 1 1 90.5,50",
+ None,
),
# ellipse, with transform
(
'<ellipse transform="matrix(0.9557 -0.2945 0.2945 0.9557 -14.7694 20.1454)" cx="59.5" cy="59.1" rx="30.9" ry="11.9"/>',
- 'M28.6,59.1 A30.9,11.9 0 1 1 90.4,59.1 A30.9,11.9 0 1 1 28.6,59.1',
- (0.9557, -0.2945, 0.2945, 0.9557, -14.7694, 20.1454)
+ "M28.6,59.1 A30.9,11.9 0 1 1 90.4,59.1 A30.9,11.9 0 1 1 28.6,59.1",
+ (0.9557, -0.2945, 0.2945, 0.9557, -14.7694, 20.1454),
),
- ]
+ ],
)
def test_el_to_path(svg_xml, expected_path, expected_transform):
pb = shapes.PathBuilder()
diff --git a/Tests/t1Lib/t1Lib_test.py b/Tests/t1Lib/t1Lib_test.py
index 3e639a58..bfc186da 100644
--- a/Tests/t1Lib/t1Lib_test.py
+++ b/Tests/t1Lib/t1Lib_test.py
@@ -8,194 +8,191 @@ import random
CWD = os.path.abspath(os.path.dirname(__file__))
-DATADIR = os.path.join(CWD, 'data')
+DATADIR = os.path.join(CWD, "data")
# I used `tx` to convert PFA to LWFN (stored in the data fork)
-LWFN = os.path.join(DATADIR, 'TestT1-Regular.lwfn')
-PFA = os.path.join(DATADIR, 'TestT1-Regular.pfa')
-PFB = os.path.join(DATADIR, 'TestT1-Regular.pfb')
-WEIRD_ZEROS = os.path.join(DATADIR, 'TestT1-weird-zeros.pfa')
+LWFN = os.path.join(DATADIR, "TestT1-Regular.lwfn")
+PFA = os.path.join(DATADIR, "TestT1-Regular.pfa")
+PFB = os.path.join(DATADIR, "TestT1-Regular.pfb")
+WEIRD_ZEROS = os.path.join(DATADIR, "TestT1-weird-zeros.pfa")
# ellipsis is hinted with 55 131 296 131 537 131 vstem3 0 122 hstem
-ELLIPSIS_HINTED = os.path.join(DATADIR, 'TestT1-ellipsis-hinted.pfa')
+ELLIPSIS_HINTED = os.path.join(DATADIR, "TestT1-ellipsis-hinted.pfa")
class FindEncryptedChunksTest(unittest.TestCase):
+ def test_findEncryptedChunks(self):
+ with open(PFA, "rb") as f:
+ data = f.read()
+ chunks = t1Lib.findEncryptedChunks(data)
+ self.assertEqual(len(chunks), 3)
+ self.assertFalse(chunks[0][0])
+ # the second chunk is encrypted
+ self.assertTrue(chunks[1][0])
+ self.assertFalse(chunks[2][0])
- def test_findEncryptedChunks(self):
- with open(PFA, "rb") as f:
- data = f.read()
- chunks = t1Lib.findEncryptedChunks(data)
- self.assertEqual(len(chunks), 3)
- self.assertFalse(chunks[0][0])
- # the second chunk is encrypted
- self.assertTrue(chunks[1][0])
- self.assertFalse(chunks[2][0])
+ def test_findEncryptedChunks_weird_zeros(self):
+ with open(WEIRD_ZEROS, "rb") as f:
+ data = f.read()
- def test_findEncryptedChunks_weird_zeros(self):
- with open(WEIRD_ZEROS, 'rb') as f:
- data = f.read()
-
- # Just assert that this doesn't raise any exception for not finding the
- # end of eexec
- t1Lib.findEncryptedChunks(data)
+ # Just assert that this doesn't raise any exception for not finding the
+ # end of eexec
+ t1Lib.findEncryptedChunks(data)
class DecryptType1Test(unittest.TestCase):
-
- def test_decryptType1(self):
- with open(PFA, "rb") as f:
- data = f.read()
- decrypted = t1Lib.decryptType1(data)
- self.assertNotEqual(decrypted, data)
+ def test_decryptType1(self):
+ with open(PFA, "rb") as f:
+ data = f.read()
+ decrypted = t1Lib.decryptType1(data)
+ self.assertNotEqual(decrypted, data)
class ReadWriteTest(unittest.TestCase):
-
- def test_read_pfa_write_pfb(self):
- font = t1Lib.T1Font(PFA)
- data = self.write(font, 'PFB')
- self.assertEqual(font.getData(), data)
-
- def test_read_and_parse_pfa_write_pfb(self):
- font = t1Lib.T1Font(PFA)
- font.parse()
- saved_font = self.write(font, 'PFB', dohex=False, doparse=True)
- self.assertTrue(same_dicts(font.font, saved_font))
-
- def test_read_pfb_write_pfa(self):
- font = t1Lib.T1Font(PFB)
- # 'OTHER' == 'PFA'
- data = self.write(font, 'OTHER', dohex=True)
- self.assertEqual(font.getData(), data)
-
- def test_read_and_parse_pfb_write_pfa(self):
- font = t1Lib.T1Font(PFB)
- font.parse()
- # 'OTHER' == 'PFA'
- saved_font = self.write(font, 'OTHER', dohex=True, doparse=True)
- self.assertTrue(same_dicts(font.font, saved_font))
-
- def test_read_with_path(self):
- import pathlib
- font = t1Lib.T1Font(pathlib.Path(PFB))
-
- @staticmethod
- def write(font, outtype, dohex=False, doparse=False):
- temp = os.path.join(DATADIR, 'temp.' + outtype.lower())
- try:
- font.saveAs(temp, outtype, dohex=dohex)
- newfont = t1Lib.T1Font(temp)
- if doparse:
- newfont.parse()
- data = newfont.font
- else:
- data = newfont.getData()
- finally:
- if os.path.exists(temp):
- os.remove(temp)
- return data
+ def test_read_pfa_write_pfb(self):
+ font = t1Lib.T1Font(PFA)
+ data = self.write(font, "PFB")
+ self.assertEqual(font.getData(), data)
+
+ def test_read_and_parse_pfa_write_pfb(self):
+ font = t1Lib.T1Font(PFA)
+ font.parse()
+ saved_font = self.write(font, "PFB", dohex=False, doparse=True)
+ self.assertTrue(same_dicts(font.font, saved_font))
+
+ def test_read_pfb_write_pfa(self):
+ font = t1Lib.T1Font(PFB)
+ # 'OTHER' == 'PFA'
+ data = self.write(font, "OTHER", dohex=True)
+ self.assertEqual(font.getData(), data)
+
+ def test_read_and_parse_pfb_write_pfa(self):
+ font = t1Lib.T1Font(PFB)
+ font.parse()
+ # 'OTHER' == 'PFA'
+ saved_font = self.write(font, "OTHER", dohex=True, doparse=True)
+ self.assertTrue(same_dicts(font.font, saved_font))
+
+ def test_read_with_path(self):
+ import pathlib
+
+ font = t1Lib.T1Font(pathlib.Path(PFB))
+
+ @staticmethod
+ def write(font, outtype, dohex=False, doparse=False):
+ temp = os.path.join(DATADIR, "temp." + outtype.lower())
+ try:
+ font.saveAs(temp, outtype, dohex=dohex)
+ newfont = t1Lib.T1Font(temp)
+ if doparse:
+ newfont.parse()
+ data = newfont.font
+ else:
+ data = newfont.getData()
+ finally:
+ if os.path.exists(temp):
+ os.remove(temp)
+ return data
class T1FontTest(unittest.TestCase):
-
- def test_parse_lwfn(self):
- # the extended attrs are lost on git so we can't auto-detect 'LWFN'
- font = t1Lib.T1Font(LWFN, kind="LWFN")
- font.parse()
- self.assertEqual(font['FontName'], 'TestT1-Regular')
- self.assertTrue('Subrs' in font['Private'])
-
- def test_parse_pfa(self):
- font = t1Lib.T1Font(PFA)
- font.parse()
- self.assertEqual(font['FontName'], 'TestT1-Regular')
- self.assertTrue('Subrs' in font['Private'])
-
- def test_parse_pfb(self):
- font = t1Lib.T1Font(PFB)
- font.parse()
- self.assertEqual(font['FontName'], 'TestT1-Regular')
- self.assertTrue('Subrs' in font['Private'])
-
- def test_getGlyphSet(self):
- font = t1Lib.T1Font(PFA)
- glyphs = font.getGlyphSet()
- i = random.randrange(len(glyphs))
- aglyph = list(glyphs.values())[i]
- self.assertTrue(hasattr(aglyph, 'draw'))
- self.assertFalse(hasattr(aglyph, 'width'))
- aglyph.draw(NullPen())
- self.assertTrue(hasattr(aglyph, 'width'))
+ def test_parse_lwfn(self):
+ # the extended attrs are lost on git so we can't auto-detect 'LWFN'
+ font = t1Lib.T1Font(LWFN, kind="LWFN")
+ font.parse()
+ self.assertEqual(font["FontName"], "TestT1-Regular")
+ self.assertTrue("Subrs" in font["Private"])
+
+ def test_parse_pfa(self):
+ font = t1Lib.T1Font(PFA)
+ font.parse()
+ self.assertEqual(font["FontName"], "TestT1-Regular")
+ self.assertTrue("Subrs" in font["Private"])
+
+ def test_parse_pfb(self):
+ font = t1Lib.T1Font(PFB)
+ font.parse()
+ self.assertEqual(font["FontName"], "TestT1-Regular")
+ self.assertTrue("Subrs" in font["Private"])
+
+ def test_getGlyphSet(self):
+ font = t1Lib.T1Font(PFA)
+ glyphs = font.getGlyphSet()
+ i = random.randrange(len(glyphs))
+ aglyph = list(glyphs.values())[i]
+ self.assertTrue(hasattr(aglyph, "draw"))
+ self.assertFalse(hasattr(aglyph, "width"))
+ aglyph.draw(NullPen())
+ self.assertTrue(hasattr(aglyph, "width"))
class EditTest(unittest.TestCase):
-
- def test_edit_pfa(self):
- font = t1Lib.T1Font(PFA)
- ellipsis = font.getGlyphSet()["ellipsis"]
- ellipsis.decompile()
- program = []
- for v in ellipsis.program:
- try:
- program.append(int(v))
- except:
- program.append(v)
- if v == 'hsbw':
- hints = [55, 131, 296, 131, 537, 131, 'vstem3', 0, 122, 'hstem']
- program.extend(hints)
- ellipsis.program = program
- # 'OTHER' == 'PFA'
- saved_font = self.write(font, 'OTHER', dohex=True, doparse=True)
- hinted_font = t1Lib.T1Font(ELLIPSIS_HINTED)
- hinted_font.parse()
- self.assertTrue(same_dicts(hinted_font.font, saved_font))
-
- @staticmethod
- def write(font, outtype, dohex=False, doparse=False):
- temp = os.path.join(DATADIR, 'temp.' + outtype.lower())
- try:
- font.saveAs(temp, outtype, dohex=dohex)
- newfont = t1Lib.T1Font(temp)
- if doparse:
- newfont.parse()
- data = newfont.font
- else:
- data = newfont.getData()
- finally:
- if os.path.exists(temp):
- os.remove(temp)
- return data
+ def test_edit_pfa(self):
+ font = t1Lib.T1Font(PFA)
+ ellipsis = font.getGlyphSet()["ellipsis"]
+ ellipsis.decompile()
+ program = []
+ for v in ellipsis.program:
+ try:
+ program.append(int(v))
+ except:
+ program.append(v)
+ if v == "hsbw":
+ hints = [55, 131, 296, 131, 537, 131, "vstem3", 0, 122, "hstem"]
+ program.extend(hints)
+ ellipsis.program = program
+ # 'OTHER' == 'PFA'
+ saved_font = self.write(font, "OTHER", dohex=True, doparse=True)
+ hinted_font = t1Lib.T1Font(ELLIPSIS_HINTED)
+ hinted_font.parse()
+ self.assertTrue(same_dicts(hinted_font.font, saved_font))
+
+ @staticmethod
+ def write(font, outtype, dohex=False, doparse=False):
+ temp = os.path.join(DATADIR, "temp." + outtype.lower())
+ try:
+ font.saveAs(temp, outtype, dohex=dohex)
+ newfont = t1Lib.T1Font(temp)
+ if doparse:
+ newfont.parse()
+ data = newfont.font
+ else:
+ data = newfont.getData()
+ finally:
+ if os.path.exists(temp):
+ os.remove(temp)
+ return data
def same_dicts(dict1, dict2):
- if dict1.keys() != dict2.keys():
- return False
- for key, value in dict1.items():
- if isinstance(value, dict):
- if not same_dicts(value, dict2[key]):
- return False
- elif isinstance(value, list):
- if len(value) != len(dict2[key]):
- return False
- for elem1, elem2 in zip(value, dict2[key]):
- if isinstance(elem1, T1CharString):
- elem1.compile()
- elem2.compile()
- if elem1.bytecode != elem2.bytecode:
- return False
- else:
- if elem1 != elem2:
- return False
- elif isinstance(value, T1CharString):
- value.compile()
- dict2[key].compile()
- if value.bytecode != dict2[key].bytecode:
- return False
- else:
- if value != dict2[key]:
- return False
- return True
-
-
-if __name__ == '__main__':
- import sys
- sys.exit(unittest.main())
+ if dict1.keys() != dict2.keys():
+ return False
+ for key, value in dict1.items():
+ if isinstance(value, dict):
+ if not same_dicts(value, dict2[key]):
+ return False
+ elif isinstance(value, list):
+ if len(value) != len(dict2[key]):
+ return False
+ for elem1, elem2 in zip(value, dict2[key]):
+ if isinstance(elem1, T1CharString):
+ elem1.compile()
+ elem2.compile()
+ if elem1.bytecode != elem2.bytecode:
+ return False
+ else:
+ if elem1 != elem2:
+ return False
+ elif isinstance(value, T1CharString):
+ value.compile()
+ dict2[key].compile()
+ if value.bytecode != dict2[key].bytecode:
+ return False
+ else:
+ if value != dict2[key]:
+ return False
+ return True
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/data/I-512upem.ttx b/Tests/ttLib/data/I-512upem.ttx
index 34795b1f..400685e3 100644
--- a/Tests/ttLib/data/I-512upem.ttx
+++ b/Tests/ttLib/data/I-512upem.ttx
@@ -2277,7 +2277,7 @@
</VarData>
<VarData index="1">
<!-- ItemCount=1 -->
- <NumShorts value="7"/>
+ <NumShorts value="0"/>
<!-- VarRegionCount=25 -->
<VarRegionIndex index="0" value="1"/>
<VarRegionIndex index="1" value="5"/>
@@ -2668,6 +2668,7 @@
</STAT>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/ttLib/data/I.otf b/Tests/ttLib/data/I.otf
new file mode 100644
index 00000000..41c65347
--- /dev/null
+++ b/Tests/ttLib/data/I.otf
Binary files differ
diff --git a/Tests/ttLib/data/TestOTF-Regular.otx b/Tests/ttLib/data/TestOTF-Regular.otx
index 92e0b2f6..4e459158 100644
--- a/Tests/ttLib/data/TestOTF-Regular.otx
+++ b/Tests/ttLib/data/TestOTF-Regular.otx
@@ -148,7 +148,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
<namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True">
Test TTF
@@ -190,7 +190,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="3" platEncID="1" langID="0x409">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
</name>
diff --git a/Tests/ttLib/data/TestTTF-Regular.ttx b/Tests/ttLib/data/TestTTF-Regular.ttx
index 1f1dd2b4..d18be46f 100644
--- a/Tests/ttLib/data/TestTTF-Regular.ttx
+++ b/Tests/ttLib/data/TestTTF-Regular.ttx
@@ -468,7 +468,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
<namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True">
Test TTF
@@ -510,7 +510,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="3" platEncID="1" langID="0x409">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
</name>
diff --git a/Tests/ttLib/data/TestTTF_normalizeLocation.ttx b/Tests/ttLib/data/TestTTF_normalizeLocation.ttx
new file mode 100644
index 00000000..0cb99593
--- /dev/null
+++ b/Tests/ttLib/data/TestTTF_normalizeLocation.ttx
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.36">
+
+ <avar>
+ <version major="1" minor="0"/>
+ <segment axis="wght">
+ <mapping from="-1.0" to="-1.0"/>
+ <mapping from="-0.5" to="-0.75"/>
+ <mapping from="0.0" to="0.0"/>
+ <mapping from="0.5" to="0.75"/>
+ <mapping from="1.0" to="1.0"/>
+ </segment>
+ </avar>
+
+ <fvar>
+
+ <Axis>
+ <AxisTag>wght</AxisTag>
+ <Flags>0x0</Flags>
+ <MinValue>100.0</MinValue>
+ <DefaultValue>400.0</DefaultValue>
+ <MaxValue>700.0</MaxValue>
+ <AxisNameID>256</AxisNameID>
+ </Axis>
+
+ </fvar>
+
+</ttFont>
diff --git a/Tests/ttLib/data/bogus_post_format_1.ttf b/Tests/ttLib/data/bogus_post_format_1.ttf
new file mode 100644
index 00000000..62b80213
--- /dev/null
+++ b/Tests/ttLib/data/bogus_post_format_1.ttf
Binary files differ
diff --git a/Tests/ttLib/data/dot-cubic.ttf b/Tests/ttLib/data/dot-cubic.ttf
new file mode 100644
index 00000000..5adb11cc
--- /dev/null
+++ b/Tests/ttLib/data/dot-cubic.ttf
Binary files differ
diff --git a/Tests/ttLib/data/issue2824.ttf b/Tests/ttLib/data/issue2824.ttf
new file mode 100644
index 00000000..de110958
--- /dev/null
+++ b/Tests/ttLib/data/issue2824.ttf
Binary files differ
diff --git a/Tests/ttLib/data/varc-6868.ttf b/Tests/ttLib/data/varc-6868.ttf
new file mode 100644
index 00000000..aa55df21
--- /dev/null
+++ b/Tests/ttLib/data/varc-6868.ttf
Binary files differ
diff --git a/Tests/ttLib/data/varc-ac00-ac01-500upem.ttx b/Tests/ttLib/data/varc-ac00-ac01-500upem.ttx
new file mode 100644
index 00000000..db32c06e
--- /dev/null
+++ b/Tests/ttLib/data/varc-ac00-ac01-500upem.ttx
@@ -0,0 +1,2055 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.38">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="uniAC00"/>
+ <GlyphID id="2" name="uniAC01"/>
+ <GlyphID id="3" name="glyph00003"/>
+ <GlyphID id="4" name="glyph00004"/>
+ <GlyphID id="5" name="glyph00005"/>
+ </GlyphOrder>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="576"/>
+ <descent value="-143"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="0"/>
+ <minLeftSideBearing value="0"/>
+ <minRightSideBearing value="0"/>
+ <xMaxExtent value="0"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="2"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="6"/>
+ <maxPoints value="0"/>
+ <maxContours value="0"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="2"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="3"/>
+ <xAvgCharWidth value="483"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="0"/>
+ <ySubscriptYSize value="0"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="0"/>
+ <ySuperscriptXSize value="0"/>
+ <ySuperscriptYSize value="0"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="0"/>
+ <yStrikeoutSize value="0"/>
+ <yStrikeoutPosition value="0"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange2 value="00000001 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="????"/>
+ <fsSelection value="00000000 00000000"/>
+ <usFirstCharIndex value="44032"/>
+ <usLastCharIndex value="44033"/>
+ <sTypoAscender value="440"/>
+ <sTypoDescender value="0"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="576"/>
+ <usWinDescent value="143"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="0"/>
+ <sCapHeight value="0"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="0"/>
+ <mtx name="glyph00003" width="483" lsb="-207"/>
+ <mtx name="glyph00004" width="483" lsb="-155"/>
+ <mtx name="glyph00005" width="483" lsb="-258"/>
+ <mtx name="uniAC00" width="483" lsb="0"/>
+ <mtx name="uniAC01" width="483" lsb="0"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0xac00" name="uniAC00"/><!-- HANGUL SYLLABLE GA -->
+ <map code="0xac01" name="uniAC01"/><!-- HANGUL SYLLABLE GAG -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0xac00" name="uniAC00"/><!-- HANGUL SYLLABLE GA -->
+ <map code="0xac01" name="uniAC01"/><!-- HANGUL SYLLABLE GAG -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef"/><!-- contains no outline data -->
+
+ <TTGlyph name="glyph00003" xMin="-207" yMin="-76" xMax="34" yMax="7">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="-15" on="1"/>
+ <pt x="-8" y="-15" on="0"/>
+ <pt x="-38" y="-15" on="0"/>
+ <pt x="-74" y="-15" on="0"/>
+ <pt x="-108" y="-16" on="0"/>
+ <pt x="-133" y="-17" on="0"/>
+ <pt x="-139" y="-17" on="1"/>
+ <pt x="-143" y="-17" on="0"/>
+ <pt x="-155" y="-19" on="0"/>
+ <pt x="-168" y="-22" on="0"/>
+ <pt x="-172" y="-24" on="1"/>
+ <pt x="-181" y="-21" on="0"/>
+ <pt x="-198" y="-13" on="0"/>
+ <pt x="-205" y="-8" on="1"/>
+ <pt x="-206" y="-7" on="0"/>
+ <pt x="-207" y="-5" on="0"/>
+ <pt x="-206" y="-4" on="1"/>
+ <pt x="-204" y="1" on="1"/>
+ <pt x="-195" y="-1" on="0"/>
+ <pt x="-174" y="-3" on="0"/>
+ <pt x="-166" y="-3" on="1"/>
+ <pt x="-161" y="-3" on="0"/>
+ <pt x="-136" y="-3" on="0"/>
+ <pt x="-103" y="-2" on="0"/>
+ <pt x="-66" y="-2" on="0"/>
+ <pt x="-31" y="-1" on="0"/>
+ <pt x="-5" y="0" on="0"/>
+ <pt x="0" y="0" on="1"/>
+ </contour>
+ <contour>
+ <pt x="27" y="-18" on="1"/>
+ <pt x="0" y="-15" on="1"/>
+ <pt x="-2" y="0" on="1"/>
+ <pt x="10" y="6" on="1"/>
+ <pt x="12" y="7" on="0"/>
+ <pt x="15" y="7" on="0"/>
+ <pt x="16" y="6" on="1"/>
+ <pt x="18" y="6" on="0"/>
+ <pt x="23" y="2" on="0"/>
+ <pt x="28" y="-1" on="0"/>
+ <pt x="29" y="-2" on="1"/>
+ <pt x="30" y="-3" on="0"/>
+ <pt x="31" y="-5" on="0"/>
+ <pt x="30" y="-7" on="1"/>
+ <pt x="27" y="-18" on="1"/>
+ </contour>
+ <contour>
+ <pt x="29" y="-76" on="1"/>
+ <pt x="22" y="-76" on="1"/>
+ <pt x="19" y="-76" on="0"/>
+ <pt x="20" y="-72" on="1"/>
+ <pt x="15" y="-63" on="0"/>
+ <pt x="7" y="-41" on="0"/>
+ <pt x="1" y="-21" on="0"/>
+ <pt x="0" y="-15" on="1"/>
+ <pt x="15" y="-11" on="1"/>
+ <pt x="27" y="-18" on="1"/>
+ <pt x="27" y="-26" on="0"/>
+ <pt x="28" y="-45" on="0"/>
+ <pt x="31" y="-65" on="0"/>
+ <pt x="34" y="-73" on="1"/>
+ <pt x="33" y="-75" on="0"/>
+ <pt x="30" y="-76" on="0"/>
+ <pt x="29" y="-76" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="glyph00004" xMin="-155" yMin="-48" xMax="14" yMax="177">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="-100" y="-1" on="1"/>
+ <pt x="-100" y="16" on="1"/>
+ <pt x="-55" y="18" on="1"/>
+ <pt x="-38" y="27" on="1"/>
+ <pt x="-26" y="28" on="0"/>
+ <pt x="1" y="26" on="0"/>
+ <pt x="8" y="24" on="1"/>
+ <pt x="14" y="23" on="0"/>
+ <pt x="14" y="17" on="1"/>
+ <pt x="14" y="11" on="0"/>
+ <pt x="11" y="3" on="0"/>
+ <pt x="8" y="2" on="1"/>
+ <pt x="7" y="1" on="0"/>
+ <pt x="2" y="0" on="0"/>
+ <pt x="0" y="0" on="1"/>
+ </contour>
+ <contour>
+ <pt x="-91" y="-48" on="1"/>
+ <pt x="-108" y="-48" on="1"/>
+ <pt x="-108" y="-42" on="0"/>
+ <pt x="-109" y="-26" on="0"/>
+ <pt x="-109" y="-8" on="0"/>
+ <pt x="-109" y="9" on="0"/>
+ <pt x="-109" y="16" on="1"/>
+ <pt x="-109" y="139" on="1"/>
+ <pt x="-117" y="147" on="0"/>
+ <pt x="-141" y="160" on="0"/>
+ <pt x="-155" y="165" on="1"/>
+ <pt x="-152" y="175" on="1"/>
+ <pt x="-152" y="176" on="0"/>
+ <pt x="-150" y="177" on="0"/>
+ <pt x="-147" y="177" on="1"/>
+ <pt x="-138" y="177" on="0"/>
+ <pt x="-115" y="173" on="0"/>
+ <pt x="-93" y="168" on="0"/>
+ <pt x="-86" y="165" on="1"/>
+ <pt x="-81" y="163" on="0"/>
+ <pt x="-81" y="156" on="1"/>
+ <pt x="-82" y="130" on="1"/>
+ <pt x="-82" y="27" on="1"/>
+ <pt x="-82" y="15" on="0"/>
+ <pt x="-83" y="-7" on="0"/>
+ <pt x="-85" y="-26" on="0"/>
+ <pt x="-89" y="-42" on="0"/>
+ <pt x="-91" y="-48" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="glyph00005" xMin="-258" yMin="-122" xMax="32" yMax="7">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="-18" on="1"/>
+ <pt x="-11" y="-18" on="0"/>
+ <pt x="-49" y="-18" on="0"/>
+ <pt x="-94" y="-18" on="0"/>
+ <pt x="-138" y="-19" on="0"/>
+ <pt x="-171" y="-20" on="0"/>
+ <pt x="-178" y="-21" on="1"/>
+ <pt x="-186" y="-21" on="0"/>
+ <pt x="-210" y="-25" on="0"/>
+ <pt x="-218" y="-28" on="1"/>
+ <pt x="-228" y="-25" on="0"/>
+ <pt x="-248" y="-15" on="0"/>
+ <pt x="-256" y="-9" on="1"/>
+ <pt x="-258" y="-7" on="0"/>
+ <pt x="-257" y="-5" on="1"/>
+ <pt x="-254" y="0" on="1"/>
+ <pt x="-245" y="-2" on="0"/>
+ <pt x="-221" y="-4" on="0"/>
+ <pt x="-212" y="-4" on="1"/>
+ <pt x="-206" y="-4" on="0"/>
+ <pt x="-174" y="-4" on="0"/>
+ <pt x="-131" y="-3" on="0"/>
+ <pt x="-84" y="-2" on="0"/>
+ <pt x="-40" y="-1" on="0"/>
+ <pt x="-7" y="0" on="0"/>
+ <pt x="0" y="0" on="1"/>
+ </contour>
+ <contour>
+ <pt x="25" y="-21" on="1"/>
+ <pt x="-1" y="-18" on="1"/>
+ <pt x="-3" y="0" on="1"/>
+ <pt x="9" y="6" on="1"/>
+ <pt x="13" y="7" on="0"/>
+ <pt x="16" y="6" on="1"/>
+ <pt x="18" y="4" on="0"/>
+ <pt x="28" y="-1" on="0"/>
+ <pt x="30" y="-3" on="1"/>
+ <pt x="32" y="-5" on="0"/>
+ <pt x="30" y="-9" on="1"/>
+ <pt x="25" y="-21" on="1"/>
+ </contour>
+ <contour>
+ <pt x="17" y="-122" on="1"/>
+ <pt x="1" y="-122" on="1"/>
+ <pt x="1" y="-113" on="0"/>
+ <pt x="0" y="-92" on="0"/>
+ <pt x="-1" y="-70" on="0"/>
+ <pt x="-1" y="-62" on="1"/>
+ <pt x="-1" y="-18" on="1"/>
+ <pt x="25" y="-18" on="1"/>
+ <pt x="24" y="-55" on="1"/>
+ <pt x="24" y="-64" on="0"/>
+ <pt x="22" y="-89" on="0"/>
+ <pt x="19" y="-113" on="0"/>
+ <pt x="17" y="-122" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="uniAC00" xMin="23" yMin="-53" xMax="480" yMax="426">
+ <varComponent glyphName="glyph00003" flags="0x18" translateX="230.0" translateY="338.0">
+ <location>
+ <axis tag="0000" value="0.8466"/>
+ <axis tag="0001" value="0.98944"/>
+ <axis tag="0002" value="0.47284"/>
+ <axis tag="0003" value="0.44653"/>
+ </location>
+ </varComponent>
+ <varComponent glyphName="glyph00004" flags="0x18" translateX="466.0" translateY="191.0">
+ <location>
+ <axis tag="0000" value="0.9336"/>
+ <axis tag="0001" value="0.916"/>
+ <axis tag="0002" value="0.5232"/>
+ <axis tag="0003" value="0.32806"/>
+ <axis tag="0004" value="0.8509"/>
+ </location>
+ </varComponent>
+ </TTGlyph>
+
+ <TTGlyph name="uniAC01" xMin="17" yMin="-58" xMax="480" yMax="426">
+ <varComponent glyphName="glyph00003" flags="0x18" translateX="227.0" translateY="366.0">
+ <location>
+ <axis tag="0000" value="0.7492"/>
+ <axis tag="0001" value="0.9945"/>
+ <axis tag="0002" value="0.28485"/>
+ <axis tag="0003" value="0.297"/>
+ </location>
+ </varComponent>
+ <varComponent glyphName="glyph00004" flags="0x18" translateX="466.0" translateY="238.0">
+ <location>
+ <axis tag="0000" value="0.309"/>
+ <axis tag="0001" value="0.15155"/>
+ <axis tag="0002" value="0.2873"/>
+ <axis tag="0003" value="0.35034"/>
+ <axis tag="0004" value="0.7843"/>
+ </location>
+ </varComponent>
+ <varComponent glyphName="glyph00005" flags="0x18" translateX="346.0" translateY="120.0">
+ <location>
+ <axis tag="0000" value="0.3236"/>
+ <axis tag="0001" value="1.0"/>
+ </location>
+ </varComponent>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ butchered-hangul-serif
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ smarties-variable
+ </namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
+ 0000
+ </namerecord>
+ <namerecord nameID="257" platformID="3" platEncID="1" langID="0x409">
+ 0001
+ </namerecord>
+ <namerecord nameID="258" platformID="3" platEncID="1" langID="0x409">
+ 0002
+ </namerecord>
+ <namerecord nameID="259" platformID="3" platEncID="1" langID="0x409">
+ 0003
+ </namerecord>
+ <namerecord nameID="260" platformID="3" platEncID="1" langID="0x409">
+ 0004
+ </namerecord>
+ <namerecord nameID="261" platformID="3" platEncID="1" langID="0x409">
+ 0005
+ </namerecord>
+ <namerecord nameID="262" platformID="3" platEncID="1" langID="0x409">
+ 0006
+ </namerecord>
+ <namerecord nameID="263" platformID="3" platEncID="1" langID="0x409">
+ 0007
+ </namerecord>
+ <namerecord nameID="264" platformID="3" platEncID="1" langID="0x409">
+ Weight
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="3.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="0"/>
+ <underlineThickness value="0"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ </post>
+
+ <avar>
+ <version major="1" minor="0"/>
+ <segment axis="0000">
+ </segment>
+ <segment axis="0001">
+ </segment>
+ <segment axis="0002">
+ </segment>
+ <segment axis="0003">
+ </segment>
+ <segment axis="0004">
+ </segment>
+ <segment axis="0005">
+ </segment>
+ <segment axis="0006">
+ </segment>
+ <segment axis="0007">
+ </segment>
+ <segment axis="wght">
+ <mapping from="-1.0" to="-1.0"/>
+ <mapping from="0.0" to="0.0"/>
+ <mapping from="0.1429" to="0.095"/>
+ <mapping from="0.2857" to="0.21"/>
+ <mapping from="0.4286" to="0.36"/>
+ <mapping from="0.5714" to="0.51"/>
+ <mapping from="0.7143" to="0.73"/>
+ <mapping from="1.0" to="1.0"/>
+ </segment>
+ </avar>
+
+ <fvar>
+
+ <!-- 0000 -->
+ <Axis>
+ <AxisTag>0000</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>256</AxisNameID>
+ </Axis>
+
+ <!-- 0001 -->
+ <Axis>
+ <AxisTag>0001</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>257</AxisNameID>
+ </Axis>
+
+ <!-- 0002 -->
+ <Axis>
+ <AxisTag>0002</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>258</AxisNameID>
+ </Axis>
+
+ <!-- 0003 -->
+ <Axis>
+ <AxisTag>0003</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>259</AxisNameID>
+ </Axis>
+
+ <!-- 0004 -->
+ <Axis>
+ <AxisTag>0004</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>260</AxisNameID>
+ </Axis>
+
+ <!-- 0005 -->
+ <Axis>
+ <AxisTag>0005</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>261</AxisNameID>
+ </Axis>
+
+ <!-- 0006 -->
+ <Axis>
+ <AxisTag>0006</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>262</AxisNameID>
+ </Axis>
+
+ <!-- 0007 -->
+ <Axis>
+ <AxisTag>0007</AxisTag>
+ <Flags>0x1</Flags>
+ <MinValue>0.0</MinValue>
+ <DefaultValue>0.0</DefaultValue>
+ <MaxValue>1.0</MaxValue>
+ <AxisNameID>263</AxisNameID>
+ </Axis>
+
+ <!-- Weight -->
+ <Axis>
+ <AxisTag>wght</AxisTag>
+ <Flags>0x0</Flags>
+ <MinValue>200.0</MinValue>
+ <DefaultValue>200.0</DefaultValue>
+ <MaxValue>900.0</MaxValue>
+ <AxisNameID>264</AxisNameID>
+ </Axis>
+ </fvar>
+
+ <gvar>
+ <version value="1"/>
+ <reserved value="0"/>
+ <glyphVariations glyph="glyph00003">
+ <tuple>
+ <coord axis="0000" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-11"/>
+ <delta pt="2" x="-5" y="-11"/>
+ <delta pt="3" x="-24" y="-11"/>
+ <delta pt="4" x="-46" y="-12"/>
+ <delta pt="5" x="-67" y="-12"/>
+ <delta pt="6" x="-84" y="-12"/>
+ <delta pt="7" x="-87" y="-13"/>
+ <delta pt="8" x="-90" y="-13"/>
+ <delta pt="9" x="-98" y="-14"/>
+ <delta pt="10" x="-107" y="-16"/>
+ <delta pt="11" x="-110" y="-17"/>
+ <delta pt="12" x="-116" y="-15"/>
+ <delta pt="13" x="-128" y="-9"/>
+ <delta pt="14" x="-133" y="-6"/>
+ <delta pt="15" x="-134" y="-5"/>
+ <delta pt="16" x="-134" y="-4"/>
+ <delta pt="17" x="-134" y="-3"/>
+ <delta pt="18" x="-132" y="0"/>
+ <delta pt="19" x="-126" y="-1"/>
+ <delta pt="20" x="-112" y="-2"/>
+ <delta pt="21" x="-106" y="-2"/>
+ <delta pt="22" x="-102" y="-2"/>
+ <delta pt="23" x="-87" y="-2"/>
+ <delta pt="24" x="-65" y="-2"/>
+ <delta pt="25" x="-42" y="-1"/>
+ <delta pt="26" x="-21" y="-1"/>
+ <delta pt="27" x="-4" y="0"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="20" y="-13"/>
+ <delta pt="30" x="0" y="-11"/>
+ <delta pt="31" x="-1" y="0"/>
+ <delta pt="32" x="9" y="5"/>
+ <delta pt="33" x="10" y="5"/>
+ <delta pt="34" x="12" y="5"/>
+ <delta pt="35" x="13" y="5"/>
+ <delta pt="36" x="15" y="4"/>
+ <delta pt="37" x="19" y="2"/>
+ <delta pt="38" x="23" y="0"/>
+ <delta pt="39" x="24" y="-1"/>
+ <delta pt="40" x="25" y="-2"/>
+ <delta pt="41" x="25" y="-3"/>
+ <delta pt="42" x="24" y="-4"/>
+ <delta pt="43" x="20" y="-13"/>
+ <delta pt="44" x="-64" y="-129"/>
+ <delta pt="45" x="-68" y="-127"/>
+ <delta pt="46" x="-70" y="-127"/>
+ <delta pt="47" x="-69" y="-125"/>
+ <delta pt="48" x="-53" y="-108"/>
+ <delta pt="49" x="-26" y="-69"/>
+ <delta pt="50" x="-5" y="-29"/>
+ <delta pt="51" x="0" y="-11"/>
+ <delta pt="52" x="11" y="-7"/>
+ <delta pt="53" x="20" y="-13"/>
+ <delta pt="54" x="15" y="-32"/>
+ <delta pt="55" x="-9" y="-74"/>
+ <delta pt="56" x="-43" y="-113"/>
+ <delta pt="57" x="-62" y="-128"/>
+ <delta pt="58" x="-62" y="-128"/>
+ <delta pt="59" x="-64" y="-129"/>
+ <delta pt="60" x="-64" y="-129"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0001" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="5"/>
+ <delta pt="2" x="5" y="5"/>
+ <delta pt="3" x="22" y="5"/>
+ <delta pt="4" x="43" y="4"/>
+ <delta pt="5" x="62" y="5"/>
+ <delta pt="6" x="77" y="5"/>
+ <delta pt="7" x="80" y="5"/>
+ <delta pt="8" x="82" y="5"/>
+ <delta pt="9" x="87" y="5"/>
+ <delta pt="10" x="92" y="7"/>
+ <delta pt="11" x="93" y="7"/>
+ <delta pt="12" x="97" y="7"/>
+ <delta pt="13" x="102" y="4"/>
+ <delta pt="14" x="105" y="2"/>
+ <delta pt="15" x="105" y="2"/>
+ <delta pt="16" x="106" y="1"/>
+ <delta pt="17" x="105" y="1"/>
+ <delta pt="18" x="105" y="-1"/>
+ <delta pt="19" x="102" y="0"/>
+ <delta pt="20" x="94" y="2"/>
+ <delta pt="21" x="91" y="2"/>
+ <delta pt="22" x="89" y="2"/>
+ <delta pt="23" x="75" y="2"/>
+ <delta pt="24" x="56" y="1"/>
+ <delta pt="25" x="35" y="1"/>
+ <delta pt="26" x="16" y="1"/>
+ <delta pt="27" x="2" y="1"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="-6" y="6"/>
+ <delta pt="30" x="0" y="5"/>
+ <delta pt="31" x="1" y="0"/>
+ <delta pt="32" x="-1" y="-1"/>
+ <delta pt="33" x="-1" y="-1"/>
+ <delta pt="34" x="-2" y="-1"/>
+ <delta pt="35" x="-2" y="-1"/>
+ <delta pt="36" x="-3" y="-1"/>
+ <delta pt="37" x="-3" y="1"/>
+ <delta pt="38" x="-4" y="2"/>
+ <delta pt="39" x="-4" y="2"/>
+ <delta pt="40" x="-4" y="3"/>
+ <delta pt="41" x="-4" y="3"/>
+ <delta pt="42" x="-4" y="4"/>
+ <delta pt="43" x="-6" y="6"/>
+ <delta pt="44" x="-200" y="-57"/>
+ <delta pt="45" x="-195" y="-54"/>
+ <delta pt="46" x="-194" y="-52"/>
+ <delta pt="47" x="-192" y="-56"/>
+ <delta pt="48" x="-148" y="-49"/>
+ <delta pt="49" x="-73" y="-34"/>
+ <delta pt="50" x="-17" y="-12"/>
+ <delta pt="51" x="0" y="5"/>
+ <delta pt="52" x="-3" y="6"/>
+ <delta pt="53" x="-6" y="6"/>
+ <delta pt="54" x="-20" y="-11"/>
+ <delta pt="55" x="-78" y="-39"/>
+ <delta pt="56" x="-157" y="-57"/>
+ <delta pt="57" x="-202" y="-60"/>
+ <delta pt="58" x="-202" y="-59"/>
+ <delta pt="59" x="-201" y="-58"/>
+ <delta pt="60" x="-200" y="-57"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0002" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-6"/>
+ <delta pt="2" x="2" y="-6"/>
+ <delta pt="3" x="8" y="-6"/>
+ <delta pt="4" x="15" y="-6"/>
+ <delta pt="5" x="22" y="-6"/>
+ <delta pt="6" x="27" y="-7"/>
+ <delta pt="7" x="28" y="-7"/>
+ <delta pt="8" x="28" y="-7"/>
+ <delta pt="9" x="27" y="-7"/>
+ <delta pt="10" x="26" y="-7"/>
+ <delta pt="11" x="25" y="-7"/>
+ <delta pt="12" x="25" y="-7"/>
+ <delta pt="13" x="22" y="-4"/>
+ <delta pt="14" x="22" y="-4"/>
+ <delta pt="15" x="22" y="-3"/>
+ <delta pt="16" x="23" y="-3"/>
+ <delta pt="17" x="23" y="-3"/>
+ <delta pt="18" x="24" y="-2"/>
+ <delta pt="19" x="24" y="-2"/>
+ <delta pt="20" x="25" y="-2"/>
+ <delta pt="21" x="26" y="-2"/>
+ <delta pt="22" x="25" y="-2"/>
+ <delta pt="23" x="21" y="-2"/>
+ <delta pt="24" x="15" y="-1"/>
+ <delta pt="25" x="8" y="-1"/>
+ <delta pt="26" x="3" y="-1"/>
+ <delta pt="27" x="0" y="0"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="7" y="-6"/>
+ <delta pt="30" x="-1" y="-6"/>
+ <delta pt="31" x="-2" y="0"/>
+ <delta pt="32" x="4" y="3"/>
+ <delta pt="33" x="5" y="3"/>
+ <delta pt="34" x="6" y="3"/>
+ <delta pt="35" x="6" y="3"/>
+ <delta pt="36" x="6" y="2"/>
+ <delta pt="37" x="6" y="2"/>
+ <delta pt="38" x="6" y="2"/>
+ <delta pt="39" x="6" y="1"/>
+ <delta pt="40" x="6" y="1"/>
+ <delta pt="41" x="7" y="1"/>
+ <delta pt="42" x="7" y="0"/>
+ <delta pt="43" x="7" y="-6"/>
+ <delta pt="44" x="41" y="-54"/>
+ <delta pt="45" x="38" y="-54"/>
+ <delta pt="46" x="37" y="-54"/>
+ <delta pt="47" x="37" y="-52"/>
+ <delta pt="48" x="30" y="-44"/>
+ <delta pt="49" x="16" y="-26"/>
+ <delta pt="50" x="5" y="-10"/>
+ <delta pt="51" x="-1" y="-6"/>
+ <delta pt="52" x="3" y="-5"/>
+ <delta pt="53" x="7" y="-6"/>
+ <delta pt="54" x="13" y="-10"/>
+ <delta pt="55" x="25" y="-26"/>
+ <delta pt="56" x="37" y="-44"/>
+ <delta pt="57" x="43" y="-52"/>
+ <delta pt="58" x="43" y="-52"/>
+ <delta pt="59" x="42" y="-53"/>
+ <delta pt="60" x="41" y="-54"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0003" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="13"/>
+ <delta pt="2" x="0" y="13"/>
+ <delta pt="3" x="-1" y="13"/>
+ <delta pt="4" x="-3" y="13"/>
+ <delta pt="5" x="-4" y="14"/>
+ <delta pt="6" x="-6" y="15"/>
+ <delta pt="7" x="-7" y="15"/>
+ <delta pt="8" x="-5" y="15"/>
+ <delta pt="9" x="0" y="16"/>
+ <delta pt="10" x="6" y="17"/>
+ <delta pt="11" x="7" y="18"/>
+ <delta pt="12" x="11" y="17"/>
+ <delta pt="13" x="20" y="11"/>
+ <delta pt="14" x="24" y="8"/>
+ <delta pt="15" x="24" y="7"/>
+ <delta pt="16" x="24" y="7"/>
+ <delta pt="17" x="24" y="7"/>
+ <delta pt="18" x="21" y="2"/>
+ <delta pt="19" x="17" y="3"/>
+ <delta pt="20" x="8" y="3"/>
+ <delta pt="21" x="4" y="3"/>
+ <delta pt="22" x="3" y="3"/>
+ <delta pt="23" x="3" y="3"/>
+ <delta pt="24" x="4" y="3"/>
+ <delta pt="25" x="4" y="2"/>
+ <delta pt="26" x="4" y="2"/>
+ <delta pt="27" x="2" y="1"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="-19" y="13"/>
+ <delta pt="30" x="1" y="13"/>
+ <delta pt="31" x="2" y="1"/>
+ <delta pt="32" x="-9" y="-5"/>
+ <delta pt="33" x="-10" y="-6"/>
+ <delta pt="34" x="-13" y="-6"/>
+ <delta pt="35" x="-14" y="-5"/>
+ <delta pt="36" x="-15" y="-5"/>
+ <delta pt="37" x="-17" y="-3"/>
+ <delta pt="38" x="-18" y="-1"/>
+ <delta pt="39" x="-18" y="-1"/>
+ <delta pt="40" x="-18" y="0"/>
+ <delta pt="41" x="-19" y="2"/>
+ <delta pt="42" x="-19" y="3"/>
+ <delta pt="43" x="-19" y="13"/>
+ <delta pt="44" x="9" y="-22"/>
+ <delta pt="45" x="16" y="-22"/>
+ <delta pt="46" x="18" y="-23"/>
+ <delta pt="47" x="17" y="-26"/>
+ <delta pt="48" x="15" y="-19"/>
+ <delta pt="49" x="9" y="-5"/>
+ <delta pt="50" x="4" y="8"/>
+ <delta pt="51" x="1" y="13"/>
+ <delta pt="52" x="-10" y="9"/>
+ <delta pt="53" x="-19" y="13"/>
+ <delta pt="54" x="-16" y="11"/>
+ <delta pt="55" x="-8" y="-1"/>
+ <delta pt="56" x="1" y="-17"/>
+ <delta pt="57" x="5" y="-24"/>
+ <delta pt="58" x="6" y="-23"/>
+ <delta pt="59" x="8" y="-22"/>
+ <delta pt="60" x="9" y="-22"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-20"/>
+ <delta pt="2" x="1" y="-19"/>
+ <delta pt="3" x="8" y="-19"/>
+ <delta pt="4" x="19" y="-19"/>
+ <delta pt="5" x="29" y="-18"/>
+ <delta pt="6" x="36" y="-18"/>
+ <delta pt="7" x="36" y="-18"/>
+ <delta pt="8" x="35" y="-18"/>
+ <delta pt="9" x="32" y="-19"/>
+ <delta pt="10" x="29" y="-19"/>
+ <delta pt="11" x="27" y="-19"/>
+ <delta pt="12" x="24" y="-16"/>
+ <delta pt="13" x="17" y="-6"/>
+ <delta pt="14" x="16" y="-3"/>
+ <delta pt="15" x="15" y="-2"/>
+ <delta pt="16" x="15" y="-1"/>
+ <delta pt="17" x="15" y="1"/>
+ <delta pt="18" x="17" y="2"/>
+ <delta pt="19" x="21" y="2"/>
+ <delta pt="20" x="27" y="2"/>
+ <delta pt="21" x="30" y="2"/>
+ <delta pt="22" x="31" y="2"/>
+ <delta pt="23" x="27" y="2"/>
+ <delta pt="24" x="20" y="2"/>
+ <delta pt="25" x="11" y="1"/>
+ <delta pt="26" x="4" y="1"/>
+ <delta pt="27" x="0" y="1"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="30" y="-18"/>
+ <delta pt="30" x="-6" y="-19"/>
+ <delta pt="31" x="2" y="1"/>
+ <delta pt="32" x="14" y="4"/>
+ <delta pt="33" x="15" y="4"/>
+ <delta pt="34" x="17" y="4"/>
+ <delta pt="35" x="18" y="4"/>
+ <delta pt="36" x="20" y="3"/>
+ <delta pt="37" x="27" y="0"/>
+ <delta pt="38" x="34" y="-2"/>
+ <delta pt="39" x="36" y="-3"/>
+ <delta pt="40" x="36" y="-3"/>
+ <delta pt="41" x="36" y="-5"/>
+ <delta pt="42" x="36" y="-5"/>
+ <delta pt="43" x="30" y="-18"/>
+ <delta pt="44" x="10" y="-2"/>
+ <delta pt="45" x="-7" y="-1"/>
+ <delta pt="46" x="-10" y="-1"/>
+ <delta pt="47" x="-10" y="4"/>
+ <delta pt="48" x="-8" y="-2"/>
+ <delta pt="49" x="-7" y="-11"/>
+ <delta pt="50" x="-6" y="-17"/>
+ <delta pt="51" x="-6" y="-19"/>
+ <delta pt="52" x="12" y="-13"/>
+ <delta pt="53" x="30" y="-17"/>
+ <delta pt="54" x="28" y="-15"/>
+ <delta pt="55" x="23" y="-10"/>
+ <delta pt="56" x="18" y="-4"/>
+ <delta pt="57" x="16" y="0"/>
+ <delta pt="58" x="14" y="-1"/>
+ <delta pt="59" x="12" y="-2"/>
+ <delta pt="60" x="10" y="-2"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0000" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-14"/>
+ <delta pt="2" x="1" y="-14"/>
+ <delta pt="3" x="7" y="-14"/>
+ <delta pt="4" x="14" y="-13"/>
+ <delta pt="5" x="22" y="-14"/>
+ <delta pt="6" x="27" y="-14"/>
+ <delta pt="7" x="27" y="-13"/>
+ <delta pt="8" x="26" y="-14"/>
+ <delta pt="9" x="24" y="-13"/>
+ <delta pt="10" x="22" y="-14"/>
+ <delta pt="11" x="21" y="-14"/>
+ <delta pt="12" x="19" y="-12"/>
+ <delta pt="13" x="14" y="-6"/>
+ <delta pt="14" x="13" y="-3"/>
+ <delta pt="15" x="13" y="-2"/>
+ <delta pt="16" x="12" y="-1"/>
+ <delta pt="17" x="13" y="0"/>
+ <delta pt="18" x="14" y="1"/>
+ <delta pt="19" x="16" y="1"/>
+ <delta pt="20" x="21" y="1"/>
+ <delta pt="21" x="23" y="1"/>
+ <delta pt="22" x="23" y="1"/>
+ <delta pt="23" x="20" y="1"/>
+ <delta pt="24" x="14" y="1"/>
+ <delta pt="25" x="9" y="1"/>
+ <delta pt="26" x="4" y="0"/>
+ <delta pt="27" x="0" y="0"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="20" y="-11"/>
+ <delta pt="30" x="-9" y="-14"/>
+ <delta pt="31" x="1" y="0"/>
+ <delta pt="32" x="6" y="2"/>
+ <delta pt="33" x="7" y="3"/>
+ <delta pt="34" x="9" y="3"/>
+ <delta pt="35" x="9" y="2"/>
+ <delta pt="36" x="11" y="2"/>
+ <delta pt="37" x="16" y="-1"/>
+ <delta pt="38" x="21" y="-3"/>
+ <delta pt="39" x="22" y="-4"/>
+ <delta pt="40" x="22" y="-5"/>
+ <delta pt="41" x="23" y="-6"/>
+ <delta pt="42" x="22" y="-6"/>
+ <delta pt="43" x="20" y="-11"/>
+ <delta pt="44" x="15" y="-2"/>
+ <delta pt="45" x="7" y="1"/>
+ <delta pt="46" x="6" y="2"/>
+ <delta pt="47" x="7" y="4"/>
+ <delta pt="48" x="3" y="0"/>
+ <delta pt="49" x="-2" y="-6"/>
+ <delta pt="50" x="-8" y="-11"/>
+ <delta pt="51" x="-9" y="-14"/>
+ <delta pt="52" x="5" y="-9"/>
+ <delta pt="53" x="20" y="-11"/>
+ <delta pt="54" x="20" y="-12"/>
+ <delta pt="55" x="19" y="-9"/>
+ <delta pt="56" x="19" y="-4"/>
+ <delta pt="57" x="19" y="-1"/>
+ <delta pt="58" x="18" y="-2"/>
+ <delta pt="59" x="16" y="-2"/>
+ <delta pt="60" x="15" y="-2"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0001" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="8"/>
+ <delta pt="2" x="1" y="8"/>
+ <delta pt="3" x="-1" y="8"/>
+ <delta pt="4" x="-5" y="9"/>
+ <delta pt="5" x="-9" y="8"/>
+ <delta pt="6" x="-11" y="9"/>
+ <delta pt="7" x="-11" y="9"/>
+ <delta pt="8" x="-10" y="9"/>
+ <delta pt="9" x="-8" y="10"/>
+ <delta pt="10" x="-6" y="9"/>
+ <delta pt="11" x="-5" y="9"/>
+ <delta pt="12" x="-4" y="8"/>
+ <delta pt="13" x="-1" y="4"/>
+ <delta pt="14" x="-1" y="3"/>
+ <delta pt="15" x="0" y="3"/>
+ <delta pt="16" x="-1" y="3"/>
+ <delta pt="17" x="0" y="2"/>
+ <delta pt="18" x="-1" y="0"/>
+ <delta pt="19" x="-3" y="0"/>
+ <delta pt="20" x="-6" y="0"/>
+ <delta pt="21" x="-7" y="0"/>
+ <delta pt="22" x="-8" y="0"/>
+ <delta pt="23" x="-7" y="0"/>
+ <delta pt="24" x="-4" y="0"/>
+ <delta pt="25" x="-1" y="0"/>
+ <delta pt="26" x="1" y="0"/>
+ <delta pt="27" x="2" y="0"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="-11" y="6"/>
+ <delta pt="30" x="-7" y="8"/>
+ <delta pt="31" x="0" y="1"/>
+ <delta pt="32" x="-8" y="-2"/>
+ <delta pt="33" x="-9" y="-3"/>
+ <delta pt="34" x="-8" y="-3"/>
+ <delta pt="35" x="-9" y="-2"/>
+ <delta pt="36" x="-9" y="-2"/>
+ <delta pt="37" x="-12" y="-2"/>
+ <delta pt="38" x="-15" y="-3"/>
+ <delta pt="39" x="-16" y="-3"/>
+ <delta pt="40" x="-16" y="-4"/>
+ <delta pt="41" x="-16" y="-3"/>
+ <delta pt="42" x="-16" y="-3"/>
+ <delta pt="43" x="-11" y="6"/>
+ <delta pt="44" x="7" y="1"/>
+ <delta pt="45" x="24" y="6"/>
+ <delta pt="46" x="27" y="8"/>
+ <delta pt="47" x="28" y="4"/>
+ <delta pt="48" x="19" y="8"/>
+ <delta pt="49" x="6" y="11"/>
+ <delta pt="50" x="-4" y="10"/>
+ <delta pt="51" x="-7" y="8"/>
+ <delta pt="52" x="-9" y="5"/>
+ <delta pt="53" x="-11" y="6"/>
+ <delta pt="54" x="-11" y="4"/>
+ <delta pt="55" x="-5" y="1"/>
+ <delta pt="56" x="3" y="0"/>
+ <delta pt="57" x="5" y="-1"/>
+ <delta pt="58" x="5" y="0"/>
+ <delta pt="59" x="5" y="1"/>
+ <delta pt="60" x="7" y="1"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0002" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-7"/>
+ <delta pt="2" x="3" y="-8"/>
+ <delta pt="3" x="9" y="-8"/>
+ <delta pt="4" x="14" y="-8"/>
+ <delta pt="5" x="19" y="-8"/>
+ <delta pt="6" x="25" y="-8"/>
+ <delta pt="7" x="26" y="-8"/>
+ <delta pt="8" x="24" y="-8"/>
+ <delta pt="9" x="22" y="-8"/>
+ <delta pt="10" x="19" y="-8"/>
+ <delta pt="11" x="19" y="-9"/>
+ <delta pt="12" x="17" y="-7"/>
+ <delta pt="13" x="15" y="-5"/>
+ <delta pt="14" x="14" y="-3"/>
+ <delta pt="15" x="13" y="-3"/>
+ <delta pt="16" x="12" y="0"/>
+ <delta pt="17" x="13" y="-1"/>
+ <delta pt="18" x="14" y="2"/>
+ <delta pt="19" x="15" y="1"/>
+ <delta pt="20" x="18" y="1"/>
+ <delta pt="21" x="19" y="1"/>
+ <delta pt="22" x="18" y="1"/>
+ <delta pt="23" x="15" y="1"/>
+ <delta pt="24" x="12" y="0"/>
+ <delta pt="25" x="9" y="0"/>
+ <delta pt="26" x="5" y="0"/>
+ <delta pt="27" x="2" y="0"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="8" y="-6"/>
+ <delta pt="30" x="-2" y="-8"/>
+ <delta pt="31" x="3" y="0"/>
+ <delta pt="32" x="0" y="3"/>
+ <delta pt="33" x="0" y="3"/>
+ <delta pt="34" x="2" y="3"/>
+ <delta pt="35" x="2" y="3"/>
+ <delta pt="36" x="3" y="3"/>
+ <delta pt="37" x="5" y="-1"/>
+ <delta pt="38" x="8" y="-4"/>
+ <delta pt="39" x="8" y="-4"/>
+ <delta pt="40" x="9" y="-5"/>
+ <delta pt="41" x="9" y="-6"/>
+ <delta pt="42" x="9" y="-7"/>
+ <delta pt="43" x="8" y="-6"/>
+ <delta pt="44" x="11" y="-5"/>
+ <delta pt="45" x="4" y="-5"/>
+ <delta pt="46" x="3" y="-5"/>
+ <delta pt="47" x="3" y="-3"/>
+ <delta pt="48" x="1" y="-6"/>
+ <delta pt="49" x="-1" y="-7"/>
+ <delta pt="50" x="-3" y="-8"/>
+ <delta pt="51" x="-2" y="-8"/>
+ <delta pt="52" x="3" y="-5"/>
+ <delta pt="53" x="8" y="-6"/>
+ <delta pt="54" x="10" y="-6"/>
+ <delta pt="55" x="11" y="-6"/>
+ <delta pt="56" x="12" y="-6"/>
+ <delta pt="57" x="13" y="-5"/>
+ <delta pt="58" x="12" y="-5"/>
+ <delta pt="59" x="11" y="-5"/>
+ <delta pt="60" x="11" y="-5"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0003" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="14"/>
+ <delta pt="2" x="-4" y="14"/>
+ <delta pt="3" x="-14" y="13"/>
+ <delta pt="4" x="-24" y="13"/>
+ <delta pt="5" x="-32" y="12"/>
+ <delta pt="6" x="-38" y="11"/>
+ <delta pt="7" x="-40" y="11"/>
+ <delta pt="8" x="-39" y="11"/>
+ <delta pt="9" x="-37" y="11"/>
+ <delta pt="10" x="-35" y="11"/>
+ <delta pt="11" x="-33" y="12"/>
+ <delta pt="12" x="-30" y="9"/>
+ <delta pt="13" x="-26" y="1"/>
+ <delta pt="14" x="-24" y="-2"/>
+ <delta pt="15" x="-23" y="-2"/>
+ <delta pt="16" x="-23" y="-5"/>
+ <delta pt="17" x="-24" y="-6"/>
+ <delta pt="18" x="-25" y="-5"/>
+ <delta pt="19" x="-27" y="-4"/>
+ <delta pt="20" x="-31" y="-3"/>
+ <delta pt="21" x="-33" y="-3"/>
+ <delta pt="22" x="-33" y="-3"/>
+ <delta pt="23" x="-29" y="-2"/>
+ <delta pt="24" x="-23" y="-2"/>
+ <delta pt="25" x="-16" y="-2"/>
+ <delta pt="26" x="-8" y="-1"/>
+ <delta pt="27" x="-2" y="-1"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="-24" y="17"/>
+ <delta pt="30" x="8" y="14"/>
+ <delta pt="31" x="-4" y="-1"/>
+ <delta pt="32" x="-11" y="-4"/>
+ <delta pt="33" x="-11" y="-4"/>
+ <delta pt="34" x="-13" y="-4"/>
+ <delta pt="35" x="-14" y="-4"/>
+ <delta pt="36" x="-15" y="-2"/>
+ <delta pt="37" x="-21" y="3"/>
+ <delta pt="38" x="-27" y="7"/>
+ <delta pt="39" x="-29" y="8"/>
+ <delta pt="40" x="-30" y="9"/>
+ <delta pt="41" x="-30" y="11"/>
+ <delta pt="42" x="-29" y="12"/>
+ <delta pt="43" x="-24" y="17"/>
+ <delta pt="44" x="-8" y="6"/>
+ <delta pt="45" x="0" y="2"/>
+ <delta pt="46" x="2" y="3"/>
+ <delta pt="47" x="2" y="-1"/>
+ <delta pt="48" x="3" y="-1"/>
+ <delta pt="49" x="7" y="5"/>
+ <delta pt="50" x="8" y="11"/>
+ <delta pt="51" x="8" y="14"/>
+ <delta pt="52" x="-10" y="10"/>
+ <delta pt="53" x="-24" y="17"/>
+ <delta pt="54" x="-20" y="15"/>
+ <delta pt="55" x="-15" y="10"/>
+ <delta pt="56" x="-12" y="7"/>
+ <delta pt="57" x="-13" y="5"/>
+ <delta pt="58" x="-10" y="6"/>
+ <delta pt="59" x="-8" y="6"/>
+ <delta pt="60" x="-8" y="6"/>
+ <delta pt="61" x="0" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ <delta pt="64" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="glyph00004">
+ <tuple>
+ <coord axis="0000" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="-34" y="0"/>
+ <delta pt="2" x="-34" y="6"/>
+ <delta pt="3" x="-19" y="6"/>
+ <delta pt="4" x="-13" y="10"/>
+ <delta pt="5" x="-9" y="10"/>
+ <delta pt="6" x="0" y="9"/>
+ <delta pt="7" x="3" y="9"/>
+ <delta pt="8" x="5" y="8"/>
+ <delta pt="9" x="5" y="6"/>
+ <delta pt="10" x="5" y="4"/>
+ <delta pt="11" x="4" y="1"/>
+ <delta pt="12" x="3" y="1"/>
+ <delta pt="13" x="2" y="0"/>
+ <delta pt="14" x="1" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="-36" y="-41"/>
+ <delta pt="17" x="-41" y="-41"/>
+ <delta pt="18" x="-42" y="-38"/>
+ <delta pt="19" x="-42" y="-32"/>
+ <delta pt="20" x="-42" y="-25"/>
+ <delta pt="21" x="-42" y="-18"/>
+ <delta pt="22" x="-42" y="-16"/>
+ <delta pt="23" x="-42" y="52"/>
+ <delta pt="24" x="-44" y="55"/>
+ <delta pt="25" x="-53" y="60"/>
+ <delta pt="26" x="-58" y="61"/>
+ <delta pt="27" x="-57" y="65"/>
+ <delta pt="28" x="-57" y="65"/>
+ <delta pt="29" x="-56" y="66"/>
+ <delta pt="30" x="-55" y="66"/>
+ <delta pt="31" x="-52" y="65"/>
+ <delta pt="32" x="-44" y="64"/>
+ <delta pt="33" x="-36" y="62"/>
+ <delta pt="34" x="-34" y="61"/>
+ <delta pt="35" x="-32" y="61"/>
+ <delta pt="36" x="-32" y="58"/>
+ <delta pt="37" x="-32" y="49"/>
+ <delta pt="38" x="-32" y="-12"/>
+ <delta pt="39" x="-32" y="-17"/>
+ <delta pt="40" x="-33" y="-24"/>
+ <delta pt="41" x="-33" y="-32"/>
+ <delta pt="42" x="-35" y="-38"/>
+ <delta pt="43" x="-36" y="-41"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0001" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="28" y="1"/>
+ <delta pt="2" x="28" y="-4"/>
+ <delta pt="3" x="16" y="-5"/>
+ <delta pt="4" x="11" y="-7"/>
+ <delta pt="5" x="8" y="-7"/>
+ <delta pt="6" x="0" y="-7"/>
+ <delta pt="7" x="-2" y="-7"/>
+ <delta pt="8" x="-4" y="-6"/>
+ <delta pt="9" x="-4" y="-4"/>
+ <delta pt="10" x="-4" y="-3"/>
+ <delta pt="11" x="-3" y="0"/>
+ <delta pt="12" x="-2" y="0"/>
+ <delta pt="13" x="-1" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="33" y="-154"/>
+ <delta pt="17" x="37" y="-154"/>
+ <delta pt="18" x="37" y="-153"/>
+ <delta pt="19" x="38" y="-152"/>
+ <delta pt="20" x="38" y="-150"/>
+ <delta pt="21" x="38" y="-150"/>
+ <delta pt="22" x="38" y="-150"/>
+ <delta pt="23" x="38" y="25"/>
+ <delta pt="24" x="40" y="23"/>
+ <delta pt="25" x="46" y="19"/>
+ <delta pt="26" x="51" y="18"/>
+ <delta pt="27" x="50" y="15"/>
+ <delta pt="28" x="50" y="15"/>
+ <delta pt="29" x="49" y="15"/>
+ <delta pt="30" x="48" y="15"/>
+ <delta pt="31" x="46" y="15"/>
+ <delta pt="32" x="40" y="16"/>
+ <delta pt="33" x="33" y="18"/>
+ <delta pt="34" x="31" y="18"/>
+ <delta pt="35" x="30" y="18"/>
+ <delta pt="36" x="30" y="21"/>
+ <delta pt="37" x="30" y="28"/>
+ <delta pt="38" x="30" y="-153"/>
+ <delta pt="39" x="30" y="-149"/>
+ <delta pt="40" x="30" y="-146"/>
+ <delta pt="41" x="31" y="-148"/>
+ <delta pt="42" x="32" y="-152"/>
+ <delta pt="43" x="33" y="-154"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0002" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="-4" y="0"/>
+ <delta pt="2" x="-4" y="1"/>
+ <delta pt="3" x="-2" y="1"/>
+ <delta pt="4" x="-2" y="1"/>
+ <delta pt="5" x="-1" y="2"/>
+ <delta pt="6" x="0" y="1"/>
+ <delta pt="7" x="1" y="1"/>
+ <delta pt="8" x="1" y="1"/>
+ <delta pt="9" x="1" y="1"/>
+ <delta pt="10" x="1" y="1"/>
+ <delta pt="11" x="1" y="0"/>
+ <delta pt="12" x="1" y="0"/>
+ <delta pt="13" x="1" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="-12" y="-20"/>
+ <delta pt="17" x="-13" y="-20"/>
+ <delta pt="18" x="-13" y="-20"/>
+ <delta pt="19" x="-13" y="-19"/>
+ <delta pt="20" x="-13" y="-19"/>
+ <delta pt="21" x="-13" y="-18"/>
+ <delta pt="22" x="-13" y="-18"/>
+ <delta pt="23" x="-13" y="-19"/>
+ <delta pt="24" x="-13" y="-19"/>
+ <delta pt="25" x="-14" y="-18"/>
+ <delta pt="26" x="-15" y="-18"/>
+ <delta pt="27" x="-15" y="-18"/>
+ <delta pt="28" x="-15" y="-18"/>
+ <delta pt="29" x="-15" y="-18"/>
+ <delta pt="30" x="-14" y="-18"/>
+ <delta pt="31" x="-14" y="-18"/>
+ <delta pt="32" x="-13" y="-18"/>
+ <delta pt="33" x="-12" y="-18"/>
+ <delta pt="34" x="-12" y="-18"/>
+ <delta pt="35" x="-11" y="-18"/>
+ <delta pt="36" x="-11" y="-19"/>
+ <delta pt="37" x="-11" y="-20"/>
+ <delta pt="38" x="-11" y="-17"/>
+ <delta pt="39" x="-11" y="-18"/>
+ <delta pt="40" x="-11" y="-19"/>
+ <delta pt="41" x="-11" y="-19"/>
+ <delta pt="42" x="-12" y="-20"/>
+ <delta pt="43" x="-12" y="-20"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0003" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="2" y="0"/>
+ <delta pt="2" x="2" y="0"/>
+ <delta pt="3" x="1" y="0"/>
+ <delta pt="4" x="1" y="0"/>
+ <delta pt="5" x="1" y="0"/>
+ <delta pt="6" x="0" y="0"/>
+ <delta pt="7" x="0" y="0"/>
+ <delta pt="8" x="0" y="0"/>
+ <delta pt="9" x="0" y="0"/>
+ <delta pt="10" x="0" y="0"/>
+ <delta pt="11" x="0" y="0"/>
+ <delta pt="12" x="0" y="0"/>
+ <delta pt="13" x="0" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="-1" y="-27"/>
+ <delta pt="17" x="-1" y="-27"/>
+ <delta pt="18" x="-1" y="-28"/>
+ <delta pt="19" x="-1" y="-29"/>
+ <delta pt="20" x="-1" y="-29"/>
+ <delta pt="21" x="-1" y="-30"/>
+ <delta pt="22" x="-1" y="-30"/>
+ <delta pt="23" x="-1" y="-22"/>
+ <delta pt="24" x="-1" y="-22"/>
+ <delta pt="25" x="-1" y="-23"/>
+ <delta pt="26" x="0" y="-23"/>
+ <delta pt="27" x="0" y="-23"/>
+ <delta pt="28" x="0" y="-23"/>
+ <delta pt="29" x="0" y="-23"/>
+ <delta pt="30" x="0" y="-23"/>
+ <delta pt="31" x="0" y="-23"/>
+ <delta pt="32" x="-1" y="-23"/>
+ <delta pt="33" x="-2" y="-23"/>
+ <delta pt="34" x="-2" y="-23"/>
+ <delta pt="35" x="-2" y="-23"/>
+ <delta pt="36" x="-2" y="-23"/>
+ <delta pt="37" x="-2" y="-22"/>
+ <delta pt="38" x="-2" y="-30"/>
+ <delta pt="39" x="-2" y="-30"/>
+ <delta pt="40" x="-2" y="-30"/>
+ <delta pt="41" x="-2" y="-29"/>
+ <delta pt="42" x="-1" y="-28"/>
+ <delta pt="43" x="-1" y="-27"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0004" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="15" y="0"/>
+ <delta pt="2" x="15" y="-2"/>
+ <delta pt="3" x="8" y="-2"/>
+ <delta pt="4" x="6" y="-4"/>
+ <delta pt="5" x="4" y="-4"/>
+ <delta pt="6" x="0" y="-4"/>
+ <delta pt="7" x="-1" y="-3"/>
+ <delta pt="8" x="-2" y="-3"/>
+ <delta pt="9" x="-2" y="-2"/>
+ <delta pt="10" x="-2" y="-1"/>
+ <delta pt="11" x="-1" y="0"/>
+ <delta pt="12" x="-1" y="0"/>
+ <delta pt="13" x="-1" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="3" y="2"/>
+ <delta pt="17" x="6" y="2"/>
+ <delta pt="18" x="6" y="2"/>
+ <delta pt="19" x="6" y="0"/>
+ <delta pt="20" x="6" y="-2"/>
+ <delta pt="21" x="6" y="-3"/>
+ <delta pt="22" x="6" y="-4"/>
+ <delta pt="23" x="6" y="7"/>
+ <delta pt="24" x="7" y="6"/>
+ <delta pt="25" x="11" y="4"/>
+ <delta pt="26" x="13" y="4"/>
+ <delta pt="27" x="13" y="2"/>
+ <delta pt="28" x="12" y="2"/>
+ <delta pt="29" x="12" y="2"/>
+ <delta pt="30" x="12" y="2"/>
+ <delta pt="31" x="11" y="2"/>
+ <delta pt="32" x="7" y="2"/>
+ <delta pt="33" x="4" y="3"/>
+ <delta pt="34" x="3" y="4"/>
+ <delta pt="35" x="2" y="4"/>
+ <delta pt="36" x="2" y="5"/>
+ <delta pt="37" x="2" y="9"/>
+ <delta pt="38" x="2" y="-5"/>
+ <delta pt="39" x="2" y="-4"/>
+ <delta pt="40" x="2" y="-1"/>
+ <delta pt="41" x="2" y="1"/>
+ <delta pt="42" x="3" y="2"/>
+ <delta pt="43" x="3" y="2"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="19" y="-2"/>
+ <delta pt="2" x="19" y="25"/>
+ <delta pt="3" x="12" y="26"/>
+ <delta pt="4" x="11" y="28"/>
+ <delta pt="5" x="9" y="28"/>
+ <delta pt="6" x="5" y="28"/>
+ <delta pt="7" x="5" y="28"/>
+ <delta pt="8" x="11" y="27"/>
+ <delta pt="9" x="12" y="22"/>
+ <delta pt="10" x="13" y="19"/>
+ <delta pt="11" x="13" y="10"/>
+ <delta pt="12" x="12" y="7"/>
+ <delta pt="13" x="10" y="3"/>
+ <delta pt="14" x="4" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="16" y="-5"/>
+ <delta pt="17" x="-8" y="-5"/>
+ <delta pt="18" x="-9" y="-5"/>
+ <delta pt="19" x="-11" y="-5"/>
+ <delta pt="20" x="-14" y="-4"/>
+ <delta pt="21" x="-15" y="-4"/>
+ <delta pt="22" x="-15" y="-5"/>
+ <delta pt="23" x="-15" y="-11"/>
+ <delta pt="24" x="-11" y="-12"/>
+ <delta pt="25" x="-5" y="-10"/>
+ <delta pt="26" x="-2" y="-10"/>
+ <delta pt="27" x="3" y="1"/>
+ <delta pt="28" x="4" y="2"/>
+ <delta pt="29" x="6" y="3"/>
+ <delta pt="30" x="8" y="3"/>
+ <delta pt="31" x="11" y="3"/>
+ <delta pt="32" x="18" y="1"/>
+ <delta pt="33" x="23" y="-1"/>
+ <delta pt="34" x="25" y="-3"/>
+ <delta pt="35" x="28" y="-4"/>
+ <delta pt="36" x="27" y="-8"/>
+ <delta pt="37" x="26" y="-10"/>
+ <delta pt="38" x="26" y="-3"/>
+ <delta pt="39" x="26" y="1"/>
+ <delta pt="40" x="24" y="2"/>
+ <delta pt="41" x="20" y="-2"/>
+ <delta pt="42" x="17" y="-5"/>
+ <delta pt="43" x="16" y="-5"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0000" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="2" y="0"/>
+ <delta pt="2" x="2" y="9"/>
+ <delta pt="3" x="1" y="9"/>
+ <delta pt="4" x="1" y="10"/>
+ <delta pt="5" x="1" y="10"/>
+ <delta pt="6" x="2" y="10"/>
+ <delta pt="7" x="2" y="10"/>
+ <delta pt="8" x="4" y="10"/>
+ <delta pt="9" x="4" y="8"/>
+ <delta pt="10" x="4" y="6"/>
+ <delta pt="11" x="4" y="3"/>
+ <delta pt="12" x="4" y="2"/>
+ <delta pt="13" x="4" y="1"/>
+ <delta pt="14" x="1" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="2" y="6"/>
+ <delta pt="17" x="-7" y="6"/>
+ <delta pt="18" x="-7" y="5"/>
+ <delta pt="19" x="-8" y="8"/>
+ <delta pt="20" x="-9" y="10"/>
+ <delta pt="21" x="-9" y="12"/>
+ <delta pt="22" x="-9" y="13"/>
+ <delta pt="23" x="-9" y="-1"/>
+ <delta pt="24" x="-8" y="-1"/>
+ <delta pt="25" x="-7" y="1"/>
+ <delta pt="26" x="-7" y="1"/>
+ <delta pt="27" x="-5" y="5"/>
+ <delta pt="28" x="-4" y="5"/>
+ <delta pt="29" x="-4" y="6"/>
+ <delta pt="30" x="-4" y="6"/>
+ <delta pt="31" x="-2" y="6"/>
+ <delta pt="32" x="2" y="5"/>
+ <delta pt="33" x="5" y="4"/>
+ <delta pt="34" x="6" y="4"/>
+ <delta pt="35" x="7" y="2"/>
+ <delta pt="36" x="7" y="1"/>
+ <delta pt="37" x="7" y="-1"/>
+ <delta pt="38" x="7" y="15"/>
+ <delta pt="39" x="7" y="16"/>
+ <delta pt="40" x="6" y="13"/>
+ <delta pt="41" x="4" y="10"/>
+ <delta pt="42" x="3" y="7"/>
+ <delta pt="43" x="2" y="6"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0001" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="2" y="1"/>
+ <delta pt="2" x="2" y="-7"/>
+ <delta pt="3" x="2" y="-7"/>
+ <delta pt="4" x="2" y="-8"/>
+ <delta pt="5" x="1" y="-8"/>
+ <delta pt="6" x="-1" y="-8"/>
+ <delta pt="7" x="-1" y="-8"/>
+ <delta pt="8" x="-2" y="-7"/>
+ <delta pt="9" x="-3" y="-6"/>
+ <delta pt="10" x="-3" y="-5"/>
+ <delta pt="11" x="-2" y="-2"/>
+ <delta pt="12" x="-3" y="-1"/>
+ <delta pt="13" x="-2" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="0" y="-6"/>
+ <delta pt="17" x="10" y="-6"/>
+ <delta pt="18" x="11" y="-7"/>
+ <delta pt="19" x="11" y="-1"/>
+ <delta pt="20" x="12" y="7"/>
+ <delta pt="21" x="12" y="16"/>
+ <delta pt="22" x="12" y="18"/>
+ <delta pt="23" x="12" y="7"/>
+ <delta pt="24" x="11" y="8"/>
+ <delta pt="25" x="11" y="6"/>
+ <delta pt="26" x="10" y="5"/>
+ <delta pt="27" x="9" y="2"/>
+ <delta pt="28" x="9" y="1"/>
+ <delta pt="29" x="8" y="1"/>
+ <delta pt="30" x="8" y="1"/>
+ <delta pt="31" x="5" y="1"/>
+ <delta pt="32" x="2" y="2"/>
+ <delta pt="33" x="0" y="2"/>
+ <delta pt="34" x="-1" y="3"/>
+ <delta pt="35" x="-2" y="3"/>
+ <delta pt="36" x="-2" y="5"/>
+ <delta pt="37" x="-2" y="7"/>
+ <delta pt="38" x="-2" y="22"/>
+ <delta pt="39" x="-2" y="16"/>
+ <delta pt="40" x="0" y="7"/>
+ <delta pt="41" x="1" y="2"/>
+ <delta pt="42" x="1" y="-3"/>
+ <delta pt="43" x="0" y="-6"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0002" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="-8" y="0"/>
+ <delta pt="2" x="-8" y="2"/>
+ <delta pt="3" x="-5" y="1"/>
+ <delta pt="4" x="-5" y="2"/>
+ <delta pt="5" x="-3" y="2"/>
+ <delta pt="6" x="0" y="2"/>
+ <delta pt="7" x="0" y="2"/>
+ <delta pt="8" x="0" y="2"/>
+ <delta pt="9" x="0" y="1"/>
+ <delta pt="10" x="0" y="1"/>
+ <delta pt="11" x="0" y="0"/>
+ <delta pt="12" x="0" y="0"/>
+ <delta pt="13" x="0" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="-5" y="-3"/>
+ <delta pt="17" x="-8" y="-3"/>
+ <delta pt="18" x="-9" y="-3"/>
+ <delta pt="19" x="-9" y="-3"/>
+ <delta pt="20" x="-9" y="-5"/>
+ <delta pt="21" x="-9" y="-6"/>
+ <delta pt="22" x="-9" y="-6"/>
+ <delta pt="23" x="-9" y="-14"/>
+ <delta pt="24" x="-9" y="-14"/>
+ <delta pt="25" x="-11" y="-12"/>
+ <delta pt="26" x="-12" y="-11"/>
+ <delta pt="27" x="-12" y="-11"/>
+ <delta pt="28" x="-12" y="-11"/>
+ <delta pt="29" x="-12" y="-10"/>
+ <delta pt="30" x="-13" y="-10"/>
+ <delta pt="31" x="-11" y="-10"/>
+ <delta pt="32" x="-8" y="-11"/>
+ <delta pt="33" x="-5" y="-11"/>
+ <delta pt="34" x="-4" y="-11"/>
+ <delta pt="35" x="-4" y="-12"/>
+ <delta pt="36" x="-4" y="-12"/>
+ <delta pt="37" x="-4" y="-15"/>
+ <delta pt="38" x="-4" y="-6"/>
+ <delta pt="39" x="-4" y="-5"/>
+ <delta pt="40" x="-5" y="-5"/>
+ <delta pt="41" x="-5" y="-4"/>
+ <delta pt="42" x="-5" y="-3"/>
+ <delta pt="43" x="-5" y="-3"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0003" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="-4" y="0"/>
+ <delta pt="2" x="-4" y="0"/>
+ <delta pt="3" x="-3" y="0"/>
+ <delta pt="4" x="-3" y="0"/>
+ <delta pt="5" x="-2" y="0"/>
+ <delta pt="6" x="0" y="0"/>
+ <delta pt="7" x="0" y="0"/>
+ <delta pt="8" x="-1" y="0"/>
+ <delta pt="9" x="0" y="0"/>
+ <delta pt="10" x="0" y="0"/>
+ <delta pt="11" x="-1" y="0"/>
+ <delta pt="12" x="-1" y="0"/>
+ <delta pt="13" x="0" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="-1" y="46"/>
+ <delta pt="17" x="0" y="46"/>
+ <delta pt="18" x="0" y="47"/>
+ <delta pt="19" x="0" y="53"/>
+ <delta pt="20" x="0" y="58"/>
+ <delta pt="21" x="0" y="64"/>
+ <delta pt="22" x="0" y="66"/>
+ <delta pt="23" x="0" y="38"/>
+ <delta pt="24" x="0" y="38"/>
+ <delta pt="25" x="-1" y="39"/>
+ <delta pt="26" x="-1" y="39"/>
+ <delta pt="27" x="-2" y="39"/>
+ <delta pt="28" x="-2" y="38"/>
+ <delta pt="29" x="-2" y="39"/>
+ <delta pt="30" x="-2" y="39"/>
+ <delta pt="31" x="-2" y="39"/>
+ <delta pt="32" x="-1" y="39"/>
+ <delta pt="33" x="0" y="39"/>
+ <delta pt="34" x="0" y="39"/>
+ <delta pt="35" x="0" y="39"/>
+ <delta pt="36" x="0" y="39"/>
+ <delta pt="37" x="0" y="38"/>
+ <delta pt="38" x="0" y="68"/>
+ <delta pt="39" x="0" y="66"/>
+ <delta pt="40" x="0" y="61"/>
+ <delta pt="41" x="1" y="55"/>
+ <delta pt="42" x="0" y="49"/>
+ <delta pt="43" x="-1" y="46"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0004" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="-14" y="1"/>
+ <delta pt="2" x="-14" y="-3"/>
+ <delta pt="3" x="-10" y="-4"/>
+ <delta pt="4" x="-9" y="-4"/>
+ <delta pt="5" x="-6" y="-4"/>
+ <delta pt="6" x="-1" y="-4"/>
+ <delta pt="7" x="-1" y="-4"/>
+ <delta pt="8" x="-3" y="-3"/>
+ <delta pt="9" x="-2" y="-3"/>
+ <delta pt="10" x="-2" y="-2"/>
+ <delta pt="11" x="-3" y="-3"/>
+ <delta pt="12" x="-3" y="-2"/>
+ <delta pt="13" x="-2" y="-1"/>
+ <delta pt="14" x="-1" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="-12" y="-1"/>
+ <delta pt="17" x="-11" y="-1"/>
+ <delta pt="18" x="-11" y="-1"/>
+ <delta pt="19" x="-11" y="2"/>
+ <delta pt="20" x="-11" y="5"/>
+ <delta pt="21" x="-11" y="8"/>
+ <delta pt="22" x="-11" y="9"/>
+ <delta pt="23" x="-11" y="-6"/>
+ <delta pt="24" x="-11" y="-5"/>
+ <delta pt="25" x="-15" y="-2"/>
+ <delta pt="26" x="-17" y="-2"/>
+ <delta pt="27" x="-19" y="-4"/>
+ <delta pt="28" x="-18" y="-4"/>
+ <delta pt="29" x="-20" y="-4"/>
+ <delta pt="30" x="-20" y="-4"/>
+ <delta pt="31" x="-19" y="-4"/>
+ <delta pt="32" x="-16" y="-3"/>
+ <delta pt="33" x="-13" y="-3"/>
+ <delta pt="34" x="-12" y="-3"/>
+ <delta pt="35" x="-12" y="-4"/>
+ <delta pt="36" x="-12" y="-3"/>
+ <delta pt="37" x="-12" y="-7"/>
+ <delta pt="38" x="-12" y="10"/>
+ <delta pt="39" x="-12" y="9"/>
+ <delta pt="40" x="-12" y="5"/>
+ <delta pt="41" x="-12" y="3"/>
+ <delta pt="42" x="-12" y="0"/>
+ <delta pt="43" x="-12" y="-1"/>
+ <delta pt="44" x="0" y="0"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="0" y="0"/>
+ <delta pt="47" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="glyph00005">
+ <tuple>
+ <coord axis="0000" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-2"/>
+ <delta pt="2" x="-1" y="-2"/>
+ <delta pt="3" x="-4" y="-2"/>
+ <delta pt="4" x="-9" y="-2"/>
+ <delta pt="5" x="-13" y="-2"/>
+ <delta pt="6" x="-16" y="-2"/>
+ <delta pt="7" x="-17" y="-2"/>
+ <delta pt="8" x="-17" y="-2"/>
+ <delta pt="9" x="-20" y="-2"/>
+ <delta pt="10" x="-21" y="-3"/>
+ <delta pt="11" x="-22" y="-2"/>
+ <delta pt="12" x="-24" y="-1"/>
+ <delta pt="13" x="-25" y="-1"/>
+ <delta pt="14" x="-25" y="0"/>
+ <delta pt="15" x="-25" y="0"/>
+ <delta pt="16" x="-24" y="0"/>
+ <delta pt="17" x="-24" y="0"/>
+ <delta pt="18" x="-21" y="0"/>
+ <delta pt="19" x="-20" y="0"/>
+ <delta pt="20" x="-20" y="0"/>
+ <delta pt="21" x="-17" y="0"/>
+ <delta pt="22" x="-12" y="0"/>
+ <delta pt="23" x="-8" y="0"/>
+ <delta pt="24" x="-4" y="0"/>
+ <delta pt="25" x="-1" y="0"/>
+ <delta pt="26" x="0" y="0"/>
+ <delta pt="27" x="3" y="-2"/>
+ <delta pt="28" x="0" y="-2"/>
+ <delta pt="29" x="0" y="0"/>
+ <delta pt="30" x="1" y="1"/>
+ <delta pt="31" x="1" y="1"/>
+ <delta pt="32" x="2" y="1"/>
+ <delta pt="33" x="2" y="1"/>
+ <delta pt="34" x="3" y="0"/>
+ <delta pt="35" x="3" y="0"/>
+ <delta pt="36" x="3" y="0"/>
+ <delta pt="37" x="3" y="-1"/>
+ <delta pt="38" x="3" y="-2"/>
+ <delta pt="39" x="2" y="-15"/>
+ <delta pt="40" x="0" y="-15"/>
+ <delta pt="41" x="0" y="-14"/>
+ <delta pt="42" x="0" y="-12"/>
+ <delta pt="43" x="0" y="-10"/>
+ <delta pt="44" x="0" y="-9"/>
+ <delta pt="45" x="0" y="-2"/>
+ <delta pt="46" x="3" y="-2"/>
+ <delta pt="47" x="3" y="-8"/>
+ <delta pt="48" x="3" y="-9"/>
+ <delta pt="49" x="3" y="-12"/>
+ <delta pt="50" x="2" y="-14"/>
+ <delta pt="51" x="2" y="-15"/>
+ <delta pt="52" x="0" y="0"/>
+ <delta pt="53" x="0" y="0"/>
+ <delta pt="54" x="0" y="0"/>
+ <delta pt="55" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0001" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="0"/>
+ <delta pt="2" x="1" y="0"/>
+ <delta pt="3" x="4" y="0"/>
+ <delta pt="4" x="8" y="0"/>
+ <delta pt="5" x="11" y="0"/>
+ <delta pt="6" x="14" y="0"/>
+ <delta pt="7" x="14" y="0"/>
+ <delta pt="8" x="14" y="0"/>
+ <delta pt="9" x="13" y="0"/>
+ <delta pt="10" x="13" y="0"/>
+ <delta pt="11" x="13" y="0"/>
+ <delta pt="12" x="12" y="0"/>
+ <delta pt="13" x="12" y="0"/>
+ <delta pt="14" x="12" y="0"/>
+ <delta pt="15" x="12" y="1"/>
+ <delta pt="16" x="12" y="1"/>
+ <delta pt="17" x="13" y="1"/>
+ <delta pt="18" x="13" y="1"/>
+ <delta pt="19" x="13" y="1"/>
+ <delta pt="20" x="13" y="1"/>
+ <delta pt="21" x="12" y="1"/>
+ <delta pt="22" x="9" y="1"/>
+ <delta pt="23" x="6" y="0"/>
+ <delta pt="24" x="3" y="0"/>
+ <delta pt="25" x="0" y="0"/>
+ <delta pt="26" x="0" y="0"/>
+ <delta pt="27" x="1" y="0"/>
+ <delta pt="28" x="1" y="0"/>
+ <delta pt="29" x="1" y="0"/>
+ <delta pt="30" x="1" y="0"/>
+ <delta pt="31" x="1" y="0"/>
+ <delta pt="32" x="1" y="0"/>
+ <delta pt="33" x="1" y="0"/>
+ <delta pt="34" x="1" y="0"/>
+ <delta pt="35" x="2" y="0"/>
+ <delta pt="36" x="2" y="0"/>
+ <delta pt="37" x="2" y="0"/>
+ <delta pt="38" x="1" y="0"/>
+ <delta pt="39" x="1" y="-50"/>
+ <delta pt="40" x="1" y="-50"/>
+ <delta pt="41" x="1" y="-50"/>
+ <delta pt="42" x="1" y="-49"/>
+ <delta pt="43" x="1" y="-48"/>
+ <delta pt="44" x="1" y="-48"/>
+ <delta pt="45" x="1" y="0"/>
+ <delta pt="46" x="1" y="0"/>
+ <delta pt="47" x="1" y="-48"/>
+ <delta pt="48" x="1" y="-48"/>
+ <delta pt="49" x="1" y="-49"/>
+ <delta pt="50" x="1" y="-50"/>
+ <delta pt="51" x="1" y="-50"/>
+ <delta pt="52" x="0" y="0"/>
+ <delta pt="53" x="0" y="0"/>
+ <delta pt="54" x="0" y="0"/>
+ <delta pt="55" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-21"/>
+ <delta pt="2" x="0" y="-21"/>
+ <delta pt="3" x="7" y="-21"/>
+ <delta pt="4" x="16" y="-21"/>
+ <delta pt="5" x="27" y="-20"/>
+ <delta pt="6" x="33" y="-20"/>
+ <delta pt="7" x="34" y="-20"/>
+ <delta pt="8" x="32" y="-20"/>
+ <delta pt="9" x="28" y="-21"/>
+ <delta pt="10" x="26" y="-21"/>
+ <delta pt="11" x="22" y="-18"/>
+ <delta pt="12" x="15" y="-11"/>
+ <delta pt="13" x="13" y="-7"/>
+ <delta pt="14" x="12" y="-6"/>
+ <delta pt="15" x="13" y="-4"/>
+ <delta pt="16" x="15" y="0"/>
+ <delta pt="17" x="19" y="0"/>
+ <delta pt="18" x="26" y="1"/>
+ <delta pt="19" x="30" y="1"/>
+ <delta pt="20" x="30" y="1"/>
+ <delta pt="21" x="26" y="0"/>
+ <delta pt="22" x="19" y="0"/>
+ <delta pt="23" x="11" y="0"/>
+ <delta pt="24" x="4" y="0"/>
+ <delta pt="25" x="0" y="0"/>
+ <delta pt="26" x="0" y="0"/>
+ <delta pt="27" x="30" y="-18"/>
+ <delta pt="28" x="-9" y="-21"/>
+ <delta pt="29" x="2" y="1"/>
+ <delta pt="30" x="9" y="3"/>
+ <delta pt="31" x="11" y="4"/>
+ <delta pt="32" x="13" y="3"/>
+ <delta pt="33" x="18" y="1"/>
+ <delta pt="34" x="28" y="-4"/>
+ <delta pt="35" x="31" y="-6"/>
+ <delta pt="36" x="32" y="-7"/>
+ <delta pt="37" x="32" y="-8"/>
+ <delta pt="38" x="30" y="-18"/>
+ <delta pt="39" x="18" y="-7"/>
+ <delta pt="40" x="-4" y="-7"/>
+ <delta pt="41" x="-6" y="-10"/>
+ <delta pt="42" x="-8" y="-11"/>
+ <delta pt="43" x="-9" y="-12"/>
+ <delta pt="44" x="-9" y="-13"/>
+ <delta pt="45" x="-9" y="-20"/>
+ <delta pt="46" x="30" y="-20"/>
+ <delta pt="47" x="30" y="-6"/>
+ <delta pt="48" x="30" y="-8"/>
+ <delta pt="49" x="27" y="-9"/>
+ <delta pt="50" x="22" y="-9"/>
+ <delta pt="51" x="18" y="-7"/>
+ <delta pt="52" x="0" y="0"/>
+ <delta pt="53" x="0" y="0"/>
+ <delta pt="54" x="0" y="0"/>
+ <delta pt="55" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0000" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="-2"/>
+ <delta pt="2" x="0" y="-2"/>
+ <delta pt="3" x="1" y="-2"/>
+ <delta pt="4" x="2" y="-2"/>
+ <delta pt="5" x="3" y="-2"/>
+ <delta pt="6" x="4" y="-2"/>
+ <delta pt="7" x="4" y="-2"/>
+ <delta pt="8" x="3" y="-2"/>
+ <delta pt="9" x="3" y="-2"/>
+ <delta pt="10" x="3" y="-2"/>
+ <delta pt="11" x="3" y="-2"/>
+ <delta pt="12" x="2" y="-1"/>
+ <delta pt="13" x="2" y="0"/>
+ <delta pt="14" x="2" y="-1"/>
+ <delta pt="15" x="2" y="0"/>
+ <delta pt="16" x="2" y="0"/>
+ <delta pt="17" x="3" y="0"/>
+ <delta pt="18" x="3" y="0"/>
+ <delta pt="19" x="4" y="0"/>
+ <delta pt="20" x="4" y="0"/>
+ <delta pt="21" x="3" y="1"/>
+ <delta pt="22" x="2" y="0"/>
+ <delta pt="23" x="1" y="0"/>
+ <delta pt="24" x="1" y="0"/>
+ <delta pt="25" x="0" y="0"/>
+ <delta pt="26" x="0" y="0"/>
+ <delta pt="27" x="3" y="-2"/>
+ <delta pt="28" x="-1" y="-2"/>
+ <delta pt="29" x="1" y="0"/>
+ <delta pt="30" x="1" y="1"/>
+ <delta pt="31" x="1" y="0"/>
+ <delta pt="32" x="2" y="1"/>
+ <delta pt="33" x="2" y="0"/>
+ <delta pt="34" x="4" y="0"/>
+ <delta pt="35" x="3" y="0"/>
+ <delta pt="36" x="4" y="0"/>
+ <delta pt="37" x="3" y="-1"/>
+ <delta pt="38" x="3" y="-2"/>
+ <delta pt="39" x="2" y="0"/>
+ <delta pt="40" x="0" y="0"/>
+ <delta pt="41" x="-1" y="0"/>
+ <delta pt="42" x="-1" y="-1"/>
+ <delta pt="43" x="-1" y="0"/>
+ <delta pt="44" x="-1" y="-1"/>
+ <delta pt="45" x="-1" y="-2"/>
+ <delta pt="46" x="3" y="-2"/>
+ <delta pt="47" x="3" y="0"/>
+ <delta pt="48" x="3" y="0"/>
+ <delta pt="49" x="3" y="0"/>
+ <delta pt="50" x="2" y="0"/>
+ <delta pt="51" x="2" y="0"/>
+ <delta pt="52" x="0" y="0"/>
+ <delta pt="53" x="0" y="0"/>
+ <delta pt="54" x="0" y="0"/>
+ <delta pt="55" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="0001" value="1.0"/>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="0"/>
+ <delta pt="2" x="1" y="-1"/>
+ <delta pt="3" x="2" y="-1"/>
+ <delta pt="4" x="3" y="-1"/>
+ <delta pt="5" x="4" y="-1"/>
+ <delta pt="6" x="5" y="-1"/>
+ <delta pt="7" x="5" y="-1"/>
+ <delta pt="8" x="5" y="-1"/>
+ <delta pt="9" x="5" y="-1"/>
+ <delta pt="10" x="5" y="-1"/>
+ <delta pt="11" x="5" y="-1"/>
+ <delta pt="12" x="4" y="-1"/>
+ <delta pt="13" x="4" y="0"/>
+ <delta pt="14" x="5" y="0"/>
+ <delta pt="15" x="4" y="-1"/>
+ <delta pt="16" x="4" y="-1"/>
+ <delta pt="17" x="4" y="-1"/>
+ <delta pt="18" x="5" y="-1"/>
+ <delta pt="19" x="5" y="-1"/>
+ <delta pt="20" x="4" y="-1"/>
+ <delta pt="21" x="3" y="0"/>
+ <delta pt="22" x="3" y="0"/>
+ <delta pt="23" x="2" y="0"/>
+ <delta pt="24" x="1" y="0"/>
+ <delta pt="25" x="1" y="0"/>
+ <delta pt="26" x="0" y="0"/>
+ <delta pt="27" x="1" y="0"/>
+ <delta pt="28" x="0" y="0"/>
+ <delta pt="29" x="0" y="0"/>
+ <delta pt="30" x="0" y="0"/>
+ <delta pt="31" x="0" y="1"/>
+ <delta pt="32" x="1" y="0"/>
+ <delta pt="33" x="0" y="0"/>
+ <delta pt="34" x="1" y="0"/>
+ <delta pt="35" x="0" y="0"/>
+ <delta pt="36" x="0" y="-1"/>
+ <delta pt="37" x="0" y="0"/>
+ <delta pt="38" x="1" y="0"/>
+ <delta pt="39" x="0" y="2"/>
+ <delta pt="40" x="0" y="2"/>
+ <delta pt="41" x="0" y="3"/>
+ <delta pt="42" x="0" y="6"/>
+ <delta pt="43" x="0" y="9"/>
+ <delta pt="44" x="0" y="9"/>
+ <delta pt="45" x="0" y="0"/>
+ <delta pt="46" x="1" y="0"/>
+ <delta pt="47" x="1" y="10"/>
+ <delta pt="48" x="1" y="9"/>
+ <delta pt="49" x="0" y="6"/>
+ <delta pt="50" x="0" y="3"/>
+ <delta pt="51" x="0" y="2"/>
+ <delta pt="52" x="0" y="0"/>
+ <delta pt="53" x="0" y="0"/>
+ <delta pt="54" x="0" y="0"/>
+ <delta pt="55" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="uniAC00">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="-27" y="2"/>
+ <delta pt="1" x="-6" y="-10"/>
+ <delta pt="2" x="0" y="0"/>
+ <delta pt="3" x="0" y="0"/>
+ <delta pt="4" x="0" y="0"/>
+ <delta pt="5" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="uniAC01">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="-23" y="1"/>
+ <delta pt="1" x="-6" y="-7"/>
+ <delta pt="2" x="-21" y="3"/>
+ <delta pt="3" x="0" y="0"/>
+ <delta pt="4" x="0" y="0"/>
+ <delta pt="5" x="0" y="0"/>
+ <delta pt="6" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ </gvar>
+
+</ttFont>
diff --git a/Tests/ttLib/data/varc-ac00-ac01.ttf b/Tests/ttLib/data/varc-ac00-ac01.ttf
new file mode 100644
index 00000000..1e385bad
--- /dev/null
+++ b/Tests/ttLib/data/varc-ac00-ac01.ttf
Binary files differ
diff --git a/Tests/ttLib/main_test.py b/Tests/ttLib/main_test.py
new file mode 100644
index 00000000..d97f3c94
--- /dev/null
+++ b/Tests/ttLib/main_test.py
@@ -0,0 +1,105 @@
+import subprocess
+import sys
+import tempfile
+from pathlib import Path
+
+from fontTools.ttLib import __main__, TTFont, TTCollection
+
+import pytest
+
+
+TEST_DATA = Path(__file__).parent / "data"
+
+
+@pytest.fixture
+def ttfont_path():
+ font = TTFont()
+ font.importXML(TEST_DATA / "TestTTF-Regular.ttx")
+ with tempfile.NamedTemporaryFile(suffix=".ttf", delete=False) as fp:
+ font_path = Path(fp.name)
+ font.save(font_path)
+ yield font_path
+ font_path.unlink()
+
+
+@pytest.fixture
+def ttcollection_path():
+ font1 = TTFont()
+ font1.importXML(TEST_DATA / "TestTTF-Regular.ttx")
+ font2 = TTFont()
+ font2.importXML(TEST_DATA / "TestTTF-Regular.ttx")
+ coll = TTCollection()
+ coll.fonts = [font1, font2]
+ with tempfile.NamedTemporaryFile(suffix=".ttf", delete=False) as fp:
+ collection_path = Path(fp.name)
+ coll.save(collection_path)
+ yield collection_path
+ collection_path.unlink()
+
+
+@pytest.fixture(params=[None, "woff"])
+def flavor(request):
+ return request.param
+
+
+def test_ttLib_main_as_subprocess(ttfont_path):
+ subprocess.run(
+ [sys.executable, "-m", "fontTools.ttLib", str(ttfont_path)], check=True
+ )
+
+
+def test_ttLib_open_ttfont(ttfont_path):
+ __main__.main([str(ttfont_path)])
+
+
+def test_ttLib_open_save_ttfont(tmp_path, ttfont_path, flavor):
+ output_path = tmp_path / "TestTTF-Regular.ttf"
+ args = ["-o", str(output_path), str(ttfont_path)]
+ if flavor is not None:
+ args.extend(["--flavor", flavor])
+
+ __main__.main(args)
+
+ assert output_path.exists()
+ assert TTFont(output_path).getGlyphOrder() == TTFont(ttfont_path).getGlyphOrder()
+
+
+def test_ttLib_open_ttcollection(ttcollection_path):
+ __main__.main(["-y", "0", str(ttcollection_path)])
+
+
+def test_ttLib_open_ttcollection_save_single_font(tmp_path, ttcollection_path, flavor):
+ for i in range(2):
+ output_path = tmp_path / f"TestTTF-Regular#{i}.ttf"
+ args = ["-y", str(i), "-o", str(output_path), str(ttcollection_path)]
+ if flavor is not None:
+ args.extend(["--flavor", flavor])
+
+ __main__.main(args)
+
+ assert output_path.exists()
+ assert (
+ TTFont(output_path).getGlyphOrder()
+ == TTCollection(ttcollection_path)[i].getGlyphOrder()
+ )
+
+
+def test_ttLib_open_ttcollection_save_ttcollection(tmp_path, ttcollection_path):
+ output_path = tmp_path / "TestTTF.ttc"
+
+ __main__.main(["-o", str(output_path), str(ttcollection_path)])
+
+ assert output_path.exists()
+ assert len(TTCollection(output_path)) == len(TTCollection(ttcollection_path))
+
+
+def test_ttLib_open_multiple_fonts_save_ttcollection(tmp_path, ttfont_path):
+ output_path = tmp_path / "TestTTF.ttc"
+
+ __main__.main(["-o", str(output_path), str(ttfont_path), str(ttfont_path)])
+
+ assert output_path.exists()
+
+ coll = TTCollection(output_path)
+ assert len(coll) == 2
+ assert coll[0].getGlyphOrder() == coll[1].getGlyphOrder()
diff --git a/Tests/ttLib/scaleUpem_test.py b/Tests/ttLib/scaleUpem_test.py
index dc52bf94..6024758f 100644
--- a/Tests/ttLib/scaleUpem_test.py
+++ b/Tests/ttLib/scaleUpem_test.py
@@ -8,8 +8,8 @@ import tempfile
import unittest
import pytest
-class ScaleUpemTest(unittest.TestCase):
+class ScaleUpemTest(unittest.TestCase):
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0
@@ -26,8 +26,7 @@ class ScaleUpemTest(unittest.TestCase):
def temp_path(self, suffix):
self.temp_dir()
self.num_tempfiles += 1
- return os.path.join(self.tempdir,
- "tmp%d%s" % (self.num_tempfiles, suffix))
+ return os.path.join(self.tempdir, "tmp%d%s" % (self.num_tempfiles, suffix))
def temp_dir(self):
if not self.tempdir:
@@ -51,12 +50,12 @@ class ScaleUpemTest(unittest.TestCase):
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
- expected, actual, fromfile=expected_ttx, tofile=path):
+ expected, actual, fromfile=expected_ttx, tofile=path
+ ):
sys.stdout.write(line)
self.fail("TTX output is different from expected")
def test_scale_upem_ttf(self):
-
font = TTFont(self.get_path("I.ttf"))
tables = [table_tag for table_tag in font.keys() if table_tag != "head"]
@@ -65,9 +64,20 @@ class ScaleUpemTest(unittest.TestCase):
expected_ttx_path = self.get_path("I-512upem.ttx")
self.expect_ttx(font, expected_ttx_path, tables)
+ def test_scale_upem_varComposite(self):
+ font = TTFont(self.get_path("varc-ac00-ac01.ttf"))
+ tables = [table_tag for table_tag in font.keys() if table_tag != "head"]
- def test_scale_upem_otf(self):
+ scale_upem(font, 500)
+ expected_ttx_path = self.get_path("varc-ac00-ac01-500upem.ttx")
+ self.expect_ttx(font, expected_ttx_path, tables)
+
+ # Scale our other varComposite font as well; without checking the expected
+ font = TTFont(self.get_path("varc-6868.ttf"))
+ scale_upem(font, 500)
+
+ def test_scale_upem_otf(self):
# Just test that it doesn't crash
font = TTFont(self.get_path("TestVGID-Regular.otf"))
diff --git a/Tests/ttLib/sfnt_test.py b/Tests/ttLib/sfnt_test.py
index 9f817444..7832a2ff 100644
--- a/Tests/ttLib/sfnt_test.py
+++ b/Tests/ttLib/sfnt_test.py
@@ -1,9 +1,25 @@
import io
import copy
import pickle
-from fontTools.ttLib.sfnt import calcChecksum, SFNTReader
+import tempfile
+from fontTools.ttLib import TTFont
+from fontTools.ttLib.sfnt import calcChecksum, SFNTReader, WOFFFlavorData
+from pathlib import Path
import pytest
+TEST_DATA = Path(__file__).parent / "data"
+
+
+@pytest.fixture
+def ttfont_path():
+ font = TTFont()
+ font.importXML(TEST_DATA / "TestTTF-Regular.ttx")
+ with tempfile.NamedTemporaryFile(suffix=".ttf", delete=False) as fp:
+ font_path = Path(fp.name)
+ font.save(font_path)
+ yield font_path
+ font_path.unlink()
+
def test_calcChecksum():
assert calcChecksum(b"abcd") == 1633837924
@@ -57,3 +73,24 @@ class SFNTReaderTest:
if k == "file":
continue
assert getattr(reader2, k) == v
+
+
+def test_ttLib_sfnt_write_privData(tmp_path, ttfont_path):
+ output_path = tmp_path / "TestTTF-Regular.woff"
+ font = TTFont(ttfont_path)
+
+ privData = "Private Eyes".encode()
+
+ data = WOFFFlavorData()
+ head = font["head"]
+ data.majorVersion, data.minorVersion = map(
+ int, format(head.fontRevision, ".3f").split(".")
+ )
+
+ data.privData = privData
+ font.flavor = "woff"
+ font.flavorData = data
+ font.save(output_path)
+
+ assert output_path.exists()
+ assert TTFont(output_path).flavorData.privData == privData
diff --git a/Tests/ttLib/tables/C_F_F__2_test.py b/Tests/ttLib/tables/C_F_F__2_test.py
index 10f9b2fb..2e4d19af 100644
--- a/Tests/ttLib/tables/C_F_F__2_test.py
+++ b/Tests/ttLib/tables/C_F_F__2_test.py
@@ -8,7 +8,7 @@ import unittest
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-DATA_DIR = os.path.join(CURR_DIR, 'data')
+DATA_DIR = os.path.join(CURR_DIR, "data")
CFF_TTX = os.path.join(DATA_DIR, "C_F_F__2.ttx")
CFF_BIN = os.path.join(DATA_DIR, "C_F_F__2.bin")
@@ -16,28 +16,28 @@ CFF_BIN = os.path.join(DATA_DIR, "C_F_F__2.bin")
def strip_VariableItems(string):
# ttlib changes with the fontTools version
- string = re.sub(' ttLibVersion=".*"', '', string)
+ string = re.sub(' ttLibVersion=".*"', "", string)
# head table checksum and mod date changes with each save.
- string = re.sub('<checkSumAdjustment value="[^"]+"/>', '', string)
- string = re.sub('<modified value="[^"]+"/>', '', string)
+ string = re.sub('<checkSumAdjustment value="[^"]+"/>', "", string)
+ string = re.sub('<modified value="[^"]+"/>', "", string)
return string
-class CFFTableTest(unittest.TestCase):
+class CFFTableTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- with open(CFF_BIN, 'rb') as f:
+ with open(CFF_BIN, "rb") as f:
font = TTFont(file=CFF_BIN)
- cffTable = font['CFF2']
+ cffTable = font["CFF2"]
cls.cff2Data = cffTable.compile(font)
- with open(CFF_TTX, 'r') as f:
+ with open(CFF_TTX, "r") as f:
cff2XML = f.read()
cff2XML = strip_VariableItems(cff2XML)
cls.cff2XML = cff2XML.splitlines()
def test_toXML(self):
font = TTFont(file=CFF_BIN)
- cffTable = font['CFF2']
+ cffTable = font["CFF2"]
cffData = cffTable.compile(font)
out = StringIO()
font.saveXML(out)
@@ -47,9 +47,9 @@ class CFFTableTest(unittest.TestCase):
self.assertEqual(cff2XML, self.cff2XML)
def test_fromXML(self):
- font = TTFont(sfntVersion='OTTO')
+ font = TTFont(sfntVersion="OTTO")
font.importXML(CFF_TTX)
- cffTable = font['CFF2']
+ cffTable = font["CFF2"]
cff2Data = cffTable.compile(font)
self.assertEqual(cff2Data, self.cff2Data)
diff --git a/Tests/ttLib/tables/C_F_F_test.py b/Tests/ttLib/tables/C_F_F_test.py
index cb8d8c55..76bff437 100644
--- a/Tests/ttLib/tables/C_F_F_test.py
+++ b/Tests/ttLib/tables/C_F_F_test.py
@@ -8,28 +8,27 @@ import unittest
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-DATA_DIR = os.path.join(CURR_DIR, 'data')
+DATA_DIR = os.path.join(CURR_DIR, "data")
CFF_TTX = os.path.join(DATA_DIR, "C_F_F_.ttx")
CFF_BIN = os.path.join(DATA_DIR, "C_F_F_.bin")
def strip_ttLibVersion(string):
- return re.sub(' ttLibVersion=".*"', '', string)
+ return re.sub(' ttLibVersion=".*"', "", string)
class CFFTableTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
- with open(CFF_BIN, 'rb') as f:
+ with open(CFF_BIN, "rb") as f:
cls.cffData = f.read()
- with open(CFF_TTX, 'r') as f:
+ with open(CFF_TTX, "r") as f:
cls.cffXML = strip_ttLibVersion(f.read()).splitlines()
def test_toXML(self):
- font = TTFont(sfntVersion='OTTO')
- cffTable = font['CFF '] = newTable('CFF ')
+ font = TTFont(sfntVersion="OTTO")
+ cffTable = font["CFF "] = newTable("CFF ")
cffTable.decompile(self.cffData, font)
out = StringIO()
font.saveXML(out)
@@ -37,13 +36,14 @@ class CFFTableTest(unittest.TestCase):
self.assertEqual(cffXML, self.cffXML)
def test_fromXML(self):
- font = TTFont(sfntVersion='OTTO')
+ font = TTFont(sfntVersion="OTTO")
font.importXML(CFF_TTX)
- cffTable = font['CFF ']
+ cffTable = font["CFF "]
cffData = cffTable.compile(font)
self.assertEqual(cffData, self.cffData)
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/C_O_L_R_test.py b/Tests/ttLib/tables/C_O_L_R_test.py
index 132449ea..43ad7049 100644
--- a/Tests/ttLib/tables/C_O_L_R_test.py
+++ b/Tests/ttLib/tables/C_O_L_R_test.py
@@ -1,11 +1,16 @@
from fontTools import ttLib
from fontTools.misc.testTools import getXML, parseXML
+from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.C_O_L_R_ import table_C_O_L_R_
+from pathlib import Path
import binascii
import pytest
+TEST_DATA_DIR = Path(__file__).parent / "data"
+
+
COLR_V0_SAMPLE = (
(b"\x00\x00", "Version (0)"),
(b"\x00\x01", "BaseGlyphRecordCount (1)"),
@@ -111,7 +116,7 @@ COLR_V1_SAMPLE = (
(b"\x00\x03", "LayerRecordCount (3)"),
(b"\x00\x00\x00\x34", "Offset to BaseGlyphList from beginning of table (52)"),
(b"\x00\x00\x00\x9f", "Offset to LayerList from beginning of table (159)"),
- (b"\x00\x00\x01\x62", "Offset to ClipList (354)"),
+ (b"\x00\x00\x01\x66", "Offset to ClipList (358)"),
(b"\x00\x00\x00\x00", "Offset to DeltaSetIndexMap (NULL)"),
(b"\x00\x00\x00\x00", "Offset to VarStore (NULL)"),
(b"\x00\x06", "BaseGlyphRecord[0].BaseGlyph (6)"),
@@ -151,7 +156,10 @@ COLR_V1_SAMPLE = (
(b"\x00\x00\x08", "Offset to BackdropPaint from beginning of PaintComposite (8)"),
(b"\x0d", "BaseGlyphPaintRecord[1].Paint.BackdropPaint.Format (13)"),
(b"\x00\x00\x07", "Offset to Paint from beginning of PaintVarTransform (7)"),
- (b"\x00\x00\x0a", "Offset to VarAffine2x3 from beginning of PaintVarTransform (10)"),
+ (
+ b"\x00\x00\x0a",
+ "Offset to VarAffine2x3 from beginning of PaintVarTransform (10)",
+ ),
(b"\x0b", "BaseGlyphPaintRecord[1].Paint.BackdropPaint.Format (11)"),
(b"\x00\x0a", "BaseGlyphPaintRecord[1].Paint.BackdropPaint.Glyph (10)"),
(b"\x00\x01\x00\x00", "VarAffine2x3.xx (1.0)"),
@@ -179,22 +187,26 @@ COLR_V1_SAMPLE = (
(b"\x00\x05", "ColorLine.ColorStop[1].PaletteIndex (5)"),
(b"@\x00", "ColorLine.ColorStop[1].Alpha (1.0)"),
# LayerList
- (b"\x00\x00\x00\x04", "LayerList.LayerCount (4)"),
+ (b"\x00\x00\x00\x05", "LayerList.LayerCount (5)"),
+ (
+ b"\x00\x00\x00\x18",
+ "First Offset to Paint table from beginning of LayerList (24)",
+ ),
(
- b"\x00\x00\x00\x14",
- "First Offset to Paint table from beginning of LayerList (20)",
+ b"\x00\x00\x00\x27",
+ "Second Offset to Paint table from beginning of LayerList (39)",
),
(
- b"\x00\x00\x00\x23",
- "Second Offset to Paint table from beginning of LayerList (35)",
+ b"\x00\x00\x00\x52",
+ "Third Offset to Paint table from beginning of LayerList (82)",
),
(
- b"\x00\x00\x00\x4e",
- "Third Offset to Paint table from beginning of LayerList (78)",
+ b"\x00\x00\x00\xa2",
+ "Fourth Offset to Paint table from beginning of LayerList (162)",
),
(
- b"\x00\x00\x00\x9e",
- "Fourth Offset to Paint table from beginning of LayerList (158)",
+ b"\x00\x00\x00\xbc",
+ "Fifth Offset to Paint table from beginning of LayerList (188)",
),
# BaseGlyphPaintRecord[2]
(b"\x0a", "BaseGlyphPaintRecord[2].Paint.Format (10)"),
@@ -234,9 +246,15 @@ COLR_V1_SAMPLE = (
(b"\x00\x0d", "LayerList.Paint[2].Glyph (13)"),
(b"\x0c", "LayerList.Paint[2].Paint.Format (12)"),
(b"\x00\x00\x07", "Offset to Paint subtable from beginning of PaintTransform (7)"),
- (b"\x00\x00\x32", "Offset to Affine2x3 subtable from beginning of PaintTransform (50)"),
+ (
+ b"\x00\x00\x32",
+ "Offset to Affine2x3 subtable from beginning of PaintTransform (50)",
+ ),
(b"\x07", "LayerList.Paint[2].Paint.Paint.Format (7)"),
- (b"\x00\x00\x14", "Offset to ColorLine from beginning of PaintVarRadialGradient (20)"),
+ (
+ b"\x00\x00\x14",
+ "Offset to ColorLine from beginning of PaintVarRadialGradient (20)",
+ ),
(b"\x00\x07", "Paint.x0.value (7)"),
(b"\x00\x08", "Paint.y0.value (8)"),
(b"\x00\t", "Paint.r0.value (9)"),
@@ -253,7 +271,6 @@ COLR_V1_SAMPLE = (
(b"@\x00", "ColorLine.ColorStop[1].StopOffset.value (1.0)"),
(b"\x00\x07", "ColorLine.ColorStop[1].PaletteIndex (7)"),
(b"\x19\x9a", "ColorLine.ColorStop[1].Alpha.value (0.4)"),
-
(b"\x00\x00\x00\x07", "VarIndexBase (7)"),
(b"\xff\xf3\x00\x00", "Affine2x3.xx (-13)"),
(b"\x00\x0e\x00\x00", "Affine2x3.xy (14)"),
@@ -261,13 +278,11 @@ COLR_V1_SAMPLE = (
(b"\xff\xef\x00\x00", "Affine2x3.yy (-17)"),
(b"\x00\x12\x00\x00", "Affine2x3.yy (18)"),
(b"\x00\x13\x00\x00", "Affine2x3.yy (19)"),
-
# PaintTranslate
(b"\x0e", "LayerList.Paint[3].Format (14)"),
(b"\x00\x00\x08", "Offset to Paint subtable from beginning of PaintTranslate (8)"),
(b"\x01\x01", "dx (257)"),
(b"\x01\x02", "dy (258)"),
-
# PaintRotateAroundCenter
(b"\x1a", "LayerList.Paint[3].Paint.Format (26)"),
(
@@ -277,7 +292,6 @@ COLR_V1_SAMPLE = (
(b"\x10\x00", "angle (0.25)"),
(b"\x00\xff", "centerX (255)"),
(b"\x01\x00", "centerY (256)"),
-
# PaintSkew
(b"\x1c", "LayerList.Paint[3].Paint.Paint.Format (28)"),
(
@@ -286,39 +300,34 @@ COLR_V1_SAMPLE = (
),
(b"\xfc\x17", "xSkewAngle (-0.0611)"),
(b"\x01\xc7", "ySkewAngle (0.0278)"),
-
- # PaintGlyph
+ # PaintGlyph glyph00011 (pointed to by both PaintSkew above and by LayerList[4] offset)
(b"\x0a", "LayerList.Paint[3].Paint.Paint.Paint.Format (10)"),
(b"\x00\x00\x06", "Offset to Paint subtable from beginning of PaintGlyph (6)"),
(b"\x00\x0b", "LayerList.Paint[2].Glyph (11)"),
-
# PaintSolid
(b"\x02", "LayerList.Paint[0].Paint.Paint.Paint.Paint.Format (2)"),
(b"\x00\x02", "Paint.PaletteIndex (2)"),
(b" \x00", "Paint.Alpha (0.5)"),
-
# ClipList
- (b'\x01', "ClipList.Format (1)"),
- (b'\x00\x00\x00\x02', "ClipList.ClipCount (2)"),
- (b'\x00\x0a', "ClipRecord[0].StartGlyphID (10)"),
- (b'\x00\x0a', "ClipRecord[0].EndGlyphID (10)"),
- (b'\x00\x00\x13', "Offset to ClipBox subtable from beginning of ClipList (19)"),
- (b'\x00\x0e', "ClipRecord[1].StartGlyphID (14)"),
- (b'\x00\x0f', "ClipRecord[1].EndGlyphID (15)"),
- (b'\x00\x00\x20', "Offset to ClipBox subtable from beginning of ClipList (32)"),
-
- (b'\x02', "ClipBox.Format (2)"),
- (b'\x00\x00', "ClipBox.xMin (0)"),
- (b'\x00\x00', "ClipBox.yMin (0)"),
- (b'\x01\xf4', "ClipBox.xMax (500)"),
- (b'\x01\xf4', "ClipBox.yMax (500)"),
- (b'\x00\x00\x00\t', "ClipBox.VarIndexBase (9)"),
-
- (b'\x01', "ClipBox.Format (1)"),
- (b'\x00\x00', "ClipBox.xMin (0)"),
- (b'\x00\x00', "ClipBox.yMin (0)"),
- (b'\x03\xe8', "ClipBox.xMax (1000)"),
- (b'\x03\xe8', "ClipBox.yMax (1000)"),
+ (b"\x01", "ClipList.Format (1)"),
+ (b"\x00\x00\x00\x02", "ClipList.ClipCount (2)"),
+ (b"\x00\x0a", "ClipRecord[0].StartGlyphID (10)"),
+ (b"\x00\x0a", "ClipRecord[0].EndGlyphID (10)"),
+ (b"\x00\x00\x13", "Offset to ClipBox subtable from beginning of ClipList (19)"),
+ (b"\x00\x0e", "ClipRecord[1].StartGlyphID (14)"),
+ (b"\x00\x0f", "ClipRecord[1].EndGlyphID (15)"),
+ (b"\x00\x00\x20", "Offset to ClipBox subtable from beginning of ClipList (32)"),
+ (b"\x02", "ClipBox.Format (2)"),
+ (b"\x00\x00", "ClipBox.xMin (0)"),
+ (b"\x00\x00", "ClipBox.yMin (0)"),
+ (b"\x01\xf4", "ClipBox.xMax (500)"),
+ (b"\x01\xf4", "ClipBox.yMax (500)"),
+ (b"\x00\x00\x00\t", "ClipBox.VarIndexBase (9)"),
+ (b"\x01", "ClipBox.Format (1)"),
+ (b"\x00\x00", "ClipBox.xMin (0)"),
+ (b"\x00\x00", "ClipBox.yMin (0)"),
+ (b"\x03\xe8", "ClipBox.xMax (1000)"),
+ (b"\x03\xe8", "ClipBox.yMax (1000)"),
)
COLR_V1_DATA = b"".join(t[0] for t in COLR_V1_SAMPLE)
@@ -408,7 +417,7 @@ COLR_V1_XML = [
" </BaseGlyphPaintRecord>",
"</BaseGlyphList>",
"<LayerList>",
- " <!-- LayerCount=4 -->",
+ " <!-- LayerCount=5 -->",
' <Paint index="0" Format="10"><!-- PaintGlyph -->',
' <Paint Format="3"><!-- PaintVarSolid -->',
' <PaletteIndex value="2"/>',
@@ -505,6 +514,13 @@ COLR_V1_XML = [
' <dx value="257"/>',
' <dy value="258"/>',
" </Paint>",
+ ' <Paint index="4" Format="10"><!-- PaintGlyph -->',
+ ' <Paint Format="2"><!-- PaintSolid -->',
+ ' <PaletteIndex value="2"/>',
+ ' <Alpha value="0.5"/>',
+ " </Paint>",
+ ' <Glyph value="glyph00011"/>',
+ " </Paint>",
"</LayerList>",
'<ClipList Format="1">',
" <Clip>",
@@ -532,7 +548,7 @@ COLR_V1_XML = [
COLR_V1_VAR_XML = [
'<VarIndexMap Format="0">',
- ' <!-- Omitted values default to 0xFFFF/0xFFFF (no variations) -->',
+ " <!-- Omitted values default to 0xFFFF/0xFFFF (no variations) -->",
' <Map index="0" outer="1" inner="0"/>',
' <Map index="1"/>',
' <Map index="2"/>',
@@ -611,6 +627,26 @@ class COLR_V1_Test(object):
colr.decompile(compiled, font)
assert getXML(colr.toXML, font) == COLR_V1_XML
+ @pytest.mark.parametrize("quantization", [1, 10, 100])
+ @pytest.mark.parametrize("flavor", ["glyf", "cff"])
+ def test_computeClipBoxes(self, flavor, quantization):
+ font = TTFont()
+ font.importXML(TEST_DATA_DIR / f"COLRv1-clip-boxes-{flavor}.ttx")
+ assert font["COLR"].table.ClipList is None
+
+ font["COLR"].table.computeClipBoxes(font.getGlyphSet(), quantization)
+
+ clipList = font["COLR"].table.ClipList
+ assert len(clipList.clips) > 0
+
+ expected = TTFont()
+ expected.importXML(
+ TEST_DATA_DIR / f"COLRv1-clip-boxes-q{quantization}-expected.ttx"
+ )
+ expectedClipList = expected["COLR"].table.ClipList
+
+ assert getXML(clipList.toXML) == getXML(expectedClipList.toXML)
+
class COLR_V1_Variable_Test(object):
def test_round_trip_xml(self, font):
diff --git a/Tests/ttLib/tables/C_P_A_L_test.py b/Tests/ttLib/tables/C_P_A_L_test.py
index 10c8ea0e..0a197099 100644
--- a/Tests/ttLib/tables/C_P_A_L_test.py
+++ b/Tests/ttLib/tables/C_P_A_L_test.py
@@ -5,48 +5,52 @@ import unittest
CPAL_DATA_V0 = deHexStr(
- '0000 0002 ' # version=0, numPaletteEntries=2
- '0002 0004 ' # numPalettes=2, numColorRecords=4
- '00000010 ' # offsetToFirstColorRecord=16
- '0000 0002 ' # colorRecordIndex=[0, 2]
- '000000FF FFCC66FF ' # colorRecord #0, #1 (blue/green/red/alpha)
- '000000FF 000080FF') # colorRecord #2, #3
+ "0000 0002 " # version=0, numPaletteEntries=2
+ "0002 0004 " # numPalettes=2, numColorRecords=4
+ "00000010 " # offsetToFirstColorRecord=16
+ "0000 0002 " # colorRecordIndex=[0, 2]
+ "000000FF FFCC66FF " # colorRecord #0, #1 (blue/green/red/alpha)
+ "000000FF 000080FF"
+) # colorRecord #2, #3
CPAL_DATA_V0_SHARING_COLORS = deHexStr(
- '0000 0003 ' # version=0, numPaletteEntries=3
- '0004 0006 ' # numPalettes=4, numColorRecords=6
- '00000014 ' # offsetToFirstColorRecord=20
- '0000 0000 0003 0000 ' # colorRecordIndex=[0, 0, 3, 0]
- '443322FF 77889911 55555555 ' # colorRecord #0, #1, #2 (BGRA)
- '443322FF 77889911 FFFFFFFF') # colorRecord #3, #4, #5
+ "0000 0003 " # version=0, numPaletteEntries=3
+ "0004 0006 " # numPalettes=4, numColorRecords=6
+ "00000014 " # offsetToFirstColorRecord=20
+ "0000 0000 0003 0000 " # colorRecordIndex=[0, 0, 3, 0]
+ "443322FF 77889911 55555555 " # colorRecord #0, #1, #2 (BGRA)
+ "443322FF 77889911 FFFFFFFF"
+) # colorRecord #3, #4, #5
CPAL_DATA_V1_NOLABELS_NOTYPES = deHexStr(
- '0001 0003 ' # version=1, numPaletteEntries=3
- '0002 0006 ' # numPalettes=2, numColorRecords=6
- '0000001C ' # offsetToFirstColorRecord=28
- '0000 0003 ' # colorRecordIndex=[0, 3]
- '00000000 ' # offsetToPaletteTypeArray=0
- '00000000 ' # offsetToPaletteLabelArray=0
- '00000000 ' # offsetToPaletteEntryLabelArray=0
- 'CAFECAFE 00112233 44556677 ' # colorRecord #0, #1, #2 (BGRA)
- '31415927 42424242 00331337') # colorRecord #3, #4, #5
+ "0001 0003 " # version=1, numPaletteEntries=3
+ "0002 0006 " # numPalettes=2, numColorRecords=6
+ "0000001C " # offsetToFirstColorRecord=28
+ "0000 0003 " # colorRecordIndex=[0, 3]
+ "00000000 " # offsetToPaletteTypeArray=0
+ "00000000 " # offsetToPaletteLabelArray=0
+ "00000000 " # offsetToPaletteEntryLabelArray=0
+ "CAFECAFE 00112233 44556677 " # colorRecord #0, #1, #2 (BGRA)
+ "31415927 42424242 00331337"
+) # colorRecord #3, #4, #5
CPAL_DATA_V1 = deHexStr(
- '0001 0003 ' # version=1, numPaletteEntries=3
- '0002 0006 ' # numPalettes=2, numColorRecords=6
- '0000001C ' # offsetToFirstColorRecord=28
- '0000 0003 ' # colorRecordIndex=[0, 3]
- '00000034 ' # offsetToPaletteTypeArray=52
- '0000003C ' # offsetToPaletteLabelArray=60
- '00000040 ' # offsetToPaletteEntryLabelArray=64
- 'CAFECAFE 00112233 44556677 ' # colorRecord #0, #1, #2 (BGRA)
- '31415927 42424242 00331337 ' # colorRecord #3, #4, #5
- '00000001 00000002 ' # paletteType=[1, 2]
- '0102 0103 ' # paletteLabel=[258, 259]
- '0201 0202 0203') # paletteEntryLabel=[513, 514, 515]
+ "0001 0003 " # version=1, numPaletteEntries=3
+ "0002 0006 " # numPalettes=2, numColorRecords=6
+ "0000001C " # offsetToFirstColorRecord=28
+ "0000 0003 " # colorRecordIndex=[0, 3]
+ "00000034 " # offsetToPaletteTypeArray=52
+ "0000003C " # offsetToPaletteLabelArray=60
+ "00000040 " # offsetToPaletteEntryLabelArray=64
+ "CAFECAFE 00112233 44556677 " # colorRecord #0, #1, #2 (BGRA)
+ "31415927 42424242 00331337 " # colorRecord #3, #4, #5
+ "00000001 00000002 " # paletteType=[1, 2]
+ "0102 0103 " # paletteLabel=[258, 259]
+ "0201 0202 0203"
+) # paletteEntryLabel=[513, 514, 515]
class FakeNameTable(object):
@@ -59,160 +63,188 @@ class FakeNameTable(object):
class CPALTest(unittest.TestCase):
def test_decompile_v0(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V0, ttFont=None)
self.assertEqual(cpal.version, 0)
self.assertEqual(cpal.numPaletteEntries, 2)
- self.assertEqual(repr(cpal.palettes),
- '[[#000000FF, #66CCFFFF], [#000000FF, #800000FF]]')
+ self.assertEqual(
+ repr(cpal.palettes), "[[#000000FF, #66CCFFFF], [#000000FF, #800000FF]]"
+ )
def test_decompile_v0_sharingColors(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V0_SHARING_COLORS, ttFont=None)
self.assertEqual(cpal.version, 0)
self.assertEqual(cpal.numPaletteEntries, 3)
- self.assertEqual([repr(p) for p in cpal.palettes], [
- '[#223344FF, #99887711, #55555555]',
- '[#223344FF, #99887711, #55555555]',
- '[#223344FF, #99887711, #FFFFFFFF]',
- '[#223344FF, #99887711, #55555555]'])
+ self.assertEqual(
+ [repr(p) for p in cpal.palettes],
+ [
+ "[#223344FF, #99887711, #55555555]",
+ "[#223344FF, #99887711, #55555555]",
+ "[#223344FF, #99887711, #FFFFFFFF]",
+ "[#223344FF, #99887711, #55555555]",
+ ],
+ )
def test_decompile_v1_noLabelsNoTypes(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V1_NOLABELS_NOTYPES, ttFont=None)
self.assertEqual(cpal.version, 1)
self.assertEqual(cpal.numPaletteEntries, 3)
- self.assertEqual([repr(p) for p in cpal.palettes], [
- '[#CAFECAFE, #22110033, #66554477]', # RGBA
- '[#59413127, #42424242, #13330037]'])
+ self.assertEqual(
+ [repr(p) for p in cpal.palettes],
+ [
+ "[#CAFECAFE, #22110033, #66554477]", # RGBA
+ "[#59413127, #42424242, #13330037]",
+ ],
+ )
self.assertEqual(cpal.paletteLabels, [cpal.NO_NAME_ID] * len(cpal.palettes))
self.assertEqual(cpal.paletteTypes, [0, 0])
- self.assertEqual(cpal.paletteEntryLabels,
- [cpal.NO_NAME_ID] * cpal.numPaletteEntries)
+ self.assertEqual(
+ cpal.paletteEntryLabels, [cpal.NO_NAME_ID] * cpal.numPaletteEntries
+ )
def test_decompile_v1(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V1, ttFont=None)
self.assertEqual(cpal.version, 1)
self.assertEqual(cpal.numPaletteEntries, 3)
- self.assertEqual([repr(p) for p in cpal.palettes], [
- '[#CAFECAFE, #22110033, #66554477]', # RGBA
- '[#59413127, #42424242, #13330037]'])
+ self.assertEqual(
+ [repr(p) for p in cpal.palettes],
+ [
+ "[#CAFECAFE, #22110033, #66554477]", # RGBA
+ "[#59413127, #42424242, #13330037]",
+ ],
+ )
self.assertEqual(cpal.paletteTypes, [1, 2])
self.assertEqual(cpal.paletteLabels, [258, 259])
self.assertEqual(cpal.paletteEntryLabels, [513, 514, 515])
def test_compile_v0(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V0, ttFont=None)
self.assertEqual(cpal.compile(ttFont=None), CPAL_DATA_V0)
def test_compile_v0_sharingColors(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.version = 0
- Color = getTableModule('CPAL').Color
- palette1 = [Color(red=0x22, green=0x33, blue=0x44, alpha=0xff),
- Color(red=0x99, green=0x88, blue=0x77, alpha=0x11),
- Color(red=0x55, green=0x55, blue=0x55, alpha=0x55)]
- palette2 = [Color(red=0x22, green=0x33, blue=0x44, alpha=0xff),
- Color(red=0x99, green=0x88, blue=0x77, alpha=0x11),
- Color(red=0xFF, green=0xFF, blue=0xFF, alpha=0xFF)]
+ Color = getTableModule("CPAL").Color
+ palette1 = [
+ Color(red=0x22, green=0x33, blue=0x44, alpha=0xFF),
+ Color(red=0x99, green=0x88, blue=0x77, alpha=0x11),
+ Color(red=0x55, green=0x55, blue=0x55, alpha=0x55),
+ ]
+ palette2 = [
+ Color(red=0x22, green=0x33, blue=0x44, alpha=0xFF),
+ Color(red=0x99, green=0x88, blue=0x77, alpha=0x11),
+ Color(red=0xFF, green=0xFF, blue=0xFF, alpha=0xFF),
+ ]
cpal.numPaletteEntries = len(palette1)
cpal.palettes = [palette1, palette1, palette2, palette1]
- self.assertEqual(cpal.compile(ttFont=None),
- CPAL_DATA_V0_SHARING_COLORS)
+ self.assertEqual(cpal.compile(ttFont=None), CPAL_DATA_V0_SHARING_COLORS)
def test_compile_v1(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V1, ttFont=None)
self.assertEqual(cpal.compile(ttFont=None), CPAL_DATA_V1)
def test_compile_v1_noLabelsNoTypes(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V1_NOLABELS_NOTYPES, ttFont=None)
- self.assertEqual(cpal.compile(ttFont=None),
- CPAL_DATA_V1_NOLABELS_NOTYPES)
+ self.assertEqual(cpal.compile(ttFont=None), CPAL_DATA_V1_NOLABELS_NOTYPES)
def test_toXML_v0(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
cpal.decompile(CPAL_DATA_V0, ttFont=None)
- self.assertEqual(getXML(cpal.toXML),
- ['<version value="0"/>',
- '<numPaletteEntries value="2"/>',
- '<palette index="0">',
- ' <color index="0" value="#000000FF"/>',
- ' <color index="1" value="#66CCFFFF"/>',
- '</palette>',
- '<palette index="1">',
- ' <color index="0" value="#000000FF"/>',
- ' <color index="1" value="#800000FF"/>',
- '</palette>'])
+ self.assertEqual(
+ getXML(cpal.toXML),
+ [
+ '<version value="0"/>',
+ '<numPaletteEntries value="2"/>',
+ '<palette index="0">',
+ ' <color index="0" value="#000000FF"/>',
+ ' <color index="1" value="#66CCFFFF"/>',
+ "</palette>",
+ '<palette index="1">',
+ ' <color index="0" value="#000000FF"/>',
+ ' <color index="1" value="#800000FF"/>',
+ "</palette>",
+ ],
+ )
def test_toXML_v1(self):
- name = FakeNameTable({258: "Spring theme", 259: "Winter theme",
- 513: "darks", 515: "lights"})
- cpal = newTable('CPAL')
+ name = FakeNameTable(
+ {258: "Spring theme", 259: "Winter theme", 513: "darks", 515: "lights"}
+ )
+ cpal = newTable("CPAL")
ttFont = {"name": name, "CPAL": cpal}
cpal.decompile(CPAL_DATA_V1, ttFont)
- self.assertEqual(getXML(cpal.toXML, ttFont),
- ['<version value="1"/>',
- '<numPaletteEntries value="3"/>',
- '<palette index="0" label="258" type="1">',
- ' <!-- Spring theme -->',
- ' <color index="0" value="#CAFECAFE"/>',
- ' <color index="1" value="#22110033"/>',
- ' <color index="2" value="#66554477"/>',
- '</palette>',
- '<palette index="1" label="259" type="2">',
- ' <!-- Winter theme -->',
- ' <color index="0" value="#59413127"/>',
- ' <color index="1" value="#42424242"/>',
- ' <color index="2" value="#13330037"/>',
- '</palette>',
- '<paletteEntryLabels>',
- ' <label index="0" value="513"/><!-- darks -->',
- ' <label index="1" value="514"/>',
- ' <label index="2" value="515"/><!-- lights -->',
- '</paletteEntryLabels>'])
+ self.assertEqual(
+ getXML(cpal.toXML, ttFont),
+ [
+ '<version value="1"/>',
+ '<numPaletteEntries value="3"/>',
+ '<palette index="0" label="258" type="1">',
+ " <!-- Spring theme -->",
+ ' <color index="0" value="#CAFECAFE"/>',
+ ' <color index="1" value="#22110033"/>',
+ ' <color index="2" value="#66554477"/>',
+ "</palette>",
+ '<palette index="1" label="259" type="2">',
+ " <!-- Winter theme -->",
+ ' <color index="0" value="#59413127"/>',
+ ' <color index="1" value="#42424242"/>',
+ ' <color index="2" value="#13330037"/>',
+ "</palette>",
+ "<paletteEntryLabels>",
+ ' <label index="0" value="513"/><!-- darks -->',
+ ' <label index="1" value="514"/>',
+ ' <label index="2" value="515"/><!-- lights -->',
+ "</paletteEntryLabels>",
+ ],
+ )
def test_fromXML_v0(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
for name, attrs, content in parseXML(
- '<version value="0"/>'
- '<numPaletteEntries value="2"/>'
- '<palette index="0">'
- ' <color index="0" value="#12345678"/>'
- ' <color index="1" value="#FEDCBA98"/>'
- '</palette>'):
+ '<version value="0"/>'
+ '<numPaletteEntries value="2"/>'
+ '<palette index="0">'
+ ' <color index="0" value="#12345678"/>'
+ ' <color index="1" value="#FEDCBA98"/>'
+ "</palette>"
+ ):
cpal.fromXML(name, attrs, content, ttFont=None)
self.assertEqual(cpal.version, 0)
self.assertEqual(cpal.numPaletteEntries, 2)
- self.assertEqual(repr(cpal.palettes), '[[#12345678, #FEDCBA98]]')
+ self.assertEqual(repr(cpal.palettes), "[[#12345678, #FEDCBA98]]")
def test_fromXML_v1(self):
- cpal = newTable('CPAL')
+ cpal = newTable("CPAL")
for name, attrs, content in parseXML(
- '<version value="1"/>'
- '<numPaletteEntries value="3"/>'
- '<palette index="0" label="259" type="2">'
- ' <color index="0" value="#12345678"/>'
- ' <color index="1" value="#FEDCBA98"/>'
- ' <color index="2" value="#CAFECAFE"/>'
- '</palette>'
- '<paletteEntryLabels>'
- ' <label index="1" value="262"/>'
- '</paletteEntryLabels>'):
+ '<version value="1"/>'
+ '<numPaletteEntries value="3"/>'
+ '<palette index="0" label="259" type="2">'
+ ' <color index="0" value="#12345678"/>'
+ ' <color index="1" value="#FEDCBA98"/>'
+ ' <color index="2" value="#CAFECAFE"/>'
+ "</palette>"
+ "<paletteEntryLabels>"
+ ' <label index="1" value="262"/>'
+ "</paletteEntryLabels>"
+ ):
cpal.fromXML(name, attrs, content, ttFont=None)
self.assertEqual(cpal.version, 1)
self.assertEqual(cpal.numPaletteEntries, 3)
- self.assertEqual(repr(cpal.palettes),
- '[[#12345678, #FEDCBA98, #CAFECAFE]]')
+ self.assertEqual(repr(cpal.palettes), "[[#12345678, #FEDCBA98, #CAFECAFE]]")
self.assertEqual(cpal.paletteLabels, [259])
self.assertEqual(cpal.paletteTypes, [2])
- self.assertEqual(cpal.paletteEntryLabels,
- [cpal.NO_NAME_ID, 262, cpal.NO_NAME_ID])
+ self.assertEqual(
+ cpal.paletteEntryLabels, [cpal.NO_NAME_ID, 262, cpal.NO_NAME_ID]
+ )
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/M_V_A_R_test.py b/Tests/ttLib/tables/M_V_A_R_test.py
index a8b092e0..17d365fe 100644
--- a/Tests/ttLib/tables/M_V_A_R_test.py
+++ b/Tests/ttLib/tables/M_V_A_R_test.py
@@ -6,146 +6,144 @@ import unittest
MVAR_DATA = deHexStr(
- '0001 0000 ' # 0: version=1.0
- '0000 0008 ' # 4: reserved=0, valueRecordSize=8
- '0009 ' # 8: valueRecordCount=9
- '0054 ' # 10: offsetToItemVariationStore=84
- '6861 7363 ' # 12: ValueRecord.valueTag="hasc"
- '0000 ' # 16: ValueRecord.deltaSetOuterIndex
- '0003 ' # 18: ValueRecord.deltaSetInnerIndex
- '6863 6C61 ' # 20: ValueRecord.valueTag="hcla"
- '0000 ' # 24: ValueRecord.deltaSetOuterIndex
- '0003 ' # 26: ValueRecord.deltaSetInnerIndex
- '6863 6C64 ' # 28: ValueRecord.valueTag="hcld"
- '0000 ' # 32: ValueRecord.deltaSetOuterIndex
- '0003 ' # 34: ValueRecord.deltaSetInnerIndex
- '6864 7363 ' # 36: ValueRecord.valueTag="hdsc"
- '0000 ' # 40: ValueRecord.deltaSetOuterIndex
- '0000 ' # 42: ValueRecord.deltaSetInnerIndex
- '686C 6770 ' # 44: ValueRecord.valueTag="hlgp"
- '0000 ' # 48: ValueRecord.deltaSetOuterIndex
- '0002 ' # 50: ValueRecord.deltaSetInnerIndex
- '7362 796F ' # 52: ValueRecord.valueTag="sbyo"
- '0000 ' # 56: ValueRecord.deltaSetOuterIndex
- '0001 ' # 58: ValueRecord.deltaSetInnerIndex
- '7370 796F ' # 60: ValueRecord.valueTag="spyo"
- '0000 ' # 64: ValueRecord.deltaSetOuterIndex
- '0002 ' # 66: ValueRecord.deltaSetInnerIndex
- '7465 7374 ' # 68: ValueRecord.valueTag="test"
- '0000 ' # 72: ValueRecord.deltaSetOuterIndex
- '0002 ' # 74: ValueRecord.deltaSetInnerIndex
- '7465 7332 ' # 76: ValueRecord.valueTag="tes2"
- '0000 ' # 78: ValueRecord.deltaSetOuterIndex
- '0002 ' # 82: ValueRecord.deltaSetInnerIndex
- '0001 ' # 84: VarStore.format=1
- '0000 000C ' # 86: VarStore.offsetToVariationRegionList=12
- '0001 ' # 90: VarStore.itemVariationDataCount=1
- '0000 0016 ' # 92: VarStore.itemVariationDataOffsets[0]=22
- '0001 ' # 96: VarRegionList.axisCount=1
- '0001 ' # 98: VarRegionList.regionCount=1
- '0000 ' # 100: variationRegions[0].regionAxes[0].startCoord=0.0
- '4000 ' # 102: variationRegions[0].regionAxes[0].peakCoord=1.0
- '4000 ' # 104: variationRegions[0].regionAxes[0].endCoord=1.0
- '0004 ' # 106: VarData.ItemCount=4
- '0001 ' # 108: VarData.NumShorts=1
- '0001 ' # 110: VarData.VarRegionCount=1
- '0000 ' # 112: VarData.VarRegionIndex[0]=0
- 'FF38 ' # 114: VarData.deltaSets[0]=-200
- 'FFCE ' # 116: VarData.deltaSets[0]=-50
- '0064 ' # 118: VarData.deltaSets[0]=100
- '00C8 ' # 120: VarData.deltaSets[0]=200
+ "0001 0000 " # 0: version=1.0
+ "0000 0008 " # 4: reserved=0, valueRecordSize=8
+ "0009 " # 8: valueRecordCount=9
+ "0054 " # 10: offsetToItemVariationStore=84
+ "6861 7363 " # 12: ValueRecord.valueTag="hasc"
+ "0000 " # 16: ValueRecord.deltaSetOuterIndex
+ "0003 " # 18: ValueRecord.deltaSetInnerIndex
+ "6863 6C61 " # 20: ValueRecord.valueTag="hcla"
+ "0000 " # 24: ValueRecord.deltaSetOuterIndex
+ "0003 " # 26: ValueRecord.deltaSetInnerIndex
+ "6863 6C64 " # 28: ValueRecord.valueTag="hcld"
+ "0000 " # 32: ValueRecord.deltaSetOuterIndex
+ "0003 " # 34: ValueRecord.deltaSetInnerIndex
+ "6864 7363 " # 36: ValueRecord.valueTag="hdsc"
+ "0000 " # 40: ValueRecord.deltaSetOuterIndex
+ "0000 " # 42: ValueRecord.deltaSetInnerIndex
+ "686C 6770 " # 44: ValueRecord.valueTag="hlgp"
+ "0000 " # 48: ValueRecord.deltaSetOuterIndex
+ "0002 " # 50: ValueRecord.deltaSetInnerIndex
+ "7362 796F " # 52: ValueRecord.valueTag="sbyo"
+ "0000 " # 56: ValueRecord.deltaSetOuterIndex
+ "0001 " # 58: ValueRecord.deltaSetInnerIndex
+ "7370 796F " # 60: ValueRecord.valueTag="spyo"
+ "0000 " # 64: ValueRecord.deltaSetOuterIndex
+ "0002 " # 66: ValueRecord.deltaSetInnerIndex
+ "7465 7374 " # 68: ValueRecord.valueTag="test"
+ "0000 " # 72: ValueRecord.deltaSetOuterIndex
+ "0002 " # 74: ValueRecord.deltaSetInnerIndex
+ "7465 7332 " # 76: ValueRecord.valueTag="tes2"
+ "0000 " # 78: ValueRecord.deltaSetOuterIndex
+ "0002 " # 82: ValueRecord.deltaSetInnerIndex
+ "0001 " # 84: VarStore.format=1
+ "0000 000C " # 86: VarStore.offsetToVariationRegionList=12
+ "0001 " # 90: VarStore.itemVariationDataCount=1
+ "0000 0016 " # 92: VarStore.itemVariationDataOffsets[0]=22
+ "0001 " # 96: VarRegionList.axisCount=1
+ "0001 " # 98: VarRegionList.regionCount=1
+ "0000 " # 100: variationRegions[0].regionAxes[0].startCoord=0.0
+ "4000 " # 102: variationRegions[0].regionAxes[0].peakCoord=1.0
+ "4000 " # 104: variationRegions[0].regionAxes[0].endCoord=1.0
+ "0004 " # 106: VarData.ItemCount=4
+ "0001 " # 108: VarData.NumShorts=1
+ "0001 " # 110: VarData.VarRegionCount=1
+ "0000 " # 112: VarData.VarRegionIndex[0]=0
+ "FF38 " # 114: VarData.deltaSets[0]=-200
+ "FFCE " # 116: VarData.deltaSets[0]=-50
+ "0064 " # 118: VarData.deltaSets[0]=100
+ "00C8 " # 120: VarData.deltaSets[0]=200
)
MVAR_XML = [
'<Version value="0x00010000"/>',
'<Reserved value="0"/>',
'<ValueRecordSize value="8"/>',
- '<!-- ValueRecordCount=9 -->',
+ "<!-- ValueRecordCount=9 -->",
'<VarStore Format="1">',
' <Format value="1"/>',
- ' <VarRegionList>',
- ' <!-- RegionAxisCount=1 -->',
- ' <!-- RegionCount=1 -->',
+ " <VarRegionList>",
+ " <!-- RegionAxisCount=1 -->",
+ " <!-- RegionCount=1 -->",
' <Region index="0">',
' <VarRegionAxis index="0">',
' <StartCoord value="0.0"/>',
' <PeakCoord value="1.0"/>',
' <EndCoord value="1.0"/>',
- ' </VarRegionAxis>',
- ' </Region>',
- ' </VarRegionList>',
- ' <!-- VarDataCount=1 -->',
+ " </VarRegionAxis>",
+ " </Region>",
+ " </VarRegionList>",
+ " <!-- VarDataCount=1 -->",
' <VarData index="0">',
- ' <!-- ItemCount=4 -->',
+ " <!-- ItemCount=4 -->",
' <NumShorts value="1"/>',
- ' <!-- VarRegionCount=1 -->',
+ " <!-- VarRegionCount=1 -->",
' <VarRegionIndex index="0" value="0"/>',
' <Item index="0" value="[-200]"/>',
' <Item index="1" value="[-50]"/>',
' <Item index="2" value="[100]"/>',
' <Item index="3" value="[200]"/>',
- ' </VarData>',
- '</VarStore>',
+ " </VarData>",
+ "</VarStore>",
'<ValueRecord index="0">',
' <ValueTag value="hasc"/>',
' <VarIdx value="3"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="1">',
' <ValueTag value="hcla"/>',
' <VarIdx value="3"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="2">',
' <ValueTag value="hcld"/>',
' <VarIdx value="3"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="3">',
' <ValueTag value="hdsc"/>',
' <VarIdx value="0"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="4">',
' <ValueTag value="hlgp"/>',
' <VarIdx value="2"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="5">',
' <ValueTag value="sbyo"/>',
' <VarIdx value="1"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="6">',
' <ValueTag value="spyo"/>',
' <VarIdx value="2"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="7">',
' <ValueTag value="test"/>',
' <VarIdx value="2"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
'<ValueRecord index="8">',
' <ValueTag value="tes2"/>',
' <VarIdx value="2"/>',
- '</ValueRecord>',
+ "</ValueRecord>",
]
class MVARTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
def test_decompile_toXML(self):
- mvar = newTable('MVAR')
+ mvar = newTable("MVAR")
font = TTFont()
mvar.decompile(MVAR_DATA, font)
self.assertEqual(getXML(mvar.toXML), MVAR_XML)
-
def test_decompile_toXML_lazy(self):
- mvar = newTable('MVAR')
+ mvar = newTable("MVAR")
font = TTFont(lazy=True)
mvar.decompile(MVAR_DATA, font)
self.assertEqual(getXML(mvar.toXML), MVAR_XML)
def test_compile_fromXML(self):
- mvar = newTable('MVAR')
+ mvar = newTable("MVAR")
font = TTFont()
for name, attrs, content in parseXML(MVAR_XML):
mvar.fromXML(name, attrs, content, font=font)
@@ -153,6 +151,7 @@ class MVARTest(unittest.TestCase):
self.assertEqual(hexStr(mvar.compile(font)), hexStr(data))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/O_S_2f_2_test.py b/Tests/ttLib/tables/O_S_2f_2_test.py
index 1f123090..9567b9ec 100644
--- a/Tests/ttLib/tables/O_S_2f_2_test.py
+++ b/Tests/ttLib/tables/O_S_2f_2_test.py
@@ -4,58 +4,59 @@ import unittest
class OS2TableTest(unittest.TestCase):
-
- def test_getUnicodeRanges(self):
- table = table_O_S_2f_2()
- table.ulUnicodeRange1 = 0xFFFFFFFF
- table.ulUnicodeRange2 = 0xFFFFFFFF
- table.ulUnicodeRange3 = 0xFFFFFFFF
- table.ulUnicodeRange4 = 0xFFFFFFFF
- bits = table.getUnicodeRanges()
- for i in range(127):
- self.assertIn(i, bits)
-
- def test_setUnicodeRanges(self):
- table = table_O_S_2f_2()
- table.ulUnicodeRange1 = 0
- table.ulUnicodeRange2 = 0
- table.ulUnicodeRange3 = 0
- table.ulUnicodeRange4 = 0
- bits = set(range(123))
- table.setUnicodeRanges(bits)
- self.assertEqual(table.getUnicodeRanges(), bits)
- with self.assertRaises(ValueError):
- table.setUnicodeRanges([-1, 127, 255])
-
- def test_recalcUnicodeRanges(self):
- font = TTFont()
- font['OS/2'] = os2 = newTable('OS/2')
- font['cmap'] = cmap = newTable('cmap')
- st = getTableModule('cmap').CmapSubtable.newSubtable(4)
- st.platformID, st.platEncID, st.language = 3, 1, 0
- st.cmap = {0x0041:'A', 0x03B1: 'alpha', 0x0410: 'Acyr'}
- cmap.tables = []
- cmap.tables.append(st)
- os2.setUnicodeRanges({0, 1, 9})
- # 'pruneOnly' will clear any bits for which there's no intersection:
- # bit 1 ('Latin 1 Supplement'), in this case. However, it won't set
- # bit 7 ('Greek and Coptic') despite the "alpha" character is present.
- self.assertEqual(os2.recalcUnicodeRanges(font, pruneOnly=True), {0, 9})
- # try again with pruneOnly=False: bit 7 is now set.
- self.assertEqual(os2.recalcUnicodeRanges(font), {0, 7, 9})
- # add a non-BMP char from 'Mahjong Tiles' block (bit 122)
- st.cmap[0x1F000] = 'eastwindtile'
- # the bit 122 and the special bit 57 ('Non Plane 0') are also enabled
- self.assertEqual(os2.recalcUnicodeRanges(font), {0, 7, 9, 57, 122})
-
- def test_intersectUnicodeRanges(self):
- self.assertEqual(intersectUnicodeRanges([0x0410]), {9})
- self.assertEqual(intersectUnicodeRanges([0x0410, 0x1F000]), {9, 57, 122})
- self.assertEqual(
- intersectUnicodeRanges([0x0410, 0x1F000], inverse=True),
- (set(range(123)) - {9, 57, 122}))
+ def test_getUnicodeRanges(self):
+ table = table_O_S_2f_2()
+ table.ulUnicodeRange1 = 0xFFFFFFFF
+ table.ulUnicodeRange2 = 0xFFFFFFFF
+ table.ulUnicodeRange3 = 0xFFFFFFFF
+ table.ulUnicodeRange4 = 0xFFFFFFFF
+ bits = table.getUnicodeRanges()
+ for i in range(127):
+ self.assertIn(i, bits)
+
+ def test_setUnicodeRanges(self):
+ table = table_O_S_2f_2()
+ table.ulUnicodeRange1 = 0
+ table.ulUnicodeRange2 = 0
+ table.ulUnicodeRange3 = 0
+ table.ulUnicodeRange4 = 0
+ bits = set(range(123))
+ table.setUnicodeRanges(bits)
+ self.assertEqual(table.getUnicodeRanges(), bits)
+ with self.assertRaises(ValueError):
+ table.setUnicodeRanges([-1, 127, 255])
+
+ def test_recalcUnicodeRanges(self):
+ font = TTFont()
+ font["OS/2"] = os2 = newTable("OS/2")
+ font["cmap"] = cmap = newTable("cmap")
+ st = getTableModule("cmap").CmapSubtable.newSubtable(4)
+ st.platformID, st.platEncID, st.language = 3, 1, 0
+ st.cmap = {0x0041: "A", 0x03B1: "alpha", 0x0410: "Acyr"}
+ cmap.tables = []
+ cmap.tables.append(st)
+ os2.setUnicodeRanges({0, 1, 9})
+ # 'pruneOnly' will clear any bits for which there's no intersection:
+ # bit 1 ('Latin 1 Supplement'), in this case. However, it won't set
+ # bit 7 ('Greek and Coptic') despite the "alpha" character is present.
+ self.assertEqual(os2.recalcUnicodeRanges(font, pruneOnly=True), {0, 9})
+ # try again with pruneOnly=False: bit 7 is now set.
+ self.assertEqual(os2.recalcUnicodeRanges(font), {0, 7, 9})
+ # add a non-BMP char from 'Mahjong Tiles' block (bit 122)
+ st.cmap[0x1F000] = "eastwindtile"
+ # the bit 122 and the special bit 57 ('Non Plane 0') are also enabled
+ self.assertEqual(os2.recalcUnicodeRanges(font), {0, 7, 9, 57, 122})
+
+ def test_intersectUnicodeRanges(self):
+ self.assertEqual(intersectUnicodeRanges([0x0410]), {9})
+ self.assertEqual(intersectUnicodeRanges([0x0410, 0x1F000]), {9, 57, 122})
+ self.assertEqual(
+ intersectUnicodeRanges([0x0410, 0x1F000], inverse=True),
+ (set(range(123)) - {9, 57, 122}),
+ )
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/S_T_A_T_test.py b/Tests/ttLib/tables/S_T_A_T_test.py
index c5c12341..947ff195 100644
--- a/Tests/ttLib/tables/S_T_A_T_test.py
+++ b/Tests/ttLib/tables/S_T_A_T_test.py
@@ -5,53 +5,53 @@ import unittest
STAT_DATA = deHexStr(
- '0001 0000 ' # 0: Version=1.0
- '0008 0002 ' # 4: DesignAxisSize=8, DesignAxisCount=2
- '0000 0012 ' # 8: OffsetToDesignAxes=18
- '0003 0000 0022 ' # 12: AxisValueCount=3, OffsetToAxisValueOffsets=34
- '7767 6874 ' # 18: DesignAxis[0].AxisTag='wght'
- '012D 0002 ' # 22: DesignAxis[0].NameID=301, .AxisOrdering=2
- '5445 5354 ' # 26: DesignAxis[1].AxisTag='TEST'
- '012E 0001 ' # 30: DesignAxis[1].NameID=302, .AxisOrdering=1
- '0006 0012 0026 ' # 34: AxisValueOffsets = [6, 18, 38] (+34)
- '0001 0000 0000 ' # 40: AxisValue[0].Format=1, .AxisIndex=0, .Flags=0
- '0191 0190 0000 ' # 46: AxisValue[0].ValueNameID=401, .Value=400.0
- '0002 0001 0000 ' # 52: AxisValue[1].Format=2, .AxisIndex=1, .Flags=0
- '0192 ' # 58: AxisValue[1].ValueNameID=402
- '0002 0000 ' # 60: AxisValue[1].NominalValue=2.0
- '0001 0000 ' # 64: AxisValue[1].RangeMinValue=1.0
- '0003 0000 ' # 68: AxisValue[1].RangeMaxValue=3.0
- '0003 0000 0000 ' # 72: AxisValue[2].Format=3, .AxisIndex=0, .Flags=0
- '0002 ' # 78: AxisValue[2].ValueNameID=2 'Regular'
- '0190 0000 02BC 0000 ' # 80: AxisValue[2].Value=400.0, .LinkedValue=700.0
-) # 88: <end>
-assert(len(STAT_DATA) == 88)
+ "0001 0000 " # 0: Version=1.0
+ "0008 0002 " # 4: DesignAxisSize=8, DesignAxisCount=2
+ "0000 0012 " # 8: OffsetToDesignAxes=18
+ "0003 0000 0022 " # 12: AxisValueCount=3, OffsetToAxisValueOffsets=34
+ "7767 6874 " # 18: DesignAxis[0].AxisTag='wght'
+ "012D 0002 " # 22: DesignAxis[0].NameID=301, .AxisOrdering=2
+ "5445 5354 " # 26: DesignAxis[1].AxisTag='TEST'
+ "012E 0001 " # 30: DesignAxis[1].NameID=302, .AxisOrdering=1
+ "0006 0012 0026 " # 34: AxisValueOffsets = [6, 18, 38] (+34)
+ "0001 0000 0000 " # 40: AxisValue[0].Format=1, .AxisIndex=0, .Flags=0
+ "0191 0190 0000 " # 46: AxisValue[0].ValueNameID=401, .Value=400.0
+ "0002 0001 0000 " # 52: AxisValue[1].Format=2, .AxisIndex=1, .Flags=0
+ "0192 " # 58: AxisValue[1].ValueNameID=402
+ "0002 0000 " # 60: AxisValue[1].NominalValue=2.0
+ "0001 0000 " # 64: AxisValue[1].RangeMinValue=1.0
+ "0003 0000 " # 68: AxisValue[1].RangeMaxValue=3.0
+ "0003 0000 0000 " # 72: AxisValue[2].Format=3, .AxisIndex=0, .Flags=0
+ "0002 " # 78: AxisValue[2].ValueNameID=2 'Regular'
+ "0190 0000 02BC 0000 " # 80: AxisValue[2].Value=400.0, .LinkedValue=700.0
+) # 88: <end>
+assert len(STAT_DATA) == 88
STAT_XML = [
'<Version value="0x00010000"/>',
'<DesignAxisRecordSize value="8"/>',
- '<!-- DesignAxisCount=2 -->',
- '<DesignAxisRecord>',
+ "<!-- DesignAxisCount=2 -->",
+ "<DesignAxisRecord>",
' <Axis index="0">',
' <AxisTag value="wght"/>',
' <AxisNameID value="301"/>',
' <AxisOrdering value="2"/>',
- ' </Axis>',
+ " </Axis>",
' <Axis index="1">',
' <AxisTag value="TEST"/>',
' <AxisNameID value="302"/>',
' <AxisOrdering value="1"/>',
- ' </Axis>',
- '</DesignAxisRecord>',
- '<!-- AxisValueCount=3 -->',
- '<AxisValueArray>',
+ " </Axis>",
+ "</DesignAxisRecord>",
+ "<!-- AxisValueCount=3 -->",
+ "<AxisValueArray>",
' <AxisValue index="0" Format="1">',
' <AxisIndex value="0"/>',
' <Flags value="0"/>',
' <ValueNameID value="401"/>',
' <Value value="400.0"/>',
- ' </AxisValue>',
+ " </AxisValue>",
' <AxisValue index="1" Format="2">',
' <AxisIndex value="1"/>',
' <Flags value="0"/>',
@@ -59,204 +59,202 @@ STAT_XML = [
' <NominalValue value="2.0"/>',
' <RangeMinValue value="1.0"/>',
' <RangeMaxValue value="3.0"/>',
- ' </AxisValue>',
+ " </AxisValue>",
' <AxisValue index="2" Format="3">',
' <AxisIndex value="0"/>',
' <Flags value="0"/>',
' <ValueNameID value="2"/>',
' <Value value="400.0"/>',
' <LinkedValue value="700.0"/>',
- ' </AxisValue>',
- '</AxisValueArray>',
+ " </AxisValue>",
+ "</AxisValueArray>",
]
# Contains junk data for making sure we get our offset decoding right.
STAT_DATA_WITH_AXIS_JUNK = deHexStr(
- '0001 0000 ' # 0: Version=1.0
- '000A 0002 ' # 4: DesignAxisSize=10, DesignAxisCount=2
- '0000 0012 ' # 8: OffsetToDesignAxes=18
- '0000 0000 0000 ' # 12: AxisValueCount=3, OffsetToAxisValueOffsets=34
- '7767 6874 ' # 18: DesignAxis[0].AxisTag='wght'
- '012D 0002 ' # 22: DesignAxis[0].NameID=301, .AxisOrdering=2
- 'DEAD ' # 26: <junk>
- '5445 5354 ' # 28: DesignAxis[1].AxisTag='TEST'
- '012E 0001 ' # 32: DesignAxis[1].NameID=302, .AxisOrdering=1
- 'BEEF ' # 36: <junk>
-) # 38: <end>
+ "0001 0000 " # 0: Version=1.0
+ "000A 0002 " # 4: DesignAxisSize=10, DesignAxisCount=2
+ "0000 0012 " # 8: OffsetToDesignAxes=18
+ "0000 0000 0000 " # 12: AxisValueCount=3, OffsetToAxisValueOffsets=34
+ "7767 6874 " # 18: DesignAxis[0].AxisTag='wght'
+ "012D 0002 " # 22: DesignAxis[0].NameID=301, .AxisOrdering=2
+ "DEAD " # 26: <junk>
+ "5445 5354 " # 28: DesignAxis[1].AxisTag='TEST'
+ "012E 0001 " # 32: DesignAxis[1].NameID=302, .AxisOrdering=1
+ "BEEF " # 36: <junk>
+) # 38: <end>
-assert(len(STAT_DATA_WITH_AXIS_JUNK) == 38)
+assert len(STAT_DATA_WITH_AXIS_JUNK) == 38
STAT_XML_WITH_AXIS_JUNK = [
'<Version value="0x00010000"/>',
'<DesignAxisRecordSize value="10"/>',
- '<!-- DesignAxisCount=2 -->',
- '<DesignAxisRecord>',
+ "<!-- DesignAxisCount=2 -->",
+ "<DesignAxisRecord>",
' <Axis index="0">',
' <AxisTag value="wght"/>',
' <AxisNameID value="301"/>',
' <AxisOrdering value="2"/>',
' <MoreBytes index="0" value="222"/>', # 0xDE
' <MoreBytes index="1" value="173"/>', # 0xAD
- ' </Axis>',
+ " </Axis>",
' <Axis index="1">',
' <AxisTag value="TEST"/>',
' <AxisNameID value="302"/>',
' <AxisOrdering value="1"/>',
' <MoreBytes index="0" value="190"/>', # 0xBE
' <MoreBytes index="1" value="239"/>', # 0xEF
- ' </Axis>',
- '</DesignAxisRecord>',
- '<!-- AxisValueCount=0 -->',
+ " </Axis>",
+ "</DesignAxisRecord>",
+ "<!-- AxisValueCount=0 -->",
]
STAT_DATA_AXIS_VALUE_FORMAT3 = deHexStr(
- '0001 0000 ' # 0: Version=1.0
- '0008 0001 ' # 4: DesignAxisSize=8, DesignAxisCount=1
- '0000 0012 ' # 8: OffsetToDesignAxes=18
- '0001 ' # 12: AxisValueCount=1
- '0000 001A ' # 14: OffsetToAxisValueOffsets=26
- '7767 6874 ' # 18: DesignAxis[0].AxisTag='wght'
- '0102 ' # 22: DesignAxis[0].AxisNameID=258 'Weight'
- '0000 ' # 24: DesignAxis[0].AxisOrdering=0
- '0002 ' # 26: AxisValueOffsets=[2] (+26)
- '0003 ' # 28: AxisValue[0].Format=3
- '0000 0002 ' # 30: AxisValue[0].AxisIndex=0, .Flags=0x2
- '0002 ' # 34: AxisValue[0].ValueNameID=2 'Regular'
- '0190 0000 ' # 36: AxisValue[0].Value=400.0
- '02BC 0000 ' # 40: AxisValue[0].LinkedValue=700.0
-) # 44: <end>
-assert(len(STAT_DATA_AXIS_VALUE_FORMAT3) == 44)
+ "0001 0000 " # 0: Version=1.0
+ "0008 0001 " # 4: DesignAxisSize=8, DesignAxisCount=1
+ "0000 0012 " # 8: OffsetToDesignAxes=18
+ "0001 " # 12: AxisValueCount=1
+ "0000 001A " # 14: OffsetToAxisValueOffsets=26
+ "7767 6874 " # 18: DesignAxis[0].AxisTag='wght'
+ "0102 " # 22: DesignAxis[0].AxisNameID=258 'Weight'
+ "0000 " # 24: DesignAxis[0].AxisOrdering=0
+ "0002 " # 26: AxisValueOffsets=[2] (+26)
+ "0003 " # 28: AxisValue[0].Format=3
+ "0000 0002 " # 30: AxisValue[0].AxisIndex=0, .Flags=0x2
+ "0002 " # 34: AxisValue[0].ValueNameID=2 'Regular'
+ "0190 0000 " # 36: AxisValue[0].Value=400.0
+ "02BC 0000 " # 40: AxisValue[0].LinkedValue=700.0
+) # 44: <end>
+assert len(STAT_DATA_AXIS_VALUE_FORMAT3) == 44
STAT_XML_AXIS_VALUE_FORMAT3 = [
'<Version value="0x00010000"/>',
'<DesignAxisRecordSize value="8"/>',
- '<!-- DesignAxisCount=1 -->',
- '<DesignAxisRecord>',
+ "<!-- DesignAxisCount=1 -->",
+ "<DesignAxisRecord>",
' <Axis index="0">',
' <AxisTag value="wght"/>',
' <AxisNameID value="258"/>',
' <AxisOrdering value="0"/>',
- ' </Axis>',
- '</DesignAxisRecord>',
- '<!-- AxisValueCount=1 -->',
- '<AxisValueArray>',
+ " </Axis>",
+ "</DesignAxisRecord>",
+ "<!-- AxisValueCount=1 -->",
+ "<AxisValueArray>",
' <AxisValue index="0" Format="3">',
' <AxisIndex value="0"/>',
' <Flags value="2"/> <!-- ElidableAxisValueName -->',
' <ValueNameID value="2"/>',
' <Value value="400.0"/>',
' <LinkedValue value="700.0"/>',
- ' </AxisValue>',
- '</AxisValueArray>',
+ " </AxisValue>",
+ "</AxisValueArray>",
]
STAT_DATA_VERSION_1_1 = deHexStr(
- '0001 0001 ' # 0: Version=1.1
- '0008 0001 ' # 4: DesignAxisSize=8, DesignAxisCount=1
- '0000 0014 ' # 8: OffsetToDesignAxes=20
- '0001 ' # 12: AxisValueCount=1
- '0000 001C ' # 14: OffsetToAxisValueOffsets=28
- '0101 ' # 18: ElidedFallbackNameID: 257
- '7767 6874 ' # 20: DesignAxis[0].AxisTag='wght'
- '0102 ' # 24: DesignAxis[0].AxisNameID=258 'Weight'
- '0000 ' # 26: DesignAxis[0].AxisOrdering=0
- '0002 ' # 28: AxisValueOffsets=[2] (+28)
- '0003 ' # 30: AxisValue[0].Format=3
- '0000 0002 ' # 32: AxisValue[0].AxisIndex=0, .Flags=0x2
- '0002 ' # 36: AxisValue[0].ValueNameID=2 'Regular'
- '0190 0000 ' # 38: AxisValue[0].Value=400.0
- '02BC 0000 ' # 42: AxisValue[0].LinkedValue=700.0
-) # 46: <end>
-assert(len(STAT_DATA_VERSION_1_1) == 46)
+ "0001 0001 " # 0: Version=1.1
+ "0008 0001 " # 4: DesignAxisSize=8, DesignAxisCount=1
+ "0000 0014 " # 8: OffsetToDesignAxes=20
+ "0001 " # 12: AxisValueCount=1
+ "0000 001C " # 14: OffsetToAxisValueOffsets=28
+ "0101 " # 18: ElidedFallbackNameID: 257
+ "7767 6874 " # 20: DesignAxis[0].AxisTag='wght'
+ "0102 " # 24: DesignAxis[0].AxisNameID=258 'Weight'
+ "0000 " # 26: DesignAxis[0].AxisOrdering=0
+ "0002 " # 28: AxisValueOffsets=[2] (+28)
+ "0003 " # 30: AxisValue[0].Format=3
+ "0000 0002 " # 32: AxisValue[0].AxisIndex=0, .Flags=0x2
+ "0002 " # 36: AxisValue[0].ValueNameID=2 'Regular'
+ "0190 0000 " # 38: AxisValue[0].Value=400.0
+ "02BC 0000 " # 42: AxisValue[0].LinkedValue=700.0
+) # 46: <end>
+assert len(STAT_DATA_VERSION_1_1) == 46
STAT_XML_VERSION_1_1 = [
'<Version value="0x00010001"/>',
'<DesignAxisRecordSize value="8"/>',
- '<!-- DesignAxisCount=1 -->',
- '<DesignAxisRecord>',
+ "<!-- DesignAxisCount=1 -->",
+ "<DesignAxisRecord>",
' <Axis index="0">',
' <AxisTag value="wght"/>',
' <AxisNameID value="258"/>',
' <AxisOrdering value="0"/>',
- ' </Axis>',
- '</DesignAxisRecord>',
- '<!-- AxisValueCount=1 -->',
- '<AxisValueArray>',
+ " </Axis>",
+ "</DesignAxisRecord>",
+ "<!-- AxisValueCount=1 -->",
+ "<AxisValueArray>",
' <AxisValue index="0" Format="3">',
' <AxisIndex value="0"/>',
' <Flags value="2"/> <!-- ElidableAxisValueName -->',
' <ValueNameID value="2"/>',
' <Value value="400.0"/>',
' <LinkedValue value="700.0"/>',
- ' </AxisValue>',
- '</AxisValueArray>',
+ " </AxisValue>",
+ "</AxisValueArray>",
'<ElidedFallbackNameID value="257"/>',
]
class STATTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
def test_decompile_toXML(self):
- table = newTable('STAT')
- table.decompile(STAT_DATA, font=FakeFont(['.notdef']))
+ table = newTable("STAT")
+ table.decompile(STAT_DATA, font=FakeFont([".notdef"]))
self.assertEqual(getXML(table.toXML), STAT_XML)
def test_decompile_toXML_withAxisJunk(self):
- table = newTable('STAT')
- table.decompile(STAT_DATA_WITH_AXIS_JUNK, font=FakeFont(['.notdef']))
+ table = newTable("STAT")
+ table.decompile(STAT_DATA_WITH_AXIS_JUNK, font=FakeFont([".notdef"]))
self.assertEqual(getXML(table.toXML), STAT_XML_WITH_AXIS_JUNK)
def test_decompile_toXML_format3(self):
- table = newTable('STAT')
- table.decompile(STAT_DATA_AXIS_VALUE_FORMAT3,
- font=FakeFont(['.notdef']))
+ table = newTable("STAT")
+ table.decompile(STAT_DATA_AXIS_VALUE_FORMAT3, font=FakeFont([".notdef"]))
self.assertEqual(getXML(table.toXML), STAT_XML_AXIS_VALUE_FORMAT3)
def test_decompile_toXML_version_1_1(self):
- table = newTable('STAT')
- table.decompile(STAT_DATA_VERSION_1_1,
- font=FakeFont(['.notdef']))
+ table = newTable("STAT")
+ table.decompile(STAT_DATA_VERSION_1_1, font=FakeFont([".notdef"]))
self.assertEqual(getXML(table.toXML), STAT_XML_VERSION_1_1)
def test_compile_fromXML(self):
- table = newTable('STAT')
- font = FakeFont(['.notdef'])
+ table = newTable("STAT")
+ font = FakeFont([".notdef"])
for name, attrs, content in parseXML(STAT_XML):
table.fromXML(name, attrs, content, font=font)
self.assertEqual(table.compile(font), STAT_DATA)
def test_compile_fromXML_withAxisJunk(self):
- table = newTable('STAT')
- font = FakeFont(['.notdef'])
+ table = newTable("STAT")
+ font = FakeFont([".notdef"])
for name, attrs, content in parseXML(STAT_XML_WITH_AXIS_JUNK):
table.fromXML(name, attrs, content, font=font)
self.assertEqual(table.compile(font), STAT_DATA_WITH_AXIS_JUNK)
def test_compile_fromXML_format3(self):
- table = newTable('STAT')
- font = FakeFont(['.notdef'])
+ table = newTable("STAT")
+ font = FakeFont([".notdef"])
for name, attrs, content in parseXML(STAT_XML_AXIS_VALUE_FORMAT3):
table.fromXML(name, attrs, content, font=font)
self.assertEqual(table.compile(font), STAT_DATA_AXIS_VALUE_FORMAT3)
def test_compile_fromXML_version_1_1(self):
- table = newTable('STAT')
- font = FakeFont(['.notdef'])
+ table = newTable("STAT")
+ font = FakeFont([".notdef"])
for name, attrs, content in parseXML(STAT_XML_VERSION_1_1):
table.fromXML(name, attrs, content, font=font)
self.assertEqual(table.compile(font), STAT_DATA_VERSION_1_1)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/T_S_I__0_test.py b/Tests/ttLib/tables/T_S_I__0_test.py
index 44ca44ed..871ece3d 100644
--- a/Tests/ttLib/tables/T_S_I__0_test.py
+++ b/Tests/ttLib/tables/T_S_I__0_test.py
@@ -6,19 +6,15 @@ import pytest
# (gid, length, offset) for glyph programs
-TSI0_INDICES = [
- (0, 1, 0),
- (1, 5, 1),
- (2, 0, 1),
- (3, 0, 1),
- (4, 8, 6)]
+TSI0_INDICES = [(0, 1, 0), (1, 5, 1), (2, 0, 1), (3, 0, 1), (4, 8, 6)]
# (type, length, offset) for 'extra' programs
TSI0_EXTRA_INDICES = [
- (0xFFFA, 2, 14), # ppgm
- (0xFFFB, 4, 16), # cvt
- (0xFFFC, 6, 20), # reserved
- (0xFFFD, 10, 26)] # fpgm
+ (0xFFFA, 2, 14), # ppgm
+ (0xFFFB, 4, 16), # cvt
+ (0xFFFC, 6, 20), # reserved
+ (0xFFFD, 10, 26),
+] # fpgm
# compiled TSI0 table from data above
TSI0_DATA = deHexStr(
@@ -27,25 +23,28 @@ TSI0_DATA = deHexStr(
"0002 0000 00000001"
"0003 0000 00000001"
"0004 0008 00000006"
- "FFFE 0000 ABFC1F34" # 'magic' separates glyph from extra programs
+ "FFFE 0000 ABFC1F34" # 'magic' separates glyph from extra programs
"FFFA 0002 0000000E"
"FFFB 0004 00000010"
"FFFC 0006 00000014"
- "FFFD 000A 0000001A")
+ "FFFD 000A 0000001A"
+)
# empty font has no glyph programs but 4 extra programs are always present
EMPTY_TSI0_EXTRA_INDICES = [
(0xFFFA, 0, 0),
(0xFFFB, 0, 0),
(0xFFFC, 0, 0),
- (0xFFFD, 0, 0)]
+ (0xFFFD, 0, 0),
+]
EMPTY_TSI0_DATA = deHexStr(
"FFFE 0000 ABFC1F34"
"FFFA 0000 00000000"
"FFFB 0000 00000000"
"FFFC 0000 00000000"
- "FFFD 0000 00000000")
+ "FFFD 0000 00000000"
+)
@pytest.fixture
@@ -57,13 +56,12 @@ def table():
"numGlyphs, data, expected_indices, expected_extra_indices",
[
(5, TSI0_DATA, TSI0_INDICES, TSI0_EXTRA_INDICES),
- (0, EMPTY_TSI0_DATA, [], EMPTY_TSI0_EXTRA_INDICES)
+ (0, EMPTY_TSI0_DATA, [], EMPTY_TSI0_EXTRA_INDICES),
],
- ids=["simple", "empty"]
+ ids=["simple", "empty"],
)
-def test_decompile(table, numGlyphs, data, expected_indices,
- expected_extra_indices):
- font = {'maxp': SimpleNamespace(numGlyphs=numGlyphs)}
+def test_decompile(table, numGlyphs, data, expected_indices, expected_extra_indices):
+ font = {"maxp": SimpleNamespace(numGlyphs=numGlyphs)}
table.decompile(data, font)
@@ -77,9 +75,9 @@ def test_decompile(table, numGlyphs, data, expected_indices,
"numGlyphs, indices, extra_indices, expected_data",
[
(5, TSI0_INDICES, TSI0_EXTRA_INDICES, TSI0_DATA),
- (0, [], EMPTY_TSI0_EXTRA_INDICES, EMPTY_TSI0_DATA)
+ (0, [], EMPTY_TSI0_EXTRA_INDICES, EMPTY_TSI0_DATA),
],
- ids=["simple", "empty"]
+ ids=["simple", "empty"],
)
def test_compile(table, numGlyphs, indices, extra_indices, expected_data):
assert table.compile(ttFont=None) == b""
@@ -97,9 +95,11 @@ def test_set(table):
def test_toXML(table):
assert getXML(table.toXML, ttFont=None) == [
- '<!-- This table will be calculated by the compiler -->']
+ "<!-- This table will be calculated by the compiler -->"
+ ]
if __name__ == "__main__":
import sys
+
sys.exit(pytest.main(sys.argv))
diff --git a/Tests/ttLib/tables/T_S_I__1_test.py b/Tests/ttLib/tables/T_S_I__1_test.py
index b792221e..e14f41d2 100644
--- a/Tests/ttLib/tables/T_S_I__1_test.py
+++ b/Tests/ttLib/tables/T_S_I__1_test.py
@@ -14,15 +14,19 @@ TSI1_UTF8_DATA = b"""abcd\xc3\xa9ghijklmnopqrstuvxywz0123456789"""
def indextable():
table = table_T_S_I__0()
table.set(
- [(0, 1, 0), # gid 0, length=1, offset=0, text='a'
- (1, 5, 1), # gid 1, length=5, offset=1, text='bcdef'
- (2, 0, 1), # gid 2, length=0, offset=1, text=''
- (3, 0, 1), # gid 3, length=0, offset=1, text=''
- (4, 8, 6)], # gid 4, length=8, offset=6, text='ghijklmn'
- [(0xFFFA, 2, 14), # 'ppgm', length=2, offset=14, text='op'
- (0xFFFB, 4, 16), # 'cvt', length=4, offset=16, text='qrst'
- (0xFFFC, 6, 20), # 'reserved', length=6, offset=20, text='uvxywz'
- (0xFFFD, 10, 26)] # 'fpgm', length=10, offset=26, text='0123456789'
+ [
+ (0, 1, 0), # gid 0, length=1, offset=0, text='a'
+ (1, 5, 1), # gid 1, length=5, offset=1, text='bcdef'
+ (2, 0, 1), # gid 2, length=0, offset=1, text=''
+ (3, 0, 1), # gid 3, length=0, offset=1, text=''
+ (4, 8, 6),
+ ], # gid 4, length=8, offset=6, text='ghijklmn'
+ [
+ (0xFFFA, 2, 14), # 'ppgm', length=2, offset=14, text='op'
+ (0xFFFB, 4, 16), # 'cvt', length=4, offset=16, text='qrst'
+ (0xFFFC, 6, 20), # 'reserved', length=6, offset=20, text='uvxywz'
+ (0xFFFD, 10, 26),
+ ], # 'fpgm', length=10, offset=26, text='0123456789'
)
return table
@@ -33,8 +37,8 @@ def font(indextable):
# ['a', 'b', 'c', ...]
ch = 0x61
n = len(indextable.indices)
- font.glyphOrder = [chr(i) for i in range(ch, ch+n)]
- font['TSI0'] = indextable
+ font.glyphOrder = [chr(i) for i in range(ch, ch + n)]
+ font["TSI0"] = indextable
return font
@@ -43,11 +47,8 @@ def empty_font():
font = TTFont()
font.glyphOrder = []
indextable = table_T_S_I__0()
- indextable.set([], [(0xFFFA, 0, 0),
- (0xFFFB, 0, 0),
- (0xFFFC, 0, 0),
- (0xFFFD, 0, 0)])
- font['TSI0'] = indextable
+ indextable.set([], [(0xFFFA, 0, 0), (0xFFFB, 0, 0), (0xFFFC, 0, 0), (0xFFFD, 0, 0)])
+ font["TSI0"] = indextable
return font
@@ -56,16 +57,18 @@ def test_decompile(font):
table.decompile(TSI1_DATA, font)
assert table.glyphPrograms == {
- 'a': 'a',
- 'b': 'bcdef',
+ "a": "a",
+ "b": "bcdef",
# 'c': '', # zero-length entries are skipped
# 'd': '',
- 'e': 'ghijklmn'}
+ "e": "ghijklmn",
+ }
assert table.extraPrograms == {
- 'ppgm': 'op',
- 'cvt': 'qrst',
- 'reserved': 'uvxywz',
- 'fpgm': '0123456789'}
+ "ppgm": "op",
+ "cvt": "qrst",
+ "reserved": "uvxywz",
+ "fpgm": "0123456789",
+ }
def test_decompile_utf8(font):
@@ -73,16 +76,18 @@ def test_decompile_utf8(font):
table.decompile(TSI1_UTF8_DATA, font)
assert table.glyphPrograms == {
- 'a': 'a',
- 'b': 'bcd\u00e9',
+ "a": "a",
+ "b": "bcd\u00e9",
# 'c': '', # zero-length entries are skipped
# 'd': '',
- 'e': 'ghijklmn'}
+ "e": "ghijklmn",
+ }
assert table.extraPrograms == {
- 'ppgm': 'op',
- 'cvt': 'qrst',
- 'reserved': 'uvxywz',
- 'fpgm': '0123456789'}
+ "ppgm": "op",
+ "cvt": "qrst",
+ "reserved": "uvxywz",
+ "fpgm": "0123456789",
+ }
def test_decompile_empty(empty_font):
@@ -94,32 +99,32 @@ def test_decompile_empty(empty_font):
def test_decompile_invalid_length(empty_font):
- empty_font.glyphOrder = ['a']
- empty_font['TSI0'].indices = [(0, 0x8000+1, 0)]
+ empty_font.glyphOrder = ["a"]
+ empty_font["TSI0"].indices = [(0, 0x8000 + 1, 0)]
table = table_T_S_I__1()
with pytest.raises(TTLibError) as excinfo:
- table.decompile(b'', empty_font)
+ table.decompile(b"", empty_font)
assert excinfo.match("textLength .* must not be > 32768")
def test_decompile_offset_past_end(empty_font):
- empty_font.glyphOrder = ['foo', 'bar']
- content = 'baz'
+ empty_font.glyphOrder = ["foo", "bar"]
+ content = "baz"
data = tobytes(content)
- empty_font['TSI0'].indices = [(0, len(data), 0), (1, 1, len(data)+1)]
+ empty_font["TSI0"].indices = [(0, len(data), 0), (1, 1, len(data) + 1)]
table = table_T_S_I__1()
with CapturingLogHandler(table.log, "WARNING") as captor:
table.decompile(data, empty_font)
# the 'bar' program is skipped because its offset > len(data)
- assert table.glyphPrograms == {'foo': 'baz'}
+ assert table.glyphPrograms == {"foo": "baz"}
assert any("textOffset > totalLength" in r.msg for r in captor.records)
def test_decompile_magic_length_last_extra(empty_font):
- indextable = empty_font['TSI0']
+ indextable = empty_font["TSI0"]
indextable.extra_indices[-1] = (0xFFFD, 0x8000, 0)
content = "0" * (0x8000 + 1)
data = tobytes(content)
@@ -127,20 +132,22 @@ def test_decompile_magic_length_last_extra(empty_font):
table = table_T_S_I__1()
table.decompile(data, empty_font)
- assert table.extraPrograms['fpgm'] == content
+ assert table.extraPrograms["fpgm"] == content
def test_decompile_magic_length_last_glyph(empty_font):
- empty_font.glyphOrder = ['foo', 'bar']
- indextable = empty_font['TSI0']
+ empty_font.glyphOrder = ["foo", "bar"]
+ indextable = empty_font["TSI0"]
indextable.indices = [
(0, 3, 0),
- (1, 0x8000, 3)] # the actual length of 'bar' program is
+ (1, 0x8000, 3),
+ ] # the actual length of 'bar' program is
indextable.extra_indices = [ # the difference between the first extra's
- (0xFFFA, 0, 0x8004), # offset and 'bar' offset: 0x8004 - 3
+ (0xFFFA, 0, 0x8004), # offset and 'bar' offset: 0x8004 - 3
(0xFFFB, 0, 0x8004),
(0xFFFC, 0, 0x8004),
- (0xFFFD, 0, 0x8004)]
+ (0xFFFD, 0, 0x8004),
+ ]
foo_content = "0" * 3
bar_content = "1" * (0x8000 + 1)
data = tobytes(foo_content + bar_content)
@@ -148,17 +155,18 @@ def test_decompile_magic_length_last_glyph(empty_font):
table = table_T_S_I__1()
table.decompile(data, empty_font)
- assert table.glyphPrograms['foo'] == foo_content
- assert table.glyphPrograms['bar'] == bar_content
+ assert table.glyphPrograms["foo"] == foo_content
+ assert table.glyphPrograms["bar"] == bar_content
def test_decompile_magic_length_non_last(empty_font):
- indextable = empty_font['TSI0']
+ indextable = empty_font["TSI0"]
indextable.extra_indices = [
(0xFFFA, 3, 0),
(0xFFFB, 0x8000, 3), # the actual length of 'cvt' program is:
(0xFFFC, 0, 0x8004), # nextTextOffset - textOffset: 0x8004 - 3
- (0xFFFD, 0, 0x8004)]
+ (0xFFFD, 0, 0x8004),
+ ]
ppgm_content = "0" * 3
cvt_content = "1" * (0x8000 + 1)
data = tobytes(ppgm_content + cvt_content)
@@ -166,16 +174,17 @@ def test_decompile_magic_length_non_last(empty_font):
table = table_T_S_I__1()
table.decompile(data, empty_font)
- assert table.extraPrograms['ppgm'] == ppgm_content
- assert table.extraPrograms['cvt'] == cvt_content
+ assert table.extraPrograms["ppgm"] == ppgm_content
+ assert table.extraPrograms["cvt"] == cvt_content
table = table_T_S_I__1()
with CapturingLogHandler(table.log, "WARNING") as captor:
table.decompile(data[:-1], empty_font) # last entry is truncated
captor.assertRegex("nextTextOffset > totalLength")
- assert table.extraPrograms['cvt'] == cvt_content[:-1]
+ assert table.extraPrograms["cvt"] == cvt_content[:-1]
if __name__ == "__main__":
import sys
+
sys.exit(pytest.main(sys.argv))
diff --git a/Tests/ttLib/tables/TupleVariation_test.py b/Tests/ttLib/tables/TupleVariation_test.py
index 99b94918..bfb0e453 100644
--- a/Tests/ttLib/tables/TupleVariation_test.py
+++ b/Tests/ttLib/tables/TupleVariation_test.py
@@ -2,23 +2,29 @@ from fontTools.misc.loggingTools import CapturingLogHandler
from fontTools.misc.testTools import parseXML
from fontTools.misc.textTools import deHexStr, hexStr
from fontTools.misc.xmlWriter import XMLWriter
-from fontTools.ttLib.tables.TupleVariation import \
- log, TupleVariation, compileSharedTuples, decompileSharedTuples, \
- compileTupleVariationStore, decompileTupleVariationStore, inferRegion_
+from fontTools.ttLib.tables.TupleVariation import (
+ log,
+ TupleVariation,
+ compileSharedTuples,
+ decompileSharedTuples,
+ compileTupleVariationStore,
+ decompileTupleVariationStore,
+ inferRegion_,
+)
from io import BytesIO
import random
import unittest
def hexencode(s):
- h = hexStr(s).upper()
- return ' '.join([h[i:i+2] for i in range(0, len(h), 2)])
+ h = hexStr(s).upper()
+ return " ".join([h[i : i + 2] for i in range(0, len(h), 2)])
AXES = {
- "wdth": (0.25, 0.375, 0.5),
- "wght": (0.0, 1.0, 1.0),
- "opsz": (-0.75, -0.75, 0.0)
+ "wdth": (0.25, 0.375, 0.5),
+ "wght": (0.0, 1.0, 1.0),
+ "opsz": (-0.75, -0.75, 0.0),
}
@@ -26,18 +32,19 @@ AXES = {
# in Apple's TrueType specification.
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html
SKIA_GVAR_SHARED_TUPLES_DATA = deHexStr(
- "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 "
- "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00")
+ "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 "
+ "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00"
+)
SKIA_GVAR_SHARED_TUPLES = [
- {"wght": 1.0, "wdth": 0.0},
- {"wght": -1.0, "wdth": 0.0},
- {"wght": 0.0, "wdth": 1.0},
- {"wght": 0.0, "wdth": -1.0},
- {"wght": -1.0, "wdth": -1.0},
- {"wght": 1.0, "wdth": -1.0},
- {"wght": 1.0, "wdth": 1.0},
- {"wght": -1.0, "wdth": 1.0}
+ {"wght": 1.0, "wdth": 0.0},
+ {"wght": -1.0, "wdth": 0.0},
+ {"wght": 0.0, "wdth": 1.0},
+ {"wght": 0.0, "wdth": -1.0},
+ {"wght": -1.0, "wdth": -1.0},
+ {"wght": 1.0, "wdth": -1.0},
+ {"wght": 1.0, "wdth": 1.0},
+ {"wght": -1.0, "wdth": 1.0},
]
@@ -47,831 +54,987 @@ SKIA_GVAR_SHARED_TUPLES = [
# we can parse the data as it appears in the specification.
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html
SKIA_GVAR_I_DATA = deHexStr(
- "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 "
- "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 "
- "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E "
- "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 "
- "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 "
- "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 "
- "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A "
- "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 "
- "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 "
- "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 "
- "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 "
- "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 "
- "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 "
- "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 "
- "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 "
- "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE "
- "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00")
+ "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 "
+ "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 "
+ "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E "
+ "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 "
+ "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 "
+ "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 "
+ "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A "
+ "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 "
+ "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 "
+ "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 "
+ "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 "
+ "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 "
+ "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 "
+ "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 "
+ "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 "
+ "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE "
+ "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00"
+)
class TupleVariationTest(unittest.TestCase):
- def __init__(self, methodName):
- unittest.TestCase.__init__(self, methodName)
- # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
- # and fires deprecation warnings if a program uses the old name.
- if not hasattr(self, "assertRaisesRegex"):
- self.assertRaisesRegex = self.assertRaisesRegexp
-
- def test_equal(self):
- var1 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)])
- var2 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)])
- self.assertEqual(var1, var2)
-
- def test_equal_differentAxes(self):
- var1 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)])
- var2 = TupleVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)])
- self.assertNotEqual(var1, var2)
-
- def test_equal_differentCoordinates(self):
- var1 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)])
- var2 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)])
- self.assertNotEqual(var1, var2)
-
- def test_hasImpact_someDeltasNotZero(self):
- axes = {"wght":(0.0, 1.0, 1.0)}
- var = TupleVariation(axes, [(0,0), (9,8), (7,6)])
- self.assertTrue(var.hasImpact())
-
- def test_hasImpact_allDeltasZero(self):
- axes = {"wght":(0.0, 1.0, 1.0)}
- var = TupleVariation(axes, [(0,0), (0,0), (0,0)])
- self.assertTrue(var.hasImpact())
-
- def test_hasImpact_allDeltasNone(self):
- axes = {"wght":(0.0, 1.0, 1.0)}
- var = TupleVariation(axes, [None, None, None])
- self.assertFalse(var.hasImpact())
-
- def test_toXML_badDeltaFormat(self):
- writer = XMLWriter(BytesIO())
- g = TupleVariation(AXES, ["String"])
- with CapturingLogHandler(log, "ERROR") as captor:
- g.toXML(writer, ["wdth"])
- self.assertIn("bad delta format", [r.msg for r in captor.records])
- self.assertEqual([
- '<tuple>',
- '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>',
- '<!-- bad delta #0 -->',
- '</tuple>',
- ], TupleVariationTest.xml_lines(writer))
-
- def test_toXML_constants(self):
- writer = XMLWriter(BytesIO())
- g = TupleVariation(AXES, [42, None, 23, 0, -17, None])
- g.toXML(writer, ["wdth", "wght", "opsz"])
- self.assertEqual([
- '<tuple>',
- '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>',
- '<coord axis="wght" value="1.0"/>',
- '<coord axis="opsz" value="-0.75"/>',
- '<delta cvt="0" value="42"/>',
- '<delta cvt="2" value="23"/>',
- '<delta cvt="3" value="0"/>',
- '<delta cvt="4" value="-17"/>',
- '</tuple>'
- ], TupleVariationTest.xml_lines(writer))
-
- def test_toXML_points(self):
- writer = XMLWriter(BytesIO())
- g = TupleVariation(AXES, [(9,8), None, (7,6), (0,0), (-1,-2), None])
- g.toXML(writer, ["wdth", "wght", "opsz"])
- self.assertEqual([
- '<tuple>',
- '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>',
- '<coord axis="wght" value="1.0"/>',
- '<coord axis="opsz" value="-0.75"/>',
- '<delta pt="0" x="9" y="8"/>',
- '<delta pt="2" x="7" y="6"/>',
- '<delta pt="3" x="0" y="0"/>',
- '<delta pt="4" x="-1" y="-2"/>',
- '</tuple>'
- ], TupleVariationTest.xml_lines(writer))
-
- def test_toXML_allDeltasNone(self):
- writer = XMLWriter(BytesIO())
- axes = {"wght":(0.0, 1.0, 1.0)}
- g = TupleVariation(axes, [None] * 5)
- g.toXML(writer, ["wght", "wdth"])
- self.assertEqual([
- '<tuple>',
- '<coord axis="wght" value="1.0"/>',
- '<!-- no deltas -->',
- '</tuple>'
- ], TupleVariationTest.xml_lines(writer))
-
- def test_toXML_axes_floats(self):
- writer = XMLWriter(BytesIO())
- axes = {
- "wght": (0.0, 0.2999878, 0.7000122),
- "wdth": (0.0, 0.4000244, 0.4000244),
- }
- g = TupleVariation(axes, [None] * 5)
- g.toXML(writer, ["wght", "wdth"])
- self.assertEqual(
- [
- '<coord axis="wght" min="0.0" value="0.3" max="0.7"/>',
- '<coord axis="wdth" value="0.4"/>',
- ],
- TupleVariationTest.xml_lines(writer)[1:3]
- )
-
- def test_fromXML_badDeltaFormat(self):
- g = TupleVariation({}, [])
- with CapturingLogHandler(log, "WARNING") as captor:
- for name, attrs, content in parseXML('<delta a="1" b="2"/>'):
- g.fromXML(name, attrs, content)
- self.assertIn("bad delta format: a, b",
- [r.msg for r in captor.records])
-
- def test_fromXML_constants(self):
- g = TupleVariation({}, [None] * 4)
- for name, attrs, content in parseXML(
- '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>'
- '<coord axis="wght" value="1.0"/>'
- '<coord axis="opsz" value="-0.75"/>'
- '<delta cvt="1" value="42"/>'
- '<delta cvt="2" value="-23"/>'):
- g.fromXML(name, attrs, content)
- self.assertEqual(AXES, g.axes)
- self.assertEqual([None, 42, -23, None], g.coordinates)
-
- def test_fromXML_points(self):
- g = TupleVariation({}, [None] * 4)
- for name, attrs, content in parseXML(
- '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>'
- '<coord axis="wght" value="1.0"/>'
- '<coord axis="opsz" value="-0.75"/>'
- '<delta pt="1" x="33" y="44"/>'
- '<delta pt="2" x="-2" y="170"/>'):
- g.fromXML(name, attrs, content)
- self.assertEqual(AXES, g.axes)
- self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates)
-
- def test_fromXML_axes_floats(self):
- g = TupleVariation({}, [None] * 4)
- for name, attrs, content in parseXML(
- '<coord axis="wght" min="0.0" value="0.3" max="0.7"/>'
- '<coord axis="wdth" value="0.4"/>'
- ):
- g.fromXML(name, attrs, content)
-
- self.assertEqual(g.axes["wght"][0], 0)
- self.assertAlmostEqual(g.axes["wght"][1], 0.2999878)
- self.assertAlmostEqual(g.axes["wght"][2], 0.7000122)
-
- self.assertEqual(g.axes["wdth"][0], 0)
- self.assertAlmostEqual(g.axes["wdth"][1], 0.4000244)
- self.assertAlmostEqual(g.axes["wdth"][2], 0.4000244)
-
- def test_compile_sharedPeaks_nonIntermediate_sharedPoints(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)},
- [(7,4), (8,5), (9,6)])
- axisTags = ["wght", "wdth"]
- sharedPeakIndices = { var.compileCoord(axisTags): 0x77 }
- tup, deltas = var.compile(axisTags, sharedPeakIndices, pointData=b'')
- # len(deltas)=8; flags=None; tupleIndex=0x77
- # embeddedPeaks=[]; intermediateCoord=[]
- self.assertEqual("00 08 00 77", hexencode(tup))
- self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_sharedPeaks_intermediate_sharedPoints(self):
- var = TupleVariation(
- {"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)},
- [(7,4), (8,5), (9,6)])
- axisTags = ["wght", "wdth"]
- sharedPeakIndices = { var.compileCoord(axisTags): 0x77 }
- tup, deltas = var.compile(axisTags, sharedPeakIndices, pointData=b'')
- # len(deltas)=8; flags=INTERMEDIATE_REGION; tupleIndex=0x77
- # embeddedPeak=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)]
- self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tup))
- self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_sharedPeaks_nonIntermediate_privatePoints(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)},
- [(7,4), (8,5), (9,6)])
- axisTags = ["wght", "wdth"]
- sharedPeakIndices = { var.compileCoord(axisTags): 0x77 }
- tup, deltas = var.compile(axisTags, sharedPeakIndices)
- # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77
- # embeddedPeak=[]; intermediateCoord=[]
- self.assertEqual("00 09 20 77", hexencode(tup))
- self.assertEqual("00 " # all points in glyph
- "02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_sharedPeaks_intermediate_privatePoints(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)},
- [(7,4), (8,5), (9,6)])
- axisTags = ["wght", "wdth"]
- sharedPeakIndices = { var.compileCoord(axisTags): 0x77 }
- tuple, deltas = var.compile(axisTags, sharedPeakIndices)
- # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77
- # embeddedPeak=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)]
- self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00",
- hexencode(tuple))
- self.assertEqual("00 " # all points in glyph
- "02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_embeddedPeak_nonIntermediate_sharedPoints(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)},
- [(7,4), (8,5), (9,6)])
- tup, deltas = var.compile(axisTags=["wght", "wdth"], pointData=b'')
- # len(deltas)=8; flags=EMBEDDED_PEAK_TUPLE
- # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
- self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tup))
- self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_embeddedPeak_nonIntermediate_sharedConstants(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)},
- [3, 1, 4])
- tup, deltas = var.compile(axisTags=["wght", "wdth"], pointData=b'')
- # len(deltas)=4; flags=EMBEDDED_PEAK_TUPLE
- # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
- self.assertEqual("00 04 80 00 20 00 33 33", hexencode(tup))
- self.assertEqual("02 03 01 04", # delta: [3, 1, 4]
- hexencode(deltas))
-
- def test_compile_embeddedPeak_intermediate_sharedPoints(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)},
- [(7,4), (8,5), (9,6)])
- tup, deltas = var.compile(axisTags=["wght", "wdth"], pointData=b'')
- # len(deltas)=8; flags=EMBEDDED_PEAK_TUPLE
- # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)]
- self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33",
- hexencode(tup))
- self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_embeddedPeak_nonIntermediate_privatePoints(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)},
- [(7,4), (8,5), (9,6)])
- tup, deltas = var.compile(axisTags=["wght", "wdth"])
- # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_PEAK_TUPLE
- # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
- self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tup))
- self.assertEqual("00 " # all points in glyph
- "02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_embeddedPeak_nonIntermediate_privateConstants(self):
- var = TupleVariation(
- {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)},
- [7, 8, 9])
- tup, deltas = var.compile(axisTags=["wght", "wdth"])
- # len(deltas)=5; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_PEAK_TUPLE
- # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
- self.assertEqual("00 05 A0 00 20 00 33 33", hexencode(tup))
- self.assertEqual("00 " # all points in glyph
- "02 07 08 09", # delta: [7, 8, 9]
- hexencode(deltas))
-
- def test_compile_embeddedPeak_intermediate_privatePoints(self):
- var = TupleVariation(
- {"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)},
- [(7,4), (8,5), (9,6)])
- tup, deltas = var.compile(axisTags = ["wght", "wdth"])
- # len(deltas)=9;
- # flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_REGION|EMBEDDED_PEAK_TUPLE
- # embeddedPeak=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)]
- self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A",
- hexencode(tup))
- self.assertEqual("00 " # all points in glyph
- "02 07 08 09 " # deltaX: [7, 8, 9]
- "02 04 05 06", # deltaY: [4, 5, 6]
- hexencode(deltas))
-
- def test_compile_embeddedPeak_intermediate_privateConstants(self):
- var = TupleVariation(
- {"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)},
- [7, 8, 9])
- tup, deltas = var.compile(axisTags = ["wght", "wdth"])
- # len(deltas)=5;
- # flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_REGION|EMBEDDED_PEAK_TUPLE
- # embeddedPeak=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)]
- self.assertEqual("00 05 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A",
- hexencode(tup))
- self.assertEqual("00 " # all points in glyph
- "02 07 08 09", # delta: [7, 8, 9]
- hexencode(deltas))
-
- def test_compileCoord(self):
- var = TupleVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4)
- self.assertEqual("C0 00 20 00", hexencode(var.compileCoord(["wght", "wdth"])))
- self.assertEqual("20 00 C0 00", hexencode(var.compileCoord(["wdth", "wght"])))
- self.assertEqual("C0 00", hexencode(var.compileCoord(["wght"])))
-
- def test_compileIntermediateCoord(self):
- var = TupleVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4)
- self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(var.compileIntermediateCoord(["wght", "wdth"])))
- self.assertEqual("19 9A C0 00 26 66 00 00", hexencode(var.compileIntermediateCoord(["wdth", "wght"])))
- self.assertEqual(None, var.compileIntermediateCoord(["wght"]))
- self.assertEqual("19 9A 26 66", hexencode(var.compileIntermediateCoord(["wdth"])))
-
- def test_decompileCoord(self):
- decompileCoord = TupleVariation.decompileCoord_
- data = deHexStr("DE AD C0 00 20 00 DE AD")
- self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2))
-
- def test_decompileCoord_roundTrip(self):
- # Make sure we are not affected by https://github.com/fonttools/fonttools/issues/286
- data = deHexStr("7F B9 80 35")
- values, _ = TupleVariation.decompileCoord_(["wght", "wdth"], data, 0)
- axisValues = {axis:(val, val, val) for axis, val in values.items()}
- var = TupleVariation(axisValues, [None] * 4)
- self.assertEqual("7F B9 80 35", hexencode(var.compileCoord(["wght", "wdth"])))
-
- def test_compilePoints(self):
- compilePoints = lambda p: TupleVariation.compilePoints(set(p))
- self.assertEqual("00", hexencode(compilePoints(set()))) # all points in glyph
- self.assertEqual("01 00 07", hexencode(compilePoints([7])))
- self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535])))
- self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15])))
- self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500])))
- self.assertEqual("03 01 07 01 80 01 EC", hexencode(compilePoints([7, 8, 500])))
- self.assertEqual("04 01 07 01 81 BE E7 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE])))
- self.maxDiff = None
- self.assertEqual("81 2C" + # 300 points (0x12c) in total
- " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127]
- " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255]
- " 2B" + (44 * " 01"), # third run, contains 44 points: [256 .. 299]
- hexencode(compilePoints(range(300))))
- self.assertEqual("81 8F" + # 399 points (0x18f) in total
- " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127]
- " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255]
- " 7F" + (128 * " 01") + # third run, contains 128 points: [256 .. 383]
- " 0E" + (15 * " 01"), # fourth run, contains 15 points: [384 .. 398]
- hexencode(compilePoints(range(399))))
-
- def test_decompilePoints(self):
- numPointsInGlyph = 65536
- allPoints = list(range(numPointsInGlyph))
- def decompilePoints(data, offset):
- points, offset = TupleVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset, "gvar")
- # Conversion to list needed for Python 3.
- return (list(points), offset)
- # all points in glyph
- self.assertEqual((allPoints, 1), decompilePoints("00", 0))
- # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec)
- self.assertEqual((allPoints, 2), decompilePoints("80 00", 0))
- # 2 points; first run: [9, 9+6]
- self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0))
- # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF)
- self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0))
- # 1 point; first run: [7]
- self.assertEqual(([7], 3), decompilePoints("01 00 07", 0))
- # 1 point; first run: [7] in overly verbose encoding
- self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0))
- # 1 point; first run: [65535]; requires words to be treated as unsigned numbers
- self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0))
- # 4 points; first run: [7, 8]; second run: [255, 257]. 257 is stored in delta-encoded bytes (0xFF + 2).
- self.assertEqual(([7, 8, 263, 265], 7), decompilePoints("04 01 07 01 01 FF 02", 0))
- # combination of all encodings, preceded and followed by 4 bytes of unused data
- data = "DE AD DE AD 04 01 07 01 81 BE E7 0C 0F DE AD DE AD"
- self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4))
- self.assertSetEqual(set(range(300)), set(decompilePoints(
- "81 2C" + # 300 points (0x12c) in total
- " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127]
- " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255]
- " AB" + (44 * " 00 01"), # third run, contains 44 points: [256 .. 299]
- 0)[0]))
- self.assertSetEqual(set(range(399)), set(decompilePoints(
- "81 8F" + # 399 points (0x18f) in total
- " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127]
- " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255]
- " FF" + (128 * " 00 01") + # third run, contains 128 points: [256 .. 383]
- " 8E" + (15 * " 00 01"), # fourth run, contains 15 points: [384 .. 398]
- 0)[0]))
-
- def test_decompilePoints_shouldAcceptBadPointNumbers(self):
- decompilePoints = TupleVariation.decompilePoints_
- # 2 points; first run: [3, 9].
- numPointsInGlyph = 8
- with CapturingLogHandler(log, "WARNING") as captor:
- decompilePoints(numPointsInGlyph,
- deHexStr("02 01 03 06"), 0, "cvar")
- self.assertIn("point 9 out of range in 'cvar' table",
- [r.msg for r in captor.records])
-
- def test_decompilePoints_roundTrip(self):
- numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding
- compile = lambda points: TupleVariation.compilePoints(points)
- decompile = lambda data: set(TupleVariation.decompilePoints_(numPointsInGlyph, data, 0, "gvar")[0])
- for i in range(50):
- points = set(random.sample(range(numPointsInGlyph), 30))
- self.assertSetEqual(points, decompile(compile(points)),
- "failed round-trip decompile/compilePoints; points=%s" % points)
- allPoints = set(range(numPointsInGlyph))
- self.assertSetEqual(allPoints, decompile(compile(allPoints)))
- self.assertSetEqual(allPoints, decompile(compile(set())))
-
- def test_compileDeltas_points(self):
- var = TupleVariation({}, [None, (1, 0), (2, 0), None, (4, 0), None])
- # deltaX for points: [1, 2, 4]; deltaY for points: [0, 0, 0]
- self.assertEqual("02 01 02 04 82", hexencode(var.compileDeltas()))
-
- def test_compileDeltas_constants(self):
- var = TupleVariation({}, [None, 1, 2, None, 4, None])
- # delta for cvts: [1, 2, 4]
- self.assertEqual("02 01 02 04", hexencode(var.compileDeltas()))
-
- def test_compileDeltaValues(self):
- compileDeltaValues = lambda values: hexencode(TupleVariation.compileDeltaValues_(values))
- # zeroes
- self.assertEqual("80", compileDeltaValues([0]))
- self.assertEqual("BF", compileDeltaValues([0] * 64))
- self.assertEqual("BF 80", compileDeltaValues([0] * 65))
- self.assertEqual("BF A3", compileDeltaValues([0] * 100))
- self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256))
- # bytes
- self.assertEqual("00 01", compileDeltaValues([1]))
- self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2]))
- self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64))
- self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65))
- # words
- self.assertEqual("40 66 66", compileDeltaValues([0x6666]))
- self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768]))
- self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64))
- self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65))
- # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run
- self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127]))
- self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127]))
- self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127]))
- self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127]))
- # bytes, zeroes
- self.assertEqual("01 01 00", compileDeltaValues([1, 0]))
- self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0]))
- # words, bytes, words: a single byte is more compact when encoded as part of the words run
- self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777]))
- self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777]))
- # words, zeroes, words
- self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777]))
- self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777]))
- self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777]))
- # words, zeroes, bytes
- self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3]))
- self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3]))
- self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3]))
- # words, zeroes
- self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0]))
- self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0]))
-
- def test_decompileDeltas(self):
- decompileDeltas = TupleVariation.decompileDeltas_
- # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F)
- self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0))
- # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F)
- self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0))
- # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F)
- self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0))
- # combination of all three encodings, preceded and followed by 4 bytes of unused data
- data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF")
- self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4))
-
- def test_decompileDeltas_roundTrip(self):
- numDeltas = 30
- compile = TupleVariation.compileDeltaValues_
- decompile = lambda data: TupleVariation.decompileDeltas_(numDeltas, data, 0)[0]
- for i in range(50):
- deltas = random.sample(range(-128, 127), 10)
- deltas.extend(random.sample(range(-32768, 32767), 10))
- deltas.extend([0] * 10)
- random.shuffle(deltas)
- self.assertListEqual(deltas, decompile(compile(deltas)))
-
- def test_compileSharedTuples(self):
- # Below, the peak coordinate {"wght": 1.0, "wdth": 0.8} appears
- # three times (most frequent sorted first); {"wght": 1.0, "wdth": 0.5}
- # and {"wght": 1.0, "wdth": 0.7} both appears two times (tie) and
- # are sorted alphanumerically to ensure determinism.
- # The peak coordinate {"wght": 1.0, "wdth": 0.9} appears only once
- # and is thus ignored.
- # Because the start and end of variation ranges is not encoded
- # into the shared pool, they should get ignored.
- deltas = [None] * 4
- variations = [
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.5, 0.7, 1.0)
- }, deltas),
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.2, 0.7, 1.0)
- }, deltas),
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.2, 0.8, 1.0)
- }, deltas),
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.3, 0.5, 1.0)
- }, deltas),
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.3, 0.8, 1.0)
- }, deltas),
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.3, 0.9, 1.0)
- }, deltas),
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.4, 0.8, 1.0)
- }, deltas),
- TupleVariation({
- "wght": (1.0, 1.0, 1.0),
- "wdth": (0.5, 0.5, 1.0)
- }, deltas),
- ]
- result = compileSharedTuples(["wght", "wdth"], variations)
- self.assertEqual([hexencode(c) for c in result],
- ["40 00 33 33", "40 00 20 00", "40 00 2C CD"])
-
- def test_decompileSharedTuples_Skia(self):
- sharedTuples = decompileSharedTuples(
- axisTags=["wght", "wdth"], sharedTupleCount=8,
- data=SKIA_GVAR_SHARED_TUPLES_DATA, offset=0)
- self.assertEqual(sharedTuples, SKIA_GVAR_SHARED_TUPLES)
-
- def test_decompileSharedTuples_empty(self):
- self.assertEqual(decompileSharedTuples(["wght"], 0, b"", 0), [])
-
- def test_compileTupleVariationStore_allVariationsRedundant(self):
- axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)}
- variations = [
- TupleVariation(axes, [None] * 4),
- TupleVariation(axes, [None] * 4),
- TupleVariation(axes, [None] * 4)
- ]
- self.assertEqual(
- compileTupleVariationStore(variations, pointCount=8,
- axisTags=["wght", "opsz"],
- sharedTupleIndices={}),
- (0, b"", b""))
-
- def test_compileTupleVariationStore_noVariations(self):
- self.assertEqual(
- compileTupleVariationStore(variations=[], pointCount=8,
- axisTags=["wght", "opsz"],
- sharedTupleIndices={}),
- (0, b"", b""))
-
- def test_compileTupleVariationStore_roundTrip_cvar(self):
- deltas = [1, 2, 3, 4]
- variations = [
- TupleVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)},
- deltas),
- TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)},
- deltas)
- ]
- tupleVariationCount, tuples, data = compileTupleVariationStore(
- variations, pointCount=4, axisTags=["wght", "wdth"],
- sharedTupleIndices={})
- self.assertEqual(
- decompileTupleVariationStore("cvar", ["wght", "wdth"],
- tupleVariationCount, pointCount=4,
- sharedTuples={}, data=(tuples + data),
- pos=0, dataPos=len(tuples)),
- variations)
-
- def test_compileTupleVariationStore_roundTrip_gvar(self):
- deltas = [(1,1), (2,2), (3,3), (4,4)]
- variations = [
- TupleVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)},
- deltas),
- TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)},
- deltas)
- ]
- tupleVariationCount, tuples, data = compileTupleVariationStore(
- variations, pointCount=4, axisTags=["wght", "wdth"],
- sharedTupleIndices={})
- self.assertEqual(
- decompileTupleVariationStore("gvar", ["wght", "wdth"],
- tupleVariationCount, pointCount=4,
- sharedTuples={}, data=(tuples + data),
- pos=0, dataPos=len(tuples)),
- variations)
-
- def test_decompileTupleVariationStore_Skia_I(self):
- tvar = decompileTupleVariationStore(
- tableTag="gvar", axisTags=["wght", "wdth"],
- tupleVariationCount=8, pointCount=18,
- sharedTuples=SKIA_GVAR_SHARED_TUPLES,
- data=SKIA_GVAR_I_DATA, pos=4, dataPos=36)
- self.assertEqual(len(tvar), 8)
- self.assertEqual(tvar[0].axes, {"wght": (0.0, 1.0, 1.0)})
- self.assertEqual(
- " ".join(["%d,%d" % c for c in tvar[0].coordinates]),
- "257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 "
- "257,0 259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0")
-
- def test_decompileTupleVariationStore_empty(self):
- self.assertEqual(
- decompileTupleVariationStore(tableTag="gvar", axisTags=[],
- tupleVariationCount=0, pointCount=5,
- sharedTuples=[],
- data=b"", pos=4, dataPos=4),
- [])
-
- def test_getTupleSize(self):
- getTupleSize = TupleVariation.getTupleSize_
- numAxes = 3
- self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes))
- self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes))
- self.assertEqual(4, getTupleSize(0x2077, numAxes))
- self.assertEqual(4, getTupleSize(11, numAxes))
-
- def test_inferRegion(self):
- start, end = inferRegion_({"wght": -0.3, "wdth": 0.7})
- self.assertEqual(start, {"wght": -0.3, "wdth": 0.0})
- self.assertEqual(end, {"wght": 0.0, "wdth": 0.7})
-
- @staticmethod
- def xml_lines(writer):
- content = writer.file.getvalue().decode("utf-8")
- return [line.strip() for line in content.splitlines()][1:]
-
- def test_getCoordWidth(self):
- empty = TupleVariation({}, [])
- self.assertEqual(empty.getCoordWidth(), 0)
-
- empty = TupleVariation({}, [None])
- self.assertEqual(empty.getCoordWidth(), 0)
-
- gvarTuple = TupleVariation({}, [None, (0, 0)])
- self.assertEqual(gvarTuple.getCoordWidth(), 2)
-
- cvarTuple = TupleVariation({}, [None, 0])
- self.assertEqual(cvarTuple.getCoordWidth(), 1)
-
- cvarTuple.coordinates[1] *= 1.0
- self.assertEqual(cvarTuple.getCoordWidth(), 1)
-
- with self.assertRaises(TypeError):
- TupleVariation({}, [None, "a"]).getCoordWidth()
-
- def test_scaleDeltas_cvar(self):
- var = TupleVariation({}, [100, None])
-
- var.scaleDeltas(1.0)
- self.assertEqual(var.coordinates, [100, None])
-
- var.scaleDeltas(0.333)
- self.assertAlmostEqual(var.coordinates[0], 33.3)
- self.assertIsNone(var.coordinates[1])
-
- var.scaleDeltas(0.0)
- self.assertEqual(var.coordinates, [0, None])
-
- def test_scaleDeltas_gvar(self):
- var = TupleVariation({}, [(100, 200), None])
-
- var.scaleDeltas(1.0)
- self.assertEqual(var.coordinates, [(100, 200), None])
-
- var.scaleDeltas(0.333)
- self.assertAlmostEqual(var.coordinates[0][0], 33.3)
- self.assertAlmostEqual(var.coordinates[0][1], 66.6)
- self.assertIsNone(var.coordinates[1])
-
- var.scaleDeltas(0.0)
- self.assertEqual(var.coordinates, [(0, 0), None])
-
- def test_roundDeltas_cvar(self):
- var = TupleVariation({}, [55.5, None, 99.9])
- var.roundDeltas()
- self.assertEqual(var.coordinates, [56, None, 100])
-
- def test_roundDeltas_gvar(self):
- var = TupleVariation({}, [(55.5, 100.0), None, (99.9, 100.0)])
- var.roundDeltas()
- self.assertEqual(var.coordinates, [(56, 100), None, (100, 100)])
-
- def test_calcInferredDeltas(self):
- var = TupleVariation({}, [(0, 0), None, None, None])
- coords = [(1, 1), (1, 1), (1, 1), (1, 1)]
-
- var.calcInferredDeltas(coords, [])
-
- self.assertEqual(
- var.coordinates,
- [(0, 0), (0, 0), (0, 0), (0, 0)]
- )
-
- def test_calcInferredDeltas_invalid(self):
- # cvar tuples can't have inferred deltas
- with self.assertRaises(TypeError):
- TupleVariation({}, [0]).calcInferredDeltas([], [])
-
- # origCoords must have same length as self.coordinates
- with self.assertRaises(ValueError):
- TupleVariation({}, [(0, 0), None]).calcInferredDeltas([], [])
-
- # at least 4 phantom points required
- with self.assertRaises(AssertionError):
- TupleVariation({}, [(0, 0), None]).calcInferredDeltas([(0, 0), (0, 0)], [])
-
- with self.assertRaises(AssertionError):
- TupleVariation({}, [(0, 0)] + [None]*5).calcInferredDeltas(
- [(0, 0)]*6,
- [1, 0] # endPts not in increasing order
- )
-
- def test_optimize(self):
- var = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0)]*5)
-
- var.optimize([(0, 0)]*5, [0])
-
- self.assertEqual(var.coordinates, [None, None, None, None, None])
-
- def test_optimize_isComposite(self):
- # when a composite glyph's deltas are all (0, 0), we still want
- # to write out an entry in gvar, else macOS doesn't apply any
- # variations to the composite glyph (even if its individual components
- # do vary).
- # https://github.com/fonttools/fonttools/issues/1381
- var = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0)]*5)
- var.optimize([(0, 0)]*5, [0], isComposite=True)
- self.assertEqual(var.coordinates, [(0, 0)]*5)
-
- # it takes more than 128 (0, 0) deltas before the optimized tuple with
- # (None) inferred deltas (except for the first) becomes smaller than
- # the un-optimized one that has all deltas explicitly set to (0, 0).
- var = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0)]*129)
- var.optimize([(0, 0)]*129, list(range(129-4)), isComposite=True)
- self.assertEqual(var.coordinates, [(0, 0)] + [None]*128)
-
- def test_sum_deltas_gvar(self):
- var1 = TupleVariation(
- {},
- [
- (-20, 0), (-20, 0), (20, 0), (20, 0),
- (0, 0), (0, 0), (0, 0), (0, 0),
- ]
- )
- var2 = TupleVariation(
- {},
- [
- (-10, 0), (-10, 0), (10, 0), (10, 0),
- (0, 0), (20, 0), (0, 0), (0, 0),
- ]
- )
-
- var1 += var2
-
- self.assertEqual(
- var1.coordinates,
- [
- (-30, 0), (-30, 0), (30, 0), (30, 0),
- (0, 0), (20, 0), (0, 0), (0, 0),
- ]
- )
-
- def test_sum_deltas_gvar_invalid_length(self):
- var1 = TupleVariation({}, [(1, 2)])
- var2 = TupleVariation({}, [(1, 2), (3, 4)])
-
- with self.assertRaisesRegex(ValueError, "deltas with different lengths"):
- var1 += var2
-
- def test_sum_deltas_gvar_with_inferred_points(self):
- var1 = TupleVariation({}, [(1, 2), None])
- var2 = TupleVariation({}, [(2, 3), None])
-
- with self.assertRaisesRegex(ValueError, "deltas with inferred points"):
- var1 += var2
-
- def test_sum_deltas_cvar(self):
- axes = {"wght": (0.0, 1.0, 1.0)}
- var1 = TupleVariation(axes, [0, 1, None, None])
- var2 = TupleVariation(axes, [None, 2, None, 3])
- var3 = TupleVariation(axes, [None, None, None, 4])
-
- var1 += var2
- var1 += var3
-
- self.assertEqual(var1.coordinates, [0, 3, None, 7])
+ def __init__(self, methodName):
+ unittest.TestCase.__init__(self, methodName)
+ # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
+ # and fires deprecation warnings if a program uses the old name.
+ if not hasattr(self, "assertRaisesRegex"):
+ self.assertRaisesRegex = self.assertRaisesRegexp
+
+ def test_equal(self):
+ var1 = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0), (9, 8), (7, 6)])
+ var2 = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0), (9, 8), (7, 6)])
+ self.assertEqual(var1, var2)
+
+ def test_equal_differentAxes(self):
+ var1 = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0), (9, 8), (7, 6)])
+ var2 = TupleVariation({"wght": (0.7, 0.8, 0.9)}, [(0, 0), (9, 8), (7, 6)])
+ self.assertNotEqual(var1, var2)
+
+ def test_equal_differentCoordinates(self):
+ var1 = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0), (9, 8), (7, 6)])
+ var2 = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0), (9, 8)])
+ self.assertNotEqual(var1, var2)
+
+ def test_hasImpact_someDeltasNotZero(self):
+ axes = {"wght": (0.0, 1.0, 1.0)}
+ var = TupleVariation(axes, [(0, 0), (9, 8), (7, 6)])
+ self.assertTrue(var.hasImpact())
+
+ def test_hasImpact_allDeltasZero(self):
+ axes = {"wght": (0.0, 1.0, 1.0)}
+ var = TupleVariation(axes, [(0, 0), (0, 0), (0, 0)])
+ self.assertTrue(var.hasImpact())
+
+ def test_hasImpact_allDeltasNone(self):
+ axes = {"wght": (0.0, 1.0, 1.0)}
+ var = TupleVariation(axes, [None, None, None])
+ self.assertFalse(var.hasImpact())
+
+ def test_toXML_badDeltaFormat(self):
+ writer = XMLWriter(BytesIO())
+ g = TupleVariation(AXES, ["String"])
+ with CapturingLogHandler(log, "ERROR") as captor:
+ g.toXML(writer, ["wdth"])
+ self.assertIn("bad delta format", [r.msg for r in captor.records])
+ self.assertEqual(
+ [
+ "<tuple>",
+ '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>',
+ "<!-- bad delta #0 -->",
+ "</tuple>",
+ ],
+ TupleVariationTest.xml_lines(writer),
+ )
+
+ def test_toXML_constants(self):
+ writer = XMLWriter(BytesIO())
+ g = TupleVariation(AXES, [42, None, 23, 0, -17, None])
+ g.toXML(writer, ["wdth", "wght", "opsz"])
+ self.assertEqual(
+ [
+ "<tuple>",
+ '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>',
+ '<coord axis="wght" value="1.0"/>',
+ '<coord axis="opsz" value="-0.75"/>',
+ '<delta cvt="0" value="42"/>',
+ '<delta cvt="2" value="23"/>',
+ '<delta cvt="3" value="0"/>',
+ '<delta cvt="4" value="-17"/>',
+ "</tuple>",
+ ],
+ TupleVariationTest.xml_lines(writer),
+ )
+
+ def test_toXML_points(self):
+ writer = XMLWriter(BytesIO())
+ g = TupleVariation(AXES, [(9, 8), None, (7, 6), (0, 0), (-1, -2), None])
+ g.toXML(writer, ["wdth", "wght", "opsz"])
+ self.assertEqual(
+ [
+ "<tuple>",
+ '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>',
+ '<coord axis="wght" value="1.0"/>',
+ '<coord axis="opsz" value="-0.75"/>',
+ '<delta pt="0" x="9" y="8"/>',
+ '<delta pt="2" x="7" y="6"/>',
+ '<delta pt="3" x="0" y="0"/>',
+ '<delta pt="4" x="-1" y="-2"/>',
+ "</tuple>",
+ ],
+ TupleVariationTest.xml_lines(writer),
+ )
+
+ def test_toXML_allDeltasNone(self):
+ writer = XMLWriter(BytesIO())
+ axes = {"wght": (0.0, 1.0, 1.0)}
+ g = TupleVariation(axes, [None] * 5)
+ g.toXML(writer, ["wght", "wdth"])
+ self.assertEqual(
+ [
+ "<tuple>",
+ '<coord axis="wght" value="1.0"/>',
+ "<!-- no deltas -->",
+ "</tuple>",
+ ],
+ TupleVariationTest.xml_lines(writer),
+ )
+
+ def test_toXML_axes_floats(self):
+ writer = XMLWriter(BytesIO())
+ axes = {
+ "wght": (0.0, 0.2999878, 0.7000122),
+ "wdth": (0.0, 0.4000244, 0.4000244),
+ }
+ g = TupleVariation(axes, [None] * 5)
+ g.toXML(writer, ["wght", "wdth"])
+ self.assertEqual(
+ [
+ '<coord axis="wght" min="0.0" value="0.3" max="0.7"/>',
+ '<coord axis="wdth" value="0.4"/>',
+ ],
+ TupleVariationTest.xml_lines(writer)[1:3],
+ )
+
+ def test_fromXML_badDeltaFormat(self):
+ g = TupleVariation({}, [])
+ with CapturingLogHandler(log, "WARNING") as captor:
+ for name, attrs, content in parseXML('<delta a="1" b="2"/>'):
+ g.fromXML(name, attrs, content)
+ self.assertIn("bad delta format: a, b", [r.msg for r in captor.records])
+
+ def test_fromXML_constants(self):
+ g = TupleVariation({}, [None] * 4)
+ for name, attrs, content in parseXML(
+ '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>'
+ '<coord axis="wght" value="1.0"/>'
+ '<coord axis="opsz" value="-0.75"/>'
+ '<delta cvt="1" value="42"/>'
+ '<delta cvt="2" value="-23"/>'
+ ):
+ g.fromXML(name, attrs, content)
+ self.assertEqual(AXES, g.axes)
+ self.assertEqual([None, 42, -23, None], g.coordinates)
+
+ def test_fromXML_points(self):
+ g = TupleVariation({}, [None] * 4)
+ for name, attrs, content in parseXML(
+ '<coord axis="wdth" min="0.25" value="0.375" max="0.5"/>'
+ '<coord axis="wght" value="1.0"/>'
+ '<coord axis="opsz" value="-0.75"/>'
+ '<delta pt="1" x="33" y="44"/>'
+ '<delta pt="2" x="-2" y="170"/>'
+ ):
+ g.fromXML(name, attrs, content)
+ self.assertEqual(AXES, g.axes)
+ self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates)
+
+ def test_fromXML_axes_floats(self):
+ g = TupleVariation({}, [None] * 4)
+ for name, attrs, content in parseXML(
+ '<coord axis="wght" min="0.0" value="0.3" max="0.7"/>'
+ '<coord axis="wdth" value="0.4"/>'
+ ):
+ g.fromXML(name, attrs, content)
+
+ self.assertEqual(g.axes["wght"][0], 0)
+ self.assertAlmostEqual(g.axes["wght"][1], 0.2999878)
+ self.assertAlmostEqual(g.axes["wght"][2], 0.7000122)
+
+ self.assertEqual(g.axes["wdth"][0], 0)
+ self.assertAlmostEqual(g.axes["wdth"][1], 0.4000244)
+ self.assertAlmostEqual(g.axes["wdth"][2], 0.4000244)
+
+ def test_compile_sharedPeaks_nonIntermediate_sharedPoints(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ axisTags = ["wght", "wdth"]
+ sharedPeakIndices = {var.compileCoord(axisTags): 0x77}
+ tup, deltas = var.compile(axisTags, sharedPeakIndices, pointData=b"")
+ # len(deltas)=8; flags=None; tupleIndex=0x77
+ # embeddedPeaks=[]; intermediateCoord=[]
+ self.assertEqual("00 08 00 77", hexencode(tup))
+ self.assertEqual(
+ "02 07 08 09 " "02 04 05 06", # deltaX: [7, 8, 9] # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_sharedPeaks_intermediate_sharedPoints(self):
+ var = TupleVariation(
+ {"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ axisTags = ["wght", "wdth"]
+ sharedPeakIndices = {var.compileCoord(axisTags): 0x77}
+ tup, deltas = var.compile(axisTags, sharedPeakIndices, pointData=b"")
+ # len(deltas)=8; flags=INTERMEDIATE_REGION; tupleIndex=0x77
+ # embeddedPeak=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)]
+ self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tup))
+ self.assertEqual(
+ "02 07 08 09 " "02 04 05 06", # deltaX: [7, 8, 9] # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_sharedPeaks_nonIntermediate_privatePoints(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ axisTags = ["wght", "wdth"]
+ sharedPeakIndices = {var.compileCoord(axisTags): 0x77}
+ tup, deltas = var.compile(axisTags, sharedPeakIndices)
+ # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77
+ # embeddedPeak=[]; intermediateCoord=[]
+ self.assertEqual("00 09 20 77", hexencode(tup))
+ self.assertEqual(
+ "00 " # all points in glyph
+ "02 07 08 09 " # deltaX: [7, 8, 9]
+ "02 04 05 06", # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_sharedPeaks_intermediate_privatePoints(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ axisTags = ["wght", "wdth"]
+ sharedPeakIndices = {var.compileCoord(axisTags): 0x77}
+ tuple, deltas = var.compile(axisTags, sharedPeakIndices)
+ # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77
+ # embeddedPeak=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)]
+ self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", hexencode(tuple))
+ self.assertEqual(
+ "00 " # all points in glyph
+ "02 07 08 09 " # deltaX: [7, 8, 9]
+ "02 04 05 06", # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_embeddedPeak_nonIntermediate_sharedPoints(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ tup, deltas = var.compile(axisTags=["wght", "wdth"], pointData=b"")
+ # len(deltas)=8; flags=EMBEDDED_PEAK_TUPLE
+ # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
+ self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tup))
+ self.assertEqual(
+ "02 07 08 09 " "02 04 05 06", # deltaX: [7, 8, 9] # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_embeddedPeak_nonIntermediate_sharedConstants(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, [3, 1, 4]
+ )
+ tup, deltas = var.compile(axisTags=["wght", "wdth"], pointData=b"")
+ # len(deltas)=4; flags=EMBEDDED_PEAK_TUPLE
+ # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
+ self.assertEqual("00 04 80 00 20 00 33 33", hexencode(tup))
+ self.assertEqual("02 03 01 04", hexencode(deltas)) # delta: [3, 1, 4]
+
+ def test_compile_embeddedPeak_intermediate_sharedPoints(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ tup, deltas = var.compile(axisTags=["wght", "wdth"], pointData=b"")
+ # len(deltas)=8; flags=EMBEDDED_PEAK_TUPLE
+ # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)]
+ self.assertEqual(
+ "00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", hexencode(tup)
+ )
+ self.assertEqual(
+ "02 07 08 09 " "02 04 05 06", # deltaX: [7, 8, 9] # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_embeddedPeak_nonIntermediate_privatePoints(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ tup, deltas = var.compile(axisTags=["wght", "wdth"])
+ # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_PEAK_TUPLE
+ # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
+ self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tup))
+ self.assertEqual(
+ "00 " # all points in glyph
+ "02 07 08 09 " # deltaX: [7, 8, 9]
+ "02 04 05 06", # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_embeddedPeak_nonIntermediate_privateConstants(self):
+ var = TupleVariation(
+ {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, [7, 8, 9]
+ )
+ tup, deltas = var.compile(axisTags=["wght", "wdth"])
+ # len(deltas)=5; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_PEAK_TUPLE
+ # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[]
+ self.assertEqual("00 05 A0 00 20 00 33 33", hexencode(tup))
+ self.assertEqual(
+ "00 " "02 07 08 09", # all points in glyph # delta: [7, 8, 9]
+ hexencode(deltas),
+ )
+
+ def test_compile_embeddedPeak_intermediate_privatePoints(self):
+ var = TupleVariation(
+ {"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, [(7, 4), (8, 5), (9, 6)]
+ )
+ tup, deltas = var.compile(axisTags=["wght", "wdth"])
+ # len(deltas)=9;
+ # flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_REGION|EMBEDDED_PEAK_TUPLE
+ # embeddedPeak=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)]
+ self.assertEqual(
+ "00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tup)
+ )
+ self.assertEqual(
+ "00 " # all points in glyph
+ "02 07 08 09 " # deltaX: [7, 8, 9]
+ "02 04 05 06", # deltaY: [4, 5, 6]
+ hexencode(deltas),
+ )
+
+ def test_compile_embeddedPeak_intermediate_privateConstants(self):
+ var = TupleVariation(
+ {"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, [7, 8, 9]
+ )
+ tup, deltas = var.compile(axisTags=["wght", "wdth"])
+ # len(deltas)=5;
+ # flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_REGION|EMBEDDED_PEAK_TUPLE
+ # embeddedPeak=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)]
+ self.assertEqual(
+ "00 05 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tup)
+ )
+ self.assertEqual(
+ "00 " "02 07 08 09", # all points in glyph # delta: [7, 8, 9]
+ hexencode(deltas),
+ )
+
+ def test_compileCoord(self):
+ var = TupleVariation(
+ {"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4
+ )
+ self.assertEqual("C0 00 20 00", hexencode(var.compileCoord(["wght", "wdth"])))
+ self.assertEqual("20 00 C0 00", hexencode(var.compileCoord(["wdth", "wght"])))
+ self.assertEqual("C0 00", hexencode(var.compileCoord(["wght"])))
+
+ def test_compileIntermediateCoord(self):
+ var = TupleVariation(
+ {"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4
+ )
+ self.assertEqual(
+ "C0 00 19 9A 00 00 26 66",
+ hexencode(var.compileIntermediateCoord(["wght", "wdth"])),
+ )
+ self.assertEqual(
+ "19 9A C0 00 26 66 00 00",
+ hexencode(var.compileIntermediateCoord(["wdth", "wght"])),
+ )
+ self.assertEqual(None, var.compileIntermediateCoord(["wght"]))
+ self.assertEqual(
+ "19 9A 26 66", hexencode(var.compileIntermediateCoord(["wdth"]))
+ )
+
+ def test_decompileCoord(self):
+ decompileCoord = TupleVariation.decompileCoord_
+ data = deHexStr("DE AD C0 00 20 00 DE AD")
+ self.assertEqual(
+ ({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)
+ )
+
+ def test_decompileCoord_roundTrip(self):
+ # Make sure we are not affected by https://github.com/fonttools/fonttools/issues/286
+ data = deHexStr("7F B9 80 35")
+ values, _ = TupleVariation.decompileCoord_(["wght", "wdth"], data, 0)
+ axisValues = {axis: (val, val, val) for axis, val in values.items()}
+ var = TupleVariation(axisValues, [None] * 4)
+ self.assertEqual("7F B9 80 35", hexencode(var.compileCoord(["wght", "wdth"])))
+
+ def test_compilePoints(self):
+ compilePoints = lambda p: TupleVariation.compilePoints(set(p))
+ self.assertEqual("00", hexencode(compilePoints(set()))) # all points in glyph
+ self.assertEqual("01 00 07", hexencode(compilePoints([7])))
+ self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535])))
+ self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15])))
+ self.assertEqual(
+ "06 05 07 01 F7 02 01 F2",
+ hexencode(compilePoints([7, 8, 255, 257, 258, 500])),
+ )
+ self.assertEqual("03 01 07 01 80 01 EC", hexencode(compilePoints([7, 8, 500])))
+ self.assertEqual(
+ "04 01 07 01 81 BE E7 0C 0F",
+ hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE])),
+ )
+ self.maxDiff = None
+ self.assertEqual(
+ "81 2C"
+ + " 7F 00" # 300 points (0x12c) in total
+ + (127 * " 01")
+ + " 7F" # first run, contains 128 points: [0 .. 127]
+ + (128 * " 01")
+ + " 2B" # second run, contains 128 points: [128 .. 255]
+ + (44 * " 01"), # third run, contains 44 points: [256 .. 299]
+ hexencode(compilePoints(range(300))),
+ )
+ self.assertEqual(
+ "81 8F"
+ + " 7F 00" # 399 points (0x18f) in total
+ + (127 * " 01")
+ + " 7F" # first run, contains 128 points: [0 .. 127]
+ + (128 * " 01")
+ + " 7F" # second run, contains 128 points: [128 .. 255]
+ + (128 * " 01")
+ + " 0E" # third run, contains 128 points: [256 .. 383]
+ + (15 * " 01"), # fourth run, contains 15 points: [384 .. 398]
+ hexencode(compilePoints(range(399))),
+ )
+
+ def test_decompilePoints(self):
+ numPointsInGlyph = 65536
+ allPoints = list(range(numPointsInGlyph))
+
+ def decompilePoints(data, offset):
+ points, offset = TupleVariation.decompilePoints_(
+ numPointsInGlyph, deHexStr(data), offset, "gvar"
+ )
+ # Conversion to list needed for Python 3.
+ return (list(points), offset)
+
+ # all points in glyph
+ self.assertEqual((allPoints, 1), decompilePoints("00", 0))
+ # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec)
+ self.assertEqual((allPoints, 2), decompilePoints("80 00", 0))
+ # 2 points; first run: [9, 9+6]
+ self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0))
+ # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF)
+ self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0))
+ # 1 point; first run: [7]
+ self.assertEqual(([7], 3), decompilePoints("01 00 07", 0))
+ # 1 point; first run: [7] in overly verbose encoding
+ self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0))
+ # 1 point; first run: [65535]; requires words to be treated as unsigned numbers
+ self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0))
+ # 4 points; first run: [7, 8]; second run: [255, 257]. 257 is stored in delta-encoded bytes (0xFF + 2).
+ self.assertEqual(
+ ([7, 8, 263, 265], 7), decompilePoints("04 01 07 01 01 FF 02", 0)
+ )
+ # combination of all encodings, preceded and followed by 4 bytes of unused data
+ data = "DE AD DE AD 04 01 07 01 81 BE E7 0C 0F DE AD DE AD"
+ self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4))
+ self.assertSetEqual(
+ set(range(300)),
+ set(
+ decompilePoints(
+ "81 2C"
+ + " 7F 00" # 300 points (0x12c) in total
+ + (127 * " 01")
+ + " 7F" # first run, contains 128 points: [0 .. 127]
+ + (128 * " 01")
+ + " AB" # second run, contains 128 points: [128 .. 255]
+ + (44 * " 00 01"), # third run, contains 44 points: [256 .. 299]
+ 0,
+ )[0]
+ ),
+ )
+ self.assertSetEqual(
+ set(range(399)),
+ set(
+ decompilePoints(
+ "81 8F"
+ + " 7F 00" # 399 points (0x18f) in total
+ + (127 * " 01")
+ + " 7F" # first run, contains 128 points: [0 .. 127]
+ + (128 * " 01")
+ + " FF" # second run, contains 128 points: [128 .. 255]
+ + (128 * " 00 01")
+ + " 8E" # third run, contains 128 points: [256 .. 383]
+ + (15 * " 00 01"), # fourth run, contains 15 points: [384 .. 398]
+ 0,
+ )[0]
+ ),
+ )
+
+ def test_decompilePoints_shouldAcceptBadPointNumbers(self):
+ decompilePoints = TupleVariation.decompilePoints_
+ # 2 points; first run: [3, 9].
+ numPointsInGlyph = 8
+ with CapturingLogHandler(log, "WARNING") as captor:
+ decompilePoints(numPointsInGlyph, deHexStr("02 01 03 06"), 0, "cvar")
+ self.assertIn(
+ "point 9 out of range in 'cvar' table", [r.msg for r in captor.records]
+ )
+
+ def test_decompilePoints_roundTrip(self):
+ numPointsInGlyph = (
+ 500 # greater than 255, so we also exercise code path for 16-bit encoding
+ )
+ compile = lambda points: TupleVariation.compilePoints(points)
+ decompile = lambda data: set(
+ TupleVariation.decompilePoints_(numPointsInGlyph, data, 0, "gvar")[0]
+ )
+ for i in range(50):
+ points = set(random.sample(range(numPointsInGlyph), 30))
+ self.assertSetEqual(
+ points,
+ decompile(compile(points)),
+ "failed round-trip decompile/compilePoints; points=%s" % points,
+ )
+ allPoints = set(range(numPointsInGlyph))
+ self.assertSetEqual(allPoints, decompile(compile(allPoints)))
+ self.assertSetEqual(allPoints, decompile(compile(set())))
+
+ def test_compileDeltas_points(self):
+ var = TupleVariation({}, [None, (1, 0), (2, 0), None, (4, 0), None])
+ # deltaX for points: [1, 2, 4]; deltaY for points: [0, 0, 0]
+ self.assertEqual("02 01 02 04 82", hexencode(var.compileDeltas()))
+
+ def test_compileDeltas_constants(self):
+ var = TupleVariation({}, [None, 1, 2, None, 4, None])
+ # delta for cvts: [1, 2, 4]
+ self.assertEqual("02 01 02 04", hexencode(var.compileDeltas()))
+
+ def test_compileDeltaValues(self):
+ compileDeltaValues = lambda values: hexencode(
+ TupleVariation.compileDeltaValues_(values)
+ )
+ # zeroes
+ self.assertEqual("80", compileDeltaValues([0]))
+ self.assertEqual("BF", compileDeltaValues([0] * 64))
+ self.assertEqual("BF 80", compileDeltaValues([0] * 65))
+ self.assertEqual("BF A3", compileDeltaValues([0] * 100))
+ self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256))
+ # bytes
+ self.assertEqual("00 01", compileDeltaValues([1]))
+ self.assertEqual(
+ "06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])
+ )
+ self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64))
+ self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65))
+ # words
+ self.assertEqual("40 66 66", compileDeltaValues([0x6666]))
+ self.assertEqual(
+ "43 66 66 7F FF FF FF 80 00",
+ compileDeltaValues([0x6666, 32767, -1, -32768]),
+ )
+ self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64))
+ self.assertEqual(
+ "7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)
+ )
+ # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run
+ self.assertEqual(
+ "04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])
+ )
+ self.assertEqual(
+ "01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])
+ )
+ self.assertEqual(
+ "01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])
+ )
+ self.assertEqual(
+ "01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])
+ )
+ # bytes, zeroes
+ self.assertEqual("01 01 00", compileDeltaValues([1, 0]))
+ self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0]))
+ # words, bytes, words: a single byte is more compact when encoded as part of the words run
+ self.assertEqual(
+ "42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])
+ )
+ self.assertEqual(
+ "40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])
+ )
+ # words, zeroes, words
+ self.assertEqual(
+ "40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])
+ )
+ self.assertEqual(
+ "40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])
+ )
+ self.assertEqual(
+ "40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])
+ )
+ # words, zeroes, bytes
+ self.assertEqual(
+ "40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])
+ )
+ self.assertEqual(
+ "40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])
+ )
+ self.assertEqual(
+ "40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])
+ )
+ # words, zeroes
+ self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0]))
+ self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0]))
+
+ def test_decompileDeltas(self):
+ decompileDeltas = TupleVariation.decompileDeltas_
+ # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F)
+ self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0))
+ # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F)
+ self.assertEqual(
+ ([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)
+ )
+ # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F)
+ self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0))
+ # combination of all three encodings, preceded and followed by 4 bytes of unused data
+ data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF")
+ self.assertEqual(
+ ([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)
+ )
+
+ def test_decompileDeltas_roundTrip(self):
+ numDeltas = 30
+ compile = TupleVariation.compileDeltaValues_
+ decompile = lambda data: TupleVariation.decompileDeltas_(numDeltas, data, 0)[0]
+ for i in range(50):
+ deltas = random.sample(range(-128, 127), 10)
+ deltas.extend(random.sample(range(-32768, 32767), 10))
+ deltas.extend([0] * 10)
+ random.shuffle(deltas)
+ self.assertListEqual(deltas, decompile(compile(deltas)))
+
+ def test_compileSharedTuples(self):
+ # Below, the peak coordinate {"wght": 1.0, "wdth": 0.8} appears
+ # three times (most frequent sorted first); {"wght": 1.0, "wdth": 0.5}
+ # and {"wght": 1.0, "wdth": 0.7} both appears two times (tie) and
+ # are sorted alphanumerically to ensure determinism.
+ # The peak coordinate {"wght": 1.0, "wdth": 0.9} appears only once
+ # and is thus ignored.
+ # Because the start and end of variation ranges is not encoded
+ # into the shared pool, they should get ignored.
+ deltas = [None] * 4
+ variations = [
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.7, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.7, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.8, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.5, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.8, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.9, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.4, 0.8, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.5, 1.0)}, deltas),
+ ]
+ result = compileSharedTuples(["wght", "wdth"], variations)
+ self.assertEqual(
+ [hexencode(c) for c in result],
+ ["40 00 33 33", "40 00 20 00", "40 00 2C CD"],
+ )
+
+ def test_decompileSharedTuples_Skia(self):
+ sharedTuples = decompileSharedTuples(
+ axisTags=["wght", "wdth"],
+ sharedTupleCount=8,
+ data=SKIA_GVAR_SHARED_TUPLES_DATA,
+ offset=0,
+ )
+ self.assertEqual(sharedTuples, SKIA_GVAR_SHARED_TUPLES)
+
+ def test_decompileSharedTuples_empty(self):
+ self.assertEqual(decompileSharedTuples(["wght"], 0, b"", 0), [])
+
+ def test_compileTupleVariationStore_allVariationsRedundant(self):
+ axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)}
+ variations = [
+ TupleVariation(axes, [None] * 4),
+ TupleVariation(axes, [None] * 4),
+ TupleVariation(axes, [None] * 4),
+ ]
+ self.assertEqual(
+ compileTupleVariationStore(
+ variations,
+ pointCount=8,
+ axisTags=["wght", "opsz"],
+ sharedTupleIndices={},
+ ),
+ (0, b"", b""),
+ )
+
+ def test_compileTupleVariationStore_noVariations(self):
+ self.assertEqual(
+ compileTupleVariationStore(
+ variations=[],
+ pointCount=8,
+ axisTags=["wght", "opsz"],
+ sharedTupleIndices={},
+ ),
+ (0, b"", b""),
+ )
+
+ def test_compileTupleVariationStore_roundTrip_cvar(self):
+ deltas = [1, 2, 3, 4]
+ variations = [
+ TupleVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, deltas),
+ ]
+ tupleVariationCount, tuples, data = compileTupleVariationStore(
+ variations, pointCount=4, axisTags=["wght", "wdth"], sharedTupleIndices={}
+ )
+ self.assertEqual(
+ decompileTupleVariationStore(
+ "cvar",
+ ["wght", "wdth"],
+ tupleVariationCount,
+ pointCount=4,
+ sharedTuples={},
+ data=(tuples + data),
+ pos=0,
+ dataPos=len(tuples),
+ ),
+ variations,
+ )
+
+ def test_compileTupleVariationStore_roundTrip_gvar(self):
+ deltas = [(1, 1), (2, 2), (3, 3), (4, 4)]
+ variations = [
+ TupleVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, deltas),
+ TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, deltas),
+ ]
+ tupleVariationCount, tuples, data = compileTupleVariationStore(
+ variations, pointCount=4, axisTags=["wght", "wdth"], sharedTupleIndices={}
+ )
+ self.assertEqual(
+ decompileTupleVariationStore(
+ "gvar",
+ ["wght", "wdth"],
+ tupleVariationCount,
+ pointCount=4,
+ sharedTuples={},
+ data=(tuples + data),
+ pos=0,
+ dataPos=len(tuples),
+ ),
+ variations,
+ )
+
+ def test_decompileTupleVariationStore_Skia_I(self):
+ tvar = decompileTupleVariationStore(
+ tableTag="gvar",
+ axisTags=["wght", "wdth"],
+ tupleVariationCount=8,
+ pointCount=18,
+ sharedTuples=SKIA_GVAR_SHARED_TUPLES,
+ data=SKIA_GVAR_I_DATA,
+ pos=4,
+ dataPos=36,
+ )
+ self.assertEqual(len(tvar), 8)
+ self.assertEqual(tvar[0].axes, {"wght": (0.0, 1.0, 1.0)})
+ self.assertEqual(
+ " ".join(["%d,%d" % c for c in tvar[0].coordinates]),
+ "257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 "
+ "257,0 259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0",
+ )
+
+ def test_decompileTupleVariationStore_empty(self):
+ self.assertEqual(
+ decompileTupleVariationStore(
+ tableTag="gvar",
+ axisTags=[],
+ tupleVariationCount=0,
+ pointCount=5,
+ sharedTuples=[],
+ data=b"",
+ pos=4,
+ dataPos=4,
+ ),
+ [],
+ )
+
+ def test_getTupleSize(self):
+ getTupleSize = TupleVariation.getTupleSize_
+ numAxes = 3
+ self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes))
+ self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes))
+ self.assertEqual(4, getTupleSize(0x2077, numAxes))
+ self.assertEqual(4, getTupleSize(11, numAxes))
+
+ def test_inferRegion(self):
+ start, end = inferRegion_({"wght": -0.3, "wdth": 0.7})
+ self.assertEqual(start, {"wght": -0.3, "wdth": 0.0})
+ self.assertEqual(end, {"wght": 0.0, "wdth": 0.7})
+
+ @staticmethod
+ def xml_lines(writer):
+ content = writer.file.getvalue().decode("utf-8")
+ return [line.strip() for line in content.splitlines()][1:]
+
+ def test_getCoordWidth(self):
+ empty = TupleVariation({}, [])
+ self.assertEqual(empty.getCoordWidth(), 0)
+
+ empty = TupleVariation({}, [None])
+ self.assertEqual(empty.getCoordWidth(), 0)
+
+ gvarTuple = TupleVariation({}, [None, (0, 0)])
+ self.assertEqual(gvarTuple.getCoordWidth(), 2)
+
+ cvarTuple = TupleVariation({}, [None, 0])
+ self.assertEqual(cvarTuple.getCoordWidth(), 1)
+
+ cvarTuple.coordinates[1] *= 1.0
+ self.assertEqual(cvarTuple.getCoordWidth(), 1)
+
+ with self.assertRaises(TypeError):
+ TupleVariation({}, [None, "a"]).getCoordWidth()
+
+ def test_scaleDeltas_cvar(self):
+ var = TupleVariation({}, [100, None])
+
+ var.scaleDeltas(1.0)
+ self.assertEqual(var.coordinates, [100, None])
+
+ var.scaleDeltas(0.333)
+ self.assertAlmostEqual(var.coordinates[0], 33.3)
+ self.assertIsNone(var.coordinates[1])
+
+ var.scaleDeltas(0.0)
+ self.assertEqual(var.coordinates, [0, None])
+
+ def test_scaleDeltas_gvar(self):
+ var = TupleVariation({}, [(100, 200), None])
+
+ var.scaleDeltas(1.0)
+ self.assertEqual(var.coordinates, [(100, 200), None])
+
+ var.scaleDeltas(0.333)
+ self.assertAlmostEqual(var.coordinates[0][0], 33.3)
+ self.assertAlmostEqual(var.coordinates[0][1], 66.6)
+ self.assertIsNone(var.coordinates[1])
+
+ var.scaleDeltas(0.0)
+ self.assertEqual(var.coordinates, [(0, 0), None])
+
+ def test_roundDeltas_cvar(self):
+ var = TupleVariation({}, [55.5, None, 99.9])
+ var.roundDeltas()
+ self.assertEqual(var.coordinates, [56, None, 100])
+
+ def test_roundDeltas_gvar(self):
+ var = TupleVariation({}, [(55.5, 100.0), None, (99.9, 100.0)])
+ var.roundDeltas()
+ self.assertEqual(var.coordinates, [(56, 100), None, (100, 100)])
+
+ def test_calcInferredDeltas(self):
+ var = TupleVariation({}, [(0, 0), None, None, None])
+ coords = [(1, 1), (1, 1), (1, 1), (1, 1)]
+
+ var.calcInferredDeltas(coords, [])
+
+ self.assertEqual(var.coordinates, [(0, 0), (0, 0), (0, 0), (0, 0)])
+
+ def test_calcInferredDeltas_invalid(self):
+ # cvar tuples can't have inferred deltas
+ with self.assertRaises(TypeError):
+ TupleVariation({}, [0]).calcInferredDeltas([], [])
+
+ # origCoords must have same length as self.coordinates
+ with self.assertRaises(ValueError):
+ TupleVariation({}, [(0, 0), None]).calcInferredDeltas([], [])
+
+ # at least 4 phantom points required
+ with self.assertRaises(AssertionError):
+ TupleVariation({}, [(0, 0), None]).calcInferredDeltas([(0, 0), (0, 0)], [])
+
+ with self.assertRaises(AssertionError):
+ TupleVariation({}, [(0, 0)] + [None] * 5).calcInferredDeltas(
+ [(0, 0)] * 6, [1, 0] # endPts not in increasing order
+ )
+
+ def test_optimize(self):
+ var = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0)] * 5)
+
+ var.optimize([(0, 0)] * 5, [0])
+
+ self.assertEqual(var.coordinates, [None, None, None, None, None])
+
+ def test_optimize_isComposite(self):
+ # when a composite glyph's deltas are all (0, 0), we still want
+ # to write out an entry in gvar, else macOS doesn't apply any
+ # variations to the composite glyph (even if its individual components
+ # do vary).
+ # https://github.com/fonttools/fonttools/issues/1381
+ var = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0)] * 5)
+ var.optimize([(0, 0)] * 5, [0], isComposite=True)
+ self.assertEqual(var.coordinates, [(0, 0)] * 5)
+
+ # it takes more than 128 (0, 0) deltas before the optimized tuple with
+ # (None) inferred deltas (except for the first) becomes smaller than
+ # the un-optimized one that has all deltas explicitly set to (0, 0).
+ var = TupleVariation({"wght": (0.0, 1.0, 1.0)}, [(0, 0)] * 129)
+ var.optimize([(0, 0)] * 129, list(range(129 - 4)), isComposite=True)
+ self.assertEqual(var.coordinates, [(0, 0)] + [None] * 128)
+
+ def test_sum_deltas_gvar(self):
+ var1 = TupleVariation(
+ {},
+ [
+ (-20, 0),
+ (-20, 0),
+ (20, 0),
+ (20, 0),
+ (0, 0),
+ (0, 0),
+ (0, 0),
+ (0, 0),
+ ],
+ )
+ var2 = TupleVariation(
+ {},
+ [
+ (-10, 0),
+ (-10, 0),
+ (10, 0),
+ (10, 0),
+ (0, 0),
+ (20, 0),
+ (0, 0),
+ (0, 0),
+ ],
+ )
+
+ var1 += var2
+
+ self.assertEqual(
+ var1.coordinates,
+ [
+ (-30, 0),
+ (-30, 0),
+ (30, 0),
+ (30, 0),
+ (0, 0),
+ (20, 0),
+ (0, 0),
+ (0, 0),
+ ],
+ )
+
+ def test_sum_deltas_gvar_invalid_length(self):
+ var1 = TupleVariation({}, [(1, 2)])
+ var2 = TupleVariation({}, [(1, 2), (3, 4)])
+
+ with self.assertRaisesRegex(ValueError, "deltas with different lengths"):
+ var1 += var2
+
+ def test_sum_deltas_gvar_with_inferred_points(self):
+ var1 = TupleVariation({}, [(1, 2), None])
+ var2 = TupleVariation({}, [(2, 3), None])
+
+ with self.assertRaisesRegex(ValueError, "deltas with inferred points"):
+ var1 += var2
+
+ def test_sum_deltas_cvar(self):
+ axes = {"wght": (0.0, 1.0, 1.0)}
+ var1 = TupleVariation(axes, [0, 1, None, None])
+ var2 = TupleVariation(axes, [None, 2, None, 3])
+ var3 = TupleVariation(axes, [None, None, None, 4])
+
+ var1 += var2
+ var1 += var3
+
+ self.assertEqual(var1.coordinates, [0, 3, None, 7])
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_a_n_k_r_test.py b/Tests/ttLib/tables/_a_n_k_r_test.py
index 6c9be16d..ab873328 100644
--- a/Tests/ttLib/tables/_a_n_k_r_test.py
+++ b/Tests/ttLib/tables/_a_n_k_r_test.py
@@ -13,135 +13,134 @@ import unittest
# what our encoder emits. (The value for end-of-table markers
# does not actually matter).
ANKR_FORMAT_0_DATA = deHexStr(
- '0000 0000 ' # 0: Format=0, Flags=0
- '0000 000C ' # 4: LookupTableOffset=12
- '0000 0024 ' # 8: GlyphDataTableOffset=36
- '0006 0004 0002 ' # 12: LookupFormat=6, UnitSize=4, NUnits=2
- '0008 0001 0000 ' # 18: SearchRange=8, EntrySelector=1, RangeShift=0
- '0001 0000 ' # 24: Glyph=A, Offset=0 (+GlyphDataTableOffset=36)
- '0003 0008 ' # 28: Glyph=C, Offset=8 (+GlyphDataTableOffset=44)
- 'FFFF 0000 ' # 32: Glyph=<end>, Offset=<n/a>
- '0000 0001 ' # 36: GlyphData[A].NumPoints=1
- '0235 045E ' # 40: GlyphData[A].Points[0].X=565, .Y=1118
- '0000 0001 ' # 44: GlyphData[C].NumPoints=1
- 'FED2 045E ' # 48: GlyphData[C].Points[0].X=-302, .Y=1118
-) # 52: <end>
+ "0000 0000 " # 0: Format=0, Flags=0
+ "0000 000C " # 4: LookupTableOffset=12
+ "0000 0024 " # 8: GlyphDataTableOffset=36
+ "0006 0004 0002 " # 12: LookupFormat=6, UnitSize=4, NUnits=2
+ "0008 0001 0000 " # 18: SearchRange=8, EntrySelector=1, RangeShift=0
+ "0001 0000 " # 24: Glyph=A, Offset=0 (+GlyphDataTableOffset=36)
+ "0003 0008 " # 28: Glyph=C, Offset=8 (+GlyphDataTableOffset=44)
+ "FFFF 0000 " # 32: Glyph=<end>, Offset=<n/a>
+ "0000 0001 " # 36: GlyphData[A].NumPoints=1
+ "0235 045E " # 40: GlyphData[A].Points[0].X=565, .Y=1118
+ "0000 0001 " # 44: GlyphData[C].NumPoints=1
+ "FED2 045E " # 48: GlyphData[C].Points[0].X=-302, .Y=1118
+) # 52: <end>
assert len(ANKR_FORMAT_0_DATA) == 52
ANKR_FORMAT_0_XML = [
'<AnchorPoints Format="0">',
' <Flags value="0"/>',
- ' <Anchors>',
+ " <Anchors>",
' <Lookup glyph="A">',
- ' <!-- AnchorPointCount=1 -->',
+ " <!-- AnchorPointCount=1 -->",
' <AnchorPoint index="0">',
' <XCoordinate value="565"/>',
' <YCoordinate value="1118"/>',
- ' </AnchorPoint>',
- ' </Lookup>',
+ " </AnchorPoint>",
+ " </Lookup>",
' <Lookup glyph="C">',
- ' <!-- AnchorPointCount=1 -->',
+ " <!-- AnchorPointCount=1 -->",
' <AnchorPoint index="0">',
' <XCoordinate value="-302"/>',
' <YCoordinate value="1118"/>',
- ' </AnchorPoint>',
- ' </Lookup>',
- ' </Anchors>',
- '</AnchorPoints>',
+ " </AnchorPoint>",
+ " </Lookup>",
+ " </Anchors>",
+ "</AnchorPoints>",
]
# Same data as ANKR_FORMAT_0_DATA, but with chunks of unused data
# whose presence should not stop us from decompiling the table.
ANKR_FORMAT_0_STRAY_DATA = deHexStr(
- '0000 0000 ' # 0: Format=0, Flags=0
- '0000 0018 ' # 4: LookupTableOffset=24
- '0000 0034 ' # 8: GlyphDataTableOffset=52
- 'DEAD BEEF CAFE ' # 12: <stray data>
- 'DEAD BEEF CAFE ' # 18: <stray data>
- '0006 0004 0002 ' # 24: LookupFormat=6, UnitSize=4, NUnits=2
- '0008 0001 0000 ' # 30: SearchRange=8, EntrySelector=1, RangeShift=0
- '0001 0000 ' # 36: Glyph=A, Offset=0 (+GlyphDataTableOffset=52)
- '0003 0008 ' # 40: Glyph=C, Offset=8 (+GlyphDataTableOffset=60)
- 'FFFF 0000 ' # 44: Glyph=<end>, Offset=<n/a>
- 'BEEF F00D ' # 48: <stray data>
- '0000 0001 ' # 52: GlyphData[A].NumPoints=1
- '0235 045E ' # 56: GlyphData[A].Points[0].X=565, .Y=1118
- '0000 0001 ' # 60: GlyphData[C].NumPoints=1
- 'FED2 045E ' # 64: GlyphData[C].Points[0].X=-302, .Y=1118
-) # 68: <end>
+ "0000 0000 " # 0: Format=0, Flags=0
+ "0000 0018 " # 4: LookupTableOffset=24
+ "0000 0034 " # 8: GlyphDataTableOffset=52
+ "DEAD BEEF CAFE " # 12: <stray data>
+ "DEAD BEEF CAFE " # 18: <stray data>
+ "0006 0004 0002 " # 24: LookupFormat=6, UnitSize=4, NUnits=2
+ "0008 0001 0000 " # 30: SearchRange=8, EntrySelector=1, RangeShift=0
+ "0001 0000 " # 36: Glyph=A, Offset=0 (+GlyphDataTableOffset=52)
+ "0003 0008 " # 40: Glyph=C, Offset=8 (+GlyphDataTableOffset=60)
+ "FFFF 0000 " # 44: Glyph=<end>, Offset=<n/a>
+ "BEEF F00D " # 48: <stray data>
+ "0000 0001 " # 52: GlyphData[A].NumPoints=1
+ "0235 045E " # 56: GlyphData[A].Points[0].X=565, .Y=1118
+ "0000 0001 " # 60: GlyphData[C].NumPoints=1
+ "FED2 045E " # 64: GlyphData[C].Points[0].X=-302, .Y=1118
+) # 68: <end>
assert len(ANKR_FORMAT_0_STRAY_DATA) == 68
# Constructed test case where glyphs A and D share the same anchor data.
ANKR_FORMAT_0_SHARING_DATA = deHexStr(
- '0000 0000 ' # 0: Format=0, Flags=0
- '0000 000C ' # 4: LookupTableOffset=12
- '0000 0028 ' # 8: GlyphDataTableOffset=40
- '0006 0004 0003 ' # 12: LookupFormat=6, UnitSize=4, NUnits=3
- '0008 0001 0004 ' # 18: SearchRange=8, EntrySelector=1, RangeShift=4
- '0001 0000 ' # 24: Glyph=A, Offset=0 (+GlyphDataTableOffset=36)
- '0003 0008 ' # 28: Glyph=C, Offset=8 (+GlyphDataTableOffset=44)
- '0004 0000 ' # 32: Glyph=D, Offset=0 (+GlyphDataTableOffset=36)
- 'FFFF 0000 ' # 36: Glyph=<end>, Offset=<n/a>
- '0000 0001 ' # 40: GlyphData[A].NumPoints=1
- '0235 045E ' # 44: GlyphData[A].Points[0].X=565, .Y=1118
- '0000 0002 ' # 48: GlyphData[C].NumPoints=2
- '000B 000C ' # 52: GlyphData[C].Points[0].X=11, .Y=12
- '001B 001C ' # 56: GlyphData[C].Points[1].X=27, .Y=28
-) # 60: <end>
+ "0000 0000 " # 0: Format=0, Flags=0
+ "0000 000C " # 4: LookupTableOffset=12
+ "0000 0028 " # 8: GlyphDataTableOffset=40
+ "0006 0004 0003 " # 12: LookupFormat=6, UnitSize=4, NUnits=3
+ "0008 0001 0004 " # 18: SearchRange=8, EntrySelector=1, RangeShift=4
+ "0001 0000 " # 24: Glyph=A, Offset=0 (+GlyphDataTableOffset=36)
+ "0003 0008 " # 28: Glyph=C, Offset=8 (+GlyphDataTableOffset=44)
+ "0004 0000 " # 32: Glyph=D, Offset=0 (+GlyphDataTableOffset=36)
+ "FFFF 0000 " # 36: Glyph=<end>, Offset=<n/a>
+ "0000 0001 " # 40: GlyphData[A].NumPoints=1
+ "0235 045E " # 44: GlyphData[A].Points[0].X=565, .Y=1118
+ "0000 0002 " # 48: GlyphData[C].NumPoints=2
+ "000B 000C " # 52: GlyphData[C].Points[0].X=11, .Y=12
+ "001B 001C " # 56: GlyphData[C].Points[1].X=27, .Y=28
+) # 60: <end>
assert len(ANKR_FORMAT_0_SHARING_DATA) == 60
ANKR_FORMAT_0_SHARING_XML = [
'<AnchorPoints Format="0">',
' <Flags value="0"/>',
- ' <Anchors>',
+ " <Anchors>",
' <Lookup glyph="A">',
- ' <!-- AnchorPointCount=1 -->',
+ " <!-- AnchorPointCount=1 -->",
' <AnchorPoint index="0">',
' <XCoordinate value="565"/>',
' <YCoordinate value="1118"/>',
- ' </AnchorPoint>',
- ' </Lookup>',
+ " </AnchorPoint>",
+ " </Lookup>",
' <Lookup glyph="C">',
- ' <!-- AnchorPointCount=2 -->',
+ " <!-- AnchorPointCount=2 -->",
' <AnchorPoint index="0">',
' <XCoordinate value="11"/>',
' <YCoordinate value="12"/>',
- ' </AnchorPoint>',
+ " </AnchorPoint>",
' <AnchorPoint index="1">',
' <XCoordinate value="27"/>',
' <YCoordinate value="28"/>',
- ' </AnchorPoint>',
- ' </Lookup>',
+ " </AnchorPoint>",
+ " </Lookup>",
' <Lookup glyph="D">',
- ' <!-- AnchorPointCount=1 -->',
+ " <!-- AnchorPointCount=1 -->",
' <AnchorPoint index="0">',
' <XCoordinate value="565"/>',
' <YCoordinate value="1118"/>',
- ' </AnchorPoint>',
- ' </Lookup>',
- ' </Anchors>',
- '</AnchorPoints>',
+ " </AnchorPoint>",
+ " </Lookup>",
+ " </Anchors>",
+ "</AnchorPoints>",
]
class ANKRTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D'])
+ cls.font = FakeFont([".notdef", "A", "B", "C", "D"])
def decompileToXML(self, data, xml):
- table = newTable('ankr')
+ table = newTable("ankr")
table.decompile(data, self.font)
self.assertEqual(getXML(table.toXML), xml)
def compileFromXML(self, xml, data):
- table = newTable('ankr')
+ table = newTable("ankr")
for name, attrs, content in parseXML(xml):
table.fromXML(name, attrs, content, font=self.font)
self.assertEqual(hexStr(table.compile(self.font)), hexStr(data))
@@ -160,6 +159,7 @@ class ANKRTest(unittest.TestCase):
self.roundtrip(ANKR_FORMAT_0_SHARING_DATA, ANKR_FORMAT_0_SHARING_XML)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_a_v_a_r_test.py b/Tests/ttLib/tables/_a_v_a_r_test.py
index 429ca2e8..dbe07b3a 100644
--- a/Tests/ttLib/tables/_a_v_a_r_test.py
+++ b/Tests/ttLib/tables/_a_v_a_r_test.py
@@ -1,9 +1,13 @@
from fontTools.misc.testTools import parseXML
from fontTools.misc.textTools import deHexStr
from fontTools.misc.xmlWriter import XMLWriter
-from fontTools.ttLib import TTLibError
+from fontTools.misc.fixedTools import floatToFixed as fl2fi
+from fontTools.ttLib import TTFont, TTLibError
+import fontTools.ttLib.tables.otTables as otTables
from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r
from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis
+import fontTools.varLib.models as models
+import fontTools.varLib.varStore as varStore
from io import BytesIO
import unittest
@@ -11,7 +15,8 @@ import unittest
TEST_DATA = deHexStr(
"00 01 00 00 00 00 00 02 "
"00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 "
- "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00")
+ "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00"
+)
class AxisVariationTableTest(unittest.TestCase):
@@ -35,43 +40,45 @@ class AxisVariationTableTest(unittest.TestCase):
def test_decompile(self):
avar = table__a_v_a_r()
avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"]))
- self.assertAvarAlmostEqual({
- "wdth": {-1.0: -1.0, 0.0: 0.0, 0.2999878: 0.7999878, 1.0: 1.0},
- "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}
- }, avar.segments)
-
- def test_decompile_unsupportedVersion(self):
- avar = table__a_v_a_r()
- font = self.makeFont(["wdth", "wght"])
- self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font)
+ self.assertAvarAlmostEqual(
+ {
+ "wdth": {-1.0: -1.0, 0.0: 0.0, 0.2999878: 0.7999878, 1.0: 1.0},
+ "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0},
+ },
+ avar.segments,
+ )
def test_toXML(self):
avar = table__a_v_a_r()
avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.2999878: 0.7999878, 1.0: 1.0}
writer = XMLWriter(BytesIO())
avar.toXML(writer, self.makeFont(["opsz"]))
- self.assertEqual([
- '<segment axis="opsz">',
+ self.assertEqual(
+ [
+ '<version major="1" minor="0"/>',
+ '<segment axis="opsz">',
'<mapping from="-1.0" to="-1.0"/>',
'<mapping from="0.0" to="0.0"/>',
'<mapping from="0.3" to="0.8"/>',
'<mapping from="1.0" to="1.0"/>',
- '</segment>'
- ], self.xml_lines(writer))
+ "</segment>",
+ ],
+ self.xml_lines(writer),
+ )
def test_fromXML(self):
avar = table__a_v_a_r()
for name, attrs, content in parseXML(
- '<segment axis="wdth">'
- ' <mapping from="-1.0" to="-1.0"/>'
- ' <mapping from="0.0" to="0.0"/>'
- ' <mapping from="0.7" to="0.2"/>'
- ' <mapping from="1.0" to="1.0"/>'
- '</segment>'):
+ '<segment axis="wdth">'
+ ' <mapping from="-1.0" to="-1.0"/>'
+ ' <mapping from="0.0" to="0.0"/>'
+ ' <mapping from="0.7" to="0.2"/>'
+ ' <mapping from="1.0" to="1.0"/>'
+ "</segment>"
+ ):
avar.fromXML(name, attrs, content, ttFont=None)
self.assertAvarAlmostEqual(
- {"wdth": {-1: -1, 0: 0, 0.7000122: 0.2000122, 1.0: 1.0}},
- avar.segments
+ {"wdth": {-1: -1, 0: 0, 0.7000122: 0.2000122, 1.0: 1.0}}, avar.segments
)
@staticmethod
@@ -82,7 +89,9 @@ class AxisVariationTableTest(unittest.TestCase):
axis = Axis()
axis.axisTag = tag
fvar.axes.append(axis)
- return {"fvar": fvar}
+ font = TTFont()
+ font["fvar"] = fvar
+ return font
@staticmethod
def xml_lines(writer):
@@ -90,6 +99,82 @@ class AxisVariationTableTest(unittest.TestCase):
return [line.strip() for line in content.splitlines()][1:]
+class Avar2Test(unittest.TestCase):
+ def test(self):
+ axisTags = ["wght", "wdth"]
+ fvar = table__f_v_a_r()
+ for tag in axisTags:
+ axis = Axis()
+ axis.axisTag = tag
+ fvar.axes.append(axis)
+
+ master_locations_normalized = [
+ {},
+ {"wght": 1, "wdth": -1},
+ ]
+ data = [
+ {},
+ {"wdth": -0.8},
+ ]
+
+ model = models.VariationModel(master_locations_normalized, axisTags)
+ store_builder = varStore.OnlineVarStoreBuilder(axisTags)
+ store_builder.setModel(model)
+ varIdxes = {}
+ for axis in axisTags:
+ masters = [fl2fi(m.get(axis, 0), 14) for m in data]
+ varIdxes[axis] = store_builder.storeMasters(masters)[1]
+ store = store_builder.finish()
+ mapping = store.optimize()
+ varIdxes = {axis: mapping[value] for axis, value in varIdxes.items()}
+ del model, store_builder, mapping
+
+ varIdxMap = otTables.DeltaSetIndexMap()
+ varIdxMap.Format = 1
+ varIdxMap.mapping = []
+ for tag in axisTags:
+ varIdxMap.mapping.append(varIdxes[tag])
+
+ avar = table__a_v_a_r()
+ avar.segments["wght"] = {}
+ avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.4: 0.5, 1.0: 1.0}
+
+ avar.majorVersion = 2
+ avar.table = otTables.avar()
+ avar.table.VarIdxMap = varIdxMap
+ avar.table.VarStore = store
+
+ font = TTFont()
+ font["fvar"] = fvar
+ font["avar"] = avar
+
+ b = BytesIO()
+ font.save(b)
+ b.seek(0)
+ font2 = TTFont(b)
+
+ assert font2["avar"].table.VarStore.VarRegionList.RegionAxisCount == 2
+ assert font2["avar"].table.VarStore.VarRegionList.RegionCount == 1
+
+ xml1 = BytesIO()
+ writer = XMLWriter(xml1)
+ font["avar"].toXML(writer, font)
+
+ xml2 = BytesIO()
+ writer = XMLWriter(xml2)
+ font2["avar"].toXML(writer, font2)
+
+ assert xml1.getvalue() == xml2.getvalue(), (xml1.getvalue(), xml2.getvalue())
+
+ avar = table__a_v_a_r()
+ xml = b"".join(xml2.getvalue().splitlines()[1:])
+ for name, attrs, content in parseXML(xml):
+ avar.fromXML(name, attrs, content, ttFont=TTFont())
+ assert avar.table.VarStore.VarRegionList.RegionAxisCount == 2
+ assert avar.table.VarStore.VarRegionList.RegionCount == 1
+
+
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_b_s_l_n_test.py b/Tests/ttLib/tables/_b_s_l_n_test.py
index e40c1bd2..3ef3195f 100644
--- a/Tests/ttLib/tables/_b_s_l_n_test.py
+++ b/Tests/ttLib/tables/_b_s_l_n_test.py
@@ -7,17 +7,17 @@ import unittest
# Apple's spec of the baseline table gives no example for 'bsln' format 0,
# but the Apple Chancery font contains the following data.
BSLN_FORMAT_0_DATA = deHexStr(
- '0001 0000 0000 ' # 0: Version=1.0, Format=0
- '0000 ' # 6: DefaultBaseline=0 (Roman baseline)
- '0000 01D1 0000 0541 ' # 8: Delta[0..3]=0, 465, 0, 1345
- '01FB 0000 0000 0000 ' # 16: Delta[4..7]=507, 0, 0, 0
- '0000 0000 0000 0000 ' # 24: Delta[8..11]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 32: Delta[12..15]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 40: Delta[16..19]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 48: Delta[20..23]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 56: Delta[24..27]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 64: Delta[28..31]=0, 0, 0, 0
-) # 72: <end>
+ "0001 0000 0000 " # 0: Version=1.0, Format=0
+ "0000 " # 6: DefaultBaseline=0 (Roman baseline)
+ "0000 01D1 0000 0541 " # 8: Delta[0..3]=0, 465, 0, 1345
+ "01FB 0000 0000 0000 " # 16: Delta[4..7]=507, 0, 0, 0
+ "0000 0000 0000 0000 " # 24: Delta[8..11]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 32: Delta[12..15]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 40: Delta[16..19]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 48: Delta[20..23]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 56: Delta[24..27]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 64: Delta[28..31]=0, 0, 0, 0
+) # 72: <end>
assert len(BSLN_FORMAT_0_DATA) == 72
@@ -57,7 +57,7 @@ BSLN_FORMAT_0_XML = [
' <Delta index="29" value="0"/>',
' <Delta index="30" value="0"/>',
' <Delta index="31" value="0"/>',
- '</Baseline>',
+ "</Baseline>",
]
@@ -66,21 +66,21 @@ BSLN_FORMAT_0_XML = [
# The example in the AAT specification uses the value 270 for Seg[0].LastGlyph,
# whereas we use the value 10 for testng to shorten the XML dump.
BSLN_FORMAT_1_DATA = deHexStr(
- '0001 0000 0001 ' # 0: Version=1.0, Format=1
- '0001 ' # 6: DefaultBaseline=1 (Ideographic baseline)
- '0000 0357 0000 05F0 ' # 8: Delta[0..3]=0, 855, 0, 1520
- '0000 0000 0000 0000 ' # 16: Delta[4..7]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 24: Delta[8..11]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 32: Delta[12..15]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 40: Delta[16..19]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 48: Delta[20..23]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 56: Delta[24..27]=0, 0, 0, 0
- '0000 0000 0000 0000 ' # 64: Delta[28..31]=0, 0, 0, 0
- '0002 0006 0001 ' # 72: LookupFormat=2, UnitSize=6, NUnits=1
- '0006 0000 0000 ' # 78: SearchRange=6, EntrySelector=0, RangeShift=0
- '000A 0002 0000 ' # 84: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman
- 'FFFF FFFF 0000 ' # 90: Seg[1]=<end>
-) # 96: <end>
+ "0001 0000 0001 " # 0: Version=1.0, Format=1
+ "0001 " # 6: DefaultBaseline=1 (Ideographic baseline)
+ "0000 0357 0000 05F0 " # 8: Delta[0..3]=0, 855, 0, 1520
+ "0000 0000 0000 0000 " # 16: Delta[4..7]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 24: Delta[8..11]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 32: Delta[12..15]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 40: Delta[16..19]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 48: Delta[20..23]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 56: Delta[24..27]=0, 0, 0, 0
+ "0000 0000 0000 0000 " # 64: Delta[28..31]=0, 0, 0, 0
+ "0002 0006 0001 " # 72: LookupFormat=2, UnitSize=6, NUnits=1
+ "0006 0000 0000 " # 78: SearchRange=6, EntrySelector=0, RangeShift=0
+ "000A 0002 0000 " # 84: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman
+ "FFFF FFFF 0000 " # 90: Seg[1]=<end>
+) # 96: <end>
assert len(BSLN_FORMAT_1_DATA) == 96
@@ -120,7 +120,7 @@ BSLN_FORMAT_1_XML = [
' <Delta index="29" value="0"/>',
' <Delta index="30" value="0"/>',
' <Delta index="31" value="0"/>',
- ' <BaselineValues>',
+ " <BaselineValues>",
' <Lookup glyph="B" value="0"/>',
' <Lookup glyph="C" value="0"/>',
' <Lookup glyph="D" value="0"/>',
@@ -130,24 +130,24 @@ BSLN_FORMAT_1_XML = [
' <Lookup glyph="H" value="0"/>',
' <Lookup glyph="I" value="0"/>',
' <Lookup glyph="J" value="0"/>',
- ' </BaselineValues>',
- '</Baseline>',
+ " </BaselineValues>",
+ "</Baseline>",
]
BSLN_FORMAT_2_DATA = deHexStr(
- '0001 0000 0002 ' # 0: Version=1.0, Format=2
- '0004 ' # 6: DefaultBaseline=4 (Math)
- '0016 ' # 8: StandardGlyph=22
- '0050 0051 FFFF 0052 ' # 10: ControlPoint[0..3]=80, 81, <none>, 82
- 'FFFF FFFF FFFF FFFF ' # 18: ControlPoint[4..7]=<none>
- 'FFFF FFFF FFFF FFFF ' # 26: ControlPoint[8..11]=<none>
- 'FFFF FFFF FFFF FFFF ' # 34: ControlPoint[12..15]=<none>
- 'FFFF FFFF FFFF FFFF ' # 42: ControlPoint[16..19]=<none>
- 'FFFF FFFF FFFF FFFF ' # 50: ControlPoint[20..23]=<none>
- 'FFFF FFFF FFFF FFFF ' # 58: ControlPoint[24..27]=<none>
- 'FFFF FFFF FFFF FFFF ' # 66: ControlPoint[28..31]=<none>
-) # 74: <end>
+ "0001 0000 0002 " # 0: Version=1.0, Format=2
+ "0004 " # 6: DefaultBaseline=4 (Math)
+ "0016 " # 8: StandardGlyph=22
+ "0050 0051 FFFF 0052 " # 10: ControlPoint[0..3]=80, 81, <none>, 82
+ "FFFF FFFF FFFF FFFF " # 18: ControlPoint[4..7]=<none>
+ "FFFF FFFF FFFF FFFF " # 26: ControlPoint[8..11]=<none>
+ "FFFF FFFF FFFF FFFF " # 34: ControlPoint[12..15]=<none>
+ "FFFF FFFF FFFF FFFF " # 42: ControlPoint[16..19]=<none>
+ "FFFF FFFF FFFF FFFF " # 50: ControlPoint[20..23]=<none>
+ "FFFF FFFF FFFF FFFF " # 58: ControlPoint[24..27]=<none>
+ "FFFF FFFF FFFF FFFF " # 66: ControlPoint[28..31]=<none>
+) # 74: <end>
assert len(BSLN_FORMAT_2_DATA) == 74
@@ -188,7 +188,7 @@ BSLN_FORMAT_2_XML = [
' <ControlPoint index="29" value="65535"/>',
' <ControlPoint index="30" value="65535"/>',
' <ControlPoint index="31" value="65535"/>',
- '</Baseline>',
+ "</Baseline>",
]
@@ -197,22 +197,22 @@ BSLN_FORMAT_2_XML = [
# The example in the AAT specification uses the value 270 for Seg[0].LastGlyph,
# whereas we use the value 10 for testng to shorten the XML dump.
BSLN_FORMAT_3_DATA = deHexStr(
- '0001 0000 0003 ' # 0: Version=1.0, Format=3
- '0001 ' # 6: DefaultBaseline=1 (Ideographic)
- '0016 ' # 8: StandardGlyph=22
- '0050 0051 FFFF 0052 ' # 10: ControlPoint[0..3]=80, 81, <none>, 82
- 'FFFF FFFF FFFF FFFF ' # 18: ControlPoint[4..7]=<none>
- 'FFFF FFFF FFFF FFFF ' # 26: ControlPoint[8..11]=<none>
- 'FFFF FFFF FFFF FFFF ' # 34: ControlPoint[12..15]=<none>
- 'FFFF FFFF FFFF FFFF ' # 42: ControlPoint[16..19]=<none>
- 'FFFF FFFF FFFF FFFF ' # 50: ControlPoint[20..23]=<none>
- 'FFFF FFFF FFFF FFFF ' # 58: ControlPoint[24..27]=<none>
- 'FFFF FFFF FFFF FFFF ' # 66: ControlPoint[28..31]=<none>
- '0002 0006 0001 ' # 74: LookupFormat=2, UnitSize=6, NUnits=1
- '0006 0000 0000 ' # 80: SearchRange=6, EntrySelector=0, RangeShift=0
- '000A 0002 0000 ' # 86: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman
- 'FFFF FFFF 0000 ' # 92: Seg[1]=<end>
-) # 98: <end>
+ "0001 0000 0003 " # 0: Version=1.0, Format=3
+ "0001 " # 6: DefaultBaseline=1 (Ideographic)
+ "0016 " # 8: StandardGlyph=22
+ "0050 0051 FFFF 0052 " # 10: ControlPoint[0..3]=80, 81, <none>, 82
+ "FFFF FFFF FFFF FFFF " # 18: ControlPoint[4..7]=<none>
+ "FFFF FFFF FFFF FFFF " # 26: ControlPoint[8..11]=<none>
+ "FFFF FFFF FFFF FFFF " # 34: ControlPoint[12..15]=<none>
+ "FFFF FFFF FFFF FFFF " # 42: ControlPoint[16..19]=<none>
+ "FFFF FFFF FFFF FFFF " # 50: ControlPoint[20..23]=<none>
+ "FFFF FFFF FFFF FFFF " # 58: ControlPoint[24..27]=<none>
+ "FFFF FFFF FFFF FFFF " # 66: ControlPoint[28..31]=<none>
+ "0002 0006 0001 " # 74: LookupFormat=2, UnitSize=6, NUnits=1
+ "0006 0000 0000 " # 80: SearchRange=6, EntrySelector=0, RangeShift=0
+ "000A 0002 0000 " # 86: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman
+ "FFFF FFFF 0000 " # 92: Seg[1]=<end>
+) # 98: <end>
assert len(BSLN_FORMAT_3_DATA) == 98
@@ -253,7 +253,7 @@ BSLN_FORMAT_3_XML = [
' <ControlPoint index="29" value="65535"/>',
' <ControlPoint index="30" value="65535"/>',
' <ControlPoint index="31" value="65535"/>',
- ' <BaselineValues>',
+ " <BaselineValues>",
' <Lookup glyph="B" value="0"/>',
' <Lookup glyph="C" value="0"/>',
' <Lookup glyph="D" value="0"/>',
@@ -263,26 +263,24 @@ BSLN_FORMAT_3_XML = [
' <Lookup glyph="H" value="0"/>',
' <Lookup glyph="I" value="0"/>',
' <Lookup glyph="J" value="0"/>',
- ' </BaselineValues>',
- '</Baseline>',
+ " </BaselineValues>",
+ "</Baseline>",
]
class BSLNTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(
- ['.notdef'] + [g for g in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
+ cls.font = FakeFont([".notdef"] + [g for g in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"])
def decompileToXML(self, data, xml):
- table = newTable('bsln')
+ table = newTable("bsln")
table.decompile(data, self.font)
self.assertEqual(getXML(table.toXML), xml)
def compileFromXML(self, xml, data):
- table = newTable('bsln')
+ table = newTable("bsln")
for name, attrs, content in parseXML(xml):
table.fromXML(name, attrs, content, font=self.font)
self.assertEqual(hexStr(table.compile(self.font)), hexStr(data))
@@ -304,6 +302,7 @@ class BSLNTest(unittest.TestCase):
self.compileFromXML(BSLN_FORMAT_3_XML, BSLN_FORMAT_3_DATA)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_c_i_d_g_test.py b/Tests/ttLib/tables/_c_i_d_g_test.py
index 11c1fc0f..315736b8 100644
--- a/Tests/ttLib/tables/_c_i_d_g_test.py
+++ b/Tests/ttLib/tables/_c_i_d_g_test.py
@@ -7,39 +7,39 @@ import unittest
# On macOS X 10.12.6, the first font in /System/Library/Fonts/PingFang.ttc
# has a ‘cidg’ table with a similar structure as this test data, just larger.
CIDG_DATA = deHexStr(
- "0000 0000 " # 0: Format=0, Flags=0
- "0000 0098 " # 4: StructLength=152
- "0000 " # 8: Registry=0
- "41 64 6F 62 65 " # 10: RegistryName="Adobe"
- + ("00" * 59) + # 15: <padding>
- "0002 " # 74: Order=2
- "43 4E 53 31 " # 76: Order="CNS1"
- + ("00" * 60) + # 80: <padding>
- "0000 " # 140: SupplementVersion=0
- "0004 " # 142: Count
- "0000 " # 144: GlyphID[0]=.notdef
- "FFFF " # 146: CIDs[1]=<None>
- "0003 " # 148: CIDs[2]=C
- "0001 " # 150: CIDs[3]=A
-) # 152: <end>
+ "0000 0000 " # 0: Format=0, Flags=0
+ "0000 0098 " # 4: StructLength=152
+ "0000 " # 8: Registry=0
+ "41 64 6F 62 65 " # 10: RegistryName="Adobe"
+ + ("00" * 59)
+ + "0002 " # 15: <padding> # 74: Order=2
+ "43 4E 53 31 " # 76: Order="CNS1"
+ + ("00" * 60)
+ + "0000 " # 80: <padding> # 140: SupplementVersion=0
+ "0004 " # 142: Count
+ "0000 " # 144: GlyphID[0]=.notdef
+ "FFFF " # 146: CIDs[1]=<None>
+ "0003 " # 148: CIDs[2]=C
+ "0001 " # 150: CIDs[3]=A
+) # 152: <end>
assert len(CIDG_DATA) == 152, len(CIDG_DATA)
CIDG_XML = [
- '<CIDGlyphMapping Format="0">',
- ' <DataFormat value="0"/>',
- ' <!-- StructLength=152 -->',
- ' <Registry value="0"/>',
- ' <RegistryName value="Adobe"/>',
- ' <Order value="2"/>',
- ' <OrderName value="CNS1"/>',
- ' <SupplementVersion value="0"/>',
- ' <Mapping>',
- ' <CID cid="0" glyph=".notdef"/>',
- ' <CID cid="2" glyph="C"/>',
- ' <CID cid="3" glyph="A"/>',
- ' </Mapping>',
- '</CIDGlyphMapping>',
+ '<CIDGlyphMapping Format="0">',
+ ' <DataFormat value="0"/>',
+ " <!-- StructLength=152 -->",
+ ' <Registry value="0"/>',
+ ' <RegistryName value="Adobe"/>',
+ ' <Order value="2"/>',
+ ' <OrderName value="CNS1"/>',
+ ' <SupplementVersion value="0"/>',
+ " <Mapping>",
+ ' <CID cid="0" glyph=".notdef"/>',
+ ' <CID cid="2" glyph="C"/>',
+ ' <CID cid="3" glyph="A"/>',
+ " </Mapping>",
+ "</CIDGlyphMapping>",
]
@@ -47,21 +47,21 @@ class GCIDTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D'])
+ cls.font = FakeFont([".notdef", "A", "B", "C", "D"])
def testDecompileToXML(self):
- table = newTable('cidg')
+ table = newTable("cidg")
table.decompile(CIDG_DATA, self.font)
self.assertEqual(getXML(table.toXML, self.font), CIDG_XML)
def testCompileFromXML(self):
- table = newTable('cidg')
+ table = newTable("cidg")
for name, attrs, content in parseXML(CIDG_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(CIDG_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(CIDG_DATA))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_c_m_a_p_test.py b/Tests/ttLib/tables/_c_m_a_p_test.py
index 63285045..9bf854e1 100644
--- a/Tests/ttLib/tables/_c_m_a_p_test.py
+++ b/Tests/ttLib/tables/_c_m_a_p_test.py
@@ -7,166 +7,182 @@ import unittest
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable, table__c_m_a_p
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-DATA_DIR = os.path.join(CURR_DIR, 'data')
+DATA_DIR = os.path.join(CURR_DIR, "data")
CMAP_FORMAT_14_TTX = os.path.join(DATA_DIR, "_c_m_a_p_format_14.ttx")
-CMAP_FORMAT_14_BW_COMPAT_TTX = os.path.join(DATA_DIR, "_c_m_a_p_format_14_bw_compat.ttx")
+CMAP_FORMAT_14_BW_COMPAT_TTX = os.path.join(
+ DATA_DIR, "_c_m_a_p_format_14_bw_compat.ttx"
+)
+
def strip_VariableItems(string):
# ttlib changes with the fontTools version
- string = re.sub(' ttLibVersion=".*"', '', string)
+ string = re.sub(' ttLibVersion=".*"', "", string)
return string
-class CmapSubtableTest(unittest.TestCase):
- def makeSubtable(self, cmapFormat, platformID, platEncID, langID):
- subtable = CmapSubtable.newSubtable(cmapFormat)
- subtable.platformID, subtable.platEncID, subtable.language = (platformID, platEncID, langID)
- return subtable
-
- def test_toUnicode_utf16be(self):
- subtable = self.makeSubtable(4, 0, 2, 7)
- self.assertEqual("utf_16_be", subtable.getEncoding())
- self.assertEqual(True, subtable.isUnicode())
-
- def test_toUnicode_macroman(self):
- subtable = self.makeSubtable(4, 1, 0, 7) # MacRoman
- self.assertEqual("mac_roman", subtable.getEncoding())
- self.assertEqual(False, subtable.isUnicode())
-
- def test_toUnicode_macromanian(self):
- subtable = self.makeSubtable(4, 1, 0, 37) # Mac Romanian
- self.assertNotEqual(None, subtable.getEncoding())
- self.assertEqual(False, subtable.isUnicode())
-
- def test_extended_mac_encodings(self):
- subtable = self.makeSubtable(4, 1, 1, 0) # Mac Japanese
- self.assertNotEqual(None, subtable.getEncoding())
- self.assertEqual(False, subtable.isUnicode())
-
- def test_extended_unknown(self):
- subtable = self.makeSubtable(4, 10, 11, 12)
- self.assertEqual(subtable.getEncoding(), None)
- self.assertEqual(subtable.getEncoding("ascii"), "ascii")
- self.assertEqual(subtable.getEncoding(default="xyz"), "xyz")
-
- def test_compile_2(self):
- subtable = self.makeSubtable(2, 1, 2, 0)
- subtable.cmap = {c: "cid%05d" % c for c in range(32, 8192)}
- font = ttLib.TTFont()
- font.setGlyphOrder([".notdef"] + list(subtable.cmap.values()))
- data = subtable.compile(font)
-
- subtable2 = CmapSubtable.newSubtable(2)
- subtable2.decompile(data, font)
- self.assertEqual(subtable2.cmap, subtable.cmap)
-
- def test_compile_2_rebuild_rev_glyph_order(self):
- for fmt in [2, 4, 12]:
- subtable = self.makeSubtable(fmt, 1, 2, 0)
- subtable.cmap = {c: "cid%05d" % c for c in range(32, 8192)}
- font = ttLib.TTFont()
- font.setGlyphOrder([".notdef"] + list(subtable.cmap.values()))
- font._reverseGlyphOrderDict = {} # force first KeyError branch in subtable.compile()
- data = subtable.compile(font)
- subtable2 = CmapSubtable.newSubtable(fmt)
- subtable2.decompile(data, font)
- self.assertEqual(subtable2.cmap, subtable.cmap, str(fmt))
-
- def test_compile_2_gids(self):
- for fmt in [2, 4, 12]:
- subtable = self.makeSubtable(fmt, 1, 3, 0)
- subtable.cmap = {0x0041:'gid001', 0x0042:'gid002'}
- font = ttLib.TTFont()
- font.setGlyphOrder([".notdef"])
- data = subtable.compile(font)
-
- def test_compile_decompile_4_empty(self):
- subtable = self.makeSubtable(4, 3, 1, 0)
- subtable.cmap = {}
- font = ttLib.TTFont()
- font.setGlyphOrder([])
- data = subtable.compile(font)
- subtable2 = CmapSubtable.newSubtable(4)
- subtable2.decompile(data, font)
- self.assertEqual(subtable2.cmap, {})
-
- def test_decompile_4(self):
- subtable = CmapSubtable.newSubtable(4)
- font = ttLib.TTFont()
- font.setGlyphOrder([])
- subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font)
-
- def test_decompile_12(self):
- subtable = CmapSubtable.newSubtable(12)
- font = ttLib.TTFont()
- font.setGlyphOrder([])
- subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font)
-
- def test_buildReversed(self):
- c4 = self.makeSubtable(4, 3, 1, 0)
- c4.cmap = {0x0041:'A', 0x0391:'A'}
- c12 = self.makeSubtable(12, 3, 10, 0)
- c12.cmap = {0x10314: 'u10314'}
- cmap = table__c_m_a_p()
- cmap.tables = [c4, c12]
- self.assertEqual(cmap.buildReversed(), {'A':{0x0041, 0x0391}, 'u10314':{0x10314}})
-
- def test_getBestCmap(self):
- c4 = self.makeSubtable(4, 3, 1, 0)
- c4.cmap = {0x0041:'A', 0x0391:'A'}
- c12 = self.makeSubtable(12, 3, 10, 0)
- c12.cmap = {0x10314: 'u10314'}
- cmap = table__c_m_a_p()
- cmap.tables = [c4, c12]
- self.assertEqual(cmap.getBestCmap(), {0x10314: 'u10314'})
- self.assertEqual(cmap.getBestCmap(cmapPreferences=[(3, 1)]), {0x0041:'A', 0x0391:'A'})
- self.assertEqual(cmap.getBestCmap(cmapPreferences=[(0, 4)]), None)
-
- def test_font_getBestCmap(self):
- c4 = self.makeSubtable(4, 3, 1, 0)
- c4.cmap = {0x0041:'A', 0x0391:'A'}
- c12 = self.makeSubtable(12, 3, 10, 0)
- c12.cmap = {0x10314: 'u10314'}
- cmap = table__c_m_a_p()
- cmap.tables = [c4, c12]
- font = ttLib.TTFont()
- font["cmap"] = cmap
- self.assertEqual(font.getBestCmap(), {0x10314: 'u10314'})
- self.assertEqual(font.getBestCmap(cmapPreferences=[(3, 1)]), {0x0041:'A', 0x0391:'A'})
- self.assertEqual(font.getBestCmap(cmapPreferences=[(0, 4)]), None)
-
- def test_format_14(self):
- subtable = self.makeSubtable(14, 0, 5, 0)
- subtable.cmap = {} # dummy
- subtable.uvsDict = {
- 0xFE00: [(0x0030, "zero.slash")],
- 0xFE01: [(0x0030, None)],
- }
- fb = FontBuilder(1024, isTTF=True)
- font = fb.font
- fb.setupGlyphOrder([".notdef", "zero.slash"])
- fb.setupMaxp()
- fb.setupPost()
- cmap = table__c_m_a_p()
- cmap.tableVersion = 0
- cmap.tables = [subtable]
- font["cmap"] = cmap
- f = io.BytesIO()
- font.save(f)
- f.seek(0)
- font = ttLib.TTFont(f)
- self.assertEqual(font["cmap"].getcmap(0, 5).uvsDict, subtable.uvsDict)
- f = io.StringIO(newline=None)
- font.saveXML(f, tables=["cmap"])
- ttx = strip_VariableItems(f.getvalue())
- with open(CMAP_FORMAT_14_TTX) as f:
- expected = strip_VariableItems(f.read())
- self.assertEqual(ttx, expected)
- with open(CMAP_FORMAT_14_BW_COMPAT_TTX) as f:
- font.importXML(f)
- self.assertEqual(font["cmap"].getcmap(0, 5).uvsDict, subtable.uvsDict)
+class CmapSubtableTest(unittest.TestCase):
+ def makeSubtable(self, cmapFormat, platformID, platEncID, langID):
+ subtable = CmapSubtable.newSubtable(cmapFormat)
+ subtable.platformID, subtable.platEncID, subtable.language = (
+ platformID,
+ platEncID,
+ langID,
+ )
+ return subtable
+
+ def test_toUnicode_utf16be(self):
+ subtable = self.makeSubtable(4, 0, 2, 7)
+ self.assertEqual("utf_16_be", subtable.getEncoding())
+ self.assertEqual(True, subtable.isUnicode())
+
+ def test_toUnicode_macroman(self):
+ subtable = self.makeSubtable(4, 1, 0, 7) # MacRoman
+ self.assertEqual("mac_roman", subtable.getEncoding())
+ self.assertEqual(False, subtable.isUnicode())
+
+ def test_toUnicode_macromanian(self):
+ subtable = self.makeSubtable(4, 1, 0, 37) # Mac Romanian
+ self.assertNotEqual(None, subtable.getEncoding())
+ self.assertEqual(False, subtable.isUnicode())
+
+ def test_extended_mac_encodings(self):
+ subtable = self.makeSubtable(4, 1, 1, 0) # Mac Japanese
+ self.assertNotEqual(None, subtable.getEncoding())
+ self.assertEqual(False, subtable.isUnicode())
+
+ def test_extended_unknown(self):
+ subtable = self.makeSubtable(4, 10, 11, 12)
+ self.assertEqual(subtable.getEncoding(), None)
+ self.assertEqual(subtable.getEncoding("ascii"), "ascii")
+ self.assertEqual(subtable.getEncoding(default="xyz"), "xyz")
+
+ def test_compile_2(self):
+ subtable = self.makeSubtable(2, 1, 2, 0)
+ subtable.cmap = {c: "cid%05d" % c for c in range(32, 8192)}
+ font = ttLib.TTFont()
+ font.setGlyphOrder([".notdef"] + list(subtable.cmap.values()))
+ data = subtable.compile(font)
+
+ subtable2 = CmapSubtable.newSubtable(2)
+ subtable2.decompile(data, font)
+ self.assertEqual(subtable2.cmap, subtable.cmap)
+
+ def test_compile_2_rebuild_rev_glyph_order(self):
+ for fmt in [2, 4, 12]:
+ subtable = self.makeSubtable(fmt, 1, 2, 0)
+ subtable.cmap = {c: "cid%05d" % c for c in range(32, 8192)}
+ font = ttLib.TTFont()
+ font.setGlyphOrder([".notdef"] + list(subtable.cmap.values()))
+ font._reverseGlyphOrderDict = (
+ {}
+ ) # force first KeyError branch in subtable.compile()
+ data = subtable.compile(font)
+ subtable2 = CmapSubtable.newSubtable(fmt)
+ subtable2.decompile(data, font)
+ self.assertEqual(subtable2.cmap, subtable.cmap, str(fmt))
+
+ def test_compile_2_gids(self):
+ for fmt in [2, 4, 12]:
+ subtable = self.makeSubtable(fmt, 1, 3, 0)
+ subtable.cmap = {0x0041: "gid001", 0x0042: "gid002"}
+ font = ttLib.TTFont()
+ font.setGlyphOrder([".notdef"])
+ data = subtable.compile(font)
+
+ def test_compile_decompile_4_empty(self):
+ subtable = self.makeSubtable(4, 3, 1, 0)
+ subtable.cmap = {}
+ font = ttLib.TTFont()
+ font.setGlyphOrder([])
+ data = subtable.compile(font)
+ subtable2 = CmapSubtable.newSubtable(4)
+ subtable2.decompile(data, font)
+ self.assertEqual(subtable2.cmap, {})
+
+ def test_decompile_4(self):
+ subtable = CmapSubtable.newSubtable(4)
+ font = ttLib.TTFont()
+ font.setGlyphOrder([])
+ subtable.decompile(b"\0" * 3 + b"\x10" + b"\0" * 12, font)
+
+ def test_decompile_12(self):
+ subtable = CmapSubtable.newSubtable(12)
+ font = ttLib.TTFont()
+ font.setGlyphOrder([])
+ subtable.decompile(b"\0" * 7 + b"\x10" + b"\0" * 8, font)
+
+ def test_buildReversed(self):
+ c4 = self.makeSubtable(4, 3, 1, 0)
+ c4.cmap = {0x0041: "A", 0x0391: "A"}
+ c12 = self.makeSubtable(12, 3, 10, 0)
+ c12.cmap = {0x10314: "u10314"}
+ cmap = table__c_m_a_p()
+ cmap.tables = [c4, c12]
+ self.assertEqual(
+ cmap.buildReversed(), {"A": {0x0041, 0x0391}, "u10314": {0x10314}}
+ )
+
+ def test_getBestCmap(self):
+ c4 = self.makeSubtable(4, 3, 1, 0)
+ c4.cmap = {0x0041: "A", 0x0391: "A"}
+ c12 = self.makeSubtable(12, 3, 10, 0)
+ c12.cmap = {0x10314: "u10314"}
+ cmap = table__c_m_a_p()
+ cmap.tables = [c4, c12]
+ self.assertEqual(cmap.getBestCmap(), {0x10314: "u10314"})
+ self.assertEqual(
+ cmap.getBestCmap(cmapPreferences=[(3, 1)]), {0x0041: "A", 0x0391: "A"}
+ )
+ self.assertEqual(cmap.getBestCmap(cmapPreferences=[(0, 4)]), None)
+
+ def test_font_getBestCmap(self):
+ c4 = self.makeSubtable(4, 3, 1, 0)
+ c4.cmap = {0x0041: "A", 0x0391: "A"}
+ c12 = self.makeSubtable(12, 3, 10, 0)
+ c12.cmap = {0x10314: "u10314"}
+ cmap = table__c_m_a_p()
+ cmap.tables = [c4, c12]
+ font = ttLib.TTFont()
+ font["cmap"] = cmap
+ self.assertEqual(font.getBestCmap(), {0x10314: "u10314"})
+ self.assertEqual(
+ font.getBestCmap(cmapPreferences=[(3, 1)]), {0x0041: "A", 0x0391: "A"}
+ )
+ self.assertEqual(font.getBestCmap(cmapPreferences=[(0, 4)]), None)
+
+ def test_format_14(self):
+ subtable = self.makeSubtable(14, 0, 5, 0)
+ subtable.cmap = {} # dummy
+ subtable.uvsDict = {
+ 0xFE00: [(0x0030, "zero.slash")],
+ 0xFE01: [(0x0030, None)],
+ }
+ fb = FontBuilder(1024, isTTF=True)
+ font = fb.font
+ fb.setupGlyphOrder([".notdef", "zero.slash"])
+ fb.setupMaxp()
+ fb.setupPost()
+ cmap = table__c_m_a_p()
+ cmap.tableVersion = 0
+ cmap.tables = [subtable]
+ font["cmap"] = cmap
+ f = io.BytesIO()
+ font.save(f)
+ f.seek(0)
+ font = ttLib.TTFont(f)
+ self.assertEqual(font["cmap"].getcmap(0, 5).uvsDict, subtable.uvsDict)
+ f = io.StringIO(newline=None)
+ font.saveXML(f, tables=["cmap"])
+ ttx = strip_VariableItems(f.getvalue())
+ with open(CMAP_FORMAT_14_TTX) as f:
+ expected = strip_VariableItems(f.read())
+ self.assertEqual(ttx, expected)
+ with open(CMAP_FORMAT_14_BW_COMPAT_TTX) as f:
+ font.importXML(f)
+ self.assertEqual(font["cmap"].getcmap(0, 5).uvsDict, subtable.uvsDict)
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_c_v_a_r_test.py b/Tests/ttLib/tables/_c_v_a_r_test.py
index 31c19538..c6fe0113 100644
--- a/Tests/ttLib/tables/_c_v_a_r_test.py
+++ b/Tests/ttLib/tables/_c_v_a_r_test.py
@@ -7,58 +7,62 @@ import unittest
CVAR_DATA = deHexStr(
- "0001 0000 " # 0: majorVersion=1 minorVersion=0
- "8002 0018 " # 4: tupleVariationCount=2|TUPLES_SHARE_POINT_NUMBERS offsetToData=24
- "0004 " # 8: tvHeader[0].variationDataSize=4
- "8000 " # 10: tvHeader[0].tupleIndex=EMBEDDED_PEAK
- "4000 0000 " # 12: tvHeader[0].peakTuple=[1.0, 0.0]
- "0004 " # 16: tvHeader[1].variationDataSize=4
- "8000 " # 18: tvHeader[1].tupleIndex=EMBEDDED_PEAK
- "C000 3333 " # 20: tvHeader[1].peakTuple=[-1.0, 0.8]
+ "0001 0000 " # 0: majorVersion=1 minorVersion=0
+ "8002 0018 " # 4: tupleVariationCount=2|TUPLES_SHARE_POINT_NUMBERS offsetToData=24
+ "0004 " # 8: tvHeader[0].variationDataSize=4
+ "8000 " # 10: tvHeader[0].tupleIndex=EMBEDDED_PEAK
+ "4000 0000 " # 12: tvHeader[0].peakTuple=[1.0, 0.0]
+ "0004 " # 16: tvHeader[1].variationDataSize=4
+ "8000 " # 18: tvHeader[1].tupleIndex=EMBEDDED_PEAK
+ "C000 3333 " # 20: tvHeader[1].peakTuple=[-1.0, 0.8]
"03 02 02 01 01" # 24: shared_pointCount=03, run_count=2 cvt=[2, 3, 4]
- "02 03 01 04 " # 25: deltas=[3, 1, 4]
- "02 09 07 08") # 29: deltas=[9, 7, 8]
+ "02 03 01 04 " # 25: deltas=[3, 1, 4]
+ "02 09 07 08"
+) # 29: deltas=[9, 7, 8]
CVAR_PRIVATE_POINT_DATA = deHexStr(
- "0001 0000 " # 0: majorVersion=1 minorVersion=0
- "0002 0018 " # 4: tupleVariationCount=2 offsetToData=24
- "0009 " # 8: tvHeader[0].variationDataSize=9
- "A000 " # 10: tvHeader[0].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINT_NUMBERS
- "4000 0000 " # 12: tvHeader[0].peakTuple=[1.0, 0.0]
- "0009 " # 16: tvHeader[1].variationDataSize=9
- "A000 " # 18: tvHeader[1].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINT_NUMBERS
- "C000 3333 " # 20: tvHeader[1].peakTuple=[-1.0, 0.8]
- "03 02 02 01 01 02 03 01 04 " # 24: pointCount=3 run_count=2 cvt=2 1 1 run_count=2 deltas=[3, 1, 4]
- "03 02 02 01 01 02 09 07 08 ") # 33: pointCount=3 run_count=2 cvt=2 1 1 run_count=2 deltas=[9, 7, 8]
+ "0001 0000 " # 0: majorVersion=1 minorVersion=0
+ "0002 0018 " # 4: tupleVariationCount=2 offsetToData=24
+ "0009 " # 8: tvHeader[0].variationDataSize=9
+ "A000 " # 10: tvHeader[0].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINT_NUMBERS
+ "4000 0000 " # 12: tvHeader[0].peakTuple=[1.0, 0.0]
+ "0009 " # 16: tvHeader[1].variationDataSize=9
+ "A000 " # 18: tvHeader[1].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINT_NUMBERS
+ "C000 3333 " # 20: tvHeader[1].peakTuple=[-1.0, 0.8]
+ "03 02 02 01 01 02 03 01 04 " # 24: pointCount=3 run_count=2 cvt=2 1 1 run_count=2 deltas=[3, 1, 4]
+ "03 02 02 01 01 02 09 07 08 "
+) # 33: pointCount=3 run_count=2 cvt=2 1 1 run_count=2 deltas=[9, 7, 8]
CVAR_XML = [
'<version major="1" minor="0"/>',
- '<tuple>',
+ "<tuple>",
' <coord axis="wght" value="1.0"/>',
' <delta cvt="2" value="3"/>',
' <delta cvt="3" value="1"/>',
' <delta cvt="4" value="4"/>',
- '</tuple>',
- '<tuple>',
+ "</tuple>",
+ "<tuple>",
' <coord axis="wght" value="-1.0"/>',
' <coord axis="wdth" value="0.8"/>',
' <delta cvt="2" value="9"/>',
' <delta cvt="3" value="7"/>',
' <delta cvt="4" value="8"/>',
- '</tuple>',
+ "</tuple>",
]
CVAR_VARIATIONS = [
TupleVariation({"wght": (0.0, 1.0, 1.0)}, [None, None, 3, 1, 4]),
- TupleVariation({"wght": (-1, -1.0, 0.0), "wdth": (0.0, 0.7999878, 0.7999878)},
- [None, None, 9, 7, 8]),
+ TupleVariation(
+ {"wght": (-1, -1.0, 0.0), "wdth": (0.0, 0.7999878, 0.7999878)},
+ [None, None, 9, 7, 8],
+ ),
]
class CVARTableTest(unittest.TestCase):
def assertVariationsAlmostEqual(self, variations1, variations2):
self.assertEqual(len(variations1), len(variations2))
- for (v1, v2) in zip(variations1, variations2):
+ for v1, v2 in zip(variations1, variations2):
self.assertSetEqual(set(v1.axes), set(v2.axes))
for axisTag, support1 in v1.axes.items():
support2 = v2.axes[axisTag]
@@ -84,7 +88,9 @@ class CVARTableTest(unittest.TestCase):
def test_compile_shared_points(self):
font, cvar = self.makeFont()
cvar.variations = CVAR_VARIATIONS
- self.assertEqual(hexStr(cvar.compile(font, useSharedPoints=True)), hexStr(CVAR_DATA))
+ self.assertEqual(
+ hexStr(cvar.compile(font, useSharedPoints=True)), hexStr(CVAR_DATA)
+ )
def test_decompile(self):
font, cvar = self.makeFont()
@@ -116,4 +122,5 @@ class CVARTableTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_f_v_a_r_test.py b/Tests/ttLib/tables/_f_v_a_r_test.py
index 2ac5237f..353df9f3 100644
--- a/Tests/ttLib/tables/_f_v_a_r_test.py
+++ b/Tests/ttLib/tables/_f_v_a_r_test.py
@@ -8,22 +8,19 @@ from io import BytesIO
import unittest
-
FVAR_DATA = deHexStr(
"00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C "
"77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 "
"77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 "
"01 03 00 00 01 2c 00 00 00 64 00 00 "
- "01 04 00 00 01 2c 00 00 00 4b 00 00")
+ "01 04 00 00 01 2c 00 00 00 4b 00 00"
+)
-FVAR_AXIS_DATA = deHexStr(
- "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59")
+FVAR_AXIS_DATA = deHexStr("6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59")
-FVAR_INSTANCE_DATA_WITHOUT_PSNAME = deHexStr(
- "01 59 00 00 00 00 b3 33 00 00 80 00")
+FVAR_INSTANCE_DATA_WITHOUT_PSNAME = deHexStr("01 59 00 00 00 00 b3 33 00 00 80 00")
-FVAR_INSTANCE_DATA_WITH_PSNAME = (
- FVAR_INSTANCE_DATA_WITHOUT_PSNAME + deHexStr("02 34"))
+FVAR_INSTANCE_DATA_WITH_PSNAME = FVAR_INSTANCE_DATA_WITHOUT_PSNAME + deHexStr("02 34")
def xml_lines(writer):
@@ -38,7 +35,7 @@ def AddName(font, name):
nameTable.names = []
namerec = NameRecord()
namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256])
- namerec.string = name.encode('mac_roman')
+ namerec.string = name.encode("mac_roman")
namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0)
nameTable.names.append(namerec)
return namerec
@@ -91,15 +88,16 @@ class FontVariationTableTest(unittest.TestCase):
def test_fromXML(self):
fvar = table__f_v_a_r()
for name, attrs, content in parseXML(
- '<Axis>'
- ' <AxisTag>opsz</AxisTag>'
- '</Axis>'
- '<Axis>'
- ' <AxisTag>slnt</AxisTag>'
- ' <Flags>0x123</Flags>'
- '</Axis>'
- '<NamedInstance subfamilyNameID="765"/>'
- '<NamedInstance subfamilyNameID="234"/>'):
+ "<Axis>"
+ " <AxisTag>opsz</AxisTag>"
+ "</Axis>"
+ "<Axis>"
+ " <AxisTag>slnt</AxisTag>"
+ " <Flags>0x123</Flags>"
+ "</Axis>"
+ '<NamedInstance subfamilyNameID="765"/>'
+ '<NamedInstance subfamilyNameID="234"/>'
+ ):
fvar.fromXML(name, attrs, content, ttFont=None)
self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes])
self.assertEqual([0, 0x123], [a.flags for a in fvar.axes])
@@ -109,7 +107,7 @@ class FontVariationTableTest(unittest.TestCase):
class AxisTest(unittest.TestCase):
def test_compile(self):
axis = Axis()
- axis.axisTag, axis.axisNameID = ('opsz', 345)
+ axis.axisTag, axis.axisNameID = ("opsz", 345)
axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5)
self.assertEqual(FVAR_AXIS_DATA, axis.compile())
@@ -131,30 +129,34 @@ class AxisTest(unittest.TestCase):
axis.flags = 0xABC
writer = XMLWriter(BytesIO())
axis.toXML(writer, font)
- self.assertEqual([
- '',
- '<!-- Optical Size -->',
- '<Axis>',
- '<AxisTag>opsz</AxisTag>',
- '<Flags>0xABC</Flags>',
- '<MinValue>-0.5</MinValue>',
- '<DefaultValue>1.3</DefaultValue>',
- '<MaxValue>1.5</MaxValue>',
- '<AxisNameID>256</AxisNameID>',
- '</Axis>'
- ], xml_lines(writer))
+ self.assertEqual(
+ [
+ "",
+ "<!-- Optical Size -->",
+ "<Axis>",
+ "<AxisTag>opsz</AxisTag>",
+ "<Flags>0xABC</Flags>",
+ "<MinValue>-0.5</MinValue>",
+ "<DefaultValue>1.3</DefaultValue>",
+ "<MaxValue>1.5</MaxValue>",
+ "<AxisNameID>256</AxisNameID>",
+ "</Axis>",
+ ],
+ xml_lines(writer),
+ )
def test_fromXML(self):
axis = Axis()
for name, attrs, content in parseXML(
- '<Axis>'
- ' <AxisTag>wght</AxisTag>'
- ' <Flags>0x123ABC</Flags>'
- ' <MinValue>100</MinValue>'
- ' <DefaultValue>400</DefaultValue>'
- ' <MaxValue>900</MaxValue>'
- ' <AxisNameID>256</AxisNameID>'
- '</Axis>'):
+ "<Axis>"
+ " <AxisTag>wght</AxisTag>"
+ " <Flags>0x123ABC</Flags>"
+ " <MinValue>100</MinValue>"
+ " <DefaultValue>400</DefaultValue>"
+ " <MaxValue>900</MaxValue>"
+ " <AxisNameID>256</AxisNameID>"
+ "</Axis>"
+ ):
axis.fromXML(name, attrs, content, ttFont=None)
self.assertEqual("wght", axis.axisTag)
self.assertEqual(0x123ABC, axis.flags)
@@ -175,16 +177,18 @@ class NamedInstanceTest(unittest.TestCase):
inst.subfamilyNameID = 345
inst.postscriptNameID = 564
inst.coordinates = {"wght": 0.7, "wdth": 0.5}
- self.assertEqual(FVAR_INSTANCE_DATA_WITH_PSNAME,
- inst.compile(["wght", "wdth"], True))
+ self.assertEqual(
+ FVAR_INSTANCE_DATA_WITH_PSNAME, inst.compile(["wght", "wdth"], True)
+ )
def test_compile_withoutPostScriptName(self):
inst = NamedInstance()
inst.subfamilyNameID = 345
inst.postscriptNameID = 564
inst.coordinates = {"wght": 0.7, "wdth": 0.5}
- self.assertEqual(FVAR_INSTANCE_DATA_WITHOUT_PSNAME,
- inst.compile(["wght", "wdth"], False))
+ self.assertEqual(
+ FVAR_INSTANCE_DATA_WITHOUT_PSNAME, inst.compile(["wght", "wdth"], False)
+ )
def test_decompile_withPostScriptName(self):
inst = NamedInstance()
@@ -209,16 +213,19 @@ class NamedInstanceTest(unittest.TestCase):
inst.coordinates = {"wght": 0.7, "wdth": 0.5}
writer = XMLWriter(BytesIO())
inst.toXML(writer, font)
- self.assertEqual([
- '',
- '<!-- Light Condensed -->',
- '<!-- PostScript: Test-LightCondensed -->',
- '<NamedInstance flags="0xE9" postscriptNameID="%s" subfamilyNameID="%s">' % (
- inst.postscriptNameID, inst.subfamilyNameID),
- '<coord axis="wght" value="0.7"/>',
- '<coord axis="wdth" value="0.5"/>',
- '</NamedInstance>'
- ], xml_lines(writer))
+ self.assertEqual(
+ [
+ "",
+ "<!-- Light Condensed -->",
+ "<!-- PostScript: Test-LightCondensed -->",
+ '<NamedInstance flags="0xE9" postscriptNameID="%s" subfamilyNameID="%s">'
+ % (inst.postscriptNameID, inst.subfamilyNameID),
+ '<coord axis="wght" value="0.7"/>',
+ '<coord axis="wdth" value="0.5"/>',
+ "</NamedInstance>",
+ ],
+ xml_lines(writer),
+ )
def test_toXML_withoutPostScriptName(self):
font = MakeFont()
@@ -228,23 +235,27 @@ class NamedInstanceTest(unittest.TestCase):
inst.coordinates = {"wght": 0.7, "wdth": 0.5}
writer = XMLWriter(BytesIO())
inst.toXML(writer, font)
- self.assertEqual([
- '',
- '<!-- Light Condensed -->',
- '<NamedInstance flags="0xABC" subfamilyNameID="%s">' %
- inst.subfamilyNameID,
- '<coord axis="wght" value="0.7"/>',
- '<coord axis="wdth" value="0.5"/>',
- '</NamedInstance>'
- ], xml_lines(writer))
+ self.assertEqual(
+ [
+ "",
+ "<!-- Light Condensed -->",
+ '<NamedInstance flags="0xABC" subfamilyNameID="%s">'
+ % inst.subfamilyNameID,
+ '<coord axis="wght" value="0.7"/>',
+ '<coord axis="wdth" value="0.5"/>',
+ "</NamedInstance>",
+ ],
+ xml_lines(writer),
+ )
def test_fromXML_withPostScriptName(self):
inst = NamedInstance()
for name, attrs, content in parseXML(
- '<NamedInstance flags="0x0" postscriptNameID="257" subfamilyNameID="345">'
- ' <coord axis="wght" value="0.7"/>'
- ' <coord axis="wdth" value="0.5"/>'
- '</NamedInstance>'):
+ '<NamedInstance flags="0x0" postscriptNameID="257" subfamilyNameID="345">'
+ ' <coord axis="wght" value="0.7"/>'
+ ' <coord axis="wdth" value="0.5"/>'
+ "</NamedInstance>"
+ ):
inst.fromXML(name, attrs, content, ttFont=MakeFont())
self.assertEqual(257, inst.postscriptNameID)
self.assertEqual(345, inst.subfamilyNameID)
@@ -253,10 +264,11 @@ class NamedInstanceTest(unittest.TestCase):
def test_fromXML_withoutPostScriptName(self):
inst = NamedInstance()
for name, attrs, content in parseXML(
- '<NamedInstance flags="0x123ABC" subfamilyNameID="345">'
- ' <coord axis="wght" value="0.7"/>'
- ' <coord axis="wdth" value="0.5"/>'
- '</NamedInstance>'):
+ '<NamedInstance flags="0x123ABC" subfamilyNameID="345">'
+ ' <coord axis="wght" value="0.7"/>'
+ ' <coord axis="wdth" value="0.5"/>'
+ "</NamedInstance>"
+ ):
inst.fromXML(name, attrs, content, ttFont=MakeFont())
self.assertEqual(0x123ABC, inst.flags)
self.assertEqual(345, inst.subfamilyNameID)
@@ -265,4 +277,5 @@ class NamedInstanceTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_g_c_i_d_test.py b/Tests/ttLib/tables/_g_c_i_d_test.py
index e7666771..c5e027e0 100644
--- a/Tests/ttLib/tables/_g_c_i_d_test.py
+++ b/Tests/ttLib/tables/_g_c_i_d_test.py
@@ -7,39 +7,39 @@ import unittest
# On macOS X 10.12.3, the font /Library/Fonts/AppleGothic.ttf has a ‘gcid’
# table with a similar structure as this test data, just more CIDs.
GCID_DATA = deHexStr(
- "0000 0000 " # 0: Format=0, Flags=0
- "0000 0098 " # 4: Size=152
- "0000 " # 8: Registry=0
- "41 64 6F 62 65 " # 10: RegistryName="Adobe"
- + ("00" * 59) + # 15: <padding>
- "0003 " # 74: Order=3
+ "0000 0000 " # 0: Format=0, Flags=0
+ "0000 0098 " # 4: Size=152
+ "0000 " # 8: Registry=0
+ "41 64 6F 62 65 " # 10: RegistryName="Adobe"
+ + ("00" * 59)
+ + "0003 " # 15: <padding> # 74: Order=3
"4B 6F 72 65 61 31 " # 76: Order="Korea1"
- + ("00" * 58) + # 82: <padding>
- "0001 " # 140: SupplementVersion
- "0004 " # 142: Count
- "1234 " # 144: CIDs[0/.notdef]=4660
- "FFFF " # 146: CIDs[1/A]=None
- "0007 " # 148: CIDs[2/B]=7
- "DEF0 " # 150: CIDs[3/C]=57072
-) # 152: <end>
+ + ("00" * 58)
+ + "0001 " # 82: <padding> # 140: SupplementVersion
+ "0004 " # 142: Count
+ "1234 " # 144: CIDs[0/.notdef]=4660
+ "FFFF " # 146: CIDs[1/A]=None
+ "0007 " # 148: CIDs[2/B]=7
+ "DEF0 " # 150: CIDs[3/C]=57072
+) # 152: <end>
assert len(GCID_DATA) == 152, len(GCID_DATA)
GCID_XML = [
- '<GlyphCIDMapping Format="0">',
- ' <DataFormat value="0"/>',
- ' <!-- StructLength=152 -->',
- ' <Registry value="0"/>',
- ' <RegistryName value="Adobe"/>',
- ' <Order value="3"/>',
- ' <OrderName value="Korea1"/>',
- ' <SupplementVersion value="1"/>',
- ' <Mapping>',
- ' <CID glyph=".notdef" value="4660"/>',
- ' <CID glyph="B" value="7"/>',
- ' <CID glyph="C" value="57072"/>',
- ' </Mapping>',
- '</GlyphCIDMapping>',
+ '<GlyphCIDMapping Format="0">',
+ ' <DataFormat value="0"/>',
+ " <!-- StructLength=152 -->",
+ ' <Registry value="0"/>',
+ ' <RegistryName value="Adobe"/>',
+ ' <Order value="3"/>',
+ ' <OrderName value="Korea1"/>',
+ ' <SupplementVersion value="1"/>',
+ " <Mapping>",
+ ' <CID glyph=".notdef" value="4660"/>',
+ ' <CID glyph="B" value="7"/>',
+ ' <CID glyph="C" value="57072"/>',
+ " </Mapping>",
+ "</GlyphCIDMapping>",
]
@@ -47,21 +47,21 @@ class GCIDTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D'])
+ cls.font = FakeFont([".notdef", "A", "B", "C", "D"])
def testDecompileToXML(self):
- table = newTable('gcid')
+ table = newTable("gcid")
table.decompile(GCID_DATA, self.font)
self.assertEqual(getXML(table.toXML, self.font), GCID_XML)
def testCompileFromXML(self):
- table = newTable('gcid')
+ table = newTable("gcid")
for name, attrs, content in parseXML(GCID_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(GCID_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(GCID_DATA))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_g_l_y_f_test.py b/Tests/ttLib/tables/_g_l_y_f_test.py
index 84f30dc6..ce2e0e57 100644
--- a/Tests/ttLib/tables/_g_l_y_f_test.py
+++ b/Tests/ttLib/tables/_g_l_y_f_test.py
@@ -1,5 +1,6 @@
from fontTools.misc.fixedTools import otRound
from fontTools.misc.testTools import getXML, parseXML
+from fontTools.misc.transform import Transform
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.pens.recordingPen import RecordingPen, RecordingPointPen
from fontTools.pens.pointPen import PointToSegmentPen
@@ -8,6 +9,9 @@ from fontTools.ttLib.tables._g_l_y_f import (
Glyph,
GlyphCoordinates,
GlyphComponent,
+ dropImpliedOnCurvePoints,
+ flagOnCurve,
+ flagCubic,
ARGS_ARE_XY_VALUES,
SCALED_COMPONENT_OFFSET,
UNSCALED_COMPONENT_OFFSET,
@@ -18,7 +22,8 @@ from fontTools.ttLib.tables._g_l_y_f import (
from fontTools.ttLib.tables import ttProgram
import sys
import array
-from io import StringIO
+from copy import deepcopy
+from io import StringIO, BytesIO
import itertools
import pytest
import re
@@ -27,139 +32,137 @@ import unittest
class GlyphCoordinatesTest(object):
-
def test_translate(self):
- g = GlyphCoordinates([(1,2)])
- g.translate((.5,0))
- assert g == GlyphCoordinates([(1.5,2.0)])
+ g = GlyphCoordinates([(1, 2)])
+ g.translate((0.5, 0))
+ assert g == GlyphCoordinates([(1.5, 2.0)])
def test_scale(self):
- g = GlyphCoordinates([(1,2)])
- g.scale((.5,0))
- assert g == GlyphCoordinates([(0.5,0.0)])
+ g = GlyphCoordinates([(1, 2)])
+ g.scale((0.5, 0))
+ assert g == GlyphCoordinates([(0.5, 0.0)])
def test_transform(self):
- g = GlyphCoordinates([(1,2)])
- g.transform(((.5,0),(.2,.5)))
- assert g[0] == GlyphCoordinates([(0.9,1.0)])[0]
+ g = GlyphCoordinates([(1, 2)])
+ g.transform(((0.5, 0), (0.2, 0.5)))
+ assert g[0] == GlyphCoordinates([(0.9, 1.0)])[0]
def test__eq__(self):
- g = GlyphCoordinates([(1,2)])
- g2 = GlyphCoordinates([(1.0,2)])
- g3 = GlyphCoordinates([(1.5,2)])
+ g = GlyphCoordinates([(1, 2)])
+ g2 = GlyphCoordinates([(1.0, 2)])
+ g3 = GlyphCoordinates([(1.5, 2)])
assert g == g2
assert not g == g3
assert not g2 == g3
assert not g == object()
def test__ne__(self):
- g = GlyphCoordinates([(1,2)])
- g2 = GlyphCoordinates([(1.0,2)])
- g3 = GlyphCoordinates([(1.5,2)])
+ g = GlyphCoordinates([(1, 2)])
+ g2 = GlyphCoordinates([(1.0, 2)])
+ g3 = GlyphCoordinates([(1.5, 2)])
assert not (g != g2)
assert g != g3
assert g2 != g3
assert g != object()
def test__pos__(self):
- g = GlyphCoordinates([(1,2)])
+ g = GlyphCoordinates([(1, 2)])
g2 = +g
assert g == g2
def test__neg__(self):
- g = GlyphCoordinates([(1,2)])
+ g = GlyphCoordinates([(1, 2)])
g2 = -g
assert g2 == GlyphCoordinates([(-1, -2)])
- @pytest.mark.skipif(sys.version_info[0] < 3,
- reason="__round___ requires Python 3")
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="__round___ requires Python 3")
def test__round__(self):
- g = GlyphCoordinates([(-1.5,2)])
+ g = GlyphCoordinates([(-1.5, 2)])
g2 = round(g)
- assert g2 == GlyphCoordinates([(-1,2)])
+ assert g2 == GlyphCoordinates([(-1, 2)])
def test__add__(self):
- g1 = GlyphCoordinates([(1,2)])
- g2 = GlyphCoordinates([(3,4)])
- g3 = GlyphCoordinates([(4,6)])
+ g1 = GlyphCoordinates([(1, 2)])
+ g2 = GlyphCoordinates([(3, 4)])
+ g3 = GlyphCoordinates([(4, 6)])
assert g1 + g2 == g3
- assert g1 + (1, 1) == GlyphCoordinates([(2,3)])
+ assert g1 + (1, 1) == GlyphCoordinates([(2, 3)])
with pytest.raises(TypeError) as excinfo:
assert g1 + object()
- assert 'unsupported operand' in str(excinfo.value)
+ assert "unsupported operand" in str(excinfo.value)
def test__sub__(self):
- g1 = GlyphCoordinates([(1,2)])
- g2 = GlyphCoordinates([(3,4)])
- g3 = GlyphCoordinates([(-2,-2)])
+ g1 = GlyphCoordinates([(1, 2)])
+ g2 = GlyphCoordinates([(3, 4)])
+ g3 = GlyphCoordinates([(-2, -2)])
assert g1 - g2 == g3
- assert g1 - (1, 1) == GlyphCoordinates([(0,1)])
+ assert g1 - (1, 1) == GlyphCoordinates([(0, 1)])
with pytest.raises(TypeError) as excinfo:
assert g1 - object()
- assert 'unsupported operand' in str(excinfo.value)
+ assert "unsupported operand" in str(excinfo.value)
def test__rsub__(self):
- g = GlyphCoordinates([(1,2)])
+ g = GlyphCoordinates([(1, 2)])
# other + (-self)
- assert (1, 1) - g == GlyphCoordinates([(0,-1)])
+ assert (1, 1) - g == GlyphCoordinates([(0, -1)])
def test__mul__(self):
- g = GlyphCoordinates([(1,2)])
- assert g * 3 == GlyphCoordinates([(3,6)])
- assert g * (3,2) == GlyphCoordinates([(3,4)])
- assert g * (1,1) == g
+ g = GlyphCoordinates([(1, 2)])
+ assert g * 3 == GlyphCoordinates([(3, 6)])
+ assert g * (3, 2) == GlyphCoordinates([(3, 4)])
+ assert g * (1, 1) == g
with pytest.raises(TypeError) as excinfo:
assert g * object()
- assert 'unsupported operand' in str(excinfo.value)
+ assert "unsupported operand" in str(excinfo.value)
def test__truediv__(self):
- g = GlyphCoordinates([(1,2)])
- assert g / 2 == GlyphCoordinates([(.5,1)])
- assert g / (1, 2) == GlyphCoordinates([(1,1)])
+ g = GlyphCoordinates([(1, 2)])
+ assert g / 2 == GlyphCoordinates([(0.5, 1)])
+ assert g / (1, 2) == GlyphCoordinates([(1, 1)])
assert g / (1, 1) == g
with pytest.raises(TypeError) as excinfo:
assert g / object()
- assert 'unsupported operand' in str(excinfo.value)
+ assert "unsupported operand" in str(excinfo.value)
def test__iadd__(self):
- g = GlyphCoordinates([(1,2)])
- g += (.5,0)
+ g = GlyphCoordinates([(1, 2)])
+ g += (0.5, 0)
assert g == GlyphCoordinates([(1.5, 2.0)])
- g2 = GlyphCoordinates([(3,4)])
+ g2 = GlyphCoordinates([(3, 4)])
g += g2
assert g == GlyphCoordinates([(4.5, 6.0)])
def test__isub__(self):
- g = GlyphCoordinates([(1,2)])
- g -= (.5, 0)
+ g = GlyphCoordinates([(1, 2)])
+ g -= (0.5, 0)
assert g == GlyphCoordinates([(0.5, 2.0)])
- g2 = GlyphCoordinates([(3,4)])
+ g2 = GlyphCoordinates([(3, 4)])
g -= g2
assert g == GlyphCoordinates([(-2.5, -2.0)])
def __test__imul__(self):
- g = GlyphCoordinates([(1,2)])
- g *= (2,.5)
+ g = GlyphCoordinates([(1, 2)])
+ g *= (2, 0.5)
g *= 2
assert g == GlyphCoordinates([(4.0, 2.0)])
- g = GlyphCoordinates([(1,2)])
+ g = GlyphCoordinates([(1, 2)])
g *= 2
assert g == GlyphCoordinates([(2, 4)])
def test__itruediv__(self):
- g = GlyphCoordinates([(1,3)])
- g /= (.5,1.5)
+ g = GlyphCoordinates([(1, 3)])
+ g /= (0.5, 1.5)
g /= 2
assert g == GlyphCoordinates([(1.0, 1.0)])
def test__bool__(self):
g = GlyphCoordinates([])
assert bool(g) == False
- g = GlyphCoordinates([(0,0), (0.,0)])
+ g = GlyphCoordinates([(0, 0), (0.0, 0)])
assert bool(g) == True
- g = GlyphCoordinates([(0,0), (1,0)])
+ g = GlyphCoordinates([(0, 0), (1, 0)])
assert bool(g) == True
- g = GlyphCoordinates([(0,.5), (0,0)])
+ g = GlyphCoordinates([(0, 0.5), (0, 0)])
assert bool(g) == True
def test_double_precision_float(self):
@@ -179,21 +182,21 @@ class GlyphCoordinatesTest(object):
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-DATA_DIR = os.path.join(CURR_DIR, 'data')
+DATA_DIR = os.path.join(CURR_DIR, "data")
GLYF_TTX = os.path.join(DATA_DIR, "_g_l_y_f_outline_flag_bit6.ttx")
GLYF_BIN = os.path.join(DATA_DIR, "_g_l_y_f_outline_flag_bit6.glyf.bin")
HEAD_BIN = os.path.join(DATA_DIR, "_g_l_y_f_outline_flag_bit6.head.bin")
LOCA_BIN = os.path.join(DATA_DIR, "_g_l_y_f_outline_flag_bit6.loca.bin")
MAXP_BIN = os.path.join(DATA_DIR, "_g_l_y_f_outline_flag_bit6.maxp.bin")
+INST_TTX = os.path.join(DATA_DIR, "_g_l_y_f_instructions.ttx")
def strip_ttLibVersion(string):
- return re.sub(' ttLibVersion=".*"', '', string)
+ return re.sub(' ttLibVersion=".*"', "", string)
class GlyfTableTest(unittest.TestCase):
-
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@@ -203,26 +206,26 @@ class GlyfTableTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- with open(GLYF_BIN, 'rb') as f:
+ with open(GLYF_BIN, "rb") as f:
cls.glyfData = f.read()
- with open(HEAD_BIN, 'rb') as f:
+ with open(HEAD_BIN, "rb") as f:
cls.headData = f.read()
- with open(LOCA_BIN, 'rb') as f:
+ with open(LOCA_BIN, "rb") as f:
cls.locaData = f.read()
- with open(MAXP_BIN, 'rb') as f:
+ with open(MAXP_BIN, "rb") as f:
cls.maxpData = f.read()
- with open(GLYF_TTX, 'r') as f:
+ with open(GLYF_TTX, "r") as f:
cls.glyfXML = strip_ttLibVersion(f.read()).splitlines()
def test_toXML(self):
font = TTFont(sfntVersion="\x00\x01\x00\x00")
- glyfTable = font['glyf'] = newTable('glyf')
- font['head'] = newTable('head')
- font['loca'] = newTable('loca')
- font['maxp'] = newTable('maxp')
- font['maxp'].decompile(self.maxpData, font)
- font['head'].decompile(self.headData, font)
- font['loca'].decompile(self.locaData, font)
+ glyfTable = font["glyf"] = newTable("glyf")
+ font["head"] = newTable("head")
+ font["loca"] = newTable("loca")
+ font["maxp"] = newTable("maxp")
+ font["maxp"].decompile(self.maxpData, font)
+ font["head"].decompile(self.headData, font)
+ font["loca"].decompile(self.locaData, font)
glyfTable.decompile(self.glyfData, font)
out = StringIO()
font.saveXML(out)
@@ -232,10 +235,22 @@ class GlyfTableTest(unittest.TestCase):
def test_fromXML(self):
font = TTFont(sfntVersion="\x00\x01\x00\x00")
font.importXML(GLYF_TTX)
- glyfTable = font['glyf']
+ glyfTable = font["glyf"]
glyfData = glyfTable.compile(font)
self.assertEqual(glyfData, self.glyfData)
+ def test_instructions_roundtrip(self):
+ font = TTFont(sfntVersion="\x00\x01\x00\x00")
+ font.importXML(INST_TTX)
+ glyfTable = font["glyf"]
+ self.glyfData = glyfTable.compile(font)
+ out = StringIO()
+ font.saveXML(out)
+ glyfXML = strip_ttLibVersion(out.getvalue()).splitlines()
+ with open(INST_TTX, "r") as f:
+ origXML = strip_ttLibVersion(f.read()).splitlines()
+ self.assertEqual(glyfXML, origXML)
+
def test_recursiveComponent(self):
glyphSet = {}
pen_dummy = TTGlyphPen(glyphSet)
@@ -250,7 +265,9 @@ class GlyfTableTest(unittest.TestCase):
glyph_B = pen_B.glyph()
glyphSet["A"] = glyph_A
glyphSet["B"] = glyph_B
- with self.assertRaisesRegex(TTLibError, "glyph '.' contains a recursive component reference"):
+ with self.assertRaisesRegex(
+ TTLibError, "glyph '.' contains a recursive component reference"
+ ):
glyph_A.getCoordinates(glyphSet)
def test_trim_remove_hinting_composite_glyph(self):
@@ -260,7 +277,7 @@ class GlyfTableTest(unittest.TestCase):
pen.addComponent("dummy", (1, 0, 0, 1, 0, 0))
composite = pen.glyph()
p = ttProgram.Program()
- p.fromAssembly(['SVTCA[0]'])
+ p.fromAssembly(["SVTCA[0]"])
composite.program = p
glyphSet["composite"] = composite
@@ -294,22 +311,24 @@ class GlyfTableTest(unittest.TestCase):
# glyph00003 contains a bit 6 flag on the first point,
# which triggered the issue
font.importXML(GLYF_TTX)
- glyfTable = font['glyf']
+ glyfTable = font["glyf"]
pen = RecordingPen()
glyfTable["glyph00003"].draw(pen, glyfTable=glyfTable)
- expected = [('moveTo', ((501, 1430),)),
- ('lineTo', ((683, 1430),)),
- ('lineTo', ((1172, 0),)),
- ('lineTo', ((983, 0),)),
- ('lineTo', ((591, 1193),)),
- ('lineTo', ((199, 0),)),
- ('lineTo', ((12, 0),)),
- ('closePath', ()),
- ('moveTo', ((249, 514),)),
- ('lineTo', ((935, 514),)),
- ('lineTo', ((935, 352),)),
- ('lineTo', ((249, 352),)),
- ('closePath', ())]
+ expected = [
+ ("moveTo", ((501, 1430),)),
+ ("lineTo", ((683, 1430),)),
+ ("lineTo", ((1172, 0),)),
+ ("lineTo", ((983, 0),)),
+ ("lineTo", ((591, 1193),)),
+ ("lineTo", ((199, 0),)),
+ ("lineTo", ((12, 0),)),
+ ("closePath", ()),
+ ("moveTo", ((249, 514),)),
+ ("lineTo", ((935, 514),)),
+ ("lineTo", ((935, 352),)),
+ ("lineTo", ((249, 352),)),
+ ("closePath", ()),
+ ]
self.assertEqual(pen.value, expected)
def test_bit6_draw_to_pointpen(self):
@@ -318,22 +337,22 @@ class GlyfTableTest(unittest.TestCase):
# glyph00003 contains a bit 6 flag on the first point
# which triggered the issue
font.importXML(GLYF_TTX)
- glyfTable = font['glyf']
+ glyfTable = font["glyf"]
pen = RecordingPointPen()
glyfTable["glyph00003"].drawPoints(pen, glyfTable=glyfTable)
expected = [
- ('beginPath', (), {}),
- ('addPoint', ((501, 1430), 'line', False, None), {}),
- ('addPoint', ((683, 1430), 'line', False, None), {}),
- ('addPoint', ((1172, 0), 'line', False, None), {}),
- ('addPoint', ((983, 0), 'line', False, None), {}),
+ ("beginPath", (), {}),
+ ("addPoint", ((501, 1430), "line", False, None), {}),
+ ("addPoint", ((683, 1430), "line", False, None), {}),
+ ("addPoint", ((1172, 0), "line", False, None), {}),
+ ("addPoint", ((983, 0), "line", False, None), {}),
]
- self.assertEqual(pen.value[:len(expected)], expected)
+ self.assertEqual(pen.value[: len(expected)], expected)
def test_draw_vs_drawpoints(self):
font = TTFont(sfntVersion="\x00\x01\x00\x00")
font.importXML(GLYF_TTX)
- glyfTable = font['glyf']
+ glyfTable = font["glyf"]
pen1 = RecordingPen()
pen2 = RecordingPen()
glyfTable["glyph00003"].draw(pen1, glyfTable)
@@ -343,12 +362,12 @@ class GlyfTableTest(unittest.TestCase):
def test_compile_empty_table(self):
font = TTFont(sfntVersion="\x00\x01\x00\x00")
font.importXML(GLYF_TTX)
- glyfTable = font['glyf']
+ glyfTable = font["glyf"]
# set all glyphs to zero contours
glyfTable.glyphs = {glyphName: Glyph() for glyphName in font.getGlyphOrder()}
glyfData = glyfTable.compile(font)
self.assertEqual(glyfData, b"\x00")
- self.assertEqual(list(font["loca"]), [0] * (font["maxp"].numGlyphs+1))
+ self.assertEqual(list(font["loca"]), [0] * (font["maxp"].numGlyphs + 1))
def test_decompile_empty_table(self):
font = TTFont()
@@ -372,16 +391,36 @@ class GlyfTableTest(unittest.TestCase):
font["glyf"] = newTable("glyf")
font["glyf"].decompile(b"\x00", font)
font["hmtx"] = newTable("hmtx")
- font["hmtx"].metrics = {".notdef": (100,0)}
+ font["hmtx"].metrics = {".notdef": (100, 0)}
font["head"] = newTable("head")
font["head"].unitsPerEm = 1000
- self.assertEqual(
- font["glyf"].getPhantomPoints(".notdef", font, 0),
- [(0, 0), (100, 0), (0, 0), (0, -1000)]
- )
+ with pytest.deprecated_call():
+ self.assertEqual(
+ font["glyf"].getPhantomPoints(".notdef", font, 0),
+ [(0, 0), (100, 0), (0, 0), (0, -1000)],
+ )
-class GlyphTest:
+ def test_getGlyphID(self):
+ # https://github.com/fonttools/fonttools/pull/3301#discussion_r1360405861
+ glyf = newTable("glyf")
+ glyf.setGlyphOrder([".notdef", "a", "b"])
+ glyf.glyphs = {}
+ for glyphName in glyf.glyphOrder:
+ glyf[glyphName] = Glyph()
+
+ assert glyf.getGlyphID("a") == 1
+ with pytest.raises(ValueError):
+ glyf.getGlyphID("c")
+
+ glyf["c"] = Glyph()
+ assert glyf.getGlyphID("c") == 3
+
+ del glyf["b"]
+ assert glyf.getGlyphID("c") == 2
+
+
+class GlyphTest:
def test_getCoordinates(self):
glyphSet = {}
pen = TTGlyphPen(glyphSet)
@@ -472,18 +511,30 @@ class GlyphTest:
assert flags == array.array("B", [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert list(sum(coords, ())) == pytest.approx(
[
- 0, 0,
- 100, 0,
- 100, 100,
- 0, 100,
- 100, 100,
- 200, 100,
- 200, 200,
- 100, 200,
- 200, 200,
- 270.7107, 270.7107,
- 200.0, 341.4214,
- 129.2893, 270.7107,
+ 0,
+ 0,
+ 100,
+ 0,
+ 100,
+ 100,
+ 0,
+ 100,
+ 100,
+ 100,
+ 200,
+ 100,
+ 200,
+ 200,
+ 100,
+ 200,
+ 200,
+ 200,
+ 270.7107,
+ 270.7107,
+ 200.0,
+ 341.4214,
+ 129.2893,
+ 270.7107,
]
)
@@ -513,7 +564,6 @@ class GlyphTest:
class GlyphComponentTest:
-
def test_toXML_no_transform(self):
comp = GlyphComponent()
comp.glyphName = "a"
@@ -625,7 +675,7 @@ class GlyphComponentTest:
assert hasattr(comp, "transform")
for value, expected in zip(
itertools.chain(*comp.transform),
- [0.5999756, -0.2000122, 0.2000122, 0.2999878]
+ [0.5999756, -0.2000122, 0.2000122, 0.2999878],
):
assert value == pytest.approx(expected)
@@ -652,7 +702,349 @@ class GlyphComponentTest:
assert (comp.firstPt, comp.secondPt) == (1, 2)
assert not hasattr(comp, "transform")
+ def test_trim_varComposite_glyph(self):
+ font_path = os.path.join(DATA_DIR, "..", "..", "data", "varc-ac00-ac01.ttf")
+ font = TTFont(font_path)
+ glyf = font["glyf"]
+
+ glyf.glyphs["uniAC00"].trim()
+ glyf.glyphs["uniAC01"].trim()
+
+ font_path = os.path.join(DATA_DIR, "..", "..", "data", "varc-6868.ttf")
+ font = TTFont(font_path)
+ glyf = font["glyf"]
+
+ glyf.glyphs["uni6868"].trim()
+
+ def test_varComposite_basic(self):
+ font_path = os.path.join(DATA_DIR, "..", "..", "data", "varc-ac00-ac01.ttf")
+ font = TTFont(font_path)
+ tables = [
+ table_tag
+ for table_tag in font.keys()
+ if table_tag not in {"head", "maxp", "hhea"}
+ ]
+ xml = StringIO()
+ font.saveXML(xml)
+ xml1 = StringIO()
+ font.saveXML(xml1, tables=tables)
+ xml.seek(0)
+ font = TTFont()
+ font.importXML(xml)
+ ttf = BytesIO()
+ font.save(ttf)
+ ttf.seek(0)
+ font = TTFont(ttf)
+ xml2 = StringIO()
+ font.saveXML(xml2, tables=tables)
+ assert xml1.getvalue() == xml2.getvalue()
+
+ font_path = os.path.join(DATA_DIR, "..", "..", "data", "varc-6868.ttf")
+ font = TTFont(font_path)
+ tables = [
+ table_tag
+ for table_tag in font.keys()
+ if table_tag not in {"head", "maxp", "hhea", "name", "fvar"}
+ ]
+ xml = StringIO()
+ font.saveXML(xml)
+ xml1 = StringIO()
+ font.saveXML(xml1, tables=tables)
+ xml.seek(0)
+ font = TTFont()
+ font.importXML(xml)
+ ttf = BytesIO()
+ font.save(ttf)
+ ttf.seek(0)
+ font = TTFont(ttf)
+ xml2 = StringIO()
+ font.saveXML(xml2, tables=tables)
+ assert xml1.getvalue() == xml2.getvalue()
+
+
+class GlyphCubicTest:
+ def test_roundtrip(self):
+ font_path = os.path.join(DATA_DIR, "NotoSans-VF-cubic.subset.ttf")
+ font = TTFont(font_path)
+ tables = [table_tag for table_tag in font.keys() if table_tag not in {"head"}]
+ xml = StringIO()
+ font.saveXML(xml)
+ xml1 = StringIO()
+ font.saveXML(xml1, tables=tables)
+ xml.seek(0)
+ font = TTFont()
+ font.importXML(xml)
+ ttf = BytesIO()
+ font.save(ttf)
+ ttf.seek(0)
+ font = TTFont(ttf)
+ xml2 = StringIO()
+ font.saveXML(xml2, tables=tables)
+ assert xml1.getvalue() == xml2.getvalue()
+
+ def test_no_oncurves(self):
+ glyph = Glyph()
+ glyph.numberOfContours = 1
+ glyph.coordinates = GlyphCoordinates(
+ [(0, 0), (1, 0), (1, 0), (1, 1), (1, 1), (0, 1), (0, 1), (0, 0)]
+ )
+ glyph.flags = array.array("B", [flagCubic] * 8)
+ glyph.endPtsOfContours = [7]
+ glyph.program = ttProgram.Program()
+
+ for i in range(2):
+ if i == 1:
+ glyph.compile(None)
+
+ pen = RecordingPen()
+ glyph.draw(pen, None)
+
+ assert pen.value == [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((0, 0), (1, 0), (1, 0))),
+ ("curveTo", ((1, 0), (1, 1), (1, 1))),
+ ("curveTo", ((1, 1), (0, 1), (0, 1))),
+ ("curveTo", ((0, 1), (0, 0), (0, 0))),
+ ("closePath", ()),
+ ]
+
+ def test_spline(self):
+ glyph = Glyph()
+ glyph.numberOfContours = 1
+ glyph.coordinates = GlyphCoordinates(
+ [(0, 0), (1, 0), (1, 0), (1, 1), (1, 1), (0, 1), (0, 1)]
+ )
+ glyph.flags = array.array("B", [flagOnCurve] + [flagCubic] * 6)
+ glyph.endPtsOfContours = [6]
+ glyph.program = ttProgram.Program()
+
+ for i in range(2):
+ if i == 1:
+ glyph.compile(None)
+
+ pen = RecordingPen()
+ glyph.draw(pen, None)
+
+ assert pen.value == [
+ ("moveTo", ((0, 0),)),
+ ("curveTo", ((1, 0), (1, 0), (1.0, 0.5))),
+ ("curveTo", ((1, 1), (1, 1), (0.5, 1.0))),
+ ("curveTo", ((0, 1), (0, 1), (0, 0))),
+ ("closePath", ()),
+ ]
+
+
+def build_interpolatable_glyphs(contours, *transforms):
+ # given a list of lists of (point, flag) tuples (one per contour), build a Glyph
+ # then make len(transforms) copies transformed accordingly, and return a
+ # list of such interpolatable glyphs.
+ glyph1 = Glyph()
+ glyph1.numberOfContours = len(contours)
+ glyph1.coordinates = GlyphCoordinates(
+ [pt for contour in contours for pt, _flag in contour]
+ )
+ glyph1.flags = array.array(
+ "B", [flag for contour in contours for _pt, flag in contour]
+ )
+ glyph1.endPtsOfContours = [
+ sum(len(contour) for contour in contours[: i + 1]) - 1
+ for i in range(len(contours))
+ ]
+ result = [glyph1]
+ for t in transforms:
+ glyph = deepcopy(glyph1)
+ glyph.coordinates.transform((t[0:2], t[2:4]))
+ glyph.coordinates.translate(t[4:6])
+ result.append(glyph)
+ return result
+
+
+def test_dropImpliedOnCurvePoints_all_quad_off_curves():
+ # Two interpolatable glyphs with same structure, the coordinates of one are 2x the
+ # other; all the on-curve points are impliable in each one, thus are dropped from
+ # both, leaving contours with off-curve points only.
+ glyph1, glyph2 = build_interpolatable_glyphs(
+ [
+ [
+ ((0, 1), flagOnCurve),
+ ((1, 1), 0),
+ ((1, 0), flagOnCurve),
+ ((1, -1), 0),
+ ((0, -1), flagOnCurve),
+ ((-1, -1), 0),
+ ((-1, 0), flagOnCurve),
+ ((-1, 1), 0),
+ ],
+ [
+ ((0, 2), flagOnCurve),
+ ((2, 2), 0),
+ ((2, 0), flagOnCurve),
+ ((2, -2), 0),
+ ((0, -2), flagOnCurve),
+ ((-2, -2), 0),
+ ((-2, 0), flagOnCurve),
+ ((-2, 2), 0),
+ ],
+ ],
+ Transform().scale(2.0),
+ )
+ # also add an empty glyph (will be ignored); we use this trick for 'sparse' masters
+ glyph3 = Glyph()
+ glyph3.numberOfContours = 0
+
+ assert dropImpliedOnCurvePoints(glyph1, glyph2, glyph3) == {
+ 0,
+ 2,
+ 4,
+ 6,
+ 8,
+ 10,
+ 12,
+ 14,
+ }
+
+ assert glyph1.flags == glyph2.flags == array.array("B", [0, 0, 0, 0, 0, 0, 0, 0])
+ assert glyph1.coordinates == GlyphCoordinates(
+ [(1, 1), (1, -1), (-1, -1), (-1, 1), (2, 2), (2, -2), (-2, -2), (-2, 2)]
+ )
+ assert glyph2.coordinates == GlyphCoordinates(
+ [(2, 2), (2, -2), (-2, -2), (-2, 2), (4, 4), (4, -4), (-4, -4), (-4, 4)]
+ )
+ assert glyph1.endPtsOfContours == glyph2.endPtsOfContours == [3, 7]
+ assert glyph3.numberOfContours == 0
+
+
+def test_dropImpliedOnCurvePoints_all_cubic_off_curves():
+ # same as above this time using cubic curves
+ glyph1, glyph2 = build_interpolatable_glyphs(
+ [
+ [
+ ((0, 1), flagOnCurve),
+ ((1, 1), flagCubic),
+ ((1, 1), flagCubic),
+ ((1, 0), flagOnCurve),
+ ((1, -1), flagCubic),
+ ((1, -1), flagCubic),
+ ((0, -1), flagOnCurve),
+ ((-1, -1), flagCubic),
+ ((-1, -1), flagCubic),
+ ((-1, 0), flagOnCurve),
+ ((-1, 1), flagCubic),
+ ((-1, 1), flagCubic),
+ ]
+ ],
+ Transform().translate(10.0),
+ )
+ glyph3 = Glyph()
+ glyph3.numberOfContours = 0
+
+ assert dropImpliedOnCurvePoints(glyph1, glyph2, glyph3) == {0, 3, 6, 9}
+
+ assert glyph1.flags == glyph2.flags == array.array("B", [flagCubic] * 8)
+ assert glyph1.coordinates == GlyphCoordinates(
+ [(1, 1), (1, 1), (1, -1), (1, -1), (-1, -1), (-1, -1), (-1, 1), (-1, 1)]
+ )
+ assert glyph2.coordinates == GlyphCoordinates(
+ [(11, 1), (11, 1), (11, -1), (11, -1), (9, -1), (9, -1), (9, 1), (9, 1)]
+ )
+ assert glyph1.endPtsOfContours == glyph2.endPtsOfContours == [7]
+ assert glyph3.numberOfContours == 0
+
+
+def test_dropImpliedOnCurvePoints_not_all_impliable():
+    # same input as in test_dropImpliedOnCurvePoints_all_quad_off_curves but we
+    # perturb one of the glyphs such that the 2nd on-curve is no longer half-way
+ # between the neighboring off-curves.
+ glyph1, glyph2, glyph3 = build_interpolatable_glyphs(
+ [
+ [
+ ((0, 1), flagOnCurve),
+ ((1, 1), 0),
+ ((1, 0), flagOnCurve),
+ ((1, -1), 0),
+ ((0, -1), flagOnCurve),
+ ((-1, -1), 0),
+ ((-1, 0), flagOnCurve),
+ ((-1, 1), 0),
+ ]
+ ],
+ Transform().translate(10.0),
+ Transform().translate(10.0).scale(2.0),
+ )
+ p2 = glyph2.coordinates[2]
+ glyph2.coordinates[2] = (p2[0] + 2.0, p2[1] - 2.0)
+
+ assert dropImpliedOnCurvePoints(glyph1, glyph2, glyph3) == {
+ 0,
+ # 2, this is NOT implied because it's no longer impliable for all glyphs
+ 4,
+ 6,
+ }
+
+ assert glyph2.flags == array.array("B", [0, flagOnCurve, 0, 0, 0])
+
+
+def test_dropImpliedOnCurvePoints_all_empty_glyphs():
+ glyph1 = Glyph()
+ glyph1.numberOfContours = 0
+ glyph2 = Glyph()
+ glyph2.numberOfContours = 0
+
+ assert dropImpliedOnCurvePoints(glyph1, glyph2) == set()
+
+
+def test_dropImpliedOnCurvePoints_incompatible_number_of_contours():
+ glyph1 = Glyph()
+ glyph1.numberOfContours = 1
+ glyph1.endPtsOfContours = [3]
+ glyph1.flags = array.array("B", [1, 1, 1, 1])
+ glyph1.coordinates = GlyphCoordinates([(0, 0), (1, 1), (2, 2), (3, 3)])
+
+ glyph2 = Glyph()
+ glyph2.numberOfContours = 2
+ glyph2.endPtsOfContours = [1, 3]
+ glyph2.flags = array.array("B", [1, 1, 1, 1])
+ glyph2.coordinates = GlyphCoordinates([(0, 0), (1, 1), (2, 2), (3, 3)])
+
+ with pytest.raises(ValueError, match="Incompatible numberOfContours"):
+ dropImpliedOnCurvePoints(glyph1, glyph2)
+
+
+def test_dropImpliedOnCurvePoints_incompatible_flags():
+ glyph1 = Glyph()
+ glyph1.numberOfContours = 1
+ glyph1.endPtsOfContours = [3]
+ glyph1.flags = array.array("B", [1, 1, 1, 1])
+ glyph1.coordinates = GlyphCoordinates([(0, 0), (1, 1), (2, 2), (3, 3)])
+
+ glyph2 = Glyph()
+ glyph2.numberOfContours = 1
+ glyph2.endPtsOfContours = [3]
+ glyph2.flags = array.array("B", [0, 0, 0, 0])
+ glyph2.coordinates = GlyphCoordinates([(0, 0), (1, 1), (2, 2), (3, 3)])
+
+ with pytest.raises(ValueError, match="Incompatible flags"):
+ dropImpliedOnCurvePoints(glyph1, glyph2)
+
+
+def test_dropImpliedOnCurvePoints_incompatible_endPtsOfContours():
+ glyph1 = Glyph()
+ glyph1.numberOfContours = 2
+ glyph1.endPtsOfContours = [2, 6]
+ glyph1.flags = array.array("B", [1, 1, 1, 1, 1, 1, 1])
+ glyph1.coordinates = GlyphCoordinates([(i, i) for i in range(7)])
+
+ glyph2 = Glyph()
+ glyph2.numberOfContours = 2
+ glyph2.endPtsOfContours = [3, 6]
+ glyph2.flags = array.array("B", [1, 1, 1, 1, 1, 1, 1])
+ glyph2.coordinates = GlyphCoordinates([(i, i) for i in range(7)])
+
+ with pytest.raises(ValueError, match="Incompatible endPtsOfContours"):
+ dropImpliedOnCurvePoints(glyph1, glyph2)
+
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_g_v_a_r_test.py b/Tests/ttLib/tables/_g_v_a_r_test.py
index 077bb639..4fe3ae96 100644
--- a/Tests/ttLib/tables/_g_v_a_r_test.py
+++ b/Tests/ttLib/tables/_g_v_a_r_test.py
@@ -9,12 +9,12 @@ gvarClass = getTableClass("gvar")
GVAR_DATA = deHexStr(
- "0001 0000 " # 0: majorVersion=1 minorVersion=0
- "0002 0000 " # 4: axisCount=2 sharedTupleCount=0
- "0000001C " # 8: offsetToSharedTuples=28
- "0003 0000 " # 12: glyphCount=3 flags=0
- "0000001C " # 16: offsetToGlyphVariationData=28
- "0000 0000 000C 002F " # 20: offsets=[0,0,12,47], times 2: [0,0,24,94],
+ "0001 0000 " # 0: majorVersion=1 minorVersion=0
+ "0002 0000 " # 4: axisCount=2 sharedTupleCount=0
+ "0000001C " # 8: offsetToSharedTuples=28
+ "0003 0000 " # 12: glyphCount=3 flags=0
+ "0000001C " # 16: offsetToGlyphVariationData=28
+ "0000 0000 000C 002F " # 20: offsets=[0,0,12,47], times 2: [0,0,24,94],
# # +offsetToGlyphVariationData: [28,28,52,122]
#
# 28: Glyph variation data for glyph #0, ".notdef"
@@ -23,56 +23,57 @@ GVAR_DATA = deHexStr(
#
# 28: Glyph variation data for glyph #1, "space"
# ----------------------------------------------
- "8001 000C " # 28: tupleVariationCount=1|TUPLES_SHARE_POINT_NUMBERS, offsetToData=12(+28=40)
- "000A " # 32: tvHeader[0].variationDataSize=10
- "8000 " # 34: tvHeader[0].tupleIndex=EMBEDDED_PEAK
- "0000 2CCD " # 36: tvHeader[0].peakTuple={wght:0.0, wdth:0.7}
- "00 " # 40: all points
- "03 01 02 03 04 " # 41: deltaX=[1, 2, 3, 4]
- "03 0b 16 21 2C " # 46: deltaY=[11, 22, 33, 44]
- "00 " # 51: padding
+ "8001 000C " # 28: tupleVariationCount=1|TUPLES_SHARE_POINT_NUMBERS, offsetToData=12(+28=40)
+ "000A " # 32: tvHeader[0].variationDataSize=10
+ "8000 " # 34: tvHeader[0].tupleIndex=EMBEDDED_PEAK
+ "0000 2CCD " # 36: tvHeader[0].peakTuple={wght:0.0, wdth:0.7}
+ "00 " # 40: all points
+ "03 01 02 03 04 " # 41: deltaX=[1, 2, 3, 4]
+ "03 0b 16 21 2C " # 46: deltaY=[11, 22, 33, 44]
+ "00 " # 51: padding
#
# 52: Glyph variation data for glyph #2, "I"
# ------------------------------------------
- "8002 001c " # 52: tupleVariationCount=2|TUPLES_SHARE_POINT_NUMBERS, offsetToData=28(+52=80)
- "0012 " # 56: tvHeader[0].variationDataSize=18
- "C000 " # 58: tvHeader[0].tupleIndex=EMBEDDED_PEAK|INTERMEDIATE_REGION
- "2000 0000 " # 60: tvHeader[0].peakTuple={wght:0.5, wdth:0.0}
- "0000 0000 " # 64: tvHeader[0].intermediateStart={wght:0.0, wdth:0.0}
- "4000 0000 " # 68: tvHeader[0].intermediateEnd={wght:1.0, wdth:0.0}
- "0016 " # 72: tvHeader[1].variationDataSize=22
- "A000 " # 74: tvHeader[1].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINTS
- "C000 3333 " # 76: tvHeader[1].peakTuple={wght:-1.0, wdth:0.8}
- "00 " # 80: all points
- "07 03 01 04 01 " # 81: deltaX.len=7, deltaX=[3, 1, 4, 1,
- "05 09 02 06 " # 86: 5, 9, 2, 6]
- "07 03 01 04 01 " # 90: deltaY.len=7, deltaY=[3, 1, 4, 1,
- "05 09 02 06 " # 95: 5, 9, 2, 6]
- "06 " # 99: 6 points
- "05 00 01 03 01 " # 100: runLen=5(+1=6); delta-encoded run=[0, 1, 4, 5,
- "01 01 " # 105: 6, 7]
+ "8002 001c " # 52: tupleVariationCount=2|TUPLES_SHARE_POINT_NUMBERS, offsetToData=28(+52=80)
+ "0012 " # 56: tvHeader[0].variationDataSize=18
+ "C000 " # 58: tvHeader[0].tupleIndex=EMBEDDED_PEAK|INTERMEDIATE_REGION
+ "2000 0000 " # 60: tvHeader[0].peakTuple={wght:0.5, wdth:0.0}
+ "0000 0000 " # 64: tvHeader[0].intermediateStart={wght:0.0, wdth:0.0}
+ "4000 0000 " # 68: tvHeader[0].intermediateEnd={wght:1.0, wdth:0.0}
+ "0016 " # 72: tvHeader[1].variationDataSize=22
+ "A000 " # 74: tvHeader[1].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINTS
+ "C000 3333 " # 76: tvHeader[1].peakTuple={wght:-1.0, wdth:0.8}
+ "00 " # 80: all points
+ "07 03 01 04 01 " # 81: deltaX.len=7, deltaX=[3, 1, 4, 1,
+ "05 09 02 06 " # 86: 5, 9, 2, 6]
+ "07 03 01 04 01 " # 90: deltaY.len=7, deltaY=[3, 1, 4, 1,
+ "05 09 02 06 " # 95: 5, 9, 2, 6]
+ "06 " # 99: 6 points
+ "05 00 01 03 01 " # 100: runLen=5(+1=6); delta-encoded run=[0, 1, 4, 5,
+ "01 01 " # 105: 6, 7]
"05 f8 07 fc 03 fe 01 " # 107: deltaX.len=5, deltaX=[-8,7,-4,3,-2,1]
"05 a8 4d 2c 21 ea 0b " # 114: deltaY.len=5, deltaY=[-88,77,44,33,-22,11]
- "00" # 121: padding
-) # 122: <end>
+ "00" # 121: padding
+) # 122: <end>
assert len(GVAR_DATA) == 122
GVAR_VARIATIONS = {
- ".notdef": [
- ],
+ ".notdef": [],
"space": [
TupleVariation(
- {"wdth": (0.0, 0.7000122, 0.7000122)},
- [(1, 11), (2, 22), (3, 33), (4, 44)]),
+ {"wdth": (0.0, 0.7000122, 0.7000122)}, [(1, 11), (2, 22), (3, 33), (4, 44)]
+ ),
],
"I": [
TupleVariation(
{"wght": (0.0, 0.5, 1.0)},
- [(3,3), (1,1), (4,4), (1,1), (5,5), (9,9), (2,2), (6,6)]),
+ [(3, 3), (1, 1), (4, 4), (1, 1), (5, 5), (9, 9), (2, 2), (6, 6)],
+ ),
TupleVariation(
{"wght": (-1.0, -1.0, 0.0), "wdth": (0.0, 0.7999878, 0.7999878)},
- [(-8,-88), (7,77), None, None, (-4,44), (3,33), (-2,-22), (1,11)]),
+ [(-8, -88), (7, 77), None, None, (-4, 44), (3, 33), (-2, -22), (1, 11)],
+ ),
],
}
@@ -81,7 +82,7 @@ GVAR_XML = [
'<version value="1"/>',
'<reserved value="0"/>',
'<glyphVariations glyph="I">',
- ' <tuple>',
+ " <tuple>",
' <coord axis="wght" min="0.0" value="0.5" max="1.0"/>',
' <delta pt="0" x="3" y="3"/>',
' <delta pt="1" x="1" y="1"/>',
@@ -91,8 +92,8 @@ GVAR_XML = [
' <delta pt="5" x="9" y="9"/>',
' <delta pt="6" x="2" y="2"/>',
' <delta pt="7" x="6" y="6"/>',
- ' </tuple>',
- ' <tuple>',
+ " </tuple>",
+ " <tuple>",
' <coord axis="wght" value="-1.0"/>',
' <coord axis="wdth" value="0.8"/>',
' <delta pt="0" x="-8" y="-88"/>',
@@ -101,125 +102,137 @@ GVAR_XML = [
' <delta pt="5" x="3" y="33"/>',
' <delta pt="6" x="-2" y="-22"/>',
' <delta pt="7" x="1" y="11"/>',
- ' </tuple>',
- '</glyphVariations>',
+ " </tuple>",
+ "</glyphVariations>",
'<glyphVariations glyph="space">',
- ' <tuple>',
+ " <tuple>",
' <coord axis="wdth" value="0.7"/>',
' <delta pt="0" x="1" y="11"/>',
' <delta pt="1" x="2" y="22"/>',
' <delta pt="2" x="3" y="33"/>',
' <delta pt="3" x="4" y="44"/>',
- ' </tuple>',
- '</glyphVariations>',
+ " </tuple>",
+ "</glyphVariations>",
]
GVAR_DATA_EMPTY_VARIATIONS = deHexStr(
- "0001 0000 " # 0: majorVersion=1 minorVersion=0
- "0002 0000 " # 4: axisCount=2 sharedTupleCount=0
- "0000001c " # 8: offsetToSharedTuples=28
- "0003 0000 " # 12: glyphCount=3 flags=0
- "0000001c " # 16: offsetToGlyphVariationData=28
+ "0001 0000 " # 0: majorVersion=1 minorVersion=0
+ "0002 0000 " # 4: axisCount=2 sharedTupleCount=0
+ "0000001c " # 8: offsetToSharedTuples=28
+ "0003 0000 " # 12: glyphCount=3 flags=0
+ "0000001c " # 16: offsetToGlyphVariationData=28
"0000 0000 0000 0000" # 20: offsets=[0, 0, 0, 0]
-) # 28: <end>
+) # 28: <end>
def hexencode(s):
- h = hexStr(s).upper()
- return ' '.join([h[i:i+2] for i in range(0, len(h), 2)])
+ h = hexStr(s).upper()
+ return " ".join([h[i : i + 2] for i in range(0, len(h), 2)])
class GVARTableTest(unittest.TestCase):
- def assertVariationsAlmostEqual(self, vars1, vars2):
- self.assertSetEqual(set(vars1.keys()), set(vars2.keys()))
- for glyphName, variations1 in vars1.items():
- variations2 = vars2[glyphName]
- self.assertEqual(len(variations1), len(variations2))
- for (v1, v2) in zip(variations1, variations2):
- self.assertSetEqual(set(v1.axes), set(v2.axes))
- for axisTag, support1 in v1.axes.items():
- support2 = v2.axes[axisTag]
- self.assertEqual(len(support1), len(support2))
- for s1, s2 in zip(support1, support2):
- self.assertAlmostEqual(s1, s2)
- self.assertEqual(v1.coordinates, v2.coordinates)
-
- def makeFont(self, variations):
- glyphs=[".notdef", "space", "I"]
- Axis = getTableModule("fvar").Axis
- Glyph = getTableModule("glyf").Glyph
- glyf, fvar, gvar = newTable("glyf"), newTable("fvar"), newTable("gvar")
- font = FakeFont(glyphs)
- font.tables = {"glyf": glyf, "gvar": gvar, "fvar": fvar}
- glyf.glyphs = {glyph: Glyph() for glyph in glyphs}
- glyf.glyphs["I"].coordinates = [(10, 10), (10, 20), (20, 20), (20, 10)]
- fvar.axes = [Axis(), Axis()]
- fvar.axes[0].axisTag, fvar.axes[1].axisTag = "wght", "wdth"
- gvar.variations = variations
- return font, gvar
-
- def test_compile(self):
- font, gvar = self.makeFont(GVAR_VARIATIONS)
- self.assertEqual(hexStr(gvar.compile(font)), hexStr(GVAR_DATA))
-
- def test_compile_noVariations(self):
- font, gvar = self.makeFont({})
- self.assertEqual(hexStr(gvar.compile(font)),
- hexStr(GVAR_DATA_EMPTY_VARIATIONS))
-
- def test_compile_emptyVariations(self):
- font, gvar = self.makeFont({".notdef": [], "space": [], "I": []})
- self.assertEqual(hexStr(gvar.compile(font)),
- hexStr(GVAR_DATA_EMPTY_VARIATIONS))
-
- def test_decompile(self):
- font, gvar = self.makeFont({})
- gvar.decompile(GVAR_DATA, font)
- self.assertVariationsAlmostEqual(gvar.variations, GVAR_VARIATIONS)
-
- def test_decompile_noVariations(self):
- font, gvar = self.makeFont({})
- gvar.decompile(GVAR_DATA_EMPTY_VARIATIONS, font)
- self.assertEqual(gvar.variations,
- {".notdef": [], "space": [], "I": []})
-
- def test_fromXML(self):
- font, gvar = self.makeFont({})
- for name, attrs, content in parseXML(GVAR_XML):
- gvar.fromXML(name, attrs, content, ttFont=font)
- self.assertVariationsAlmostEqual(
- gvar.variations,
- {g:v for g,v in GVAR_VARIATIONS.items() if v}
- )
-
- def test_toXML(self):
- font, gvar = self.makeFont(GVAR_VARIATIONS)
- self.assertEqual(getXML(gvar.toXML, font), GVAR_XML)
-
- def test_compileOffsets_shortFormat(self):
- self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0),
- gvarClass.compileOffsets_([0, 4, 0x1ff80]))
-
- def test_compileOffsets_longFormat(self):
- self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1),
- gvarClass.compileOffsets_([0, 4, 0xCAFEBEEF]))
-
- def test_decompileOffsets_shortFormat(self):
- decompileOffsets = gvarClass.decompileOffsets_
- data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb")
- self.assertEqual(
- [2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb],
- list(decompileOffsets(data, tableFormat=0, glyphCount=5)))
-
- def test_decompileOffsets_longFormat(self):
- decompileOffsets = gvarClass.decompileOffsets_
- data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb")
- self.assertEqual(
- [0x00112233, 0x44556677, 0x8899aabb],
- list(decompileOffsets(data, tableFormat=1, glyphCount=2)))
+ def assertVariationsAlmostEqual(self, vars1, vars2):
+ self.assertSetEqual(set(vars1.keys()), set(vars2.keys()))
+ for glyphName, variations1 in vars1.items():
+ variations2 = vars2[glyphName]
+ self.assertEqual(len(variations1), len(variations2))
+ for v1, v2 in zip(variations1, variations2):
+ self.assertSetEqual(set(v1.axes), set(v2.axes))
+ for axisTag, support1 in v1.axes.items():
+ support2 = v2.axes[axisTag]
+ self.assertEqual(len(support1), len(support2))
+ for s1, s2 in zip(support1, support2):
+ self.assertAlmostEqual(s1, s2)
+ self.assertEqual(v1.coordinates, v2.coordinates)
+
+ def makeFont(self, variations):
+ glyphs = [".notdef", "space", "I"]
+ Axis = getTableModule("fvar").Axis
+ Glyph = getTableModule("glyf").Glyph
+ glyf, fvar, gvar = newTable("glyf"), newTable("fvar"), newTable("gvar")
+ font = FakeFont(glyphs)
+ font.tables = {"glyf": glyf, "gvar": gvar, "fvar": fvar}
+ glyf.glyphs = {glyph: Glyph() for glyph in glyphs}
+ glyf.glyphs["I"].coordinates = [(10, 10), (10, 20), (20, 20), (20, 10)]
+ fvar.axes = [Axis(), Axis()]
+ fvar.axes[0].axisTag, fvar.axes[1].axisTag = "wght", "wdth"
+ gvar.variations = variations
+ return font, gvar
+
+ def test_compile(self):
+ font, gvar = self.makeFont(GVAR_VARIATIONS)
+ self.assertEqual(hexStr(gvar.compile(font)), hexStr(GVAR_DATA))
+
+ def test_compile_noVariations(self):
+ font, gvar = self.makeFont({})
+ self.assertEqual(hexStr(gvar.compile(font)), hexStr(GVAR_DATA_EMPTY_VARIATIONS))
+
+ def test_compile_emptyVariations(self):
+ font, gvar = self.makeFont({".notdef": [], "space": [], "I": []})
+ self.assertEqual(hexStr(gvar.compile(font)), hexStr(GVAR_DATA_EMPTY_VARIATIONS))
+
+ def test_decompile(self):
+ for lazy in (True, False, None):
+ with self.subTest(lazy=lazy):
+ font, gvar = self.makeFont({})
+ font.lazy = lazy
+ gvar.decompile(GVAR_DATA, font)
+
+ self.assertEqual(
+ all(callable(v) for v in gvar.variations.data.values()),
+ lazy is not False,
+ )
+
+ self.assertVariationsAlmostEqual(gvar.variations, GVAR_VARIATIONS)
+
+ def test_decompile_noVariations(self):
+ font, gvar = self.makeFont({})
+ gvar.decompile(GVAR_DATA_EMPTY_VARIATIONS, font)
+ self.assertEqual(gvar.variations, {".notdef": [], "space": [], "I": []})
+
+ def test_fromXML(self):
+ font, gvar = self.makeFont({})
+ for name, attrs, content in parseXML(GVAR_XML):
+ gvar.fromXML(name, attrs, content, ttFont=font)
+ self.assertVariationsAlmostEqual(
+ gvar.variations, {g: v for g, v in GVAR_VARIATIONS.items() if v}
+ )
+
+ def test_toXML(self):
+ font, gvar = self.makeFont(GVAR_VARIATIONS)
+ self.assertEqual(getXML(gvar.toXML, font), GVAR_XML)
+
+ def test_compileOffsets_shortFormat(self):
+ self.assertEqual(
+ (deHexStr("00 00 00 02 FF C0"), 0),
+ gvarClass.compileOffsets_([0, 4, 0x1FF80]),
+ )
+
+ def test_compileOffsets_longFormat(self):
+ self.assertEqual(
+ (deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1),
+ gvarClass.compileOffsets_([0, 4, 0xCAFEBEEF]),
+ )
+
+ def test_decompileOffsets_shortFormat(self):
+ decompileOffsets = gvarClass.decompileOffsets_
+ data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb")
+ self.assertEqual(
+ [2 * 0x0011, 2 * 0x2233, 2 * 0x4455, 2 * 0x6677, 2 * 0x8899, 2 * 0xAABB],
+ list(decompileOffsets(data, tableFormat=0, glyphCount=5)),
+ )
+
+ def test_decompileOffsets_longFormat(self):
+ decompileOffsets = gvarClass.decompileOffsets_
+ data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb")
+ self.assertEqual(
+ [0x00112233, 0x44556677, 0x8899AABB],
+ list(decompileOffsets(data, tableFormat=1, glyphCount=2)),
+ )
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_h_h_e_a_test.py b/Tests/ttLib/tables/_h_h_e_a_test.py
index e04fd7bb..4b5c6338 100644
--- a/Tests/ttLib/tables/_h_h_e_a_test.py
+++ b/Tests/ttLib/tables/_h_h_e_a_test.py
@@ -8,47 +8,47 @@ import unittest
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-DATA_DIR = os.path.join(CURR_DIR, 'data')
+DATA_DIR = os.path.join(CURR_DIR, "data")
HHEA_DATA = deHexStr(
- '0001 0000 ' # 1.0 version
- '02EE ' # 750 ascent
- 'FF06 ' # -250 descent
- '00C8 ' # 200 lineGap
- '03E8 ' # 1000 advanceWidthMax
- 'FFE7 ' # -25 minLeftSideBearing
- 'FFEC ' # -20 minRightSideBearing
- '03D1 ' # 977 xMaxExtent
- '0000 ' # 0 caretSlopeRise
- '0001 ' # 1 caretSlopeRun
- '0010 ' # 16 caretOffset
- '0000 ' # 0 reserved0
- '0000 ' # 0 reserved1
- '0000 ' # 0 reserved2
- '0000 ' # 0 reserved3
- '0000 ' # 0 metricDataFormat
- '002A ' # 42 numberOfHMetrics
+ "0001 0000 " # 1.0 version
+ "02EE " # 750 ascent
+ "FF06 " # -250 descent
+ "00C8 " # 200 lineGap
+ "03E8 " # 1000 advanceWidthMax
+ "FFE7 " # -25 minLeftSideBearing
+ "FFEC " # -20 minRightSideBearing
+ "03D1 " # 977 xMaxExtent
+ "0000 " # 0 caretSlopeRise
+ "0001 " # 1 caretSlopeRun
+ "0010 " # 16 caretOffset
+ "0000 " # 0 reserved0
+ "0000 " # 0 reserved1
+ "0000 " # 0 reserved2
+ "0000 " # 0 reserved3
+ "0000 " # 0 metricDataFormat
+ "002A " # 42 numberOfHMetrics
)
HHEA_AS_DICT = {
- 'tableTag': 'hhea',
- 'tableVersion': 0x00010000,
- 'ascent': 750,
- 'descent': -250,
- 'lineGap': 200,
- 'advanceWidthMax': 1000,
- 'minLeftSideBearing': -25,
- 'minRightSideBearing': -20,
- 'xMaxExtent': 977,
- 'caretSlopeRise': 0,
- 'caretSlopeRun': 1,
- 'caretOffset': 16,
- 'reserved0': 0,
- 'reserved1': 0,
- 'reserved2': 0,
- 'reserved3': 0,
- 'metricDataFormat': 0,
- 'numberOfHMetrics': 42,
+ "tableTag": "hhea",
+ "tableVersion": 0x00010000,
+ "ascent": 750,
+ "descent": -250,
+ "lineGap": 200,
+ "advanceWidthMax": 1000,
+ "minLeftSideBearing": -25,
+ "minRightSideBearing": -20,
+ "xMaxExtent": 977,
+ "caretSlopeRise": 0,
+ "caretSlopeRun": 1,
+ "caretOffset": 16,
+ "reserved0": 0,
+ "reserved1": 0,
+ "reserved2": 0,
+ "reserved3": 0,
+ "metricDataFormat": 0,
+ "numberOfHMetrics": 42,
}
HHEA_XML = [
@@ -77,9 +77,8 @@ HHEA_XML_VERSION_AS_FLOAT = [
class HheaCompileOrToXMLTest(unittest.TestCase):
-
def setUp(self):
- hhea = newTable('hhea')
+ hhea = newTable("hhea")
hhea.tableVersion = 0x00010000
hhea.ascent = 750
hhea.descent = -250
@@ -94,39 +93,45 @@ class HheaCompileOrToXMLTest(unittest.TestCase):
hhea.metricDataFormat = 0
hhea.numberOfHMetrics = 42
hhea.reserved0 = hhea.reserved1 = hhea.reserved2 = hhea.reserved3 = 0
- self.font = TTFont(sfntVersion='OTTO')
- self.font['hhea'] = hhea
+ self.font = TTFont(sfntVersion="OTTO")
+ self.font["hhea"] = hhea
def test_compile(self):
- hhea = self.font['hhea']
+ hhea = self.font["hhea"]
hhea.tableVersion = 0x00010000
self.assertEqual(HHEA_DATA, hhea.compile(self.font))
def test_compile_version_10_as_float(self):
- hhea = self.font['hhea']
+ hhea = self.font["hhea"]
hhea.tableVersion = 1.0
with CapturingLogHandler(log, "WARNING") as captor:
self.assertEqual(HHEA_DATA, hhea.compile(self.font))
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
def test_toXML(self):
- hhea = self.font['hhea']
- self.font['hhea'].tableVersion = 0x00010000
+ hhea = self.font["hhea"]
+ self.font["hhea"].tableVersion = 0x00010000
self.assertEqual(getXML(hhea.toXML), HHEA_XML)
def test_toXML_version_as_float(self):
- hhea = self.font['hhea']
+ hhea = self.font["hhea"]
hhea.tableVersion = 1.0
with CapturingLogHandler(log, "WARNING") as captor:
self.assertEqual(getXML(hhea.toXML), HHEA_XML)
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
def test_aliases(self):
- hhea = self.font['hhea']
+ hhea = self.font["hhea"]
self.assertEqual(hhea.ascent, hhea.ascender)
self.assertEqual(hhea.descent, hhea.descender)
hhea.ascender = 800
@@ -138,44 +143,46 @@ class HheaCompileOrToXMLTest(unittest.TestCase):
hhea.descent = -299
self.assertEqual(hhea.descender, -299)
-class HheaDecompileOrFromXMLTest(unittest.TestCase):
+class HheaDecompileOrFromXMLTest(unittest.TestCase):
def setUp(self):
- hhea = newTable('hhea')
- self.font = TTFont(sfntVersion='OTTO')
- self.font['hhea'] = hhea
+ hhea = newTable("hhea")
+ self.font = TTFont(sfntVersion="OTTO")
+ self.font["hhea"] = hhea
def test_decompile(self):
- hhea = self.font['hhea']
+ hhea = self.font["hhea"]
hhea.decompile(HHEA_DATA, self.font)
for key in hhea.__dict__:
self.assertEqual(getattr(hhea, key), HHEA_AS_DICT[key])
def test_fromXML(self):
- hhea = self.font['hhea']
+ hhea = self.font["hhea"]
for name, attrs, content in parseXML(HHEA_XML):
hhea.fromXML(name, attrs, content, self.font)
for key in hhea.__dict__:
self.assertEqual(getattr(hhea, key), HHEA_AS_DICT[key])
def test_fromXML_version_as_float(self):
- hhea = self.font['hhea']
+ hhea = self.font["hhea"]
with CapturingLogHandler(log, "WARNING") as captor:
for name, attrs, content in parseXML(HHEA_XML_VERSION_AS_FLOAT):
hhea.fromXML(name, attrs, content, self.font)
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
for key in hhea.__dict__:
self.assertEqual(getattr(hhea, key), HHEA_AS_DICT[key])
class HheaRecalcTest(unittest.TestCase):
-
def test_recalc_TTF(self):
font = TTFont()
- font.importXML(os.path.join(DATA_DIR, '_h_h_e_a_recalc_TTF.ttx'))
- hhea = font['hhea']
+ font.importXML(os.path.join(DATA_DIR, "_h_h_e_a_recalc_TTF.ttx"))
+ hhea = font["hhea"]
hhea.recalc(font)
self.assertEqual(hhea.advanceWidthMax, 600)
self.assertEqual(hhea.minLeftSideBearing, -56)
@@ -184,8 +191,8 @@ class HheaRecalcTest(unittest.TestCase):
def test_recalc_OTF(self):
font = TTFont()
- font.importXML(os.path.join(DATA_DIR, '_h_h_e_a_recalc_OTF.ttx'))
- hhea = font['hhea']
+ font.importXML(os.path.join(DATA_DIR, "_h_h_e_a_recalc_OTF.ttx"))
+ hhea = font["hhea"]
hhea.recalc(font)
self.assertEqual(hhea.advanceWidthMax, 600)
self.assertEqual(hhea.minLeftSideBearing, -56)
@@ -194,8 +201,8 @@ class HheaRecalcTest(unittest.TestCase):
def test_recalc_empty(self):
font = TTFont()
- font.importXML(os.path.join(DATA_DIR, '_h_h_e_a_recalc_empty.ttx'))
- hhea = font['hhea']
+ font.importXML(os.path.join(DATA_DIR, "_h_h_e_a_recalc_empty.ttx"))
+ hhea = font["hhea"]
hhea.recalc(font)
self.assertEqual(hhea.advanceWidthMax, 600)
self.assertEqual(hhea.minLeftSideBearing, 0)
@@ -205,4 +212,5 @@ class HheaRecalcTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_h_m_t_x_test.py b/Tests/ttLib/tables/_h_m_t_x_test.py
index 79d0cb7e..f7ab8b1d 100644
--- a/Tests/ttLib/tables/_h_m_t_x_test.py
+++ b/Tests/ttLib/tables/_h_m_t_x_test.py
@@ -8,7 +8,6 @@ import unittest
class HmtxTableTest(unittest.TestCase):
-
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@@ -23,10 +22,10 @@ class HmtxTableTest(unittest.TestCase):
def makeFont(self, numGlyphs, numberOfMetrics):
font = TTFont()
- maxp = font['maxp'] = newTable('maxp')
+ maxp = font["maxp"] = newTable("maxp")
maxp.numGlyphs = numGlyphs
# from A to ...
- font.glyphOrder = [chr(i) for i in range(65, 65+numGlyphs)]
+ font.glyphOrder = [chr(i) for i in range(65, 65 + numGlyphs)]
headerTag = self.tableClass.headerTag
font[headerTag] = newTable(headerTag)
numberOfMetricsName = self.tableClass.numberOfMetricsName
@@ -40,9 +39,9 @@ class HmtxTableTest(unittest.TestCase):
mtxTable = newTable(self.tag)
mtxTable.decompile(data, font)
- self.assertEqual(mtxTable['A'], (674, -11))
- self.assertEqual(mtxTable['B'], (632, 79))
- self.assertEqual(mtxTable['C'], (710, 54))
+ self.assertEqual(mtxTable["A"], (674, -11))
+ self.assertEqual(mtxTable["B"], (632, 79))
+ self.assertEqual(mtxTable["C"], (710, 54))
def test_decompile_additional_SB(self):
font = self.makeFont(numGlyphs=4, numberOfMetrics=2)
@@ -53,11 +52,11 @@ class HmtxTableTest(unittest.TestCase):
mtxTable = newTable(self.tag)
mtxTable.decompile(data, font)
- self.assertEqual(mtxTable['A'], (674, -11))
- self.assertEqual(mtxTable['B'], (632, 79))
+ self.assertEqual(mtxTable["A"], (674, -11))
+ self.assertEqual(mtxTable["B"], (632, 79))
# all following have same width as the previous
- self.assertEqual(mtxTable['C'], (632, 54))
- self.assertEqual(mtxTable['D'], (632, -4))
+ self.assertEqual(mtxTable["C"], (632, 54))
+ self.assertEqual(mtxTable["D"], (632, -4))
def test_decompile_not_enough_data(self):
font = self.makeFont(numGlyphs=1, numberOfMetrics=1)
@@ -75,20 +74,20 @@ class HmtxTableTest(unittest.TestCase):
with CapturingLogHandler(log, "WARNING") as captor:
mtxTable.decompile(b"\0\0\0\0\0", font)
- self.assertTrue(
- len([r for r in captor.records if msg == r.msg]) == 1)
+ self.assertTrue(len([r for r in captor.records if msg == r.msg]) == 1)
def test_decompile_num_metrics_greater_than_glyphs(self):
font = self.makeFont(numGlyphs=1, numberOfMetrics=2)
mtxTable = newTable(self.tag)
msg = "The %s.%s exceeds the maxp.numGlyphs" % (
- self.tableClass.headerTag, self.tableClass.numberOfMetricsName)
+ self.tableClass.headerTag,
+ self.tableClass.numberOfMetricsName,
+ )
with CapturingLogHandler(log, "WARNING") as captor:
mtxTable.decompile(b"\0\0\0\0", font)
- self.assertTrue(
- len([r for r in captor.records if msg == r.msg]) == 1)
+ self.assertTrue(len([r for r in captor.records if msg == r.msg]) == 1)
def test_decompile_possibly_negative_advance(self):
font = self.makeFont(numGlyphs=1, numberOfMetrics=1)
@@ -101,12 +100,12 @@ class HmtxTableTest(unittest.TestCase):
mtxTable.decompile(data, font)
self.assertTrue(
- len([r for r in captor.records
- if "has a huge advance" in r.msg]) == 1)
+ len([r for r in captor.records if "has a huge advance" in r.msg]) == 1
+ )
def test_decompile_no_header_table(self):
font = TTFont()
- maxp = font['maxp'] = newTable('maxp')
+ maxp = font["maxp"] = newTable("maxp")
maxp.numGlyphs = 3
font.glyphOrder = ["A", "B", "C"]
@@ -122,7 +121,7 @@ class HmtxTableTest(unittest.TestCase):
"A": (400, 30),
"B": (400, 40),
"C": (400, 50),
- }
+ },
)
def test_compile(self):
@@ -130,9 +129,9 @@ class HmtxTableTest(unittest.TestCase):
font = self.makeFont(numGlyphs=3, numberOfMetrics=4)
mtxTable = font[self.tag] = newTable(self.tag)
mtxTable.metrics = {
- 'A': (674, -11),
- 'B': (632, 79),
- 'C': (710, 54),
+ "A": (674, -11),
+ "B": (632, 79),
+ "C": (710, 54),
}
data = mtxTable.compile(font)
@@ -140,17 +139,16 @@ class HmtxTableTest(unittest.TestCase):
self.assertEqual(data, deHexStr("02A2 FFF5 0278 004F 02C6 0036"))
headerTable = font[self.tableClass.headerTag]
- self.assertEqual(
- getattr(headerTable, self.tableClass.numberOfMetricsName), 3)
+ self.assertEqual(getattr(headerTable, self.tableClass.numberOfMetricsName), 3)
def test_compile_additional_SB(self):
font = self.makeFont(numGlyphs=4, numberOfMetrics=1)
mtxTable = font[self.tag] = newTable(self.tag)
mtxTable.metrics = {
- 'A': (632, -11),
- 'B': (632, 79),
- 'C': (632, 54),
- 'D': (632, -4),
+ "A": (632, -11),
+ "B": (632, 79),
+ "C": (632, 54),
+ "D": (632, -4),
}
data = mtxTable.compile(font)
@@ -160,20 +158,23 @@ class HmtxTableTest(unittest.TestCase):
def test_compile_negative_advance(self):
font = self.makeFont(numGlyphs=1, numberOfMetrics=1)
mtxTable = font[self.tag] = newTable(self.tag)
- mtxTable.metrics = {'A': [-1, 0]}
+ mtxTable.metrics = {"A": [-1, 0]}
with CapturingLogHandler(log, "ERROR") as captor:
with self.assertRaisesRegex(TTLibError, "negative advance"):
mtxTable.compile(font)
self.assertTrue(
- len([r for r in captor.records
- if "Glyph 'A' has negative advance" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Glyph 'A' has negative advance" in r.msg]
+ )
+ == 1
+ )
def test_compile_struct_out_of_range(self):
font = self.makeFont(numGlyphs=1, numberOfMetrics=1)
mtxTable = font[self.tag] = newTable(self.tag)
- mtxTable.metrics = {'A': (0xFFFF+1, -0x8001)}
+ mtxTable.metrics = {"A": (0xFFFF + 1, -0x8001)}
with self.assertRaises(struct.error):
mtxTable.compile(font)
@@ -182,9 +183,9 @@ class HmtxTableTest(unittest.TestCase):
font = self.makeFont(numGlyphs=3, numberOfMetrics=2)
mtxTable = font[self.tag] = newTable(self.tag)
mtxTable.metrics = {
- 'A': (0.5, 0.5), # round -> (1, 1)
- 'B': (0.1, 0.9), # round -> (0, 1)
- 'C': (0.1, 0.1), # round -> (0, 0)
+ "A": (0.5, 0.5), # round -> (1, 1)
+ "B": (0.1, 0.9), # round -> (0, 1)
+ "C": (0.1, 0.1), # round -> (0, 0)
}
data = mtxTable.compile(font)
@@ -193,7 +194,7 @@ class HmtxTableTest(unittest.TestCase):
def test_compile_no_header_table(self):
font = TTFont()
- maxp = font['maxp'] = newTable('maxp')
+ maxp = font["maxp"] = newTable("maxp")
maxp.numGlyphs = 3
font.glyphOrder = [chr(i) for i in range(65, 68)]
mtxTable = font[self.tag] = newTable(self.tag)
@@ -212,44 +213,46 @@ class HmtxTableTest(unittest.TestCase):
def test_toXML(self):
font = self.makeFont(numGlyphs=2, numberOfMetrics=2)
mtxTable = font[self.tag] = newTable(self.tag)
- mtxTable.metrics = {'B': (632, 79), 'A': (674, -11)}
+ mtxTable.metrics = {"B": (632, 79), "A": (674, -11)}
self.assertEqual(
getXML(mtxTable.toXML),
- ('<mtx name="A" %s="674" %s="-11"/>\n'
- '<mtx name="B" %s="632" %s="79"/>' % (
- (self.tableClass.advanceName,
- self.tableClass.sideBearingName) * 2)).split('\n'))
+ (
+ '<mtx name="A" %s="674" %s="-11"/>\n'
+ '<mtx name="B" %s="632" %s="79"/>'
+ % ((self.tableClass.advanceName, self.tableClass.sideBearingName) * 2)
+ ).split("\n"),
+ )
def test_fromXML(self):
mtxTable = newTable(self.tag)
for name, attrs, content in parseXML(
- '<mtx name="A" %s="674" %s="-11"/>'
- '<mtx name="B" %s="632" %s="79"/>' % (
- (self.tableClass.advanceName,
- self.tableClass.sideBearingName) * 2)):
+ '<mtx name="A" %s="674" %s="-11"/>'
+ '<mtx name="B" %s="632" %s="79"/>'
+ % ((self.tableClass.advanceName, self.tableClass.sideBearingName) * 2)
+ ):
mtxTable.fromXML(name, attrs, content, ttFont=None)
- self.assertEqual(
- mtxTable.metrics, {'A': (674, -11), 'B': (632, 79)})
+ self.assertEqual(mtxTable.metrics, {"A": (674, -11), "B": (632, 79)})
def test_delitem(self):
mtxTable = newTable(self.tag)
- mtxTable.metrics = {'A': (0, 0)}
+ mtxTable.metrics = {"A": (0, 0)}
- del mtxTable['A']
+ del mtxTable["A"]
- self.assertTrue('A' not in mtxTable.metrics)
+ self.assertTrue("A" not in mtxTable.metrics)
def test_setitem(self):
mtxTable = newTable(self.tag)
- mtxTable.metrics = {'A': (674, -11), 'B': (632, 79)}
- mtxTable['B'] = [0, 0] # list is converted to tuple
+ mtxTable.metrics = {"A": (674, -11), "B": (632, 79)}
+ mtxTable["B"] = [0, 0] # list is converted to tuple
- self.assertEqual(mtxTable.metrics, {'A': (674, -11), 'B': (0, 0)})
+ self.assertEqual(mtxTable.metrics, {"A": (674, -11), "B": (0, 0)})
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_k_e_r_n_test.py b/Tests/ttLib/tables/_k_e_r_n_test.py
index eb48bae6..be0fe9aa 100644
--- a/Tests/ttLib/tables/_k_e_r_n_test.py
+++ b/Tests/ttLib/tables/_k_e_r_n_test.py
@@ -1,6 +1,5 @@
from fontTools.ttLib import newTable
-from fontTools.ttLib.tables._k_e_r_n import (
- KernTable_format_0, KernTable_format_unkown)
+from fontTools.ttLib.tables._k_e_r_n import KernTable_format_0, KernTable_format_unkown
from fontTools.misc.textTools import deHexStr
from fontTools.misc.testTools import FakeFont, getXML, parseXML
import itertools
@@ -8,19 +7,19 @@ import pytest
KERN_VER_0_FMT_0_DATA = deHexStr(
- '0000 ' # 0: version=0
- '0001 ' # 2: nTables=1
- '0000 ' # 4: version=0 (bogus field, unused)
- '0020 ' # 6: length=32
- '00 ' # 8: format=0
- '01 ' # 9: coverage=1
- '0003 ' # 10: nPairs=3
- '000C ' # 12: searchRange=12
- '0001 ' # 14: entrySelector=1
- '0006 ' # 16: rangeShift=6
- '0004 000C FFD8 ' # 18: l=4, r=12, v=-40
- '0004 001C 0028 ' # 24: l=4, r=28, v=40
- '0005 0028 FFCE ' # 30: l=5, r=40, v=-50
+ "0000 " # 0: version=0
+ "0001 " # 2: nTables=1
+ "0000 " # 4: version=0 (bogus field, unused)
+ "0020 " # 6: length=32
+ "00 " # 8: format=0
+ "01 " # 9: coverage=1
+ "0003 " # 10: nPairs=3
+ "000C " # 12: searchRange=12
+ "0001 " # 14: entrySelector=1
+ "0006 " # 16: rangeShift=6
+ "0004 000C FFD8 " # 18: l=4, r=12, v=-40
+ "0004 001C 0028 " # 24: l=4, r=28, v=40
+ "0005 0028 FFCE " # 30: l=5, r=40, v=-50
)
assert len(KERN_VER_0_FMT_0_DATA) == 36
@@ -30,23 +29,23 @@ KERN_VER_0_FMT_0_XML = [
' <pair l="E" r="M" v="-40"/>',
' <pair l="E" r="c" v="40"/>',
' <pair l="F" r="o" v="-50"/>',
- '</kernsubtable>',
+ "</kernsubtable>",
]
KERN_VER_1_FMT_0_DATA = deHexStr(
- '0001 0000 ' # 0: version=1
- '0000 0001 ' # 4: nTables=1
- '0000 0022 ' # 8: length=34
- '00 ' # 12: coverage=0
- '00 ' # 13: format=0
- '0000 ' # 14: tupleIndex=0
- '0003 ' # 16: nPairs=3
- '000C ' # 18: searchRange=12
- '0001 ' # 20: entrySelector=1
- '0006 ' # 22: rangeShift=6
- '0004 000C FFD8 ' # 24: l=4, r=12, v=-40
- '0004 001C 0028 ' # 30: l=4, r=28, v=40
- '0005 0028 FFCE ' # 36: l=5, r=40, v=-50
+ "0001 0000 " # 0: version=1
+ "0000 0001 " # 4: nTables=1
+ "0000 0022 " # 8: length=34
+ "00 " # 12: coverage=0
+ "00 " # 13: format=0
+ "0000 " # 14: tupleIndex=0
+ "0003 " # 16: nPairs=3
+ "000C " # 18: searchRange=12
+ "0001 " # 20: entrySelector=1
+ "0006 " # 22: rangeShift=6
+ "0004 000C FFD8 " # 24: l=4, r=12, v=-40
+ "0004 001C 0028 " # 30: l=4, r=28, v=40
+ "0005 0028 FFCE " # 36: l=5, r=40, v=-50
)
assert len(KERN_VER_1_FMT_0_DATA) == 42
@@ -56,22 +55,22 @@ KERN_VER_1_FMT_0_XML = [
' <pair l="E" r="M" v="-40"/>',
' <pair l="E" r="c" v="40"/>',
' <pair l="F" r="o" v="-50"/>',
- '</kernsubtable>',
+ "</kernsubtable>",
]
KERN_VER_0_FMT_UNKNOWN_DATA = deHexStr(
- '0000 ' # 0: version=0
- '0002 ' # 2: nTables=2
- '0000 ' # 4: version=0
- '000A ' # 6: length=10
- '04 ' # 8: format=4 (format 4 doesn't exist)
- '01 ' # 9: coverage=1
- '1234 5678 ' # 10: garbage...
- '0000 ' # 14: version=0
- '000A ' # 16: length=10
- '05 ' # 18: format=5 (format 5 doesn't exist)
- '01 ' # 19: coverage=1
- '9ABC DEF0 ' # 20: garbage...
+ "0000 " # 0: version=0
+ "0002 " # 2: nTables=2
+ "0000 " # 4: version=0
+ "000A " # 6: length=10
+ "04 " # 8: format=4 (format 4 doesn't exist)
+ "01 " # 9: coverage=1
+ "1234 5678 " # 10: garbage...
+ "0000 " # 14: version=0
+ "000A " # 16: length=10
+ "05 " # 18: format=5 (format 5 doesn't exist)
+ "01 " # 19: coverage=1
+ "9ABC DEF0 " # 20: garbage...
)
assert len(KERN_VER_0_FMT_UNKNOWN_DATA) == 24
@@ -79,29 +78,29 @@ KERN_VER_0_FMT_UNKNOWN_XML = [
'<version value="0"/>',
'<kernsubtable format="4">',
" <!-- unknown 'kern' subtable format -->",
- ' 0000000A 04011234',
- ' 5678 ',
- '</kernsubtable>',
+ " 0000000A 04011234",
+ " 5678 ",
+ "</kernsubtable>",
'<kernsubtable format="5">',
"<!-- unknown 'kern' subtable format -->",
- ' 0000000A 05019ABC',
- ' DEF0 ',
- '</kernsubtable>',
+ " 0000000A 05019ABC",
+ " DEF0 ",
+ "</kernsubtable>",
]
KERN_VER_1_FMT_UNKNOWN_DATA = deHexStr(
- '0001 0000 ' # 0: version=1
- '0000 0002 ' # 4: nTables=2
- '0000 000C ' # 8: length=12
- '00 ' # 12: coverage=0
- '04 ' # 13: format=4 (format 4 doesn't exist)
- '0000 ' # 14: tupleIndex=0
- '1234 5678' # 16: garbage...
- '0000 000C ' # 20: length=12
- '00 ' # 24: coverage=0
- '05 ' # 25: format=5 (format 5 doesn't exist)
- '0000 ' # 26: tupleIndex=0
- '9ABC DEF0 ' # 28: garbage...
+ "0001 0000 " # 0: version=1
+ "0000 0002 " # 4: nTables=2
+ "0000 000C " # 8: length=12
+ "00 " # 12: coverage=0
+ "04 " # 13: format=4 (format 4 doesn't exist)
+ "0000 " # 14: tupleIndex=0
+ "1234 5678" # 16: garbage...
+ "0000 000C " # 20: length=12
+ "00 " # 24: coverage=0
+ "05 " # 25: format=5 (format 5 doesn't exist)
+ "0000 " # 26: tupleIndex=0
+ "9ABC DEF0 " # 28: garbage...
)
assert len(KERN_VER_1_FMT_UNKNOWN_DATA) == 32
@@ -109,37 +108,39 @@ KERN_VER_1_FMT_UNKNOWN_XML = [
'<version value="1"/>',
'<kernsubtable format="4">',
" <!-- unknown 'kern' subtable format -->",
- ' 0000000C 00040000',
- ' 12345678 ',
- '</kernsubtable>',
+ " 0000000C 00040000",
+ " 12345678 ",
+ "</kernsubtable>",
'<kernsubtable format="5">',
" <!-- unknown 'kern' subtable format -->",
- ' 0000000C 00050000',
- ' 9ABCDEF0 ',
- '</kernsubtable>',
+ " 0000000C 00050000",
+ " 9ABCDEF0 ",
+ "</kernsubtable>",
]
KERN_VER_0_FMT_0_OVERFLOWING_DATA = deHexStr(
- '0000 ' # 0: version=0
- '0001 ' # 2: nTables=1
- '0000 ' # 4: version=0 (bogus field, unused)
- '0274 ' # 6: length=628 (bogus value for 66164 % 0x10000)
- '00 ' # 8: format=0
- '01 ' # 9: coverage=1
- '2B11 ' # 10: nPairs=11025
- 'C000 ' # 12: searchRange=49152
- '000D ' # 14: entrySelector=13
- '4266 ' # 16: rangeShift=16998
-) + deHexStr(' '.join(
- '%04X %04X %04X' % (a, b, 0)
- for (a, b) in itertools.product(range(105), repeat=2)
-))
+ "0000 " # 0: version=0
+ "0001 " # 2: nTables=1
+ "0000 " # 4: version=0 (bogus field, unused)
+ "0274 " # 6: length=628 (bogus value for 66164 % 0x10000)
+ "00 " # 8: format=0
+ "01 " # 9: coverage=1
+ "2B11 " # 10: nPairs=11025
+ "C000 " # 12: searchRange=49152
+ "000D " # 14: entrySelector=13
+ "4266 " # 16: rangeShift=16998
+) + deHexStr(
+ " ".join(
+ "%04X %04X %04X" % (a, b, 0)
+ for (a, b) in itertools.product(range(105), repeat=2)
+ )
+)
@pytest.fixture
def font():
- return FakeFont(list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz"))
+ return FakeFont(list("ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"))
+
@pytest.fixture
def overflowing_font():
@@ -147,14 +148,13 @@ def overflowing_font():
class KernTableTest(object):
-
@pytest.mark.parametrize(
"data, version",
[
(KERN_VER_0_FMT_0_DATA, 0),
(KERN_VER_1_FMT_0_DATA, 1.0),
],
- ids=["version_0", "version_1"]
+ ids=["version_0", "version_1"],
)
def test_decompile_single_format_0(self, data, font, version):
kern = newTable("kern")
@@ -171,11 +171,7 @@ class KernTableTest(object):
assert st.coverage == (0 if st.apple else 1)
assert st.tupleIndex == (0 if st.apple else None)
assert len(st.kernTable) == 3
- assert st.kernTable == {
- ('E', 'M'): -40,
- ('E', 'c'): 40,
- ('F', 'o'): -50
- }
+ assert st.kernTable == {("E", "M"): -40, ("E", "c"): 40, ("F", "o"): -50}
@pytest.mark.parametrize(
"version, expected",
@@ -183,7 +179,7 @@ class KernTableTest(object):
(0, KERN_VER_0_FMT_0_DATA),
(1.0, KERN_VER_1_FMT_0_DATA),
],
- ids=["version_0", "version_1"]
+ ids=["version_0", "version_1"],
)
def test_compile_single_format_0(self, font, version, expected):
kern = newTable("kern")
@@ -191,13 +187,9 @@ class KernTableTest(object):
apple = version == 1.0
st = KernTable_format_0(apple)
kern.kernTables = [st]
- st.coverage = (0 if apple else 1)
+ st.coverage = 0 if apple else 1
st.tupleIndex = 0 if apple else None
- st.kernTable = {
- ('E', 'M'): -40,
- ('E', 'c'): 40,
- ('F', 'o'): -50
- }
+ st.kernTable = {("E", "M"): -40, ("E", "c"): 40, ("F", "o"): -50}
data = kern.compile(font)
assert data == expected
@@ -207,7 +199,7 @@ class KernTableTest(object):
(KERN_VER_0_FMT_0_XML, 0),
(KERN_VER_1_FMT_0_XML, 1.0),
],
- ids=["version_0", "version_1"]
+ ids=["version_0", "version_1"],
)
def test_fromXML_single_format_0(self, xml, font, version):
kern = newTable("kern")
@@ -223,11 +215,7 @@ class KernTableTest(object):
assert st.coverage == (0 if st.apple else 1)
assert st.tupleIndex == (0 if st.apple else None)
assert len(st.kernTable) == 3
- assert st.kernTable == {
- ('E', 'M'): -40,
- ('E', 'c'): 40,
- ('F', 'o'): -50
- }
+ assert st.kernTable == {("E", "M"): -40, ("E", "c"): 40, ("F", "o"): -50}
@pytest.mark.parametrize(
"version, expected",
@@ -235,7 +223,7 @@ class KernTableTest(object):
(0, KERN_VER_0_FMT_0_XML),
(1.0, KERN_VER_1_FMT_0_XML),
],
- ids=["version_0", "version_1"]
+ ids=["version_0", "version_1"],
)
def test_toXML_single_format_0(self, font, version, expected):
kern = newTable("kern")
@@ -245,11 +233,7 @@ class KernTableTest(object):
kern.kernTables = [st]
st.coverage = 0 if apple else 1
st.tupleIndex = 0 if apple else None
- st.kernTable = {
- ('E', 'M'): -40,
- ('E', 'c'): 40,
- ('F', 'o'): -50
- }
+ st.kernTable = {("E", "M"): -40, ("E", "c"): 40, ("F", "o"): -50}
xml = getXML(kern.toXML, font)
assert xml == expected
@@ -259,10 +243,11 @@ class KernTableTest(object):
(KERN_VER_0_FMT_UNKNOWN_DATA, 0, 4, 10),
(KERN_VER_1_FMT_UNKNOWN_DATA, 1.0, 8, 12),
],
- ids=["version_0", "version_1"]
+ ids=["version_0", "version_1"],
)
def test_decompile_format_unknown(
- self, data, font, version, header_length, st_length):
+ self, data, font, version, header_length, st_length
+ ):
kern = newTable("kern")
kern.decompile(data, font)
@@ -285,7 +270,7 @@ class KernTableTest(object):
(0, 10, KERN_VER_0_FMT_UNKNOWN_DATA),
(1.0, 12, KERN_VER_1_FMT_UNKNOWN_DATA),
],
- ids=["version_0", "version_1"]
+ ids=["version_0", "version_1"],
)
def test_compile_format_unknown(self, version, st_length, expected):
kern = newTable("kern")
@@ -296,13 +281,13 @@ class KernTableTest(object):
if version > 0:
coverage = 0
header_fmt = deHexStr(
- "%08X %02X %02X %04X" % (
- st_length, coverage, unknown_fmt, 0))
+ "%08X %02X %02X %04X" % (st_length, coverage, unknown_fmt, 0)
+ )
else:
coverage = 1
header_fmt = deHexStr(
- "%04X %04X %02X %02X" % (
- 0, st_length, unknown_fmt, coverage))
+ "%04X %04X %02X %02X" % (0, st_length, unknown_fmt, coverage)
+ )
st = KernTable_format_unkown(unknown_fmt)
st.data = header_fmt + deHexStr(kern_data)
kern.kernTables.append(st)
@@ -316,7 +301,7 @@ class KernTableTest(object):
(KERN_VER_0_FMT_UNKNOWN_XML, 0, 10),
(KERN_VER_1_FMT_UNKNOWN_XML, 1.0, 12),
],
- ids=["version_0", "version_1"]
+ ids=["version_0", "version_1"],
)
def test_fromXML_format_unknown(self, xml, font, version, st_length):
kern = newTable("kern")
@@ -334,8 +319,7 @@ class KernTableTest(object):
assert st1.format == 5
assert len(st1.data) == st_length
- @pytest.mark.parametrize(
- "version", [0, 1.0], ids=["version_0", "version_1"])
+ @pytest.mark.parametrize("version", [0, 1.0], ids=["version_0", "version_1"])
def test_toXML_format_unknown(self, font, version):
kern = newTable("kern")
kern.version = version
@@ -348,9 +332,9 @@ class KernTableTest(object):
assert xml == [
'<version value="%s"/>' % version,
'<kernsubtable format="4">',
- ' <!-- unknown \'kern\' subtable format -->',
- ' 41424344 ',
- '</kernsubtable>',
+ " <!-- unknown 'kern' subtable format -->",
+ " 41424344 ",
+ "</kernsubtable>",
]
def test_getkern(self):
@@ -371,15 +355,32 @@ class KernTableTest(object):
class KernTable_format_0_Test(object):
-
def test_decompileBadGlyphId(self, font):
subtable = KernTable_format_0()
subtable.decompile(
- b'\x00' + b'\x00' + b'\x00' + b'\x1a' + b'\x00' + b'\x00' +
- b'\x00' + b'\x02' + b'\x00' * 6 +
- b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' +
- b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02',
- font)
+ b"\x00"
+ + b"\x00"
+ + b"\x00"
+ + b"\x1a"
+ + b"\x00"
+ + b"\x00"
+ + b"\x00"
+ + b"\x02"
+ + b"\x00" * 6
+ + b"\x00"
+ + b"\x01"
+ + b"\x00"
+ + b"\x03"
+ + b"\x00"
+ + b"\x01"
+ + b"\x00"
+ + b"\x01"
+ + b"\xFF"
+ + b"\xFF"
+ + b"\x00"
+ + b"\x02",
+ font,
+ )
assert subtable[("B", "D")] == 1
assert subtable[("B", "glyph65535")] == 2
@@ -392,9 +393,7 @@ class KernTable_format_0_Test(object):
st.coverage = 1
st.tupleIndex = None
st.kernTable = {
- (a, b): 0
- for (a, b) in itertools.product(
- font.getGlyphOrder(), repeat=2)
+ (a, b): 0 for (a, b) in itertools.product(font.getGlyphOrder(), repeat=2)
}
assert len(st.kernTable) == 11025
data = kern.compile(font)
@@ -408,12 +407,11 @@ class KernTable_format_0_Test(object):
st = kern.kernTables[0]
assert st.kernTable == {
- (a, b): 0
- for (a, b) in itertools.product(
- font.getGlyphOrder(), repeat=2)
+ (a, b): 0 for (a, b) in itertools.product(font.getGlyphOrder(), repeat=2)
}
if __name__ == "__main__":
import sys
+
sys.exit(pytest.main(sys.argv))
diff --git a/Tests/ttLib/tables/_l_c_a_r_test.py b/Tests/ttLib/tables/_l_c_a_r_test.py
index 5837a07a..79a80249 100644
--- a/Tests/ttLib/tables/_l_c_a_r_test.py
+++ b/Tests/ttLib/tables/_l_c_a_r_test.py
@@ -7,101 +7,99 @@ import unittest
# Example: Format 0 Ligature Caret Table
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6lcar.html
LCAR_FORMAT_0_DATA = deHexStr(
- '0001 0000 0000 ' # 0: Version=1.0, Format=0
- '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2
- '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0
- '0001 001E ' # 18: Glyph=1 (f_r), OffsetOfLigCaretEntry=30
- '0003 0022 ' # 22: Glyph=3 (f_f_l), OffsetOfLigCaretEntry=34
- 'FFFF 0000 ' # 26: Glyph=<end>, OffsetOfLigCaretEntry=0
- '0001 00DC ' # 30: DivisionPointCount=1, DivisionPoint=[220]
- '0002 00EF 01D8 ' # 34: DivisionPointCount=2, DivisionPoint=[239, 475]
-) # 40: <end>
-assert(len(LCAR_FORMAT_0_DATA) == 40)
+ "0001 0000 0000 " # 0: Version=1.0, Format=0
+ "0006 0004 0002 " # 6: LookupFormat=6, UnitSize=4, NUnits=2
+ "0008 0001 0000 " # 12: SearchRange=8, EntrySelector=1, RangeShift=0
+ "0001 001E " # 18: Glyph=1 (f_r), OffsetOfLigCaretEntry=30
+ "0003 0022 " # 22: Glyph=3 (f_f_l), OffsetOfLigCaretEntry=34
+ "FFFF 0000 " # 26: Glyph=<end>, OffsetOfLigCaretEntry=0
+ "0001 00DC " # 30: DivisionPointCount=1, DivisionPoint=[220]
+ "0002 00EF 01D8 " # 34: DivisionPointCount=2, DivisionPoint=[239, 475]
+) # 40: <end>
+assert len(LCAR_FORMAT_0_DATA) == 40
LCAR_FORMAT_0_XML = [
'<Version value="0x00010000"/>',
'<LigatureCarets Format="0">',
- ' <Carets>',
+ " <Carets>",
' <Lookup glyph="f_f_l">',
- ' <!-- DivsionPointCount=2 -->',
+ " <!-- DivsionPointCount=2 -->",
' <DivisionPoint index="0" value="239"/>',
' <DivisionPoint index="1" value="472"/>',
- ' </Lookup>',
+ " </Lookup>",
' <Lookup glyph="f_r">',
- ' <!-- DivsionPointCount=1 -->',
+ " <!-- DivsionPointCount=1 -->",
' <DivisionPoint index="0" value="220"/>',
- ' </Lookup>',
- ' </Carets>',
- '</LigatureCarets>',
+ " </Lookup>",
+ " </Carets>",
+ "</LigatureCarets>",
]
# Example: Format 1 Ligature Caret Table
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6lcar.html
LCAR_FORMAT_1_DATA = deHexStr(
- '0001 0000 0001 ' # 0: Version=1.0, Format=1
- '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2
- '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0
- '0001 001E ' # 18: Glyph=1 (f_r), OffsetOfLigCaretEntry=30
- '0003 0022 ' # 22: Glyph=3 (f_f_l), OffsetOfLigCaretEntry=34
- 'FFFF 0000 ' # 26: Glyph=<end>, OffsetOfLigCaretEntry=0
- '0001 0032 ' # 30: DivisionPointCount=1, DivisionPoint=[50]
- '0002 0037 004B ' # 34: DivisionPointCount=2, DivisionPoint=[55, 75]
-) # 40: <end>
-assert(len(LCAR_FORMAT_1_DATA) == 40)
+ "0001 0000 0001 " # 0: Version=1.0, Format=1
+ "0006 0004 0002 " # 6: LookupFormat=6, UnitSize=4, NUnits=2
+ "0008 0001 0000 " # 12: SearchRange=8, EntrySelector=1, RangeShift=0
+ "0001 001E " # 18: Glyph=1 (f_r), OffsetOfLigCaretEntry=30
+ "0003 0022 " # 22: Glyph=3 (f_f_l), OffsetOfLigCaretEntry=34
+ "FFFF 0000 " # 26: Glyph=<end>, OffsetOfLigCaretEntry=0
+ "0001 0032 " # 30: DivisionPointCount=1, DivisionPoint=[50]
+ "0002 0037 004B " # 34: DivisionPointCount=2, DivisionPoint=[55, 75]
+) # 40: <end>
+assert len(LCAR_FORMAT_1_DATA) == 40
LCAR_FORMAT_1_XML = [
'<Version value="0x00010000"/>',
'<LigatureCarets Format="1">',
- ' <Carets>',
+ " <Carets>",
' <Lookup glyph="f_f_l">',
- ' <!-- DivsionPointCount=2 -->',
+ " <!-- DivsionPointCount=2 -->",
' <DivisionPoint index="0" value="55"/>',
' <DivisionPoint index="1" value="75"/>',
- ' </Lookup>',
+ " </Lookup>",
' <Lookup glyph="f_r">',
- ' <!-- DivsionPointCount=1 -->',
+ " <!-- DivsionPointCount=1 -->",
' <DivisionPoint index="0" value="50"/>',
- ' </Lookup>',
- ' </Carets>',
- '</LigatureCarets>',
+ " </Lookup>",
+ " </Carets>",
+ "</LigatureCarets>",
]
class LCARTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.notdef', 'f_r', 'X', 'f_f_l'])
+ cls.font = FakeFont([".notdef", "f_r", "X", "f_f_l"])
def test_decompile_toXML_format0(self):
- table = newTable('lcar')
+ table = newTable("lcar")
table.decompile(LCAR_FORMAT_0_DATA, self.font)
self.assertEqual(getXML(table.toXML), LCAR_FORMAT_0_XML)
def test_compile_fromXML_format0(self):
- table = newTable('lcar')
+ table = newTable("lcar")
for name, attrs, content in parseXML(LCAR_FORMAT_0_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(LCAR_FORMAT_0_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(LCAR_FORMAT_0_DATA))
def test_decompile_toXML_format1(self):
- table = newTable('lcar')
+ table = newTable("lcar")
table.decompile(LCAR_FORMAT_1_DATA, self.font)
self.assertEqual(getXML(table.toXML), LCAR_FORMAT_1_XML)
def test_compile_fromXML_format1(self):
- table = newTable('lcar')
+ table = newTable("lcar")
for name, attrs, content in parseXML(LCAR_FORMAT_1_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(LCAR_FORMAT_1_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(LCAR_FORMAT_1_DATA))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_l_t_a_g_test.py b/Tests/ttLib/tables/_l_t_a_g_test.py
index 29119903..aa73cde6 100644
--- a/Tests/ttLib/tables/_l_t_a_g_test.py
+++ b/Tests/ttLib/tables/_l_t_a_g_test.py
@@ -7,55 +7,64 @@ from fontTools.ttLib import newTable
class Test_l_t_a_g(unittest.TestCase):
+ DATA_ = (
+ struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2)
+ + b"enzh-Hant"
+ )
+ TAGS_ = ["en", "zh-Hant", "zh"]
- DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant"
- TAGS_ = ["en", "zh-Hant", "zh"]
-
- def test_addTag(self):
- table = newTable("ltag")
- self.assertEqual(table.addTag("de-CH"), 0)
- self.assertEqual(table.addTag("gsw-LI"), 1)
- self.assertEqual(table.addTag("de-CH"), 0)
- self.assertEqual(table.tags, ["de-CH", "gsw-LI"])
-
- def test_decompile_compile(self):
- table = newTable("ltag")
- table.decompile(self.DATA_, ttFont=None)
- self.assertEqual(1, table.version)
- self.assertEqual(0, table.flags)
- self.assertEqual(self.TAGS_, table.tags)
- compiled = table.compile(ttFont=None)
- self.assertEqual(self.DATA_, compiled)
- self.assertIsInstance(compiled, bytes)
-
- def test_fromXML(self):
- table = newTable("ltag")
- for name, attrs, content in parseXML(
- '<version value="1"/>'
- '<flags value="777"/>'
- '<LanguageTag tag="sr-Latn"/>'
- '<LanguageTag tag="fa"/>'):
- table.fromXML(name, attrs, content, ttFont=None)
- self.assertEqual(1, table.version)
- self.assertEqual(777, table.flags)
- self.assertEqual(["sr-Latn", "fa"], table.tags)
-
- def test_toXML(self):
- writer = XMLWriter(BytesIO())
- table = newTable("ltag")
- table.decompile(self.DATA_, ttFont=None)
- table.toXML(writer, ttFont=None)
- expected = "\n".join([
- '<?xml version="1.0" encoding="UTF-8"?>',
- '<version value="1"/>',
- '<flags value="0"/>',
- '<LanguageTag tag="en"/>',
- '<LanguageTag tag="zh-Hant"/>',
- '<LanguageTag tag="zh"/>'
- ]) + "\n"
- self.assertEqual(expected.encode("utf_8"), writer.file.getvalue())
-
-
-if __name__ == '__main__':
- import sys
- sys.exit(unittest.main())
+ def test_addTag(self):
+ table = newTable("ltag")
+ self.assertEqual(table.addTag("de-CH"), 0)
+ self.assertEqual(table.addTag("gsw-LI"), 1)
+ self.assertEqual(table.addTag("de-CH"), 0)
+ self.assertEqual(table.tags, ["de-CH", "gsw-LI"])
+
+ def test_decompile_compile(self):
+ table = newTable("ltag")
+ table.decompile(self.DATA_, ttFont=None)
+ self.assertEqual(1, table.version)
+ self.assertEqual(0, table.flags)
+ self.assertEqual(self.TAGS_, table.tags)
+ compiled = table.compile(ttFont=None)
+ self.assertEqual(self.DATA_, compiled)
+ self.assertIsInstance(compiled, bytes)
+
+ def test_fromXML(self):
+ table = newTable("ltag")
+ for name, attrs, content in parseXML(
+ '<version value="1"/>'
+ '<flags value="777"/>'
+ '<LanguageTag tag="sr-Latn"/>'
+ '<LanguageTag tag="fa"/>'
+ ):
+ table.fromXML(name, attrs, content, ttFont=None)
+ self.assertEqual(1, table.version)
+ self.assertEqual(777, table.flags)
+ self.assertEqual(["sr-Latn", "fa"], table.tags)
+
+ def test_toXML(self):
+ writer = XMLWriter(BytesIO())
+ table = newTable("ltag")
+ table.decompile(self.DATA_, ttFont=None)
+ table.toXML(writer, ttFont=None)
+ expected = (
+ "\n".join(
+ [
+ '<?xml version="1.0" encoding="UTF-8"?>',
+ '<version value="1"/>',
+ '<flags value="0"/>',
+ '<LanguageTag tag="en"/>',
+ '<LanguageTag tag="zh-Hant"/>',
+ '<LanguageTag tag="zh"/>',
+ ]
+ )
+ + "\n"
+ )
+ self.assertEqual(expected.encode("utf_8"), writer.file.getvalue())
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_m_e_t_a_test.py b/Tests/ttLib/tables/_m_e_t_a_test.py
index f05ff576..e96c492c 100644
--- a/Tests/ttLib/tables/_m_e_t_a_test.py
+++ b/Tests/ttLib/tables/_m_e_t_a_test.py
@@ -11,7 +11,8 @@ import unittest
# and shortened the payload.
META_DATA = deHexStr(
"00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 "
- "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF")
+ "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF"
+)
# The 'dlng' and 'slng' tag with text data containing "augmented" BCP 47
# comma-separated or comma-space-separated tags. These should be UTF-8 encoded
@@ -21,7 +22,9 @@ META_DATA_TEXT = deHexStr(
"64 6C 6E 67 00 00 00 28 00 00 00 0E 73 6C 6E 67 "
"00 00 00 36 00 00 00 0E 4C 61 74 6E 2C 47 72 65 "
"6B 2C 43 79 72 6C 4C 61 74 6E 2C 47 72 65 6B 2C "
- "43 79 72 6C")
+ "43 79 72 6C"
+)
+
class MetaTableTest(unittest.TestCase):
def test_decompile(self):
@@ -37,13 +40,14 @@ class MetaTableTest(unittest.TestCase):
def test_decompile_text(self):
table = table__m_e_t_a()
table.decompile(META_DATA_TEXT, ttFont={"meta": table})
- self.assertEqual({"dlng": u"Latn,Grek,Cyrl",
- "slng": u"Latn,Grek,Cyrl"}, table.data)
+ self.assertEqual(
+ {"dlng": "Latn,Grek,Cyrl", "slng": "Latn,Grek,Cyrl"}, table.data
+ )
def test_compile_text(self):
table = table__m_e_t_a()
- table.data["dlng"] = u"Latn,Grek,Cyrl"
- table.data["slng"] = u"Latn,Grek,Cyrl"
+ table.data["dlng"] = "Latn,Grek,Cyrl"
+ table.data["slng"] = "Latn,Grek,Cyrl"
self.assertEqual(META_DATA_TEXT, table.compile(ttFont={"meta": table}))
def test_toXML(self):
@@ -52,11 +56,10 @@ class MetaTableTest(unittest.TestCase):
writer = XMLWriter(BytesIO())
table.toXML(writer, {"meta": table})
xml = writer.file.getvalue().decode("utf-8")
- self.assertEqual([
- '<hexdata tag="TEST">',
- 'cafebeef',
- '</hexdata>'
- ], [line.strip() for line in xml.splitlines()][1:])
+ self.assertEqual(
+ ['<hexdata tag="TEST">', "cafebeef", "</hexdata>"],
+ [line.strip() for line in xml.splitlines()][1:],
+ )
def test_toXML_ascii_data(self):
table = table__m_e_t_a()
@@ -64,44 +67,45 @@ class MetaTableTest(unittest.TestCase):
writer = XMLWriter(BytesIO())
table.toXML(writer, {"meta": table})
xml = writer.file.getvalue().decode("utf-8")
- self.assertEqual([
- '<hexdata tag="TEST">',
- '<!-- ascii: Hello! -->',
- '48656c6c 6f21',
- '</hexdata>'
- ], [line.strip() for line in xml.splitlines()][1:])
+ self.assertEqual(
+ [
+ '<hexdata tag="TEST">',
+ "<!-- ascii: Hello! -->",
+ "48656c6c 6f21",
+ "</hexdata>",
+ ],
+ [line.strip() for line in xml.splitlines()][1:],
+ )
def test_fromXML(self):
table = table__m_e_t_a()
for name, attrs, content in parseXML(
- '<hexdata tag="TEST">'
- ' cafebeef'
- '</hexdata>'):
+ '<hexdata tag="TEST">' " cafebeef" "</hexdata>"
+ ):
table.fromXML(name, attrs, content, ttFont=None)
self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data)
def test_toXML_text(self):
table = table__m_e_t_a()
- table.data["dlng"] = u"Latn,Grek,Cyrl"
+ table.data["dlng"] = "Latn,Grek,Cyrl"
writer = XMLWriter(BytesIO())
table.toXML(writer, {"meta": table})
xml = writer.file.getvalue().decode("utf-8")
- self.assertEqual([
- '<text tag="dlng">',
- 'Latn,Grek,Cyrl',
- '</text>'
- ], [line.strip() for line in xml.splitlines()][1:])
+ self.assertEqual(
+ ['<text tag="dlng">', "Latn,Grek,Cyrl", "</text>"],
+ [line.strip() for line in xml.splitlines()][1:],
+ )
def test_fromXML_text(self):
table = table__m_e_t_a()
for name, attrs, content in parseXML(
- '<text tag="dlng">'
- ' Latn,Grek,Cyrl'
- '</text>'):
+ '<text tag="dlng">' " Latn,Grek,Cyrl" "</text>"
+ ):
table.fromXML(name, attrs, content, ttFont=None)
- self.assertEqual({"dlng": u"Latn,Grek,Cyrl"}, table.data)
+ self.assertEqual({"dlng": "Latn,Grek,Cyrl"}, table.data)
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_m_o_r_t_test.py b/Tests/ttLib/tables/_m_o_r_t_test.py
index 3e7169be..fb711158 100644
--- a/Tests/ttLib/tables/_m_o_r_t_test.py
+++ b/Tests/ttLib/tables/_m_o_r_t_test.py
@@ -15,99 +15,100 @@ import unittest
# character; the non-contiguous glyph range for the AAT lookup makes
# format 6 to be most compact.
MORT_NONCONTEXTUAL_DATA = deHexStr(
- '0001 0000 ' # 0: Version=1.0
- '0000 0001 ' # 4: MorphChainCount=1
- '0000 0001 ' # 8: DefaultFlags=1
- '0000 0050 ' # 12: StructLength=80
- '0003 0001 ' # 16: MorphFeatureCount=3, MorphSubtableCount=1
- '0004 0000 ' # 20: Feature[0].FeatureType=4/VertSubst, .FeatureSetting=on
- '0000 0001 ' # 24: Feature[0].EnableFlags=0x00000001
- 'FFFF FFFF ' # 28: Feature[0].DisableFlags=0xFFFFFFFF
- '0004 0001 ' # 32: Feature[1].FeatureType=4/VertSubst, .FeatureSetting=off
- '0000 0000 ' # 36: Feature[1].EnableFlags=0x00000000
- 'FFFF FFFE ' # 40: Feature[1].DisableFlags=0xFFFFFFFE
- '0000 0001 ' # 44: Feature[2].FeatureType=0/GlyphEffects, .FeatSetting=off
- '0000 0000 ' # 48: Feature[2].EnableFlags=0 (required for last feature)
- '0000 0000 ' # 52: Feature[2].EnableFlags=0 (required for last feature)
- '0020 ' # 56: Subtable[0].StructLength=32
- '80 ' # 58: Subtable[0].CoverageFlags=0x80
- '04 ' # 59: Subtable[0].MorphType=4/NoncontextualMorph
- '0000 0001 ' # 60: Subtable[0].SubFeatureFlags=0x1
- '0006 0004 ' # 64: LookupFormat=6, UnitSize=4
- '0002 0008 ' # 68: NUnits=2, SearchRange=8
- '0001 0000 ' # 72: EntrySelector=1, RangeShift=0
- '000B 0087 ' # 76: Glyph=11 (parenleft); Value=135 (parenleft.vertical)
- '000D 0088 ' # 80: Glyph=13 (parenright); Value=136 (parenright.vertical)
- 'FFFF 0000 ' # 84: Glyph=<end>; Value=0
-) # 88: <end>
+ "0001 0000 " # 0: Version=1.0
+ "0000 0001 " # 4: MorphChainCount=1
+ "0000 0001 " # 8: DefaultFlags=1
+ "0000 0050 " # 12: StructLength=80
+ "0003 0001 " # 16: MorphFeatureCount=3, MorphSubtableCount=1
+ "0004 0000 " # 20: Feature[0].FeatureType=4/VertSubst, .FeatureSetting=on
+ "0000 0001 " # 24: Feature[0].EnableFlags=0x00000001
+ "FFFF FFFF " # 28: Feature[0].DisableFlags=0xFFFFFFFF
+ "0004 0001 " # 32: Feature[1].FeatureType=4/VertSubst, .FeatureSetting=off
+ "0000 0000 " # 36: Feature[1].EnableFlags=0x00000000
+ "FFFF FFFE " # 40: Feature[1].DisableFlags=0xFFFFFFFE
+ "0000 0001 " # 44: Feature[2].FeatureType=0/GlyphEffects, .FeatSetting=off
+ "0000 0000 " # 48: Feature[2].EnableFlags=0 (required for last feature)
+ "0000 0000 " # 52: Feature[2].EnableFlags=0 (required for last feature)
+ "0020 " # 56: Subtable[0].StructLength=32
+ "80 " # 58: Subtable[0].CoverageFlags=0x80
+ "04 " # 59: Subtable[0].MorphType=4/NoncontextualMorph
+ "0000 0001 " # 60: Subtable[0].SubFeatureFlags=0x1
+ "0006 0004 " # 64: LookupFormat=6, UnitSize=4
+ "0002 0008 " # 68: NUnits=2, SearchRange=8
+ "0001 0000 " # 72: EntrySelector=1, RangeShift=0
+ "000B 0087 " # 76: Glyph=11 (parenleft); Value=135 (parenleft.vertical)
+ "000D 0088 " # 80: Glyph=13 (parenright); Value=136 (parenright.vertical)
+ "FFFF 0000 " # 84: Glyph=<end>; Value=0
+) # 88: <end>
assert len(MORT_NONCONTEXTUAL_DATA) == 88
MORT_NONCONTEXTUAL_XML = [
'<Version value="0x00010000"/>',
- '<!-- MorphChainCount=1 -->',
+ "<!-- MorphChainCount=1 -->",
'<MorphChain index="0">',
' <DefaultFlags value="0x00000001"/>',
- ' <!-- StructLength=80 -->',
- ' <!-- MorphFeatureCount=3 -->',
- ' <!-- MorphSubtableCount=1 -->',
+ " <!-- StructLength=80 -->",
+ " <!-- MorphFeatureCount=3 -->",
+ " <!-- MorphSubtableCount=1 -->",
' <MorphFeature index="0">',
' <FeatureType value="4"/>',
' <FeatureSetting value="0"/>',
' <EnableFlags value="0x00000001"/>',
' <DisableFlags value="0xFFFFFFFF"/>',
- ' </MorphFeature>',
+ " </MorphFeature>",
' <MorphFeature index="1">',
' <FeatureType value="4"/>',
' <FeatureSetting value="1"/>',
' <EnableFlags value="0x00000000"/>',
' <DisableFlags value="0xFFFFFFFE"/>',
- ' </MorphFeature>',
+ " </MorphFeature>",
' <MorphFeature index="2">',
' <FeatureType value="0"/>',
' <FeatureSetting value="1"/>',
' <EnableFlags value="0x00000000"/>',
' <DisableFlags value="0x00000000"/>',
- ' </MorphFeature>',
+ " </MorphFeature>",
' <MorphSubtable index="0">',
- ' <!-- StructLength=32 -->',
+ " <!-- StructLength=32 -->",
' <CoverageFlags value="128"/>',
- ' <!-- MorphType=4 -->',
+ " <!-- MorphType=4 -->",
' <SubFeatureFlags value="0x00000001"/>',
- ' <NoncontextualMorph>',
- ' <Substitution>',
+ " <NoncontextualMorph>",
+ " <Substitution>",
' <Lookup glyph="parenleft" value="parenleft.vertical"/>',
' <Lookup glyph="parenright" value="parenright.vertical"/>',
- ' </Substitution>',
- ' </NoncontextualMorph>',
- ' </MorphSubtable>',
- '</MorphChain>',
+ " </Substitution>",
+ " </NoncontextualMorph>",
+ " </MorphSubtable>",
+ "</MorphChain>",
]
class MORTNoncontextualGlyphSubstitutionTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- glyphs = ['.notdef'] + ['g.%d' % i for i in range (1, 140)]
- glyphs[11], glyphs[13] = 'parenleft', 'parenright'
- glyphs[135], glyphs[136] = 'parenleft.vertical', 'parenright.vertical'
+ glyphs = [".notdef"] + ["g.%d" % i for i in range(1, 140)]
+ glyphs[11], glyphs[13] = "parenleft", "parenright"
+ glyphs[135], glyphs[136] = "parenleft.vertical", "parenright.vertical"
cls.font = FakeFont(glyphs)
def test_decompile_toXML(self):
- table = newTable('mort')
+ table = newTable("mort")
table.decompile(MORT_NONCONTEXTUAL_DATA, self.font)
self.assertEqual(getXML(table.toXML), MORT_NONCONTEXTUAL_XML)
def test_compile_fromXML(self):
- table = newTable('mort')
+ table = newTable("mort")
for name, attrs, content in parseXML(MORT_NONCONTEXTUAL_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(MORT_NONCONTEXTUAL_DATA))
+ self.assertEqual(
+ hexStr(table.compile(self.font)), hexStr(MORT_NONCONTEXTUAL_DATA)
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_m_o_r_x_test.py b/Tests/ttLib/tables/_m_o_r_x_test.py
index d65619ca..eae3efc0 100644
--- a/Tests/ttLib/tables/_m_o_r_x_test.py
+++ b/Tests/ttLib/tables/_m_o_r_x_test.py
@@ -9,131 +9,131 @@ import unittest
# The test case has therefore been adapted from the example 'mort' table in
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html
MORX_NONCONTEXTUAL_DATA = deHexStr(
- '0002 0000 ' # 0: Version=2, Reserved=0
- '0000 0001 ' # 4: MorphChainCount=1
- '0000 0001 ' # 8: DefaultFlags=1
- '0000 0058 ' # 12: StructLength=88
- '0000 0003 ' # 16: MorphFeatureCount=3
- '0000 0001 ' # 20: MorphSubtableCount=1
- '0004 0000 ' # 24: Feature[0].FeatureType=4/VertSubst, .FeatureSetting=on
- '0000 0001 ' # 28: Feature[0].EnableFlags=0x00000001
- 'FFFF FFFF ' # 32: Feature[0].DisableFlags=0xFFFFFFFF
- '0004 0001 ' # 36: Feature[1].FeatureType=4/VertSubst, .FeatureSetting=off
- '0000 0000 ' # 40: Feature[1].EnableFlags=0x00000000
- 'FFFF FFFE ' # 44: Feature[1].DisableFlags=0xFFFFFFFE
- '0000 0001 ' # 48: Feature[2].FeatureType=0/GlyphEffects, .FeatSetting=off
- '0000 0000 ' # 52: Feature[2].EnableFlags=0 (required for last feature)
- '0000 0000 ' # 56: Feature[2].EnableFlags=0 (required for last feature)
- '0000 0024 ' # 60: Subtable[0].StructLength=36
- '80 ' # 64: Subtable[0].CoverageFlags=0x80
- '00 00 ' # 65: Subtable[0].Reserved=0
- '04 ' # 67: Subtable[0].MorphType=4/NoncontextualMorph
- '0000 0001 ' # 68: Subtable[0].SubFeatureFlags=0x1
- '0006 0004 ' # 72: LookupFormat=6, UnitSize=4
- '0002 0008 ' # 76: NUnits=2, SearchRange=8
- '0001 0000 ' # 80: EntrySelector=1, RangeShift=0
- '000B 0087 ' # 84: Glyph=11 (parenleft); Value=135 (parenleft.vertical)
- '000D 0088 ' # 88: Glyph=13 (parenright); Value=136 (parenright.vertical)
- 'FFFF 0000 ' # 92: Glyph=<end>; Value=0
-) # 96: <end>
+ "0002 0000 " # 0: Version=2, Reserved=0
+ "0000 0001 " # 4: MorphChainCount=1
+ "0000 0001 " # 8: DefaultFlags=1
+ "0000 0058 " # 12: StructLength=88
+ "0000 0003 " # 16: MorphFeatureCount=3
+ "0000 0001 " # 20: MorphSubtableCount=1
+ "0004 0000 " # 24: Feature[0].FeatureType=4/VertSubst, .FeatureSetting=on
+ "0000 0001 " # 28: Feature[0].EnableFlags=0x00000001
+ "FFFF FFFF " # 32: Feature[0].DisableFlags=0xFFFFFFFF
+ "0004 0001 " # 36: Feature[1].FeatureType=4/VertSubst, .FeatureSetting=off
+ "0000 0000 " # 40: Feature[1].EnableFlags=0x00000000
+ "FFFF FFFE " # 44: Feature[1].DisableFlags=0xFFFFFFFE
+ "0000 0001 " # 48: Feature[2].FeatureType=0/GlyphEffects, .FeatSetting=off
+ "0000 0000 " # 52: Feature[2].EnableFlags=0 (required for last feature)
+ "0000 0000 " # 56: Feature[2].EnableFlags=0 (required for last feature)
+ "0000 0024 " # 60: Subtable[0].StructLength=36
+ "80 " # 64: Subtable[0].CoverageFlags=0x80
+ "00 00 " # 65: Subtable[0].Reserved=0
+ "04 " # 67: Subtable[0].MorphType=4/NoncontextualMorph
+ "0000 0001 " # 68: Subtable[0].SubFeatureFlags=0x1
+ "0006 0004 " # 72: LookupFormat=6, UnitSize=4
+ "0002 0008 " # 76: NUnits=2, SearchRange=8
+ "0001 0000 " # 80: EntrySelector=1, RangeShift=0
+ "000B 0087 " # 84: Glyph=11 (parenleft); Value=135 (parenleft.vertical)
+ "000D 0088 " # 88: Glyph=13 (parenright); Value=136 (parenright.vertical)
+ "FFFF 0000 " # 92: Glyph=<end>; Value=0
+) # 96: <end>
assert len(MORX_NONCONTEXTUAL_DATA) == 96
MORX_NONCONTEXTUAL_XML = [
'<Version value="2"/>',
'<Reserved value="0"/>',
- '<!-- MorphChainCount=1 -->',
+ "<!-- MorphChainCount=1 -->",
'<MorphChain index="0">',
' <DefaultFlags value="0x00000001"/>',
- ' <!-- StructLength=88 -->',
- ' <!-- MorphFeatureCount=3 -->',
- ' <!-- MorphSubtableCount=1 -->',
+ " <!-- StructLength=88 -->",
+ " <!-- MorphFeatureCount=3 -->",
+ " <!-- MorphSubtableCount=1 -->",
' <MorphFeature index="0">',
' <FeatureType value="4"/>',
' <FeatureSetting value="0"/>',
' <EnableFlags value="0x00000001"/>',
' <DisableFlags value="0xFFFFFFFF"/>',
- ' </MorphFeature>',
+ " </MorphFeature>",
' <MorphFeature index="1">',
' <FeatureType value="4"/>',
' <FeatureSetting value="1"/>',
' <EnableFlags value="0x00000000"/>',
' <DisableFlags value="0xFFFFFFFE"/>',
- ' </MorphFeature>',
+ " </MorphFeature>",
' <MorphFeature index="2">',
' <FeatureType value="0"/>',
' <FeatureSetting value="1"/>',
' <EnableFlags value="0x00000000"/>',
' <DisableFlags value="0x00000000"/>',
- ' </MorphFeature>',
+ " </MorphFeature>",
' <MorphSubtable index="0">',
- ' <!-- StructLength=36 -->',
+ " <!-- StructLength=36 -->",
' <TextDirection value="Vertical"/>',
' <ProcessingOrder value="LayoutOrder"/>',
- ' <!-- MorphType=4 -->',
+ " <!-- MorphType=4 -->",
' <SubFeatureFlags value="0x00000001"/>',
- ' <NoncontextualMorph>',
- ' <Substitution>',
+ " <NoncontextualMorph>",
+ " <Substitution>",
' <Lookup glyph="parenleft" value="parenleft.vertical"/>',
' <Lookup glyph="parenright" value="parenright.vertical"/>',
- ' </Substitution>',
- ' </NoncontextualMorph>',
- ' </MorphSubtable>',
- '</MorphChain>',
+ " </Substitution>",
+ " </NoncontextualMorph>",
+ " </MorphSubtable>",
+ "</MorphChain>",
]
MORX_REARRANGEMENT_DATA = deHexStr(
- '0002 0000 ' # 0: Version=2, Reserved=0
- '0000 0001 ' # 4: MorphChainCount=1
- '0000 0001 ' # 8: DefaultFlags=1
- '0000 0078 ' # 12: StructLength=120 (+8=128)
- '0000 0000 ' # 16: MorphFeatureCount=0
- '0000 0001 ' # 20: MorphSubtableCount=1
- '0000 0068 ' # 24: Subtable[0].StructLength=104 (+24=128)
- '80 ' # 28: Subtable[0].CoverageFlags=0x80
- '00 00 ' # 29: Subtable[0].Reserved=0
- '00 ' # 31: Subtable[0].MorphType=0/RearrangementMorph
- '0000 0001 ' # 32: Subtable[0].SubFeatureFlags=0x1
- '0000 0006 ' # 36: STXHeader.ClassCount=6
- '0000 0010 ' # 40: STXHeader.ClassTableOffset=16 (+36=52)
- '0000 0028 ' # 44: STXHeader.StateArrayOffset=40 (+36=76)
- '0000 004C ' # 48: STXHeader.EntryTableOffset=76 (+36=112)
- '0006 0004 ' # 52: ClassTable.LookupFormat=6, .UnitSize=4
- '0002 0008 ' # 56: .NUnits=2, .SearchRange=8
- '0001 0000 ' # 60: .EntrySelector=1, .RangeShift=0
- '0001 0005 ' # 64: Glyph=A; Class=5
- '0003 0004 ' # 68: Glyph=C; Class=4
- 'FFFF 0000 ' # 72: Glyph=<end>; Value=0
- '0000 0001 0002 0003 0002 0001 ' # 76: State[0][0..5]
- '0003 0003 0003 0003 0003 0003 ' # 88: State[1][0..5]
- '0001 0003 0003 0003 0002 0002 ' # 100: State[2][0..5]
- '0002 FFFF ' # 112: Entries[0].NewState=2, .Flags=0xFFFF
- '0001 A00D ' # 116: Entries[1].NewState=1, .Flags=0xA00D
- '0000 8006 ' # 120: Entries[2].NewState=0, .Flags=0x8006
- '0002 0000 ' # 124: Entries[3].NewState=2, .Flags=0x0000
-) # 128: <end>
+ "0002 0000 " # 0: Version=2, Reserved=0
+ "0000 0001 " # 4: MorphChainCount=1
+ "0000 0001 " # 8: DefaultFlags=1
+ "0000 0078 " # 12: StructLength=120 (+8=128)
+ "0000 0000 " # 16: MorphFeatureCount=0
+ "0000 0001 " # 20: MorphSubtableCount=1
+ "0000 0068 " # 24: Subtable[0].StructLength=104 (+24=128)
+ "80 " # 28: Subtable[0].CoverageFlags=0x80
+ "00 00 " # 29: Subtable[0].Reserved=0
+ "00 " # 31: Subtable[0].MorphType=0/RearrangementMorph
+ "0000 0001 " # 32: Subtable[0].SubFeatureFlags=0x1
+ "0000 0006 " # 36: STXHeader.ClassCount=6
+ "0000 0010 " # 40: STXHeader.ClassTableOffset=16 (+36=52)
+ "0000 0028 " # 44: STXHeader.StateArrayOffset=40 (+36=76)
+ "0000 004C " # 48: STXHeader.EntryTableOffset=76 (+36=112)
+ "0006 0004 " # 52: ClassTable.LookupFormat=6, .UnitSize=4
+ "0002 0008 " # 56: .NUnits=2, .SearchRange=8
+ "0001 0000 " # 60: .EntrySelector=1, .RangeShift=0
+ "0001 0005 " # 64: Glyph=A; Class=5
+ "0003 0004 " # 68: Glyph=C; Class=4
+ "FFFF 0000 " # 72: Glyph=<end>; Value=0
+ "0000 0001 0002 0003 0002 0001 " # 76: State[0][0..5]
+ "0003 0003 0003 0003 0003 0003 " # 88: State[1][0..5]
+ "0001 0003 0003 0003 0002 0002 " # 100: State[2][0..5]
+ "0002 FFFF " # 112: Entries[0].NewState=2, .Flags=0xFFFF
+ "0001 A00D " # 116: Entries[1].NewState=1, .Flags=0xA00D
+ "0000 8006 " # 120: Entries[2].NewState=0, .Flags=0x8006
+ "0002 0000 " # 124: Entries[3].NewState=2, .Flags=0x0000
+) # 128: <end>
assert len(MORX_REARRANGEMENT_DATA) == 128, len(MORX_REARRANGEMENT_DATA)
MORX_REARRANGEMENT_XML = [
'<Version value="2"/>',
'<Reserved value="0"/>',
- '<!-- MorphChainCount=1 -->',
+ "<!-- MorphChainCount=1 -->",
'<MorphChain index="0">',
' <DefaultFlags value="0x00000001"/>',
- ' <!-- StructLength=120 -->',
- ' <!-- MorphFeatureCount=0 -->',
- ' <!-- MorphSubtableCount=1 -->',
+ " <!-- StructLength=120 -->",
+ " <!-- MorphFeatureCount=0 -->",
+ " <!-- MorphSubtableCount=1 -->",
' <MorphSubtable index="0">',
- ' <!-- StructLength=104 -->',
+ " <!-- StructLength=104 -->",
' <TextDirection value="Vertical"/>',
' <ProcessingOrder value="LayoutOrder"/>',
- ' <!-- MorphType=0 -->',
+ " <!-- MorphType=0 -->",
' <SubFeatureFlags value="0x00000001"/>',
- ' <RearrangementMorph>',
- ' <StateTable>',
- ' <!-- GlyphClassCount=6 -->',
+ " <RearrangementMorph>",
+ " <StateTable>",
+ " <!-- GlyphClassCount=6 -->",
' <GlyphClass glyph="A" value="5"/>',
' <GlyphClass glyph="C" value="4"/>',
' <State index="0">',
@@ -142,91 +142,91 @@ MORX_REARRANGEMENT_XML = [
' <Flags value="MarkFirst,DontAdvance,MarkLast"/>',
' <ReservedFlags value="0x1FF0"/>',
' <Verb value="15"/><!-- ABxCD ⇒ DCxBA -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="1"/>',
' <Flags value="MarkFirst,MarkLast"/>',
' <Verb value="13"/><!-- ABxCD ⇒ CDxBA -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
' <Flags value="MarkFirst"/>',
' <Verb value="6"/><!-- xCD ⇒ CDx -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="0"/>',
' <Flags value="MarkFirst"/>',
' <Verb value="6"/><!-- xCD ⇒ CDx -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="1"/>',
' <Flags value="MarkFirst,MarkLast"/>',
' <Verb value="13"/><!-- ABxCD ⇒ CDxBA -->',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="1">',
' <Transition onGlyphClass="0">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="2">',
' <Transition onGlyphClass="0">',
' <NewState value="1"/>',
' <Flags value="MarkFirst,MarkLast"/>',
' <Verb value="13"/><!-- ABxCD ⇒ CDxBA -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="2"/>',
' <Verb value="0"/><!-- no change -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="0"/>',
' <Flags value="MarkFirst"/>',
' <Verb value="6"/><!-- xCD ⇒ CDx -->',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="0"/>',
' <Flags value="MarkFirst"/>',
' <Verb value="6"/><!-- xCD ⇒ CDx -->',
- ' </Transition>',
- ' </State>',
- ' </StateTable>',
- ' </RearrangementMorph>',
- ' </MorphSubtable>',
- '</MorphChain>',
+ " </Transition>",
+ " </State>",
+ " </StateTable>",
+ " </RearrangementMorph>",
+ " </MorphSubtable>",
+ "</MorphChain>",
]
@@ -266,83 +266,77 @@ MORX_REARRANGEMENT_XML = [
#
# TODO: Ask Apple to fix “Example 1” in the ‘morx’ specification.
MORX_CONTEXTUAL_DATA = deHexStr(
- '0002 0000 ' # 0: Version=2, Reserved=0
- '0000 0001 ' # 4: MorphChainCount=1
- '0000 0001 ' # 8: DefaultFlags=1
- '0000 00B4 ' # 12: StructLength=180 (+8=188)
- '0000 0000 ' # 16: MorphFeatureCount=0
- '0000 0001 ' # 20: MorphSubtableCount=1
- '0000 00A4 ' # 24: Subtable[0].StructLength=164 (+24=188)
- '80 ' # 28: Subtable[0].CoverageFlags=0x80
- '00 00 ' # 29: Subtable[0].Reserved=0
- '01 ' # 31: Subtable[0].MorphType=1/ContextualMorph
- '0000 0001 ' # 32: Subtable[0].SubFeatureFlags=0x1
- '0000 0006 ' # 36: STXHeader.ClassCount=6
- '0000 0014 ' # 40: STXHeader.ClassTableOffset=20 (+36=56)
- '0000 0038 ' # 44: STXHeader.StateArrayOffset=56 (+36=92)
- '0000 005C ' # 48: STXHeader.EntryTableOffset=92 (+36=128)
- '0000 0074 ' # 52: STXHeader.PerGlyphTableOffset=116 (+36=152)
-
+ "0002 0000 " # 0: Version=2, Reserved=0
+ "0000 0001 " # 4: MorphChainCount=1
+ "0000 0001 " # 8: DefaultFlags=1
+ "0000 00B4 " # 12: StructLength=180 (+8=188)
+ "0000 0000 " # 16: MorphFeatureCount=0
+ "0000 0001 " # 20: MorphSubtableCount=1
+ "0000 00A4 " # 24: Subtable[0].StructLength=164 (+24=188)
+ "80 " # 28: Subtable[0].CoverageFlags=0x80
+ "00 00 " # 29: Subtable[0].Reserved=0
+ "01 " # 31: Subtable[0].MorphType=1/ContextualMorph
+ "0000 0001 " # 32: Subtable[0].SubFeatureFlags=0x1
+ "0000 0006 " # 36: STXHeader.ClassCount=6
+ "0000 0014 " # 40: STXHeader.ClassTableOffset=20 (+36=56)
+ "0000 0038 " # 44: STXHeader.StateArrayOffset=56 (+36=92)
+ "0000 005C " # 48: STXHeader.EntryTableOffset=92 (+36=128)
+ "0000 0074 " # 52: STXHeader.PerGlyphTableOffset=116 (+36=152)
# Glyph class table.
- '0006 0004 ' # 56: ClassTable.LookupFormat=6, .UnitSize=4
- '0005 0010 ' # 60: .NUnits=5, .SearchRange=16
- '0002 0004 ' # 64: .EntrySelector=2, .RangeShift=4
- '0032 0004 ' # 68: Glyph=50; Class=4
- '0034 0004 ' # 72: Glyph=52; Class=4
- '0050 0005 ' # 76: Glyph=80; Class=5
- '00C9 0004 ' # 80: Glyph=201; Class=4
- '00CA 0004 ' # 84: Glyph=202; Class=4
- 'FFFF 0000 ' # 88: Glyph=<end>; Value=<filler>
-
+ "0006 0004 " # 56: ClassTable.LookupFormat=6, .UnitSize=4
+ "0005 0010 " # 60: .NUnits=5, .SearchRange=16
+ "0002 0004 " # 64: .EntrySelector=2, .RangeShift=4
+ "0032 0004 " # 68: Glyph=50; Class=4
+ "0034 0004 " # 72: Glyph=52; Class=4
+ "0050 0005 " # 76: Glyph=80; Class=5
+ "00C9 0004 " # 80: Glyph=201; Class=4
+ "00CA 0004 " # 84: Glyph=202; Class=4
+ "FFFF 0000 " # 88: Glyph=<end>; Value=<filler>
# State array.
- '0000 0000 0000 0000 0000 0001 ' # 92: State[0][0..5]
- '0000 0000 0000 0000 0000 0001 ' # 104: State[1][0..5]
- '0000 0000 0000 0000 0002 0001 ' # 116: State[2][0..5]
-
+ "0000 0000 0000 0000 0000 0001 " # 92: State[0][0..5]
+ "0000 0000 0000 0000 0000 0001 " # 104: State[1][0..5]
+ "0000 0000 0000 0000 0002 0001 " # 116: State[2][0..5]
# Entry table.
- '0000 0000 ' # 128: Entries[0].NewState=0, .Flags=0
- 'FFFF FFFF ' # 132: Entries[0].MarkSubst=None, .CurSubst=None
- '0002 0000 ' # 136: Entries[1].NewState=2, .Flags=0
- 'FFFF FFFF ' # 140: Entries[1].MarkSubst=None, .CurSubst=None
- '0000 0000 ' # 144: Entries[2].NewState=0, .Flags=0
- 'FFFF 0000 ' # 148: Entries[2].MarkSubst=None, .CurSubst=PerGlyph #0
- # 152: <no padding needed for 4-byte alignment>
-
+ "0000 0000 " # 128: Entries[0].NewState=0, .Flags=0
+ "FFFF FFFF " # 132: Entries[0].MarkSubst=None, .CurSubst=None
+ "0002 0000 " # 136: Entries[1].NewState=2, .Flags=0
+ "FFFF FFFF " # 140: Entries[1].MarkSubst=None, .CurSubst=None
+ "0000 0000 " # 144: Entries[2].NewState=0, .Flags=0
+ "FFFF 0000 " # 148: Entries[2].MarkSubst=None, .CurSubst=PerGlyph #0
+ # 152: <no padding needed for 4-byte alignment>
# Per-glyph lookup tables.
- '0000 0004 ' # 152: Offset from this point to per-glyph lookup #0.
-
+ "0000 0004 " # 152: Offset from this point to per-glyph lookup #0.
# Per-glyph lookup #0.
- '0006 0004 ' # 156: ClassTable.LookupFormat=6, .UnitSize=4
- '0004 0010 ' # 160: .NUnits=4, .SearchRange=16
- '0002 0000 ' # 164: .EntrySelector=2, .RangeShift=0
- '0032 0258 ' # 168: Glyph=50; ReplacementGlyph=600
- '0034 0259 ' # 172: Glyph=52; ReplacementGlyph=601
- '00C9 025A ' # 176: Glyph=201; ReplacementGlyph=602
- '00CA 0384 ' # 180: Glyph=202; ReplacementGlyph=900
- 'FFFF 0000 ' # 184: Glyph=<end>; Value=<filler>
-
-) # 188: <end>
+ "0006 0004 " # 156: ClassTable.LookupFormat=6, .UnitSize=4
+ "0004 0010 " # 160: .NUnits=4, .SearchRange=16
+ "0002 0000 " # 164: .EntrySelector=2, .RangeShift=0
+ "0032 0258 " # 168: Glyph=50; ReplacementGlyph=600
+ "0034 0259 " # 172: Glyph=52; ReplacementGlyph=601
+ "00C9 025A " # 176: Glyph=201; ReplacementGlyph=602
+ "00CA 0384 " # 180: Glyph=202; ReplacementGlyph=900
+ "FFFF 0000 " # 184: Glyph=<end>; Value=<filler>
+) # 188: <end>
assert len(MORX_CONTEXTUAL_DATA) == 188, len(MORX_CONTEXTUAL_DATA)
MORX_CONTEXTUAL_XML = [
'<Version value="2"/>',
'<Reserved value="0"/>',
- '<!-- MorphChainCount=1 -->',
+ "<!-- MorphChainCount=1 -->",
'<MorphChain index="0">',
' <DefaultFlags value="0x00000001"/>',
- ' <!-- StructLength=180 -->',
- ' <!-- MorphFeatureCount=0 -->',
- ' <!-- MorphSubtableCount=1 -->',
+ " <!-- StructLength=180 -->",
+ " <!-- MorphFeatureCount=0 -->",
+ " <!-- MorphSubtableCount=1 -->",
' <MorphSubtable index="0">',
- ' <!-- StructLength=164 -->',
+ " <!-- StructLength=164 -->",
' <TextDirection value="Vertical"/>',
' <ProcessingOrder value="LayoutOrder"/>',
- ' <!-- MorphType=1 -->',
+ " <!-- MorphType=1 -->",
' <SubFeatureFlags value="0x00000001"/>',
- ' <ContextualMorph>',
- ' <StateTable>',
- ' <!-- GlyphClassCount=6 -->',
+ " <ContextualMorph>",
+ " <StateTable>",
+ " <!-- GlyphClassCount=6 -->",
' <GlyphClass glyph="A" value="4"/>',
' <GlyphClass glyph="B" value="4"/>',
' <GlyphClass glyph="C" value="5"/>',
@@ -353,107 +347,107 @@ MORX_CONTEXTUAL_XML = [
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="2"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="1">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="2"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="2">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="0"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="2"/>',
' <MarkIndex value="65535"/>',
' <CurrentIndex value="65535"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <PerGlyphLookup index="0">',
' <Lookup glyph="A" value="A.swash"/>',
' <Lookup glyph="B" value="B.swash"/>',
' <Lookup glyph="X" value="X.swash"/>',
' <Lookup glyph="Y" value="Y.swash"/>',
- ' </PerGlyphLookup>',
- ' </StateTable>',
- ' </ContextualMorph>',
- ' </MorphSubtable>',
- '</MorphChain>',
+ " </PerGlyphLookup>",
+ " </StateTable>",
+ " </ContextualMorph>",
+ " </MorphSubtable>",
+ "</MorphChain>",
]
@@ -482,91 +476,84 @@ MORX_CONTEXTUAL_XML = [
#
# TODO: Ask Apple to fix “Example 2” in the ‘morx’ specification.
MORX_LIGATURE_DATA = deHexStr(
- '0002 0000 ' # 0: Version=2, Reserved=0
- '0000 0001 ' # 4: MorphChainCount=1
- '0000 0001 ' # 8: DefaultFlags=1
- '0000 00DA ' # 12: StructLength=218 (+8=226)
- '0000 0000 ' # 16: MorphFeatureCount=0
- '0000 0001 ' # 20: MorphSubtableCount=1
- '0000 00CA ' # 24: Subtable[0].StructLength=202 (+24=226)
- '80 ' # 28: Subtable[0].CoverageFlags=0x80
- '00 00 ' # 29: Subtable[0].Reserved=0
- '02 ' # 31: Subtable[0].MorphType=2/LigatureMorph
- '0000 0001 ' # 32: Subtable[0].SubFeatureFlags=0x1
-
+ "0002 0000 " # 0: Version=2, Reserved=0
+ "0000 0001 " # 4: MorphChainCount=1
+ "0000 0001 " # 8: DefaultFlags=1
+ "0000 00DA " # 12: StructLength=218 (+8=226)
+ "0000 0000 " # 16: MorphFeatureCount=0
+ "0000 0001 " # 20: MorphSubtableCount=1
+ "0000 00CA " # 24: Subtable[0].StructLength=202 (+24=226)
+ "80 " # 28: Subtable[0].CoverageFlags=0x80
+ "00 00 " # 29: Subtable[0].Reserved=0
+ "02 " # 31: Subtable[0].MorphType=2/LigatureMorph
+ "0000 0001 " # 32: Subtable[0].SubFeatureFlags=0x1
# State table header.
- '0000 0007 ' # 36: STXHeader.ClassCount=7
- '0000 001C ' # 40: STXHeader.ClassTableOffset=28 (+36=64)
- '0000 0040 ' # 44: STXHeader.StateArrayOffset=64 (+36=100)
- '0000 0078 ' # 48: STXHeader.EntryTableOffset=120 (+36=156)
- '0000 0090 ' # 52: STXHeader.LigActionsOffset=144 (+36=180)
- '0000 009C ' # 56: STXHeader.LigComponentsOffset=156 (+36=192)
- '0000 00AE ' # 60: STXHeader.LigListOffset=174 (+36=210)
-
+ "0000 0007 " # 36: STXHeader.ClassCount=7
+ "0000 001C " # 40: STXHeader.ClassTableOffset=28 (+36=64)
+ "0000 0040 " # 44: STXHeader.StateArrayOffset=64 (+36=100)
+ "0000 0078 " # 48: STXHeader.EntryTableOffset=120 (+36=156)
+ "0000 0090 " # 52: STXHeader.LigActionsOffset=144 (+36=180)
+ "0000 009C " # 56: STXHeader.LigComponentsOffset=156 (+36=192)
+ "0000 00AE " # 60: STXHeader.LigListOffset=174 (+36=210)
# Glyph class table.
- '0002 0006 ' # 64: ClassTable.LookupFormat=2, .UnitSize=6
- '0003 000C ' # 68: .NUnits=3, .SearchRange=12
- '0001 0006 ' # 72: .EntrySelector=1, .RangeShift=6
- '0016 0014 0004 ' # 76: GlyphID 20..22 [a..c] -> GlyphClass 4
- '0018 0017 0005 ' # 82: GlyphID 23..24 [d..e] -> GlyphClass 5
- '001C 001A 0006 ' # 88: GlyphID 26..28 [g..i] -> GlyphClass 6
- 'FFFF FFFF 0000 ' # 94: <end of lookup>
-
+ "0002 0006 " # 64: ClassTable.LookupFormat=2, .UnitSize=6
+ "0003 000C " # 68: .NUnits=3, .SearchRange=12
+ "0001 0006 " # 72: .EntrySelector=1, .RangeShift=6
+ "0016 0014 0004 " # 76: GlyphID 20..22 [a..c] -> GlyphClass 4
+ "0018 0017 0005 " # 82: GlyphID 23..24 [d..e] -> GlyphClass 5
+ "001C 001A 0006 " # 88: GlyphID 26..28 [g..i] -> GlyphClass 6
+ "FFFF FFFF 0000 " # 94: <end of lookup>
# State array.
- '0000 0000 0000 0000 0001 0000 0000 ' # 100: State[0][0..6]
- '0000 0000 0000 0000 0001 0000 0000 ' # 114: State[1][0..6]
- '0000 0000 0000 0000 0001 0002 0000 ' # 128: State[2][0..6]
- '0000 0000 0000 0000 0001 0002 0003 ' # 142: State[3][0..6]
-
+ "0000 0000 0000 0000 0001 0000 0000 " # 100: State[0][0..6]
+ "0000 0000 0000 0000 0001 0000 0000 " # 114: State[1][0..6]
+ "0000 0000 0000 0000 0001 0002 0000 " # 128: State[2][0..6]
+ "0000 0000 0000 0000 0001 0002 0003 " # 142: State[3][0..6]
# Entry table.
- '0000 0000 ' # 156: Entries[0].NewState=0, .Flags=0
- '0000 ' # 160: Entries[0].ActionIndex=<n/a> because no 0x2000 flag
- '0002 8000 ' # 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent)
- '0000 ' # 166: Entries[1].ActionIndex=<n/a> because no 0x2000 flag
- '0003 8000 ' # 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent)
- '0000 ' # 172: Entries[2].ActionIndex=<n/a> because no 0x2000 flag
- '0000 A000 ' # 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act)
- '0000 ' # 178: Entries[3].ActionIndex=0 (start at Action[0])
-
+ "0000 0000 " # 156: Entries[0].NewState=0, .Flags=0
+ "0000 " # 160: Entries[0].ActionIndex=<n/a> because no 0x2000 flag
+ "0002 8000 " # 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent)
+ "0000 " # 166: Entries[1].ActionIndex=<n/a> because no 0x2000 flag
+ "0003 8000 " # 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent)
+ "0000 " # 172: Entries[2].ActionIndex=<n/a> because no 0x2000 flag
+ "0000 A000 " # 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act)
+ "0000 " # 178: Entries[3].ActionIndex=0 (start at Action[0])
# Ligature actions table.
- '3FFF FFE7 ' # 180: Action[0].Flags=0, .GlyphIndexDelta=-25
- '3FFF FFED ' # 184: Action[1].Flags=0, .GlyphIndexDelta=-19
- 'BFFF FFF2 ' # 188: Action[2].Flags=<end of list>, .GlyphIndexDelta=-14
-
+ "3FFF FFE7 " # 180: Action[0].Flags=0, .GlyphIndexDelta=-25
+ "3FFF FFED " # 184: Action[1].Flags=0, .GlyphIndexDelta=-19
+ "BFFF FFF2 " # 188: Action[2].Flags=<end of list>, .GlyphIndexDelta=-14
# Ligature component table.
- '0000 0001 ' # 192: LigComponent[0]=0, LigComponent[1]=1
- '0002 0003 ' # 196: LigComponent[2]=2, LigComponent[3]=3
- '0000 0004 ' # 200: LigComponent[4]=0, LigComponent[5]=4
- '0000 0008 ' # 204: LigComponent[6]=0, LigComponent[7]=8
- '0010 ' # 208: LigComponent[8]=16
-
+ "0000 0001 " # 192: LigComponent[0]=0, LigComponent[1]=1
+ "0002 0003 " # 196: LigComponent[2]=2, LigComponent[3]=3
+ "0000 0004 " # 200: LigComponent[4]=0, LigComponent[5]=4
+ "0000 0008 " # 204: LigComponent[6]=0, LigComponent[7]=8
+ "0010 " # 208: LigComponent[8]=16
# Ligature list.
- '03E8 03E9 ' # 210: LigList[0]=1000, LigList[1]=1001
- '03EA 03EB ' # 214: LigList[2]=1002, LigList[3]=1003
- '03EC 03ED ' # 218: LigList[4]=1004, LigList[3]=1005
- '03EE 03EF ' # 222: LigList[5]=1006, LigList[6]=1007
-) # 226: <end>
+ "03E8 03E9 " # 210: LigList[0]=1000, LigList[1]=1001
+ "03EA 03EB " # 214: LigList[2]=1002, LigList[3]=1003
+ "03EC 03ED " # 218: LigList[4]=1004, LigList[3]=1005
+ "03EE 03EF " # 222: LigList[5]=1006, LigList[6]=1007
+) # 226: <end>
assert len(MORX_LIGATURE_DATA) == 226, len(MORX_LIGATURE_DATA)
MORX_LIGATURE_XML = [
'<Version value="2"/>',
'<Reserved value="0"/>',
- '<!-- MorphChainCount=1 -->',
+ "<!-- MorphChainCount=1 -->",
'<MorphChain index="0">',
' <DefaultFlags value="0x00000001"/>',
- ' <!-- StructLength=218 -->',
- ' <!-- MorphFeatureCount=0 -->',
- ' <!-- MorphSubtableCount=1 -->',
+ " <!-- StructLength=218 -->",
+ " <!-- MorphFeatureCount=0 -->",
+ " <!-- MorphSubtableCount=1 -->",
' <MorphSubtable index="0">',
- ' <!-- StructLength=202 -->',
+ " <!-- StructLength=202 -->",
' <TextDirection value="Vertical"/>',
' <ProcessingOrder value="LayoutOrder"/>',
- ' <!-- MorphType=2 -->',
+ " <!-- MorphType=2 -->",
' <SubFeatureFlags value="0x00000001"/>',
- ' <LigatureMorph>',
- ' <StateTable>',
- ' <!-- GlyphClassCount=7 -->',
+ " <LigatureMorph>",
+ " <StateTable>",
+ " <!-- GlyphClassCount=7 -->",
' <GlyphClass glyph="a" value="4"/>',
' <GlyphClass glyph="b" value="4"/>',
' <GlyphClass glyph="c" value="4"/>',
@@ -578,106 +565,106 @@ MORX_LIGATURE_XML = [
' <State index="0">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
' <Flags value="SetComponent"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="6">',
' <NewState value="0"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="1">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
' <Flags value="SetComponent"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="6">',
' <NewState value="0"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="2">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
' <Flags value="SetComponent"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="3"/>',
' <Flags value="SetComponent"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="6">',
' <NewState value="0"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="3">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
' <Flags value="SetComponent"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="3"/>',
' <Flags value="SetComponent"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="6">',
' <NewState value="0"/>',
' <Flags value="SetComponent"/>',
' <Action GlyphIndexDelta="-25"/>',
' <Action GlyphIndexDelta="-19"/>',
' <Action GlyphIndexDelta="-14"/>',
- ' </Transition>',
- ' </State>',
- ' <LigComponents>',
+ " </Transition>",
+ " </State>",
+ " <LigComponents>",
' <LigComponent index="0" value="0"/>',
' <LigComponent index="1" value="1"/>',
' <LigComponent index="2" value="2"/>',
@@ -687,8 +674,8 @@ MORX_LIGATURE_XML = [
' <LigComponent index="6" value="0"/>',
' <LigComponent index="7" value="8"/>',
' <LigComponent index="8" value="16"/>',
- ' </LigComponents>',
- ' <Ligatures>',
+ " </LigComponents>",
+ " <Ligatures>",
' <Ligature glyph="adf" index="0"/>',
' <Ligature glyph="adg" index="1"/>',
' <Ligature glyph="adh" index="2"/>',
@@ -697,84 +684,84 @@ MORX_LIGATURE_XML = [
' <Ligature glyph="aeg" index="5"/>',
' <Ligature glyph="aeh" index="6"/>',
' <Ligature glyph="aei" index="7"/>',
- ' </Ligatures>',
- ' </StateTable>',
- ' </LigatureMorph>',
- ' </MorphSubtable>',
- '</MorphChain>',
+ " </Ligatures>",
+ " </StateTable>",
+ " </LigatureMorph>",
+ " </MorphSubtable>",
+ "</MorphChain>",
]
# Taken from the `morx` table of the second font in DevanagariSangamMN.ttc
# on macOS X 10.12.6; manually pruned to just contain the insertion lookup.
MORX_INSERTION_DATA = deHexStr(
- '0002 0000 ' # 0: Version=2, Reserved=0
- '0000 0001 ' # 4: MorphChainCount=1
- '0000 0001 ' # 8: DefaultFlags=1
- '0000 00A4 ' # 12: StructLength=164 (+8=172)
- '0000 0000 ' # 16: MorphFeatureCount=0
- '0000 0001 ' # 20: MorphSubtableCount=1
- '0000 0094 ' # 24: Subtable[0].StructLength=148 (+24=172)
- '00 ' # 28: Subtable[0].CoverageFlags=0x00
- '00 00 ' # 29: Subtable[0].Reserved=0
- '05 ' # 31: Subtable[0].MorphType=5/InsertionMorph
- '0000 0001 ' # 32: Subtable[0].SubFeatureFlags=0x1
- '0000 0006 ' # 36: STXHeader.ClassCount=6
- '0000 0014 ' # 40: STXHeader.ClassTableOffset=20 (+36=56)
- '0000 004A ' # 44: STXHeader.StateArrayOffset=74 (+36=110)
- '0000 006E ' # 48: STXHeader.EntryTableOffset=110 (+36=146)
- '0000 0086 ' # 52: STXHeader.InsertionActionOffset=134 (+36=170)
- # Glyph class table.
- '0002 0006 ' # 56: ClassTable.LookupFormat=2, .UnitSize=6
- '0006 0018 ' # 60: .NUnits=6, .SearchRange=24
- '0002 000C ' # 64: .EntrySelector=2, .RangeShift=12
- '00AC 00AC 0005 ' # 68: GlyphID 172..172 -> GlyphClass 5
- '01EB 01E6 0005 ' # 74: GlyphID 486..491 -> GlyphClass 5
- '01F0 01F0 0004 ' # 80: GlyphID 496..496 -> GlyphClass 4
- '01F8 01F6 0004 ' # 88: GlyphID 502..504 -> GlyphClass 4
- '01FC 01FA 0004 ' # 92: GlyphID 506..508 -> GlyphClass 4
- '0250 0250 0005 ' # 98: GlyphID 592..592 -> GlyphClass 5
- 'FFFF FFFF 0000 ' # 104: <end of lookup>
+ "0002 0000 " # 0: Version=2, Reserved=0
+ "0000 0001 " # 4: MorphChainCount=1
+ "0000 0001 " # 8: DefaultFlags=1
+ "0000 00A4 " # 12: StructLength=164 (+8=172)
+ "0000 0000 " # 16: MorphFeatureCount=0
+ "0000 0001 " # 20: MorphSubtableCount=1
+ "0000 0094 " # 24: Subtable[0].StructLength=148 (+24=172)
+ "00 " # 28: Subtable[0].CoverageFlags=0x00
+ "00 00 " # 29: Subtable[0].Reserved=0
+ "05 " # 31: Subtable[0].MorphType=5/InsertionMorph
+ "0000 0001 " # 32: Subtable[0].SubFeatureFlags=0x1
+ "0000 0006 " # 36: STXHeader.ClassCount=6
+ "0000 0014 " # 40: STXHeader.ClassTableOffset=20 (+36=56)
+ "0000 004A " # 44: STXHeader.StateArrayOffset=74 (+36=110)
+ "0000 006E " # 48: STXHeader.EntryTableOffset=110 (+36=146)
+ "0000 0086 " # 52: STXHeader.InsertionActionOffset=134 (+36=170)
+ # Glyph class table.
+ "0002 0006 " # 56: ClassTable.LookupFormat=2, .UnitSize=6
+ "0006 0018 " # 60: .NUnits=6, .SearchRange=24
+ "0002 000C " # 64: .EntrySelector=2, .RangeShift=12
+ "00AC 00AC 0005 " # 68: GlyphID 172..172 -> GlyphClass 5
+ "01EB 01E6 0005 " # 74: GlyphID 486..491 -> GlyphClass 5
+ "01F0 01F0 0004 " # 80: GlyphID 496..496 -> GlyphClass 4
+ "01F8 01F6 0004 " # 88: GlyphID 502..504 -> GlyphClass 4
+ "01FC 01FA 0004 " # 92: GlyphID 506..508 -> GlyphClass 4
+ "0250 0250 0005 " # 98: GlyphID 592..592 -> GlyphClass 5
+ "FFFF FFFF 0000 " # 104: <end of lookup>
# State array.
- '0000 0000 0000 0000 0001 0000 ' # 110: State[0][0..5]
- '0000 0000 0000 0000 0001 0000 ' # 122: State[1][0..5]
- '0000 0000 0001 0000 0001 0002 ' # 134: State[2][0..5]
+ "0000 0000 0000 0000 0001 0000 " # 110: State[0][0..5]
+ "0000 0000 0000 0000 0001 0000 " # 122: State[1][0..5]
+ "0000 0000 0001 0000 0001 0002 " # 134: State[2][0..5]
# Entry table.
- '0000 0000 ' # 146: Entries[0].NewState=0, .Flags=0
- 'FFFF ' # 150: Entries[0].CurrentInsertIndex=<None>
- 'FFFF ' # 152: Entries[0].MarkedInsertIndex=<None>
- '0002 0000 ' # 154: Entries[1].NewState=0, .Flags=0
- 'FFFF ' # 158: Entries[1].CurrentInsertIndex=<None>
- 'FFFF ' # 160: Entries[1].MarkedInsertIndex=<None>
- '0000 ' # 162: Entries[2].NewState=0
- '2820 ' # 164: .Flags=CurrentIsKashidaLike,CurrentInsertBefore
- # .CurrentInsertCount=1, .MarkedInsertCount=0
- '0000 ' # 166: Entries[1].CurrentInsertIndex=0
- 'FFFF ' # 168: Entries[1].MarkedInsertIndex=<None>
+ "0000 0000 " # 146: Entries[0].NewState=0, .Flags=0
+ "FFFF " # 150: Entries[0].CurrentInsertIndex=<None>
+ "FFFF " # 152: Entries[0].MarkedInsertIndex=<None>
+ "0002 0000 " # 154: Entries[1].NewState=0, .Flags=0
+ "FFFF " # 158: Entries[1].CurrentInsertIndex=<None>
+ "FFFF " # 160: Entries[1].MarkedInsertIndex=<None>
+ "0000 " # 162: Entries[2].NewState=0
+ "2820 " # 164: .Flags=CurrentIsKashidaLike,CurrentInsertBefore
+ # .CurrentInsertCount=1, .MarkedInsertCount=0
+ "0000 " # 166: Entries[1].CurrentInsertIndex=0
+ "FFFF " # 168: Entries[1].MarkedInsertIndex=<None>
# Insertion action table.
- '022F' # 170: InsertionActionTable[0]=GlyphID 559
-) # 172: <end>
+ "022F" # 170: InsertionActionTable[0]=GlyphID 559
+) # 172: <end>
assert len(MORX_INSERTION_DATA) == 172, len(MORX_INSERTION_DATA)
MORX_INSERTION_XML = [
'<Version value="2"/>',
'<Reserved value="0"/>',
- '<!-- MorphChainCount=1 -->',
+ "<!-- MorphChainCount=1 -->",
'<MorphChain index="0">',
' <DefaultFlags value="0x00000001"/>',
- ' <!-- StructLength=164 -->',
- ' <!-- MorphFeatureCount=0 -->',
- ' <!-- MorphSubtableCount=1 -->',
+ " <!-- StructLength=164 -->",
+ " <!-- MorphFeatureCount=0 -->",
+ " <!-- MorphSubtableCount=1 -->",
' <MorphSubtable index="0">',
- ' <!-- StructLength=148 -->',
+ " <!-- StructLength=148 -->",
' <TextDirection value="Horizontal"/>',
' <ProcessingOrder value="LayoutOrder"/>',
- ' <!-- MorphType=5 -->',
+ " <!-- MorphType=5 -->",
' <SubFeatureFlags value="0x00000001"/>',
- ' <InsertionMorph>',
- ' <StateTable>',
- ' <!-- GlyphClassCount=6 -->',
+ " <InsertionMorph>",
+ " <StateTable>",
+ " <!-- GlyphClassCount=6 -->",
' <GlyphClass glyph="g.172" value="5"/>',
' <GlyphClass glyph="g.486" value="5"/>',
' <GlyphClass glyph="g.487" value="5"/>',
@@ -793,211 +780,205 @@ MORX_INSERTION_XML = [
' <State index="0">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="0"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="1">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="0"/>',
- ' </Transition>',
- ' </State>',
+ " </Transition>",
+ " </State>",
' <State index="2">',
' <Transition onGlyphClass="0">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="1">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="2">',
' <NewState value="2"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="3">',
' <NewState value="0"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="4">',
' <NewState value="2"/>',
- ' </Transition>',
+ " </Transition>",
' <Transition onGlyphClass="5">',
' <NewState value="0"/>',
' <Flags value="CurrentIsKashidaLike,CurrentInsertBefore"/>',
' <CurrentInsertionAction glyph="g.559"/>',
- ' </Transition>',
- ' </State>',
- ' </StateTable>',
- ' </InsertionMorph>',
- ' </MorphSubtable>',
- '</MorphChain>',
+ " </Transition>",
+ " </State>",
+ " </StateTable>",
+ " </InsertionMorph>",
+ " </MorphSubtable>",
+ "</MorphChain>",
]
class MORXNoncontextualGlyphSubstitutionTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- glyphs = ['.notdef'] + ['g.%d' % i for i in range (1, 140)]
- glyphs[11], glyphs[13] = 'parenleft', 'parenright'
- glyphs[135], glyphs[136] = 'parenleft.vertical', 'parenright.vertical'
+ glyphs = [".notdef"] + ["g.%d" % i for i in range(1, 140)]
+ glyphs[11], glyphs[13] = "parenleft", "parenright"
+ glyphs[135], glyphs[136] = "parenleft.vertical", "parenright.vertical"
cls.font = FakeFont(glyphs)
def test_decompile_toXML(self):
- table = newTable('morx')
+ table = newTable("morx")
table.decompile(MORX_NONCONTEXTUAL_DATA, self.font)
self.assertEqual(getXML(table.toXML), MORX_NONCONTEXTUAL_XML)
def test_compile_fromXML(self):
- table = newTable('morx')
+ table = newTable("morx")
for name, attrs, content in parseXML(MORX_NONCONTEXTUAL_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(MORX_NONCONTEXTUAL_DATA))
+ self.assertEqual(
+ hexStr(table.compile(self.font)), hexStr(MORX_NONCONTEXTUAL_DATA)
+ )
class MORXRearrangementTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.nodef', 'A', 'B', 'C'])
+ cls.font = FakeFont([".nodef", "A", "B", "C"])
def test_decompile_toXML(self):
- table = newTable('morx')
+ table = newTable("morx")
table.decompile(MORX_REARRANGEMENT_DATA, self.font)
self.assertEqual(getXML(table.toXML), MORX_REARRANGEMENT_XML)
def test_compile_fromXML(self):
- table = newTable('morx')
+ table = newTable("morx")
for name, attrs, content in parseXML(MORX_REARRANGEMENT_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(MORX_REARRANGEMENT_DATA))
+ self.assertEqual(
+ hexStr(table.compile(self.font)), hexStr(MORX_REARRANGEMENT_DATA)
+ )
class MORXContextualSubstitutionTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- g = ['.notdef'] + ['g.%d' % i for i in range (1, 910)]
- g[80] = 'C'
- g[50], g[52], g[201], g[202] = 'A', 'B', 'X', 'Y'
- g[600], g[601], g[602], g[900] = (
- 'A.swash', 'B.swash', 'X.swash', 'Y.swash')
+ g = [".notdef"] + ["g.%d" % i for i in range(1, 910)]
+ g[80] = "C"
+ g[50], g[52], g[201], g[202] = "A", "B", "X", "Y"
+ g[600], g[601], g[602], g[900] = ("A.swash", "B.swash", "X.swash", "Y.swash")
cls.font = FakeFont(g)
def test_decompile_toXML(self):
- table = newTable('morx')
+ table = newTable("morx")
table.decompile(MORX_CONTEXTUAL_DATA, self.font)
self.assertEqual(getXML(table.toXML), MORX_CONTEXTUAL_XML)
def test_compile_fromXML(self):
- table = newTable('morx')
+ table = newTable("morx")
for name, attrs, content in parseXML(MORX_CONTEXTUAL_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(MORX_CONTEXTUAL_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(MORX_CONTEXTUAL_DATA))
class MORXLigatureSubstitutionTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- g = ['.notdef'] + ['g.%d' % i for i in range (1, 1515)]
- g[20:29] = 'a b c d e f g h i'.split()
- g[1000:1008] = 'adf adg adh adi aef aeg aeh aei'.split()
- g[1008:1016] = 'bdf bdg bdh bdi bef beg beh bei'.split()
- g[1500:1507] = 'cdf cdg cdh cdi cef ceg ceh'.split()
- g[1511] = 'cei'
+ g = [".notdef"] + ["g.%d" % i for i in range(1, 1515)]
+ g[20:29] = "a b c d e f g h i".split()
+ g[1000:1008] = "adf adg adh adi aef aeg aeh aei".split()
+ g[1008:1016] = "bdf bdg bdh bdi bef beg beh bei".split()
+ g[1500:1507] = "cdf cdg cdh cdi cef ceg ceh".split()
+ g[1511] = "cei"
cls.font = FakeFont(g)
def test_decompile_toXML(self):
- table = newTable('morx')
+ table = newTable("morx")
table.decompile(MORX_LIGATURE_DATA, self.font)
self.assertEqual(getXML(table.toXML), MORX_LIGATURE_XML)
def test_compile_fromXML(self):
- table = newTable('morx')
+ table = newTable("morx")
for name, attrs, content in parseXML(MORX_LIGATURE_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(MORX_LIGATURE_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(MORX_LIGATURE_DATA))
class MORXGlyphInsertionTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.notdef'] + ['g.%d' % i for i in range (1, 910)])
+ cls.font = FakeFont([".notdef"] + ["g.%d" % i for i in range(1, 910)])
def test_decompile_toXML(self):
- table = newTable('morx')
+ table = newTable("morx")
table.decompile(MORX_INSERTION_DATA, self.font)
self.assertEqual(getXML(table.toXML), MORX_INSERTION_XML)
def test_compile_fromXML(self):
- table = newTable('morx')
+ table = newTable("morx")
for name, attrs, content in parseXML(MORX_INSERTION_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(MORX_INSERTION_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(MORX_INSERTION_DATA))
class MORXCoverageFlagsTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.notdef', 'A', 'B', 'C'])
-
- def checkFlags(self, flags, textDirection, processingOrder,
- checkCompile=True):
- data = bytesjoin([
- MORX_REARRANGEMENT_DATA[:28],
- bytechr(flags << 4),
- MORX_REARRANGEMENT_DATA[29:]])
+ cls.font = FakeFont([".notdef", "A", "B", "C"])
+
+ def checkFlags(self, flags, textDirection, processingOrder, checkCompile=True):
+ data = bytesjoin(
+ [
+ MORX_REARRANGEMENT_DATA[:28],
+ bytechr(flags << 4),
+ MORX_REARRANGEMENT_DATA[29:],
+ ]
+ )
xml = []
for line in MORX_REARRANGEMENT_XML:
- if line.startswith(' <TextDirection '):
+ if line.startswith(" <TextDirection "):
line = ' <TextDirection value="%s"/>' % textDirection
- elif line.startswith(' <ProcessingOrder '):
+ elif line.startswith(" <ProcessingOrder "):
line = ' <ProcessingOrder value="%s"/>' % processingOrder
xml.append(line)
- table1 = newTable('morx')
+ table1 = newTable("morx")
table1.decompile(data, self.font)
self.assertEqual(getXML(table1.toXML), xml)
if checkCompile:
- table2 = newTable('morx')
+ table2 = newTable("morx")
for name, attrs, content in parseXML(xml):
table2.fromXML(name, attrs, content, font=self.font)
self.assertEqual(hexStr(table2.compile(self.font)), hexStr(data))
@@ -1034,17 +1015,22 @@ class MORXCoverageFlagsTest(unittest.TestCase):
# Note that the lower 4 bits of the first byte are already
# part of the Reserved value. We test the full round-trip
# to encoding and decoding is quite hairy.
- data = bytesjoin([
- MORX_REARRANGEMENT_DATA[:28],
- bytechr(0x8A), bytechr(0xBC), bytechr(0xDE),
- MORX_REARRANGEMENT_DATA[31:]])
- table = newTable('morx')
+ data = bytesjoin(
+ [
+ MORX_REARRANGEMENT_DATA[:28],
+ bytechr(0x8A),
+ bytechr(0xBC),
+ bytechr(0xDE),
+ MORX_REARRANGEMENT_DATA[31:],
+ ]
+ )
+ table = newTable("morx")
table.decompile(data, self.font)
subtable = table.table.MorphChain[0].MorphSubtable[0]
self.assertEqual(subtable.Reserved, 0xABCDE)
xml = getXML(table.toXML)
self.assertIn(' <Reserved value="0xabcde"/>', xml)
- table2 = newTable('morx')
+ table2 = newTable("morx")
for name, attrs, content in parseXML(xml):
table2.fromXML(name, attrs, content, font=self.font)
self.assertEqual(hexStr(table2.compile(self.font)[28:31]), "8abcde")
@@ -1059,16 +1045,17 @@ class UnsupportedMorxLookupTest(unittest.TestCase):
self.assertRaisesRegex = self.assertRaisesRegexp
def test_unsupportedLookupType(self):
- data = bytesjoin([
- MORX_NONCONTEXTUAL_DATA[:67],
- bytechr(66),
- MORX_NONCONTEXTUAL_DATA[69:]])
- with self.assertRaisesRegex(AssertionError,
- r"unsupported 'morx' lookup type 66"):
- morx = newTable('morx')
- morx.decompile(data, FakeFont(['.notdef']))
+ data = bytesjoin(
+ [MORX_NONCONTEXTUAL_DATA[:67], bytechr(66), MORX_NONCONTEXTUAL_DATA[69:]]
+ )
+ with self.assertRaisesRegex(
+ AssertionError, r"unsupported 'morx' lookup type 66"
+ ):
+ morx = newTable("morx")
+ morx.decompile(data, FakeFont([".notdef"]))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_n_a_m_e_test.py b/Tests/ttLib/tables/_n_a_m_e_test.py
index 5e8a0c2b..6b3a0a70 100644
--- a/Tests/ttLib/tables/_n_a_m_e_test.py
+++ b/Tests/ttLib/tables/_n_a_m_e_test.py
@@ -8,556 +8,624 @@ import struct
import unittest
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._n_a_m_e import (
- table__n_a_m_e, NameRecord, nameRecordFormat, nameRecordSize, makeName, log)
+ table__n_a_m_e,
+ NameRecord,
+ nameRecordFormat,
+ nameRecordSize,
+ makeName,
+ log,
+)
def names(nameTable):
- result = [(n.nameID, n.platformID, n.platEncID, n.langID, n.string)
- for n in nameTable.names]
- result.sort()
- return result
+ result = [
+ (n.nameID, n.platformID, n.platEncID, n.langID, n.string)
+ for n in nameTable.names
+ ]
+ result.sort()
+ return result
class NameTableTest(unittest.TestCase):
-
- def test_getDebugName(self):
- table = table__n_a_m_e()
- table.names = [
- makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English
- makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French
- makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German
- makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese
- ]
- self.assertEqual("Bold", table.getDebugName(258))
- self.assertEqual("Sem Fracções", table.getDebugName(292))
- self.assertEqual(None, table.getDebugName(999))
-
- def test_setName(self):
- table = table__n_a_m_e()
- table.setName("Regular", 2, 1, 0, 0)
- table.setName("Version 1.000", 5, 3, 1, 0x409)
- table.setName("寬鬆", 276, 1, 2, 0x13)
- self.assertEqual("Regular", table.getName(2, 1, 0, 0).toUnicode())
- self.assertEqual("Version 1.000", table.getName(5, 3, 1, 0x409).toUnicode())
- self.assertEqual("寬鬆", table.getName(276, 1, 2, 0x13).toUnicode())
- self.assertTrue(len(table.names) == 3)
- table.setName("緊縮", 276, 1, 2, 0x13)
- self.assertEqual("緊縮", table.getName(276, 1, 2, 0x13).toUnicode())
- self.assertTrue(len(table.names) == 3)
- # passing bytes issues a warning
- with CapturingLogHandler(log, "WARNING") as captor:
- table.setName(b"abc", 0, 1, 0, 0)
- self.assertTrue(
- len([r for r in captor.records if "string is bytes" in r.msg]) == 1)
- # anything other than unicode or bytes raises an error
- with self.assertRaises(TypeError):
- table.setName(1.000, 5, 1, 0, 0)
-
- def test_names_sort_bytes_str(self):
- # Corner case: If a user appends a name record directly to `names`, the
- # `__lt__` method on NameRecord may run into duplicate name records where
- # one `string` is a str and the other one bytes, leading to an exception.
- table = table__n_a_m_e()
- table.names = [
- makeName("Test", 25, 3, 1, 0x409),
- makeName("Test".encode("utf-16be"), 25, 3, 1, 0x409),
- ]
- table.compile(None)
-
- def test_names_sort_bytes_str_encoding_error(self):
- table = table__n_a_m_e()
- table.names = [
- makeName("Test寬", 25, 1, 0, 0),
- makeName("Test鬆鬆", 25, 1, 0, 0),
- ]
- with self.assertRaises(TypeError):
- table.names.sort()
-
- def test_addName(self):
- table = table__n_a_m_e()
- nameIDs = []
- for string in ("Width", "Weight", "Custom"):
- nameIDs.append(table.addName(string))
-
- self.assertEqual(nameIDs[0], 256)
- self.assertEqual(nameIDs[1], 257)
- self.assertEqual(nameIDs[2], 258)
- self.assertEqual(len(table.names), 6)
- self.assertEqual(table.names[0].string, "Width")
- self.assertEqual(table.names[1].string, "Width")
- self.assertEqual(table.names[2].string, "Weight")
- self.assertEqual(table.names[3].string, "Weight")
- self.assertEqual(table.names[4].string, "Custom")
- self.assertEqual(table.names[5].string, "Custom")
-
- with self.assertRaises(ValueError):
- table.addName('Invalid nameID', minNameID=32767)
- with self.assertRaises(TypeError):
- table.addName(b"abc") # must be unicode string
-
- def test_removeNames(self):
- table = table__n_a_m_e()
- table.setName("Regular", 2, 1, 0, 0)
- table.setName("Regular", 2, 3, 1, 0x409)
- table.removeNames(nameID=2)
- self.assertEqual(table.names, [])
-
- table = table__n_a_m_e()
- table.setName("FamilyName", 1, 1, 0, 0)
- table.setName("Regular", 2, 1, 0, 0)
- table.setName("FamilyName", 1, 3, 1, 0x409)
- table.setName("Regular", 2, 3, 1, 0x409)
- table.removeNames(platformID=1)
- self.assertEqual(len(table.names), 2)
- self.assertIsNone(table.getName(1, 1, 0, 0))
- self.assertIsNone(table.getName(2, 1, 0, 0))
- rec1 = table.getName(1, 3, 1, 0x409)
- self.assertEqual(str(rec1), "FamilyName")
- rec2 = table.getName(2, 3, 1, 0x409)
- self.assertEqual(str(rec2), "Regular")
-
- table = table__n_a_m_e()
- table.setName("FamilyName", 1, 1, 0, 0)
- table.setName("Regular", 2, 1, 0, 0)
- table.removeNames(nameID=1)
- self.assertEqual(len(table.names), 1)
- self.assertIsNone(table.getName(1, 1, 0, 0))
- rec = table.getName(2, 1, 0, 0)
- self.assertEqual(str(rec), "Regular")
-
- table = table__n_a_m_e()
- table.setName("FamilyName", 1, 1, 0, 0)
- table.setName("Regular", 2, 1, 0, 0)
- table.removeNames(2, 1, 0, 0)
- self.assertEqual(len(table.names), 1)
- self.assertIsNone(table.getName(2, 1, 0, 0))
- rec = table.getName(1, 1, 0, 0)
- self.assertEqual(str(rec), "FamilyName")
-
- table = table__n_a_m_e()
- table.setName("FamilyName", 1, 1, 0, 0)
- table.setName("Regular", 2, 1, 0, 0)
- table.removeNames()
- self.assertEqual(len(table.names), 2)
- rec1 = table.getName(1, 1, 0, 0)
- self.assertEqual(str(rec1), "FamilyName")
- rec2 = table.getName(2, 1, 0, 0)
- self.assertEqual(str(rec2), "Regular")
-
- @staticmethod
- def _get_test_names():
- names = {
- "en": "Width",
- "de-CH": "Breite",
- "gsw-LI": "Bräiti",
- }
- namesSubSet = names.copy()
- del namesSubSet["gsw-LI"]
- namesSuperSet = names.copy()
- namesSuperSet["nl"] = "Breedte"
- return names, namesSubSet, namesSuperSet
-
- def test_findMultilingualName(self):
- table = table__n_a_m_e()
- names, namesSubSet, namesSuperSet = self._get_test_names()
- nameID = table.addMultilingualName(names)
- assert nameID is not None
- self.assertEqual(nameID, table.findMultilingualName(names))
- self.assertEqual(nameID, table.findMultilingualName(namesSubSet))
- self.assertEqual(None, table.findMultilingualName(namesSuperSet))
-
- def test_findMultilingualName_compiled(self):
- table = table__n_a_m_e()
- names, namesSubSet, namesSuperSet = self._get_test_names()
- nameID = table.addMultilingualName(names)
- assert nameID is not None
- # After compile/decompile, name.string is a bytes sequence, which
- # findMultilingualName() should also handle
- data = table.compile(None)
- table = table__n_a_m_e()
- table.decompile(data, None)
- self.assertEqual(nameID, table.findMultilingualName(names))
- self.assertEqual(nameID, table.findMultilingualName(namesSubSet))
- self.assertEqual(None, table.findMultilingualName(namesSuperSet))
-
- def test_addMultilingualNameReuse(self):
- table = table__n_a_m_e()
- names, namesSubSet, namesSuperSet = self._get_test_names()
- nameID = table.addMultilingualName(names)
- assert nameID is not None
- self.assertEqual(nameID, table.addMultilingualName(names))
- self.assertEqual(nameID, table.addMultilingualName(namesSubSet))
- self.assertNotEqual(None, table.addMultilingualName(namesSuperSet))
-
- def test_findMultilingualNameNoMac(self):
- table = table__n_a_m_e()
- names, namesSubSet, namesSuperSet = self._get_test_names()
- nameID = table.addMultilingualName(names, mac=False)
- assert nameID is not None
- self.assertEqual(nameID, table.findMultilingualName(names, mac=False))
- self.assertEqual(None, table.findMultilingualName(names))
- self.assertEqual(nameID, table.findMultilingualName(namesSubSet, mac=False))
- self.assertEqual(None, table.findMultilingualName(namesSubSet))
- self.assertEqual(None, table.findMultilingualName(namesSuperSet))
-
- def test_addMultilingualName(self):
- # Microsoft Windows has language codes for “English” (en)
- # and for “Standard German as used in Switzerland” (de-CH).
- # In this case, we expect that the implementation just
- # encodes the name for the Windows platform; Apple platforms
- # have been able to decode Windows names since the early days
- # of OSX (~2001). However, Windows has no language code for
- # “Swiss German as used in Liechtenstein” (gsw-LI), so we
- # expect that the implementation populates the 'ltag' table
- # to represent that particular, rather exotic BCP47 code.
- font = FakeFont(glyphs=[".notdef", "A"])
- nameTable = font.tables['name'] = newTable("name")
- with CapturingLogHandler(log, "WARNING") as captor:
- widthID = nameTable.addMultilingualName({
- "en": "Width",
- "de-CH": "Breite",
- "gsw-LI": "Bräiti",
- }, ttFont=font, mac=False)
- self.assertEqual(widthID, 256)
- xHeightID = nameTable.addMultilingualName({
- "en": "X-Height",
- "gsw-LI": "X-Hööchi"
- }, ttFont=font, mac=False)
- self.assertEqual(xHeightID, 257)
- captor.assertRegex("cannot add Windows name in language gsw-LI")
- self.assertEqual(names(nameTable), [
- (256, 0, 4, 0, "Bräiti"),
- (256, 3, 1, 0x0409, "Width"),
- (256, 3, 1, 0x0807, "Breite"),
- (257, 0, 4, 0, "X-Hööchi"),
- (257, 3, 1, 0x0409, "X-Height"),
- ])
- self.assertEqual(set(font.tables.keys()), {"ltag", "name"})
- self.assertEqual(font["ltag"].tags, ["gsw-LI"])
-
- def test_addMultilingualName_legacyMacEncoding(self):
- # Windows has no language code for Latin; MacOS has a code;
- # and we actually can convert the name to the legacy MacRoman
- # encoding. In this case, we expect that the name gets encoded
- # as Macintosh name (platformID 1) with the corresponding Mac
- # language code (133); the 'ltag' table should not be used.
- font = FakeFont(glyphs=[".notdef", "A"])
- nameTable = font.tables['name'] = newTable("name")
- with CapturingLogHandler(log, "WARNING") as captor:
- nameTable.addMultilingualName({"la": "SPQR"},
- ttFont=font)
- captor.assertRegex("cannot add Windows name in language la")
- self.assertEqual(names(nameTable), [(256, 1, 0, 131, "SPQR")])
- self.assertNotIn("ltag", font.tables.keys())
-
- def test_addMultilingualName_legacyMacEncodingButUnencodableName(self):
- # Windows has no language code for Latin; MacOS has a code;
- # but we cannot encode the name into this encoding because
- # it contains characters that are not representable.
- # In this case, we expect that the name gets encoded as
- # Unicode name (platformID 0) with the language tag being
- # added to the 'ltag' table.
- font = FakeFont(glyphs=[".notdef", "A"])
- nameTable = font.tables['name'] = newTable("name")
- with CapturingLogHandler(log, "WARNING") as captor:
- nameTable.addMultilingualName({"la": "ⱾƤℚⱤ"},
- ttFont=font)
- captor.assertRegex("cannot add Windows name in language la")
- self.assertEqual(names(nameTable), [(256, 0, 4, 0, "ⱾƤℚⱤ")])
- self.assertIn("ltag", font.tables)
- self.assertEqual(font["ltag"].tags, ["la"])
-
- def test_addMultilingualName_legacyMacEncodingButNoCodec(self):
- # Windows has no language code for “Azeri written in the
- # Arabic script” (az-Arab); MacOS would have a code (50);
- # but we cannot encode the name into the legacy encoding
- # because we have no codec for MacArabic in fonttools.
- # In this case, we expect that the name gets encoded as
- # Unicode name (platformID 0) with the language tag being
- # added to the 'ltag' table.
- font = FakeFont(glyphs=[".notdef", "A"])
- nameTable = font.tables['name'] = newTable("name")
- with CapturingLogHandler(log, "WARNING") as captor:
- nameTable.addMultilingualName({"az-Arab": "آذربايجان ديلی"},
- ttFont=font)
- captor.assertRegex("cannot add Windows name in language az-Arab")
- self.assertEqual(names(nameTable), [(256, 0, 4, 0, "آذربايجان ديلی")])
- self.assertIn("ltag", font.tables)
- self.assertEqual(font["ltag"].tags, ["az-Arab"])
-
- def test_addMultilingualName_noTTFont(self):
- # If the ttFont argument is not passed, the implementation
- # should add whatever names it can, but it should not crash
- # just because it cannot build an ltag table.
- nameTable = newTable("name")
- with CapturingLogHandler(log, "WARNING") as captor:
- nameTable.addMultilingualName({"en": "A", "la": "ⱾƤℚⱤ"})
- captor.assertRegex("cannot store language la into 'ltag' table")
-
- def test_addMultilingualName_minNameID(self):
- table = table__n_a_m_e()
- names, namesSubSet, namesSuperSet = self._get_test_names()
- nameID = table.addMultilingualName(names, nameID=2)
- self.assertEqual(nameID, 2)
- nameID = table.addMultilingualName(names)
- self.assertEqual(nameID, 2)
- nameID = table.addMultilingualName(names, minNameID=256)
- self.assertGreaterEqual(nameID, 256)
- self.assertEqual(nameID, table.findMultilingualName(names, minNameID=256))
-
- def test_addMultilingualName_name_inconsistencies(self):
- # Check what happens, when there are
- # inconsistencies in the name table
- table = table__n_a_m_e()
- table.setName('Weight', 270, 3, 1, 0x409)
- names = {'en': 'Weight', }
- nameID = table.addMultilingualName(names, minNameID=256)
- # Because there is an inconsistency in the names,
- # addMultilingualName adds a new name ID
- self.assertEqual(271, nameID)
-
- def test_decompile_badOffset(self):
- # https://github.com/fonttools/fonttools/issues/525
- table = table__n_a_m_e()
- badRecord = {
- "platformID": 1,
- "platEncID": 3,
- "langID": 7,
- "nameID": 1,
- "length": 3,
- "offset": 8765 # out of range
- }
- data = bytesjoin([
- struct.pack(tostr(">HHH"), 1, 1, 6 + nameRecordSize),
- sstruct.pack(nameRecordFormat, badRecord)])
- table.decompile(data, ttFont=None)
- self.assertEqual(table.names, [])
+ def test_getDebugName(self):
+ table = table__n_a_m_e()
+ table.names = [
+ makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English
+ makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French
+ makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German
+ makeName("Sem Fracções", 292, 1, 0, 8), # Mac, MacRoman, Portuguese
+ ]
+ self.assertEqual("Bold", table.getDebugName(258))
+ self.assertEqual("Sem Fracções", table.getDebugName(292))
+ self.assertEqual(None, table.getDebugName(999))
+
+ def test_setName(self):
+ table = table__n_a_m_e()
+ table.setName("Regular", 2, 1, 0, 0)
+ table.setName("Version 1.000", 5, 3, 1, 0x409)
+ table.setName("寬鬆", 276, 1, 2, 0x13)
+ self.assertEqual("Regular", table.getName(2, 1, 0, 0).toUnicode())
+ self.assertEqual("Version 1.000", table.getName(5, 3, 1, 0x409).toUnicode())
+ self.assertEqual("寬鬆", table.getName(276, 1, 2, 0x13).toUnicode())
+ self.assertTrue(len(table.names) == 3)
+ table.setName("緊縮", 276, 1, 2, 0x13)
+ self.assertEqual("緊縮", table.getName(276, 1, 2, 0x13).toUnicode())
+ self.assertTrue(len(table.names) == 3)
+ # passing bytes issues a warning
+ with CapturingLogHandler(log, "WARNING") as captor:
+ table.setName(b"abc", 0, 1, 0, 0)
+ self.assertTrue(
+ len([r for r in captor.records if "string is bytes" in r.msg]) == 1
+ )
+ # anything other than unicode or bytes raises an error
+ with self.assertRaises(TypeError):
+ table.setName(1.000, 5, 1, 0, 0)
+
+ def test_names_sort_bytes_str(self):
+ # Corner case: If a user appends a name record directly to `names`, the
+ # `__lt__` method on NameRecord may run into duplicate name records where
+ # one `string` is a str and the other one bytes, leading to an exception.
+ table = table__n_a_m_e()
+ table.names = [
+ makeName("Test", 25, 3, 1, 0x409),
+ makeName("Test".encode("utf-16be"), 25, 3, 1, 0x409),
+ ]
+ table.compile(None)
+
+ def test_names_sort_attributes(self):
+ table = table__n_a_m_e()
+ # Create an actual invalid NameRecord object
+ broken = makeName("Test", 25, 3, 1, 0x409)
+ delattr(broken, "platformID")
+ table.names = [
+ makeName("Test", 25, 3, 1, 0x409),
+ broken,
+ ]
+ # Sorting these two is impossible, expect an error to be raised
+ with self.assertRaises(TypeError):
+ table.names.sort()
+
+ def test_names_sort_encoding(self):
+ """
+ Confirm that encoding errors in name table strings do not prevent at
+ least sorting by other IDs
+ """
+ table = table__n_a_m_e()
+ table.names = [
+ makeName("Mac Unicode 寬 encodes ok", 25, 3, 0, 0x409),
+ makeName("Win Latin 寬 fails to encode", 25, 1, 0, 0),
+ ]
+ table.names.sort()
+ # Encoding errors or not, sort based on other IDs nonetheless
+ self.assertEqual(table.names[0].platformID, 1)
+ self.assertEqual(table.names[1].platformID, 3)
+
+ def test_addName(self):
+ table = table__n_a_m_e()
+ nameIDs = []
+ for string in ("Width", "Weight", "Custom"):
+ nameIDs.append(table.addName(string))
+
+ self.assertEqual(nameIDs[0], 256)
+ self.assertEqual(nameIDs[1], 257)
+ self.assertEqual(nameIDs[2], 258)
+ self.assertEqual(len(table.names), 6)
+ self.assertEqual(table.names[0].string, "Width")
+ self.assertEqual(table.names[1].string, "Width")
+ self.assertEqual(table.names[2].string, "Weight")
+ self.assertEqual(table.names[3].string, "Weight")
+ self.assertEqual(table.names[4].string, "Custom")
+ self.assertEqual(table.names[5].string, "Custom")
+
+ with self.assertRaises(ValueError):
+ table.addName("Invalid nameID", minNameID=32767)
+ with self.assertRaises(TypeError):
+ table.addName(b"abc") # must be unicode string
+
+ def test_removeNames(self):
+ table = table__n_a_m_e()
+ table.setName("Regular", 2, 1, 0, 0)
+ table.setName("Regular", 2, 3, 1, 0x409)
+ table.removeNames(nameID=2)
+ self.assertEqual(table.names, [])
+
+ table = table__n_a_m_e()
+ table.setName("FamilyName", 1, 1, 0, 0)
+ table.setName("Regular", 2, 1, 0, 0)
+ table.setName("FamilyName", 1, 3, 1, 0x409)
+ table.setName("Regular", 2, 3, 1, 0x409)
+ table.removeNames(platformID=1)
+ self.assertEqual(len(table.names), 2)
+ self.assertIsNone(table.getName(1, 1, 0, 0))
+ self.assertIsNone(table.getName(2, 1, 0, 0))
+ rec1 = table.getName(1, 3, 1, 0x409)
+ self.assertEqual(str(rec1), "FamilyName")
+ rec2 = table.getName(2, 3, 1, 0x409)
+ self.assertEqual(str(rec2), "Regular")
+
+ table = table__n_a_m_e()
+ table.setName("FamilyName", 1, 1, 0, 0)
+ table.setName("Regular", 2, 1, 0, 0)
+ table.removeNames(nameID=1)
+ self.assertEqual(len(table.names), 1)
+ self.assertIsNone(table.getName(1, 1, 0, 0))
+ rec = table.getName(2, 1, 0, 0)
+ self.assertEqual(str(rec), "Regular")
+
+ table = table__n_a_m_e()
+ table.setName("FamilyName", 1, 1, 0, 0)
+ table.setName("Regular", 2, 1, 0, 0)
+ table.removeNames(2, 1, 0, 0)
+ self.assertEqual(len(table.names), 1)
+ self.assertIsNone(table.getName(2, 1, 0, 0))
+ rec = table.getName(1, 1, 0, 0)
+ self.assertEqual(str(rec), "FamilyName")
+
+ table = table__n_a_m_e()
+ table.setName("FamilyName", 1, 1, 0, 0)
+ table.setName("Regular", 2, 1, 0, 0)
+ table.removeNames()
+ self.assertEqual(len(table.names), 2)
+ rec1 = table.getName(1, 1, 0, 0)
+ self.assertEqual(str(rec1), "FamilyName")
+ rec2 = table.getName(2, 1, 0, 0)
+ self.assertEqual(str(rec2), "Regular")
+
+ @staticmethod
+ def _get_test_names():
+ names = {
+ "en": "Width",
+ "de-CH": "Breite",
+ "gsw-LI": "Bräiti",
+ }
+ namesSubSet = names.copy()
+ del namesSubSet["gsw-LI"]
+ namesSuperSet = names.copy()
+ namesSuperSet["nl"] = "Breedte"
+ return names, namesSubSet, namesSuperSet
+
+ def test_findMultilingualName(self):
+ table = table__n_a_m_e()
+ names, namesSubSet, namesSuperSet = self._get_test_names()
+ nameID = table.addMultilingualName(names)
+ assert nameID is not None
+ self.assertEqual(nameID, table.findMultilingualName(names))
+ self.assertEqual(nameID, table.findMultilingualName(namesSubSet))
+ self.assertEqual(None, table.findMultilingualName(namesSuperSet))
+
+ def test_findMultilingualName_compiled(self):
+ table = table__n_a_m_e()
+ names, namesSubSet, namesSuperSet = self._get_test_names()
+ nameID = table.addMultilingualName(names)
+ assert nameID is not None
+ # After compile/decompile, name.string is a bytes sequence, which
+ # findMultilingualName() should also handle
+ data = table.compile(None)
+ table = table__n_a_m_e()
+ table.decompile(data, None)
+ self.assertEqual(nameID, table.findMultilingualName(names))
+ self.assertEqual(nameID, table.findMultilingualName(namesSubSet))
+ self.assertEqual(None, table.findMultilingualName(namesSuperSet))
+
+ def test_addMultilingualNameReuse(self):
+ table = table__n_a_m_e()
+ names, namesSubSet, namesSuperSet = self._get_test_names()
+ nameID = table.addMultilingualName(names)
+ assert nameID is not None
+ self.assertEqual(nameID, table.addMultilingualName(names))
+ self.assertEqual(nameID, table.addMultilingualName(namesSubSet))
+ self.assertNotEqual(None, table.addMultilingualName(namesSuperSet))
+
+ def test_findMultilingualNameNoMac(self):
+ table = table__n_a_m_e()
+ names, namesSubSet, namesSuperSet = self._get_test_names()
+ nameID = table.addMultilingualName(names, mac=False)
+ assert nameID is not None
+ self.assertEqual(nameID, table.findMultilingualName(names, mac=False))
+ self.assertEqual(None, table.findMultilingualName(names))
+ self.assertEqual(nameID, table.findMultilingualName(namesSubSet, mac=False))
+ self.assertEqual(None, table.findMultilingualName(namesSubSet))
+ self.assertEqual(None, table.findMultilingualName(namesSuperSet))
+
+ def test_addMultilingualName(self):
+ # Microsoft Windows has language codes for “English” (en)
+ # and for “Standard German as used in Switzerland” (de-CH).
+ # In this case, we expect that the implementation just
+ # encodes the name for the Windows platform; Apple platforms
+ # have been able to decode Windows names since the early days
+ # of OSX (~2001). However, Windows has no language code for
+ # “Swiss German as used in Liechtenstein” (gsw-LI), so we
+ # expect that the implementation populates the 'ltag' table
+ # to represent that particular, rather exotic BCP47 code.
+ font = FakeFont(glyphs=[".notdef", "A"])
+ nameTable = font.tables["name"] = newTable("name")
+ with CapturingLogHandler(log, "WARNING") as captor:
+ widthID = nameTable.addMultilingualName(
+ {
+ "en": "Width",
+ "de-CH": "Breite",
+ "gsw-LI": "Bräiti",
+ },
+ ttFont=font,
+ mac=False,
+ )
+ self.assertEqual(widthID, 256)
+ xHeightID = nameTable.addMultilingualName(
+ {"en": "X-Height", "gsw-LI": "X-Hööchi"}, ttFont=font, mac=False
+ )
+ self.assertEqual(xHeightID, 257)
+ captor.assertRegex("cannot add Windows name in language gsw-LI")
+ self.assertEqual(
+ names(nameTable),
+ [
+ (256, 0, 4, 0, "Bräiti"),
+ (256, 3, 1, 0x0409, "Width"),
+ (256, 3, 1, 0x0807, "Breite"),
+ (257, 0, 4, 0, "X-Hööchi"),
+ (257, 3, 1, 0x0409, "X-Height"),
+ ],
+ )
+ self.assertEqual(set(font.tables.keys()), {"ltag", "name"})
+ self.assertEqual(font["ltag"].tags, ["gsw-LI"])
+
+ def test_addMultilingualName_legacyMacEncoding(self):
+ # Windows has no language code for Latin; MacOS has a code;
+ # and we actually can convert the name to the legacy MacRoman
+ # encoding. In this case, we expect that the name gets encoded
+ # as Macintosh name (platformID 1) with the corresponding Mac
+ # language code (133); the 'ltag' table should not be used.
+ font = FakeFont(glyphs=[".notdef", "A"])
+ nameTable = font.tables["name"] = newTable("name")
+ with CapturingLogHandler(log, "WARNING") as captor:
+ nameTable.addMultilingualName({"la": "SPQR"}, ttFont=font)
+ captor.assertRegex("cannot add Windows name in language la")
+ self.assertEqual(names(nameTable), [(256, 1, 0, 131, "SPQR")])
+ self.assertNotIn("ltag", font.tables.keys())
+
+ def test_addMultilingualName_legacyMacEncodingButUnencodableName(self):
+ # Windows has no language code for Latin; MacOS has a code;
+ # but we cannot encode the name into this encoding because
+ # it contains characters that are not representable.
+ # In this case, we expect that the name gets encoded as
+ # Unicode name (platformID 0) with the language tag being
+ # added to the 'ltag' table.
+ font = FakeFont(glyphs=[".notdef", "A"])
+ nameTable = font.tables["name"] = newTable("name")
+ with CapturingLogHandler(log, "WARNING") as captor:
+ nameTable.addMultilingualName({"la": "ⱾƤℚⱤ"}, ttFont=font)
+ captor.assertRegex("cannot add Windows name in language la")
+ self.assertEqual(names(nameTable), [(256, 0, 4, 0, "ⱾƤℚⱤ")])
+ self.assertIn("ltag", font.tables)
+ self.assertEqual(font["ltag"].tags, ["la"])
+
+ def test_addMultilingualName_legacyMacEncodingButNoCodec(self):
+ # Windows has no language code for “Azeri written in the
+ # Arabic script” (az-Arab); MacOS would have a code (50);
+ # but we cannot encode the name into the legacy encoding
+ # because we have no codec for MacArabic in fonttools.
+ # In this case, we expect that the name gets encoded as
+ # Unicode name (platformID 0) with the language tag being
+ # added to the 'ltag' table.
+ font = FakeFont(glyphs=[".notdef", "A"])
+ nameTable = font.tables["name"] = newTable("name")
+ with CapturingLogHandler(log, "WARNING") as captor:
+ nameTable.addMultilingualName({"az-Arab": "آذربايجان ديلی"}, ttFont=font)
+ captor.assertRegex("cannot add Windows name in language az-Arab")
+ self.assertEqual(names(nameTable), [(256, 0, 4, 0, "آذربايجان ديلی")])
+ self.assertIn("ltag", font.tables)
+ self.assertEqual(font["ltag"].tags, ["az-Arab"])
+
+ def test_addMultilingualName_noTTFont(self):
+ # If the ttFont argument is not passed, the implementation
+ # should add whatever names it can, but it should not crash
+ # just because it cannot build an ltag table.
+ nameTable = newTable("name")
+ with CapturingLogHandler(log, "WARNING") as captor:
+ nameTable.addMultilingualName({"en": "A", "la": "ⱾƤℚⱤ"})
+ captor.assertRegex("cannot store language la into 'ltag' table")
+
+ def test_addMultilingualName_TTFont(self):
+ # if ttFont argument is passed, it should not WARN about not being able
+ # to create ltag table.
+ font = FakeFont(glyphs=[".notdef", "A"])
+ nameTable = newTable("name")
+ with CapturingLogHandler(log, "WARNING") as captor:
+ nameTable.addMultilingualName({"en": "A", "ar": "ع"}, ttFont=font)
+ self.assertFalse(captor.records)
+
+ def test_addMultilingualName_minNameID(self):
+ table = table__n_a_m_e()
+ names, namesSubSet, namesSuperSet = self._get_test_names()
+ nameID = table.addMultilingualName(names, nameID=2)
+ self.assertEqual(nameID, 2)
+ nameID = table.addMultilingualName(names)
+ self.assertEqual(nameID, 2)
+ nameID = table.addMultilingualName(names, minNameID=256)
+ self.assertGreaterEqual(nameID, 256)
+ self.assertEqual(nameID, table.findMultilingualName(names, minNameID=256))
+
+ def test_addMultilingualName_name_inconsistencies(self):
+ # Check what happens, when there are
+ # inconsistencies in the name table
+ table = table__n_a_m_e()
+ table.setName("Weight", 270, 3, 1, 0x409)
+ names = {
+ "en": "Weight",
+ }
+ nameID = table.addMultilingualName(names, minNameID=256)
+ # Because there is an inconsistency in the names,
+ # addMultilingualName adds a new name ID
+ self.assertEqual(271, nameID)
+
+ def test_decompile_badOffset(self):
+ # https://github.com/fonttools/fonttools/issues/525
+ table = table__n_a_m_e()
+ badRecord = {
+ "platformID": 1,
+ "platEncID": 3,
+ "langID": 7,
+ "nameID": 1,
+ "length": 3,
+ "offset": 8765, # out of range
+ }
+ data = bytesjoin(
+ [
+ struct.pack(tostr(">HHH"), 1, 1, 6 + nameRecordSize),
+ sstruct.pack(nameRecordFormat, badRecord),
+ ]
+ )
+ table.decompile(data, ttFont=None)
+ self.assertEqual(table.names, [])
class NameRecordTest(unittest.TestCase):
-
- def test_toUnicode_utf16be(self):
- name = makeName("Foo Bold", 111, 0, 2, 7)
- self.assertEqual("utf_16_be", name.getEncoding())
- self.assertEqual("Foo Bold", name.toUnicode())
-
- def test_toUnicode_macroman(self):
- name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman
- self.assertEqual("mac_roman", name.getEncoding())
- self.assertEqual("Foo Italic", name.toUnicode())
-
- def test_toUnicode_macromanian(self):
- name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian
- self.assertEqual("mac_romanian", name.getEncoding())
- self.assertEqual("Foo Italic"+chr(0x02DA), name.toUnicode())
-
- def test_toUnicode_UnicodeDecodeError(self):
- name = makeName(b"\1", 111, 0, 2, 7)
- self.assertEqual("utf_16_be", name.getEncoding())
- self.assertRaises(UnicodeDecodeError, name.toUnicode)
-
- def test_toUnicode_singleChar(self):
- # https://github.com/fonttools/fonttools/issues/1997
- name = makeName("A", 256, 3, 1, 0x409)
- self.assertEqual(name.toUnicode(), "A")
-
- def toXML(self, name):
- writer = XMLWriter(BytesIO())
- name.toXML(writer, ttFont=None)
- xml = writer.file.getvalue().decode("utf_8").strip()
- return xml.split(writer.newlinestr.decode("utf_8"))[1:]
-
- def test_toXML_utf16be(self):
- name = makeName("Foo Bold", 111, 0, 2, 7)
- self.assertEqual([
- '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
- ' Foo Bold',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_toXML_utf16be_odd_length1(self):
- name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7)
- self.assertEqual([
- '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
- ' Foo',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_toXML_utf16be_odd_length2(self):
- name = makeName(b"\0Fooz", 111, 0, 2, 7)
- self.assertEqual([
- '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
- ' Fooz',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_toXML_utf16be_double_encoded(self):
- name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7)
- self.assertEqual([
- '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
- ' Fo',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_toXML_macroman(self):
- name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman
- self.assertEqual([
- '<namerecord nameID="222" platformID="1" platEncID="0" langID="0x7" unicode="True">',
- ' Foo Italic',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_toXML_macroman_actual_utf16be(self):
- name = makeName("\0F\0o\0o", 222, 1, 0, 7)
- self.assertEqual([
- '<namerecord nameID="222" platformID="1" platEncID="0" langID="0x7" unicode="True">',
- ' Foo',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_toXML_unknownPlatEncID_nonASCII(self):
- name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID
- self.assertEqual([
- '<namerecord nameID="333" platformID="1" platEncID="9876" langID="0x7" unicode="False">',
- ' B&#138;rli',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_toXML_unknownPlatEncID_ASCII(self):
- name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID
- self.assertEqual([
- '<namerecord nameID="333" platformID="1" platEncID="9876" langID="0x7" unicode="True">',
- ' Barli',
- '</namerecord>'
- ], self.toXML(name))
-
- def test_encoding_macroman_misc(self):
- name = makeName('', 123, 1, 0, 17) # Mac Turkish
- self.assertEqual(name.getEncoding(), "mac_turkish")
- name.langID = 37
- self.assertEqual(name.getEncoding(), "mac_romanian")
- name.langID = 45 # Other
- self.assertEqual(name.getEncoding(), "mac_roman")
-
- def test_extended_mac_encodings(self):
- name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese
- self.assertEqual(name.toUnicode(), chr(0x2122))
-
- def test_extended_mac_encodings_errors(self):
- s = "汉仪彩云体简"
- name = makeName(s.encode("x_mac_simp_chinese_ttx"), 123, 1, 25, 0)
- # first check we round-trip with 'strict'
- self.assertEqual(name.toUnicode(errors="strict"), s)
-
- # append an incomplete invalid sequence and check that we handle
- # errors with the requested error handler
- name.string += b"\xba"
- self.assertEqual(name.toUnicode(errors="backslashreplace"), s + "\\xba")
- self.assertEqual(name.toUnicode(errors="replace"), s + "�")
-
- def test_extended_unknown(self):
- name = makeName(b'\xfe', 123, 10, 11, 12)
- self.assertEqual(name.getEncoding(), "ascii")
- self.assertEqual(name.getEncoding(None), None)
- self.assertEqual(name.getEncoding(default=None), None)
-
- def test_get_family_name(self):
- name = table__n_a_m_e()
- name.names = [
- makeName("Copyright", 0, 1, 0, 0),
- makeName("Family Name ID 1", 1, 1, 0, 0),
- makeName("SubFamily Name ID 2", 2, 1, 0, 0),
- makeName("Unique Name ID 3", 3, 1, 0, 0),
- makeName("Full Name ID 4", 4, 1, 0, 0),
- makeName("PS Name ID 6", 6, 1, 0, 0),
- makeName("Version Name ID 5", 5, 1, 0, 0),
- makeName("Trademark Name ID 7", 7, 1, 0, 0),
- ]
-
- result_value = name.getBestFamilyName()
- self.assertEqual("Family Name ID 1", result_value)
-
- expected_value = "Family Name ID 16"
- name.setName(expected_value, 16, 1, 0, 0)
- result_value = name.getBestFamilyName()
- self.assertEqual(expected_value, result_value)
-
- expected_value = "Family Name ID 21"
- name.setName(expected_value, 21, 1, 0, 0)
- result_value = name.getBestFamilyName()
- self.assertEqual(expected_value, result_value)
-
- def test_get_subfamily_name(self):
- name = table__n_a_m_e()
- name.names = [
- makeName("Copyright", 0, 1, 0, 0),
- makeName("Family Name ID 1", 1, 1, 0, 0),
- makeName("SubFamily Name ID 2", 2, 1, 0, 0),
- makeName("Unique Name ID 3", 3, 1, 0, 0),
- makeName("Full Name ID 4", 4, 1, 0, 0),
- makeName("PS Name ID 6", 6, 1, 0, 0),
- makeName("Version Name ID 5", 5, 1, 0, 0),
- makeName("Trademark Name ID 7", 7, 1, 0, 0),
- ]
-
- result_value = name.getBestSubFamilyName()
- self.assertEqual("SubFamily Name ID 2", result_value)
-
- expected_value = "Family Name ID 17"
- name.setName(expected_value, 17, 1, 0, 0)
- result_value = name.getBestSubFamilyName()
- self.assertEqual(expected_value, result_value)
-
- expected_value = "Family Name ID 22"
- name.setName(expected_value, 22, 1, 0, 0)
- result_value = name.getBestSubFamilyName()
- self.assertEqual(expected_value, result_value)
-
- def test_get_nice_full_name(self):
- name = table__n_a_m_e()
- name.names = [
- makeName("NID 1", 1, 1, 0, 0),
- makeName("NID 2", 2, 1, 0, 0),
- makeName("NID 4", 4, 1, 0, 0),
- makeName("NID 6", 6, 1, 0, 0),
- ]
-
- result_value = name.getBestFullName()
- self.assertEqual("NID 1 NID 2", result_value)
-
- expected_value = "NID 1 NID 2"
- # expection is still NID 1 NID 2,
- # because name ID 17 is missing
- name.setName("NID 16", 16, 1, 0, 0)
- result_value = name.getBestFullName()
- self.assertEqual(expected_value, result_value)
-
- name.setName('NID 17', 17, 1, 0, 0)
- result_value = name.getBestFullName()
- self.assertEqual("NID 16 NID 17", result_value)
-
- expected_value = "NID 16 NID 17"
- # expection is still NID 16 NID 17,
- # because name ID 21 is missing
- name.setName('NID 21', 21, 1, 0, 0)
- result_value = name.getBestFullName()
- self.assertEqual(expected_value, result_value)
-
- name.setName('NID 22', 22, 1, 0, 0)
- result_value = name.getBestFullName()
- self.assertEqual("NID 21 NID 22", result_value)
-
- for NID in [2, 16, 17, 21, 22]:
- name.removeNames(NID)
-
- result_value = name.getBestFullName()
- self.assertEqual("NID 4", result_value)
-
- name.setName('Regular', 2, 1, 0, 0)
- result_value = name.getBestFullName()
- self.assertEqual("NID 1", result_value)
+ def test_toUnicode_utf16be(self):
+ name = makeName("Foo Bold", 111, 0, 2, 7)
+ self.assertEqual("utf_16_be", name.getEncoding())
+ self.assertEqual("Foo Bold", name.toUnicode())
+
+ def test_toUnicode_macroman(self):
+ name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman
+ self.assertEqual("mac_roman", name.getEncoding())
+ self.assertEqual("Foo Italic", name.toUnicode())
+
+ def test_toUnicode_macromanian(self):
+ name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian
+ self.assertEqual("mac_romanian", name.getEncoding())
+ self.assertEqual("Foo Italic" + chr(0x02DA), name.toUnicode())
+
+ def test_toUnicode_UnicodeDecodeError(self):
+ name = makeName(b"\1", 111, 0, 2, 7)
+ self.assertEqual("utf_16_be", name.getEncoding())
+ self.assertRaises(UnicodeDecodeError, name.toUnicode)
+
+ def test_toUnicode_singleChar(self):
+ # https://github.com/fonttools/fonttools/issues/1997
+ name = makeName("A", 256, 3, 1, 0x409)
+ self.assertEqual(name.toUnicode(), "A")
+
+ def toXML(self, name):
+ writer = XMLWriter(BytesIO())
+ name.toXML(writer, ttFont=None)
+ xml = writer.file.getvalue().decode("utf_8").strip()
+ return xml.split(writer.newlinestr.decode("utf_8"))[1:]
+
+ def test_toXML_utf16be(self):
+ name = makeName("Foo Bold", 111, 0, 2, 7)
+ self.assertEqual(
+ [
+ '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
+ " Foo Bold",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_toXML_utf16be_odd_length1(self):
+ name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7)
+ self.assertEqual(
+ [
+ '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
+ " Foo",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_toXML_utf16be_odd_length2(self):
+ name = makeName(b"\0Fooz", 111, 0, 2, 7)
+ self.assertEqual(
+ [
+ '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
+ " Fooz",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_toXML_utf16be_double_encoded(self):
+ name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7)
+ self.assertEqual(
+ [
+ '<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
+ " Fo",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_toXML_macroman(self):
+ name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman
+ self.assertEqual(
+ [
+ '<namerecord nameID="222" platformID="1" platEncID="0" langID="0x7" unicode="True">',
+ " Foo Italic",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_toXML_macroman_actual_utf16be(self):
+ name = makeName("\0F\0o\0o", 222, 1, 0, 7)
+ self.assertEqual(
+ [
+ '<namerecord nameID="222" platformID="1" platEncID="0" langID="0x7" unicode="True">',
+ " Foo",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_toXML_unknownPlatEncID_nonASCII(self):
+ name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID
+ self.assertEqual(
+ [
+ '<namerecord nameID="333" platformID="1" platEncID="9876" langID="0x7" unicode="False">',
+ " B&#138;rli",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_toXML_unknownPlatEncID_ASCII(self):
+ name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID
+ self.assertEqual(
+ [
+ '<namerecord nameID="333" platformID="1" platEncID="9876" langID="0x7" unicode="True">',
+ " Barli",
+ "</namerecord>",
+ ],
+ self.toXML(name),
+ )
+
+ def test_encoding_macroman_misc(self):
+ name = makeName("", 123, 1, 0, 17) # Mac Turkish
+ self.assertEqual(name.getEncoding(), "mac_turkish")
+ name.langID = 37
+ self.assertEqual(name.getEncoding(), "mac_romanian")
+ name.langID = 45 # Other
+ self.assertEqual(name.getEncoding(), "mac_roman")
+
+ def test_extended_mac_encodings(self):
+ name = makeName(b"\xfe", 123, 1, 1, 0) # Mac Japanese
+ self.assertEqual(name.toUnicode(), chr(0x2122))
+
+ def test_extended_mac_encodings_errors(self):
+ s = "汉仪彩云体简"
+ name = makeName(s.encode("x_mac_simp_chinese_ttx"), 123, 1, 25, 0)
+ # first check we round-trip with 'strict'
+ self.assertEqual(name.toUnicode(errors="strict"), s)
+
+ # append an incomplete invalid sequence and check that we handle
+ # errors with the requested error handler
+ name.string += b"\xba"
+ self.assertEqual(name.toUnicode(errors="backslashreplace"), s + "\\xba")
+ self.assertEqual(name.toUnicode(errors="replace"), s + "�")
+
+ def test_extended_unknown(self):
+ name = makeName(b"\xfe", 123, 10, 11, 12)
+ self.assertEqual(name.getEncoding(), "ascii")
+ self.assertEqual(name.getEncoding(None), None)
+ self.assertEqual(name.getEncoding(default=None), None)
+
+ def test_get_family_name(self):
+ name = table__n_a_m_e()
+ name.names = [
+ makeName("Copyright", 0, 1, 0, 0),
+ makeName("Family Name ID 1", 1, 1, 0, 0),
+ makeName("SubFamily Name ID 2", 2, 1, 0, 0),
+ makeName("Unique Name ID 3", 3, 1, 0, 0),
+ makeName("Full Name ID 4", 4, 1, 0, 0),
+ makeName("PS Name ID 6", 6, 1, 0, 0),
+ makeName("Version Name ID 5", 5, 1, 0, 0),
+ makeName("Trademark Name ID 7", 7, 1, 0, 0),
+ ]
+
+ result_value = name.getBestFamilyName()
+ self.assertEqual("Family Name ID 1", result_value)
+
+ expected_value = "Family Name ID 16"
+ name.setName(expected_value, 16, 1, 0, 0)
+ result_value = name.getBestFamilyName()
+ self.assertEqual(expected_value, result_value)
+
+ expected_value = "Family Name ID 21"
+ name.setName(expected_value, 21, 1, 0, 0)
+ result_value = name.getBestFamilyName()
+ self.assertEqual(expected_value, result_value)
+
+ def test_get_subfamily_name(self):
+ name = table__n_a_m_e()
+ name.names = [
+ makeName("Copyright", 0, 1, 0, 0),
+ makeName("Family Name ID 1", 1, 1, 0, 0),
+ makeName("SubFamily Name ID 2", 2, 1, 0, 0),
+ makeName("Unique Name ID 3", 3, 1, 0, 0),
+ makeName("Full Name ID 4", 4, 1, 0, 0),
+ makeName("PS Name ID 6", 6, 1, 0, 0),
+ makeName("Version Name ID 5", 5, 1, 0, 0),
+ makeName("Trademark Name ID 7", 7, 1, 0, 0),
+ ]
+
+ result_value = name.getBestSubFamilyName()
+ self.assertEqual("SubFamily Name ID 2", result_value)
+
+ expected_value = "Family Name ID 17"
+ name.setName(expected_value, 17, 1, 0, 0)
+ result_value = name.getBestSubFamilyName()
+ self.assertEqual(expected_value, result_value)
+
+ expected_value = "Family Name ID 22"
+ name.setName(expected_value, 22, 1, 0, 0)
+ result_value = name.getBestSubFamilyName()
+ self.assertEqual(expected_value, result_value)
+
+ def test_get_nice_full_name(self):
+ name = table__n_a_m_e()
+ name.names = [
+ makeName("NID 1", 1, 1, 0, 0),
+ makeName("NID 2", 2, 1, 0, 0),
+ makeName("NID 4", 4, 1, 0, 0),
+ makeName("NID 6", 6, 1, 0, 0),
+ ]
+
+ result_value = name.getBestFullName()
+ self.assertEqual("NID 1 NID 2", result_value)
+
+ expected_value = "NID 1 NID 2"
+ # expection is still NID 1 NID 2,
+ # because name ID 17 is missing
+ name.setName("NID 16", 16, 1, 0, 0)
+ result_value = name.getBestFullName()
+ self.assertEqual(expected_value, result_value)
+
+ name.setName("NID 17", 17, 1, 0, 0)
+ result_value = name.getBestFullName()
+ self.assertEqual("NID 16 NID 17", result_value)
+
+ expected_value = "NID 16 NID 17"
+ # expection is still NID 16 NID 17,
+ # because name ID 21 is missing
+ name.setName("NID 21", 21, 1, 0, 0)
+ result_value = name.getBestFullName()
+ self.assertEqual(expected_value, result_value)
+
+ name.setName("NID 22", 22, 1, 0, 0)
+ result_value = name.getBestFullName()
+ self.assertEqual("NID 21 NID 22", result_value)
+
+ for NID in [2, 16, 17, 21, 22]:
+ name.removeNames(NID)
+
+ result_value = name.getBestFullName()
+ self.assertEqual("NID 4", result_value)
+
+ name.setName("Regular", 2, 1, 0, 0)
+ result_value = name.getBestFullName()
+ self.assertEqual("NID 1", result_value)
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_o_p_b_d_test.py b/Tests/ttLib/tables/_o_p_b_d_test.py
index d62ada8b..24020e31 100644
--- a/Tests/ttLib/tables/_o_p_b_d_test.py
+++ b/Tests/ttLib/tables/_o_p_b_d_test.py
@@ -7,72 +7,72 @@ import unittest
# Example: Format 0 Optical Bounds Table
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html
OPBD_FORMAT_0_DATA = deHexStr(
- '0001 0000 0000 ' # 0: Version=1.0, Format=0
- '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2
- '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0
- '000A 001E ' # 18: Glyph=10(=C), OffsetOfOpticalBoundsDeltas=30
- '002B 0026 ' # 22: Glyph=43(=A), OffsetOfOpticalBoundsDeltas=38
- 'FFFF 0000 ' # 26: Glyph=<end>, OffsetOfOpticalBoundsDeltas=0
- 'FFCE 0005 0037 FFFB ' # 30: Bounds[C].Left=-50 .Top=5 .Right=55 .Bottom=-5
- 'FFF6 000F 0000 0000 ' # 38: Bounds[A].Left=-10 .Top=15 .Right=0 .Bottom=0
-) # 46: <end>
-assert(len(OPBD_FORMAT_0_DATA) == 46)
+ "0001 0000 0000 " # 0: Version=1.0, Format=0
+ "0006 0004 0002 " # 6: LookupFormat=6, UnitSize=4, NUnits=2
+ "0008 0001 0000 " # 12: SearchRange=8, EntrySelector=1, RangeShift=0
+ "000A 001E " # 18: Glyph=10(=C), OffsetOfOpticalBoundsDeltas=30
+ "002B 0026 " # 22: Glyph=43(=A), OffsetOfOpticalBoundsDeltas=38
+ "FFFF 0000 " # 26: Glyph=<end>, OffsetOfOpticalBoundsDeltas=0
+ "FFCE 0005 0037 FFFB " # 30: Bounds[C].Left=-50 .Top=5 .Right=55 .Bottom=-5
+ "FFF6 000F 0000 0000 " # 38: Bounds[A].Left=-10 .Top=15 .Right=0 .Bottom=0
+) # 46: <end>
+assert len(OPBD_FORMAT_0_DATA) == 46
OPBD_FORMAT_0_XML = [
'<Version value="0x00010000"/>',
'<OpticalBounds Format="0">',
- ' <OpticalBoundsDeltas>',
+ " <OpticalBoundsDeltas>",
' <Lookup glyph="A">',
' <Left value="-10"/>',
' <Top value="15"/>',
' <Right value="0"/>',
' <Bottom value="0"/>',
- ' </Lookup>',
+ " </Lookup>",
' <Lookup glyph="C">',
' <Left value="-50"/>',
' <Top value="5"/>',
' <Right value="55"/>',
' <Bottom value="-5"/>',
- ' </Lookup>',
- ' </OpticalBoundsDeltas>',
- '</OpticalBounds>',
+ " </Lookup>",
+ " </OpticalBoundsDeltas>",
+ "</OpticalBounds>",
]
# Example: Format 1 Optical Bounds Table
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html
OPBD_FORMAT_1_DATA = deHexStr(
- '0001 0000 0001 ' # 0: Version=1.0, Format=1
- '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2
- '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0
- '000A 001E ' # 18: Glyph=10(=C), OffsetOfOpticalBoundsPoints=30
- '002B 0026 ' # 22: Glyph=43(=A), OffsetOfOpticalBoundsPoints=38
- 'FFFF 0000 ' # 26: Glyph=<end>, OffsetOfOpticalBoundsPoints=0
- '0024 0025 0026 0027 ' # 30: Bounds[C].Left=36 .Top=37 .Right=38 .Bottom=39
- '0020 0029 FFFF FFFF ' # 38: Bounds[A].Left=32 .Top=41 .Right=-1 .Bottom=-1
-) # 46: <end>
-assert(len(OPBD_FORMAT_1_DATA) == 46)
+ "0001 0000 0001 " # 0: Version=1.0, Format=1
+ "0006 0004 0002 " # 6: LookupFormat=6, UnitSize=4, NUnits=2
+ "0008 0001 0000 " # 12: SearchRange=8, EntrySelector=1, RangeShift=0
+ "000A 001E " # 18: Glyph=10(=C), OffsetOfOpticalBoundsPoints=30
+ "002B 0026 " # 22: Glyph=43(=A), OffsetOfOpticalBoundsPoints=38
+ "FFFF 0000 " # 26: Glyph=<end>, OffsetOfOpticalBoundsPoints=0
+ "0024 0025 0026 0027 " # 30: Bounds[C].Left=36 .Top=37 .Right=38 .Bottom=39
+ "0020 0029 FFFF FFFF " # 38: Bounds[A].Left=32 .Top=41 .Right=-1 .Bottom=-1
+) # 46: <end>
+assert len(OPBD_FORMAT_1_DATA) == 46
OPBD_FORMAT_1_XML = [
'<Version value="0x00010000"/>',
'<OpticalBounds Format="1">',
- ' <OpticalBoundsPoints>',
+ " <OpticalBoundsPoints>",
' <Lookup glyph="A">',
' <Left value="32"/>',
' <Top value="41"/>',
' <Right value="-1"/>',
' <Bottom value="-1"/>',
- ' </Lookup>',
+ " </Lookup>",
' <Lookup glyph="C">',
' <Left value="36"/>',
' <Top value="37"/>',
' <Right value="38"/>',
' <Bottom value="39"/>',
- ' </Lookup>',
- ' </OpticalBoundsPoints>',
- '</OpticalBounds>',
+ " </Lookup>",
+ " </OpticalBoundsPoints>",
+ "</OpticalBounds>",
]
@@ -81,101 +81,99 @@ OPBD_FORMAT_1_XML = [
# was crashing when trying to decompile this table.
# https://github.com/fonttools/fonttools/issues/1031
OPBD_APPLE_CHANCERY_DATA = deHexStr(
- '0001 0000 0000 ' # 0: Version=1.0, Format=0
- '0004 0006 0011 ' # 6: LookupFormat=4, UnitSize=6, NUnits=17
- '0060 0004 0006 ' # 12: SearchRange=96, EntrySelector=4, RangeShift=6
- '017d 017d 0072 ' # 18: Seg[0].LastGlyph=381, FirstGlyph=381, Off=114(+6)
- '0183 0180 0074 ' # 24: Seg[1].LastGlyph=387, FirstGlyph=384, Off=116(+6)
- '0186 0185 007c ' # 30: Seg[2].LastGlyph=390, FirstGlyph=389, Off=124(+6)
- '018f 018b 0080 ' # 36: Seg[3].LastGlyph=399, FirstGlyph=395, Off=128(+6)
- '01a0 0196 008a ' # 42: Seg[4].LastGlyph=416, FirstGlyph=406, Off=138(+6)
- '01a5 01a3 00a0 ' # 48: Seg[5].LastGlyph=421, FirstGlyph=419, Off=160(+6)
- '01aa 01aa 00a6 ' # 54: Seg[6].LastGlyph=426, FirstGlyph=426, Off=166(+6)
- '01ac 01ac 00a8 ' # 60: Seg[7].LastGlyph=428, FirstGlyph=428, Off=168(+6)
- '01fb 01f1 00aa ' # 66: Seg[8].LastGlyph=507, FirstGlyph=497, Off=170(+6)
- '0214 0209 00c0 ' # 72: Seg[9].LastGlyph=532, FirstGlyph=521, Off=192(+6)
- '021d 0216 00d8 ' # 78: Seg[10].LastGlyph=541, FirstGlyph=534, Off=216(+6)
- '0222 0220 00e8 ' # 84: Seg[11].LastGlyph=546, FirstGlyph=544, Off=232(+6)
- '0227 0225 00ee ' # 90: Seg[12].LastGlyph=551, FirstGlyph=549, Off=238(+6)
- '0229 0229 00f4 ' # 96: Seg[13].LastGlyph=553, FirstGlyph=553, Off=244(+6)
- '023b 023b 00f6 ' # 102: Seg[14].LastGlyph=571, FirstGlyph=571, Off=246(+6)
- '023e 023e 00f8 ' # 108: Seg[15].LastGlyph=574, FirstGlyph=574, Off=248(+6)
- 'ffff ffff 00fa ' # 114: Seg[16]=<end>
- '0100 0108 0110 0118 0120 0128 0130 0138 0140 0148 0150 0158 '
- '0160 0168 0170 0178 0180 0188 0190 0198 01a0 01a8 01b0 01b8 '
- '01c0 01c8 01d0 01d8 01e0 01e8 01f0 01f8 0200 0208 0210 0218 '
- '0220 0228 0230 0238 0240 0248 0250 0258 0260 0268 0270 0278 '
- '0280 0288 0290 0298 02a0 02a8 02b0 02b8 02c0 02c8 02d0 02d8 '
- '02e0 02e8 02f0 02f8 0300 0308 0310 0318 fd98 0000 0000 0000 '
- 'fdbc 0000 0000 0000 fdbc 0000 0000 0000 fdbf 0000 0000 0000 '
- 'fdbc 0000 0000 0000 fd98 0000 0000 0000 fda9 0000 0000 0000 '
- 'fd98 0000 0000 0000 fd98 0000 0000 0000 fd98 0000 0000 0000 '
- '0000 0000 0205 0000 0000 0000 0205 0000 0000 0000 02a4 0000 '
- '0000 0000 027e 0000 0000 0000 02f4 0000 0000 0000 02a4 0000 '
- '0000 0000 0365 0000 0000 0000 0291 0000 0000 0000 0291 0000 '
- '0000 0000 026a 0000 0000 0000 02b8 0000 0000 0000 02cb 0000 '
- '0000 0000 02a4 0000 0000 0000 01a9 0000 0000 0000 0244 0000 '
- '0000 0000 02a4 0000 0000 0000 02cb 0000 0000 0000 0244 0000 '
- '0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 037f 0000 '
- '0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 0307 0000 '
- '0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 03e3 0000 '
- '0000 0000 030c 0000 0000 0000 0307 0000 fe30 0000 0000 0000 '
- 'fe7e 0000 0000 0000 fe91 0000 0000 0000 fe6a 0000 0000 0000 '
- 'fe6a 0000 0000 0000 fecb 0000 0000 0000 fe6a 0000 0000 0000 '
- 'fe7e 0000 0000 0000 fea4 0000 0000 0000 fe7e 0000 0000 0000 '
- 'fe44 0000 0000 0000 fea4 0000 0000 0000 feb8 0000 0000 0000 '
- 'fe7e 0000 0000 0000 fe5e 0000 0000 0000 fe37 0000 0000 0000 '
- 'fe37 0000 0000 0000 fcbd 0000 0000 0000 fd84 0000 0000 0000 '
- 'fd98 0000 0000 0000 fd82 0000 0000 0000 fcbd 0000 0000 0000 '
- 'fd84 0000 0000 0000 fcbd 0000 0000 0000 fcbd 0000 0000 0000 '
- 'fe72 0000 0000 0000 ff9d 0000 0000 0000 0000 0000 032f 0000 '
- '0000 0000 03ba 0000 '
+ "0001 0000 0000 " # 0: Version=1.0, Format=0
+ "0004 0006 0011 " # 6: LookupFormat=4, UnitSize=6, NUnits=17
+ "0060 0004 0006 " # 12: SearchRange=96, EntrySelector=4, RangeShift=6
+ "017d 017d 0072 " # 18: Seg[0].LastGlyph=381, FirstGlyph=381, Off=114(+6)
+ "0183 0180 0074 " # 24: Seg[1].LastGlyph=387, FirstGlyph=384, Off=116(+6)
+ "0186 0185 007c " # 30: Seg[2].LastGlyph=390, FirstGlyph=389, Off=124(+6)
+ "018f 018b 0080 " # 36: Seg[3].LastGlyph=399, FirstGlyph=395, Off=128(+6)
+ "01a0 0196 008a " # 42: Seg[4].LastGlyph=416, FirstGlyph=406, Off=138(+6)
+ "01a5 01a3 00a0 " # 48: Seg[5].LastGlyph=421, FirstGlyph=419, Off=160(+6)
+ "01aa 01aa 00a6 " # 54: Seg[6].LastGlyph=426, FirstGlyph=426, Off=166(+6)
+ "01ac 01ac 00a8 " # 60: Seg[7].LastGlyph=428, FirstGlyph=428, Off=168(+6)
+ "01fb 01f1 00aa " # 66: Seg[8].LastGlyph=507, FirstGlyph=497, Off=170(+6)
+ "0214 0209 00c0 " # 72: Seg[9].LastGlyph=532, FirstGlyph=521, Off=192(+6)
+ "021d 0216 00d8 " # 78: Seg[10].LastGlyph=541, FirstGlyph=534, Off=216(+6)
+ "0222 0220 00e8 " # 84: Seg[11].LastGlyph=546, FirstGlyph=544, Off=232(+6)
+ "0227 0225 00ee " # 90: Seg[12].LastGlyph=551, FirstGlyph=549, Off=238(+6)
+ "0229 0229 00f4 " # 96: Seg[13].LastGlyph=553, FirstGlyph=553, Off=244(+6)
+ "023b 023b 00f6 " # 102: Seg[14].LastGlyph=571, FirstGlyph=571, Off=246(+6)
+ "023e 023e 00f8 " # 108: Seg[15].LastGlyph=574, FirstGlyph=574, Off=248(+6)
+ "ffff ffff 00fa " # 114: Seg[16]=<end>
+ "0100 0108 0110 0118 0120 0128 0130 0138 0140 0148 0150 0158 "
+ "0160 0168 0170 0178 0180 0188 0190 0198 01a0 01a8 01b0 01b8 "
+ "01c0 01c8 01d0 01d8 01e0 01e8 01f0 01f8 0200 0208 0210 0218 "
+ "0220 0228 0230 0238 0240 0248 0250 0258 0260 0268 0270 0278 "
+ "0280 0288 0290 0298 02a0 02a8 02b0 02b8 02c0 02c8 02d0 02d8 "
+ "02e0 02e8 02f0 02f8 0300 0308 0310 0318 fd98 0000 0000 0000 "
+ "fdbc 0000 0000 0000 fdbc 0000 0000 0000 fdbf 0000 0000 0000 "
+ "fdbc 0000 0000 0000 fd98 0000 0000 0000 fda9 0000 0000 0000 "
+ "fd98 0000 0000 0000 fd98 0000 0000 0000 fd98 0000 0000 0000 "
+ "0000 0000 0205 0000 0000 0000 0205 0000 0000 0000 02a4 0000 "
+ "0000 0000 027e 0000 0000 0000 02f4 0000 0000 0000 02a4 0000 "
+ "0000 0000 0365 0000 0000 0000 0291 0000 0000 0000 0291 0000 "
+ "0000 0000 026a 0000 0000 0000 02b8 0000 0000 0000 02cb 0000 "
+ "0000 0000 02a4 0000 0000 0000 01a9 0000 0000 0000 0244 0000 "
+ "0000 0000 02a4 0000 0000 0000 02cb 0000 0000 0000 0244 0000 "
+ "0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 037f 0000 "
+ "0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 0307 0000 "
+ "0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 03e3 0000 "
+ "0000 0000 030c 0000 0000 0000 0307 0000 fe30 0000 0000 0000 "
+ "fe7e 0000 0000 0000 fe91 0000 0000 0000 fe6a 0000 0000 0000 "
+ "fe6a 0000 0000 0000 fecb 0000 0000 0000 fe6a 0000 0000 0000 "
+ "fe7e 0000 0000 0000 fea4 0000 0000 0000 fe7e 0000 0000 0000 "
+ "fe44 0000 0000 0000 fea4 0000 0000 0000 feb8 0000 0000 0000 "
+ "fe7e 0000 0000 0000 fe5e 0000 0000 0000 fe37 0000 0000 0000 "
+ "fe37 0000 0000 0000 fcbd 0000 0000 0000 fd84 0000 0000 0000 "
+ "fd98 0000 0000 0000 fd82 0000 0000 0000 fcbd 0000 0000 0000 "
+ "fd84 0000 0000 0000 fcbd 0000 0000 0000 fcbd 0000 0000 0000 "
+ "fe72 0000 0000 0000 ff9d 0000 0000 0000 0000 0000 032f 0000 "
+ "0000 0000 03ba 0000 "
)
assert len(OPBD_APPLE_CHANCERY_DATA) == 800
class OPBDTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- glyphs = ['.notdef'] + ['X.alt%d' for g in range(1, 50)]
- glyphs[10] = 'C'
- glyphs[43] = 'A'
+ glyphs = [".notdef"] + ["X.alt%d" for g in range(1, 50)]
+ glyphs[10] = "C"
+ glyphs[43] = "A"
cls.font = FakeFont(glyphs)
def test_decompile_toXML_format0(self):
- table = newTable('opbd')
+ table = newTable("opbd")
table.decompile(OPBD_FORMAT_0_DATA, self.font)
self.assertEqual(getXML(table.toXML), OPBD_FORMAT_0_XML)
def test_compile_fromXML_format0(self):
- table = newTable('opbd')
+ table = newTable("opbd")
for name, attrs, content in parseXML(OPBD_FORMAT_0_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(OPBD_FORMAT_0_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(OPBD_FORMAT_0_DATA))
def test_decompile_toXML_format1(self):
- table = newTable('opbd')
+ table = newTable("opbd")
table.decompile(OPBD_FORMAT_1_DATA, self.font)
self.assertEqual(getXML(table.toXML), OPBD_FORMAT_1_XML)
def test_compile_fromXML_format1(self):
- table = newTable('opbd')
+ table = newTable("opbd")
for name, attrs, content in parseXML(OPBD_FORMAT_1_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(OPBD_FORMAT_1_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(OPBD_FORMAT_1_DATA))
def test_decompile_AppleChancery(self):
# Make sure we do not crash when decompiling the 'opbd' table of
# AppleChancery.ttf. https://github.com/fonttools/fonttools/issues/1031
- table = newTable('opbd')
+ table = newTable("opbd")
table.decompile(OPBD_APPLE_CHANCERY_DATA, self.font)
self.assertIn('<OpticalBounds Format="0">', getXML(table.toXML))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_p_r_o_p_test.py b/Tests/ttLib/tables/_p_r_o_p_test.py
index 63c2924b..42f9815b 100644
--- a/Tests/ttLib/tables/_p_r_o_p_test.py
+++ b/Tests/ttLib/tables/_p_r_o_p_test.py
@@ -5,78 +5,76 @@ import unittest
PROP_FORMAT_0_DATA = deHexStr(
- '0001 0000 0000 ' # 0: Version=1.0, Format=0
- '0005 ' # 6: DefaultProperties=European number terminator
-) # 8: <end>
-assert(len(PROP_FORMAT_0_DATA) == 8)
+ "0001 0000 0000 " # 0: Version=1.0, Format=0
+ "0005 " # 6: DefaultProperties=European number terminator
+) # 8: <end>
+assert len(PROP_FORMAT_0_DATA) == 8
PROP_FORMAT_0_XML = [
'<Version value="1.0"/>',
'<GlyphProperties Format="0">',
' <DefaultProperties value="5"/>',
- '</GlyphProperties>',
+ "</GlyphProperties>",
]
PROP_FORMAT_1_DATA = deHexStr(
- '0003 0000 0001 ' # 0: Version=3.0, Format=1
- '0000 ' # 6: DefaultProperties=left-to-right; non-whitespace
- '0008 0003 0004 ' # 8: LookupFormat=8, FirstGlyph=3, GlyphCount=4
- '000B ' # 14: Properties[C]=other neutral
- '000A ' # 16: Properties[D]=whitespace
- '600B ' # 18: Properties[E]=other neutral; hanging punct
- '0005 ' # 20: Properties[F]=European number terminator
-) # 22: <end>
-assert(len(PROP_FORMAT_1_DATA) == 22)
+ "0003 0000 0001 " # 0: Version=3.0, Format=1
+ "0000 " # 6: DefaultProperties=left-to-right; non-whitespace
+ "0008 0003 0004 " # 8: LookupFormat=8, FirstGlyph=3, GlyphCount=4
+ "000B " # 14: Properties[C]=other neutral
+ "000A " # 16: Properties[D]=whitespace
+ "600B " # 18: Properties[E]=other neutral; hanging punct
+ "0005 " # 20: Properties[F]=European number terminator
+) # 22: <end>
+assert len(PROP_FORMAT_1_DATA) == 22
PROP_FORMAT_1_XML = [
'<Version value="3.0"/>',
'<GlyphProperties Format="1">',
' <DefaultProperties value="0"/>',
- ' <Properties>',
+ " <Properties>",
' <Lookup glyph="C" value="11"/>',
' <Lookup glyph="D" value="10"/>',
' <Lookup glyph="E" value="24587"/>',
' <Lookup glyph="F" value="5"/>',
- ' </Properties>',
- '</GlyphProperties>',
+ " </Properties>",
+ "</GlyphProperties>",
]
class PROPTest(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
cls.maxDiff = None
- cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D', 'E', 'F', 'G'])
+ cls.font = FakeFont([".notdef", "A", "B", "C", "D", "E", "F", "G"])
def test_decompile_toXML_format0(self):
- table = newTable('prop')
+ table = newTable("prop")
table.decompile(PROP_FORMAT_0_DATA, self.font)
self.assertEqual(getXML(table.toXML), PROP_FORMAT_0_XML)
def test_compile_fromXML_format0(self):
- table = newTable('prop')
+ table = newTable("prop")
for name, attrs, content in parseXML(PROP_FORMAT_0_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(PROP_FORMAT_0_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(PROP_FORMAT_0_DATA))
def test_decompile_toXML_format1(self):
- table = newTable('prop')
+ table = newTable("prop")
table.decompile(PROP_FORMAT_1_DATA, self.font)
self.assertEqual(getXML(table.toXML), PROP_FORMAT_1_XML)
def test_compile_fromXML_format1(self):
- table = newTable('prop')
+ table = newTable("prop")
for name, attrs, content in parseXML(PROP_FORMAT_1_XML):
table.fromXML(name, attrs, content, font=self.font)
- self.assertEqual(hexStr(table.compile(self.font)),
- hexStr(PROP_FORMAT_1_DATA))
+ self.assertEqual(hexStr(table.compile(self.font)), hexStr(PROP_FORMAT_1_DATA))
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_t_r_a_k_test.py b/Tests/ttLib/tables/_t_r_a_k_test.py
index 2ea6cf59..ec178377 100644
--- a/Tests/ttLib/tables/_t_r_a_k_test.py
+++ b/Tests/ttLib/tables/_t_r_a_k_test.py
@@ -8,332 +8,336 @@ import unittest
# /Library/Fonts/Osaka.ttf from OSX has trak table with both horiz and vertData
OSAKA_TRAK_TABLE_DATA = deHexStr(
- '00 01 00 00 00 00 00 0c 00 40 00 00 00 03 00 02 00 00 00 2c ff ff '
- '00 00 01 06 00 34 00 00 00 00 01 07 00 38 00 01 00 00 01 08 00 3c '
- '00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 00 00 00 0c 00 0c 00 03 '
- '00 02 00 00 00 60 ff ff 00 00 01 09 00 68 00 00 00 00 01 0a 00 6c '
- '00 01 00 00 01 0b 00 70 00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 '
- '00 00 00 0c 00 0c')
-
-# decompiled horizData and vertData entries from Osaka.ttf
+ "00 01 00 00 00 00 00 0c 00 40 00 00 00 03 00 02 00 00 00 2c ff ff "
+ "00 00 01 06 00 34 00 00 00 00 01 07 00 38 00 01 00 00 01 08 00 3c "
+ "00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 00 00 00 0c 00 0c 00 03 "
+ "00 02 00 00 00 60 ff ff 00 00 01 09 00 68 00 00 00 00 01 0a 00 6c "
+ "00 01 00 00 01 0b 00 70 00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 "
+ "00 00 00 0c 00 0c"
+)
+
+# decompiled horizData and vertData entries from Osaka.ttf
OSAKA_HORIZ_TRACK_ENTRIES = {
- -1.0: TrackTableEntry({24.0: -12, 12.0: -12}, nameIndex=262),
- 0.0: TrackTableEntry({24.0: 0, 12.0: 0}, nameIndex=263),
- 1.0: TrackTableEntry({24.0: 12, 12.0: 12}, nameIndex=264)
- }
+ -1.0: TrackTableEntry({24.0: -12, 12.0: -12}, nameIndex=262),
+ 0.0: TrackTableEntry({24.0: 0, 12.0: 0}, nameIndex=263),
+ 1.0: TrackTableEntry({24.0: 12, 12.0: 12}, nameIndex=264),
+}
OSAKA_VERT_TRACK_ENTRIES = {
- -1.0: TrackTableEntry({24.0: -12, 12.0: -12}, nameIndex=265),
- 0.0: TrackTableEntry({24.0: 0, 12.0: 0}, nameIndex=266),
- 1.0: TrackTableEntry({24.0: 12, 12.0: 12}, nameIndex=267)
- }
+ -1.0: TrackTableEntry({24.0: -12, 12.0: -12}, nameIndex=265),
+ 0.0: TrackTableEntry({24.0: 0, 12.0: 0}, nameIndex=266),
+ 1.0: TrackTableEntry({24.0: 12, 12.0: 12}, nameIndex=267),
+}
OSAKA_TRAK_TABLE_XML = [
- '<version value="1.0"/>',
- '<format value="0"/>',
- '<horizData>',
- ' <!-- nTracks=3, nSizes=2 -->',
- ' <trackEntry value="-1.0" nameIndex="262">',
- ' <!-- Tight -->',
- ' <track size="12.0" value="-12"/>',
- ' <track size="24.0" value="-12"/>',
- ' </trackEntry>',
- ' <trackEntry value="0.0" nameIndex="263">',
- ' <!-- Normal -->',
- ' <track size="12.0" value="0"/>',
- ' <track size="24.0" value="0"/>',
- ' </trackEntry>',
- ' <trackEntry value="1.0" nameIndex="264">',
- ' <!-- Loose -->',
- ' <track size="12.0" value="12"/>',
- ' <track size="24.0" value="12"/>',
- ' </trackEntry>',
- '</horizData>',
- '<vertData>',
- ' <!-- nTracks=3, nSizes=2 -->',
- ' <trackEntry value="-1.0" nameIndex="265">',
- ' <!-- Tight -->',
- ' <track size="12.0" value="-12"/>',
- ' <track size="24.0" value="-12"/>',
- ' </trackEntry>',
- ' <trackEntry value="0.0" nameIndex="266">',
- ' <!-- Normal -->',
- ' <track size="12.0" value="0"/>',
- ' <track size="24.0" value="0"/>',
- ' </trackEntry>',
- ' <trackEntry value="1.0" nameIndex="267">',
- ' <!-- Loose -->',
- ' <track size="12.0" value="12"/>',
- ' <track size="24.0" value="12"/>',
- ' </trackEntry>',
- '</vertData>',
+ '<version value="1.0"/>',
+ '<format value="0"/>',
+ "<horizData>",
+ " <!-- nTracks=3, nSizes=2 -->",
+ ' <trackEntry value="-1.0" nameIndex="262">',
+ " <!-- Tight -->",
+ ' <track size="12.0" value="-12"/>',
+ ' <track size="24.0" value="-12"/>',
+ " </trackEntry>",
+ ' <trackEntry value="0.0" nameIndex="263">',
+ " <!-- Normal -->",
+ ' <track size="12.0" value="0"/>',
+ ' <track size="24.0" value="0"/>',
+ " </trackEntry>",
+ ' <trackEntry value="1.0" nameIndex="264">',
+ " <!-- Loose -->",
+ ' <track size="12.0" value="12"/>',
+ ' <track size="24.0" value="12"/>',
+ " </trackEntry>",
+ "</horizData>",
+ "<vertData>",
+ " <!-- nTracks=3, nSizes=2 -->",
+ ' <trackEntry value="-1.0" nameIndex="265">',
+ " <!-- Tight -->",
+ ' <track size="12.0" value="-12"/>',
+ ' <track size="24.0" value="-12"/>',
+ " </trackEntry>",
+ ' <trackEntry value="0.0" nameIndex="266">',
+ " <!-- Normal -->",
+ ' <track size="12.0" value="0"/>',
+ ' <track size="24.0" value="0"/>',
+ " </trackEntry>",
+ ' <trackEntry value="1.0" nameIndex="267">',
+ " <!-- Loose -->",
+ ' <track size="12.0" value="12"/>',
+ ' <track size="24.0" value="12"/>',
+ " </trackEntry>",
+ "</vertData>",
]
# made-up table containing only vertData (no horizData)
OSAKA_VERT_ONLY_TRAK_TABLE_DATA = deHexStr(
- '00 01 00 00 00 00 00 00 00 0c 00 00 00 03 00 02 00 00 00 2c ff ff '
- '00 00 01 09 00 34 00 00 00 00 01 0a 00 38 00 01 00 00 01 0b 00 3c '
- '00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 00 00 00 0c 00 0c')
+ "00 01 00 00 00 00 00 00 00 0c 00 00 00 03 00 02 00 00 00 2c ff ff "
+ "00 00 01 09 00 34 00 00 00 00 01 0a 00 38 00 01 00 00 01 0b 00 3c "
+ "00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 00 00 00 0c 00 0c"
+)
OSAKA_VERT_ONLY_TRAK_TABLE_XML = [
- '<version value="1.0"/>',
- '<format value="0"/>',
- '<horizData>',
- ' <!-- nTracks=0, nSizes=0 -->',
- '</horizData>',
- '<vertData>',
- ' <!-- nTracks=3, nSizes=2 -->',
- ' <trackEntry value="-1.0" nameIndex="265">',
- ' <!-- Tight -->',
- ' <track size="12.0" value="-12"/>',
- ' <track size="24.0" value="-12"/>',
- ' </trackEntry>',
- ' <trackEntry value="0.0" nameIndex="266">',
- ' <!-- Normal -->',
- ' <track size="12.0" value="0"/>',
- ' <track size="24.0" value="0"/>',
- ' </trackEntry>',
- ' <trackEntry value="1.0" nameIndex="267">',
- ' <!-- Loose -->',
- ' <track size="12.0" value="12"/>',
- ' <track size="24.0" value="12"/>',
- ' </trackEntry>',
- '</vertData>',
+ '<version value="1.0"/>',
+ '<format value="0"/>',
+ "<horizData>",
+ " <!-- nTracks=0, nSizes=0 -->",
+ "</horizData>",
+ "<vertData>",
+ " <!-- nTracks=3, nSizes=2 -->",
+ ' <trackEntry value="-1.0" nameIndex="265">',
+ " <!-- Tight -->",
+ ' <track size="12.0" value="-12"/>',
+ ' <track size="24.0" value="-12"/>',
+ " </trackEntry>",
+ ' <trackEntry value="0.0" nameIndex="266">',
+ " <!-- Normal -->",
+ ' <track size="12.0" value="0"/>',
+ ' <track size="24.0" value="0"/>',
+ " </trackEntry>",
+ ' <trackEntry value="1.0" nameIndex="267">',
+ " <!-- Loose -->",
+ ' <track size="12.0" value="12"/>',
+ ' <track size="24.0" value="12"/>',
+ " </trackEntry>",
+ "</vertData>",
]
# also /Library/Fonts/Skia.ttf contains a trak table with horizData
SKIA_TRAK_TABLE_DATA = deHexStr(
- '00 01 00 00 00 00 00 0c 00 00 00 00 00 03 00 05 00 00 00 2c ff ff '
- '00 00 01 13 00 40 00 00 00 00 01 2f 00 4a 00 01 00 00 01 14 00 54 '
- '00 09 00 00 00 0a 00 00 00 0c 00 00 00 12 00 00 00 13 00 00 ff f6 '
- 'ff e2 ff c4 ff c1 ff c1 00 0f 00 00 ff fb ff e7 ff e7 00 8c 00 82 '
- '00 7d 00 73 00 73')
+ "00 01 00 00 00 00 00 0c 00 00 00 00 00 03 00 05 00 00 00 2c ff ff "
+ "00 00 01 13 00 40 00 00 00 00 01 2f 00 4a 00 01 00 00 01 14 00 54 "
+ "00 09 00 00 00 0a 00 00 00 0c 00 00 00 12 00 00 00 13 00 00 ff f6 "
+ "ff e2 ff c4 ff c1 ff c1 00 0f 00 00 ff fb ff e7 ff e7 00 8c 00 82 "
+ "00 7d 00 73 00 73"
+)
SKIA_TRACK_ENTRIES = {
- -1.0: TrackTableEntry(
- {9.0: -10, 10.0: -30, 19.0: -63, 12.0: -60, 18.0: -63}, nameIndex=275),
- 0.0: TrackTableEntry(
- {9.0: 15, 10.0: 0, 19.0: -25, 12.0: -5, 18.0: -25}, nameIndex=303),
- 1.0: TrackTableEntry(
- {9.0: 140, 10.0: 130, 19.0: 115, 12.0: 125, 18.0: 115}, nameIndex=276)
- }
+ -1.0: TrackTableEntry(
+ {9.0: -10, 10.0: -30, 19.0: -63, 12.0: -60, 18.0: -63}, nameIndex=275
+ ),
+ 0.0: TrackTableEntry(
+ {9.0: 15, 10.0: 0, 19.0: -25, 12.0: -5, 18.0: -25}, nameIndex=303
+ ),
+ 1.0: TrackTableEntry(
+ {9.0: 140, 10.0: 130, 19.0: 115, 12.0: 125, 18.0: 115}, nameIndex=276
+ ),
+}
SKIA_TRAK_TABLE_XML = [
- '<version value="1.0"/>',
- '<format value="0"/>',
- '<horizData>',
- ' <!-- nTracks=3, nSizes=5 -->',
- ' <trackEntry value="-1.0" nameIndex="275">',
- ' <!-- Tight -->',
- ' <track size="9.0" value="-10"/>',
- ' <track size="10.0" value="-30"/>',
- ' <track size="12.0" value="-60"/>',
- ' <track size="18.0" value="-63"/>',
- ' <track size="19.0" value="-63"/>',
- ' </trackEntry>',
- ' <trackEntry value="0.0" nameIndex="303">',
- ' <!-- Normal -->',
- ' <track size="9.0" value="15"/>',
- ' <track size="10.0" value="0"/>',
- ' <track size="12.0" value="-5"/>',
- ' <track size="18.0" value="-25"/>',
- ' <track size="19.0" value="-25"/>',
- ' </trackEntry>',
- ' <trackEntry value="1.0" nameIndex="276">',
- ' <!-- Loose -->',
- ' <track size="9.0" value="140"/>',
- ' <track size="10.0" value="130"/>',
- ' <track size="12.0" value="125"/>',
- ' <track size="18.0" value="115"/>',
- ' <track size="19.0" value="115"/>',
- ' </trackEntry>',
- '</horizData>',
- '<vertData>',
- ' <!-- nTracks=0, nSizes=0 -->',
- '</vertData>',
+ '<version value="1.0"/>',
+ '<format value="0"/>',
+ "<horizData>",
+ " <!-- nTracks=3, nSizes=5 -->",
+ ' <trackEntry value="-1.0" nameIndex="275">',
+ " <!-- Tight -->",
+ ' <track size="9.0" value="-10"/>',
+ ' <track size="10.0" value="-30"/>',
+ ' <track size="12.0" value="-60"/>',
+ ' <track size="18.0" value="-63"/>',
+ ' <track size="19.0" value="-63"/>',
+ " </trackEntry>",
+ ' <trackEntry value="0.0" nameIndex="303">',
+ " <!-- Normal -->",
+ ' <track size="9.0" value="15"/>',
+ ' <track size="10.0" value="0"/>',
+ ' <track size="12.0" value="-5"/>',
+ ' <track size="18.0" value="-25"/>',
+ ' <track size="19.0" value="-25"/>',
+ " </trackEntry>",
+ ' <trackEntry value="1.0" nameIndex="276">',
+ " <!-- Loose -->",
+ ' <track size="9.0" value="140"/>',
+ ' <track size="10.0" value="130"/>',
+ ' <track size="12.0" value="125"/>',
+ ' <track size="18.0" value="115"/>',
+ ' <track size="19.0" value="115"/>',
+ " </trackEntry>",
+ "</horizData>",
+ "<vertData>",
+ " <!-- nTracks=0, nSizes=0 -->",
+ "</vertData>",
]
class TrackingTableTest(unittest.TestCase):
-
- def __init__(self, methodName):
- unittest.TestCase.__init__(self, methodName)
- # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
- # and fires deprecation warnings if a program uses the old name.
- if not hasattr(self, "assertRaisesRegex"):
- self.assertRaisesRegex = self.assertRaisesRegexp
-
- def setUp(self):
- table = table__t_r_a_k()
- table.version = 1.0
- table.format = 0
- self.font = {'trak': table}
-
- def test_compile_horiz(self):
- table = self.font['trak']
- table.horizData = TrackData(SKIA_TRACK_ENTRIES)
- trakData = table.compile(self.font)
- self.assertEqual(trakData, SKIA_TRAK_TABLE_DATA)
-
- def test_compile_vert(self):
- table = self.font['trak']
- table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
- trakData = table.compile(self.font)
- self.assertEqual(trakData, OSAKA_VERT_ONLY_TRAK_TABLE_DATA)
-
- def test_compile_horiz_and_vert(self):
- table = self.font['trak']
- table.horizData = TrackData(OSAKA_HORIZ_TRACK_ENTRIES)
- table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
- trakData = table.compile(self.font)
- self.assertEqual(trakData, OSAKA_TRAK_TABLE_DATA)
-
- def test_compile_longword_aligned(self):
- table = self.font['trak']
- # without padding, this 'horizData' would end up 46 byte long
- table.horizData = TrackData({
- 0.0: TrackTableEntry(nameIndex=256, values={12.0: 0, 24.0: 0, 36.0: 0})
- })
- table.vertData = TrackData({
- 0.0: TrackTableEntry(nameIndex=257, values={12.0: 0, 24.0: 0, 36.0: 0})
- })
- trakData = table.compile(self.font)
- self.assertTrue(table.vertOffset % 4 == 0)
-
- def test_compile_sizes_mismatch(self):
- table = self.font['trak']
- table.horizData = TrackData({
- -1.0: TrackTableEntry(nameIndex=256, values={9.0: -10, 10.0: -30}),
- 0.0: TrackTableEntry(nameIndex=257, values={8.0: 20, 12.0: 0})
- })
- with self.assertRaisesRegex(TTLibError, 'entries must specify the same sizes'):
- table.compile(self.font)
-
- def test_decompile_horiz(self):
- table = self.font['trak']
- table.decompile(SKIA_TRAK_TABLE_DATA, self.font)
- self.assertEqual(table.horizData, SKIA_TRACK_ENTRIES)
- self.assertEqual(table.vertData, TrackData())
-
- def test_decompile_vert(self):
- table = self.font['trak']
- table.decompile(OSAKA_VERT_ONLY_TRAK_TABLE_DATA, self.font)
- self.assertEqual(table.horizData, TrackData())
- self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
-
- def test_decompile_horiz_and_vert(self):
- table = self.font['trak']
- table.decompile(OSAKA_TRAK_TABLE_DATA, self.font)
- self.assertEqual(table.horizData, OSAKA_HORIZ_TRACK_ENTRIES)
- self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
-
- def test_roundtrip_decompile_compile(self):
- for trakData in (
- OSAKA_TRAK_TABLE_DATA,
- OSAKA_VERT_ONLY_TRAK_TABLE_DATA,
- SKIA_TRAK_TABLE_DATA):
- table = table__t_r_a_k()
- table.decompile(trakData, ttFont=None)
- newTrakData = table.compile(ttFont=None)
- self.assertEqual(trakData, newTrakData)
-
- def test_fromXML_horiz(self):
- table = self.font['trak']
- for name, attrs, content in parseXML(SKIA_TRAK_TABLE_XML):
- table.fromXML(name, attrs, content, self.font)
- self.assertEqual(table.version, 1.0)
- self.assertEqual(table.format, 0)
- self.assertEqual(table.horizData, SKIA_TRACK_ENTRIES)
- self.assertEqual(table.vertData, TrackData())
-
- def test_fromXML_horiz_and_vert(self):
- table = self.font['trak']
- for name, attrs, content in parseXML(OSAKA_TRAK_TABLE_XML):
- table.fromXML(name, attrs, content, self.font)
- self.assertEqual(table.version, 1.0)
- self.assertEqual(table.format, 0)
- self.assertEqual(table.horizData, OSAKA_HORIZ_TRACK_ENTRIES)
- self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
-
- def test_fromXML_vert(self):
- table = self.font['trak']
- for name, attrs, content in parseXML(OSAKA_VERT_ONLY_TRAK_TABLE_XML):
- table.fromXML(name, attrs, content, self.font)
- self.assertEqual(table.version, 1.0)
- self.assertEqual(table.format, 0)
- self.assertEqual(table.horizData, TrackData())
- self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
-
- def test_toXML_horiz(self):
- table = self.font['trak']
- table.horizData = TrackData(SKIA_TRACK_ENTRIES)
- add_name(self.font, 'Tight', nameID=275)
- add_name(self.font, 'Normal', nameID=303)
- add_name(self.font, 'Loose', nameID=276)
- self.assertEqual(
- SKIA_TRAK_TABLE_XML,
- getXML(table.toXML, self.font))
-
- def test_toXML_horiz_and_vert(self):
- table = self.font['trak']
- table.horizData = TrackData(OSAKA_HORIZ_TRACK_ENTRIES)
- table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
- add_name(self.font, 'Tight', nameID=262)
- add_name(self.font, 'Normal', nameID=263)
- add_name(self.font, 'Loose', nameID=264)
- add_name(self.font, 'Tight', nameID=265)
- add_name(self.font, 'Normal', nameID=266)
- add_name(self.font, 'Loose', nameID=267)
- self.assertEqual(
- OSAKA_TRAK_TABLE_XML,
- getXML(table.toXML, self.font))
-
- def test_toXML_vert(self):
- table = self.font['trak']
- table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
- add_name(self.font, 'Tight', nameID=265)
- add_name(self.font, 'Normal', nameID=266)
- add_name(self.font, 'Loose', nameID=267)
- self.assertEqual(
- OSAKA_VERT_ONLY_TRAK_TABLE_XML,
- getXML(table.toXML, self.font))
-
- def test_roundtrip_fromXML_toXML(self):
- font = {}
- add_name(font, 'Tight', nameID=275)
- add_name(font, 'Normal', nameID=303)
- add_name(font, 'Loose', nameID=276)
- add_name(font, 'Tight', nameID=262)
- add_name(font, 'Normal', nameID=263)
- add_name(font, 'Loose', nameID=264)
- add_name(font, 'Tight', nameID=265)
- add_name(font, 'Normal', nameID=266)
- add_name(font, 'Loose', nameID=267)
- for input_xml in (
- SKIA_TRAK_TABLE_XML,
- OSAKA_TRAK_TABLE_XML,
- OSAKA_VERT_ONLY_TRAK_TABLE_XML):
- table = table__t_r_a_k()
- font['trak'] = table
- for name, attrs, content in parseXML(input_xml):
- table.fromXML(name, attrs, content, font)
- output_xml = getXML(table.toXML, font)
- self.assertEqual(input_xml, output_xml)
+ def __init__(self, methodName):
+ unittest.TestCase.__init__(self, methodName)
+ # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
+ # and fires deprecation warnings if a program uses the old name.
+ if not hasattr(self, "assertRaisesRegex"):
+ self.assertRaisesRegex = self.assertRaisesRegexp
+
+ def setUp(self):
+ table = table__t_r_a_k()
+ table.version = 1.0
+ table.format = 0
+ self.font = {"trak": table}
+
+ def test_compile_horiz(self):
+ table = self.font["trak"]
+ table.horizData = TrackData(SKIA_TRACK_ENTRIES)
+ trakData = table.compile(self.font)
+ self.assertEqual(trakData, SKIA_TRAK_TABLE_DATA)
+
+ def test_compile_vert(self):
+ table = self.font["trak"]
+ table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
+ trakData = table.compile(self.font)
+ self.assertEqual(trakData, OSAKA_VERT_ONLY_TRAK_TABLE_DATA)
+
+ def test_compile_horiz_and_vert(self):
+ table = self.font["trak"]
+ table.horizData = TrackData(OSAKA_HORIZ_TRACK_ENTRIES)
+ table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
+ trakData = table.compile(self.font)
+ self.assertEqual(trakData, OSAKA_TRAK_TABLE_DATA)
+
+ def test_compile_longword_aligned(self):
+ table = self.font["trak"]
+ # without padding, this 'horizData' would end up 46 byte long
+ table.horizData = TrackData(
+ {0.0: TrackTableEntry(nameIndex=256, values={12.0: 0, 24.0: 0, 36.0: 0})}
+ )
+ table.vertData = TrackData(
+ {0.0: TrackTableEntry(nameIndex=257, values={12.0: 0, 24.0: 0, 36.0: 0})}
+ )
+ trakData = table.compile(self.font)
+ self.assertTrue(table.vertOffset % 4 == 0)
+
+ def test_compile_sizes_mismatch(self):
+ table = self.font["trak"]
+ table.horizData = TrackData(
+ {
+ -1.0: TrackTableEntry(nameIndex=256, values={9.0: -10, 10.0: -30}),
+ 0.0: TrackTableEntry(nameIndex=257, values={8.0: 20, 12.0: 0}),
+ }
+ )
+ with self.assertRaisesRegex(TTLibError, "entries must specify the same sizes"):
+ table.compile(self.font)
+
+ def test_decompile_horiz(self):
+ table = self.font["trak"]
+ table.decompile(SKIA_TRAK_TABLE_DATA, self.font)
+ self.assertEqual(table.horizData, SKIA_TRACK_ENTRIES)
+ self.assertEqual(table.vertData, TrackData())
+
+ def test_decompile_vert(self):
+ table = self.font["trak"]
+ table.decompile(OSAKA_VERT_ONLY_TRAK_TABLE_DATA, self.font)
+ self.assertEqual(table.horizData, TrackData())
+ self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
+
+ def test_decompile_horiz_and_vert(self):
+ table = self.font["trak"]
+ table.decompile(OSAKA_TRAK_TABLE_DATA, self.font)
+ self.assertEqual(table.horizData, OSAKA_HORIZ_TRACK_ENTRIES)
+ self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
+
+ def test_roundtrip_decompile_compile(self):
+ for trakData in (
+ OSAKA_TRAK_TABLE_DATA,
+ OSAKA_VERT_ONLY_TRAK_TABLE_DATA,
+ SKIA_TRAK_TABLE_DATA,
+ ):
+ table = table__t_r_a_k()
+ table.decompile(trakData, ttFont=None)
+ newTrakData = table.compile(ttFont=None)
+ self.assertEqual(trakData, newTrakData)
+
+ def test_fromXML_horiz(self):
+ table = self.font["trak"]
+ for name, attrs, content in parseXML(SKIA_TRAK_TABLE_XML):
+ table.fromXML(name, attrs, content, self.font)
+ self.assertEqual(table.version, 1.0)
+ self.assertEqual(table.format, 0)
+ self.assertEqual(table.horizData, SKIA_TRACK_ENTRIES)
+ self.assertEqual(table.vertData, TrackData())
+
+ def test_fromXML_horiz_and_vert(self):
+ table = self.font["trak"]
+ for name, attrs, content in parseXML(OSAKA_TRAK_TABLE_XML):
+ table.fromXML(name, attrs, content, self.font)
+ self.assertEqual(table.version, 1.0)
+ self.assertEqual(table.format, 0)
+ self.assertEqual(table.horizData, OSAKA_HORIZ_TRACK_ENTRIES)
+ self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
+
+ def test_fromXML_vert(self):
+ table = self.font["trak"]
+ for name, attrs, content in parseXML(OSAKA_VERT_ONLY_TRAK_TABLE_XML):
+ table.fromXML(name, attrs, content, self.font)
+ self.assertEqual(table.version, 1.0)
+ self.assertEqual(table.format, 0)
+ self.assertEqual(table.horizData, TrackData())
+ self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES)
+
+ def test_toXML_horiz(self):
+ table = self.font["trak"]
+ table.horizData = TrackData(SKIA_TRACK_ENTRIES)
+ add_name(self.font, "Tight", nameID=275)
+ add_name(self.font, "Normal", nameID=303)
+ add_name(self.font, "Loose", nameID=276)
+ self.assertEqual(SKIA_TRAK_TABLE_XML, getXML(table.toXML, self.font))
+
+ def test_toXML_horiz_and_vert(self):
+ table = self.font["trak"]
+ table.horizData = TrackData(OSAKA_HORIZ_TRACK_ENTRIES)
+ table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
+ add_name(self.font, "Tight", nameID=262)
+ add_name(self.font, "Normal", nameID=263)
+ add_name(self.font, "Loose", nameID=264)
+ add_name(self.font, "Tight", nameID=265)
+ add_name(self.font, "Normal", nameID=266)
+ add_name(self.font, "Loose", nameID=267)
+ self.assertEqual(OSAKA_TRAK_TABLE_XML, getXML(table.toXML, self.font))
+
+ def test_toXML_vert(self):
+ table = self.font["trak"]
+ table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES)
+ add_name(self.font, "Tight", nameID=265)
+ add_name(self.font, "Normal", nameID=266)
+ add_name(self.font, "Loose", nameID=267)
+ self.assertEqual(OSAKA_VERT_ONLY_TRAK_TABLE_XML, getXML(table.toXML, self.font))
+
+ def test_roundtrip_fromXML_toXML(self):
+ font = {}
+ add_name(font, "Tight", nameID=275)
+ add_name(font, "Normal", nameID=303)
+ add_name(font, "Loose", nameID=276)
+ add_name(font, "Tight", nameID=262)
+ add_name(font, "Normal", nameID=263)
+ add_name(font, "Loose", nameID=264)
+ add_name(font, "Tight", nameID=265)
+ add_name(font, "Normal", nameID=266)
+ add_name(font, "Loose", nameID=267)
+ for input_xml in (
+ SKIA_TRAK_TABLE_XML,
+ OSAKA_TRAK_TABLE_XML,
+ OSAKA_VERT_ONLY_TRAK_TABLE_XML,
+ ):
+ table = table__t_r_a_k()
+ font["trak"] = table
+ for name, attrs, content in parseXML(input_xml):
+ table.fromXML(name, attrs, content, font)
+ output_xml = getXML(table.toXML, font)
+ self.assertEqual(input_xml, output_xml)
def add_name(font, string, nameID):
- nameTable = font.get("name")
- if nameTable is None:
- nameTable = font["name"] = table__n_a_m_e()
- nameTable.names = []
- namerec = NameRecord()
- namerec.nameID = nameID
- namerec.string = string.encode('mac_roman')
- namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0)
- nameTable.names.append(namerec)
+ nameTable = font.get("name")
+ if nameTable is None:
+ nameTable = font["name"] = table__n_a_m_e()
+ nameTable.names = []
+ namerec = NameRecord()
+ namerec.nameID = nameID
+ namerec.string = string.encode("mac_roman")
+ namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0)
+ nameTable.names.append(namerec)
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_v_h_e_a_test.py b/Tests/ttLib/tables/_v_h_e_a_test.py
index c6018632..698bd3b7 100644
--- a/Tests/ttLib/tables/_v_h_e_a_test.py
+++ b/Tests/ttLib/tables/_v_h_e_a_test.py
@@ -8,53 +8,53 @@ import unittest
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-DATA_DIR = os.path.join(CURR_DIR, 'data')
+DATA_DIR = os.path.join(CURR_DIR, "data")
VHEA_DATA_VERSION_11 = deHexStr(
- '0001 1000 ' # 1.1 version
- '01F4 ' # 500 ascent
- 'FE0C ' # -500 descent
- '0000 ' # 0 lineGap
- '0BB8 ' # 3000 advanceHeightMax
- 'FC16 ' # -1002 minTopSideBearing
- 'FD5B ' # -677 minBottomSideBearing
- '0B70 ' # 2928 yMaxExtent
- '0000 ' # 0 caretSlopeRise
- '0001 ' # 1 caretSlopeRun
- '0000 ' # 0 caretOffset
- '0000 ' # 0 reserved1
- '0000 ' # 0 reserved2
- '0000 ' # 0 reserved3
- '0000 ' # 0 reserved4
- '0000 ' # 0 metricDataFormat
- '000C ' # 12 numberOfVMetrics
+ "0001 1000 " # 1.1 version
+ "01F4 " # 500 ascent
+ "FE0C " # -500 descent
+ "0000 " # 0 lineGap
+ "0BB8 " # 3000 advanceHeightMax
+ "FC16 " # -1002 minTopSideBearing
+ "FD5B " # -677 minBottomSideBearing
+ "0B70 " # 2928 yMaxExtent
+ "0000 " # 0 caretSlopeRise
+ "0001 " # 1 caretSlopeRun
+ "0000 " # 0 caretOffset
+ "0000 " # 0 reserved1
+ "0000 " # 0 reserved2
+ "0000 " # 0 reserved3
+ "0000 " # 0 reserved4
+ "0000 " # 0 metricDataFormat
+ "000C " # 12 numberOfVMetrics
)
-VHEA_DATA_VERSION_10 = deHexStr('00010000') + VHEA_DATA_VERSION_11[4:]
+VHEA_DATA_VERSION_10 = deHexStr("00010000") + VHEA_DATA_VERSION_11[4:]
VHEA_VERSION_11_AS_DICT = {
- 'tableTag': 'vhea',
- 'tableVersion': 0x00011000,
- 'ascent': 500,
- 'descent': -500,
- 'lineGap': 0,
- 'advanceHeightMax': 3000,
- 'minTopSideBearing': -1002,
- 'minBottomSideBearing': -677,
- 'yMaxExtent': 2928,
- 'caretSlopeRise': 0,
- 'caretSlopeRun': 1,
- 'caretOffset': 0,
- 'reserved1': 0,
- 'reserved2': 0,
- 'reserved3': 0,
- 'reserved4': 0,
- 'metricDataFormat': 0,
- 'numberOfVMetrics': 12,
+ "tableTag": "vhea",
+ "tableVersion": 0x00011000,
+ "ascent": 500,
+ "descent": -500,
+ "lineGap": 0,
+ "advanceHeightMax": 3000,
+ "minTopSideBearing": -1002,
+ "minBottomSideBearing": -677,
+ "yMaxExtent": 2928,
+ "caretSlopeRise": 0,
+ "caretSlopeRun": 1,
+ "caretOffset": 0,
+ "reserved1": 0,
+ "reserved2": 0,
+ "reserved3": 0,
+ "reserved4": 0,
+ "metricDataFormat": 0,
+ "numberOfVMetrics": 12,
}
VHEA_VERSION_10_AS_DICT = dict(VHEA_VERSION_11_AS_DICT)
-VHEA_VERSION_10_AS_DICT['tableVersion'] = 0x00010000
+VHEA_VERSION_10_AS_DICT["tableVersion"] = 0x00010000
VHEA_XML_VERSION_11 = [
'<tableVersion value="0x00011000"/>',
@@ -90,9 +90,8 @@ VHEA_XML_VERSION_10_AS_FLOAT = [
class VheaCompileOrToXMLTest(unittest.TestCase):
-
def setUp(self):
- vhea = newTable('vhea')
+ vhea = newTable("vhea")
vhea.tableVersion = 0x00010000
vhea.ascent = 500
vhea.descent = -500
@@ -107,140 +106,156 @@ class VheaCompileOrToXMLTest(unittest.TestCase):
vhea.metricDataFormat = 0
vhea.numberOfVMetrics = 12
vhea.reserved1 = vhea.reserved2 = vhea.reserved3 = vhea.reserved4 = 0
- self.font = TTFont(sfntVersion='OTTO')
- self.font['vhea'] = vhea
+ self.font = TTFont(sfntVersion="OTTO")
+ self.font["vhea"] = vhea
def test_compile_caretOffset_as_reserved0(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
del vhea.caretOffset
vhea.reserved0 = 0
self.assertEqual(VHEA_DATA_VERSION_10, vhea.compile(self.font))
def test_compile_version_10(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.tableVersion = 0x00010000
self.assertEqual(VHEA_DATA_VERSION_10, vhea.compile(self.font))
def test_compile_version_10_as_float(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.tableVersion = 1.0
with CapturingLogHandler(log, "WARNING") as captor:
self.assertEqual(VHEA_DATA_VERSION_10, vhea.compile(self.font))
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
def test_compile_version_11(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.tableVersion = 0x00011000
self.assertEqual(VHEA_DATA_VERSION_11, vhea.compile(self.font))
def test_compile_version_11_as_float(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.tableVersion = 1.0625
with CapturingLogHandler(log, "WARNING") as captor:
self.assertEqual(VHEA_DATA_VERSION_11, vhea.compile(self.font))
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
def test_toXML_caretOffset_as_reserved0(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
del vhea.caretOffset
vhea.reserved0 = 0
self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_10)
def test_toXML_version_10(self):
- vhea = self.font['vhea']
- self.font['vhea'].tableVersion = 0x00010000
+ vhea = self.font["vhea"]
+ self.font["vhea"].tableVersion = 0x00010000
self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_10)
def test_toXML_version_10_as_float(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.tableVersion = 1.0
with CapturingLogHandler(log, "WARNING") as captor:
self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_10)
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
def test_toXML_version_11(self):
- vhea = self.font['vhea']
- self.font['vhea'].tableVersion = 0x00011000
+ vhea = self.font["vhea"]
+ self.font["vhea"].tableVersion = 0x00011000
self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_11)
def test_toXML_version_11_as_float(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.tableVersion = 1.0625
with CapturingLogHandler(log, "WARNING") as captor:
self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_11)
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
class VheaDecompileOrFromXMLTest(unittest.TestCase):
-
def setUp(self):
- vhea = newTable('vhea')
- self.font = TTFont(sfntVersion='OTTO')
- self.font['vhea'] = vhea
+ vhea = newTable("vhea")
+ self.font = TTFont(sfntVersion="OTTO")
+ self.font["vhea"] = vhea
def test_decompile_version_10(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.decompile(VHEA_DATA_VERSION_10, self.font)
for key in vhea.__dict__:
self.assertEqual(getattr(vhea, key), VHEA_VERSION_10_AS_DICT[key])
def test_decompile_version_11(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
vhea.decompile(VHEA_DATA_VERSION_11, self.font)
for key in vhea.__dict__:
self.assertEqual(getattr(vhea, key), VHEA_VERSION_11_AS_DICT[key])
def test_fromXML_version_10(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
for name, attrs, content in parseXML(VHEA_XML_VERSION_10):
vhea.fromXML(name, attrs, content, self.font)
for key in vhea.__dict__:
self.assertEqual(getattr(vhea, key), VHEA_VERSION_10_AS_DICT[key])
def test_fromXML_version_10_as_float(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
with CapturingLogHandler(log, "WARNING") as captor:
for name, attrs, content in parseXML(VHEA_XML_VERSION_10_AS_FLOAT):
vhea.fromXML(name, attrs, content, self.font)
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
for key in vhea.__dict__:
self.assertEqual(getattr(vhea, key), VHEA_VERSION_10_AS_DICT[key])
def test_fromXML_version_11(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
for name, attrs, content in parseXML(VHEA_XML_VERSION_11):
vhea.fromXML(name, attrs, content, self.font)
for key in vhea.__dict__:
self.assertEqual(getattr(vhea, key), VHEA_VERSION_11_AS_DICT[key])
def test_fromXML_version_11_as_float(self):
- vhea = self.font['vhea']
+ vhea = self.font["vhea"]
with CapturingLogHandler(log, "WARNING") as captor:
for name, attrs, content in parseXML(VHEA_XML_VERSION_11_AS_FLOAT):
vhea.fromXML(name, attrs, content, self.font)
self.assertTrue(
- len([r for r in captor.records
- if "Table version value is a float" in r.msg]) == 1)
+ len(
+ [r for r in captor.records if "Table version value is a float" in r.msg]
+ )
+ == 1
+ )
for key in vhea.__dict__:
self.assertEqual(getattr(vhea, key), VHEA_VERSION_11_AS_DICT[key])
class VheaRecalcTest(unittest.TestCase):
-
def test_recalc_TTF(self):
font = TTFont()
- font.importXML(os.path.join(DATA_DIR, '_v_h_e_a_recalc_TTF.ttx'))
- vhea = font['vhea']
+ font.importXML(os.path.join(DATA_DIR, "_v_h_e_a_recalc_TTF.ttx"))
+ vhea = font["vhea"]
vhea.recalc(font)
self.assertEqual(vhea.advanceHeightMax, 900)
self.assertEqual(vhea.minTopSideBearing, 200)
@@ -249,8 +264,8 @@ class VheaRecalcTest(unittest.TestCase):
def test_recalc_OTF(self):
font = TTFont()
- font.importXML(os.path.join(DATA_DIR, '_v_h_e_a_recalc_OTF.ttx'))
- vhea = font['vhea']
+ font.importXML(os.path.join(DATA_DIR, "_v_h_e_a_recalc_OTF.ttx"))
+ vhea = font["vhea"]
vhea.recalc(font)
self.assertEqual(vhea.advanceHeightMax, 900)
self.assertEqual(vhea.minTopSideBearing, 200)
@@ -259,8 +274,8 @@ class VheaRecalcTest(unittest.TestCase):
def test_recalc_empty(self):
font = TTFont()
- font.importXML(os.path.join(DATA_DIR, '_v_h_e_a_recalc_empty.ttx'))
- vhea = font['vhea']
+ font.importXML(os.path.join(DATA_DIR, "_v_h_e_a_recalc_empty.ttx"))
+ vhea = font["vhea"]
vhea.recalc(font)
self.assertEqual(vhea.advanceHeightMax, 900)
self.assertEqual(vhea.minTopSideBearing, 0)
@@ -270,4 +285,5 @@ class VheaRecalcTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/_v_m_t_x_test.py b/Tests/ttLib/tables/_v_m_t_x_test.py
index 5ea2d245..9b8fcb7a 100644
--- a/Tests/ttLib/tables/_v_m_t_x_test.py
+++ b/Tests/ttLib/tables/_v_m_t_x_test.py
@@ -4,7 +4,6 @@ import unittest
class VmtxTableTest(_h_m_t_x_test.HmtxTableTest):
-
@classmethod
def setUpClass(cls):
cls.tableClass = table__v_m_t_x
@@ -13,4 +12,5 @@ class VmtxTableTest(_h_m_t_x_test.HmtxTableTest):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/data/COLRv1-clip-boxes-cff.ttx b/Tests/ttLib/tables/data/COLRv1-clip-boxes-cff.ttx
new file mode 100644
index 00000000..05172cae
--- /dev/null
+++ b/Tests/ttLib/tables/data/COLRv1-clip-boxes-cff.ttx
@@ -0,0 +1,1213 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="OTTO" ttLibVersion="4.37">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name=".null"/>
+ <GlyphID id="2" name="upem_box_glyph"/>
+ <GlyphID id="3" name="cross_glyph"/>
+ <GlyphID id="4" name="one"/>
+ <GlyphID id="5" name="zero"/>
+ <GlyphID id="6" name="scale_0.5_1.5_center_500.0_500.0"/>
+ <GlyphID id="7" name="scale_1.5_1.5_center_500.0_500.0"/>
+ <GlyphID id="8" name="scale_0.5_1.5_center_0_0"/>
+ <GlyphID id="9" name="scale_1.5_1.5_center_0_0"/>
+ <GlyphID id="10" name="scale_0.5_1.5_center_1000_1000"/>
+ <GlyphID id="11" name="scale_1.5_1.5_center_1000_1000"/>
+ <GlyphID id="12" name="rotate_10_center_0_0"/>
+ <GlyphID id="13" name="rotate_-10_center_1000_1000"/>
+ <GlyphID id="14" name="rotate_25_center_500.0_500.0"/>
+ <GlyphID id="15" name="rotate_-15_center_500.0_500.0"/>
+ <GlyphID id="16" name="skew_25_0_center_0_0"/>
+ <GlyphID id="17" name="skew_25_0_center_500.0_500.0"/>
+ <GlyphID id="18" name="skew_0_15_center_0_0"/>
+ <GlyphID id="19" name="skew_0_15_center_500.0_500.0"/>
+ <GlyphID id="20" name="skew_-10_20_center_500.0_500.0"/>
+ <GlyphID id="21" name="skew_-10_20_center_1000_1000"/>
+ <GlyphID id="22" name="transform_matrix_1_0_0_1_125_125"/>
+ <GlyphID id="23" name="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <GlyphID id="24" name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <GlyphID id="25" name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <GlyphID id="26" name="translate_0_0"/>
+ <GlyphID id="27" name="translate_0_100"/>
+ <GlyphID id="28" name="translate_0_-100"/>
+ <GlyphID id="29" name="translate_100_0"/>
+ <GlyphID id="30" name="translate_-100_0"/>
+ <GlyphID id="31" name="translate_200_200"/>
+ <GlyphID id="32" name="translate_-200_-200"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="1.0"/>
+ <checkSumAdjustment value="0x4b7ffe68"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Fri Mar 10 15:01:34 2023"/>
+ <modified value="Fri Mar 10 15:01:34 2023"/>
+ <xMin value="0"/>
+ <yMin value="0"/>
+ <xMax value="1000"/>
+ <yMax value="1000"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="3"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="950"/>
+ <descent value="-250"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="1000"/>
+ <minLeftSideBearing value="0"/>
+ <minRightSideBearing value="0"/>
+ <xMaxExtent value="1000"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="3"/>
+ </hhea>
+
+ <maxp>
+ <tableVersion value="0x5000"/>
+ <numGlyphs value="33"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="988"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000000"/>
+ <ySubscriptXSize value="0"/>
+ <ySubscriptYSize value="0"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="0"/>
+ <ySuperscriptXSize value="0"/>
+ <ySuperscriptYSize value="0"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="0"/>
+ <yStrikeoutSize value="0"/>
+ <yStrikeoutPosition value="0"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange2 value="00000010 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000100 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="????"/>
+ <fsSelection value="00000000 10000000"/>
+ <usFirstCharIndex value="65535"/>
+ <usLastCharIndex value="65535"/>
+ <sTypoAscender value="950"/>
+ <sTypoDescender value="0"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="950"/>
+ <usWinDescent value="250"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="0"/>
+ <sCapHeight value="0"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <name>
+ <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1 Static Test Glyphs
+ </namerecord>
+ <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1 Static Test Glyphs 2023-03-10T15:01:34.955294
+ </namerecord>
+ <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1 Static Test Glyphs Regular
+ </namerecord>
+ <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ 2023-03-10T15:01:34.955294
+ </namerecord>
+ <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1StaticTestGlyphs-Regular
+ </namerecord>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ COLRv1 Static Test Glyphs
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ COLRv1 Static Test Glyphs 2023-03-10T15:01:34.955294
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ COLRv1 Static Test Glyphs Regular
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ 2023-03-10T15:01:34.955294
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ COLRv1StaticTestGlyphs-Regular
+ </namerecord>
+ </name>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ </cmap_format_4>
+ <cmap_format_12 platformID="3" platEncID="10" format="12" reserved="0" length="88" language="0" nGroups="6">
+ <map code="0xf0300" name="scale_0.5_1.5_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0301" name="scale_1.5_1.5_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0302" name="scale_0.5_1.5_center_0_0"/><!-- ???? -->
+ <map code="0xf0303" name="scale_1.5_1.5_center_0_0"/><!-- ???? -->
+ <map code="0xf0304" name="scale_0.5_1.5_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0305" name="scale_1.5_1.5_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0600" name="rotate_10_center_0_0"/><!-- ???? -->
+ <map code="0xf0601" name="rotate_-10_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0602" name="rotate_25_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0603" name="rotate_-15_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0700" name="skew_25_0_center_0_0"/><!-- ???? -->
+ <map code="0xf0701" name="skew_25_0_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0702" name="skew_0_15_center_0_0"/><!-- ???? -->
+ <map code="0xf0703" name="skew_0_15_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0704" name="skew_-10_20_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0705" name="skew_-10_20_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0800" name="transform_matrix_1_0_0_1_125_125"/><!-- ???? -->
+ <map code="0xf0801" name="transform_matrix_1.5_0_0_1.5_0_0"/><!-- ???? -->
+ <map code="0xf0802" name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/><!-- ???? -->
+ <map code="0xf0803" name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/><!-- ???? -->
+ <map code="0xf0900" name="translate_0_0"/><!-- ???? -->
+ <map code="0xf0901" name="translate_0_100"/><!-- ???? -->
+ <map code="0xf0902" name="translate_0_-100"/><!-- ???? -->
+ <map code="0xf0903" name="translate_100_0"/><!-- ???? -->
+ <map code="0xf0904" name="translate_-100_0"/><!-- ???? -->
+ <map code="0xf0905" name="translate_200_200"/><!-- ???? -->
+ <map code="0xf0906" name="translate_-200_-200"/><!-- ???? -->
+ <map code="0xfe001" name=".null"/><!-- ???? -->
+ <map code="0xfe002" name="upem_box_glyph"/><!-- ???? -->
+ <map code="0xfe003" name="cross_glyph"/><!-- ???? -->
+ <map code="0xfe004" name="one"/><!-- ???? -->
+ <map code="0xfe005" name="zero"/><!-- ???? -->
+ </cmap_format_12>
+ </cmap>
+
+ <post>
+ <formatType value="3.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="0"/>
+ <underlineThickness value="0"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ </post>
+
+ <CFF>
+ <major value="1"/>
+ <minor value="0"/>
+ <CFFFont name="COLRv1StaticTestGlyphs-Regular">
+ <FullName value="COLRv1StaticTestGlyphs-Regular"/>
+ <isFixedPitch value="0"/>
+ <ItalicAngle value="0"/>
+ <UnderlinePosition value="-100"/>
+ <UnderlineThickness value="50"/>
+ <PaintType value="0"/>
+ <CharstringType value="2"/>
+ <FontMatrix value="0.001 0 0 0.001 0 0"/>
+ <FontBBox value="0 0 1000 1000"/>
+ <StrokeWidth value="0"/>
+ <!-- charset is dumped separately as the 'GlyphOrder' element -->
+ <Encoding name="StandardEncoding"/>
+ <Private>
+ <BlueScale value="0.039625"/>
+ <BlueShift value="7"/>
+ <BlueFuzz value="1"/>
+ <ForceBold value="0"/>
+ <LanguageGroup value="0"/>
+ <ExpansionFactor value="0.06"/>
+ <initialRandomSeed value="0"/>
+ <defaultWidthX value="0"/>
+ <nominalWidthX value="0"/>
+ </Private>
+ <CharStrings>
+ <CharString name=".notdef">
+ 600 endchar
+ </CharString>
+ <CharString name=".null">
+ 0 endchar
+ </CharString>
+ <CharString name="cross_glyph">
+ 1000 475 525 rmoveto
+ 225 50 -225 225 -50 -225 -225 -50 225 -225 50 vlineto
+ endchar
+ </CharString>
+ <CharString name="one">
+ 1000 296 543 rmoveto
+ -293 -37 247 vlineto
+ -75 -31 0 37 106 40 rlineto
+ endchar
+ </CharString>
+ <CharString name="rotate_-10_center_1000_1000">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="rotate_-15_center_500.0_500.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="rotate_10_center_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="rotate_25_center_500.0_500.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="scale_0.5_1.5_center_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="scale_0.5_1.5_center_1000_1000">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="scale_0.5_1.5_center_500.0_500.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="scale_1.5_1.5_center_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="scale_1.5_1.5_center_1000_1000">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="scale_1.5_1.5_center_500.0_500.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="skew_-10_20_center_1000_1000">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="skew_-10_20_center_500.0_500.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="skew_0_15_center_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="skew_0_15_center_500.0_500.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="skew_25_0_center_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="skew_25_0_center_500.0_500.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="transform_matrix_1.5_0_0_1.5_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="transform_matrix_1_0_0_1_125_125">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="translate_-100_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="translate_-200_-200">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="translate_0_-100">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="translate_0_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="translate_0_100">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="translate_100_0">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="translate_200_200">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="upem_box_glyph">
+ 1000 0 hmoveto
+ 1000 1000 -1000 vlineto
+ endchar
+ </CharString>
+ <CharString name="zero">
+ 1000 357 374 rmoveto
+ -47 -8 -34 -17 -19 vhcurveto
+ -19 -16 -23 -9 -28 hhcurveto
+ -28 -22 9 19 -17 hvcurveto
+ -17 19 -8 34 47 vvcurveto
+ 45 vlineto
+ 47 8 33 17 19 vhcurveto
+ 18 17 22 9 28 hhcurveto
+ 28 23 -9 -18 16 hvcurveto
+ 17 -19 8 -33 -47 vvcurveto
+ -37 6 rmoveto
+ 33 -5 23 -9 14 vhcurveto
+ 13 -10 -13 7 -18 hhcurveto
+ -18 -13 -7 -13 -10 hvcurveto
+ -9 -14 -5 -23 -33 vvcurveto
+ -57 vlineto
+ -32 5 -24 10 -14 vhcurveto
+ -14 9 14 -8 17 hhcurveto
+ 18 14 8 14 9 hvcurveto
+ 9 14 5 24 32 vvcurveto
+ endchar
+ </CharString>
+ </CharStrings>
+ </CFFFont>
+
+ <GlobalSubrs>
+ <!-- The 'index' attribute is only for humans; it is ignored when parsed. -->
+ </GlobalSubrs>
+ </CFF>
+
+ <COLR>
+ <Version value="1"/>
+ <!-- BaseGlyphRecordCount=0 -->
+ <!-- LayerRecordCount=0 -->
+ <BaseGlyphList>
+ <!-- BaseGlyphCount=27 -->
+ <BaseGlyphPaintRecord index="0">
+ <BaseGlyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="1">
+ <BaseGlyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="2">
+ <BaseGlyph value="scale_0.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="16"><!-- PaintScale -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="3">
+ <BaseGlyph value="scale_1.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="20"><!-- PaintScaleUniform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="4">
+ <BaseGlyph value="scale_0.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="5">
+ <BaseGlyph value="scale_1.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="6">
+ <BaseGlyph value="rotate_10_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="24"><!-- PaintRotate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="10.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="7">
+ <BaseGlyph value="rotate_-10_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-10.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="8">
+ <BaseGlyph value="rotate_25_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="25.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="9">
+ <BaseGlyph value="rotate_-15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="10">
+ <BaseGlyph value="skew_25_0_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="11">
+ <BaseGlyph value="skew_25_0_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="12">
+ <BaseGlyph value="skew_0_15_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="13">
+ <BaseGlyph value="skew_0_15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="14">
+ <BaseGlyph value="skew_-10_20_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="15">
+ <BaseGlyph value="skew_-10_20_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="16">
+ <BaseGlyph value="transform_matrix_1_0_0_1_125_125"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.0"/>
+ <dx value="125.0"/>
+ <dy value="125.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="17">
+ <BaseGlyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.5"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.5"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="18">
+ <BaseGlyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="0.9659"/>
+ <yx value="0.2588"/>
+ <xy value="-0.2588"/>
+ <yy value="0.9659"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="19">
+ <BaseGlyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.6"/>
+ <yy value="1.0"/>
+ <dx value="-300.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="20">
+ <BaseGlyph value="translate_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="21">
+ <BaseGlyph value="translate_0_100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="22">
+ <BaseGlyph value="translate_0_-100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="-100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="23">
+ <BaseGlyph value="translate_100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="24">
+ <BaseGlyph value="translate_-100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="25">
+ <BaseGlyph value="translate_200_200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="200"/>
+ <dy value="200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="26">
+ <BaseGlyph value="translate_-200_-200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-200"/>
+ <dy value="-200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ </BaseGlyphList>
+ </COLR>
+
+ <CPAL>
+ <version value="1"/>
+ <numPaletteEntries value="7"/>
+ <palette index="0">
+ <color index="0" value="#FF0000FF"/>
+ <color index="1" value="#FFA500FF"/>
+ <color index="2" value="#FFFF00FF"/>
+ <color index="3" value="#008000FF"/>
+ <color index="4" value="#0000FFFF"/>
+ <color index="5" value="#4B0082FF"/>
+ <color index="6" value="#EE82EEFF"/>
+ </palette>
+ <palette index="1" type="2">
+ <color index="0" value="#2A294AFF"/>
+ <color index="1" value="#244163FF"/>
+ <color index="2" value="#1B6388FF"/>
+ <color index="3" value="#157DA3FF"/>
+ <color index="4" value="#0E9AC2FF"/>
+ <color index="5" value="#05BEE8FF"/>
+ <color index="6" value="#00D4FFFF"/>
+ </palette>
+ <palette index="2" type="1">
+ <color index="0" value="#FC7118FF"/>
+ <color index="1" value="#FB8115FF"/>
+ <color index="2" value="#FA9511FF"/>
+ <color index="3" value="#FAA80DFF"/>
+ <color index="4" value="#F9BE09FF"/>
+ <color index="5" value="#F8D304FF"/>
+ <color index="6" value="#F8E700FF"/>
+ </palette>
+ </CPAL>
+
+ <hmtx>
+ <mtx name=".notdef" width="600" lsb="0"/>
+ <mtx name=".null" width="0" lsb="0"/>
+ <mtx name="cross_glyph" width="1000" lsb="250"/>
+ <mtx name="one" width="1000" lsb="184"/>
+ <mtx name="rotate_-10_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="rotate_-15_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="rotate_10_center_0_0" width="1000" lsb="0"/>
+ <mtx name="rotate_25_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="scale_0.5_1.5_center_0_0" width="1000" lsb="0"/>
+ <mtx name="scale_0.5_1.5_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="scale_0.5_1.5_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="scale_1.5_1.5_center_0_0" width="1000" lsb="0"/>
+ <mtx name="scale_1.5_1.5_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="scale_1.5_1.5_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="skew_-10_20_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="skew_-10_20_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="skew_0_15_center_0_0" width="1000" lsb="0"/>
+ <mtx name="skew_0_15_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="skew_25_0_center_0_0" width="1000" lsb="0"/>
+ <mtx name="skew_25_0_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_1.5_0_0_1.5_0_0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_1_0_0_1_125_125" width="1000" lsb="0"/>
+ <mtx name="translate_-100_0" width="1000" lsb="0"/>
+ <mtx name="translate_-200_-200" width="1000" lsb="0"/>
+ <mtx name="translate_0_-100" width="1000" lsb="0"/>
+ <mtx name="translate_0_0" width="1000" lsb="0"/>
+ <mtx name="translate_0_100" width="1000" lsb="0"/>
+ <mtx name="translate_100_0" width="1000" lsb="0"/>
+ <mtx name="translate_200_200" width="1000" lsb="0"/>
+ <mtx name="upem_box_glyph" width="1000" lsb="0"/>
+ <mtx name="zero" width="1000" lsb="173"/>
+ </hmtx>
+
+</ttFont>
diff --git a/Tests/ttLib/tables/data/COLRv1-clip-boxes-glyf.ttx b/Tests/ttLib/tables/data/COLRv1-clip-boxes-glyf.ttx
new file mode 100644
index 00000000..2f1c14c4
--- /dev/null
+++ b/Tests/ttLib/tables/data/COLRv1-clip-boxes-glyf.ttx
@@ -0,0 +1,1414 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.37">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name=".null"/>
+ <GlyphID id="2" name="upem_box_glyph"/>
+ <GlyphID id="3" name="cross_glyph"/>
+ <GlyphID id="4" name="one"/>
+ <GlyphID id="5" name="zero"/>
+ <GlyphID id="6" name="scale_0.5_1.5_center_500.0_500.0"/>
+ <GlyphID id="7" name="scale_1.5_1.5_center_500.0_500.0"/>
+ <GlyphID id="8" name="scale_0.5_1.5_center_0_0"/>
+ <GlyphID id="9" name="scale_1.5_1.5_center_0_0"/>
+ <GlyphID id="10" name="scale_0.5_1.5_center_1000_1000"/>
+ <GlyphID id="11" name="scale_1.5_1.5_center_1000_1000"/>
+ <GlyphID id="12" name="rotate_10_center_0_0"/>
+ <GlyphID id="13" name="rotate_-10_center_1000_1000"/>
+ <GlyphID id="14" name="rotate_25_center_500.0_500.0"/>
+ <GlyphID id="15" name="rotate_-15_center_500.0_500.0"/>
+ <GlyphID id="16" name="skew_25_0_center_0_0"/>
+ <GlyphID id="17" name="skew_25_0_center_500.0_500.0"/>
+ <GlyphID id="18" name="skew_0_15_center_0_0"/>
+ <GlyphID id="19" name="skew_0_15_center_500.0_500.0"/>
+ <GlyphID id="20" name="skew_-10_20_center_500.0_500.0"/>
+ <GlyphID id="21" name="skew_-10_20_center_1000_1000"/>
+ <GlyphID id="22" name="transform_matrix_1_0_0_1_125_125"/>
+ <GlyphID id="23" name="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <GlyphID id="24" name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <GlyphID id="25" name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <GlyphID id="26" name="translate_0_0"/>
+ <GlyphID id="27" name="translate_0_100"/>
+ <GlyphID id="28" name="translate_0_-100"/>
+ <GlyphID id="29" name="translate_100_0"/>
+ <GlyphID id="30" name="translate_-100_0"/>
+ <GlyphID id="31" name="translate_200_200"/>
+ <GlyphID id="32" name="translate_-200_-200"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="1.0"/>
+ <checkSumAdjustment value="0x5a54e94d"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Fri Mar 10 15:07:35 2023"/>
+ <modified value="Fri Mar 10 15:07:35 2023"/>
+ <xMin value="0"/>
+ <yMin value="0"/>
+ <xMax value="1000"/>
+ <yMax value="1000"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="3"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="950"/>
+ <descent value="-250"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="1000"/>
+ <minLeftSideBearing value="0"/>
+ <minRightSideBearing value="0"/>
+ <xMaxExtent value="1000"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="3"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="33"/>
+ <maxPoints value="28"/>
+ <maxContours value="2"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="2"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="988"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000000"/>
+ <ySubscriptXSize value="0"/>
+ <ySubscriptYSize value="0"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="0"/>
+ <ySuperscriptXSize value="0"/>
+ <ySuperscriptYSize value="0"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="0"/>
+ <yStrikeoutSize value="0"/>
+ <yStrikeoutPosition value="0"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange2 value="00000010 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000100 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="????"/>
+ <fsSelection value="00000000 10000000"/>
+ <usFirstCharIndex value="65535"/>
+ <usLastCharIndex value="65535"/>
+ <sTypoAscender value="950"/>
+ <sTypoDescender value="0"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="950"/>
+ <usWinDescent value="250"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="0"/>
+ <sCapHeight value="0"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="600" lsb="0"/>
+ <mtx name=".null" width="0" lsb="0"/>
+ <mtx name="cross_glyph" width="1000" lsb="250"/>
+ <mtx name="one" width="1000" lsb="184"/>
+ <mtx name="rotate_-10_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="rotate_-15_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="rotate_10_center_0_0" width="1000" lsb="0"/>
+ <mtx name="rotate_25_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="scale_0.5_1.5_center_0_0" width="1000" lsb="0"/>
+ <mtx name="scale_0.5_1.5_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="scale_0.5_1.5_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="scale_1.5_1.5_center_0_0" width="1000" lsb="0"/>
+ <mtx name="scale_1.5_1.5_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="scale_1.5_1.5_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="skew_-10_20_center_1000_1000" width="1000" lsb="0"/>
+ <mtx name="skew_-10_20_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="skew_0_15_center_0_0" width="1000" lsb="0"/>
+ <mtx name="skew_0_15_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="skew_25_0_center_0_0" width="1000" lsb="0"/>
+ <mtx name="skew_25_0_center_500.0_500.0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_1.5_0_0_1.5_0_0" width="1000" lsb="0"/>
+ <mtx name="transform_matrix_1_0_0_1_125_125" width="1000" lsb="0"/>
+ <mtx name="translate_-100_0" width="1000" lsb="0"/>
+ <mtx name="translate_-200_-200" width="1000" lsb="0"/>
+ <mtx name="translate_0_-100" width="1000" lsb="0"/>
+ <mtx name="translate_0_0" width="1000" lsb="0"/>
+ <mtx name="translate_0_100" width="1000" lsb="0"/>
+ <mtx name="translate_100_0" width="1000" lsb="0"/>
+ <mtx name="translate_200_200" width="1000" lsb="0"/>
+ <mtx name="upem_box_glyph" width="1000" lsb="0"/>
+ <mtx name="zero" width="1000" lsb="173"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ </cmap_format_4>
+ <cmap_format_12 platformID="3" platEncID="10" format="12" reserved="0" length="88" language="0" nGroups="6">
+ <map code="0xf0300" name="scale_0.5_1.5_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0301" name="scale_1.5_1.5_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0302" name="scale_0.5_1.5_center_0_0"/><!-- ???? -->
+ <map code="0xf0303" name="scale_1.5_1.5_center_0_0"/><!-- ???? -->
+ <map code="0xf0304" name="scale_0.5_1.5_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0305" name="scale_1.5_1.5_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0600" name="rotate_10_center_0_0"/><!-- ???? -->
+ <map code="0xf0601" name="rotate_-10_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0602" name="rotate_25_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0603" name="rotate_-15_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0700" name="skew_25_0_center_0_0"/><!-- ???? -->
+ <map code="0xf0701" name="skew_25_0_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0702" name="skew_0_15_center_0_0"/><!-- ???? -->
+ <map code="0xf0703" name="skew_0_15_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0704" name="skew_-10_20_center_500.0_500.0"/><!-- ???? -->
+ <map code="0xf0705" name="skew_-10_20_center_1000_1000"/><!-- ???? -->
+ <map code="0xf0800" name="transform_matrix_1_0_0_1_125_125"/><!-- ???? -->
+ <map code="0xf0801" name="transform_matrix_1.5_0_0_1.5_0_0"/><!-- ???? -->
+ <map code="0xf0802" name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/><!-- ???? -->
+ <map code="0xf0803" name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/><!-- ???? -->
+ <map code="0xf0900" name="translate_0_0"/><!-- ???? -->
+ <map code="0xf0901" name="translate_0_100"/><!-- ???? -->
+ <map code="0xf0902" name="translate_0_-100"/><!-- ???? -->
+ <map code="0xf0903" name="translate_100_0"/><!-- ???? -->
+ <map code="0xf0904" name="translate_-100_0"/><!-- ???? -->
+ <map code="0xf0905" name="translate_200_200"/><!-- ???? -->
+ <map code="0xf0906" name="translate_-200_-200"/><!-- ???? -->
+ <map code="0xfe001" name=".null"/><!-- ???? -->
+ <map code="0xfe002" name="upem_box_glyph"/><!-- ???? -->
+ <map code="0xfe003" name="cross_glyph"/><!-- ???? -->
+ <map code="0xfe004" name="one"/><!-- ???? -->
+ <map code="0xfe005" name="zero"/><!-- ???? -->
+ </cmap_format_12>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef"/><!-- contains no outline data -->
+
+ <TTGlyph name=".null"/><!-- contains no outline data -->
+
+ <TTGlyph name="cross_glyph" xMin="250" yMin="250" xMax="750" yMax="750">
+ <contour>
+ <pt x="475" y="525" on="1"/>
+ <pt x="475" y="750" on="1"/>
+ <pt x="525" y="750" on="1"/>
+ <pt x="525" y="525" on="1"/>
+ <pt x="750" y="525" on="1"/>
+ <pt x="750" y="475" on="1"/>
+ <pt x="525" y="475" on="1"/>
+ <pt x="525" y="250" on="1"/>
+ <pt x="475" y="250" on="1"/>
+ <pt x="475" y="475" on="1"/>
+ <pt x="250" y="475" on="1"/>
+ <pt x="250" y="525" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="one" xMin="184" yMin="250" xMax="296" yMax="543">
+ <contour>
+ <pt x="296" y="543" on="1"/>
+ <pt x="296" y="250" on="1"/>
+ <pt x="259" y="250" on="1"/>
+ <pt x="259" y="497" on="1"/>
+ <pt x="184" y="466" on="1"/>
+ <pt x="184" y="503" on="1"/>
+ <pt x="290" y="543" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="rotate_-10_center_1000_1000" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="rotate_-15_center_500.0_500.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="rotate_10_center_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="rotate_25_center_500.0_500.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="scale_0.5_1.5_center_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="scale_0.5_1.5_center_1000_1000" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="scale_0.5_1.5_center_500.0_500.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="scale_1.5_1.5_center_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="scale_1.5_1.5_center_1000_1000" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="scale_1.5_1.5_center_500.0_500.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="skew_-10_20_center_1000_1000" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="skew_-10_20_center_500.0_500.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="skew_0_15_center_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="skew_0_15_center_500.0_500.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="skew_25_0_center_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="skew_25_0_center_500.0_500.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="transform_matrix_1.5_0_0_1.5_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="transform_matrix_1_0_0_1_125_125" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="translate_-100_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="translate_-200_-200" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="translate_0_-100" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="translate_0_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="translate_0_100" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="translate_100_0" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="translate_200_200" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="upem_box_glyph" xMin="0" yMin="0" xMax="1000" yMax="1000">
+ <contour>
+ <pt x="0" y="0" on="1"/>
+ <pt x="0" y="1000" on="1"/>
+ <pt x="1000" y="1000" on="1"/>
+ <pt x="1000" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="zero" xMin="173" yMin="246" xMax="357" yMax="545">
+ <contour>
+ <pt x="357" y="374" on="1"/>
+ <pt x="357" y="303" on="0"/>
+ <pt x="308" y="246" on="0"/>
+ <pt x="265" y="246" on="1"/>
+ <pt x="223" y="246" on="0"/>
+ <pt x="173" y="303" on="0"/>
+ <pt x="173" y="374" on="1"/>
+ <pt x="173" y="419" on="1"/>
+ <pt x="173" y="490" on="0"/>
+ <pt x="223" y="545" on="0"/>
+ <pt x="265" y="545" on="1"/>
+ <pt x="307" y="545" on="0"/>
+ <pt x="357" y="490" on="0"/>
+ <pt x="357" y="419" on="1"/>
+ </contour>
+ <contour>
+ <pt x="320" y="425" on="1"/>
+ <pt x="320" y="474" on="0"/>
+ <pt x="292" y="515" on="0"/>
+ <pt x="265" y="515" on="1"/>
+ <pt x="238" y="515" on="0"/>
+ <pt x="210" y="474" on="0"/>
+ <pt x="210" y="425" on="1"/>
+ <pt x="210" y="368" on="1"/>
+ <pt x="210" y="320" on="0"/>
+ <pt x="239" y="276" on="0"/>
+ <pt x="265" y="276" on="1"/>
+ <pt x="292" y="276" on="0"/>
+ <pt x="320" y="320" on="0"/>
+ <pt x="320" y="368" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1 Static Test Glyphs
+ </namerecord>
+ <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1 Static Test Glyphs 2023-03-10T15:07:35.658876
+ </namerecord>
+ <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1 Static Test Glyphs Regular
+ </namerecord>
+ <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ 2023-03-10T15:07:35.658876
+ </namerecord>
+ <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ COLRv1StaticTestGlyphs-Regular
+ </namerecord>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ COLRv1 Static Test Glyphs
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ COLRv1 Static Test Glyphs 2023-03-10T15:07:35.658876
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ COLRv1 Static Test Glyphs Regular
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ 2023-03-10T15:07:35.658876
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ COLRv1StaticTestGlyphs-Regular
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="0"/>
+ <underlineThickness value="0"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ <psName name="upem_box_glyph"/>
+ <psName name="cross_glyph"/>
+ <psName name="scale_0.5_1.5_center_500.0_500.0"/>
+ <psName name="scale_1.5_1.5_center_500.0_500.0"/>
+ <psName name="scale_0.5_1.5_center_0_0"/>
+ <psName name="scale_1.5_1.5_center_0_0"/>
+ <psName name="scale_0.5_1.5_center_1000_1000"/>
+ <psName name="scale_1.5_1.5_center_1000_1000"/>
+ <psName name="rotate_10_center_0_0"/>
+ <psName name="rotate_-10_center_1000_1000"/>
+ <psName name="rotate_25_center_500.0_500.0"/>
+ <psName name="rotate_-15_center_500.0_500.0"/>
+ <psName name="skew_25_0_center_0_0"/>
+ <psName name="skew_25_0_center_500.0_500.0"/>
+ <psName name="skew_0_15_center_0_0"/>
+ <psName name="skew_0_15_center_500.0_500.0"/>
+ <psName name="skew_-10_20_center_500.0_500.0"/>
+ <psName name="skew_-10_20_center_1000_1000"/>
+ <psName name="transform_matrix_1_0_0_1_125_125"/>
+ <psName name="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <psName name="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <psName name="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <psName name="translate_0_0"/>
+ <psName name="translate_0_100"/>
+ <psName name="translate_0_-100"/>
+ <psName name="translate_100_0"/>
+ <psName name="translate_-100_0"/>
+ <psName name="translate_200_200"/>
+ <psName name="translate_-200_-200"/>
+ </extraNames>
+ </post>
+
+ <COLR>
+ <Version value="1"/>
+ <!-- BaseGlyphRecordCount=0 -->
+ <!-- LayerRecordCount=0 -->
+ <BaseGlyphList>
+ <!-- BaseGlyphCount=27 -->
+ <BaseGlyphPaintRecord index="0">
+ <BaseGlyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="1">
+ <BaseGlyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="2">
+ <BaseGlyph value="scale_0.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="16"><!-- PaintScale -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="3">
+ <BaseGlyph value="scale_1.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="20"><!-- PaintScaleUniform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="4">
+ <BaseGlyph value="scale_0.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="5">
+ <BaseGlyph value="scale_1.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="6">
+ <BaseGlyph value="rotate_10_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="24"><!-- PaintRotate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="10.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="7">
+ <BaseGlyph value="rotate_-10_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-10.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="8">
+ <BaseGlyph value="rotate_25_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="25.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="9">
+ <BaseGlyph value="rotate_-15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="10">
+ <BaseGlyph value="skew_25_0_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="11">
+ <BaseGlyph value="skew_25_0_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="12">
+ <BaseGlyph value="skew_0_15_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="13">
+ <BaseGlyph value="skew_0_15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="14">
+ <BaseGlyph value="skew_-10_20_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="15">
+ <BaseGlyph value="skew_-10_20_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="16">
+ <BaseGlyph value="transform_matrix_1_0_0_1_125_125"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.0"/>
+ <dx value="125.0"/>
+ <dy value="125.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="17">
+ <BaseGlyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.5"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.5"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="18">
+ <BaseGlyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="0.9659"/>
+ <yx value="0.2588"/>
+ <xy value="-0.2588"/>
+ <yy value="0.9659"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="19">
+ <BaseGlyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.6"/>
+ <yy value="1.0"/>
+ <dx value="-300.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="20">
+ <BaseGlyph value="translate_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="21">
+ <BaseGlyph value="translate_0_100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="22">
+ <BaseGlyph value="translate_0_-100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="-100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="23">
+ <BaseGlyph value="translate_100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="24">
+ <BaseGlyph value="translate_-100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="25">
+ <BaseGlyph value="translate_200_200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="200"/>
+ <dy value="200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="26">
+ <BaseGlyph value="translate_-200_-200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-200"/>
+ <dy value="-200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ </BaseGlyphList>
+ </COLR>
+
+ <CPAL>
+ <version value="1"/>
+ <numPaletteEntries value="7"/>
+ <palette index="0">
+ <color index="0" value="#FF0000FF"/>
+ <color index="1" value="#FFA500FF"/>
+ <color index="2" value="#FFFF00FF"/>
+ <color index="3" value="#008000FF"/>
+ <color index="4" value="#0000FFFF"/>
+ <color index="5" value="#4B0082FF"/>
+ <color index="6" value="#EE82EEFF"/>
+ </palette>
+ <palette index="1" type="2">
+ <color index="0" value="#2A294AFF"/>
+ <color index="1" value="#244163FF"/>
+ <color index="2" value="#1B6388FF"/>
+ <color index="3" value="#157DA3FF"/>
+ <color index="4" value="#0E9AC2FF"/>
+ <color index="5" value="#05BEE8FF"/>
+ <color index="6" value="#00D4FFFF"/>
+ </palette>
+ <palette index="2" type="1">
+ <color index="0" value="#FC7118FF"/>
+ <color index="1" value="#FB8115FF"/>
+ <color index="2" value="#FA9511FF"/>
+ <color index="3" value="#FAA80DFF"/>
+ <color index="4" value="#F9BE09FF"/>
+ <color index="5" value="#F8D304FF"/>
+ <color index="6" value="#F8E700FF"/>
+ </palette>
+ </CPAL>
+
+</ttFont>
diff --git a/Tests/ttLib/tables/data/COLRv1-clip-boxes-q1-expected.ttx b/Tests/ttLib/tables/data/COLRv1-clip-boxes-q1-expected.ttx
new file mode 100644
index 00000000..c3b8ef62
--- /dev/null
+++ b/Tests/ttLib/tables/data/COLRv1-clip-boxes-q1-expected.ttx
@@ -0,0 +1,919 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <COLR>
+ <Version value="1"/>
+ <BaseGlyphList>
+ <!-- BaseGlyphCount=27 -->
+ <BaseGlyphPaintRecord index="0">
+ <BaseGlyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="1">
+ <BaseGlyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="2">
+ <BaseGlyph value="scale_0.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="16"><!-- PaintScale -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="3">
+ <BaseGlyph value="scale_1.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="20"><!-- PaintScaleUniform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="4">
+ <BaseGlyph value="scale_0.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="5">
+ <BaseGlyph value="scale_1.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="6">
+ <BaseGlyph value="rotate_10_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="24"><!-- PaintRotate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="10.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="7">
+ <BaseGlyph value="rotate_-10_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-10.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="8">
+ <BaseGlyph value="rotate_25_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="25.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="9">
+ <BaseGlyph value="rotate_-15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="10">
+ <BaseGlyph value="skew_25_0_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="11">
+ <BaseGlyph value="skew_25_0_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="12">
+ <BaseGlyph value="skew_0_15_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="13">
+ <BaseGlyph value="skew_0_15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="14">
+ <BaseGlyph value="skew_-10_20_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="15">
+ <BaseGlyph value="skew_-10_20_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="16">
+ <BaseGlyph value="transform_matrix_1_0_0_1_125_125"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.0"/>
+ <dx value="125.0"/>
+ <dy value="125.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="17">
+ <BaseGlyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.5"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.5"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="18">
+ <BaseGlyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="0.9659"/>
+ <yx value="0.2588"/>
+ <xy value="-0.2588"/>
+ <yy value="0.9659"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="19">
+ <BaseGlyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.6"/>
+ <yy value="1.0"/>
+ <dx value="-300.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="20">
+ <BaseGlyph value="translate_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="21">
+ <BaseGlyph value="translate_0_100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="22">
+ <BaseGlyph value="translate_0_-100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="-100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="23">
+ <BaseGlyph value="translate_100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="24">
+ <BaseGlyph value="translate_-100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="25">
+ <BaseGlyph value="translate_200_200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="200"/>
+ <dy value="200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="26">
+ <BaseGlyph value="translate_-200_-200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-200"/>
+ <dy value="-200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ </BaseGlyphList>
+ <ClipList Format="1">
+ <Clip>
+ <Glyph value="rotate_-10_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="170"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="845"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="rotate_-15_center_500.0_500.0"/>
+ <Glyph value="rotate_25_center_500.0_500.0"/>
+ <Glyph value="translate_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="rotate_10_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="155"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="830"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="125"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="1125"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="-125"/>
+ <xMax value="875"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="125"/>
+ <xMax value="750"/>
+ <yMax value="875"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_0_0"/>
+ <Glyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="1125"/>
+ <yMax value="1125"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="-125"/>
+ <yMin value="-125"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="125"/>
+ <yMin value="125"/>
+ <xMax value="875"/>
+ <yMax value="875"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_-10_20_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="157"/>
+ <yMin value="58"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_-10_20_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="245"/>
+ <yMin value="240"/>
+ <xMax value="755"/>
+ <yMax value="760"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_0_15_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="891"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_0_15_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="243"/>
+ <xMax value="750"/>
+ <yMax value="757"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_25_0_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="5"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_25_0_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="238"/>
+ <yMin value="250"/>
+ <xMax value="762"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="105"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="861"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <ClipBox Format="1">
+ <xMin value="235"/>
+ <yMin value="250"/>
+ <xMax value="766"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="transform_matrix_1_0_0_1_125_125"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="875"/>
+ <yMax value="875"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_-100_0"/>
+ <ClipBox Format="1">
+ <xMin value="150"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_-200_-200"/>
+ <ClipBox Format="1">
+ <xMin value="50"/>
+ <yMin value="50"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_0_-100"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="150"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_0_100"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="850"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_100_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="850"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_200_200"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="950"/>
+ <yMax value="950"/>
+ </ClipBox>
+ </Clip>
+ </ClipList>
+ </COLR>
+
+</ttFont>
diff --git a/Tests/ttLib/tables/data/COLRv1-clip-boxes-q10-expected.ttx b/Tests/ttLib/tables/data/COLRv1-clip-boxes-q10-expected.ttx
new file mode 100644
index 00000000..f6b66f5c
--- /dev/null
+++ b/Tests/ttLib/tables/data/COLRv1-clip-boxes-q10-expected.ttx
@@ -0,0 +1,911 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <COLR>
+ <Version value="1"/>
+ <BaseGlyphList>
+ <!-- BaseGlyphCount=27 -->
+ <BaseGlyphPaintRecord index="0">
+ <BaseGlyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="1">
+ <BaseGlyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="2">
+ <BaseGlyph value="scale_0.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="16"><!-- PaintScale -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="3">
+ <BaseGlyph value="scale_1.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="20"><!-- PaintScaleUniform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="4">
+ <BaseGlyph value="scale_0.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="5">
+ <BaseGlyph value="scale_1.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="6">
+ <BaseGlyph value="rotate_10_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="24"><!-- PaintRotate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="10.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="7">
+ <BaseGlyph value="rotate_-10_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-10.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="8">
+ <BaseGlyph value="rotate_25_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="25.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="9">
+ <BaseGlyph value="rotate_-15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="10">
+ <BaseGlyph value="skew_25_0_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="11">
+ <BaseGlyph value="skew_25_0_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="12">
+ <BaseGlyph value="skew_0_15_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="13">
+ <BaseGlyph value="skew_0_15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="14">
+ <BaseGlyph value="skew_-10_20_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="15">
+ <BaseGlyph value="skew_-10_20_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="16">
+ <BaseGlyph value="transform_matrix_1_0_0_1_125_125"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.0"/>
+ <dx value="125.0"/>
+ <dy value="125.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="17">
+ <BaseGlyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.5"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.5"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="18">
+ <BaseGlyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="0.9659"/>
+ <yx value="0.2588"/>
+ <xy value="-0.2588"/>
+ <yy value="0.9659"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="19">
+ <BaseGlyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.6"/>
+ <yy value="1.0"/>
+ <dx value="-300.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="20">
+ <BaseGlyph value="translate_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="21">
+ <BaseGlyph value="translate_0_100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="22">
+ <BaseGlyph value="translate_0_-100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="-100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="23">
+ <BaseGlyph value="translate_100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="24">
+ <BaseGlyph value="translate_-100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="25">
+ <BaseGlyph value="translate_200_200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="200"/>
+ <dy value="200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="26">
+ <BaseGlyph value="translate_-200_-200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-200"/>
+ <dy value="-200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ </BaseGlyphList>
+ <ClipList Format="1">
+ <Clip>
+ <Glyph value="rotate_-10_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="170"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="850"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="rotate_-15_center_500.0_500.0"/>
+ <Glyph value="rotate_25_center_500.0_500.0"/>
+ <Glyph value="translate_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="rotate_10_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="150"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="830"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="120"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="1130"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="-130"/>
+ <xMax value="880"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="120"/>
+ <xMax value="750"/>
+ <yMax value="880"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_0_0"/>
+ <Glyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="1130"/>
+ <yMax value="1130"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="-130"/>
+ <yMin value="-130"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="120"/>
+ <yMin value="120"/>
+ <xMax value="880"/>
+ <yMax value="880"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_-10_20_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="150"/>
+ <yMin value="50"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_-10_20_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="240"/>
+ <yMin value="240"/>
+ <xMax value="760"/>
+ <yMax value="760"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_0_15_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="900"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_0_15_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="240"/>
+ <xMax value="750"/>
+ <yMax value="760"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_25_0_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="0"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_25_0_center_500.0_500.0"/>
+ <Glyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <ClipBox Format="1">
+ <xMin value="230"/>
+ <yMin value="250"/>
+ <xMax value="770"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="100"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="870"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="transform_matrix_1_0_0_1_125_125"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="880"/>
+ <yMax value="880"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_-100_0"/>
+ <ClipBox Format="1">
+ <xMin value="150"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_-200_-200"/>
+ <ClipBox Format="1">
+ <xMin value="50"/>
+ <yMin value="50"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_0_-100"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="150"/>
+ <xMax value="750"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_0_100"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="750"/>
+ <yMax value="850"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_100_0"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="850"/>
+ <yMax value="750"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_200_200"/>
+ <ClipBox Format="1">
+ <xMin value="250"/>
+ <yMin value="250"/>
+ <xMax value="950"/>
+ <yMax value="950"/>
+ </ClipBox>
+ </Clip>
+ </ClipList>
+ </COLR>
+
+</ttFont>
diff --git a/Tests/ttLib/tables/data/COLRv1-clip-boxes-q100-expected.ttx b/Tests/ttLib/tables/data/COLRv1-clip-boxes-q100-expected.ttx
new file mode 100644
index 00000000..f4fee78a
--- /dev/null
+++ b/Tests/ttLib/tables/data/COLRv1-clip-boxes-q100-expected.ttx
@@ -0,0 +1,863 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <COLR>
+ <Version value="1"/>
+ <BaseGlyphList>
+ <!-- BaseGlyphCount=27 -->
+ <BaseGlyphPaintRecord index="0">
+ <BaseGlyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="1">
+ <BaseGlyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="2">
+ <BaseGlyph value="scale_0.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="16"><!-- PaintScale -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="3">
+ <BaseGlyph value="scale_1.5_1.5_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="20"><!-- PaintScaleUniform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="4">
+ <BaseGlyph value="scale_0.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="18"><!-- PaintScaleAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scaleX value="0.5"/>
+ <scaleY value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="5">
+ <BaseGlyph value="scale_1.5_1.5_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="22"><!-- PaintScaleUniformAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <scale value="1.5"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="6">
+ <BaseGlyph value="rotate_10_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="24"><!-- PaintRotate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="10.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="7">
+ <BaseGlyph value="rotate_-10_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-10.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="8">
+ <BaseGlyph value="rotate_25_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="25.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="9">
+ <BaseGlyph value="rotate_-15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="26"><!-- PaintRotateAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <angle value="-15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="10">
+ <BaseGlyph value="skew_25_0_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="11">
+ <BaseGlyph value="skew_25_0_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="25.0"/>
+ <ySkewAngle value="0.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="12">
+ <BaseGlyph value="skew_0_15_center_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="28"><!-- PaintSkew -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="13">
+ <BaseGlyph value="skew_0_15_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="0.0"/>
+ <ySkewAngle value="15.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="14">
+ <BaseGlyph value="skew_-10_20_center_500.0_500.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="500"/>
+ <centerY value="500"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="15">
+ <BaseGlyph value="skew_-10_20_center_1000_1000"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="30"><!-- PaintSkewAroundCenter -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <xSkewAngle value="-10.0"/>
+ <ySkewAngle value="20.0"/>
+ <centerX value="1000"/>
+ <centerY value="1000"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="16">
+ <BaseGlyph value="transform_matrix_1_0_0_1_125_125"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.0"/>
+ <dx value="125.0"/>
+ <dy value="125.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="17">
+ <BaseGlyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.5"/>
+ <yx value="0.0"/>
+ <xy value="0.0"/>
+ <yy value="1.5"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="18">
+ <BaseGlyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="0.9659"/>
+ <yx value="0.2588"/>
+ <xy value="-0.2588"/>
+ <yy value="0.9659"/>
+ <dx value="0.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="19">
+ <BaseGlyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="12"><!-- PaintTransform -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <Transform>
+ <xx value="1.0"/>
+ <yx value="0.0"/>
+ <xy value="0.6"/>
+ <yy value="1.0"/>
+ <dx value="-300.0"/>
+ <dy value="0.0"/>
+ </Transform>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="20">
+ <BaseGlyph value="translate_0_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="21">
+ <BaseGlyph value="translate_0_100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="22">
+ <BaseGlyph value="translate_0_-100"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="0"/>
+ <dy value="-100"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="23">
+ <BaseGlyph value="translate_100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="24">
+ <BaseGlyph value="translate_-100_0"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-100"/>
+ <dy value="0"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="25">
+ <BaseGlyph value="translate_200_200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="200"/>
+ <dy value="200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ <BaseGlyphPaintRecord index="26">
+ <BaseGlyph value="translate_-200_-200"/>
+ <Paint Format="32"><!-- PaintComposite -->
+ <SourcePaint Format="14"><!-- PaintTranslate -->
+ <Paint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="1"/>
+ <Alpha value="0.7"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </Paint>
+ <dx value="-200"/>
+ <dy value="-200"/>
+ </SourcePaint>
+ <CompositeMode value="dest_over"/>
+ <BackdropPaint Format="10"><!-- PaintGlyph -->
+ <Paint Format="2"><!-- PaintSolid -->
+ <PaletteIndex value="4"/>
+ <Alpha value="0.5"/>
+ </Paint>
+ <Glyph value="cross_glyph"/>
+ </BackdropPaint>
+ </Paint>
+ </BaseGlyphPaintRecord>
+ </BaseGlyphList>
+ <ClipList Format="1">
+ <Clip>
+ <Glyph value="rotate_-10_center_1000_1000"/>
+ <Glyph value="rotate_10_center_0_0"/>
+ <Glyph value="transform_matrix_0.9659_0.2588_-0.2588_0.9659_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="100"/>
+ <yMin value="200"/>
+ <xMax value="800"/>
+ <yMax value="900"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="rotate_-15_center_500.0_500.0"/>
+ <Glyph value="rotate_25_center_500.0_500.0"/>
+ <Glyph value="skew_-10_20_center_500.0_500.0"/>
+ <Glyph value="skew_0_15_center_500.0_500.0"/>
+ <Glyph value="skew_25_0_center_500.0_500.0"/>
+ <Glyph value="transform_matrix_1.0_0.0_0.6_1.0_-300.0_0.0"/>
+ <Glyph value="translate_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="200"/>
+ <xMax value="800"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="100"/>
+ <yMin value="200"/>
+ <xMax value="800"/>
+ <yMax value="1200"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="-200"/>
+ <xMax value="900"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_0.5_1.5_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="100"/>
+ <xMax value="800"/>
+ <yMax value="900"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_0_0"/>
+ <Glyph value="transform_matrix_1.5_0_0_1.5_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="200"/>
+ <xMax value="1200"/>
+ <yMax value="1200"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="-200"/>
+ <yMin value="-200"/>
+ <xMax value="800"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="scale_1.5_1.5_center_500.0_500.0"/>
+ <ClipBox Format="1">
+ <xMin value="100"/>
+ <yMin value="100"/>
+ <xMax value="900"/>
+ <yMax value="900"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_-10_20_center_1000_1000"/>
+ <ClipBox Format="1">
+ <xMin value="100"/>
+ <yMin value="0"/>
+ <xMax value="800"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_0_15_center_0_0"/>
+ <Glyph value="translate_0_100"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="200"/>
+ <xMax value="800"/>
+ <yMax value="900"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="skew_25_0_center_0_0"/>
+ <ClipBox Format="1">
+ <xMin value="0"/>
+ <yMin value="200"/>
+ <xMax value="800"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="transform_matrix_1_0_0_1_125_125"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="200"/>
+ <xMax value="900"/>
+ <yMax value="900"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_-100_0"/>
+ <ClipBox Format="1">
+ <xMin value="100"/>
+ <yMin value="200"/>
+ <xMax value="800"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_-200_-200"/>
+ <ClipBox Format="1">
+ <xMin value="0"/>
+ <yMin value="0"/>
+ <xMax value="800"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_0_-100"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="100"/>
+ <xMax value="800"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_100_0"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="200"/>
+ <xMax value="900"/>
+ <yMax value="800"/>
+ </ClipBox>
+ </Clip>
+ <Clip>
+ <Glyph value="translate_200_200"/>
+ <ClipBox Format="1">
+ <xMin value="200"/>
+ <yMin value="200"/>
+ <xMax value="1000"/>
+ <yMax value="1000"/>
+ </ClipBox>
+ </Clip>
+ </ClipList>
+ </COLR>
+
+</ttFont>
diff --git a/Tests/ttLib/tables/data/NotoSans-VF-cubic.subset.ttf b/Tests/ttLib/tables/data/NotoSans-VF-cubic.subset.ttf
new file mode 100644
index 00000000..604d4281
--- /dev/null
+++ b/Tests/ttLib/tables/data/NotoSans-VF-cubic.subset.ttf
Binary files differ
diff --git a/Tests/ttLib/tables/data/_g_l_y_f_instructions.ttx b/Tests/ttLib/tables/data/_g_l_y_f_instructions.ttx
new file mode 100644
index 00000000..d090a25f
--- /dev/null
+++ b/Tests/ttLib/tables/data/_g_l_y_f_instructions.ttx
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="NULL"/>
+ <GlyphID id="2" name="nonmarkingreturn"/>
+ <GlyphID id="3" name="A"/>
+ <GlyphID id="4" name="B"/>
+ </GlyphOrder>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="5"/>
+ <maxPoints value="4"/>
+ <maxContours value="1"/>
+ <maxCompositePoints value="4"/>
+ <maxCompositeContours value="1"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="10"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="512"/>
+ <maxSizeOfInstructions value="371"/>
+ <maxComponentElements value="1"/>
+ <maxComponentDepth value="1"/>
+ </maxp>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef"/><!-- contains no outline data -->
+
+ <TTGlyph name="A" xMin="100" yMin="0" xMax="477" yMax="700">
+ <contour>
+ <pt x="100" y="700" on="1"/>
+ <pt x="477" y="700" on="1"/>
+ <pt x="477" y="0" on="1"/>
+ <pt x="100" y="0" on="1"/>
+ </contour>
+ <instructions>
+ <assembly>
+ SVTCA[0] /* SetFPVectorToAxis */
+ PUSHW[ ] /* 1 value pushed */
+ 3
+ MDAP[1] /* MoveDirectAbsPt */
+ IUP[0] /* InterpolateUntPts */
+ IUP[1] /* InterpolateUntPts */
+ </assembly>
+ </instructions>
+ </TTGlyph>
+
+ <TTGlyph name="B" xMin="100" yMin="0" xMax="477" yMax="700">
+ <component glyphName="A" x="0" y="0" flags="0x204"/>
+ <instructions>
+ <assembly>
+ SVTCA[0] /* SetFPVectorToAxis */
+ PUSHW[ ] /* 1 value pushed */
+ 1
+ MDAP[1] /* MoveDirectAbsPt */
+ IUP[0] /* InterpolateUntPts */
+ IUP[1] /* InterpolateUntPts */
+ </assembly>
+ </instructions>
+ </TTGlyph>
+
+ <TTGlyph name="NULL"/><!-- contains no outline data -->
+
+ <TTGlyph name="nonmarkingreturn"/><!-- contains no outline data -->
+
+ </glyf>
+
+</ttFont>
diff --git a/Tests/ttLib/tables/otBase_test.py b/Tests/ttLib/tables/otBase_test.py
index ce0416e4..27efcba9 100644
--- a/Tests/ttLib/tables/otBase_test.py
+++ b/Tests/ttLib/tables/otBase_test.py
@@ -26,8 +26,7 @@ class OTTableReaderTest(unittest.TestCase):
def test_readUShortArray(self):
reader = OTTableReader(deHexStr("DE AD BE EF CA FE"))
- self.assertEqual(list(reader.readUShortArray(3)),
- [0xDEAD, 0xBEEF, 0xCAFE])
+ self.assertEqual(list(reader.readUShortArray(3)), [0xDEAD, 0xBEEF, 0xCAFE])
self.assertEqual(reader.pos, 6)
def test_readUInt24(self):
@@ -91,4 +90,5 @@ class OTTableWriterTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/otConverters_test.py b/Tests/ttLib/tables/otConverters_test.py
index 1aff03bd..94b62a1f 100644
--- a/Tests/ttLib/tables/otConverters_test.py
+++ b/Tests/ttLib/tables/otConverters_test.py
@@ -22,8 +22,10 @@ class Char64Test(unittest.TestCase):
data = self.converter.read(reader, self.font, {})
self.assertEqual(data, "Hello � world")
self.assertEqual(reader.pos, 64)
- self.assertIn('replaced non-ASCII characters in "Hello � world"',
- [r.msg for r in captor.records])
+ self.assertIn(
+ 'replaced non-ASCII characters in "Hello � world"',
+ [r.msg for r in captor.records],
+ )
def test_write(self):
writer = OTTableWriter()
@@ -35,16 +37,20 @@ class Char64Test(unittest.TestCase):
with CapturingLogHandler(otConverters.log, "WARNING") as captor:
self.converter.write(writer, self.font, {}, "Hello ☃")
self.assertEqual(writer.getData(), b"Hello ?" + 57 * b"\0")
- self.assertIn('replacing non-ASCII characters in "Hello ☃"',
- [r.msg for r in captor.records])
+ self.assertIn(
+ 'replacing non-ASCII characters in "Hello ☃"',
+ [r.msg for r in captor.records],
+ )
def test_write_truncated(self):
writer = OTTableWriter()
with CapturingLogHandler(otConverters.log, "WARNING") as captor:
self.converter.write(writer, self.font, {}, "A" * 80)
self.assertEqual(writer.getData(), b"A" * 64)
- self.assertIn('truncating overlong "' + "A" * 80 + '" to 64 bytes',
- [r.msg for r in captor.records])
+ self.assertIn(
+ 'truncating overlong "' + "A" * 80 + '" to 64 bytes',
+ [r.msg for r in captor.records],
+ )
def test_xmlRead(self):
value = self.converter.xmlRead({"value": "Foo"}, [], self.font)
@@ -52,20 +58,23 @@ class Char64Test(unittest.TestCase):
def test_xmlWrite(self):
writer = makeXMLWriter()
- self.converter.xmlWrite(writer, self.font, "Hello world", "Element",
- [("attr", "v")])
+ self.converter.xmlWrite(
+ writer, self.font, "Hello world", "Element", [("attr", "v")]
+ )
xml = writer.file.getvalue().decode("utf-8").rstrip()
self.assertEqual(xml, '<Element attr="v" value="Hello world"/>')
class GlyphIDTest(unittest.TestCase):
font = FakeFont(".notdef A B C".split())
- converter = otConverters.GlyphID('GlyphID', 0, None, None)
+ converter = otConverters.GlyphID("GlyphID", 0, None, None)
def test_readArray(self):
reader = OTTableReader(deHexStr("0002 0001 DEAD 0002"))
- self.assertEqual(self.converter.readArray(reader, self.font, {}, 4),
- ["B", "A", "glyph57005", "B"])
+ self.assertEqual(
+ self.converter.readArray(reader, self.font, {}, 4),
+ ["B", "A", "glyph57005", "B"],
+ )
self.assertEqual(reader.pos, 8)
def test_read(self):
@@ -81,7 +90,7 @@ class GlyphIDTest(unittest.TestCase):
class LongTest(unittest.TestCase):
font = FakeFont([])
- converter = otConverters.Long('Long', 0, None, None)
+ converter = otConverters.Long("Long", 0, None, None)
def test_read(self):
reader = OTTableReader(deHexStr("FF0000EE"))
@@ -105,12 +114,12 @@ class LongTest(unittest.TestCase):
class NameIDTest(unittest.TestCase):
- converter = otConverters.NameID('NameID', 0, None, None)
+ converter = otConverters.NameID("NameID", 0, None, None)
def makeFont(self):
- nameTable = newTable('name')
- nameTable.setName(u"Demibold Condensed", 0x123, 3, 0, 0x409)
- nameTable.setName(u"Copyright 2018", 0, 3, 0, 0x409)
+ nameTable = newTable("name")
+ nameTable.setName("Demibold Condensed", 0x123, 3, 0, 0x409)
+ nameTable.setName("Copyright 2018", 0, 3, 0, 0x409)
return {"name": nameTable}
def test_read(self):
@@ -125,33 +134,36 @@ class NameIDTest(unittest.TestCase):
def test_xmlWrite(self):
writer = makeXMLWriter()
- self.converter.xmlWrite(writer, self.makeFont(), 291,
- "FooNameID", [("attr", "val")])
+ self.converter.xmlWrite(
+ writer, self.makeFont(), 291, "FooNameID", [("attr", "val")]
+ )
xml = writer.file.getvalue().decode("utf-8").rstrip()
self.assertEqual(
- xml,
- '<FooNameID attr="val" value="291"/> <!-- Demibold Condensed -->')
+ xml, '<FooNameID attr="val" value="291"/> <!-- Demibold Condensed -->'
+ )
def test_xmlWrite_missingID(self):
writer = makeXMLWriter()
with CapturingLogHandler(otConverters.log, "WARNING") as captor:
- self.converter.xmlWrite(writer, self.makeFont(), 666,
- "Entity", [("attrib", "val")])
- self.assertIn("name id 666 missing from name table",
- [r.msg for r in captor.records])
+ self.converter.xmlWrite(
+ writer, self.makeFont(), 666, "Entity", [("attrib", "val")]
+ )
+ self.assertIn(
+ "name id 666 missing from name table", [r.msg for r in captor.records]
+ )
xml = writer.file.getvalue().decode("utf-8").rstrip()
self.assertEqual(
xml,
- '<Entity attrib="val"'
- ' value="666"/> <!-- missing from name table -->')
+ '<Entity attrib="val"' ' value="666"/> <!-- missing from name table -->',
+ )
def test_xmlWrite_NULL(self):
writer = makeXMLWriter()
- self.converter.xmlWrite(writer, self.makeFont(), 0,
- "FooNameID", [("attr", "val")])
+ self.converter.xmlWrite(
+ writer, self.makeFont(), 0, "FooNameID", [("attr", "val")]
+ )
xml = writer.file.getvalue().decode("utf-8").rstrip()
- self.assertEqual(
- xml, '<FooNameID attr="val" value="0"/>')
+ self.assertEqual(xml, '<FooNameID attr="val" value="0"/>')
class UInt8Test(unittest.TestCase):
@@ -181,8 +193,9 @@ class UInt8Test(unittest.TestCase):
class AATLookupTest(unittest.TestCase):
font = FakeFont(".notdef A B C D E F G H A.alt B.alt".split())
- converter = otConverters.AATLookup("AATLookup", 0, None,
- tableClass=otConverters.GlyphID)
+ converter = otConverters.AATLookup(
+ "AATLookup", 0, None, tableClass=otConverters.GlyphID
+ )
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
@@ -193,170 +206,228 @@ class AATLookupTest(unittest.TestCase):
def test_readFormat0(self):
reader = OTTableReader(deHexStr("0000 0000 0001 0002 0000 7D00 0001"))
- self.assertEqual(self.converter.read(reader, self.font, None), {
- ".notdef": ".notdef",
- "A": "A",
- "B": "B",
- "C": ".notdef",
- "D": "glyph32000",
- "E": "A"
- })
+ self.assertEqual(
+ self.converter.read(reader, self.font, None),
+ {
+ ".notdef": ".notdef",
+ "A": "A",
+ "B": "B",
+ "C": ".notdef",
+ "D": "glyph32000",
+ "E": "A",
+ },
+ )
def test_readFormat2(self):
- reader = OTTableReader(deHexStr(
- "0002 0006 0002 000C 0001 0006 "
- "0002 0001 0003 " # glyph A..B: map to C
- "0007 0005 0008 " # glyph E..G: map to H
- "FFFF FFFF FFFF")) # end of search table
- self.assertEqual(self.converter.read(reader, self.font, None), {
- "A": "C",
- "B": "C",
- "E": "H",
- "F": "H",
- "G": "H",
- })
+ reader = OTTableReader(
+ deHexStr(
+ "0002 0006 0002 000C 0001 0006 "
+ "0002 0001 0003 " # glyph A..B: map to C
+ "0007 0005 0008 " # glyph E..G: map to H
+ "FFFF FFFF FFFF"
+ )
+ ) # end of search table
+ self.assertEqual(
+ self.converter.read(reader, self.font, None),
+ {
+ "A": "C",
+ "B": "C",
+ "E": "H",
+ "F": "H",
+ "G": "H",
+ },
+ )
def test_readFormat4(self):
- reader = OTTableReader(deHexStr(
- "0004 0006 0003 000C 0001 0006 "
- "0002 0001 001E " # glyph 1..2: mapping at offset 0x1E
- "0005 0004 001E " # glyph 4..5: mapping at offset 0x1E
- "FFFF FFFF FFFF " # end of search table
- "0007 0008")) # offset 0x18: glyphs [7, 8] = [G, H]
- self.assertEqual(self.converter.read(reader, self.font, None), {
- "A": "G",
- "B": "H",
- "D": "G",
- "E": "H",
- })
+ reader = OTTableReader(
+ deHexStr(
+ "0004 0006 0003 000C 0001 0006 "
+ "0002 0001 001E " # glyph 1..2: mapping at offset 0x1E
+ "0005 0004 001E " # glyph 4..5: mapping at offset 0x1E
+ "FFFF FFFF FFFF " # end of search table
+ "0007 0008"
+ )
+ ) # offset 0x18: glyphs [7, 8] = [G, H]
+ self.assertEqual(
+ self.converter.read(reader, self.font, None),
+ {
+ "A": "G",
+ "B": "H",
+ "D": "G",
+ "E": "H",
+ },
+ )
def test_readFormat6(self):
- reader = OTTableReader(deHexStr(
- "0006 0004 0002 0008 0001 0004 "
- "0003 0001 " # C --> A
- "0005 0002 " # E --> B
- "FFFF FFFF")) # end of search table
- self.assertEqual(self.converter.read(reader, self.font, None), {
- "C": "A",
- "E": "B",
- })
+ reader = OTTableReader(
+ deHexStr(
+ "0006 0004 0002 0008 0001 0004 "
+ "0003 0001 " # C --> A
+ "0005 0002 " # E --> B
+ "FFFF FFFF"
+ )
+ ) # end of search table
+ self.assertEqual(
+ self.converter.read(reader, self.font, None),
+ {
+ "C": "A",
+ "E": "B",
+ },
+ )
def test_readFormat8(self):
- reader = OTTableReader(deHexStr(
- "0008 "
- "0003 0003 " # first: C, count: 3
- "0007 0001 0002")) # [G, A, B]
- self.assertEqual(self.converter.read(reader, self.font, None), {
- "C": "G",
- "D": "A",
- "E": "B",
- })
+ reader = OTTableReader(
+ deHexStr("0008 " "0003 0003 " "0007 0001 0002") # first: C, count: 3
+ ) # [G, A, B]
+ self.assertEqual(
+ self.converter.read(reader, self.font, None),
+ {
+ "C": "G",
+ "D": "A",
+ "E": "B",
+ },
+ )
def test_readUnknownFormat(self):
reader = OTTableReader(deHexStr("0009"))
self.assertRaisesRegex(
AssertionError,
"unsupported lookup format: 9",
- self.converter.read, reader, self.font, None)
+ self.converter.read,
+ reader,
+ self.font,
+ None,
+ )
def test_writeFormat0(self):
writer = OTTableWriter()
font = FakeFont(".notdef A B C".split())
- self.converter.write(writer, font, {}, {
- ".notdef": ".notdef",
- "A": "C",
- "B": "C",
- "C": "A"
- })
+ self.converter.write(
+ writer, font, {}, {".notdef": ".notdef", "A": "C", "B": "C", "C": "A"}
+ )
self.assertEqual(writer.getData(), deHexStr("0000 0000 0003 0003 0001"))
def test_writeFormat2(self):
writer = OTTableWriter()
font = FakeFont(".notdef A B C D E F G H".split())
- self.converter.write(writer, font, {}, {
- "B": "C",
- "C": "C",
- "D": "C",
- "E": "C",
- "G": "A",
- "H": "A",
- })
- self.assertEqual(writer.getData(), deHexStr(
- "0002 " # format=2
- "0006 " # binSrchHeader.unitSize=6
- "0002 " # binSrchHeader.nUnits=2
- "000C " # binSrchHeader.searchRange=12
- "0001 " # binSrchHeader.entrySelector=1
- "0000 " # binSrchHeader.rangeShift=0
- "0005 0002 0003 " # segments[0].lastGlyph=E, firstGlyph=B, value=C
- "0008 0007 0001 " # segments[1].lastGlyph=H, firstGlyph=G, value=A
- "FFFF FFFF 0000 " # segments[2]=<END>
- ))
+ self.converter.write(
+ writer,
+ font,
+ {},
+ {
+ "B": "C",
+ "C": "C",
+ "D": "C",
+ "E": "C",
+ "G": "A",
+ "H": "A",
+ },
+ )
+ self.assertEqual(
+ writer.getData(),
+ deHexStr(
+ "0002 " # format=2
+ "0006 " # binSrchHeader.unitSize=6
+ "0002 " # binSrchHeader.nUnits=2
+ "000C " # binSrchHeader.searchRange=12
+ "0001 " # binSrchHeader.entrySelector=1
+ "0000 " # binSrchHeader.rangeShift=0
+ "0005 0002 0003 " # segments[0].lastGlyph=E, firstGlyph=B, value=C
+ "0008 0007 0001 " # segments[1].lastGlyph=H, firstGlyph=G, value=A
+ "FFFF FFFF 0000 " # segments[2]=<END>
+ ),
+ )
def test_writeFormat6(self):
writer = OTTableWriter()
font = FakeFont(".notdef A B C D E".split())
- self.converter.write(writer, font, {}, {
- "A": "C",
- "C": "B",
- "D": "D",
- "E": "E",
- })
- self.assertEqual(writer.getData(), deHexStr(
- "0006 " # format=6
- "0004 " # binSrchHeader.unitSize=4
- "0004 " # binSrchHeader.nUnits=4
- "0010 " # binSrchHeader.searchRange=16
- "0002 " # binSrchHeader.entrySelector=2
- "0000 " # binSrchHeader.rangeShift=0
- "0001 0003 " # entries[0].glyph=A, .value=C
- "0003 0002 " # entries[1].glyph=C, .value=B
- "0004 0004 " # entries[2].glyph=D, .value=D
- "0005 0005 " # entries[3].glyph=E, .value=E
- "FFFF 0000 " # entries[4]=<END>
- ))
+ self.converter.write(
+ writer,
+ font,
+ {},
+ {
+ "A": "C",
+ "C": "B",
+ "D": "D",
+ "E": "E",
+ },
+ )
+ self.assertEqual(
+ writer.getData(),
+ deHexStr(
+ "0006 " # format=6
+ "0004 " # binSrchHeader.unitSize=4
+ "0004 " # binSrchHeader.nUnits=4
+ "0010 " # binSrchHeader.searchRange=16
+ "0002 " # binSrchHeader.entrySelector=2
+ "0000 " # binSrchHeader.rangeShift=0
+ "0001 0003 " # entries[0].glyph=A, .value=C
+ "0003 0002 " # entries[1].glyph=C, .value=B
+ "0004 0004 " # entries[2].glyph=D, .value=D
+ "0005 0005 " # entries[3].glyph=E, .value=E
+ "FFFF 0000 " # entries[4]=<END>
+ ),
+ )
def test_writeFormat8(self):
writer = OTTableWriter()
font = FakeFont(".notdef A B C D E F G H".split())
- self.converter.write(writer, font, {}, {
- "B": "B",
- "C": "A",
- "D": "B",
- "E": "C",
- "F": "B",
- "G": "A",
- })
- self.assertEqual(writer.getData(), deHexStr(
- "0008 " # format=8
- "0002 " # firstGlyph=B
- "0006 " # glyphCount=6
- "0002 0001 0002 0003 0002 0001" # valueArray=[B, A, B, C, B, A]
- ))
+ self.converter.write(
+ writer,
+ font,
+ {},
+ {
+ "B": "B",
+ "C": "A",
+ "D": "B",
+ "E": "C",
+ "F": "B",
+ "G": "A",
+ },
+ )
+ self.assertEqual(
+ writer.getData(),
+ deHexStr(
+ "0008 " # format=8
+ "0002 " # firstGlyph=B
+ "0006 " # glyphCount=6
+ "0002 0001 0002 0003 0002 0001" # valueArray=[B, A, B, C, B, A]
+ ),
+ )
def test_xmlRead(self):
- value = self.converter.xmlRead({}, [
- ("Lookup", {"glyph": "A", "value": "A.alt"}, []),
- ("Lookup", {"glyph": "B", "value": "B.alt"}, []),
- ], self.font)
+ value = self.converter.xmlRead(
+ {},
+ [
+ ("Lookup", {"glyph": "A", "value": "A.alt"}, []),
+ ("Lookup", {"glyph": "B", "value": "B.alt"}, []),
+ ],
+ self.font,
+ )
self.assertEqual(value, {"A": "A.alt", "B": "B.alt"})
def test_xmlWrite(self):
writer = makeXMLWriter()
- self.converter.xmlWrite(writer, self.font,
- value={"A": "A.alt", "B": "B.alt"},
- name="Foo", attrs=[("attr", "val")])
+ self.converter.xmlWrite(
+ writer,
+ self.font,
+ value={"A": "A.alt", "B": "B.alt"},
+ name="Foo",
+ attrs=[("attr", "val")],
+ )
xml = writer.file.getvalue().decode("utf-8").splitlines()
- self.assertEqual(xml, [
- '<Foo attr="val">',
- ' <Lookup glyph="A" value="A.alt"/>',
- ' <Lookup glyph="B" value="B.alt"/>',
- '</Foo>',
- ])
+ self.assertEqual(
+ xml,
+ [
+ '<Foo attr="val">',
+ ' <Lookup glyph="A" value="A.alt"/>',
+ ' <Lookup glyph="B" value="B.alt"/>',
+ "</Foo>",
+ ],
+ )
class LazyListTest(unittest.TestCase):
-
def test_slice(self):
ll = otConverters._LazyList([10, 11, 12, 13])
sl = ll[:]
@@ -426,4 +497,5 @@ class LazyListTest(unittest.TestCase):
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/otTables_test.py b/Tests/ttLib/tables/otTables_test.py
index 3f74b7a9..db33e4c6 100644
--- a/Tests/ttLib/tables/otTables_test.py
+++ b/Tests/ttLib/tables/otTables_test.py
@@ -21,10 +21,7 @@ class SingleSubstTest(unittest.TestCase):
def test_postRead_format1(self):
table = otTables.SingleSubst()
table.Format = 1
- rawTable = {
- "Coverage": makeCoverage(["A", "B", "C"]),
- "DeltaGlyphID": 5
- }
+ rawTable = {"Coverage": makeCoverage(["A", "B", "C"]), "DeltaGlyphID": 5}
table.postRead(rawTable, self.font)
self.assertEqual(table.mapping, {"A": "a", "B": "b", "C": "c"})
@@ -34,7 +31,7 @@ class SingleSubstTest(unittest.TestCase):
rawTable = {
"Coverage": makeCoverage(["A", "B", "C"]),
"GlyphCount": 3,
- "Substitute": ["c", "b", "a"]
+ "Substitute": ["c", "b", "a"],
}
table.postRead(rawTable, self.font)
self.assertEqual(table.mapping, {"A": "c", "B": "b", "C": "a"})
@@ -74,18 +71,22 @@ class SingleSubstTest(unittest.TestCase):
table = otTables.SingleSubst()
table.mapping = {"A": "a", "B": "b", "C": "c"}
table.toXML2(writer, self.font)
- self.assertEqual(writer.file.getvalue().splitlines()[1:], [
- '<Substitution in="A" out="a"/>',
- '<Substitution in="B" out="b"/>',
- '<Substitution in="C" out="c"/>',
- ])
+ self.assertEqual(
+ writer.file.getvalue().splitlines()[1:],
+ [
+ '<Substitution in="A" out="a"/>',
+ '<Substitution in="B" out="b"/>',
+ '<Substitution in="C" out="c"/>',
+ ],
+ )
def test_fromXML(self):
table = otTables.SingleSubst()
for name, attrs, content in parseXML(
- '<Substitution in="A" out="a"/>'
- '<Substitution in="B" out="b"/>'
- '<Substitution in="C" out="c"/>'):
+ '<Substitution in="A" out="a"/>'
+ '<Substitution in="B" out="b"/>'
+ '<Substitution in="C" out="c"/>'
+ ):
table.fromXML(name, attrs, content, self.font)
self.assertEqual(table.mapping, {"A": "a", "B": "b", "C": "c"})
@@ -101,16 +102,10 @@ class MultipleSubstTest(unittest.TestCase):
table.Format = 1
rawTable = {
"Coverage": makeCoverage(["c_t", "f_f_i"]),
- "Sequence": [
- makeSequence(["c", "t"]),
- makeSequence(["f", "f", "i"])
- ]
+ "Sequence": [makeSequence(["c", "t"]), makeSequence(["f", "f", "i"])],
}
table.postRead(rawTable, self.font)
- self.assertEqual(table.mapping, {
- "c_t": ["c", "t"],
- "f_f_i": ["f", "f", "i"]
- })
+ self.assertEqual(table.mapping, {"c_t": ["c", "t"], "f_f_i": ["f", "f", "i"]})
def test_postRead_formatUnknown(self):
table = otTables.MultipleSubst()
@@ -129,60 +124,63 @@ class MultipleSubstTest(unittest.TestCase):
table = otTables.MultipleSubst()
table.mapping = {"c_t": ["c", "t"], "f_f_i": ["f", "f", "i"]}
table.toXML2(writer, self.font)
- self.assertEqual(writer.file.getvalue().splitlines()[1:], [
- '<Substitution in="c_t" out="c,t"/>',
- '<Substitution in="f_f_i" out="f,f,i"/>',
- ])
+ self.assertEqual(
+ writer.file.getvalue().splitlines()[1:],
+ [
+ '<Substitution in="c_t" out="c,t"/>',
+ '<Substitution in="f_f_i" out="f,f,i"/>',
+ ],
+ )
def test_fromXML(self):
table = otTables.MultipleSubst()
for name, attrs, content in parseXML(
- '<Substitution in="c_t" out="c,t"/>'
- '<Substitution in="f_f_i" out="f,f,i"/>'):
+ '<Substitution in="c_t" out="c,t"/>'
+ '<Substitution in="f_f_i" out="f,f,i"/>'
+ ):
table.fromXML(name, attrs, content, self.font)
- self.assertEqual(table.mapping,
- {'c_t': ['c', 't'], 'f_f_i': ['f', 'f', 'i']})
+ self.assertEqual(table.mapping, {"c_t": ["c", "t"], "f_f_i": ["f", "f", "i"]})
def test_fromXML_oldFormat(self):
table = otTables.MultipleSubst()
for name, attrs, content in parseXML(
- '<Coverage>'
- ' <Glyph value="c_t"/>'
- ' <Glyph value="f_f_i"/>'
- '</Coverage>'
- '<Sequence index="0">'
- ' <Substitute index="0" value="c"/>'
- ' <Substitute index="1" value="t"/>'
- '</Sequence>'
- '<Sequence index="1">'
- ' <Substitute index="0" value="f"/>'
- ' <Substitute index="1" value="f"/>'
- ' <Substitute index="2" value="i"/>'
- '</Sequence>'):
+ "<Coverage>"
+ ' <Glyph value="c_t"/>'
+ ' <Glyph value="f_f_i"/>'
+ "</Coverage>"
+ '<Sequence index="0">'
+ ' <Substitute index="0" value="c"/>'
+ ' <Substitute index="1" value="t"/>'
+ "</Sequence>"
+ '<Sequence index="1">'
+ ' <Substitute index="0" value="f"/>'
+ ' <Substitute index="1" value="f"/>'
+ ' <Substitute index="2" value="i"/>'
+ "</Sequence>"
+ ):
table.fromXML(name, attrs, content, self.font)
- self.assertEqual(table.mapping,
- {'c_t': ['c', 't'], 'f_f_i': ['f', 'f', 'i']})
+ self.assertEqual(table.mapping, {"c_t": ["c", "t"], "f_f_i": ["f", "f", "i"]})
def test_fromXML_oldFormat_bug385(self):
# https://github.com/fonttools/fonttools/issues/385
table = otTables.MultipleSubst()
table.Format = 1
for name, attrs, content in parseXML(
- '<Coverage>'
- ' <Glyph value="o"/>'
- ' <Glyph value="l"/>'
- '</Coverage>'
- '<Sequence>'
- ' <Substitute value="o"/>'
- ' <Substitute value="l"/>'
- ' <Substitute value="o"/>'
- '</Sequence>'
- '<Sequence>'
- ' <Substitute value="o"/>'
- '</Sequence>'):
+ "<Coverage>"
+ ' <Glyph value="o"/>'
+ ' <Glyph value="l"/>'
+ "</Coverage>"
+ "<Sequence>"
+ ' <Substitute value="o"/>'
+ ' <Substitute value="l"/>'
+ ' <Substitute value="o"/>'
+ "</Sequence>"
+ "<Sequence>"
+ ' <Substitute value="o"/>'
+ "</Sequence>"
+ ):
table.fromXML(name, attrs, content, self.font)
- self.assertEqual(table.mapping,
- {'o': ['o', 'l', 'o'], 'l': ['o']})
+ self.assertEqual(table.mapping, {"o": ["o", "l", "o"], "l": ["o"]})
class LigatureSubstTest(unittest.TestCase):
@@ -210,7 +208,7 @@ class LigatureSubstTest(unittest.TestCase):
ligs_f.Ligature = self.makeLigatures("ffi ff fi")
rawTable = {
"Coverage": makeCoverage(["c", "f"]),
- "LigatureSet": [ligs_c, ligs_f]
+ "LigatureSet": [ligs_c, ligs_f],
}
table.postRead(rawTable, self.font)
self.assertEqual(set(table.ligatures.keys()), {"c", "f"})
@@ -235,7 +233,7 @@ class LigatureSubstTest(unittest.TestCase):
table = otTables.LigatureSubst()
table.ligatures = {
"c": self.makeLigatures("ct"),
- "f": self.makeLigatures("ffi ff fi")
+ "f": self.makeLigatures("ffi ff fi"),
}
rawTable = table.preWrite(self.font)
self.assertEqual(table.Format, 1)
@@ -263,27 +261,31 @@ class LigatureSubstTest(unittest.TestCase):
table = otTables.LigatureSubst()
table.ligatures = {
"c": self.makeLigatures("ct"),
- "f": self.makeLigatures("ffi ff fi")
+ "f": self.makeLigatures("ffi ff fi"),
}
table.toXML2(writer, self.font)
- self.assertEqual(writer.file.getvalue().splitlines()[1:], [
- '<LigatureSet glyph="c">',
- ' <Ligature components="c,t" glyph="c_t"/>',
- '</LigatureSet>',
- '<LigatureSet glyph="f">',
- ' <Ligature components="f,f,i" glyph="f_f_i"/>',
- ' <Ligature components="f,f" glyph="f_f"/>',
- ' <Ligature components="f,i" glyph="f_i"/>',
- '</LigatureSet>'
- ])
+ self.assertEqual(
+ writer.file.getvalue().splitlines()[1:],
+ [
+ '<LigatureSet glyph="c">',
+ ' <Ligature components="c,t" glyph="c_t"/>',
+ "</LigatureSet>",
+ '<LigatureSet glyph="f">',
+ ' <Ligature components="f,f,i" glyph="f_f_i"/>',
+ ' <Ligature components="f,f" glyph="f_f"/>',
+ ' <Ligature components="f,i" glyph="f_i"/>',
+ "</LigatureSet>",
+ ],
+ )
def test_fromXML(self):
table = otTables.LigatureSubst()
for name, attrs, content in parseXML(
- '<LigatureSet glyph="f">'
- ' <Ligature components="f,f,i" glyph="f_f_i"/>'
- ' <Ligature components="f,f" glyph="f_f"/>'
- '</LigatureSet>'):
+ '<LigatureSet glyph="f">'
+ ' <Ligature components="f,f,i" glyph="f_f_i"/>'
+ ' <Ligature components="f,f" glyph="f_f"/>'
+ "</LigatureSet>"
+ ):
table.fromXML(name, attrs, content, self.font)
self.assertEqual(set(table.ligatures.keys()), {"f"})
[ffi, ff] = table.ligatures["f"]
@@ -310,14 +312,11 @@ class AlternateSubstTest(unittest.TestCase):
"Coverage": makeCoverage(["G", "Z"]),
"AlternateSet": [
self.makeAlternateSet("G.alt2 G.alt1"),
- self.makeAlternateSet("Z.fina")
- ]
+ self.makeAlternateSet("Z.fina"),
+ ],
}
table.postRead(rawTable, self.font)
- self.assertEqual(table.alternates, {
- "G": ["G.alt2", "G.alt1"],
- "Z": ["Z.fina"]
- })
+ self.assertEqual(table.alternates, {"G": ["G.alt2", "G.alt1"], "Z": ["Z.fina"]})
def test_postRead_formatUnknown(self):
table = otTables.AlternateSubst()
@@ -341,36 +340,37 @@ class AlternateSubstTest(unittest.TestCase):
table = otTables.AlternateSubst()
table.alternates = {"G": ["G.alt2", "G.alt1"], "Z": ["Z.fina"]}
table.toXML2(writer, self.font)
- self.assertEqual(writer.file.getvalue().splitlines()[1:], [
- '<AlternateSet glyph="G">',
- ' <Alternate glyph="G.alt2"/>',
- ' <Alternate glyph="G.alt1"/>',
- '</AlternateSet>',
- '<AlternateSet glyph="Z">',
- ' <Alternate glyph="Z.fina"/>',
- '</AlternateSet>'
- ])
+ self.assertEqual(
+ writer.file.getvalue().splitlines()[1:],
+ [
+ '<AlternateSet glyph="G">',
+ ' <Alternate glyph="G.alt2"/>',
+ ' <Alternate glyph="G.alt1"/>',
+ "</AlternateSet>",
+ '<AlternateSet glyph="Z">',
+ ' <Alternate glyph="Z.fina"/>',
+ "</AlternateSet>",
+ ],
+ )
def test_fromXML(self):
table = otTables.AlternateSubst()
for name, attrs, content in parseXML(
- '<AlternateSet glyph="G">'
- ' <Alternate glyph="G.alt2"/>'
- ' <Alternate glyph="G.alt1"/>'
- '</AlternateSet>'
- '<AlternateSet glyph="Z">'
- ' <Alternate glyph="Z.fina"/>'
- '</AlternateSet>'):
+ '<AlternateSet glyph="G">'
+ ' <Alternate glyph="G.alt2"/>'
+ ' <Alternate glyph="G.alt1"/>'
+ "</AlternateSet>"
+ '<AlternateSet glyph="Z">'
+ ' <Alternate glyph="Z.fina"/>'
+ "</AlternateSet>"
+ ):
table.fromXML(name, attrs, content, self.font)
- self.assertEqual(table.alternates, {
- "G": ["G.alt2", "G.alt1"],
- "Z": ["Z.fina"]
- })
+ self.assertEqual(table.alternates, {"G": ["G.alt2", "G.alt1"], "Z": ["Z.fina"]})
class RearrangementMorphActionTest(unittest.TestCase):
def setUp(self):
- self.font = FakeFont(['.notdef', 'A', 'B', 'C'])
+ self.font = FakeFont([".notdef", "A", "B", "C"])
def testCompile(self):
r = otTables.RearrangementMorphAction()
@@ -387,22 +387,24 @@ class RearrangementMorphActionTest(unittest.TestCase):
def testDecompileToXML(self):
r = otTables.RearrangementMorphAction()
- r.decompile(OTTableReader(deHexStr("1234fffd")),
- self.font, actionReader=None)
+ r.decompile(OTTableReader(deHexStr("1234fffd")), self.font, actionReader=None)
toXML = lambda w, f: r.toXML(w, f, {"Test": "Foo"}, "Transition")
- self.assertEqual(getXML(toXML, self.font), [
+ self.assertEqual(
+ getXML(toXML, self.font),
+ [
'<Transition Test="Foo">',
' <NewState value="4660"/>', # 0x1234 = 4660
' <Flags value="MarkFirst,DontAdvance,MarkLast"/>',
' <ReservedFlags value="0x1FF0"/>',
' <Verb value="13"/><!-- ABxCD ⇒ CDxBA -->',
- '</Transition>',
- ])
+ "</Transition>",
+ ],
+ )
class ContextualMorphActionTest(unittest.TestCase):
def setUp(self):
- self.font = FakeFont(['.notdef', 'A', 'B', 'C'])
+ self.font = FakeFont([".notdef", "A", "B", "C"])
def testCompile(self):
a = otTables.ContextualMorphAction()
@@ -419,50 +421,55 @@ class ContextualMorphActionTest(unittest.TestCase):
def testDecompileToXML(self):
a = otTables.ContextualMorphAction()
- a.decompile(OTTableReader(deHexStr("1234f117deadbeef")),
- self.font, actionReader=None)
+ a.decompile(
+ OTTableReader(deHexStr("1234f117deadbeef")), self.font, actionReader=None
+ )
toXML = lambda w, f: a.toXML(w, f, {"Test": "Foo"}, "Transition")
- self.assertEqual(getXML(toXML, self.font), [
+ self.assertEqual(
+ getXML(toXML, self.font),
+ [
'<Transition Test="Foo">',
' <NewState value="4660"/>', # 0x1234 = 4660
' <Flags value="SetMark,DontAdvance"/>',
' <ReservedFlags value="0x3117"/>',
' <MarkIndex value="57005"/>', # 0xDEAD = 57005
' <CurrentIndex value="48879"/>', # 0xBEEF = 48879
- '</Transition>',
- ])
+ "</Transition>",
+ ],
+ )
class LigatureMorphActionTest(unittest.TestCase):
def setUp(self):
- self.font = FakeFont(['.notdef', 'A', 'B', 'C'])
+ self.font = FakeFont([".notdef", "A", "B", "C"])
def testDecompileToXML(self):
a = otTables.LigatureMorphAction()
actionReader = OTTableReader(deHexStr("DEADBEEF 7FFFFFFE 80000003"))
- a.decompile(OTTableReader(deHexStr("1234FAB30001")),
- self.font, actionReader)
+ a.decompile(OTTableReader(deHexStr("1234FAB30001")), self.font, actionReader)
toXML = lambda w, f: a.toXML(w, f, {"Test": "Foo"}, "Transition")
- self.assertEqual(getXML(toXML, self.font), [
+ self.assertEqual(
+ getXML(toXML, self.font),
+ [
'<Transition Test="Foo">',
' <NewState value="4660"/>', # 0x1234 = 4660
' <Flags value="SetComponent,DontAdvance"/>',
' <ReservedFlags value="0x1AB3"/>',
' <Action GlyphIndexDelta="-2" Flags="Store"/>',
' <Action GlyphIndexDelta="3"/>',
- '</Transition>',
- ])
+ "</Transition>",
+ ],
+ )
def testCompileActions_empty(self):
act = otTables.LigatureMorphAction()
actions, actionIndex = act.compileActions(self.font, [])
- self.assertEqual(actions, b'')
+ self.assertEqual(actions, b"")
self.assertEqual(actionIndex, {})
def testCompileActions_shouldShareSubsequences(self):
state = otTables.AATState()
- t = state.Transitions = {i: otTables.LigatureMorphAction()
- for i in range(3)}
+ t = state.Transitions = {i: otTables.LigatureMorphAction() for i in range(3)}
ligs = [otTables.LigAction() for _ in range(3)]
for i, lig in enumerate(ligs):
lig.GlyphIndexDelta = i
@@ -470,14 +477,16 @@ class LigatureMorphActionTest(unittest.TestCase):
t[1].Actions = ligs[0:3]
t[2].Actions = ligs[1:3]
actions, actionIndex = t[0].compileActions(self.font, [state])
- self.assertEqual(actions,
- deHexStr("00000000 00000001 80000002 80000001"))
- self.assertEqual(actionIndex, {
- deHexStr("00000000 00000001 80000002"): 0,
- deHexStr("00000001 80000002"): 1,
- deHexStr("80000002"): 2,
- deHexStr("80000001"): 3,
- })
+ self.assertEqual(actions, deHexStr("00000000 00000001 80000002 80000001"))
+ self.assertEqual(
+ actionIndex,
+ {
+ deHexStr("00000000 00000001 80000002"): 0,
+ deHexStr("00000001 80000002"): 1,
+ deHexStr("80000002"): 2,
+ deHexStr("80000001"): 3,
+ },
+ )
class InsertionMorphActionTest(unittest.TestCase):
@@ -485,25 +494,27 @@ class InsertionMorphActionTest(unittest.TestCase):
'<Transition Test="Foo">',
' <NewState value="4660"/>', # 0x1234 = 4660
' <Flags value="SetMark,DontAdvance,CurrentIsKashidaLike,'
- 'MarkedIsKashidaLike,CurrentInsertBefore,MarkedInsertBefore"/>',
+ 'MarkedIsKashidaLike,CurrentInsertBefore,MarkedInsertBefore"/>',
' <CurrentInsertionAction glyph="B"/>',
' <CurrentInsertionAction glyph="C"/>',
' <MarkedInsertionAction glyph="B"/>',
' <MarkedInsertionAction glyph="A"/>',
' <MarkedInsertionAction glyph="D"/>',
- '</Transition>'
+ "</Transition>",
]
def setUp(self):
- self.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D'])
+ self.font = FakeFont([".notdef", "A", "B", "C", "D"])
self.maxDiff = None
def testDecompileToXML(self):
a = otTables.InsertionMorphAction()
actionReader = OTTableReader(
- deHexStr("DEAD BEEF 0002 0001 0004 0002 0003 DEAD BEEF"))
- a.decompile(OTTableReader(deHexStr("1234 FC43 0005 0002")),
- self.font, actionReader)
+ deHexStr("DEAD BEEF 0002 0001 0004 0002 0003 DEAD BEEF")
+ )
+ a.decompile(
+ OTTableReader(deHexStr("1234 FC43 0005 0002")), self.font, actionReader
+ )
toXML = lambda w, f: a.toXML(w, f, {"Test": "Foo"}, "Transition")
self.assertEqual(getXML(toXML, self.font), self.MORPH_ACTION_XML)
@@ -515,37 +526,39 @@ class InsertionMorphActionTest(unittest.TestCase):
a.compile(
writer,
self.font,
- actionIndex={('B', 'C'): 9, ('B', 'A', 'D'): 7},
+ actionIndex={("B", "C"): 9, ("B", "A", "D"): 7},
)
self.assertEqual(hexStr(writer.getAllData()), "1234fc4300090007")
def testCompileActions_empty(self):
act = otTables.InsertionMorphAction()
actions, actionIndex = act.compileActions(self.font, [])
- self.assertEqual(actions, b'')
+ self.assertEqual(actions, b"")
self.assertEqual(actionIndex, {})
def testCompileActions_shouldShareSubsequences(self):
state = otTables.AATState()
- t = state.Transitions = {i: otTables.InsertionMorphAction()
- for i in range(3)}
+ t = state.Transitions = {i: otTables.InsertionMorphAction() for i in range(3)}
t[1].CurrentInsertionAction = []
- t[0].MarkedInsertionAction = ['A']
- t[1].CurrentInsertionAction = ['C', 'D']
- t[1].MarkedInsertionAction = ['B']
- t[2].CurrentInsertionAction = ['B', 'C', 'D']
- t[2].MarkedInsertionAction = ['C', 'D']
+ t[0].MarkedInsertionAction = ["A"]
+ t[1].CurrentInsertionAction = ["C", "D"]
+ t[1].MarkedInsertionAction = ["B"]
+ t[2].CurrentInsertionAction = ["B", "C", "D"]
+ t[2].MarkedInsertionAction = ["C", "D"]
actions, actionIndex = t[0].compileActions(self.font, [state])
- self.assertEqual(actions, deHexStr('0002 0003 0004 0001'))
- self.assertEqual(actionIndex, {
- ('A',): 3,
- ('B',): 0,
- ('B', 'C'): 0,
- ('B', 'C', 'D'): 0,
- ('C',): 1,
- ('C', 'D'): 1,
- ('D',): 2,
- })
+ self.assertEqual(actions, deHexStr("0002 0003 0004 0001"))
+ self.assertEqual(
+ actionIndex,
+ {
+ ("A",): 3,
+ ("B",): 0,
+ ("B", "C"): 0,
+ ("B", "C", "D"): 0,
+ ("C",): 1,
+ ("C", "D"): 1,
+ ("D",): 2,
+ },
+ )
class SplitMultipleSubstTest:
@@ -553,28 +566,34 @@ class SplitMultipleSubstTest:
from fontTools.otlLib.builder import buildMultipleSubstSubtable
from fontTools.ttLib.tables.otBase import OverflowErrorRecord
- oldSubTable = buildMultipleSubstSubtable({'e': 1, 'a': 2, 'b': 3, 'c': 4, 'd': 5})
+ oldSubTable = buildMultipleSubstSubtable(
+ {"e": 1, "a": 2, "b": 3, "c": 4, "d": 5}
+ )
newSubTable = otTables.MultipleSubst()
- ok = otTables.splitMultipleSubst(oldSubTable, newSubTable, OverflowErrorRecord((None, None, None, itemName, itemRecord)))
+ ok = otTables.splitMultipleSubst(
+ oldSubTable,
+ newSubTable,
+ OverflowErrorRecord((None, None, None, itemName, itemRecord)),
+ )
assert ok
return oldSubTable.mapping, newSubTable.mapping
def test_Coverage(self):
- oldMapping, newMapping = self.overflow('Coverage', None)
- assert oldMapping == {'a': 2, 'b': 3}
- assert newMapping == {'c': 4, 'd': 5, 'e': 1}
+ oldMapping, newMapping = self.overflow("Coverage", None)
+ assert oldMapping == {"a": 2, "b": 3}
+ assert newMapping == {"c": 4, "d": 5, "e": 1}
def test_RangeRecord(self):
- oldMapping, newMapping = self.overflow('RangeRecord', None)
- assert oldMapping == {'a': 2, 'b': 3}
- assert newMapping == {'c': 4, 'd': 5, 'e': 1}
+ oldMapping, newMapping = self.overflow("RangeRecord", None)
+ assert oldMapping == {"a": 2, "b": 3}
+ assert newMapping == {"c": 4, "d": 5, "e": 1}
def test_Sequence(self):
- oldMapping, newMapping = self.overflow('Sequence', 4)
- assert oldMapping == {'a': 2, 'b': 3,'c': 4}
- assert newMapping == {'d': 5, 'e': 1}
+ oldMapping, newMapping = self.overflow("Sequence", 4)
+ assert oldMapping == {"a": 2, "b": 3, "c": 4}
+ assert newMapping == {"d": 5, "e": 1}
def test_splitMarkBasePos():
@@ -607,95 +626,95 @@ def test_splitMarkBasePos():
assert getXML(oldSubTable.toXML) == [
'<MarkBasePos Format="1">',
- ' <MarkCoverage>',
+ " <MarkCoverage>",
' <Glyph value="acutecomb"/>',
' <Glyph value="gravecomb"/>',
- ' </MarkCoverage>',
- ' <BaseCoverage>',
+ " </MarkCoverage>",
+ " <BaseCoverage>",
' <Glyph value="a"/>',
' <Glyph value="c"/>',
- ' </BaseCoverage>',
- ' <!-- ClassCount=1 -->',
- ' <MarkArray>',
- ' <!-- MarkCount=2 -->',
+ " </BaseCoverage>",
+ " <!-- ClassCount=1 -->",
+ " <MarkArray>",
+ " <!-- MarkCount=2 -->",
' <MarkRecord index="0">',
' <Class value="0"/>',
' <MarkAnchor Format="1">',
' <XCoordinate value="0"/>',
' <YCoordinate value="600"/>',
- ' </MarkAnchor>',
- ' </MarkRecord>',
+ " </MarkAnchor>",
+ " </MarkRecord>",
' <MarkRecord index="1">',
' <Class value="0"/>',
' <MarkAnchor Format="1">',
' <XCoordinate value="0"/>',
' <YCoordinate value="590"/>',
- ' </MarkAnchor>',
- ' </MarkRecord>',
- ' </MarkArray>',
- ' <BaseArray>',
- ' <!-- BaseCount=2 -->',
+ " </MarkAnchor>",
+ " </MarkRecord>",
+ " </MarkArray>",
+ " <BaseArray>",
+ " <!-- BaseCount=2 -->",
' <BaseRecord index="0">',
' <BaseAnchor index="0" Format="1">',
' <XCoordinate value="350"/>',
' <YCoordinate value="500"/>',
- ' </BaseAnchor>',
- ' </BaseRecord>',
+ " </BaseAnchor>",
+ " </BaseRecord>",
' <BaseRecord index="1">',
' <BaseAnchor index="0" Format="1">',
' <XCoordinate value="300"/>',
' <YCoordinate value="700"/>',
- ' </BaseAnchor>',
- ' </BaseRecord>',
- ' </BaseArray>',
- '</MarkBasePos>',
+ " </BaseAnchor>",
+ " </BaseRecord>",
+ " </BaseArray>",
+ "</MarkBasePos>",
]
assert getXML(newSubTable.toXML) == [
'<MarkBasePos Format="1">',
- ' <MarkCoverage>',
+ " <MarkCoverage>",
' <Glyph value="cedillacomb"/>',
- ' </MarkCoverage>',
- ' <BaseCoverage>',
+ " </MarkCoverage>",
+ " <BaseCoverage>",
' <Glyph value="a"/>',
' <Glyph value="c"/>',
- ' </BaseCoverage>',
- ' <!-- ClassCount=1 -->',
- ' <MarkArray>',
- ' <!-- MarkCount=1 -->',
+ " </BaseCoverage>",
+ " <!-- ClassCount=1 -->",
+ " <MarkArray>",
+ " <!-- MarkCount=1 -->",
' <MarkRecord index="0">',
' <Class value="0"/>',
' <MarkAnchor Format="1">',
' <XCoordinate value="0"/>',
' <YCoordinate value="0"/>',
- ' </MarkAnchor>',
- ' </MarkRecord>',
- ' </MarkArray>',
- ' <BaseArray>',
- ' <!-- BaseCount=2 -->',
+ " </MarkAnchor>",
+ " </MarkRecord>",
+ " </MarkArray>",
+ " <BaseArray>",
+ " <!-- BaseCount=2 -->",
' <BaseRecord index="0">',
' <BaseAnchor index="0" empty="1"/>',
- ' </BaseRecord>',
+ " </BaseRecord>",
' <BaseRecord index="1">',
' <BaseAnchor index="0" Format="1">',
' <XCoordinate value="300"/>',
' <YCoordinate value="0"/>',
- ' </BaseAnchor>',
- ' </BaseRecord>',
- ' </BaseArray>',
- '</MarkBasePos>',
+ " </BaseAnchor>",
+ " </BaseRecord>",
+ " </BaseArray>",
+ "</MarkBasePos>",
]
class ColrV1Test(unittest.TestCase):
- def setUp(self):
- self.font = FakeFont(['.notdef', 'meh'])
-
- def test_traverseEmptyPaintColrLayersNeedsNoLayerList(self):
- colr = parseXmlInto(
- self.font,
- otTables.COLR(),
- '''
+ def setUp(self):
+ self.font = FakeFont([".notdef", "meh"])
+
+ def test_traverseEmptyPaintColrLayersNeedsNoLayerList(self):
+ colr = parseXmlInto(
+ self.font,
+ otTables.COLR(),
+ """
<Version value="1"/>
<BaseGlyphList>
<BaseGlyphPaintRecord index="0">
@@ -706,16 +725,17 @@ class ColrV1Test(unittest.TestCase):
</Paint>
</BaseGlyphPaintRecord>
</BaseGlyphList>
- ''',
- )
- paint = colr.BaseGlyphList.BaseGlyphPaintRecord[0].Paint
+ """,
+ )
+ paint = colr.BaseGlyphList.BaseGlyphPaintRecord[0].Paint
- # Just want to confirm we don't crash
- visited = []
- paint.traverse(colr, lambda p: visited.append(p))
- assert len(visited) == 1
+ # Just want to confirm we don't crash
+ visited = []
+ paint.traverse(colr, lambda p: visited.append(p))
+ assert len(visited) == 1
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/tables/tables_test.py b/Tests/ttLib/tables/tables_test.py
index be8c63e3..816385b6 100644
--- a/Tests/ttLib/tables/tables_test.py
+++ b/Tests/ttLib/tables/tables_test.py
@@ -14,225 +14,234 @@ except ImportError:
else:
# on 3.6 the built-in unicodedata is the same as unicodedata2 backport
import unicodedata
+
unicodedata2 = unicodedata
# Font files in data/*.{o,t}tf; output gets compared to data/*.ttx.*
TESTS = {
- "aots/base.otf": ('CFF ', 'cmap', 'head',
- 'hhea', 'hmtx', 'maxp',
- 'name', 'OS/2', 'post'),
- "aots/classdef1_font1.otf": ('GSUB',),
- "aots/classdef1_font2.otf": ('GSUB',),
- "aots/classdef1_font3.otf": ('GSUB',),
- "aots/classdef1_font4.otf": ('GSUB',),
- "aots/classdef2_font1.otf": ('GSUB',),
- "aots/classdef2_font2.otf": ('GSUB',),
- "aots/classdef2_font3.otf": ('GSUB',),
- "aots/classdef2_font4.otf": ('GSUB',),
- "aots/cmap0_font1.otf": ('cmap',),
- "aots/cmap10_font1.otf": ('cmap',),
- "aots/cmap10_font2.otf": ('cmap',),
- "aots/cmap12_font1.otf": ('cmap',),
- "aots/cmap14_font1.otf": ('cmap',),
- "aots/cmap2_font1.otf": ('cmap',),
- "aots/cmap4_font1.otf": ('cmap',),
- "aots/cmap4_font2.otf": ('cmap',),
- "aots/cmap4_font3.otf": ('cmap',),
- "aots/cmap4_font4.otf": ('cmap',),
- "aots/cmap6_font1.otf": ('cmap',),
- "aots/cmap6_font2.otf": ('cmap',),
- "aots/cmap8_font1.otf": ('cmap',),
- "aots/cmap_composition_font1.otf": ('cmap',),
- "aots/cmap_subtableselection_font1.otf": ('cmap',),
- "aots/cmap_subtableselection_font2.otf": ('cmap',),
- "aots/cmap_subtableselection_font3.otf": ('cmap',),
- "aots/cmap_subtableselection_font4.otf": ('cmap',),
- "aots/cmap_subtableselection_font5.otf": ('cmap',),
- "aots/gpos1_1_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos1_1_simple_f1.otf": ('GPOS',),
- "aots/gpos1_1_simple_f2.otf": ('GPOS',),
- "aots/gpos1_1_simple_f3.otf": ('GPOS',),
- "aots/gpos1_1_simple_f4.otf": ('GPOS',),
- "aots/gpos1_2_font1.otf": ('GPOS',),
- "aots/gpos1_2_font2.otf": ('GDEF', 'GPOS'),
- "aots/gpos2_1_font6.otf": ('GPOS',),
- "aots/gpos2_1_font7.otf": ('GPOS',),
- "aots/gpos2_1_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos2_1_lookupflag_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos2_1_next_glyph_f1.otf": ('GPOS',),
- "aots/gpos2_1_next_glyph_f2.otf": ('GPOS',),
- "aots/gpos2_1_simple_f1.otf": ('GPOS',),
- "aots/gpos2_2_font1.otf": ('GPOS',),
- "aots/gpos2_2_font2.otf": ('GDEF', 'GPOS'),
- "aots/gpos2_2_font3.otf": ('GDEF', 'GPOS'),
- "aots/gpos2_2_font4.otf": ('GPOS',),
- "aots/gpos2_2_font5.otf": ('GPOS',),
- "aots/gpos3_font1.otf": ('GPOS',),
- "aots/gpos3_font2.otf": ('GDEF', 'GPOS'),
- "aots/gpos3_font3.otf": ('GDEF', 'GPOS'),
- "aots/gpos4_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos4_lookupflag_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos4_multiple_anchors_1.otf": ('GDEF', 'GPOS'),
- "aots/gpos4_simple_1.otf": ('GDEF', 'GPOS'),
- "aots/gpos5_font1.otf": ('GDEF', 'GPOS', 'GSUB'),
- "aots/gpos6_font1.otf": ('GDEF', 'GPOS'),
- "aots/gpos7_1_font1.otf": ('GPOS',),
- "aots/gpos9_font1.otf": ('GPOS',),
- "aots/gpos9_font2.otf": ('GPOS',),
- "aots/gpos_chaining1_boundary_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_boundary_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_boundary_f3.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_boundary_f4.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_multiple_subrules_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_multiple_subrules_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_next_glyph_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_simple_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_simple_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining1_successive_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_boundary_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_boundary_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_boundary_f3.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_boundary_f4.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_multiple_subrules_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_multiple_subrules_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_next_glyph_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_simple_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_simple_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining2_successive_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_boundary_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_boundary_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_boundary_f3.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_boundary_f4.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_next_glyph_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_simple_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_simple_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_chaining3_successive_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_boundary_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_boundary_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_expansion_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_lookupflag_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_multiple_subrules_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_multiple_subrules_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_next_glyph_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_simple_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_simple_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context1_successive_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_boundary_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_boundary_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_classes_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_classes_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_expansion_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_lookupflag_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_multiple_subrules_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_multiple_subrules_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_next_glyph_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_simple_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_simple_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context2_successive_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context3_boundary_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context3_boundary_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context3_lookupflag_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context3_lookupflag_f2.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context3_next_glyph_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context3_simple_f1.otf": ('GDEF', 'GPOS'),
- "aots/gpos_context3_successive_f1.otf": ('GDEF', 'GPOS'),
- "aots/gsub1_1_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub1_1_modulo_f1.otf": ('GSUB',),
- "aots/gsub1_1_simple_f1.otf": ('GSUB',),
- "aots/gsub1_2_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub1_2_simple_f1.otf": ('GSUB',),
- "aots/gsub2_1_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub2_1_multiple_sequences_f1.otf": ('GSUB',),
- "aots/gsub2_1_simple_f1.otf": ('GSUB',),
- "aots/gsub3_1_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub3_1_multiple_f1.otf": ('GSUB',),
- "aots/gsub3_1_simple_f1.otf": ('GSUB',),
- "aots/gsub4_1_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub4_1_multiple_ligatures_f1.otf": ('GSUB',),
- "aots/gsub4_1_multiple_ligatures_f2.otf": ('GSUB',),
- "aots/gsub4_1_multiple_ligsets_f1.otf": ('GSUB',),
- "aots/gsub4_1_simple_f1.otf": ('GSUB',),
- "aots/gsub7_font1.otf": ('GSUB',),
- "aots/gsub7_font2.otf": ('GSUB',),
- "aots/gsub_chaining1_boundary_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_boundary_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_boundary_f3.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_boundary_f4.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_multiple_subrules_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_multiple_subrules_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_next_glyph_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_simple_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_simple_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining1_successive_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_boundary_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_boundary_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_boundary_f3.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_boundary_f4.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_multiple_subrules_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_multiple_subrules_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_next_glyph_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_simple_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_simple_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining2_successive_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_boundary_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_boundary_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_boundary_f3.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_boundary_f4.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_next_glyph_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_simple_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_simple_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_chaining3_successive_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_boundary_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_boundary_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_expansion_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_lookupflag_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_multiple_subrules_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_multiple_subrules_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_next_glyph_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_simple_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_simple_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context1_successive_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_boundary_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_boundary_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_classes_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_classes_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_expansion_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_lookupflag_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_multiple_subrules_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_multiple_subrules_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_next_glyph_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_simple_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_simple_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context2_successive_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context3_boundary_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context3_boundary_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context3_lookupflag_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context3_lookupflag_f2.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context3_next_glyph_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context3_simple_f1.otf": ('GDEF', 'GSUB'),
- "aots/gsub_context3_successive_f1.otf": ('GDEF', 'GSUB'),
- "aots/lookupflag_ignore_attach_f1.otf": ('GDEF', 'GSUB'),
- "aots/lookupflag_ignore_base_f1.otf": ('GDEF', 'GSUB'),
- "aots/lookupflag_ignore_combination_f1.otf": ('GDEF', 'GSUB'),
- "aots/lookupflag_ignore_ligatures_f1.otf": ('GDEF', 'GSUB'),
- "aots/lookupflag_ignore_marks_f1.otf": ('GDEF', 'GSUB'),
- "graphite/graphite_tests.ttf": ('Silf', 'Glat', 'Feat', 'Sill'),
+ "aots/base.otf": (
+ "CFF ",
+ "cmap",
+ "head",
+ "hhea",
+ "hmtx",
+ "maxp",
+ "name",
+ "OS/2",
+ "post",
+ ),
+ "aots/classdef1_font1.otf": ("GSUB",),
+ "aots/classdef1_font2.otf": ("GSUB",),
+ "aots/classdef1_font3.otf": ("GSUB",),
+ "aots/classdef1_font4.otf": ("GSUB",),
+ "aots/classdef2_font1.otf": ("GSUB",),
+ "aots/classdef2_font2.otf": ("GSUB",),
+ "aots/classdef2_font3.otf": ("GSUB",),
+ "aots/classdef2_font4.otf": ("GSUB",),
+ "aots/cmap0_font1.otf": ("cmap",),
+ "aots/cmap10_font1.otf": ("cmap",),
+ "aots/cmap10_font2.otf": ("cmap",),
+ "aots/cmap12_font1.otf": ("cmap",),
+ "aots/cmap14_font1.otf": ("cmap",),
+ "aots/cmap2_font1.otf": ("cmap",),
+ "aots/cmap4_font1.otf": ("cmap",),
+ "aots/cmap4_font2.otf": ("cmap",),
+ "aots/cmap4_font3.otf": ("cmap",),
+ "aots/cmap4_font4.otf": ("cmap",),
+ "aots/cmap6_font1.otf": ("cmap",),
+ "aots/cmap6_font2.otf": ("cmap",),
+ "aots/cmap8_font1.otf": ("cmap",),
+ "aots/cmap_composition_font1.otf": ("cmap",),
+ "aots/cmap_subtableselection_font1.otf": ("cmap",),
+ "aots/cmap_subtableselection_font2.otf": ("cmap",),
+ "aots/cmap_subtableselection_font3.otf": ("cmap",),
+ "aots/cmap_subtableselection_font4.otf": ("cmap",),
+ "aots/cmap_subtableselection_font5.otf": ("cmap",),
+ "aots/gpos1_1_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos1_1_simple_f1.otf": ("GPOS",),
+ "aots/gpos1_1_simple_f2.otf": ("GPOS",),
+ "aots/gpos1_1_simple_f3.otf": ("GPOS",),
+ "aots/gpos1_1_simple_f4.otf": ("GPOS",),
+ "aots/gpos1_2_font1.otf": ("GPOS",),
+ "aots/gpos1_2_font2.otf": ("GDEF", "GPOS"),
+ "aots/gpos2_1_font6.otf": ("GPOS",),
+ "aots/gpos2_1_font7.otf": ("GPOS",),
+ "aots/gpos2_1_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos2_1_lookupflag_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos2_1_next_glyph_f1.otf": ("GPOS",),
+ "aots/gpos2_1_next_glyph_f2.otf": ("GPOS",),
+ "aots/gpos2_1_simple_f1.otf": ("GPOS",),
+ "aots/gpos2_2_font1.otf": ("GPOS",),
+ "aots/gpos2_2_font2.otf": ("GDEF", "GPOS"),
+ "aots/gpos2_2_font3.otf": ("GDEF", "GPOS"),
+ "aots/gpos2_2_font4.otf": ("GPOS",),
+ "aots/gpos2_2_font5.otf": ("GPOS",),
+ "aots/gpos3_font1.otf": ("GPOS",),
+ "aots/gpos3_font2.otf": ("GDEF", "GPOS"),
+ "aots/gpos3_font3.otf": ("GDEF", "GPOS"),
+ "aots/gpos4_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos4_lookupflag_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos4_multiple_anchors_1.otf": ("GDEF", "GPOS"),
+ "aots/gpos4_simple_1.otf": ("GDEF", "GPOS"),
+ "aots/gpos5_font1.otf": ("GDEF", "GPOS", "GSUB"),
+ "aots/gpos6_font1.otf": ("GDEF", "GPOS"),
+ "aots/gpos7_1_font1.otf": ("GPOS",),
+ "aots/gpos9_font1.otf": ("GPOS",),
+ "aots/gpos9_font2.otf": ("GPOS",),
+ "aots/gpos_chaining1_boundary_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_boundary_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_boundary_f3.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_boundary_f4.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_multiple_subrules_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_multiple_subrules_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_next_glyph_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_simple_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_simple_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining1_successive_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_boundary_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_boundary_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_boundary_f3.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_boundary_f4.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_multiple_subrules_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_multiple_subrules_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_next_glyph_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_simple_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_simple_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining2_successive_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_boundary_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_boundary_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_boundary_f3.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_boundary_f4.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_next_glyph_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_simple_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_simple_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_chaining3_successive_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_boundary_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_boundary_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_expansion_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_lookupflag_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_multiple_subrules_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_multiple_subrules_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_next_glyph_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_simple_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_simple_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context1_successive_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_boundary_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_boundary_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_classes_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_classes_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_expansion_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_lookupflag_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_multiple_subrules_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_multiple_subrules_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_next_glyph_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_simple_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_simple_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context2_successive_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context3_boundary_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context3_boundary_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context3_lookupflag_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context3_lookupflag_f2.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context3_next_glyph_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context3_simple_f1.otf": ("GDEF", "GPOS"),
+ "aots/gpos_context3_successive_f1.otf": ("GDEF", "GPOS"),
+ "aots/gsub1_1_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub1_1_modulo_f1.otf": ("GSUB",),
+ "aots/gsub1_1_simple_f1.otf": ("GSUB",),
+ "aots/gsub1_2_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub1_2_simple_f1.otf": ("GSUB",),
+ "aots/gsub2_1_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub2_1_multiple_sequences_f1.otf": ("GSUB",),
+ "aots/gsub2_1_simple_f1.otf": ("GSUB",),
+ "aots/gsub3_1_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub3_1_multiple_f1.otf": ("GSUB",),
+ "aots/gsub3_1_simple_f1.otf": ("GSUB",),
+ "aots/gsub4_1_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub4_1_multiple_ligatures_f1.otf": ("GSUB",),
+ "aots/gsub4_1_multiple_ligatures_f2.otf": ("GSUB",),
+ "aots/gsub4_1_multiple_ligsets_f1.otf": ("GSUB",),
+ "aots/gsub4_1_simple_f1.otf": ("GSUB",),
+ "aots/gsub7_font1.otf": ("GSUB",),
+ "aots/gsub7_font2.otf": ("GSUB",),
+ "aots/gsub_chaining1_boundary_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_boundary_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_boundary_f3.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_boundary_f4.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_multiple_subrules_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_multiple_subrules_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_next_glyph_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_simple_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_simple_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining1_successive_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_boundary_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_boundary_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_boundary_f3.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_boundary_f4.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_multiple_subrules_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_multiple_subrules_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_next_glyph_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_simple_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_simple_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining2_successive_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_boundary_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_boundary_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_boundary_f3.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_boundary_f4.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_next_glyph_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_simple_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_simple_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_chaining3_successive_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_boundary_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_boundary_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_expansion_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_lookupflag_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_multiple_subrules_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_multiple_subrules_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_next_glyph_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_simple_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_simple_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context1_successive_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_boundary_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_boundary_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_classes_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_classes_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_expansion_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_lookupflag_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_multiple_subrules_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_multiple_subrules_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_next_glyph_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_simple_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_simple_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context2_successive_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context3_boundary_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context3_boundary_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context3_lookupflag_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context3_lookupflag_f2.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context3_next_glyph_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context3_simple_f1.otf": ("GDEF", "GSUB"),
+ "aots/gsub_context3_successive_f1.otf": ("GDEF", "GSUB"),
+ "aots/lookupflag_ignore_attach_f1.otf": ("GDEF", "GSUB"),
+ "aots/lookupflag_ignore_base_f1.otf": ("GDEF", "GSUB"),
+ "aots/lookupflag_ignore_combination_f1.otf": ("GDEF", "GSUB"),
+ "aots/lookupflag_ignore_ligatures_f1.otf": ("GDEF", "GSUB"),
+ "aots/lookupflag_ignore_marks_f1.otf": ("GDEF", "GSUB"),
+ "graphite/graphite_tests.ttf": ("Silf", "Glat", "Feat", "Sill"),
}
TEST_REQUIREMENTS = {
- "aots/cmap4_font4.otf": ("unicodedata2",),
+ "aots/cmap4_font4.otf": ("unicodedata2",),
}
@@ -247,15 +256,15 @@ def getpath(testfile):
def read_expected_ttx(testfile, tableTag):
name = os.path.splitext(testfile)[0]
xml_expected_path = getpath("%s.ttx.%s" % (name, tagToXML(tableTag)))
- with open(xml_expected_path, 'r', encoding="utf-8") as xml_file:
- xml_expected = ttLibVersion_RE.sub('', xml_file.read())
+ with open(xml_expected_path, "r", encoding="utf-8") as xml_file:
+ xml_expected = ttLibVersion_RE.sub("", xml_file.read())
return xml_expected
def dump_ttx(font, tableTag):
f = StringIO()
font.saveXML(f, tables=[tableTag])
- return ttLibVersion_RE.sub('', f.getvalue())
+ return ttLibVersion_RE.sub("", f.getvalue())
def load_ttx(ttx):
@@ -280,7 +289,7 @@ def _skip_if_requirement_missing(testfile):
if testfile in TEST_REQUIREMENTS:
for req in TEST_REQUIREMENTS[testfile]:
if globals()[req] is None:
- pytest.skip('%s not installed' % req)
+ pytest.skip("%s not installed" % req)
def test_xml_from_binary(testfile, tableTag):
@@ -305,7 +314,7 @@ def test_xml_from_xml(testfile, tableTag):
name = os.path.splitext(testfile)[0]
setupfile = getpath("%s.ttx.%s.setup" % (name, tagToXML(tableTag)))
if os.path.exists(setupfile):
-# import pdb; pdb.set_trace()
+ # import pdb; pdb.set_trace()
font.importXML(setupfile)
xml_from_xml = dump_ttx(font, tableTag)
@@ -317,11 +326,13 @@ def pytest_generate_tests(metafunc):
fixturenames = metafunc.fixturenames
argnames = ("testfile", "tableTag")
if all(fn in fixturenames for fn in argnames):
- argvalues = [(testfile, tableTag)
- for testfile, tableTags in sorted(TESTS.items())
- for tableTag in tableTags]
+ argvalues = [
+ (testfile, tableTag)
+ for testfile, tableTags in sorted(TESTS.items())
+ for tableTag in tableTags
+ ]
metafunc.parametrize(argnames, argvalues)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
diff --git a/Tests/ttLib/tables/ttProgram_test.py b/Tests/ttLib/tables/ttProgram_test.py
index 13d1ba87..10a02958 100644
--- a/Tests/ttLib/tables/ttProgram_test.py
+++ b/Tests/ttLib/tables/ttProgram_test.py
@@ -7,62 +7,70 @@ import os
import unittest
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-DATA_DIR = os.path.join(CURR_DIR, 'data')
+DATA_DIR = os.path.join(CURR_DIR, "data")
TTPROGRAM_TTX = os.path.join(DATA_DIR, "ttProgram.ttx")
-#TTPROGRAM_BIN = os.path.join(DATA_DIR, "ttProgram.bin")
+# TTPROGRAM_BIN = os.path.join(DATA_DIR, "ttProgram.bin")
+
+ASSEMBLY = [
+ "PUSH[ ]",
+ "0 4 3",
+ "INSTCTRL[ ]",
+ "POP[ ]",
+]
BYTECODE = deHexStr(
- '403b3a393837363534333231302f2e2d2c2b2a292827262524232221201f1e1d1c1b1a'
- '191817161514131211100f0e0d0c0b0a090807060504030201002c01b0184358456ab0'
- '194360b0462344231020b0464ef04d2fb000121b21231133592d2c01b0184358b0052b'
- 'b000134bb0145058b100403859b0062b1b21231133592d2c01b01843584eb0032510f2'
- '21b000124d1b2045b00425b00425234a6164b0285258212310d61bb0032510f221b000'
- '1259592d2cb01a435821211bb00225b0022549b00325b003254a612064b01050582121'
- '211bb00325b0032549b0005058b0005058b8ffe238211bb0103821591bb0005258b01e'
- '38211bb8fff03821595959592d2c01b0184358b0052bb000134bb0145058b90000ffc0'
- '3859b0062b1b21231133592d2c4e018a10b146194344b00014b10046e2b00015b90000'
- 'fff03800b0003cb0282bb0022510b0003c2d2c0118b0002fb00114f2b00113b001154d'
- 'b000122d2c01b0184358b0052bb00013b90000ffe038b0062b1b21231133592d2c01b0'
- '18435845646a23456469b01943646060b0462344231020b046f02fb000121b2121208a'
- '208a525811331b212159592d2c01b10b0a432343650a2d2c00b10a0b4323430b2d2c00'
- 'b0462370b101463e01b0462370b10246453ab10200080d2d2cb0122bb0022545b00225'
- '456ab0408b60b0022523442121212d2cb0132bb0022545b00225456ab8ffc08c60b002'
- '2523442121212d2cb000b0122b2121212d2cb000b0132b2121212d2c01b00643b00743'
- '650a2d2c2069b04061b0008b20b12cc08a8cb8100062602b0c642364615c58b0036159'
- '2d2cb1000325456854b01c4b505a58b0032545b0032545606820b004252344b0042523'
- '441bb00325204568208a2344b00325456860b003252344592d2cb00325204568208a23'
- '44b003254564686560b00425b0016023442d2cb00943588721c01bb01243588745b011'
- '2bb0472344b0477ae41b038a45186920b04723448a8a8720b0a05158b0112bb0472344'
- 'b0477ae41b21b0477ae4595959182d2c208a4523456860442d2c456a422d2c01182f2d'
- '2c01b0184358b00425b00425496423456469b0408b6120b080626ab00225b00225618c'
- 'b0194360b0462344218a10b046f6211b21212121592d2c01b0184358b0022545b00225'
- '4564606ab00325456a6120b00425456a208a8b65b0042523448cb00325234421211b20'
- '456a4420456a44592d2c012045b00055b018435a584568234569b0408b6120b080626a'
- '208a236120b003258b65b0042523448cb00325234421211b2121b0192b592d2c018a8a'
- '45642345646164422d2cb00425b00425b0192bb0184358b00425b00425b00325b01b2b'
- '01b0022543b04054b0022543b000545a58b003252045b040614459b0022543b00054b0'
- '022543b040545a58b004252045b04060445959212121212d2c014b525843b002254523'
- '61441b2121592d2c014b525843b00225452360441b2121592d2c4b525845441b212159'
- '2d2c0120b003252349b04060b0206320b000525823b002253823b002256538008a6338'
- '1b212121212159012d2c4b505845441b2121592d2c01b005251023208af500b0016023'
- 'edec2d2c01b005251023208af500b0016123edec2d2c01b0062510f500edec2d2c4623'
- '46608a8a462320468a608a61b8ff8062232010238ab14b4b8a70456020b0005058b001'
- '61b8ffba8b1bb0468c59b0106068013a2d2c2045b00325465258b0022546206861b003'
- '25b003253f2321381b2111592d2c2045b00325465058b0022546206861b00325b00325'
- '3f2321381b2111592d2c00b00743b006430b2d2c8a10ec2d2cb00c4358211b2046b000'
- '5258b8fff0381bb0103859592d2c20b0005558b8100063b003254564b00325456461b0'
- '005358b0021bb04061b00359254569535845441b2121591b21b0022545b00225456164'
- 'b028515845441b212159592d2c21210c6423648bb84000622d2c21b08051580c642364'
- '8bb82000621bb200402f2b59b002602d2c21b0c051580c6423648bb81555621bb20080'
- '2f2b59b002602d2c0c6423648bb84000626023212d2c4b5358b00425b0042549642345'
- '6469b0408b6120b080626ab00225b00225618cb0462344218a10b046f6211b218a1123'
- '1220392f592d2cb00225b002254964b0c05458b8fff838b008381b2121592d2cb01343'
- '58031b02592d2cb0134358021b03592d2cb00a2b2310203cb0172b2d2cb00225b8fff0'
- '38b0282b8a102320d023b0102bb0054358c01b3c59201011b00012012d2c4b53234b51'
- '5a58381b2121592d2c01b0022510d023c901b00113b0001410b0013cb001162d2c01b0'
- '0013b001b0032549b0031738b001132d2c4b53234b515a5820458a60441b2121592d2c'
- '20392f2d')
+ "403b3a393837363534333231302f2e2d2c2b2a292827262524232221201f1e1d1c1b1a"
+ "191817161514131211100f0e0d0c0b0a090807060504030201002c01b0184358456ab0"
+ "194360b0462344231020b0464ef04d2fb000121b21231133592d2c01b0184358b0052b"
+ "b000134bb0145058b100403859b0062b1b21231133592d2c01b01843584eb0032510f2"
+ "21b000124d1b2045b00425b00425234a6164b0285258212310d61bb0032510f221b000"
+ "1259592d2cb01a435821211bb00225b0022549b00325b003254a612064b01050582121"
+ "211bb00325b0032549b0005058b0005058b8ffe238211bb0103821591bb0005258b01e"
+ "38211bb8fff03821595959592d2c01b0184358b0052bb000134bb0145058b90000ffc0"
+ "3859b0062b1b21231133592d2c4e018a10b146194344b00014b10046e2b00015b90000"
+ "fff03800b0003cb0282bb0022510b0003c2d2c0118b0002fb00114f2b00113b001154d"
+ "b000122d2c01b0184358b0052bb00013b90000ffe038b0062b1b21231133592d2c01b0"
+ "18435845646a23456469b01943646060b0462344231020b046f02fb000121b2121208a"
+ "208a525811331b212159592d2c01b10b0a432343650a2d2c00b10a0b4323430b2d2c00"
+ "b0462370b101463e01b0462370b10246453ab10200080d2d2cb0122bb0022545b00225"
+ "456ab0408b60b0022523442121212d2cb0132bb0022545b00225456ab8ffc08c60b002"
+ "2523442121212d2cb000b0122b2121212d2cb000b0132b2121212d2c01b00643b00743"
+ "650a2d2c2069b04061b0008b20b12cc08a8cb8100062602b0c642364615c58b0036159"
+ "2d2cb1000325456854b01c4b505a58b0032545b0032545606820b004252344b0042523"
+ "441bb00325204568208a2344b00325456860b003252344592d2cb00325204568208a23"
+ "44b003254564686560b00425b0016023442d2cb00943588721c01bb01243588745b011"
+ "2bb0472344b0477ae41b038a45186920b04723448a8a8720b0a05158b0112bb0472344"
+ "b0477ae41b21b0477ae4595959182d2c208a4523456860442d2c456a422d2c01182f2d"
+ "2c01b0184358b00425b00425496423456469b0408b6120b080626ab00225b00225618c"
+ "b0194360b0462344218a10b046f6211b21212121592d2c01b0184358b0022545b00225"
+ "4564606ab00325456a6120b00425456a208a8b65b0042523448cb00325234421211b20"
+ "456a4420456a44592d2c012045b00055b018435a584568234569b0408b6120b080626a"
+ "208a236120b003258b65b0042523448cb00325234421211b2121b0192b592d2c018a8a"
+ "45642345646164422d2cb00425b00425b0192bb0184358b00425b00425b00325b01b2b"
+ "01b0022543b04054b0022543b000545a58b003252045b040614459b0022543b00054b0"
+ "022543b040545a58b004252045b04060445959212121212d2c014b525843b002254523"
+ "61441b2121592d2c014b525843b00225452360441b2121592d2c4b525845441b212159"
+ "2d2c0120b003252349b04060b0206320b000525823b002253823b002256538008a6338"
+ "1b212121212159012d2c4b505845441b2121592d2c01b005251023208af500b0016023"
+ "edec2d2c01b005251023208af500b0016123edec2d2c01b0062510f500edec2d2c4623"
+ "46608a8a462320468a608a61b8ff8062232010238ab14b4b8a70456020b0005058b001"
+ "61b8ffba8b1bb0468c59b0106068013a2d2c2045b00325465258b0022546206861b003"
+ "25b003253f2321381b2111592d2c2045b00325465058b0022546206861b00325b00325"
+ "3f2321381b2111592d2c00b00743b006430b2d2c8a10ec2d2cb00c4358211b2046b000"
+ "5258b8fff0381bb0103859592d2c20b0005558b8100063b003254564b00325456461b0"
+ "005358b0021bb04061b00359254569535845441b2121591b21b0022545b00225456164"
+ "b028515845441b212159592d2c21210c6423648bb84000622d2c21b08051580c642364"
+ "8bb82000621bb200402f2b59b002602d2c21b0c051580c6423648bb81555621bb20080"
+ "2f2b59b002602d2c0c6423648bb84000626023212d2c4b5358b00425b0042549642345"
+ "6469b0408b6120b080626ab00225b00225618cb0462344218a10b046f6211b218a1123"
+ "1220392f592d2cb00225b002254964b0c05458b8fff838b008381b2121592d2cb01343"
+ "58031b02592d2cb0134358021b03592d2cb00a2b2310203cb0172b2d2cb00225b8fff0"
+ "38b0282b8a102320d023b0102bb0054358c01b3c59201011b00012012d2c4b53234b51"
+ "5a58381b2121592d2c01b0022510d023c901b00113b0001410b0013cb001162d2c01b0"
+ "0013b001b0032549b0031738b001132d2c4b53234b515a5820458a60441b2121592d2c"
+ "20392f2d"
+)
class TestFont(object):
@@ -70,7 +78,6 @@ class TestFont(object):
class ProgramTest(unittest.TestCase):
-
def test__bool__(self):
p = Program()
assert not bool(p)
@@ -83,13 +90,25 @@ class ProgramTest(unittest.TestCase):
assert not bool(p)
p = Program()
- asm = ['SVTCA[0]']
+ asm = ["SVTCA[0]"]
p.fromAssembly(asm)
assert bool(p)
- assert p.assembly.pop() == 'SVTCA[0]'
+ assert p.assembly.pop() == "SVTCA[0]"
assert not bool(p)
+ def test_from_assembly_list(self):
+ p = Program()
+ p.fromAssembly(ASSEMBLY)
+ asm = p.getAssembly()
+ assert ASSEMBLY == asm
+
+ def test_from_assembly_str(self):
+ p = Program()
+ p.fromAssembly("\n".join(ASSEMBLY))
+ asm = p.getAssembly()
+ assert ASSEMBLY == asm
+
def test_roundtrip(self):
p = Program()
p.fromBytecode(BYTECODE)
@@ -98,7 +117,7 @@ class ProgramTest(unittest.TestCase):
assert BYTECODE == p.getBytecode()
def test_xml_indentation(self):
- with open(TTPROGRAM_TTX, 'r', encoding='utf-8') as f:
+ with open(TTPROGRAM_TTX, "r", encoding="utf-8") as f:
ttProgramXML = f.read()
p = Program()
p.fromBytecode(BYTECODE)
@@ -110,8 +129,9 @@ class ProgramTest(unittest.TestCase):
finally:
output_string = buf.getvalue()
assert output_string == ttProgramXML
-
-if __name__ == '__main__':
+
+if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/ttLib/ttFont_test.py b/Tests/ttLib/ttFont_test.py
index e0e82b24..2203b4d9 100644
--- a/Tests/ttLib/ttFont_test.py
+++ b/Tests/ttLib/ttFont_test.py
@@ -2,8 +2,16 @@ import io
import os
import re
import random
+import tempfile
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
-from fontTools.ttLib import TTFont, newTable, registerCustomTableClass, unregisterCustomTableClass
+from fontTools.ttLib import (
+ TTFont,
+ TTLibError,
+ newTable,
+ registerCustomTableClass,
+ unregisterCustomTableClass,
+)
+from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
import pytest
@@ -13,7 +21,6 @@ DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class CustomTableClass(DefaultTable):
-
def decompile(self, data, ttFont):
self.numbers = list(data)
@@ -143,6 +150,17 @@ def test_setGlyphOrder_also_updates_glyf_glyphOrder():
assert font["glyf"].glyphOrder == new_order
+def test_getGlyphOrder_not_true_post_format_1(caplog):
+ # https://github.com/fonttools/fonttools/issues/2736
+ caplog.set_level("WARNING")
+ font = TTFont(os.path.join(DATA_DIR, "bogus_post_format_1.ttf"))
+ hmtx = font["hmtx"]
+ assert len(hmtx.metrics) > len(standardGlyphOrder)
+ log_rec = caplog.records[-1]
+ assert log_rec.levelname == "WARNING"
+ assert "Not enough names found in the 'post' table" in log_rec.message
+
+
@pytest.mark.parametrize("lazy", [None, True, False])
def test_ensureDecompiled(lazy):
# test that no matter the lazy value, ensureDecompiled decompiles all tables
@@ -159,7 +177,7 @@ def test_ensureDecompiled(lazy):
feature dist {
pos period period -30;
} dist;
- """
+ """,
)
# also add an additional cmap subtable that will be lazily-loaded
cm = CmapSubtable.newSubtable(14)
@@ -167,7 +185,7 @@ def test_ensureDecompiled(lazy):
cm.platEncID = 5
cm.language = 0
cm.cmap = {}
- cm.uvsDict = {0xFE00: [(0x002e, None)]}
+ cm.uvsDict = {0xFE00: [(0x002E, None)]}
font["cmap"].tables.append(cm)
# save and reload, potentially lazily
@@ -212,3 +230,91 @@ def test_ensureDecompiled(lazy):
assert "Lookup" in font["GSUB"].table.LookupList.__dict__
assert "reader" not in font["GPOS"].table.LookupList.__dict__
assert "Lookup" in font["GPOS"].table.LookupList.__dict__
+
+
+@pytest.fixture
+def testFont_fvar_avar():
+ ttxpath = os.path.join(DATA_DIR, "TestTTF_normalizeLocation.ttx")
+ ttf = TTFont()
+ ttf.importXML(ttxpath)
+ return ttf
+
+
+@pytest.mark.parametrize(
+ "userLocation, expectedNormalizedLocation",
+ [
+ ({}, {"wght": 0.0}),
+ ({"wght": 100}, {"wght": -1.0}),
+ ({"wght": 250}, {"wght": -0.75}),
+ ({"wght": 400}, {"wght": 0.0}),
+ ({"wght": 550}, {"wght": 0.75}),
+ ({"wght": 625}, {"wght": 0.875}),
+ ({"wght": 700}, {"wght": 1.0}),
+ ],
+)
+def test_font_normalizeLocation(
+ testFont_fvar_avar, userLocation, expectedNormalizedLocation
+):
+ normalizedLocation = testFont_fvar_avar.normalizeLocation(userLocation)
+ assert expectedNormalizedLocation == normalizedLocation
+
+
+def test_font_normalizeLocation_no_VF():
+ ttf = TTFont()
+ with pytest.raises(TTLibError, match="Not a variable font"):
+ ttf.normalizeLocation({})
+
+
+def test_getGlyphID():
+ font = TTFont()
+ font.importXML(os.path.join(DATA_DIR, "TestTTF-Regular.ttx"))
+
+ assert font.getGlyphID("space") == 3
+ assert font.getGlyphID("glyph12345") == 12345 # virtual glyph
+ with pytest.raises(KeyError):
+ font.getGlyphID("non_existent")
+ with pytest.raises(KeyError):
+ font.getGlyphID("glyph_prefix_but_invalid_id")
+
+
+def test_spooled_tempfile_may_not_have_attribute_seekable():
+ # SpooledTemporaryFile only got a seekable attribute on Python 3.11
+ # https://github.com/fonttools/fonttools/issues/3052
+ font = TTFont()
+ font.importXML(os.path.join(DATA_DIR, "TestTTF-Regular.ttx"))
+ tmp = tempfile.SpooledTemporaryFile()
+ font.save(tmp)
+ # this should not fail
+ _ = TTFont(tmp)
+
+
+def test_unseekable_file_lazy_loading_fails():
+ class NonSeekableFile:
+ def __init__(self):
+ self.file = io.BytesIO()
+
+ def read(self, size):
+ return self.file.read(size)
+
+ def seekable(self):
+ return False
+
+ f = NonSeekableFile()
+ with pytest.raises(TTLibError, match="Input file must be seekable when lazy=True"):
+ TTFont(f, lazy=True)
+
+
+def test_unsupported_seek_operation_lazy_loading_fails():
+ class UnsupportedSeekFile:
+ def __init__(self):
+ self.file = io.BytesIO()
+
+ def read(self, size):
+ return self.file.read(size)
+
+ def seek(self, offset):
+ raise io.UnsupportedOperation("Unsupported seek operation")
+
+ f = UnsupportedSeekFile()
+ with pytest.raises(TTLibError, match="Input file must be seekable when lazy=True"):
+ TTFont(f, lazy=True)
diff --git a/Tests/ttLib/ttGlyphSet_test.py b/Tests/ttLib/ttGlyphSet_test.py
index bc0bf2ce..56514464 100644
--- a/Tests/ttLib/ttGlyphSet_test.py
+++ b/Tests/ttLib/ttGlyphSet_test.py
@@ -1,112 +1,656 @@
from fontTools.ttLib import TTFont
from fontTools.ttLib import ttGlyphSet
-from fontTools.pens.recordingPen import RecordingPen
+from fontTools.pens.recordingPen import (
+ RecordingPen,
+ RecordingPointPen,
+ DecomposingRecordingPen,
+)
+from fontTools.misc.roundTools import otRound
+from fontTools.misc.transform import DecomposedTransform
import os
import pytest
class TTGlyphSetTest(object):
-
@staticmethod
def getpath(testfile):
path = os.path.dirname(__file__)
return os.path.join(path, "data", testfile)
@pytest.mark.parametrize(
- "location, expected",
+ "fontfile, location, expected",
[
(
+ "I.ttf",
None,
[
- ('moveTo', ((175, 0),)),
- ('lineTo', ((367, 0),)),
- ('lineTo', ((367, 1456),)),
- ('lineTo', ((175, 1456),)),
- ('closePath', ())
- ]
+ ("moveTo", ((175, 0),)),
+ ("lineTo", ((367, 0),)),
+ ("lineTo", ((367, 1456),)),
+ ("lineTo", ((175, 1456),)),
+ ("closePath", ()),
+ ],
),
(
+ "I.ttf",
{},
[
- ('moveTo', ((175, 0),)),
- ('lineTo', ((367, 0),)),
- ('lineTo', ((367, 1456),)),
- ('lineTo', ((175, 1456),)),
- ('closePath', ())
- ]
+ ("moveTo", ((175, 0),)),
+ ("lineTo", ((367, 0),)),
+ ("lineTo", ((367, 1456),)),
+ ("lineTo", ((175, 1456),)),
+ ("closePath", ()),
+ ],
),
(
- {'wght': 100},
+ "I.ttf",
+ {"wght": 100},
[
- ('moveTo', ((175, 0),)),
- ('lineTo', ((271, 0),)),
- ('lineTo', ((271, 1456),)),
- ('lineTo', ((175, 1456),)),
- ('closePath', ())
- ]
+ ("moveTo", ((175, 0),)),
+ ("lineTo", ((271, 0),)),
+ ("lineTo", ((271, 1456),)),
+ ("lineTo", ((175, 1456),)),
+ ("closePath", ()),
+ ],
),
(
- {'wght': 1000},
+ "I.ttf",
+ {"wght": 1000},
[
- ('moveTo', ((128, 0),)),
- ('lineTo', ((550, 0),)),
- ('lineTo', ((550, 1456),)),
- ('lineTo', ((128, 1456),)),
- ('closePath', ())
- ]
+ ("moveTo", ((128, 0),)),
+ ("lineTo", ((550, 0),)),
+ ("lineTo", ((550, 1456),)),
+ ("lineTo", ((128, 1456),)),
+ ("closePath", ()),
+ ],
),
(
- {'wght': 1000, 'wdth': 25},
+ "I.ttf",
+ {"wght": 1000, "wdth": 25},
[
- ('moveTo', ((140, 0),)),
- ('lineTo', ((553, 0),)),
- ('lineTo', ((553, 1456),)),
- ('lineTo', ((140, 1456),)),
- ('closePath', ())
- ]
+ ("moveTo", ((140, 0),)),
+ ("lineTo", ((553, 0),)),
+ ("lineTo", ((553, 1456),)),
+ ("lineTo", ((140, 1456),)),
+ ("closePath", ()),
+ ],
),
(
- {'wght': 1000, 'wdth': 50},
+ "I.ttf",
+ {"wght": 1000, "wdth": 50},
[
- ('moveTo', ((136, 0),)),
- ('lineTo', ((552, 0),)),
- ('lineTo', ((552, 1456),)),
- ('lineTo', ((136, 1456),)),
- ('closePath', ())
- ]
+ ("moveTo", ((136, 0),)),
+ ("lineTo", ((552, 0),)),
+ ("lineTo", ((552, 1456),)),
+ ("lineTo", ((136, 1456),)),
+ ("closePath", ()),
+ ],
),
- ]
+ (
+ "I.otf",
+ {"wght": 1000},
+ [
+ ("moveTo", ((179, 74),)),
+ ("lineTo", ((28, 59),)),
+ ("lineTo", ((28, 0),)),
+ ("lineTo", ((367, 0),)),
+ ("lineTo", ((367, 59),)),
+ ("lineTo", ((212, 74),)),
+ ("lineTo", ((179, 74),)),
+ ("closePath", ()),
+ ("moveTo", ((179, 578),)),
+ ("lineTo", ((212, 578),)),
+ ("lineTo", ((367, 593),)),
+ ("lineTo", ((367, 652),)),
+ ("lineTo", ((28, 652),)),
+ ("lineTo", ((28, 593),)),
+ ("lineTo", ((179, 578),)),
+ ("closePath", ()),
+ ("moveTo", ((98, 310),)),
+ ("curveTo", ((98, 205), (98, 101), (95, 0))),
+ ("lineTo", ((299, 0),)),
+ ("curveTo", ((296, 103), (296, 207), (296, 311))),
+ ("lineTo", ((296, 342),)),
+ ("curveTo", ((296, 447), (296, 551), (299, 652))),
+ ("lineTo", ((95, 652),)),
+ ("curveTo", ((98, 549), (98, 445), (98, 342))),
+ ("lineTo", ((98, 310),)),
+ ("closePath", ()),
+ ],
+ ),
+ (
+ # In this font, /I has an lsb of 30, but an xMin of 25, so an
+ # offset of 5 units needs to be applied when drawing the outline.
+ # See https://github.com/fonttools/fonttools/issues/2824
+ "issue2824.ttf",
+ None,
+ [
+ ("moveTo", ((309, 180),)),
+ ("qCurveTo", ((274, 151), (187, 136), (104, 166), (74, 201))),
+ ("qCurveTo", ((45, 236), (30, 323), (59, 407), (95, 436))),
+ ("qCurveTo", ((130, 466), (217, 480), (301, 451), (330, 415))),
+ ("qCurveTo", ((360, 380), (374, 293), (345, 210), (309, 180))),
+ ("closePath", ()),
+ ],
+ ),
+ ],
)
- def test_glyphset(
- self, location, expected
- ):
- # TODO: also test loading CFF-flavored fonts
- font = TTFont(self.getpath("I.ttf"))
+ def test_glyphset(self, fontfile, location, expected):
+ font = TTFont(self.getpath(fontfile))
glyphset = font.getGlyphSet(location=location)
assert isinstance(glyphset, ttGlyphSet._TTGlyphSet)
- if location:
- assert isinstance(glyphset, ttGlyphSet._TTVarGlyphSet)
assert list(glyphset.keys()) == [".notdef", "I"]
assert "I" in glyphset
- assert glyphset.has_key("I") # we should really get rid of this...
+ with pytest.deprecated_call():
+ assert glyphset.has_key("I") # we should really get rid of this...
assert len(glyphset) == 2
pen = RecordingPen()
- glyph = glyphset['I']
+ glyph = glyphset["I"]
assert glyphset.get("foobar") is None
assert isinstance(glyph, ttGlyphSet._TTGlyph)
- if location:
- assert isinstance(glyph, ttGlyphSet._TTVarGlyphGlyf)
- else:
- assert isinstance(glyph, ttGlyphSet._TTGlyphGlyf)
+ is_glyf = fontfile.endswith(".ttf")
+ glyphType = ttGlyphSet._TTGlyphGlyf if is_glyf else ttGlyphSet._TTGlyphCFF
+ assert isinstance(glyph, glyphType)
glyph.draw(pen)
actual = pen.value
assert actual == expected, (location, actual, expected)
+
+ def test_glyphset_varComposite_components(self):
+ font = TTFont(self.getpath("varc-ac00-ac01.ttf"))
+ glyphset = font.getGlyphSet()
+
+ pen = RecordingPen()
+ glyph = glyphset["uniAC00"]
+
+ glyph.draw(pen)
+ actual = pen.value
+
+ expected = [
+ (
+ "addVarComponent",
+ (
+ "glyph00003",
+ DecomposedTransform(460.0, 676.0, 0, 1, 1, 0, 0, 0, 0),
+ {
+ "0000": 0.84661865234375,
+ "0001": 0.98944091796875,
+ "0002": 0.47283935546875,
+ "0003": 0.446533203125,
+ },
+ ),
+ ),
+ (
+ "addVarComponent",
+ (
+ "glyph00004",
+ DecomposedTransform(932.0, 382.0, 0, 1, 1, 0, 0, 0, 0),
+ {
+ "0000": 0.93359375,
+ "0001": 0.916015625,
+ "0002": 0.523193359375,
+ "0003": 0.32806396484375,
+ "0004": 0.85089111328125,
+ },
+ ),
+ ),
+ ]
+
+ assert actual == expected, (actual, expected)
+
+ def test_glyphset_varComposite1(self):
+ font = TTFont(self.getpath("varc-ac00-ac01.ttf"))
+ glyphset = font.getGlyphSet(location={"wght": 600})
+
+ pen = DecomposingRecordingPen(glyphset)
+ glyph = glyphset["uniAC00"]
+
+ glyph.draw(pen)
+ actual = pen.value
+
+ expected = [
+ ("moveTo", ((432, 678),)),
+ ("lineTo", ((432, 620),)),
+ (
+ "qCurveTo",
+ (
+ (419, 620),
+ (374, 621),
+ (324, 619),
+ (275, 618),
+ (237, 617),
+ (228, 616),
+ ),
+ ),
+ ("qCurveTo", ((218, 616), (188, 612), (160, 605), (149, 601))),
+ ("qCurveTo", ((127, 611), (83, 639), (67, 654))),
+ ("qCurveTo", ((64, 657), (63, 662), (64, 666))),
+ ("lineTo", ((72, 678),)),
+ ("qCurveTo", ((93, 674), (144, 672), (164, 672))),
+ (
+ "qCurveTo",
+ (
+ (173, 672),
+ (213, 672),
+ (266, 673),
+ (323, 674),
+ (377, 675),
+ (421, 678),
+ (432, 678),
+ ),
+ ),
+ ("closePath", ()),
+ ("moveTo", ((525, 619),)),
+ ("lineTo", ((412, 620),)),
+ ("lineTo", ((429, 678),)),
+ ("lineTo", ((466, 697),)),
+ ("qCurveTo", ((470, 698), (482, 698), (486, 697))),
+ ("qCurveTo", ((494, 693), (515, 682), (536, 670), (541, 667))),
+ ("qCurveTo", ((545, 663), (545, 656), (543, 652))),
+ ("lineTo", ((525, 619),)),
+ ("closePath", ()),
+ ("moveTo", ((63, 118),)),
+ ("lineTo", ((47, 135),)),
+ ("qCurveTo", ((42, 141), (48, 146))),
+ ("qCurveTo", ((135, 213), (278, 373), (383, 541), (412, 620))),
+ ("lineTo", ((471, 642),)),
+ ("lineTo", ((525, 619),)),
+ ("qCurveTo", ((496, 529), (365, 342), (183, 179), (75, 121))),
+ ("qCurveTo", ((72, 119), (65, 118), (63, 118))),
+ ("closePath", ()),
+ ("moveTo", ((925, 372),)),
+ ("lineTo", ((739, 368),)),
+ ("lineTo", ((739, 427),)),
+ ("lineTo", ((822, 430),)),
+ ("lineTo", ((854, 451),)),
+ ("qCurveTo", ((878, 453), (930, 449), (944, 445))),
+ ("qCurveTo", ((961, 441), (962, 426))),
+ ("qCurveTo", ((964, 411), (956, 386), (951, 381))),
+ ("qCurveTo", ((947, 376), (931, 372), (925, 372))),
+ ("closePath", ()),
+ ("moveTo", ((729, -113),)),
+ ("lineTo", ((674, -113),)),
+ ("qCurveTo", ((671, -98), (669, -42), (666, 22), (665, 83), (665, 102))),
+ ("lineTo", ((665, 763),)),
+ ("qCurveTo", ((654, 780), (608, 810), (582, 820))),
+ ("lineTo", ((593, 850),)),
+ ("qCurveTo", ((594, 852), (599, 856), (607, 856))),
+ ("qCurveTo", ((628, 855), (684, 846), (736, 834), (752, 827))),
+ ("qCurveTo", ((766, 818), (766, 802))),
+ ("lineTo", ((762, 745),)),
+ ("lineTo", ((762, 134),)),
+ ("qCurveTo", ((762, 107), (757, 43), (749, -25), (737, -87), (729, -113))),
+ ("closePath", ()),
+ ]
+
+ actual = [
+ (op, tuple((otRound(pt[0]), otRound(pt[1])) for pt in args))
+ for op, args in actual
+ ]
+
+ assert actual == expected, (actual, expected)
+
+ # Test that drawing twice works, we accidentally don't change the component
+ pen = DecomposingRecordingPen(glyphset)
+ glyph.draw(pen)
+ actual = pen.value
+ actual = [
+ (op, tuple((otRound(pt[0]), otRound(pt[1])) for pt in args))
+ for op, args in actual
+ ]
+ assert actual == expected, (actual, expected)
+
+ pen = RecordingPointPen()
+ glyph.drawPoints(pen)
+ assert pen.value
+
+ def test_glyphset_varComposite2(self):
+ # This test font has axis variations
+
+ font = TTFont(self.getpath("varc-6868.ttf"))
+ glyphset = font.getGlyphSet(location={"wght": 600})
+
+ pen = DecomposingRecordingPen(glyphset)
+ glyph = glyphset["uni6868"]
+
+ glyph.draw(pen)
+ actual = pen.value
+
+ expected = [
+ ("moveTo", ((460, 565),)),
+ (
+ "qCurveTo",
+ (
+ (482, 577),
+ (526, 603),
+ (568, 632),
+ (607, 663),
+ (644, 698),
+ (678, 735),
+ (708, 775),
+ (721, 796),
+ ),
+ ),
+ ("lineTo", ((632, 835),)),
+ (
+ "qCurveTo",
+ (
+ (621, 817),
+ (595, 784),
+ (566, 753),
+ (534, 724),
+ (499, 698),
+ (462, 675),
+ (423, 653),
+ (403, 644),
+ ),
+ ),
+ ("closePath", ()),
+ ("moveTo", ((616, 765),)),
+ ("lineTo", ((590, 682),)),
+ ("lineTo", ((830, 682),)),
+ ("lineTo", ((833, 682),)),
+ ("lineTo", ((828, 693),)),
+ (
+ "qCurveTo",
+ (
+ (817, 671),
+ (775, 620),
+ (709, 571),
+ (615, 525),
+ (492, 490),
+ (413, 480),
+ ),
+ ),
+ ("lineTo", ((454, 386),)),
+ (
+ "qCurveTo",
+ (
+ (544, 403),
+ (687, 455),
+ (798, 519),
+ (877, 590),
+ (926, 655),
+ (937, 684),
+ ),
+ ),
+ ("lineTo", ((937, 765),)),
+ ("closePath", ()),
+ ("moveTo", ((723, 555),)),
+ (
+ "qCurveTo",
+ (
+ (713, 563),
+ (693, 579),
+ (672, 595),
+ (651, 610),
+ (629, 625),
+ (606, 638),
+ (583, 651),
+ (572, 657),
+ ),
+ ),
+ ("lineTo", ((514, 590),)),
+ (
+ "qCurveTo",
+ (
+ (525, 584),
+ (547, 572),
+ (568, 559),
+ (589, 545),
+ (609, 531),
+ (629, 516),
+ (648, 500),
+ (657, 492),
+ ),
+ ),
+ ("closePath", ()),
+ ("moveTo", ((387, 375),)),
+ ("lineTo", ((387, 830),)),
+ ("lineTo", ((289, 830),)),
+ ("lineTo", ((289, 375),)),
+ ("closePath", ()),
+ ("moveTo", ((96, 383),)),
+ (
+ "qCurveTo",
+ (
+ (116, 390),
+ (156, 408),
+ (194, 427),
+ (231, 449),
+ (268, 472),
+ (302, 497),
+ (335, 525),
+ (351, 539),
+ ),
+ ),
+ ("lineTo", ((307, 610),)),
+ (
+ "qCurveTo",
+ (
+ (291, 597),
+ (257, 572),
+ (221, 549),
+ (185, 528),
+ (147, 509),
+ (108, 492),
+ (69, 476),
+ (48, 469),
+ ),
+ ),
+ ("closePath", ()),
+ ("moveTo", ((290, 653),)),
+ (
+ "qCurveTo",
+ (
+ (281, 664),
+ (261, 687),
+ (240, 708),
+ (219, 729),
+ (196, 749),
+ (173, 768),
+ (148, 786),
+ (136, 794),
+ ),
+ ),
+ ("lineTo", ((69, 727),)),
+ (
+ "qCurveTo",
+ (
+ (81, 719),
+ (105, 702),
+ (129, 684),
+ (151, 665),
+ (173, 645),
+ (193, 625),
+ (213, 604),
+ (222, 593),
+ ),
+ ),
+ ("closePath", ()),
+ ("moveTo", ((913, -57),)),
+ ("lineTo", ((953, 30),)),
+ (
+ "qCurveTo",
+ (
+ (919, 41),
+ (854, 67),
+ (790, 98),
+ (729, 134),
+ (671, 173),
+ (616, 217),
+ (564, 264),
+ (540, 290),
+ ),
+ ),
+ ("lineTo", ((522, 286),)),
+ ("qCurveTo", ((511, 267), (498, 235), (493, 213), (492, 206))),
+ ("lineTo", ((515, 209),)),
+ ("qCurveTo", ((569, 146), (695, 44), (835, -32), (913, -57))),
+ ("closePath", ()),
+ ("moveTo", ((474, 274),)),
+ ("lineTo", ((452, 284),)),
+ (
+ "qCurveTo",
+ (
+ (428, 260),
+ (377, 214),
+ (323, 172),
+ (266, 135),
+ (206, 101),
+ (144, 71),
+ (80, 46),
+ (47, 36),
+ ),
+ ),
+ ("lineTo", ((89, -53),)),
+ ("qCurveTo", ((163, -29), (299, 46), (423, 142), (476, 201))),
+ ("lineTo", ((498, 196),)),
+ ("qCurveTo", ((498, 203), (494, 225), (482, 255), (474, 274))),
+ ("closePath", ()),
+ ("moveTo", ((450, 250),)),
+ ("lineTo", ((550, 250),)),
+ ("lineTo", ((550, 379),)),
+ ("lineTo", ((450, 379),)),
+ ("closePath", ()),
+ ("moveTo", ((68, 215),)),
+ ("lineTo", ((932, 215),)),
+ ("lineTo", ((932, 305),)),
+ ("lineTo", ((68, 305),)),
+ ("closePath", ()),
+ ("moveTo", ((450, -71),)),
+ ("lineTo", ((550, -71),)),
+ ("lineTo", ((550, -71),)),
+ ("lineTo", ((550, 267),)),
+ ("lineTo", ((450, 267),)),
+ ("lineTo", ((450, -71),)),
+ ("closePath", ()),
+ ]
+
+ actual = [
+ (op, tuple((otRound(pt[0]), otRound(pt[1])) for pt in args))
+ for op, args in actual
+ ]
+
+ assert actual == expected, (actual, expected)
+
+ pen = RecordingPointPen()
+ glyph.drawPoints(pen)
+ assert pen.value
+
+ def test_cubic_glyf(self):
+ font = TTFont(self.getpath("dot-cubic.ttf"))
+ glyphset = font.getGlyphSet()
+
+ expected = [
+ ("moveTo", ((76, 181),)),
+ ("curveTo", ((103, 181), (125, 158), (125, 131))),
+ ("curveTo", ((125, 104), (103, 82), (76, 82))),
+ ("curveTo", ((48, 82), (26, 104), (26, 131))),
+ ("curveTo", ((26, 158), (48, 181), (76, 181))),
+ ("closePath", ()),
+ ]
+
+ pen = RecordingPen()
+ glyphset["one"].draw(pen)
+ assert pen.value == expected
+
+ expectedPoints = [
+ ("beginPath", (), {}),
+ ("addPoint", ((76, 181), "curve", False, None), {}),
+ ("addPoint", ((103, 181), None, False, None), {}),
+ ("addPoint", ((125, 158), None, False, None), {}),
+ ("addPoint", ((125, 104), None, False, None), {}),
+ ("addPoint", ((103, 82), None, False, None), {}),
+ ("addPoint", ((76, 82), "curve", False, None), {}),
+ ("addPoint", ((48, 82), None, False, None), {}),
+ ("addPoint", ((26, 104), None, False, None), {}),
+ ("addPoint", ((26, 158), None, False, None), {}),
+ ("addPoint", ((48, 181), None, False, None), {}),
+ ("endPath", (), {}),
+ ]
+ pen = RecordingPointPen()
+ glyphset["one"].drawPoints(pen)
+ assert pen.value == expectedPoints
+
+ pen = RecordingPen()
+ glyphset["two"].draw(pen)
+ assert pen.value == expected
+
+ expectedPoints = [
+ ("beginPath", (), {}),
+ ("addPoint", ((26, 158), None, False, None), {}),
+ ("addPoint", ((48, 181), None, False, None), {}),
+ ("addPoint", ((76, 181), "curve", False, None), {}),
+ ("addPoint", ((103, 181), None, False, None), {}),
+ ("addPoint", ((125, 158), None, False, None), {}),
+ ("addPoint", ((125, 104), None, False, None), {}),
+ ("addPoint", ((103, 82), None, False, None), {}),
+ ("addPoint", ((76, 82), "curve", False, None), {}),
+ ("addPoint", ((48, 82), None, False, None), {}),
+ ("addPoint", ((26, 104), None, False, None), {}),
+ ("endPath", (), {}),
+ ]
+ pen = RecordingPointPen()
+ glyphset["two"].drawPoints(pen)
+ assert pen.value == expectedPoints
+
+ pen = RecordingPen()
+ glyphset["three"].draw(pen)
+ assert pen.value == expected
+
+ expectedPoints = [
+ ("beginPath", (), {}),
+ ("addPoint", ((48, 82), None, False, None), {}),
+ ("addPoint", ((26, 104), None, False, None), {}),
+ ("addPoint", ((26, 158), None, False, None), {}),
+ ("addPoint", ((48, 181), None, False, None), {}),
+ ("addPoint", ((76, 181), "curve", False, None), {}),
+ ("addPoint", ((103, 181), None, False, None), {}),
+ ("addPoint", ((125, 158), None, False, None), {}),
+ ("addPoint", ((125, 104), None, False, None), {}),
+ ("addPoint", ((103, 82), None, False, None), {}),
+ ("addPoint", ((76, 82), "curve", False, None), {}),
+ ("endPath", (), {}),
+ ]
+ pen = RecordingPointPen()
+ glyphset["three"].drawPoints(pen)
+ assert pen.value == expectedPoints
+
+ pen = RecordingPen()
+ glyphset["four"].draw(pen)
+ assert pen.value == [
+ ("moveTo", ((75.5, 181),)),
+ ("curveTo", ((103, 181), (125, 158), (125, 131))),
+ ("curveTo", ((125, 104), (103, 82), (75.5, 82))),
+ ("curveTo", ((48, 82), (26, 104), (26, 131))),
+ ("curveTo", ((26, 158), (48, 181), (75.5, 181))),
+ ("closePath", ()),
+ ]
+
+ # Ouch! We can't represent all-cubic-offcurves in pointPen!
+ # https://github.com/fonttools/fonttools/issues/3191
+ expectedPoints = [
+ ("beginPath", (), {}),
+ ("addPoint", ((103, 181), None, False, None), {}),
+ ("addPoint", ((125, 158), None, False, None), {}),
+ ("addPoint", ((125, 104), None, False, None), {}),
+ ("addPoint", ((103, 82), None, False, None), {}),
+ ("addPoint", ((48, 82), None, False, None), {}),
+ ("addPoint", ((26, 104), None, False, None), {}),
+ ("addPoint", ((26, 158), None, False, None), {}),
+ ("addPoint", ((48, 181), None, False, None), {}),
+ ("endPath", (), {}),
+ ]
+ pen = RecordingPointPen()
+ glyphset["four"].drawPoints(pen)
+ print(pen.value)
+ assert pen.value == expectedPoints
diff --git a/Tests/ttLib/ttVisitor_test.py b/Tests/ttLib/ttVisitor_test.py
index e84e213c..1c429343 100644
--- a/Tests/ttLib/ttVisitor_test.py
+++ b/Tests/ttLib/ttVisitor_test.py
@@ -21,14 +21,12 @@ class TestVisitor(TTVisitor):
class TTVisitorTest(object):
-
@staticmethod
def getpath(testfile):
path = os.path.dirname(__file__)
return os.path.join(path, "data", testfile)
def test_ttvisitor(self):
-
font = TTFont(self.getpath("TestVGID-Regular.otf"))
visitor = TestVisitor()
diff --git a/Tests/ttLib/woff2_test.py b/Tests/ttLib/woff2_test.py
index 7fe40dd1..e098eb99 100644
--- a/Tests/ttLib/woff2_test.py
+++ b/Tests/ttLib/woff2_test.py
@@ -2,16 +2,33 @@ from fontTools import ttLib
from fontTools.ttLib import woff2
from fontTools.ttLib.tables import _g_l_y_f
from fontTools.ttLib.woff2 import (
- WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat,
- woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry,
- getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex,
- WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable,
- WOFF2HmtxTable, WOFF2Writer, unpackBase128, unpack255UShort, pack255UShort)
+ WOFF2Reader,
+ woff2DirectorySize,
+ woff2DirectoryFormat,
+ woff2FlagsSize,
+ woff2UnknownTagSize,
+ woff2Base128MaxSize,
+ WOFF2DirectoryEntry,
+ getKnownTagIndex,
+ packBase128,
+ base128Size,
+ woff2UnknownTagIndex,
+ WOFF2FlavorData,
+ woff2TransformedTableTags,
+ WOFF2GlyfTable,
+ WOFF2LocaTable,
+ WOFF2HmtxTable,
+ WOFF2Writer,
+ unpackBase128,
+ unpack255UShort,
+ pack255UShort,
+)
import unittest
from fontTools.misc import sstruct
from fontTools.misc.textTools import Tag, bytechr, byteord
from fontTools import fontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
+from fontTools.pens.recordingPen import RecordingPen
from io import BytesIO
import struct
import os
@@ -23,1421 +40,1495 @@ import pytest
haveBrotli = False
try:
- try:
- import brotlicffi as brotli
- except ImportError:
- import brotli
- haveBrotli = True
+ try:
+ import brotlicffi as brotli
+ except ImportError:
+ import brotli
+ haveBrotli = True
except ImportError:
- pass
+ pass
# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires
# deprecation warnings if a program uses the old name.
-if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
- unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
+if not hasattr(unittest.TestCase, "assertRaisesRegex"):
+ unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-data_dir = os.path.join(current_dir, 'data')
-TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx')
-OTX = os.path.join(data_dir, 'TestOTF-Regular.otx')
-METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml')
+data_dir = os.path.join(current_dir, "data")
+TTX = os.path.join(data_dir, "TestTTF-Regular.ttx")
+OTX = os.path.join(data_dir, "TestOTF-Regular.otx")
+METADATA = os.path.join(data_dir, "test_woff2_metadata.xml")
TT_WOFF2 = BytesIO()
CFF_WOFF2 = BytesIO()
def setUpModule():
- if not haveBrotli:
- raise unittest.SkipTest("No module named brotli")
- assert os.path.exists(TTX)
- assert os.path.exists(OTX)
- # import TT-flavoured test font and save it as WOFF2
- ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- ttf.importXML(TTX)
- ttf.flavor = "woff2"
- ttf.save(TT_WOFF2, reorderTables=None)
- # import CFF-flavoured test font and save it as WOFF2
- otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- otf.importXML(OTX)
- otf.flavor = "woff2"
- otf.save(CFF_WOFF2, reorderTables=None)
+ if not haveBrotli:
+ raise unittest.SkipTest("No module named brotli")
+ assert os.path.exists(TTX)
+ assert os.path.exists(OTX)
+ # import TT-flavoured test font and save it as WOFF2
+ ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ ttf.importXML(TTX)
+ ttf.flavor = "woff2"
+ ttf.save(TT_WOFF2, reorderTables=None)
+ # import CFF-flavoured test font and save it as WOFF2
+ otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ otf.importXML(OTX)
+ otf.flavor = "woff2"
+ otf.save(CFF_WOFF2, reorderTables=None)
class WOFF2ReaderTest(unittest.TestCase):
-
- @classmethod
- def setUpClass(cls):
- cls.file = BytesIO(CFF_WOFF2.getvalue())
- cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- cls.font.importXML(OTX)
-
- def setUp(self):
- self.file.seek(0)
-
- def test_bad_signature(self):
- with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'):
- WOFF2Reader(BytesIO(b"wOFF"))
-
- def test_not_enough_data_header(self):
- incomplete_header = self.file.read(woff2DirectorySize - 1)
- with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'):
- WOFF2Reader(BytesIO(incomplete_header))
-
- def test_incorrect_compressed_size(self):
- data = self.file.read(woff2DirectorySize)
- header = sstruct.unpack(woff2DirectoryFormat, data)
- header['totalCompressedSize'] = 0
- data = sstruct.pack(woff2DirectoryFormat, header)
- with self.assertRaises((brotli.error, ttLib.TTLibError)):
- WOFF2Reader(BytesIO(data + self.file.read()))
-
- def test_incorrect_uncompressed_size(self):
- decompress_backup = brotli.decompress
- brotli.decompress = lambda data: b"" # return empty byte string
- with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'):
- WOFF2Reader(self.file)
- brotli.decompress = decompress_backup
-
- def test_incorrect_file_size(self):
- data = self.file.read(woff2DirectorySize)
- header = sstruct.unpack(woff2DirectoryFormat, data)
- header['length'] -= 1
- data = sstruct.pack(woff2DirectoryFormat, header)
- with self.assertRaisesRegex(
- ttLib.TTLibError, "doesn't match the actual file size"):
- WOFF2Reader(BytesIO(data + self.file.read()))
-
- def test_num_tables(self):
- tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]
- data = self.file.read(woff2DirectorySize)
- header = sstruct.unpack(woff2DirectoryFormat, data)
- self.assertEqual(header['numTables'], len(tags))
-
- def test_table_tags(self):
- tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')])
- reader = WOFF2Reader(self.file)
- self.assertEqual(set(reader.keys()), tags)
-
- def test_get_normal_tables(self):
- woff2Reader = WOFF2Reader(self.file)
- specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG')
- for tag in [t for t in self.font.keys() if t not in specialTags]:
- origData = self.font.getTableData(tag)
- decompressedData = woff2Reader[tag]
- self.assertEqual(origData, decompressedData)
-
- def test_reconstruct_unknown(self):
- reader = WOFF2Reader(self.file)
- with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'):
- reader.reconstructTable('head')
+ @classmethod
+ def setUpClass(cls):
+ cls.file = BytesIO(CFF_WOFF2.getvalue())
+ cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ cls.font.importXML(OTX)
+
+ def setUp(self):
+ self.file.seek(0)
+
+ def test_bad_signature(self):
+ with self.assertRaisesRegex(ttLib.TTLibError, "bad signature"):
+ WOFF2Reader(BytesIO(b"wOFF"))
+
+ def test_not_enough_data_header(self):
+ incomplete_header = self.file.read(woff2DirectorySize - 1)
+ with self.assertRaisesRegex(ttLib.TTLibError, "not enough data"):
+ WOFF2Reader(BytesIO(incomplete_header))
+
+ def test_incorrect_compressed_size(self):
+ data = self.file.read(woff2DirectorySize)
+ header = sstruct.unpack(woff2DirectoryFormat, data)
+ header["totalCompressedSize"] = 0
+ data = sstruct.pack(woff2DirectoryFormat, header)
+ with self.assertRaises((brotli.error, ttLib.TTLibError)):
+ WOFF2Reader(BytesIO(data + self.file.read()))
+
+ def test_incorrect_uncompressed_size(self):
+ decompress_backup = brotli.decompress
+ brotli.decompress = lambda data: b"" # return empty byte string
+ with self.assertRaisesRegex(
+ ttLib.TTLibError, "unexpected size for decompressed"
+ ):
+ WOFF2Reader(self.file)
+ brotli.decompress = decompress_backup
+
+ def test_incorrect_file_size(self):
+ data = self.file.read(woff2DirectorySize)
+ header = sstruct.unpack(woff2DirectoryFormat, data)
+ header["length"] -= 1
+ data = sstruct.pack(woff2DirectoryFormat, header)
+ with self.assertRaisesRegex(
+ ttLib.TTLibError, "doesn't match the actual file size"
+ ):
+ WOFF2Reader(BytesIO(data + self.file.read()))
+
+ def test_num_tables(self):
+ tags = [t for t in self.font.keys() if t not in ("GlyphOrder", "DSIG")]
+ data = self.file.read(woff2DirectorySize)
+ header = sstruct.unpack(woff2DirectoryFormat, data)
+ self.assertEqual(header["numTables"], len(tags))
+
+ def test_table_tags(self):
+ tags = set([t for t in self.font.keys() if t not in ("GlyphOrder", "DSIG")])
+ reader = WOFF2Reader(self.file)
+ self.assertEqual(set(reader.keys()), tags)
+
+ def test_get_normal_tables(self):
+ woff2Reader = WOFF2Reader(self.file)
+ specialTags = woff2TransformedTableTags + ("head", "GlyphOrder", "DSIG")
+ for tag in [t for t in self.font.keys() if t not in specialTags]:
+ origData = self.font.getTableData(tag)
+ decompressedData = woff2Reader[tag]
+ self.assertEqual(origData, decompressedData)
+
+ def test_reconstruct_unknown(self):
+ reader = WOFF2Reader(self.file)
+ with self.assertRaisesRegex(ttLib.TTLibError, "transform for table .* unknown"):
+ reader.reconstructTable("head")
class WOFF2ReaderTTFTest(WOFF2ReaderTest):
- """ Tests specific to TT-flavored fonts. """
-
- @classmethod
- def setUpClass(cls):
- cls.file = BytesIO(TT_WOFF2.getvalue())
- cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- cls.font.importXML(TTX)
-
- def setUp(self):
- self.file.seek(0)
-
- def test_reconstruct_glyf(self):
- woff2Reader = WOFF2Reader(self.file)
- reconstructedData = woff2Reader['glyf']
- self.assertEqual(self.font.getTableData('glyf'), reconstructedData)
-
- def test_reconstruct_loca(self):
- woff2Reader = WOFF2Reader(self.file)
- reconstructedData = woff2Reader['loca']
- self.font.getTableData("glyf") # 'glyf' needs to be compiled before 'loca'
- self.assertEqual(self.font.getTableData('loca'), reconstructedData)
- self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data'))
-
- def test_reconstruct_loca_not_match_orig_size(self):
- reader = WOFF2Reader(self.file)
- reader.tables['loca'].origLength -= 1
- with self.assertRaisesRegex(
- ttLib.TTLibError, "'loca' table doesn't match original size"):
- reader.reconstructTable('loca')
+ """Tests specific to TT-flavored fonts."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.file = BytesIO(TT_WOFF2.getvalue())
+ cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ cls.font.importXML(TTX)
+
+ def setUp(self):
+ self.file.seek(0)
+
+ def test_reconstruct_glyf(self):
+ woff2Reader = WOFF2Reader(self.file)
+ reconstructedData = woff2Reader["glyf"]
+ self.assertEqual(self.font.getTableData("glyf"), reconstructedData)
+
+ def test_reconstruct_loca(self):
+ woff2Reader = WOFF2Reader(self.file)
+ reconstructedData = woff2Reader["loca"]
+ self.font.getTableData("glyf") # 'glyf' needs to be compiled before 'loca'
+ self.assertEqual(self.font.getTableData("loca"), reconstructedData)
+ self.assertTrue(hasattr(woff2Reader.tables["glyf"], "data"))
+
+ def test_reconstruct_loca_not_match_orig_size(self):
+ reader = WOFF2Reader(self.file)
+ reader.tables["loca"].origLength -= 1
+ with self.assertRaisesRegex(
+ ttLib.TTLibError, "'loca' table doesn't match original size"
+ ):
+ reader.reconstructTable("loca")
def normalise_table(font, tag, padding=4):
- """ Return normalised table data. Keep 'font' instance unmodified. """
- assert tag in ('glyf', 'loca', 'head')
- assert tag in font
- if tag == 'head':
- origHeadFlags = font['head'].flags
- font['head'].flags |= (1 << 11)
- tableData = font['head'].compile(font)
- if font.sfntVersion in ("\x00\x01\x00\x00", "true"):
- assert {'glyf', 'loca', 'head'}.issubset(font.keys())
- origIndexFormat = font['head'].indexToLocFormat
- if hasattr(font['loca'], 'locations'):
- origLocations = font['loca'].locations[:]
- else:
- origLocations = []
- glyfTable = ttLib.newTable('glyf')
- glyfTable.decompile(font.getTableData('glyf'), font)
- glyfTable.padding = padding
- if tag == 'glyf':
- tableData = glyfTable.compile(font)
- elif tag == 'loca':
- glyfTable.compile(font)
- tableData = font['loca'].compile(font)
- if tag == 'head':
- glyfTable.compile(font)
- font['loca'].compile(font)
- tableData = font['head'].compile(font)
- font['head'].indexToLocFormat = origIndexFormat
- font['loca'].set(origLocations)
- if tag == 'head':
- font['head'].flags = origHeadFlags
- return tableData
+ """Return normalised table data. Keep 'font' instance unmodified."""
+ assert tag in ("glyf", "loca", "head")
+ assert tag in font
+ if tag == "head":
+ origHeadFlags = font["head"].flags
+ font["head"].flags |= 1 << 11
+ tableData = font["head"].compile(font)
+ if font.sfntVersion in ("\x00\x01\x00\x00", "true"):
+ assert {"glyf", "loca", "head"}.issubset(font.keys())
+ origIndexFormat = font["head"].indexToLocFormat
+ if hasattr(font["loca"], "locations"):
+ origLocations = font["loca"].locations[:]
+ else:
+ origLocations = []
+ glyfTable = ttLib.newTable("glyf")
+ glyfTable.decompile(font.getTableData("glyf"), font)
+ glyfTable.padding = padding
+ if tag == "glyf":
+ tableData = glyfTable.compile(font)
+ elif tag == "loca":
+ glyfTable.compile(font)
+ tableData = font["loca"].compile(font)
+ if tag == "head":
+ glyfTable.compile(font)
+ font["loca"].compile(font)
+ tableData = font["head"].compile(font)
+ font["head"].indexToLocFormat = origIndexFormat
+ font["loca"].set(origLocations)
+ if tag == "head":
+ font["head"].flags = origHeadFlags
+ return tableData
def normalise_font(font, padding=4):
- """ Return normalised font data. Keep 'font' instance unmodified. """
- # drop DSIG but keep a copy
- DSIG_copy = copy.deepcopy(font['DSIG'])
- del font['DSIG']
- # override TTFont attributes
- origFlavor = font.flavor
- origRecalcBBoxes = font.recalcBBoxes
- origRecalcTimestamp = font.recalcTimestamp
- origLazy = font.lazy
- font.flavor = None
- font.recalcBBoxes = False
- font.recalcTimestamp = False
- font.lazy = True
- # save font to temporary stream
- infile = BytesIO()
- font.save(infile)
- infile.seek(0)
- # reorder tables alphabetically
- outfile = BytesIO()
- reader = ttLib.sfnt.SFNTReader(infile)
- writer = ttLib.sfnt.SFNTWriter(
- outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData)
- for tag in sorted(reader.keys()):
- if tag in woff2TransformedTableTags + ('head',):
- writer[tag] = normalise_table(font, tag, padding)
- else:
- writer[tag] = reader[tag]
- writer.close()
- # restore font attributes
- font['DSIG'] = DSIG_copy
- font.flavor = origFlavor
- font.recalcBBoxes = origRecalcBBoxes
- font.recalcTimestamp = origRecalcTimestamp
- font.lazy = origLazy
- return outfile.getvalue()
+ """Return normalised font data. Keep 'font' instance unmodified."""
+ # drop DSIG but keep a copy
+ DSIG_copy = copy.deepcopy(font["DSIG"])
+ del font["DSIG"]
+ # override TTFont attributes
+ origFlavor = font.flavor
+ origRecalcBBoxes = font.recalcBBoxes
+ origRecalcTimestamp = font.recalcTimestamp
+ origLazy = font.lazy
+ font.flavor = None
+ font.recalcBBoxes = False
+ font.recalcTimestamp = False
+ font.lazy = True
+ # save font to temporary stream
+ infile = BytesIO()
+ font.save(infile)
+ infile.seek(0)
+ # reorder tables alphabetically
+ outfile = BytesIO()
+ reader = ttLib.sfnt.SFNTReader(infile)
+ writer = ttLib.sfnt.SFNTWriter(
+ outfile,
+ len(reader.tables),
+ reader.sfntVersion,
+ reader.flavor,
+ reader.flavorData,
+ )
+ for tag in sorted(reader.keys()):
+ if tag in woff2TransformedTableTags + ("head",):
+ writer[tag] = normalise_table(font, tag, padding)
+ else:
+ writer[tag] = reader[tag]
+ writer.close()
+ # restore font attributes
+ font["DSIG"] = DSIG_copy
+ font.flavor = origFlavor
+ font.recalcBBoxes = origRecalcBBoxes
+ font.recalcTimestamp = origRecalcTimestamp
+ font.lazy = origLazy
+ return outfile.getvalue()
class WOFF2DirectoryEntryTest(unittest.TestCase):
-
- def setUp(self):
- self.entry = WOFF2DirectoryEntry()
-
- def test_not_enough_data_table_flags(self):
- with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"):
- self.entry.fromString(b"")
-
- def test_not_enough_data_table_tag(self):
- incompleteData = bytearray([0x3F, 0, 0, 0])
- with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"):
- self.entry.fromString(bytes(incompleteData))
-
- def test_loca_zero_transformLength(self):
- data = bytechr(getKnownTagIndex('loca')) # flags
- data += packBase128(random.randint(1, 100)) # origLength
- data += packBase128(1) # non-zero transformLength
- with self.assertRaisesRegex(
- ttLib.TTLibError, "transformLength of the 'loca' table must be 0"):
- self.entry.fromString(data)
-
- def test_fromFile(self):
- unknownTag = Tag('ZZZZ')
- data = bytechr(getKnownTagIndex(unknownTag))
- data += unknownTag.tobytes()
- data += packBase128(random.randint(1, 100))
- expectedPos = len(data)
- f = BytesIO(data + b'\0'*100)
- self.entry.fromFile(f)
- self.assertEqual(f.tell(), expectedPos)
-
- def test_transformed_toString(self):
- self.entry.tag = Tag('glyf')
- self.entry.flags = getKnownTagIndex(self.entry.tag)
- self.entry.origLength = random.randint(101, 200)
- self.entry.length = random.randint(1, 100)
- expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) +
- base128Size(self.entry.length))
- data = self.entry.toString()
- self.assertEqual(len(data), expectedSize)
-
- def test_known_toString(self):
- self.entry.tag = Tag('head')
- self.entry.flags = getKnownTagIndex(self.entry.tag)
- self.entry.origLength = 54
- expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength))
- data = self.entry.toString()
- self.assertEqual(len(data), expectedSize)
-
- def test_unknown_toString(self):
- self.entry.tag = Tag('ZZZZ')
- self.entry.flags = woff2UnknownTagIndex
- self.entry.origLength = random.randint(1, 100)
- expectedSize = (woff2FlagsSize + woff2UnknownTagSize +
- base128Size(self.entry.origLength))
- data = self.entry.toString()
- self.assertEqual(len(data), expectedSize)
-
- def test_glyf_loca_transform_flags(self):
- for tag in ("glyf", "loca"):
- entry = WOFF2DirectoryEntry()
- entry.tag = Tag(tag)
- entry.flags = getKnownTagIndex(entry.tag)
-
- self.assertEqual(entry.transformVersion, 0)
- self.assertTrue(entry.transformed)
-
- entry.transformed = False
-
- self.assertEqual(entry.transformVersion, 3)
- self.assertEqual(entry.flags & 0b11000000, (3 << 6))
- self.assertFalse(entry.transformed)
-
- def test_other_transform_flags(self):
- entry = WOFF2DirectoryEntry()
- entry.tag = Tag('ZZZZ')
- entry.flags = woff2UnknownTagIndex
-
- self.assertEqual(entry.transformVersion, 0)
- self.assertFalse(entry.transformed)
-
- entry.transformed = True
-
- self.assertEqual(entry.transformVersion, 1)
- self.assertEqual(entry.flags & 0b11000000, (1 << 6))
- self.assertTrue(entry.transformed)
+ def setUp(self):
+ self.entry = WOFF2DirectoryEntry()
+
+ def test_not_enough_data_table_flags(self):
+ with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"):
+ self.entry.fromString(b"")
+
+ def test_not_enough_data_table_tag(self):
+ incompleteData = bytearray([0x3F, 0, 0, 0])
+ with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"):
+ self.entry.fromString(bytes(incompleteData))
+
+ def test_loca_zero_transformLength(self):
+ data = bytechr(getKnownTagIndex("loca")) # flags
+ data += packBase128(random.randint(1, 100)) # origLength
+ data += packBase128(1) # non-zero transformLength
+ with self.assertRaisesRegex(
+ ttLib.TTLibError, "transformLength of the 'loca' table must be 0"
+ ):
+ self.entry.fromString(data)
+
+ def test_fromFile(self):
+ unknownTag = Tag("ZZZZ")
+ data = bytechr(getKnownTagIndex(unknownTag))
+ data += unknownTag.tobytes()
+ data += packBase128(random.randint(1, 100))
+ expectedPos = len(data)
+ f = BytesIO(data + b"\0" * 100)
+ self.entry.fromFile(f)
+ self.assertEqual(f.tell(), expectedPos)
+
+ def test_transformed_toString(self):
+ self.entry.tag = Tag("glyf")
+ self.entry.flags = getKnownTagIndex(self.entry.tag)
+ self.entry.origLength = random.randint(101, 200)
+ self.entry.length = random.randint(1, 100)
+ expectedSize = (
+ woff2FlagsSize
+ + base128Size(self.entry.origLength)
+ + base128Size(self.entry.length)
+ )
+ data = self.entry.toString()
+ self.assertEqual(len(data), expectedSize)
+
+ def test_known_toString(self):
+ self.entry.tag = Tag("head")
+ self.entry.flags = getKnownTagIndex(self.entry.tag)
+ self.entry.origLength = 54
+ expectedSize = woff2FlagsSize + base128Size(self.entry.origLength)
+ data = self.entry.toString()
+ self.assertEqual(len(data), expectedSize)
+
+ def test_unknown_toString(self):
+ self.entry.tag = Tag("ZZZZ")
+ self.entry.flags = woff2UnknownTagIndex
+ self.entry.origLength = random.randint(1, 100)
+ expectedSize = (
+ woff2FlagsSize + woff2UnknownTagSize + base128Size(self.entry.origLength)
+ )
+ data = self.entry.toString()
+ self.assertEqual(len(data), expectedSize)
+
+ def test_glyf_loca_transform_flags(self):
+ for tag in ("glyf", "loca"):
+ entry = WOFF2DirectoryEntry()
+ entry.tag = Tag(tag)
+ entry.flags = getKnownTagIndex(entry.tag)
+
+ self.assertEqual(entry.transformVersion, 0)
+ self.assertTrue(entry.transformed)
+
+ entry.transformed = False
+
+ self.assertEqual(entry.transformVersion, 3)
+ self.assertEqual(entry.flags & 0b11000000, (3 << 6))
+ self.assertFalse(entry.transformed)
+
+ def test_other_transform_flags(self):
+ entry = WOFF2DirectoryEntry()
+ entry.tag = Tag("ZZZZ")
+ entry.flags = woff2UnknownTagIndex
+
+ self.assertEqual(entry.transformVersion, 0)
+ self.assertFalse(entry.transformed)
+
+ entry.transformed = True
+
+ self.assertEqual(entry.transformVersion, 1)
+ self.assertEqual(entry.flags & 0b11000000, (1 << 6))
+ self.assertTrue(entry.transformed)
class DummyReader(WOFF2Reader):
-
- def __init__(self, file, checkChecksums=1, fontNumber=-1):
- self.file = file
- for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength',
- 'metaOrigLength', 'privLength', 'privOffset'):
- setattr(self, attr, 0)
- self.tables = {}
+ def __init__(self, file, checkChecksums=1, fontNumber=-1):
+ self.file = file
+ for attr in (
+ "majorVersion",
+ "minorVersion",
+ "metaOffset",
+ "metaLength",
+ "metaOrigLength",
+ "privLength",
+ "privOffset",
+ ):
+ setattr(self, attr, 0)
+ self.tables = {}
class WOFF2FlavorDataTest(unittest.TestCase):
-
- @classmethod
- def setUpClass(cls):
- assert os.path.exists(METADATA)
- with open(METADATA, 'rb') as f:
- cls.xml_metadata = f.read()
- cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT)
- # make random byte strings; font data must be 4-byte aligned
- cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80)))
- cls.privData = bytes(bytearray(random.sample(range(0, 256), 20)))
-
- def setUp(self):
- self.file = BytesIO(self.fontdata)
- self.file.seek(0, 2)
-
- def test_get_metaData_no_privData(self):
- self.file.write(self.compressed_metadata)
- reader = DummyReader(self.file)
- reader.metaOffset = len(self.fontdata)
- reader.metaLength = len(self.compressed_metadata)
- reader.metaOrigLength = len(self.xml_metadata)
- flavorData = WOFF2FlavorData(reader)
- self.assertEqual(self.xml_metadata, flavorData.metaData)
-
- def test_get_privData_no_metaData(self):
- self.file.write(self.privData)
- reader = DummyReader(self.file)
- reader.privOffset = len(self.fontdata)
- reader.privLength = len(self.privData)
- flavorData = WOFF2FlavorData(reader)
- self.assertEqual(self.privData, flavorData.privData)
-
- def test_get_metaData_and_privData(self):
- self.file.write(self.compressed_metadata + self.privData)
- reader = DummyReader(self.file)
- reader.metaOffset = len(self.fontdata)
- reader.metaLength = len(self.compressed_metadata)
- reader.metaOrigLength = len(self.xml_metadata)
- reader.privOffset = reader.metaOffset + reader.metaLength
- reader.privLength = len(self.privData)
- flavorData = WOFF2FlavorData(reader)
- self.assertEqual(self.xml_metadata, flavorData.metaData)
- self.assertEqual(self.privData, flavorData.privData)
-
- def test_get_major_minorVersion(self):
- reader = DummyReader(self.file)
- reader.majorVersion = reader.minorVersion = 1
- flavorData = WOFF2FlavorData(reader)
- self.assertEqual(flavorData.majorVersion, 1)
- self.assertEqual(flavorData.minorVersion, 1)
-
- def test_mutually_exclusive_args(self):
- msg = "arguments are mutually exclusive"
- reader = DummyReader(self.file)
- with self.assertRaisesRegex(TypeError, msg):
- WOFF2FlavorData(reader, transformedTables={"hmtx"})
- with self.assertRaisesRegex(TypeError, msg):
- WOFF2FlavorData(reader, data=WOFF2FlavorData())
-
- def test_transformedTables_default(self):
- flavorData = WOFF2FlavorData()
- self.assertEqual(flavorData.transformedTables, set(woff2TransformedTableTags))
-
- def test_transformedTables_invalid(self):
- msg = r"'glyf' and 'loca' must be transformed \(or not\) together"
-
- with self.assertRaisesRegex(ValueError, msg):
- WOFF2FlavorData(transformedTables={"glyf"})
-
- with self.assertRaisesRegex(ValueError, msg):
- WOFF2FlavorData(transformedTables={"loca"})
+ @classmethod
+ def setUpClass(cls):
+ assert os.path.exists(METADATA)
+ with open(METADATA, "rb") as f:
+ cls.xml_metadata = f.read()
+ cls.compressed_metadata = brotli.compress(
+ cls.xml_metadata, mode=brotli.MODE_TEXT
+ )
+ # make random byte strings; font data must be 4-byte aligned
+ cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80)))
+ cls.privData = bytes(bytearray(random.sample(range(0, 256), 20)))
+
+ def setUp(self):
+ self.file = BytesIO(self.fontdata)
+ self.file.seek(0, 2)
+
+ def test_get_metaData_no_privData(self):
+ self.file.write(self.compressed_metadata)
+ reader = DummyReader(self.file)
+ reader.metaOffset = len(self.fontdata)
+ reader.metaLength = len(self.compressed_metadata)
+ reader.metaOrigLength = len(self.xml_metadata)
+ flavorData = WOFF2FlavorData(reader)
+ self.assertEqual(self.xml_metadata, flavorData.metaData)
+
+ def test_get_privData_no_metaData(self):
+ self.file.write(self.privData)
+ reader = DummyReader(self.file)
+ reader.privOffset = len(self.fontdata)
+ reader.privLength = len(self.privData)
+ flavorData = WOFF2FlavorData(reader)
+ self.assertEqual(self.privData, flavorData.privData)
+
+ def test_get_metaData_and_privData(self):
+ self.file.write(self.compressed_metadata + self.privData)
+ reader = DummyReader(self.file)
+ reader.metaOffset = len(self.fontdata)
+ reader.metaLength = len(self.compressed_metadata)
+ reader.metaOrigLength = len(self.xml_metadata)
+ reader.privOffset = reader.metaOffset + reader.metaLength
+ reader.privLength = len(self.privData)
+ flavorData = WOFF2FlavorData(reader)
+ self.assertEqual(self.xml_metadata, flavorData.metaData)
+ self.assertEqual(self.privData, flavorData.privData)
+
+ def test_get_major_minorVersion(self):
+ reader = DummyReader(self.file)
+ reader.majorVersion = reader.minorVersion = 1
+ flavorData = WOFF2FlavorData(reader)
+ self.assertEqual(flavorData.majorVersion, 1)
+ self.assertEqual(flavorData.minorVersion, 1)
+
+ def test_mutually_exclusive_args(self):
+ msg = "arguments are mutually exclusive"
+ reader = DummyReader(self.file)
+ with self.assertRaisesRegex(TypeError, msg):
+ WOFF2FlavorData(reader, transformedTables={"hmtx"})
+ with self.assertRaisesRegex(TypeError, msg):
+ WOFF2FlavorData(reader, data=WOFF2FlavorData())
+
+ def test_transformedTables_default(self):
+ flavorData = WOFF2FlavorData()
+ self.assertEqual(flavorData.transformedTables, set(woff2TransformedTableTags))
+
+ def test_transformedTables_invalid(self):
+ msg = r"'glyf' and 'loca' must be transformed \(or not\) together"
+
+ with self.assertRaisesRegex(ValueError, msg):
+ WOFF2FlavorData(transformedTables={"glyf"})
+
+ with self.assertRaisesRegex(ValueError, msg):
+ WOFF2FlavorData(transformedTables={"loca"})
class WOFF2WriterTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.font = ttLib.TTFont(
+ recalcBBoxes=False, recalcTimestamp=False, flavor="woff2"
+ )
+ cls.font.importXML(OTX)
+ cls.tags = sorted(t for t in cls.font.keys() if t != "GlyphOrder")
+ cls.numTables = len(cls.tags)
+ cls.file = BytesIO(CFF_WOFF2.getvalue())
+ cls.file.seek(0, 2)
+ cls.length = (cls.file.tell() + 3) & ~3
+ cls.setUpFlavorData()
+
+ @classmethod
+ def setUpFlavorData(cls):
+ assert os.path.exists(METADATA)
+ with open(METADATA, "rb") as f:
+ cls.xml_metadata = f.read()
+ cls.compressed_metadata = brotli.compress(
+ cls.xml_metadata, mode=brotli.MODE_TEXT
+ )
+ cls.privData = bytes(bytearray(random.sample(range(0, 256), 20)))
+
+ def setUp(self):
+ self.file.seek(0)
+ self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
+
+ def test_DSIG_dropped(self):
+ self.writer["DSIG"] = b"\0"
+ self.assertEqual(len(self.writer.tables), 0)
+ self.assertEqual(self.writer.numTables, self.numTables - 1)
+
+ def test_no_rewrite_table(self):
+ self.writer["ZZZZ"] = b"\0"
+ with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"):
+ self.writer["ZZZZ"] = b"\0"
+
+ def test_num_tables(self):
+ self.writer["ABCD"] = b"\0"
+ with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"):
+ self.writer.close()
+
+ def test_required_tables(self):
+ font = ttLib.TTFont(flavor="woff2")
+ with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"):
+ font.save(BytesIO())
+
+ def test_head_transform_flag(self):
+ headData = self.font.getTableData("head")
+ origFlags = byteord(headData[16])
+ woff2font = ttLib.TTFont(self.file)
+ newHeadData = woff2font.getTableData("head")
+ modifiedFlags = byteord(newHeadData[16])
+ self.assertNotEqual(origFlags, modifiedFlags)
+ restoredFlags = modifiedFlags & ~0x08 # turn off bit 11
+ self.assertEqual(origFlags, restoredFlags)
+
+ def test_tables_sorted_alphabetically(self):
+ expected = sorted([t for t in self.tags if t != "DSIG"])
+ woff2font = ttLib.TTFont(self.file)
+ self.assertEqual(expected, list(woff2font.reader.keys()))
+
+ def test_checksums(self):
+ normFile = BytesIO(normalise_font(self.font, padding=4))
+ normFile.seek(0)
+ normFont = ttLib.TTFont(normFile, checkChecksums=2)
+ w2font = ttLib.TTFont(self.file)
+ # force reconstructing glyf table using 4-byte padding
+ w2font.reader.padding = 4
+ for tag in [t for t in self.tags if t != "DSIG"]:
+ w2data = w2font.reader[tag]
+ normData = normFont.reader[tag]
+ if tag == "head":
+ w2data = w2data[:8] + b"\0\0\0\0" + w2data[12:]
+ normData = normData[:8] + b"\0\0\0\0" + normData[12:]
+ w2CheckSum = ttLib.sfnt.calcChecksum(w2data)
+ normCheckSum = ttLib.sfnt.calcChecksum(normData)
+ self.assertEqual(w2CheckSum, normCheckSum)
+ normCheckSumAdjustment = normFont["head"].checkSumAdjustment
+ self.assertEqual(normCheckSumAdjustment, w2font["head"].checkSumAdjustment)
+
+ def test_calcSFNTChecksumsLengthsAndOffsets(self):
+ normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4)))
+ for tag in self.tags:
+ self.writer[tag] = self.font.getTableData(tag)
+ self.writer._normaliseGlyfAndLoca(padding=4)
+ self.writer._setHeadTransformFlag()
+ self.writer.tables = OrderedDict(sorted(self.writer.tables.items()))
+ self.writer._calcSFNTChecksumsLengthsAndOffsets()
+ for tag, entry in normFont.reader.tables.items():
+ self.assertEqual(entry.offset, self.writer.tables[tag].origOffset)
+ self.assertEqual(entry.length, self.writer.tables[tag].origLength)
+ self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum)
+
+ def test_bad_sfntVersion(self):
+ for i in range(self.numTables):
+ self.writer[bytechr(65 + i) * 4] = b"\0"
+ self.writer.sfntVersion = "ZZZZ"
+ with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"):
+ self.writer.close()
+
+ def test_calcTotalSize_no_flavorData(self):
+ expected = self.length
+ self.writer.file = BytesIO()
+ for tag in self.tags:
+ self.writer[tag] = self.font.getTableData(tag)
+ self.writer.close()
+ self.assertEqual(expected, self.writer.length)
+ self.assertEqual(expected, self.writer.file.tell())
+
+ def test_calcTotalSize_with_metaData(self):
+ expected = self.length + len(self.compressed_metadata)
+ flavorData = self.writer.flavorData = WOFF2FlavorData()
+ flavorData.metaData = self.xml_metadata
+ self.writer.file = BytesIO()
+ for tag in self.tags:
+ self.writer[tag] = self.font.getTableData(tag)
+ self.writer.close()
+ self.assertEqual(expected, self.writer.length)
+ self.assertEqual(expected, self.writer.file.tell())
+
+ def test_calcTotalSize_with_privData(self):
+ expected = self.length + len(self.privData)
+ flavorData = self.writer.flavorData = WOFF2FlavorData()
+ flavorData.privData = self.privData
+ self.writer.file = BytesIO()
+ for tag in self.tags:
+ self.writer[tag] = self.font.getTableData(tag)
+ self.writer.close()
+ self.assertEqual(expected, self.writer.length)
+ self.assertEqual(expected, self.writer.file.tell())
+
+ def test_calcTotalSize_with_metaData_and_privData(self):
+ metaDataLength = (len(self.compressed_metadata) + 3) & ~3
+ expected = self.length + metaDataLength + len(self.privData)
+ flavorData = self.writer.flavorData = WOFF2FlavorData()
+ flavorData.metaData = self.xml_metadata
+ flavorData.privData = self.privData
+ self.writer.file = BytesIO()
+ for tag in self.tags:
+ self.writer[tag] = self.font.getTableData(tag)
+ self.writer.close()
+ self.assertEqual(expected, self.writer.length)
+ self.assertEqual(expected, self.writer.file.tell())
+
+ def test_getVersion(self):
+ # no version
+ self.assertEqual((0, 0), self.writer._getVersion())
+ # version from head.fontRevision
+ fontRevision = self.font["head"].fontRevision
+ versionTuple = tuple(int(i) for i in str(fontRevision).split("."))
+ entry = self.writer.tables["head"] = ttLib.newTable("head")
+ entry.data = self.font.getTableData("head")
+ self.assertEqual(versionTuple, self.writer._getVersion())
+ # version from writer.flavorData
+ flavorData = self.writer.flavorData = WOFF2FlavorData()
+ flavorData.majorVersion, flavorData.minorVersion = (10, 11)
+ self.assertEqual((10, 11), self.writer._getVersion())
+
+ def test_hmtx_trasform(self):
+ tableTransforms = {"glyf", "loca", "hmtx"}
+
+ writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
+ writer.flavorData = WOFF2FlavorData(transformedTables=tableTransforms)
+
+ for tag in self.tags:
+ writer[tag] = self.font.getTableData(tag)
+ writer.close()
+
+ # enabling hmtx transform has no effect when font has no glyf table
+ self.assertEqual(writer.file.getvalue(), CFF_WOFF2.getvalue())
+
+ def test_no_transforms(self):
+ writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
+ writer.flavorData = WOFF2FlavorData(transformedTables=())
+
+ for tag in self.tags:
+ writer[tag] = self.font.getTableData(tag)
+ writer.close()
+
+ # transforms settings have no effect when font is CFF-flavored, since
+ # all the current transforms only apply to TrueType-flavored fonts.
+ self.assertEqual(writer.file.getvalue(), CFF_WOFF2.getvalue())
- @classmethod
- def setUpClass(cls):
- cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2")
- cls.font.importXML(OTX)
- cls.tags = sorted(t for t in cls.font.keys() if t != 'GlyphOrder')
- cls.numTables = len(cls.tags)
- cls.file = BytesIO(CFF_WOFF2.getvalue())
- cls.file.seek(0, 2)
- cls.length = (cls.file.tell() + 3) & ~3
- cls.setUpFlavorData()
-
- @classmethod
- def setUpFlavorData(cls):
- assert os.path.exists(METADATA)
- with open(METADATA, 'rb') as f:
- cls.xml_metadata = f.read()
- cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT)
- cls.privData = bytes(bytearray(random.sample(range(0, 256), 20)))
-
- def setUp(self):
- self.file.seek(0)
- self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
-
- def test_DSIG_dropped(self):
- self.writer['DSIG'] = b"\0"
- self.assertEqual(len(self.writer.tables), 0)
- self.assertEqual(self.writer.numTables, self.numTables-1)
-
- def test_no_rewrite_table(self):
- self.writer['ZZZZ'] = b"\0"
- with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"):
- self.writer['ZZZZ'] = b"\0"
-
- def test_num_tables(self):
- self.writer['ABCD'] = b"\0"
- with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"):
- self.writer.close()
-
- def test_required_tables(self):
- font = ttLib.TTFont(flavor="woff2")
- with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"):
- font.save(BytesIO())
-
- def test_head_transform_flag(self):
- headData = self.font.getTableData('head')
- origFlags = byteord(headData[16])
- woff2font = ttLib.TTFont(self.file)
- newHeadData = woff2font.getTableData('head')
- modifiedFlags = byteord(newHeadData[16])
- self.assertNotEqual(origFlags, modifiedFlags)
- restoredFlags = modifiedFlags & ~0x08 # turn off bit 11
- self.assertEqual(origFlags, restoredFlags)
-
- def test_tables_sorted_alphabetically(self):
- expected = sorted([t for t in self.tags if t != 'DSIG'])
- woff2font = ttLib.TTFont(self.file)
- self.assertEqual(expected, list(woff2font.reader.keys()))
-
- def test_checksums(self):
- normFile = BytesIO(normalise_font(self.font, padding=4))
- normFile.seek(0)
- normFont = ttLib.TTFont(normFile, checkChecksums=2)
- w2font = ttLib.TTFont(self.file)
- # force reconstructing glyf table using 4-byte padding
- w2font.reader.padding = 4
- for tag in [t for t in self.tags if t != 'DSIG']:
- w2data = w2font.reader[tag]
- normData = normFont.reader[tag]
- if tag == "head":
- w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:]
- normData = normData[:8] + b'\0\0\0\0' + normData[12:]
- w2CheckSum = ttLib.sfnt.calcChecksum(w2data)
- normCheckSum = ttLib.sfnt.calcChecksum(normData)
- self.assertEqual(w2CheckSum, normCheckSum)
- normCheckSumAdjustment = normFont['head'].checkSumAdjustment
- self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment)
-
- def test_calcSFNTChecksumsLengthsAndOffsets(self):
- normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4)))
- for tag in self.tags:
- self.writer[tag] = self.font.getTableData(tag)
- self.writer._normaliseGlyfAndLoca(padding=4)
- self.writer._setHeadTransformFlag()
- self.writer.tables = OrderedDict(sorted(self.writer.tables.items()))
- self.writer._calcSFNTChecksumsLengthsAndOffsets()
- for tag, entry in normFont.reader.tables.items():
- self.assertEqual(entry.offset, self.writer.tables[tag].origOffset)
- self.assertEqual(entry.length, self.writer.tables[tag].origLength)
- self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum)
-
- def test_bad_sfntVersion(self):
- for i in range(self.numTables):
- self.writer[bytechr(65 + i)*4] = b"\0"
- self.writer.sfntVersion = 'ZZZZ'
- with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"):
- self.writer.close()
-
- def test_calcTotalSize_no_flavorData(self):
- expected = self.length
- self.writer.file = BytesIO()
- for tag in self.tags:
- self.writer[tag] = self.font.getTableData(tag)
- self.writer.close()
- self.assertEqual(expected, self.writer.length)
- self.assertEqual(expected, self.writer.file.tell())
-
- def test_calcTotalSize_with_metaData(self):
- expected = self.length + len(self.compressed_metadata)
- flavorData = self.writer.flavorData = WOFF2FlavorData()
- flavorData.metaData = self.xml_metadata
- self.writer.file = BytesIO()
- for tag in self.tags:
- self.writer[tag] = self.font.getTableData(tag)
- self.writer.close()
- self.assertEqual(expected, self.writer.length)
- self.assertEqual(expected, self.writer.file.tell())
-
- def test_calcTotalSize_with_privData(self):
- expected = self.length + len(self.privData)
- flavorData = self.writer.flavorData = WOFF2FlavorData()
- flavorData.privData = self.privData
- self.writer.file = BytesIO()
- for tag in self.tags:
- self.writer[tag] = self.font.getTableData(tag)
- self.writer.close()
- self.assertEqual(expected, self.writer.length)
- self.assertEqual(expected, self.writer.file.tell())
-
- def test_calcTotalSize_with_metaData_and_privData(self):
- metaDataLength = (len(self.compressed_metadata) + 3) & ~3
- expected = self.length + metaDataLength + len(self.privData)
- flavorData = self.writer.flavorData = WOFF2FlavorData()
- flavorData.metaData = self.xml_metadata
- flavorData.privData = self.privData
- self.writer.file = BytesIO()
- for tag in self.tags:
- self.writer[tag] = self.font.getTableData(tag)
- self.writer.close()
- self.assertEqual(expected, self.writer.length)
- self.assertEqual(expected, self.writer.file.tell())
-
- def test_getVersion(self):
- # no version
- self.assertEqual((0, 0), self.writer._getVersion())
- # version from head.fontRevision
- fontRevision = self.font['head'].fontRevision
- versionTuple = tuple(int(i) for i in str(fontRevision).split("."))
- entry = self.writer.tables['head'] = ttLib.newTable('head')
- entry.data = self.font.getTableData('head')
- self.assertEqual(versionTuple, self.writer._getVersion())
- # version from writer.flavorData
- flavorData = self.writer.flavorData = WOFF2FlavorData()
- flavorData.majorVersion, flavorData.minorVersion = (10, 11)
- self.assertEqual((10, 11), self.writer._getVersion())
-
- def test_hmtx_trasform(self):
- tableTransforms = {"glyf", "loca", "hmtx"}
-
- writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
- writer.flavorData = WOFF2FlavorData(transformedTables=tableTransforms)
-
- for tag in self.tags:
- writer[tag] = self.font.getTableData(tag)
- writer.close()
-
- # enabling hmtx transform has no effect when font has no glyf table
- self.assertEqual(writer.file.getvalue(), CFF_WOFF2.getvalue())
-
- def test_no_transforms(self):
- writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
- writer.flavorData = WOFF2FlavorData(transformedTables=())
-
- for tag in self.tags:
- writer[tag] = self.font.getTableData(tag)
- writer.close()
-
- # transforms settings have no effect when font is CFF-flavored, since
- # all the current transforms only apply to TrueType-flavored fonts.
- self.assertEqual(writer.file.getvalue(), CFF_WOFF2.getvalue())
class WOFF2WriterTTFTest(WOFF2WriterTest):
-
- @classmethod
- def setUpClass(cls):
- cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2")
- cls.font.importXML(TTX)
- cls.tags = sorted(t for t in cls.font.keys() if t != 'GlyphOrder')
- cls.numTables = len(cls.tags)
- cls.file = BytesIO(TT_WOFF2.getvalue())
- cls.file.seek(0, 2)
- cls.length = (cls.file.tell() + 3) & ~3
- cls.setUpFlavorData()
-
- def test_normaliseGlyfAndLoca(self):
- normTables = {}
- for tag in ('head', 'loca', 'glyf'):
- normTables[tag] = normalise_table(self.font, tag, padding=4)
- for tag in self.tags:
- tableData = self.font.getTableData(tag)
- self.writer[tag] = tableData
- if tag in normTables:
- self.assertNotEqual(tableData, normTables[tag])
- self.writer._normaliseGlyfAndLoca(padding=4)
- self.writer._setHeadTransformFlag()
- for tag in normTables:
- self.assertEqual(self.writer.tables[tag].data, normTables[tag])
-
- def test_hmtx_trasform(self):
- tableTransforms = {"glyf", "loca", "hmtx"}
-
- writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
- writer.flavorData = WOFF2FlavorData(transformedTables=tableTransforms)
-
- for tag in self.tags:
- writer[tag] = self.font.getTableData(tag)
- writer.close()
-
- length = len(writer.file.getvalue())
-
- # enabling optional hmtx transform shaves off a few bytes
- self.assertLess(length, len(TT_WOFF2.getvalue()))
-
- def test_no_transforms(self):
- writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
- writer.flavorData = WOFF2FlavorData(transformedTables=())
-
- for tag in self.tags:
- writer[tag] = self.font.getTableData(tag)
- writer.close()
-
- self.assertNotEqual(writer.file.getvalue(), TT_WOFF2.getvalue())
-
- writer.file.seek(0)
- reader = WOFF2Reader(writer.file)
- self.assertEqual(len(reader.flavorData.transformedTables), 0)
+ @classmethod
+ def setUpClass(cls):
+ cls.font = ttLib.TTFont(
+ recalcBBoxes=False, recalcTimestamp=False, flavor="woff2"
+ )
+ cls.font.importXML(TTX)
+ cls.tags = sorted(t for t in cls.font.keys() if t != "GlyphOrder")
+ cls.numTables = len(cls.tags)
+ cls.file = BytesIO(TT_WOFF2.getvalue())
+ cls.file.seek(0, 2)
+ cls.length = (cls.file.tell() + 3) & ~3
+ cls.setUpFlavorData()
+
+ def test_normaliseGlyfAndLoca(self):
+ normTables = {}
+ for tag in ("head", "loca", "glyf"):
+ normTables[tag] = normalise_table(self.font, tag, padding=4)
+ for tag in self.tags:
+ tableData = self.font.getTableData(tag)
+ self.writer[tag] = tableData
+ if tag in normTables:
+ self.assertNotEqual(tableData, normTables[tag])
+ self.writer._normaliseGlyfAndLoca(padding=4)
+ self.writer._setHeadTransformFlag()
+ for tag in normTables:
+ self.assertEqual(self.writer.tables[tag].data, normTables[tag])
+
+ def test_hmtx_trasform(self):
+ def compile_hmtx(compressed):
+ tableTransforms = woff2TransformedTableTags
+ if compressed:
+ tableTransforms += ("hmtx",)
+ writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
+ writer.flavorData = WOFF2FlavorData(transformedTables=tableTransforms)
+ for tag in self.tags:
+ writer[tag] = self.font.getTableData(tag)
+ writer.close()
+ return writer.tables["hmtx"].length
+
+ uncompressed_length = compile_hmtx(compressed=False)
+ compressed_length = compile_hmtx(compressed=True)
+
+ # enabling optional hmtx transform shaves off a few bytes
+ self.assertLess(compressed_length, uncompressed_length)
+
+ def test_no_transforms(self):
+ writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
+ writer.flavorData = WOFF2FlavorData(transformedTables=())
+
+ for tag in self.tags:
+ writer[tag] = self.font.getTableData(tag)
+ writer.close()
+
+ self.assertNotEqual(writer.file.getvalue(), TT_WOFF2.getvalue())
+
+ writer.file.seek(0)
+ reader = WOFF2Reader(writer.file)
+ self.assertEqual(len(reader.flavorData.transformedTables), 0)
class WOFF2LocaTableTest(unittest.TestCase):
-
- def setUp(self):
- self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- font['head'] = ttLib.newTable('head')
- font['loca'] = WOFF2LocaTable()
- font['glyf'] = WOFF2GlyfTable()
-
- def test_compile_short_loca(self):
- locaTable = self.font['loca']
- locaTable.set(list(range(0, 0x20000, 2)))
- self.font['glyf'].indexFormat = 0
- locaData = locaTable.compile(self.font)
- self.assertEqual(len(locaData), 0x20000)
-
- def test_compile_short_loca_overflow(self):
- locaTable = self.font['loca']
- locaTable.set(list(range(0x20000 + 1)))
- self.font['glyf'].indexFormat = 0
- with self.assertRaisesRegex(
- ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"):
- locaTable.compile(self.font)
-
- def test_compile_short_loca_not_multiples_of_2(self):
- locaTable = self.font['loca']
- locaTable.set([1, 3, 5, 7])
- self.font['glyf'].indexFormat = 0
- with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"):
- locaTable.compile(self.font)
-
- def test_compile_long_loca(self):
- locaTable = self.font['loca']
- locaTable.set(list(range(0x20001)))
- self.font['glyf'].indexFormat = 1
- locaData = locaTable.compile(self.font)
- self.assertEqual(len(locaData), 0x20001 * 4)
-
- def test_compile_set_indexToLocFormat_0(self):
- locaTable = self.font['loca']
- # offsets are all multiples of 2 and max length is < 0x10000
- locaTable.set(list(range(0, 0x20000, 2)))
- locaTable.compile(self.font)
- newIndexFormat = self.font['head'].indexToLocFormat
- self.assertEqual(0, newIndexFormat)
-
- def test_compile_set_indexToLocFormat_1(self):
- locaTable = self.font['loca']
- # offsets are not multiples of 2
- locaTable.set(list(range(10)))
- locaTable.compile(self.font)
- newIndexFormat = self.font['head'].indexToLocFormat
- self.assertEqual(1, newIndexFormat)
- # max length is >= 0x10000
- locaTable.set(list(range(0, 0x20000 + 1, 2)))
- locaTable.compile(self.font)
- newIndexFormat = self.font['head'].indexToLocFormat
- self.assertEqual(1, newIndexFormat)
+ def setUp(self):
+ self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ font["head"] = ttLib.newTable("head")
+ font["loca"] = WOFF2LocaTable()
+ font["glyf"] = WOFF2GlyfTable()
+
+ def test_compile_short_loca(self):
+ locaTable = self.font["loca"]
+ locaTable.set(list(range(0, 0x20000, 2)))
+ self.font["glyf"].indexFormat = 0
+ locaData = locaTable.compile(self.font)
+ self.assertEqual(len(locaData), 0x20000)
+
+ def test_compile_short_loca_overflow(self):
+ locaTable = self.font["loca"]
+ locaTable.set(list(range(0x20000 + 1)))
+ self.font["glyf"].indexFormat = 0
+ with self.assertRaisesRegex(
+ ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"
+ ):
+ locaTable.compile(self.font)
+
+ def test_compile_short_loca_not_multiples_of_2(self):
+ locaTable = self.font["loca"]
+ locaTable.set([1, 3, 5, 7])
+ self.font["glyf"].indexFormat = 0
+ with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"):
+ locaTable.compile(self.font)
+
+ def test_compile_long_loca(self):
+ locaTable = self.font["loca"]
+ locaTable.set(list(range(0x20001)))
+ self.font["glyf"].indexFormat = 1
+ locaData = locaTable.compile(self.font)
+ self.assertEqual(len(locaData), 0x20001 * 4)
+
+ def test_compile_set_indexToLocFormat_0(self):
+ locaTable = self.font["loca"]
+ # offsets are all multiples of 2 and max length is < 0x10000
+ locaTable.set(list(range(0, 0x20000, 2)))
+ locaTable.compile(self.font)
+ newIndexFormat = self.font["head"].indexToLocFormat
+ self.assertEqual(0, newIndexFormat)
+
+ def test_compile_set_indexToLocFormat_1(self):
+ locaTable = self.font["loca"]
+ # offsets are not multiples of 2
+ locaTable.set(list(range(10)))
+ locaTable.compile(self.font)
+ newIndexFormat = self.font["head"].indexToLocFormat
+ self.assertEqual(1, newIndexFormat)
+ # max length is >= 0x10000
+ locaTable.set(list(range(0, 0x20000 + 1, 2)))
+ locaTable.compile(self.font)
+ newIndexFormat = self.font["head"].indexToLocFormat
+ self.assertEqual(1, newIndexFormat)
class WOFF2GlyfTableTest(unittest.TestCase):
-
- @classmethod
- def setUpClass(cls):
- font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- font.importXML(TTX)
- cls.tables = {}
- cls.transformedTags = ('maxp', 'head', 'loca', 'glyf')
- for tag in reversed(cls.transformedTags): # compile in inverse order
- cls.tables[tag] = font.getTableData(tag)
- infile = BytesIO(TT_WOFF2.getvalue())
- reader = WOFF2Reader(infile)
- cls.transformedGlyfData = reader.tables['glyf'].loadData(
- reader.transformBuffer)
- cls.glyphOrder = ['.notdef'] + ["glyph%.5d" % i for i in range(1, font['maxp'].numGlyphs)]
-
- def setUp(self):
- self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- font.setGlyphOrder(self.glyphOrder)
- font['head'] = ttLib.newTable('head')
- font['maxp'] = ttLib.newTable('maxp')
- font['loca'] = WOFF2LocaTable()
- font['glyf'] = WOFF2GlyfTable()
- for tag in self.transformedTags:
- font[tag].decompile(self.tables[tag], font)
-
- def test_reconstruct_glyf_padded_4(self):
- glyfTable = WOFF2GlyfTable()
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- glyfTable.padding = 4
- data = glyfTable.compile(self.font)
- normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding)
- self.assertEqual(normGlyfData, data)
-
- def test_reconstruct_glyf_padded_2(self):
- glyfTable = WOFF2GlyfTable()
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- glyfTable.padding = 2
- data = glyfTable.compile(self.font)
- normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding)
- self.assertEqual(normGlyfData, data)
-
- def test_reconstruct_glyf_unpadded(self):
- glyfTable = WOFF2GlyfTable()
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- data = glyfTable.compile(self.font)
- self.assertEqual(self.tables['glyf'], data)
-
- def test_reconstruct_glyf_incorrect_glyphOrder(self):
- glyfTable = WOFF2GlyfTable()
- badGlyphOrder = self.font.getGlyphOrder()[:-1]
- self.font.setGlyphOrder(badGlyphOrder)
- with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"):
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
-
- def test_reconstruct_glyf_missing_glyphOrder(self):
- glyfTable = WOFF2GlyfTable()
- del self.font.glyphOrder
- numGlyphs = self.font['maxp'].numGlyphs
- del self.font['maxp']
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- expected = [".notdef"]
- expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)])
- self.assertEqual(expected, glyfTable.glyphOrder)
-
- def test_reconstruct_loca_padded_4(self):
- locaTable = self.font['loca'] = WOFF2LocaTable()
- glyfTable = self.font['glyf'] = WOFF2GlyfTable()
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- glyfTable.padding = 4
- glyfTable.compile(self.font)
- data = locaTable.compile(self.font)
- normLocaData = normalise_table(self.font, 'loca', glyfTable.padding)
- self.assertEqual(normLocaData, data)
-
- def test_reconstruct_loca_padded_2(self):
- locaTable = self.font['loca'] = WOFF2LocaTable()
- glyfTable = self.font['glyf'] = WOFF2GlyfTable()
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- glyfTable.padding = 2
- glyfTable.compile(self.font)
- data = locaTable.compile(self.font)
- normLocaData = normalise_table(self.font, 'loca', glyfTable.padding)
- self.assertEqual(normLocaData, data)
-
- def test_reconstruct_loca_unpadded(self):
- locaTable = self.font['loca'] = WOFF2LocaTable()
- glyfTable = self.font['glyf'] = WOFF2GlyfTable()
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- glyfTable.compile(self.font)
- data = locaTable.compile(self.font)
- self.assertEqual(self.tables['loca'], data)
-
- def test_reconstruct_glyf_header_not_enough_data(self):
- with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"):
- WOFF2GlyfTable().reconstruct(b"", self.font)
-
- def test_reconstruct_glyf_table_incorrect_size(self):
- msg = "incorrect size of transformed 'glyf'"
- with self.assertRaisesRegex(ttLib.TTLibError, msg):
- WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font)
- with self.assertRaisesRegex(ttLib.TTLibError, msg):
- WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font)
-
- def test_transform_glyf(self):
- glyfTable = self.font['glyf']
- data = glyfTable.transform(self.font)
- self.assertEqual(self.transformedGlyfData, data)
-
- def test_roundtrip_glyf_reconstruct_and_transform(self):
- glyfTable = WOFF2GlyfTable()
- glyfTable.reconstruct(self.transformedGlyfData, self.font)
- data = glyfTable.transform(self.font)
- self.assertEqual(self.transformedGlyfData, data)
-
- def test_roundtrip_glyf_transform_and_reconstruct(self):
- glyfTable = self.font['glyf']
- transformedData = glyfTable.transform(self.font)
- newGlyfTable = WOFF2GlyfTable()
- newGlyfTable.reconstruct(transformedData, self.font)
- newGlyfTable.padding = 4
- reconstructedData = newGlyfTable.compile(self.font)
- normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding)
- self.assertEqual(normGlyfData, reconstructedData)
+ @classmethod
+ def setUpClass(cls):
+ font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ font.importXML(TTX)
+ cls.tables = {}
+ cls.transformedTags = ("maxp", "head", "loca", "glyf")
+ for tag in reversed(cls.transformedTags): # compile in inverse order
+ cls.tables[tag] = font.getTableData(tag)
+ infile = BytesIO(TT_WOFF2.getvalue())
+ reader = WOFF2Reader(infile)
+ cls.transformedGlyfData = reader.tables["glyf"].loadData(reader.transformBuffer)
+ cls.glyphOrder = [".notdef"] + [
+ "glyph%.5d" % i for i in range(1, font["maxp"].numGlyphs)
+ ]
+
+ def setUp(self):
+ self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ font.setGlyphOrder(self.glyphOrder)
+ font["head"] = ttLib.newTable("head")
+ font["maxp"] = ttLib.newTable("maxp")
+ font["loca"] = WOFF2LocaTable()
+ font["glyf"] = WOFF2GlyfTable()
+ for tag in self.transformedTags:
+ font[tag].decompile(self.tables[tag], font)
+
+ def test_reconstruct_glyf_padded_4(self):
+ glyfTable = WOFF2GlyfTable()
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ glyfTable.padding = 4
+ data = glyfTable.compile(self.font)
+ normGlyfData = normalise_table(self.font, "glyf", glyfTable.padding)
+ self.assertEqual(normGlyfData, data)
+
+ def test_reconstruct_glyf_padded_2(self):
+ glyfTable = WOFF2GlyfTable()
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ glyfTable.padding = 2
+ data = glyfTable.compile(self.font)
+ normGlyfData = normalise_table(self.font, "glyf", glyfTable.padding)
+ self.assertEqual(normGlyfData, data)
+
+ def test_reconstruct_glyf_unpadded(self):
+ glyfTable = WOFF2GlyfTable()
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ data = glyfTable.compile(self.font)
+ self.assertEqual(self.tables["glyf"], data)
+
+ def test_reconstruct_glyf_incorrect_glyphOrder(self):
+ glyfTable = WOFF2GlyfTable()
+ badGlyphOrder = self.font.getGlyphOrder()[:-1]
+ self.font.setGlyphOrder(badGlyphOrder)
+ with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"):
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+
+ def test_reconstruct_glyf_missing_glyphOrder(self):
+ glyfTable = WOFF2GlyfTable()
+ del self.font.glyphOrder
+ numGlyphs = self.font["maxp"].numGlyphs
+ del self.font["maxp"]
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ expected = [".notdef"]
+ expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)])
+ self.assertEqual(expected, glyfTable.glyphOrder)
+
+ def test_reconstruct_loca_padded_4(self):
+ locaTable = self.font["loca"] = WOFF2LocaTable()
+ glyfTable = self.font["glyf"] = WOFF2GlyfTable()
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ glyfTable.padding = 4
+ glyfTable.compile(self.font)
+ data = locaTable.compile(self.font)
+ normLocaData = normalise_table(self.font, "loca", glyfTable.padding)
+ self.assertEqual(normLocaData, data)
+
+ def test_reconstruct_loca_padded_2(self):
+ locaTable = self.font["loca"] = WOFF2LocaTable()
+ glyfTable = self.font["glyf"] = WOFF2GlyfTable()
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ glyfTable.padding = 2
+ glyfTable.compile(self.font)
+ data = locaTable.compile(self.font)
+ normLocaData = normalise_table(self.font, "loca", glyfTable.padding)
+ self.assertEqual(normLocaData, data)
+
+ def test_reconstruct_loca_unpadded(self):
+ locaTable = self.font["loca"] = WOFF2LocaTable()
+ glyfTable = self.font["glyf"] = WOFF2GlyfTable()
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ glyfTable.compile(self.font)
+ data = locaTable.compile(self.font)
+ self.assertEqual(self.tables["loca"], data)
+
+ def test_reconstruct_glyf_header_not_enough_data(self):
+ with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"):
+ WOFF2GlyfTable().reconstruct(b"", self.font)
+
+ def test_reconstruct_glyf_table_incorrect_size(self):
+ msg = "incorrect size of transformed 'glyf'"
+ with self.assertRaisesRegex(ttLib.TTLibError, msg):
+ WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font)
+ with self.assertRaisesRegex(ttLib.TTLibError, msg):
+ WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font)
+
+ def test_transform_glyf(self):
+ glyfTable = self.font["glyf"]
+ data = glyfTable.transform(self.font)
+ self.assertEqual(self.transformedGlyfData, data)
+
+ def test_roundtrip_glyf_reconstruct_and_transform(self):
+ glyfTable = WOFF2GlyfTable()
+ glyfTable.reconstruct(self.transformedGlyfData, self.font)
+ data = glyfTable.transform(self.font)
+ self.assertEqual(self.transformedGlyfData, data)
+
+ def test_roundtrip_glyf_transform_and_reconstruct(self):
+ glyfTable = self.font["glyf"]
+ transformedData = glyfTable.transform(self.font)
+ newGlyfTable = WOFF2GlyfTable()
+ newGlyfTable.reconstruct(transformedData, self.font)
+ newGlyfTable.padding = 4
+ reconstructedData = newGlyfTable.compile(self.font)
+ normGlyfData = normalise_table(self.font, "glyf", newGlyfTable.padding)
+ self.assertEqual(normGlyfData, reconstructedData)
@pytest.fixture(scope="module")
def fontfile():
-
- class Glyph(object):
- def __init__(self, empty=False, **kwargs):
- if not empty:
- self.draw = partial(self.drawRect, **kwargs)
- else:
- self.draw = lambda pen: None
-
- @staticmethod
- def drawRect(pen, xMin, xMax):
- pen.moveTo((xMin, 0))
- pen.lineTo((xMin, 1000))
- pen.lineTo((xMax, 1000))
- pen.lineTo((xMax, 0))
- pen.closePath()
-
- class CompositeGlyph(object):
- def __init__(self, components):
- self.components = components
-
- def draw(self, pen):
- for baseGlyph, (offsetX, offsetY) in self.components:
- pen.addComponent(baseGlyph, (1, 0, 0, 1, offsetX, offsetY))
-
- fb = fontBuilder.FontBuilder(unitsPerEm=1000, isTTF=True)
- fb.setupGlyphOrder(
- [".notdef", "space", "A", "acutecomb", "Aacute", "zero", "one", "two"]
- )
- fb.setupCharacterMap(
- {
- 0x20: "space",
- 0x41: "A",
- 0x0301: "acutecomb",
- 0xC1: "Aacute",
- 0x30: "zero",
- 0x31: "one",
- 0x32: "two",
- }
- )
- fb.setupHorizontalMetrics(
- {
- ".notdef": (500, 50),
- "space": (600, 0),
- "A": (550, 40),
- "acutecomb": (0, -40),
- "Aacute": (550, 40),
- "zero": (500, 30),
- "one": (500, 50),
- "two": (500, 40),
- }
- )
- fb.setupHorizontalHeader(ascent=1000, descent=-200)
-
- srcGlyphs = {
- ".notdef": Glyph(xMin=50, xMax=450),
- "space": Glyph(empty=True),
- "A": Glyph(xMin=40, xMax=510),
- "acutecomb": Glyph(xMin=-40, xMax=60),
- "Aacute": CompositeGlyph([("A", (0, 0)), ("acutecomb", (200, 0))]),
- "zero": Glyph(xMin=30, xMax=470),
- "one": Glyph(xMin=50, xMax=450),
- "two": Glyph(xMin=40, xMax=460),
- }
- pen = TTGlyphPen(srcGlyphs)
- glyphSet = {}
- for glyphName, glyph in srcGlyphs.items():
- glyph.draw(pen)
- glyphSet[glyphName] = pen.glyph()
- fb.setupGlyf(glyphSet)
-
- fb.setupNameTable(
- {
- "familyName": "TestWOFF2",
- "styleName": "Regular",
- "uniqueFontIdentifier": "TestWOFF2 Regular; Version 1.000; ABCD",
- "fullName": "TestWOFF2 Regular",
- "version": "Version 1.000",
- "psName": "TestWOFF2-Regular",
- }
- )
- fb.setupOS2()
- fb.setupPost()
-
- buf = BytesIO()
- fb.save(buf)
- buf.seek(0)
-
- assert fb.font["maxp"].numGlyphs == 8
- assert fb.font["hhea"].numberOfHMetrics == 6
- for glyphName in fb.font.getGlyphOrder():
- xMin = getattr(fb.font["glyf"][glyphName], "xMin", 0)
- assert xMin == fb.font["hmtx"][glyphName][1]
-
- return buf
+ class Glyph(object):
+ def __init__(self, empty=False, **kwargs):
+ if not empty:
+ self.draw = partial(self.drawRect, **kwargs)
+ else:
+ self.draw = lambda pen: None
+
+ @staticmethod
+ def drawRect(pen, xMin, xMax):
+ pen.moveTo((xMin, 0))
+ pen.lineTo((xMin, 1000))
+ pen.lineTo((xMax, 1000))
+ pen.lineTo((xMax, 0))
+ pen.closePath()
+
+ class CompositeGlyph(object):
+ def __init__(self, components):
+ self.components = components
+
+ def draw(self, pen):
+ for baseGlyph, (offsetX, offsetY) in self.components:
+ pen.addComponent(baseGlyph, (1, 0, 0, 1, offsetX, offsetY))
+
+ fb = fontBuilder.FontBuilder(unitsPerEm=1000, isTTF=True)
+ fb.setupGlyphOrder(
+ [".notdef", "space", "A", "acutecomb", "Aacute", "zero", "one", "two"]
+ )
+ fb.setupCharacterMap(
+ {
+ 0x20: "space",
+ 0x41: "A",
+ 0x0301: "acutecomb",
+ 0xC1: "Aacute",
+ 0x30: "zero",
+ 0x31: "one",
+ 0x32: "two",
+ }
+ )
+ fb.setupHorizontalMetrics(
+ {
+ ".notdef": (500, 50),
+ "space": (600, 0),
+ "A": (550, 40),
+ "acutecomb": (0, -40),
+ "Aacute": (550, 40),
+ "zero": (500, 30),
+ "one": (500, 50),
+ "two": (500, 40),
+ }
+ )
+ fb.setupHorizontalHeader(ascent=1000, descent=-200)
+
+ srcGlyphs = {
+ ".notdef": Glyph(xMin=50, xMax=450),
+ "space": Glyph(empty=True),
+ "A": Glyph(xMin=40, xMax=510),
+ "acutecomb": Glyph(xMin=-40, xMax=60),
+ "Aacute": CompositeGlyph([("A", (0, 0)), ("acutecomb", (200, 0))]),
+ "zero": Glyph(xMin=30, xMax=470),
+ "one": Glyph(xMin=50, xMax=450),
+ "two": Glyph(xMin=40, xMax=460),
+ }
+ pen = TTGlyphPen(srcGlyphs)
+ glyphSet = {}
+ for glyphName, glyph in srcGlyphs.items():
+ glyph.draw(pen)
+ glyphSet[glyphName] = pen.glyph()
+ fb.setupGlyf(glyphSet)
+
+ fb.setupNameTable(
+ {
+ "familyName": "TestWOFF2",
+ "styleName": "Regular",
+ "uniqueFontIdentifier": "TestWOFF2 Regular; Version 1.000; ABCD",
+ "fullName": "TestWOFF2 Regular",
+ "version": "Version 1.000",
+ "psName": "TestWOFF2-Regular",
+ }
+ )
+ fb.setupOS2()
+ fb.setupPost()
+
+ buf = BytesIO()
+ fb.save(buf)
+ buf.seek(0)
+
+ assert fb.font["maxp"].numGlyphs == 8
+ assert fb.font["hhea"].numberOfHMetrics == 6
+ for glyphName in fb.font.getGlyphOrder():
+ xMin = getattr(fb.font["glyf"][glyphName], "xMin", 0)
+ assert xMin == fb.font["hmtx"][glyphName][1]
+
+ return buf
@pytest.fixture
def ttFont(fontfile):
- return ttLib.TTFont(fontfile, recalcBBoxes=False, recalcTimestamp=False)
+ return ttLib.TTFont(fontfile, recalcBBoxes=False, recalcTimestamp=False)
class WOFF2HmtxTableTest(object):
- def test_transform_no_sidebearings(self, ttFont):
- hmtxTable = WOFF2HmtxTable()
- hmtxTable.metrics = ttFont["hmtx"].metrics
-
- data = hmtxTable.transform(ttFont)
-
- assert data == (
- b"\x03" # 00000011 | bits 0 and 1 are set (no sidebearings arrays)
-
- # advanceWidthArray
- b'\x01\xf4' # .notdef: 500
- b'\x02X' # space: 600
- b'\x02&' # A: 550
- b'\x00\x00' # acutecomb: 0
- b'\x02&' # Aacute: 550
- b'\x01\xf4' # zero: 500
- )
-
- def test_transform_proportional_sidebearings(self, ttFont):
- hmtxTable = WOFF2HmtxTable()
- metrics = ttFont["hmtx"].metrics
- # force one of the proportional glyphs to have its left sidebearing be
- # different from its xMin (40)
- metrics["A"] = (550, 39)
- hmtxTable.metrics = metrics
-
- assert ttFont["glyf"]["A"].xMin != metrics["A"][1]
-
- data = hmtxTable.transform(ttFont)
-
- assert data == (
- b"\x02" # 00000010 | bits 0 unset: explicit proportional sidebearings
-
- # advanceWidthArray
- b'\x01\xf4' # .notdef: 500
- b'\x02X' # space: 600
- b'\x02&' # A: 550
- b'\x00\x00' # acutecomb: 0
- b'\x02&' # Aacute: 550
- b'\x01\xf4' # zero: 500
-
- # lsbArray
- b'\x002' # .notdef: 50
- b'\x00\x00' # space: 0
- b"\x00'" # A: 39 (xMin: 40)
- b'\xff\xd8' # acutecomb: -40
- b'\x00(' # Aacute: 40
- b'\x00\x1e' # zero: 30
- )
-
- def test_transform_monospaced_sidebearings(self, ttFont):
- hmtxTable = WOFF2HmtxTable()
- metrics = ttFont["hmtx"].metrics
- hmtxTable.metrics = metrics
-
- # force one of the monospaced glyphs at the end of hmtx table to have
- # its xMin different from its left sidebearing (50)
- ttFont["glyf"]["one"].xMin = metrics["one"][1] + 1
-
- data = hmtxTable.transform(ttFont)
-
- assert data == (
- b"\x01" # 00000001 | bits 1 unset: explicit monospaced sidebearings
-
- # advanceWidthArray
- b'\x01\xf4' # .notdef: 500
- b'\x02X' # space: 600
- b'\x02&' # A: 550
- b'\x00\x00' # acutecomb: 0
- b'\x02&' # Aacute: 550
- b'\x01\xf4' # zero: 500
-
- # leftSideBearingArray
- b'\x002' # one: 50 (xMin: 51)
- b'\x00(' # two: 40
- )
-
- def test_transform_not_applicable(self, ttFont):
- hmtxTable = WOFF2HmtxTable()
- metrics = ttFont["hmtx"].metrics
- # force both a proportional and monospaced glyph to have sidebearings
- # different from the respective xMin coordinates
- metrics["A"] = (550, 39)
- metrics["one"] = (500, 51)
- hmtxTable.metrics = metrics
-
- # 'None' signals to fall back using untransformed hmtx table data
- assert hmtxTable.transform(ttFont) is None
-
- def test_reconstruct_no_sidebearings(self, ttFont):
- hmtxTable = WOFF2HmtxTable()
-
- data = (
- b"\x03" # 00000011 | bits 0 and 1 are set (no sidebearings arrays)
-
- # advanceWidthArray
- b'\x01\xf4' # .notdef: 500
- b'\x02X' # space: 600
- b'\x02&' # A: 550
- b'\x00\x00' # acutecomb: 0
- b'\x02&' # Aacute: 550
- b'\x01\xf4' # zero: 500
- )
-
- hmtxTable.reconstruct(data, ttFont)
-
- assert hmtxTable.metrics == {
- ".notdef": (500, 50),
- "space": (600, 0),
- "A": (550, 40),
- "acutecomb": (0, -40),
- "Aacute": (550, 40),
- "zero": (500, 30),
- "one": (500, 50),
- "two": (500, 40),
- }
-
- def test_reconstruct_proportional_sidebearings(self, ttFont):
- hmtxTable = WOFF2HmtxTable()
-
- data = (
- b"\x02" # 00000010 | bits 0 unset: explicit proportional sidebearings
-
- # advanceWidthArray
- b'\x01\xf4' # .notdef: 500
- b'\x02X' # space: 600
- b'\x02&' # A: 550
- b'\x00\x00' # acutecomb: 0
- b'\x02&' # Aacute: 550
- b'\x01\xf4' # zero: 500
-
- # lsbArray
- b'\x002' # .notdef: 50
- b'\x00\x00' # space: 0
- b"\x00'" # A: 39 (xMin: 40)
- b'\xff\xd8' # acutecomb: -40
- b'\x00(' # Aacute: 40
- b'\x00\x1e' # zero: 30
- )
-
- hmtxTable.reconstruct(data, ttFont)
-
- assert hmtxTable.metrics == {
- ".notdef": (500, 50),
- "space": (600, 0),
- "A": (550, 39),
- "acutecomb": (0, -40),
- "Aacute": (550, 40),
- "zero": (500, 30),
- "one": (500, 50),
- "two": (500, 40),
- }
-
- assert ttFont["glyf"]["A"].xMin == 40
-
- def test_reconstruct_monospaced_sidebearings(self, ttFont):
- hmtxTable = WOFF2HmtxTable()
-
- data = (
- b"\x01" # 00000001 | bits 1 unset: explicit monospaced sidebearings
-
- # advanceWidthArray
- b'\x01\xf4' # .notdef: 500
- b'\x02X' # space: 600
- b'\x02&' # A: 550
- b'\x00\x00' # acutecomb: 0
- b'\x02&' # Aacute: 550
- b'\x01\xf4' # zero: 500
-
- # leftSideBearingArray
- b'\x003' # one: 51 (xMin: 50)
- b'\x00(' # two: 40
- )
-
- hmtxTable.reconstruct(data, ttFont)
-
- assert hmtxTable.metrics == {
- ".notdef": (500, 50),
- "space": (600, 0),
- "A": (550, 40),
- "acutecomb": (0, -40),
- "Aacute": (550, 40),
- "zero": (500, 30),
- "one": (500, 51),
- "two": (500, 40),
- }
-
- assert ttFont["glyf"]["one"].xMin == 50
-
- def test_reconstruct_flags_reserved_bits(self):
- hmtxTable = WOFF2HmtxTable()
-
- with pytest.raises(
- ttLib.TTLibError, match="Bits 2-7 of 'hmtx' flags are reserved"
- ):
- hmtxTable.reconstruct(b"\xFF", ttFont=None)
-
- def test_reconstruct_flags_required_bits(self):
- hmtxTable = WOFF2HmtxTable()
-
- with pytest.raises(ttLib.TTLibError, match="either bits 0 or 1 .* must set"):
- hmtxTable.reconstruct(b"\x00", ttFont=None)
-
- def test_reconstruct_too_much_data(self, ttFont):
- ttFont["hhea"].numberOfHMetrics = 2
- data = b'\x03\x01\xf4\x02X\x02&'
- hmtxTable = WOFF2HmtxTable()
-
- with pytest.raises(ttLib.TTLibError, match="too much 'hmtx' table data"):
- hmtxTable.reconstruct(data, ttFont)
+ def test_transform_no_sidebearings(self, ttFont):
+ hmtxTable = WOFF2HmtxTable()
+ hmtxTable.metrics = ttFont["hmtx"].metrics
+
+ data = hmtxTable.transform(ttFont)
+
+ assert data == (
+ b"\x03" # 00000011 | bits 0 and 1 are set (no sidebearings arrays)
+ # advanceWidthArray
+ b"\x01\xf4" # .notdef: 500
+ b"\x02X" # space: 600
+ b"\x02&" # A: 550
+ b"\x00\x00" # acutecomb: 0
+ b"\x02&" # Aacute: 550
+ b"\x01\xf4" # zero: 500
+ )
+
+ def test_transform_proportional_sidebearings(self, ttFont):
+ hmtxTable = WOFF2HmtxTable()
+ metrics = ttFont["hmtx"].metrics
+ # force one of the proportional glyphs to have its left sidebearing be
+ # different from its xMin (40)
+ metrics["A"] = (550, 39)
+ hmtxTable.metrics = metrics
+
+ assert ttFont["glyf"]["A"].xMin != metrics["A"][1]
+
+ data = hmtxTable.transform(ttFont)
+
+ assert data == (
+ b"\x02" # 00000010 | bits 0 unset: explicit proportional sidebearings
+ # advanceWidthArray
+ b"\x01\xf4" # .notdef: 500
+ b"\x02X" # space: 600
+ b"\x02&" # A: 550
+ b"\x00\x00" # acutecomb: 0
+ b"\x02&" # Aacute: 550
+ b"\x01\xf4" # zero: 500
+ # lsbArray
+ b"\x002" # .notdef: 50
+ b"\x00\x00" # space: 0
+ b"\x00'" # A: 39 (xMin: 40)
+ b"\xff\xd8" # acutecomb: -40
+ b"\x00(" # Aacute: 40
+ b"\x00\x1e" # zero: 30
+ )
+
+ def test_transform_monospaced_sidebearings(self, ttFont):
+ hmtxTable = WOFF2HmtxTable()
+ metrics = ttFont["hmtx"].metrics
+ hmtxTable.metrics = metrics
+
+ # force one of the monospaced glyphs at the end of hmtx table to have
+ # its xMin different from its left sidebearing (50)
+ ttFont["glyf"]["one"].xMin = metrics["one"][1] + 1
+
+ data = hmtxTable.transform(ttFont)
+
+ assert data == (
+ b"\x01" # 00000001 | bits 1 unset: explicit monospaced sidebearings
+ # advanceWidthArray
+ b"\x01\xf4" # .notdef: 500
+ b"\x02X" # space: 600
+ b"\x02&" # A: 550
+ b"\x00\x00" # acutecomb: 0
+ b"\x02&" # Aacute: 550
+ b"\x01\xf4" # zero: 500
+ # leftSideBearingArray
+ b"\x002" # one: 50 (xMin: 51)
+ b"\x00(" # two: 40
+ )
+
+ def test_transform_not_applicable(self, ttFont):
+ hmtxTable = WOFF2HmtxTable()
+ metrics = ttFont["hmtx"].metrics
+ # force both a proportional and monospaced glyph to have sidebearings
+ # different from the respective xMin coordinates
+ metrics["A"] = (550, 39)
+ metrics["one"] = (500, 51)
+ hmtxTable.metrics = metrics
+
+ # 'None' signals to fall back using untransformed hmtx table data
+ assert hmtxTable.transform(ttFont) is None
+
+ def test_reconstruct_no_sidebearings(self, ttFont):
+ hmtxTable = WOFF2HmtxTable()
+
+ data = (
+ b"\x03" # 00000011 | bits 0 and 1 are set (no sidebearings arrays)
+ # advanceWidthArray
+ b"\x01\xf4" # .notdef: 500
+ b"\x02X" # space: 600
+ b"\x02&" # A: 550
+ b"\x00\x00" # acutecomb: 0
+ b"\x02&" # Aacute: 550
+ b"\x01\xf4" # zero: 500
+ )
+
+ hmtxTable.reconstruct(data, ttFont)
+
+ assert hmtxTable.metrics == {
+ ".notdef": (500, 50),
+ "space": (600, 0),
+ "A": (550, 40),
+ "acutecomb": (0, -40),
+ "Aacute": (550, 40),
+ "zero": (500, 30),
+ "one": (500, 50),
+ "two": (500, 40),
+ }
+
+ def test_reconstruct_proportional_sidebearings(self, ttFont):
+ hmtxTable = WOFF2HmtxTable()
+
+ data = (
+ b"\x02" # 00000010 | bits 0 unset: explicit proportional sidebearings
+ # advanceWidthArray
+ b"\x01\xf4" # .notdef: 500
+ b"\x02X" # space: 600
+ b"\x02&" # A: 550
+ b"\x00\x00" # acutecomb: 0
+ b"\x02&" # Aacute: 550
+ b"\x01\xf4" # zero: 500
+ # lsbArray
+ b"\x002" # .notdef: 50
+ b"\x00\x00" # space: 0
+ b"\x00'" # A: 39 (xMin: 40)
+ b"\xff\xd8" # acutecomb: -40
+ b"\x00(" # Aacute: 40
+ b"\x00\x1e" # zero: 30
+ )
+
+ hmtxTable.reconstruct(data, ttFont)
+
+ assert hmtxTable.metrics == {
+ ".notdef": (500, 50),
+ "space": (600, 0),
+ "A": (550, 39),
+ "acutecomb": (0, -40),
+ "Aacute": (550, 40),
+ "zero": (500, 30),
+ "one": (500, 50),
+ "two": (500, 40),
+ }
+
+ assert ttFont["glyf"]["A"].xMin == 40
+
+ def test_reconstruct_monospaced_sidebearings(self, ttFont):
+ hmtxTable = WOFF2HmtxTable()
+
+ data = (
+ b"\x01" # 00000001 | bits 1 unset: explicit monospaced sidebearings
+ # advanceWidthArray
+ b"\x01\xf4" # .notdef: 500
+ b"\x02X" # space: 600
+ b"\x02&" # A: 550
+ b"\x00\x00" # acutecomb: 0
+ b"\x02&" # Aacute: 550
+ b"\x01\xf4" # zero: 500
+ # leftSideBearingArray
+ b"\x003" # one: 51 (xMin: 50)
+ b"\x00(" # two: 40
+ )
+
+ hmtxTable.reconstruct(data, ttFont)
+
+ assert hmtxTable.metrics == {
+ ".notdef": (500, 50),
+ "space": (600, 0),
+ "A": (550, 40),
+ "acutecomb": (0, -40),
+ "Aacute": (550, 40),
+ "zero": (500, 30),
+ "one": (500, 51),
+ "two": (500, 40),
+ }
+
+ assert ttFont["glyf"]["one"].xMin == 50
+
+ def test_reconstruct_flags_reserved_bits(self):
+ hmtxTable = WOFF2HmtxTable()
+
+ with pytest.raises(
+ ttLib.TTLibError, match="Bits 2-7 of 'hmtx' flags are reserved"
+ ):
+ hmtxTable.reconstruct(b"\xFF", ttFont=None)
+
+ def test_reconstruct_flags_required_bits(self):
+ hmtxTable = WOFF2HmtxTable()
+
+ with pytest.raises(ttLib.TTLibError, match="either bits 0 or 1 .* must set"):
+ hmtxTable.reconstruct(b"\x00", ttFont=None)
+
+ def test_reconstruct_too_much_data(self, ttFont):
+ ttFont["hhea"].numberOfHMetrics = 2
+ data = b"\x03\x01\xf4\x02X\x02&"
+ hmtxTable = WOFF2HmtxTable()
+
+ with pytest.raises(ttLib.TTLibError, match="too much 'hmtx' table data"):
+ hmtxTable.reconstruct(data, ttFont)
class WOFF2RoundtripTest(object):
- @staticmethod
- def roundtrip(infile):
- infile.seek(0)
- ttFont = ttLib.TTFont(infile, recalcBBoxes=False, recalcTimestamp=False)
- outfile = BytesIO()
- ttFont.save(outfile)
- return outfile, ttFont
+ @staticmethod
+ def roundtrip(infile):
+ infile.seek(0)
+ ttFont = ttLib.TTFont(infile, recalcBBoxes=False, recalcTimestamp=False)
+ outfile = BytesIO()
+ ttFont.save(outfile)
+ return outfile, ttFont
- def test_roundtrip_default_transforms(self, ttFont):
- ttFont.flavor = "woff2"
- # ttFont.flavorData = None
- tmp = BytesIO()
- ttFont.save(tmp)
+ def test_roundtrip_default_transforms(self, ttFont):
+ ttFont.flavor = "woff2"
+ # ttFont.flavorData = None
+ tmp = BytesIO()
+ ttFont.save(tmp)
- tmp2, ttFont2 = self.roundtrip(tmp)
+ tmp2, ttFont2 = self.roundtrip(tmp)
- assert tmp.getvalue() == tmp2.getvalue()
- assert ttFont2.reader.flavorData.transformedTables == {"glyf", "loca"}
+ assert tmp.getvalue() == tmp2.getvalue()
+ assert ttFont2.reader.flavorData.transformedTables == {"glyf", "loca"}
- def test_roundtrip_no_transforms(self, ttFont):
- ttFont.flavor = "woff2"
- ttFont.flavorData = WOFF2FlavorData(transformedTables=[])
- tmp = BytesIO()
- ttFont.save(tmp)
+ def test_roundtrip_no_transforms(self, ttFont):
+ ttFont.flavor = "woff2"
+ ttFont.flavorData = WOFF2FlavorData(transformedTables=[])
+ tmp = BytesIO()
+ ttFont.save(tmp)
- tmp2, ttFont2 = self.roundtrip(tmp)
+ tmp2, ttFont2 = self.roundtrip(tmp)
- assert tmp.getvalue() == tmp2.getvalue()
- assert not ttFont2.reader.flavorData.transformedTables
+ assert tmp.getvalue() == tmp2.getvalue()
+ assert not ttFont2.reader.flavorData.transformedTables
- def test_roundtrip_all_transforms(self, ttFont):
- ttFont.flavor = "woff2"
- ttFont.flavorData = WOFF2FlavorData(transformedTables=["glyf", "loca", "hmtx"])
- tmp = BytesIO()
- ttFont.save(tmp)
+ def test_roundtrip_all_transforms(self, ttFont):
+ ttFont.flavor = "woff2"
+ ttFont.flavorData = WOFF2FlavorData(transformedTables=["glyf", "loca", "hmtx"])
+ tmp = BytesIO()
+ ttFont.save(tmp)
- tmp2, ttFont2 = self.roundtrip(tmp)
+ tmp2, ttFont2 = self.roundtrip(tmp)
- assert tmp.getvalue() == tmp2.getvalue()
- assert ttFont2.reader.flavorData.transformedTables == {"glyf", "loca", "hmtx"}
+ assert tmp.getvalue() == tmp2.getvalue()
+ assert ttFont2.reader.flavorData.transformedTables == {"glyf", "loca", "hmtx"}
- def test_roundtrip_only_hmtx_no_glyf_transform(self, ttFont):
- ttFont.flavor = "woff2"
- ttFont.flavorData = WOFF2FlavorData(transformedTables=["hmtx"])
- tmp = BytesIO()
- ttFont.save(tmp)
+ def test_roundtrip_only_hmtx_no_glyf_transform(self, ttFont):
+ ttFont.flavor = "woff2"
+ ttFont.flavorData = WOFF2FlavorData(transformedTables=["hmtx"])
+ tmp = BytesIO()
+ ttFont.save(tmp)
- tmp2, ttFont2 = self.roundtrip(tmp)
+ tmp2, ttFont2 = self.roundtrip(tmp)
- assert tmp.getvalue() == tmp2.getvalue()
- assert ttFont2.reader.flavorData.transformedTables == {"hmtx"}
+ assert tmp.getvalue() == tmp2.getvalue()
+ assert ttFont2.reader.flavorData.transformedTables == {"hmtx"}
- def test_roundtrip_no_glyf_and_loca_tables(self):
- ttx = os.path.join(
- os.path.dirname(current_dir), "subset", "data", "google_color.ttx"
- )
- ttFont = ttLib.TTFont()
- ttFont.importXML(ttx)
+ def test_roundtrip_no_glyf_and_loca_tables(self):
+ ttx = os.path.join(
+ os.path.dirname(current_dir), "subset", "data", "google_color.ttx"
+ )
+ ttFont = ttLib.TTFont()
+ ttFont.importXML(ttx)
- assert "glyf" not in ttFont
- assert "loca" not in ttFont
+ assert "glyf" not in ttFont
+ assert "loca" not in ttFont
- ttFont.flavor = "woff2"
- tmp = BytesIO()
- ttFont.save(tmp)
+ ttFont.flavor = "woff2"
+ tmp = BytesIO()
+ ttFont.save(tmp)
- tmp2, ttFont2 = self.roundtrip(tmp)
- assert tmp.getvalue() == tmp2.getvalue()
- assert ttFont.flavor == "woff2"
+ tmp2, ttFont2 = self.roundtrip(tmp)
+ assert tmp.getvalue() == tmp2.getvalue()
+ assert ttFont.flavor == "woff2"
- def test_roundtrip_off_curve_despite_overlap_bit(self):
- ttx = os.path.join(data_dir, "woff2_overlap_offcurve_in.ttx")
- ttFont = ttLib.TTFont()
- ttFont.importXML(ttx)
+ def test_roundtrip_off_curve_despite_overlap_bit(self):
+ ttx = os.path.join(data_dir, "woff2_overlap_offcurve_in.ttx")
+ ttFont = ttLib.TTFont()
+ ttFont.importXML(ttx)
- assert ttFont["glyf"]["A"].flags[0] == _g_l_y_f.flagOverlapSimple
+ assert ttFont["glyf"]["A"].flags[0] == _g_l_y_f.flagOverlapSimple
- ttFont.flavor = "woff2"
- tmp = BytesIO()
- ttFont.save(tmp)
+ ttFont.flavor = "woff2"
+ tmp = BytesIO()
+ ttFont.save(tmp)
- _, ttFont2 = self.roundtrip(tmp)
- assert ttFont2.flavor == "woff2"
- assert ttFont2["glyf"]["A"].flags[0] == 0
+ _, ttFont2 = self.roundtrip(tmp)
+ assert ttFont2.flavor == "woff2"
+ # check that the off-curve point is still there
+ assert ttFont2["glyf"]["A"].flags[0] & _g_l_y_f.flagOnCurve == 0
+ # check that the overlap bit is still there
+ assert ttFont2["glyf"]["A"].flags[0] & _g_l_y_f.flagOverlapSimple != 0
-class MainTest(object):
- @staticmethod
- def make_ttf(tmpdir):
- ttFont = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- ttFont.importXML(TTX)
- filename = str(tmpdir / "TestTTF-Regular.ttf")
- ttFont.save(filename)
- return filename
+class MainTest(object):
+ @staticmethod
+ def make_ttf(tmpdir):
+ ttFont = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ ttFont.importXML(TTX)
+ filename = str(tmpdir / "TestTTF-Regular.ttf")
+ ttFont.save(filename)
+ return filename
- def test_compress_ttf(self, tmpdir):
- input_file = self.make_ttf(tmpdir)
+ def test_compress_ttf(self, tmpdir):
+ input_file = self.make_ttf(tmpdir)
- assert woff2.main(["compress", input_file]) is None
+ assert woff2.main(["compress", input_file]) is None
- assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
+ assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
- def test_compress_ttf_no_glyf_transform(self, tmpdir):
- input_file = self.make_ttf(tmpdir)
+ def test_compress_ttf_no_glyf_transform(self, tmpdir):
+ input_file = self.make_ttf(tmpdir)
- assert woff2.main(["compress", "--no-glyf-transform", input_file]) is None
+ assert woff2.main(["compress", "--no-glyf-transform", input_file]) is None
- assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
+ assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
- def test_compress_ttf_hmtx_transform(self, tmpdir):
- input_file = self.make_ttf(tmpdir)
+ def test_compress_ttf_hmtx_transform(self, tmpdir):
+ input_file = self.make_ttf(tmpdir)
- assert woff2.main(["compress", "--hmtx-transform", input_file]) is None
+ assert woff2.main(["compress", "--hmtx-transform", input_file]) is None
- assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
+ assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
- def test_compress_ttf_no_glyf_transform_hmtx_transform(self, tmpdir):
- input_file = self.make_ttf(tmpdir)
+ def test_compress_ttf_no_glyf_transform_hmtx_transform(self, tmpdir):
+ input_file = self.make_ttf(tmpdir)
- assert woff2.main(
- ["compress", "--no-glyf-transform", "--hmtx-transform", input_file]
- ) is None
+ assert (
+ woff2.main(
+ ["compress", "--no-glyf-transform", "--hmtx-transform", input_file]
+ )
+ is None
+ )
- assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
+ assert (tmpdir / "TestTTF-Regular.woff2").check(file=True)
- def test_compress_output_file(self, tmpdir):
- input_file = self.make_ttf(tmpdir)
- output_file = tmpdir / "TestTTF.woff2"
+ def test_compress_output_file(self, tmpdir):
+ input_file = self.make_ttf(tmpdir)
+ output_file = tmpdir / "TestTTF.woff2"
- assert woff2.main(
- ["compress", "-o", str(output_file), str(input_file)]
- ) is None
+ assert woff2.main(["compress", "-o", str(output_file), str(input_file)]) is None
- assert output_file.check(file=True)
+ assert output_file.check(file=True)
- def test_compress_otf(self, tmpdir):
- ttFont = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
- ttFont.importXML(OTX)
- input_file = str(tmpdir / "TestOTF-Regular.otf")
- ttFont.save(input_file)
+ def test_compress_otf(self, tmpdir):
+ ttFont = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ ttFont.importXML(OTX)
+ input_file = str(tmpdir / "TestOTF-Regular.otf")
+ ttFont.save(input_file)
- assert woff2.main(["compress", input_file]) is None
+ assert woff2.main(["compress", input_file]) is None
- assert (tmpdir / "TestOTF-Regular.woff2").check(file=True)
+ assert (tmpdir / "TestOTF-Regular.woff2").check(file=True)
- def test_recompress_woff2_keeps_flavorData(self, tmpdir):
- woff2_font = ttLib.TTFont(BytesIO(TT_WOFF2.getvalue()))
- woff2_font.flavorData.privData = b"FOOBAR"
- woff2_file = tmpdir / "TestTTF-Regular.woff2"
- woff2_font.save(str(woff2_file))
+ def test_recompress_woff2_keeps_flavorData(self, tmpdir):
+ woff2_font = ttLib.TTFont(BytesIO(TT_WOFF2.getvalue()))
+ woff2_font.flavorData.privData = b"FOOBAR"
+ woff2_file = tmpdir / "TestTTF-Regular.woff2"
+ woff2_font.save(str(woff2_file))
- assert woff2_font.flavorData.transformedTables == {"glyf", "loca"}
+ assert woff2_font.flavorData.transformedTables == {"glyf", "loca"}
- woff2.main(["compress", "--hmtx-transform", str(woff2_file)])
+ woff2.main(["compress", "--hmtx-transform", str(woff2_file)])
- output_file = tmpdir / "TestTTF-Regular#1.woff2"
- assert output_file.check(file=True)
+ output_file = tmpdir / "TestTTF-Regular#1.woff2"
+ assert output_file.check(file=True)
- new_woff2_font = ttLib.TTFont(str(output_file))
+ new_woff2_font = ttLib.TTFont(str(output_file))
- assert new_woff2_font.flavorData.transformedTables == {"glyf", "loca", "hmtx"}
- assert new_woff2_font.flavorData.privData == b"FOOBAR"
+ assert new_woff2_font.flavorData.transformedTables == {"glyf", "loca", "hmtx"}
+ assert new_woff2_font.flavorData.privData == b"FOOBAR"
- def test_decompress_ttf(self, tmpdir):
- input_file = tmpdir / "TestTTF-Regular.woff2"
- input_file.write_binary(TT_WOFF2.getvalue())
+ def test_decompress_ttf(self, tmpdir):
+ input_file = tmpdir / "TestTTF-Regular.woff2"
+ input_file.write_binary(TT_WOFF2.getvalue())
- assert woff2.main(["decompress", str(input_file)]) is None
+ assert woff2.main(["decompress", str(input_file)]) is None
- assert (tmpdir / "TestTTF-Regular.ttf").check(file=True)
+ assert (tmpdir / "TestTTF-Regular.ttf").check(file=True)
- def test_decompress_otf(self, tmpdir):
- input_file = tmpdir / "TestTTF-Regular.woff2"
- input_file.write_binary(CFF_WOFF2.getvalue())
+ def test_decompress_otf(self, tmpdir):
+ input_file = tmpdir / "TestTTF-Regular.woff2"
+ input_file.write_binary(CFF_WOFF2.getvalue())
- assert woff2.main(["decompress", str(input_file)]) is None
+ assert woff2.main(["decompress", str(input_file)]) is None
- assert (tmpdir / "TestTTF-Regular.otf").check(file=True)
+ assert (tmpdir / "TestTTF-Regular.otf").check(file=True)
- def test_decompress_output_file(self, tmpdir):
- input_file = tmpdir / "TestTTF-Regular.woff2"
- input_file.write_binary(TT_WOFF2.getvalue())
- output_file = tmpdir / "TestTTF.ttf"
+ def test_decompress_output_file(self, tmpdir):
+ input_file = tmpdir / "TestTTF-Regular.woff2"
+ input_file.write_binary(TT_WOFF2.getvalue())
+ output_file = tmpdir / "TestTTF.ttf"
- assert woff2.main(
- ["decompress", "-o", str(output_file), str(input_file)]
- ) is None
+ assert (
+ woff2.main(["decompress", "-o", str(output_file), str(input_file)]) is None
+ )
- assert output_file.check(file=True)
+ assert output_file.check(file=True)
- def test_no_subcommand_show_help(self, capsys):
- with pytest.raises(SystemExit):
- woff2.main(["--help"])
+ def test_no_subcommand_show_help(self, capsys):
+ with pytest.raises(SystemExit):
+ woff2.main(["--help"])
- captured = capsys.readouterr()
- assert "usage: fonttools ttLib.woff2" in captured.out
+ captured = capsys.readouterr()
+ assert "usage: fonttools ttLib.woff2" in captured.out
class Base128Test(unittest.TestCase):
-
- def test_unpackBase128(self):
- self.assertEqual(unpackBase128(b'\x3f\x00\x00'), (63, b"\x00\x00"))
- self.assertEqual(unpackBase128(b'\x8f\xff\xff\xff\x7f')[0], 4294967295)
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "UIntBase128 value must not start with leading zeros",
- unpackBase128, b'\x80\x80\x3f')
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "UIntBase128-encoded sequence is longer than 5 bytes",
- unpackBase128, b'\x8f\xff\xff\xff\xff\x7f')
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- r"UIntBase128 value exceeds 2\*\*32-1",
- unpackBase128, b'\x90\x80\x80\x80\x00')
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "not enough data to unpack UIntBase128",
- unpackBase128, b'')
-
- def test_base128Size(self):
- self.assertEqual(base128Size(0), 1)
- self.assertEqual(base128Size(24567), 3)
- self.assertEqual(base128Size(2**32-1), 5)
-
- def test_packBase128(self):
- self.assertEqual(packBase128(63), b"\x3f")
- self.assertEqual(packBase128(2**32-1), b'\x8f\xff\xff\xff\x7f')
- self.assertRaisesRegex(
- ttLib.TTLibError,
- r"UIntBase128 format requires 0 <= integer <= 2\*\*32-1",
- packBase128, 2**32+1)
- self.assertRaisesRegex(
- ttLib.TTLibError,
- r"UIntBase128 format requires 0 <= integer <= 2\*\*32-1",
- packBase128, -1)
+ def test_unpackBase128(self):
+ self.assertEqual(unpackBase128(b"\x3f\x00\x00"), (63, b"\x00\x00"))
+ self.assertEqual(unpackBase128(b"\x8f\xff\xff\xff\x7f")[0], 4294967295)
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "UIntBase128 value must not start with leading zeros",
+ unpackBase128,
+ b"\x80\x80\x3f",
+ )
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "UIntBase128-encoded sequence is longer than 5 bytes",
+ unpackBase128,
+ b"\x8f\xff\xff\xff\xff\x7f",
+ )
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ r"UIntBase128 value exceeds 2\*\*32-1",
+ unpackBase128,
+ b"\x90\x80\x80\x80\x00",
+ )
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "not enough data to unpack UIntBase128",
+ unpackBase128,
+ b"",
+ )
+
+ def test_base128Size(self):
+ self.assertEqual(base128Size(0), 1)
+ self.assertEqual(base128Size(24567), 3)
+ self.assertEqual(base128Size(2**32 - 1), 5)
+
+ def test_packBase128(self):
+ self.assertEqual(packBase128(63), b"\x3f")
+ self.assertEqual(packBase128(2**32 - 1), b"\x8f\xff\xff\xff\x7f")
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ r"UIntBase128 format requires 0 <= integer <= 2\*\*32-1",
+ packBase128,
+ 2**32 + 1,
+ )
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ r"UIntBase128 format requires 0 <= integer <= 2\*\*32-1",
+ packBase128,
+ -1,
+ )
class UShort255Test(unittest.TestCase):
-
- def test_unpack255UShort(self):
- self.assertEqual(unpack255UShort(bytechr(252))[0], 252)
- # some numbers (e.g. 506) can have multiple encodings
- self.assertEqual(
- unpack255UShort(struct.pack(b"BB", 254, 0))[0], 506)
- self.assertEqual(
- unpack255UShort(struct.pack(b"BB", 255, 253))[0], 506)
- self.assertEqual(
- unpack255UShort(struct.pack(b"BBB", 253, 1, 250))[0], 506)
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "not enough data to unpack 255UInt16",
- unpack255UShort, struct.pack(b"BB", 253, 0))
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "not enough data to unpack 255UInt16",
- unpack255UShort, struct.pack(b"B", 254))
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "not enough data to unpack 255UInt16",
- unpack255UShort, struct.pack(b"B", 255))
-
- def test_pack255UShort(self):
- self.assertEqual(pack255UShort(252), b'\xfc')
- self.assertEqual(pack255UShort(505), b'\xff\xfc')
- self.assertEqual(pack255UShort(506), b'\xfe\x00')
- self.assertEqual(pack255UShort(762), b'\xfd\x02\xfa')
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "255UInt16 format requires 0 <= integer <= 65535",
- pack255UShort, -1)
-
- self.assertRaisesRegex(
- ttLib.TTLibError,
- "255UInt16 format requires 0 <= integer <= 65535",
- pack255UShort, 0xFFFF+1)
+ def test_unpack255UShort(self):
+ self.assertEqual(unpack255UShort(bytechr(252))[0], 252)
+ # some numbers (e.g. 506) can have multiple encodings
+ self.assertEqual(unpack255UShort(struct.pack(b"BB", 254, 0))[0], 506)
+ self.assertEqual(unpack255UShort(struct.pack(b"BB", 255, 253))[0], 506)
+ self.assertEqual(unpack255UShort(struct.pack(b"BBB", 253, 1, 250))[0], 506)
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "not enough data to unpack 255UInt16",
+ unpack255UShort,
+ struct.pack(b"BB", 253, 0),
+ )
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "not enough data to unpack 255UInt16",
+ unpack255UShort,
+ struct.pack(b"B", 254),
+ )
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "not enough data to unpack 255UInt16",
+ unpack255UShort,
+ struct.pack(b"B", 255),
+ )
+
+ def test_pack255UShort(self):
+ self.assertEqual(pack255UShort(252), b"\xfc")
+ self.assertEqual(pack255UShort(505), b"\xff\xfc")
+ self.assertEqual(pack255UShort(506), b"\xfe\x00")
+ self.assertEqual(pack255UShort(762), b"\xfd\x02\xfa")
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "255UInt16 format requires 0 <= integer <= 65535",
+ pack255UShort,
+ -1,
+ )
+
+ self.assertRaisesRegex(
+ ttLib.TTLibError,
+ "255UInt16 format requires 0 <= integer <= 65535",
+ pack255UShort,
+ 0xFFFF + 1,
+ )
+
+
+class VarCompositeTest(unittest.TestCase):
+ def test_var_composite(self):
+ input_path = os.path.join(data_dir, "varc-ac00-ac01.ttf")
+ ttf = ttLib.TTFont(input_path)
+ ttf.flavor = "woff2"
+ out = BytesIO()
+ ttf.save(out)
+
+ ttf = ttLib.TTFont(out)
+ ttf.flavor = None
+ out = BytesIO()
+ ttf.save(out)
+
+
+class CubicTest(unittest.TestCase):
+ def test_cubic(self):
+ input_path = os.path.join(
+ data_dir, "..", "tables", "data", "NotoSans-VF-cubic.subset.ttf"
+ )
+ ttf = ttLib.TTFont(input_path)
+ pen1 = RecordingPen()
+ ttf.getGlyphSet()["a"].draw(pen1)
+ ttf.flavor = "woff2"
+ out = BytesIO()
+ ttf.save(out)
+
+ ttf = ttLib.TTFont(out)
+ ttf.flavor = None
+ pen2 = RecordingPen()
+ ttf.getGlyphSet()["a"].draw(pen2)
+ out = BytesIO()
+ ttf.save(out)
+
+ assert pen1.value == pen2.value
if __name__ == "__main__":
- import sys
- sys.exit(unittest.main())
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/Tests/ttx/data/TestOTF.ttx b/Tests/ttx/data/TestOTF.ttx
index 96f18449..b034a758 100644
--- a/Tests/ttx/data/TestOTF.ttx
+++ b/Tests/ttx/data/TestOTF.ttx
@@ -148,7 +148,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
<namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True">
Test TTF
@@ -190,7 +190,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="3" platEncID="1" langID="0x409">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
</name>
diff --git a/Tests/ttx/data/TestTTF.ttx b/Tests/ttx/data/TestTTF.ttx
index 66caf6ce..6ecca985 100644
--- a/Tests/ttx/data/TestTTF.ttx
+++ b/Tests/ttx/data/TestTTF.ttx
@@ -468,7 +468,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
<namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True">
Test TTF
@@ -510,7 +510,7 @@
https://github.com/fonttools/fonttools
</namerecord>
<namerecord nameID="14" platformID="3" platEncID="1" langID="0x409">
- https://github.com/fonttools/fonttools/blob/master/LICENSE
+ https://github.com/fonttools/fonttools/blob/main/LICENSE
</namerecord>
</name>
diff --git a/Tests/ttx/data/roundtrip_DSIG_split_at_XML_parse_buffer_size.ttx b/Tests/ttx/data/roundtrip_DSIG_split_at_XML_parse_buffer_size.ttx
new file mode 100644
index 00000000..cbab6111
--- /dev/null
+++ b/Tests/ttx/data/roundtrip_DSIG_split_at_XML_parse_buffer_size.ttx
@@ -0,0 +1,224 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.32">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name=".null"/>
+ <GlyphID id="2" name="A"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="1.0"/>
+ <checkSumAdjustment value="0x5c9585c9"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1024"/>
+ <created value="Fri May 6 19:55:13 2022"/>
+ <modified value="Fri May 6 19:55:13 2022"/>
+ <xMin value="0"/>
+ <yMin value="0"/>
+ <xMax value="0"/>
+ <yMax value="0"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="3"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="824"/>
+ <descent value="200"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="600"/>
+ <minLeftSideBearing value="0"/>
+ <minRightSideBearing value="0"/>
+ <xMaxExtent value="0"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="1"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="3"/>
+ <maxPoints value="0"/>
+ <maxContours value="0"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="2"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="3"/>
+ <xAvgCharWidth value="600"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="0"/>
+ <ySubscriptYSize value="0"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="0"/>
+ <ySuperscriptXSize value="0"/>
+ <ySuperscriptYSize value="0"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="0"/>
+ <yStrikeoutSize value="0"/>
+ <yStrikeoutPosition value="0"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="????"/>
+ <fsSelection value="00000000 00000000"/>
+ <usFirstCharIndex value="65"/>
+ <usLastCharIndex value="65"/>
+ <sTypoAscender value="0"/>
+ <sTypoDescender value="0"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="0"/>
+ <usWinDescent value="0"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000000"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="0"/>
+ <sCapHeight value="0"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="600" lsb="0"/>
+ <mtx name=".null" width="600" lsb="0"/>
+ <mtx name="A" width="600" lsb="0"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef"/><!-- contains no outline data -->
+
+ <TTGlyph name=".null"/><!-- contains no outline data -->
+
+ <TTGlyph name="A"/><!-- contains no outline data -->
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ HelloTestFont
+ </namerecord>
+ <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ TotallyNormal
+ </namerecord>
+ <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ HelloTestFont-TotallyNormal
+ </namerecord>
+ <namerecord nameID="1" platformID="1" platEncID="0" langID="0x4" unicode="True">
+ HalloTestFont
+ </namerecord>
+ <namerecord nameID="2" platformID="1" platEncID="0" langID="0x4" unicode="True">
+ TotaalNormaal
+ </namerecord>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ HelloTestFont
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ TotallyNormal
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ HelloTestFont-TotallyNormal
+ </namerecord>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x413">
+ HalloTestFont
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x413">
+ TotaalNormaal
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="0"/>
+ <underlineThickness value="0"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ </extraNames>
+ </post>
+
+ <DSIG>
+ <!-- note that the Digital Signature will be invalid after recompilation! -->
+ <tableHeader flag="0x1" numSigs="1" version="1"/>
+ <SignatureRecord format="1">
+-----BEGIN PKCS7-----
+0000000100000000
+-----END PKCS7-----
+ </SignatureRecord>
+ </DSIG>
+
+</ttFont>
diff --git a/Tests/ttx/ttx_test.py b/Tests/ttx/ttx_test.py
index ef8d8789..be009b8a 100644
--- a/Tests/ttx/ttx_test.py
+++ b/Tests/ttx/ttx_test.py
@@ -1,14 +1,18 @@
from fontTools.misc.testTools import parseXML
from fontTools.misc.timeTools import timestampSinceEpoch
from fontTools.ttLib import TTFont, TTLibError
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools import ttx
+import base64
import getopt
import logging
import os
import shutil
+import subprocess
import sys
import tempfile
import unittest
+from pathlib import Path
import pytest
@@ -26,7 +30,6 @@ except ImportError:
class TTXTest(unittest.TestCase):
-
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@@ -69,9 +72,7 @@ class TTXTest(unittest.TestCase):
def test_parseOptions_no_args(self):
with self.assertRaises(getopt.GetoptError) as cm:
ttx.parseOptions([])
- self.assertTrue(
- "Must specify at least one input file" in str(cm.exception)
- )
+ self.assertTrue("Must specify at least one input file" in str(cm.exception))
def test_parseOptions_invalid_path(self):
file_path = "invalid_font_path"
@@ -151,9 +152,7 @@ class TTXTest(unittest.TestCase):
jobs[i][1:],
(
os.path.join(self.tempdir, file_names[i]),
- os.path.join(
- self.tempdir, file_names[i].split(".")[0] + ".ttx"
- ),
+ os.path.join(self.tempdir, file_names[i].split(".")[0] + ".ttx"),
),
)
@@ -436,6 +435,7 @@ def test_options_b():
tto = ttx.Options([("-b", "")], 1)
assert tto.recalcBBoxes is False
+
def test_options_e():
tto = ttx.Options([("-e", "")], 1)
assert tto.ignoreDecompileErrors is False
@@ -966,9 +966,7 @@ def test_main_system_exit(tmpdir, monkeypatch):
inpath = os.path.join("Tests", "ttx", "data", "TestTTF.ttx")
outpath = tmpdir.join("TestTTF.ttf")
args = ["-o", str(outpath), inpath]
- monkeypatch.setattr(
- ttx, "process", (lambda x, y: raise_exception(SystemExit))
- )
+ monkeypatch.setattr(ttx, "process", (lambda x, y: raise_exception(SystemExit)))
ttx.main(args)
@@ -1002,6 +1000,55 @@ def test_main_base_exception(tmpdir, monkeypatch, caplog):
assert "Unhandled exception has occurred" in caplog.text
+def test_main_ttf_dump_stdin_to_stdout(tmp_path):
+ inpath = Path("Tests").joinpath("ttx", "data", "TestTTF.ttf")
+ outpath = tmp_path / "TestTTF.ttx"
+ args = [sys.executable, "-m", "fontTools.ttx", "-q", "-o", "-", "-"]
+ with inpath.open("rb") as infile, outpath.open("w", encoding="utf-8") as outfile:
+ subprocess.run(args, check=True, stdin=infile, stdout=outfile)
+ assert outpath.is_file()
+
+
+def test_main_ttx_compile_stdin_to_stdout(tmp_path):
+ inpath = Path("Tests").joinpath("ttx", "data", "TestTTF.ttx")
+ outpath = tmp_path / "TestTTF.ttf"
+ args = [sys.executable, "-m", "fontTools.ttx", "-q", "-o", "-", "-"]
+ with inpath.open("r", encoding="utf-8") as infile, outpath.open("wb") as outfile:
+ subprocess.run(args, check=True, stdin=infile, stdout=outfile)
+ assert outpath.is_file()
+
+
+def test_roundtrip_DSIG_split_at_XML_parse_buffer_size(tmp_path):
+ inpath = Path("Tests").joinpath(
+ "ttx", "data", "roundtrip_DSIG_split_at_XML_parse_buffer_size.ttx"
+ )
+ font = TTFont()
+ font.importXML(inpath)
+ font["DMMY"] = DefaultTable(tag="DMMY")
+ # just enough dummy bytes to hit the cut off point whereby DSIG data gets
+ # split into two chunks and triggers the bug from
+ # https://github.com/fonttools/fonttools/issues/2614
+ font["DMMY"].data = b"\x01\x02\x03\x04" * 2438
+ font.saveXML(tmp_path / "roundtrip_DSIG_split_at_XML_parse_buffer_size.ttx")
+
+ outpath = tmp_path / "font.ttf"
+ args = [
+ sys.executable,
+ "-m",
+ "fontTools.ttx",
+ "-q",
+ "-o",
+ str(outpath),
+ str(tmp_path / "roundtrip_DSIG_split_at_XML_parse_buffer_size.ttx"),
+ ]
+ subprocess.run(args, check=True)
+
+ assert outpath.is_file()
+ assert TTFont(outpath)["DSIG"].signatureRecords[0].pkcs7 == base64.b64decode(
+ b"0000000100000000"
+ )
+
+
# ---------------------------
# support functions for tests
# ---------------------------
diff --git a/Tests/ufoLib/GLIF1_test.py b/Tests/ufoLib/GLIF1_test.py
index 85fcc71f..c4991ca3 100644
--- a/Tests/ufoLib/GLIF1_test.py
+++ b/Tests/ufoLib/GLIF1_test.py
@@ -1,5 +1,9 @@
import unittest
-from fontTools.ufoLib.glifLib import GlifLibError, readGlyphFromString, writeGlyphToString
+from fontTools.ufoLib.glifLib import (
+ GlifLibError,
+ readGlyphFromString,
+ writeGlyphToString,
+)
from .testSupport import Glyph, stripText
from itertools import islice
@@ -7,256 +11,262 @@ from itertools import islice
# Test Cases
# ----------
-class TestGLIF1(unittest.TestCase):
- def assertEqual(self, first, second, msg=None):
- if isinstance(first, str):
- first = stripText(first)
- if isinstance(second, str):
- second = stripText(second)
- return super().assertEqual(first, second, msg=msg)
-
- def pyToGLIF(self, py):
- py = stripText(py)
- glyph = Glyph()
- exec(py, {"glyph" : glyph, "pointPen" : glyph})
- glif = writeGlyphToString(glyph.name, glyphObject=glyph, drawPointsFunc=glyph.drawPoints, formatVersion=1, validate=True)
- # discard the first line containing the xml declaration
- return "\n".join(islice(glif.splitlines(), 1, None))
-
- def glifToPy(self, glif):
- glif = stripText(glif)
- glif = "<?xml version=\"1.0\"?>\n" + glif
- glyph = Glyph()
- readGlyphFromString(glif, glyphObject=glyph, pointPen=glyph, validate=True)
- return glyph.py()
-
- def testTopElement(self):
- # not glyph
- glif = """
+class TestGLIF1(unittest.TestCase):
+ def assertEqual(self, first, second, msg=None):
+ if isinstance(first, str):
+ first = stripText(first)
+ if isinstance(second, str):
+ second = stripText(second)
+ return super().assertEqual(first, second, msg=msg)
+
+ def pyToGLIF(self, py):
+ py = stripText(py)
+ glyph = Glyph()
+ exec(py, {"glyph": glyph, "pointPen": glyph})
+ glif = writeGlyphToString(
+ glyph.name,
+ glyphObject=glyph,
+ drawPointsFunc=glyph.drawPoints,
+ formatVersion=1,
+ validate=True,
+ )
+ # discard the first line containing the xml declaration
+ return "\n".join(islice(glif.splitlines(), 1, None))
+
+ def glifToPy(self, glif):
+ glif = stripText(glif)
+ glif = '<?xml version="1.0"?>\n' + glif
+ glyph = Glyph()
+ readGlyphFromString(glif, glyphObject=glyph, pointPen=glyph, validate=True)
+ return glyph.py()
+
+ def testTopElement(self):
+ # not glyph
+ glif = """
<notglyph name="a" format="1">
<outline>
</outline>
</notglyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testName_legal(self):
- # legal
- glif = """
+ def testName_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testName_empty(self):
- # empty
- glif = """
+ def testName_empty(self):
+ # empty
+ glif = """
<glyph name="" format="1">
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = ""
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testName_not_a_string(self):
- # not a string
- py = """
+ def testName_not_a_string(self):
+ # not a string
+ py = """
glyph.name = 1
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
- def testFormat_legal(self):
- # legal
- glif = """
+ def testFormat_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testFormat_wrong_number(self):
- # wrong number
- glif = """
+ def testFormat_wrong_number(self):
+ # wrong number
+ glif = """
<glyph name="a" format="-1">
<outline>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testFormat_not_an_int(self):
- # not an int
- glif = """
+ def testFormat_not_an_int(self):
+ # not an int
+ glif = """
<glyph name="a" format="A">
<outline>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testBogusGlyphStructure_unknown_element(self):
- # unknown element
- glif = """
+ def testBogusGlyphStructure_unknown_element(self):
+ # unknown element
+ glif = """
<glyph name="a" format="1">
<unknown />
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testBogusGlyphStructure_content(self):
- # content
- glif = """
+ def testBogusGlyphStructure_content(self):
+ # content
+ glif = """
<glyph name="a" format="1">
Hello World.
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAdvance_legal_width_and_height(self):
- # legal: width and height
- glif = """
+ def testAdvance_legal_width_and_height(self):
+ # legal: width and height
+ glif = """
<glyph name="a" format="1">
<advance height="200" width="100"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = 100
glyph.height = 200
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_legal_width_and_height_floats(self):
- # legal: width and height floats
- glif = """
+ def testAdvance_legal_width_and_height_floats(self):
+ # legal: width and height floats
+ glif = """
<glyph name="a" format="1">
<advance height="200.1" width="100.1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = 100.1
glyph.height = 200.1
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_legal_width(self):
- # legal: width
- glif = """
+ def testAdvance_legal_width(self):
+ # legal: width
+ glif = """
<glyph name="a" format="1">
<advance width="100"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = 100
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_legal_height(self):
- # legal: height
- glif = """
+ def testAdvance_legal_height(self):
+ # legal: height
+ glif = """
<glyph name="a" format="1">
<advance height="200"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.height = 200
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_illegal_width(self):
- # illegal: not a number
- glif = """
+ def testAdvance_illegal_width(self):
+ # illegal: not a number
+ glif = """
<glyph name="a" format="1">
<advance width="a"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = "a"
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAdvance_illegal_height(self):
- glif = """
+ def testAdvance_illegal_height(self):
+ glif = """
<glyph name="a" format="1">
<advance height="a"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.height = "a"
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testUnicodes_legal(self):
- # legal
- glif = """
+ def testUnicodes_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<unicode hex="0061"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.unicodes = [97]
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testUnicodes_legal_multiple(self):
- glif = """
+ def testUnicodes_legal_multiple(self):
+ glif = """
<glyph name="a" format="1">
<unicode hex="0062"/>
<unicode hex="0063"/>
@@ -265,33 +275,33 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.unicodes = [98, 99, 97]
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testUnicodes_illegal(self):
- # illegal
- glif = """
+ def testUnicodes_illegal(self):
+ # illegal
+ glif = """
<glyph name="a" format="1">
<unicode hex="1.1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "zzzzzz"
glyph.unicodes = ["1.1"]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testNote(self):
- glif = """
+ def testNote(self):
+ glif = """
<glyph name="a" format="1">
<note>
\U0001F4A9
@@ -300,17 +310,17 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.note = "💩"
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testLib_legal(self):
- glif = """
+ def testLib_legal(self):
+ glif = """
<glyph name="a" format="1">
<outline>
</outline>
@@ -338,150 +348,150 @@ class TestGLIF1(unittest.TestCase):
</lib>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.lib = {"dict" : {"hello" : "world"}, "float" : 2.5, "int" : 1, "list" : ["a", "b", 1, 2.5], "string" : "a"}
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testOutline_unknown_element(self):
- # unknown element
- glif = """
+ def testOutline_unknown_element(self):
+ # unknown element
+ glif = """
<glyph name="a" format="1">
<outline>
<unknown/>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testOutline_content(self):
- # content
- glif = """
+ def testOutline_content(self):
+ # content
+ glif = """
<glyph name="a" format="1">
<outline>
hello
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testComponent_legal(self):
- # legal
- glif = """
+ def testComponent_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, 4)])
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testComponent_illegal_no_base(self):
- # no base
- glif = """
+ def testComponent_illegal_no_base(self):
+ # no base
+ glif = """
<glyph name="a" format="1">
<outline>
<component xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testComponent_bogus_transformation(self):
- # bogus values in transformation
- glif = """
+ def testComponent_bogus_transformation(self):
+ # bogus values in transformation
+ glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", ("a", 3, 6, 5, 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, "a", 6, 5, 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="a" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, "a", 5, 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="a" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, "a", 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="a" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, "a", 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="1">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="a"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, "a")])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testContour_legal_one_contour(self):
- # legal: one contour
- glif = """
+ def testContour_legal_one_contour(self):
+ # legal: one contour
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -489,19 +499,19 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testContour_legal_two_contours(self):
- # legal: two contours
- glif = """
+ def testContour_legal_two_contours(self):
+ # legal: two contours
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -515,7 +525,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
@@ -526,14 +536,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(10, 20)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testContour_illegal_unkonwn_element(self):
- # unknown element
- glif = """
+ def testContour_illegal_unkonwn_element(self):
+ # unknown element
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -542,11 +552,11 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointCoordinates_legal_int(self):
- # legal: int
- glif = """
+ def testPointCoordinates_legal_int(self):
+ # legal: int
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -556,21 +566,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointCoordinates_legal_float(self):
- # legal: float
- glif = """
+ def testPointCoordinates_legal_float(self):
+ # legal: float
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -580,21 +590,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1.1, -2.2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointCoordinates_illegal_x(self):
- # illegal: string
- glif = """
+ def testPointCoordinates_illegal_x(self):
+ # illegal: string
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -604,19 +614,19 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[("a", 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointCoordinates_illegal_y(self):
- # legal: int
- glif = """
+ def testPointCoordinates_illegal_y(self):
+ # legal: int
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -626,19 +636,19 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, "a")], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"name" : "this is here so that the contour isn't seen as an anchor", "segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeMove_legal(self):
- # legal
- glif = """
+ def testPointTypeMove_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -648,21 +658,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeMove_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointTypeMove_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -672,21 +682,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : True})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeMove_illegal_not_at_start(self):
- # illegal: not at start
- glif = """
+ def testPointTypeMove_illegal_not_at_start(self):
+ # illegal: not at start
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -696,19 +706,19 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeLine_legal(self):
- # legal
- glif = """
+ def testPointTypeLine_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -718,21 +728,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeLine_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointTypeLine_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -742,21 +752,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeLine_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointTypeLine_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -766,21 +776,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : True})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal(self):
- # legal
- glif = """
+ def testPointTypeCurve_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -792,7 +802,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -801,14 +811,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointTypeCurve_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -819,7 +829,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
@@ -827,14 +837,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointTypeCurve_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -846,7 +856,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -855,14 +865,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : True})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_no_off_curves(self):
- # legal: no off-curves
- glif = """
+ def testPointTypeCurve_legal_no_off_curves(self):
+ # legal: no off-curves
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -872,21 +882,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_1_off_curve(self):
- # legal: 1 off-curve
- glif = """
+ def testPointTypeCurve_legal_1_off_curve(self):
+ # legal: 1 off-curve
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -897,7 +907,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -905,14 +915,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_illegal_3_off_curves(self):
- # illegal: 3 off-curves
- glif = """
+ def testPointTypeCurve_illegal_3_off_curves(self):
+ # illegal: 3 off-curves
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -925,7 +935,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -935,12 +945,12 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointQCurve_legal(self):
- # legal
- glif = """
+ def testPointQCurve_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -952,7 +962,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -961,14 +971,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointQCurve_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -979,7 +989,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
@@ -987,14 +997,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointQCurve_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1006,7 +1016,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1015,14 +1025,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : True})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_no_off_curves(self):
- # legal: no off-curves
- glif = """
+ def testPointQCurve_legal_no_off_curves(self):
+ # legal: no off-curves
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1032,21 +1042,21 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_one_off_curve(self):
- # legal: 1 off-curve
- glif = """
+ def testPointQCurve_legal_one_off_curve(self):
+ # legal: 1 off-curve
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1057,7 +1067,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1065,14 +1075,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_3_off_curves(self):
- # legal: 3 off-curves
- glif = """
+ def testPointQCurve_legal_3_off_curves(self):
+ # legal: 3 off-curves
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1085,7 +1095,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1095,14 +1105,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testSpecialCaseQCurve(self):
- # contour with no on curve
- glif = """
+ def testSpecialCaseQCurve(self):
+ # contour with no on curve
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1114,7 +1124,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"smooth" : False})
@@ -1123,14 +1133,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 0)], **{"smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeOffCurve_legal(self):
- # legal
- glif = """
+ def testPointTypeOffCurve_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1142,7 +1152,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1151,14 +1161,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeOffCurve_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointTypeOffCurve_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1169,7 +1179,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
@@ -1177,14 +1187,14 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeOffCurve_illegal_before_move(self):
- # before move
- glif = """
+ def testPointTypeOffCurve_illegal_before_move(self):
+ # before move
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1194,19 +1204,19 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeOffCurve_illegal_before_line(self):
- # before line
- glif = """
+ def testPointTypeOffCurve_illegal_before_line(self):
+ # before line
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1216,19 +1226,19 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeOffCurve_illegal_smooth(self):
- # smooth=True
- glif = """
+ def testPointTypeOffCurve_illegal_smooth(self):
+ # smooth=True
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1238,20 +1248,20 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : True})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testSinglePoint_legal_without_name(self):
- # legal
- # glif format 1 single point without a name was not an anchor
- glif = """
+ def testSinglePoint_legal_without_name(self):
+ # legal
+ # glif format 1 single point without a name was not an anchor
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1260,19 +1270,19 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAnchor_legal_with_name(self):
- glif = """
+ def testAnchor_legal_with_name(self):
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1281,18 +1291,18 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.anchors = [{"name" : "test", "x" : 1, "y" : 2}]
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testOpenContourLooseOffCurves_legal(self):
- # a piece of software was writing this kind of structure
- glif = """
+ def testOpenContourLooseOffCurves_legal(self):
+ # a piece of software was writing this kind of structure
+ glif = """
<glyph name="a" format="1">
<outline>
<contour>
@@ -1305,7 +1315,7 @@ class TestGLIF1(unittest.TestCase):
</outline>
</glyph>
"""
- expectedPy = """
+ expectedPy = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
@@ -1314,11 +1324,11 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultPy = self.glifToPy(glif)
- self.assertEqual(resultPy, expectedPy)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(resultPy, expectedPy)
- def testOpenContourLooseOffCurves_illegal(self):
- py = """
+ def testOpenContourLooseOffCurves_illegal(self):
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
@@ -1328,4 +1338,4 @@ class TestGLIF1(unittest.TestCase):
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
diff --git a/Tests/ufoLib/GLIF2_test.py b/Tests/ufoLib/GLIF2_test.py
index ab9495db..d8c96d65 100644
--- a/Tests/ufoLib/GLIF2_test.py
+++ b/Tests/ufoLib/GLIF2_test.py
@@ -1,5 +1,9 @@
import unittest
-from fontTools.ufoLib.glifLib import GlifLibError, readGlyphFromString, writeGlyphToString
+from fontTools.ufoLib.glifLib import (
+ GlifLibError,
+ readGlyphFromString,
+ writeGlyphToString,
+)
from .testSupport import Glyph, stripText
from itertools import islice
@@ -7,256 +11,262 @@ from itertools import islice
# Test Cases
# ----------
-class TestGLIF2(unittest.TestCase):
- def assertEqual(self, first, second, msg=None):
- if isinstance(first, str):
- first = stripText(first)
- if isinstance(second, str):
- second = stripText(second)
- return super().assertEqual(first, second, msg=msg)
-
- def pyToGLIF(self, py):
- py = stripText(py)
- glyph = Glyph()
- exec(py, {"glyph" : glyph, "pointPen" : glyph})
- glif = writeGlyphToString(glyph.name, glyphObject=glyph, drawPointsFunc=glyph.drawPoints, formatVersion=2, validate=True)
- # discard the first line containing the xml declaration
- return "\n".join(islice(glif.splitlines(), 1, None))
-
- def glifToPy(self, glif):
- glif = stripText(glif)
- glif = "<?xml version=\"1.0\"?>\n" + glif
- glyph = Glyph()
- readGlyphFromString(glif, glyphObject=glyph, pointPen=glyph, validate=True)
- return glyph.py()
-
- def testTopElement(self):
- # not glyph
- glif = """
+class TestGLIF2(unittest.TestCase):
+ def assertEqual(self, first, second, msg=None):
+ if isinstance(first, str):
+ first = stripText(first)
+ if isinstance(second, str):
+ second = stripText(second)
+ return super().assertEqual(first, second, msg=msg)
+
+ def pyToGLIF(self, py):
+ py = stripText(py)
+ glyph = Glyph()
+ exec(py, {"glyph": glyph, "pointPen": glyph})
+ glif = writeGlyphToString(
+ glyph.name,
+ glyphObject=glyph,
+ drawPointsFunc=glyph.drawPoints,
+ formatVersion=2,
+ validate=True,
+ )
+ # discard the first line containing the xml declaration
+ return "\n".join(islice(glif.splitlines(), 1, None))
+
+ def glifToPy(self, glif):
+ glif = stripText(glif)
+ glif = '<?xml version="1.0"?>\n' + glif
+ glyph = Glyph()
+ readGlyphFromString(glif, glyphObject=glyph, pointPen=glyph, validate=True)
+ return glyph.py()
+
+ def testTopElement(self):
+ # not glyph
+ glif = """
<notglyph name="a" format="2">
<outline>
</outline>
</notglyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testName_legal(self):
- # legal
- glif = """
+ def testName_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testName_empty(self):
- # empty
- glif = """
+ def testName_empty(self):
+ # empty
+ glif = """
<glyph name="" format="2">
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = ""
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testName_not_a_string(self):
- # not a string
- py = """
+ def testName_not_a_string(self):
+ # not a string
+ py = """
glyph.name = 1
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
- def testFormat_legal(self):
- # legal
- glif = """
+ def testFormat_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testFormat_illegal_wrong_number(self):
- # wrong number
- glif = """
+ def testFormat_illegal_wrong_number(self):
+ # wrong number
+ glif = """
<glyph name="a" format="-1">
<outline>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testFormat_illegal_not_int(self):
- # not an int
- glif = """
+ def testFormat_illegal_not_int(self):
+ # not an int
+ glif = """
<glyph name="a" format="A">
<outline>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testBogusGlyphStructure_unknown_element(self):
- # unknown element
- glif = """
+ def testBogusGlyphStructure_unknown_element(self):
+ # unknown element
+ glif = """
<glyph name="a" format="2">
<unknown />
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testBogusGlyphStructure_content(self):
- # content
- glif = """
+ def testBogusGlyphStructure_content(self):
+ # content
+ glif = """
<glyph name="a" format="2">
Hello World.
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAdvance_legal_widht_and_height(self):
- # legal: width and height
- glif = """
+ def testAdvance_legal_widht_and_height(self):
+ # legal: width and height
+ glif = """
<glyph name="a" format="2">
<advance height="200" width="100"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = 100
glyph.height = 200
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_legal_width_and_height_floats(self):
- # legal: width and height floats
- glif = """
+ def testAdvance_legal_width_and_height_floats(self):
+ # legal: width and height floats
+ glif = """
<glyph name="a" format="2">
<advance height="200.1" width="100.1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = 100.1
glyph.height = 200.1
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_legal_width(self):
- # legal: width
- glif = """
+ def testAdvance_legal_width(self):
+ # legal: width
+ glif = """
<glyph name="a" format="2">
<advance width="100"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = 100
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_legal_height(self):
- # legal: height
- glif = """
+ def testAdvance_legal_height(self):
+ # legal: height
+ glif = """
<glyph name="a" format="2">
<advance height="200"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.height = 200
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAdvance_illegal_width(self):
- # illegal: not a number
- glif = """
+ def testAdvance_illegal_width(self):
+ # illegal: not a number
+ glif = """
<glyph name="a" format="2">
<advance width="a"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.width = "a"
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAdvance_illegal_height(self):
- glif = """
+ def testAdvance_illegal_height(self):
+ glif = """
<glyph name="a" format="2">
<advance height="a"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.height = "a"
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testUnicodes_legal(self):
- # legal
- glif = """
+ def testUnicodes_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<unicode hex="0061"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.unicodes = [97]
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testUnicodes_legal_multiple(self):
- glif = """
+ def testUnicodes_legal_multiple(self):
+ glif = """
<glyph name="a" format="2">
<unicode hex="0062"/>
<unicode hex="0063"/>
@@ -265,33 +275,33 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.unicodes = [98, 99, 97]
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testUnicodes_illegal(self):
- # illegal
- glif = """
+ def testUnicodes_illegal(self):
+ # illegal
+ glif = """
<glyph name="a" format="2">
<unicode hex="1.1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "zzzzzz"
glyph.unicodes = ["1.1"]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testNote(self):
- glif = """
+ def testNote(self):
+ glif = """
<glyph name="a" format="2">
<note>
hëllö
@@ -300,17 +310,17 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.note = "hëllö"
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testLib(self):
- glif = """
+ def testLib(self):
+ glif = """
<glyph name="a" format="2">
<outline>
</outline>
@@ -338,18 +348,18 @@ class TestGLIF2(unittest.TestCase):
</lib>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.lib = {"dict" : {"hello" : "world"}, "float" : 2.5, "int" : 1, "list" : ["a", "b", 1, 2.5], "string" : "a"}
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testGuidelines_legal(self):
- # legal
- glif = """
+ def testGuidelines_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<guideline x="1"/>
<guideline y="1"/>
@@ -362,143 +372,143 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"x" : 1}, {"y" : 1}, {"angle" : 0, "x" : 1, "y" : 1}, {"angle" : 360, "x" : 1, "y" : 1}, {"angle" : 45.5, "x" : 1.1, "y" : 1.1}, {"name" : "a", "x" : 1}, {"color" : "1,1,1,1", "x" : 1}]
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testGuidelines_illegal_x(self):
- # x not an int or float
- glif = """
+ def testGuidelines_illegal_x(self):
+ # x not an int or float
+ glif = """
<glyph name="a" format="2">
<guideline x="a" y="1" angle="45"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "x" : "a", "y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testGuidelines_illegal_y(self):
- # y not an int or float
- glif = """
+ def testGuidelines_illegal_y(self):
+ # y not an int or float
+ glif = """
<glyph name="a" format="2">
<guideline x="1" y="y" angle="45"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "x" : 1, "y" : "a"}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testGuidelines_illegal_angle(self):
- # angle not an int or float
- glif = """
+ def testGuidelines_illegal_angle(self):
+ # angle not an int or float
+ glif = """
<glyph name="a" format="2">
<guideline x="1" y="1" angle="a"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : "a", "x" : 1, "y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testGuidelines_illegal_x_missing(self):
- # x missing
- glif = """
+ def testGuidelines_illegal_x_missing(self):
+ # x missing
+ glif = """
<glyph name="a" format="2">
<guideline y="1" angle="45"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testGuidelines_illegal_y_missing(self):
- # y missing
- glif = """
+ def testGuidelines_illegal_y_missing(self):
+ # y missing
+ glif = """
<glyph name="a" format="2">
<guideline x="1" angle="45"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "x" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testGuidelines_illegal_angle_missing(self):
- # angle missing
- glif = """
+ def testGuidelines_illegal_angle_missing(self):
+ # angle missing
+ glif = """
<glyph name="a" format="2">
<guideline x="1" y="1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"x" : 1, "y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testGuidelines_illegal_angle_out_of_range(self):
- # angle out of range
- glif = """
+ def testGuidelines_illegal_angle_out_of_range(self):
+ # angle out of range
+ glif = """
<glyph name="a" format="2">
<guideline x="1" y="1" angle="-1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : -1, "x" : "1", "y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<guideline x="1" y="1" angle="361"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 361, "x" : "1", "y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAnchors_legal(self):
- # legal
- glif = """
+ def testAnchors_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<anchor x="1" y="2" name="test" color="1,0,0,1"/>
<anchor x="1" y="2"/>
@@ -506,363 +516,363 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.anchors = [{"color" : "1,0,0,1", "name" : "test", "x" : 1, "y" : 2}, {"x" : 1, "y" : 2}]
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testAnchors_illegal_x(self):
- # x not an int or float
- glif = """
+ def testAnchors_illegal_x(self):
+ # x not an int or float
+ glif = """
<glyph name="a" format="2">
<anchor x="a" y="1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.anchors = [{"x" : "a", "y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAnchors_illegal_y(self):
- # y not an int or float
- glif = """
+ def testAnchors_illegal_y(self):
+ # y not an int or float
+ glif = """
<glyph name="a" format="2">
<anchor x="1" y="a"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.anchors = [{"x" : 1, "y" : "a"}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAnchors_illegal_x_missing(self):
- # x missing
- glif = """
+ def testAnchors_illegal_x_missing(self):
+ # x missing
+ glif = """
<glyph name="a" format="2">
<anchor y="1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.anchors = [{"y" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testAnchors_illegal_y_missing(self):
- # y missing
- glif = """
+ def testAnchors_illegal_y_missing(self):
+ # y missing
+ glif = """
<glyph name="a" format="2">
<anchor x="1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.anchors = [{"x" : 1}]
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testImage_legal(self):
- # legal
- glif = """
+ def testImage_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4" color="1,1,1,1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"color" : "1,1,1,1", "fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testImage_legal_no_color_or_transformation(self):
- # legal: no color or transformation
- glif = """
+ def testImage_legal_no_color_or_transformation(self):
+ # legal: no color or transformation
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 0, "xScale" : 1, "xyScale" : 0, "yOffset" : 0, "yScale" : 1, "yxScale" : 0}
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testImage_illegal_no_file_name(self):
- # no file name
- glif = """
+ def testImage_illegal_no_file_name(self):
+ # no file name
+ glif = """
<glyph name="a" format="2">
<image xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4" color="1,1,1,1"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"color" : "1,1,1,1", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testImage_bogus_transformation(self):
- # bogus transformation
- glif = """
+ def testImage_bogus_transformation(self):
+ # bogus transformation
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : "a", "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="a" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : "a", "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="a" yScale="5" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : "a"}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="a" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : "a", "yxScale" : 6}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="a" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : "a", "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="a"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : "a", "yScale" : 5, "yxScale" : 6}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testImage_bogus_color(self):
- # bogus color
- glif = """
+ def testImage_bogus_color(self):
+ # bogus color
+ glif = """
<glyph name="a" format="2">
<image fileName="test.png" color="1,1,1,x"/>
<outline>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.image = {"color" : "1,1,1,x"}
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testOutline_unknown_element(self):
- # unknown element
- glif = """
+ def testOutline_unknown_element(self):
+ # unknown element
+ glif = """
<glyph name="a" format="2">
<outline>
<unknown/>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testOutline_content(self):
- # content
- glif = """
+ def testOutline_content(self):
+ # content
+ glif = """
<glyph name="a" format="2">
<outline>
hello
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testComponent_legal(self):
- # legal
- glif = """
+ def testComponent_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, 4)])
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testComponent_illegal_no_base(self):
- # no base
- glif = """
+ def testComponent_illegal_no_base(self):
+ # no base
+ glif = """
<glyph name="a" format="2">
<outline>
<component xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testComponent_illegal_bogus_transformation(self):
- # bogus values in transformation
- glif = """
+ def testComponent_illegal_bogus_transformation(self):
+ # bogus values in transformation
+ glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", ("a", 3, 6, 5, 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, "a", 6, 5, 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="a" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, "a", 5, 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="a" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, "a", 1, 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="a" yOffset="4"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, "a", 4)])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- glif = """
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="a"/>
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, "a")])
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testContour_legal_one_contour(self):
- # legal: one contour
- glif = """
+ def testContour_legal_one_contour(self):
+ # legal: one contour
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -870,19 +880,19 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testContour_legal_two_contours(self):
- # legal: two contours
- glif = """
+ def testContour_legal_two_contours(self):
+ # legal: two contours
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -895,7 +905,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
@@ -905,14 +915,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(10, 20)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testContour_illegal_unkonwn_element(self):
- # unknown element
- glif = """
+ def testContour_illegal_unkonwn_element(self):
+ # unknown element
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -921,10 +931,10 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testContourIdentifier(self):
- glif = """
+ def testContourIdentifier(self):
+ glif = """
<glyph name="a" format="2">
<outline>
<contour identifier="foo">
@@ -932,19 +942,19 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath(**{"identifier" : "foo"})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointCoordinates_legal_int(self):
- # legal: int
- glif = """
+ def testPointCoordinates_legal_int(self):
+ # legal: int
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -953,20 +963,20 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointCoordinates_legal_float(self):
- # legal: float
- glif = """
+ def testPointCoordinates_legal_float(self):
+ # legal: float
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -975,20 +985,20 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1.1, -2.2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointCoordinates_illegal_x(self):
- # illegal: x as string
- glif = """
+ def testPointCoordinates_illegal_x(self):
+ # illegal: x as string
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -997,18 +1007,18 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[("a", 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointCoordinates_illegal_y(self):
- # illegal: y as string
- glif = """
+ def testPointCoordinates_illegal_y(self):
+ # illegal: y as string
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1017,18 +1027,18 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, "a")], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeMove_legal(self):
- # legal
- glif = """
+ def testPointTypeMove_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1038,21 +1048,21 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeMove_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointTypeMove_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1062,21 +1072,21 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : True})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeMove_illegal_not_at_start(self):
- # illegal: not at start
- glif = """
+ def testPointTypeMove_illegal_not_at_start(self):
+ # illegal: not at start
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1086,19 +1096,19 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeLine_legal(self):
- # legal
- glif = """
+ def testPointTypeLine_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1108,21 +1118,21 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeLine_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointTypeLine_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1132,21 +1142,21 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeLine_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointTypeLine_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1156,21 +1166,21 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : True})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal(self):
- # legal
- glif = """
+ def testPointTypeCurve_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1182,7 +1192,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1191,14 +1201,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointTypeCurve_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1209,7 +1219,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
@@ -1217,14 +1227,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointTypeCurve_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1236,7 +1246,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1245,14 +1255,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : True})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_no_off_curves(self):
- # legal: no off-curves
- glif = """
+ def testPointTypeCurve_legal_no_off_curves(self):
+ # legal: no off-curves
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1262,21 +1272,21 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_legal_1_off_curve(self):
- # legal: 1 off-curve
- glif = """
+ def testPointTypeCurve_legal_1_off_curve(self):
+ # legal: 1 off-curve
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1287,7 +1297,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1295,14 +1305,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeCurve_illegal_3_off_curves(self):
- # illegal: 3 off-curves
- glif = """
+ def testPointTypeCurve_illegal_3_off_curves(self):
+ # illegal: 3 off-curves
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1315,7 +1325,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1325,12 +1335,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointQCurve_legal(self):
- # legal
- glif = """
+ def testPointQCurve_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1342,7 +1352,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1351,14 +1361,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointQCurve_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1369,7 +1379,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
@@ -1377,14 +1387,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_smooth(self):
- # legal: smooth=True
- glif = """
+ def testPointQCurve_legal_smooth(self):
+ # legal: smooth=True
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1396,7 +1406,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1405,14 +1415,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : True})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_no_off_curves(self):
- # legal: no off-curves
- glif = """
+ def testPointQCurve_legal_no_off_curves(self):
+ # legal: no off-curves
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1422,21 +1432,21 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_one_off_curve(self):
- # legal: 1 off-curve
- glif = """
+ def testPointQCurve_legal_one_off_curve(self):
+ # legal: 1 off-curve
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1447,7 +1457,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1455,14 +1465,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointQCurve_legal_3_off_curves(self):
- # legal: 3 off-curves
- glif = """
+ def testPointQCurve_legal_3_off_curves(self):
+ # legal: 3 off-curves
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1475,7 +1485,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1485,14 +1495,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testSpecialCaseQCurve_legal_no_on_curve(self):
- # contour with no on curve
- glif = """
+ def testSpecialCaseQCurve_legal_no_on_curve(self):
+ # contour with no on curve
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1504,7 +1514,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"smooth" : False})
@@ -1513,14 +1523,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 0)], **{"smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeOffCurve_legal(self):
- # legal
- glif = """
+ def testPointTypeOffCurve_legal(self):
+ # legal
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1532,7 +1542,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
@@ -1541,14 +1551,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeOffCurve_legal_start_of_contour(self):
- # legal: start of contour
- glif = """
+ def testPointTypeOffCurve_legal_start_of_contour(self):
+ # legal: start of contour
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1559,7 +1569,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
@@ -1567,14 +1577,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testPointTypeOffCurve_illegal_before_move(self):
- # before move
- glif = """
+ def testPointTypeOffCurve_illegal_before_move(self):
+ # before move
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1584,19 +1594,19 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeOffCurve_illegal_before_line(self):
- # before line
- glif = """
+ def testPointTypeOffCurve_illegal_before_line(self):
+ # before line
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1606,19 +1616,19 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testPointTypeOffCurve_illegal_smooth(self):
- # smooth=True
- glif = """
+ def testPointTypeOffCurve_illegal_smooth(self):
+ # smooth=True
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1628,18 +1638,18 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : True})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testOpenContourLooseOffCurves(self):
- glif = """
+ def testOpenContourLooseOffCurves(self):
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1652,8 +1662,8 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- self.assertRaises(GlifLibError, self.glifToPy, glif)
- py = """
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
@@ -1663,10 +1673,10 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.endPath()
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
- def testPointIdentifier(self):
- glif = """
+ def testPointIdentifier(self):
+ glif = """
<glyph name="a" format="2">
<outline>
<contour>
@@ -1678,7 +1688,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"identifier" : "1", "segmentType" : "move", "smooth" : False})
@@ -1687,13 +1697,13 @@ class TestGLIF2(unittest.TestCase):
pointPen.addPoint(*[(1, -2)], **{"identifier" : "4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testIdentifierConflict_legal_no_conflict(self):
- glif = """
+ def testIdentifierConflict_legal_no_conflict(self):
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -1714,7 +1724,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -1730,14 +1740,14 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- resultGlif = self.pyToGLIF(py)
- resultPy = self.glifToPy(glif)
- self.assertEqual(glif, resultGlif)
- self.assertEqual(py, resultPy)
+ resultGlif = self.pyToGLIF(py)
+ resultPy = self.glifToPy(glif)
+ self.assertEqual(glif, resultGlif)
+ self.assertEqual(py, resultPy)
- def testIdentifierConflict_point_point(self):
- # point - point
- glif = """
+ def testIdentifierConflict_point_point(self):
+ # point - point
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -1758,7 +1768,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -1774,12 +1784,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_point_contour(self):
- # point - contour
- glif = """
+ def testIdentifierConflict_point_contour(self):
+ # point - contour
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -1800,7 +1810,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -1816,12 +1826,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_point_component(self):
- # point - component
- glif = """
+ def testIdentifierConflict_point_component(self):
+ # point - component
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -1842,7 +1852,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -1858,12 +1868,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_point_guideline(self):
- # point - guideline
- glif = """
+ def testIdentifierConflict_point_guideline(self):
+ # point - guideline
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -1884,7 +1894,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -1900,12 +1910,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_point_anchor(self):
- # point - anchor
- glif = """
+ def testIdentifierConflict_point_anchor(self):
+ # point - anchor
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -1926,7 +1936,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -1942,12 +1952,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_contour_contour(self):
- # contour - contour
- glif = """
+ def testIdentifierConflict_contour_contour(self):
+ # contour - contour
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -1968,7 +1978,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -1984,12 +1994,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_contour_component(self):
- # contour - component
- glif = """
+ def testIdentifierConflict_contour_component(self):
+ # contour - component
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2010,7 +2020,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2026,12 +2036,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "contour1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_contour_guideline(self):
- # contour - guideline
- glif = """
+ def testIdentifierConflict_contour_guideline(self):
+ # contour - guideline
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="contour1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2052,7 +2062,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "contour1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2068,12 +2078,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_contour_anchor(self):
- # contour - anchor
- glif = """
+ def testIdentifierConflict_contour_anchor(self):
+ # contour - anchor
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2094,7 +2104,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2110,12 +2120,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_component_component(self):
- # component - component
- glif = """
+ def testIdentifierConflict_component_component(self):
+ # component - component
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2136,7 +2146,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2152,12 +2162,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_component_guideline(self):
- # component - guideline
- glif = """
+ def testIdentifierConflict_component_guideline(self):
+ # component - guideline
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="component1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2178,7 +2188,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "component1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2194,12 +2204,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_component_anchor(self):
- # component - anchor
- glif = """
+ def testIdentifierConflict_component_anchor(self):
+ # component - anchor
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2220,7 +2230,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2236,12 +2246,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "anchor1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_guideline_guideline(self):
- # guideline - guideline
- glif = """
+ def testIdentifierConflict_guideline_guideline(self):
+ # guideline - guideline
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline1"/>
@@ -2262,7 +2272,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline1", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2278,12 +2288,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_guideline_anchor(self):
- # guideline - anchor
- glif = """
+ def testIdentifierConflict_guideline_anchor(self):
+ # guideline - anchor
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="anchor1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2304,7 +2314,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "anchor1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
@@ -2320,12 +2330,12 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
- def testIdentifierConflict_anchor_anchor(self):
- # anchor - anchor
- glif = """
+ def testIdentifierConflict_anchor_anchor(self):
+ # anchor - anchor
+ glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
@@ -2346,7 +2356,7 @@ class TestGLIF2(unittest.TestCase):
</outline>
</glyph>
"""
- py = """
+ py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor1", "x" : 0, "y" : 0}]
@@ -2362,5 +2372,5 @@ class TestGLIF2(unittest.TestCase):
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
- self.assertRaises(GlifLibError, self.pyToGLIF, py)
- self.assertRaises(GlifLibError, self.glifToPy, glif)
+ self.assertRaises(GlifLibError, self.pyToGLIF, py)
+ self.assertRaises(GlifLibError, self.glifToPy, glif)
diff --git a/Tests/ufoLib/UFO1_test.py b/Tests/ufoLib/UFO1_test.py
index 5feb045a..aad35229 100644
--- a/Tests/ufoLib/UFO1_test.py
+++ b/Tests/ufoLib/UFO1_test.py
@@ -8,143 +8,129 @@ from fontTools.ufoLib import plistlib
from .testSupport import fontInfoVersion1, fontInfoVersion2
-class TestInfoObject: pass
+class TestInfoObject:
+ pass
class ReadFontInfoVersion1TestCase(unittest.TestCase):
-
- def setUp(self):
- self.dstDir = tempfile.mktemp()
- os.mkdir(self.dstDir)
- metaInfo = {
- "creator": "test",
- "formatVersion": 1
- }
- path = os.path.join(self.dstDir, "metainfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(metaInfo, f)
-
- def tearDown(self):
- shutil.rmtree(self.dstDir)
-
- def _writeInfoToPlist(self, info):
- path = os.path.join(self.dstDir, "fontinfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(info, f)
-
- def testRead(self):
- originalData = dict(fontInfoVersion1)
- self._writeInfoToPlist(originalData)
- infoObject = TestInfoObject()
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(infoObject)
- for attr in dir(infoObject):
- if attr not in fontInfoVersion2:
- continue
- originalValue = fontInfoVersion2[attr]
- readValue = getattr(infoObject, attr)
- self.assertEqual(originalValue, readValue)
-
- def testFontStyleConversion(self):
- fontStyle1To2 = {
- 64 : "regular",
- 1 : "italic",
- 32 : "bold",
- 33 : "bold italic"
- }
- for old, new in list(fontStyle1To2.items()):
- info = dict(fontInfoVersion1)
- info["fontStyle"] = old
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- infoObject = TestInfoObject()
- reader.readInfo(infoObject)
- self.assertEqual(new, infoObject.styleMapStyleName)
-
- def testWidthNameConversion(self):
- widthName1To2 = {
- "Ultra-condensed" : 1,
- "Extra-condensed" : 2,
- "Condensed" : 3,
- "Semi-condensed" : 4,
- "Medium (normal)" : 5,
- "Semi-expanded" : 6,
- "Expanded" : 7,
- "Extra-expanded" : 8,
- "Ultra-expanded" : 9
- }
- for old, new in list(widthName1To2.items()):
- info = dict(fontInfoVersion1)
- info["widthName"] = old
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- infoObject = TestInfoObject()
- reader.readInfo(infoObject)
- self.assertEqual(new, infoObject.openTypeOS2WidthClass)
+ def setUp(self):
+ self.dstDir = tempfile.mktemp()
+ os.mkdir(self.dstDir)
+ metaInfo = {"creator": "test", "formatVersion": 1}
+ path = os.path.join(self.dstDir, "metainfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(metaInfo, f)
+
+ def tearDown(self):
+ shutil.rmtree(self.dstDir)
+
+ def _writeInfoToPlist(self, info):
+ path = os.path.join(self.dstDir, "fontinfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(info, f)
+
+ def testRead(self):
+ originalData = dict(fontInfoVersion1)
+ self._writeInfoToPlist(originalData)
+ infoObject = TestInfoObject()
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(infoObject)
+ for attr in dir(infoObject):
+ if attr not in fontInfoVersion2:
+ continue
+ originalValue = fontInfoVersion2[attr]
+ readValue = getattr(infoObject, attr)
+ self.assertEqual(originalValue, readValue)
+
+ def testFontStyleConversion(self):
+ fontStyle1To2 = {64: "regular", 1: "italic", 32: "bold", 33: "bold italic"}
+ for old, new in list(fontStyle1To2.items()):
+ info = dict(fontInfoVersion1)
+ info["fontStyle"] = old
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ infoObject = TestInfoObject()
+ reader.readInfo(infoObject)
+ self.assertEqual(new, infoObject.styleMapStyleName)
+
+ def testWidthNameConversion(self):
+ widthName1To2 = {
+ "Ultra-condensed": 1,
+ "Extra-condensed": 2,
+ "Condensed": 3,
+ "Semi-condensed": 4,
+ "Medium (normal)": 5,
+ "Semi-expanded": 6,
+ "Expanded": 7,
+ "Extra-expanded": 8,
+ "Ultra-expanded": 9,
+ }
+ for old, new in list(widthName1To2.items()):
+ info = dict(fontInfoVersion1)
+ info["widthName"] = old
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ infoObject = TestInfoObject()
+ reader.readInfo(infoObject)
+ self.assertEqual(new, infoObject.openTypeOS2WidthClass)
class WriteFontInfoVersion1TestCase(unittest.TestCase):
-
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.dstDir = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def makeInfoObject(self):
- infoObject = TestInfoObject()
- for attr, value in list(fontInfoVersion2.items()):
- setattr(infoObject, attr, value)
- return infoObject
-
- def readPlist(self):
- path = os.path.join(self.dstDir, "fontinfo.plist")
- with open(path, "rb") as f:
- plist = plistlib.load(f)
- return plist
-
- def testWrite(self):
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=1)
- writer.writeInfo(infoObject)
- writtenData = self.readPlist()
- for attr, originalValue in list(fontInfoVersion1.items()):
- newValue = writtenData[attr]
- self.assertEqual(newValue, originalValue)
-
- def testFontStyleConversion(self):
- fontStyle1To2 = {
- 64 : "regular",
- 1 : "italic",
- 32 : "bold",
- 33 : "bold italic"
- }
- for old, new in list(fontStyle1To2.items()):
- infoObject = self.makeInfoObject()
- infoObject.styleMapStyleName = new
- writer = UFOWriter(self.dstDir, formatVersion=1)
- writer.writeInfo(infoObject)
- writtenData = self.readPlist()
- self.assertEqual(writtenData["fontStyle"], old)
-
- def testWidthNameConversion(self):
- widthName1To2 = {
- "Ultra-condensed" : 1,
- "Extra-condensed" : 2,
- "Condensed" : 3,
- "Semi-condensed" : 4,
- "Medium (normal)" : 5,
- "Semi-expanded" : 6,
- "Expanded" : 7,
- "Extra-expanded" : 8,
- "Ultra-expanded" : 9
- }
- for old, new in list(widthName1To2.items()):
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WidthClass = new
- writer = UFOWriter(self.dstDir, formatVersion=1)
- writer.writeInfo(infoObject)
- writtenData = self.readPlist()
- self.assertEqual(writtenData["widthName"], old)
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.dstDir = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def makeInfoObject(self):
+ infoObject = TestInfoObject()
+ for attr, value in list(fontInfoVersion2.items()):
+ setattr(infoObject, attr, value)
+ return infoObject
+
+ def readPlist(self):
+ path = os.path.join(self.dstDir, "fontinfo.plist")
+ with open(path, "rb") as f:
+ plist = plistlib.load(f)
+ return plist
+
+ def testWrite(self):
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=1)
+ writer.writeInfo(infoObject)
+ writtenData = self.readPlist()
+ for attr, originalValue in list(fontInfoVersion1.items()):
+ newValue = writtenData[attr]
+ self.assertEqual(newValue, originalValue)
+
+ def testFontStyleConversion(self):
+ fontStyle1To2 = {64: "regular", 1: "italic", 32: "bold", 33: "bold italic"}
+ for old, new in list(fontStyle1To2.items()):
+ infoObject = self.makeInfoObject()
+ infoObject.styleMapStyleName = new
+ writer = UFOWriter(self.dstDir, formatVersion=1)
+ writer.writeInfo(infoObject)
+ writtenData = self.readPlist()
+ self.assertEqual(writtenData["fontStyle"], old)
+
+ def testWidthNameConversion(self):
+ widthName1To2 = {
+ "Ultra-condensed": 1,
+ "Extra-condensed": 2,
+ "Condensed": 3,
+ "Semi-condensed": 4,
+ "Medium (normal)": 5,
+ "Semi-expanded": 6,
+ "Expanded": 7,
+ "Extra-expanded": 8,
+ "Ultra-expanded": 9,
+ }
+ for old, new in list(widthName1To2.items()):
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WidthClass = new
+ writer = UFOWriter(self.dstDir, formatVersion=1)
+ writer.writeInfo(infoObject)
+ writtenData = self.readPlist()
+ self.assertEqual(writtenData["widthName"], old)
diff --git a/Tests/ufoLib/UFO2_test.py b/Tests/ufoLib/UFO2_test.py
index 68b4bafd..ccd20388 100644
--- a/Tests/ufoLib/UFO2_test.py
+++ b/Tests/ufoLib/UFO2_test.py
@@ -8,1405 +8,1605 @@ from fontTools.ufoLib import plistlib
from .testSupport import fontInfoVersion2
-class TestInfoObject: pass
+class TestInfoObject:
+ pass
class ReadFontInfoVersion2TestCase(unittest.TestCase):
+ def setUp(self):
+ self.dstDir = tempfile.mktemp()
+ os.mkdir(self.dstDir)
+ metaInfo = {"creator": "test", "formatVersion": 2}
+ path = os.path.join(self.dstDir, "metainfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(metaInfo, f)
- def setUp(self):
- self.dstDir = tempfile.mktemp()
- os.mkdir(self.dstDir)
- metaInfo = {
- "creator": "test",
- "formatVersion": 2
- }
- path = os.path.join(self.dstDir, "metainfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(metaInfo, f)
+ def tearDown(self):
+ shutil.rmtree(self.dstDir)
- def tearDown(self):
- shutil.rmtree(self.dstDir)
+ def _writeInfoToPlist(self, info):
+ path = os.path.join(self.dstDir, "fontinfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(info, f)
- def _writeInfoToPlist(self, info):
- path = os.path.join(self.dstDir, "fontinfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(info, f)
+ def testRead(self):
+ originalData = dict(fontInfoVersion2)
+ self._writeInfoToPlist(originalData)
+ infoObject = TestInfoObject()
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(infoObject)
+ readData = {}
+ for attr in list(fontInfoVersion2.keys()):
+ readData[attr] = getattr(infoObject, attr)
+ self.assertEqual(originalData, readData)
- def testRead(self):
- originalData = dict(fontInfoVersion2)
- self._writeInfoToPlist(originalData)
- infoObject = TestInfoObject()
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(infoObject)
- readData = {}
- for attr in list(fontInfoVersion2.keys()):
- readData[attr] = getattr(infoObject, attr)
- self.assertEqual(originalData, readData)
+ def testGenericRead(self):
+ # familyName
+ info = dict(fontInfoVersion2)
+ info["familyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # styleName
+ info = dict(fontInfoVersion2)
+ info["styleName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # styleMapFamilyName
+ info = dict(fontInfoVersion2)
+ info["styleMapFamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # styleMapStyleName
+ ## not a string
+ info = dict(fontInfoVersion2)
+ info["styleMapStyleName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion2)
+ info["styleMapStyleName"] = "REGULAR"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # versionMajor
+ info = dict(fontInfoVersion2)
+ info["versionMajor"] = "1"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # versionMinor
+ info = dict(fontInfoVersion2)
+ info["versionMinor"] = "0"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # copyright
+ info = dict(fontInfoVersion2)
+ info["copyright"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # trademark
+ info = dict(fontInfoVersion2)
+ info["trademark"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # unitsPerEm
+ info = dict(fontInfoVersion2)
+ info["unitsPerEm"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # descender
+ info = dict(fontInfoVersion2)
+ info["descender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # xHeight
+ info = dict(fontInfoVersion2)
+ info["xHeight"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # capHeight
+ info = dict(fontInfoVersion2)
+ info["capHeight"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # ascender
+ info = dict(fontInfoVersion2)
+ info["ascender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # italicAngle
+ info = dict(fontInfoVersion2)
+ info["italicAngle"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- def testGenericRead(self):
- # familyName
- info = dict(fontInfoVersion2)
- info["familyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # styleName
- info = dict(fontInfoVersion2)
- info["styleName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # styleMapFamilyName
- info = dict(fontInfoVersion2)
- info["styleMapFamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # styleMapStyleName
- ## not a string
- info = dict(fontInfoVersion2)
- info["styleMapStyleName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion2)
- info["styleMapStyleName"] = "REGULAR"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # versionMajor
- info = dict(fontInfoVersion2)
- info["versionMajor"] = "1"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # versionMinor
- info = dict(fontInfoVersion2)
- info["versionMinor"] = "0"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # copyright
- info = dict(fontInfoVersion2)
- info["copyright"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # trademark
- info = dict(fontInfoVersion2)
- info["trademark"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # unitsPerEm
- info = dict(fontInfoVersion2)
- info["unitsPerEm"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # descender
- info = dict(fontInfoVersion2)
- info["descender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # xHeight
- info = dict(fontInfoVersion2)
- info["xHeight"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # capHeight
- info = dict(fontInfoVersion2)
- info["capHeight"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # ascender
- info = dict(fontInfoVersion2)
- info["ascender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # italicAngle
- info = dict(fontInfoVersion2)
- info["italicAngle"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ def testHeadRead(self):
+ # openTypeHeadCreated
+ ## not a string
+ info = dict(fontInfoVersion2)
+ info["openTypeHeadCreated"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## invalid format
+ info = dict(fontInfoVersion2)
+ info["openTypeHeadCreated"] = "2000-Jan-01 00:00:00"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHeadLowestRecPPEM
+ info = dict(fontInfoVersion2)
+ info["openTypeHeadLowestRecPPEM"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHeadFlags
+ info = dict(fontInfoVersion2)
+ info["openTypeHeadFlags"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- def testHeadRead(self):
- # openTypeHeadCreated
- ## not a string
- info = dict(fontInfoVersion2)
- info["openTypeHeadCreated"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## invalid format
- info = dict(fontInfoVersion2)
- info["openTypeHeadCreated"] = "2000-Jan-01 00:00:00"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHeadLowestRecPPEM
- info = dict(fontInfoVersion2)
- info["openTypeHeadLowestRecPPEM"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHeadFlags
- info = dict(fontInfoVersion2)
- info["openTypeHeadFlags"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ def testHheaRead(self):
+ # openTypeHheaAscender
+ info = dict(fontInfoVersion2)
+ info["openTypeHheaAscender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaDescender
+ info = dict(fontInfoVersion2)
+ info["openTypeHheaDescender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaLineGap
+ info = dict(fontInfoVersion2)
+ info["openTypeHheaLineGap"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaCaretSlopeRise
+ info = dict(fontInfoVersion2)
+ info["openTypeHheaCaretSlopeRise"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaCaretSlopeRun
+ info = dict(fontInfoVersion2)
+ info["openTypeHheaCaretSlopeRun"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaCaretOffset
+ info = dict(fontInfoVersion2)
+ info["openTypeHheaCaretOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- def testHheaRead(self):
- # openTypeHheaAscender
- info = dict(fontInfoVersion2)
- info["openTypeHheaAscender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaDescender
- info = dict(fontInfoVersion2)
- info["openTypeHheaDescender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaLineGap
- info = dict(fontInfoVersion2)
- info["openTypeHheaLineGap"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaCaretSlopeRise
- info = dict(fontInfoVersion2)
- info["openTypeHheaCaretSlopeRise"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaCaretSlopeRun
- info = dict(fontInfoVersion2)
- info["openTypeHheaCaretSlopeRun"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaCaretOffset
- info = dict(fontInfoVersion2)
- info["openTypeHheaCaretOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ def testNameRead(self):
+ # openTypeNameDesigner
+ info = dict(fontInfoVersion2)
+ info["openTypeNameDesigner"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameDesignerURL
+ info = dict(fontInfoVersion2)
+ info["openTypeNameDesignerURL"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameManufacturer
+ info = dict(fontInfoVersion2)
+ info["openTypeNameManufacturer"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameManufacturerURL
+ info = dict(fontInfoVersion2)
+ info["openTypeNameManufacturerURL"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameLicense
+ info = dict(fontInfoVersion2)
+ info["openTypeNameLicense"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameLicenseURL
+ info = dict(fontInfoVersion2)
+ info["openTypeNameLicenseURL"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameVersion
+ info = dict(fontInfoVersion2)
+ info["openTypeNameVersion"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameUniqueID
+ info = dict(fontInfoVersion2)
+ info["openTypeNameUniqueID"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameDescription
+ info = dict(fontInfoVersion2)
+ info["openTypeNameDescription"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNamePreferredFamilyName
+ info = dict(fontInfoVersion2)
+ info["openTypeNamePreferredFamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNamePreferredSubfamilyName
+ info = dict(fontInfoVersion2)
+ info["openTypeNamePreferredSubfamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameCompatibleFullName
+ info = dict(fontInfoVersion2)
+ info["openTypeNameCompatibleFullName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameSampleText
+ info = dict(fontInfoVersion2)
+ info["openTypeNameSampleText"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameWWSFamilyName
+ info = dict(fontInfoVersion2)
+ info["openTypeNameWWSFamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameWWSSubfamilyName
+ info = dict(fontInfoVersion2)
+ info["openTypeNameWWSSubfamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- def testNameRead(self):
- # openTypeNameDesigner
- info = dict(fontInfoVersion2)
- info["openTypeNameDesigner"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameDesignerURL
- info = dict(fontInfoVersion2)
- info["openTypeNameDesignerURL"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameManufacturer
- info = dict(fontInfoVersion2)
- info["openTypeNameManufacturer"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameManufacturerURL
- info = dict(fontInfoVersion2)
- info["openTypeNameManufacturerURL"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameLicense
- info = dict(fontInfoVersion2)
- info["openTypeNameLicense"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameLicenseURL
- info = dict(fontInfoVersion2)
- info["openTypeNameLicenseURL"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameVersion
- info = dict(fontInfoVersion2)
- info["openTypeNameVersion"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameUniqueID
- info = dict(fontInfoVersion2)
- info["openTypeNameUniqueID"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameDescription
- info = dict(fontInfoVersion2)
- info["openTypeNameDescription"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNamePreferredFamilyName
- info = dict(fontInfoVersion2)
- info["openTypeNamePreferredFamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNamePreferredSubfamilyName
- info = dict(fontInfoVersion2)
- info["openTypeNamePreferredSubfamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameCompatibleFullName
- info = dict(fontInfoVersion2)
- info["openTypeNameCompatibleFullName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameSampleText
- info = dict(fontInfoVersion2)
- info["openTypeNameSampleText"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameWWSFamilyName
- info = dict(fontInfoVersion2)
- info["openTypeNameWWSFamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameWWSSubfamilyName
- info = dict(fontInfoVersion2)
- info["openTypeNameWWSSubfamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ def testOS2Read(self):
+ # openTypeOS2WidthClass
+ ## not an int
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2WidthClass"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out or range
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2WidthClass"] = 15
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2WeightClass
+ info = dict(fontInfoVersion2)
+ ## not an int
+ info["openTypeOS2WeightClass"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info["openTypeOS2WeightClass"] = -50
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2Selection
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2Selection"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2VendorID
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2VendorID"] = 1234
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2Panose
+ ## not an int
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too few values
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2Panose"] = [0, 1, 2, 3]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2FamilyClass
+ ## not an int
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2FamilyClass"] = [1, str(1)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too few values
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2FamilyClass"] = [1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2FamilyClass"] = [1, 1, 1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2FamilyClass"] = [1, 201]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2UnicodeRanges
+ ## not an int
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2UnicodeRanges"] = ["0"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2UnicodeRanges"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2CodePageRanges
+ ## not an int
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2CodePageRanges"] = ["0"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2CodePageRanges"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2TypoAscender
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2TypoAscender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2TypoDescender
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2TypoDescender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2TypoLineGap
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2TypoLineGap"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2WinAscent
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2WinAscent"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2WinDescent
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2WinDescent"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2Type
+ ## not an int
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2Type"] = ["1"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2Type"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptXSize
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SubscriptXSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptYSize
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SubscriptYSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptXOffset
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SubscriptXOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptYOffset
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SubscriptYOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptXSize
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SuperscriptXSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptYSize
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SuperscriptYSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptXOffset
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SuperscriptXOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptYOffset
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2SuperscriptYOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2StrikeoutSize
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2StrikeoutSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2StrikeoutPosition
+ info = dict(fontInfoVersion2)
+ info["openTypeOS2StrikeoutPosition"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- def testOS2Read(self):
- # openTypeOS2WidthClass
- ## not an int
- info = dict(fontInfoVersion2)
- info["openTypeOS2WidthClass"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out or range
- info = dict(fontInfoVersion2)
- info["openTypeOS2WidthClass"] = 15
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2WeightClass
- info = dict(fontInfoVersion2)
- ## not an int
- info["openTypeOS2WeightClass"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info["openTypeOS2WeightClass"] = -50
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2Selection
- info = dict(fontInfoVersion2)
- info["openTypeOS2Selection"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2VendorID
- info = dict(fontInfoVersion2)
- info["openTypeOS2VendorID"] = 1234
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2Panose
- ## not an int
- info = dict(fontInfoVersion2)
- info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too few values
- info = dict(fontInfoVersion2)
- info["openTypeOS2Panose"] = [0, 1, 2, 3]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2FamilyClass
- ## not an int
- info = dict(fontInfoVersion2)
- info["openTypeOS2FamilyClass"] = [1, str(1)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too few values
- info = dict(fontInfoVersion2)
- info["openTypeOS2FamilyClass"] = [1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["openTypeOS2FamilyClass"] = [1, 1, 1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion2)
- info["openTypeOS2FamilyClass"] = [1, 201]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2UnicodeRanges
- ## not an int
- info = dict(fontInfoVersion2)
- info["openTypeOS2UnicodeRanges"] = ["0"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion2)
- info["openTypeOS2UnicodeRanges"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2CodePageRanges
- ## not an int
- info = dict(fontInfoVersion2)
- info["openTypeOS2CodePageRanges"] = ["0"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion2)
- info["openTypeOS2CodePageRanges"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2TypoAscender
- info = dict(fontInfoVersion2)
- info["openTypeOS2TypoAscender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2TypoDescender
- info = dict(fontInfoVersion2)
- info["openTypeOS2TypoDescender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2TypoLineGap
- info = dict(fontInfoVersion2)
- info["openTypeOS2TypoLineGap"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2WinAscent
- info = dict(fontInfoVersion2)
- info["openTypeOS2WinAscent"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2WinDescent
- info = dict(fontInfoVersion2)
- info["openTypeOS2WinDescent"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2Type
- ## not an int
- info = dict(fontInfoVersion2)
- info["openTypeOS2Type"] = ["1"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion2)
- info["openTypeOS2Type"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptXSize
- info = dict(fontInfoVersion2)
- info["openTypeOS2SubscriptXSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptYSize
- info = dict(fontInfoVersion2)
- info["openTypeOS2SubscriptYSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptXOffset
- info = dict(fontInfoVersion2)
- info["openTypeOS2SubscriptXOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptYOffset
- info = dict(fontInfoVersion2)
- info["openTypeOS2SubscriptYOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptXSize
- info = dict(fontInfoVersion2)
- info["openTypeOS2SuperscriptXSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptYSize
- info = dict(fontInfoVersion2)
- info["openTypeOS2SuperscriptYSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptXOffset
- info = dict(fontInfoVersion2)
- info["openTypeOS2SuperscriptXOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptYOffset
- info = dict(fontInfoVersion2)
- info["openTypeOS2SuperscriptYOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2StrikeoutSize
- info = dict(fontInfoVersion2)
- info["openTypeOS2StrikeoutSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2StrikeoutPosition
- info = dict(fontInfoVersion2)
- info["openTypeOS2StrikeoutPosition"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ def testVheaRead(self):
+ # openTypeVheaVertTypoAscender
+ info = dict(fontInfoVersion2)
+ info["openTypeVheaVertTypoAscender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaVertTypoDescender
+ info = dict(fontInfoVersion2)
+ info["openTypeVheaVertTypoDescender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaVertTypoLineGap
+ info = dict(fontInfoVersion2)
+ info["openTypeVheaVertTypoLineGap"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaCaretSlopeRise
+ info = dict(fontInfoVersion2)
+ info["openTypeVheaCaretSlopeRise"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaCaretSlopeRun
+ info = dict(fontInfoVersion2)
+ info["openTypeVheaCaretSlopeRun"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaCaretOffset
+ info = dict(fontInfoVersion2)
+ info["openTypeVheaCaretOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- def testVheaRead(self):
- # openTypeVheaVertTypoAscender
- info = dict(fontInfoVersion2)
- info["openTypeVheaVertTypoAscender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaVertTypoDescender
- info = dict(fontInfoVersion2)
- info["openTypeVheaVertTypoDescender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaVertTypoLineGap
- info = dict(fontInfoVersion2)
- info["openTypeVheaVertTypoLineGap"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaCaretSlopeRise
- info = dict(fontInfoVersion2)
- info["openTypeVheaCaretSlopeRise"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaCaretSlopeRun
- info = dict(fontInfoVersion2)
- info["openTypeVheaCaretSlopeRun"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaCaretOffset
- info = dict(fontInfoVersion2)
- info["openTypeVheaCaretOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ def testFONDRead(self):
+ # macintoshFONDFamilyID
+ info = dict(fontInfoVersion2)
+ info["macintoshFONDFamilyID"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # macintoshFONDName
+ info = dict(fontInfoVersion2)
+ info["macintoshFONDName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- def testFONDRead(self):
- # macintoshFONDFamilyID
- info = dict(fontInfoVersion2)
- info["macintoshFONDFamilyID"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # macintoshFONDName
- info = dict(fontInfoVersion2)
- info["macintoshFONDName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
-
- def testPostscriptRead(self):
- # postscriptFontName
- info = dict(fontInfoVersion2)
- info["postscriptFontName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # postscriptFullName
- info = dict(fontInfoVersion2)
- info["postscriptFullName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # postscriptSlantAngle
- info = dict(fontInfoVersion2)
- info["postscriptSlantAngle"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # postscriptUniqueID
- info = dict(fontInfoVersion2)
- info["postscriptUniqueID"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptUnderlineThickness
- info = dict(fontInfoVersion2)
- info["postscriptUnderlineThickness"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptUnderlinePosition
- info = dict(fontInfoVersion2)
- info["postscriptUnderlinePosition"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptIsFixedPitch
- info = dict(fontInfoVersion2)
- info["postscriptIsFixedPitch"] = 2
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueValues
- ## not a list
- info = dict(fontInfoVersion2)
- info["postscriptBlueValues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion2)
- info["postscriptBlueValues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["postscriptBlueValues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptOtherBlues
- ## not a list
- info = dict(fontInfoVersion2)
- info["postscriptOtherBlues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion2)
- info["postscriptOtherBlues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["postscriptOtherBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptFamilyBlues
- ## not a list
- info = dict(fontInfoVersion2)
- info["postscriptFamilyBlues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion2)
- info["postscriptFamilyBlues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["postscriptFamilyBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptFamilyOtherBlues
- ## not a list
- info = dict(fontInfoVersion2)
- info["postscriptFamilyOtherBlues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion2)
- info["postscriptFamilyOtherBlues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["postscriptFamilyOtherBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptStemSnapH
- ## not list
- info = dict(fontInfoVersion2)
- info["postscriptStemSnapH"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["postscriptStemSnapH"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptStemSnapV
- ## not list
- info = dict(fontInfoVersion2)
- info["postscriptStemSnapV"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion2)
- info["postscriptStemSnapV"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueFuzz
- info = dict(fontInfoVersion2)
- info["postscriptBlueFuzz"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueShift
- info = dict(fontInfoVersion2)
- info["postscriptBlueShift"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueScale
- info = dict(fontInfoVersion2)
- info["postscriptBlueScale"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptForceBold
- info = dict(fontInfoVersion2)
- info["postscriptForceBold"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptDefaultWidthX
- info = dict(fontInfoVersion2)
- info["postscriptDefaultWidthX"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptNominalWidthX
- info = dict(fontInfoVersion2)
- info["postscriptNominalWidthX"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptWeightName
- info = dict(fontInfoVersion2)
- info["postscriptWeightName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptDefaultCharacter
- info = dict(fontInfoVersion2)
- info["postscriptDefaultCharacter"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptWindowsCharacterSet
- info = dict(fontInfoVersion2)
- info["postscriptWindowsCharacterSet"] = -1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # macintoshFONDFamilyID
- info = dict(fontInfoVersion2)
- info["macintoshFONDFamilyID"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # macintoshFONDName
- info = dict(fontInfoVersion2)
- info["macintoshFONDName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ def testPostscriptRead(self):
+ # postscriptFontName
+ info = dict(fontInfoVersion2)
+ info["postscriptFontName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # postscriptFullName
+ info = dict(fontInfoVersion2)
+ info["postscriptFullName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # postscriptSlantAngle
+ info = dict(fontInfoVersion2)
+ info["postscriptSlantAngle"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # postscriptUniqueID
+ info = dict(fontInfoVersion2)
+ info["postscriptUniqueID"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptUnderlineThickness
+ info = dict(fontInfoVersion2)
+ info["postscriptUnderlineThickness"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptUnderlinePosition
+ info = dict(fontInfoVersion2)
+ info["postscriptUnderlinePosition"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptIsFixedPitch
+ info = dict(fontInfoVersion2)
+ info["postscriptIsFixedPitch"] = 2
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueValues
+ ## not a list
+ info = dict(fontInfoVersion2)
+ info["postscriptBlueValues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion2)
+ info["postscriptBlueValues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["postscriptBlueValues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptOtherBlues
+ ## not a list
+ info = dict(fontInfoVersion2)
+ info["postscriptOtherBlues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion2)
+ info["postscriptOtherBlues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["postscriptOtherBlues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptFamilyBlues
+ ## not a list
+ info = dict(fontInfoVersion2)
+ info["postscriptFamilyBlues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion2)
+ info["postscriptFamilyBlues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["postscriptFamilyBlues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptFamilyOtherBlues
+ ## not a list
+ info = dict(fontInfoVersion2)
+ info["postscriptFamilyOtherBlues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion2)
+ info["postscriptFamilyOtherBlues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["postscriptFamilyOtherBlues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptStemSnapH
+ ## not list
+ info = dict(fontInfoVersion2)
+ info["postscriptStemSnapH"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["postscriptStemSnapH"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptStemSnapV
+ ## not list
+ info = dict(fontInfoVersion2)
+ info["postscriptStemSnapV"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion2)
+ info["postscriptStemSnapV"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueFuzz
+ info = dict(fontInfoVersion2)
+ info["postscriptBlueFuzz"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueShift
+ info = dict(fontInfoVersion2)
+ info["postscriptBlueShift"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueScale
+ info = dict(fontInfoVersion2)
+ info["postscriptBlueScale"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptForceBold
+ info = dict(fontInfoVersion2)
+ info["postscriptForceBold"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptDefaultWidthX
+ info = dict(fontInfoVersion2)
+ info["postscriptDefaultWidthX"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptNominalWidthX
+ info = dict(fontInfoVersion2)
+ info["postscriptNominalWidthX"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptWeightName
+ info = dict(fontInfoVersion2)
+ info["postscriptWeightName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptDefaultCharacter
+ info = dict(fontInfoVersion2)
+ info["postscriptDefaultCharacter"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptWindowsCharacterSet
+ info = dict(fontInfoVersion2)
+ info["postscriptWindowsCharacterSet"] = -1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # macintoshFONDFamilyID
+ info = dict(fontInfoVersion2)
+ info["macintoshFONDFamilyID"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # macintoshFONDName
+ info = dict(fontInfoVersion2)
+ info["macintoshFONDName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
class WriteFontInfoVersion2TestCase(unittest.TestCase):
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.dstDir = os.path.join(self.tempDir, "test.ufo")
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.dstDir = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
- def makeInfoObject(self):
- infoObject = TestInfoObject()
- for attr, value in list(fontInfoVersion2.items()):
- setattr(infoObject, attr, value)
- return infoObject
+ def makeInfoObject(self):
+ infoObject = TestInfoObject()
+ for attr, value in list(fontInfoVersion2.items()):
+ setattr(infoObject, attr, value)
+ return infoObject
- def readPlist(self):
- path = os.path.join(self.dstDir, "fontinfo.plist")
- with open(path, "rb") as f:
- plist = plistlib.load(f)
- return plist
+ def readPlist(self):
+ path = os.path.join(self.dstDir, "fontinfo.plist")
+ with open(path, "rb") as f:
+ plist = plistlib.load(f)
+ return plist
- def testWrite(self):
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=2)
- writer.writeInfo(infoObject)
- writtenData = self.readPlist()
- for attr, originalValue in list(fontInfoVersion2.items()):
- newValue = writtenData[attr]
- self.assertEqual(newValue, originalValue)
+ def testWrite(self):
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ writer.writeInfo(infoObject)
+ writtenData = self.readPlist()
+ for attr, originalValue in list(fontInfoVersion2.items()):
+ newValue = writtenData[attr]
+ self.assertEqual(newValue, originalValue)
- def testGenericWrite(self):
- # familyName
- infoObject = self.makeInfoObject()
- infoObject.familyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # styleName
- infoObject = self.makeInfoObject()
- infoObject.styleName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # styleMapFamilyName
- infoObject = self.makeInfoObject()
- infoObject.styleMapFamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # styleMapStyleName
- ## not a string
- infoObject = self.makeInfoObject()
- infoObject.styleMapStyleName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.styleMapStyleName = "REGULAR"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # versionMajor
- infoObject = self.makeInfoObject()
- infoObject.versionMajor = "1"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # versionMinor
- infoObject = self.makeInfoObject()
- infoObject.versionMinor = "0"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # copyright
- infoObject = self.makeInfoObject()
- infoObject.copyright = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # trademark
- infoObject = self.makeInfoObject()
- infoObject.trademark = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # unitsPerEm
- infoObject = self.makeInfoObject()
- infoObject.unitsPerEm = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # descender
- infoObject = self.makeInfoObject()
- infoObject.descender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # xHeight
- infoObject = self.makeInfoObject()
- infoObject.xHeight = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # capHeight
- infoObject = self.makeInfoObject()
- infoObject.capHeight = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # ascender
- infoObject = self.makeInfoObject()
- infoObject.ascender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # italicAngle
- infoObject = self.makeInfoObject()
- infoObject.italicAngle = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testGenericWrite(self):
+ # familyName
+ infoObject = self.makeInfoObject()
+ infoObject.familyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # styleName
+ infoObject = self.makeInfoObject()
+ infoObject.styleName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # styleMapFamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.styleMapFamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # styleMapStyleName
+ ## not a string
+ infoObject = self.makeInfoObject()
+ infoObject.styleMapStyleName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.styleMapStyleName = "REGULAR"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # versionMajor
+ infoObject = self.makeInfoObject()
+ infoObject.versionMajor = "1"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # versionMinor
+ infoObject = self.makeInfoObject()
+ infoObject.versionMinor = "0"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # copyright
+ infoObject = self.makeInfoObject()
+ infoObject.copyright = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # trademark
+ infoObject = self.makeInfoObject()
+ infoObject.trademark = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # unitsPerEm
+ infoObject = self.makeInfoObject()
+ infoObject.unitsPerEm = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # descender
+ infoObject = self.makeInfoObject()
+ infoObject.descender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # xHeight
+ infoObject = self.makeInfoObject()
+ infoObject.xHeight = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # capHeight
+ infoObject = self.makeInfoObject()
+ infoObject.capHeight = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # ascender
+ infoObject = self.makeInfoObject()
+ infoObject.ascender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # italicAngle
+ infoObject = self.makeInfoObject()
+ infoObject.italicAngle = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- def testHeadWrite(self):
- # openTypeHeadCreated
- ## not a string
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadCreated = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## invalid format
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadCreated = "2000-Jan-01 00:00:00"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeHeadLowestRecPPEM
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadLowestRecPPEM = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeHeadFlags
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadFlags = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testHeadWrite(self):
+ # openTypeHeadCreated
+ ## not a string
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadCreated = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## invalid format
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadCreated = "2000-Jan-01 00:00:00"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeHeadLowestRecPPEM
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadLowestRecPPEM = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeHeadFlags
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadFlags = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- def testHheaWrite(self):
- # openTypeHheaAscender
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaAscender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeHheaDescender
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaDescender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeHheaLineGap
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaLineGap = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeHheaCaretSlopeRise
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaCaretSlopeRise = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeHheaCaretSlopeRun
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaCaretSlopeRun = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeHheaCaretOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaCaretOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testHheaWrite(self):
+ # openTypeHheaAscender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaAscender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeHheaDescender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaDescender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeHheaLineGap
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaLineGap = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeHheaCaretSlopeRise
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaCaretSlopeRise = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeHheaCaretSlopeRun
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaCaretSlopeRun = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeHheaCaretOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaCaretOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- def testNameWrite(self):
- # openTypeNameDesigner
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameDesigner = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameDesignerURL
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameDesignerURL = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameManufacturer
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameManufacturer = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameManufacturerURL
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameManufacturerURL = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameLicense
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameLicense = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameLicenseURL
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameLicenseURL = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameVersion
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameVersion = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameUniqueID
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameUniqueID = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameDescription
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameDescription = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNamePreferredFamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNamePreferredFamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNamePreferredSubfamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNamePreferredSubfamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameCompatibleFullName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameCompatibleFullName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameSampleText
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameSampleText = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameWWSFamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameWWSFamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeNameWWSSubfamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameWWSSubfamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testNameWrite(self):
+ # openTypeNameDesigner
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameDesigner = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameDesignerURL
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameDesignerURL = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameManufacturer
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameManufacturer = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameManufacturerURL
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameManufacturerURL = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameLicense
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameLicense = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameLicenseURL
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameLicenseURL = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameVersion
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameVersion = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameUniqueID
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameUniqueID = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameDescription
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameDescription = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNamePreferredFamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNamePreferredFamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNamePreferredSubfamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNamePreferredSubfamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameCompatibleFullName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameCompatibleFullName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameSampleText
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameSampleText = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameWWSFamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameWWSFamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeNameWWSSubfamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameWWSSubfamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- def testOS2Write(self):
- # openTypeOS2WidthClass
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WidthClass = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## out or range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WidthClass = 15
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2WeightClass
- infoObject = self.makeInfoObject()
- ## not an int
- infoObject.openTypeOS2WeightClass = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## out of range
- infoObject.openTypeOS2WeightClass = -50
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2Selection
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Selection = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2VendorID
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2VendorID = 1234
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2Panose
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too few values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Panose = [0, 1, 2, 3]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2FamilyClass
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [0, str(1)]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too few values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [1]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [1, 1, 1]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [1, 20]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2UnicodeRanges
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2UnicodeRanges = ["0"]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2UnicodeRanges = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2CodePageRanges
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2CodePageRanges = ["0"]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2CodePageRanges = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2TypoAscender
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2TypoAscender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2TypoDescender
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2TypoDescender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2TypoLineGap
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2TypoLineGap = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2WinAscent
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WinAscent = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2WinDescent
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WinDescent = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2Type
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Type = ["1"]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Type = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SubscriptXSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptXSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SubscriptYSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptYSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SubscriptXOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptXOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SubscriptYOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptYOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SuperscriptXSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptXSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SuperscriptYSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptYSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SuperscriptXOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptXOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2SuperscriptYOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptYOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2StrikeoutSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2StrikeoutSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeOS2StrikeoutPosition
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2StrikeoutPosition = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testOS2Write(self):
+ # openTypeOS2WidthClass
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WidthClass = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## out or range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WidthClass = 15
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2WeightClass
+ infoObject = self.makeInfoObject()
+ ## not an int
+ infoObject.openTypeOS2WeightClass = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## out of range
+ infoObject.openTypeOS2WeightClass = -50
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2Selection
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Selection = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2VendorID
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2VendorID = 1234
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2Panose
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too few values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Panose = [0, 1, 2, 3]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2FamilyClass
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [0, str(1)]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too few values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [1]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [1, 1, 1]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [1, 20]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2UnicodeRanges
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2UnicodeRanges = ["0"]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2UnicodeRanges = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2CodePageRanges
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2CodePageRanges = ["0"]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2CodePageRanges = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2TypoAscender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2TypoAscender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2TypoDescender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2TypoDescender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2TypoLineGap
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2TypoLineGap = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2WinAscent
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WinAscent = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2WinDescent
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WinDescent = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2Type
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Type = ["1"]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Type = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SubscriptXSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptXSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SubscriptYSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptYSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SubscriptXOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptXOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SubscriptYOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptYOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SuperscriptXSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptXSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SuperscriptYSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptYSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SuperscriptXOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptXOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2SuperscriptYOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptYOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2StrikeoutSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2StrikeoutSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeOS2StrikeoutPosition
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2StrikeoutPosition = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- def testVheaWrite(self):
- # openTypeVheaVertTypoAscender
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaVertTypoAscender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeVheaVertTypoDescender
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaVertTypoDescender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeVheaVertTypoLineGap
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaVertTypoLineGap = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeVheaCaretSlopeRise
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaCaretSlopeRise = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeVheaCaretSlopeRun
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaCaretSlopeRun = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # openTypeVheaCaretOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaCaretOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testVheaWrite(self):
+ # openTypeVheaVertTypoAscender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaVertTypoAscender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeVheaVertTypoDescender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaVertTypoDescender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeVheaVertTypoLineGap
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaVertTypoLineGap = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeVheaCaretSlopeRise
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaCaretSlopeRise = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeVheaCaretSlopeRun
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaCaretSlopeRun = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # openTypeVheaCaretOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaCaretOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- def testFONDWrite(self):
- # macintoshFONDFamilyID
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDFamilyID = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # macintoshFONDName
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testFONDWrite(self):
+ # macintoshFONDFamilyID
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDFamilyID = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # macintoshFONDName
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- def testPostscriptWrite(self):
- # postscriptFontName
- infoObject = self.makeInfoObject()
- infoObject.postscriptFontName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptFullName
- infoObject = self.makeInfoObject()
- infoObject.postscriptFullName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptSlantAngle
- infoObject = self.makeInfoObject()
- infoObject.postscriptSlantAngle = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptUniqueID
- infoObject = self.makeInfoObject()
- infoObject.postscriptUniqueID = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptUnderlineThickness
- infoObject = self.makeInfoObject()
- infoObject.postscriptUnderlineThickness = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptUnderlinePosition
- infoObject = self.makeInfoObject()
- infoObject.postscriptUnderlinePosition = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptIsFixedPitch
- infoObject = self.makeInfoObject()
- infoObject.postscriptIsFixedPitch = 2
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptBlueValues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueValues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueValues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueValues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptOtherBlues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptOtherBlues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptOtherBlues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptOtherBlues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptFamilyBlues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyBlues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyBlues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyBlues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptFamilyOtherBlues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyOtherBlues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyOtherBlues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyOtherBlues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptStemSnapH
- ## not list
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapH = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapH = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptStemSnapV
- ## not list
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapV = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapV = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptBlueFuzz
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueFuzz = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptBlueShift
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueShift = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptBlueScale
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueScale = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptForceBold
- infoObject = self.makeInfoObject()
- infoObject.postscriptForceBold = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptDefaultWidthX
- infoObject = self.makeInfoObject()
- infoObject.postscriptDefaultWidthX = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptNominalWidthX
- infoObject = self.makeInfoObject()
- infoObject.postscriptNominalWidthX = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptWeightName
- infoObject = self.makeInfoObject()
- infoObject.postscriptWeightName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptDefaultCharacter
- infoObject = self.makeInfoObject()
- infoObject.postscriptDefaultCharacter = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # postscriptWindowsCharacterSet
- infoObject = self.makeInfoObject()
- infoObject.postscriptWindowsCharacterSet = -1
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # macintoshFONDFamilyID
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDFamilyID = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- # macintoshFONDName
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDName = 123
- writer = UFOWriter(self.dstDir, formatVersion=2)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ def testPostscriptWrite(self):
+ # postscriptFontName
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFontName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptFullName
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFullName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptSlantAngle
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptSlantAngle = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptUniqueID
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptUniqueID = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptUnderlineThickness
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptUnderlineThickness = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptUnderlinePosition
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptUnderlinePosition = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptIsFixedPitch
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptIsFixedPitch = 2
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptBlueValues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueValues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueValues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueValues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptOtherBlues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptOtherBlues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptOtherBlues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptOtherBlues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptFamilyBlues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyBlues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyBlues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyBlues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptFamilyOtherBlues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyOtherBlues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyOtherBlues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyOtherBlues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptStemSnapH
+ ## not list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapH = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapH = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptStemSnapV
+ ## not list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapV = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapV = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptBlueFuzz
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueFuzz = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptBlueShift
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueShift = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptBlueScale
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueScale = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptForceBold
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptForceBold = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptDefaultWidthX
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptDefaultWidthX = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptNominalWidthX
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptNominalWidthX = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptWeightName
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptWeightName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptDefaultCharacter
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptDefaultCharacter = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # postscriptWindowsCharacterSet
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptWindowsCharacterSet = -1
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # macintoshFONDFamilyID
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDFamilyID = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ # macintoshFONDName
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
diff --git a/Tests/ufoLib/UFO3_test.py b/Tests/ufoLib/UFO3_test.py
index c4218023..95a51c4d 100644
--- a/Tests/ufoLib/UFO3_test.py
+++ b/Tests/ufoLib/UFO3_test.py
@@ -9,4165 +9,4519 @@ from fontTools.misc import plistlib
from .testSupport import fontInfoVersion3
-class TestInfoObject: pass
+class TestInfoObject:
+ pass
# --------------
# fontinfo.plist
# --------------
-class ReadFontInfoVersion3TestCase(unittest.TestCase):
- def setUp(self):
- self.dstDir = tempfile.mktemp()
- os.mkdir(self.dstDir)
- metaInfo = {
- "creator": "test",
- "formatVersion": 3
- }
- path = os.path.join(self.dstDir, "metainfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(metaInfo, f)
-
- def tearDown(self):
- shutil.rmtree(self.dstDir)
-
- def _writeInfoToPlist(self, info):
- path = os.path.join(self.dstDir, "fontinfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(info, f)
-
- def testRead(self):
- originalData = dict(fontInfoVersion3)
- self._writeInfoToPlist(originalData)
- infoObject = TestInfoObject()
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(infoObject)
- readData = {}
- for attr in list(fontInfoVersion3.keys()):
- readData[attr] = getattr(infoObject, attr)
- self.assertEqual(originalData, readData)
-
- def testGenericRead(self):
- # familyName
- info = dict(fontInfoVersion3)
- info["familyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # styleName
- info = dict(fontInfoVersion3)
- info["styleName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # styleMapFamilyName
- info = dict(fontInfoVersion3)
- info["styleMapFamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # styleMapStyleName
- ## not a string
- info = dict(fontInfoVersion3)
- info["styleMapStyleName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion3)
- info["styleMapStyleName"] = "REGULAR"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # versionMajor
- info = dict(fontInfoVersion3)
- info["versionMajor"] = "1"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # versionMinor
- info = dict(fontInfoVersion3)
- info["versionMinor"] = "0"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["versionMinor"] = -1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # copyright
- info = dict(fontInfoVersion3)
- info["copyright"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # trademark
- info = dict(fontInfoVersion3)
- info["trademark"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # unitsPerEm
- info = dict(fontInfoVersion3)
- info["unitsPerEm"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["unitsPerEm"] = -1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["unitsPerEm"] = -1.0
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # descender
- info = dict(fontInfoVersion3)
- info["descender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # xHeight
- info = dict(fontInfoVersion3)
- info["xHeight"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # capHeight
- info = dict(fontInfoVersion3)
- info["capHeight"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # ascender
- info = dict(fontInfoVersion3)
- info["ascender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # italicAngle
- info = dict(fontInfoVersion3)
- info["italicAngle"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
-
- def testGaspRead(self):
- # not a list
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # empty list
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = []
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- # not a dict
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = ["abc"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # dict not properly formatted
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=0xFFFF, notTheRightKey=1)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = [dict(notTheRightKey=1, rangeGaspBehavior=[0])]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # not an int for ppem
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM="abc", rangeGaspBehavior=[0]), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # not a list for behavior
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=10, rangeGaspBehavior="abc"), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # invalid behavior value
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=10, rangeGaspBehavior=[-1]), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # not sorted
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]), dict(rangeMaxPPEM=10, rangeGaspBehavior=[0])]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # no 0xFFFF
- info = dict(fontInfoVersion3)
- info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]), dict(rangeMaxPPEM=20, rangeGaspBehavior=[0])]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
-
- def testHeadRead(self):
- # openTypeHeadCreated
- ## not a string
- info = dict(fontInfoVersion3)
- info["openTypeHeadCreated"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## invalid format
- info = dict(fontInfoVersion3)
- info["openTypeHeadCreated"] = "2000-Jan-01 00:00:00"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHeadLowestRecPPEM
- info = dict(fontInfoVersion3)
- info["openTypeHeadLowestRecPPEM"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeHeadLowestRecPPEM"] = -1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHeadFlags
- info = dict(fontInfoVersion3)
- info["openTypeHeadFlags"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
-
- def testHheaRead(self):
- # openTypeHheaAscender
- info = dict(fontInfoVersion3)
- info["openTypeHheaAscender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaDescender
- info = dict(fontInfoVersion3)
- info["openTypeHheaDescender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaLineGap
- info = dict(fontInfoVersion3)
- info["openTypeHheaLineGap"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaCaretSlopeRise
- info = dict(fontInfoVersion3)
- info["openTypeHheaCaretSlopeRise"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaCaretSlopeRun
- info = dict(fontInfoVersion3)
- info["openTypeHheaCaretSlopeRun"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeHheaCaretOffset
- info = dict(fontInfoVersion3)
- info["openTypeHheaCaretOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
-
- def testNameRead(self):
- # openTypeNameDesigner
- info = dict(fontInfoVersion3)
- info["openTypeNameDesigner"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameDesignerURL
- info = dict(fontInfoVersion3)
- info["openTypeNameDesignerURL"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameManufacturer
- info = dict(fontInfoVersion3)
- info["openTypeNameManufacturer"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameManufacturerURL
- info = dict(fontInfoVersion3)
- info["openTypeNameManufacturerURL"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameLicense
- info = dict(fontInfoVersion3)
- info["openTypeNameLicense"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameLicenseURL
- info = dict(fontInfoVersion3)
- info["openTypeNameLicenseURL"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameVersion
- info = dict(fontInfoVersion3)
- info["openTypeNameVersion"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameUniqueID
- info = dict(fontInfoVersion3)
- info["openTypeNameUniqueID"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameDescription
- info = dict(fontInfoVersion3)
- info["openTypeNameDescription"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNamePreferredFamilyName
- info = dict(fontInfoVersion3)
- info["openTypeNamePreferredFamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNamePreferredSubfamilyName
- info = dict(fontInfoVersion3)
- info["openTypeNamePreferredSubfamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameCompatibleFullName
- info = dict(fontInfoVersion3)
- info["openTypeNameCompatibleFullName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameSampleText
- info = dict(fontInfoVersion3)
- info["openTypeNameSampleText"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameWWSFamilyName
- info = dict(fontInfoVersion3)
- info["openTypeNameWWSFamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameWWSSubfamilyName
- info = dict(fontInfoVersion3)
- info["openTypeNameWWSSubfamilyName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeNameRecords
- ## not a list
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## not a dict
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = ["abc"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## invalid dict structure
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [dict(foo="bar")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## incorrect keys
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.", foo="bar")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(platformID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, languageID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, encodingID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1)
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## invalid values
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID="1", platformID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID="1", encodingID=1, languageID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, encodingID="1", languageID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, encodingID=1, languageID="1", string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string=1)
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## duplicate
- info = dict(fontInfoVersion3)
- info["openTypeNameRecords"] = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record."),
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
-
- def testOS2Read(self):
- # openTypeOS2WidthClass
- ## not an int
- info = dict(fontInfoVersion3)
- info["openTypeOS2WidthClass"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out or range
- info = dict(fontInfoVersion3)
- info["openTypeOS2WidthClass"] = 15
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2WeightClass
- info = dict(fontInfoVersion3)
- ## not an int
- info["openTypeOS2WeightClass"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info["openTypeOS2WeightClass"] = -50
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2Selection
- info = dict(fontInfoVersion3)
- info["openTypeOS2Selection"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2VendorID
- info = dict(fontInfoVersion3)
- info["openTypeOS2VendorID"] = 1234
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2Panose
- ## not an int
- info = dict(fontInfoVersion3)
- info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## negative
- info = dict(fontInfoVersion3)
- info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, -9]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too few values
- info = dict(fontInfoVersion3)
- info["openTypeOS2Panose"] = [0, 1, 2, 3]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2FamilyClass
- ## not an int
- info = dict(fontInfoVersion3)
- info["openTypeOS2FamilyClass"] = [1, str(1)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too few values
- info = dict(fontInfoVersion3)
- info["openTypeOS2FamilyClass"] = [1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["openTypeOS2FamilyClass"] = [1, 1, 1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion3)
- info["openTypeOS2FamilyClass"] = [1, 201]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2UnicodeRanges
- ## not an int
- info = dict(fontInfoVersion3)
- info["openTypeOS2UnicodeRanges"] = ["0"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion3)
- info["openTypeOS2UnicodeRanges"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2CodePageRanges
- ## not an int
- info = dict(fontInfoVersion3)
- info["openTypeOS2CodePageRanges"] = ["0"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion3)
- info["openTypeOS2CodePageRanges"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2TypoAscender
- info = dict(fontInfoVersion3)
- info["openTypeOS2TypoAscender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2TypoDescender
- info = dict(fontInfoVersion3)
- info["openTypeOS2TypoDescender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2TypoLineGap
- info = dict(fontInfoVersion3)
- info["openTypeOS2TypoLineGap"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2WinAscent
- info = dict(fontInfoVersion3)
- info["openTypeOS2WinAscent"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeOS2WinAscent"] = -1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2WinDescent
- info = dict(fontInfoVersion3)
- info["openTypeOS2WinDescent"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- info = dict(fontInfoVersion3)
- info["openTypeOS2WinDescent"] = -1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2Type
- ## not an int
- info = dict(fontInfoVersion3)
- info["openTypeOS2Type"] = ["1"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- ## out of range
- info = dict(fontInfoVersion3)
- info["openTypeOS2Type"] = [-1]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptXSize
- info = dict(fontInfoVersion3)
- info["openTypeOS2SubscriptXSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptYSize
- info = dict(fontInfoVersion3)
- info["openTypeOS2SubscriptYSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptXOffset
- info = dict(fontInfoVersion3)
- info["openTypeOS2SubscriptXOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SubscriptYOffset
- info = dict(fontInfoVersion3)
- info["openTypeOS2SubscriptYOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptXSize
- info = dict(fontInfoVersion3)
- info["openTypeOS2SuperscriptXSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptYSize
- info = dict(fontInfoVersion3)
- info["openTypeOS2SuperscriptYSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptXOffset
- info = dict(fontInfoVersion3)
- info["openTypeOS2SuperscriptXOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2SuperscriptYOffset
- info = dict(fontInfoVersion3)
- info["openTypeOS2SuperscriptYOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2StrikeoutSize
- info = dict(fontInfoVersion3)
- info["openTypeOS2StrikeoutSize"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeOS2StrikeoutPosition
- info = dict(fontInfoVersion3)
- info["openTypeOS2StrikeoutPosition"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
-
- def testVheaRead(self):
- # openTypeVheaVertTypoAscender
- info = dict(fontInfoVersion3)
- info["openTypeVheaVertTypoAscender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaVertTypoDescender
- info = dict(fontInfoVersion3)
- info["openTypeVheaVertTypoDescender"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaVertTypoLineGap
- info = dict(fontInfoVersion3)
- info["openTypeVheaVertTypoLineGap"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaCaretSlopeRise
- info = dict(fontInfoVersion3)
- info["openTypeVheaCaretSlopeRise"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaCaretSlopeRun
- info = dict(fontInfoVersion3)
- info["openTypeVheaCaretSlopeRun"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # openTypeVheaCaretOffset
- info = dict(fontInfoVersion3)
- info["openTypeVheaCaretOffset"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
-
- def testFONDRead(self):
- # macintoshFONDFamilyID
- info = dict(fontInfoVersion3)
- info["macintoshFONDFamilyID"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # macintoshFONDName
- info = dict(fontInfoVersion3)
- info["macintoshFONDName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
-
- def testPostscriptRead(self):
- # postscriptFontName
- info = dict(fontInfoVersion3)
- info["postscriptFontName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # postscriptFullName
- info = dict(fontInfoVersion3)
- info["postscriptFullName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # postscriptSlantAngle
- info = dict(fontInfoVersion3)
- info["postscriptSlantAngle"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
- # postscriptUniqueID
- info = dict(fontInfoVersion3)
- info["postscriptUniqueID"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptUnderlineThickness
- info = dict(fontInfoVersion3)
- info["postscriptUnderlineThickness"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptUnderlinePosition
- info = dict(fontInfoVersion3)
- info["postscriptUnderlinePosition"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptIsFixedPitch
- info = dict(fontInfoVersion3)
- info["postscriptIsFixedPitch"] = 2
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueValues
- ## not a list
- info = dict(fontInfoVersion3)
- info["postscriptBlueValues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion3)
- info["postscriptBlueValues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["postscriptBlueValues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptOtherBlues
- ## not a list
- info = dict(fontInfoVersion3)
- info["postscriptOtherBlues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion3)
- info["postscriptOtherBlues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["postscriptOtherBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptFamilyBlues
- ## not a list
- info = dict(fontInfoVersion3)
- info["postscriptFamilyBlues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion3)
- info["postscriptFamilyBlues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["postscriptFamilyBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptFamilyOtherBlues
- ## not a list
- info = dict(fontInfoVersion3)
- info["postscriptFamilyOtherBlues"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## uneven value count
- info = dict(fontInfoVersion3)
- info["postscriptFamilyOtherBlues"] = [500]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["postscriptFamilyOtherBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptStemSnapH
- ## not list
- info = dict(fontInfoVersion3)
- info["postscriptStemSnapH"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["postscriptStemSnapH"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptStemSnapV
- ## not list
- info = dict(fontInfoVersion3)
- info["postscriptStemSnapV"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many values
- info = dict(fontInfoVersion3)
- info["postscriptStemSnapV"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueFuzz
- info = dict(fontInfoVersion3)
- info["postscriptBlueFuzz"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueShift
- info = dict(fontInfoVersion3)
- info["postscriptBlueShift"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptBlueScale
- info = dict(fontInfoVersion3)
- info["postscriptBlueScale"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptForceBold
- info = dict(fontInfoVersion3)
- info["postscriptForceBold"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptDefaultWidthX
- info = dict(fontInfoVersion3)
- info["postscriptDefaultWidthX"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptNominalWidthX
- info = dict(fontInfoVersion3)
- info["postscriptNominalWidthX"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptWeightName
- info = dict(fontInfoVersion3)
- info["postscriptWeightName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptDefaultCharacter
- info = dict(fontInfoVersion3)
- info["postscriptDefaultCharacter"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # postscriptWindowsCharacterSet
- info = dict(fontInfoVersion3)
- info["postscriptWindowsCharacterSet"] = -1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # macintoshFONDFamilyID
- info = dict(fontInfoVersion3)
- info["macintoshFONDFamilyID"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # macintoshFONDName
- info = dict(fontInfoVersion3)
- info["macintoshFONDName"] = 123
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
-
- def testWOFFRead(self):
- # woffMajorVersion
- info = dict(fontInfoVersion3)
- info["woffMajorVersion"] = 1.0
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["woffMajorVersion"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # woffMinorVersion
- info = dict(fontInfoVersion3)
- info["woffMinorVersion"] = 1.0
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["woffMinorVersion"] = "abc"
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # woffMetadataUniqueID
- ## none
- info = dict(fontInfoVersion3)
- del info["woffMetadataUniqueID"]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## not a dict
- info = dict(fontInfoVersion3)
- info["woffMetadataUniqueID"] = 1
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## unknown key
- info = dict(fontInfoVersion3)
- info["woffMetadataUniqueID"] = dict(id="foo", notTheRightKey=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## no id
- info = dict(fontInfoVersion3)
- info["woffMetadataUniqueID"] = dict()
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## not a string for id
- info = dict(fontInfoVersion3)
- info["woffMetadataUniqueID"] = dict(id=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## empty string
- info = dict(fontInfoVersion3)
- info["woffMetadataUniqueID"] = dict(id="")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- # woffMetadataVendor
- ## no name
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(url="foo")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## name not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name=1, url="foo")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## name an empty string
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="", url="foo")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## no URL
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="foo")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="foo", url=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## url empty string
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="foo", url="")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## have dir
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="foo", url="bar", dir="ltr")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="foo", url="bar", dir="rtl")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## dir not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="foo", url="bar", dir=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir not ltr or rtl
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = dict(name="foo", url="bar", dir="utd")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## have class
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = {"name" : "foo", "url" : "bar", "class" : "hello"}
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## class not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = {"name" : "foo", "url" : "bar", "class" : 1}
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## class empty string
- info = dict(fontInfoVersion3)
- info["woffMetadataVendor"] = {"name" : "foo", "url" : "bar", "class" : ""}
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- # woffMetadataCredits
- ## no credits attribute
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = {}
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## unknown attribute
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(name="foo")], notTheRightKey=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## not a list
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits="abc")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## no elements in credits
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## credit not a dict
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=["abc"])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## unknown key
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(name="foo", notTheRightKey=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## no name
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(url="foo")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## name not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(name=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(name="foo", url=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## role not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(name="foo", role=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(name="foo", dir=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir not ltr or rtl
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[dict(name="foo", dir="utd")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## class not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCredits"] = dict(credits=[{"name" : "foo", "class" : 1}])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # woffMetadataDescription
- ## no url
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(text="foo")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(text="foo")], url=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## no text
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(url="foo")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text not a list
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text="abc")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item not a dict
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=["abc"])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item unknown key
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(text="foo", notTheRightKey=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item missing text
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(language="foo")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(text=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(text="foo", url=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## language not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(text="foo", language=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir not ltr or rtl
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[dict(text="foo", dir="utd")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## class not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataDescription"] = dict(text=[{"text" : "foo", "class" : 1}])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # woffMetadataLicense
- ## no url
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text="foo")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text="foo")], url=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## id not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text="foo")], id=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## no text
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(url="foo")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## text not a list
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text="abc")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item not a dict
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=["abc"])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item unknown key
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text="foo", notTheRightKey=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item missing text
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(language="foo")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text="foo", url=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## language not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text="foo", language=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir not ltr or rtl
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[dict(text="foo", dir="utd")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## class not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicense"] = dict(text=[{"text" : "foo", "class" : 1}])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # woffMetadataCopyright
- ## unknown attribute
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[dict(text="foo")], notTheRightKey=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## no text
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict()
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text not a list
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text="abc")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item not a dict
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=["abc"])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item unknown key
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[dict(text="foo", notTheRightKey=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item missing text
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[dict(language="foo")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[dict(text=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[dict(text="foo", url=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## language not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[dict(text="foo", language=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir not ltr or rtl
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[dict(text="foo", dir="utd")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## class not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataCopyright"] = dict(text=[{"text" : "foo", "class" : 1}])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # woffMetadataTrademark
- ## unknown attribute
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[dict(text="foo")], notTheRightKey=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## no text
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict()
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text not a list
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text="abc")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item not a dict
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=["abc"])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item unknown key
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[dict(text="foo", notTheRightKey=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text item missing text
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[dict(language="foo")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## text not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[dict(text=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## url not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[dict(text="foo", url=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## language not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[dict(text="foo", language=1)])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir not ltr or rtl
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[dict(text="foo", dir="utd")])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## class not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataTrademark"] = dict(text=[{"text" : "foo", "class" : 1}])
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # woffMetadataLicensee
- ## no name
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = dict()
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## unknown attribute
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = dict(name="foo", notTheRightKey=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## name not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = dict(name=1)
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## dir options
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = dict(name="foo", dir="ltr")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = dict(name="foo", dir="rtl")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## dir not ltr or rtl
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = dict(name="foo", dir="utd")
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## have class
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = {"name" : "foo", "class" : "hello"}
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- reader.readInfo(TestInfoObject())
- ## class not a string
- info = dict(fontInfoVersion3)
- info["woffMetadataLicensee"] = {"name" : "foo", "class" : 1}
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
-
- def testGuidelinesRead(self):
- # x
- ## not an int or float
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x="1")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # y
- ## not an int or float
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(y="1")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # angle
- ## < 0
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, y=0, angle=-1)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## > 360
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, y=0, angle=361)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # name
- ## not a string
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, name=1)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # color
- ## not a string
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color=1)]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## not enough commas
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1 0, 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1 0 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1 0 0 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## not enough parts
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color=", 0, 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1, , 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1, 0, , 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1, 0, 0, ")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color=", , , ")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## not a number in all positions
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="r, 1, 1, 1")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1, g, 1, 1")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1, 1, b, 1")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1, 1, 1, a")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## too many parts
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="1, 0, 0, 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## < 0 in each position
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="-1, 0, 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="0, -1, 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="0, 0, -1, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="0, 0, 0, -1")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- ## > 1 in each position
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="2, 0, 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="0, 2, 0, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="0, 0, 2, 0")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, color="0, 0, 0, 2")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
- # identifier
- ## duplicate
- info = dict(fontInfoVersion3)
- info["guidelines"] = [dict(x=0, identifier="guide1"), dict(y=0, identifier="guide1")]
- self._writeInfoToPlist(info)
- reader = UFOReader(self.dstDir, validate=True)
- self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+class ReadFontInfoVersion3TestCase(unittest.TestCase):
+ def setUp(self):
+ self.dstDir = tempfile.mktemp()
+ os.mkdir(self.dstDir)
+ metaInfo = {"creator": "test", "formatVersion": 3}
+ path = os.path.join(self.dstDir, "metainfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(metaInfo, f)
+
+ def tearDown(self):
+ shutil.rmtree(self.dstDir)
+
+ def _writeInfoToPlist(self, info):
+ path = os.path.join(self.dstDir, "fontinfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(info, f)
+
+ def testRead(self):
+ originalData = dict(fontInfoVersion3)
+ self._writeInfoToPlist(originalData)
+ infoObject = TestInfoObject()
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(infoObject)
+ readData = {}
+ for attr in list(fontInfoVersion3.keys()):
+ readData[attr] = getattr(infoObject, attr)
+ self.assertEqual(originalData, readData)
+
+ def testGenericRead(self):
+ # familyName
+ info = dict(fontInfoVersion3)
+ info["familyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # styleName
+ info = dict(fontInfoVersion3)
+ info["styleName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # styleMapFamilyName
+ info = dict(fontInfoVersion3)
+ info["styleMapFamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # styleMapStyleName
+ ## not a string
+ info = dict(fontInfoVersion3)
+ info["styleMapStyleName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion3)
+ info["styleMapStyleName"] = "REGULAR"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # versionMajor
+ info = dict(fontInfoVersion3)
+ info["versionMajor"] = "1"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # versionMinor
+ info = dict(fontInfoVersion3)
+ info["versionMinor"] = "0"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["versionMinor"] = -1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # copyright
+ info = dict(fontInfoVersion3)
+ info["copyright"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # trademark
+ info = dict(fontInfoVersion3)
+ info["trademark"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # unitsPerEm
+ info = dict(fontInfoVersion3)
+ info["unitsPerEm"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["unitsPerEm"] = -1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["unitsPerEm"] = -1.0
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # descender
+ info = dict(fontInfoVersion3)
+ info["descender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # xHeight
+ info = dict(fontInfoVersion3)
+ info["xHeight"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # capHeight
+ info = dict(fontInfoVersion3)
+ info["capHeight"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # ascender
+ info = dict(fontInfoVersion3)
+ info["ascender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # italicAngle
+ info = dict(fontInfoVersion3)
+ info["italicAngle"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+
+ def testGaspRead(self):
+ # not a list
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # empty list
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = []
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ # not a dict
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = ["abc"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # dict not properly formatted
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=0xFFFF, notTheRightKey=1)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = [
+ dict(notTheRightKey=1, rangeGaspBehavior=[0])
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # not an int for ppem
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = [
+ dict(rangeMaxPPEM="abc", rangeGaspBehavior=[0]),
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # not a list for behavior
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = [
+ dict(rangeMaxPPEM=10, rangeGaspBehavior="abc"),
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # invalid behavior value
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = [
+ dict(rangeMaxPPEM=10, rangeGaspBehavior=[-1]),
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # not sorted
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = [
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]),
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # no 0xFFFF
+ info = dict(fontInfoVersion3)
+ info["openTypeGaspRangeRecords"] = [
+ dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]),
+ dict(rangeMaxPPEM=20, rangeGaspBehavior=[0]),
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+
+ def testHeadRead(self):
+ # openTypeHeadCreated
+ ## not a string
+ info = dict(fontInfoVersion3)
+ info["openTypeHeadCreated"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## invalid format
+ info = dict(fontInfoVersion3)
+ info["openTypeHeadCreated"] = "2000-Jan-01 00:00:00"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHeadLowestRecPPEM
+ info = dict(fontInfoVersion3)
+ info["openTypeHeadLowestRecPPEM"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeHeadLowestRecPPEM"] = -1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHeadFlags
+ info = dict(fontInfoVersion3)
+ info["openTypeHeadFlags"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+
+ def testHheaRead(self):
+ # openTypeHheaAscender
+ info = dict(fontInfoVersion3)
+ info["openTypeHheaAscender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaDescender
+ info = dict(fontInfoVersion3)
+ info["openTypeHheaDescender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaLineGap
+ info = dict(fontInfoVersion3)
+ info["openTypeHheaLineGap"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaCaretSlopeRise
+ info = dict(fontInfoVersion3)
+ info["openTypeHheaCaretSlopeRise"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaCaretSlopeRun
+ info = dict(fontInfoVersion3)
+ info["openTypeHheaCaretSlopeRun"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeHheaCaretOffset
+ info = dict(fontInfoVersion3)
+ info["openTypeHheaCaretOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+
+ def testNameRead(self):
+ # openTypeNameDesigner
+ info = dict(fontInfoVersion3)
+ info["openTypeNameDesigner"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameDesignerURL
+ info = dict(fontInfoVersion3)
+ info["openTypeNameDesignerURL"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameManufacturer
+ info = dict(fontInfoVersion3)
+ info["openTypeNameManufacturer"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameManufacturerURL
+ info = dict(fontInfoVersion3)
+ info["openTypeNameManufacturerURL"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameLicense
+ info = dict(fontInfoVersion3)
+ info["openTypeNameLicense"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameLicenseURL
+ info = dict(fontInfoVersion3)
+ info["openTypeNameLicenseURL"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameVersion
+ info = dict(fontInfoVersion3)
+ info["openTypeNameVersion"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameUniqueID
+ info = dict(fontInfoVersion3)
+ info["openTypeNameUniqueID"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameDescription
+ info = dict(fontInfoVersion3)
+ info["openTypeNameDescription"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNamePreferredFamilyName
+ info = dict(fontInfoVersion3)
+ info["openTypeNamePreferredFamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNamePreferredSubfamilyName
+ info = dict(fontInfoVersion3)
+ info["openTypeNamePreferredSubfamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameCompatibleFullName
+ info = dict(fontInfoVersion3)
+ info["openTypeNameCompatibleFullName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameSampleText
+ info = dict(fontInfoVersion3)
+ info["openTypeNameSampleText"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameWWSFamilyName
+ info = dict(fontInfoVersion3)
+ info["openTypeNameWWSFamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameWWSSubfamilyName
+ info = dict(fontInfoVersion3)
+ info["openTypeNameWWSSubfamilyName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeNameRecords
+ ## not a list
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## not a dict
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = ["abc"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## invalid dict structure
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [dict(foo="bar")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## incorrect keys
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ foo="bar",
+ )
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(platformID=1, encodingID=1, languageID=1, string="Name Record.")
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(nameID=1, encodingID=1, languageID=1, string="Name Record.")
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(nameID=1, platformID=1, languageID=1, string="Name Record.")
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(nameID=1, platformID=1, encodingID=1, string="Name Record.")
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(nameID=1, platformID=1, encodingID=1, languageID=1)
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## invalid values
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(
+ nameID="1",
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ )
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(
+ nameID=1,
+ platformID="1",
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ )
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID="1",
+ languageID=1,
+ string="Name Record.",
+ )
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID="1",
+ string="Name Record.",
+ )
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(nameID=1, platformID=1, encodingID=1, languageID=1, string=1)
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## duplicate
+ info = dict(fontInfoVersion3)
+ info["openTypeNameRecords"] = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ ),
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ ),
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+
+ def testOS2Read(self):
+ # openTypeOS2WidthClass
+ ## not an int
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2WidthClass"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out or range
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2WidthClass"] = 15
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2WeightClass
+ info = dict(fontInfoVersion3)
+ ## not an int
+ info["openTypeOS2WeightClass"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info["openTypeOS2WeightClass"] = -50
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2Selection
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2Selection"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2VendorID
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2VendorID"] = 1234
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2Panose
+ ## not an int
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## negative
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, -9]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too few values
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2Panose"] = [0, 1, 2, 3]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2FamilyClass
+ ## not an int
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2FamilyClass"] = [1, str(1)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too few values
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2FamilyClass"] = [1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2FamilyClass"] = [1, 1, 1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2FamilyClass"] = [1, 201]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2UnicodeRanges
+ ## not an int
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2UnicodeRanges"] = ["0"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2UnicodeRanges"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2CodePageRanges
+ ## not an int
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2CodePageRanges"] = ["0"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2CodePageRanges"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2TypoAscender
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2TypoAscender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2TypoDescender
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2TypoDescender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2TypoLineGap
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2TypoLineGap"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2WinAscent
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2WinAscent"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2WinAscent"] = -1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2WinDescent
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2WinDescent"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2WinDescent"] = -1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2Type
+ ## not an int
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2Type"] = ["1"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ ## out of range
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2Type"] = [-1]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptXSize
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SubscriptXSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptYSize
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SubscriptYSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptXOffset
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SubscriptXOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SubscriptYOffset
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SubscriptYOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptXSize
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SuperscriptXSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptYSize
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SuperscriptYSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptXOffset
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SuperscriptXOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2SuperscriptYOffset
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2SuperscriptYOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2StrikeoutSize
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2StrikeoutSize"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeOS2StrikeoutPosition
+ info = dict(fontInfoVersion3)
+ info["openTypeOS2StrikeoutPosition"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+
+ def testVheaRead(self):
+ """Invalid openTypeVhea* fontinfo values must be rejected on read.
+
+ Each case copies the valid fontInfoVersion3 data, replaces one
+ openTypeVhea* key with a non-numeric value ("abc"), writes it to the
+ test UFO via _writeInfoToPlist, and asserts that UFOReader.readInfo
+ raises UFOLibError when the reader is created with validate=True.
+ """
+ # openTypeVheaVertTypoAscender
+ info = dict(fontInfoVersion3)
+ info["openTypeVheaVertTypoAscender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaVertTypoDescender
+ info = dict(fontInfoVersion3)
+ info["openTypeVheaVertTypoDescender"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaVertTypoLineGap
+ info = dict(fontInfoVersion3)
+ info["openTypeVheaVertTypoLineGap"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaCaretSlopeRise
+ info = dict(fontInfoVersion3)
+ info["openTypeVheaCaretSlopeRise"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaCaretSlopeRun
+ info = dict(fontInfoVersion3)
+ info["openTypeVheaCaretSlopeRun"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # openTypeVheaCaretOffset
+ info = dict(fontInfoVersion3)
+ info["openTypeVheaCaretOffset"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+
+ def testFONDRead(self):
+ """Invalid macintoshFOND* fontinfo values must be rejected on read.
+
+ macintoshFONDFamilyID must be an integer (a string is invalid) and
+ macintoshFONDName must be a string (an integer is invalid); both
+ cases assert UFOLibError from UFOReader.readInfo with validate=True.
+
+ NOTE(review): these exact two checks are repeated verbatim at the end
+ of testPostscriptRead; the duplication looks unintentional — confirm
+ and consider removing one copy upstream.
+ """
+ # macintoshFONDFamilyID
+ info = dict(fontInfoVersion3)
+ info["macintoshFONDFamilyID"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # macintoshFONDName
+ info = dict(fontInfoVersion3)
+ info["macintoshFONDName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+
+ def testPostscriptRead(self):
+ """Invalid postscript* fontinfo values must be rejected on read.
+
+ Each case copies the valid fontInfoVersion3 data, replaces one
+ postscript* key with an invalid value (wrong type, out-of-range
+ number, or a blue-zone list that is not a list / has an uneven count /
+ exceeds the maximum number of entries), writes it to the test UFO via
+ _writeInfoToPlist, and asserts that UFOReader.readInfo raises
+ UFOLibError when the reader is created with validate=True.
+
+ NOTE(review): the first three cases pass TestInfoObject() as the
+ keyword argument info=..., while every later case passes it
+ positionally; both spellings call readInfo the same way, but the
+ mixed style looks accidental.
+ """
+ # postscriptFontName
+ info = dict(fontInfoVersion3)
+ info["postscriptFontName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # postscriptFullName
+ info = dict(fontInfoVersion3)
+ info["postscriptFullName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # postscriptSlantAngle
+ info = dict(fontInfoVersion3)
+ info["postscriptSlantAngle"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
+ # postscriptUniqueID
+ info = dict(fontInfoVersion3)
+ info["postscriptUniqueID"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptUnderlineThickness
+ info = dict(fontInfoVersion3)
+ info["postscriptUnderlineThickness"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptUnderlinePosition
+ info = dict(fontInfoVersion3)
+ info["postscriptUnderlinePosition"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptIsFixedPitch
+ # 2 is invalid because the value must be a boolean-like flag.
+ info = dict(fontInfoVersion3)
+ info["postscriptIsFixedPitch"] = 2
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueValues
+ ## not a list
+ info = dict(fontInfoVersion3)
+ info["postscriptBlueValues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion3)
+ info["postscriptBlueValues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ # 16 entries; the validator is expected to reject lists this long.
+ info = dict(fontInfoVersion3)
+ info["postscriptBlueValues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptOtherBlues
+ ## not a list
+ info = dict(fontInfoVersion3)
+ info["postscriptOtherBlues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion3)
+ info["postscriptOtherBlues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion3)
+ info["postscriptOtherBlues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptFamilyBlues
+ ## not a list
+ info = dict(fontInfoVersion3)
+ info["postscriptFamilyBlues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion3)
+ info["postscriptFamilyBlues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion3)
+ info["postscriptFamilyBlues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptFamilyOtherBlues
+ ## not a list
+ info = dict(fontInfoVersion3)
+ info["postscriptFamilyOtherBlues"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## uneven value count
+ info = dict(fontInfoVersion3)
+ info["postscriptFamilyOtherBlues"] = [500]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion3)
+ info["postscriptFamilyOtherBlues"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptStemSnapH
+ ## not list
+ info = dict(fontInfoVersion3)
+ info["postscriptStemSnapH"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ # note: stem-snap lists have no "uneven value count" case, unlike the
+ # blue-zone lists above — only type and length are checked here.
+ info = dict(fontInfoVersion3)
+ info["postscriptStemSnapH"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptStemSnapV
+ ## not list
+ info = dict(fontInfoVersion3)
+ info["postscriptStemSnapV"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many values
+ info = dict(fontInfoVersion3)
+ info["postscriptStemSnapV"] = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueFuzz
+ info = dict(fontInfoVersion3)
+ info["postscriptBlueFuzz"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueShift
+ info = dict(fontInfoVersion3)
+ info["postscriptBlueShift"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptBlueScale
+ info = dict(fontInfoVersion3)
+ info["postscriptBlueScale"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptForceBold
+ info = dict(fontInfoVersion3)
+ info["postscriptForceBold"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptDefaultWidthX
+ info = dict(fontInfoVersion3)
+ info["postscriptDefaultWidthX"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptNominalWidthX
+ info = dict(fontInfoVersion3)
+ info["postscriptNominalWidthX"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptWeightName
+ info = dict(fontInfoVersion3)
+ info["postscriptWeightName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptDefaultCharacter
+ info = dict(fontInfoVersion3)
+ info["postscriptDefaultCharacter"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # postscriptWindowsCharacterSet
+ # -1 is outside the valid character-set range.
+ info = dict(fontInfoVersion3)
+ info["postscriptWindowsCharacterSet"] = -1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # macintoshFONDFamilyID
+ # NOTE(review): this check and the next duplicate testFONDRead and are
+ # not postscript* keys; likely a copy/paste leftover — confirm upstream.
+ info = dict(fontInfoVersion3)
+ info["macintoshFONDFamilyID"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # macintoshFONDName
+ info = dict(fontInfoVersion3)
+ info["macintoshFONDName"] = 123
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+
+ def testWOFFRead(self):
+ # woffMajorVersion
+ info = dict(fontInfoVersion3)
+ info["woffMajorVersion"] = 1.0
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["woffMajorVersion"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # woffMinorVersion
+ info = dict(fontInfoVersion3)
+ info["woffMinorVersion"] = 1.0
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["woffMinorVersion"] = "abc"
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # woffMetadataUniqueID
+ ## none
+ info = dict(fontInfoVersion3)
+ del info["woffMetadataUniqueID"]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## not a dict
+ info = dict(fontInfoVersion3)
+ info["woffMetadataUniqueID"] = 1
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## unknown key
+ info = dict(fontInfoVersion3)
+ info["woffMetadataUniqueID"] = dict(id="foo", notTheRightKey=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## no id
+ info = dict(fontInfoVersion3)
+ info["woffMetadataUniqueID"] = dict()
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## not a string for id
+ info = dict(fontInfoVersion3)
+ info["woffMetadataUniqueID"] = dict(id=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## empty string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataUniqueID"] = dict(id="")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ # woffMetadataVendor
+ ## no name
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(url="foo")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## name not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name=1, url="foo")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## name an empty string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="", url="foo")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## no URL
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="foo")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="foo", url=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## url empty string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="foo", url="")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## have dir
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="foo", url="bar", dir="ltr")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="foo", url="bar", dir="rtl")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## dir not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="foo", url="bar", dir=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir not ltr or rtl
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = dict(name="foo", url="bar", dir="utd")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## have class
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = {"name": "foo", "url": "bar", "class": "hello"}
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## class not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = {"name": "foo", "url": "bar", "class": 1}
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## class empty string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataVendor"] = {"name": "foo", "url": "bar", "class": ""}
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ # woffMetadataCredits
+ ## no credits attribute
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = {}
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## unknown attribute
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(name="foo")], notTheRightKey=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## not a list
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits="abc")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## no elements in credits
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## credit not a dict
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=["abc"])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## unknown key
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(name="foo", notTheRightKey=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## no name
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(url="foo")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## name not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(name=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(name="foo", url=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## role not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(name="foo", role=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(name="foo", dir=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir not ltr or rtl
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[dict(name="foo", dir="utd")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## class not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCredits"] = dict(credits=[{"name": "foo", "class": 1}])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # woffMetadataDescription
+ ## no url
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[dict(text="foo")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[dict(text="foo")], url=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## no text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(url="foo")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text not a list
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text="abc")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item not a dict
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=["abc"])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item unknown key
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(
+ text=[dict(text="foo", notTheRightKey=1)]
+ )
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item missing text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[dict(language="foo")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[dict(text=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[dict(text="foo", url=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## language not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[dict(text="foo", language=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir not ltr or rtl
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[dict(text="foo", dir="utd")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## class not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataDescription"] = dict(text=[{"text": "foo", "class": 1}])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # woffMetadataLicense
+ ## no url
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text="foo")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text="foo")], url=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## id not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text="foo")], id=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## no text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(url="foo")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## text not a list
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text="abc")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item not a dict
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=["abc"])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item unknown key
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text="foo", notTheRightKey=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item missing text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(language="foo")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text="foo", url=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## language not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text="foo", language=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir not ltr or rtl
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[dict(text="foo", dir="utd")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## class not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicense"] = dict(text=[{"text": "foo", "class": 1}])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # woffMetadataCopyright
+ ## unknown attribute
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[dict(text="foo")], notTheRightKey=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## no text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict()
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text not a list
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text="abc")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item not a dict
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=["abc"])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item unknown key
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[dict(text="foo", notTheRightKey=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item missing text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[dict(language="foo")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[dict(text=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[dict(text="foo", url=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## language not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[dict(text="foo", language=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir not ltr or rtl
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[dict(text="foo", dir="utd")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## class not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataCopyright"] = dict(text=[{"text": "foo", "class": 1}])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # woffMetadataTrademark
+ ## unknown attribute
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[dict(text="foo")], notTheRightKey=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## no text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict()
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text not a list
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text="abc")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item not a dict
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=["abc"])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item unknown key
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[dict(text="foo", notTheRightKey=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text item missing text
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[dict(language="foo")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## text not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[dict(text=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## url not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[dict(text="foo", url=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## language not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[dict(text="foo", language=1)])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir not ltr or rtl
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[dict(text="foo", dir="utd")])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## class not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataTrademark"] = dict(text=[{"text": "foo", "class": 1}])
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # woffMetadataLicensee
+ ## no name
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = dict()
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## unknown attribute
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = dict(name="foo", notTheRightKey=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## name not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = dict(name=1)
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## dir options
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = dict(name="foo", dir="ltr")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = dict(name="foo", dir="rtl")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## dir not ltr or rtl
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = dict(name="foo", dir="utd")
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## have class
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = {"name": "foo", "class": "hello"}
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ reader.readInfo(TestInfoObject())
+ ## class not a string
+ info = dict(fontInfoVersion3)
+ info["woffMetadataLicensee"] = {"name": "foo", "class": 1}
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+
+ def testGuidelinesRead(self):
+ # x
+ ## not an int or float
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x="1")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # y
+ ## not an int or float
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(y="1")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # angle
+ ## < 0
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, y=0, angle=-1)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## > 360
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, y=0, angle=361)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # name
+ ## not a string
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, name=1)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # color
+ ## not a string
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color=1)]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## not enough commas
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1 0, 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1 0 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1 0 0 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## not enough parts
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color=", 0, 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1, , 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1, 0, , 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1, 0, 0, ")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color=", , , ")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## not a number in all positions
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="r, 1, 1, 1")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1, g, 1, 1")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1, 1, b, 1")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1, 1, 1, a")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## too many parts
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="1, 0, 0, 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## < 0 in each position
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="-1, 0, 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="0, -1, 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="0, 0, -1, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="0, 0, 0, -1")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ ## > 1 in each position
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="2, 0, 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="0, 2, 0, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="0, 0, 2, 0")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [dict(x=0, color="0, 0, 0, 2")]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
+ # identifier
+ ## duplicate
+ info = dict(fontInfoVersion3)
+ info["guidelines"] = [
+ dict(x=0, identifier="guide1"),
+ dict(y=0, identifier="guide1"),
+ ]
+ self._writeInfoToPlist(info)
+ reader = UFOReader(self.dstDir, validate=True)
+ self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
class WriteFontInfoVersion3TestCase(unittest.TestCase):
-
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.dstDir = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def tearDownUFO(self):
- if os.path.exists(self.dstDir):
- shutil.rmtree(self.dstDir)
-
- def makeInfoObject(self):
- infoObject = TestInfoObject()
- for attr, value in list(fontInfoVersion3.items()):
- setattr(infoObject, attr, value)
- return infoObject
-
- def readPlist(self):
- path = os.path.join(self.dstDir, "fontinfo.plist")
- with open(path, "rb") as f:
- plist = plistlib.load(f)
- return plist
-
- def testWrite(self):
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- writtenData = self.readPlist()
- for attr, originalValue in list(fontInfoVersion3.items()):
- newValue = writtenData[attr]
- self.assertEqual(newValue, originalValue)
- self.tearDownUFO()
-
- def testGenericWrite(self):
- # familyName
- infoObject = self.makeInfoObject()
- infoObject.familyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # styleName
- infoObject = self.makeInfoObject()
- infoObject.styleName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # styleMapFamilyName
- infoObject = self.makeInfoObject()
- infoObject.styleMapFamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # styleMapStyleName
- ## not a string
- infoObject = self.makeInfoObject()
- infoObject.styleMapStyleName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.styleMapStyleName = "REGULAR"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # versionMajor
- infoObject = self.makeInfoObject()
- infoObject.versionMajor = "1"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # versionMinor
- infoObject = self.makeInfoObject()
- infoObject.versionMinor = "0"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # copyright
- infoObject = self.makeInfoObject()
- infoObject.copyright = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # trademark
- infoObject = self.makeInfoObject()
- infoObject.trademark = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # unitsPerEm
- infoObject = self.makeInfoObject()
- infoObject.unitsPerEm = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # descender
- infoObject = self.makeInfoObject()
- infoObject.descender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # xHeight
- infoObject = self.makeInfoObject()
- infoObject.xHeight = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # capHeight
- infoObject = self.makeInfoObject()
- infoObject.capHeight = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # ascender
- infoObject = self.makeInfoObject()
- infoObject.ascender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # italicAngle
- infoObject = self.makeInfoObject()
- infoObject.italicAngle = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testGaspWrite(self):
- # not a list
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # empty list
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = []
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- # not a dict
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = ["abc"]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # dict not properly formatted
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = [dict(rangeMaxPPEM=0xFFFF, notTheRightKey=1)]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = [dict(notTheRightKey=1, rangeGaspBehavior=[0])]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # not an int for ppem
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = [dict(rangeMaxPPEM="abc", rangeGaspBehavior=[0]), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # not a list for behavior
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = [dict(rangeMaxPPEM=10, rangeGaspBehavior="abc"), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # invalid behavior value
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = [dict(rangeMaxPPEM=10, rangeGaspBehavior=[-1]), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # not sorted
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = [dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]), dict(rangeMaxPPEM=10, rangeGaspBehavior=[0])]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # no 0xFFFF
- infoObject = self.makeInfoObject()
- infoObject.openTypeGaspRangeRecords = [dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]), dict(rangeMaxPPEM=20, rangeGaspBehavior=[0])]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
-
- def testHeadWrite(self):
- # openTypeHeadCreated
- ## not a string
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadCreated = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## invalid format
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadCreated = "2000-Jan-01 00:00:00"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeHeadLowestRecPPEM
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadLowestRecPPEM = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeHeadFlags
- infoObject = self.makeInfoObject()
- infoObject.openTypeHeadFlags = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testHheaWrite(self):
- # openTypeHheaAscender
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaAscender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeHheaDescender
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaDescender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeHheaLineGap
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaLineGap = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeHheaCaretSlopeRise
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaCaretSlopeRise = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeHheaCaretSlopeRun
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaCaretSlopeRun = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeHheaCaretOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeHheaCaretOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testNameWrite(self):
- # openTypeNameDesigner
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameDesigner = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameDesignerURL
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameDesignerURL = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameManufacturer
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameManufacturer = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameManufacturerURL
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameManufacturerURL = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameLicense
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameLicense = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameLicenseURL
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameLicenseURL = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameVersion
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameVersion = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameUniqueID
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameUniqueID = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameDescription
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameDescription = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNamePreferredFamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNamePreferredFamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNamePreferredSubfamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNamePreferredSubfamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameCompatibleFullName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameCompatibleFullName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameSampleText
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameSampleText = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameWWSFamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameWWSFamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameWWSSubfamilyName
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameWWSSubfamilyName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeNameRecords
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## not a dict
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = ["abc"]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## invalid dict structure
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [dict(foo="bar")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## incorrect keys
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.", foo="bar")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(platformID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, languageID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, encodingID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1)
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## invalid values
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID="1", platformID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID="1", encodingID=1, languageID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, encodingID="1", languageID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, encodingID=1, languageID="1", string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string=1)
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## duplicate
- infoObject = self.makeInfoObject()
- infoObject.openTypeNameRecords = [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record."),
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.")
- ]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
-
- def testOS2Write(self):
- # openTypeOS2WidthClass
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WidthClass = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## out or range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WidthClass = 15
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2WeightClass
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WeightClass = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WeightClass = -50
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2Selection
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Selection = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2VendorID
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2VendorID = 1234
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2Panose
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too few values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Panose = [0, 1, 2, 3]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2FamilyClass
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [0, str(1)]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too few values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [1]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [1, 1, 1]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2FamilyClass = [1, 20]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2UnicodeRanges
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2UnicodeRanges = ["0"]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2UnicodeRanges = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2CodePageRanges
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2CodePageRanges = ["0"]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2CodePageRanges = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2TypoAscender
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2TypoAscender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2TypoDescender
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2TypoDescender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2TypoLineGap
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2TypoLineGap = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2WinAscent
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WinAscent = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WinAscent = -1
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2WinDescent
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WinDescent = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2WinDescent = -1
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2Type
- ## not an int
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Type = ["1"]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## out of range
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2Type = [-1]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SubscriptXSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptXSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SubscriptYSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptYSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SubscriptXOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptXOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SubscriptYOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SubscriptYOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SuperscriptXSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptXSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SuperscriptYSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptYSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SuperscriptXOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptXOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2SuperscriptYOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2SuperscriptYOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2StrikeoutSize
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2StrikeoutSize = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeOS2StrikeoutPosition
- infoObject = self.makeInfoObject()
- infoObject.openTypeOS2StrikeoutPosition = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testVheaWrite(self):
- # openTypeVheaVertTypoAscender
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaVertTypoAscender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeVheaVertTypoDescender
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaVertTypoDescender = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeVheaVertTypoLineGap
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaVertTypoLineGap = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeVheaCaretSlopeRise
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaCaretSlopeRise = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeVheaCaretSlopeRun
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaCaretSlopeRun = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # openTypeVheaCaretOffset
- infoObject = self.makeInfoObject()
- infoObject.openTypeVheaCaretOffset = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testFONDWrite(self):
- # macintoshFONDFamilyID
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDFamilyID = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # macintoshFONDName
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testPostscriptWrite(self):
- # postscriptFontName
- infoObject = self.makeInfoObject()
- infoObject.postscriptFontName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptFullName
- infoObject = self.makeInfoObject()
- infoObject.postscriptFullName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptSlantAngle
- infoObject = self.makeInfoObject()
- infoObject.postscriptSlantAngle = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptUniqueID
- infoObject = self.makeInfoObject()
- infoObject.postscriptUniqueID = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptUnderlineThickness
- infoObject = self.makeInfoObject()
- infoObject.postscriptUnderlineThickness = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptUnderlinePosition
- infoObject = self.makeInfoObject()
- infoObject.postscriptUnderlinePosition = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptIsFixedPitch
- infoObject = self.makeInfoObject()
- infoObject.postscriptIsFixedPitch = 2
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptBlueValues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueValues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueValues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueValues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptOtherBlues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptOtherBlues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptOtherBlues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptOtherBlues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptFamilyBlues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyBlues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyBlues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyBlues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptFamilyOtherBlues
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyOtherBlues = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## uneven value count
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyOtherBlues = [500]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptFamilyOtherBlues = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptStemSnapH
- ## not list
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapH = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapH = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptStemSnapV
- ## not list
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapV = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many values
- infoObject = self.makeInfoObject()
- infoObject.postscriptStemSnapV = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptBlueFuzz
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueFuzz = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptBlueShift
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueShift = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptBlueScale
- infoObject = self.makeInfoObject()
- infoObject.postscriptBlueScale = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptForceBold
- infoObject = self.makeInfoObject()
- infoObject.postscriptForceBold = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptDefaultWidthX
- infoObject = self.makeInfoObject()
- infoObject.postscriptDefaultWidthX = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptNominalWidthX
- infoObject = self.makeInfoObject()
- infoObject.postscriptNominalWidthX = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptWeightName
- infoObject = self.makeInfoObject()
- infoObject.postscriptWeightName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptDefaultCharacter
- infoObject = self.makeInfoObject()
- infoObject.postscriptDefaultCharacter = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # postscriptWindowsCharacterSet
- infoObject = self.makeInfoObject()
- infoObject.postscriptWindowsCharacterSet = -1
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # macintoshFONDFamilyID
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDFamilyID = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # macintoshFONDName
- infoObject = self.makeInfoObject()
- infoObject.macintoshFONDName = 123
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testWOFFWrite(self):
- # woffMajorVersion
- infoObject = self.makeInfoObject()
- infoObject.woffMajorVersion = 1.0
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.woffMajorVersion = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # woffMinorVersion
- infoObject = self.makeInfoObject()
- infoObject.woffMinorVersion = 1.0
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.woffMinorVersion = "abc"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # woffMetadataUniqueID
- ## none
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataUniqueID = None
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## not a dict
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataUniqueID = 1
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## unknown key
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataUniqueID = dict(id="foo", notTheRightKey=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## no id
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataUniqueID = dict()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## not a string for id
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataUniqueID = dict(id=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## empty string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataUniqueID = dict(id="")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- # woffMetadataVendor
- ## no name
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(url="foo")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## name not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name=1, url="foo")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## name an empty string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="", url="foo")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## no URL
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="foo")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="foo", url=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## url empty string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="foo", url="")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## have dir
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="ltr")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="rtl")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## dir not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir not ltr or rtl
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="utd")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## have class
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = {"name" : "foo", "url" : "bar", "class" : "hello"}
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## class not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = {"name" : "foo", "url" : "bar", "class" : 1}
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## class empty string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataVendor = {"name" : "foo", "url" : "bar", "class" : ""}
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- # woffMetadataCredits
- ## no credits attribute
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = {}
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## unknown attribute
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(name="foo")], notTheRightKey=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## not a list
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits="abc")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## no elements in credits
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## credit not a dict
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=["abc"])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## unknown key
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", notTheRightKey=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## no name
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(url="foo")])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## name not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(name=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", url=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## role not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", role=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", dir=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir not ltr or rtl
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", dir="utd")])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## class not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCredits = dict(credits=[{"name" : "foo", "class" : 1}])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # woffMetadataDescription
- ## no url
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(text="foo")])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(text="foo")], url=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## no text
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(url="foo")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text not a list
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text="abc")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item not a dict
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=["abc"])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item unknown key
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(text="foo", notTheRightKey=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item missing text
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(language="foo")])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(text=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(text="foo", url=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## language not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(text="foo", language=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir not ltr or rtl
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[dict(text="foo", dir="utd")])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## class not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataDescription = dict(text=[{"text" : "foo", "class" : 1}])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # woffMetadataLicense
- ## no url
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicense = dict(text=[dict(text="foo")])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicense = dict(text=[dict(text="foo")], url=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## id not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicense = dict(text=[dict(text="foo")], id=1)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## no text
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicense = dict(url="foo")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## text not a list
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicense = dict(text="abc")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item not a dict
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicense = dict(text=["abc"])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item unknown key
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicense = dict(text=[dict(text="foo", notTheRightKey=1)])
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item missing text
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicense = dict(text=[dict(language="foo")])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicense = dict(text=[dict(text=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicense = dict(text=[dict(text="foo", url=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## language not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicense = dict(text=[dict(text="foo", language=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir not ltr or rtl
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicense = dict(text=[dict(text="foo", dir="utd")])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## class not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicense = dict(text=[{"text" : "foo", "class" : 1}])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # woffMetadataCopyright
- ## unknown attribute
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=[dict(text="foo")], notTheRightKey=1)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## no text
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict()
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text not a list
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text="abc")
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item not a dict
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=["abc"])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item unknown key
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", notTheRightKey=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item missing text
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataCopyright = dict(text=[dict(language="foo")])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=[dict(text=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", url=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## language not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", language=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir not ltr or rtl
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", dir="utd")])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## class not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataCopyright = dict(text=[{"text" : "foo", "class" : 1}])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # woffMetadataTrademark
- ## unknown attribute
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[dict(text="foo")], notTheRightKey=1)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## no text
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict()
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text not a list
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text="abc")
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item not a dict
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=["abc"])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item unknown key
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", notTheRightKey=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text item missing text
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[dict(language="foo")])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## text not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[dict(text=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## url not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", url=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## language not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", language=1)])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir not ltr or rtl
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", dir="utd")])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## class not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataTrademark = dict(text=[{"text" : "foo", "class" : 1}])
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # woffMetadataLicensee
- ## no name
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicensee = dict()
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## unknown attribute
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicensee = dict(name="foo", notTheRightKey=1)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## name not a string
- infoObject = self.makeInfoObject()
- writer = UFOWriter(self.dstDir, formatVersion=3)
- infoObject.woffMetadataLicensee = dict(name=1)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## dir options
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicensee = dict(name="foo", dir="ltr")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicensee = dict(name="foo", dir="rtl")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## dir not ltr or rtl
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicensee = dict(name="foo", dir="utd")
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## have class
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicensee = {"name" : "foo", "class" : "hello"}
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeInfo(infoObject)
- self.tearDownUFO()
- ## class not a string
- infoObject = self.makeInfoObject()
- infoObject.woffMetadataLicensee = {"name" : "foo", "class" : 1}
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
-
- def testGuidelinesWrite(self):
- # x
- ## not an int or float
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x="1")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # y
- ## not an int or float
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(y="1")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # angle
- ## < 0
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, y=0, angle=-1)]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## > 360
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, y=0, angle=361)]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # name
- ## not a string
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, name=1)]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # color
- ## not a string
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color=1)]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## not enough commas
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1 0, 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1 0 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1 0 0 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## not enough parts
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color=", 0, 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1, , 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1, 0, , 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1, 0, 0, ")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color=", , , ")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## not a number in all positions
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="r, 1, 1, 1")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1, g, 1, 1")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1, 1, b, 1")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1, 1, 1, a")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## too many parts
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="1, 0, 0, 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## < 0 in each position
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="-1, 0, 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="0, -1, 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="0, 0, -1, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="0, 0, 0, -1")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## > 1 in each position
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="2, 0, 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="0, 2, 0, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="0, 0, 2, 0")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, color="0, 0, 0, 2")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- # identifier
- ## duplicate
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, identifier="guide1"), dict(y=0, identifier="guide1")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## below min
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, identifier="\0x1F")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
- ## above max
- infoObject = self.makeInfoObject()
- infoObject.guidelines = [dict(x=0, identifier="\0x7F")]
- writer = UFOWriter(self.dstDir, formatVersion=3)
- self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
- self.tearDownUFO()
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.dstDir = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def tearDownUFO(self):
+ if os.path.exists(self.dstDir):
+ shutil.rmtree(self.dstDir)
+
+ def makeInfoObject(self):
+ infoObject = TestInfoObject()
+ for attr, value in list(fontInfoVersion3.items()):
+ setattr(infoObject, attr, value)
+ return infoObject
+
+ def readPlist(self):
+ path = os.path.join(self.dstDir, "fontinfo.plist")
+ with open(path, "rb") as f:
+ plist = plistlib.load(f)
+ return plist
+
+ def testWrite(self):
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ writtenData = self.readPlist()
+ for attr, originalValue in list(fontInfoVersion3.items()):
+ newValue = writtenData[attr]
+ self.assertEqual(newValue, originalValue)
+ self.tearDownUFO()
+
+ def testGenericWrite(self):
+ # familyName
+ infoObject = self.makeInfoObject()
+ infoObject.familyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # styleName
+ infoObject = self.makeInfoObject()
+ infoObject.styleName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # styleMapFamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.styleMapFamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # styleMapStyleName
+ ## not a string
+ infoObject = self.makeInfoObject()
+ infoObject.styleMapStyleName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.styleMapStyleName = "REGULAR"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # versionMajor
+ infoObject = self.makeInfoObject()
+ infoObject.versionMajor = "1"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # versionMinor
+ infoObject = self.makeInfoObject()
+ infoObject.versionMinor = "0"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # copyright
+ infoObject = self.makeInfoObject()
+ infoObject.copyright = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # trademark
+ infoObject = self.makeInfoObject()
+ infoObject.trademark = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # unitsPerEm
+ infoObject = self.makeInfoObject()
+ infoObject.unitsPerEm = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # descender
+ infoObject = self.makeInfoObject()
+ infoObject.descender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # xHeight
+ infoObject = self.makeInfoObject()
+ infoObject.xHeight = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # capHeight
+ infoObject = self.makeInfoObject()
+ infoObject.capHeight = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # ascender
+ infoObject = self.makeInfoObject()
+ infoObject.ascender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # italicAngle
+ infoObject = self.makeInfoObject()
+ infoObject.italicAngle = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testGaspWrite(self):
+ # not a list
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # empty list
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = []
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ # not a dict
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = ["abc"]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # dict not properly formatted
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = [
+ dict(rangeMaxPPEM=0xFFFF, notTheRightKey=1)
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = [
+ dict(notTheRightKey=1, rangeGaspBehavior=[0])
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # not an int for ppem
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = [
+ dict(rangeMaxPPEM="abc", rangeGaspBehavior=[0]),
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # not a list for behavior
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = [
+ dict(rangeMaxPPEM=10, rangeGaspBehavior="abc"),
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # invalid behavior value
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = [
+ dict(rangeMaxPPEM=10, rangeGaspBehavior=[-1]),
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # not sorted
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = [
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]),
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # no 0xFFFF
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeGaspRangeRecords = [
+ dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]),
+ dict(rangeMaxPPEM=20, rangeGaspBehavior=[0]),
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+
+ def testHeadWrite(self):
+ # openTypeHeadCreated
+ ## not a string
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadCreated = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## invalid format
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadCreated = "2000-Jan-01 00:00:00"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeHeadLowestRecPPEM
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadLowestRecPPEM = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeHeadFlags
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHeadFlags = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testHheaWrite(self):
+ # openTypeHheaAscender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaAscender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeHheaDescender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaDescender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeHheaLineGap
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaLineGap = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeHheaCaretSlopeRise
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaCaretSlopeRise = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeHheaCaretSlopeRun
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaCaretSlopeRun = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeHheaCaretOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeHheaCaretOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testNameWrite(self):
+ # openTypeNameDesigner
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameDesigner = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameDesignerURL
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameDesignerURL = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameManufacturer
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameManufacturer = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameManufacturerURL
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameManufacturerURL = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameLicense
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameLicense = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameLicenseURL
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameLicenseURL = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameVersion
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameVersion = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameUniqueID
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameUniqueID = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameDescription
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameDescription = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNamePreferredFamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNamePreferredFamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNamePreferredSubfamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNamePreferredSubfamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameCompatibleFullName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameCompatibleFullName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameSampleText
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameSampleText = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameWWSFamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameWWSFamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameWWSSubfamilyName
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameWWSSubfamilyName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeNameRecords
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## not a dict
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = ["abc"]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## invalid dict structure
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [dict(foo="bar")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## incorrect keys
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ foo="bar",
+ )
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(platformID=1, encodingID=1, languageID=1, string="Name Record.")
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(nameID=1, encodingID=1, languageID=1, string="Name Record.")
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(nameID=1, platformID=1, languageID=1, string="Name Record.")
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(nameID=1, platformID=1, encodingID=1, string="Name Record.")
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(nameID=1, platformID=1, encodingID=1, languageID=1)
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## invalid values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(
+ nameID="1",
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ )
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(
+ nameID=1,
+ platformID="1",
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ )
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID="1",
+ languageID=1,
+ string="Name Record.",
+ )
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID="1",
+ string="Name Record.",
+ )
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(nameID=1, platformID=1, encodingID=1, languageID=1, string=1)
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## duplicate
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeNameRecords = [
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ ),
+ dict(
+ nameID=1,
+ platformID=1,
+ encodingID=1,
+ languageID=1,
+ string="Name Record.",
+ ),
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+
+ def testOS2Write(self):
+ # openTypeOS2WidthClass
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WidthClass = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+        ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WidthClass = 15
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2WeightClass
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WeightClass = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WeightClass = -50
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2Selection
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Selection = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2VendorID
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2VendorID = 1234
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2Panose
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too few values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Panose = [0, 1, 2, 3]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Panose = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2FamilyClass
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [0, str(1)]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too few values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [1]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [1, 1, 1]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2FamilyClass = [1, 20]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2UnicodeRanges
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2UnicodeRanges = ["0"]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2UnicodeRanges = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2CodePageRanges
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2CodePageRanges = ["0"]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2CodePageRanges = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2TypoAscender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2TypoAscender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2TypoDescender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2TypoDescender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2TypoLineGap
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2TypoLineGap = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2WinAscent
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WinAscent = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WinAscent = -1
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2WinDescent
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WinDescent = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2WinDescent = -1
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2Type
+ ## not an int
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Type = ["1"]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## out of range
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2Type = [-1]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SubscriptXSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptXSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SubscriptYSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptYSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SubscriptXOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptXOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SubscriptYOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SubscriptYOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SuperscriptXSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptXSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SuperscriptYSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptYSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SuperscriptXOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptXOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2SuperscriptYOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2SuperscriptYOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2StrikeoutSize
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2StrikeoutSize = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeOS2StrikeoutPosition
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeOS2StrikeoutPosition = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testVheaWrite(self):
+ # openTypeVheaVertTypoAscender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaVertTypoAscender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeVheaVertTypoDescender
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaVertTypoDescender = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeVheaVertTypoLineGap
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaVertTypoLineGap = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeVheaCaretSlopeRise
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaCaretSlopeRise = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeVheaCaretSlopeRun
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaCaretSlopeRun = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # openTypeVheaCaretOffset
+ infoObject = self.makeInfoObject()
+ infoObject.openTypeVheaCaretOffset = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testFONDWrite(self):
+ # macintoshFONDFamilyID
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDFamilyID = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # macintoshFONDName
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testPostscriptWrite(self):
+ # postscriptFontName
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFontName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptFullName
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFullName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptSlantAngle
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptSlantAngle = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptUniqueID
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptUniqueID = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptUnderlineThickness
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptUnderlineThickness = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptUnderlinePosition
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptUnderlinePosition = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptIsFixedPitch
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptIsFixedPitch = 2
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptBlueValues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueValues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueValues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueValues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptOtherBlues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptOtherBlues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptOtherBlues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptOtherBlues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptFamilyBlues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyBlues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyBlues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyBlues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptFamilyOtherBlues
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyOtherBlues = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## uneven value count
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyOtherBlues = [500]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptFamilyOtherBlues = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptStemSnapH
+        ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapH = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapH = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptStemSnapV
+        ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapV = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many values
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptStemSnapV = [
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 110,
+ 120,
+ 130,
+ 140,
+ 150,
+ 160,
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptBlueFuzz
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueFuzz = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptBlueShift
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueShift = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptBlueScale
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptBlueScale = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptForceBold
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptForceBold = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptDefaultWidthX
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptDefaultWidthX = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptNominalWidthX
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptNominalWidthX = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptWeightName
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptWeightName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptDefaultCharacter
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptDefaultCharacter = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # postscriptWindowsCharacterSet
+ infoObject = self.makeInfoObject()
+ infoObject.postscriptWindowsCharacterSet = -1
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # macintoshFONDFamilyID
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDFamilyID = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # macintoshFONDName
+ infoObject = self.makeInfoObject()
+ infoObject.macintoshFONDName = 123
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testWOFFWrite(self):
+ # woffMajorVersion
+ infoObject = self.makeInfoObject()
+ infoObject.woffMajorVersion = 1.0
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.woffMajorVersion = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # woffMinorVersion
+ infoObject = self.makeInfoObject()
+ infoObject.woffMinorVersion = 1.0
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.woffMinorVersion = "abc"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # woffMetadataUniqueID
+ ## none
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataUniqueID = None
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## not a dict
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataUniqueID = 1
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## unknown key
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataUniqueID = dict(id="foo", notTheRightKey=1)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## no id
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataUniqueID = dict()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## not a string for id
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataUniqueID = dict(id=1)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## empty string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataUniqueID = dict(id="")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ # woffMetadataVendor
+ ## no name
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(url="foo")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## name not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name=1, url="foo")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## name an empty string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="", url="foo")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## no URL
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="foo")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="foo", url=1)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## url empty string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="foo", url="")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## have dir
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="ltr")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="rtl")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## dir not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir=1)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir not ltr or rtl
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="utd")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## have class
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = {"name": "foo", "url": "bar", "class": "hello"}
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## class not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = {"name": "foo", "url": "bar", "class": 1}
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## class empty string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataVendor = {"name": "foo", "url": "bar", "class": ""}
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ # woffMetadataCredits
+ ## no credits attribute
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = {}
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## unknown attribute
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(
+ credits=[dict(name="foo")], notTheRightKey=1
+ )
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## not a list
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits="abc")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## no elements in credits
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## credit not a dict
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=["abc"])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## unknown key
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(
+ credits=[dict(name="foo", notTheRightKey=1)]
+ )
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## no name
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[dict(url="foo")])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## name not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[dict(name=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", url=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## role not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", role=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", dir=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir not ltr or rtl
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", dir="utd")])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## class not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCredits = dict(credits=[{"name": "foo", "class": 1}])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # woffMetadataDescription
+ ## no url
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[dict(text="foo")])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[dict(text="foo")], url=1)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## no text
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(url="foo")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text not a list
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text="abc")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item not a dict
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=["abc"])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item unknown key
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(
+ text=[dict(text="foo", notTheRightKey=1)]
+ )
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item missing text
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[dict(language="foo")])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[dict(text=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[dict(text="foo", url=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## language not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[dict(text="foo", language=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir not ltr or rtl
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[dict(text="foo", dir="utd")])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## class not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataDescription = dict(text=[{"text": "foo", "class": 1}])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # woffMetadataLicense
+ ## no url
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicense = dict(text=[dict(text="foo")])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicense = dict(text=[dict(text="foo")], url=1)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## id not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicense = dict(text=[dict(text="foo")], id=1)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## no text
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicense = dict(url="foo")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## text not a list
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicense = dict(text="abc")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item not a dict
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicense = dict(text=["abc"])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item unknown key
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicense = dict(text=[dict(text="foo", notTheRightKey=1)])
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item missing text
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicense = dict(text=[dict(language="foo")])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicense = dict(text=[dict(text=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicense = dict(text=[dict(text="foo", url=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## language not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicense = dict(text=[dict(text="foo", language=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir not ltr or rtl
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicense = dict(text=[dict(text="foo", dir="utd")])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## class not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicense = dict(text=[{"text": "foo", "class": 1}])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # woffMetadataCopyright
+ ## unknown attribute
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(
+ text=[dict(text="foo")], notTheRightKey=1
+ )
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## no text
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict()
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text not a list
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(text="abc")
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item not a dict
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(text=["abc"])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item unknown key
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(
+ text=[dict(text="foo", notTheRightKey=1)]
+ )
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item missing text
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataCopyright = dict(text=[dict(language="foo")])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(text=[dict(text=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", url=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## language not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", language=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir not ltr or rtl
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", dir="utd")])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## class not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataCopyright = dict(text=[{"text": "foo", "class": 1}])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # woffMetadataTrademark
+ ## unknown attribute
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(
+ text=[dict(text="foo")], notTheRightKey=1
+ )
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## no text
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict()
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text not a list
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text="abc")
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item not a dict
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text=["abc"])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item unknown key
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(
+ text=[dict(text="foo", notTheRightKey=1)]
+ )
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text item missing text
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text=[dict(language="foo")])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## text not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text=[dict(text=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## url not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", url=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## language not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", language=1)])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir not ltr or rtl
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", dir="utd")])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## class not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataTrademark = dict(text=[{"text": "foo", "class": 1}])
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # woffMetadataLicensee
+ ## no name
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicensee = dict()
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## unknown attribute
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicensee = dict(name="foo", notTheRightKey=1)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## name not a string
+ infoObject = self.makeInfoObject()
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ infoObject.woffMetadataLicensee = dict(name=1)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## dir options
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicensee = dict(name="foo", dir="ltr")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicensee = dict(name="foo", dir="rtl")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## dir not ltr or rtl
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicensee = dict(name="foo", dir="utd")
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## have class
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicensee = {"name": "foo", "class": "hello"}
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeInfo(infoObject)
+ self.tearDownUFO()
+ ## class not a string
+ infoObject = self.makeInfoObject()
+ infoObject.woffMetadataLicensee = {"name": "foo", "class": 1}
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+
+ def testGuidelinesWrite(self):
+ # x
+ ## not an int or float
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x="1")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # y
+ ## not an int or float
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(y="1")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # angle
+ ## < 0
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, y=0, angle=-1)]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## > 360
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, y=0, angle=361)]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # name
+ ## not a string
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, name=1)]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # color
+ ## not a string
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color=1)]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## not enough commas
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1 0, 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1 0 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1 0 0 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## not enough parts
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color=", 0, 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1, , 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1, 0, , 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1, 0, 0, ")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color=", , , ")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## not a number in all positions
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="r, 1, 1, 1")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1, g, 1, 1")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1, 1, b, 1")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1, 1, 1, a")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## too many parts
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="1, 0, 0, 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## < 0 in each position
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="-1, 0, 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="0, -1, 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="0, 0, -1, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="0, 0, 0, -1")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## > 1 in each position
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="2, 0, 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="0, 2, 0, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="0, 0, 2, 0")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, color="0, 0, 0, 2")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ # identifier
+ ## duplicate
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [
+ dict(x=0, identifier="guide1"),
+ dict(y=0, identifier="guide1"),
+ ]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## below min
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, identifier="\0x1F")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
+ ## above max
+ infoObject = self.makeInfoObject()
+ infoObject.guidelines = [dict(x=0, identifier="\0x7F")]
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
+ self.tearDownUFO()
# ------
# layers
# ------
-class UFO3ReadLayersTestCase(unittest.TestCase):
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.ufoPath = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def makeUFO(self, metaInfo=None, layerContents=None):
- self.clearUFO()
- if not os.path.exists(self.ufoPath):
- os.mkdir(self.ufoPath)
- # metainfo.plist
- if metaInfo is None:
- metaInfo = dict(creator="test", formatVersion=3)
- path = os.path.join(self.ufoPath, "metainfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(metaInfo, f)
- # layers
- if layerContents is None:
- layerContents = [
- ("public.default", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2"),
- ]
- if layerContents:
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- else:
- layerContents = [("", "glyphs")]
- for name, directory in layerContents:
- glyphsPath = os.path.join(self.ufoPath, directory)
- os.mkdir(glyphsPath)
- contents = dict(a="a.glif")
- path = os.path.join(glyphsPath, "contents.plist")
- with open(path, "wb") as f:
- plistlib.dump(contents, f)
- path = os.path.join(glyphsPath, "a.glif")
- with open(path, "w") as f:
- f.write(" ")
-
- def clearUFO(self):
- if os.path.exists(self.ufoPath):
- shutil.rmtree(self.ufoPath)
-
- # valid
-
- def testValidRead(self):
- # UFO 1
- self.makeUFO(
- metaInfo=dict(creator="test", formatVersion=1),
- layerContents=dict()
- )
- reader = UFOReader(self.ufoPath, validate=True)
- reader.getGlyphSet()
- # UFO 2
- self.makeUFO(
- metaInfo=dict(creator="test", formatVersion=2),
- layerContents=dict()
- )
- reader = UFOReader(self.ufoPath, validate=True)
- reader.getGlyphSet()
- # UFO 3
- self.makeUFO()
- reader = UFOReader(self.ufoPath, validate=True)
- reader.getGlyphSet()
-
- # missing layer contents
-
- def testMissingLayerContents(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # layer contents invalid format
-
- def testInvalidLayerContentsFormat(self):
- # bogus
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- with open(path, "w") as f:
- f.write("test")
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
- # dict
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = {
- "public.default" : "glyphs",
- "layer 1" : "glyphs.layer 1",
- "layer 2" : "glyphs.layer 2",
- }
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # layer contents invalid name format
-
- def testInvalidLayerContentsNameFormat(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- (1, "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # layer contents invalid directory format
-
- def testInvalidLayerContentsDirectoryFormat(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", 1),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # directory listed in contents not on disk
-
- def testLayerContentsHasMissingDirectory(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.doesnotexist"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # # directory on disk not listed in contents
- # XXX should this raise an error?
- #
- # def testLayerContentsHasMissingDirectory(self):
- # self.makeUFO()
- # path = os.path.join(self.ufoPath, "layercontents.plist")
- # os.remove(path)
- # layerContents = [
- # ("public.foregound", "glyphs"),
- # ("layer 1", "glyphs.layer 2")
- # ]
- # with open(path, "wb") as f:
- # plistlib.dump(layerContents, f)
- # reader = UFOReader(self.ufoPath, validate=True)
- # with self.assertRaises(UFOLibError):
- # reader.getGlyphSet()
-
- # no default layer on disk
-
- def testMissingDefaultLayer(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # duplicate layer name
-
- def testDuplicateLayerName(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 1", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # directory referenced by two layer names
-
- def testDuplicateLayerDirectory(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 1")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- self.assertRaises(UFOLibError, reader.getGlyphSet)
-
- # default without a name
-
- def testDefaultLayerNoName(self):
- # get the glyph set
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- reader.getGlyphSet()
-
- # default with a name
-
- def testDefaultLayerName(self):
- # get the name
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("custom name", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- expected = layerContents[0][0]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- result = reader.getDefaultLayerName()
- self.assertEqual(expected, result)
- # get the glyph set
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("custom name", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- reader.getGlyphSet(expected)
-
- # layer order
-
- def testLayerOrder(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- expected = [name for (name, directory) in layerContents]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- result = reader.getLayerNames()
- self.assertEqual(expected, result)
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("layer 1", "glyphs.layer 1"),
- ("public.foregound", "glyphs"),
- ("layer 2", "glyphs.layer 2")
- ]
- expected = [name for (name, directory) in layerContents]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- result = reader.getLayerNames()
- self.assertEqual(expected, result)
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("layer 2", "glyphs.layer 2"),
- ("layer 1", "glyphs.layer 1"),
- ("public.foregound", "glyphs")
- ]
- expected = [name for (name, directory) in layerContents]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- reader = UFOReader(self.ufoPath, validate=True)
- result = reader.getLayerNames()
- self.assertEqual(expected, result)
+class UFO3ReadLayersTestCase(unittest.TestCase):
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.ufoPath = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def makeUFO(self, metaInfo=None, layerContents=None):
+ self.clearUFO()
+ if not os.path.exists(self.ufoPath):
+ os.mkdir(self.ufoPath)
+ # metainfo.plist
+ if metaInfo is None:
+ metaInfo = dict(creator="test", formatVersion=3)
+ path = os.path.join(self.ufoPath, "metainfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(metaInfo, f)
+ # layers
+ if layerContents is None:
+ layerContents = [
+ ("public.default", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ if layerContents:
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ else:
+ layerContents = [("", "glyphs")]
+ for name, directory in layerContents:
+ glyphsPath = os.path.join(self.ufoPath, directory)
+ os.mkdir(glyphsPath)
+ contents = dict(a="a.glif")
+ path = os.path.join(glyphsPath, "contents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(contents, f)
+ path = os.path.join(glyphsPath, "a.glif")
+ with open(path, "w") as f:
+ f.write(" ")
+
+ def clearUFO(self):
+ if os.path.exists(self.ufoPath):
+ shutil.rmtree(self.ufoPath)
+
+ # valid
+
+ def testValidRead(self):
+ # UFO 1
+ self.makeUFO(
+ metaInfo=dict(creator="test", formatVersion=1), layerContents=dict()
+ )
+ reader = UFOReader(self.ufoPath, validate=True)
+ reader.getGlyphSet()
+ # UFO 2
+ self.makeUFO(
+ metaInfo=dict(creator="test", formatVersion=2), layerContents=dict()
+ )
+ reader = UFOReader(self.ufoPath, validate=True)
+ reader.getGlyphSet()
+ # UFO 3
+ self.makeUFO()
+ reader = UFOReader(self.ufoPath, validate=True)
+ reader.getGlyphSet()
+
+ # missing layer contents
+
+ def testMissingLayerContents(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # layer contents invalid format
+
+ def testInvalidLayerContentsFormat(self):
+ # bogus
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ with open(path, "w") as f:
+ f.write("test")
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+ # dict
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = {
+ "public.default": "glyphs",
+ "layer 1": "glyphs.layer 1",
+ "layer 2": "glyphs.layer 2",
+ }
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # layer contents invalid name format
+
+ def testInvalidLayerContentsNameFormat(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ (1, "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # layer contents invalid directory format
+
+ def testInvalidLayerContentsDirectoryFormat(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", 1),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # directory listed in contents not on disk
+
+ def testLayerContentsHasMissingDirectory(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.doesnotexist"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # # directory on disk not listed in contents
+ # XXX should this raise an error?
+ #
+ # def testLayerContentsHasMissingDirectory(self):
+ # self.makeUFO()
+ # path = os.path.join(self.ufoPath, "layercontents.plist")
+ # os.remove(path)
+ # layerContents = [
+ # ("public.foregound", "glyphs"),
+ # ("layer 1", "glyphs.layer 2")
+ # ]
+ # with open(path, "wb") as f:
+ # plistlib.dump(layerContents, f)
+ # reader = UFOReader(self.ufoPath, validate=True)
+ # with self.assertRaises(UFOLibError):
+ # reader.getGlyphSet()
+
+ # no default layer on disk
+
+ def testMissingDefaultLayer(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [("layer 1", "glyphs.layer 1"), ("layer 2", "glyphs.layer 2")]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # duplicate layer name
+
+ def testDuplicateLayerName(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 1", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # directory referenced by two layer names
+
+ def testDuplicateLayerDirectory(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 1"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ self.assertRaises(UFOLibError, reader.getGlyphSet)
+
+ # default without a name
+
+ def testDefaultLayerNoName(self):
+ # get the glyph set
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ reader.getGlyphSet()
+
+ # default with a name
+
+ def testDefaultLayerName(self):
+ # get the name
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("custom name", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ expected = layerContents[0][0]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ result = reader.getDefaultLayerName()
+ self.assertEqual(expected, result)
+ # get the glyph set
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("custom name", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ reader.getGlyphSet(expected)
+
+ # layer order
+
+ def testLayerOrder(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ expected = [name for (name, directory) in layerContents]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ result = reader.getLayerNames()
+ self.assertEqual(expected, result)
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("layer 1", "glyphs.layer 1"),
+ ("public.foregound", "glyphs"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ expected = [name for (name, directory) in layerContents]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ result = reader.getLayerNames()
+ self.assertEqual(expected, result)
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("layer 2", "glyphs.layer 2"),
+ ("layer 1", "glyphs.layer 1"),
+ ("public.foregound", "glyphs"),
+ ]
+ expected = [name for (name, directory) in layerContents]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ reader = UFOReader(self.ufoPath, validate=True)
+ result = reader.getLayerNames()
+ self.assertEqual(expected, result)
class UFO3WriteLayersTestCase(unittest.TestCase):
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.ufoPath = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def makeUFO(self, metaInfo=None, layerContents=None):
+ self.clearUFO()
+ if not os.path.exists(self.ufoPath):
+ os.mkdir(self.ufoPath)
+ # metainfo.plist
+ if metaInfo is None:
+ metaInfo = dict(creator="test", formatVersion=3)
+ path = os.path.join(self.ufoPath, "metainfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(metaInfo, f)
+ # layers
+ if layerContents is None:
+ layerContents = [
+ ("public.default", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ if layerContents:
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ else:
+ layerContents = [("", "glyphs")]
+ for name, directory in layerContents:
+ glyphsPath = os.path.join(self.ufoPath, directory)
+ os.mkdir(glyphsPath)
+ contents = dict(a="a.glif")
+ path = os.path.join(glyphsPath, "contents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(contents, f)
+ path = os.path.join(glyphsPath, "a.glif")
+ with open(path, "w") as f:
+ f.write(" ")
+
+ def clearUFO(self):
+ if os.path.exists(self.ufoPath):
+ shutil.rmtree(self.ufoPath)
+
+ # __init__: missing layer contents
+
+ def testMissingLayerContents(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: layer contents invalid format
+
+ def testInvalidLayerContentsFormat(self):
+ # bogus
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ with open(path, "w") as f:
+ f.write("test")
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+ # dict
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = {
+ "public.default": "glyphs",
+ "layer 1": "glyphs.layer 1",
+ "layer 2": "glyphs.layer 2",
+ }
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: layer contents invalid name format
+
+ def testInvalidLayerContentsNameFormat(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ (1, "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: layer contents invalid directory format
+
+ def testInvalidLayerContentsDirectoryFormat(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", 1),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: directory listed in contents not on disk
+
+ def testLayerContentsHasMissingDirectory(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.doesnotexist"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: no default layer on disk
+
+ def testMissingDefaultLayer(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [("layer 1", "glyphs.layer 1"), ("layer 2", "glyphs.layer 2")]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: duplicate layer name
+
+ def testDuplicateLayerName(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 1", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: directory referenced by two layer names
+
+ def testDuplicateLayerDirectory(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 1"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
+
+ # __init__: default without a name
+
+ def testDefaultLayerNoName(self):
+ # get the glyph set
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("public.foregound", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ writer = UFOWriter(self.ufoPath)
+
+ # __init__: default with a name
+
+ def testDefaultLayerName(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ os.remove(path)
+ layerContents = [
+ ("custom name", "glyphs"),
+ ("layer 1", "glyphs.layer 1"),
+ ("layer 2", "glyphs.layer 2"),
+ ]
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ writer = UFOWriter(self.ufoPath)
+
+ # __init__: up convert 1 > 3
+
+ def testUpConvert1To3(self):
+ self.makeUFO(
+ metaInfo=dict(creator="test", formatVersion=1), layerContents=dict()
+ )
+ writer = UFOWriter(self.ufoPath)
+ writer.writeLayerContents(["public.default"])
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [["public.default", "glyphs"]]
+ self.assertEqual(expected, result)
+
+ # __init__: up convert 2 > 3
+
+ def testUpConvert2To3(self):
+ self.makeUFO(
+ metaInfo=dict(creator="test", formatVersion=2), layerContents=dict()
+ )
+ writer = UFOWriter(self.ufoPath)
+ writer.writeLayerContents(["public.default"])
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [["public.default", "glyphs"]]
+ self.assertEqual(expected, result)
+
+ # __init__: down convert 3 > 1
+
+ def testDownConvert3To1(self):
+ self.makeUFO()
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath, formatVersion=1)
+
+ # __init__: down convert 3 > 2
+
+ def testDownConvert3To2(self):
+ self.makeUFO()
+ self.assertRaises(UFOLibError, UFOWriter, self.ufoPath, formatVersion=2)
+
+ # get glyph sets
+
+ def testGetGlyphSets(self):
+ self.makeUFO()
+ # hack contents.plist
+ path = os.path.join(self.ufoPath, "glyphs.layer 1", "contents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(dict(b="a.glif"), f)
+ path = os.path.join(self.ufoPath, "glyphs.layer 2", "contents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(dict(c="a.glif"), f)
+ # now test
+ writer = UFOWriter(self.ufoPath)
+ # default
+ expected = ["a"]
+ result = list(writer.getGlyphSet().keys())
+ self.assertEqual(expected, result)
+ # layer 1
+ expected = ["b"]
+ result = list(writer.getGlyphSet("layer 1", defaultLayer=False).keys())
+ self.assertEqual(expected, result)
+ # layer 2
+ expected = ["c"]
+ result = list(writer.getGlyphSet("layer 2", defaultLayer=False).keys())
+ self.assertEqual(expected, result)
+
+ def testGetGlyphSetNoContents(self):
+ self.makeUFO()
+ os.remove(os.path.join(self.ufoPath, "glyphs.layer 1", "contents.plist"))
+
+ reader = UFOReader(self.ufoPath, validate=True)
+ with self.assertRaises(GlifLibError):
+ reader.getGlyphSet("layer 1")
+
+ writer = UFOWriter(self.ufoPath, validate=True)
+ with self.assertRaises(GlifLibError):
+ writer.getGlyphSet("layer 1", defaultLayer=False, expectContentsFile=True)
+
+ # There's a separate code path for < v3 UFOs.
+ with open(os.path.join(self.ufoPath, "metainfo.plist"), "wb") as f:
+ plistlib.dump(dict(creator="test", formatVersion=2), f)
+ os.remove(os.path.join(self.ufoPath, "glyphs", "contents.plist"))
+ writer = UFOWriter(self.ufoPath, validate=True, formatVersion=2)
+ with self.assertRaises(GlifLibError):
+ writer.getGlyphSet(expectContentsFile=True)
+
+ # make a new font with two layers
+
+ def testNewFontOneLayer(self):
+ self.clearUFO()
+ writer = UFOWriter(self.ufoPath)
+ writer.getGlyphSet()
+ writer.writeLayerContents(["public.default"])
+ # directory
+ path = os.path.join(self.ufoPath, "glyphs")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ # layer contents
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [["public.default", "glyphs"]]
+ self.assertEqual(expected, result)
+
+ def testNewFontThreeLayers(self):
+ self.clearUFO()
+ writer = UFOWriter(self.ufoPath)
+ writer.getGlyphSet("layer 1", defaultLayer=False)
+ writer.getGlyphSet()
+ writer.getGlyphSet("layer 2", defaultLayer=False)
+ writer.writeLayerContents(["layer 1", "public.default", "layer 2"])
+ # directories
+ path = os.path.join(self.ufoPath, "glyphs")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 1")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 2")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ # layer contents
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [
+ ["layer 1", "glyphs.layer 1"],
+ ["public.default", "glyphs"],
+ ["layer 2", "glyphs.layer 2"],
+ ]
+ self.assertEqual(expected, result)
+
+ # add a layer to an existing font
+
+ def testAddLayerToExistingFont(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ writer.getGlyphSet("layer 3", defaultLayer=False)
+ writer.writeLayerContents(["public.default", "layer 1", "layer 2", "layer 3"])
+ # directories
+ path = os.path.join(self.ufoPath, "glyphs")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 1")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 2")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 3")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ # layer contents
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [
+ ["public.default", "glyphs"],
+ ["layer 1", "glyphs.layer 1"],
+ ["layer 2", "glyphs.layer 2"],
+ ["layer 3", "glyphs.layer 3"],
+ ]
+ self.assertEqual(expected, result)
+
+ # rename valid name
+
+ def testRenameLayer(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ writer.renameGlyphSet("layer 1", "layer 3")
+ writer.writeLayerContents(["public.default", "layer 3", "layer 2"])
+ # directories
+ path = os.path.join(self.ufoPath, "glyphs")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 1")
+ exists = os.path.exists(path)
+ self.assertEqual(False, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 2")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 3")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ # layer contents
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [
+ ["public.default", "glyphs"],
+ ["layer 3", "glyphs.layer 3"],
+ ["layer 2", "glyphs.layer 2"],
+ ]
+ self.assertEqual(expected, result)
+
+ def testRenameLayerDefault(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ writer.renameGlyphSet("public.default", "layer xxx")
+ writer.renameGlyphSet("layer 1", "layer 1", defaultLayer=True)
+ writer.writeLayerContents(["layer xxx", "layer 1", "layer 2"])
+ path = os.path.join(self.ufoPath, "glyphs")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 1")
+ exists = os.path.exists(path)
+ self.assertEqual(False, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 2")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer xxx")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ # layer contents
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [
+ ["layer xxx", "glyphs.layer xxx"],
+ ["layer 1", "glyphs"],
+ ["layer 2", "glyphs.layer 2"],
+ ]
+ self.assertEqual(expected, result)
+
+ # rename duplicate name
+
+ def testRenameLayerDuplicateName(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ self.assertRaises(UFOLibError, writer.renameGlyphSet, "layer 1", "layer 2")
+
+ # rename unknown layer
+
+ def testRenameLayerUnknownName(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ self.assertRaises(
+ UFOLibError, writer.renameGlyphSet, "does not exist", "layer 2"
+ )
+
+ # remove valid layer
+
+ def testRemoveLayer(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ writer.deleteGlyphSet("layer 1")
+ writer.writeLayerContents(["public.default", "layer 2"])
+ # directories
+ path = os.path.join(self.ufoPath, "glyphs")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 1")
+ exists = os.path.exists(path)
+ self.assertEqual(False, exists)
+ path = os.path.join(self.ufoPath, "glyphs.layer 2")
+ exists = os.path.exists(path)
+ self.assertEqual(True, exists)
+ # layer contents
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [["public.default", "glyphs"], ["layer 2", "glyphs.layer 2"]]
+ self.assertEqual(expected, result)
+
+ # remove default layer
+
+ def testRemoveDefaultLayer(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ writer.deleteGlyphSet("public.default")
+ writer.writeLayerContents(["layer 1", "layer 2"])
+ # directories
+ path = os.path.join(self.ufoPath, "glyphs")
+ self.assertEqual(False, os.path.exists(path))
+ path = os.path.join(self.ufoPath, "glyphs.layer 1")
+ self.assertEqual(True, os.path.exists(path))
+ path = os.path.join(self.ufoPath, "glyphs.layer 2")
+ self.assertEqual(True, os.path.exists(path))
+ # layer contents
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [["layer 1", "glyphs.layer 1"], ["layer 2", "glyphs.layer 2"]]
+ self.assertEqual(expected, result)
+
+ # remove unknown layer
+
+ def testRemoveDefaultLayer2(self):
+ self.makeUFO()
+ writer = UFOWriter(self.ufoPath)
+ self.assertRaises(UFOLibError, writer.deleteGlyphSet, "does not exist")
+
+ def testWriteAsciiLayerOrder(self):
+ self.makeUFO(
+ layerContents=[
+ ["public.default", "glyphs"],
+ ["layer 1", "glyphs.layer 1"],
+ ["layer 2", "glyphs.layer 2"],
+ ]
+ )
+ writer = UFOWriter(self.ufoPath)
+ writer.writeLayerContents(["public.default", "layer 2", "layer 1"])
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ expected = [
+ ["public.default", "glyphs"],
+ ["layer 2", "glyphs.layer 2"],
+ ["layer 1", "glyphs.layer 1"],
+ ]
+ self.assertEqual(expected, result)
+ for layerName, _ in result:
+ assert isinstance(layerName, str)
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.ufoPath = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def makeUFO(self, metaInfo=None, layerContents=None):
- self.clearUFO()
- if not os.path.exists(self.ufoPath):
- os.mkdir(self.ufoPath)
- # metainfo.plist
- if metaInfo is None:
- metaInfo = dict(creator="test", formatVersion=3)
- path = os.path.join(self.ufoPath, "metainfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(metaInfo, f)
- # layers
- if layerContents is None:
- layerContents = [
- ("public.default", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2"),
- ]
- if layerContents:
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- else:
- layerContents = [("", "glyphs")]
- for name, directory in layerContents:
- glyphsPath = os.path.join(self.ufoPath, directory)
- os.mkdir(glyphsPath)
- contents = dict(a="a.glif")
- path = os.path.join(glyphsPath, "contents.plist")
- with open(path, "wb") as f:
- plistlib.dump(contents, f)
- path = os.path.join(glyphsPath, "a.glif")
- with open(path, "w") as f:
- f.write(" ")
-
- def clearUFO(self):
- if os.path.exists(self.ufoPath):
- shutil.rmtree(self.ufoPath)
-
- # __init__: missing layer contents
-
- def testMissingLayerContents(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: layer contents invalid format
-
- def testInvalidLayerContentsFormat(self):
- # bogus
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- with open(path, "w") as f:
- f.write("test")
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
- # dict
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = {
- "public.default" : "glyphs",
- "layer 1" : "glyphs.layer 1",
- "layer 2" : "glyphs.layer 2",
- }
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: layer contents invalid name format
-
- def testInvalidLayerContentsNameFormat(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- (1, "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: layer contents invalid directory format
-
- def testInvalidLayerContentsDirectoryFormat(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", 1),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: directory listed in contents not on disk
-
- def testLayerContentsHasMissingDirectory(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.doesnotexist"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: no default layer on disk
-
- def testMissingDefaultLayer(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: duplicate layer name
-
- def testDuplicateLayerName(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 1", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: directory referenced by two layer names
-
- def testDuplicateLayerDirectory(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 1")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
-
- # __init__: default without a name
-
- def testDefaultLayerNoName(self):
- # get the glyph set
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("public.foregound", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- writer = UFOWriter(self.ufoPath)
-
- # __init__: default with a name
-
- def testDefaultLayerName(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "layercontents.plist")
- os.remove(path)
- layerContents = [
- ("custom name", "glyphs"),
- ("layer 1", "glyphs.layer 1"),
- ("layer 2", "glyphs.layer 2")
- ]
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- writer = UFOWriter(self.ufoPath)
-
- # __init__: up convert 1 > 3
-
- def testUpConvert1To3(self):
- self.makeUFO(
- metaInfo=dict(creator="test", formatVersion=1),
- layerContents=dict()
- )
- writer = UFOWriter(self.ufoPath)
- writer.writeLayerContents(["public.default"])
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [["public.default", "glyphs"]]
- self.assertEqual(expected, result)
-
- # __init__: up convert 2 > 3
-
- def testUpConvert2To3(self):
- self.makeUFO(
- metaInfo=dict(creator="test", formatVersion=2),
- layerContents=dict()
- )
- writer = UFOWriter(self.ufoPath)
- writer.writeLayerContents(["public.default"])
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [["public.default", "glyphs"]]
- self.assertEqual(expected, result)
-
- # __init__: down convert 3 > 1
-
- def testDownConvert3To1(self):
- self.makeUFO()
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath, formatVersion=1)
-
- # __init__: down convert 3 > 2
-
- def testDownConvert3To2(self):
- self.makeUFO()
- self.assertRaises(UFOLibError, UFOWriter, self.ufoPath, formatVersion=2)
-
- # get glyph sets
-
- def testGetGlyphSets(self):
- self.makeUFO()
- # hack contents.plist
- path = os.path.join(self.ufoPath, "glyphs.layer 1", "contents.plist")
- with open(path, "wb") as f:
- plistlib.dump(dict(b="a.glif"), f)
- path = os.path.join(self.ufoPath, "glyphs.layer 2", "contents.plist")
- with open(path, "wb") as f:
- plistlib.dump(dict(c="a.glif"), f)
- # now test
- writer = UFOWriter(self.ufoPath)
- # default
- expected = ["a"]
- result = list(writer.getGlyphSet().keys())
- self.assertEqual(expected, result)
- # layer 1
- expected = ["b"]
- result = list(writer.getGlyphSet("layer 1", defaultLayer=False).keys())
- self.assertEqual(expected, result)
- # layer 2
- expected = ["c"]
- result = list(writer.getGlyphSet("layer 2", defaultLayer=False).keys())
- self.assertEqual(expected, result)
-
- def testGetGlyphSetNoContents(self):
- self.makeUFO()
- os.remove(os.path.join(self.ufoPath, "glyphs.layer 1", "contents.plist"))
-
- reader = UFOReader(self.ufoPath, validate=True)
- with self.assertRaises(GlifLibError):
- reader.getGlyphSet("layer 1")
-
- writer = UFOWriter(self.ufoPath, validate=True)
- with self.assertRaises(GlifLibError):
- writer.getGlyphSet("layer 1", defaultLayer=False, expectContentsFile=True)
-
- # There's a separate code path for < v3 UFOs.
- with open(os.path.join(self.ufoPath, "metainfo.plist"), "wb") as f:
- plistlib.dump(dict(creator="test", formatVersion=2), f)
- os.remove(os.path.join(self.ufoPath, "glyphs", "contents.plist"))
- writer = UFOWriter(self.ufoPath, validate=True, formatVersion=2)
- with self.assertRaises(GlifLibError):
- writer.getGlyphSet(expectContentsFile=True)
-
- # make a new font with two layers
-
- def testNewFontOneLayer(self):
- self.clearUFO()
- writer = UFOWriter(self.ufoPath)
- writer.getGlyphSet()
- writer.writeLayerContents(["public.default"])
- # directory
- path = os.path.join(self.ufoPath, "glyphs")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- # layer contents
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [["public.default", "glyphs"]]
- self.assertEqual(expected, result)
-
- def testNewFontThreeLayers(self):
- self.clearUFO()
- writer = UFOWriter(self.ufoPath)
- writer.getGlyphSet("layer 1", defaultLayer=False)
- writer.getGlyphSet()
- writer.getGlyphSet("layer 2", defaultLayer=False)
- writer.writeLayerContents(["layer 1", "public.default", "layer 2"])
- # directories
- path = os.path.join(self.ufoPath, "glyphs")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 1")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 2")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- # layer contents
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [["layer 1", "glyphs.layer 1"], ["public.default", "glyphs"], ["layer 2", "glyphs.layer 2"]]
- self.assertEqual(expected, result)
-
- # add a layer to an existing font
-
- def testAddLayerToExistingFont(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- writer.getGlyphSet("layer 3", defaultLayer=False)
- writer.writeLayerContents(["public.default", "layer 1", "layer 2", "layer 3"])
- # directories
- path = os.path.join(self.ufoPath, "glyphs")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 1")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 2")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 3")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- # layer contents
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [['public.default', 'glyphs'], ['layer 1', 'glyphs.layer 1'], ['layer 2', 'glyphs.layer 2'], ["layer 3", "glyphs.layer 3"]]
- self.assertEqual(expected, result)
-
- # rename valid name
-
- def testRenameLayer(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- writer.renameGlyphSet("layer 1", "layer 3")
- writer.writeLayerContents(["public.default", "layer 3", "layer 2"])
- # directories
- path = os.path.join(self.ufoPath, "glyphs")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 1")
- exists = os.path.exists(path)
- self.assertEqual(False, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 2")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 3")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- # layer contents
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [['public.default', 'glyphs'], ['layer 3', 'glyphs.layer 3'], ['layer 2', 'glyphs.layer 2']]
- self.assertEqual(expected, result)
-
- def testRenameLayerDefault(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- writer.renameGlyphSet("public.default", "layer xxx")
- writer.renameGlyphSet("layer 1", "layer 1", defaultLayer=True)
- writer.writeLayerContents(["layer xxx", "layer 1", "layer 2"])
- path = os.path.join(self.ufoPath, "glyphs")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 1")
- exists = os.path.exists(path)
- self.assertEqual(False, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 2")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer xxx")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- # layer contents
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [['layer xxx', 'glyphs.layer xxx'], ['layer 1', 'glyphs'], ['layer 2', 'glyphs.layer 2']]
- self.assertEqual(expected, result)
-
- # rename duplicate name
-
- def testRenameLayerDuplicateName(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- self.assertRaises(UFOLibError, writer.renameGlyphSet, "layer 1", "layer 2")
-
- # rename unknown layer
-
- def testRenameLayerUnknownName(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- self.assertRaises(UFOLibError, writer.renameGlyphSet, "does not exist", "layer 2")
-
- # remove valid layer
-
- def testRemoveLayer(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- writer.deleteGlyphSet("layer 1")
- writer.writeLayerContents(["public.default", "layer 2"])
- # directories
- path = os.path.join(self.ufoPath, "glyphs")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 1")
- exists = os.path.exists(path)
- self.assertEqual(False, exists)
- path = os.path.join(self.ufoPath, "glyphs.layer 2")
- exists = os.path.exists(path)
- self.assertEqual(True, exists)
- # layer contents
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [["public.default", "glyphs"], ["layer 2", "glyphs.layer 2"]]
- self.assertEqual(expected, result)
-
- # remove default layer
-
- def testRemoveDefaultLayer(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- writer.deleteGlyphSet("public.default")
- writer.writeLayerContents(["layer 1", "layer 2"])
- # directories
- path = os.path.join(self.ufoPath, "glyphs")
- self.assertEqual(False, os.path.exists(path))
- path = os.path.join(self.ufoPath, "glyphs.layer 1")
- self.assertEqual(True, os.path.exists(path))
- path = os.path.join(self.ufoPath, "glyphs.layer 2")
- self.assertEqual(True, os.path.exists(path))
- # layer contents
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [["layer 1", "glyphs.layer 1"], ["layer 2", "glyphs.layer 2"]]
- self.assertEqual(expected, result)
-
- # remove unknown layer
-
- def testRemoveDefaultLayer2(self):
- self.makeUFO()
- writer = UFOWriter(self.ufoPath)
- self.assertRaises(UFOLibError, writer.deleteGlyphSet, "does not exist")
-
- def testWriteAsciiLayerOrder(self):
- self.makeUFO(
- layerContents=[
- ["public.default", "glyphs"],
- ["layer 1", "glyphs.layer 1"],
- ["layer 2", "glyphs.layer 2"],
- ]
- )
- writer = UFOWriter(self.ufoPath)
- writer.writeLayerContents(["public.default", "layer 2", "layer 1"])
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- expected = [
- ["public.default", "glyphs"],
- ["layer 2", "glyphs.layer 2"],
- ["layer 1", "glyphs.layer 1"],
- ]
- self.assertEqual(expected, result)
- for layerName, _ in result:
- assert isinstance(layerName, str)
# -----
# /data
@@ -4175,539 +4529,549 @@ class UFO3WriteLayersTestCase(unittest.TestCase):
class UFO3ReadDataTestCase(unittest.TestCase):
-
- def getFontPath(self):
- testdata = os.path.join(os.path.dirname(__file__), "testdata")
- return os.path.join(testdata, "UFO3-Read Data.ufo")
-
- def testUFOReaderDataDirectoryListing(self):
- reader = UFOReader(self.getFontPath())
- found = reader.getDataDirectoryListing()
- expected = [
- 'org.unifiedfontobject.directory/bar/lol.txt',
- 'org.unifiedfontobject.directory/foo.txt',
- 'org.unifiedfontobject.file.txt'
- ]
- self.assertEqual(set(found), set(expected))
-
- def testUFOReaderBytesFromPath(self):
- reader = UFOReader(self.getFontPath())
- found = reader.readBytesFromPath("data/org.unifiedfontobject.file.txt")
- expected = b"file.txt"
- self.assertEqual(found, expected)
- found = reader.readBytesFromPath("data/org.unifiedfontobject.directory/bar/lol.txt")
- expected = b"lol.txt"
- self.assertEqual(found, expected)
- found = reader.readBytesFromPath("data/org.unifiedfontobject.doesNotExist")
- expected = None
- self.assertEqual(found, expected)
-
- def testUFOReaderReadFileFromPath(self):
- reader = UFOReader(self.getFontPath())
- fileObject = reader.getReadFileForPath("data/org.unifiedfontobject.file.txt")
- self.assertNotEqual(fileObject, None)
- hasRead = hasattr(fileObject, "read")
- self.assertEqual(hasRead, True)
- fileObject.close()
- fileObject = reader.getReadFileForPath("data/org.unifiedfontobject.doesNotExist")
- self.assertEqual(fileObject, None)
-
- def testUFOReaderKernGroupDuplicatesRemoved(self):
- # Non-kerning group duplicates are kept
- # Kerning group duplicates are removed
- expected_groups = {
- "group1" : ["A"],
- "group2" : ["B", "C", "B"],
- "public.kern1.A" : ["A"],
- "public.kern2.B" : ["B", "A", "C"],
- }
- reader = UFOReader(self.getFontPath())
- groups = reader.readGroups()
- self.assertEqual(expected_groups, groups)
+ def getFontPath(self):
+ testdata = os.path.join(os.path.dirname(__file__), "testdata")
+ return os.path.join(testdata, "UFO3-Read Data.ufo")
+
+ def testUFOReaderDataDirectoryListing(self):
+ reader = UFOReader(self.getFontPath())
+ found = reader.getDataDirectoryListing()
+ expected = [
+ "org.unifiedfontobject.directory/bar/lol.txt",
+ "org.unifiedfontobject.directory/foo.txt",
+ "org.unifiedfontobject.file.txt",
+ ]
+ self.assertEqual(set(found), set(expected))
+
+ def testUFOReaderBytesFromPath(self):
+ reader = UFOReader(self.getFontPath())
+ found = reader.readBytesFromPath("data/org.unifiedfontobject.file.txt")
+ expected = b"file.txt"
+ self.assertEqual(found, expected)
+ found = reader.readBytesFromPath(
+ "data/org.unifiedfontobject.directory/bar/lol.txt"
+ )
+ expected = b"lol.txt"
+ self.assertEqual(found, expected)
+ found = reader.readBytesFromPath("data/org.unifiedfontobject.doesNotExist")
+ expected = None
+ self.assertEqual(found, expected)
+
+ def testUFOReaderReadFileFromPath(self):
+ reader = UFOReader(self.getFontPath())
+ fileObject = reader.getReadFileForPath("data/org.unifiedfontobject.file.txt")
+ self.assertNotEqual(fileObject, None)
+ hasRead = hasattr(fileObject, "read")
+ self.assertEqual(hasRead, True)
+ fileObject.close()
+ fileObject = reader.getReadFileForPath(
+ "data/org.unifiedfontobject.doesNotExist"
+ )
+ self.assertEqual(fileObject, None)
+
+ def testUFOReaderKernGroupDuplicatesRemoved(self):
+ # Non-kerning group duplicates are kept
+ # Kerning group duplicates are removed
+ expected_groups = {
+ "group1": ["A"],
+ "group2": ["B", "C", "B"],
+ "public.kern1.A": ["A"],
+ "public.kern2.B": ["B", "A", "C"],
+ }
+ reader = UFOReader(self.getFontPath())
+ groups = reader.readGroups()
+ self.assertEqual(expected_groups, groups)
class UFO3WriteDataTestCase(unittest.TestCase):
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.dstDir = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def tearDownUFO(self):
+ if os.path.exists(self.dstDir):
+ shutil.rmtree(self.dstDir)
+
+ def testUFOWriterWriteBytesToPath(self):
+ # basic file
+ path = "data/org.unifiedfontobject.writebytesbasicfile.txt"
+ testBytes = b"test"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeBytesToPath(path, testBytes)
+ path = os.path.join(self.dstDir, path)
+ self.assertEqual(os.path.exists(path), True)
+ with open(path, "rb") as f:
+ written = f.read()
+ self.assertEqual(testBytes, written)
+ self.tearDownUFO()
+ # basic file with unicode text
+ path = "data/org.unifiedfontobject.writebytesbasicunicodefile.txt"
+ text = b"t\xeb\xdft"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeBytesToPath(path, text)
+ path = os.path.join(self.dstDir, path)
+ self.assertEqual(os.path.exists(path), True)
+ with open(path, "rb") as f:
+ written = f.read()
+ self.assertEqual(text, written)
+ self.tearDownUFO()
+ # basic directory
+ path = "data/org.unifiedfontobject.writebytesdirectory/level1/level2/file.txt"
+ testBytes = b"test"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeBytesToPath(path, testBytes)
+ path = os.path.join(self.dstDir, path)
+ self.assertEqual(os.path.exists(path), True)
+ with open(path, "rb") as f:
+ written = f.read()
+ self.assertEqual(testBytes, written)
+ self.tearDownUFO()
+
+ def testUFOWriterWriteFileToPath(self):
+ # basic file
+ path = "data/org.unifiedfontobject.getwritefile.txt"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ fileObject = writer.getFileObjectForPath(path)
+ self.assertNotEqual(fileObject, None)
+ hasRead = hasattr(fileObject, "read")
+ self.assertEqual(hasRead, True)
+ fileObject.close()
+ self.tearDownUFO()
+
+ def testUFOWriterRemoveFile(self):
+ path1 = "data/org.unifiedfontobject.removefile/level1/level2/file1.txt"
+ path2 = "data/org.unifiedfontobject.removefile/level1/level2/file2.txt"
+ path3 = "data/org.unifiedfontobject.removefile/level1/file3.txt"
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.writeBytesToPath(path1, b"test")
+ writer.writeBytesToPath(path2, b"test")
+ writer.writeBytesToPath(path3, b"test")
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path1)), True)
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), True)
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
+ writer.removeFileForPath(path1)
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path1)), False)
+ self.assertEqual(
+ os.path.exists(os.path.dirname(os.path.join(self.dstDir, path1))), True
+ )
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), True)
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
+ writer.removeFileForPath(path2)
+ self.assertEqual(
+ os.path.exists(os.path.dirname(os.path.join(self.dstDir, path1))), False
+ )
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), False)
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
+ writer.removeFileForPath(path3)
+ self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), False)
+ self.assertEqual(
+ os.path.exists(os.path.dirname(os.path.join(self.dstDir, path2))), False
+ )
+ self.assertEqual(
+ os.path.exists(
+ os.path.join(self.dstDir, "data/org.unifiedfontobject.removefile")
+ ),
+ False,
+ )
+ self.assertRaises(
+ UFOLibError,
+ writer.removeFileForPath,
+ path="data/org.unifiedfontobject.doesNotExist.txt",
+ )
+ self.tearDownUFO()
+
+ def testUFOWriterCopy(self):
+ sourceDir = self.dstDir.replace(".ufo", "") + "-copy source" + ".ufo"
+ dataPath = "data/org.unifiedfontobject.copy/level1/level2/file1.txt"
+ writer = UFOWriter(sourceDir, formatVersion=3)
+ writer.writeBytesToPath(dataPath, b"test")
+ # copy a file
+ reader = UFOReader(sourceDir)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ writer.copyFromReader(reader, dataPath, dataPath)
+ path = os.path.join(self.dstDir, dataPath)
+ self.assertEqual(os.path.exists(path), True)
+ self.tearDownUFO()
+ # copy a directory
+ reader = UFOReader(sourceDir)
+ writer = UFOWriter(self.dstDir, formatVersion=3)
+ p = "data/org.unifiedfontobject.copy"
+ writer.copyFromReader(reader, p, p)
+ path = os.path.join(self.dstDir, dataPath)
+ self.assertEqual(os.path.exists(path), True)
+ self.tearDownUFO()
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.dstDir = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def tearDownUFO(self):
- if os.path.exists(self.dstDir):
- shutil.rmtree(self.dstDir)
-
- def testUFOWriterWriteBytesToPath(self):
- # basic file
- path = "data/org.unifiedfontobject.writebytesbasicfile.txt"
- testBytes = b"test"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeBytesToPath(path, testBytes)
- path = os.path.join(self.dstDir, path)
- self.assertEqual(os.path.exists(path), True)
- with open(path, "rb") as f:
- written = f.read()
- self.assertEqual(testBytes, written)
- self.tearDownUFO()
- # basic file with unicode text
- path = "data/org.unifiedfontobject.writebytesbasicunicodefile.txt"
- text = b"t\xeb\xdft"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeBytesToPath(path, text)
- path = os.path.join(self.dstDir, path)
- self.assertEqual(os.path.exists(path), True)
- with open(path, "rb") as f:
- written = f.read()
- self.assertEqual(text, written)
- self.tearDownUFO()
- # basic directory
- path = "data/org.unifiedfontobject.writebytesdirectory/level1/level2/file.txt"
- testBytes = b"test"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeBytesToPath(path, testBytes)
- path = os.path.join(self.dstDir, path)
- self.assertEqual(os.path.exists(path), True)
- with open(path, "rb") as f:
- written = f.read()
- self.assertEqual(testBytes, written)
- self.tearDownUFO()
-
- def testUFOWriterWriteFileToPath(self):
- # basic file
- path = "data/org.unifiedfontobject.getwritefile.txt"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- fileObject = writer.getFileObjectForPath(path)
- self.assertNotEqual(fileObject, None)
- hasRead = hasattr(fileObject, "read")
- self.assertEqual(hasRead, True)
- fileObject.close()
- self.tearDownUFO()
-
- def testUFOWriterRemoveFile(self):
- path1 = "data/org.unifiedfontobject.removefile/level1/level2/file1.txt"
- path2 = "data/org.unifiedfontobject.removefile/level1/level2/file2.txt"
- path3 = "data/org.unifiedfontobject.removefile/level1/file3.txt"
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.writeBytesToPath(path1, b"test")
- writer.writeBytesToPath(path2, b"test")
- writer.writeBytesToPath(path3, b"test")
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path1)), True)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), True)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
- writer.removeFileForPath(path1)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path1)), False)
- self.assertEqual(os.path.exists(os.path.dirname(os.path.join(self.dstDir, path1))), True)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), True)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
- writer.removeFileForPath(path2)
- self.assertEqual(os.path.exists(os.path.dirname(os.path.join(self.dstDir, path1))), False)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), False)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
- writer.removeFileForPath(path3)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), False)
- self.assertEqual(os.path.exists(os.path.dirname(os.path.join(self.dstDir, path2))), False)
- self.assertEqual(os.path.exists(os.path.join(self.dstDir, "data/org.unifiedfontobject.removefile")), False)
- self.assertRaises(UFOLibError, writer.removeFileForPath, path="data/org.unifiedfontobject.doesNotExist.txt")
- self.tearDownUFO()
-
- def testUFOWriterCopy(self):
- sourceDir = self.dstDir.replace(".ufo", "") + "-copy source" + ".ufo"
- dataPath = "data/org.unifiedfontobject.copy/level1/level2/file1.txt"
- writer = UFOWriter(sourceDir, formatVersion=3)
- writer.writeBytesToPath(dataPath, b"test")
- # copy a file
- reader = UFOReader(sourceDir)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- writer.copyFromReader(reader, dataPath, dataPath)
- path = os.path.join(self.dstDir, dataPath)
- self.assertEqual(os.path.exists(path), True)
- self.tearDownUFO()
- # copy a directory
- reader = UFOReader(sourceDir)
- writer = UFOWriter(self.dstDir, formatVersion=3)
- p = "data/org.unifiedfontobject.copy"
- writer.copyFromReader(reader, p, p)
- path = os.path.join(self.dstDir, dataPath)
- self.assertEqual(os.path.exists(path), True)
- self.tearDownUFO()
# ---------------
# layerinfo.plist
# ---------------
-class TestLayerInfoObject:
- color = guidelines = lib = None
+class TestLayerInfoObject:
+ color = guidelines = lib = None
class UFO3ReadLayerInfoTestCase(unittest.TestCase):
-
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.ufoPath = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def makeUFO(self, formatVersion=3, layerInfo=None):
- self.clearUFO()
- if not os.path.exists(self.ufoPath):
- os.mkdir(self.ufoPath)
- # metainfo.plist
- metaInfo = dict(creator="test", formatVersion=formatVersion)
- path = os.path.join(self.ufoPath, "metainfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(metaInfo, f)
- # layercontents.plist
- layerContents = [("public.default", "glyphs")]
- path = os.path.join(self.ufoPath, "layercontents.plist")
- with open(path, "wb") as f:
- plistlib.dump(layerContents, f)
- # glyphs
- glyphsPath = os.path.join(self.ufoPath, "glyphs")
- os.mkdir(glyphsPath)
- contents = dict(a="a.glif")
- path = os.path.join(glyphsPath, "contents.plist")
- with open(path, "wb") as f:
- plistlib.dump(contents, f)
- path = os.path.join(glyphsPath, "a.glif")
- with open(path, "w") as f:
- f.write(" ")
- # layerinfo.plist
- if layerInfo is None:
- layerInfo = dict(
- color="0,0,0,1",
- lib={"foo" : "bar"}
- )
- path = os.path.join(glyphsPath, "layerinfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(layerInfo, f)
-
- def clearUFO(self):
- if os.path.exists(self.ufoPath):
- shutil.rmtree(self.ufoPath)
-
- def testValidLayerInfo(self):
- self.makeUFO()
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- info = TestLayerInfoObject()
- glyphSet.readLayerInfo(info)
- expectedColor = "0,0,0,1"
- self.assertEqual(expectedColor, info.color)
- expectedLib = {"foo": "bar"}
- self.assertEqual(expectedLib, info.lib)
-
- def testMissingLayerInfo(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
- os.remove(path)
- # read
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- info = TestLayerInfoObject()
- glyphSet.readLayerInfo(info)
- self.assertEqual(None, info.color)
- self.assertEqual(None, info.guidelines)
- self.assertEqual(None, info.lib)
-
- def testBogusLayerInfo(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
- os.remove(path)
- with open(path, "w") as f:
- f.write("test")
- # read
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- info = TestLayerInfoObject()
- self.assertRaises(UFOLibError, glyphSet.readLayerInfo, info)
-
- def testInvalidFormatLayerInfo(self):
- self.makeUFO()
- path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
- info = [("color", "0,0,0,0")]
- with open(path, "wb") as f:
- plistlib.dump(info, f)
- # read
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- info = TestLayerInfoObject()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, info)
-
- def testColor(self):
- ## not a string
- info = {}
- info["color"] = 1
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- ## not enough commas
- info = {}
- info["color"] = "1 0, 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1 0 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1 0 0 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- ## not enough parts
- info = {}
- info["color"] = ", 0, 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1, , 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1, 0, , 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1, 0, 0, "
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = ", , , "
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- ## not a number in all positions
- info = {}
- info["color"] = "r, 1, 1, 1"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1, g, 1, 1"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1, 1, b, 1"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "1, 1, 1, a"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- ## too many parts
- info = {}
- info["color"] = "1, 0, 0, 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- ## < 0 in each position
- info = {}
- info["color"] = "-1, 0, 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "0, -1, 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "0, 0, -1, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "0, 0, 0, -1"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- ## > 1 in each position
- info = {}
- info["color"] = "2, 0, 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "0, 2, 0, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "0, 0, 2, 0"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
- info = {}
- info["color"] = "0, 0, 0, 2"
- self.makeUFO(layerInfo=info)
- reader = UFOReader(self.ufoPath, validate=True)
- glyphSet = reader.getGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.ufoPath = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def makeUFO(self, formatVersion=3, layerInfo=None):
+ self.clearUFO()
+ if not os.path.exists(self.ufoPath):
+ os.mkdir(self.ufoPath)
+ # metainfo.plist
+ metaInfo = dict(creator="test", formatVersion=formatVersion)
+ path = os.path.join(self.ufoPath, "metainfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(metaInfo, f)
+ # layercontents.plist
+ layerContents = [("public.default", "glyphs")]
+ path = os.path.join(self.ufoPath, "layercontents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(layerContents, f)
+ # glyphs
+ glyphsPath = os.path.join(self.ufoPath, "glyphs")
+ os.mkdir(glyphsPath)
+ contents = dict(a="a.glif")
+ path = os.path.join(glyphsPath, "contents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(contents, f)
+ path = os.path.join(glyphsPath, "a.glif")
+ with open(path, "w") as f:
+ f.write(" ")
+ # layerinfo.plist
+ if layerInfo is None:
+ layerInfo = dict(color="0,0,0,1", lib={"foo": "bar"})
+ path = os.path.join(glyphsPath, "layerinfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(layerInfo, f)
+
+ def clearUFO(self):
+ if os.path.exists(self.ufoPath):
+ shutil.rmtree(self.ufoPath)
+
+ def testValidLayerInfo(self):
+ self.makeUFO()
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ info = TestLayerInfoObject()
+ glyphSet.readLayerInfo(info)
+ expectedColor = "0,0,0,1"
+ self.assertEqual(expectedColor, info.color)
+ expectedLib = {"foo": "bar"}
+ self.assertEqual(expectedLib, info.lib)
+
+ def testMissingLayerInfo(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
+ os.remove(path)
+ # read
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ info = TestLayerInfoObject()
+ glyphSet.readLayerInfo(info)
+ self.assertEqual(None, info.color)
+ self.assertEqual(None, info.guidelines)
+ self.assertEqual(None, info.lib)
+
+ def testBogusLayerInfo(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
+ os.remove(path)
+ with open(path, "w") as f:
+ f.write("test")
+ # read
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ info = TestLayerInfoObject()
+ self.assertRaises(UFOLibError, glyphSet.readLayerInfo, info)
+
+ def testInvalidFormatLayerInfo(self):
+ self.makeUFO()
+ path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
+ info = [("color", "0,0,0,0")]
+ with open(path, "wb") as f:
+ plistlib.dump(info, f)
+ # read
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ info = TestLayerInfoObject()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, info)
+
+ def testColor(self):
+ ## not a string
+ info = {}
+ info["color"] = 1
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ ## not enough commas
+ info = {}
+ info["color"] = "1 0, 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1 0 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1 0 0 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ ## not enough parts
+ info = {}
+ info["color"] = ", 0, 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1, , 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1, 0, , 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1, 0, 0, "
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = ", , , "
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ ## not a number in all positions
+ info = {}
+ info["color"] = "r, 1, 1, 1"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1, g, 1, 1"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1, 1, b, 1"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "1, 1, 1, a"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ ## too many parts
+ info = {}
+ info["color"] = "1, 0, 0, 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ ## < 0 in each position
+ info = {}
+ info["color"] = "-1, 0, 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "0, -1, 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "0, 0, -1, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "0, 0, 0, -1"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ ## > 1 in each position
+ info = {}
+ info["color"] = "2, 0, 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "0, 2, 0, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "0, 0, 2, 0"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
+ info = {}
+ info["color"] = "0, 0, 0, 2"
+ self.makeUFO(layerInfo=info)
+ reader = UFOReader(self.ufoPath, validate=True)
+ glyphSet = reader.getGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())
class UFO3WriteLayerInfoTestCase(unittest.TestCase):
-
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.ufoPath = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def makeGlyphSet(self):
- self.clearUFO()
- writer = UFOWriter(self.ufoPath)
- return writer.getGlyphSet()
-
- def clearUFO(self):
- if os.path.exists(self.ufoPath):
- shutil.rmtree(self.ufoPath)
-
- def testValidWrite(self):
- expected = dict(
- color="0,0,0,1",
- lib={"foo" : "bar"}
- )
- info = TestLayerInfoObject()
- info.color = expected["color"]
- info.lib = expected["lib"]
- glyphSet = self.makeGlyphSet()
- glyphSet.writeLayerInfo(info)
- path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
- with open(path, "rb") as f:
- result = plistlib.load(f)
- self.assertEqual(expected, result)
-
- def testColor(self):
- ## not a string
- info = TestLayerInfoObject()
- info.color = 1
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- ## not enough commas
- info = TestLayerInfoObject()
- info.color = "1 0, 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1 0 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1 0 0 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- ## not enough parts
- info = TestLayerInfoObject()
- info.color = ", 0, 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1, , 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1, 0, , 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1, 0, 0, "
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = ", , , "
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- ## not a number in all positions
- info = TestLayerInfoObject()
- info.color = "r, 1, 1, 1"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1, g, 1, 1"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1, 1, b, 1"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "1, 1, 1, a"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- ## too many parts
- info = TestLayerInfoObject()
- info.color = "1, 0, 0, 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- ## < 0 in each position
- info = TestLayerInfoObject()
- info.color = "-1, 0, 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "0, -1, 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "0, 0, -1, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "0, 0, 0, -1"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- ## > 1 in each position
- info = TestLayerInfoObject()
- info.color = "2, 0, 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "0, 2, 0, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "0, 0, 2, 0"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
- info = TestLayerInfoObject()
- info.color = "0, 0, 0, 2"
- glyphSet = self.makeGlyphSet()
- self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.ufoPath = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def makeGlyphSet(self):
+ self.clearUFO()
+ writer = UFOWriter(self.ufoPath)
+ return writer.getGlyphSet()
+
+ def clearUFO(self):
+ if os.path.exists(self.ufoPath):
+ shutil.rmtree(self.ufoPath)
+
+ def testValidWrite(self):
+ expected = dict(color="0,0,0,1", lib={"foo": "bar"})
+ info = TestLayerInfoObject()
+ info.color = expected["color"]
+ info.lib = expected["lib"]
+ glyphSet = self.makeGlyphSet()
+ glyphSet.writeLayerInfo(info)
+ path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
+ with open(path, "rb") as f:
+ result = plistlib.load(f)
+ self.assertEqual(expected, result)
+
+ def testColor(self):
+ ## not a string
+ info = TestLayerInfoObject()
+ info.color = 1
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ ## not enough commas
+ info = TestLayerInfoObject()
+ info.color = "1 0, 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1 0 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1 0 0 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ ## not enough parts
+ info = TestLayerInfoObject()
+ info.color = ", 0, 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1, , 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1, 0, , 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1, 0, 0, "
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = ", , , "
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ ## not a number in all positions
+ info = TestLayerInfoObject()
+ info.color = "r, 1, 1, 1"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1, g, 1, 1"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1, 1, b, 1"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "1, 1, 1, a"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ ## too many parts
+ info = TestLayerInfoObject()
+ info.color = "1, 0, 0, 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ ## < 0 in each position
+ info = TestLayerInfoObject()
+ info.color = "-1, 0, 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "0, -1, 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "0, 0, -1, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "0, 0, 0, -1"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ ## > 1 in each position
+ info = TestLayerInfoObject()
+ info.color = "2, 0, 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "0, 2, 0, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "0, 0, 2, 0"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
+ info = TestLayerInfoObject()
+ info.color = "0, 0, 0, 2"
+ glyphSet = self.makeGlyphSet()
+ self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)
diff --git a/Tests/ufoLib/UFOConversion_test.py b/Tests/ufoLib/UFOConversion_test.py
index 98a08121..5519cef6 100644
--- a/Tests/ufoLib/UFOConversion_test.py
+++ b/Tests/ufoLib/UFOConversion_test.py
@@ -11,357 +11,314 @@ from .testSupport import expectedFontInfo1To2Conversion, expectedFontInfo2To1Con
# the format version 1 lib.plist contains some data
# that these tests shouldn't be concerned about.
removeFromFormatVersion1Lib = [
- "org.robofab.opentype.classes",
- "org.robofab.opentype.features",
- "org.robofab.opentype.featureorder",
- "org.robofab.postScriptHintData"
+ "org.robofab.opentype.classes",
+ "org.robofab.opentype.features",
+ "org.robofab.opentype.featureorder",
+ "org.robofab.postScriptHintData",
]
class ConversionFunctionsTestCase(unittest.TestCase):
-
- def tearDown(self):
- path = self.getFontPath("TestFont1 (UFO1) converted.ufo")
- if os.path.exists(path):
- shutil.rmtree(path)
- path = self.getFontPath("TestFont1 (UFO2) converted.ufo")
- if os.path.exists(path):
- shutil.rmtree(path)
-
- def getFontPath(self, fileName):
- testdata = os.path.join(os.path.dirname(__file__), "testdata")
- return os.path.join(testdata, fileName)
-
- def compareFileStructures(self, path1, path2, expectedInfoData, testFeatures):
- # result
- metainfoPath1 = os.path.join(path1, "metainfo.plist")
- fontinfoPath1 = os.path.join(path1, "fontinfo.plist")
- kerningPath1 = os.path.join(path1, "kerning.plist")
- groupsPath1 = os.path.join(path1, "groups.plist")
- libPath1 = os.path.join(path1, "lib.plist")
- featuresPath1 = os.path.join(path1, "features.plist")
- glyphsPath1 = os.path.join(path1, "glyphs")
- glyphsPath1_contents = os.path.join(glyphsPath1, "contents.plist")
- glyphsPath1_A = os.path.join(glyphsPath1, "A_.glif")
- glyphsPath1_B = os.path.join(glyphsPath1, "B_.glif")
- # expected result
- metainfoPath2 = os.path.join(path2, "metainfo.plist")
- fontinfoPath2 = os.path.join(path2, "fontinfo.plist")
- kerningPath2 = os.path.join(path2, "kerning.plist")
- groupsPath2 = os.path.join(path2, "groups.plist")
- libPath2 = os.path.join(path2, "lib.plist")
- featuresPath2 = os.path.join(path2, "features.plist")
- glyphsPath2 = os.path.join(path2, "glyphs")
- glyphsPath2_contents = os.path.join(glyphsPath2, "contents.plist")
- glyphsPath2_A = os.path.join(glyphsPath2, "A_.glif")
- glyphsPath2_B = os.path.join(glyphsPath2, "B_.glif")
- # look for existence
- self.assertEqual(os.path.exists(metainfoPath1), True)
- self.assertEqual(os.path.exists(fontinfoPath1), True)
- self.assertEqual(os.path.exists(kerningPath1), True)
- self.assertEqual(os.path.exists(groupsPath1), True)
- self.assertEqual(os.path.exists(libPath1), True)
- self.assertEqual(os.path.exists(glyphsPath1), True)
- self.assertEqual(os.path.exists(glyphsPath1_contents), True)
- self.assertEqual(os.path.exists(glyphsPath1_A), True)
- self.assertEqual(os.path.exists(glyphsPath1_B), True)
- if testFeatures:
- self.assertEqual(os.path.exists(featuresPath1), True)
- # look for aggrement
- with open(metainfoPath1, "rb") as f:
- data1 = plistlib.load(f)
- with open(metainfoPath2, "rb") as f:
- data2 = plistlib.load(f)
- self.assertEqual(data1, data2)
- with open(fontinfoPath1, "rb") as f:
- data1 = plistlib.load(f)
- self.assertEqual(sorted(data1.items()), sorted(expectedInfoData.items()))
- with open(kerningPath1, "rb") as f:
- data1 = plistlib.load(f)
- with open(kerningPath2, "rb") as f:
- data2 = plistlib.load(f)
- self.assertEqual(data1, data2)
- with open(groupsPath1, "rb") as f:
- data1 = plistlib.load(f)
- with open(groupsPath2, "rb") as f:
- data2 = plistlib.load(f)
- self.assertEqual(data1, data2)
- with open(libPath1, "rb") as f:
- data1 = plistlib.load(f)
- with open(libPath2, "rb") as f:
- data2 = plistlib.load(f)
- if "UFO1" in libPath1:
- for key in removeFromFormatVersion1Lib:
- if key in data1:
- del data1[key]
- if "UFO1" in libPath2:
- for key in removeFromFormatVersion1Lib:
- if key in data2:
- del data2[key]
- self.assertEqual(data1, data2)
- with open(glyphsPath1_contents, "rb") as f:
- data1 = plistlib.load(f)
- with open(glyphsPath2_contents, "rb") as f:
- data2 = plistlib.load(f)
- self.assertEqual(data1, data2)
- with open(glyphsPath1_A, "rb") as f:
- data1 = plistlib.load(f)
- with open(glyphsPath2_A, "rb") as f:
- data2 = plistlib.load(f)
- self.assertEqual(data1, data2)
- with open(glyphsPath1_B, "rb") as f:
- data1 = plistlib.load(f)
- with open(glyphsPath2_B, "rb") as f:
- data2 = plistlib.load(f)
- self.assertEqual(data1, data2)
+ def tearDown(self):
+ path = self.getFontPath("TestFont1 (UFO1) converted.ufo")
+ if os.path.exists(path):
+ shutil.rmtree(path)
+ path = self.getFontPath("TestFont1 (UFO2) converted.ufo")
+ if os.path.exists(path):
+ shutil.rmtree(path)
+
+ def getFontPath(self, fileName):
+ testdata = os.path.join(os.path.dirname(__file__), "testdata")
+ return os.path.join(testdata, fileName)
+
+ def compareFileStructures(self, path1, path2, expectedInfoData, testFeatures):
+ # result
+ metainfoPath1 = os.path.join(path1, "metainfo.plist")
+ fontinfoPath1 = os.path.join(path1, "fontinfo.plist")
+ kerningPath1 = os.path.join(path1, "kerning.plist")
+ groupsPath1 = os.path.join(path1, "groups.plist")
+ libPath1 = os.path.join(path1, "lib.plist")
+ featuresPath1 = os.path.join(path1, "features.plist")
+ glyphsPath1 = os.path.join(path1, "glyphs")
+ glyphsPath1_contents = os.path.join(glyphsPath1, "contents.plist")
+ glyphsPath1_A = os.path.join(glyphsPath1, "A_.glif")
+ glyphsPath1_B = os.path.join(glyphsPath1, "B_.glif")
+ # expected result
+ metainfoPath2 = os.path.join(path2, "metainfo.plist")
+ fontinfoPath2 = os.path.join(path2, "fontinfo.plist")
+ kerningPath2 = os.path.join(path2, "kerning.plist")
+ groupsPath2 = os.path.join(path2, "groups.plist")
+ libPath2 = os.path.join(path2, "lib.plist")
+ featuresPath2 = os.path.join(path2, "features.plist")
+ glyphsPath2 = os.path.join(path2, "glyphs")
+ glyphsPath2_contents = os.path.join(glyphsPath2, "contents.plist")
+ glyphsPath2_A = os.path.join(glyphsPath2, "A_.glif")
+ glyphsPath2_B = os.path.join(glyphsPath2, "B_.glif")
+ # look for existence
+ self.assertEqual(os.path.exists(metainfoPath1), True)
+ self.assertEqual(os.path.exists(fontinfoPath1), True)
+ self.assertEqual(os.path.exists(kerningPath1), True)
+ self.assertEqual(os.path.exists(groupsPath1), True)
+ self.assertEqual(os.path.exists(libPath1), True)
+ self.assertEqual(os.path.exists(glyphsPath1), True)
+ self.assertEqual(os.path.exists(glyphsPath1_contents), True)
+ self.assertEqual(os.path.exists(glyphsPath1_A), True)
+ self.assertEqual(os.path.exists(glyphsPath1_B), True)
+ if testFeatures:
+ self.assertEqual(os.path.exists(featuresPath1), True)
+        # look for agreement
+ with open(metainfoPath1, "rb") as f:
+ data1 = plistlib.load(f)
+ with open(metainfoPath2, "rb") as f:
+ data2 = plistlib.load(f)
+ self.assertEqual(data1, data2)
+ with open(fontinfoPath1, "rb") as f:
+ data1 = plistlib.load(f)
+ self.assertEqual(sorted(data1.items()), sorted(expectedInfoData.items()))
+ with open(kerningPath1, "rb") as f:
+ data1 = plistlib.load(f)
+ with open(kerningPath2, "rb") as f:
+ data2 = plistlib.load(f)
+ self.assertEqual(data1, data2)
+ with open(groupsPath1, "rb") as f:
+ data1 = plistlib.load(f)
+ with open(groupsPath2, "rb") as f:
+ data2 = plistlib.load(f)
+ self.assertEqual(data1, data2)
+ with open(libPath1, "rb") as f:
+ data1 = plistlib.load(f)
+ with open(libPath2, "rb") as f:
+ data2 = plistlib.load(f)
+ if "UFO1" in libPath1:
+ for key in removeFromFormatVersion1Lib:
+ if key in data1:
+ del data1[key]
+ if "UFO1" in libPath2:
+ for key in removeFromFormatVersion1Lib:
+ if key in data2:
+ del data2[key]
+ self.assertEqual(data1, data2)
+ with open(glyphsPath1_contents, "rb") as f:
+ data1 = plistlib.load(f)
+ with open(glyphsPath2_contents, "rb") as f:
+ data2 = plistlib.load(f)
+ self.assertEqual(data1, data2)
+ with open(glyphsPath1_A, "rb") as f:
+ data1 = plistlib.load(f)
+ with open(glyphsPath2_A, "rb") as f:
+ data2 = plistlib.load(f)
+ self.assertEqual(data1, data2)
+ with open(glyphsPath1_B, "rb") as f:
+ data1 = plistlib.load(f)
+ with open(glyphsPath2_B, "rb") as f:
+ data2 = plistlib.load(f)
+ self.assertEqual(data1, data2)
# ---------------------
# kerning up conversion
# ---------------------
-class TestInfoObject: pass
-
-
-class KerningUpConversionTestCase(unittest.TestCase):
-
- expectedKerning = {
- ("public.kern1.BGroup", "public.kern2.CGroup"): 7,
- ("public.kern1.BGroup", "public.kern2.DGroup"): 8,
- ("public.kern1.BGroup", "A"): 5,
- ("public.kern1.BGroup", "B"): 6,
- ("public.kern1.CGroup", "public.kern2.CGroup"): 11,
- ("public.kern1.CGroup", "public.kern2.DGroup"): 12,
- ("public.kern1.CGroup", "A"): 9,
- ("public.kern1.CGroup", "B"): 10,
- ("A", "public.kern2.CGroup"): 3,
- ("A", "public.kern2.DGroup"): 4,
- ("A", "A"): 1,
- ("A", "B"): 2,
- ("X", "A"): 13,
- ("X", "public.kern2.CGroup"): 14
- }
-
- expectedGroups = {
- "BGroup": ["B"],
- "CGroup": ["C", "Ccedilla"],
- "DGroup": ["D"],
- "public.kern1.BGroup": ["B"],
- "public.kern1.CGroup": ["C", "Ccedilla"],
- "public.kern2.CGroup": ["C", "Ccedilla"],
- "public.kern2.DGroup": ["D"],
- "Not A Kerning Group" : ["A"],
- "X": ["X", "X.sc"]
- }
-
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.ufoPath = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def makeUFO(self, formatVersion):
- self.clearUFO()
- if not os.path.exists(self.ufoPath):
- os.mkdir(self.ufoPath)
-
- # glyphs
- glyphsPath = os.path.join(self.ufoPath, "glyphs")
- if not os.path.exists(glyphsPath):
- os.mkdir(glyphsPath)
- glyphFile = "X_.glif"
- glyphsContents = dict(X=glyphFile)
- path = os.path.join(glyphsPath, "contents.plist")
- with open(path, "wb") as f:
- plistlib.dump(glyphsContents, f)
- path = os.path.join(glyphsPath, glyphFile)
- with open(path, "w") as f:
- f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
- # metainfo.plist
- metaInfo = dict(creator="test", formatVersion=formatVersion)
- path = os.path.join(self.ufoPath, "metainfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(metaInfo, f)
- # kerning
- kerning = {
- "A" : {
- "A" : 1,
- "B" : 2,
- "CGroup" : 3,
- "DGroup" : 4
- },
- "BGroup" : {
- "A" : 5,
- "B" : 6,
- "CGroup" : 7,
- "DGroup" : 8
- },
- "CGroup" : {
- "A" : 9,
- "B" : 10,
- "CGroup" : 11,
- "DGroup" : 12
- },
- "X": {
- "A" : 13,
- "CGroup" : 14
- }
- }
- path = os.path.join(self.ufoPath, "kerning.plist")
- with open(path, "wb") as f:
- plistlib.dump(kerning, f)
- # groups
- groups = {
- "BGroup" : ["B"],
- "CGroup" : ["C", "Ccedilla"],
- "DGroup" : ["D"],
- "Not A Kerning Group" : ["A"],
- "X" : ["X", "X.sc"] # a group with a name that is also a glyph name
- }
- path = os.path.join(self.ufoPath, "groups.plist")
- with open(path, "wb") as f:
- plistlib.dump(groups, f)
- # font info
- fontInfo = {
- "familyName" : "Test"
- }
- path = os.path.join(self.ufoPath, "fontinfo.plist")
- with open(path, "wb") as f:
- plistlib.dump(fontInfo, f)
+class TestInfoObject:
+ pass
- def clearUFO(self):
- if os.path.exists(self.ufoPath):
- shutil.rmtree(self.ufoPath)
- def testUFO1(self):
- self.makeUFO(formatVersion=2)
- reader = UFOReader(self.ufoPath, validate=True)
- kerning = reader.readKerning()
- self.assertEqual(self.expectedKerning, kerning)
- groups = reader.readGroups()
- self.assertEqual(self.expectedGroups, groups)
- info = TestInfoObject()
- reader.readInfo(info)
-
- def testUFO2(self):
- self.makeUFO(formatVersion=2)
- reader = UFOReader(self.ufoPath, validate=True)
- kerning = reader.readKerning()
- self.assertEqual(self.expectedKerning, kerning)
- groups = reader.readGroups()
- self.assertEqual(self.expectedGroups, groups)
- info = TestInfoObject()
- reader.readInfo(info)
+class KerningUpConversionTestCase(unittest.TestCase):
+ expectedKerning = {
+ ("public.kern1.BGroup", "public.kern2.CGroup"): 7,
+ ("public.kern1.BGroup", "public.kern2.DGroup"): 8,
+ ("public.kern1.BGroup", "A"): 5,
+ ("public.kern1.BGroup", "B"): 6,
+ ("public.kern1.CGroup", "public.kern2.CGroup"): 11,
+ ("public.kern1.CGroup", "public.kern2.DGroup"): 12,
+ ("public.kern1.CGroup", "A"): 9,
+ ("public.kern1.CGroup", "B"): 10,
+ ("A", "public.kern2.CGroup"): 3,
+ ("A", "public.kern2.DGroup"): 4,
+ ("A", "A"): 1,
+ ("A", "B"): 2,
+ ("X", "A"): 13,
+ ("X", "public.kern2.CGroup"): 14,
+ }
+
+ expectedGroups = {
+ "BGroup": ["B"],
+ "CGroup": ["C", "Ccedilla"],
+ "DGroup": ["D"],
+ "public.kern1.BGroup": ["B"],
+ "public.kern1.CGroup": ["C", "Ccedilla"],
+ "public.kern2.CGroup": ["C", "Ccedilla"],
+ "public.kern2.DGroup": ["D"],
+ "Not A Kerning Group": ["A"],
+ "X": ["X", "X.sc"],
+ }
+
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.ufoPath = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def makeUFO(self, formatVersion):
+ self.clearUFO()
+ if not os.path.exists(self.ufoPath):
+ os.mkdir(self.ufoPath)
+
+ # glyphs
+ glyphsPath = os.path.join(self.ufoPath, "glyphs")
+ if not os.path.exists(glyphsPath):
+ os.mkdir(glyphsPath)
+ glyphFile = "X_.glif"
+ glyphsContents = dict(X=glyphFile)
+ path = os.path.join(glyphsPath, "contents.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(glyphsContents, f)
+ path = os.path.join(glyphsPath, glyphFile)
+ with open(path, "w") as f:
+ f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
+
+ # metainfo.plist
+ metaInfo = dict(creator="test", formatVersion=formatVersion)
+ path = os.path.join(self.ufoPath, "metainfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(metaInfo, f)
+ # kerning
+ kerning = {
+ "A": {"A": 1, "B": 2, "CGroup": 3, "DGroup": 4},
+ "BGroup": {"A": 5, "B": 6, "CGroup": 7, "DGroup": 8},
+ "CGroup": {"A": 9, "B": 10, "CGroup": 11, "DGroup": 12},
+ "X": {"A": 13, "CGroup": 14},
+ }
+ path = os.path.join(self.ufoPath, "kerning.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(kerning, f)
+ # groups
+ groups = {
+ "BGroup": ["B"],
+ "CGroup": ["C", "Ccedilla"],
+ "DGroup": ["D"],
+ "Not A Kerning Group": ["A"],
+ "X": ["X", "X.sc"], # a group with a name that is also a glyph name
+ }
+ path = os.path.join(self.ufoPath, "groups.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(groups, f)
+ # font info
+ fontInfo = {"familyName": "Test"}
+ path = os.path.join(self.ufoPath, "fontinfo.plist")
+ with open(path, "wb") as f:
+ plistlib.dump(fontInfo, f)
+
+ def clearUFO(self):
+ if os.path.exists(self.ufoPath):
+ shutil.rmtree(self.ufoPath)
+
+ def testUFO1(self):
+ self.makeUFO(formatVersion=2)
+ reader = UFOReader(self.ufoPath, validate=True)
+ kerning = reader.readKerning()
+ self.assertEqual(self.expectedKerning, kerning)
+ groups = reader.readGroups()
+ self.assertEqual(self.expectedGroups, groups)
+ info = TestInfoObject()
+ reader.readInfo(info)
+
+ def testUFO2(self):
+ self.makeUFO(formatVersion=2)
+ reader = UFOReader(self.ufoPath, validate=True)
+ kerning = reader.readKerning()
+ self.assertEqual(self.expectedKerning, kerning)
+ groups = reader.readGroups()
+ self.assertEqual(self.expectedGroups, groups)
+ info = TestInfoObject()
+ reader.readInfo(info)
class KerningDownConversionTestCase(unittest.TestCase):
-
- expectedKerning = {
- ("public.kern1.BGroup", "public.kern2.CGroup"): 7,
- ("public.kern1.BGroup", "public.kern2.DGroup"): 8,
- ("public.kern1.BGroup", "A"): 5,
- ("public.kern1.BGroup", "B"): 6,
- ("public.kern1.CGroup", "public.kern2.CGroup"): 11,
- ("public.kern1.CGroup", "public.kern2.DGroup"): 12,
- ("public.kern1.CGroup", "A"): 9,
- ("public.kern1.CGroup", "B"): 10,
- ("A", "public.kern2.CGroup"): 3,
- ("A", "public.kern2.DGroup"): 4,
- ("A", "A"): 1,
- ("A", "B"): 2
- }
-
- groups = {
- "BGroup": ["B"],
- "CGroup": ["C"],
- "DGroup": ["D"],
- "public.kern1.BGroup": ["B"],
- "public.kern1.CGroup": ["C", "Ccedilla"],
- "public.kern2.CGroup": ["C", "Ccedilla"],
- "public.kern2.DGroup": ["D"],
- "Not A Kerning Group" : ["A"]
- }
- expectedWrittenGroups = {
- "BGroup": ["B"],
- "CGroup": ["C", "Ccedilla"],
- "DGroup": ["D"],
- "Not A Kerning Group" : ["A"]
- }
-
- kerning = {
- ("public.kern1.BGroup", "public.kern2.CGroup"): 7,
- ("public.kern1.BGroup", "public.kern2.DGroup"): 8,
- ("public.kern1.BGroup", "A"): 5,
- ("public.kern1.BGroup", "B"): 6,
- ("public.kern1.CGroup", "public.kern2.CGroup"): 11,
- ("public.kern1.CGroup", "public.kern2.DGroup"): 12,
- ("public.kern1.CGroup", "A"): 9,
- ("public.kern1.CGroup", "B"): 10,
- ("A", "public.kern2.CGroup"): 3,
- ("A", "public.kern2.DGroup"): 4,
- ("A", "A"): 1,
- ("A", "B"): 2
- }
- expectedWrittenKerning = {
- "BGroup" : {
- "CGroup" : 7,
- "DGroup" : 8,
- "A" : 5,
- "B" : 6
- },
- "CGroup" : {
- "CGroup" : 11,
- "DGroup" : 12,
- "A" : 9,
- "B" : 10
- },
- "A" : {
- "CGroup" : 3,
- "DGroup" : 4,
- "A" : 1,
- "B" : 2
- }
- }
-
-
- downConversionMapping = {
- "side1" : {
- "BGroup" : "public.kern1.BGroup",
- "CGroup" : "public.kern1.CGroup"
- },
- "side2" : {
- "CGroup" : "public.kern2.CGroup",
- "DGroup" : "public.kern2.DGroup"
- }
- }
-
- def setUp(self):
- self.tempDir = tempfile.mktemp()
- os.mkdir(self.tempDir)
- self.dstDir = os.path.join(self.tempDir, "test.ufo")
-
- def tearDown(self):
- shutil.rmtree(self.tempDir)
-
- def tearDownUFO(self):
- shutil.rmtree(self.dstDir)
-
- def testWrite(self):
- writer = UFOWriter(self.dstDir, formatVersion=2)
- writer.setKerningGroupConversionRenameMaps(self.downConversionMapping)
- writer.writeKerning(self.kerning)
- writer.writeGroups(self.groups)
- # test groups
- path = os.path.join(self.dstDir, "groups.plist")
- with open(path, "rb") as f:
- writtenGroups = plistlib.load(f)
- self.assertEqual(writtenGroups, self.expectedWrittenGroups)
- # test kerning
- path = os.path.join(self.dstDir, "kerning.plist")
- with open(path, "rb") as f:
- writtenKerning = plistlib.load(f)
- self.assertEqual(writtenKerning, self.expectedWrittenKerning)
- self.tearDownUFO()
+ expectedKerning = {
+ ("public.kern1.BGroup", "public.kern2.CGroup"): 7,
+ ("public.kern1.BGroup", "public.kern2.DGroup"): 8,
+ ("public.kern1.BGroup", "A"): 5,
+ ("public.kern1.BGroup", "B"): 6,
+ ("public.kern1.CGroup", "public.kern2.CGroup"): 11,
+ ("public.kern1.CGroup", "public.kern2.DGroup"): 12,
+ ("public.kern1.CGroup", "A"): 9,
+ ("public.kern1.CGroup", "B"): 10,
+ ("A", "public.kern2.CGroup"): 3,
+ ("A", "public.kern2.DGroup"): 4,
+ ("A", "A"): 1,
+ ("A", "B"): 2,
+ }
+
+ groups = {
+ "BGroup": ["B"],
+ "CGroup": ["C"],
+ "DGroup": ["D"],
+ "public.kern1.BGroup": ["B"],
+ "public.kern1.CGroup": ["C", "Ccedilla"],
+ "public.kern2.CGroup": ["C", "Ccedilla"],
+ "public.kern2.DGroup": ["D"],
+ "Not A Kerning Group": ["A"],
+ }
+ expectedWrittenGroups = {
+ "BGroup": ["B"],
+ "CGroup": ["C", "Ccedilla"],
+ "DGroup": ["D"],
+ "Not A Kerning Group": ["A"],
+ }
+
+ kerning = {
+ ("public.kern1.BGroup", "public.kern2.CGroup"): 7,
+ ("public.kern1.BGroup", "public.kern2.DGroup"): 8,
+ ("public.kern1.BGroup", "A"): 5,
+ ("public.kern1.BGroup", "B"): 6,
+ ("public.kern1.CGroup", "public.kern2.CGroup"): 11,
+ ("public.kern1.CGroup", "public.kern2.DGroup"): 12,
+ ("public.kern1.CGroup", "A"): 9,
+ ("public.kern1.CGroup", "B"): 10,
+ ("A", "public.kern2.CGroup"): 3,
+ ("A", "public.kern2.DGroup"): 4,
+ ("A", "A"): 1,
+ ("A", "B"): 2,
+ }
+ expectedWrittenKerning = {
+ "BGroup": {"CGroup": 7, "DGroup": 8, "A": 5, "B": 6},
+ "CGroup": {"CGroup": 11, "DGroup": 12, "A": 9, "B": 10},
+ "A": {"CGroup": 3, "DGroup": 4, "A": 1, "B": 2},
+ }
+
+ downConversionMapping = {
+ "side1": {"BGroup": "public.kern1.BGroup", "CGroup": "public.kern1.CGroup"},
+ "side2": {"CGroup": "public.kern2.CGroup", "DGroup": "public.kern2.DGroup"},
+ }
+
+ def setUp(self):
+ self.tempDir = tempfile.mktemp()
+ os.mkdir(self.tempDir)
+ self.dstDir = os.path.join(self.tempDir, "test.ufo")
+
+ def tearDown(self):
+ shutil.rmtree(self.tempDir)
+
+ def tearDownUFO(self):
+ shutil.rmtree(self.dstDir)
+
+ def testWrite(self):
+ writer = UFOWriter(self.dstDir, formatVersion=2)
+ writer.setKerningGroupConversionRenameMaps(self.downConversionMapping)
+ writer.writeKerning(self.kerning)
+ writer.writeGroups(self.groups)
+ # test groups
+ path = os.path.join(self.dstDir, "groups.plist")
+ with open(path, "rb") as f:
+ writtenGroups = plistlib.load(f)
+ self.assertEqual(writtenGroups, self.expectedWrittenGroups)
+ # test kerning
+ path = os.path.join(self.dstDir, "kerning.plist")
+ with open(path, "rb") as f:
+ writtenKerning = plistlib.load(f)
+ self.assertEqual(writtenKerning, self.expectedWrittenKerning)
+ self.tearDownUFO()
diff --git a/Tests/ufoLib/UFOZ_test.py b/Tests/ufoLib/UFOZ_test.py
index 6ea39e9a..e2b35034 100644
--- a/Tests/ufoLib/UFOZ_test.py
+++ b/Tests/ufoLib/UFOZ_test.py
@@ -12,9 +12,7 @@ import pytest
import warnings
-TESTDATA = fs.osfs.OSFS(
- os.path.join(os.path.dirname(__file__), "testdata")
-)
+TESTDATA = fs.osfs.OSFS(os.path.join(os.path.dirname(__file__), "testdata"))
TEST_UFO3 = "TestFont1 (UFO3).ufo"
TEST_UFOZ = "TestFont1 (UFO3).ufoz"
@@ -38,7 +36,6 @@ def testufoz():
class TestUFOZ:
-
def test_read(self, testufoz):
with UFOReader(testufoz) as reader:
assert reader.fileStructure == UFOFileStructure.ZIP
@@ -52,9 +49,7 @@ class TestUFOZ:
def test_pathlike(testufo):
-
class PathLike:
-
def __init__(self, s):
self._path = s
@@ -84,7 +79,6 @@ def memufo():
class TestMemoryFS:
-
def test_init_reader(self, memufo):
with UFOReader(memufo) as reader:
assert reader.formatVersion == 3
diff --git a/Tests/ufoLib/__init__.py b/Tests/ufoLib/__init__.py
index e69de29b..e563776a 100644
--- a/Tests/ufoLib/__init__.py
+++ b/Tests/ufoLib/__init__.py
@@ -0,0 +1,3 @@
+import pytest
+
+pytest.importorskip("fontTools.ufoLib")
diff --git a/Tests/ufoLib/filenames_test.py b/Tests/ufoLib/filenames_test.py
index bad41353..22fb10b4 100644
--- a/Tests/ufoLib/filenames_test.py
+++ b/Tests/ufoLib/filenames_test.py
@@ -3,7 +3,6 @@ from fontTools.ufoLib.filenames import userNameToFileName, handleClash1, handleC
class TestFilenames(unittest.TestCase):
-
def test_userNameToFileName(self):
self.assertEqual(userNameToFileName("a"), "a")
self.assertEqual(userNameToFileName("A"), "A_")
@@ -21,8 +20,7 @@ class TestFilenames(unittest.TestCase):
self.assertEqual(userNameToFileName("t_h"), "t_h")
self.assertEqual(userNameToFileName("F_F_I"), "F__F__I_")
self.assertEqual(userNameToFileName("f_f_i"), "f_f_i")
- self.assertEqual(userNameToFileName("Aacute_V.swash"),
- "A_acute_V_.swash")
+ self.assertEqual(userNameToFileName("Aacute_V.swash"), "A_acute_V_.swash")
self.assertEqual(userNameToFileName(".notdef"), "_notdef")
self.assertEqual(userNameToFileName("con"), "_con")
self.assertEqual(userNameToFileName("CON"), "C_O_N_")
@@ -60,25 +58,22 @@ class TestFilenames(unittest.TestCase):
e = list(existing)
self.assertEqual(
- handleClash1(userName="A" * 5, existing=e, prefix=prefix,
- suffix=suffix),
- '00000.AAAAA000000000000001.0000000000'
+ handleClash1(userName="A" * 5, existing=e, prefix=prefix, suffix=suffix),
+ "00000.AAAAA000000000000001.0000000000",
)
e = list(existing)
e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
self.assertEqual(
- handleClash1(userName="A" * 5, existing=e, prefix=prefix,
- suffix=suffix),
- '00000.AAAAA000000000000002.0000000000'
+ handleClash1(userName="A" * 5, existing=e, prefix=prefix, suffix=suffix),
+ "00000.AAAAA000000000000002.0000000000",
)
e = list(existing)
e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
self.assertEqual(
- handleClash1(userName="A" * 5, existing=e, prefix=prefix,
- suffix=suffix),
- '00000.AAAAA000000000000001.0000000000'
+ handleClash1(userName="A" * 5, existing=e, prefix=prefix, suffix=suffix),
+ "00000.AAAAA000000000000001.0000000000",
)
def test_handleClash2(self):
@@ -89,19 +84,17 @@ class TestFilenames(unittest.TestCase):
e = list(existing)
self.assertEqual(
handleClash2(existing=e, prefix=prefix, suffix=suffix),
- '00000.100.0000000000'
+ "00000.100.0000000000",
)
e = list(existing)
e.remove(prefix + "1" + suffix)
self.assertEqual(
- handleClash2(existing=e, prefix=prefix, suffix=suffix),
- '00000.1.0000000000'
+ handleClash2(existing=e, prefix=prefix, suffix=suffix), "00000.1.0000000000"
)
e = list(existing)
e.remove(prefix + "2" + suffix)
self.assertEqual(
- handleClash2(existing=e, prefix=prefix, suffix=suffix),
- '00000.2.0000000000'
+ handleClash2(existing=e, prefix=prefix, suffix=suffix), "00000.2.0000000000"
)
diff --git a/Tests/ufoLib/glifLib_test.py b/Tests/ufoLib/glifLib_test.py
index 485c2bd9..8f48168d 100644
--- a/Tests/ufoLib/glifLib_test.py
+++ b/Tests/ufoLib/glifLib_test.py
@@ -3,12 +3,20 @@ import os
import tempfile
import shutil
import unittest
+from pathlib import Path
from io import open
from .testSupport import getDemoFontGlyphSetPath
from fontTools.ufoLib.glifLib import (
- GlyphSet, glyphNameToFileName, readGlyphFromString, writeGlyphToString,
+ GlyphSet,
+ glyphNameToFileName,
+ readGlyphFromString,
+ writeGlyphToString,
+)
+from fontTools.ufoLib.errors import (
+ GlifLibError,
+ UnsupportedGLIFFormat,
+ UnsupportedUFOFormat,
)
-from fontTools.ufoLib.errors import GlifLibError, UnsupportedGLIFFormat, UnsupportedUFOFormat
from fontTools.misc.etree import XML_DECLARATION
from fontTools.pens.recordingPen import RecordingPointPen
import pytest
@@ -17,191 +25,223 @@ GLYPHSETDIR = getDemoFontGlyphSetPath()
class GlyphSetTests(unittest.TestCase):
-
- def setUp(self):
- self.dstDir = tempfile.mktemp()
- os.mkdir(self.dstDir)
-
- def tearDown(self):
- shutil.rmtree(self.dstDir)
-
- def testRoundTrip(self):
- import difflib
- srcDir = GLYPHSETDIR
- dstDir = self.dstDir
- src = GlyphSet(srcDir, ufoFormatVersion=2, validateRead=True, validateWrite=True)
- dst = GlyphSet(dstDir, ufoFormatVersion=2, validateRead=True, validateWrite=True)
- for glyphName in src.keys():
- g = src[glyphName]
- g.drawPoints(None) # load attrs
- dst.writeGlyph(glyphName, g, g.drawPoints)
- # compare raw file data:
- for glyphName in sorted(src.keys()):
- fileName = src.contents[glyphName]
- with open(os.path.join(srcDir, fileName), "r") as f:
- org = f.read()
- with open(os.path.join(dstDir, fileName), "r") as f:
- new = f.read()
- added = []
- removed = []
- for line in difflib.unified_diff(
- org.split("\n"), new.split("\n")):
- if line.startswith("+ "):
- added.append(line[1:])
- elif line.startswith("- "):
- removed.append(line[1:])
- self.assertEqual(
- added, removed,
- "%s.glif file differs after round tripping" % glyphName)
-
- def testContentsExist(self):
- with self.assertRaises(GlifLibError):
- GlyphSet(
- self.dstDir,
- ufoFormatVersion=2,
- validateRead=True,
- validateWrite=True,
- expectContentsFile=True,
- )
-
- def testRebuildContents(self):
- gset = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
- contents = gset.contents
- gset.rebuildContents()
- self.assertEqual(contents, gset.contents)
-
- def testReverseContents(self):
- gset = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
- d = {}
- for k, v in gset.getReverseContents().items():
- d[v] = k
- org = {}
- for k, v in gset.contents.items():
- org[k] = v.lower()
- self.assertEqual(d, org)
-
- def testReverseContents2(self):
- src = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
- dst = GlyphSet(self.dstDir, validateRead=True, validateWrite=True)
- dstMap = dst.getReverseContents()
- self.assertEqual(dstMap, {})
- for glyphName in src.keys():
- g = src[glyphName]
- g.drawPoints(None) # load attrs
- dst.writeGlyph(glyphName, g, g.drawPoints)
- self.assertNotEqual(dstMap, {})
- srcMap = dict(src.getReverseContents()) # copy
- self.assertEqual(dstMap, srcMap)
- del srcMap["a.glif"]
- dst.deleteGlyph("a")
- self.assertEqual(dstMap, srcMap)
-
- def testCustomFileNamingScheme(self):
- def myGlyphNameToFileName(glyphName, glyphSet):
- return "prefix" + glyphNameToFileName(glyphName, glyphSet)
- src = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
- dst = GlyphSet(self.dstDir, myGlyphNameToFileName, validateRead=True, validateWrite=True)
- for glyphName in src.keys():
- g = src[glyphName]
- g.drawPoints(None) # load attrs
- dst.writeGlyph(glyphName, g, g.drawPoints)
- d = {}
- for k, v in src.contents.items():
- d[k] = "prefix" + v
- self.assertEqual(d, dst.contents)
-
- def testGetUnicodes(self):
- src = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
- unicodes = src.getUnicodes()
- for glyphName in src.keys():
- g = src[glyphName]
- g.drawPoints(None) # load attrs
- if not hasattr(g, "unicodes"):
- self.assertEqual(unicodes[glyphName], [])
- else:
- self.assertEqual(g.unicodes, unicodes[glyphName])
+ def setUp(self):
+ self.dstDir = tempfile.mktemp()
+ os.mkdir(self.dstDir)
+
+ def tearDown(self):
+ shutil.rmtree(self.dstDir)
+
+ def testRoundTrip(self):
+ import difflib
+
+ srcDir = GLYPHSETDIR
+ dstDir = self.dstDir
+ src = GlyphSet(
+ srcDir, ufoFormatVersion=2, validateRead=True, validateWrite=True
+ )
+ dst = GlyphSet(
+ dstDir, ufoFormatVersion=2, validateRead=True, validateWrite=True
+ )
+ for glyphName in src.keys():
+ g = src[glyphName]
+ g.drawPoints(None) # load attrs
+ dst.writeGlyph(glyphName, g, g.drawPoints)
+ # compare raw file data:
+ for glyphName in sorted(src.keys()):
+ fileName = src.contents[glyphName]
+ with open(os.path.join(srcDir, fileName), "r") as f:
+ org = f.read()
+ with open(os.path.join(dstDir, fileName), "r") as f:
+ new = f.read()
+ added = []
+ removed = []
+ for line in difflib.unified_diff(org.split("\n"), new.split("\n")):
+ if line.startswith("+ "):
+ added.append(line[1:])
+ elif line.startswith("- "):
+ removed.append(line[1:])
+ self.assertEqual(
+ added, removed, "%s.glif file differs after round tripping" % glyphName
+ )
+
+ def testContentsExist(self):
+ with self.assertRaises(GlifLibError):
+ GlyphSet(
+ self.dstDir,
+ ufoFormatVersion=2,
+ validateRead=True,
+ validateWrite=True,
+ expectContentsFile=True,
+ )
+
+ def testRebuildContents(self):
+ gset = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
+ contents = gset.contents
+ gset.rebuildContents()
+ self.assertEqual(contents, gset.contents)
+
+ def testReverseContents(self):
+ gset = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
+ d = {}
+ for k, v in gset.getReverseContents().items():
+ d[v] = k
+ org = {}
+ for k, v in gset.contents.items():
+ org[k] = v.lower()
+ self.assertEqual(d, org)
+
+ def testReverseContents2(self):
+ src = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
+ dst = GlyphSet(self.dstDir, validateRead=True, validateWrite=True)
+ dstMap = dst.getReverseContents()
+ self.assertEqual(dstMap, {})
+ for glyphName in src.keys():
+ g = src[glyphName]
+ g.drawPoints(None) # load attrs
+ dst.writeGlyph(glyphName, g, g.drawPoints)
+ self.assertNotEqual(dstMap, {})
+ srcMap = dict(src.getReverseContents()) # copy
+ self.assertEqual(dstMap, srcMap)
+ del srcMap["a.glif"]
+ dst.deleteGlyph("a")
+ self.assertEqual(dstMap, srcMap)
+
+ def testCustomFileNamingScheme(self):
+ def myGlyphNameToFileName(glyphName, glyphSet):
+ return "prefix" + glyphNameToFileName(glyphName, glyphSet)
+
+ src = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
+ dst = GlyphSet(
+ self.dstDir, myGlyphNameToFileName, validateRead=True, validateWrite=True
+ )
+ for glyphName in src.keys():
+ g = src[glyphName]
+ g.drawPoints(None) # load attrs
+ dst.writeGlyph(glyphName, g, g.drawPoints)
+ d = {}
+ for k, v in src.contents.items():
+ d[k] = "prefix" + v
+ self.assertEqual(d, dst.contents)
+
+ def testGetUnicodes(self):
+ src = GlyphSet(GLYPHSETDIR, validateRead=True, validateWrite=True)
+ unicodes = src.getUnicodes()
+ for glyphName in src.keys():
+ g = src[glyphName]
+ g.drawPoints(None) # load attrs
+ if not hasattr(g, "unicodes"):
+ self.assertEqual(unicodes[glyphName], [])
+ else:
+ self.assertEqual(g.unicodes, unicodes[glyphName])
+
+ def testReadGlyphInvalidXml(self):
+ """Test that calling readGlyph() to read a .glif with invalid XML raises
+ a library error, instead of an exception from the XML dependency that is
+ used internally. In addition, check that the raised exception describes
+ the glyph by name and gives the location of the broken .glif file."""
+
+ # Create a glyph set with three empty glyphs.
+ glyph_set = GlyphSet(self.dstDir)
+ glyph_set.writeGlyph("a", _Glyph())
+ glyph_set.writeGlyph("b", _Glyph())
+ glyph_set.writeGlyph("c", _Glyph())
+
+ # Corrupt the XML of /c.
+ invalid_xml = b"<abc></def>"
+ Path(self.dstDir, glyph_set.contents["c"]).write_bytes(invalid_xml)
+
+ # Confirm that reading /a and /b is fine...
+ glyph_set.readGlyph("a", _Glyph())
+ glyph_set.readGlyph("b", _Glyph())
+
+ # ...but that reading /c raises a descriptive library error.
+ expected_message = (
+ r"GLIF contains invalid XML\.\n"
+ r"The issue is in glyph 'c', located in '.*c\.glif.*\."
+ )
+ with pytest.raises(GlifLibError, match=expected_message):
+ glyph_set.readGlyph("c", _Glyph())
class FileNameTest:
-
- def test_default_file_name_scheme(self):
- assert glyphNameToFileName("a", None) == "a.glif"
- assert glyphNameToFileName("A", None) == "A_.glif"
- assert glyphNameToFileName("Aring", None) == "A_ring.glif"
- assert glyphNameToFileName("F_A_B", None) == "F__A__B_.glif"
- assert glyphNameToFileName("A.alt", None) == "A_.alt.glif"
- assert glyphNameToFileName("A.Alt", None) == "A_.A_lt.glif"
- assert glyphNameToFileName(".notdef", None) == "_notdef.glif"
- assert glyphNameToFileName("T_H", None) =="T__H_.glif"
- assert glyphNameToFileName("T_h", None) =="T__h.glif"
- assert glyphNameToFileName("t_h", None) =="t_h.glif"
- assert glyphNameToFileName("F_F_I", None) == "F__F__I_.glif"
- assert glyphNameToFileName("f_f_i", None) == "f_f_i.glif"
- assert glyphNameToFileName("AE", None) == "A_E_.glif"
- assert glyphNameToFileName("Ae", None) == "A_e.glif"
- assert glyphNameToFileName("ae", None) == "ae.glif"
- assert glyphNameToFileName("aE", None) == "aE_.glif"
- assert glyphNameToFileName("a.alt", None) == "a.alt.glif"
- assert glyphNameToFileName("A.aLt", None) == "A_.aL_t.glif"
- assert glyphNameToFileName("A.alT", None) == "A_.alT_.glif"
- assert glyphNameToFileName("Aacute_V.swash", None) == "A_acute_V_.swash.glif"
- assert glyphNameToFileName(".notdef", None) == "_notdef.glif"
- assert glyphNameToFileName("con", None) == "_con.glif"
- assert glyphNameToFileName("CON", None) == "C_O_N_.glif"
- assert glyphNameToFileName("con.alt", None) == "_con.alt.glif"
- assert glyphNameToFileName("alt.con", None) == "alt._con.glif"
-
- def test_conflicting_case_insensitive_file_names(self, tmp_path):
- src = GlyphSet(GLYPHSETDIR)
- dst = GlyphSet(tmp_path)
- glyph = src["a"]
-
- dst.writeGlyph("a", glyph)
- dst.writeGlyph("A", glyph)
- dst.writeGlyph("a_", glyph)
- dst.deleteGlyph("a_")
- dst.writeGlyph("a_", glyph)
- dst.writeGlyph("A_", glyph)
- dst.writeGlyph("i_j", glyph)
-
- assert dst.contents == {
- 'a': 'a.glif',
- 'A': 'A_.glif',
- 'a_': 'a_000000000000001.glif',
- 'A_': 'A__.glif',
- 'i_j': 'i_j.glif',
- }
-
- # make sure filenames are unique even on case-insensitive filesystems
- assert len({fileName.lower() for fileName in dst.contents.values()}) == 5
+ def test_default_file_name_scheme(self):
+ assert glyphNameToFileName("a", None) == "a.glif"
+ assert glyphNameToFileName("A", None) == "A_.glif"
+ assert glyphNameToFileName("Aring", None) == "A_ring.glif"
+ assert glyphNameToFileName("F_A_B", None) == "F__A__B_.glif"
+ assert glyphNameToFileName("A.alt", None) == "A_.alt.glif"
+ assert glyphNameToFileName("A.Alt", None) == "A_.A_lt.glif"
+ assert glyphNameToFileName(".notdef", None) == "_notdef.glif"
+ assert glyphNameToFileName("T_H", None) == "T__H_.glif"
+ assert glyphNameToFileName("T_h", None) == "T__h.glif"
+ assert glyphNameToFileName("t_h", None) == "t_h.glif"
+ assert glyphNameToFileName("F_F_I", None) == "F__F__I_.glif"
+ assert glyphNameToFileName("f_f_i", None) == "f_f_i.glif"
+ assert glyphNameToFileName("AE", None) == "A_E_.glif"
+ assert glyphNameToFileName("Ae", None) == "A_e.glif"
+ assert glyphNameToFileName("ae", None) == "ae.glif"
+ assert glyphNameToFileName("aE", None) == "aE_.glif"
+ assert glyphNameToFileName("a.alt", None) == "a.alt.glif"
+ assert glyphNameToFileName("A.aLt", None) == "A_.aL_t.glif"
+ assert glyphNameToFileName("A.alT", None) == "A_.alT_.glif"
+ assert glyphNameToFileName("Aacute_V.swash", None) == "A_acute_V_.swash.glif"
+ assert glyphNameToFileName(".notdef", None) == "_notdef.glif"
+ assert glyphNameToFileName("con", None) == "_con.glif"
+ assert glyphNameToFileName("CON", None) == "C_O_N_.glif"
+ assert glyphNameToFileName("con.alt", None) == "_con.alt.glif"
+ assert glyphNameToFileName("alt.con", None) == "alt._con.glif"
+
+ def test_conflicting_case_insensitive_file_names(self, tmp_path):
+ src = GlyphSet(GLYPHSETDIR)
+ dst = GlyphSet(tmp_path)
+ glyph = src["a"]
+
+ dst.writeGlyph("a", glyph)
+ dst.writeGlyph("A", glyph)
+ dst.writeGlyph("a_", glyph)
+ dst.deleteGlyph("a_")
+ dst.writeGlyph("a_", glyph)
+ dst.writeGlyph("A_", glyph)
+ dst.writeGlyph("i_j", glyph)
+
+ assert dst.contents == {
+ "a": "a.glif",
+ "A": "A_.glif",
+ "a_": "a_000000000000001.glif",
+ "A_": "A__.glif",
+ "i_j": "i_j.glif",
+ }
+
+ # make sure filenames are unique even on case-insensitive filesystems
+ assert len({fileName.lower() for fileName in dst.contents.values()}) == 5
class _Glyph:
- pass
+ pass
class ReadWriteFuncTest:
+ def test_roundtrip(self):
+ glyph = _Glyph()
+ glyph.name = "a"
+ glyph.unicodes = [0x0061]
- def test_roundtrip(self):
- glyph = _Glyph()
- glyph.name = "a"
- glyph.unicodes = [0x0061]
-
- s1 = writeGlyphToString(glyph.name, glyph)
+ s1 = writeGlyphToString(glyph.name, glyph)
- glyph2 = _Glyph()
- readGlyphFromString(s1, glyph2)
- assert glyph.__dict__ == glyph2.__dict__
+ glyph2 = _Glyph()
+ readGlyphFromString(s1, glyph2)
+ assert glyph.__dict__ == glyph2.__dict__
- s2 = writeGlyphToString(glyph2.name, glyph2)
- assert s1 == s2
+ s2 = writeGlyphToString(glyph2.name, glyph2)
+ assert s1 == s2
- def test_xml_declaration(self):
- s = writeGlyphToString("a", _Glyph())
- assert s.startswith(XML_DECLARATION % "UTF-8")
+ def test_xml_declaration(self):
+ s = writeGlyphToString("a", _Glyph())
+ assert s.startswith(XML_DECLARATION % "UTF-8")
- def test_parse_xml_remove_comments(self):
- s = b"""<?xml version='1.0' encoding='UTF-8'?>
+ def test_parse_xml_remove_comments(self):
+ s = b"""<?xml version='1.0' encoding='UTF-8'?>
<!-- a comment -->
<glyph name="A" format="2">
<advance width="1290"/>
@@ -210,64 +250,74 @@ class ReadWriteFuncTest:
</glyph>
"""
- g = _Glyph()
- readGlyphFromString(s, g)
+ g = _Glyph()
+ readGlyphFromString(s, g)
- assert g.name == "A"
- assert g.width == 1290
- assert g.unicodes == [0x0041]
+ assert g.name == "A"
+ assert g.width == 1290
+ assert g.unicodes == [0x0041]
- def test_read_unsupported_format_version(self, caplog):
- s = """<?xml version='1.0' encoding='utf-8'?>
+ def test_read_invalid_xml(self):
+ """Test that calling readGlyphFromString() with invalid XML raises a
+ library error, instead of an exception from the XML dependency that is
+ used internally."""
+
+ invalid_xml = b"<abc></def>"
+ empty_glyph = _Glyph()
+
+ with pytest.raises(GlifLibError, match="GLIF contains invalid XML"):
+ readGlyphFromString(invalid_xml, empty_glyph)
+
+ def test_read_unsupported_format_version(self, caplog):
+ s = """<?xml version='1.0' encoding='utf-8'?>
<glyph name="A" format="0" formatMinor="0">
<advance width="500"/>
<unicode hex="0041"/>
</glyph>
"""
- with pytest.raises(UnsupportedGLIFFormat):
- readGlyphFromString(s, _Glyph()) # validate=True by default
+ with pytest.raises(UnsupportedGLIFFormat):
+ readGlyphFromString(s, _Glyph()) # validate=True by default
- with pytest.raises(UnsupportedGLIFFormat):
- readGlyphFromString(s, _Glyph(), validate=True)
+ with pytest.raises(UnsupportedGLIFFormat):
+ readGlyphFromString(s, _Glyph(), validate=True)
- caplog.clear()
- with caplog.at_level(logging.WARNING, logger="fontTools.ufoLib.glifLib"):
- readGlyphFromString(s, _Glyph(), validate=False)
+ caplog.clear()
+ with caplog.at_level(logging.WARNING, logger="fontTools.ufoLib.glifLib"):
+ readGlyphFromString(s, _Glyph(), validate=False)
- assert len(caplog.records) == 1
- assert "Unsupported GLIF format" in caplog.text
- assert "Assuming the latest supported version" in caplog.text
+ assert len(caplog.records) == 1
+ assert "Unsupported GLIF format" in caplog.text
+ assert "Assuming the latest supported version" in caplog.text
- def test_read_allow_format_versions(self):
- s = """<?xml version='1.0' encoding='utf-8'?>
+ def test_read_allow_format_versions(self):
+ s = """<?xml version='1.0' encoding='utf-8'?>
<glyph name="A" format="2">
<advance width="500"/>
<unicode hex="0041"/>
</glyph>
"""
- # these two calls are are equivalent
- readGlyphFromString(s, _Glyph(), formatVersions=[1, 2])
- readGlyphFromString(s, _Glyph(), formatVersions=[(1, 0), (2, 0)])
+ # these two calls are are equivalent
+ readGlyphFromString(s, _Glyph(), formatVersions=[1, 2])
+ readGlyphFromString(s, _Glyph(), formatVersions=[(1, 0), (2, 0)])
- # if at least one supported formatVersion, unsupported ones are ignored
- readGlyphFromString(s, _Glyph(), formatVersions=[(2, 0), (123, 456)])
+ # if at least one supported formatVersion, unsupported ones are ignored
+ readGlyphFromString(s, _Glyph(), formatVersions=[(2, 0), (123, 456)])
- with pytest.raises(
- ValueError,
- match="None of the requested GLIF formatVersions are supported"
- ):
- readGlyphFromString(s, _Glyph(), formatVersions=[0, 2001])
+ with pytest.raises(
+ ValueError, match="None of the requested GLIF formatVersions are supported"
+ ):
+ readGlyphFromString(s, _Glyph(), formatVersions=[0, 2001])
- with pytest.raises(GlifLibError, match="Forbidden GLIF format version"):
- readGlyphFromString(s, _Glyph(), formatVersions=[1])
+ with pytest.raises(GlifLibError, match="Forbidden GLIF format version"):
+ readGlyphFromString(s, _Glyph(), formatVersions=[1])
- def test_read_ensure_x_y(self):
- """Ensure that a proper GlifLibError is raised when point coordinates are
- missing, regardless of validation setting."""
+ def test_read_ensure_x_y(self):
+ """Ensure that a proper GlifLibError is raised when point coordinates are
+ missing, regardless of validation setting."""
- s = """<?xml version='1.0' encoding='utf-8'?>
+ s = """<?xml version='1.0' encoding='utf-8'?>
<glyph name="A" format="2">
<outline>
<contour>
@@ -277,40 +327,41 @@ class ReadWriteFuncTest:
</outline>
</glyph>
"""
- pen = RecordingPointPen()
+ pen = RecordingPointPen()
+
+ with pytest.raises(GlifLibError, match="Required y attribute"):
+ readGlyphFromString(s, _Glyph(), pen)
- with pytest.raises(GlifLibError, match="Required y attribute"):
- readGlyphFromString(s, _Glyph(), pen)
+ with pytest.raises(GlifLibError, match="Required y attribute"):
+ readGlyphFromString(s, _Glyph(), pen, validate=False)
- with pytest.raises(GlifLibError, match="Required y attribute"):
- readGlyphFromString(s, _Glyph(), pen, validate=False)
def test_GlyphSet_unsupported_ufoFormatVersion(tmp_path, caplog):
- with pytest.raises(UnsupportedUFOFormat):
- GlyphSet(tmp_path, ufoFormatVersion=0)
- with pytest.raises(UnsupportedUFOFormat):
- GlyphSet(tmp_path, ufoFormatVersion=(0, 1))
+ with pytest.raises(UnsupportedUFOFormat):
+ GlyphSet(tmp_path, ufoFormatVersion=0)
+ with pytest.raises(UnsupportedUFOFormat):
+ GlyphSet(tmp_path, ufoFormatVersion=(0, 1))
def test_GlyphSet_writeGlyph_formatVersion(tmp_path):
- src = GlyphSet(GLYPHSETDIR)
- dst = GlyphSet(tmp_path, ufoFormatVersion=(2, 0))
- glyph = src["A"]
-
- # no explicit formatVersion passed: use the more recent GLIF formatVersion
- # that is supported by given ufoFormatVersion (GLIF 1 for UFO 2)
- dst.writeGlyph("A", glyph)
- glif = dst.getGLIF("A")
- assert b'format="1"' in glif
- assert b'formatMinor' not in glif # omitted when 0
-
- # explicit, unknown formatVersion
- with pytest.raises(UnsupportedGLIFFormat):
- dst.writeGlyph("A", glyph, formatVersion=(0, 0))
-
- # explicit, known formatVersion but unsupported by given ufoFormatVersion
- with pytest.raises(
- UnsupportedGLIFFormat,
- match="Unsupported GLIF format version .*for UFO format version",
- ):
- dst.writeGlyph("A", glyph, formatVersion=(2, 0))
+ src = GlyphSet(GLYPHSETDIR)
+ dst = GlyphSet(tmp_path, ufoFormatVersion=(2, 0))
+ glyph = src["A"]
+
+ # no explicit formatVersion passed: use the more recent GLIF formatVersion
+ # that is supported by given ufoFormatVersion (GLIF 1 for UFO 2)
+ dst.writeGlyph("A", glyph)
+ glif = dst.getGLIF("A")
+ assert b'format="1"' in glif
+ assert b"formatMinor" not in glif # omitted when 0
+
+ # explicit, unknown formatVersion
+ with pytest.raises(UnsupportedGLIFFormat):
+ dst.writeGlyph("A", glyph, formatVersion=(0, 0))
+
+ # explicit, known formatVersion but unsupported by given ufoFormatVersion
+ with pytest.raises(
+ UnsupportedGLIFFormat,
+ match="Unsupported GLIF format version .*for UFO format version",
+ ):
+ dst.writeGlyph("A", glyph, formatVersion=(2, 0))
diff --git a/Tests/ufoLib/testSupport.py b/Tests/ufoLib/testSupport.py
index 49f6a539..b29e3a15 100755
--- a/Tests/ufoLib/testSupport.py
+++ b/Tests/ufoLib/testSupport.py
@@ -5,663 +5,651 @@ from fontTools.ufoLib.utils import numberTypes
def getDemoFontPath():
- """Return the path to Data/DemoFont.ufo/."""
- testdata = os.path.join(os.path.dirname(__file__), "testdata")
- return os.path.join(testdata, "DemoFont.ufo")
+ """Return the path to Data/DemoFont.ufo/."""
+ testdata = os.path.join(os.path.dirname(__file__), "testdata")
+ return os.path.join(testdata, "DemoFont.ufo")
def getDemoFontGlyphSetPath():
- """Return the path to Data/DemoFont.ufo/glyphs/."""
- return os.path.join(getDemoFontPath(), "glyphs")
+ """Return the path to Data/DemoFont.ufo/glyphs/."""
+ return os.path.join(getDemoFontPath(), "glyphs")
# GLIF test tools
+
class Glyph:
+ def __init__(self):
+ self.name = None
+ self.width = None
+ self.height = None
+ self.unicodes = None
+ self.note = None
+ self.lib = None
+ self.image = None
+ self.guidelines = None
+ self.anchors = None
+ self.outline = []
- def __init__(self):
- self.name = None
- self.width = None
- self.height = None
- self.unicodes = None
- self.note = None
- self.lib = None
- self.image = None
- self.guidelines = None
- self.anchors = None
- self.outline = []
+ def _writePointPenCommand(self, command, args, kwargs):
+ args = _listToString(args)
+ kwargs = _dictToString(kwargs)
+ if args and kwargs:
+ return f"pointPen.{command}(*{args}, **{kwargs})"
+ elif len(args):
+ return f"pointPen.{command}(*{args})"
+ elif len(kwargs):
+ return f"pointPen.{command}(**{kwargs})"
+ else:
+ return "pointPen.%s()" % command
- def _writePointPenCommand(self, command, args, kwargs):
- args = _listToString(args)
- kwargs = _dictToString(kwargs)
- if args and kwargs:
- return f"pointPen.{command}(*{args}, **{kwargs})"
- elif len(args):
- return f"pointPen.{command}(*{args})"
- elif len(kwargs):
- return f"pointPen.{command}(**{kwargs})"
- else:
- return "pointPen.%s()" % command
+ def beginPath(self, **kwargs):
+ self.outline.append(self._writePointPenCommand("beginPath", [], kwargs))
- def beginPath(self, **kwargs):
- self.outline.append(self._writePointPenCommand("beginPath", [], kwargs))
+ def endPath(self):
+ self.outline.append(self._writePointPenCommand("endPath", [], {}))
- def endPath(self):
- self.outline.append(self._writePointPenCommand("endPath", [], {}))
+ def addPoint(self, *args, **kwargs):
+ self.outline.append(self._writePointPenCommand("addPoint", args, kwargs))
- def addPoint(self, *args, **kwargs):
- self.outline.append(self._writePointPenCommand("addPoint", args, kwargs))
+ def addComponent(self, *args, **kwargs):
+ self.outline.append(self._writePointPenCommand("addComponent", args, kwargs))
- def addComponent(self, *args, **kwargs):
- self.outline.append(self._writePointPenCommand("addComponent", args, kwargs))
+ def drawPoints(self, pointPen):
+ if self.outline:
+ py = "\n".join(self.outline)
+ exec(py, {"pointPen": pointPen})
- def drawPoints(self, pointPen):
- if self.outline:
- py = "\n".join(self.outline)
- exec(py, {"pointPen" : pointPen})
+ def py(self):
+ text = []
+ if self.name is not None:
+ text.append('glyph.name = "%s"' % self.name)
+ if self.width:
+ text.append("glyph.width = %r" % self.width)
+ if self.height:
+ text.append("glyph.height = %r" % self.height)
+ if self.unicodes is not None:
+ text.append(
+ "glyph.unicodes = [%s]" % ", ".join([str(i) for i in self.unicodes])
+ )
+ if self.note is not None:
+ text.append('glyph.note = "%s"' % self.note)
+ if self.lib is not None:
+ text.append("glyph.lib = %s" % _dictToString(self.lib))
+ if self.image is not None:
+ text.append("glyph.image = %s" % _dictToString(self.image))
+ if self.guidelines is not None:
+ text.append("glyph.guidelines = %s" % _listToString(self.guidelines))
+ if self.anchors is not None:
+ text.append("glyph.anchors = %s" % _listToString(self.anchors))
+ if self.outline:
+ text += self.outline
+ return "\n".join(text)
- def py(self):
- text = []
- if self.name is not None:
- text.append("glyph.name = \"%s\"" % self.name)
- if self.width:
- text.append("glyph.width = %r" % self.width)
- if self.height:
- text.append("glyph.height = %r" % self.height)
- if self.unicodes is not None:
- text.append("glyph.unicodes = [%s]" % ", ".join([str(i) for i in self.unicodes]))
- if self.note is not None:
- text.append("glyph.note = \"%s\"" % self.note)
- if self.lib is not None:
- text.append("glyph.lib = %s" % _dictToString(self.lib))
- if self.image is not None:
- text.append("glyph.image = %s" % _dictToString(self.image))
- if self.guidelines is not None:
- text.append("glyph.guidelines = %s" % _listToString(self.guidelines))
- if self.anchors is not None:
- text.append("glyph.anchors = %s" % _listToString(self.anchors))
- if self.outline:
- text += self.outline
- return "\n".join(text)
def _dictToString(d):
- text = []
- for key, value in sorted(d.items()):
- if value is None:
- continue
- key = "\"%s\"" % key
- if isinstance(value, dict):
- value = _dictToString(value)
- elif isinstance(value, list):
- value = _listToString(value)
- elif isinstance(value, tuple):
- value = _tupleToString(value)
- elif isinstance(value, numberTypes):
- value = repr(value)
- elif isinstance(value, str):
- value = "\"%s\"" % value
- text.append(f"{key} : {value}")
- if not text:
- return ""
- return "{%s}" % ", ".join(text)
+ text = []
+ for key, value in sorted(d.items()):
+ if value is None:
+ continue
+ key = '"%s"' % key
+ if isinstance(value, dict):
+ value = _dictToString(value)
+ elif isinstance(value, list):
+ value = _listToString(value)
+ elif isinstance(value, tuple):
+ value = _tupleToString(value)
+ elif isinstance(value, numberTypes):
+ value = repr(value)
+ elif isinstance(value, str):
+ value = '"%s"' % value
+ text.append(f"{key} : {value}")
+ if not text:
+ return ""
+ return "{%s}" % ", ".join(text)
+
def _listToString(l):
- text = []
- for value in l:
- if isinstance(value, dict):
- value = _dictToString(value)
- elif isinstance(value, list):
- value = _listToString(value)
- elif isinstance(value, tuple):
- value = _tupleToString(value)
- elif isinstance(value, numberTypes):
- value = repr(value)
- elif isinstance(value, str):
- value = "\"%s\"" % value
- text.append(value)
- if not text:
- return ""
- return "[%s]" % ", ".join(text)
+ text = []
+ for value in l:
+ if isinstance(value, dict):
+ value = _dictToString(value)
+ elif isinstance(value, list):
+ value = _listToString(value)
+ elif isinstance(value, tuple):
+ value = _tupleToString(value)
+ elif isinstance(value, numberTypes):
+ value = repr(value)
+ elif isinstance(value, str):
+ value = '"%s"' % value
+ text.append(value)
+ if not text:
+ return ""
+ return "[%s]" % ", ".join(text)
+
def _tupleToString(t):
- text = []
- for value in t:
- if isinstance(value, dict):
- value = _dictToString(value)
- elif isinstance(value, list):
- value = _listToString(value)
- elif isinstance(value, tuple):
- value = _tupleToString(value)
- elif isinstance(value, numberTypes):
- value = repr(value)
- elif isinstance(value, str):
- value = "\"%s\"" % value
- text.append(value)
- if not text:
- return ""
- return "(%s)" % ", ".join(text)
+ text = []
+ for value in t:
+ if isinstance(value, dict):
+ value = _dictToString(value)
+ elif isinstance(value, list):
+ value = _listToString(value)
+ elif isinstance(value, tuple):
+ value = _tupleToString(value)
+ elif isinstance(value, numberTypes):
+ value = repr(value)
+ elif isinstance(value, str):
+ value = '"%s"' % value
+ text.append(value)
+ if not text:
+ return ""
+ return "(%s)" % ", ".join(text)
+
def stripText(text):
- new = []
- for line in text.strip().splitlines():
- line = line.strip()
- if not line:
- continue
- new.append(line)
- return "\n".join(new)
+ new = []
+ for line in text.strip().splitlines():
+ line = line.strip()
+ if not line:
+ continue
+ new.append(line)
+ return "\n".join(new)
+
# font info values used by several tests
fontInfoVersion1 = {
- "familyName" : "Some Font (Family Name)",
- "styleName" : "Regular (Style Name)",
- "fullName" : "Some Font-Regular (Postscript Full Name)",
- "fontName" : "SomeFont-Regular (Postscript Font Name)",
- "menuName" : "Some Font Regular (Style Map Family Name)",
- "fontStyle" : 64,
- "note" : "A note.",
- "versionMajor" : 1,
- "versionMinor" : 0,
- "year" : 2008,
- "copyright" : "Copyright Some Foundry.",
- "notice" : "Some Font by Some Designer for Some Foundry.",
- "trademark" : "Trademark Some Foundry",
- "license" : "License info for Some Foundry.",
- "licenseURL" : "http://somefoundry.com/license",
- "createdBy" : "Some Foundry",
- "designer" : "Some Designer",
- "designerURL" : "http://somedesigner.com",
- "vendorURL" : "http://somefoundry.com",
- "unitsPerEm" : 1000,
- "ascender" : 750,
- "descender" : -250,
- "capHeight" : 750,
- "xHeight" : 500,
- "defaultWidth" : 400,
- "slantAngle" : -12.5,
- "italicAngle" : -12.5,
- "widthName" : "Medium (normal)",
- "weightName" : "Medium",
- "weightValue" : 500,
- "fondName" : "SomeFont Regular (FOND Name)",
- "otFamilyName" : "Some Font (Preferred Family Name)",
- "otStyleName" : "Regular (Preferred Subfamily Name)",
- "otMacName" : "Some Font Regular (Compatible Full Name)",
- "msCharSet" : 0,
- "fondID" : 15000,
- "uniqueID" : 4000000,
- "ttVendor" : "SOME",
- "ttUniqueID" : "OpenType name Table Unique ID",
- "ttVersion" : "OpenType name Table Version",
+ "familyName": "Some Font (Family Name)",
+ "styleName": "Regular (Style Name)",
+ "fullName": "Some Font-Regular (Postscript Full Name)",
+ "fontName": "SomeFont-Regular (Postscript Font Name)",
+ "menuName": "Some Font Regular (Style Map Family Name)",
+ "fontStyle": 64,
+ "note": "A note.",
+ "versionMajor": 1,
+ "versionMinor": 0,
+ "year": 2008,
+ "copyright": "Copyright Some Foundry.",
+ "notice": "Some Font by Some Designer for Some Foundry.",
+ "trademark": "Trademark Some Foundry",
+ "license": "License info for Some Foundry.",
+ "licenseURL": "http://somefoundry.com/license",
+ "createdBy": "Some Foundry",
+ "designer": "Some Designer",
+ "designerURL": "http://somedesigner.com",
+ "vendorURL": "http://somefoundry.com",
+ "unitsPerEm": 1000,
+ "ascender": 750,
+ "descender": -250,
+ "capHeight": 750,
+ "xHeight": 500,
+ "defaultWidth": 400,
+ "slantAngle": -12.5,
+ "italicAngle": -12.5,
+ "widthName": "Medium (normal)",
+ "weightName": "Medium",
+ "weightValue": 500,
+ "fondName": "SomeFont Regular (FOND Name)",
+ "otFamilyName": "Some Font (Preferred Family Name)",
+ "otStyleName": "Regular (Preferred Subfamily Name)",
+ "otMacName": "Some Font Regular (Compatible Full Name)",
+ "msCharSet": 0,
+ "fondID": 15000,
+ "uniqueID": 4000000,
+ "ttVendor": "SOME",
+ "ttUniqueID": "OpenType name Table Unique ID",
+ "ttVersion": "OpenType name Table Version",
}
fontInfoVersion2 = {
- "familyName" : "Some Font (Family Name)",
- "styleName" : "Regular (Style Name)",
- "styleMapFamilyName" : "Some Font Regular (Style Map Family Name)",
- "styleMapStyleName" : "regular",
- "versionMajor" : 1,
- "versionMinor" : 0,
- "year" : 2008,
- "copyright" : "Copyright Some Foundry.",
- "trademark" : "Trademark Some Foundry",
- "unitsPerEm" : 1000,
- "descender" : -250,
- "xHeight" : 500,
- "capHeight" : 750,
- "ascender" : 750,
- "italicAngle" : -12.5,
- "note" : "A note.",
- "openTypeHeadCreated" : "2000/01/01 00:00:00",
- "openTypeHeadLowestRecPPEM" : 10,
- "openTypeHeadFlags" : [0, 1],
- "openTypeHheaAscender" : 750,
- "openTypeHheaDescender" : -250,
- "openTypeHheaLineGap" : 200,
- "openTypeHheaCaretSlopeRise" : 1,
- "openTypeHheaCaretSlopeRun" : 0,
- "openTypeHheaCaretOffset" : 0,
- "openTypeNameDesigner" : "Some Designer",
- "openTypeNameDesignerURL" : "http://somedesigner.com",
- "openTypeNameManufacturer" : "Some Foundry",
- "openTypeNameManufacturerURL" : "http://somefoundry.com",
- "openTypeNameLicense" : "License info for Some Foundry.",
- "openTypeNameLicenseURL" : "http://somefoundry.com/license",
- "openTypeNameVersion" : "OpenType name Table Version",
- "openTypeNameUniqueID" : "OpenType name Table Unique ID",
- "openTypeNameDescription" : "Some Font by Some Designer for Some Foundry.",
- "openTypeNamePreferredFamilyName" : "Some Font (Preferred Family Name)",
- "openTypeNamePreferredSubfamilyName" : "Regular (Preferred Subfamily Name)",
- "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
- "openTypeNameSampleText" : "Sample Text for Some Font.",
- "openTypeNameWWSFamilyName" : "Some Font (WWS Family Name)",
- "openTypeNameWWSSubfamilyName" : "Regular (WWS Subfamily Name)",
- "openTypeOS2WidthClass" : 5,
- "openTypeOS2WeightClass" : 500,
- "openTypeOS2Selection" : [3],
- "openTypeOS2VendorID" : "SOME",
- "openTypeOS2Panose" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
- "openTypeOS2FamilyClass" : [1, 1],
- "openTypeOS2UnicodeRanges" : [0, 1],
- "openTypeOS2CodePageRanges" : [0, 1],
- "openTypeOS2TypoAscender" : 750,
- "openTypeOS2TypoDescender" : -250,
- "openTypeOS2TypoLineGap" : 200,
- "openTypeOS2WinAscent" : 750,
- "openTypeOS2WinDescent" : 250,
- "openTypeOS2Type" : [],
- "openTypeOS2SubscriptXSize" : 200,
- "openTypeOS2SubscriptYSize" : 400,
- "openTypeOS2SubscriptXOffset" : 0,
- "openTypeOS2SubscriptYOffset" : -100,
- "openTypeOS2SuperscriptXSize" : 200,
- "openTypeOS2SuperscriptYSize" : 400,
- "openTypeOS2SuperscriptXOffset" : 0,
- "openTypeOS2SuperscriptYOffset" : 200,
- "openTypeOS2StrikeoutSize" : 20,
- "openTypeOS2StrikeoutPosition" : 300,
- "openTypeVheaVertTypoAscender" : 750,
- "openTypeVheaVertTypoDescender" : -250,
- "openTypeVheaVertTypoLineGap" : 200,
- "openTypeVheaCaretSlopeRise" : 0,
- "openTypeVheaCaretSlopeRun" : 1,
- "openTypeVheaCaretOffset" : 0,
- "postscriptFontName" : "SomeFont-Regular (Postscript Font Name)",
- "postscriptFullName" : "Some Font-Regular (Postscript Full Name)",
- "postscriptSlantAngle" : -12.5,
- "postscriptUniqueID" : 4000000,
- "postscriptUnderlineThickness" : 20,
- "postscriptUnderlinePosition" : -200,
- "postscriptIsFixedPitch" : False,
- "postscriptBlueValues" : [500, 510],
- "postscriptOtherBlues" : [-250, -260],
- "postscriptFamilyBlues" : [500, 510],
- "postscriptFamilyOtherBlues" : [-250, -260],
- "postscriptStemSnapH" : [100, 120],
- "postscriptStemSnapV" : [80, 90],
- "postscriptBlueFuzz" : 1,
- "postscriptBlueShift" : 7,
- "postscriptBlueScale" : 0.039625,
- "postscriptForceBold" : True,
- "postscriptDefaultWidthX" : 400,
- "postscriptNominalWidthX" : 400,
- "postscriptWeightName" : "Medium",
- "postscriptDefaultCharacter" : ".notdef",
- "postscriptWindowsCharacterSet" : 1,
- "macintoshFONDFamilyID" : 15000,
- "macintoshFONDName" : "SomeFont Regular (FOND Name)",
+ "familyName": "Some Font (Family Name)",
+ "styleName": "Regular (Style Name)",
+ "styleMapFamilyName": "Some Font Regular (Style Map Family Name)",
+ "styleMapStyleName": "regular",
+ "versionMajor": 1,
+ "versionMinor": 0,
+ "year": 2008,
+ "copyright": "Copyright Some Foundry.",
+ "trademark": "Trademark Some Foundry",
+ "unitsPerEm": 1000,
+ "descender": -250,
+ "xHeight": 500,
+ "capHeight": 750,
+ "ascender": 750,
+ "italicAngle": -12.5,
+ "note": "A note.",
+ "openTypeHeadCreated": "2000/01/01 00:00:00",
+ "openTypeHeadLowestRecPPEM": 10,
+ "openTypeHeadFlags": [0, 1],
+ "openTypeHheaAscender": 750,
+ "openTypeHheaDescender": -250,
+ "openTypeHheaLineGap": 200,
+ "openTypeHheaCaretSlopeRise": 1,
+ "openTypeHheaCaretSlopeRun": 0,
+ "openTypeHheaCaretOffset": 0,
+ "openTypeNameDesigner": "Some Designer",
+ "openTypeNameDesignerURL": "http://somedesigner.com",
+ "openTypeNameManufacturer": "Some Foundry",
+ "openTypeNameManufacturerURL": "http://somefoundry.com",
+ "openTypeNameLicense": "License info for Some Foundry.",
+ "openTypeNameLicenseURL": "http://somefoundry.com/license",
+ "openTypeNameVersion": "OpenType name Table Version",
+ "openTypeNameUniqueID": "OpenType name Table Unique ID",
+ "openTypeNameDescription": "Some Font by Some Designer for Some Foundry.",
+ "openTypeNamePreferredFamilyName": "Some Font (Preferred Family Name)",
+ "openTypeNamePreferredSubfamilyName": "Regular (Preferred Subfamily Name)",
+ "openTypeNameCompatibleFullName": "Some Font Regular (Compatible Full Name)",
+ "openTypeNameSampleText": "Sample Text for Some Font.",
+ "openTypeNameWWSFamilyName": "Some Font (WWS Family Name)",
+ "openTypeNameWWSSubfamilyName": "Regular (WWS Subfamily Name)",
+ "openTypeOS2WidthClass": 5,
+ "openTypeOS2WeightClass": 500,
+ "openTypeOS2Selection": [3],
+ "openTypeOS2VendorID": "SOME",
+ "openTypeOS2Panose": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+ "openTypeOS2FamilyClass": [1, 1],
+ "openTypeOS2UnicodeRanges": [0, 1],
+ "openTypeOS2CodePageRanges": [0, 1],
+ "openTypeOS2TypoAscender": 750,
+ "openTypeOS2TypoDescender": -250,
+ "openTypeOS2TypoLineGap": 200,
+ "openTypeOS2WinAscent": 750,
+ "openTypeOS2WinDescent": 250,
+ "openTypeOS2Type": [],
+ "openTypeOS2SubscriptXSize": 200,
+ "openTypeOS2SubscriptYSize": 400,
+ "openTypeOS2SubscriptXOffset": 0,
+ "openTypeOS2SubscriptYOffset": -100,
+ "openTypeOS2SuperscriptXSize": 200,
+ "openTypeOS2SuperscriptYSize": 400,
+ "openTypeOS2SuperscriptXOffset": 0,
+ "openTypeOS2SuperscriptYOffset": 200,
+ "openTypeOS2StrikeoutSize": 20,
+ "openTypeOS2StrikeoutPosition": 300,
+ "openTypeVheaVertTypoAscender": 750,
+ "openTypeVheaVertTypoDescender": -250,
+ "openTypeVheaVertTypoLineGap": 200,
+ "openTypeVheaCaretSlopeRise": 0,
+ "openTypeVheaCaretSlopeRun": 1,
+ "openTypeVheaCaretOffset": 0,
+ "postscriptFontName": "SomeFont-Regular (Postscript Font Name)",
+ "postscriptFullName": "Some Font-Regular (Postscript Full Name)",
+ "postscriptSlantAngle": -12.5,
+ "postscriptUniqueID": 4000000,
+ "postscriptUnderlineThickness": 20,
+ "postscriptUnderlinePosition": -200,
+ "postscriptIsFixedPitch": False,
+ "postscriptBlueValues": [500, 510],
+ "postscriptOtherBlues": [-250, -260],
+ "postscriptFamilyBlues": [500, 510],
+ "postscriptFamilyOtherBlues": [-250, -260],
+ "postscriptStemSnapH": [100, 120],
+ "postscriptStemSnapV": [80, 90],
+ "postscriptBlueFuzz": 1,
+ "postscriptBlueShift": 7,
+ "postscriptBlueScale": 0.039625,
+ "postscriptForceBold": True,
+ "postscriptDefaultWidthX": 400,
+ "postscriptNominalWidthX": 400,
+ "postscriptWeightName": "Medium",
+ "postscriptDefaultCharacter": ".notdef",
+ "postscriptWindowsCharacterSet": 1,
+ "macintoshFONDFamilyID": 15000,
+ "macintoshFONDName": "SomeFont Regular (FOND Name)",
}
fontInfoVersion3 = {
- "familyName" : "Some Font (Family Name)",
- "styleName" : "Regular (Style Name)",
- "styleMapFamilyName" : "Some Font Regular (Style Map Family Name)",
- "styleMapStyleName" : "regular",
- "versionMajor" : 1,
- "versionMinor" : 0,
- "year" : 2008,
- "copyright" : "Copyright Some Foundry.",
- "trademark" : "Trademark Some Foundry",
- "unitsPerEm" : 1000,
- "descender" : -250,
- "xHeight" : 500,
- "capHeight" : 750,
- "ascender" : 750,
- "italicAngle" : -12.5,
- "note" : "A note.",
- "openTypeGaspRangeRecords" : [
- dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]),
- dict(rangeMaxPPEM=20, rangeGaspBehavior=[1]),
- dict(rangeMaxPPEM=30, rangeGaspBehavior=[2]),
- dict(rangeMaxPPEM=40, rangeGaspBehavior=[3]),
- dict(rangeMaxPPEM=50, rangeGaspBehavior=[0, 1, 2, 3]),
- dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])
- ],
- "openTypeHeadCreated" : "2000/01/01 00:00:00",
- "openTypeHeadLowestRecPPEM" : 10,
- "openTypeHeadFlags" : [0, 1],
- "openTypeHheaAscender" : 750,
- "openTypeHheaDescender" : -250,
- "openTypeHheaLineGap" : 200,
- "openTypeHheaCaretSlopeRise" : 1,
- "openTypeHheaCaretSlopeRun" : 0,
- "openTypeHheaCaretOffset" : 0,
- "openTypeNameDesigner" : "Some Designer",
- "openTypeNameDesignerURL" : "http://somedesigner.com",
- "openTypeNameManufacturer" : "Some Foundry",
- "openTypeNameManufacturerURL" : "http://somefoundry.com",
- "openTypeNameLicense" : "License info for Some Foundry.",
- "openTypeNameLicenseURL" : "http://somefoundry.com/license",
- "openTypeNameVersion" : "OpenType name Table Version",
- "openTypeNameUniqueID" : "OpenType name Table Unique ID",
- "openTypeNameDescription" : "Some Font by Some Designer for Some Foundry.",
- "openTypeNamePreferredFamilyName" : "Some Font (Preferred Family Name)",
- "openTypeNamePreferredSubfamilyName" : "Regular (Preferred Subfamily Name)",
- "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
- "openTypeNameSampleText" : "Sample Text for Some Font.",
- "openTypeNameWWSFamilyName" : "Some Font (WWS Family Name)",
- "openTypeNameWWSSubfamilyName" : "Regular (WWS Subfamily Name)",
- "openTypeNameRecords" : [
- dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record."),
- dict(nameID=2, platformID=1, encodingID=1, languageID=1, string="Name Record.")
- ],
- "openTypeOS2WidthClass" : 5,
- "openTypeOS2WeightClass" : 500,
- "openTypeOS2Selection" : [3],
- "openTypeOS2VendorID" : "SOME",
- "openTypeOS2Panose" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
- "openTypeOS2FamilyClass" : [1, 1],
- "openTypeOS2UnicodeRanges" : [0, 1],
- "openTypeOS2CodePageRanges" : [0, 1],
- "openTypeOS2TypoAscender" : 750,
- "openTypeOS2TypoDescender" : -250,
- "openTypeOS2TypoLineGap" : 200,
- "openTypeOS2WinAscent" : 750,
- "openTypeOS2WinDescent" : 250,
- "openTypeOS2Type" : [],
- "openTypeOS2SubscriptXSize" : 200,
- "openTypeOS2SubscriptYSize" : 400,
- "openTypeOS2SubscriptXOffset" : 0,
- "openTypeOS2SubscriptYOffset" : -100,
- "openTypeOS2SuperscriptXSize" : 200,
- "openTypeOS2SuperscriptYSize" : 400,
- "openTypeOS2SuperscriptXOffset" : 0,
- "openTypeOS2SuperscriptYOffset" : 200,
- "openTypeOS2StrikeoutSize" : 20,
- "openTypeOS2StrikeoutPosition" : 300,
- "openTypeVheaVertTypoAscender" : 750,
- "openTypeVheaVertTypoDescender" : -250,
- "openTypeVheaVertTypoLineGap" : 200,
- "openTypeVheaCaretSlopeRise" : 0,
- "openTypeVheaCaretSlopeRun" : 1,
- "openTypeVheaCaretOffset" : 0,
- "postscriptFontName" : "SomeFont-Regular (Postscript Font Name)",
- "postscriptFullName" : "Some Font-Regular (Postscript Full Name)",
- "postscriptSlantAngle" : -12.5,
- "postscriptUniqueID" : 4000000,
- "postscriptUnderlineThickness" : 20,
- "postscriptUnderlinePosition" : -200,
- "postscriptIsFixedPitch" : False,
- "postscriptBlueValues" : [500, 510],
- "postscriptOtherBlues" : [-250, -260],
- "postscriptFamilyBlues" : [500, 510],
- "postscriptFamilyOtherBlues" : [-250, -260],
- "postscriptStemSnapH" : [100, 120],
- "postscriptStemSnapV" : [80, 90],
- "postscriptBlueFuzz" : 1,
- "postscriptBlueShift" : 7,
- "postscriptBlueScale" : 0.039625,
- "postscriptForceBold" : True,
- "postscriptDefaultWidthX" : 400,
- "postscriptNominalWidthX" : 400,
- "postscriptWeightName" : "Medium",
- "postscriptDefaultCharacter" : ".notdef",
- "postscriptWindowsCharacterSet" : 1,
- "macintoshFONDFamilyID" : 15000,
- "macintoshFONDName" : "SomeFont Regular (FOND Name)",
- "woffMajorVersion" : 1,
- "woffMinorVersion" : 0,
- "woffMetadataUniqueID" : dict(id="string"),
- "woffMetadataVendor" : dict(name="Some Foundry", url="http://somefoundry.com"),
- "woffMetadataCredits" : dict(
- credits=[
- dict(name="Some Designer"),
- dict(name=""),
- dict(name="Some Designer", url="http://somedesigner.com"),
- dict(name="Some Designer", url=""),
- dict(name="Some Designer", role="Designer"),
- dict(name="Some Designer", role=""),
- dict(name="Some Designer", dir="ltr"),
- dict(name="rengiseD emoS", dir="rtl"),
- {"name" : "Some Designer", "class" : "hello"},
- {"name" : "Some Designer", "class" : ""},
- ]
- ),
- "woffMetadataDescription" : dict(
- url="http://somefoundry.com/foo/description",
- text=[
- dict(text="foo"),
- dict(text=""),
- dict(text="foo", language="bar"),
- dict(text="foo", language=""),
- dict(text="foo", dir="ltr"),
- dict(text="foo", dir="rtl"),
- {"text" : "foo", "class" : "foo"},
- {"text" : "foo", "class" : ""},
- ]
- ),
- "woffMetadataLicense" : dict(
- url="http://somefoundry.com/foo/license",
- id="foo",
- text=[
- dict(text="foo"),
- dict(text=""),
- dict(text="foo", language="bar"),
- dict(text="foo", language=""),
- dict(text="foo", dir="ltr"),
- dict(text="foo", dir="rtl"),
- {"text" : "foo", "class" : "foo"},
- {"text" : "foo", "class" : ""},
- ]
- ),
- "woffMetadataCopyright" : dict(
- text=[
- dict(text="foo"),
- dict(text=""),
- dict(text="foo", language="bar"),
- dict(text="foo", language=""),
- dict(text="foo", dir="ltr"),
- dict(text="foo", dir="rtl"),
- {"text" : "foo", "class" : "foo"},
- {"text" : "foo", "class" : ""},
- ]
- ),
- "woffMetadataTrademark" : dict(
- text=[
- dict(text="foo"),
- dict(text=""),
- dict(text="foo", language="bar"),
- dict(text="foo", language=""),
- dict(text="foo", dir="ltr"),
- dict(text="foo", dir="rtl"),
- {"text" : "foo", "class" : "foo"},
- {"text" : "foo", "class" : ""},
- ]
- ),
- "woffMetadataLicensee" : dict(
- name="Some Licensee"
- ),
- "woffMetadataExtensions" : [
- dict(
- # everything
- names=[
- dict(text="foo"),
- dict(text=""),
- dict(text="foo", language="bar"),
- dict(text="foo", language=""),
- dict(text="foo", dir="ltr"),
- dict(text="foo", dir="rtl"),
- {"text" : "foo", "class" : "hello"},
- {"text" : "foo", "class" : ""},
- ],
- items=[
- # everything
- dict(
- id="foo",
- names=[
- dict(text="foo"),
- dict(text=""),
- dict(text="foo", language="bar"),
- dict(text="foo", language=""),
- dict(text="foo", dir="ltr"),
- dict(text="foo", dir="rtl"),
- {"text" : "foo", "class" : "hello"},
- {"text" : "foo", "class" : ""},
- ],
- values=[
- dict(text="foo"),
- dict(text=""),
- dict(text="foo", language="bar"),
- dict(text="foo", language=""),
- dict(text="foo", dir="ltr"),
- dict(text="foo", dir="rtl"),
- {"text" : "foo", "class" : "hello"},
- {"text" : "foo", "class" : ""},
- ]
- ),
- # no id
- dict(
- names=[
- dict(text="foo")
- ],
- values=[
- dict(text="foo")
- ]
- )
- ]
- ),
- # no names
- dict(
- items=[
- dict(
- id="foo",
- names=[
- dict(text="foo")
- ],
- values=[
- dict(text="foo")
- ]
- )
- ]
- ),
- ],
- "guidelines" : [
- # ints
- dict(x=100, y=200, angle=45),
- # floats
- dict(x=100.5, y=200.5, angle=45.5),
- # edges
- dict(x=0, y=0, angle=0),
- dict(x=0, y=0, angle=360),
- dict(x=0, y=0, angle=360.0),
- # no y
- dict(x=100),
- # no x
- dict(y=200),
- # name
- dict(x=100, y=200, angle=45, name="foo"),
- dict(x=100, y=200, angle=45, name=""),
- # identifier
- dict(x=100, y=200, angle=45, identifier="guide1"),
- dict(x=100, y=200, angle=45, identifier="guide2"),
- dict(x=100, y=200, angle=45, identifier="\x20"),
- dict(x=100, y=200, angle=45, identifier="\x7E"),
- # colors
- dict(x=100, y=200, angle=45, color="0,0,0,0"),
- dict(x=100, y=200, angle=45, color="1,0,0,0"),
- dict(x=100, y=200, angle=45, color="1,1,1,1"),
- dict(x=100, y=200, angle=45, color="0,1,0,0"),
- dict(x=100, y=200, angle=45, color="0,0,1,0"),
- dict(x=100, y=200, angle=45, color="0,0,0,1"),
- dict(x=100, y=200, angle=45, color="1, 0, 0, 0"),
- dict(x=100, y=200, angle=45, color="0, 1, 0, 0"),
- dict(x=100, y=200, angle=45, color="0, 0, 1, 0"),
- dict(x=100, y=200, angle=45, color="0, 0, 0, 1"),
- dict(x=100, y=200, angle=45, color=".5,0,0,0"),
- dict(x=100, y=200, angle=45, color="0,.5,0,0"),
- dict(x=100, y=200, angle=45, color="0,0,.5,0"),
- dict(x=100, y=200, angle=45, color="0,0,0,.5"),
- dict(x=100, y=200, angle=45, color=".5,1,1,1"),
- dict(x=100, y=200, angle=45, color="1,.5,1,1"),
- dict(x=100, y=200, angle=45, color="1,1,.5,1"),
- dict(x=100, y=200, angle=45, color="1,1,1,.5"),
- ],
+ "familyName": "Some Font (Family Name)",
+ "styleName": "Regular (Style Name)",
+ "styleMapFamilyName": "Some Font Regular (Style Map Family Name)",
+ "styleMapStyleName": "regular",
+ "versionMajor": 1,
+ "versionMinor": 0,
+ "year": 2008,
+ "copyright": "Copyright Some Foundry.",
+ "trademark": "Trademark Some Foundry",
+ "unitsPerEm": 1000,
+ "descender": -250,
+ "xHeight": 500,
+ "capHeight": 750,
+ "ascender": 750,
+ "italicAngle": -12.5,
+ "note": "A note.",
+ "openTypeGaspRangeRecords": [
+ dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]),
+ dict(rangeMaxPPEM=20, rangeGaspBehavior=[1]),
+ dict(rangeMaxPPEM=30, rangeGaspBehavior=[2]),
+ dict(rangeMaxPPEM=40, rangeGaspBehavior=[3]),
+ dict(rangeMaxPPEM=50, rangeGaspBehavior=[0, 1, 2, 3]),
+ dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
+ ],
+ "openTypeHeadCreated": "2000/01/01 00:00:00",
+ "openTypeHeadLowestRecPPEM": 10,
+ "openTypeHeadFlags": [0, 1],
+ "openTypeHheaAscender": 750,
+ "openTypeHheaDescender": -250,
+ "openTypeHheaLineGap": 200,
+ "openTypeHheaCaretSlopeRise": 1,
+ "openTypeHheaCaretSlopeRun": 0,
+ "openTypeHheaCaretOffset": 0,
+ "openTypeNameDesigner": "Some Designer",
+ "openTypeNameDesignerURL": "http://somedesigner.com",
+ "openTypeNameManufacturer": "Some Foundry",
+ "openTypeNameManufacturerURL": "http://somefoundry.com",
+ "openTypeNameLicense": "License info for Some Foundry.",
+ "openTypeNameLicenseURL": "http://somefoundry.com/license",
+ "openTypeNameVersion": "OpenType name Table Version",
+ "openTypeNameUniqueID": "OpenType name Table Unique ID",
+ "openTypeNameDescription": "Some Font by Some Designer for Some Foundry.",
+ "openTypeNamePreferredFamilyName": "Some Font (Preferred Family Name)",
+ "openTypeNamePreferredSubfamilyName": "Regular (Preferred Subfamily Name)",
+ "openTypeNameCompatibleFullName": "Some Font Regular (Compatible Full Name)",
+ "openTypeNameSampleText": "Sample Text for Some Font.",
+ "openTypeNameWWSFamilyName": "Some Font (WWS Family Name)",
+ "openTypeNameWWSSubfamilyName": "Regular (WWS Subfamily Name)",
+ "openTypeNameRecords": [
+ dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record."),
+ dict(nameID=2, platformID=1, encodingID=1, languageID=1, string="Name Record."),
+ ],
+ "openTypeOS2WidthClass": 5,
+ "openTypeOS2WeightClass": 500,
+ "openTypeOS2Selection": [3],
+ "openTypeOS2VendorID": "SOME",
+ "openTypeOS2Panose": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+ "openTypeOS2FamilyClass": [1, 1],
+ "openTypeOS2UnicodeRanges": [0, 1],
+ "openTypeOS2CodePageRanges": [0, 1],
+ "openTypeOS2TypoAscender": 750,
+ "openTypeOS2TypoDescender": -250,
+ "openTypeOS2TypoLineGap": 200,
+ "openTypeOS2WinAscent": 750,
+ "openTypeOS2WinDescent": 250,
+ "openTypeOS2Type": [],
+ "openTypeOS2SubscriptXSize": 200,
+ "openTypeOS2SubscriptYSize": 400,
+ "openTypeOS2SubscriptXOffset": 0,
+ "openTypeOS2SubscriptYOffset": -100,
+ "openTypeOS2SuperscriptXSize": 200,
+ "openTypeOS2SuperscriptYSize": 400,
+ "openTypeOS2SuperscriptXOffset": 0,
+ "openTypeOS2SuperscriptYOffset": 200,
+ "openTypeOS2StrikeoutSize": 20,
+ "openTypeOS2StrikeoutPosition": 300,
+ "openTypeVheaVertTypoAscender": 750,
+ "openTypeVheaVertTypoDescender": -250,
+ "openTypeVheaVertTypoLineGap": 200,
+ "openTypeVheaCaretSlopeRise": 0,
+ "openTypeVheaCaretSlopeRun": 1,
+ "openTypeVheaCaretOffset": 0,
+ "postscriptFontName": "SomeFont-Regular (Postscript Font Name)",
+ "postscriptFullName": "Some Font-Regular (Postscript Full Name)",
+ "postscriptSlantAngle": -12.5,
+ "postscriptUniqueID": 4000000,
+ "postscriptUnderlineThickness": 20,
+ "postscriptUnderlinePosition": -200,
+ "postscriptIsFixedPitch": False,
+ "postscriptBlueValues": [500, 510],
+ "postscriptOtherBlues": [-250, -260],
+ "postscriptFamilyBlues": [500, 510],
+ "postscriptFamilyOtherBlues": [-250, -260],
+ "postscriptStemSnapH": [100, 120],
+ "postscriptStemSnapV": [80, 90],
+ "postscriptBlueFuzz": 1,
+ "postscriptBlueShift": 7,
+ "postscriptBlueScale": 0.039625,
+ "postscriptForceBold": True,
+ "postscriptDefaultWidthX": 400,
+ "postscriptNominalWidthX": 400,
+ "postscriptWeightName": "Medium",
+ "postscriptDefaultCharacter": ".notdef",
+ "postscriptWindowsCharacterSet": 1,
+ "macintoshFONDFamilyID": 15000,
+ "macintoshFONDName": "SomeFont Regular (FOND Name)",
+ "woffMajorVersion": 1,
+ "woffMinorVersion": 0,
+ "woffMetadataUniqueID": dict(id="string"),
+ "woffMetadataVendor": dict(name="Some Foundry", url="http://somefoundry.com"),
+ "woffMetadataCredits": dict(
+ credits=[
+ dict(name="Some Designer"),
+ dict(name=""),
+ dict(name="Some Designer", url="http://somedesigner.com"),
+ dict(name="Some Designer", url=""),
+ dict(name="Some Designer", role="Designer"),
+ dict(name="Some Designer", role=""),
+ dict(name="Some Designer", dir="ltr"),
+ dict(name="rengiseD emoS", dir="rtl"),
+ {"name": "Some Designer", "class": "hello"},
+ {"name": "Some Designer", "class": ""},
+ ]
+ ),
+ "woffMetadataDescription": dict(
+ url="http://somefoundry.com/foo/description",
+ text=[
+ dict(text="foo"),
+ dict(text=""),
+ dict(text="foo", language="bar"),
+ dict(text="foo", language=""),
+ dict(text="foo", dir="ltr"),
+ dict(text="foo", dir="rtl"),
+ {"text": "foo", "class": "foo"},
+ {"text": "foo", "class": ""},
+ ],
+ ),
+ "woffMetadataLicense": dict(
+ url="http://somefoundry.com/foo/license",
+ id="foo",
+ text=[
+ dict(text="foo"),
+ dict(text=""),
+ dict(text="foo", language="bar"),
+ dict(text="foo", language=""),
+ dict(text="foo", dir="ltr"),
+ dict(text="foo", dir="rtl"),
+ {"text": "foo", "class": "foo"},
+ {"text": "foo", "class": ""},
+ ],
+ ),
+ "woffMetadataCopyright": dict(
+ text=[
+ dict(text="foo"),
+ dict(text=""),
+ dict(text="foo", language="bar"),
+ dict(text="foo", language=""),
+ dict(text="foo", dir="ltr"),
+ dict(text="foo", dir="rtl"),
+ {"text": "foo", "class": "foo"},
+ {"text": "foo", "class": ""},
+ ]
+ ),
+ "woffMetadataTrademark": dict(
+ text=[
+ dict(text="foo"),
+ dict(text=""),
+ dict(text="foo", language="bar"),
+ dict(text="foo", language=""),
+ dict(text="foo", dir="ltr"),
+ dict(text="foo", dir="rtl"),
+ {"text": "foo", "class": "foo"},
+ {"text": "foo", "class": ""},
+ ]
+ ),
+ "woffMetadataLicensee": dict(name="Some Licensee"),
+ "woffMetadataExtensions": [
+ dict(
+ # everything
+ names=[
+ dict(text="foo"),
+ dict(text=""),
+ dict(text="foo", language="bar"),
+ dict(text="foo", language=""),
+ dict(text="foo", dir="ltr"),
+ dict(text="foo", dir="rtl"),
+ {"text": "foo", "class": "hello"},
+ {"text": "foo", "class": ""},
+ ],
+ items=[
+ # everything
+ dict(
+ id="foo",
+ names=[
+ dict(text="foo"),
+ dict(text=""),
+ dict(text="foo", language="bar"),
+ dict(text="foo", language=""),
+ dict(text="foo", dir="ltr"),
+ dict(text="foo", dir="rtl"),
+ {"text": "foo", "class": "hello"},
+ {"text": "foo", "class": ""},
+ ],
+ values=[
+ dict(text="foo"),
+ dict(text=""),
+ dict(text="foo", language="bar"),
+ dict(text="foo", language=""),
+ dict(text="foo", dir="ltr"),
+ dict(text="foo", dir="rtl"),
+ {"text": "foo", "class": "hello"},
+ {"text": "foo", "class": ""},
+ ],
+ ),
+ # no id
+ dict(names=[dict(text="foo")], values=[dict(text="foo")]),
+ ],
+ ),
+ # no names
+ dict(
+ items=[dict(id="foo", names=[dict(text="foo")], values=[dict(text="foo")])]
+ ),
+ ],
+ "guidelines": [
+ # ints
+ dict(x=100, y=200, angle=45),
+ # floats
+ dict(x=100.5, y=200.5, angle=45.5),
+ # edges
+ dict(x=0, y=0, angle=0),
+ dict(x=0, y=0, angle=360),
+ dict(x=0, y=0, angle=360.0),
+ # no y
+ dict(x=100),
+ # no x
+ dict(y=200),
+ # name
+ dict(x=100, y=200, angle=45, name="foo"),
+ dict(x=100, y=200, angle=45, name=""),
+ # identifier
+ dict(x=100, y=200, angle=45, identifier="guide1"),
+ dict(x=100, y=200, angle=45, identifier="guide2"),
+ dict(x=100, y=200, angle=45, identifier="\x20"),
+ dict(x=100, y=200, angle=45, identifier="\x7E"),
+ # colors
+ dict(x=100, y=200, angle=45, color="0,0,0,0"),
+ dict(x=100, y=200, angle=45, color="1,0,0,0"),
+ dict(x=100, y=200, angle=45, color="1,1,1,1"),
+ dict(x=100, y=200, angle=45, color="0,1,0,0"),
+ dict(x=100, y=200, angle=45, color="0,0,1,0"),
+ dict(x=100, y=200, angle=45, color="0,0,0,1"),
+ dict(x=100, y=200, angle=45, color="1, 0, 0, 0"),
+ dict(x=100, y=200, angle=45, color="0, 1, 0, 0"),
+ dict(x=100, y=200, angle=45, color="0, 0, 1, 0"),
+ dict(x=100, y=200, angle=45, color="0, 0, 0, 1"),
+ dict(x=100, y=200, angle=45, color=".5,0,0,0"),
+ dict(x=100, y=200, angle=45, color="0,.5,0,0"),
+ dict(x=100, y=200, angle=45, color="0,0,.5,0"),
+ dict(x=100, y=200, angle=45, color="0,0,0,.5"),
+ dict(x=100, y=200, angle=45, color=".5,1,1,1"),
+ dict(x=100, y=200, angle=45, color="1,.5,1,1"),
+ dict(x=100, y=200, angle=45, color="1,1,.5,1"),
+ dict(x=100, y=200, angle=45, color="1,1,1,.5"),
+ ],
}
expectedFontInfo1To2Conversion = {
- "familyName" : "Some Font (Family Name)",
- "styleMapFamilyName" : "Some Font Regular (Style Map Family Name)",
- "styleMapStyleName" : "regular",
- "styleName" : "Regular (Style Name)",
- "unitsPerEm" : 1000,
- "ascender" : 750,
- "capHeight" : 750,
- "xHeight" : 500,
- "descender" : -250,
- "italicAngle" : -12.5,
- "versionMajor" : 1,
- "versionMinor" : 0,
- "year" : 2008,
- "copyright" : "Copyright Some Foundry.",
- "trademark" : "Trademark Some Foundry",
- "note" : "A note.",
- "macintoshFONDFamilyID" : 15000,
- "macintoshFONDName" : "SomeFont Regular (FOND Name)",
- "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
- "openTypeNameDescription" : "Some Font by Some Designer for Some Foundry.",
- "openTypeNameDesigner" : "Some Designer",
- "openTypeNameDesignerURL" : "http://somedesigner.com",
- "openTypeNameLicense" : "License info for Some Foundry.",
- "openTypeNameLicenseURL" : "http://somefoundry.com/license",
- "openTypeNameManufacturer" : "Some Foundry",
- "openTypeNameManufacturerURL" : "http://somefoundry.com",
- "openTypeNamePreferredFamilyName" : "Some Font (Preferred Family Name)",
- "openTypeNamePreferredSubfamilyName": "Regular (Preferred Subfamily Name)",
- "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
- "openTypeNameUniqueID" : "OpenType name Table Unique ID",
- "openTypeNameVersion" : "OpenType name Table Version",
- "openTypeOS2VendorID" : "SOME",
- "openTypeOS2WeightClass" : 500,
- "openTypeOS2WidthClass" : 5,
- "postscriptDefaultWidthX" : 400,
- "postscriptFontName" : "SomeFont-Regular (Postscript Font Name)",
- "postscriptFullName" : "Some Font-Regular (Postscript Full Name)",
- "postscriptSlantAngle" : -12.5,
- "postscriptUniqueID" : 4000000,
- "postscriptWeightName" : "Medium",
- "postscriptWindowsCharacterSet" : 1
+ "familyName": "Some Font (Family Name)",
+ "styleMapFamilyName": "Some Font Regular (Style Map Family Name)",
+ "styleMapStyleName": "regular",
+ "styleName": "Regular (Style Name)",
+ "unitsPerEm": 1000,
+ "ascender": 750,
+ "capHeight": 750,
+ "xHeight": 500,
+ "descender": -250,
+ "italicAngle": -12.5,
+ "versionMajor": 1,
+ "versionMinor": 0,
+ "year": 2008,
+ "copyright": "Copyright Some Foundry.",
+ "trademark": "Trademark Some Foundry",
+ "note": "A note.",
+ "macintoshFONDFamilyID": 15000,
+ "macintoshFONDName": "SomeFont Regular (FOND Name)",
+ "openTypeNameCompatibleFullName": "Some Font Regular (Compatible Full Name)",
+ "openTypeNameDescription": "Some Font by Some Designer for Some Foundry.",
+ "openTypeNameDesigner": "Some Designer",
+ "openTypeNameDesignerURL": "http://somedesigner.com",
+ "openTypeNameLicense": "License info for Some Foundry.",
+ "openTypeNameLicenseURL": "http://somefoundry.com/license",
+ "openTypeNameManufacturer": "Some Foundry",
+ "openTypeNameManufacturerURL": "http://somefoundry.com",
+ "openTypeNamePreferredFamilyName": "Some Font (Preferred Family Name)",
+ "openTypeNamePreferredSubfamilyName": "Regular (Preferred Subfamily Name)",
+ "openTypeNameCompatibleFullName": "Some Font Regular (Compatible Full Name)",
+ "openTypeNameUniqueID": "OpenType name Table Unique ID",
+ "openTypeNameVersion": "OpenType name Table Version",
+ "openTypeOS2VendorID": "SOME",
+ "openTypeOS2WeightClass": 500,
+ "openTypeOS2WidthClass": 5,
+ "postscriptDefaultWidthX": 400,
+ "postscriptFontName": "SomeFont-Regular (Postscript Font Name)",
+ "postscriptFullName": "Some Font-Regular (Postscript Full Name)",
+ "postscriptSlantAngle": -12.5,
+ "postscriptUniqueID": 4000000,
+ "postscriptWeightName": "Medium",
+ "postscriptWindowsCharacterSet": 1,
}
expectedFontInfo2To1Conversion = {
- "familyName" : "Some Font (Family Name)",
- "menuName" : "Some Font Regular (Style Map Family Name)",
- "fontStyle" : 64,
- "styleName" : "Regular (Style Name)",
- "unitsPerEm" : 1000,
- "ascender" : 750,
- "capHeight" : 750,
- "xHeight" : 500,
- "descender" : -250,
- "italicAngle" : -12.5,
- "versionMajor" : 1,
- "versionMinor" : 0,
- "copyright" : "Copyright Some Foundry.",
- "trademark" : "Trademark Some Foundry",
- "note" : "A note.",
- "fondID" : 15000,
- "fondName" : "SomeFont Regular (FOND Name)",
- "fullName" : "Some Font Regular (Compatible Full Name)",
- "notice" : "Some Font by Some Designer for Some Foundry.",
- "designer" : "Some Designer",
- "designerURL" : "http://somedesigner.com",
- "license" : "License info for Some Foundry.",
- "licenseURL" : "http://somefoundry.com/license",
- "createdBy" : "Some Foundry",
- "vendorURL" : "http://somefoundry.com",
- "otFamilyName" : "Some Font (Preferred Family Name)",
- "otStyleName" : "Regular (Preferred Subfamily Name)",
- "otMacName" : "Some Font Regular (Compatible Full Name)",
- "ttUniqueID" : "OpenType name Table Unique ID",
- "ttVersion" : "OpenType name Table Version",
- "ttVendor" : "SOME",
- "weightValue" : 500,
- "widthName" : "Medium (normal)",
- "defaultWidth" : 400,
- "fontName" : "SomeFont-Regular (Postscript Font Name)",
- "fullName" : "Some Font-Regular (Postscript Full Name)",
- "slantAngle" : -12.5,
- "uniqueID" : 4000000,
- "weightName" : "Medium",
- "msCharSet" : 0,
- "year" : 2008
+ "familyName": "Some Font (Family Name)",
+ "menuName": "Some Font Regular (Style Map Family Name)",
+ "fontStyle": 64,
+ "styleName": "Regular (Style Name)",
+ "unitsPerEm": 1000,
+ "ascender": 750,
+ "capHeight": 750,
+ "xHeight": 500,
+ "descender": -250,
+ "italicAngle": -12.5,
+ "versionMajor": 1,
+ "versionMinor": 0,
+ "copyright": "Copyright Some Foundry.",
+ "trademark": "Trademark Some Foundry",
+ "note": "A note.",
+ "fondID": 15000,
+ "fondName": "SomeFont Regular (FOND Name)",
+ "fullName": "Some Font Regular (Compatible Full Name)",
+ "notice": "Some Font by Some Designer for Some Foundry.",
+ "designer": "Some Designer",
+ "designerURL": "http://somedesigner.com",
+ "license": "License info for Some Foundry.",
+ "licenseURL": "http://somefoundry.com/license",
+ "createdBy": "Some Foundry",
+ "vendorURL": "http://somefoundry.com",
+ "otFamilyName": "Some Font (Preferred Family Name)",
+ "otStyleName": "Regular (Preferred Subfamily Name)",
+ "otMacName": "Some Font Regular (Compatible Full Name)",
+ "ttUniqueID": "OpenType name Table Unique ID",
+ "ttVersion": "OpenType name Table Version",
+ "ttVendor": "SOME",
+ "weightValue": 500,
+ "widthName": "Medium (normal)",
+ "defaultWidth": 400,
+ "fontName": "SomeFont-Regular (Postscript Font Name)",
+ "fullName": "Some Font-Regular (Postscript Full Name)",
+ "slantAngle": -12.5,
+ "uniqueID": 4000000,
+ "weightName": "Medium",
+ "msCharSet": 0,
+ "year": 2008,
}
diff --git a/Tests/unicodedata_test.py b/Tests/unicodedata_test.py
index 5cdb3404..77301f4d 100644
--- a/Tests/unicodedata_test.py
+++ b/Tests/unicodedata_test.py
@@ -10,147 +10,148 @@ def test_script():
assert unicodedata.script(chr(0x10FFFF)) == "Zzzz"
# these were randomly sampled, one character per script
- assert unicodedata.script(chr(0x1E918)) == 'Adlm'
- assert unicodedata.script(chr(0x1170D)) == 'Ahom'
- assert unicodedata.script(chr(0x145A0)) == 'Hluw'
- assert unicodedata.script(chr(0x0607)) == 'Arab'
- assert unicodedata.script(chr(0x056C)) == 'Armn'
- assert unicodedata.script(chr(0x10B27)) == 'Avst'
- assert unicodedata.script(chr(0x1B41)) == 'Bali'
- assert unicodedata.script(chr(0x168AD)) == 'Bamu'
- assert unicodedata.script(chr(0x16ADD)) == 'Bass'
- assert unicodedata.script(chr(0x1BE5)) == 'Batk'
- assert unicodedata.script(chr(0x09F3)) == 'Beng'
- assert unicodedata.script(chr(0x11C5B)) == 'Bhks'
- assert unicodedata.script(chr(0x3126)) == 'Bopo'
- assert unicodedata.script(chr(0x1103B)) == 'Brah'
- assert unicodedata.script(chr(0x2849)) == 'Brai'
- assert unicodedata.script(chr(0x1A0A)) == 'Bugi'
- assert unicodedata.script(chr(0x174E)) == 'Buhd'
- assert unicodedata.script(chr(0x18EE)) == 'Cans'
- assert unicodedata.script(chr(0x102B7)) == 'Cari'
- assert unicodedata.script(chr(0x1053D)) == 'Aghb'
- assert unicodedata.script(chr(0x11123)) == 'Cakm'
- assert unicodedata.script(chr(0xAA1F)) == 'Cham'
- assert unicodedata.script(chr(0xAB95)) == 'Cher'
- assert unicodedata.script(chr(0x1F0C7)) == 'Zyyy'
- assert unicodedata.script(chr(0x2C85)) == 'Copt'
- assert unicodedata.script(chr(0x12014)) == 'Xsux'
- assert unicodedata.script(chr(0x1082E)) == 'Cprt'
- assert unicodedata.script(chr(0xA686)) == 'Cyrl'
- assert unicodedata.script(chr(0x10417)) == 'Dsrt'
- assert unicodedata.script(chr(0x093E)) == 'Deva'
- assert unicodedata.script(chr(0x1BC4B)) == 'Dupl'
- assert unicodedata.script(chr(0x1310C)) == 'Egyp'
- assert unicodedata.script(chr(0x1051C)) == 'Elba'
- assert unicodedata.script(chr(0x2DA6)) == 'Ethi'
- assert unicodedata.script(chr(0x10AD)) == 'Geor'
- assert unicodedata.script(chr(0x2C52)) == 'Glag'
- assert unicodedata.script(chr(0x10343)) == 'Goth'
- assert unicodedata.script(chr(0x11371)) == 'Gran'
- assert unicodedata.script(chr(0x03D0)) == 'Grek'
- assert unicodedata.script(chr(0x0AAA)) == 'Gujr'
- assert unicodedata.script(chr(0x0A4C)) == 'Guru'
- assert unicodedata.script(chr(0x23C9F)) == 'Hani'
- assert unicodedata.script(chr(0xC259)) == 'Hang'
- assert unicodedata.script(chr(0x1722)) == 'Hano'
- assert unicodedata.script(chr(0x108F5)) == 'Hatr'
- assert unicodedata.script(chr(0x05C2)) == 'Hebr'
- assert unicodedata.script(chr(0x1B072)) == 'Hira'
- assert unicodedata.script(chr(0x10847)) == 'Armi'
- assert unicodedata.script(chr(0x033A)) == 'Zinh'
- assert unicodedata.script(chr(0x10B66)) == 'Phli'
- assert unicodedata.script(chr(0x10B4B)) == 'Prti'
- assert unicodedata.script(chr(0xA98A)) == 'Java'
- assert unicodedata.script(chr(0x110B2)) == 'Kthi'
- assert unicodedata.script(chr(0x0CC6)) == 'Knda'
- assert unicodedata.script(chr(0x3337)) == 'Kana'
- assert unicodedata.script(chr(0xA915)) == 'Kali'
- assert unicodedata.script(chr(0x10A2E)) == 'Khar'
- assert unicodedata.script(chr(0x17AA)) == 'Khmr'
- assert unicodedata.script(chr(0x11225)) == 'Khoj'
- assert unicodedata.script(chr(0x112B6)) == 'Sind'
- assert unicodedata.script(chr(0x0ED7)) == 'Laoo'
- assert unicodedata.script(chr(0xAB3C)) == 'Latn'
- assert unicodedata.script(chr(0x1C48)) == 'Lepc'
- assert unicodedata.script(chr(0x1923)) == 'Limb'
- assert unicodedata.script(chr(0x1071D)) == 'Lina'
- assert unicodedata.script(chr(0x100EC)) == 'Linb'
- assert unicodedata.script(chr(0xA4E9)) == 'Lisu'
- assert unicodedata.script(chr(0x10284)) == 'Lyci'
- assert unicodedata.script(chr(0x10926)) == 'Lydi'
- assert unicodedata.script(chr(0x11161)) == 'Mahj'
- assert unicodedata.script(chr(0x0D56)) == 'Mlym'
- assert unicodedata.script(chr(0x0856)) == 'Mand'
- assert unicodedata.script(chr(0x10AF0)) == 'Mani'
- assert unicodedata.script(chr(0x11CB0)) == 'Marc'
- assert unicodedata.script(chr(0x11D28)) == 'Gonm'
- assert unicodedata.script(chr(0xABDD)) == 'Mtei'
- assert unicodedata.script(chr(0x1E897)) == 'Mend'
- assert unicodedata.script(chr(0x109B0)) == 'Merc'
- assert unicodedata.script(chr(0x10993)) == 'Mero'
- assert unicodedata.script(chr(0x16F5D)) == 'Plrd'
- assert unicodedata.script(chr(0x1160B)) == 'Modi'
- assert unicodedata.script(chr(0x18A8)) == 'Mong'
- assert unicodedata.script(chr(0x16A48)) == 'Mroo'
- assert unicodedata.script(chr(0x1128C)) == 'Mult'
- assert unicodedata.script(chr(0x105B)) == 'Mymr'
- assert unicodedata.script(chr(0x108AF)) == 'Nbat'
- assert unicodedata.script(chr(0x19B3)) == 'Talu'
- assert unicodedata.script(chr(0x1143D)) == 'Newa'
- assert unicodedata.script(chr(0x07F4)) == 'Nkoo'
- assert unicodedata.script(chr(0x1B192)) == 'Nshu'
- assert unicodedata.script(chr(0x169C)) == 'Ogam'
- assert unicodedata.script(chr(0x1C56)) == 'Olck'
- assert unicodedata.script(chr(0x10CE9)) == 'Hung'
- assert unicodedata.script(chr(0x10316)) == 'Ital'
- assert unicodedata.script(chr(0x10A93)) == 'Narb'
- assert unicodedata.script(chr(0x1035A)) == 'Perm'
- assert unicodedata.script(chr(0x103D5)) == 'Xpeo'
- assert unicodedata.script(chr(0x10A65)) == 'Sarb'
- assert unicodedata.script(chr(0x10C09)) == 'Orkh'
- assert unicodedata.script(chr(0x0B60)) == 'Orya'
- assert unicodedata.script(chr(0x104CF)) == 'Osge'
- assert unicodedata.script(chr(0x104A8)) == 'Osma'
- assert unicodedata.script(chr(0x16B12)) == 'Hmng'
- assert unicodedata.script(chr(0x10879)) == 'Palm'
- assert unicodedata.script(chr(0x11AF1)) == 'Pauc'
- assert unicodedata.script(chr(0xA869)) == 'Phag'
- assert unicodedata.script(chr(0x10909)) == 'Phnx'
- assert unicodedata.script(chr(0x10B81)) == 'Phlp'
- assert unicodedata.script(chr(0xA941)) == 'Rjng'
- assert unicodedata.script(chr(0x16C3)) == 'Runr'
- assert unicodedata.script(chr(0x0814)) == 'Samr'
- assert unicodedata.script(chr(0xA88C)) == 'Saur'
- assert unicodedata.script(chr(0x111C8)) == 'Shrd'
- assert unicodedata.script(chr(0x1045F)) == 'Shaw'
- assert unicodedata.script(chr(0x115AD)) == 'Sidd'
- assert unicodedata.script(chr(0x1D8C0)) == 'Sgnw'
- assert unicodedata.script(chr(0x0DB9)) == 'Sinh'
- assert unicodedata.script(chr(0x110F9)) == 'Sora'
- assert unicodedata.script(chr(0x11A60)) == 'Soyo'
- assert unicodedata.script(chr(0x1B94)) == 'Sund'
- assert unicodedata.script(chr(0xA81F)) == 'Sylo'
- assert unicodedata.script(chr(0x0740)) == 'Syrc'
- assert unicodedata.script(chr(0x1714)) == 'Tglg'
- assert unicodedata.script(chr(0x1761)) == 'Tagb'
- assert unicodedata.script(chr(0x1965)) == 'Tale'
- assert unicodedata.script(chr(0x1A32)) == 'Lana'
- assert unicodedata.script(chr(0xAA86)) == 'Tavt'
- assert unicodedata.script(chr(0x116A5)) == 'Takr'
- assert unicodedata.script(chr(0x0B8E)) == 'Taml'
- assert unicodedata.script(chr(0x1754D)) == 'Tang'
- assert unicodedata.script(chr(0x0C40)) == 'Telu'
- assert unicodedata.script(chr(0x07A4)) == 'Thaa'
- assert unicodedata.script(chr(0x0E42)) == 'Thai'
- assert unicodedata.script(chr(0x0F09)) == 'Tibt'
- assert unicodedata.script(chr(0x2D3A)) == 'Tfng'
- assert unicodedata.script(chr(0x114B0)) == 'Tirh'
- assert unicodedata.script(chr(0x1038B)) == 'Ugar'
- assert unicodedata.script(chr(0xA585)) == 'Vaii'
- assert unicodedata.script(chr(0x118CF)) == 'Wara'
- assert unicodedata.script(chr(0xA066)) == 'Yiii'
- assert unicodedata.script(chr(0x11A31)) == 'Zanb'
+ assert unicodedata.script(chr(0x1E918)) == "Adlm"
+ assert unicodedata.script(chr(0x1170D)) == "Ahom"
+ assert unicodedata.script(chr(0x145A0)) == "Hluw"
+ assert unicodedata.script(chr(0x0607)) == "Arab"
+ assert unicodedata.script(chr(0x056C)) == "Armn"
+ assert unicodedata.script(chr(0x10B27)) == "Avst"
+ assert unicodedata.script(chr(0x1B41)) == "Bali"
+ assert unicodedata.script(chr(0x168AD)) == "Bamu"
+ assert unicodedata.script(chr(0x16ADD)) == "Bass"
+ assert unicodedata.script(chr(0x1BE5)) == "Batk"
+ assert unicodedata.script(chr(0x09F3)) == "Beng"
+ assert unicodedata.script(chr(0x11C5B)) == "Bhks"
+ assert unicodedata.script(chr(0x3126)) == "Bopo"
+ assert unicodedata.script(chr(0x1103B)) == "Brah"
+ assert unicodedata.script(chr(0x2849)) == "Brai"
+ assert unicodedata.script(chr(0x1A0A)) == "Bugi"
+ assert unicodedata.script(chr(0x174E)) == "Buhd"
+ assert unicodedata.script(chr(0x18EE)) == "Cans"
+ assert unicodedata.script(chr(0x102B7)) == "Cari"
+ assert unicodedata.script(chr(0x1053D)) == "Aghb"
+ assert unicodedata.script(chr(0x11123)) == "Cakm"
+ assert unicodedata.script(chr(0xAA1F)) == "Cham"
+ assert unicodedata.script(chr(0xAB95)) == "Cher"
+ assert unicodedata.script(chr(0x1F0C7)) == "Zyyy"
+ assert unicodedata.script(chr(0x2C85)) == "Copt"
+ assert unicodedata.script(chr(0x12014)) == "Xsux"
+ assert unicodedata.script(chr(0x1082E)) == "Cprt"
+ assert unicodedata.script(chr(0xA686)) == "Cyrl"
+ assert unicodedata.script(chr(0x10417)) == "Dsrt"
+ assert unicodedata.script(chr(0x093E)) == "Deva"
+ assert unicodedata.script(chr(0x1BC4B)) == "Dupl"
+ assert unicodedata.script(chr(0x1310C)) == "Egyp"
+ assert unicodedata.script(chr(0x1051C)) == "Elba"
+ assert unicodedata.script(chr(0x2DA6)) == "Ethi"
+ assert unicodedata.script(chr(0x10AD)) == "Geor"
+ assert unicodedata.script(chr(0x2C52)) == "Glag"
+ assert unicodedata.script(chr(0x10343)) == "Goth"
+ assert unicodedata.script(chr(0x11371)) == "Gran"
+ assert unicodedata.script(chr(0x03D0)) == "Grek"
+ assert unicodedata.script(chr(0x0AAA)) == "Gujr"
+ assert unicodedata.script(chr(0x0A4C)) == "Guru"
+ assert unicodedata.script(chr(0x23C9F)) == "Hani"
+ assert unicodedata.script(chr(0xC259)) == "Hang"
+ assert unicodedata.script(chr(0x1722)) == "Hano"
+ assert unicodedata.script(chr(0x108F5)) == "Hatr"
+ assert unicodedata.script(chr(0x05C2)) == "Hebr"
+ assert unicodedata.script(chr(0x1B072)) == "Hira"
+ assert unicodedata.script(chr(0x10847)) == "Armi"
+ assert unicodedata.script(chr(0x033A)) == "Zinh"
+ assert unicodedata.script(chr(0x10B66)) == "Phli"
+ assert unicodedata.script(chr(0x10B4B)) == "Prti"
+ assert unicodedata.script(chr(0xA98A)) == "Java"
+ assert unicodedata.script(chr(0x110B2)) == "Kthi"
+ assert unicodedata.script(chr(0x0CC6)) == "Knda"
+ assert unicodedata.script(chr(0x3337)) == "Kana"
+ assert unicodedata.script(chr(0xA915)) == "Kali"
+ assert unicodedata.script(chr(0x10A2E)) == "Khar"
+ assert unicodedata.script(chr(0x17AA)) == "Khmr"
+ assert unicodedata.script(chr(0x11225)) == "Khoj"
+ assert unicodedata.script(chr(0x112B6)) == "Sind"
+ assert unicodedata.script(chr(0x0ED7)) == "Laoo"
+ assert unicodedata.script(chr(0xAB3C)) == "Latn"
+ assert unicodedata.script(chr(0x1C48)) == "Lepc"
+ assert unicodedata.script(chr(0x1923)) == "Limb"
+ assert unicodedata.script(chr(0x1071D)) == "Lina"
+ assert unicodedata.script(chr(0x100EC)) == "Linb"
+ assert unicodedata.script(chr(0xA4E9)) == "Lisu"
+ assert unicodedata.script(chr(0x10284)) == "Lyci"
+ assert unicodedata.script(chr(0x10926)) == "Lydi"
+ assert unicodedata.script(chr(0x11161)) == "Mahj"
+ assert unicodedata.script(chr(0x0D56)) == "Mlym"
+ assert unicodedata.script(chr(0x0856)) == "Mand"
+ assert unicodedata.script(chr(0x10AF0)) == "Mani"
+ assert unicodedata.script(chr(0x11CB0)) == "Marc"
+ assert unicodedata.script(chr(0x11D28)) == "Gonm"
+ assert unicodedata.script(chr(0xABDD)) == "Mtei"
+ assert unicodedata.script(chr(0x1E897)) == "Mend"
+ assert unicodedata.script(chr(0x109B0)) == "Merc"
+ assert unicodedata.script(chr(0x10993)) == "Mero"
+ assert unicodedata.script(chr(0x16F5D)) == "Plrd"
+ assert unicodedata.script(chr(0x1160B)) == "Modi"
+ assert unicodedata.script(chr(0x18A8)) == "Mong"
+ assert unicodedata.script(chr(0x16A48)) == "Mroo"
+ assert unicodedata.script(chr(0x1128C)) == "Mult"
+ assert unicodedata.script(chr(0x105B)) == "Mymr"
+ assert unicodedata.script(chr(0x108AF)) == "Nbat"
+ assert unicodedata.script(chr(0x19B3)) == "Talu"
+ assert unicodedata.script(chr(0x1143D)) == "Newa"
+ assert unicodedata.script(chr(0x07F4)) == "Nkoo"
+ assert unicodedata.script(chr(0x1B192)) == "Nshu"
+ assert unicodedata.script(chr(0x169C)) == "Ogam"
+ assert unicodedata.script(chr(0x1C56)) == "Olck"
+ assert unicodedata.script(chr(0x10CE9)) == "Hung"
+ assert unicodedata.script(chr(0x10316)) == "Ital"
+ assert unicodedata.script(chr(0x10A93)) == "Narb"
+ assert unicodedata.script(chr(0x1035A)) == "Perm"
+ assert unicodedata.script(chr(0x103D5)) == "Xpeo"
+ assert unicodedata.script(chr(0x10A65)) == "Sarb"
+ assert unicodedata.script(chr(0x10C09)) == "Orkh"
+ assert unicodedata.script(chr(0x0B60)) == "Orya"
+ assert unicodedata.script(chr(0x104CF)) == "Osge"
+ assert unicodedata.script(chr(0x104A8)) == "Osma"
+ assert unicodedata.script(chr(0x16B12)) == "Hmng"
+ assert unicodedata.script(chr(0x10879)) == "Palm"
+ assert unicodedata.script(chr(0x11AF1)) == "Pauc"
+ assert unicodedata.script(chr(0xA869)) == "Phag"
+ assert unicodedata.script(chr(0x10909)) == "Phnx"
+ assert unicodedata.script(chr(0x10B81)) == "Phlp"
+ assert unicodedata.script(chr(0xA941)) == "Rjng"
+ assert unicodedata.script(chr(0x16C3)) == "Runr"
+ assert unicodedata.script(chr(0x0814)) == "Samr"
+ assert unicodedata.script(chr(0xA88C)) == "Saur"
+ assert unicodedata.script(chr(0x111C8)) == "Shrd"
+ assert unicodedata.script(chr(0x1045F)) == "Shaw"
+ assert unicodedata.script(chr(0x115AD)) == "Sidd"
+ assert unicodedata.script(chr(0x1D8C0)) == "Sgnw"
+ assert unicodedata.script(chr(0x0DB9)) == "Sinh"
+ assert unicodedata.script(chr(0x110F9)) == "Sora"
+ assert unicodedata.script(chr(0x11A60)) == "Soyo"
+ assert unicodedata.script(chr(0x1B94)) == "Sund"
+ assert unicodedata.script(chr(0xA81F)) == "Sylo"
+ assert unicodedata.script(chr(0x0740)) == "Syrc"
+ assert unicodedata.script(chr(0x1714)) == "Tglg"
+ assert unicodedata.script(chr(0x1761)) == "Tagb"
+ assert unicodedata.script(chr(0x1965)) == "Tale"
+ assert unicodedata.script(chr(0x1A32)) == "Lana"
+ assert unicodedata.script(chr(0xAA86)) == "Tavt"
+ assert unicodedata.script(chr(0x116A5)) == "Takr"
+ assert unicodedata.script(chr(0x0B8E)) == "Taml"
+ assert unicodedata.script(chr(0x1754D)) == "Tang"
+ assert unicodedata.script(chr(0x0C40)) == "Telu"
+ assert unicodedata.script(chr(0x07A4)) == "Thaa"
+ assert unicodedata.script(chr(0x0E42)) == "Thai"
+ assert unicodedata.script(chr(0x0F09)) == "Tibt"
+ assert unicodedata.script(chr(0x2D3A)) == "Tfng"
+ assert unicodedata.script(chr(0x114B0)) == "Tirh"
+ assert unicodedata.script(chr(0x1038B)) == "Ugar"
+ assert unicodedata.script(chr(0xA585)) == "Vaii"
+ assert unicodedata.script(chr(0x118CF)) == "Wara"
+ assert unicodedata.script(chr(0xA066)) == "Yiii"
+ assert unicodedata.script(chr(0x11A31)) == "Zanb"
+ assert unicodedata.script(chr(0x11F00)) == "Kawi"
def test_script_extension():
@@ -159,11 +160,29 @@ def test_script_extension():
assert unicodedata.script_extension(chr(0x0378)) == {"Zzzz"}
assert unicodedata.script_extension(chr(0x10FFFF)) == {"Zzzz"}
- assert unicodedata.script_extension("\u0660") == {'Arab', 'Thaa', 'Yezi'}
+ assert unicodedata.script_extension("\u0660") == {"Arab", "Thaa", "Yezi"}
assert unicodedata.script_extension("\u0964") == {
- 'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Guru', 'Knda',
- 'Mahj', 'Mlym', 'Nand', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml',
- 'Telu', 'Tirh'}
+ "Beng",
+ "Deva",
+ "Dogr",
+ "Gong",
+ "Gonm",
+ "Gran",
+ "Gujr",
+ "Guru",
+ "Knda",
+ "Mahj",
+ "Mlym",
+ "Nand",
+ "Orya",
+ "Sind",
+ "Sinh",
+ "Sylo",
+ "Takr",
+ "Taml",
+ "Telu",
+ "Tirh",
+ }
def test_script_name():
@@ -199,6 +218,7 @@ def test_block():
assert unicodedata.block("\x80") == "Latin-1 Supplement"
assert unicodedata.block("\u1c90") == "Georgian Extended"
assert unicodedata.block("\u0870") == "Arabic Extended-B"
+ assert unicodedata.block("\U00011B00") == "Devanagari Extended-A"
def test_ot_tags_from_script():
@@ -208,6 +228,7 @@ def test_ot_tags_from_script():
assert unicodedata.ot_tags_from_script("Deva") == ["dev2", "deva"]
# exceptions
assert unicodedata.ot_tags_from_script("Hira") == ["kana"]
+ assert unicodedata.ot_tags_from_script("Zmth") == ["math"]
# special script codes map to DFLT
assert unicodedata.ot_tags_from_script("Zinh") == ["DFLT"]
assert unicodedata.ot_tags_from_script("Zyyy") == ["DFLT"]
@@ -230,6 +251,7 @@ def test_ot_tag_to_script():
assert unicodedata.ot_tag_to_script("vai ") == "Vaii"
assert unicodedata.ot_tag_to_script("lao ") == "Laoo"
assert unicodedata.ot_tag_to_script("yi") == "Yiii"
+ assert unicodedata.ot_tag_to_script("math") == "Zmth"
# both 'hang' and 'jamo' tags map to the Hangul script
assert unicodedata.ot_tag_to_script("hang") == "Hang"
assert unicodedata.ot_tag_to_script("jamo") == "Hang"
@@ -247,10 +269,10 @@ def test_script_horizontal_direction():
with pytest.raises(KeyError):
unicodedata.script_horizontal_direction("Azzz")
- assert unicodedata.script_horizontal_direction("Azzz",
- default="LTR") == "LTR"
+ assert unicodedata.script_horizontal_direction("Azzz", default="LTR") == "LTR"
if __name__ == "__main__":
import sys
+
sys.exit(pytest.main(sys.argv))
diff --git a/Tests/varLib/builder_test.py b/Tests/varLib/builder_test.py
index 6cad103a..33d1dfb0 100644
--- a/Tests/varLib/builder_test.py
+++ b/Tests/varLib/builder_test.py
@@ -2,27 +2,31 @@ from fontTools.varLib.builder import buildVarData
import pytest
-@pytest.mark.parametrize("region_indices, items, expected_num_shorts", [
- ([], [], 0),
- ([0], [[1]], 0),
- ([0], [[128]], 1),
- ([0, 1, 2], [[128, 1, 2], [3, -129, 5], [6, 7, 8]], 2),
- ([0, 1, 2], [[0, 128, 2], [3, 4, 5], [6, 7, -129]], 3),
- ([0], [[32768]], 0x8001),
- ([0, 1, 2], [[32768, 1, 2], [3, -129, 5], [6, 7, 8]], 0x8001),
- ([0, 1, 2], [[32768, 1, 2], [3, -32769, 5], [6, 7, 8]], 0x8002),
- ([0, 1, 2], [[0, 32768, 2], [3, 4, 5], [6, 7, -32769]], 0x8003),
-], ids=[
- "0_regions_0_deltas",
- "1_region_1_uint8",
- "1_region_1_short",
- "3_regions_2_shorts_ordered",
- "3_regions_2_shorts_unordered",
- "1_region_1_long",
- "3_regions_1_long_ordered",
- "3_regions_2_longs_ordered",
- "3_regions_2_longs_unordered",
-])
+@pytest.mark.parametrize(
+ "region_indices, items, expected_num_shorts",
+ [
+ ([], [], 0),
+ ([0], [[1]], 0),
+ ([0], [[128]], 1),
+ ([0, 1, 2], [[128, 1, 2], [3, -129, 5], [6, 7, 8]], 2),
+ ([0, 1, 2], [[0, 128, 2], [3, 4, 5], [6, 7, -129]], 3),
+ ([0], [[32768]], 0x8001),
+ ([0, 1, 2], [[32768, 1, 2], [3, -129, 5], [6, 7, 8]], 0x8001),
+ ([0, 1, 2], [[32768, 1, 2], [3, -32769, 5], [6, 7, 8]], 0x8002),
+ ([0, 1, 2], [[0, 32768, 2], [3, 4, 5], [6, 7, -32769]], 0x8003),
+ ],
+ ids=[
+ "0_regions_0_deltas",
+ "1_region_1_uint8",
+ "1_region_1_short",
+ "3_regions_2_shorts_ordered",
+ "3_regions_2_shorts_unordered",
+ "1_region_1_long",
+ "3_regions_1_long_ordered",
+ "3_regions_2_longs_ordered",
+ "3_regions_2_longs_unordered",
+ ],
+)
def test_buildVarData_no_optimize(region_indices, items, expected_num_shorts):
data = buildVarData(region_indices, items, optimize=False)
@@ -33,48 +37,110 @@ def test_buildVarData_no_optimize(region_indices, items, expected_num_shorts):
assert data.Item == items
-@pytest.mark.parametrize([
- "region_indices", "items", "expected_num_shorts",
- "expected_regions", "expected_items"
-], [
- ([0, 1, 2], [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 0,
- [0, 1, 2], [[0, 1, 2], [3, 4, 5], [6, 7, 8]]),
- ([0, 1, 2], [[0, 128, 2], [3, 4, 5], [6, 7, 8]], 1,
- [1, 0, 2], [[128, 0, 2], [4, 3, 5], [7, 6, 8]]),
- ([0, 1, 2], [[0, 1, 128], [3, 4, 5], [6, -129, 8]], 2,
- [1, 2, 0], [[1, 128, 0], [4, 5, 3], [-129, 8, 6]]),
- ([0, 1, 2], [[128, 1, -129], [3, 4, 5], [6, 7, 8]], 2,
- [0, 2, 1], [[128, -129, 1], [3, 5, 4], [6, 8, 7]]),
- ([0, 1, 2], [[0, 1, 128], [3, -129, 5], [256, 7, 8]], 3,
- [0, 1, 2], [[0, 1, 128], [3, -129, 5], [256, 7, 8]]),
- ([0, 1, 2], [[0, 128, 2], [0, 4, 5], [0, 7, 8]], 1,
- [1, 2], [[128, 2], [4, 5], [7, 8]]),
- ([0, 1, 2], [[0, 32768, 2], [3, 4, 5], [6, 7, 8]], 0x8001,
- [1, 0, 2], [[32768, 0, 2], [4, 3, 5], [7, 6, 8]]),
- ([0, 1, 2], [[0, 1, 32768], [3, 4, 5], [6, -32769, 8]], 0x8002,
- [1, 2, 0], [[1, 32768, 0], [4, 5, 3], [-32769, 8, 6]]),
- ([0, 1, 2], [[32768, 1, -32769], [3, 4, 5], [6, 7, 8]], 0x8002,
- [0, 2, 1], [[32768, -32769, 1], [3, 5, 4], [6, 8, 7]]),
- ([0, 1, 2], [[0, 1, 32768], [3, -32769, 5], [65536, 7, 8]], 0x8003,
- [0, 1, 2], [[0, 1, 32768], [3, -32769, 5], [65536, 7, 8]]),
- ([0, 1, 2], [[0, 32768, 2], [0, 4, 5], [0, 7, 8]], 0x8001,
- [1, 2], [[32768, 2], [4, 5], [7, 8]]),
-], ids=[
- "0/3_shorts_no_reorder",
- "1/3_shorts_reorder",
- "2/3_shorts_reorder",
- "2/3_shorts_same_row_reorder",
- "3/3_shorts_no_reorder",
- "1/3_shorts_1/3_zeroes",
- "1/3_longs_reorder",
- "2/3_longs_reorder",
- "2/3_longs_same_row_reorder",
- "3/3_longs_no_reorder",
- "1/3_longs_1/3_zeroes",
-])
+@pytest.mark.parametrize(
+ [
+ "region_indices",
+ "items",
+ "expected_num_shorts",
+ "expected_regions",
+ "expected_items",
+ ],
+ [
+ (
+ [0, 1, 2],
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8]],
+ 0,
+ [0, 1, 2],
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 128, 2], [3, 4, 5], [6, 7, 8]],
+ 1,
+ [1, 0, 2],
+ [[128, 0, 2], [4, 3, 5], [7, 6, 8]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 1, 128], [3, 4, 5], [6, -129, 8]],
+ 2,
+ [1, 2, 0],
+ [[1, 128, 0], [4, 5, 3], [-129, 8, 6]],
+ ),
+ (
+ [0, 1, 2],
+ [[128, 1, -129], [3, 4, 5], [6, 7, 8]],
+ 2,
+ [0, 2, 1],
+ [[128, -129, 1], [3, 5, 4], [6, 8, 7]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 1, 128], [3, -129, 5], [256, 7, 8]],
+ 3,
+ [0, 1, 2],
+ [[0, 1, 128], [3, -129, 5], [256, 7, 8]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 128, 2], [0, 4, 5], [0, 7, 8]],
+ 1,
+ [1, 2],
+ [[128, 2], [4, 5], [7, 8]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 32768, 2], [3, 4, 5], [6, 7, 8]],
+ 0x8001,
+ [1, 0, 2],
+ [[32768, 0, 2], [4, 3, 5], [7, 6, 8]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 1, 32768], [3, 4, 5], [6, -32769, 8]],
+ 0x8002,
+ [1, 2, 0],
+ [[1, 32768, 0], [4, 5, 3], [-32769, 8, 6]],
+ ),
+ (
+ [0, 1, 2],
+ [[32768, 1, -32769], [3, 4, 5], [6, 7, 8]],
+ 0x8002,
+ [0, 2, 1],
+ [[32768, -32769, 1], [3, 5, 4], [6, 8, 7]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 1, 32768], [3, -32769, 5], [65536, 7, 8]],
+ 0x8003,
+ [0, 1, 2],
+ [[0, 1, 32768], [3, -32769, 5], [65536, 7, 8]],
+ ),
+ (
+ [0, 1, 2],
+ [[0, 32768, 2], [0, 4, 5], [0, 7, 8]],
+ 0x8001,
+ [1, 2],
+ [[32768, 2], [4, 5], [7, 8]],
+ ),
+ ],
+ ids=[
+ "0/3_shorts_no_reorder",
+ "1/3_shorts_reorder",
+ "2/3_shorts_reorder",
+ "2/3_shorts_same_row_reorder",
+ "3/3_shorts_no_reorder",
+ "1/3_shorts_1/3_zeroes",
+ "1/3_longs_reorder",
+ "2/3_longs_reorder",
+ "2/3_longs_same_row_reorder",
+ "3/3_longs_no_reorder",
+ "1/3_longs_1/3_zeroes",
+ ],
+)
def test_buildVarData_optimize(
- region_indices, items, expected_num_shorts, expected_regions,
- expected_items):
+ region_indices, items, expected_num_shorts, expected_regions, expected_items
+):
data = buildVarData(region_indices, items, optimize=True)
assert data.ItemCount == len(items)
@@ -86,4 +152,5 @@ def test_buildVarData_optimize(
if __name__ == "__main__":
import sys
+
sys.exit(pytest.main(sys.argv))
diff --git a/Tests/varLib/data/BuildAvar2.designspace b/Tests/varLib/data/BuildAvar2.designspace
new file mode 100644
index 00000000..1cfa94ab
--- /dev/null
+++ b/Tests/varLib/data/BuildAvar2.designspace
@@ -0,0 +1,55 @@
+<?xml version='1.0' encoding='utf-8'?>
+<designspace format="3">
+ <axes>
+ <axis default="400.0" maximum="900.0" minimum="100.0" name="weight" tag="wght">
+ <map input="100.0" output="26" />
+ <map input="200.0" output="39" />
+ <map input="300.0" output="58" />
+ <map input="400.0" output="90" />
+ <map input="500.0" output="108" />
+ <map input="600.0" output="128" />
+ <map input="700.0" output="151" />
+ <map input="800.0" output="169" />
+ <map input="900.0" output="190" />
+ <labelname xml:lang="en">Weight</labelname>
+ </axis>
+ <mappings>
+ <mapping>
+ <input>
+ <dimension name="weight" xvalue="128"/>
+ </input>
+ <output>
+ <dimension name="weight" xvalue="138"/>
+ </output>
+ </mapping>
+ </mappings>
+ </axes>
+ <sources>
+ <source familyname="Test Family 3" filename="master_ufo/TestFamily3-Light.ufo" name="Test Family 3 Light" stylename="Light">
+ <location>
+ <dimension name="weight" xvalue="26.000000" />
+ </location>
+ </source>
+ <source familyname="Test Family 3" filename="master_ufo/TestFamily3-Regular.ufo" name="Test Family 3 Regular" stylename="Regular">
+ <lib copy="1" />
+ <groups copy="1" />
+ <features copy="1" />
+ <info copy="1" />
+ <location>
+ <dimension name="weight" xvalue="90.000000" />
+ </location>
+ </source>
+ <source familyname="Test Family 3" filename="master_ufo/TestFamily3-SemiBold.ufo" name="Test Family 3 SemiBold" stylename="SemiBold">
+ <location>
+ <dimension name="weight" xvalue="151.000000" />
+ </location>
+ </source>
+ <source familyname="Test Family 3" filename="master_ufo/TestFamily3-Bold.ufo" name="Test Family 3 Bold" stylename="Bold">
+ <location>
+ <dimension name="weight" xvalue="190.000000" />
+ </location>
+ </source>
+ </sources>
+ <instances>
+ </instances>
+</designspace>
diff --git a/Tests/varLib/data/DropOnCurves.designspace b/Tests/varLib/data/DropOnCurves.designspace
new file mode 100644
index 00000000..a4769aa2
--- /dev/null
+++ b/Tests/varLib/data/DropOnCurves.designspace
@@ -0,0 +1,20 @@
+<?xml version='1.0' encoding='utf-8'?>
+<designspace format="3">
+ <axes>
+ <axis default="400" maximum="1000" minimum="400" name="weight" tag="wght" />
+ </axes>
+ <sources>
+ <source familyname="Test Family" filename="master_ufo/TestFamily-Master1.ttx" name="master_1" stylename="Master1">
+ <location>
+ <dimension name="weight" xvalue="400" />
+ </location>
+ </source>
+ <source familyname="Test Family" filename="master_ufo/TestFamily-Master2.ttx" name="master_2" stylename="Master2">
+ <location>
+ <dimension name="weight" xvalue="1000" />
+ </location>
+ </source>
+ </sources>
+ <instances>
+ </instances>
+</designspace>
diff --git a/Tests/varLib/data/InterpolateLayout.glyphs b/Tests/varLib/data/InterpolateLayout.glyphs
new file mode 100644
index 00000000..90493950
--- /dev/null
+++ b/Tests/varLib/data/InterpolateLayout.glyphs
@@ -0,0 +1,2402 @@
+{
+.appVersion = "895";
+customParameters = (
+{
+name = hheaAscender;
+value = 984;
+},
+{
+name = hheaDescender;
+value = -273;
+},
+{
+name = hheaLineGap;
+value = 0;
+},
+{
+name = panose;
+value = (
+2,
+11,
+5,
+3,
+3,
+4,
+3,
+2,
+2,
+4
+);
+},
+{
+name = typoAscender;
+value = 750;
+},
+{
+name = typoDescender;
+value = -250;
+},
+{
+name = typoLineGap;
+value = 0;
+},
+{
+name = unicodeRanges;
+value = (
+0,
+1
+);
+},
+{
+name = blueScale;
+value = 0.0625;
+},
+{
+name = underlinePosition;
+value = -75;
+},
+{
+name = vendorID;
+value = ADBO;
+},
+{
+name = postscriptFontName;
+value = "TestFamily2-Master0";
+},
+{
+name = postscriptBlueFuzz;
+value = 0;
+},
+{
+name = postscriptForceBold;
+value = 0;
+},
+{
+name = styleMapFamilyName;
+value = "Test Family 2";
+},
+{
+name = postscriptFamilyBlues;
+value = (
+-12,
+0,
+486,
+498,
+518,
+530,
+574,
+586,
+638,
+650,
+656,
+668,
+712,
+724
+);
+},
+{
+name = postscriptFamilyOtherBlues;
+value = (
+-217,
+-205
+);
+},
+{
+name = codePageRanges;
+value = (
+1252,
+1250
+);
+},
+{
+name = codePageRangesUnsupportedBits;
+value = (
+29
+);
+},
+{
+name = winAscent;
+value = 984;
+},
+{
+name = winDescent;
+value = 273;
+},
+{
+name = weightClass;
+value = 200;
+},
+{
+name = glyphOrder;
+value = (
+.notdef,
+space,
+A,
+a,
+d,
+f,
+n,
+t,
+f_t,
+a.alt,
+A.sc,
+atilde,
+ampersand,
+circledotted,
+tildecmb,
+dieresiscmb,
+tildebelowcmb,
+dieresisbelowcmb
+);
+},
+{
+name = "Disable Last Change";
+value = 1;
+},
+{
+name = Axes;
+value = (
+{
+Name = weight;
+Tag = wght;
+}
+);
+}
+);
+designer = "Paul D. Hunt";
+disablesAutomaticAlignment = 1;
+familyName = "Test Family 2";
+featurePrefixes = (
+{
+code = "# Do not use Glyphs to edit features.\012#\012# This Glyphs file was made from several UFOs that had different\012# features. As a result, the features are not editable in Glyphs and\012# the original features will be restored when you go back to UFOs.\012";
+name = WARNING;
+}
+);
+fontMaster = (
+{
+alignmentZones = (
+"{722, 12}",
+"{660, 12}",
+"{640, 12}",
+"{570, 12}",
+"{510, 12}",
+"{478, 12}",
+"{0, -12}",
+"{-222, -12}"
+);
+ascender = 722;
+capHeight = 660;
+customParameters = (
+{
+name = "UFO Filename";
+value = "master_ufo/TestFamily2-Master0.ufo";
+},
+{
+name = "Master Name";
+value = "Master 0";
+},
+{
+name = hheaAscender;
+value = 984;
+},
+{
+name = hheaDescender;
+value = -273;
+},
+{
+name = hheaLineGap;
+value = 0;
+},
+{
+name = panose;
+value = (
+2,
+11,
+5,
+3,
+3,
+4,
+3,
+2,
+2,
+4
+);
+},
+{
+name = typoAscender;
+value = 750;
+},
+{
+name = typoDescender;
+value = -250;
+},
+{
+name = typoLineGap;
+value = 0;
+},
+{
+name = unicodeRanges;
+value = (
+0,
+1
+);
+},
+{
+name = blueScale;
+value = 0.0625;
+},
+{
+name = underlinePosition;
+value = -75;
+},
+{
+name = vendorID;
+value = ADBO;
+},
+{
+name = postscriptFontName;
+value = "TestFamily2-Master0";
+},
+{
+name = postscriptBlueFuzz;
+value = 0;
+},
+{
+name = postscriptForceBold;
+value = 0;
+},
+{
+name = styleMapFamilyName;
+value = "Test Family 2";
+},
+{
+name = postscriptFamilyBlues;
+value = (
+-12,
+0,
+486,
+498,
+518,
+530,
+574,
+586,
+638,
+650,
+656,
+668,
+712,
+724
+);
+},
+{
+name = postscriptFamilyOtherBlues;
+value = (
+-217,
+-205
+);
+},
+{
+name = codePageRanges;
+value = (
+1252,
+1250
+);
+},
+{
+name = codePageRangesUnsupportedBits;
+value = (
+29
+);
+},
+{
+name = winAscent;
+value = 984;
+},
+{
+name = winDescent;
+value = 273;
+},
+{
+name = weightClass;
+value = 200;
+}
+);
+descender = -222;
+horizontalStems = (
+28,
+40
+);
+id = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+userData = {
+com.schriftgestaltung.Glyphs.originalFeatureCode = "table head {\012 FontRevision 2.020;\012} head;\012\012\012table name {\012 nameid 9 \"Paul D. Hunt\";\012 nameid 9 1 \"Paul D. Hunt\";\012} name;\012\012\012table hhea {\012 Ascender 984;\012 Descender -273;\012 LineGap 0;\012} hhea;\012\012\012table BASE {\012 HorizAxis.BaseTagList ideo romn;\012 HorizAxis.BaseScriptList\012 latn romn -170 0,\012 grek romn -170 0,\012 cyrl romn -170 0,\012 DFLT romn -170 0;\012} BASE;\012\012\012table OS/2 {\012 Panose 2 11 3 3 3 4 3 2 2 4;\012 XHeight 478;\012 WeightClass 200;\012\012 TypoAscender 750;\012 TypoDescender -250;\012 TypoLineGap 0;\012 winAscent 984;\012 winDescent 273;\012\012 CapHeight 660;\012 WidthClass 5;\012 Vendor \"ADBO\";\012 FSType 0;\012} OS/2;\012\012\012languagesystem DFLT dflt;\012languagesystem latn dflt;\012\012# GSUB =========================================\012# Merging of GSUB is not performed. The variable\012# font will inherit the GSUB table from the\012# base master.\012\012feature c2sc {\012 sub A by A.sc; # GSUB LookupType 1\012} c2sc;\012\012feature ss01 {\012 featureNames {\012 name \"Alternate a\";\012 name 1 0 0 \"Alternate a\";};\012 sub a by a.alt;\012} ss01;\012\012feature ccmp {\012 sub ampersand by a n d; # GSUB LookupType 2\012} ccmp;\012\012feature salt {\012 sub a from [a.alt A.sc]; # GSUB LookupType 3\012} salt;\012\012feature liga {\012 sub f t by f_t; # GSUB LookupType 4\012} liga;\012\012feature calt {\012 sub a' t by a.alt; # GSUB LookupType 6\012} calt;\012\012";
+};
+verticalStems = (
+32,
+48
+);
+weightValue = 0;
+xHeight = 478;
+},
+{
+alignmentZones = (
+"{696, 12}",
+"{650, 12}",
+"{634, 12}",
+"{580, 12}",
+"{532, 12}",
+"{500, 12}",
+"{0, -12}",
+"{-176, -12}"
+);
+ascender = 696;
+capHeight = 650;
+customParameters = (
+{
+name = "UFO Filename";
+value = "master_ufo/TestFamily2-Master1.ufo";
+},
+{
+name = "Master Name";
+value = "Master 1";
+},
+{
+name = hheaAscender;
+value = 984;
+},
+{
+name = hheaDescender;
+value = -273;
+},
+{
+name = hheaLineGap;
+value = 0;
+},
+{
+name = panose;
+value = (
+2,
+11,
+5,
+3,
+3,
+4,
+3,
+2,
+2,
+4
+);
+},
+{
+name = typoAscender;
+value = 750;
+},
+{
+name = typoDescender;
+value = -250;
+},
+{
+name = typoLineGap;
+value = 0;
+},
+{
+name = unicodeRanges;
+value = (
+0,
+1
+);
+},
+{
+name = blueScale;
+value = 0.0625;
+},
+{
+name = underlinePosition;
+value = -75;
+},
+{
+name = vendorID;
+value = ADBO;
+},
+{
+name = postscriptFontName;
+value = "TestFamily2-Master1";
+},
+{
+name = postscriptBlueFuzz;
+value = 0;
+},
+{
+name = postscriptForceBold;
+value = 0;
+},
+{
+name = styleMapFamilyName;
+value = "Test Family 2";
+},
+{
+name = postscriptFamilyBlues;
+value = (
+-12,
+0,
+486,
+498,
+518,
+530,
+574,
+586,
+638,
+650,
+656,
+668,
+712,
+724
+);
+},
+{
+name = postscriptFamilyOtherBlues;
+value = (
+-217,
+-205
+);
+},
+{
+name = codePageRanges;
+value = (
+1252,
+1250
+);
+},
+{
+name = codePageRangesUnsupportedBits;
+value = (
+29
+);
+},
+{
+name = winAscent;
+value = 984;
+},
+{
+name = winDescent;
+value = 273;
+},
+{
+name = weightClass;
+value = 900;
+}
+);
+descender = -176;
+horizontalStems = (
+134,
+144
+);
+id = "A99E50E2-B754-449B-A60B-37BA27802C99";
+userData = {
+com.schriftgestaltung.Glyphs.originalFeatureCode = "table head {\012 FontRevision 2.020;\012} head;\012\012\012table name {\012 nameid 9 \"Paul D. Hunt\";\012 nameid 9 1 \"Paul D. Hunt\";\012} name;\012\012\012table hhea {\012 Ascender 984;\012 Descender -273;\012 LineGap 0;\012} hhea;\012\012\012table BASE {\012 HorizAxis.BaseTagList ideo romn;\012 HorizAxis.BaseScriptList\012 latn romn -170 0,\012 grek romn -170 0,\012 cyrl romn -170 0,\012 DFLT romn -170 0;\012} BASE;\012\012\012table OS/2 {\012 Panose 2 11 8 3 3 4 3 2 2 4;\012 XHeight 500;\012 WeightClass 900;\012\012 TypoAscender 750;\012 TypoDescender -250;\012 TypoLineGap 0;\012 winAscent 984;\012 winDescent 273;\012\012 CapHeight 660;\012 WidthClass 5;\012 Vendor \"ADBO\";\012 FSType 0;\012} OS/2;\012\012\012languagesystem DFLT dflt;\012languagesystem latn dflt;\012\012# GSUB =========================================\012# No merging of GSUB is performed. The variable\012# font will inherit the GSUB table from the\012# base master.\012\012";
+};
+verticalStems = (
+172,
+176
+);
+weightValue = 1000;
+xHeight = 500;
+}
+);
+glyphs = (
+{
+glyphname = .notdef;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"528 0 LINE",
+"528 660 LINE",
+"96 660 LINE",
+"96 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"246 208 LINE",
+"310 314 LINE",
+"314 314 LINE",
+"376 208 LINE",
+"476 32 LINE",
+"144 32 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"254 458 LINE",
+"160 626 LINE",
+"462 626 LINE",
+"368 458 LINE",
+"314 366 LINE",
+"310 366 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"134 610 LINE",
+"288 340 LINE",
+"134 74 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"336 340 LINE",
+"488 610 LINE",
+"488 74 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 624;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"628 0 LINE",
+"628 660 LINE",
+"76 660 LINE",
+"76 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"314 160 LINE",
+"350 256 LINE",
+"354 256 LINE",
+"390 160 LINE",
+"416 104 LINE",
+"288 104 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"310 520 LINE",
+"292 556 LINE",
+"412 556 LINE",
+"394 520 LINE",
+"354 424 LINE",
+"350 424 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"188 508 LINE",
+"270 340 LINE",
+"188 172 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"434 340 LINE",
+"516 508 LINE",
+"516 172 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 704;
+}
+);
+note = "";
+},
+{
+glyphname = space;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+vertWidth = 0;
+width = 200;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+vertWidth = 0;
+width = 200;
+}
+);
+note = "";
+unicode = 0020;
+},
+{
+glyphname = A;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"42 0 LINE",
+"182 396 LINE SMOOTH",
+"210 476 OFFCURVE",
+"234 544 OFFCURVE",
+"258 626 CURVE",
+"262 626 LINE",
+"286 544 OFFCURVE",
+"310 476 OFFCURVE",
+"338 396 CURVE SMOOTH",
+"476 0 LINE",
+"510 0 LINE",
+"274 660 LINE",
+"246 660 LINE",
+"10 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"405 236 LINE",
+"405 264 LINE",
+"112 264 LINE",
+"112 236 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 520;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"166 0 LINE",
+"240 316 LINE SMOOTH",
+"256 378 OFFCURVE",
+"272 456 OFFCURVE",
+"286 522 CURVE",
+"290 522 LINE",
+"306 457 OFFCURVE",
+"322 378 OFFCURVE",
+"338 316 CURVE SMOOTH",
+"412 0 LINE",
+"594 0 LINE",
+"396 650 LINE",
+"188 650 LINE",
+"-10 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"450 138 LINE",
+"450 271 LINE",
+"132 271 LINE",
+"132 138 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 584;
+}
+);
+note = "";
+unicode = 0041;
+},
+{
+glyphname = a;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"262 -12 OFFCURVE",
+"322 24 OFFCURVE",
+"372 64 CURVE",
+"374 64 LINE",
+"378 0 LINE",
+"404 0 LINE",
+"404 310 LINE SMOOTH",
+"404 406 OFFCURVE",
+"370 490 OFFCURVE",
+"258 490 CURVE SMOOTH",
+"180 490 OFFCURVE",
+"114 450 OFFCURVE",
+"84 428 CURVE",
+"100 404 LINE",
+"130 428 OFFCURVE",
+"188 462 OFFCURVE",
+"256 462 CURVE SMOOTH",
+"356 462 OFFCURVE",
+"376 376 OFFCURVE",
+"374 298 CURVE",
+"158 274 OFFCURVE",
+"60 224 OFFCURVE",
+"60 117 CURVE SMOOTH",
+"60 26 OFFCURVE",
+"124 -12 OFFCURVE",
+"198 -12 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"142 16 OFFCURVE",
+"92 44 OFFCURVE",
+"92 118 CURVE SMOOTH",
+"92 200 OFFCURVE",
+"164 248 OFFCURVE",
+"374 272 CURVE",
+"374 98 LINE",
+"310 44 OFFCURVE",
+"258 16 OFFCURVE",
+"200 16 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 486;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"242 -12 OFFCURVE",
+"286 12 OFFCURVE",
+"326 48 CURVE",
+"330 48 LINE",
+"342 0 LINE",
+"482 0 LINE",
+"482 278 LINE SMOOTH",
+"482 442 OFFCURVE",
+"404 512 OFFCURVE",
+"274 512 CURVE SMOOTH",
+"196 512 OFFCURVE",
+"124 488 OFFCURVE",
+"54 446 CURVE",
+"114 334 LINE",
+"166 362 OFFCURVE",
+"204 376 OFFCURVE",
+"240 376 CURVE SMOOTH",
+"284 376 OFFCURVE",
+"306 360 OFFCURVE",
+"310 324 CURVE",
+"118 304 OFFCURVE",
+"38 246 OFFCURVE",
+"38 142 CURVE SMOOTH",
+"38 60 OFFCURVE",
+"94 -12 OFFCURVE",
+"188 -12 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"218 120 OFFCURVE",
+"202 133 OFFCURVE",
+"202 156 CURVE SMOOTH",
+"202 184 OFFCURVE",
+"228 210 OFFCURVE",
+"310 222 CURVE",
+"310 154 LINE",
+"292 134 OFFCURVE",
+"276 120 OFFCURVE",
+"248 120 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 536;
+}
+);
+note = "";
+unicode = 0061;
+},
+{
+glyphname = d;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"318 -12 OFFCURVE",
+"372 24 OFFCURVE",
+"412 64 CURVE",
+"414 64 LINE",
+"418 0 LINE",
+"444 0 LINE",
+"444 722 LINE",
+"414 722 LINE",
+"414 520 LINE",
+"416 430 LINE",
+"366 468 OFFCURVE",
+"326 490 OFFCURVE",
+"268 490 CURVE SMOOTH",
+"152 490 OFFCURVE",
+"54 392 OFFCURVE",
+"54 238 CURVE SMOOTH",
+"54 76 OFFCURVE",
+"132 -12 OFFCURVE",
+"252 -12 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"146 16 OFFCURVE",
+"86 106 OFFCURVE",
+"86 238 CURVE SMOOTH",
+"86 362 OFFCURVE",
+"164 462 OFFCURVE",
+"266 462 CURVE SMOOTH",
+"316 462 OFFCURVE",
+"360 444 OFFCURVE",
+"414 396 CURVE",
+"414 100 LINE",
+"360 46 OFFCURVE",
+"310 16 OFFCURVE",
+"254 16 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 540;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"284 -12 OFFCURVE",
+"332 12 OFFCURVE",
+"366 46 CURVE",
+"370 46 LINE",
+"382 0 LINE",
+"522 0 LINE",
+"522 696 LINE",
+"350 696 LINE",
+"350 534 LINE",
+"356 462 LINE",
+"326 492 OFFCURVE",
+"294 512 OFFCURVE",
+"240 512 CURVE SMOOTH",
+"138 512 OFFCURVE",
+"36 414 OFFCURVE",
+"36 250 CURVE SMOOTH",
+"36 88 OFFCURVE",
+"116 -12 OFFCURVE",
+"240 -12 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"240 128 OFFCURVE",
+"212 162 OFFCURVE",
+"212 252 CURVE SMOOTH",
+"212 340 OFFCURVE",
+"246 372 OFFCURVE",
+"282 372 CURVE SMOOTH",
+"304 372 OFFCURVE",
+"330 366 OFFCURVE",
+"350 348 CURVE",
+"350 164 LINE",
+"332 136 OFFCURVE",
+"312 128 OFFCURVE",
+"286 128 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 580;
+}
+);
+note = "";
+unicode = 0064;
+},
+{
+glyphname = f;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"130 0 LINE",
+"130 592 LINE SMOOTH",
+"130 664 OFFCURVE",
+"154 706 OFFCURVE",
+"208 706 CURVE SMOOTH",
+"226 706 OFFCURVE",
+"246 702 OFFCURVE",
+"266 692 CURVE",
+"276 718 LINE",
+"254 728 OFFCURVE",
+"230 734 OFFCURVE",
+"210 734 CURVE SMOOTH",
+"142 734 OFFCURVE",
+"100 690 OFFCURVE",
+"100 596 CURVE SMOOTH",
+"100 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"244 450 LINE",
+"244 478 LINE",
+"100 478 LINE",
+"34 474 LINE",
+"34 450 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 252;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"260 0 LINE",
+"260 512 LINE SMOOTH",
+"260 559 OFFCURVE",
+"280 574 OFFCURVE",
+"312 574 CURVE SMOOTH",
+"328 574 OFFCURVE",
+"346 570 OFFCURVE",
+"362 564 CURVE",
+"392 690 LINE",
+"370 698 OFFCURVE",
+"332 708 OFFCURVE",
+"286 708 CURVE SMOOTH",
+"138 708 OFFCURVE",
+"88 613 OFFCURVE",
+"88 506 CURVE SMOOTH",
+"88 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"344 366 LINE",
+"344 500 LINE",
+"98 500 LINE",
+"22 494 LINE",
+"22 366 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 360;
+}
+);
+note = "";
+unicode = 0066;
+},
+{
+glyphname = n;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"126 0 LINE",
+"126 366 LINE",
+"188 430 OFFCURVE",
+"232 462 OFFCURVE",
+"292 462 CURVE SMOOTH",
+"374 462 OFFCURVE",
+"408 410 OFFCURVE",
+"408 304 CURVE SMOOTH",
+"408 0 LINE",
+"438 0 LINE",
+"438 308 LINE SMOOTH",
+"438 432 OFFCURVE",
+"392 490 OFFCURVE",
+"294 490 CURVE SMOOTH",
+"228 490 OFFCURVE",
+"178 452 OFFCURVE",
+"128 402 CURVE",
+"126 402 LINE",
+"122 478 LINE",
+"96 478 LINE",
+"96 0 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 526;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"230 0 LINE",
+"230 328 LINE",
+"256 352 OFFCURVE",
+"274 366 OFFCURVE",
+"306 366 CURVE SMOOTH",
+"340 366 OFFCURVE",
+"356 350 OFFCURVE",
+"356 286 CURVE SMOOTH",
+"356 0 LINE",
+"528 0 LINE",
+"528 308 LINE SMOOTH",
+"528 432 OFFCURVE",
+"482 512 OFFCURVE",
+"372 512 CURVE SMOOTH",
+"304 512 OFFCURVE",
+"254 478 OFFCURVE",
+"214 440 CURVE",
+"210 440 LINE",
+"198 500 LINE",
+"58 500 LINE",
+"58 0 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 582;
+}
+);
+note = "";
+unicode = 006E;
+},
+{
+glyphname = t;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"234 -12 OFFCURVE",
+"264 -4 OFFCURVE",
+"292 6 CURVE",
+"282 32 LINE",
+"264 24 OFFCURVE",
+"238 16 OFFCURVE",
+"220 16 CURVE SMOOTH",
+"150 16 OFFCURVE",
+"136 60 OFFCURVE",
+"136 122 CURVE SMOOTH",
+"136 450 LINE",
+"278 450 LINE",
+"278 478 LINE",
+"136 478 LINE",
+"136 618 LINE",
+"110 618 LINE",
+"106 478 LINE",
+"30 474 LINE",
+"30 450 LINE",
+"106 450 LINE",
+"106 126 LINE SMOOTH",
+"106 44 OFFCURVE",
+"130 -12 OFFCURVE",
+"218 -12 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 302;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"319 -12 OFFCURVE",
+"356 -2 OFFCURVE",
+"382 6 CURVE",
+"356 130 LINE",
+"344 126 OFFCURVE",
+"328 122 OFFCURVE",
+"312 122 CURVE SMOOTH",
+"280 122 OFFCURVE",
+"252 140 OFFCURVE",
+"252 195 CURVE SMOOTH",
+"252 366 LINE",
+"366 366 LINE",
+"366 500 LINE",
+"252 500 LINE",
+"252 630 LINE",
+"110 630 LINE",
+"90 500 LINE",
+"14 494 LINE",
+"14 366 LINE",
+"80 366 LINE",
+"80 192 LINE SMOOTH",
+"80 70 OFFCURVE",
+"134 -12 OFFCURVE",
+"264 -12 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 400;
+}
+);
+note = "";
+unicode = 0074;
+},
+{
+glyphname = f_t;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"130 0 LINE",
+"130 592 LINE SMOOTH",
+"130 664 OFFCURVE",
+"154 706 OFFCURVE",
+"208 706 CURVE SMOOTH",
+"226 706 OFFCURVE",
+"246 702 OFFCURVE",
+"266 692 CURVE",
+"276 718 LINE",
+"254 728 OFFCURVE",
+"230 734 OFFCURVE",
+"210 734 CURVE SMOOTH",
+"142 734 OFFCURVE",
+"100 690 OFFCURVE",
+"100 596 CURVE SMOOTH",
+"100 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"450 -12 OFFCURVE",
+"480 -4 OFFCURVE",
+"508 6 CURVE",
+"498 32 LINE",
+"480 24 OFFCURVE",
+"454 16 OFFCURVE",
+"436 16 CURVE SMOOTH",
+"366 16 OFFCURVE",
+"352 60 OFFCURVE",
+"352 122 CURVE SMOOTH",
+"352 450 LINE",
+"494 450 LINE",
+"494 478 LINE",
+"352 478 LINE",
+"352 618 LINE",
+"326 618 LINE",
+"322 478 LINE",
+"100 478 LINE",
+"34 474 LINE",
+"34 450 LINE",
+"322 450 LINE",
+"322 126 LINE SMOOTH",
+"322 44 OFFCURVE",
+"346 -12 OFFCURVE",
+"434 -12 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 518;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"260 0 LINE",
+"260 512 LINE SMOOTH",
+"260 559 OFFCURVE",
+"280 574 OFFCURVE",
+"312 574 CURVE SMOOTH",
+"328 574 OFFCURVE",
+"346 570 OFFCURVE",
+"362 564 CURVE",
+"392 690 LINE",
+"370 698 OFFCURVE",
+"332 708 OFFCURVE",
+"286 708 CURVE SMOOTH",
+"138 708 OFFCURVE",
+"88 613 OFFCURVE",
+"88 506 CURVE SMOOTH",
+"88 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"643 -12 OFFCURVE",
+"680 -2 OFFCURVE",
+"706 6 CURVE",
+"680 130 LINE",
+"668 126 OFFCURVE",
+"652 122 OFFCURVE",
+"636 122 CURVE SMOOTH",
+"604 122 OFFCURVE",
+"576 140 OFFCURVE",
+"576 195 CURVE",
+"576 366 LINE",
+"690 366 LINE",
+"690 500 LINE",
+"576 500 LINE",
+"576 630 LINE",
+"434 630 LINE",
+"414 500 LINE",
+"98 500 LINE",
+"22 494 LINE",
+"22 366 LINE",
+"404 366 LINE",
+"404 192 LINE SMOOTH",
+"404 70 OFFCURVE",
+"458 -12 OFFCURVE",
+"588 -12 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 724;
+}
+);
+note = "";
+},
+{
+glyphname = a.alt;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"318 -12 OFFCURVE",
+"372 24 OFFCURVE",
+"412 64 CURVE",
+"414 64 LINE",
+"418 0 LINE",
+"444 0 LINE",
+"444 478 LINE",
+"416 478 LINE",
+"414 432 LINE",
+"412 432 LINE",
+"366 468 OFFCURVE",
+"326 490 OFFCURVE",
+"268 490 CURVE SMOOTH",
+"152 490 OFFCURVE",
+"54 392 OFFCURVE",
+"54 238 CURVE SMOOTH",
+"54 76 OFFCURVE",
+"132 -12 OFFCURVE",
+"252 -12 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"146 16 OFFCURVE",
+"86 106 OFFCURVE",
+"86 238 CURVE SMOOTH",
+"86 362 OFFCURVE",
+"164 462 OFFCURVE",
+"266 462 CURVE SMOOTH",
+"316 462 OFFCURVE",
+"360 444 OFFCURVE",
+"414 396 CURVE",
+"414 100 LINE",
+"360 46 OFFCURVE",
+"310 16 OFFCURVE",
+"254 16 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 540;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"284 -12 OFFCURVE",
+"332 12 OFFCURVE",
+"366 46 CURVE",
+"370 46 LINE",
+"382 0 LINE",
+"522 0 LINE",
+"522 500 LINE",
+"388 500 LINE",
+"374 450 LINE",
+"370 450 LINE",
+"332 494 OFFCURVE",
+"292 512 OFFCURVE",
+"244 512 CURVE SMOOTH",
+"142 512 OFFCURVE",
+"36 414 OFFCURVE",
+"36 250 CURVE SMOOTH",
+"36 88 OFFCURVE",
+"116 -12 OFFCURVE",
+"240 -12 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"240 128 OFFCURVE",
+"212 162 OFFCURVE",
+"212 252 CURVE SMOOTH",
+"212 340 OFFCURVE",
+"246 372 OFFCURVE",
+"282 372 CURVE SMOOTH",
+"304 372 OFFCURVE",
+"330 366 OFFCURVE",
+"350 348 CURVE",
+"350 164 LINE",
+"332 136 OFFCURVE",
+"312 128 OFFCURVE",
+"286 128 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 580;
+}
+);
+note = "";
+},
+{
+glyphname = A.sc;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"42 0 LINE",
+"158 304 LINE SMOOTH",
+"181 366 OFFCURVE",
+"199 414 OFFCURVE",
+"220 475 CURVE",
+"224 475 LINE",
+"245 415 OFFCURVE",
+"263 367 OFFCURVE",
+"286 304 CURVE SMOOTH",
+"400 0 LINE",
+"434 0 LINE",
+"236 510 LINE",
+"207 510 LINE",
+"10 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"345 176 LINE",
+"345 204 LINE",
+"97 204 LINE",
+"97 176 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 444;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"164 0 LINE",
+"219 244 LINE SMOOTH",
+"230 292 OFFCURVE",
+"241 358 OFFCURVE",
+"252 409 CURVE",
+"256 409 LINE",
+"269 359 OFFCURVE",
+"280 292 OFFCURVE",
+"291 244 CURVE SMOOTH",
+"346 0 LINE",
+"526 0 LINE",
+"361 532 LINE",
+"155 532 LINE",
+"-10 0 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"397 94 LINE",
+"397 216 LINE",
+"118 216 LINE",
+"118 94 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 516;
+}
+);
+note = "";
+},
+{
+glyphname = atilde;
+layers = (
+{
+components = (
+{
+name = a;
+},
+{
+name = tildecmb;
+transform = "{1, 0, 0, 1, 242, 0}";
+}
+);
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+vertWidth = 0;
+width = 486;
+},
+{
+components = (
+{
+name = a;
+},
+{
+name = tildecmb;
+transform = "{1, 0, 0, 1, 266, 0}";
+}
+);
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+vertWidth = 0;
+width = 536;
+}
+);
+note = "";
+unicode = 00E3;
+},
+{
+glyphname = ampersand;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"302 -12 OFFCURVE",
+"360 28 OFFCURVE",
+"410 84 CURVE SMOOTH",
+"468 153 OFFCURVE",
+"510 244 OFFCURVE",
+"538 342 CURVE",
+"508 342 LINE",
+"482 248 OFFCURVE",
+"444 166 OFFCURVE",
+"388 102 CURVE SMOOTH",
+"344 52 OFFCURVE",
+"288 16 OFFCURVE",
+"226 16 CURVE SMOOTH",
+"142 16 OFFCURVE",
+"70 76 OFFCURVE",
+"70 168 CURVE SMOOTH",
+"70 332 OFFCURVE",
+"364 392 OFFCURVE",
+"364 556 CURVE SMOOTH",
+"364 622 OFFCURVE",
+"328 672 OFFCURVE",
+"260 672 CURVE SMOOTH",
+"184 672 OFFCURVE",
+"130 612 OFFCURVE",
+"130 528 CURVE SMOOTH",
+"130 382 OFFCURVE",
+"264 196 OFFCURVE",
+"392 82 CURVE SMOOTH",
+"446 34 OFFCURVE",
+"496 4 OFFCURVE",
+"538 -12 CURVE",
+"550 16 LINE",
+"508 32 OFFCURVE",
+"460 62 OFFCURVE",
+"410 106 CURVE SMOOTH",
+"290 210 OFFCURVE",
+"160 392 OFFCURVE",
+"160 530 CURVE SMOOTH",
+"160 592 OFFCURVE",
+"196 644 OFFCURVE",
+"258 644 CURVE SMOOTH",
+"314 644 OFFCURVE",
+"334 598 OFFCURVE",
+"334 554 CURVE SMOOTH",
+"334 402 OFFCURVE",
+"38 346 OFFCURVE",
+"38 166 CURVE SMOOTH",
+"38 56 OFFCURVE",
+"124 -12 OFFCURVE",
+"224 -12 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 562;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"362 -12 OFFCURVE",
+"452 34 OFFCURVE",
+"516 104 CURVE SMOOTH",
+"590 187 OFFCURVE",
+"638 276 OFFCURVE",
+"668 374 CURVE",
+"512 374 LINE",
+"490 292 OFFCURVE",
+"448 228 OFFCURVE",
+"398 180 CURVE SMOOTH",
+"356 142 OFFCURVE",
+"310 118 OFFCURVE",
+"268 118 CURVE SMOOTH",
+"216 118 OFFCURVE",
+"184 146 OFFCURVE",
+"184 186 CURVE SMOOTH",
+"184 296 OFFCURVE",
+"458 332 OFFCURVE",
+"458 508 CURVE SMOOTH",
+"458 602 OFFCURVE",
+"390 662 OFFCURVE",
+"286 662 CURVE SMOOTH",
+"170 662 OFFCURVE",
+"98 580 OFFCURVE",
+"98 486 CURVE SMOOTH",
+"98 359 OFFCURVE",
+"244 182 OFFCURVE",
+"415 75 CURVE SMOOTH",
+"485 31 OFFCURVE",
+"560 0 OFFCURVE",
+"630 -12 CURVE",
+"670 126 LINE",
+"627 131 OFFCURVE",
+"573 153 OFFCURVE",
+"518 183 CURVE SMOOTH",
+"382 258 OFFCURVE",
+"239 390 OFFCURVE",
+"239 486 CURVE SMOOTH",
+"239 528 OFFCURVE",
+"263 550 OFFCURVE",
+"290 550 CURVE SMOOTH",
+"315 550 OFFCURVE",
+"328 536 OFFCURVE",
+"328 508 CURVE SMOOTH",
+"328 386 OFFCURVE",
+"22 396 OFFCURVE",
+"22 176 CURVE SMOOTH",
+"22 78 OFFCURVE",
+"95 -12 OFFCURVE",
+"246 -12 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 690;
+}
+);
+note = "";
+unicode = 0026;
+},
+{
+glyphname = circledotted;
+production = uni25CC;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"129 97 OFFCURVE",
+"141 110 OFFCURVE",
+"141 129 CURVE SMOOTH",
+"141 150 OFFCURVE",
+"128 161 OFFCURVE",
+"110 161 CURVE SMOOTH",
+"94 161 OFFCURVE",
+"81 150 OFFCURVE",
+"81 129 CURVE SMOOTH",
+"81 110 OFFCURVE",
+"94 97 OFFCURVE",
+"110 97 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"101 207 OFFCURVE",
+"114 219 OFFCURVE",
+"114 239 CURVE SMOOTH",
+"114 260 OFFCURVE",
+"101 270 OFFCURVE",
+"82 270 CURVE SMOOTH",
+"67 270 OFFCURVE",
+"54 260 OFFCURVE",
+"54 239 CURVE SMOOTH",
+"54 219 OFFCURVE",
+"67 207 OFFCURVE",
+"82 207 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"129 318 OFFCURVE",
+"141 330 OFFCURVE",
+"141 351 CURVE SMOOTH",
+"141 371 OFFCURVE",
+"128 382 OFFCURVE",
+"110 382 CURVE SMOOTH",
+"94 382 OFFCURVE",
+"81 371 OFFCURVE",
+"81 351 CURVE SMOOTH",
+"81 330 OFFCURVE",
+"94 318 OFFCURVE",
+"110 318 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"207 15 OFFCURVE",
+"219 27 OFFCURVE",
+"219 49 CURVE SMOOTH",
+"219 68 OFFCURVE",
+"206 78 OFFCURVE",
+"189 78 CURVE SMOOTH",
+"173 78 OFFCURVE",
+"160 68 OFFCURVE",
+"160 49 CURVE SMOOTH",
+"160 27 OFFCURVE",
+"173 15 OFFCURVE",
+"189 15 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"207 400 OFFCURVE",
+"219 412 OFFCURVE",
+"219 431 CURVE SMOOTH",
+"219 453 OFFCURVE",
+"206 463 OFFCURVE",
+"189 463 CURVE SMOOTH",
+"173 463 OFFCURVE",
+"160 453 OFFCURVE",
+"160 431 CURVE SMOOTH",
+"160 412 OFFCURVE",
+"173 400 OFFCURVE",
+"189 400 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"313 -12 OFFCURVE",
+"326 -1 OFFCURVE",
+"326 20 CURVE SMOOTH",
+"326 40 OFFCURVE",
+"313 51 OFFCURVE",
+"295 51 CURVE SMOOTH",
+"279 51 OFFCURVE",
+"266 40 OFFCURVE",
+"266 20 CURVE SMOOTH",
+"266 -1 OFFCURVE",
+"279 -12 OFFCURVE",
+"295 -12 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"313 426 OFFCURVE",
+"326 438 OFFCURVE",
+"326 458 CURVE SMOOTH",
+"326 478 OFFCURVE",
+"313 490 OFFCURVE",
+"295 490 CURVE SMOOTH",
+"279 490 OFFCURVE",
+"266 478 OFFCURVE",
+"266 458 CURVE SMOOTH",
+"266 438 OFFCURVE",
+"279 426 OFFCURVE",
+"295 426 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"420 15 OFFCURVE",
+"431 27 OFFCURVE",
+"431 49 CURVE SMOOTH",
+"431 68 OFFCURVE",
+"418 78 OFFCURVE",
+"401 78 CURVE SMOOTH",
+"386 78 OFFCURVE",
+"373 68 OFFCURVE",
+"373 49 CURVE SMOOTH",
+"373 27 OFFCURVE",
+"386 15 OFFCURVE",
+"401 15 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"420 399 OFFCURVE",
+"431 412 OFFCURVE",
+"431 431 CURVE SMOOTH",
+"431 452 OFFCURVE",
+"418 462 OFFCURVE",
+"401 462 CURVE SMOOTH",
+"386 462 OFFCURVE",
+"373 452 OFFCURVE",
+"373 431 CURVE SMOOTH",
+"373 412 OFFCURVE",
+"386 399 OFFCURVE",
+"401 399 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"499 97 OFFCURVE",
+"510 110 OFFCURVE",
+"510 129 CURVE SMOOTH",
+"510 150 OFFCURVE",
+"497 161 OFFCURVE",
+"480 161 CURVE SMOOTH",
+"465 161 OFFCURVE",
+"451 150 OFFCURVE",
+"451 129 CURVE SMOOTH",
+"451 110 OFFCURVE",
+"465 97 OFFCURVE",
+"480 97 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"526 207 OFFCURVE",
+"538 219 OFFCURVE",
+"538 239 CURVE SMOOTH",
+"538 260 OFFCURVE",
+"523 270 OFFCURVE",
+"508 270 CURVE SMOOTH",
+"491 270 OFFCURVE",
+"478 260 OFFCURVE",
+"478 239 CURVE SMOOTH",
+"478 219 OFFCURVE",
+"491 207 OFFCURVE",
+"508 207 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"499 317 OFFCURVE",
+"510 329 OFFCURVE",
+"510 349 CURVE SMOOTH",
+"510 369 OFFCURVE",
+"497 380 OFFCURVE",
+"480 380 CURVE SMOOTH",
+"465 380 OFFCURVE",
+"451 369 OFFCURVE",
+"451 349 CURVE SMOOTH",
+"451 329 OFFCURVE",
+"465 317 OFFCURVE",
+"480 317 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 592;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"131 96 OFFCURVE",
+"149 112 OFFCURVE",
+"149 141 CURVE SMOOTH",
+"149 170 OFFCURVE",
+"130 187 OFFCURVE",
+"104 187 CURVE SMOOTH",
+"82 187 OFFCURVE",
+"61 170 OFFCURVE",
+"61 141 CURVE SMOOTH",
+"61 112 OFFCURVE",
+"82 96 OFFCURVE",
+"104 96 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"104 204 OFFCURVE",
+"122 221 OFFCURVE",
+"122 251 CURVE SMOOTH",
+"122 279 OFFCURVE",
+"102 295 OFFCURVE",
+"76 295 CURVE SMOOTH",
+"53 295 OFFCURVE",
+"32 279 OFFCURVE",
+"32 251 CURVE SMOOTH",
+"32 221 OFFCURVE",
+"53 204 OFFCURVE",
+"76 204 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"131 313 OFFCURVE",
+"149 331 OFFCURVE",
+"149 360 CURVE SMOOTH",
+"149 390 OFFCURVE",
+"130 405 OFFCURVE",
+"104 405 CURVE SMOOTH",
+"82 405 OFFCURVE",
+"61 390 OFFCURVE",
+"61 360 CURVE SMOOTH",
+"61 331 OFFCURVE",
+"82 313 OFFCURVE",
+"104 313 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"208 14 OFFCURVE",
+"227 31 OFFCURVE",
+"227 61 CURVE SMOOTH",
+"227 89 OFFCURVE",
+"206 105 OFFCURVE",
+"182 105 CURVE SMOOTH",
+"158 105 OFFCURVE",
+"137 89 OFFCURVE",
+"137 61 CURVE SMOOTH",
+"137 31 OFFCURVE",
+"158 14 OFFCURVE",
+"182 14 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"208 395 OFFCURVE",
+"227 412 OFFCURVE",
+"227 440 CURVE SMOOTH",
+"227 470 OFFCURVE",
+"206 486 OFFCURVE",
+"182 486 CURVE SMOOTH",
+"158 486 OFFCURVE",
+"137 470 OFFCURVE",
+"137 440 CURVE SMOOTH",
+"137 412 OFFCURVE",
+"158 395 OFFCURVE",
+"182 395 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"314 -13 OFFCURVE",
+"332 4 OFFCURVE",
+"332 34 CURVE SMOOTH",
+"332 62 OFFCURVE",
+"313 78 OFFCURVE",
+"287 78 CURVE SMOOTH",
+"264 78 OFFCURVE",
+"244 62 OFFCURVE",
+"244 34 CURVE SMOOTH",
+"244 4 OFFCURVE",
+"264 -13 OFFCURVE",
+"287 -13 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"314 421 OFFCURVE",
+"332 439 OFFCURVE",
+"332 468 CURVE SMOOTH",
+"332 496 OFFCURVE",
+"313 512 OFFCURVE",
+"287 512 CURVE SMOOTH",
+"264 512 OFFCURVE",
+"244 496 OFFCURVE",
+"244 468 CURVE SMOOTH",
+"244 439 OFFCURVE",
+"264 421 OFFCURVE",
+"287 421 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"420 14 OFFCURVE",
+"438 31 OFFCURVE",
+"438 61 CURVE SMOOTH",
+"438 89 OFFCURVE",
+"417 105 OFFCURVE",
+"392 105 CURVE SMOOTH",
+"369 105 OFFCURVE",
+"348 89 OFFCURVE",
+"348 61 CURVE SMOOTH",
+"348 31 OFFCURVE",
+"369 14 OFFCURVE",
+"392 14 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"420 394 OFFCURVE",
+"438 411 OFFCURVE",
+"438 440 CURVE SMOOTH",
+"438 469 OFFCURVE",
+"417 486 OFFCURVE",
+"392 486 CURVE SMOOTH",
+"369 486 OFFCURVE",
+"348 469 OFFCURVE",
+"348 440 CURVE SMOOTH",
+"348 411 OFFCURVE",
+"369 394 OFFCURVE",
+"392 394 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"498 96 OFFCURVE",
+"516 112 OFFCURVE",
+"516 141 CURVE SMOOTH",
+"516 170 OFFCURVE",
+"496 187 OFFCURVE",
+"472 187 CURVE SMOOTH",
+"447 187 OFFCURVE",
+"426 170 OFFCURVE",
+"426 141 CURVE SMOOTH",
+"426 112 OFFCURVE",
+"447 96 OFFCURVE",
+"472 96 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"524 204 OFFCURVE",
+"543 221 OFFCURVE",
+"543 251 CURVE SMOOTH",
+"543 279 OFFCURVE",
+"522 295 OFFCURVE",
+"498 295 CURVE SMOOTH",
+"473 295 OFFCURVE",
+"453 279 OFFCURVE",
+"453 251 CURVE SMOOTH",
+"453 221 OFFCURVE",
+"473 204 OFFCURVE",
+"498 204 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"498 313 OFFCURVE",
+"516 330 OFFCURVE",
+"516 359 CURVE SMOOTH",
+"516 388 OFFCURVE",
+"496 404 OFFCURVE",
+"472 404 CURVE SMOOTH",
+"447 404 OFFCURVE",
+"426 388 OFFCURVE",
+"426 359 CURVE SMOOTH",
+"426 330 OFFCURVE",
+"447 313 OFFCURVE",
+"472 313 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 574;
+}
+);
+note = "";
+unicode = 25CC;
+},
+{
+glyphname = tildecmb;
+production = uni0303;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"140 580 OFFCURVE",
+"156 646 OFFCURVE",
+"160 702 CURVE",
+"134 704 LINE",
+"132 652 OFFCURVE",
+"116 606 OFFCURVE",
+"79 606 CURVE SMOOTH",
+"20 606 OFFCURVE",
+"0 706 OFFCURVE",
+"-76 706 CURVE SMOOTH",
+"-140 706 OFFCURVE",
+"-156 641 OFFCURVE",
+"-160 584 CURVE",
+"-134 582 LINE",
+"-132 636 OFFCURVE",
+"-116 680 OFFCURVE",
+"-78 680 CURVE SMOOTH",
+"-20 680 OFFCURVE",
+"0 580 OFFCURVE",
+"77 580 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 0;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"144 572 OFFCURVE",
+"194 617 OFFCURVE",
+"196 730 CURVE",
+"90 736 LINE",
+"86 700 OFFCURVE",
+"76 690 OFFCURVE",
+"60 690 CURVE SMOOTH",
+"34 690 OFFCURVE",
+"-4 746 OFFCURVE",
+"-64 746 CURVE SMOOTH",
+"-144 746 OFFCURVE",
+"-194 701 OFFCURVE",
+"-196 588 CURVE",
+"-90 582 LINE",
+"-86 618 OFFCURVE",
+"-76 628 OFFCURVE",
+"-60 628 CURVE SMOOTH",
+"-34 628 OFFCURVE",
+"4 572 OFFCURVE",
+"64 572 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 0;
+}
+);
+note = "";
+unicode = 0303;
+},
+{
+glyphname = dieresiscmb;
+production = uni0308;
+layers = (
+{
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+paths = (
+{
+closed = 1;
+nodes = (
+"-68 602 OFFCURVE",
+"-54 616 OFFCURVE",
+"-54 634 CURVE SMOOTH",
+"-54 652 OFFCURVE",
+"-68 666 OFFCURVE",
+"-86 666 CURVE SMOOTH",
+"-104 666 OFFCURVE",
+"-118 652 OFFCURVE",
+"-118 634 CURVE SMOOTH",
+"-118 616 OFFCURVE",
+"-104 602 OFFCURVE",
+"-86 602 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"104 602 OFFCURVE",
+"118 616 OFFCURVE",
+"118 634 CURVE SMOOTH",
+"118 652 OFFCURVE",
+"104 666 OFFCURVE",
+"86 666 CURVE SMOOTH",
+"68 666 OFFCURVE",
+"54 652 OFFCURVE",
+"54 634 CURVE SMOOTH",
+"54 616 OFFCURVE",
+"68 602 OFFCURVE",
+"86 602 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 0;
+},
+{
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+paths = (
+{
+closed = 1;
+nodes = (
+"-67 562 OFFCURVE",
+"-34 597 OFFCURVE",
+"-34 642 CURVE SMOOTH",
+"-34 687 OFFCURVE",
+"-67 722 OFFCURVE",
+"-114 722 CURVE SMOOTH",
+"-161 722 OFFCURVE",
+"-194 687 OFFCURVE",
+"-194 642 CURVE SMOOTH",
+"-194 597 OFFCURVE",
+"-161 562 OFFCURVE",
+"-114 562 CURVE SMOOTH"
+);
+},
+{
+closed = 1;
+nodes = (
+"161 562 OFFCURVE",
+"194 597 OFFCURVE",
+"194 642 CURVE SMOOTH",
+"194 687 OFFCURVE",
+"161 722 OFFCURVE",
+"114 722 CURVE SMOOTH",
+"67 722 OFFCURVE",
+"34 687 OFFCURVE",
+"34 642 CURVE SMOOTH",
+"34 597 OFFCURVE",
+"67 562 OFFCURVE",
+"114 562 CURVE SMOOTH"
+);
+}
+);
+vertWidth = 0;
+width = 0;
+}
+);
+note = "";
+unicode = 0308;
+},
+{
+glyphname = tildebelowcmb;
+production = uni0330;
+layers = (
+{
+components = (
+{
+name = tildecmb;
+transform = "{1, 0, 0, 1, 0, -800}";
+}
+);
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+vertWidth = 0;
+width = 0;
+},
+{
+components = (
+{
+name = tildecmb;
+transform = "{1, 0, 0, 1, 0, -800}";
+}
+);
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+vertWidth = 0;
+width = 0;
+}
+);
+note = "";
+unicode = 0330;
+},
+{
+glyphname = dieresisbelowcmb;
+production = uni0324;
+layers = (
+{
+components = (
+{
+name = dieresiscmb;
+transform = "{1, 0, 0, 1, 0, -790}";
+}
+);
+layerId = "8DB0CCF0-BD6F-426B-90E2-48FD021BE868";
+vertWidth = 0;
+width = 0;
+},
+{
+components = (
+{
+name = dieresiscmb;
+transform = "{1, 0, 0, 1, 0, -786}";
+}
+);
+layerId = "A99E50E2-B754-449B-A60B-37BA27802C99";
+vertWidth = 0;
+width = 0;
+}
+);
+note = "";
+unicode = 0324;
+}
+);
+instances = (
+{
+customParameters = (
+{
+name = weightClass;
+value = 0;
+},
+{
+name = postscriptFontName;
+value = "TestFamily2-ExtraLight";
+},
+{
+name = "UFO Filename";
+value = "instances/TestFamily2-ExtraLight.ufo";
+}
+);
+interpolationWeight = 0;
+name = ExtraLight;
+weightClass = Thin;
+},
+{
+customParameters = (
+{
+name = postscriptFontName;
+value = "TestFamily2-Light";
+},
+{
+name = "UFO Filename";
+value = "instances/TestFamily2-Light.ufo";
+}
+);
+name = Light;
+weightClass = Thin;
+},
+{
+customParameters = (
+{
+name = weightClass;
+value = 368;
+},
+{
+name = postscriptFontName;
+value = "TestFamily2-Regular";
+},
+{
+name = "UFO Filename";
+value = "instances/TestFamily2-Regular.ufo";
+}
+);
+interpolationWeight = 368;
+name = Regular;
+weightClass = Normal;
+},
+{
+customParameters = (
+{
+name = postscriptFontName;
+value = "TestFamily2-Semibold";
+},
+{
+name = "UFO Filename";
+value = "instances/TestFamily2-Semibold.ufo";
+}
+);
+interpolationWeight = 600;
+name = Semibold;
+weightClass = DemiBold;
+},
+{
+customParameters = (
+{
+name = weightClass;
+value = 824;
+},
+{
+name = postscriptFontName;
+value = "TestFamily2-Bold";
+},
+{
+name = "UFO Filename";
+value = "instances/TestFamily2-Bold.ufo";
+}
+);
+interpolationWeight = 824;
+name = Bold;
+weightClass = ExtraBold;
+},
+{
+customParameters = (
+{
+name = weightClass;
+value = 1000;
+},
+{
+name = postscriptFontName;
+value = "TestFamily2-Black";
+},
+{
+name = "UFO Filename";
+value = "instances/TestFamily2-Black.ufo";
+}
+);
+interpolationWeight = 1000;
+name = Black;
+weightClass = Black;
+}
+);
+unitsPerEm = 1000;
+userData = {
+com.schriftgestaltung.Glyphs.groupsNotInFeature = (
+);
+};
+versionMajor = 2;
+versionMinor = 20;
+}
diff --git a/Tests/varLib/data/SparseCFF2.designspace b/Tests/varLib/data/SparseCFF2.designspace
new file mode 100644
index 00000000..cd8823a7
--- /dev/null
+++ b/Tests/varLib/data/SparseCFF2.designspace
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<designspace format="5.0">
+ <axes>
+ <axis tag="wght" name="Weight" minimum="350" maximum="625" default="350"/>
+ </axes>
+ <sources>
+ <source filename="master_sparse_cff2_empty/SparseCFF-Regular.ttx" name="Sparse Font Regular" familyname="Sparse Font" stylename="Regular">
+ <location>
+ <dimension name="Weight" xvalue="350"/>
+ </location>
+ </source>
+ <source filename="master_sparse_cff2_empty/SparseCFF-Medium.ttx" name="Sparse Font Medium" familyname="Sparse Font" stylename="Medium">
+ <location>
+ <dimension name="Weight" xvalue="450"/>
+ </location>
+ </source>
+ <source filename="master_sparse_cff2_empty/SparseCFF-Bold.ttx" name="Sparse Font Bold" familyname="Sparse Font" stylename="Bold">
+ <location>
+ <dimension name="Weight" xvalue="625"/>
+ </location>
+ </source>
+ </sources>
+</designspace>
diff --git a/Tests/varLib/data/SparseMasters.glyphs b/Tests/varLib/data/SparseMasters.glyphs
new file mode 100644
index 00000000..a9843a46
--- /dev/null
+++ b/Tests/varLib/data/SparseMasters.glyphs
@@ -0,0 +1,486 @@
+{
+.appVersion = "895";
+customParameters = (
+{
+name = glyphOrder;
+value = (
+.notdef,
+a,
+e,
+edotabove,
+s,
+dotabovecomb
+);
+},
+{
+name = "Disable Last Change";
+value = 1;
+}
+);
+disablesAutomaticAlignment = 1;
+familyName = "Sparse Masters";
+fontMaster = (
+{
+ascender = 750;
+capHeight = 700;
+customParameters = (
+{
+name = "UFO Filename";
+value = "master_ufo/SparseMasters-Regular.ufo";
+}
+);
+descender = -250;
+id = "CCC32AD0-E3D7-4595-BA12-BA39A95902C9";
+userData = {
+com.defcon.sortDescriptor = (
+{
+ascending = (
+.notdef,
+a,
+e,
+edotabove,
+s,
+dotabovecomb
+);
+type = glyphList;
+}
+);
+};
+weightValue = 350;
+xHeight = 500;
+},
+{
+ascender = 750;
+capHeight = 700;
+customParameters = (
+{
+name = "UFO Filename";
+value = "master_ufo/SparseMasters-Medium.ufo";
+},
+{
+name = "Master Name";
+value = Medium;
+}
+);
+descender = -250;
+id = "2B2F6A55-E8C4-4456-AFD7-7A9468BB18B9";
+userData = {
+};
+weightValue = 450;
+xHeight = 500;
+},
+{
+ascender = 750;
+capHeight = 700;
+customParameters = (
+{
+name = "UFO Filename";
+value = "master_ufo/SparseMasters-Bold.ufo";
+},
+{
+name = "Master Name";
+value = Bold;
+}
+);
+descender = -250;
+id = "36D5BF76-782C-4F60-A6DB-0A9BC5828108";
+userData = {
+com.defcon.sortDescriptor = (
+{
+ascending = (
+.notdef,
+a,
+e,
+edotabove,
+s,
+dotabovecomb
+);
+type = glyphList;
+}
+);
+};
+weightValue = 625;
+xHeight = 500;
+}
+);
+glyphs = (
+{
+glyphname = .notdef;
+layers = (
+{
+layerId = "CCC32AD0-E3D7-4595-BA12-BA39A95902C9";
+paths = (
+{
+closed = 1;
+nodes = (
+"450 750 LINE",
+"450 -250 LINE",
+"50 -250 LINE",
+"50 750 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"400 700 LINE",
+"100 700 LINE",
+"100 -200 LINE",
+"400 -200 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 500;
+},
+{
+layerId = "2B2F6A55-E8C4-4456-AFD7-7A9468BB18B9";
+paths = (
+{
+closed = 1;
+nodes = (
+"450 750 LINE",
+"450 -250 LINE",
+"50 -250 LINE",
+"50 750 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"400 700 LINE",
+"100 700 LINE",
+"100 -200 LINE",
+"400 -200 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 500;
+},
+{
+layerId = "36D5BF76-782C-4F60-A6DB-0A9BC5828108";
+paths = (
+{
+closed = 1;
+nodes = (
+"450 750 LINE",
+"450 -250 LINE",
+"50 -250 LINE",
+"50 750 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"400 700 LINE",
+"100 700 LINE",
+"100 -200 LINE",
+"400 -200 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 500;
+}
+);
+note = .notdef;
+},
+{
+glyphname = a;
+layers = (
+{
+layerId = "CCC32AD0-E3D7-4595-BA12-BA39A95902C9";
+paths = (
+{
+closed = 1;
+nodes = (
+"214 504 LINE",
+"9 428 LINE",
+"36 337 LINE",
+"208 397 LINE",
+"363 357 LINE",
+"366 -3 LINE",
+"468 -1 LINE",
+"447 434 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"29 22 LINE",
+"168 -12 LINE",
+"389 71 LINE",
+"383 134 LINE",
+"161 74 LINE",
+"86 126 LINE",
+"88 172 LINE",
+"382 207 LINE",
+"378 263 LINE",
+"26 240 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 600;
+},
+{
+layerId = "36D5BF76-782C-4F60-A6DB-0A9BC5828108";
+paths = (
+{
+closed = 1;
+nodes = (
+"214 504 LINE",
+"9 428 LINE",
+"36 281 LINE",
+"208 341 LINE",
+"304 303 LINE",
+"307 -1 LINE",
+"468 -1 LINE",
+"447 434 LINE"
+);
+},
+{
+closed = 1;
+nodes = (
+"29 22 LINE",
+"168 -12 LINE",
+"389 71 LINE",
+"383 149 LINE",
+"201 102 LINE",
+"163 133 LINE",
+"165 179 LINE",
+"381 184 LINE",
+"378 263 LINE",
+"26 240 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 600;
+}
+);
+note = a;
+unicode = 0061;
+},
+{
+glyphname = e;
+layers = (
+{
+layerId = "CCC32AD0-E3D7-4595-BA12-BA39A95902C9";
+paths = (
+{
+closed = 1;
+nodes = (
+"571 305 LINE",
+"316 513 LINE",
+"40 261 LINE",
+"188 -18 LINE",
+"526 45 LINE",
+"509 129 LINE",
+"229 75 LINE",
+"147 263 LINE",
+"317 416 LINE",
+"480 292 LINE",
+"125 298 LINE",
+"127 228 LINE",
+"576 226 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 600;
+},
+{
+layerId = "2B2F6A55-E8C4-4456-AFD7-7A9468BB18B9";
+paths = (
+{
+closed = 1;
+nodes = (
+"571 305 LINE",
+"316 513 LINE",
+"40 261 LINE",
+"188 -18 LINE",
+"526 45 LINE",
+"507 157 LINE",
+"264 116 LINE",
+"180 264 LINE",
+"318 387 LINE",
+"396 297 LINE",
+"125 298 LINE",
+"126 203 LINE",
+"576 199 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 600;
+},
+{
+layerId = "36D5BF76-782C-4F60-A6DB-0A9BC5828108";
+paths = (
+{
+closed = 1;
+nodes = (
+"596 304 LINE",
+"314 548 LINE",
+"9 262 LINE",
+"188 -18 LINE",
+"528 0 LINE",
+"524 184 LINE",
+"244 130 LINE",
+"217 264 LINE",
+"301 360 LINE",
+"404 293 LINE",
+"195 299 LINE",
+"197 229 LINE",
+"601 225 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 600;
+}
+);
+note = e;
+unicode = 0065;
+},
+{
+glyphname = edotabove;
+layers = (
+{
+components = (
+{
+name = e;
+},
+{
+name = dotabovecomb;
+transform = "{1, 0, 0, 1, 313, 96}";
+}
+);
+layerId = "CCC32AD0-E3D7-4595-BA12-BA39A95902C9";
+vertWidth = 0;
+width = 600;
+},
+{
+components = (
+{
+name = e;
+},
+{
+name = dotabovecomb;
+transform = "{1, 0, 0, 1, 307, 187}";
+}
+);
+layerId = "36D5BF76-782C-4F60-A6DB-0A9BC5828108";
+vertWidth = 0;
+width = 600;
+}
+);
+note = edotabove;
+unicode = 0117;
+},
+{
+glyphname = s;
+layers = (
+{
+layerId = "CCC32AD0-E3D7-4595-BA12-BA39A95902C9";
+paths = (
+{
+closed = 1;
+nodes = (
+"38 343 LINE",
+"427 155 LINE",
+"282 76 LINE",
+"53 174 LINE",
+"25 83 LINE",
+"304 -13 LINE",
+"582 174 LINE",
+"213 366 LINE",
+"326 442 LINE",
+"539 376 LINE",
+"559 459 LINE",
+"324 530 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 600;
+},
+{
+layerId = "36D5BF76-782C-4F60-A6DB-0A9BC5828108";
+paths = (
+{
+closed = 1;
+nodes = (
+"16 398 LINE",
+"347 149 LINE",
+"221 119 LINE",
+"26 226 LINE",
+"7 79 LINE",
+"284 -58 LINE",
+"608 141 LINE",
+"268 357 LINE",
+"324 402 LINE",
+"537 336 LINE",
+"559 459 LINE",
+"324 530 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 600;
+}
+);
+note = s;
+unicode = 0073;
+},
+{
+glyphname = dotabovecomb;
+layers = (
+{
+layerId = "CCC32AD0-E3D7-4595-BA12-BA39A95902C9";
+paths = (
+{
+closed = 1;
+nodes = (
+"41 501 LINE",
+"50 589 LINE",
+"-21 597 LINE",
+"-37 503 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 0;
+},
+{
+layerId = "36D5BF76-782C-4F60-A6DB-0A9BC5828108";
+paths = (
+{
+closed = 1;
+nodes = (
+"58 488 LINE",
+"63 605 LINE",
+"-29 625 LINE",
+"-64 483 LINE"
+);
+}
+);
+vertWidth = 0;
+width = 0;
+}
+);
+note = dotabovecomb;
+unicode = 0307;
+}
+);
+instances = (
+);
+unitsPerEm = 1000;
+userData = {
+com.schriftgestaltung.Glyphs.groupsNotInFeature = (
+);
+};
+versionMajor = 1;
+versionMinor = 0;
+}
diff --git a/Tests/varLib/data/SparseMasters_ufo.designspace b/Tests/varLib/data/SparseMasters_ufo.designspace
new file mode 100644
index 00000000..1fd57bca
--- /dev/null
+++ b/Tests/varLib/data/SparseMasters_ufo.designspace
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<designspace format="4.0">
+ <axes>
+ <axis tag="wght" name="Weight" minimum="350" maximum="625" default="350"/>
+ </axes>
+ <sources>
+ <source filename="master_ufo/SparseMasters-Regular.ufo" name="Sparse Masters Regular">
+ <location>
+ <dimension name="Weight" xvalue="350"/>
+ </location>
+ </source>
+ <source filename="master_ufo/SparseMasters-Medium.ufo" name="Sparse Masters Medium">
+ <location>
+ <dimension name="Weight" xvalue="450"/>
+ </location>
+ </source>
+ <source filename="master_ufo/SparseMasters-Bold.ufo" name="Sparse Masters Bold">
+ <location>
+ <dimension name="Weight" xvalue="625"/>
+ </location>
+ </source>
+ </sources>
+</designspace>
diff --git a/Tests/varLib/data/TestNoOverwriteSTAT.designspace b/Tests/varLib/data/TestNoOverwriteSTAT.designspace
new file mode 100644
index 00000000..e06a1be5
--- /dev/null
+++ b/Tests/varLib/data/TestNoOverwriteSTAT.designspace
@@ -0,0 +1,36 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<designspace format="5.0">
+ <axes>
+ <axis tag="wght" name="Weight" minimum="100" maximum="900" default="100">
+ <map input="100" output="30"/>
+ <map input="900" output="240"/>
+ </axis>
+ <axis tag="wdth" name="Width" minimum="80" maximum="115" default="80"/>
+ </axes>
+ <sources>
+ <source filename="master_no_overwrite_stat/Test-CondensedThin.ttx" name="Test Condensed Thin">
+ <location>
+ <dimension name="Weight" xvalue="30"/>
+ <dimension name="Width" xvalue="80"/>
+ </location>
+ </source>
+ <source filename="master_no_overwrite_stat/Test-CondensedBlack.ttx" name="Test Condensed Black">
+ <location>
+ <dimension name="Weight" xvalue="240"/>
+ <dimension name="Width" xvalue="80"/>
+ </location>
+ </source>
+ <source filename="master_no_overwrite_stat/Test-ExtendedThin.ttx" name="Test Extended Thin">
+ <location>
+ <dimension name="Weight" xvalue="30"/>
+ <dimension name="Width" xvalue="115"/>
+ </location>
+ </source>
+ <source filename="master_no_overwrite_stat/Test-ExtendedBlack.ttx" name="Test Extended Black">
+ <location>
+ <dimension name="Weight" xvalue="240"/>
+ <dimension name="Width" xvalue="115"/>
+ </location>
+ </source>
+ </sources>
+</designspace>
diff --git a/Tests/varLib/data/master_no_overwrite_stat/Test-CondensedBlack.ttx b/Tests/varLib/data/master_no_overwrite_stat/Test-CondensedBlack.ttx
new file mode 100644
index 00000000..db687a9f
--- /dev/null
+++ b/Tests/varLib/data/master_no_overwrite_stat/Test-CondensedBlack.ttx
@@ -0,0 +1,243 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="A"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="3.013"/>
+ <checkSumAdjustment value="0x37268237"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Fri Feb 17 14:29:44 2023"/>
+ <modified value="Tue Mar 7 12:56:58 2023"/>
+ <xMin value="-2"/>
+ <yMin value="-250"/>
+ <xMax value="583"/>
+ <yMax value="750"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="1000"/>
+ <descent value="-300"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="582"/>
+ <minLeftSideBearing value="-2"/>
+ <minRightSideBearing value="-1"/>
+ <xMaxExtent value="583"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="2"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="2"/>
+ <maxPoints value="19"/>
+ <maxContours value="2"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="541"/>
+ <usWeightClass value="900"/>
+ <usWidthClass value="3"/>
+ <fsType value="00000000 00001000"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="80"/>
+ <yStrikeoutPosition value="303"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="NONE"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="65"/>
+ <usLastCharIndex value="65"/>
+ <sTypoAscender value="750"/>
+ <sTypoDescender value="-250"/>
+ <sTypoLineGap value="250"/>
+ <usWinAscent value="1000"/>
+ <usWinDescent value="300"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="505"/>
+ <sCapHeight value="670"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="50"/>
+ <mtx name="A" width="582" lsb="-2"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="50" yMin="-250" xMax="450" yMax="750">
+ <contour>
+ <pt x="50" y="-250" on="1"/>
+ <pt x="50" y="750" on="1"/>
+ <pt x="450" y="750" on="1"/>
+ <pt x="450" y="-250" on="1"/>
+ </contour>
+ <contour>
+ <pt x="100" y="-200" on="1"/>
+ <pt x="400" y="-200" on="1"/>
+ <pt x="400" y="700" on="1"/>
+ <pt x="100" y="700" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="A" xMin="-2" yMin="0" xMax="583" yMax="672">
+ <contour>
+ <pt x="410" y="672" on="1"/>
+ <pt x="570" y="81" on="1"/>
+ <pt x="576" y="58" on="0"/>
+ <pt x="583" y="19" on="0"/>
+ <pt x="583" y="0" on="1"/>
+ <pt x="384" y="0" on="1"/>
+ <pt x="315" y="355" on="1"/>
+ <pt x="297" y="480" on="1"/>
+ <pt x="292" y="480" on="1"/>
+ <pt x="195" y="0" on="1"/>
+ <pt x="-2" y="0" on="1"/>
+ <pt x="-2" y="15" on="0"/>
+ <pt x="2" y="55" on="0"/>
+ <pt x="9" y="79" on="1"/>
+ <pt x="176" y="668" on="1"/>
+ </contour>
+ <contour>
+ <pt x="422" y="257" on="1"/>
+ <pt x="422" y="107" on="1"/>
+ <pt x="137" y="107" on="1"/>
+ <pt x="137" y="257" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Test Condensed Black
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 3.013;NONE;Test-CondensedBlack
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Test Condensed Black
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 3.013
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ Test-CondensedBlack
+ </namerecord>
+ <namerecord nameID="16" platformID="3" platEncID="1" langID="0x409">
+ Test
+ </namerecord>
+ <namerecord nameID="17" platformID="3" platEncID="1" langID="0x409">
+ Condensed Black
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-130"/>
+ <underlineThickness value="80"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ </extraNames>
+ </post>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_no_overwrite_stat/Test-CondensedThin.ttx b/Tests/varLib/data/master_no_overwrite_stat/Test-CondensedThin.ttx
new file mode 100644
index 00000000..e3d16464
--- /dev/null
+++ b/Tests/varLib/data/master_no_overwrite_stat/Test-CondensedThin.ttx
@@ -0,0 +1,373 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="A"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="3.013"/>
+ <checkSumAdjustment value="0x176a479f"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Fri Feb 17 14:29:44 2023"/>
+ <modified value="Tue Mar 7 12:56:58 2023"/>
+ <xMin value="28"/>
+ <yMin value="-250"/>
+ <xMax value="450"/>
+ <yMax value="750"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="1000"/>
+ <descent value="-300"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="500"/>
+ <minLeftSideBearing value="28"/>
+ <minRightSideBearing value="31"/>
+ <xMaxExtent value="450"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="2"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="2"/>
+ <maxPoints value="19"/>
+ <maxContours value="2"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="482"/>
+ <usWeightClass value="100"/>
+ <usWidthClass value="3"/>
+ <fsType value="00000000 00001000"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="25"/>
+ <yStrikeoutPosition value="274"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="NONE"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="65"/>
+ <usLastCharIndex value="65"/>
+ <sTypoAscender value="750"/>
+ <sTypoDescender value="-250"/>
+ <sTypoLineGap value="250"/>
+ <usWinAscent value="1000"/>
+ <usWinDescent value="300"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="456"/>
+ <sCapHeight value="670"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="50"/>
+ <mtx name="A" width="464" lsb="28"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="50" yMin="-250" xMax="450" yMax="750">
+ <contour>
+ <pt x="50" y="-250" on="1"/>
+ <pt x="50" y="750" on="1"/>
+ <pt x="450" y="750" on="1"/>
+ <pt x="450" y="-250" on="1"/>
+ </contour>
+ <contour>
+ <pt x="100" y="-200" on="1"/>
+ <pt x="400" y="-200" on="1"/>
+ <pt x="400" y="700" on="1"/>
+ <pt x="100" y="700" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="A" xMin="28" yMin="0" xMax="433" yMax="672">
+ <contour>
+ <pt x="247" y="672" on="1"/>
+ <pt x="408" y="90" on="1"/>
+ <pt x="414" y="68" on="0"/>
+ <pt x="427" y="23" on="0"/>
+ <pt x="433" y="0" on="1"/>
+ <pt x="399" y="0" on="1"/>
+ <pt x="266" y="482" on="1"/>
+ <pt x="235" y="613" on="1"/>
+ <pt x="233" y="613" on="1"/>
+ <pt x="62" y="0" on="1"/>
+ <pt x="28" y="0" on="1"/>
+ <pt x="35" y="23" on="0"/>
+ <pt x="47" y="68" on="0"/>
+ <pt x="54" y="90" on="1"/>
+ <pt x="218" y="668" on="1"/>
+ </contour>
+ <contour>
+ <pt x="354" y="244" on="1"/>
+ <pt x="354" y="217" on="1"/>
+ <pt x="109" y="217" on="1"/>
+ <pt x="109" y="244" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Test Condensed Thin
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 3.013;NONE;Test-CondensedThin
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Test Condensed Thin
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 3.013
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ Test-CondensedThin
+ </namerecord>
+ <namerecord nameID="16" platformID="3" platEncID="1" langID="0x409">
+ Test
+ </namerecord>
+ <namerecord nameID="17" platformID="3" platEncID="1" langID="0x409">
+ Condensed Thin
+ </namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
+ Standard
+ </namerecord>
+ <namerecord nameID="257" platformID="3" platEncID="1" langID="0x409">
+ Heaviness
+ </namerecord>
+ <namerecord nameID="258" platformID="3" platEncID="1" langID="0x409">
+ Extralight
+ </namerecord>
+ <namerecord nameID="259" platformID="3" platEncID="1" langID="0x409">
+ Thin
+ </namerecord>
+ <namerecord nameID="260" platformID="3" platEncID="1" langID="0x409">
+ Light
+ </namerecord>
+ <namerecord nameID="261" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="262" platformID="3" platEncID="1" langID="0x409">
+ Medium
+ </namerecord>
+ <namerecord nameID="263" platformID="3" platEncID="1" langID="0x409">
+ Semibold
+ </namerecord>
+ <namerecord nameID="264" platformID="3" platEncID="1" langID="0x409">
+ Bold
+ </namerecord>
+ <namerecord nameID="265" platformID="3" platEncID="1" langID="0x409">
+ Wideness
+ </namerecord>
+ <namerecord nameID="266" platformID="3" platEncID="1" langID="0x409">
+ Compressed
+ </namerecord>
+ <namerecord nameID="267" platformID="3" platEncID="1" langID="0x409">
+ Condensed
+ </namerecord>
+ <namerecord nameID="268" platformID="3" platEncID="1" langID="0x409">
+ Normal
+ </namerecord>
+ <namerecord nameID="269" platformID="3" platEncID="1" langID="0x409">
+ Extended
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-130"/>
+ <underlineThickness value="25"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ </extraNames>
+ </post>
+
+ <STAT>
+ <Version value="0x00010001"/>
+ <DesignAxisRecordSize value="8"/>
+ <!-- DesignAxisCount=2 -->
+ <DesignAxisRecord>
+ <Axis index="0">
+ <AxisTag value="wght"/>
+ <AxisNameID value="257"/> <!-- Heaviness -->
+ <AxisOrdering value="0"/>
+ </Axis>
+ <Axis index="1">
+ <AxisTag value="wdth"/>
+ <AxisNameID value="265"/> <!-- Wideness -->
+ <AxisOrdering value="1"/>
+ </Axis>
+ </DesignAxisRecord>
+ <!-- AxisValueCount=11 -->
+ <AxisValueArray>
+ <AxisValue index="0" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="0"/>
+ <ValueNameID value="258"/> <!-- Extralight -->
+ <Value value="100.0"/>
+ </AxisValue>
+ <AxisValue index="1" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="0"/>
+ <ValueNameID value="259"/> <!-- Thin -->
+ <Value value="200.0"/>
+ </AxisValue>
+ <AxisValue index="2" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="0"/>
+ <ValueNameID value="260"/> <!-- Light -->
+ <Value value="300.0"/>
+ </AxisValue>
+ <AxisValue index="3" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="2"/> <!-- ElidableAxisValueName -->
+ <ValueNameID value="261"/> <!-- Regular -->
+ <Value value="400.0"/>
+ </AxisValue>
+ <AxisValue index="4" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="0"/>
+ <ValueNameID value="262"/> <!-- Medium -->
+ <Value value="500.0"/>
+ </AxisValue>
+ <AxisValue index="5" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="0"/>
+ <ValueNameID value="263"/> <!-- Semibold -->
+ <Value value="600.0"/>
+ </AxisValue>
+ <AxisValue index="6" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="0"/>
+ <ValueNameID value="264"/> <!-- Bold -->
+ <Value value="700.0"/>
+ </AxisValue>
+ <AxisValue index="7" Format="1">
+ <AxisIndex value="1"/>
+ <Flags value="0"/>
+ <ValueNameID value="266"/> <!-- Compressed -->
+ <Value value="80.0"/>
+ </AxisValue>
+ <AxisValue index="8" Format="1">
+ <AxisIndex value="1"/>
+ <Flags value="0"/>
+ <ValueNameID value="267"/> <!-- Condensed -->
+ <Value value="90.0"/>
+ </AxisValue>
+ <AxisValue index="9" Format="1">
+ <AxisIndex value="1"/>
+ <Flags value="0"/>
+ <ValueNameID value="268"/> <!-- Normal -->
+ <Value value="100.0"/>
+ </AxisValue>
+ <AxisValue index="10" Format="1">
+ <AxisIndex value="1"/>
+ <Flags value="0"/>
+ <ValueNameID value="269"/> <!-- Extended -->
+ <Value value="115.0"/>
+ </AxisValue>
+ </AxisValueArray>
+ <ElidedFallbackNameID value="256"/> <!-- Standard -->
+ </STAT>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedBlack.ttx b/Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedBlack.ttx
new file mode 100644
index 00000000..8dc69399
--- /dev/null
+++ b/Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedBlack.ttx
@@ -0,0 +1,243 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="A"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="3.013"/>
+ <checkSumAdjustment value="0xc2b89a77"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Fri Feb 17 14:29:44 2023"/>
+ <modified value="Tue Mar 7 12:56:58 2023"/>
+ <xMin value="-6"/>
+ <yMin value="-250"/>
+ <xMax value="759"/>
+ <yMax value="750"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="1000"/>
+ <descent value="-300"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="762"/>
+ <minLeftSideBearing value="-6"/>
+ <minRightSideBearing value="3"/>
+ <xMaxExtent value="759"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="2"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="2"/>
+ <maxPoints value="19"/>
+ <maxContours value="2"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="631"/>
+ <usWeightClass value="900"/>
+ <usWidthClass value="6"/>
+ <fsType value="00000000 00001000"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="100"/>
+ <yStrikeoutPosition value="303"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="NONE"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="65"/>
+ <usLastCharIndex value="65"/>
+ <sTypoAscender value="750"/>
+ <sTypoDescender value="-250"/>
+ <sTypoLineGap value="250"/>
+ <usWinAscent value="1000"/>
+ <usWinDescent value="300"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="505"/>
+ <sCapHeight value="670"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="50"/>
+ <mtx name="A" width="762" lsb="-6"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="50" yMin="-250" xMax="450" yMax="750">
+ <contour>
+ <pt x="50" y="-250" on="1"/>
+ <pt x="50" y="750" on="1"/>
+ <pt x="450" y="750" on="1"/>
+ <pt x="450" y="-250" on="1"/>
+ </contour>
+ <contour>
+ <pt x="100" y="-200" on="1"/>
+ <pt x="400" y="-200" on="1"/>
+ <pt x="400" y="700" on="1"/>
+ <pt x="100" y="700" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="A" xMin="-6" yMin="0" xMax="759" yMax="672">
+ <contour>
+ <pt x="542" y="672" on="1"/>
+ <pt x="735" y="121" on="1"/>
+ <pt x="747" y="86" on="0"/>
+ <pt x="759" y="24" on="0"/>
+ <pt x="759" y="0" on="1"/>
+ <pt x="501" y="0" on="1"/>
+ <pt x="421" y="258" on="1"/>
+ <pt x="384" y="458" on="1"/>
+ <pt x="377" y="458" on="1"/>
+ <pt x="244" y="0" on="1"/>
+ <pt x="-6" y="0" on="1"/>
+ <pt x="-6" y="21" on="0"/>
+ <pt x="4" y="87" on="0"/>
+ <pt x="17" y="121" on="1"/>
+ <pt x="233" y="668" on="1"/>
+ </contour>
+ <contour>
+ <pt x="545" y="259" on="1"/>
+ <pt x="545" y="92" on="1"/>
+ <pt x="176" y="92" on="1"/>
+ <pt x="176" y="259" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Test Extended Black
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 3.013;NONE;Test-ExtendedBlack
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Test Extended Black
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 3.013
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ Test-ExtendedBlack
+ </namerecord>
+ <namerecord nameID="16" platformID="3" platEncID="1" langID="0x409">
+ Test
+ </namerecord>
+ <namerecord nameID="17" platformID="3" platEncID="1" langID="0x409">
+ Extended Black
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-130"/>
+ <underlineThickness value="100"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ </extraNames>
+ </post>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedThin.ttx b/Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedThin.ttx
new file mode 100644
index 00000000..d9209edd
--- /dev/null
+++ b/Tests/varLib/data/master_no_overwrite_stat/Test-ExtendedThin.ttx
@@ -0,0 +1,243 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="A"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="3.013"/>
+ <checkSumAdjustment value="0x4af8381f"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Fri Feb 17 14:29:44 2023"/>
+ <modified value="Tue Mar 7 12:56:58 2023"/>
+ <xMin value="23"/>
+ <yMin value="-250"/>
+ <xMax value="605"/>
+ <yMax value="750"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="1000"/>
+ <descent value="-300"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="633"/>
+ <minLeftSideBearing value="23"/>
+ <minRightSideBearing value="28"/>
+ <xMaxExtent value="605"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="2"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="2"/>
+ <maxPoints value="19"/>
+ <maxContours value="2"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="567"/>
+ <usWeightClass value="100"/>
+ <usWidthClass value="6"/>
+ <fsType value="00000000 00001000"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="28"/>
+ <yStrikeoutPosition value="274"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="NONE"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="65"/>
+ <usLastCharIndex value="65"/>
+ <sTypoAscender value="750"/>
+ <sTypoDescender value="-250"/>
+ <sTypoLineGap value="250"/>
+ <usWinAscent value="1000"/>
+ <usWinDescent value="300"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="456"/>
+ <sCapHeight value="670"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="50"/>
+ <mtx name="A" width="633" lsb="23"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x41" name="A"/><!-- LATIN CAPITAL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="50" yMin="-250" xMax="450" yMax="750">
+ <contour>
+ <pt x="50" y="-250" on="1"/>
+ <pt x="50" y="750" on="1"/>
+ <pt x="450" y="750" on="1"/>
+ <pt x="450" y="-250" on="1"/>
+ </contour>
+ <contour>
+ <pt x="100" y="-200" on="1"/>
+ <pt x="400" y="-200" on="1"/>
+ <pt x="400" y="700" on="1"/>
+ <pt x="100" y="700" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="A" xMin="23" yMin="0" xMax="605" yMax="672">
+ <contour>
+ <pt x="339" y="672" on="1"/>
+ <pt x="566" y="100" on="1"/>
+ <pt x="576" y="75" on="0"/>
+ <pt x="595" y="25" on="0"/>
+ <pt x="605" y="0" on="1"/>
+ <pt x="563" y="0" on="1"/>
+ <pt x="384" y="453" on="1"/>
+ <pt x="320" y="626" on="1"/>
+ <pt x="317" y="626" on="1"/>
+ <pt x="62" y="0" on="1"/>
+ <pt x="23" y="0" on="1"/>
+ <pt x="34" y="25" on="0"/>
+ <pt x="54" y="75" on="0"/>
+ <pt x="64" y="100" on="1"/>
+ <pt x="297" y="668" on="1"/>
+ </contour>
+ <contour>
+ <pt x="491" y="250" on="1"/>
+ <pt x="491" y="217" on="1"/>
+ <pt x="139" y="217" on="1"/>
+ <pt x="139" y="250" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Test Extended Thin
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 3.013;NONE;Test-ExtendedThin
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Test Extended Thin
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 3.013
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ Test-ExtendedThin
+ </namerecord>
+ <namerecord nameID="16" platformID="3" platEncID="1" langID="0x409">
+ Test
+ </namerecord>
+ <namerecord nameID="17" platformID="3" platEncID="1" langID="0x409">
+ Extended Thin
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-130"/>
+ <underlineThickness value="28"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ </extraNames>
+ </post>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Bold.ttx b/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Bold.ttx
new file mode 100644
index 00000000..410489dc
--- /dev/null
+++ b/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Bold.ttx
@@ -0,0 +1,302 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="OTTO" ttLibVersion="4.41">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="a"/>
+ <GlyphID id="2" name="e"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="0.0"/>
+ <checkSumAdjustment value="0xaa7fc0dd"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Wed Nov 21 11:49:03 2018"/>
+ <modified value="Wed Aug 2 11:47:17 2023"/>
+ <xMin value="-64"/>
+ <yMin value="-350"/>
+ <xMax value="608"/>
+ <yMax value="812"/>
+ <macStyle value="00000000 00000001"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="950"/>
+ <descent value="-250"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="600"/>
+ <minLeftSideBearing value="-64"/>
+ <minRightSideBearing value="-63"/>
+ <xMaxExtent value="608"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="2"/>
+ </hhea>
+
+ <maxp>
+ <tableVersion value="0x5000"/>
+ <numGlyphs value="3"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="580"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="50"/>
+ <yStrikeoutPosition value="300"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="NONE"/>
+ <fsSelection value="00000000 00100000"/>
+ <usFirstCharIndex value="97"/>
+ <usLastCharIndex value="101"/>
+ <sTypoAscender value="750"/>
+ <sTypoDescender value="-250"/>
+ <sTypoLineGap value="200"/>
+ <usWinAscent value="950"/>
+ <usWinDescent value="250"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="500"/>
+ <sCapHeight value="700"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="4"/>
+ </OS_2>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Sparse CFF
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Bold
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 0.000;NONE;SparseCFF-Bold
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Sparse CFF Bold
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 0.000
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ SparseCFF-Bold
+ </namerecord>
+ </name>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x61" name="a"/><!-- LATIN SMALL LETTER A -->
+ <map code="0x65" name="e"/><!-- LATIN SMALL LETTER E -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x61" name="a"/><!-- LATIN SMALL LETTER A -->
+ <map code="0x65" name="e"/><!-- LATIN SMALL LETTER E -->
+ </cmap_format_4>
+ </cmap>
+
+ <post>
+ <formatType value="3.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-100"/>
+ <underlineThickness value="50"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ </post>
+
+ <CFF>
+ <major value="1"/>
+ <minor value="0"/>
+ <CFFFont name="SparseCFF-Bold">
+ <version value="0.0"/>
+ <Notice value=""/>
+ <Copyright value=""/>
+ <FullName value="Sparse CFF Bold"/>
+ <FamilyName value="Sparse CFF"/>
+ <isFixedPitch value="0"/>
+ <ItalicAngle value="0"/>
+ <UnderlinePosition value="-100"/>
+ <UnderlineThickness value="50"/>
+ <PaintType value="0"/>
+ <CharstringType value="2"/>
+ <FontMatrix value="0.001 0 0 0.001 0 0"/>
+ <FontBBox value="-64 -350 608 812"/>
+ <StrokeWidth value="0"/>
+ <!-- charset is dumped separately as the 'GlyphOrder' element -->
+ <Encoding name="StandardEncoding"/>
+ <Private>
+ <BlueScale value="0.039625"/>
+ <BlueShift value="7"/>
+ <BlueFuzz value="1"/>
+ <ForceBold value="0"/>
+ <LanguageGroup value="0"/>
+ <ExpansionFactor value="0.06"/>
+ <initialRandomSeed value="0"/>
+ <defaultWidthX value="600"/>
+ <nominalWidthX value="0"/>
+ </Private>
+ <CharStrings>
+ <CharString name=".notdef">
+ 500 50 -350 rmoveto
+ 500 0 rlineto
+ 0 1100 rlineto
+ -500 0 rlineto
+ 50 -950 rmoveto
+ 0 900 rlineto
+ 300 0 rlineto
+ 0 -900 rlineto
+ endchar
+ </CharString>
+ <CharString name="a">
+ 468 -1 rmoveto
+ -21 435 rlineto
+ -233 70 rlineto
+ -205 -76 rlineto
+ 27 -147 rlineto
+ 172 60 rlineto
+ 96 -38 rlineto
+ 3 -304 rlineto
+ 71 264 rmoveto
+ -352 -23 rlineto
+ 3 -218 rlineto
+ 139 -34 rlineto
+ 221 83 rlineto
+ -6 78 rlineto
+ -182 -47 rlineto
+ -38 31 rlineto
+ 2 46 rlineto
+ 216 5 rlineto
+ endchar
+ </CharString>
+ <CharString name="e">
+ 197 229 rmoveto
+ 404 -4 rlineto
+ -5 79 rlineto
+ -282 244 rlineto
+ -305 -286 rlineto
+ 179 -280 rlineto
+ 340 18 rlineto
+ -4 184 rlineto
+ -280 -54 rlineto
+ -27 134 rlineto
+ 84 96 rlineto
+ 103 -67 rlineto
+ -209 6 rlineto
+ endchar
+ </CharString>
+ </CharStrings>
+ </CFFFont>
+
+ <GlobalSubrs>
+ <!-- The 'index' attribute is only for humans; it is ignored when parsed. -->
+ </GlobalSubrs>
+ </CFF>
+
+ <GDEF>
+ <Version value="0x00010000"/>
+ <GlyphClassDef>
+ <ClassDef glyph="e" class="1"/>
+ </GlyphClassDef>
+ </GDEF>
+
+ <GPOS>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=0 -->
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=0 -->
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=0 -->
+ </LookupList>
+ </GPOS>
+
+ <GSUB>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=0 -->
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=0 -->
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=0 -->
+ </LookupList>
+ </GSUB>
+
+ <hmtx>
+ <mtx name=".notdef" width="800" lsb="200"/>
+ <mtx name="a" width="600" lsb="9"/>
+ <mtx name="e" width="600" lsb="9"/>
+ </hmtx>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Medium.ttx b/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Medium.ttx
new file mode 100644
index 00000000..1b583bd3
--- /dev/null
+++ b/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Medium.ttx
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="OTTO" ttLibVersion="4.41">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="e"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="0.0"/>
+ <checkSumAdjustment value="0x263f439"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Wed Nov 21 11:49:03 2018"/>
+ <modified value="Wed Aug 2 11:47:17 2023"/>
+ <xMin value="40"/>
+ <yMin value="-18"/>
+ <xMax value="576"/>
+ <yMax value="513"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <maxp>
+ <tableVersion value="0x5000"/>
+ <numGlyphs value="2"/>
+ </maxp>
+
+ <CFF>
+ <major value="1"/>
+ <minor value="0"/>
+ <CFFFont name="LayerFont-Regular">
+ <version value="0.0"/>
+ <Notice value=""/>
+ <Copyright value=""/>
+ <FullName value="Layer Font Regular"/>
+ <FamilyName value="Layer Font"/>
+ <isFixedPitch value="0"/>
+ <ItalicAngle value="0"/>
+ <UnderlinePosition value="-75"/>
+ <UnderlineThickness value="50"/>
+ <PaintType value="0"/>
+ <CharstringType value="2"/>
+ <FontMatrix value="0.001 0 0 0.001 0 0"/>
+ <FontBBox value="40 -18 576 513"/>
+ <StrokeWidth value="0"/>
+ <!-- charset is dumped separately as the 'GlyphOrder' element -->
+ <Encoding name="StandardEncoding"/>
+ <Private>
+ <BlueScale value="0.039625"/>
+ <BlueShift value="7"/>
+ <BlueFuzz value="1"/>
+ <ForceBold value="0"/>
+ <LanguageGroup value="0"/>
+ <ExpansionFactor value="0.06"/>
+ <initialRandomSeed value="0"/>
+ <defaultWidthX value="500"/>
+ <nominalWidthX value="500"/>
+ </Private>
+ <CharStrings>
+ <CharString name=".notdef">
+ endchar
+ </CharString>
+ <CharString name="e">
+ 100 126 203 rmoveto
+ 450 -4 rlineto
+ -5 106 rlineto
+ -255 208 rlineto
+ -276 -252 rlineto
+ 148 -279 rlineto
+ 338 63 rlineto
+ -19 112 rlineto
+ -243 -41 rlineto
+ -84 148 rlineto
+ 138 123 rlineto
+ 78 -90 rlineto
+ -271 1 rlineto
+ endchar
+ </CharString>
+ </CharStrings>
+ </CFFFont>
+
+ <GlobalSubrs>
+ <!-- The 'index' attribute is only for humans; it is ignored when parsed. -->
+ </GlobalSubrs>
+ </CFF>
+
+ <hmtx>
+ <mtx name=".notdef" width="65535" lsb="0"/>
+ <mtx name="e" width="600" lsb="40"/>
+ </hmtx>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Regular.ttx b/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Regular.ttx
new file mode 100644
index 00000000..a4dda26f
--- /dev/null
+++ b/Tests/varLib/data/master_sparse_cff2_empty/SparseCFF-Regular.ttx
@@ -0,0 +1,302 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="OTTO" ttLibVersion="4.41">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="a"/>
+ <GlyphID id="2" name="e"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="0.0"/>
+ <checkSumAdjustment value="0xbfef2bdd"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Wed Nov 21 11:49:03 2018"/>
+ <modified value="Wed Aug 2 11:47:17 2023"/>
+ <xMin value="-37"/>
+ <yMin value="-250"/>
+ <xMax value="582"/>
+ <yMax value="750"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="950"/>
+ <descent value="-250"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="600"/>
+ <minLeftSideBearing value="-37"/>
+ <minRightSideBearing value="-50"/>
+ <xMaxExtent value="582"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="2"/>
+ </hhea>
+
+ <maxp>
+ <tableVersion value="0x5000"/>
+ <numGlyphs value="3"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="580"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="50"/>
+ <yStrikeoutPosition value="300"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="NONE"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="97"/>
+ <usLastCharIndex value="101"/>
+ <sTypoAscender value="750"/>
+ <sTypoDescender value="-250"/>
+ <sTypoLineGap value="200"/>
+ <usWinAscent value="950"/>
+ <usWinDescent value="250"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="500"/>
+ <sCapHeight value="700"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="4"/>
+ </OS_2>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Sparse CFF
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 0.000;NONE;SparseCFF-Regular
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Sparse CFF Regular
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 0.000
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ SparseCFF-Regular
+ </namerecord>
+ </name>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x61" name="a"/><!-- LATIN SMALL LETTER A -->
+ <map code="0x65" name="e"/><!-- LATIN SMALL LETTER E -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x61" name="a"/><!-- LATIN SMALL LETTER A -->
+ <map code="0x65" name="e"/><!-- LATIN SMALL LETTER E -->
+ </cmap_format_4>
+ </cmap>
+
+ <post>
+ <formatType value="3.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-75"/>
+ <underlineThickness value="50"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ </post>
+
+ <CFF>
+ <major value="1"/>
+ <minor value="0"/>
+ <CFFFont name="SparseCFF-Regular">
+ <version value="0.0"/>
+ <Notice value=""/>
+ <Copyright value=""/>
+ <FullName value="Sparse CFF Regular"/>
+ <FamilyName value="Sparse CFF"/>
+ <isFixedPitch value="0"/>
+ <ItalicAngle value="0"/>
+ <UnderlinePosition value="-75"/>
+ <UnderlineThickness value="50"/>
+ <PaintType value="0"/>
+ <CharstringType value="2"/>
+ <FontMatrix value="0.001 0 0 0.001 0 0"/>
+ <FontBBox value="-37 -250 582 750"/>
+ <StrokeWidth value="0"/>
+ <!-- charset is dumped separately as the 'GlyphOrder' element -->
+ <Encoding name="StandardEncoding"/>
+ <Private>
+ <BlueScale value="0.039625"/>
+ <BlueShift value="7"/>
+ <BlueFuzz value="1"/>
+ <ForceBold value="0"/>
+ <LanguageGroup value="0"/>
+ <ExpansionFactor value="0.06"/>
+ <initialRandomSeed value="0"/>
+ <defaultWidthX value="600"/>
+ <nominalWidthX value="0"/>
+ </Private>
+ <CharStrings>
+ <CharString name=".notdef">
+ 500 50 -250 rmoveto
+ 400 0 rlineto
+ 0 1000 rlineto
+ -400 0 rlineto
+ 50 -950 rmoveto
+ 0 900 rlineto
+ 300 0 rlineto
+ 0 -900 rlineto
+ endchar
+ </CharString>
+ <CharString name="a">
+ 468 -1 rmoveto
+ -21 435 rlineto
+ -233 70 rlineto
+ -205 -76 rlineto
+ 27 -91 rlineto
+ 172 60 rlineto
+ 155 -40 rlineto
+ 3 -360 rlineto
+ 12 266 rmoveto
+ -352 -23 rlineto
+ 3 -218 rlineto
+ 139 -34 rlineto
+ 221 83 rlineto
+ -6 63 rlineto
+ -222 -60 rlineto
+ -75 52 rlineto
+ 2 46 rlineto
+ 294 35 rlineto
+ endchar
+ </CharString>
+ <CharString name="e">
+ 127 228 rmoveto
+ 449 -2 rlineto
+ -5 79 rlineto
+ -255 208 rlineto
+ -276 -252 rlineto
+ 148 -279 rlineto
+ 338 63 rlineto
+ -17 84 rlineto
+ -280 -54 rlineto
+ -82 188 rlineto
+ 170 153 rlineto
+ 163 -124 rlineto
+ -355 6 rlineto
+ endchar
+ </CharString>
+ </CharStrings>
+ </CFFFont>
+
+ <GlobalSubrs>
+ <!-- The 'index' attribute is only for humans; it is ignored when parsed. -->
+ </GlobalSubrs>
+ </CFF>
+
+ <GDEF>
+ <Version value="0x00010000"/>
+ <GlyphClassDef>
+ <ClassDef glyph="e" class="1"/>
+ </GlyphClassDef>
+ </GDEF>
+
+ <GPOS>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=0 -->
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=0 -->
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=0 -->
+ </LookupList>
+ </GPOS>
+
+ <GSUB>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=0 -->
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=0 -->
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=0 -->
+ </LookupList>
+ </GSUB>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="50"/>
+ <mtx name="a" width="600" lsb="9"/>
+ <mtx name="e" width="600" lsb="40"/>
+ </hmtx>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master1.ttx b/Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master1.ttx
new file mode 100644
index 00000000..14e64a75
--- /dev/null
+++ b/Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master1.ttx
@@ -0,0 +1,312 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.7">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="uni0020"/>
+ <GlyphID id="2" name="uni0061"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="1.001"/>
+ <checkSumAdjustment value="0xd723fbc6"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Tue Feb 28 16:48:24 2017"/>
+ <modified value="Tue Feb 28 16:48:24 2017"/>
+ <xMin value="5"/>
+ <yMin value="-115"/>
+ <xMax value="653"/>
+ <yMax value="750"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="918"/>
+ <descent value="-335"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="663"/>
+ <minLeftSideBearing value="5"/>
+ <minRightSideBearing value="7"/>
+ <xMaxExtent value="653"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="5"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="3"/>
+ <maxPoints value="60"/>
+ <maxContours value="4"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="1"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="1"/>
+ <maxSizeOfInstructions value="5"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="506"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="50"/>
+ <yStrikeoutPosition value="284"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="2"/>
+ <bSerifStyle value="4"/>
+ <bWeight value="6"/>
+ <bProportion value="3"/>
+ <bContrast value="5"/>
+ <bStrokeVariation value="4"/>
+ <bArmStyle value="5"/>
+ <bLetterForm value="2"/>
+ <bMidline value="2"/>
+ <bXHeight value="4"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000011"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="ADBO"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="32"/>
+ <usLastCharIndex value="97"/>
+ <sTypoAscender value="730"/>
+ <sTypoDescender value="-270"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="918"/>
+ <usWinDescent value="335"/>
+ <ulCodePageRange1 value="00100000 00000000 00000000 00000011"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="474"/>
+ <sCapHeight value="677"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="640" lsb="80"/>
+ <mtx name="uni0020" width="234" lsb="0"/>
+ <mtx name="uni0061" width="508" lsb="46"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x20" name="uni0020"/><!-- SPACE -->
+ <map code="0x61" name="uni0061"/><!-- LATIN SMALL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x20" name="uni0020"/><!-- SPACE -->
+ <map code="0x61" name="uni0061"/><!-- LATIN SMALL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="80" yMin="0" xMax="560" yMax="670">
+ <contour>
+ <pt x="80" y="0" on="1"/>
+ <pt x="500" y="670" on="1"/>
+ <pt x="560" y="670" on="1"/>
+ <pt x="140" y="0" on="1"/>
+ </contour>
+ <contour>
+ <pt x="560" y="0" on="1"/>
+ <pt x="500" y="0" on="1"/>
+ <pt x="80" y="670" on="1"/>
+ <pt x="140" y="670" on="1"/>
+ </contour>
+ <contour>
+ <pt x="140" y="50" on="1"/>
+ <pt x="500" y="50" on="1"/>
+ <pt x="500" y="620" on="1"/>
+ <pt x="140" y="620" on="1"/>
+ </contour>
+ <contour>
+ <pt x="80" y="0" on="1"/>
+ <pt x="80" y="670" on="1"/>
+ <pt x="560" y="670" on="1"/>
+ <pt x="560" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="uni0020"/><!-- contains no outline data -->
+
+ <TTGlyph name="uni0061" xMin="46" yMin="-13" xMax="501" yMax="487">
+ <contour>
+ <pt x="46" y="102" on="1"/>
+ <pt x="46" y="154" on="0"/>
+ <pt x="110" y="225" on="0"/>
+ <pt x="210" y="262" on="1"/>
+ <pt x="242" y="273" on="0"/>
+ <pt x="328" y="297" on="0"/>
+ <pt x="365" y="304" on="1"/>
+ <pt x="365" y="268" on="1"/>
+ <pt x="331" y="261" on="0"/>
+ <pt x="254" y="237" on="0"/>
+ <pt x="231" y="228" on="1"/>
+ <pt x="164" y="202" on="0"/>
+ <pt x="131" y="148" on="0"/>
+ <pt x="131" y="126" on="1"/>
+ <pt x="131" y="86" on="0"/>
+ <pt x="178" y="52" on="0"/>
+ <pt x="212" y="52" on="1"/>
+ <pt x="238" y="52" on="0"/>
+ <pt x="283" y="76" on="0"/>
+ <pt x="330" y="110" on="1"/>
+ <pt x="350" y="125" on="1"/>
+ <pt x="364" y="104" on="1"/>
+ <pt x="335" y="75" on="1"/>
+ <pt x="290" y="30" on="0"/>
+ <pt x="226" y="-13" on="0"/>
+ <pt x="180" y="-13" on="1"/>
+ <pt x="125" y="-13" on="0"/>
+ <pt x="46" y="50" on="0"/>
+ </contour>
+ <contour>
+ <pt x="325" y="92" on="1"/>
+ <pt x="325" y="320" on="1"/>
+ <pt x="325" y="394" on="0"/>
+ <pt x="280" y="442" on="0"/>
+ <pt x="231" y="442" on="1"/>
+ <pt x="214" y="442" on="0"/>
+ <pt x="169" y="435" on="0"/>
+ <pt x="141" y="424" on="1"/>
+ <pt x="181" y="455" on="1"/>
+ <pt x="155" y="369" on="1"/>
+ <pt x="148" y="347" on="0"/>
+ <pt x="124" y="324" on="0"/>
+ <pt x="104" y="324" on="1"/>
+ <pt x="62" y="324" on="0"/>
+ <pt x="59" y="364" on="1"/>
+ <pt x="73" y="421" on="0"/>
+ <pt x="177" y="487" on="0"/>
+ <pt x="252" y="487" on="1"/>
+ <pt x="329" y="487" on="0"/>
+ <pt x="405" y="408" on="0"/>
+ <pt x="405" y="314" on="1"/>
+ <pt x="405" y="102" on="1"/>
+ <pt x="405" y="68" on="0"/>
+ <pt x="425" y="41" on="0"/>
+ <pt x="442" y="41" on="1"/>
+ <pt x="455" y="41" on="0"/>
+ <pt x="473" y="53" on="0"/>
+ <pt x="481" y="63" on="1"/>
+ <pt x="501" y="41" on="1"/>
+ <pt x="469" y="-10" on="0"/>
+ <pt x="416" y="-10" on="1"/>
+ <pt x="375" y="-10" on="0"/>
+ <pt x="325" y="46" on="0"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Test Family
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ Version 1.001;ADBO;Test Family Regular
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Test Family
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 1.001
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ TestFamily-Master1
+ </namerecord>
+ <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409">
+ Frank Grießhammer
+ </namerecord>
+ <namerecord nameID="17" platformID="3" platEncID="1" langID="0x409">
+ Master 1
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-75"/>
+ <underlineThickness value="50"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ <psName name="uni0020"/>
+ <psName name="uni0061"/>
+ </extraNames>
+ </post>
+
+ <GDEF>
+ <Version value="0x00010003"/>
+ <GlyphClassDef>
+ <ClassDef glyph="uni0061" class="1"/>
+ </GlyphClassDef>
+ </GDEF>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master2.ttx b/Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master2.ttx
new file mode 100644
index 00000000..1559071a
--- /dev/null
+++ b/Tests/varLib/data/master_ttx_drop_oncurves/TestFamily-Master2.ttx
@@ -0,0 +1,313 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.7">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="uni0020"/>
+ <GlyphID id="2" name="uni0061"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="1.001"/>
+ <checkSumAdjustment value="0x4b3253f0"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Tue Feb 28 16:48:24 2017"/>
+ <modified value="Tue Feb 28 16:48:24 2017"/>
+ <xMin value="10"/>
+ <yMin value="-115"/>
+ <xMax value="665"/>
+ <yMax value="731"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="918"/>
+ <descent value="-335"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="680"/>
+ <minLeftSideBearing value="10"/>
+ <minRightSideBearing value="-8"/>
+ <xMaxExtent value="665"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="5"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="3"/>
+ <maxPoints value="60"/>
+ <maxContours value="4"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="531"/>
+ <usWeightClass value="900"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="50"/>
+ <yStrikeoutPosition value="292"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="2"/>
+ <bSerifStyle value="4"/>
+ <bWeight value="9"/>
+ <bProportion value="3"/>
+ <bContrast value="5"/>
+ <bStrokeVariation value="4"/>
+ <bArmStyle value="5"/>
+ <bLetterForm value="2"/>
+ <bMidline value="2"/>
+ <bXHeight value="4"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000011"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="ADBO"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="32"/>
+ <usLastCharIndex value="97"/>
+ <sTypoAscender value="730"/>
+ <sTypoDescender value="-270"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="918"/>
+ <usWinDescent value="335"/>
+ <ulCodePageRange1 value="00100000 00000000 00000000 00000011"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="487"/>
+ <sCapHeight value="677"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="640" lsb="80"/>
+ <mtx name="uni0020" width="206" lsb="0"/>
+ <mtx name="uni0061" width="540" lsb="25"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x20" name="uni0020"/><!-- SPACE -->
+ <map code="0x61" name="uni0061"/><!-- LATIN SMALL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x20" name="uni0020"/><!-- SPACE -->
+ <map code="0x61" name="uni0061"/><!-- LATIN SMALL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="80" yMin="0" xMax="560" yMax="652">
+ <contour>
+ <pt x="80" y="0" on="1"/>
+ <pt x="480" y="652" on="1"/>
+ <pt x="560" y="652" on="1"/>
+ <pt x="160" y="0" on="1"/>
+ </contour>
+ <contour>
+ <pt x="560" y="0" on="1"/>
+ <pt x="480" y="0" on="1"/>
+ <pt x="80" y="652" on="1"/>
+ <pt x="160" y="652" on="1"/>
+ </contour>
+ <contour>
+ <pt x="150" y="60" on="1"/>
+ <pt x="490" y="60" on="1"/>
+ <pt x="490" y="592" on="1"/>
+ <pt x="150" y="592" on="1"/>
+ </contour>
+ <contour>
+ <pt x="80" y="0" on="1"/>
+ <pt x="80" y="652" on="1"/>
+ <pt x="560" y="652" on="1"/>
+ <pt x="560" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="uni0020"/><!-- contains no outline data -->
+
+ <TTGlyph name="uni0061" xMin="25" yMin="-16" xMax="548" yMax="503">
+ <contour>
+ <pt x="25" y="111" on="1"/>
+ <pt x="25" y="170" on="0"/>
+ <pt x="108" y="253" on="0"/>
+ <pt x="230" y="285" on="1"/>
+ <pt x="261" y="293" on="0"/>
+ <pt x="356" y="318" on="0"/>
+ <pt x="391" y="327" on="1"/>
+ <pt x="391" y="283" on="1"/>
+ <pt x="355" y="273" on="0"/>
+ <pt x="284" y="254" on="0"/>
+ <pt x="262" y="243" on="1"/>
+ <pt x="241" y="233" on="0"/>
+ <pt x="197" y="184" on="0"/>
+ <pt x="197" y="144" on="1"/>
+ <pt x="197" y="107" on="0"/>
+ <pt x="227" y="71" on="0"/>
+ <pt x="249" y="71" on="1"/>
+ <pt x="259" y="71" on="0"/>
+ <pt x="281" y="81" on="0"/>
+ <pt x="296" y="92" on="1"/>
+ <pt x="344" y="128" on="1"/>
+ <pt x="353" y="116" on="1"/>
+ <pt x="306" y="64" on="1"/>
+ <pt x="273" y="28" on="0"/>
+ <pt x="213" y="-16" on="0"/>
+ <pt x="155" y="-16" on="1"/>
+ <pt x="96" y="-16" on="0"/>
+ <pt x="25" y="52" on="0"/>
+ </contour>
+ <contour>
+ <pt x="291" y="78" on="1"/>
+ <pt x="291" y="337" on="1"/>
+ <pt x="291" y="401" on="0"/>
+ <pt x="262" y="449" on="0"/>
+ <pt x="215" y="449" on="1"/>
+ <pt x="196" y="449" on="0"/>
+ <pt x="154" y="444" on="0"/>
+ <pt x="120" y="436" on="1"/>
+ <pt x="200" y="478" on="1"/>
+ <pt x="200" y="415" on="1"/>
+ <pt x="200" y="354" on="0"/>
+ <pt x="150" y="303" on="0"/>
+ <pt x="118" y="303" on="1"/>
+ <pt x="57" y="303" on="0"/>
+ <pt x="42" y="357" on="1"/>
+ <pt x="42" y="422" on="0"/>
+ <pt x="165" y="503" on="0"/>
+ <pt x="286" y="503" on="1"/>
+ <pt x="390" y="503" on="0"/>
+ <pt x="475" y="412" on="0"/>
+ <pt x="475" y="309" on="1"/>
+ <pt x="475" y="80" on="1"/>
+ <pt x="475" y="72" on="0"/>
+ <pt x="484" y="63" on="0"/>
+ <pt x="492" y="63" on="1"/>
+ <pt x="498" y="63" on="0"/>
+ <pt x="510" y="72" on="0"/>
+ <pt x="519" y="85" on="1"/>
+ <pt x="548" y="69" on="1"/>
+ <pt x="515" y="-16" on="0"/>
+ <pt x="414" y="-16" on="1"/>
+ <pt x="359" y="-16" on="0"/>
+ <pt x="300" y="33" on="0"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Test Family
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ Version 1.001;ADBO;Test Family Regular
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Test Family
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 1.001
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ TestFamily-Master2
+ </namerecord>
+ <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409">
+ Frank Grießhammer
+ </namerecord>
+ <namerecord nameID="17" platformID="3" platEncID="1" langID="0x409">
+ Master 2
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-75"/>
+ <underlineThickness value="50"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ <psName name="dollar.nostroke"/>
+ <psName name="uni0020"/>
+ <psName name="uni0061"/>
+ </extraNames>
+ </post>
+
+ <GDEF>
+ <Version value="0x00010003"/>
+ <GlyphClassDef>
+ <ClassDef glyph="uni0061" class="1"/>
+ </GlyphClassDef>
+ </GDEF>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx b/Tests/varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx
index 29c5bb31..5de73115 100644
--- a/Tests/varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx
+++ b/Tests/varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx
@@ -761,6 +761,7 @@
</STAT>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="-0.5" to="-0.7283"/>
diff --git a/Tests/varLib/data/master_ttx_varfont_ttf/SparseMasters-VF.ttx b/Tests/varLib/data/master_ttx_varfont_ttf/SparseMasters-VF.ttx
new file mode 100644
index 00000000..819b3441
--- /dev/null
+++ b/Tests/varLib/data/master_ttx_varfont_ttf/SparseMasters-VF.ttx
@@ -0,0 +1,501 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="a"/>
+ <GlyphID id="2" name="e"/>
+ <GlyphID id="3" name="edotabove"/>
+ <GlyphID id="4" name="s"/>
+ <GlyphID id="5" name="dotabovecomb"/>
+ </GlyphOrder>
+
+ <head>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="1.0"/>
+ <fontRevision value="0.0"/>
+ <checkSumAdjustment value="0x193b520f"/>
+ <magicNumber value="0x5f0f3cf5"/>
+ <flags value="00000000 00000011"/>
+ <unitsPerEm value="1000"/>
+ <created value="Thu Apr 6 00:27:50 2023"/>
+ <modified value="Thu Apr 6 00:27:50 2023"/>
+ <xMin value="-37"/>
+ <yMin value="-250"/>
+ <xMax value="582"/>
+ <yMax value="750"/>
+ <macStyle value="00000000 00000000"/>
+ <lowestRecPPEM value="6"/>
+ <fontDirectionHint value="2"/>
+ <indexToLocFormat value="0"/>
+ <glyphDataFormat value="0"/>
+ </head>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="950"/>
+ <descent value="-250"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="600"/>
+ <minLeftSideBearing value="-37"/>
+ <minRightSideBearing value="-50"/>
+ <xMaxExtent value="582"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="6"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="6"/>
+ <maxPoints value="18"/>
+ <maxContours value="2"/>
+ <maxCompositePoints value="17"/>
+ <maxCompositeContours value="2"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="0"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="0"/>
+ <maxSizeOfInstructions value="0"/>
+ <maxComponentElements value="2"/>
+ <maxComponentDepth value="1"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="580"/>
+ <usWeightClass value="350"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="50"/>
+ <yStrikeoutPosition value="300"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="0"/>
+ <bSerifStyle value="0"/>
+ <bWeight value="0"/>
+ <bProportion value="0"/>
+ <bContrast value="0"/>
+ <bStrokeVariation value="0"/>
+ <bArmStyle value="0"/>
+ <bLetterForm value="0"/>
+ <bMidline value="0"/>
+ <bXHeight value="0"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 01000101"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="NONE"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="97"/>
+ <usLastCharIndex value="775"/>
+ <sTypoAscender value="750"/>
+ <sTypoDescender value="-250"/>
+ <sTypoLineGap value="200"/>
+ <usWinAscent value="950"/>
+ <usWinDescent value="250"/>
+ <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="500"/>
+ <sCapHeight value="700"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="50"/>
+ <mtx name="a" width="600" lsb="9"/>
+ <mtx name="dotabovecomb" width="0" lsb="-37"/>
+ <mtx name="e" width="600" lsb="40"/>
+ <mtx name="edotabove" width="600" lsb="40"/>
+ <mtx name="s" width="600" lsb="25"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x61" name="a"/><!-- LATIN SMALL LETTER A -->
+ <map code="0x65" name="e"/><!-- LATIN SMALL LETTER E -->
+ <map code="0x73" name="s"/><!-- LATIN SMALL LETTER S -->
+ <map code="0x117" name="edotabove"/><!-- LATIN SMALL LETTER E WITH DOT ABOVE -->
+ <map code="0x307" name="dotabovecomb"/><!-- COMBINING DOT ABOVE -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x61" name="a"/><!-- LATIN SMALL LETTER A -->
+ <map code="0x65" name="e"/><!-- LATIN SMALL LETTER E -->
+ <map code="0x73" name="s"/><!-- LATIN SMALL LETTER S -->
+ <map code="0x117" name="edotabove"/><!-- LATIN SMALL LETTER E WITH DOT ABOVE -->
+ <map code="0x307" name="dotabovecomb"/><!-- COMBINING DOT ABOVE -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="50" yMin="-250" xMax="450" yMax="750">
+ <contour>
+ <pt x="50" y="750" on="1"/>
+ <pt x="50" y="-250" on="1"/>
+ <pt x="450" y="-250" on="1"/>
+ <pt x="450" y="750" on="1"/>
+ </contour>
+ <contour>
+ <pt x="400" y="-200" on="1"/>
+ <pt x="100" y="-200" on="1"/>
+ <pt x="100" y="700" on="1"/>
+ <pt x="400" y="700" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="a" xMin="9" yMin="-12" xMax="468" yMax="504">
+ <contour>
+ <pt x="447" y="434" on="1"/>
+ <pt x="468" y="-1" on="1"/>
+ <pt x="366" y="-3" on="1"/>
+ <pt x="363" y="357" on="1"/>
+ <pt x="208" y="397" on="1"/>
+ <pt x="36" y="337" on="1"/>
+ <pt x="9" y="428" on="1"/>
+ <pt x="214" y="504" on="1"/>
+ </contour>
+ <contour>
+ <pt x="26" y="240" on="1"/>
+ <pt x="378" y="263" on="1"/>
+ <pt x="382" y="207" on="1"/>
+ <pt x="88" y="172" on="1"/>
+ <pt x="86" y="126" on="1"/>
+ <pt x="161" y="74" on="1"/>
+ <pt x="383" y="134" on="1"/>
+ <pt x="389" y="71" on="1"/>
+ <pt x="168" y="-12" on="1"/>
+ <pt x="29" y="22" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="dotabovecomb" xMin="-37" yMin="501" xMax="50" yMax="597">
+ <contour>
+ <pt x="-37" y="503" on="1"/>
+ <pt x="-21" y="597" on="1"/>
+ <pt x="50" y="589" on="1"/>
+ <pt x="41" y="501" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="e" xMin="40" yMin="-18" xMax="576" yMax="513">
+ <contour>
+ <pt x="576" y="226" on="1"/>
+ <pt x="127" y="228" on="1"/>
+ <pt x="125" y="298" on="1"/>
+ <pt x="480" y="292" on="1"/>
+ <pt x="317" y="416" on="1"/>
+ <pt x="147" y="263" on="1"/>
+ <pt x="229" y="75" on="1"/>
+ <pt x="509" y="129" on="1"/>
+ <pt x="526" y="45" on="1"/>
+ <pt x="188" y="-18" on="1"/>
+ <pt x="40" y="261" on="1"/>
+ <pt x="316" y="513" on="1"/>
+ <pt x="571" y="305" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="edotabove" xMin="40" yMin="-18" xMax="576" yMax="693">
+ <component glyphName="e" x="0" y="0" flags="0x204"/>
+ <component glyphName="dotabovecomb" x="313" y="96" flags="0x4"/>
+ </TTGlyph>
+
+ <TTGlyph name="s" xMin="25" yMin="-13" xMax="582" yMax="530">
+ <contour>
+ <pt x="324" y="530" on="1"/>
+ <pt x="559" y="459" on="1"/>
+ <pt x="539" y="376" on="1"/>
+ <pt x="326" y="442" on="1"/>
+ <pt x="213" y="366" on="1"/>
+ <pt x="582" y="174" on="1"/>
+ <pt x="304" y="-13" on="1"/>
+ <pt x="25" y="83" on="1"/>
+ <pt x="53" y="174" on="1"/>
+ <pt x="282" y="76" on="1"/>
+ <pt x="427" y="155" on="1"/>
+ <pt x="38" y="343" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="256" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ Weight
+ </namerecord>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Sparse Masters
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ 0.000;NONE;SparseMasters-Regular
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Sparse Masters Regular
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 0.000
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ SparseMasters-Regular
+ </namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
+ Weight
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-75"/>
+ <underlineThickness value="50"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ <psName name="edotabove"/>
+ <psName name="dotabovecomb"/>
+ </extraNames>
+ </post>
+
+ <HVAR>
+ <Version value="0x00010000"/>
+ <VarStore Format="1">
+ <Format value="1"/>
+ <VarRegionList>
+ <!-- RegionAxisCount=1 -->
+ <!-- RegionCount=3 -->
+ <Region index="0">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="0.36365"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ <Region index="1">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.36365"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ <Region index="2">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ </VarRegionList>
+ <!-- VarDataCount=1 -->
+ <VarData index="0">
+ <!-- ItemCount=1 -->
+ <NumShorts value="0"/>
+ <!-- VarRegionCount=0 -->
+ <Item index="0" value="[]"/>
+ </VarData>
+ </VarStore>
+ <AdvWidthMap>
+ <Map glyph=".notdef" outer="0" inner="0"/>
+ <Map glyph="a" outer="0" inner="0"/>
+ <Map glyph="dotabovecomb" outer="0" inner="0"/>
+ <Map glyph="e" outer="0" inner="0"/>
+ <Map glyph="edotabove" outer="0" inner="0"/>
+ <Map glyph="s" outer="0" inner="0"/>
+ </AdvWidthMap>
+ </HVAR>
+
+ <STAT>
+ <Version value="0x00010001"/>
+ <DesignAxisRecordSize value="8"/>
+ <!-- DesignAxisCount=1 -->
+ <DesignAxisRecord>
+ <Axis index="0">
+ <AxisTag value="wght"/>
+ <AxisNameID value="256"/> <!-- Weight -->
+ <AxisOrdering value="0"/>
+ </Axis>
+ </DesignAxisRecord>
+ <!-- AxisValueCount=0 -->
+ <ElidedFallbackNameID value="2"/> <!-- Regular -->
+ </STAT>
+
+ <fvar>
+
+ <!-- Weight -->
+ <Axis>
+ <AxisTag>wght</AxisTag>
+ <Flags>0x0</Flags>
+ <MinValue>350.0</MinValue>
+ <DefaultValue>350.0</DefaultValue>
+ <MaxValue>625.0</MaxValue>
+ <AxisNameID>256</AxisNameID>
+ </Axis>
+ </fvar>
+
+ <gvar>
+ <version value="1"/>
+ <reserved value="0"/>
+ <glyphVariations glyph="a">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="0"/>
+ <delta pt="2" x="-59" y="2"/>
+ <delta pt="3" x="-59" y="-54"/>
+ <delta pt="4" x="0" y="-56"/>
+ <delta pt="5" x="0" y="-56"/>
+ <delta pt="6" x="0" y="0"/>
+ <delta pt="7" x="0" y="0"/>
+ <delta pt="8" x="0" y="0"/>
+ <delta pt="9" x="0" y="0"/>
+ <delta pt="10" x="-1" y="-23"/>
+ <delta pt="11" x="77" y="7"/>
+ <delta pt="12" x="77" y="7"/>
+ <delta pt="13" x="40" y="28"/>
+ <delta pt="14" x="0" y="15"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="0" y="0"/>
+ <delta pt="17" x="0" y="0"/>
+ <delta pt="18" x="0" y="0"/>
+ <delta pt="19" x="0" y="0"/>
+ <delta pt="20" x="0" y="0"/>
+ <delta pt="21" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="dotabovecomb">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="-27" y="-20"/>
+ <delta pt="1" x="-8" y="28"/>
+ <delta pt="2" x="13" y="16"/>
+ <delta pt="3" x="17" y="-13"/>
+ <delta pt="4" x="0" y="0"/>
+ <delta pt="5" x="0" y="0"/>
+ <delta pt="6" x="0" y="0"/>
+ <delta pt="7" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="e">
+ <tuple>
+ <coord axis="wght" min="0.0" value="0.36365" max="1.0"/>
+ <delta pt="0" x="0" y="-27"/>
+ <delta pt="1" x="-1" y="-25"/>
+ <delta pt="2" x="0" y="0"/>
+ <delta pt="3" x="-84" y="5"/>
+ <delta pt="4" x="1" y="-29"/>
+ <delta pt="5" x="33" y="1"/>
+ <delta pt="6" x="35" y="41"/>
+ <delta pt="7" x="-2" y="28"/>
+ <delta pt="8" x="0" y="0"/>
+ <delta pt="9" x="0" y="0"/>
+ <delta pt="10" x="0" y="0"/>
+ <delta pt="11" x="0" y="0"/>
+ <delta pt="12" x="0" y="0"/>
+ <delta pt="13" x="0" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="0" y="0"/>
+ </tuple>
+ <tuple>
+ <coord axis="wght" min="0.36365" value="1.0" max="1.0"/>
+ <delta pt="0" x="25" y="-1"/>
+ <delta pt="1" x="70" y="1"/>
+ <delta pt="2" x="70" y="1"/>
+ <delta pt="3" x="-76" y="1"/>
+ <delta pt="4" x="-16" y="-56"/>
+ <delta pt="5" x="70" y="1"/>
+ <delta pt="6" x="15" y="55"/>
+ <delta pt="7" x="15" y="55"/>
+ <delta pt="8" x="2" y="-45"/>
+ <delta pt="9" x="0" y="0"/>
+ <delta pt="10" x="-31" y="1"/>
+ <delta pt="11" x="-2" y="35"/>
+ <delta pt="12" x="25" y="-1"/>
+ <delta pt="13" x="0" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="edotabove">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="1" x="-6" y="91"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="s">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="0" y="0"/>
+ <delta pt="2" x="-2" y="-40"/>
+ <delta pt="3" x="-2" y="-40"/>
+ <delta pt="4" x="55" y="-9"/>
+ <delta pt="5" x="26" y="-33"/>
+ <delta pt="6" x="-20" y="-45"/>
+ <delta pt="7" x="-18" y="-4"/>
+ <delta pt="8" x="-27" y="52"/>
+ <delta pt="9" x="-61" y="43"/>
+ <delta pt="10" x="-80" y="-6"/>
+ <delta pt="11" x="-22" y="55"/>
+ <delta pt="12" x="0" y="0"/>
+ <delta pt="13" x="0" y="0"/>
+ <delta pt="14" x="0" y="0"/>
+ <delta pt="15" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ </gvar>
+
+</ttFont>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/fontinfo.plist b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/fontinfo.plist
new file mode 100644
index 00000000..3898ecc8
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/fontinfo.plist
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>ascender</key>
+ <integer>750</integer>
+ <key>capHeight</key>
+ <integer>700</integer>
+ <key>descender</key>
+ <integer>-250</integer>
+ <key>familyName</key>
+ <string>Sparse Masters</string>
+ <key>styleName</key>
+ <string>Bold</string>
+ <key>unitsPerEm</key>
+ <integer>1000</integer>
+ <key>xHeight</key>
+ <integer>500</integer>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/_notdef.glif b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/_notdef.glif
new file mode 100644
index 00000000..5d3ca4d6
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/_notdef.glif
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name=".notdef" format="2">
+ <advance width="500"/>
+ <outline>
+ <contour>
+ <point x="50" y="750" type="line"/>
+ <point x="450" y="750" type="line"/>
+ <point x="450" y="-250" type="line"/>
+ <point x="50" y="-250" type="line"/>
+ </contour>
+ <contour>
+ <point x="400" y="-200" type="line"/>
+ <point x="400" y="700" type="line"/>
+ <point x="100" y="700" type="line"/>
+ <point x="100" y="-200" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/a.glif b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/a.glif
new file mode 100644
index 00000000..0e038d68
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/a.glif
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="a" format="2">
+ <unicode hex="0061"/>
+ <advance width="600"/>
+ <outline>
+ <contour>
+ <point x="447" y="434" type="line"/>
+ <point x="214" y="504" type="line"/>
+ <point x="9" y="428" type="line"/>
+ <point x="36" y="281" type="line"/>
+ <point x="208" y="341" type="line"/>
+ <point x="304" y="303" type="line"/>
+ <point x="307" y="-1" type="line"/>
+ <point x="468" y="-1" type="line"/>
+ </contour>
+ <contour>
+ <point x="26" y="240" type="line"/>
+ <point x="29" y="22" type="line"/>
+ <point x="168" y="-12" type="line"/>
+ <point x="389" y="71" type="line"/>
+ <point x="383" y="149" type="line"/>
+ <point x="201" y="102" type="line"/>
+ <point x="163" y="133" type="line"/>
+ <point x="165" y="179" type="line"/>
+ <point x="381" y="184" type="line"/>
+ <point x="378" y="263" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/contents.plist b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/contents.plist
new file mode 100644
index 00000000..da7e7a78
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/contents.plist
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>.notdef</key>
+ <string>_notdef.glif</string>
+ <key>a</key>
+ <string>a.glif</string>
+ <key>dotabovecomb</key>
+ <string>dotabovecomb.glif</string>
+ <key>e</key>
+ <string>e.glif</string>
+ <key>edotabove</key>
+ <string>edotabove.glif</string>
+ <key>s</key>
+ <string>s.glif</string>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/dotabovecomb.glif b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/dotabovecomb.glif
new file mode 100644
index 00000000..1c11088c
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/dotabovecomb.glif
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="dotabovecomb" format="2">
+ <unicode hex="0307"/>
+ <outline>
+ <contour>
+ <point x="-64" y="483" type="line"/>
+ <point x="58" y="488" type="line"/>
+ <point x="63" y="605" type="line"/>
+ <point x="-29" y="625" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/e.glif b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/e.glif
new file mode 100644
index 00000000..c78c38f4
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/e.glif
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="e" format="2">
+ <unicode hex="0065"/>
+ <advance width="600"/>
+ <outline>
+ <contour>
+ <point x="601" y="225" type="line"/>
+ <point x="596" y="304" type="line"/>
+ <point x="314" y="548" type="line"/>
+ <point x="9" y="262" type="line"/>
+ <point x="188" y="-18" type="line"/>
+ <point x="528" y="0" type="line"/>
+ <point x="524" y="184" type="line"/>
+ <point x="244" y="130" type="line"/>
+ <point x="217" y="264" type="line"/>
+ <point x="301" y="360" type="line"/>
+ <point x="404" y="293" type="line"/>
+ <point x="195" y="299" type="line"/>
+ <point x="197" y="229" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/edotabove.glif b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/edotabove.glif
new file mode 100644
index 00000000..bf481928
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/edotabove.glif
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="edotabove" format="2">
+ <unicode hex="0117"/>
+ <advance width="600"/>
+ <outline>
+ <component base="e"/>
+ <component base="dotabovecomb" xOffset="307" yOffset="187"/>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/s.glif b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/s.glif
new file mode 100644
index 00000000..ae47e9a9
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/glyphs/s.glif
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="s" format="2">
+ <unicode hex="0073"/>
+ <advance width="600"/>
+ <outline>
+ <contour>
+ <point x="324" y="530" type="line"/>
+ <point x="16" y="398" type="line"/>
+ <point x="347" y="149" type="line"/>
+ <point x="221" y="119" type="line"/>
+ <point x="26" y="226" type="line"/>
+ <point x="7" y="79" type="line"/>
+ <point x="284" y="-58" type="line"/>
+ <point x="608" y="141" type="line"/>
+ <point x="268" y="357" type="line"/>
+ <point x="324" y="402" type="line"/>
+ <point x="537" y="336" type="line"/>
+ <point x="559" y="459" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/layercontents.plist b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/layercontents.plist
new file mode 100644
index 00000000..03e5dde5
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/layercontents.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <array>
+ <array>
+ <string>public.default</string>
+ <string>glyphs</string>
+ </array>
+ </array>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/lib.plist b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/lib.plist
new file mode 100644
index 00000000..b0fd5eb2
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/lib.plist
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>public.glyphOrder</key>
+ <array>
+ <string>.notdef</string>
+ <string>a</string>
+ <string>e</string>
+ <string>edotabove</string>
+ <string>s</string>
+ <string>dotabovecomb</string>
+ </array>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/metainfo.plist b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/metainfo.plist
new file mode 100644
index 00000000..555d9ce4
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Bold.ufo/metainfo.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>creator</key>
+ <string>com.github.fonttools.ufoLib</string>
+ <key>formatVersion</key>
+ <integer>3</integer>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/fontinfo.plist b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/fontinfo.plist
new file mode 100644
index 00000000..a8f59388
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/fontinfo.plist
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>ascender</key>
+ <integer>750</integer>
+ <key>capHeight</key>
+ <integer>700</integer>
+ <key>descender</key>
+ <integer>-250</integer>
+ <key>familyName</key>
+ <string>Sparse Masters</string>
+ <key>styleName</key>
+ <string>Medium</string>
+ <key>unitsPerEm</key>
+ <integer>1000</integer>
+ <key>xHeight</key>
+ <integer>500</integer>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/_notdef.glif b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/_notdef.glif
new file mode 100644
index 00000000..5d3ca4d6
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/_notdef.glif
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name=".notdef" format="2">
+ <advance width="500"/>
+ <outline>
+ <contour>
+ <point x="50" y="750" type="line"/>
+ <point x="450" y="750" type="line"/>
+ <point x="450" y="-250" type="line"/>
+ <point x="50" y="-250" type="line"/>
+ </contour>
+ <contour>
+ <point x="400" y="-200" type="line"/>
+ <point x="400" y="700" type="line"/>
+ <point x="100" y="700" type="line"/>
+ <point x="100" y="-200" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/contents.plist b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/contents.plist
new file mode 100644
index 00000000..456fd5de
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/contents.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>.notdef</key>
+ <string>_notdef.glif</string>
+ <key>e</key>
+ <string>e.glif</string>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/e.glif b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/e.glif
new file mode 100644
index 00000000..bf15c1ab
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/glyphs/e.glif
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="e" format="2">
+ <advance width="600"/>
+ <outline>
+ <contour>
+ <point x="576" y="199" type="line"/>
+ <point x="571" y="305" type="line"/>
+ <point x="316" y="513" type="line"/>
+ <point x="40" y="261" type="line"/>
+ <point x="188" y="-18" type="line"/>
+ <point x="526" y="45" type="line"/>
+ <point x="507" y="157" type="line"/>
+ <point x="264" y="116" type="line"/>
+ <point x="180" y="264" type="line"/>
+ <point x="318" y="387" type="line"/>
+ <point x="396" y="297" type="line"/>
+ <point x="125" y="298" type="line"/>
+ <point x="126" y="203" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/layercontents.plist b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/layercontents.plist
new file mode 100644
index 00000000..03e5dde5
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/layercontents.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <array>
+ <array>
+ <string>public.default</string>
+ <string>glyphs</string>
+ </array>
+ </array>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/lib.plist b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/lib.plist
new file mode 100644
index 00000000..3326cd65
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/lib.plist
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>public.glyphOrder</key>
+ <array>
+ <string>.notdef</string>
+ <string>e</string>
+ </array>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/metainfo.plist b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/metainfo.plist
new file mode 100644
index 00000000..555d9ce4
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Medium.ufo/metainfo.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>creator</key>
+ <string>com.github.fonttools.ufoLib</string>
+ <key>formatVersion</key>
+ <integer>3</integer>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/fontinfo.plist b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/fontinfo.plist
new file mode 100644
index 00000000..a36990b7
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/fontinfo.plist
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>ascender</key>
+ <integer>750</integer>
+ <key>capHeight</key>
+ <integer>700</integer>
+ <key>descender</key>
+ <integer>-250</integer>
+ <key>familyName</key>
+ <string>Sparse Masters</string>
+ <key>styleName</key>
+ <string>Regular</string>
+ <key>unitsPerEm</key>
+ <integer>1000</integer>
+ <key>xHeight</key>
+ <integer>500</integer>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/_notdef.glif b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/_notdef.glif
new file mode 100644
index 00000000..5d3ca4d6
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/_notdef.glif
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name=".notdef" format="2">
+ <advance width="500"/>
+ <outline>
+ <contour>
+ <point x="50" y="750" type="line"/>
+ <point x="450" y="750" type="line"/>
+ <point x="450" y="-250" type="line"/>
+ <point x="50" y="-250" type="line"/>
+ </contour>
+ <contour>
+ <point x="400" y="-200" type="line"/>
+ <point x="400" y="700" type="line"/>
+ <point x="100" y="700" type="line"/>
+ <point x="100" y="-200" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/a.glif b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/a.glif
new file mode 100644
index 00000000..5dcc9322
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/a.glif
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="a" format="2">
+ <unicode hex="0061"/>
+ <advance width="600"/>
+ <outline>
+ <contour>
+ <point x="447" y="434" type="line"/>
+ <point x="214" y="504" type="line"/>
+ <point x="9" y="428" type="line"/>
+ <point x="36" y="337" type="line"/>
+ <point x="208" y="397" type="line"/>
+ <point x="363" y="357" type="line"/>
+ <point x="366" y="-3" type="line"/>
+ <point x="468" y="-1" type="line"/>
+ </contour>
+ <contour>
+ <point x="26" y="240" type="line"/>
+ <point x="29" y="22" type="line"/>
+ <point x="168" y="-12" type="line"/>
+ <point x="389" y="71" type="line"/>
+ <point x="383" y="134" type="line"/>
+ <point x="161" y="74" type="line"/>
+ <point x="86" y="126" type="line"/>
+ <point x="88" y="172" type="line"/>
+ <point x="382" y="207" type="line"/>
+ <point x="378" y="263" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/contents.plist b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/contents.plist
new file mode 100644
index 00000000..da7e7a78
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/contents.plist
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>.notdef</key>
+ <string>_notdef.glif</string>
+ <key>a</key>
+ <string>a.glif</string>
+ <key>dotabovecomb</key>
+ <string>dotabovecomb.glif</string>
+ <key>e</key>
+ <string>e.glif</string>
+ <key>edotabove</key>
+ <string>edotabove.glif</string>
+ <key>s</key>
+ <string>s.glif</string>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/dotabovecomb.glif b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/dotabovecomb.glif
new file mode 100644
index 00000000..3abb24fd
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/dotabovecomb.glif
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="dotabovecomb" format="2">
+ <unicode hex="0307"/>
+ <outline>
+ <contour>
+ <point x="-37" y="503" type="line"/>
+ <point x="41" y="501" type="line"/>
+ <point x="50" y="589" type="line"/>
+ <point x="-21" y="597" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/e.glif b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/e.glif
new file mode 100644
index 00000000..52fc2b3c
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/e.glif
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="e" format="2">
+ <unicode hex="0065"/>
+ <advance width="600"/>
+ <outline>
+ <contour>
+ <point x="576" y="226" type="line"/>
+ <point x="571" y="305" type="line"/>
+ <point x="316" y="513" type="line"/>
+ <point x="40" y="261" type="line"/>
+ <point x="188" y="-18" type="line"/>
+ <point x="526" y="45" type="line"/>
+ <point x="509" y="129" type="line"/>
+ <point x="229" y="75" type="line"/>
+ <point x="147" y="263" type="line"/>
+ <point x="317" y="416" type="line"/>
+ <point x="480" y="292" type="line"/>
+ <point x="125" y="298" type="line"/>
+ <point x="127" y="228" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/edotabove.glif b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/edotabove.glif
new file mode 100644
index 00000000..9a6dbc56
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/edotabove.glif
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="edotabove" format="2">
+ <unicode hex="0117"/>
+ <advance width="600"/>
+ <outline>
+ <component base="e"/>
+ <component base="dotabovecomb" xOffset="313" yOffset="96"/>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/s.glif b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/s.glif
new file mode 100644
index 00000000..205b0e3d
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/glyphs/s.glif
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="s" format="2">
+ <unicode hex="0073"/>
+ <advance width="600"/>
+ <outline>
+ <contour>
+ <point x="324" y="530" type="line"/>
+ <point x="38" y="343" type="line"/>
+ <point x="427" y="155" type="line"/>
+ <point x="282" y="76" type="line"/>
+ <point x="53" y="174" type="line"/>
+ <point x="25" y="83" type="line"/>
+ <point x="304" y="-13" type="line"/>
+ <point x="582" y="174" type="line"/>
+ <point x="213" y="366" type="line"/>
+ <point x="326" y="442" type="line"/>
+ <point x="539" y="376" type="line"/>
+ <point x="559" y="459" type="line"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/layercontents.plist b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/layercontents.plist
new file mode 100644
index 00000000..03e5dde5
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/layercontents.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <array>
+ <array>
+ <string>public.default</string>
+ <string>glyphs</string>
+ </array>
+ </array>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/lib.plist b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/lib.plist
new file mode 100644
index 00000000..b0fd5eb2
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/lib.plist
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>public.glyphOrder</key>
+ <array>
+ <string>.notdef</string>
+ <string>a</string>
+ <string>e</string>
+ <string>edotabove</string>
+ <string>s</string>
+ <string>dotabovecomb</string>
+ </array>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/metainfo.plist b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/metainfo.plist
new file mode 100644
index 00000000..555d9ce4
--- /dev/null
+++ b/Tests/varLib/data/master_ufo/SparseMasters-Regular.ufo/metainfo.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>creator</key>
+ <string>com.github.fonttools.ufoLib</string>
+ <key>formatVersion</key>
+ <integer>3</integer>
+ </dict>
+</plist>
diff --git a/Tests/varLib/data/test_results/Build.ttx b/Tests/varLib/data/test_results/Build.ttx
index c802bf32..144cca5e 100644
--- a/Tests/varLib/data/test_results/Build.ttx
+++ b/Tests/varLib/data/test_results/Build.ttx
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.17">
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.42">
<GDEF>
<Version value="0x00010003"/>
@@ -17,7 +17,7 @@
<Format value="1"/>
<VarRegionList>
<!-- RegionAxisCount=2 -->
- <!-- RegionCount=5 -->
+ <!-- RegionCount=2 -->
<Region index="0">
<VarRegionAxis index="0">
<StartCoord value="-1.0"/>
@@ -42,58 +42,29 @@
<EndCoord value="0.0"/>
</VarRegionAxis>
</Region>
- <Region index="2">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="0.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="3">
- <VarRegionAxis index="0">
- <StartCoord value="-1.0"/>
- <PeakCoord value="-1.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="4">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
</VarRegionList>
<!-- VarDataCount=1 -->
<VarData index="0">
- <!-- ItemCount=6 -->
+ <!-- ItemCount=5 -->
<NumShorts value="0"/>
<!-- VarRegionCount=2 -->
<VarRegionIndex index="0" value="0"/>
<VarRegionIndex index="1" value="1"/>
- <Item index="0" value="[0, 0]"/>
- <Item index="1" value="[14, -28]"/>
- <Item index="2" value="[-10, 17]"/>
- <Item index="3" value="[-3, 32]"/>
- <Item index="4" value="[-7, 63]"/>
- <Item index="5" value="[-7, 63]"/>
+ <Item index="0" value="[-10, 17]"/>
+ <Item index="1" value="[-7, 63]"/>
+ <Item index="2" value="[-3, 32]"/>
+ <Item index="3" value="[0, 0]"/>
+ <Item index="4" value="[14, -28]"/>
</VarData>
</VarStore>
+ <AdvWidthMap>
+ <Map glyph=".notdef" outer="0" inner="3"/>
+ <Map glyph="uni0020" outer="0" inner="4"/>
+ <Map glyph="uni0024" outer="0" inner="1"/>
+ <Map glyph="uni0024.nostroke" outer="0" inner="1"/>
+ <Map glyph="uni0041" outer="0" inner="0"/>
+ <Map glyph="uni0061" outer="0" inner="2"/>
+ </AdvWidthMap>
</HVAR>
<MVAR>
@@ -105,7 +76,7 @@
<Format value="1"/>
<VarRegionList>
<!-- RegionAxisCount=2 -->
- <!-- RegionCount=5 -->
+ <!-- RegionCount=2 -->
<Region index="0">
<VarRegionAxis index="0">
<StartCoord value="-1.0"/>
@@ -130,42 +101,6 @@
<EndCoord value="0.0"/>
</VarRegionAxis>
</Region>
- <Region index="2">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="0.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="3">
- <VarRegionAxis index="0">
- <StartCoord value="-1.0"/>
- <PeakCoord value="-1.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="4">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
</VarRegionList>
<!-- VarDataCount=1 -->
<VarData index="0">
diff --git a/Tests/varLib/data/test_results/BuildAvar2.ttx b/Tests/varLib/data/test_results/BuildAvar2.ttx
new file mode 100644
index 00000000..27a41bfb
--- /dev/null
+++ b/Tests/varLib/data/test_results/BuildAvar2.ttx
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.14">
+
+ <avar>
+ <version major="2" minor="0"/>
+ <segment axis="wght">
+ <mapping from="-1.0" to="-1.0"/>
+ <mapping from="-0.6667" to="-0.7969"/>
+ <mapping from="-0.3333" to="-0.5"/>
+ <mapping from="0.0" to="0.0"/>
+ <mapping from="0.2" to="0.18"/>
+ <mapping from="0.4" to="0.38"/>
+ <mapping from="0.6" to="0.61"/>
+ <mapping from="0.8" to="0.79"/>
+ <mapping from="1.0" to="1.0"/>
+ </segment>
+ <VarStore Format="1">
+ <Format value="1"/>
+ <VarRegionList>
+ <!-- RegionAxisCount=1 -->
+ <!-- RegionCount=1 -->
+ <Region index="0">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="0.38"/>
+ <EndCoord value="0.38"/>
+ </VarRegionAxis>
+ </Region>
+ </VarRegionList>
+ <!-- VarDataCount=1 -->
+ <VarData index="0">
+ <!-- ItemCount=1 -->
+ <NumShorts value="1"/>
+ <!-- VarRegionCount=1 -->
+ <VarRegionIndex index="0" value="0"/>
+ <Item index="0" value="[1638]"/>
+ </VarData>
+ </VarStore>
+ </avar>
+
+</ttFont>
diff --git a/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx b/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx
index aee6f5ae..bff0993c 100644
--- a/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx
+++ b/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx
@@ -2,6 +2,7 @@
<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.14">
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="0.0" to="0.0"/>
diff --git a/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx b/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx
index 799d68f1..f348a5b7 100644
--- a/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx
+++ b/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx
@@ -2,6 +2,7 @@
<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.14">
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="-0.6667" to="-0.7969"/>
diff --git a/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx b/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx
index 9daa330f..aacd2888 100644
--- a/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx
+++ b/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx
@@ -2,6 +2,7 @@
<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.14">
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="-0.6667" to="-0.7969"/>
diff --git a/Tests/varLib/data/test_results/BuildMain.ttx b/Tests/varLib/data/test_results/BuildMain.ttx
index 27d02d1d..3a1bcfd3 100644
--- a/Tests/varLib/data/test_results/BuildMain.ttx
+++ b/Tests/varLib/data/test_results/BuildMain.ttx
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.19">
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.42">
<GlyphOrder>
<!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
@@ -629,7 +629,7 @@
<Format value="1"/>
<VarRegionList>
<!-- RegionAxisCount=2 -->
- <!-- RegionCount=5 -->
+ <!-- RegionCount=2 -->
<Region index="0">
<VarRegionAxis index="0">
<StartCoord value="-1.0"/>
@@ -654,58 +654,29 @@
<EndCoord value="0.0"/>
</VarRegionAxis>
</Region>
- <Region index="2">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="0.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="3">
- <VarRegionAxis index="0">
- <StartCoord value="-1.0"/>
- <PeakCoord value="-1.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="4">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
</VarRegionList>
<!-- VarDataCount=1 -->
<VarData index="0">
- <!-- ItemCount=6 -->
+ <!-- ItemCount=5 -->
<NumShorts value="0"/>
<!-- VarRegionCount=2 -->
<VarRegionIndex index="0" value="0"/>
<VarRegionIndex index="1" value="1"/>
- <Item index="0" value="[0, 0]"/>
- <Item index="1" value="[14, -28]"/>
- <Item index="2" value="[-10, 17]"/>
- <Item index="3" value="[-3, 32]"/>
- <Item index="4" value="[-7, 63]"/>
- <Item index="5" value="[-7, 63]"/>
+ <Item index="0" value="[-10, 17]"/>
+ <Item index="1" value="[-7, 63]"/>
+ <Item index="2" value="[-3, 32]"/>
+ <Item index="3" value="[0, 0]"/>
+ <Item index="4" value="[14, -28]"/>
</VarData>
</VarStore>
+ <AdvWidthMap>
+ <Map glyph=".notdef" outer="0" inner="3"/>
+ <Map glyph="uni0020" outer="0" inner="4"/>
+ <Map glyph="uni0024" outer="0" inner="1"/>
+ <Map glyph="uni0024.nostroke" outer="0" inner="1"/>
+ <Map glyph="uni0041" outer="0" inner="0"/>
+ <Map glyph="uni0061" outer="0" inner="2"/>
+ </AdvWidthMap>
</HVAR>
<MVAR>
@@ -717,7 +688,7 @@
<Format value="1"/>
<VarRegionList>
<!-- RegionAxisCount=2 -->
- <!-- RegionCount=5 -->
+ <!-- RegionCount=2 -->
<Region index="0">
<VarRegionAxis index="0">
<StartCoord value="-1.0"/>
@@ -742,42 +713,6 @@
<EndCoord value="0.0"/>
</VarRegionAxis>
</Region>
- <Region index="2">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="0.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="3">
- <VarRegionAxis index="0">
- <StartCoord value="-1.0"/>
- <PeakCoord value="-1.0"/>
- <EndCoord value="0.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="4">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- <VarRegionAxis index="1">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
</VarRegionList>
<!-- VarDataCount=1 -->
<VarData index="0">
diff --git a/Tests/varLib/data/test_results/DropOnCurves.ttx b/Tests/varLib/data/test_results/DropOnCurves.ttx
new file mode 100644
index 00000000..4bfd36ad
--- /dev/null
+++ b/Tests/varLib/data/test_results/DropOnCurves.ttx
@@ -0,0 +1,498 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont>
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="uni0020"/>
+ <GlyphID id="2" name="uni0061"/>
+ </GlyphOrder>
+
+ <hhea>
+ <tableVersion value="0x00010000"/>
+ <ascent value="918"/>
+ <descent value="-335"/>
+ <lineGap value="0"/>
+ <advanceWidthMax value="640"/>
+ <minLeftSideBearing value="46"/>
+ <minRightSideBearing value="7"/>
+ <xMaxExtent value="560"/>
+ <caretSlopeRise value="1"/>
+ <caretSlopeRun value="0"/>
+ <caretOffset value="0"/>
+ <reserved0 value="0"/>
+ <reserved1 value="0"/>
+ <reserved2 value="0"/>
+ <reserved3 value="0"/>
+ <metricDataFormat value="0"/>
+ <numberOfHMetrics value="3"/>
+ </hhea>
+
+ <maxp>
+ <!-- Most of this table will be recalculated by the compiler -->
+ <tableVersion value="0x10000"/>
+ <numGlyphs value="3"/>
+ <maxPoints value="60"/>
+ <maxContours value="4"/>
+ <maxCompositePoints value="0"/>
+ <maxCompositeContours value="0"/>
+ <maxZones value="1"/>
+ <maxTwilightPoints value="0"/>
+ <maxStorage value="0"/>
+ <maxFunctionDefs value="1"/>
+ <maxInstructionDefs value="0"/>
+ <maxStackElements value="1"/>
+ <maxSizeOfInstructions value="5"/>
+ <maxComponentElements value="0"/>
+ <maxComponentDepth value="0"/>
+ </maxp>
+
+ <OS_2>
+ <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
+ will be recalculated by the compiler -->
+ <version value="4"/>
+ <xAvgCharWidth value="506"/>
+ <usWeightClass value="400"/>
+ <usWidthClass value="5"/>
+ <fsType value="00000000 00000100"/>
+ <ySubscriptXSize value="650"/>
+ <ySubscriptYSize value="600"/>
+ <ySubscriptXOffset value="0"/>
+ <ySubscriptYOffset value="75"/>
+ <ySuperscriptXSize value="650"/>
+ <ySuperscriptYSize value="600"/>
+ <ySuperscriptXOffset value="0"/>
+ <ySuperscriptYOffset value="350"/>
+ <yStrikeoutSize value="50"/>
+ <yStrikeoutPosition value="284"/>
+ <sFamilyClass value="0"/>
+ <panose>
+ <bFamilyType value="2"/>
+ <bSerifStyle value="4"/>
+ <bWeight value="6"/>
+ <bProportion value="3"/>
+ <bContrast value="5"/>
+ <bStrokeVariation value="4"/>
+ <bArmStyle value="5"/>
+ <bLetterForm value="2"/>
+ <bMidline value="2"/>
+ <bXHeight value="4"/>
+ </panose>
+ <ulUnicodeRange1 value="00000000 00000000 00000000 00000011"/>
+ <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/>
+ <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/>
+ <achVendID value="ADBO"/>
+ <fsSelection value="00000000 01000000"/>
+ <usFirstCharIndex value="32"/>
+ <usLastCharIndex value="97"/>
+ <sTypoAscender value="730"/>
+ <sTypoDescender value="-270"/>
+ <sTypoLineGap value="0"/>
+ <usWinAscent value="918"/>
+ <usWinDescent value="335"/>
+ <ulCodePageRange1 value="00100000 00000000 00000000 00000011"/>
+ <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/>
+ <sxHeight value="474"/>
+ <sCapHeight value="677"/>
+ <usDefaultChar value="0"/>
+ <usBreakChar value="32"/>
+ <usMaxContext value="0"/>
+ </OS_2>
+
+ <hmtx>
+ <mtx name=".notdef" width="640" lsb="80"/>
+ <mtx name="uni0020" width="234" lsb="0"/>
+ <mtx name="uni0061" width="508" lsb="46"/>
+ </hmtx>
+
+ <cmap>
+ <tableVersion version="0"/>
+ <cmap_format_4 platformID="0" platEncID="3" language="0">
+ <map code="0x20" name="uni0020"/><!-- SPACE -->
+ <map code="0x61" name="uni0061"/><!-- LATIN SMALL LETTER A -->
+ </cmap_format_4>
+ <cmap_format_4 platformID="3" platEncID="1" language="0">
+ <map code="0x20" name="uni0020"/><!-- SPACE -->
+ <map code="0x61" name="uni0061"/><!-- LATIN SMALL LETTER A -->
+ </cmap_format_4>
+ </cmap>
+
+ <loca>
+ <!-- The 'loca' table will be calculated by the compiler -->
+ </loca>
+
+ <glyf>
+
+ <!-- The xMin, yMin, xMax and yMax values
+ will be recalculated by the compiler. -->
+
+ <TTGlyph name=".notdef" xMin="80" yMin="0" xMax="560" yMax="670">
+ <contour>
+ <pt x="80" y="0" on="1"/>
+ <pt x="500" y="670" on="1"/>
+ <pt x="560" y="670" on="1"/>
+ <pt x="140" y="0" on="1"/>
+ </contour>
+ <contour>
+ <pt x="560" y="0" on="1"/>
+ <pt x="500" y="0" on="1"/>
+ <pt x="80" y="670" on="1"/>
+ <pt x="140" y="670" on="1"/>
+ </contour>
+ <contour>
+ <pt x="140" y="50" on="1"/>
+ <pt x="500" y="50" on="1"/>
+ <pt x="500" y="620" on="1"/>
+ <pt x="140" y="620" on="1"/>
+ </contour>
+ <contour>
+ <pt x="80" y="0" on="1"/>
+ <pt x="80" y="670" on="1"/>
+ <pt x="560" y="670" on="1"/>
+ <pt x="560" y="0" on="1"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ <TTGlyph name="uni0020"/><!-- contains no outline data -->
+
+ <TTGlyph name="uni0061" xMin="46" yMin="-13" xMax="501" yMax="487">
+ <contour>
+ <pt x="46" y="154" on="0"/>
+ <pt x="110" y="225" on="0"/>
+ <pt x="210" y="262" on="1"/>
+ <pt x="242" y="273" on="0"/>
+ <pt x="328" y="297" on="0"/>
+ <pt x="365" y="304" on="1"/>
+ <pt x="365" y="268" on="1"/>
+ <pt x="331" y="261" on="0"/>
+ <pt x="254" y="237" on="0"/>
+ <pt x="231" y="228" on="1"/>
+ <pt x="164" y="202" on="0"/>
+ <pt x="131" y="148" on="0"/>
+ <pt x="131" y="126" on="1"/>
+ <pt x="131" y="86" on="0"/>
+ <pt x="178" y="52" on="0"/>
+ <pt x="212" y="52" on="1"/>
+ <pt x="238" y="52" on="0"/>
+ <pt x="283" y="76" on="0"/>
+ <pt x="330" y="110" on="1"/>
+ <pt x="350" y="125" on="1"/>
+ <pt x="364" y="104" on="1"/>
+ <pt x="335" y="75" on="1"/>
+ <pt x="290" y="30" on="0"/>
+ <pt x="226" y="-13" on="0"/>
+ <pt x="180" y="-13" on="1"/>
+ <pt x="125" y="-13" on="0"/>
+ <pt x="46" y="50" on="0"/>
+ </contour>
+ <contour>
+ <pt x="325" y="92" on="1"/>
+ <pt x="325" y="320" on="1"/>
+ <pt x="325" y="394" on="0"/>
+ <pt x="280" y="442" on="0"/>
+ <pt x="231" y="442" on="1"/>
+ <pt x="214" y="442" on="0"/>
+ <pt x="169" y="435" on="0"/>
+ <pt x="141" y="424" on="1"/>
+ <pt x="181" y="455" on="1"/>
+ <pt x="155" y="369" on="1"/>
+ <pt x="148" y="347" on="0"/>
+ <pt x="124" y="324" on="0"/>
+ <pt x="104" y="324" on="1"/>
+ <pt x="62" y="324" on="0"/>
+ <pt x="59" y="364" on="1"/>
+ <pt x="73" y="421" on="0"/>
+ <pt x="177" y="487" on="0"/>
+ <pt x="252" y="487" on="1"/>
+ <pt x="329" y="487" on="0"/>
+ <pt x="405" y="408" on="0"/>
+ <pt x="405" y="314" on="1"/>
+ <pt x="405" y="102" on="1"/>
+ <pt x="405" y="68" on="0"/>
+ <pt x="425" y="41" on="0"/>
+ <pt x="442" y="41" on="1"/>
+ <pt x="455" y="41" on="0"/>
+ <pt x="473" y="53" on="0"/>
+ <pt x="481" y="63" on="1"/>
+ <pt x="501" y="41" on="1"/>
+ <pt x="469" y="-10" on="0"/>
+ <pt x="416" y="-10" on="1"/>
+ <pt x="375" y="-10" on="0"/>
+ <pt x="325" y="46" on="0"/>
+ </contour>
+ <instructions/>
+ </TTGlyph>
+
+ </glyf>
+
+ <name>
+ <namerecord nameID="256" platformID="1" platEncID="0" langID="0x0" unicode="True">
+ Weight
+ </namerecord>
+ <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409">
+ Test Family
+ </namerecord>
+ <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409">
+ Regular
+ </namerecord>
+ <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409">
+ Version 1.001;ADBO;Test Family Regular
+ </namerecord>
+ <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409">
+ Test Family
+ </namerecord>
+ <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409">
+ Version 1.001
+ </namerecord>
+ <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409">
+ TestFamily-Master1
+ </namerecord>
+ <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409">
+ Frank Grießhammer
+ </namerecord>
+ <namerecord nameID="17" platformID="3" platEncID="1" langID="0x409">
+ Master 1
+ </namerecord>
+ <namerecord nameID="256" platformID="3" platEncID="1" langID="0x409">
+ Weight
+ </namerecord>
+ </name>
+
+ <post>
+ <formatType value="2.0"/>
+ <italicAngle value="0.0"/>
+ <underlinePosition value="-75"/>
+ <underlineThickness value="50"/>
+ <isFixedPitch value="0"/>
+ <minMemType42 value="0"/>
+ <maxMemType42 value="0"/>
+ <minMemType1 value="0"/>
+ <maxMemType1 value="0"/>
+ <psNames>
+ <!-- This file uses unique glyph names based on the information
+ found in the 'post' table. Since these names might not be unique,
+ we have to invent artificial names in case of clashes. In order to
+ be able to retain the original information, we need a name to
+ ps name mapping for those cases where they differ. That's what
+ you see below.
+ -->
+ </psNames>
+ <extraNames>
+ <!-- following are the name that are not taken from the standard Mac glyph order -->
+ <psName name="uni0020"/>
+ <psName name="uni0061"/>
+ </extraNames>
+ </post>
+
+ <GDEF>
+ <Version value="0x00010003"/>
+ <GlyphClassDef>
+ <ClassDef glyph="uni0061" class="1"/>
+ </GlyphClassDef>
+ </GDEF>
+
+ <HVAR>
+ <Version value="0x00010000"/>
+ <VarStore Format="1">
+ <Format value="1"/>
+ <VarRegionList>
+ <!-- RegionAxisCount=1 -->
+ <!-- RegionCount=1 -->
+ <Region index="0">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ </VarRegionList>
+ <!-- VarDataCount=1 -->
+ <VarData index="0">
+ <!-- ItemCount=3 -->
+ <NumShorts value="0"/>
+ <!-- VarRegionCount=1 -->
+ <VarRegionIndex index="0" value="0"/>
+ <Item index="0" value="[0]"/>
+ <Item index="1" value="[-28]"/>
+ <Item index="2" value="[32]"/>
+ </VarData>
+ </VarStore>
+ </HVAR>
+
+ <MVAR>
+ <Version value="0x00010000"/>
+ <Reserved value="0"/>
+ <ValueRecordSize value="8"/>
+ <!-- ValueRecordCount=2 -->
+ <VarStore Format="1">
+ <Format value="1"/>
+ <VarRegionList>
+ <!-- RegionAxisCount=1 -->
+ <!-- RegionCount=1 -->
+ <Region index="0">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ </VarRegionList>
+ <!-- VarDataCount=1 -->
+ <VarData index="0">
+ <!-- ItemCount=2 -->
+ <NumShorts value="0"/>
+ <!-- VarRegionCount=1 -->
+ <VarRegionIndex index="0" value="0"/>
+ <Item index="0" value="[8]"/>
+ <Item index="1" value="[13]"/>
+ </VarData>
+ </VarStore>
+ <ValueRecord index="0">
+ <ValueTag value="stro"/>
+ <VarIdx value="0"/>
+ </ValueRecord>
+ <ValueRecord index="1">
+ <ValueTag value="xhgt"/>
+ <VarIdx value="1"/>
+ </ValueRecord>
+ </MVAR>
+
+ <STAT>
+ <Version value="0x00010001"/>
+ <DesignAxisRecordSize value="8"/>
+ <!-- DesignAxisCount=1 -->
+ <DesignAxisRecord>
+ <Axis index="0">
+ <AxisTag value="wght"/>
+ <AxisNameID value="256"/> <!-- Weight -->
+ <AxisOrdering value="0"/>
+ </Axis>
+ </DesignAxisRecord>
+ <!-- AxisValueCount=0 -->
+ <ElidedFallbackNameID value="2"/> <!-- Regular -->
+ </STAT>
+
+ <fvar>
+
+ <!-- Weight -->
+ <Axis>
+ <AxisTag>wght</AxisTag>
+ <Flags>0x0</Flags>
+ <MinValue>400.0</MinValue>
+ <DefaultValue>400.0</DefaultValue>
+ <MaxValue>1000.0</MaxValue>
+ <AxisNameID>256</AxisNameID>
+ </Axis>
+ </fvar>
+
+ <gvar>
+ <version value="1"/>
+ <reserved value="0"/>
+ <glyphVariations glyph=".notdef">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="-20" y="-18"/>
+ <delta pt="2" x="0" y="-18"/>
+ <delta pt="3" x="20" y="0"/>
+ <delta pt="4" x="0" y="0"/>
+ <delta pt="5" x="-20" y="0"/>
+ <delta pt="6" x="0" y="-18"/>
+ <delta pt="7" x="20" y="-18"/>
+ <delta pt="8" x="10" y="10"/>
+ <delta pt="9" x="-10" y="10"/>
+ <delta pt="10" x="-10" y="-28"/>
+ <delta pt="11" x="10" y="-28"/>
+ <delta pt="12" x="0" y="0"/>
+ <delta pt="13" x="0" y="-18"/>
+ <delta pt="14" x="0" y="-18"/>
+ <delta pt="15" x="0" y="0"/>
+ <delta pt="16" x="0" y="0"/>
+ <delta pt="17" x="0" y="0"/>
+ <delta pt="18" x="0" y="0"/>
+ <delta pt="19" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="uni0020">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="0" y="0"/>
+ <delta pt="1" x="-28" y="0"/>
+ <delta pt="2" x="0" y="0"/>
+ <delta pt="3" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ <glyphVariations glyph="uni0061">
+ <tuple>
+ <coord axis="wght" value="1.0"/>
+ <delta pt="0" x="-21" y="16"/>
+ <delta pt="1" x="-2" y="28"/>
+ <delta pt="2" x="20" y="23"/>
+ <delta pt="3" x="19" y="20"/>
+ <delta pt="4" x="28" y="21"/>
+ <delta pt="5" x="26" y="23"/>
+ <delta pt="6" x="26" y="15"/>
+ <delta pt="7" x="24" y="12"/>
+ <delta pt="8" x="30" y="17"/>
+ <delta pt="9" x="31" y="15"/>
+ <delta pt="10" x="77" y="31"/>
+ <delta pt="11" x="66" y="36"/>
+ <delta pt="12" x="66" y="18"/>
+ <delta pt="13" x="66" y="21"/>
+ <delta pt="14" x="49" y="19"/>
+ <delta pt="15" x="37" y="19"/>
+ <delta pt="16" x="21" y="19"/>
+ <delta pt="17" x="-2" y="5"/>
+ <delta pt="18" x="-34" y="-18"/>
+ <delta pt="19" x="-6" y="3"/>
+ <delta pt="20" x="-11" y="12"/>
+ <delta pt="21" x="-29" y="-11"/>
+ <delta pt="22" x="-17" y="-2"/>
+ <delta pt="23" x="-13" y="-3"/>
+ <delta pt="24" x="-25" y="-3"/>
+ <delta pt="25" x="-29" y="-3"/>
+ <delta pt="26" x="-21" y="2"/>
+ <delta pt="27" x="-34" y="-14"/>
+ <delta pt="28" x="-34" y="17"/>
+ <delta pt="29" x="-34" y="7"/>
+ <delta pt="30" x="-18" y="7"/>
+ <delta pt="31" x="-16" y="7"/>
+ <delta pt="32" x="-18" y="7"/>
+ <delta pt="33" x="-15" y="9"/>
+ <delta pt="34" x="-21" y="12"/>
+ <delta pt="35" x="19" y="23"/>
+ <delta pt="36" x="45" y="46"/>
+ <delta pt="37" x="52" y="7"/>
+ <delta pt="38" x="26" y="-21"/>
+ <delta pt="39" x="14" y="-21"/>
+ <delta pt="40" x="-5" y="-21"/>
+ <delta pt="41" x="-17" y="-7"/>
+ <delta pt="42" x="-31" y="1"/>
+ <delta pt="43" x="-12" y="16"/>
+ <delta pt="44" x="34" y="16"/>
+ <delta pt="45" x="61" y="16"/>
+ <delta pt="46" x="70" y="4"/>
+ <delta pt="47" x="70" y="-5"/>
+ <delta pt="48" x="70" y="-22"/>
+ <delta pt="49" x="70" y="4"/>
+ <delta pt="50" x="59" y="22"/>
+ <delta pt="51" x="50" y="22"/>
+ <delta pt="52" x="43" y="22"/>
+ <delta pt="53" x="37" y="19"/>
+ <delta pt="54" x="38" y="22"/>
+ <delta pt="55" x="47" y="28"/>
+ <delta pt="56" x="46" y="-6"/>
+ <delta pt="57" x="-2" y="-6"/>
+ <delta pt="58" x="-16" y="-6"/>
+ <delta pt="59" x="-25" y="-13"/>
+ <delta pt="60" x="0" y="0"/>
+ <delta pt="61" x="32" y="0"/>
+ <delta pt="62" x="0" y="0"/>
+ <delta pt="63" x="0" y="0"/>
+ </tuple>
+ </glyphVariations>
+ </gvar>
+
+</ttFont>
diff --git a/Tests/varLib/data/test_results/FeatureVars_rclt.ttx b/Tests/varLib/data/test_results/FeatureVars_rclt.ttx
index b889f3a5..43691364 100644
--- a/Tests/varLib/data/test_results/FeatureVars_rclt.ttx
+++ b/Tests/varLib/data/test_results/FeatureVars_rclt.ttx
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.29">
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.39">
<fvar>
diff --git a/Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_diff.ttx b/Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_diff.ttx
new file mode 100644
index 00000000..8a73402e
--- /dev/null
+++ b/Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_diff.ttx
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont>
+
+ <GPOS>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=1 -->
+ <FeatureIndex index="0" value="0"/>
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=1 -->
+ <FeatureRecord index="0">
+ <FeatureTag value="xxxx"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="2"/>
+ </Feature>
+ </FeatureRecord>
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=3 -->
+ <Lookup index="0">
+ <LookupType value="2"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <PairPos index="0" Format="1">
+ <Coverage>
+ <Glyph value="A"/>
+ </Coverage>
+ <ValueFormat1 value="4"/>
+ <ValueFormat2 value="0"/>
+ <!-- PairSetCount=1 -->
+ <PairSet index="0">
+ <!-- PairValueCount=1 -->
+ <PairValueRecord index="0">
+ <SecondGlyph value="a"/>
+ <Value1 XAdvance="17"/>
+ </PairValueRecord>
+ </PairSet>
+ </PairPos>
+ </Lookup>
+ <Lookup index="1">
+ <LookupType value="4"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <MarkBasePos index="0" Format="1">
+ <MarkCoverage>
+ <Glyph value="uni0303"/>
+ </MarkCoverage>
+ <BaseCoverage>
+ <Glyph value="a"/>
+ </BaseCoverage>
+ <!-- ClassCount=1 -->
+ <MarkArray>
+ <!-- MarkCount=1 -->
+ <MarkRecord index="0">
+ <Class value="0"/>
+ <MarkAnchor Format="1">
+ <XCoordinate value="0"/>
+ <YCoordinate value="510"/>
+ </MarkAnchor>
+ </MarkRecord>
+ </MarkArray>
+ <BaseArray>
+ <!-- BaseCount=1 -->
+ <BaseRecord index="0">
+ <BaseAnchor index="0" Format="1">
+ <XCoordinate value="273"/>
+ <YCoordinate value="510"/>
+ </BaseAnchor>
+ </BaseRecord>
+ </BaseArray>
+ </MarkBasePos>
+ </Lookup>
+ <Lookup index="2">
+ <LookupType value="7"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <ContextPos index="0" Format="1">
+ <Coverage>
+ <Glyph value="A"/>
+ </Coverage>
+ <!-- PosRuleSetCount=1 -->
+ <PosRuleSet index="0">
+ <!-- PosRuleCount=1 -->
+ <PosRule index="0">
+ <!-- GlyphCount=3 -->
+ <!-- PosCount=2 -->
+ <Input index="0" value="a"/>
+ <Input index="1" value="uni0303"/>
+ <PosLookupRecord index="0">
+ <SequenceIndex value="0"/>
+ <LookupListIndex value="0"/>
+ </PosLookupRecord>
+ <PosLookupRecord index="1">
+ <SequenceIndex value="2"/>
+ <LookupListIndex value="1"/>
+ </PosLookupRecord>
+ </PosRule>
+ </PosRuleSet>
+ </ContextPos>
+ </Lookup>
+ </LookupList>
+ </GPOS>
+
+</ttFont>
diff --git a/Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_same.ttx b/Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_same.ttx
new file mode 100644
index 00000000..17636512
--- /dev/null
+++ b/Tests/varLib/data/test_results/InterpolateLayoutGPOS_7_same.ttx
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont>
+
+ <GPOS>
+ <Version value="0x00010000"/>
+ <ScriptList>
+ <!-- ScriptCount=1 -->
+ <ScriptRecord index="0">
+ <ScriptTag value="DFLT"/>
+ <Script>
+ <DefaultLangSys>
+ <ReqFeatureIndex value="65535"/>
+ <!-- FeatureCount=1 -->
+ <FeatureIndex index="0" value="0"/>
+ </DefaultLangSys>
+ <!-- LangSysCount=0 -->
+ </Script>
+ </ScriptRecord>
+ </ScriptList>
+ <FeatureList>
+ <!-- FeatureCount=1 -->
+ <FeatureRecord index="0">
+ <FeatureTag value="xxxx"/>
+ <Feature>
+ <!-- LookupCount=1 -->
+ <LookupListIndex index="0" value="2"/>
+ </Feature>
+ </FeatureRecord>
+ </FeatureList>
+ <LookupList>
+ <!-- LookupCount=3 -->
+ <Lookup index="0">
+ <LookupType value="2"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <PairPos index="0" Format="1">
+ <Coverage>
+ <Glyph value="A"/>
+ </Coverage>
+ <ValueFormat1 value="4"/>
+ <ValueFormat2 value="0"/>
+ <!-- PairSetCount=1 -->
+ <PairSet index="0">
+ <!-- PairValueCount=1 -->
+ <PairValueRecord index="0">
+ <SecondGlyph value="a"/>
+ <Value1 XAdvance="-23"/>
+ </PairValueRecord>
+ </PairSet>
+ </PairPos>
+ </Lookup>
+ <Lookup index="1">
+ <LookupType value="4"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <MarkBasePos index="0" Format="1">
+ <MarkCoverage>
+ <Glyph value="uni0303"/>
+ </MarkCoverage>
+ <BaseCoverage>
+ <Glyph value="a"/>
+ </BaseCoverage>
+ <!-- ClassCount=1 -->
+ <MarkArray>
+ <!-- MarkCount=1 -->
+ <MarkRecord index="0">
+ <Class value="0"/>
+ <MarkAnchor Format="1">
+ <XCoordinate value="0"/>
+ <YCoordinate value="500"/>
+ </MarkAnchor>
+ </MarkRecord>
+ </MarkArray>
+ <BaseArray>
+ <!-- BaseCount=1 -->
+ <BaseRecord index="0">
+ <BaseAnchor index="0" Format="1">
+ <XCoordinate value="260"/>
+ <YCoordinate value="500"/>
+ </BaseAnchor>
+ </BaseRecord>
+ </BaseArray>
+ </MarkBasePos>
+ </Lookup>
+ <Lookup index="2">
+ <LookupType value="7"/>
+ <LookupFlag value="0"/>
+ <!-- SubTableCount=1 -->
+ <ContextPos index="0" Format="1">
+ <Coverage>
+ <Glyph value="A"/>
+ </Coverage>
+ <!-- PosRuleSetCount=1 -->
+ <PosRuleSet index="0">
+ <!-- PosRuleCount=1 -->
+ <PosRule index="0">
+ <!-- GlyphCount=3 -->
+ <!-- PosCount=2 -->
+ <Input index="0" value="a"/>
+ <Input index="1" value="uni0303"/>
+ <PosLookupRecord index="0">
+ <SequenceIndex value="0"/>
+ <LookupListIndex value="0"/>
+ </PosLookupRecord>
+ <PosLookupRecord index="1">
+ <SequenceIndex value="2"/>
+ <LookupListIndex value="1"/>
+ </PosLookupRecord>
+ </PosRule>
+ </PosRuleSet>
+ </ContextPos>
+ </Lookup>
+ </LookupList>
+ </GPOS>
+
+</ttFont>
diff --git a/Tests/varLib/data/test_results/SparseCFF2-VF.ttx b/Tests/varLib/data/test_results/SparseCFF2-VF.ttx
new file mode 100644
index 00000000..4a1861c9
--- /dev/null
+++ b/Tests/varLib/data/test_results/SparseCFF2-VF.ttx
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ttFont sfntVersion="OTTO" ttLibVersion="4.42">
+
+ <GlyphOrder>
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
+ <GlyphID id="0" name=".notdef"/>
+ <GlyphID id="1" name="a"/>
+ <GlyphID id="2" name="e"/>
+ </GlyphOrder>
+
+ <CFF2>
+ <major value="2"/>
+ <minor value="0"/>
+ <CFFFont name="CFF2Font">
+ <FontMatrix value="0.001 0 0 0.001 0 0"/>
+ <FDArray>
+ <FontDict index="0">
+ <Private>
+ <BlueScale value="0.039625"/>
+ <BlueShift value="7"/>
+ <BlueFuzz value="1"/>
+ <LanguageGroup value="0"/>
+ <ExpansionFactor value="0.06"/>
+ </Private>
+ </FontDict>
+ </FDArray>
+ <CharStrings>
+ <CharString name=".notdef">
+ 50 -250 -100 1 blend
+ rmoveto
+ 400 1000 -400 100 100 -100 3 blend
+ hlineto
+ 50 -950 rmoveto
+ 900 300 -900 vlineto
+ </CharString>
+ <CharString name="a">
+ 468 -1 rmoveto
+ -21 435 -233 70 -205 -76 27 -91 -56 1 blend
+ 172 60 155 -40 -59 2 2 blend
+ 3 -360 56 1 blend
+ rlineto
+ 12 266 59 -2 2 blend
+ rmoveto
+ -352 -23 3 -218 139 -34 221 83 -6 63 -222 -60 -75 52 15 40 13 37 -21 5 blend
+ 2 46 294 35 -78 -30 2 blend
+ rlineto
+ </CharString>
+ <CharString name="e">
+ 1 vsindex
+ 127 228 -1 70 -25 1 2 blend
+ rmoveto
+ 449 -2 1 -45 -2 -2 2 blend
+ -5 79 -255 208 -276 -252 148 -279 338 63 -17 84 -280 -54 -82 188 170 153 163 -124 -355 6 27 0 0 -27 0 36 0 -29 0 -34 0 31 0 -1 0 2 0 -45 -2 13 28 100 37 0 13 0 -2 55 -40 -54 -32 -86 -30 -57 -85 -60 34 57 84 146 -5 0 21 blend
+ rlineto
+ </CharString>
+ </CharStrings>
+ <VarStore Format="1">
+ <Format value="1"/>
+ <VarRegionList>
+ <!-- RegionAxisCount=1 -->
+ <!-- RegionCount=3 -->
+ <Region index="0">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ <Region index="1">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="0.36365"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ <Region index="2">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.36365"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ </VarRegionList>
+ <!-- VarDataCount=2 -->
+ <VarData index="0">
+ <!-- ItemCount=0 -->
+ <NumShorts value="0"/>
+ <!-- VarRegionCount=1 -->
+ <VarRegionIndex index="0" value="0"/>
+ </VarData>
+ <VarData index="1">
+ <!-- ItemCount=0 -->
+ <NumShorts value="0"/>
+ <!-- VarRegionCount=2 -->
+ <VarRegionIndex index="0" value="1"/>
+ <VarRegionIndex index="1" value="2"/>
+ </VarData>
+ </VarStore>
+ </CFFFont>
+
+ <GlobalSubrs>
+ <!-- The 'index' attribute is only for humans; it is ignored when parsed. -->
+ </GlobalSubrs>
+ </CFF2>
+
+ <fvar>
+
+ <!-- Weight -->
+ <Axis>
+ <AxisTag>wght</AxisTag>
+ <Flags>0x0</Flags>
+ <MinValue>350.0</MinValue>
+ <DefaultValue>350.0</DefaultValue>
+ <MaxValue>625.0</MaxValue>
+ <AxisNameID>256</AxisNameID>
+ </Axis>
+ </fvar>
+
+ <hmtx>
+ <mtx name=".notdef" width="500" lsb="50"/>
+ <mtx name="a" width="600" lsb="9"/>
+ <mtx name="e" width="600" lsb="40"/>
+ </hmtx>
+
+ <HVAR>
+ <Version value="0x00010000"/>
+ <VarStore Format="1">
+ <Format value="1"/>
+ <VarRegionList>
+ <!-- RegionAxisCount=1 -->
+ <!-- RegionCount=1 -->
+ <Region index="0">
+ <VarRegionAxis index="0">
+ <StartCoord value="0.0"/>
+ <PeakCoord value="1.0"/>
+ <EndCoord value="1.0"/>
+ </VarRegionAxis>
+ </Region>
+ </VarRegionList>
+ <!-- VarDataCount=1 -->
+ <VarData index="0">
+ <!-- ItemCount=2 -->
+ <NumShorts value="1"/>
+ <!-- VarRegionCount=1 -->
+ <VarRegionIndex index="0" value="0"/>
+ <Item index="0" value="[0]"/>
+ <Item index="1" value="[300]"/>
+ </VarData>
+ </VarStore>
+ <AdvWidthMap>
+ <Map glyph=".notdef" outer="0" inner="1"/>
+ <Map glyph="a" outer="0" inner="0"/>
+ <Map glyph="e" outer="0" inner="0"/>
+ </AdvWidthMap>
+ </HVAR>
+
+</ttFont>
diff --git a/Tests/varLib/data/test_results/SparseMasters.ttx b/Tests/varLib/data/test_results/SparseMasters.ttx
index a3f8e619..2871e24f 100644
--- a/Tests/varLib/data/test_results/SparseMasters.ttx
+++ b/Tests/varLib/data/test_results/SparseMasters.ttx
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="3.35">
+<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="4.42">
<GlyphOrder>
<!-- The 'id' attribute is only for humans; it is ignored when parsed. -->
@@ -440,28 +440,7 @@
<Format value="1"/>
<VarRegionList>
<!-- RegionAxisCount=1 -->
- <!-- RegionCount=3 -->
- <Region index="0">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="0.36365"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="1">
- <VarRegionAxis index="0">
- <StartCoord value="0.36365"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
- <Region index="2">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
+ <!-- RegionCount=0 -->
</VarRegionList>
<!-- VarDataCount=1 -->
<VarData index="0">
diff --git a/Tests/varLib/data/test_results/TestSparseCFF2VF.ttx b/Tests/varLib/data/test_results/TestSparseCFF2VF.ttx
index 264a3d4a..7c3267a8 100644
--- a/Tests/varLib/data/test_results/TestSparseCFF2VF.ttx
+++ b/Tests/varLib/data/test_results/TestSparseCFF2VF.ttx
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ttFont sfntVersion="OTTO" ttLibVersion="3.41">
+<ttFont sfntVersion="OTTO" ttLibVersion="4.37">
<fvar>
@@ -189,7 +189,7 @@
<CharString name="cid00002" fdSelectIndex="2">
-12 83 1 126 2 blend
hstemhm
- 74 73 -57 40 -26 129 -125 122 4 blend
+ 74 73 -57 40 -26 129 -125 121.5 4 blend
vstemhm
hintmask 10100000
134 755 107 18 2 blend
@@ -208,927 +208,927 @@
</CharString>
<CharString name="cid01177" fdSelectIndex="1">
1 vsindex
- -72 30 253 30 94 30 92 30 65 30 131 45 -30 112 -99 17 -8 0 -12 59 -2 85 -64 2 -92 39 -1 56 -44 2 -63 35 -1 51 -43 1 -62 39 -1 56 -21 0 -31 56 -2 81 -56 2 -81 44 -2 63 -57 3 -81 31 -1 45 -18 0 -27 44 -2 63 16 blend
+ -72 30 253 30 94 30 92 30 65 30 131 45 -30 112 -99 17 -8 -0.02417 -12 59 -1.82175 85 -64 1.80664 -92 39 -0.88217 56 -44 1.86707 -63 35 -0.89426 51 -43 0.87009 -62 39 -0.88217 56 -21 -0.06345 -31 56 -1.83081 81 -56 1.83081 -81 44 -1.86707 63 -57 2.82779 -81 31 -0.90634 45 -18 -0.05438 -27 44 -1.86707 63 16 blend
hstemhm
- 193 30 83 30 147 30 173 30 66 30 -20 1 -28 75 -3 107 -74 3 -106 78 -4 111 -99 5 -141 81 -3 116 -79 2 -114 64 -2 92 -81 4 -115 80 -4 114 10 blend
+ 193 30 83 30 147 30 173 30 66 30 -20 0.93958 -28 75 -2.7734 107 -74 2.77643 -106 78 -3.76434 111 -99 4.70091 -141 81 -2.75528 116 -79 1.76132 -114 64 -1.80664 92 -81 3.75528 -115 80 -3.7583 114 10 blend
vstemhm
hintmask 1111100011111000
- 306 142 -19 1 -27 7 0 10 2 blend
+ 306 142 -19 0.9426 -27 7 0.02115 10 2 blend
rmoveto
- -156 45 -2 64 1 blend
+ -156 45 -1.86404 64 1 blend
vlineto
- -50 22 -8 79 -42 2 -59 8 -1 11 -18 0 -27 43 -1 62 4 blend
+ -50 22 -8 79 -42 1.87311 -59 8 -0.97583 11 -18 -0.05438 -27 43 -0.87009 62 4 blend
vhcurveto
- 17 186 8 -1 11 -68 3 -97 2 blend
- 0 18 8 0 12 1 blend
+ 17 186 8 -0.97583 11 -68 2.79456 -97 2 blend
+ 0 18 8 0.02417 12 1 blend
hhcurveto
- 70 13 25 114 5 22 -1 31 17 -1 24 2 1 4 0 -1 -1 7 0 10 5 blend
+ 70 13 25 114 5 22 -0.93353 31 17 -0.94864 24 2 1 4 0 -1 -1 7 0.02115 10 5 blend
hvcurveto
- -9 3 -12 4 -9 6 -20 1 -28 3 0 4 -31 1 -45 10 0 15 -13 0 -19 9 -1 13 6 blend
+ -9 3 -12 4 -9 6 -20 0.93958 -28 3 0 4 -31 0.90634 -45 10 0.03021 15 -13 -0.03928 -19 9 -0.97281 13 6 blend
rrcurveto
- -109 -4 -7 -13 -49 -38 -156 33 -1 47 -1 0 -1 1 0 1 2 0 3 10 0 15 9 0 13 60 -3 85 7 blend
- 0 -27 5 0 8 1 blend
+ -109 -4 -7 -13 -49 -38 -156 33 -0.9003 47 -1 0 -1 1 0 1 2 0 3 10 0.03021 15 9 0.02719 13 60 -2.81873 85 7 blend
+ 0 -27 5 0.0151 8 1 blend
hhcurveto
- -59 -10 5 22 11 0 16 2 -1 2 -1 0 -2 4 0 6 4 blend
+ -59 -10 5 22 11 0.03323 16 2 -1 2 -1 0 -2 4 0.01208 6 4 blend
hvcurveto
- 157 -47 2 -67 1 blend
+ 157 -47 1.858 -67 1 blend
vlineto
- 63 34 -74 3 -106 -25 1 -36 2 blend
+ 63 34 -74 2.77643 -106 -25 0.92447 -36 2 blend
rmoveto
- 65 -30 74 -47 37 -37 -7 1 -10 5 -1 7 -3 0 -4 5 0 7 -3 0 -4 6 0 9 6 blend
+ 65 -30 74 -47 37 -37 -7 0.97885 -10 5 -0.9849 7 -3 0 -4 5 0.0151 7 -3 0 -4 6 0.01813 9 6 blend
rrcurveto
- 20 22 -37 36 -75 47 -65 28 48 -2 68 47 -2 67 -1 0 -1 -6 1 -8 2 0 3 -8 0 -11 8 -1 11 -6 0 -9 8 blend
+ 20 22 -37 36 -75 47 -65 28 48 -1.85498 68 47 -1.858 67 -1 0 -1 -6 0.98187 -8 2 0 3 -8 -0.02417 -11 8 -0.97583 11 -6 -0.01813 -9 8 blend
rlinecurve
- 320 -64 -49 3 -69 -32 1 -46 2 blend
+ 320 -64 -49 2.85196 -69 -32 0.90332 -46 2 blend
rmoveto
- 76 -49 83 -75 38 -12 0 -18 -6 1 -8 -13 0 -19 -4 0 -6 -9 1 -12 5 blend
+ 76 -49 83 -75 38 -12 -0.03625 -18 -6 0.98187 -8 -13 -0.03928 -19 -4 -0.01208 -6 -9 0.97281 -12 5 blend
-54 rrcurveto
- 23 19 -38 54 -84 73 -76 49 69 -3 98 35 -2 50 5 0 8 2 0 3 11 0 16 2 0 2 13 -1 18 2 0 4 8 blend
+ 23 19 -38 54 -84 73 -76 49 69 -2.79153 98 35 -1.89426 50 5 0.0151 8 2 0 3 11 0.03323 16 2 0 2 13 -0.96072 18 2 0 4 8 blend
rlinecurve
- -557 -5 -85 4 -121 -6 0 -9 2 blend
+ -557 -5 -85 3.74321 -121 -6 -0.01813 -9 2 blend
rmoveto
- -28 -68 -50 -72 -77 -40 5 -1 6 1 0 1 3 1 5 6 0 9 13 -1 18 1 0 1 6 blend
+ -28 -68 -50 -72 -77 -40 5 -0.9849 6 1 0 1 3 1 5 6 0.01813 9 13 -0.96072 18 1 0 1 6 blend
rrcurveto
- 24 -17 79 42 47 74 31 71 62 -3 89 -42 2 -60 -7 1 -10 5 -1 7 -6 0 -8 1 0 1 -2 -1 -4 4 1 7 8 blend
+ 24 -17 79 42 47 74 31 71 62 -2.81268 89 -42 1.87311 -60 -7 0.97885 -10 5 -0.9849 7 -6 -0.01813 -8 1 0 1 -2 -1 -4 4 1.01208 7 8 blend
rlinecurve
- -117 625 -26 2 -36 42 -3 59 2 blend
+ -117 625 -26 1.92145 -36 42 -2.87311 59 2 blend
rmoveto
- -30 775 30 -57 3 -81 -3 0 -5 57 -3 81 3 blend
+ -30 775 30 -57 2.82779 -81 -3 0 -5 57 -2.82779 81 3 blend
vlineto
- -818 -176 -1 0 -2 12 0 18 2 blend
+ -818 -176 -1 0 -2 12 0.03625 18 2 blend
rmoveto
- -30 869 30 -56 2 -81 3 0 5 56 -2 81 3 blend
+ -30 869 30 -56 1.83081 -81 3 0 5 56 -1.83081 81 3 blend
vlineto
hintmask 0000001000100000
- -455 258 -40 2 -57 -38 2 -54 2 blend
+ -455 258 -40 1.87915 -57 -38 1.8852 -54 2 blend
rmoveto
hintmask 0000000100100000
- -99 30 -18 0 -27 81 -3 116 2 blend
+ -99 30 -18 -0.05438 -27 81 -2.75528 116 2 blend
vlineto
hintmask 0000001000100000
- 99 18 0 27 1 blend
+ 99 18 0.05438 27 1 blend
vlineto
hintmask 0111010010001000
- -236 -127 -60 2 -86 -18 0 -27 2 blend
+ -236 -127 -60 1.81873 -86 -18 -0.05438 -27 2 blend
rmoveto
- 26 -40 25 -53 9 -36 -12 1 -17 10 0 15 -10 -1 -15 13 0 19 -4 0 -6 11 -1 15 6 blend
+ 26 -40 25 -53 9 -36 -12 0.96375 -17 10 0.03021 15 -10 -1.03021 -15 13 0.03928 19 -4 -0.01208 -6 11 -0.96677 15 6 blend
rrcurveto
- 29 12 -10 35 -25 53 -27 39 76 -3 109 12 0 18 3 1 5 -10 0 -15 10 -1 14 -15 1 -21 11 0 16 -11 0 -16 8 blend
+ 29 12 -10 35 -25 53 -27 39 76 -2.77039 109 12 0.03625 18 3 1 5 -10 -0.03021 -15 10 -0.96979 14 -15 0.95468 -21 11 0.03323 16 -11 -0.03323 -16 8 blend
rlinecurve
- 393 2 -112 4 -161 -1 0 -2 2 blend
+ 393 2 -112 3.66164 -161 -1 0 -2 2 blend
rmoveto
- -16 -38 -31 -57 -23 -35 7 0 11 12 -1 17 13 -1 18 19 0 28 10 0 14 8 -1 11 6 blend
+ -16 -38 -31 -57 -23 -35 7 0.02115 11 12 -0.96375 17 13 -0.96072 18 19 0.0574 28 10 0.03021 14 8 -0.97583 11 6 blend
rrcurveto
- 27 -12 24 36 26 48 23 46 70 -2 101 -10 1 -14 -8 0 -12 -13 1 -18 -6 -1 -9 -17 0 -25 -1 1 -1 -10 1 -14 8 blend
+ 27 -12 24 36 26 48 23 46 70 -1.78851 101 -10 0.96979 -14 -8 -0.02417 -12 -13 0.96072 -18 -6 -1.01813 -9 -17 -0.05136 -25 -1 1 -1 -10 0.96979 -14 8 blend
rlinecurve
- -504 -378 27 -1 39 -8 0 -12 2 blend
+ -504 -378 27 -0.91843 39 -8 -0.02417 -12 2 blend
rmoveto
- 559 -94 -559 -110 5 -157 44 -2 63 110 -5 157 3 blend
+ 559 -94 -559 -110 4.66768 -157 44 -1.86707 63 110 -4.66768 157 3 blend
hlineto
- 216 -52 2 -74 1 blend
+ 216 -52 1.8429 -74 1 blend
vmoveto
- 559 -92 -559 -110 5 -157 43 -1 62 110 -5 157 3 blend
+ 559 -92 -559 -110 4.66768 -157 43 -0.87009 62 110 -4.66768 157 3 blend
hlineto
- -30 122 -75 3 -107 -4 0 -6 2 blend
+ -30 122 -75 2.7734 -107 -4 -0.01208 -6 2 blend
rmoveto
- -276 619 276 -26 0 -38 45 -2 64 26 0 38 3 blend
+ -276 619 276 -26 -0.07855 -38 45 -1.86404 64 26 0.07855 38 3 blend
vlineto
</CharString>
<CharString name="cid06449" fdSelectIndex="1">
2 vsindex
- -60 30 203 30 -9 9 67 7 -7 14 -14 30 -20 20 80 30 59 30 121 30 18 93 -30 30 -30 108 -23 0 -26 67 2 76 -98 -2 -111 42 0 47 -13 0 -14 13 0 14 -33 0 -37 11 0 13 -11 0 -13 8 0 8 -8 0 -8 53 0 60 -32 0 -36 32 0 36 -52 0 -59 57 1 65 -33 0 -38 53 0 60 -83 -1 -93 54 0 60 -6 -19 -24 33 19 55 -76 -1 -86 76 1 86 -76 -1 -86 59 1 67 26 blend
+ -60 30 203 30 -9 9 67 7 -7 14 -14 30 -20 20 80 30 59 30 121 30 18 93 -30 30 -30 108 -23 -0.20721 -26 67 1.6036 76 -98 -1.88289 -111 42 0.37837 47 -13 -0.11711 -14 13 0.11711 14 -33 -0.2973 -37 11 0.0991 13 -11 -0.0991 -13 7.5 0.06757 8.5 -7.5 -0.06757 -8.5 53 0.47748 60 -32 -0.28828 -36 32 0.28828 36 -52 -0.46848 -59 57 0.51352 65 -33 -0.2973 -38 53 0.47748 60 -83 -0.74774 -93 54 0.48648 60 -6 -19.05405 -24 33 19.2973 55 -76 -0.68468 -86 76 0.68468 86 -76 -0.68468 -86 59 0.53152 67 26 blend
hstemhm
- 77 30 42 30 139 30 23 30 71 10 74 30 15 30 16 30 158 30 28 30 -4 29 -14 0 -16 88 1 99 -82 -1 -92 87 1 98 -130 -1 -146 102 1 114 -73 -1 -82 74 2 84 -112 -2 -126 27 0 30 13 0 15 90 1 101 -126 -1 -142 75 1 84 -68 -1 -76 102 1 115 -144 -1 -162 94 1 105 -79 -1 -88 95 1 106 -81 -1 -91 74 1 83 22 blend
+ 77 30 42 30 139 30 23 30 71 10 74 30 15 30 16 30 158 30 28 30 -4 29 -14 -0.12613 -16 88 0.79279 99 -82 -0.73874 -92 87 0.78378 98 -130 -1.17117 -146 102 0.91891 114 -73 -0.65765 -82 74 1.66667 84 -112 -2 -126 27 0.24324 30 13 0.11711 15 90 0.8108 101 -126 -1.13513 -142 75 0.67567 84 -68 -0.61261 -76 102 0.91891 115 -144 -1.2973 -162 94 0.84685 105 -79 -0.71172 -88 95 0.85585 106 -81 -0.72974 -91 74 0.66667 83 22 blend
vstemhm
hintmask 110001011101011101101101
- 53 761 -3 0 -3 31 0 35 2 blend
+ 53 761 -3 -0.02702 -3 31 0.27928 35 2 blend
rmoveto
- -30 896 30 -76 -1 -86 5 0 5 76 1 86 3 blend
+ -30 896 30 -76 -0.68468 -86 5 0.04504 5 76 0.68468 86 3 blend
vlineto
- -802 -461 2 0 2 -23 0 -26 2 blend
+ -802 -461 2 0.01802 2 -23 -0.20721 -26 2 blend
rmoveto
- -30 703 30 -53 0 -60 3 0 4 53 0 60 3 blend
+ -30 703 30 -53 -0.47748 -60 3 0.02702 4 53 0.47748 60 3 blend
vlineto
hintmask 000000000000100100000000
- -532 539 -58 -1 -65 6 0 7 2 blend
+ -532 539 -58 -0.52252 -65 6 0.05405 7 2 blend
rmoveto
hintmask 000000000010000100000000
- -171 30 -16 -19 -36 102 1 114 2 blend
+ -171 30 -16 -19.14415 -36 102 0.91891 114 2 blend
vlineto
hintmask 000000000000100100001000
- 171 16 19 36 1 blend
+ 171 16 19.14415 36 1 blend
vlineto
- 299 -100 -1 -112 1 blend
+ 299 -100 -0.9009 -112 1 blend
hmoveto
hintmask 000000000010000000001000
- -171 30 -16 -19 -36 102 1 115 2 blend
+ -171 30 -16 -19.14415 -36 102 0.91891 115 2 blend
vlineto
hintmask 000000000000100000001000
- 171 16 19 36 1 blend
+ 171 16 19.14415 36 1 blend
vlineto
hintmask 000000111100011010010100
- -46 -219 -34 0 -39 -64 -1 -72 2 blend
+ -46 -219 -34 -0.3063 -39 -64 -0.57658 -72 2 blend
rmoveto
- 204 -121 -204 -110 -1 -123 83 1 93 110 1 123 3 blend
+ 204 -121 -204 -110 -1 -123 83 0.74774 93 110 1 123 3 blend
hlineto
- -230 121 33 1 38 -83 -1 -93 2 blend
+ -230 121 33 1.2973 38 -83 -0.74774 -93 2 blend
rmoveto
- 200 -121 -200 -108 -2 -122 83 1 93 108 2 122 3 blend
+ 200 -121 -200 -108 -1.97298 -122 83 0.74774 93 108 1.97298 122 3 blend
hlineto
- -222 121 27 -1 30 -83 -1 -93 2 blend
+ -222 121 27 -0.75676 30 -83 -0.74774 -93 2 blend
rmoveto
- 192 -121 -192 -101 -1 -114 83 1 93 101 1 114 3 blend
+ 192 -121 -192 -101 -0.90991 -114 83 0.74774 93 101 0.90991 114 3 blend
hlineto
- -30 151 -87 -1 -98 -29 0 -33 2 blend
+ -30 151 -87 -0.78378 -98 -29 -0.26126 -33 2 blend
rmoveto
- -181 716 181 -24 0 -27 11 0 12 24 0 27 3 blend
+ -181 716 181 -24 -0.21622 -27 11 0.0991 12 24 0.21622 27 3 blend
vlineto
- -788 -240 -17 0 -19 9 0 11 2 blend
+ -788 -240 -17 -0.15315 -19 9 0.08109 11 2 blend
rmoveto
- -130 30 100 -37 0 -42 88 1 99 -20 0 -23 3 blend
+ -130 30 100 -37 -0.33333 -42 88 0.79279 99 -20 -0.18018 -23 3 blend
vlineto
hintmask 000000110000000000000010
- 786 -100 30 130 -150 -1 -168 20 0 23 95 1 106 37 0 42 4 blend
+ 786 -100 30 130 -150 -1.35135 -168 20 0.18018 23 95 0.85585 106 37 0.33333 42 4 blend
hlineto
hintmask 000010000000000100000000
- -610 -123 -56 -1 -63 -44 0 -50 2 blend
+ -610 -123 -56 -0.5045 -63 -44 -0.3964 -50 2 blend
rmoveto
- -50 -62 -93 -73 -118 -54 8 -4 10 -9 6 -7 9 0 11 13 0 15 19 0 21 29 0 32 9 0 10 22 0 25 12 0 14 -11 0 -12 19 0 21 -26 0 -30 7 0 8 -16 0 -18 12 blend
+ -50 -62 -93 -73 -118 -54 8 -4 10 -9 6 -7 9 0.08109 11 13 0.11711 15 19 0.17117 21 29 0.26126 32 9 0.08109 10 22 0.1982 25 12 0.10811 14 -11 -0.0991 -12 19 0.17117 21 -26 -0.23424 -30 7 0.06306 8 -16 -0.14415 -18 12 blend
rrcurveto
hintmask 010000000000000001000000
- 121 58 92 75 59 70 3 0 3 -13 0 -14 -10 0 -11 -19 0 -21 -2 0 -2 8 0 8 6 blend
+ 121 58 92 75 59 70 3 0.02702 3 -13 -0.11711 -14 -10 -0.09009 -11 -19 -0.17117 -21 -2 -0.01802 -2 8 0.07207 8 6 blend
rrcurveto
- 124 -78 -89 -1 -100 32 0 36 2 blend
+ 124 -78 -89 -0.8018 -100 32 0.28828 36 2 blend
rmoveto
- -7 -6 0 -6 1 blend
+ -7 -6 -0.05405 -6 1 blend
vlineto
- -65 -139 -176 -81 -162 -31 6 -6 8 -12 3 -8 16 0 17 30 0 34 36 0 41 26 0 29 -7 0 -8 12 0 13 12 0 14 -16 0 -18 15 0 16 -30 0 -33 5 0 6 -18 0 -21 12 blend
+ -65 -139 -176 -81 -162 -31 6 -6 8 -12 3 -8 16 0.14415 17 30 0.27026 34 36 0.32433 41 26 0.23424 29 -7 -0.06306 -8 12 0.10811 13 12 0.10811 14 -16 -0.14415 -18 15 0.13513 16 -30 -0.27026 -33 5 0.04504 6 -18 -0.16216 -21 12 blend
rrcurveto
hintmask 001000000000000001000000
- 168 37 178 84 72 154 26 0 29 -5 0 -5 -23 0 -26 -12 0 -14 -5 0 -5 6 0 7 6 blend
+ 168 37 178 84 72 154 26 0.23424 29 -5 -0.04504 -5 -23 -0.20721 -26 -12 -0.10811 -14 -5 -0.04504 -5 6 0.05405 7 6 blend
rrcurveto
hintmask 110100000000000001100001
- -19 11 -6 -2 -47 0 -53 13 0 15 -13 0 -15 0 0 -1 4 blend
+ -19 11 -6 -2 -47 -0.42342 -53 13 0.11711 15 -13 -0.11711 -15 0 0 -1 4 blend
rlineto
- -333 -72 75 1 85 -55 0 -61 2 blend
+ -333 -72 75 0.67567 85 -55 -0.4955 -61 2 blend
rmoveto
- 65 -25 75 -46 38 -35 -35 0 -40 8 0 9 -38 0 -42 15 0 17 -18 0 -21 14 0 15 6 blend
+ 65 -25 75 -46 38 -35 -35 -0.31532 -40 8 0.07207 9 -38 -0.34235 -42 15 0.13513 17 -18 -0.16216 -21 14 0.12613 15 6 blend
rrcurveto
- 26 19 -39 34 -76 45 -64 25 49 0 56 31 0 35 19 0 21 -14 0 -16 39 0 44 -18 0 -20 32 0 36 -9 0 -10 8 blend
+ 26 19 -39 34 -76 45 -64 25 49 0.44144 56 31 0.27928 35 19 0.17117 21 -14 -0.12613 -16 39 0.35135 44 -18 -0.16216 -20 32 0.28828 36 -9 -0.08109 -10 8 blend
rlinecurve
- 72 55 -55 0 -62 28 0 31 2 blend
+ 72 55 -55 -0.4955 -62 28 0.25226 31 2 blend
rmoveto
- -30 -30 -42 0 -47 -42 0 -47 2 blend
+ -30 -30 -42 -0.37837 -47 -42 -0.37837 -47 2 blend
rlineto
- 269 30 -14 0 -16 42 0 47 2 blend
+ 269 30 -14 -0.12613 -16 42 0.37837 47 2 blend
hlineto
- 74 74 13 0 15 -22 0 -24 2 blend
+ 74 74 13 0.11711 15 -22 -0.1982 -24 2 blend
rmoveto
- -276 80 1 90 1 blend
+ -276 80 0.72072 90 1 blend
vlineto
- -52 21 -9 77 -48 0 -54 8 0 9 -21 0 -24 44 0 49 4 blend
+ -52 21 -9 77 -48 -0.43243 -54 8 0.07207 9 -21 -0.1892 -24 44 0.3964 49 4 blend
vhcurveto
- 16 182 8 0 9 -90 -1 -101 2 blend
- 0 18 8 0 9 1 blend
+ 16 182 8 0.07207 9 -90 -0.8108 -101 2 blend
+ 0 18 8 0.07207 9 1 blend
hhcurveto
- 62 12 21 88 4 25 0 28 20 0 22 6 0 7 10 0 11 9 0 10 5 blend
+ 62 12 21 88 4 25 0.22522 28 20 0.18018 22 6 0.05405 7 10 0.09009 11 9 0.08109 10 5 blend
hvcurveto
- -9 2 -12 5 -8 6 -24 0 -26 4 0 5 -34 0 -39 12 0 13 -16 0 -18 10 0 11 6 blend
+ -9 2 -12 5 -8 6 -24 -0.21622 -26 4 0.03604 5 -34 -0.3063 -39 12 0.10811 13 -16 -0.14415 -18 10 0.09009 11 6 blend
rrcurveto
- -81 25 0 28 1 blend
- -4 -6 -11 -41 -37 -154 -1 0 -1 0 1 1 11 -1 12 15 1 17 79 1 89 5 blend
- 0 -26 9 0 10 1 blend
+ -81 25 0.22522 28 1 blend
+ -4 -6 -11 -41 -37 -154 -1 0 -1 0 1 1 11 -0.9009 12 15 1.13513 17 79 0.71172 89 5 blend
+ 0 -26 9 0.08109 10 1 blend
hhcurveto
- -56 -9 6 25 17 0 19 2 0 3 -1 -1 -2 4 0 5 4 blend
+ -56 -9 6 25 17 0.15315 19 2 0.01802 3 -1 -1 -2 4 0.03604 5 4 blend
hvcurveto
- 276 -81 -1 -91 1 blend
+ 276 -81 -0.72974 -91 1 blend
vlineto
- 278 -62 -114 -1 -128 32 0 36 2 blend
+ 278 -62 -114 -1.02702 -128 32 0.28828 36 2 blend
rmoveto
- -66 -32 -126 -33 -107 -23 5 -7 5 -10 2 -7 110 22 126 32 81 36 10 0 11 7 0 8 30 0 34 11 0 12 21 0 23 9 0 10 7 0 8 -14 0 -16 9 0 10 -27 0 -30 3 0 4 -15 0 -17 -15 0 -17 -10 0 -11 -12 0 -14 -11 0 -12 3 0 4 -3 0 -4 18 blend
+ -66 -32 -126 -33 -107 -23 5 -7 5 -10 2 -7 110 22 126 32 81 36 10 0.09009 11 7 0.06306 8 30 0.27026 34 11 0.0991 12 21 0.1892 23 9 0.08109 10 7 0.06306 8 -14 -0.12613 -16 9 0.08109 10 -27 -0.24324 -30 3 0.02702 4 -15 -0.13513 -17 -15 -0.13513 -17 -10 -0.09009 -11 -12 -0.10811 -14 -11 -0.0991 -12 3 0.02702 4 -3 -0.02702 -4 18 blend
rrcurveto
</CharString>
<CharString name="cid06821" fdSelectIndex="1">
3 vsindex
- -58 30 100 30 70 22 -22 30 94 30 19 31 -17 28 152 20 -20 30 -12 12 66 30 -30 89 -5 30 -30 121 -11 0 -24 36 0 81 -32 0 -74 22 0 52 -17 0 -39 16 1 37 -16 -1 -37 21 0 48 -27 0 -63 21 0 49 -11 0 -26 41 0 93 -47 0 -107 24 0 56 -34 0 -78 11 0 26 -11 0 -26 17 0 39 -15 0 -35 15 0 35 -19 0 -43 12 0 26 -12 0 -26 4 0 8 -5 0 -11 28 0 65 -28 0 -65 23 0 52 28 blend
+ -58 30 100 30 70 22 -22 30 94 30 19 31 -17 28 152 20 -20 30 -12 12 66 30 -30 89 -5 30 -30 121 -11 -0.0196 -24 36 0.06418 81 -32 -0.05704 -74 22 0.03922 52 -17 -0.0303 -39 16 1.02852 37 -16 -1.02852 -37 21 0.03743 48 -27 -0.04813 -63 21 0.03743 49 -11 -0.0196 -26 41 0.07309 93 -47 -0.08379 -107 24 0.04279 56 -34 -0.06061 -78 11 0.0196 26 -11 -0.0196 -26 17 0.0303 39 -15 -0.02673 -35 15 0.02673 35 -19 -0.03387 -43 12 0.0214 26 -12 -0.0214 -26 4 0 8 -5 0 -11 28 0.04991 65 -28 -0.04991 -65 23 0.041 52 28 blend
hstemhm
- 127 30 -18 18 199 30 -20 20 -20 30 -24 14 97 30 -11 11 72 31 202 30 87 29 -12 0 -27 44 1 101 -19 -1 -45 19 1 45 -46 -1 -106 37 0 85 -31 0 -71 31 0 71 -31 0 -71 40 0 91 -27 0 -62 18 0 42 -47 0 -108 51 0 117 -27 0 -62 27 0 62 -53 0 -122 43 0 99 -60 -1 -138 52 1 120 -32 0 -73 32 0 72 22 blend
+ 127 30 -18 18 199 30 -20 20 -20 30 -24 14 97 30 -11 11 72 31 202 30 87 29 -12 -0.0214 -27 44 1.07843 101 -19 -1.03387 -45 19 1.03387 45 -46 -1.082 -106 37 0.06595 85 -31 -0.05525 -71 31 0.05525 71 -31 -0.05525 -71 40 0.0713 91 -27 -0.04813 -62 18 0.03209 42 -47 -0.08379 -108 51 0.09091 117 -27 -0.04813 -62 27 0.04813 62 -53 -0.09447 -122 43 0.07664 99 -60 -1.10695 -138 52 1.0927 120 -32 -0.05704 -73 32 0.05704 72 22 blend
vstemhm
hintmask 00011000000000000000000100000000
- 193 296 41 0 93 -8 0 -19 2 blend
+ 193 296 41 0.07309 93 -8 -0.01427 -19 2 blend
rmoveto
- 625 -94 -625 -84 -1 -192 27 0 63 84 1 192 3 blend
+ 625 -94 -625 -84 -1.14973 -192 27 0.04813 63 84 1.14973 192 3 blend
hlineto
- -30 124 -48 0 -110 -6 0 -14 2 blend
+ -30 124 -48 -0.08556 -110 -6 -0.0107 -14 2 blend
rmoveto
- -154 685 154 -15 0 -34 16 0 38 15 0 34 3 blend
+ -154 685 154 -15 -0.02673 -34 16 0.02852 38 15 0.02673 34 3 blend
vlineto
hintmask 00100000000000000000100000000000
- -365 -132 -33 0 -76 1 1 3 2 blend
+ -365 -132 -33 -0.05882 -76 1 1 3 2 blend
rmoveto
- -232 -7 -1 -16 1 blend
+ -232 -7 -1.01248 -16 1 blend
vlineto
- 30 -5 51 0 117 -11 0 -27 2 blend
+ 30 -5 51 0.09091 117 -11 -0.0196 -27 2 blend
rlineto
- 237 18 1 43 1 blend
+ 237 18 1.03209 43 1 blend
vlineto
hintmask 01000000000010010000010000000000
- -11 -92 -27 0 -62 1 -1 2 2 blend
+ -11 -92 -27 -0.04813 -62 1 -1 2 2 blend
rmoveto
- -30 397 30 -22 0 -52 -12 0 -27 22 0 52 3 blend
+ -30 397 30 -22 -0.03922 -52 -12 -0.0214 -27 22 0.03922 52 3 blend
vlineto
- -760 647 25 0 56 -4 0 -9 2 blend
+ -760 647 25 0.04456 56 -4 0 -9 2 blend
rmoveto
- -30 811 30 -28 0 -65 -12 0 -27 28 0 65 3 blend
+ -30 811 30 -28 -0.04991 -65 -12 -0.0214 -27 28 0.04991 65 3 blend
vlineto
hintmask 00000000000010100000000000000000
- -823 -13 0 -29 1 blend
+ -823 -13 -0.02318 -29 1 blend
hmoveto
- -143 12 0 27 1 blend
+ -143 12 0.0214 27 1 blend
vlineto
-83 -13 -107 -75 -82 4 0 9 3 0 6 5 1 12 -1 0 -1 5 -1 11 5 blend
vhcurveto
- 7 -4 11 -9 5 -6 10 0 21 -5 0 -12 20 0 46 -17 0 -38 6 0 15 -8 0 -18 6 blend
+ 7 -4 11 -9 5 -6 10 0.01782 21 -5 0 -12 20 0.03564 46 -17 -0.0303 -38 6 0.0107 15 -8 -0.01427 -18 6 blend
rrcurveto
79 5 0 11 1 blend
- 85 16 118 88 1 1 3 9 0 19 6 0 15 3 blend
+ 85 16 118 88 1 1 3 9 0.01604 19 6 0.0107 15 3 blend
vvcurveto
- 143 -11 0 -25 1 blend
+ 143 -11 -0.0196 -25 1 blend
vlineto
hintmask 00000000010100001000000000000000
- 199 -25 -46 -1 -106 -23 0 -54 2 blend
+ 199 -25 -46 -1.082 -106 -23 -0.041 -54 2 blend
rmoveto
-167 vlineto
hintmask 00000000010100000100000000000000
- 30 37 0 85 1 blend
+ 30 37 0.06595 85 1 blend
167 hlineto
hintmask 00000000101000000001000000000000
- -14 -59 -18 0 -42 8 0 18 2 blend
+ -14 -59 -18 -0.03209 -42 8 0.01427 18 2 blend
rmoveto
- -30 185 30 -12 0 -26 -4 0 -9 12 0 26 3 blend
+ -30 185 30 -12 -0.0214 -26 -4 0 -9 12 0.0214 26 3 blend
vlineto
- -365 -96 10 0 22 7 0 17 2 blend
+ -365 -96 10 0.01782 22 7 0.01248 17 2 blend
rmoveto
- -30 392 30 -17 0 -39 -4 0 -9 17 0 39 3 blend
+ -30 392 30 -17 -0.0303 -39 -4 0 -9 17 0.0303 39 3 blend
vlineto
hintmask 00000011000000000100000000000000
- -218 -10 -15 0 -33 -6 0 -13 2 blend
+ -218 -10 -15 -0.02673 -33 -6 -0.0107 -13 2 blend
rmoveto
- -160 23 0 51 1 blend
+ -160 23 0.041 51 1 blend
vlineto
-8 -2 0 0 -1 1 blend
-3 -11 -1 1 0 3 0 0 1 2 blend
vhcurveto
-11 -1 -30 2 0 4 1 0 1 4 0 10 3 blend
- 0 -47 13 0 30 1 blend
- 1 5 -9 6 -10 2 -9 4 0 8 -6 0 -13 6 0 13 -11 0 -25 2 0 6 -8 0 -19 6 blend
+ 0 -47 13 0.02318 30 1 blend
+ 1 5 -9 6 -10 2 -9 4 0 8 -6 -0.0107 -13 6 0.0107 13 -11 -0.0196 -25 2 0 6 -8 -0.01427 -19 6 blend
rrcurveto
hintmask 00000011000001000010001000000000
50 30 -5 0 -11 1 0 2 2 blend
0 6 17 3 0 8 5 1 12 2 blend
hvcurveto
- 17 5 4 9 21 6 -1 12 4 0 9 1 0 3 4 0 8 11 0 25 5 blend
+ 17 5 4 9 21 6 -0.9893 12 4 0 9 1 0 3 4 0 8 11 0.0196 25 5 blend
vvcurveto
- 159 -21 0 -46 1 blend
+ 159 -21 -0.03743 -46 1 blend
vlineto
- -132 -50 -39 0 -88 1 0 1 2 blend
+ -132 -50 -39 -0.06952 -88 1 0 1 2 blend
rmoveto
- -25 -42 -40 -39 -44 -30 8 -4 13 -10 5 -4 41 6 0 12 3 0 8 7 0 16 3 0 5 5 0 13 1 0 4 6 0 13 -3 0 -8 10 0 22 -6 0 -14 5 0 12 -5 0 -10 -3 0 -8 13 blend
- 30 45 -7 0 -14 1 blend
+ -25 -42 -40 -39 -44 -30 8 -4 13 -10 5 -4 41 6 0.0107 12 3 0 8 7 0.01248 16 3 0 5 5 0 13 1 0 4 6 0.0107 13 -3 0 -8 10 0.01782 22 -6 -0.0107 -14 5 0 12 -5 0 -10 -3 0 -8 13 blend
+ 30 45 -7 -0.01248 -14 1 blend
47 26 45 -3 0 -8 1 0 1 2 blend
rrcurveto
- 153 -7 -13 0 -30 -1 0 -2 2 blend
+ 153 -7 -13 -0.02318 -30 -1 0 -2 2 blend
rmoveto
- 35 -27 38 -39 18 -28 -8 3 -11 3 -5 -3 -9 3 -14 6 -7 -3 -5 1 -9 4 -4 0 6 blend
+ 35 -27 38 -39 18 -28 -8 2.98573 -11 3 -5 -3 -9 2.98396 -14 6 -6.9893 -3 -5 1 -9 4 -4 0 6 blend
rrcurveto
- 24 18 -18 27 -39 39 -34 25 23 1 55 6 -1 12 4 -1 8 -3 4 1 9 -3 13 -6 7 2 7 -3 9 -4 5 4 8 blend
+ 24 18 -18 27 -39 39 -34 25 23 1.041 55 6 -0.9893 12 4 -1 8 -3 4 1 9 -2.98396 13 -6 6.9893 2 7 -2.98752 9 -4 5 4 8 blend
rlinecurve
- 115 330 -53 -1 -124 9 1 21 2 blend
+ 115 330 -53 -1.09447 -124 9 1.01604 21 2 blend
rmoveto
hintmask 10000101000001000000001010000000
- 14 -286 131 -209 160 0 50 1 18 34 6 108 -9 3 -11 5 -9 7 -4 -92 -9 -34 -31 -1 -137 -2 -126 185 -12 281 3 0 8 6 0 14 5 0 10 -10 0 -22 -3 0 -6 0 0 -1 14 0 33 -1 0 -1 11 0 23 -3 0 -8 5 0 12 10 0 24 -10 0 -23 3 0 7 -14 0 -32 8 0 18 -8 0 -17 8 0 17 0 0 -1 11 0 26 0 0 1 4 0 9 5 0 11 1 0 1 29 0 67 0 1 2 8 0 17 0 -1 -1 -2 0 -4 -37 0 -85 30 blend
+ 14 -286 131 -209 160 0 50 1 18 34 6 108 -9 3 -11 5 -9 7 -4 -92 -9 -34 -31 -1 -137 -2 -126 185 -12 281 3 0 8 6 0.0107 14 5 0 10 -10 -0.01782 -22 -3 0 -6 0 0 -1 14 0.02495 33 -1 0 -1 11 0.0196 23 -3 0 -8 5 0 12 10 0.01782 24 -10 -0.01782 -23 3 0 7 -14 -0.02495 -32 8 0.01427 18 -8 -0.01427 -17 8 0.01427 17 0 0 -1 11 0.0196 26 0 0 1 4 0 9 5 0 11 1 0 1 29 0.0517 67 0 1 2 8 0.01427 17 0 -1 -1 -2 0 -4 -37 -0.06595 -85 30 blend
rrcurveto
- 207 -169 -37 0 -85 -4 0 -9 2 blend
+ 207 -169 -37 -0.06595 -85 -4 0 -9 2 blend
rmoveto
- -61 -129 -111 -108 -121 -69 7 -5 12 -11 5 -6 119 74 113 110 66 136 4 15 19 8 14 33 4 28 29 8 5 22 2 28 27 6 2 17 8 1 20 -6 0 -14 14 1 34 -13 -1 -31 6 0 15 -7 0 -17 0 -28 -23 -4 -2 -10 0 -27 -20 -1 -3 -5 -1 -16 -12 -1 -15 -19 18 blend
+ -61 -129 -111 -108 -121 -69 7 -5 12 -11 5 -6 119 74 113 110 66 136 4 15 19 8 14.01427 33 4 28 29 8 5.01427 22 2 28 27 6 2.0107 17 8 1.01427 20 -6 -0.0107 -14 14 1.02495 34 -13 -1.02318 -31 6 0.0107 15 -7 -0.01248 -17 0 -28 -23 -4 -2 -10 0 -27 -20 -1 -3 -5 -1 -16 -12 -1 -15 -19 18 blend
rrcurveto
- -156 153 -20 -2 -49 -2 0 -3 2 blend
+ -156 153 -20 -2.03564 -49 -2 0 -3 2 blend
rmoveto
52 -15 63 -26 34 -1 0 -3 -1 0 -1 0 0 1 0 0 -2 0 0 -2 5 blend
-21 rrcurveto
- 15 27 -34 20 -64 24 -51 14 21 0 48 20 0 47 -1 0 -1 1 0 1 0 0 -1 0 0 1 1 0 3 -1 0 -2 8 blend
+ 15 27 -34 20 -64 24 -51 14 21 0.03743 48 20 0.03564 47 -1 0 -1 1 0 1 0 0 -1 0 0 1 1 0 3 -1 0 -2 8 blend
rlinecurve
- -453 -763 1 0 2 12 0 27 2 blend
+ -453 -763 1 0 2 12 0.0214 27 2 blend
rmoveto
- -25 -16 -31 0 -71 -7 0 -17 2 blend
+ -25 -16 -31 -0.05525 -71 -7 -0.01248 -17 2 blend
rlineto
- -100 89 146 -18 233 -21 0 -46 -5 0 -12 -13 0 -29 -4 0 -9 -8 0 -18 5 blend
+ -100 89 146 -18 233 -21 -0.03743 -46 -5 0 -12 -13 -0.02318 -29 -4 0 -9 -8 -0.01427 -18 5 blend
hhcurveto
- 249 23 0 53 1 blend
+ 249 23 0.03743 53 1 blend
hlineto
- 2 8 6 14 6 8 -35 0 -207 2 -1 3 11 0 25 5 1 12 17 0 38 4 0 10 8 0 18 -16 0 -37 -1 0 -3 -1 0 -2 9 blend
- 0 -22 -14 0 -32 1 blend
- 0 -214 0 -150 15 -78 89 24 0 55 0 0 1 18 0 40 -2 0 -5 12 0 28 -1 0 -2 6 blend
+ 2 8 6 14 6 8 -35 0 -207 2 -1 3 11 0.0196 25 5 1 12 17 0.0303 38 4 0 10 8 0.01427 18 -16 -0.02852 -37 -1 0 -3 -1 0 -2 9 blend
+ 0 -22 -14 -0.02495 -32 1 blend
+ 0 -214 0 -150 15 -78 89 24 0.04279 55 0 0 1 18 0.03209 40 -2 0 -5 12 0.0214 28 -1 0 -2 6 blend
rrcurveto
- 5 62 -50 0 -114 -10 0 -22 2 blend
+ 5 62 -50 -0.08913 -114 -10 -0.01782 -22 2 blend
rmoveto
- -30 -97 -92 -60 -107 -36 8 -6 12 -11 4 -6 105 41 99 65 32 106 5 0 12 7 0 15 15 0 34 1 0 3 7 0 16 1 0 2 10 0 22 -6 0 -15 18 0 41 -17 0 -37 8 0 18 -8 0 -19 -2 0 -5 4 0 9 -12 0 -27 7 0 16 -1 0 -2 6 0 14 18 blend
+ -30 -97 -92 -60 -107 -36 8 -6 12 -11 4 -6 105 41 99 65 32 106 5 0 12 7 0.01248 15 15 0.02673 34 1 0 3 7 0.01248 16 1 0 2 10 0.01782 22 -6 -0.0107 -15 18 0.03209 41 -17 -0.0303 -37 8 0.01427 18 -8 -0.01427 -19 -2 0 -5 4 0 9 -12 -0.0214 -27 7 0.01248 16 -1 0 -2 6 0.0107 14 18 blend
rrcurveto
</CharString>
<CharString name="cid07253" fdSelectIndex="1">
1 vsindex
- -80 27 95 49 -48 48 -45 45 -30 30 -16 16 -13 13 49 30 48 30 47 19 -19 30 53 30 -18 18 51 11 -11 30 -22 22 62 30 60 30 15 81 -30 30 -30 102 -10 1 -14 41 -2 59 -53 2 -76 27 -1 38 -26 1 -37 26 -1 37 -27 1 -39 27 -1 39 -27 1 -39 27 -1 39 -13 0 -19 13 0 19 -14 0 -20 14 0 20 -19 1 -27 13 -1 19 -18 1 -26 13 0 19 -18 0 -26 18 0 26 -18 0 -26 23 -1 33 -21 1 -30 42 -2 60 -29 1 -42 29 -1 42 -19 1 -27 7 0 10 -7 0 -10 26 -1 37 -24 1 -34 24 -1 34 -27 1 -39 24 -1 34 -26 1 -37 26 -1 37 -40 1 -45 53 -2 66 -44 2 -62 44 -2 62 -44 2 -62 18 0 23 42 blend
+ -80 27 95 49 -48 48 -45 45 -30 30 -16 16 -13 13 49 30 48 30 47 19 -19 30 53 30 -18 18 51 11 -11 30 -22 22 62 30 60 30 15 81 -30 30 -30 102 -10 0.96979 -14 41 -1.87613 59 -53 1.83987 -76 27 -0.91843 38 -26 0.92145 -37 26 -0.92145 37 -27 0.91843 -39 27 -0.91843 39 -27 0.91843 -39 27 -0.91843 39 -13 -0.03928 -19 13 0.03928 19 -14 -0.0423 -20 14 0.0423 20 -19 0.9426 -27 13 -0.96072 19 -18 0.94562 -26 13 0.03928 19 -18 -0.05438 -26 18 0.05438 26 -18 -0.05438 -26 23 -0.93051 33 -21 0.93655 -30 42 -1.87311 60 -29 0.91238 -42 29 -0.91238 42 -19 0.9426 -27 7 0.02115 10 -7 -0.02115 -10 26 -0.92145 37 -24 0.92749 -34 24 -0.92749 34 -27 0.91843 -39 24 -0.92749 34 -26 0.92145 -37 26 -0.92145 37 -40 0.87915 -45 53 -1.83987 66 -44 1.86707 -62 44 -1.86707 62 -44 1.86707 -62 18 0.05438 23 42 blend
hstemhm
- 193 30 -1 30 -15 15 106 29 96 30 142 30 109 30 5 10 -28 1 -40 71 -2 102 -56 2 -80 75 -4 106 -21 2 -29 21 -2 29 -104 5 -148 55 -3 78 -42 3 -59 69 -4 98 -84 4 -120 79 -3 113 -94 3 -135 76 -3 109 -51 2 -73 25 -1 36 16 blend
+ 193 30 -1 30 -15 15 106 29 96 30 142 30 109 30 5 10 -28 0.9154 -40 71 -1.78549 102 -56 1.83081 -80 75 -3.7734 106 -21 1.93655 -29 21 -1.93655 29 -104 4.6858 -148 55 -2.83383 78 -42 2.87311 -59 69 -3.79153 98 -84 3.74623 -120 79 -2.76132 113 -94 2.71602 -135 76 -2.77039 109 -51 1.84592 -73 25 -0.92447 36 16 blend
vstemhm
hintmask 10000011101100101101000101110000
- 55 767 2 0 3 37 -2 55 2 blend
+ 55 767 2 0 3 37 -1.88821 55 2 blend
rmoveto
- -30 892 30 -44 2 -62 -6 0 -9 44 -2 62 3 blend
+ -30 892 30 -44 1.86707 -62 -6 -0.01813 -9 44 -1.86707 62 3 blend
vlineto
hintmask 00000000000000000000100000000000
- -637 72 -28 1 -40 -26 2 -39 2 blend
+ -637 72 -28 0.9154 -40 -26 1.92145 -39 2 blend
rmoveto
hintmask 00000000000000000010000000000000
- -153 30 -27 0 -27 77 -2 111 2 blend
+ -153 30 -27 -0.08157 -27 77 -1.76736 111 2 blend
vlineto
hintmask 00000000000000000000100000100000
- 153 27 0 27 1 blend
+ 153 27 0.08157 27 1 blend
vlineto
- 315 -89 3 -128 1 blend
+ 315 -89 2.73112 -128 1 blend
hmoveto
hintmask 00000000000000000010000000100000
- -153 30 -27 0 -27 79 -3 113 2 blend
+ -153 30 -27 -0.08157 -27 79 -2.76132 113 2 blend
vlineto
hintmask 00000000000100101100110000110000
- 153 27 0 27 1 blend
+ 153 27 0.08157 27 1 blend
vlineto
- -462 -288 8 0 12 -11 0 -16 2 blend
+ -462 -288 8 0.02417 12 -11 -0.03323 -16 2 blend
rmoveto
- 571 -62 -571 -102 3 -147 27 -1 39 102 -3 147 3 blend
+ 571 -62 -571 -102 2.69185 -147 27 -0.91843 39 102 -2.69185 147 3 blend
hlineto
- 152 -29 1 -42 1 blend
+ 152 -29 0.91238 -42 1 blend
vmoveto
- 571 -60 -571 -102 3 -147 26 -1 37 102 -3 147 3 blend
+ 571 -60 -571 -102 2.69185 -147 26 -0.92145 37 102 -2.69185 147 3 blend
hlineto
- -30 -71 2 -102 1 blend
+ -30 -71 1.78549 -102 1 blend
90 rmoveto
- -212 631 212 -23 1 -32 45 -2 64 23 -1 32 3 blend
+ -212 631 212 -23 0.93051 -32 45 -1.86404 64 23 -0.93051 32 3 blend
vlineto
- -776 -263 -22 1 -31 -4 0 -5 2 blend
+ -776 -263 -22 0.93353 -31 -4 -0.01208 -5 2 blend
rmoveto
- -30 905 30 -42 2 -60 10 0 14 42 -2 60 3 blend
+ -30 905 30 -42 1.87311 -60 10 0.03021 14 42 -1.87311 60 3 blend
vlineto
hintmask 00000001100000000000000100000000
- -716 -160 36 -1 52 -26 2 -37 2 blend
+ -716 -160 36 -0.89124 52 -26 1.92145 -37 2 blend
rmoveto
- -30 554 30 -13 0 -19 -59 2 -85 13 0 19 3 blend
+ -30 554 30 -13 -0.03928 -19 -59 1.82175 -85 13 0.03928 19 3 blend
vlineto
- -554 -78 59 -2 85 5 -1 7 2 blend
+ -554 -78 59 -1.82175 85 5 -0.9849 7 2 blend
rmoveto
- -30 563 30 -13 1 -19 -56 1 -81 13 -1 19 3 blend
+ -30 563 30 -13 0.96072 -19 -56 0.83081 -81 13 -0.96072 19 3 blend
vlineto
hintmask 00000010000000000000001000000000
- -578 -79 2 1 4 6 0 8 2 blend
+ -578 -79 2 1 4 6 0.01813 8 2 blend
rmoveto
hintmask 00001000000000000000001000001000
- -30 617 -27 1 -39 4 -1 5 2 blend
+ -30 617 -27 0.91843 -39 4 -0.98792 5 2 blend
vlineto
hintmask 00000010000001000000000000001000
- 30 27 -1 39 1 blend
+ 30 27 -0.91843 39 1 blend
vlineto
- -477 382 -24 2 -34 8 0 12 2 blend
+ -477 382 -24 1.92749 -34 8 0.02417 12 2 blend
rmoveto
- -46 -92 -113 -104 -167 -65 7 -5 10 -9 5 -8 6 -1 8 -5 -1 -8 17 0 25 11 0 16 -3 -1 -5 6 0 9 12 0 18 -11 0 -16 18 0 26 -27 1 -39 6 -1 8 -16 1 -23 12 blend
+ -46 -92 -113 -104 -167 -65 7 -5 10 -9 5 -8 6 -0.98187 8 -5 -1.0151 -8 17 0.05136 25 11 0.03323 16 -3 -1 -5 6 0.01813 9 12 0.03625 18 -11 -0.03323 -16 18 0.05438 26 -27 0.91843 -39 6 -0.98187 8 -16 0.95166 -23 12 blend
rrcurveto
hintmask 00000100010010010000000001000000
- 172 70 111 106 55 101 14 0 20 3 0 5 -6 0 -8 1 0 1 3 0 4 28 -1 41 6 blend
+ 172 70 111 106 55 101 14 0.0423 20 3 0 5 -6 -0.01813 -8 1 0 1 3 0 4 28 -0.9154 41 6 blend
rrcurveto
- 298 -65 -24 0 -35 3 0 4 2 blend
+ 298 -65 -24 -0.07251 -35 3 0 4 2 blend
rmoveto
- -25 -12 -55 2 -79 -15 0 -22 2 blend
+ -25 -12 -55 1.83383 -79 -15 -0.04532 -22 2 blend
rlineto
- 62 -80 121 -81 100 -38 5 8 9 11 7 6 -101 33 -119 76 -59 77 2 0 3 -14 1 -20 -10 1 -14 2 0 3 20 0 29 1 0 2 9 -1 13 18 -1 25 20 -1 28 26 -1 38 14 0 21 14 -1 19 -13 0 -19 -7 0 -10 9 0 13 -18 2 -25 4 -1 5 -7 0 -10 18 blend
+ 62 -80 121 -81 100 -38 5 8 9 11 7 6 -101 33 -119 76 -59 77 2 0 3 -14 0.9577 -20 -10 0.96979 -14 2 0 3 20 0.06042 29 1 0 2 9 -0.97281 13 18 -0.94562 25 20 -0.93958 28 26 -0.92145 38 14 0.0423 21 14 -0.9577 19 -13 -0.03928 -19 -7 -0.02115 -10 9 0.02719 13 -18 1.94562 -25 4 -0.98792 5 -7 -0.02115 -10 18 blend
rrcurveto
- -211 -88 -39 3 -55 -12 1 -17 2 blend
+ -211 -88 -39 2.88217 -55 -12 0.96375 -17 2 blend
rmoveto
- -239 30 239 -2 -1 -4 69 -4 98 2 1 4 3 blend
+ -239 30 239 -2 -1 -4 69 -3.79153 98 2 1 4 3 blend
vlineto
hintmask 10000010000000000000000000001000
- 316 -223 -74 3 -106 11 -1 15 2 blend
+ 316 -223 -74 2.77643 -106 11 -0.96677 15 2 blend
rmoveto
- -6 -4 0 -6 1 blend
+ -6 -4 -0.01208 -6 1 blend
vlineto
- -8 -87 -7 -34 -10 -10 2 0 3 24 -1 35 -1 1 -1 6 0 9 1 -1 1 1 0 1 6 blend
+ -8 -87 -7 -34 -10 -10 2 0 3 24 -0.92749 35 -1 1 -1 6 0.01813 9 1 -1 1 1 0 1 6 blend
rrcurveto
-6 -1 0 -1 1 blend
-6 -6 -1 -12 2 0 3 1 blend
hhcurveto
- -11 -31 1 0 1 10 0 15 2 blend
- 1 3 -34 0 -1 -1 9 0 13 2 blend
+ -11 -31 1 0 1 10 0.03021 15 2 blend
+ 1 3 -34 0 -1 -1 9 0.02719 13 2 blend
hvcurveto
- 5 -8 3 -13 6 -1 8 -11 1 -16 5 0 8 -19 1 -26 4 blend
- 1 -8 28 -2 30 -1 14 1 -14 1 -20 7 -1 9 1 0 1 2 0 3 2 -1 2 3 1 5 0 1 1 7 blend
- 21 0 10 4 10 9 16 15 7 35 2 -1 2 8 -1 11 2 0 3 5 0 7 5 0 8 3 -1 4 3 0 4 2 1 4 3 0 4 9 blend
- 9 89 -15 1 -21 1 blend
+ 5 -8 3 -13 6 -0.98187 8 -11 0.96677 -16 5 0.0151 8 -19 0.9426 -26 4 blend
+ 1 -8 28 -2 30 -1 14 1 -14 0.9577 -20 7 -0.97885 9 1 0 1 2 0 3 2 -1 2 3 1 5 0 1 1 7 blend
+ 21 0 10 4 10 9 16 15 7 35 2 -1 2 8 -0.97583 11 2 0 3 5 0.0151 7 5 0.0151 8 3 -1 4 3 0 4 2 1 4 3 0 4 9 blend
+ 9 89 -15 0.95468 -21 1 blend
rrcurveto
- 7 1 1 12 6 -1 8 1 0 1 1 -1 1 9 0 13 4 blend
+ 7 1 1 12 6 -0.98187 8 1 0 1 1 -1 1 9 0.02719 13 4 blend
0 hhcurveto
- -660 -34 -57 3 -82 -8 0 -11 2 blend
+ -660 -34 -57 2.82779 -82 -8 -0.02417 -11 2 blend
rmoveto
- -17 -46 1 0 2 7 0 10 2 blend
- -32 -46 -46 5 0 7 5 0 7 2 blend
- -23 20 -21 56 -2 81 -24 0 -35 2 blend
+ -17 -46 1 0 2 7 0.02115 10 2 blend
+ -32 -46 -46 5 0.0151 7 5 0.0151 7 2 blend
+ -23 20 -21 56 -1.83081 81 -24 -0.07251 -35 2 blend
rcurveline
hintmask 10010000000000000000000000000000
- 52 28 31 51 17 46 -4 -1 -7 0 1 1 -4 1 -5 -7 0 -10 1 -1 1 0 0 -1 6 blend
+ 52 28 31 51 17 46 -4 -1.01208 -7 0 1 1 -4 0.98792 -5 -7 -0.02115 -10 1 -1 1 0 0 -1 6 blend
rrcurveto
hintmask 00100000000000000000000010000000
- 110 -3 -67 3 -96 1 0 2 2 blend
+ 110 -3 -67 2.79758 -96 1 0 2 2 blend
rmoveto
- 13 -38 10 -49 0 -32 -3 0 -4 4 -1 5 -2 0 -3 2 1 4 -1 1 -1 3 0 4 6 blend
+ 13 -38 10 -49 0 -32 -3 0 -4 4 -0.98792 5 -2 0 -3 2 1 4 -1 1 -1 3 0 4 6 blend
rrcurveto
- 29 6 55 -3 78 8 -1 11 2 blend
- -1 31 -10 50 -15 37 -3 0 -4 0 1 1 -4 0 -6 3 -1 4 -4 1 -5 5 blend
+ 29 6 55 -2.83383 78 8 -0.97583 11 2 blend
+ -1 31 -10 50 -15 37 -3 0 -4 0 1 1 -4 -0.01208 -6 3 -1 4 -4 0.98792 -5 5 blend
rlinecurve
hintmask 01000000000000000000000000100000
- 113 -6 -56 3 -80 -7 0 -10 2 blend
+ 113 -6 -56 2.83081 -80 -7 -0.02115 -10 2 blend
rmoveto
22 -32 20 -44 7 -30 2 0 3 1 -1 1 3 -1 3 1 0 1 2 0 3 5 blend
rrcurveto
- 28 10 -8 29 -21 44 -23 32 48 -2 69 15 0 22 -2 1 -2 -1 0 -2 0 -1 -1 -5 1 -6 -1 1 -1 -4 0 -6 8 blend
+ 28 10 -8 29 -21 44 -23 32 48 -1.85498 69 15 0.04532 22 -2 1 -2 -1 0 -2 0 -1 -1 -5 0.9849 -6 -1 1 -1 -4 -0.01208 -6 8 blend
rlinecurve
hintmask 00010000001000000000001000000000
- 117 -5 -45 1 -65 -17 1 -24 2 blend
+ 117 -5 -45 0.86404 -65 -17 0.94864 -24 2 blend
rmoveto
25 -23 -1 0 -1 2 -1 2 2 blend
27 -32 13 -23 -2 1 -2 1 0 2 2 blend
rrcurveto
- 21 14 -12 44 -2 63 20 -1 28 0 0 -1 3 blend
+ 21 14 -12 44 -1.86707 63 20 -0.93958 28 0 0 -1 3 blend
22 -27 32 -26 22 -2 0 -2 -2 0 -3 1 0 1 -2 1 -2 4 blend
rlinecurve
- -381 267 39 -1 56 7 -1 10 2 blend
+ -381 267 39 -0.88217 56 7 -0.97885 10 2 blend
rmoveto
- -16 -30 -33 1 -47 -23 1 -33 2 blend
+ -16 -30 -33 0.9003 -47 -23 0.93051 -33 2 blend
rlineto
- 498 30 -42 1 -61 23 -1 33 2 blend
+ 498 30 -42 0.87311 -61 23 -0.93051 33 2 blend
hlineto
- -516 -23 21 0 31 -14 0 -21 2 blend
+ -516 -23 21 0.06345 31 -14 -0.0423 -21 2 blend
rmoveto
hintmask 00000010000000000000001000000000
- -224 6 0 9 1 blend
+ -224 6 0.01813 9 1 blend
vlineto
hintmask 00000010001000000000000100000000
- 30 247 75 -4 106 10 0 14 2 blend
+ 30 247 75 -3.7734 106 10 0.03021 14 2 blend
hlineto
</CharString>
<CharString name="cid13393" fdSelectIndex="1">
4 vsindex
- -50 30 -19 19 114 30 44 30 23 30 -30 114 35 30 316 30 -10 10 37 12 -21 0 -26 66 0 82 -29 21 -10 29 -21 10 -64 0 -80 55 0 69 -79 0 -99 75 0 94 -46 0 -58 56 0 71 -56 0 -71 26 21 59 -18 -25 -54 54 0 68 -76 8 -85 58 0 73 -24 0 -31 24 0 31 -46 -4 -63 30 0 37 20 blend
+ -50 30 -19 19 114 30 44 30 23 30 -30 114 35 30 316 30 -10 10 37 12 -21 -0.10448 -26 66 0.32835 82 -29 20.85573 -10 29 -20.85573 10 -64 -0.3184 -80 55 0.27364 69 -79 -0.39304 -99 75 0.37314 94 -46 -0.22885 -58 56 0.27861 71 -56 -0.27861 -71 26 21.12935 59 -18 -25.08955 -54 54 0.26866 68 -76 7.62189 -85 58 0.28856 73 -24 -0.1194 -31 24 0.1194 31 -46 -4.22885 -63 30 0.14925 37 20 blend
hstemhm
- 82 30 197 30 -26 8 317 30 168 13 -13 0 -16 77 0 96 -109 -1 -136 78 0 97 -77 0 -96 29 0 36 -10 0 -12 84 0 105 -86 0 -108 21 0 27 10 blend
+ 82 30 197 30 -26 8 317 30 168 13 -13 -0.06468 -16 77 0.38309 96 -109 -0.54228 -136 78 0.38806 97 -77 -0.38309 -96 29 0.14427 36 -10 -0.04974 -12 84 0.41791 105 -86 -0.42786 -108 21 0.10448 27 10 blend
vstemhm
hintmask 1010101101110110
- 529 746 23 0 29 30 4 43 2 blend
+ 529 746 23 0.11443 29 30 4.14925 43 2 blend
rmoveto
- -30 320 30 -58 0 -73 -29 0 -36 58 0 73 3 blend
+ -30 320 30 -58 -0.28856 -73 -29 -0.14427 -36 58 0.28856 73 3 blend
vlineto
- -397 -495 15 0 18 12 -4 10 2 blend
+ -397 -495 15 0.07463 18 12 -3.94029 10 2 blend
rmoveto
- -30 442 30 -56 0 -71 21 0 27 56 0 71 3 blend
+ -30 442 30 -56 -0.27861 -71 21 0.10448 27 56 0.27861 71 3 blend
vlineto
- -420 149 -6 0 -8 6 -4 2 2 blend
+ -420 149 -6 -0.02985 -8 6 -3.97015 2 2 blend
rmoveto
- -30 374 30 -54 0 -68 -25 0 -31 54 0 68 3 blend
+ -30 374 30 -54 -0.26866 -68 -25 -0.12437 -31 54 0.26866 68 3 blend
vlineto
- -514 -420 34 0 42 -3 4 1 2 blend
+ -514 -420 34 0.16916 42 -3 3.98508 1 2 blend
rmoveto
- -30 626 30 -66 0 -82 -29 0 -36 66 0 82 3 blend
+ -30 626 30 -66 -0.32835 -82 -29 -0.14427 -36 66 0.32835 82 3 blend
vlineto
- -531 144 15 0 19 -9 0 -11 2 blend
+ -531 144 15 0.07463 19 -9 -0.04477 -11 2 blend
rmoveto
- -30 460 30 -55 0 -69 -4 0 -5 55 0 69 3 blend
+ -30 460 30 -55 -0.27364 -69 -4 -0.0199 -5 55 0.27364 69 3 blend
vlineto
- -53 622 -42 0 -53 -6 4 -2 2 blend
+ -53 622 -42 -0.20895 -53 -6 3.97015 -2 2 blend
rmoveto
- -7 -9 0 -12 1 blend
+ -7 -9 -0.04477 -12 1 blend
vlineto
- -86 -171 -222 -118 -188 -45 7 -7 8 -11 3 -8 14 0 18 37 0 46 27 0 34 19 0 24 -7 0 -9 5 0 7 15 0 18 -16 0 -20 17 0 22 -32 0 -40 9 0 11 -19 0 -24 12 blend
+ -86 -171 -222 -118 -188 -45 7 -7 8 -11 3 -8 14 0.06966 18 37 0.18408 46 27 0.13432 34 19 0.09453 24 -7 -0.03482 -9 5 0.02487 7 15 0.07463 18 -16 -0.0796 -20 17 0.08458 22 -32 -0.15921 -40 9 0.04477 11 -19 -0.09453 -24 12 blend
rrcurveto
hintmask 0000000010000010
- 192 51 224 119 94 187 21 0 26 3 0 3 -17 0 -21 -9 0 -11 2 0 2 -3 0 -4 6 blend
+ 192 51 224 119 94 187 21 0.10448 26 3 0.01492 3 -17 -0.08458 -21 -9 -0.04477 -11 2 0 2 -3 -0.01492 -4 6 blend
rrcurveto
hintmask 0100010100000110
- -19 12 -6 -2 -55 0 -68 27 0 34 -12 0 -15 -3 0 -3 4 blend
+ -19 12 -6 -2 -55 -0.27364 -68 27 0.13432 34 -12 -0.05971 -15 -3 -0.01492 -3 4 blend
rlineto
- -323 -32 55 0 69 -25 0 -32 2 blend
+ -323 -32 55 0.27364 69 -25 -0.12437 -32 2 blend
rmoveto
- -25 -11 -68 0 -86 -23 0 -28 2 blend
+ -25 -11 -68 -0.3383 -86 -23 -0.11443 -28 2 blend
rlineto
- 83 -154 177 -116 201 -44 4 8 9 12 7 6 -200 39 -177 113 -79 147 11 0 14 12 0 15 -18 0 -22 21 0 26 -1 0 -1 4 0 5 11 0 13 21 0 26 21 0 27 32 0 40 17 0 21 16 0 20 9 0 11 -10 0 -12 17 0 21 -36 0 -45 1 0 2 -37 0 -47 18 blend
+ 83 -154 177 -116 201 -44 4 8 9 12 7 6 -200 39 -177 113 -79 147 11 0.05473 14 12 0.05971 15 -18 -0.08955 -22 21 0.10448 26 -1 0 -1 4 0.0199 5 11 0.05473 13 21 0.10448 26 21 0.10448 27 32 0.15921 40 17 0.08458 21 16 0.0796 20 9 0.04477 11 -10 -0.04974 -12 17 0.08458 21 -36 -0.17911 -45 1 0 2 -37 -0.18408 -47 18 blend
rrcurveto
- 59 127 -46 0 -58 9 -4 6 2 blend
+ 59 127 -46 -0.22885 -58 9 -3.95523 6 2 blend
rmoveto
- -40 -82 -80 -104 -112 -75 8 -4 10 -9 6 -7 115 80 2 0 2 8 0 10 7 0 9 23 0 29 2 0 3 16 0 20 16 0 20 -12 0 -15 26 0 32 -30 0 -37 10 0 13 -18 0 -23 8 0 10 -4 0 -5 14 blend
- 80 106 47 90 -13 0 -16 11 0 13 14 0 17 3 blend
+ -40 -82 -80 -104 -112 -75 8 -4 10 -9 6 -7 115 80 2 0 2 8 0.0398 10 7 0.03482 9 23 0.11443 29 2 0 3 16 0.0796 20 16 0.0796 20 -12 -0.05971 -15 26 0.12935 32 -30 -0.14925 -37 10 0.04974 13 -18 -0.08955 -23 8 0.0398 10 -4 -0.0199 -5 14 blend
+ 80 106 47 90 -13 -0.06468 -16 11 0.05473 13 14 0.06966 17 3 blend
rrcurveto
- -129 -493 -106 -5 -137 21 6 34 2 blend
+ -129 -493 -106 -4.52736 -137 21 6.10448 34 2 blend
rmoveto
- -27 -73 -43 -71 -51 -50 8 -5 13 -9 5 -5 49 52 47 77 29 77 6 0 8 11 0 14 7 0 8 8 0 10 5 0 7 8 0 10 16 0 20 -8 0 -10 28 0 35 -17 -1 -22 15 0 18 -11 0 -14 -3 0 -4 -4 0 -5 -2 0 -2 -1 0 -1 -3 0 -4 -3 1 -3 18 blend
+ -27 -73 -43 -71 -51 -50 8 -5 13 -9 5 -5 49 52 47 77 29 77 6 0.02985 8 11 0.05473 14 7 0.03482 8 8 0.0398 10 5 0.02487 7 8 0.0398 10 16 0.0796 20 -8 -0.0398 -10 28 0.1393 35 -17 -1.08458 -22 15 0.07463 18 -11 -0.05473 -14 -3 -0.01492 -4 -4 -0.0199 -5 -2 0 -2 -1 0 -1 -3 -0.01492 -4 -3 0.98508 -3 18 blend
rrcurveto
- 124 -1 -66 4 -77 10 15 31 2 blend
+ 124 -1 -66 3.67165 -77 10 15.04974 31 2 blend
rmoveto
- -374 30 374 4 0 5 84 0 105 -4 0 -5 3 blend
+ -374 30 374 4 0.0199 5 84 0.41791 105 -4 -0.0199 -5 3 blend
vlineto
hintmask 0000000000101000
- -586 460 -72 0 -90 2 -21 -24 2 blend
+ -586 460 -72 -0.35822 -90 2 -21 -24 2 blend
rmoveto
- -875 30 845 209 30 -27 0 -33 77 0 96 -53 0 -66 -79 0 -99 80 0 99 5 blend
+ -875 30 845 209 30 -27 -0.13432 -33 77 0.38309 96 -53 -0.26369 -66 -79 -0.39304 -99 80 0.39801 99 5 blend
vlineto
- -8 -29 0 -36 1 blend
+ -8 -29 -0.14427 -36 1 blend
hmoveto
- -7 -29 0 -36 1 blend
+ -7 -29 -0.14427 -36 1 blend
vlineto
- -28 -75 -43 -102 -46 -95 14 0 17 10 0 13 11 0 14 -41 0 -51 17 0 22 4 0 5 6 blend
+ -28 -75 -43 -102 -46 -95 14 0.06966 17 10 0.04974 13 11 0.05473 14 -41 -0.20398 -51 17 0.08458 22 4 0.0199 5 6 blend
rrcurveto
hintmask 0001000000010000
- 89 -91 24 -74 -63 -32 0 -40 23 0 28 -11 0 -14 10 0 13 17 0 21 5 blend
+ 89 -91 24 -74 -63 -32 -0.15921 -40 23 0.11443 28 -11 -0.05473 -14 10 0.04974 13 17 0.08458 21 5 blend
vvcurveto
- -33 -6 -35 -19 -13 3 0 4 1 0 1 15 0 18 7 0 9 4 0 5 5 blend
+ -33 -6 -35 -19 -13 3 0.01492 4 1 0 1 15 0.07463 18 7 0.03482 9 4 0.0199 5 5 blend
vhcurveto
- -10 -6 -12 3 0 3 0 0 1 1 0 2 3 blend
- -3 -14 -1 -20 -2 -26 1 -29 4 0 5 1 0 1 7 0 9 2 0 2 13 0 16 -1 0 -1 11 0 14 7 blend
- 2 7 -9 4 -13 11 0 13 -21 0 -26 5 0 6 -33 0 -41 4 blend
- 1 -8 22 -2 27 0 22 -21 0 -27 1 0 2 1 0 1 -3 0 -4 0 0 1 -4 0 -5 6 blend
- 2 19 2 17 5 12 9 3 0 4 2 0 2 3 0 3 2 0 2 4 0 5 3 0 4 6 blend
+ -10 -6 -12 3 0.01492 3 0 0 1 1 0 2 3 blend
+ -3 -14 -1 -20 -2 -26 1 -29 4 0.0199 5 1 0 1 7 0.03482 9 2 0 2 13 0.06468 16 -1 0 -1 11 0.05473 14 7 blend
+ 2 7 -9 4 -13 11 0.05473 13 -21 -0.10448 -26 5 0.02487 6 -33 -0.16418 -41 4 blend
+ 1 -8 22 -2 27 0 22 -21 -0.10448 -27 1 0 2 1 0 1 -3 -0.01492 -4 0 0 1 -4 -0.0199 -5 6 blend
+ 2 19 2 17 5 12 9 3 0.01492 4 2 0 2 3 0.01492 3 2 0 2 4 0.0199 5 3 0.01492 4 6 blend
rrcurveto
- 25 17 10 7 0 9 5 0 7 4 0 5 3 blend
- 43 44 22 0 27 1 blend
+ 25 17 10 7 0.03482 9 5 0.02487 7 4 0.0199 5 3 blend
+ 43 44 22 0.10945 27 1 blend
vvcurveto
- 67 -22 76 -86 89 -8 0 -10 9 0 12 -6 0 -8 24 0 30 -11 0 -13 5 blend
+ 67 -22 76 -86 89 -8 -0.0398 -10 9 0.04477 12 -6 -0.02985 -8 24 0.1194 30 -11 -0.05473 -13 5 blend
vhcurveto
hintmask 0000000001001000
- 39 84 42 98 33 81 -10 0 -13 -4 0 -5 -8 0 -10 14 0 17 -6 0 -8 7 0 9 6 blend
+ 39 84 42 98 33 81 -10 -0.04974 -13 -4 -0.0199 -5 -8 -0.0398 -10 14 0.06966 17 -6 -0.02985 -8 7 0.03482 9 6 blend
rrcurveto
hintmask 0000000000001000
- -20 14 -6 -2 -60 0 -75 32 0 40 -12 0 -14 -2 0 -3 4 blend
+ -20 14 -6 -2 -60 -0.29851 -75 32 0.15921 40 -12 -0.05971 -14 -2 0 -3 4 blend
rlineto
</CharString>
<CharString name="cid17290" fdSelectIndex="1">
5 vsindex
- 121 30 -22 22 148 30 -30 136 23 30 129 30 116 30 -21 4 -29 52 3 92 -32 23 -21 32 -23 21 -54 9 -83 50 4 90 -50 -4 -90 22 27 62 -2 -43 -47 41 0 69 -44 0 -74 37 0 62 -50 0 -84 36 0 61 14 blend
+ 121 30 -22 22 148 30 -30 136 23 30 129 30 116 30 -21 3.94763 -29 52 3.12967 92 -32 22.9202 -21 32 -22.9202 21 -54 8.86534 -83 50 4.1247 90 -50 -4.1247 -90 22 27.05486 62 -2 -43 -47 41 0.10225 69 -44 -0.10973 -74 37 0.09227 62 -50 -0.1247 -84 36 0.08978 61 14 blend
hstemhm
- 167 30 129 30 -16 16 123 30 48 30 -6 29 -29 111 -30 30 -16 16 201 30 1 29 -29 0 -49 64 0 108 -34 0 -57 51 0 85 -29 0 -48 29 0 48 -72 -2 -123 60 2 103 -69 0 -115 46 0 77 -42 0 -70 42 0 70 -42 0 -70 67 0 111 -51 0 -85 51 0 85 -29 0 -48 29 0 48 -79 0 -132 47 0 79 -45 0 -75 42 0 70 22 blend
+ 167 30 129 30 -16 16 123 30 48 30 -6 29 -29 111 -30 30 -16 16 201 30 1 29 -29 -0.07233 -49 64 0.1596 108 -34 -0.0848 -57 51 0.12718 85 -29 -0.07233 -48 29 0.07233 48 -72 -2.17955 -123 60 2.14963 103 -69 -0.17207 -115 46 0.11472 77 -42 -0.10474 -70 42 0.10474 70 -42 -0.10474 -70 67 0.16708 111 -51 -0.12718 -85 51 0.12718 85 -29 -0.07233 -48 29 0.07233 48 -79 -0.197 -132 47 0.1172 79 -45 -0.11221 -75 42 0.10474 70 22 blend
vstemhm
hintmask 011011111011001010000000
- 326 793 1 0 2 17 0 29 2 blend
+ 326 793 1 0 2 17 0.04239 29 2 blend
rmoveto
- -280 24 0 40 1 blend
+ -280 24 0.05984 40 1 blend
vlineto
- -47 16 -8 59 -31 0 -53 6 0 10 -13 0 -21 20 0 33 4 blend
+ -47 16 -8 59 -31 -0.0773 -53 6 0.01497 10 -13 -0.03242 -21 20 0.04988 33 4 blend
vhcurveto
hintmask 000010000000100000000000
- 13 120 4 0 6 -46 0 -76 2 blend
+ 13 120 4 0 6 -46 -0.11472 -76 2 blend
0 13 4 0 7 1 blend
hhcurveto
- 49 10 20 82 4 12 0 19 12 0 20 3 0 5 2 0 3 4 0 8 5 blend
+ 49 10 20 82 4 12 0.02992 19 12 0.02992 20 3 0 5 2 0 3 4 0 8 5 blend
hvcurveto
hintmask 101010101000010000000000
- -10 2 -11 5 -8 6 -12 0 -21 3 0 5 -21 0 -35 6 0 11 -9 0 -14 7 0 11 6 blend
+ -10 2 -11 5 -8 6 -12 -0.02992 -21 3 0 5 -21 -0.05237 -35 6 0.01497 11 -9 -0.02245 -14 7 0.01746 11 6 blend
rrcurveto
- -75 19 0 32 1 blend
- -3 -5 -10 -29 -24 -102 1 0 1 1 0 2 7 0 12 9 0 14 42 0 70 5 blend
- 0 -18 6 0 10 1 blend
+ -75 19 0.04738 32 1 blend
+ -3 -5 -10 -29 -24 -102 1 0 1 1 0 2 7 0.01746 12 9 0.02245 14 42 0.10474 70 5 blend
+ 0 -18 6 0.01497 10 1 blend
hhcurveto
- -38 -6 4 21 10 0 18 2 0 3 0 0 -1 4 0 7 4 blend
+ -38 -6 4 21 10 0.02493 18 2 0 3 0 0 -1 4 0 7 4 blend
hvcurveto
- 280 -25 0 -41 1 blend
+ 280 -25 -0.06235 -41 1 blend
vlineto
- -41 -464 -40 -8 -74 10 20 41 2 blend
+ -41 -464 -40 -8.09975 -74 10 20.02493 41 2 blend
rmoveto
- -30 617 30 -50 -4 -90 -5 12 5 50 4 90 3 blend
+ -30 617 30 -50 -4.1247 -90 -5 11.98753 5 50 4.1247 90 3 blend
vlineto
- -661 -178 11 -4 12 4 -13 -7 2 blend
+ -661 -178 11 -3.97256 12 4 -13 -7 2 blend
rmoveto
- -30 689 30 -52 -3 -92 -11 0 -18 52 3 92 3 blend
+ -30 689 30 -52 -3.12967 -92 -11 -0.02744 -18 52 3.12967 92 3 blend
vlineto
hintmask 010101100111001000000000
- -481 284 -27 -2 -48 -32 36 -21 2 blend
+ -481 284 -27 -2.06734 -48 -32 35.9202 -21 2 blend
rmoveto
- -306 30 306 0 -13 0 60 2 103 0 13 0 3 blend
+ -306 30 306 0 -13 0 60 2.14963 103 0 13 0 3 blend
vlineto
- 218 0 -61 0 -102 -1 0 -1 2 blend
+ 218 0 -61 -0.15211 -102 -1 0 -1 2 blend
rmoveto
- -306 30 306 0 -13 0 61 1 104 0 13 0 3 blend
+ -306 30 306 0 -13 0 61 1.15211 104 0 13 0 3 blend
vlineto
- -417 358 -17 -1 -30 19 -43 -12 2 blend
+ -417 358 -17 -1.04239 -30 19 -42.95262 -12 2 blend
rmoveto
- -30 217 -116 -217 -30 247 176 -36 0 -61 -52 0 -87 50 0 84 52 0 87 -37 0 -62 -6 0 -10 23 0 39 7 blend
+ -30 217 -116 -217 -30 247 176 -36 -0.08978 -61 -52 -0.12967 -87 50 0.1247 84 52 0.12967 87 -37 -0.09227 -62 -6 -0.01497 -10 23 0.05736 39 7 blend
vlineto
- 75 -26 0 -44 1 blend
+ 75 -26 -0.06483 -44 1 blend
hmoveto
hintmask 000010100000001001000000
- -280 24 0 40 1 blend
+ -280 24 0.05984 40 1 blend
vlineto
- -47 17 -8 60 -31 0 -53 5 0 9 -13 0 -21 20 0 33 4 blend
+ -47 17 -8 60 -31 -0.0773 -53 5 0.01247 9 -13 -0.03242 -21 20 0.04988 33 4 blend
vhcurveto
- 12 125 5 0 8 -47 0 -78 2 blend
+ 12 125 5 0.01247 8 -47 -0.1172 -78 2 blend
0 14 4 0 7 1 blend
hhcurveto
- 49 11 20 82 3 12 0 20 12 0 19 3 1 6 2 1 6 5 0 9 5 blend
+ 49 11 20 82 3 12 0.02992 20 12 0.02992 19 3 1 6 2 1 6 5 0.01247 9 5 blend
hvcurveto
- -9 2 -12 4 -8 7 -14 1 -22 3 0 5 -19 -1 -34 7 0 12 -9 0 -14 6 0 10 6 blend
+ -9 2 -12 4 -8 7 -14 0.96509 -22 3 0 5 -19 -1.04738 -34 7 0.01746 12 -9 -0.02245 -14 6 0.01497 10 6 blend
rrcurveto
- -75 19 -1 29 1 blend
- -3 -5 -10 -30 -25 -105 1 -1 1 8 0 13 8 0 14 42 0 70 4 blend
- 0 -18 6 0 9 1 blend
+ -75 19 -0.95262 29 1 blend
+ -3 -5 -10 -30 -25 -105 1 -1 1 8 0.01994 13 8 0.01994 14 42 0.10474 70 4 blend
+ 0 -18 6 0.01497 9 1 blend
hhcurveto
- -40 -6 4 21 11 0 19 2 0 3 0 0 -1 4 1 8 4 blend
+ -40 -6 4 21 11 0.02744 19 2 0 3 0 0 -1 4 1 8 4 blend
hvcurveto
- 280 -25 -1 -42 1 blend
+ 280 -25 -1.06235 -42 1 blend
vlineto
hintmask 000001110000000110000000
- -16 -29 0 -48 1 blend
+ -16 -29 -0.07233 -48 1 blend
hmoveto
- -30 217 -116 -217 -30 247 176 -36 0 -61 -50 0 -84 50 0 84 50 0 84 -37 0 -62 -3 0 -5 23 0 39 7 blend
+ -30 217 -116 -217 -30 247 176 -36 -0.08978 -61 -50 -0.1247 -84 50 0.1247 84 50 0.1247 84 -37 -0.09227 -62 -3 0 -5 23 0.05736 39 7 blend
vlineto
- -424 -714 -19 0 -32 -12 0 -21 2 blend
+ -424 -714 -19 -0.04738 -32 -12 -0.02992 -21 2 blend
rmoveto
- -52 -54 -91 -49 -81 -33 8 -5 11 -13 4 -6 80 36 94 56 56 58 7 0 11 9 0 15 5 0 9 11 0 18 -2 0 -3 9 0 15 13 0 22 -11 0 -18 24 0 39 -22 0 -36 11 0 19 -12 0 -21 4 0 7 -4 0 -6 2 0 2 -2 0 -4 -1 0 -1 3 0 5 18 blend
+ -52 -54 -91 -49 -81 -33 8 -5 11 -13 4 -6 80 36 94 56 56 58 7 0.01746 11 9 0.02245 15 5 0.01247 9 11 0.02744 18 -2 0 -3 9 0.02245 15 13 0.03242 22 -11 -0.02744 -18 24 0.05984 39 -22 -0.05486 -36 11 0.02744 19 -12 -0.02992 -21 4 0 7 -4 0 -6 2 0 2 -2 0 -4 -1 0 -1 3 0 5 18 blend
rrcurveto
- 200 -7 -92 0 -154 -5 0 -8 2 blend
+ 200 -7 -92 -0.22943 -154 -5 -0.01247 -8 2 blend
rmoveto
- 76 -41 90 -62 46 -42 -6 0 -10 5 0 8 -5 0 -7 6 0 10 -4 0 -7 4 0 7 6 blend
+ 76 -41 90 -62 46 -42 -6 -0.01497 -10 5 0.01247 8 -5 -0.01247 -7 6 0.01497 10 -4 0 -7 4 0 7 6 blend
rrcurveto
- 22 23 -46 42 -91 60 -75 39 60 0 100 29 0 48 0 0 -1 -3 0 -5 3 0 5 -7 0 -11 6 0 11 -7 0 -11 8 blend
+ 22 23 -46 42 -91 60 -75 39 60 0.14963 100 29 0.07233 48 0 0 -1 -3 0 -5 3 0 5 -7 -0.01746 -11 6 0.01497 11 -7 -0.01746 -11 8 blend
rlinecurve
- -499 750 -48 0 -81 6 0 10 2 blend
+ -499 750 -48 -0.1197 -81 6 0.01497 10 2 blend
rmoveto
- -54 -167 -87 -164 -96 -108 7 -6 11 -12 4 -6 98 116 88 165 58 175 7 0 13 15 0 25 10 0 16 14 0 22 11 0 19 10 0 17 9 0 15 -20 0 -33 15 0 24 -44 0 -73 4 0 7 -18 0 -30 4 0 6 4 0 6 3 0 6 19 0 32 0 0 -1 1 0 1 18 blend
+ -54 -167 -87 -164 -96 -108 7 -6 11 -12 4 -6 98 116 88 165 58 175 7 0.01746 13 15 0.0374 25 10 0.02493 16 14 0.03491 22 11 0.02744 19 10 0.02493 17 9 0.02245 15 -20 -0.04988 -33 15 0.0374 24 -44 -0.10973 -73 4 0 7 -18 -0.04489 -30 4 0 6 4 0 6 3 0 6 19 0.04738 32 0 0 -1 1 0 1 18 blend
rrcurveto
- -113 -214 -60 0 -100 -23 0 -37 2 blend
+ -113 -214 -60 -0.14963 -100 -23 -0.05736 -37 2 blend
rmoveto
- -691 30 718 20 0 33 64 0 108 43 0 72 3 blend
+ -691 30 718 20 0.04988 33 64 0.1596 108 43 0.10724 72 3 blend
vlineto
-1 -1 0 -3 1 blend
2 rlineto
</CharString>
<CharString name="cid17852" fdSelectIndex="1">
5 vsindex
- -67 29 219 30 154 30 -16 16 150 30 -30 122 -85 30 -18 18 87 30 -30 140 -122 12 -14 0 -22 46 0 78 -59 -3 -106 46 0 77 -53 -9 -92 46 2 81 -18 20 -1 18 -20 1 -54 13 -80 46 2 81 -46 -2 -81 25 31 61 -14 -34 -48 60 0 100 -64 0 -107 64 0 107 -55 0 -92 54 0 90 -54 0 -90 36 0 59 -19 0 -31 37 0 62 22 blend
+ -67 29 219 30 154 30 -16 16 150 30 -30 122 -85 30 -18 18 87 30 -30 140 -122 12 -14 -0.03491 -22 46 0.11472 78 -59 -3.14713 -106 46 0.11472 77 -53 -9.13217 -92 46 2.11472 81 -18 19.95511 -1 18 -19.95511 1 -54 12.86534 -80 46 2.11472 81 -46 -2.11472 -81 25 31.06235 61 -14 -34.03491 -48 60 0.14963 100 -64 -0.1596 -107 64 0.1596 107 -55 -0.13716 -92 54 0.13466 90 -54 -0.13466 -90 36 0.08978 59 -19 -0.04738 -31 37 0.09227 62 22 blend
hstemhm
- 51 188 -30 30 -30 149 21 30 -18 18 -13 13 66 30 -12 12 135 30 41 30 172 30 -6 28 -8 0 -14 30 0 50 -62 0 -103 62 0 103 -62 0 -103 32 0 53 -5 0 -7 59 0 98 -24 0 -41 24 0 41 -16 0 -27 16 0 27 -32 0 -53 53 0 88 -33 0 -56 33 0 56 -87 0 -146 63 0 106 -42 0 -70 54 0 90 -99 0 -165 55 0 91 -42 0 -70 45 0 75 24 blend
+ 51 188 -30 30 -30 149 21 30 -18 18 -13 13 66 30 -12 12 135 30 41 30 172 30 -6 28 -8 -0.01994 -14 30 0.07481 50 -62 -0.15462 -103 62 0.15462 103 -62 -0.15462 -103 32 0.0798 53 -5 -0.01247 -7 59 0.14713 98 -24 -0.05984 -41 24 0.05984 41 -16 -0.0399 -27 16 0.0399 27 -32 -0.0798 -53 53 0.13217 88 -33 -0.08229 -56 33 0.08229 56 -87 -0.21696 -146 63 0.1571 106 -42 -0.10474 -70 54 0.13466 90 -99 -0.24689 -165 55 0.13716 91 -42 -0.10474 -70 45 0.11221 75 24 blend
vstemhm
hintmask 000000100001000000000000
- 51 612 -8 0 -14 29 0 49 2 blend
+ 51 612 -8 -0.01994 -14 29 0.07233 49 2 blend
rmoveto
- -30 -60 0 -100 1 blend
+ -30 -60 -0.14963 -100 1 blend
vlineto
hintmask 000000100000010000000000
- 307 30 60 0 100 1 blend
+ 307 30 60 0.14963 100 1 blend
hlineto
hintmask 000000010010100100000000
- -149 228 -32 0 -53 -20 0 -34 2 blend
+ -149 228 -32 -0.0798 -53 -20 -0.04988 -34 2 blend
rmoveto
- -918 30 918 -19 0 -32 62 0 103 19 0 32 3 blend
+ -918 30 918 -19 -0.04738 -32 62 0.15462 103 19 0.04738 32 3 blend
vlineto
- -36 -238 -55 0 -91 -32 0 -53 2 blend
+ -36 -238 -55 -0.13716 -91 -32 -0.0798 -53 2 blend
rmoveto
- -31 -160 -74 -193 -68 -95 7 -5 10 -11 6 -8 70 101 74 203 33 160 6 0 10 25 0 42 13 0 21 23 0 37 4 0 7 1 0 2 8 0 14 -18 0 -30 13 0 21 -27 0 -44 4 0 7 -19 0 -32 1 0 2 6 0 10 -12 0 -20 -2 0 -3 -2 0 -4 -1 0 -2 18 blend
+ -31 -160 -74 -193 -68 -95 7 -5 10 -11 6 -8 70 101 74 203 33 160 6 0.01497 10 25 0.06235 42 13 0.03242 21 23 0.05736 37 4 0 7 1 0 2 8 0.01994 14 -18 -0.04489 -30 13 0.03242 21 -27 -0.06734 -44 4 0 7 -19 -0.04738 -32 1 0 2 6 0.01497 10 -12 -0.02992 -20 -2 0 -3 -2 0 -4 -1 0 -2 18 blend
rrcurveto
- 4 -143 19 0 32 77 0 128 2 blend
+ 4 -143 19 0.04738 32 77 0.19202 128 2 blend
rmoveto
- -21 -16 25 -26 72 -92 21 -33 -23 0 -38 -34 0 -57 1 0 2 -15 0 -24 -12 0 -21 -6 0 -11 2 0 3 -18 0 -29 8 blend
+ -21 -16 25 -26 72 -92 21 -33 -23 -0.05736 -38 -34 -0.0848 -57 1 0 2 -15 -0.0374 -24 -12 -0.02992 -21 -6 -0.01497 -11 2 0 3 -18 -0.04489 -29 8 blend
rlinecurve
- 24 24 -18 25 -81 96 -22 22 28 0 48 63 0 105 2 0 2 -1 0 -2 1 0 3 10 0 16 1 0 1 1 0 2 8 blend
+ 24 24 -18 25 -81 96 -22 22 28 0.06982 48 63 0.1571 105 2 0 2 -1 0 -2 1 0 3 10 0.02493 16 1 0 1 1 0 2 8 blend
rlinecurve
- 157 278 1 0 1 -14 0 -23 2 blend
+ 157 278 1 0 1 -14 -0.03491 -23 2 blend
rmoveto
hintmask 000000001000000100000000
- -30 559 -54 0 -90 -17 3 -23 2 blend
+ -30 559 -54 -0.13466 -90 -17 2.95761 -23 2 blend
vlineto
hintmask 010000000010000000100000
- 30 54 0 90 1 blend
+ 30 54 0.13466 90 1 blend
vlineto
- -457 -518 29 -3 43 -9 -3 -20 2 blend
+ -457 -518 29 -2.92767 43 -9 -3.02245 -20 2 blend
rmoveto
- -30 176 30 -46 0 -77 -17 0 -27 46 0 77 3 blend
+ -30 176 30 -46 -0.11472 -77 -17 -0.04239 -27 46 0.11472 77 3 blend
vlineto
hintmask 000000000100000001010000
- -194 120 -3 0 -5 -42 37 -35 2 blend
+ -194 120 -3 0 -5 -42 36.89526 -35 2 blend
rmoveto
- -365 30 365 38 -29 45 53 0 88 -38 29 -45 3 blend
+ -365 30 365 38 -28.90524 45 53 0.13217 88 -38 28.90524 -45 3 blend
vlineto
- 135 508 -87 0 -146 33 -34 24 2 blend
+ 135 508 -87 -0.21696 -146 33 -33.91771 24 2 blend
rmoveto
hintmask 000000000010000000010000
- -122 30 -19 0 -31 63 0 106 2 blend
+ -122 30 -19 -0.04738 -31 63 0.1571 106 2 blend
vlineto
hintmask 000101000100000000010000
- 122 19 0 31 1 blend
+ 122 19 0.04738 31 1 blend
vlineto
- -115 -172 -60 0 -100 -27 34 -19 2 blend
+ -115 -172 -60 -0.14963 -100 -27 33.93266 -19 2 blend
rmoveto
- -288 30 288 11 -24 18 50 0 83 -11 24 -18 3 blend
+ -288 30 288 11 -23.97256 18 50 0.1247 83 -11 23.97256 -18 3 blend
vlineto
- 148 -62 -2 -106 1 blend
+ 148 -62 -2.15462 -106 1 blend
hmoveto
- -288 30 288 11 -24 18 50 0 83 -11 24 -18 3 blend
+ -288 30 288 11 -23.97256 18 50 0.1247 83 -11 23.97256 -18 3 blend
vlineto
- 156 -394 -30 2 -47 19 -34 6 2 blend
+ 156 -394 -30 1.92519 -47 19 -33.95262 6 2 blend
rmoveto
- -52 -36 -89 -48 -61 -29 7 0 12 2 0 4 14 0 23 3 0 4 11 0 18 4 0 8 6 blend
+ -52 -36 -89 -48 -61 -29 7 0.01746 12 2 0 4 14 0.03491 23 3 0 4 11 0.02744 18 4 0 8 6 blend
rrcurveto
- 15 -21 62 28 86 41 57 44 25 0 42 -39 0 -66 -10 0 -17 -4 0 -6 -12 0 -19 -3 0 -5 -6 0 -11 -5 0 -9 8 blend
+ 15 -21 62 28 86 41 57 44 25 0.06235 42 -39 -0.09726 -66 -10 -0.02493 -17 -4 0 -6 -12 -0.02992 -19 -3 0 -5 -6 -0.01497 -11 -5 -0.01247 -9 8 blend
rlinecurve
hintmask 101010000000000010001100
- -541 323 10 0 17 44 5 84 2 blend
+ -541 323 10 0.02493 17 44 5.10973 84 2 blend
rmoveto
- -30 517 -150 -517 -30 547 210 -46 -2 -81 -74 0 -123 54 -13 80 74 0 123 -46 -2 -81 -19 0 -32 38 17 82 7 blend
+ -30 517 -150 -517 -30 547 210 -46 -2.11472 -81 -74 -0.18454 -123 54 -12.86534 80 74 0.18454 123 -46 -2.11472 -81 -19 -0.04738 -32 38 17.09476 82 7 blend
vlineto
- -232 -242 -10 0 -16 -28 29 -27 2 blend
+ -232 -242 -10 -0.02493 -16 -28 28.93018 -27 2 blend
rmoveto
- -344 58 -32 71 1 blend
+ -344 58 -31.85536 71 1 blend
vlineto
- -47 15 -9 54 -33 -2 -58 3 0 4 -15 0 -25 22 0 37 4 blend
+ -47 15 -9 54 -33 -2.08229 -58 3 0 4 -15 -0.0374 -25 22 0.05486 37 4 blend
vhcurveto
hintmask 100000000010001000001010
- 12 100 3 0 5 -47 0 -78 2 blend
+ 12 100 3 0 5 -47 -0.1172 -78 2 blend
0 12 4 0 6 1 blend
hhcurveto
- 48 10 25 102 3 12 0 20 11 0 19 4 1 9 11 -1 16 5 0 8 5 blend
+ 48 10 25 102 3 12 0.02992 20 11 0.02744 19 4 1 9 11 -0.97256 16 5 0.01247 8 5 blend
hvcurveto
- -9 3 -11 4 -8 6 -14 0 -23 3 -1 5 -23 1 -37 8 1 15 -8 -1 -15 8 0 12 6 blend
+ -9 3 -11 4 -8 6 -14 -0.03491 -23 3 -1 5 -23 0.94264 -37 8 1.01994 15 -8 -1.01994 -15 8 0.01994 12 6 blend
rrcurveto
- -97 11 1 20 1 blend
- -3 -4 -14 -29 -21 -84 0 0 1 1 -1 1 10 0 16 10 1 17 43 -1 71 5 blend
- 0 -16 7 0 12 1 blend
+ -97 11 1.02744 20 1 blend
+ -3 -4 -14 -29 -21 -84 0 0 1 1 -1 1 10 0.02493 16 10 1.02493 17 43 -0.89276 71 5 blend
+ 0 -16 7 0.01746 12 1 blend
hhcurveto
- -33 -6 5 22 13 0 22 3 0 5 -1 0 -2 4 0 7 4 blend
+ -33 -6 5 22 13 0.03242 22 3 0 5 -1 0 -2 4 0 7 4 blend
hvcurveto
- 344 -59 34 -71 1 blend
+ 344 -59 33.85287 -71 1 blend
vlineto
- -346 -371 -24 0 -41 65 -34 78 2 blend
+ -346 -371 -24 -0.05984 -41 65 -33.8379 78 2 blend
rmoveto
- 10 -31 77 16 100 22 99 21 3 0 5 -54 0 -90 -2 0 -3 -3 0 -5 -9 0 -15 -6 0 -10 -10 0 -17 -5 0 -8 8 blend
+ 10 -31 77 16 100 22 99 21 3 0 5 -54 -0.13466 -90 -2 0 -3 -3 0 -5 -9 -0.02245 -15 -6 -0.01497 -10 -10 -0.02493 -17 -5 -0.01247 -8 8 blend
rlinecurve
- -2 29 -108 -22 -104 -22 -72 -13 -3 0 -5 52 0 86 9 0 16 6 0 10 8 0 13 6 0 11 4 0 6 4 0 6 8 blend
+ -2 29 -108 -22 -104 -22 -72 -13 -3 0 -5 52 0.12967 86 9 0.02245 16 6 0.01497 10 8 0.01994 13 6 0.01497 11 4 0 6 4 0 6 8 blend
rlinecurve
- -16 767 -44 0 -72 -13 0 -21 2 blend
+ -16 767 -44 -0.10973 -72 -13 -0.03242 -21 2 blend
rmoveto
- -316 -6 0 -11 1 blend
+ -316 -6 -0.01497 -11 1 blend
vlineto
- -142 -7 -194 -74 -141 2 0 2 -2 0 -2 2 0 4 6 0 9 4 blend
+ -142 -7 -194 -74 -141 2 0 2 -2 0 -2 2 0 4 6 0.01497 9 4 blend
vhcurveto
- 8 -3 13 -7 5 -6 13 0 21 -7 0 -11 25 0 43 -20 0 -34 11 0 17 -10 0 -17 6 blend
+ 8 -3 13 -7 5 -6 13 0.03242 21 -7 -0.01746 -11 25 0.06235 43 -20 -0.04988 -34 11 0.02744 17 -10 -0.02493 -17 6 blend
rrcurveto
- 75 143 10 205 145 4 0 7 3 0 5 2 0 4 21 0 35 9 0 15 5 blend
+ 75 143 10 205 145 4 0 7 3 0 5 2 0 4 21 0.05237 35 9 0.02245 15 5 blend
vvcurveto
- 316 6 0 11 1 blend
+ 316 6 0.01497 11 1 blend
vlineto
</CharString>
<CharString name="cid18480" fdSelectIndex="1">
3 vsindex
- -71 30 427 30 153 30 33 111 -30 30 -30 126 -6 0 -13 45 0 102 -58 0 -132 38 0 87 -48 0 -111 38 0 87 -4 -2 -13 21 2 53 -43 0 -99 43 0 99 -43 0 -99 24 0 55 12 blend
+ -71 30 427 30 153 30 33 111 -30 30 -30 126 -6 -0.0107 -13 45 0.08022 102 -58 -0.1034 -132 38 0.06773 87 -48 -0.08556 -111 38 0.06773 87 -4 -2 -13 21 2.03743 53 -43 -0.07664 -99 43 0.07664 99 -43 -0.07664 -99 24 0.04279 55 12 blend
hstemhm
- 159 30 -19 19 126 30 -6 30 281 30 160 30 18 31 -7 0 -16 50 0 114 -18 0 -42 18 0 42 -71 0 -161 50 0 114 -26 0 -61 48 0 111 -66 0 -150 51 0 115 -68 0 -154 50 0 114 -36 -1 -84 44 1 101 14 blend
+ 159 30 -19 19 126 30 -6 30 281 30 160 30 18 31 -7 -0.01248 -16 50 0.08913 114 -18 -0.03209 -42 18 0.03209 42 -71 -0.12656 -161 50 0.08913 114 -26 -0.04634 -61 48 0.08556 111 -66 -0.11765 -150 51 0.09091 115 -68 -0.12122 -154 50 0.08913 114 -36 -1.06418 -84 44 1.07843 101 14 blend
vstemhm
hintmask 1110100101110000
- 58 743 -1 0 -2 26 0 60 2 blend
+ 58 743 -1 0 -2 26 0.04634 60 2 blend
rmoveto
- -30 887 30 -43 0 -99 2 0 5 43 0 99 3 blend
+ -30 887 30 -43 -0.07664 -99 2 0 5 43 0.07664 99 3 blend
vlineto
hintmask 0000010010000000
- -630 96 -29 0 -66 -19 0 -44 2 blend
+ -630 96 -29 -0.0517 -66 -19 -0.03387 -44 2 blend
rmoveto
hintmask 0001000010000000
- -207 30 -2 -2 -9 50 0 114 2 blend
+ -207 30 -2 -2 -9 50 0.08913 114 2 blend
vlineto
hintmask 0000010010100000
207 2 2 9 1 blend
vlineto
- 305 -44 0 -100 1 blend
+ 305 -44 -0.07843 -100 1 blend
hmoveto
hintmask 0001000000100000
- -207 30 -2 -2 -9 51 0 115 2 blend
+ -207 30 -2 -2 -9 51 0.09091 115 2 blend
vlineto
hintmask 0010011000100000
207 2 2 9 1 blend
vlineto
- -521 -240 -36 0 -82 2 0 4 2 blend
+ -521 -240 -36 -0.06418 -82 2 0 4 2 blend
rmoveto
-206 -5 0 -10 1 blend
vlineto
- -137 -15 -184 -109 -136 5 0 11 3 0 6 5 0 10 -1 0 -1 8 0 19 5 blend
+ -137 -15 -184 -109 -136 5 0 11 3 0 6 5 0 10 -1 0 -1 8 0.01427 19 5 blend
vhcurveto
- 7 -3 12 -9 5 -6 12 0 27 -6 0 -13 22 0 51 -15 0 -35 10 0 21 -8 0 -19 6 blend
+ 7 -3 12 -9 5 -6 12 0.0214 27 -6 -0.0107 -13 22 0.03922 51 -15 -0.02673 -35 10 0.01782 21 -8 -0.01427 -19 6 blend
rrcurveto
hintmask 1110000101010000
- 112 139 18 194 141 3 0 7 -4 0 -8 1 0 3 11 0 24 4 0 10 5 blend
+ 112 139 18 194 141 3 0 7 -4 0 -8 1 0 3 11 0.0196 24 4 0 10 5 blend
vvcurveto
207 5 0 11 1 blend
vlineto
- -19 -18 0 -42 1 blend
+ -19 -18 -0.03209 -42 1 blend
hmoveto
- -30 670 -153 -670 -30 700 213 -38 0 -87 -64 0 -144 48 0 111 64 0 144 -38 0 -87 -14 0 -30 28 0 63 7 blend
+ -30 670 -153 -670 -30 700 213 -38 -0.06773 -87 -64 -0.11407 -144 48 0.08556 111 64 0.11407 144 -38 -0.06773 -87 -14 -0.02495 -30 28 0.04991 63 7 blend
vlineto
- -531 -249 -15 0 -36 -23 0 -51 2 blend
+ -531 -249 -15 -0.02673 -36 -23 -0.041 -51 2 blend
rmoveto
- -343 50 0 112 1 blend
+ -343 50 0.08913 112 1 blend
vlineto
- -66 31 -12 105 -29 0 -66 6 0 14 -13 0 -28 29 0 66 4 blend
+ -66 31 -12 105 -29 -0.0517 -66 6 0.0107 14 -13 -0.02318 -28 29 0.0517 66 4 blend
vhcurveto
- 23 278 5 0 12 -59 0 -134 2 blend
- 0 24 6 0 14 1 blend
+ 23 278 5 0 12 -59 -0.10516 -134 2 blend
+ 0 24 6 0.0107 14 1 blend
hhcurveto
hintmask 1000000001001000
- 96 15 31 123 8 20 0 44 11 0 26 4 0 8 14 0 32 5 0 11 5 blend
+ 96 15 31 123 8 20 0.03564 44 11 0.0196 26 4 0 8 14 0.02495 32 5 0 11 5 blend
hvcurveto
- -9 3 -13 4 -9 7 -13 0 -30 2 0 5 -21 0 -48 8 0 17 -10 -1 -23 6 0 15 6 blend
+ -9 3 -13 4 -9 7 -13 -0.02318 -30 2 0 5 -21 -0.03743 -48 8 0.01427 17 -10 -1.01782 -23 6 0.0107 15 6 blend
rrcurveto
- -117 -6 -11 -21 -69 -56 -236 8 0 18 -1 1 -1 1 -1 1 3 0 7 3 1 9 7 0 15 49 0 112 7 blend
+ -117 -6 -11 -21 -69 -56 -236 8 0.01427 18 -1 1 -1 1 -1 1 3 0 7 3 1 9 7 0.01248 15 49 0.08734 112 7 blend
0 -41 4 0 8 1 blend
hhcurveto
-84 -16 11 37 4 0 10 2 0 5 -3 0 -7 1 0 2 4 blend
hvcurveto
- 343 -51 0 -115 1 blend
+ 343 -51 -0.09091 -115 1 blend
vlineto
- 444 -47 -59 0 -135 26 0 59 2 blend
+ 444 -47 -59 -0.10516 -135 26 0.04634 59 2 blend
rmoveto
- -101 -52 -195 -56 -169 -40 4 -7 5 -10 3 -7 172 40 193 54 120 56 4 0 8 3 0 7 18 0 43 9 0 19 12 0 26 8 0 19 5 0 12 -10 0 -22 7 0 15 -18 0 -41 1 0 3 -11 0 -25 -8 0 -19 -9 0 -21 -8 0 -18 -8 0 -19 5 0 11 0 0 1 18 blend
+ -101 -52 -195 -56 -169 -40 4 -7 5 -10 3 -7 172 40 193 54 120 56 4 0 8 3 0 7 18 0.03209 43 9 0.01604 19 12 0.0214 26 8 0.01427 19 5 0 12 -10 -0.01782 -22 7 0.01248 15 -18 -0.03209 -41 1 0 3 -11 -0.0196 -25 -8 -0.01427 -19 -9 -0.01604 -21 -8 -0.01427 -18 -8 -0.01427 -19 5 0 11 0 0 1 18 blend
rrcurveto
</CharString>
<CharString name="cid22370" fdSelectIndex="1">
2 vsindex
- 64 30 77 30 76 30 74 30 72 30 109 30 25 84 -30 30 -30 108 -2 0 -2 42 0 47 -48 0 -54 38 0 43 -48 0 -54 38 0 43 -46 0 -52 42 0 47 -43 0 -48 56 1 63 -72 -1 -81 57 1 64 -8 -32 -41 30 32 65 -65 -1 -73 65 1 73 -65 -1 -73 43 0 49 18 blend
+ 64 30 77 30 76 30 74 30 72 30 109 30 25 84 -30 30 -30 108 -2 -0.01802 -2 42 0.37837 47 -48 -0.43243 -54 38 0.34235 43 -48 -0.43243 -54 38 0.34235 43 -46 -0.41441 -52 42 0.37837 47 -43 -0.38739 -48 56 0.5045 63 -72 -0.64865 -81 57 0.51352 64 -8 -32.07207 -41 30 32.27026 65 -65 -0.58559 -73 65 0.58559 73 -65 -0.58559 -73 43 0.38739 49 18 blend
hstemhm
- 135 30 21 30 102 30 14 30 205 30 17 30 113 30 19 30 -19 0 -21 87 2 98 -86 -2 -97 99 1 111 -125 -1 -141 98 1 111 -79 -1 -89 75 1 84 -99 -1 -111 75 1 84 -77 -1 -86 100 1 112 -127 -1 -143 105 1 118 -102 -1 -114 94 1 105 16 blend
+ 135 30 21 30 102 30 14 30 205 30 17 30 113 30 19 30 -19 -0.17117 -21 87 1.78378 98 -86 -1.77478 -97 99 0.89189 111 -125 -1.12613 -141 98 0.88289 111 -79 -0.71172 -89 75 0.67567 84 -99 -0.89189 -111 75 0.67567 84 -77 -0.6937 -86 100 0.9009 112 -127 -1.14415 -143 105 0.94595 118 -102 -0.91891 -114 94 0.84685 105 16 blend
vstemhm
hintmask 111111010011001100000000
- 53 761 -3 0 -3 36 0 40 2 blend
+ 53 761 -3 -0.02702 -3 36 0.32433 40 2 blend
rmoveto
- -30 896 30 -65 -1 -73 5 0 5 65 1 73 3 blend
+ -30 896 30 -65 -0.58559 -73 5 0.04504 5 65 0.58559 73 3 blend
vlineto
hintmask 000000001001000000000000
- -631 78 -46 0 -52 -22 0 -24 2 blend
+ -631 78 -46 -0.41441 -52 -22 -0.1982 -24 2 blend
rmoveto
hintmask 000000100001000000000000
- -162 30 -8 -32 -41 98 1 111 2 blend
+ -162 30 -8 -32.07207 -41 98 0.88289 111 2 blend
vlineto
hintmask 000000001001001000000000
- 162 8 32 41 1 blend
+ 162 8 32.07207 41 1 blend
vlineto
- 296 -105 -1 -118 1 blend
+ 296 -105 -0.94595 -118 1 blend
hmoveto
hintmask 000000100000001000000000
- -162 30 -8 -32 -41 100 1 112 2 blend
+ -162 30 -8 -32.07207 -41 100 0.9009 112 2 blend
vlineto
hintmask 000000001000001000000000
- 162 8 32 41 1 blend
+ 162 8 32.07207 41 1 blend
vlineto
hintmask 000011000100110010000000
- -47 -217 -23 0 -26 -57 -1 -64 2 blend
+ -47 -217 -23 -0.20721 -26 -57 -0.51352 -64 2 blend
rmoveto
- 209 -109 -209 -101 -1 -113 72 1 81 101 1 113 3 blend
+ 209 -109 -209 -101 -0.90991 -113 72 0.64865 81 101 0.90991 113 3 blend
hlineto
- -235 109 24 0 27 -72 -1 -81 2 blend
+ -235 109 24 0.21622 27 -72 -0.64865 -81 2 blend
rmoveto
- 205 -109 -205 -99 -1 -111 72 1 81 99 1 111 3 blend
+ 205 -109 -205 -99 -0.89189 -111 72 0.64865 81 99 0.89189 111 3 blend
hlineto
- -227 109 18 1 21 -72 -1 -81 2 blend
+ -227 109 18 1.16216 21 -72 -0.64865 -81 2 blend
rmoveto
- 197 -109 -197 -93 -2 -105 72 1 81 93 2 105 3 blend
+ 197 -109 -197 -93 -1.83784 -105 72 0.64865 81 93 1.83784 105 3 blend
hlineto
- -30 139 -87 -2 -98 -15 0 -17 2 blend
+ -30 139 -87 -1.78378 -98 -15 -0.13513 -17 2 blend
rmoveto
- -169 731 169 -41 0 -46 38 0 42 41 0 46 3 blend
+ -169 731 169 -41 -0.36937 -46 38 0.34235 42 41 0.36937 46 3 blend
vlineto
hintmask 111100000010000100000000
- -650 -375 62 1 70 -32 0 -36 2 blend
+ -650 -375 62 0.55856 70 -32 -0.28828 -36 2 blend
rmoveto
- 571 -76 -571 -159 -1 -179 48 0 54 159 1 179 3 blend
+ 571 -76 -571 -159 -1.43243 -179 48 0.43243 54 159 1.43243 179 3 blend
hlineto
- -30 -38 0 -43 1 blend
+ -30 -38 -0.34235 -43 1 blend
vmoveto
- 571 -77 -571 -159 -1 -179 48 0 54 159 1 179 3 blend
+ 571 -77 -571 -159 -1.43243 -179 48 0.43243 54 159 1.43243 179 3 blend
hlineto
- 287 -66 -1 -74 1 blend
+ 287 -66 -0.59459 -74 1 blend
vmoveto
- 571 -74 -571 -159 -1 -179 46 0 52 159 1 179 3 blend
+ 571 -74 -571 -159 -1.43243 -179 46 0.41441 52 159 1.43243 179 3 blend
hlineto
- -30 104 -99 -1 -111 -4 0 -5 2 blend
+ -30 104 -99 -0.89189 -111 -4 -0.03604 -5 2 blend
rmoveto
- -347 631 347 -18 0 -20 45 0 50 18 0 20 3 blend
+ -347 631 347 -18 -0.16216 -20 45 0.40541 50 18 0.16216 20 3 blend
vlineto
- -216 -389 -86 -1 -96 -31 0 -35 2 blend
+ -216 -389 -86 -0.77478 -96 -31 -0.27928 -35 2 blend
rmoveto
- 127 -34 121 -39 72 -31 -17 0 -19 2 0 2 -13 0 -15 -2 0 -2 -13 0 -15 3 0 3 6 blend
+ 127 -34 121 -39 72 -31 -17 -0.15315 -19 2 0.01802 2 -13 -0.11711 -15 -2 -0.01802 -2 -13 -0.11711 -15 3 0.02702 3 6 blend
rrcurveto
- 31 22 -78 32 -126 39 -121 136 1 153 39 0 44 1 0 1 -3 0 -3 -8 0 -9 4 0 5 9 0 10 7 blend
+ 31 22 -78 32 -126 39 -121 136 1.22522 153 39 0.35135 44 1 0 1 -3 -0.02702 -3 -8 -0.07207 -9 4 0.03604 5 9 0.08109 10 7 blend
31 rlinecurve
- -258 -1 -67 -1 -75 0 0 -1 2 blend
+ -258 -1 -67 -0.6036 -75 0 0 -1 2 blend
rmoveto
- -81 -39 -128 -36 -107 -23 8 -6 12 -12 5 -6 103 25 130 41 86 43 9 0 10 6 0 7 3 0 4 7 0 8 -4 0 -5 7 0 8 19 0 22 -14 0 -16 32 0 36 -32 0 -36 17 0 19 -19 0 -21 3 0 3 -1 0 -1 5 0 6 2 0 2 1 0 1 4 0 5 18 blend
+ -81 -39 -128 -36 -107 -23 8 -6 12 -12 5 -6 103 25 130 41 86 43 9 0.08109 10 6 0.05405 7 3 0.02702 4 7 0.06306 8 -4 -0.03604 -5 7 0.06306 8 19 0.17117 22 -14 -0.12613 -16 32 0.28828 36 -32 -0.28828 -36 17 0.15315 19 -19 -0.17117 -21 3 0.02702 3 -1 0 -1 5 0.04504 6 2 0.01802 2 1 0 1 4 0.03604 5 18 blend
rrcurveto
</CharString>
</CharStrings>
diff --git a/Tests/varLib/data/test_results/TestVVAR.ttx b/Tests/varLib/data/test_results/TestVVAR.ttx
index 53c038c1..c16266d3 100644
--- a/Tests/varLib/data/test_results/TestVVAR.ttx
+++ b/Tests/varLib/data/test_results/TestVVAR.ttx
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ttFont sfntVersion="OTTO" ttLibVersion="3.39">
+<ttFont sfntVersion="OTTO" ttLibVersion="4.42">
<VVAR>
<Version value="0x00010000"/>
@@ -7,14 +7,7 @@
<Format value="1"/>
<VarRegionList>
<!-- RegionAxisCount=1 -->
- <!-- RegionCount=1 -->
- <Region index="0">
- <VarRegionAxis index="0">
- <StartCoord value="0.0"/>
- <PeakCoord value="1.0"/>
- <EndCoord value="1.0"/>
- </VarRegionAxis>
- </Region>
+ <!-- RegionCount=0 -->
</VarRegionList>
<!-- VarDataCount=1 -->
<VarData index="0">
diff --git a/Tests/varLib/featureVars_test.py b/Tests/varLib/featureVars_test.py
index 89675af2..7a3a6650 100644
--- a/Tests/varLib/featureVars_test.py
+++ b/Tests/varLib/featureVars_test.py
@@ -1,36 +1,45 @@
-from fontTools.varLib.featureVars import (
- overlayFeatureVariations)
+from fontTools.varLib.featureVars import overlayFeatureVariations, overlayBox
-def test_linear(n = 10):
+def _test_linear(n):
conds = []
for i in range(n):
end = i / n
- start = end - 1.
- region = [{'X': (start, end)}]
- subst = {'g%.2g'%start: 'g%.2g'%end}
+ start = end - 1.0
+ region = [{"X": (start, end)}]
+ subst = {"g%.2g" % start: "g%.2g" % end}
conds.append((region, subst))
overlaps = overlayFeatureVariations(conds)
assert len(overlaps) == 2 * n - 1, overlaps
return conds, overlaps
-def test_quadratic(n = 10):
+
+def test_linear():
+ _test_linear(10)
+
+
+def _test_quadratic(n):
conds = []
for i in range(1, n + 1):
- region = [{'X': (0, i / n),
- 'Y': (0, (n + 1 - i) / n)}]
+ region = [{"X": (0, i / n), "Y": (0, (n + 1 - i) / n)}]
subst = {str(i): str(n + 1 - i)}
conds.append((region, subst))
overlaps = overlayFeatureVariations(conds)
assert len(overlaps) == n * (n + 1) // 2, overlaps
return conds, overlaps
+
+def test_quadratic():
+ _test_quadratic(10)
+
+
def _merge_substitutions(substitutions):
merged = {}
for subst in substitutions:
merged.update(subst)
return merged
+
def _match_condition(location, overlaps):
for box, substitutions in overlaps:
for tag, coord in location.items():
@@ -39,59 +48,69 @@ def _match_condition(location, overlaps):
return _merge_substitutions(substitutions)
return {} # no match
+
def test_overlaps_1():
# https://github.com/fonttools/fonttools/issues/1400
conds = [
- ([{'abcd': (4, 9)}], {0: 0}),
- ([{'abcd': (5, 10)}], {1: 1}),
- ([{'abcd': (0, 8)}], {2: 2}),
- ([{'abcd': (3, 7)}], {3: 3}),
+ ([{"abcd": (4, 9)}], {0: 0}),
+ ([{"abcd": (5, 10)}], {1: 1}),
+ ([{"abcd": (0, 8)}], {2: 2}),
+ ([{"abcd": (3, 7)}], {3: 3}),
]
overlaps = overlayFeatureVariations(conds)
- subst = _match_condition({'abcd': 0}, overlaps)
+ subst = _match_condition({"abcd": 0}, overlaps)
assert subst == {2: 2}
- subst = _match_condition({'abcd': 1}, overlaps)
+ subst = _match_condition({"abcd": 1}, overlaps)
assert subst == {2: 2}
- subst = _match_condition({'abcd': 3}, overlaps)
+ subst = _match_condition({"abcd": 3}, overlaps)
assert subst == {2: 2, 3: 3}
- subst = _match_condition({'abcd': 4}, overlaps)
+ subst = _match_condition({"abcd": 4}, overlaps)
assert subst == {0: 0, 2: 2, 3: 3}
- subst = _match_condition({'abcd': 5}, overlaps)
+ subst = _match_condition({"abcd": 5}, overlaps)
assert subst == {0: 0, 1: 1, 2: 2, 3: 3}
- subst = _match_condition({'abcd': 7}, overlaps)
+ subst = _match_condition({"abcd": 7}, overlaps)
assert subst == {0: 0, 1: 1, 2: 2, 3: 3}
- subst = _match_condition({'abcd': 8}, overlaps)
+ subst = _match_condition({"abcd": 8}, overlaps)
assert subst == {0: 0, 1: 1, 2: 2}
- subst = _match_condition({'abcd': 9}, overlaps)
+ subst = _match_condition({"abcd": 9}, overlaps)
assert subst == {0: 0, 1: 1}
- subst = _match_condition({'abcd': 10}, overlaps)
+ subst = _match_condition({"abcd": 10}, overlaps)
assert subst == {1: 1}
+
def test_overlaps_2():
# https://github.com/fonttools/fonttools/issues/1400
conds = [
- ([{'abcd': (1, 9)}], {0: 0}),
- ([{'abcd': (8, 10)}], {1: 1}),
- ([{'abcd': (3, 4)}], {2: 2}),
- ([{'abcd': (1, 10)}], {3: 3}),
+ ([{"abcd": (1, 9)}], {0: 0}),
+ ([{"abcd": (8, 10)}], {1: 1}),
+ ([{"abcd": (3, 4)}], {2: 2}),
+ ([{"abcd": (1, 10)}], {3: 3}),
]
overlaps = overlayFeatureVariations(conds)
- subst = _match_condition({'abcd': 0}, overlaps)
+ subst = _match_condition({"abcd": 0}, overlaps)
assert subst == {}
- subst = _match_condition({'abcd': 1}, overlaps)
+ subst = _match_condition({"abcd": 1}, overlaps)
assert subst == {0: 0, 3: 3}
- subst = _match_condition({'abcd': 2}, overlaps)
+ subst = _match_condition({"abcd": 2}, overlaps)
assert subst == {0: 0, 3: 3}
- subst = _match_condition({'abcd': 3}, overlaps)
+ subst = _match_condition({"abcd": 3}, overlaps)
assert subst == {0: 0, 2: 2, 3: 3}
- subst = _match_condition({'abcd': 5}, overlaps)
+ subst = _match_condition({"abcd": 5}, overlaps)
assert subst == {0: 0, 3: 3}
- subst = _match_condition({'abcd': 10}, overlaps)
+ subst = _match_condition({"abcd": 10}, overlaps)
assert subst == {1: 1, 3: 3}
-def run(test, n, quiet):
+def test_overlayBox():
+ # https://github.com/fonttools/fonttools/issues/3003
+ top = {"opsz": (0.75, 1.0), "wght": (0.5, 1.0)}
+ bot = {"wght": (0.25, 1.0)}
+ intersection, remainder = overlayBox(top, bot)
+ assert intersection == {"opsz": (0.75, 1.0), "wght": (0.5, 1.0)}
+ assert remainder == {"wght": (0.25, 1.0)}
+
+def run(test, n, quiet):
print()
print("%s:" % test.__name__)
input, output = test(n)
@@ -106,16 +125,18 @@ def run(test, n, quiet):
pprint(output)
print()
+
if __name__ == "__main__":
import sys
from pprint import pprint
+
quiet = False
n = 3
- if len(sys.argv) > 1 and sys.argv[1] == '-q':
+ if len(sys.argv) > 1 and sys.argv[1] == "-q":
quiet = True
del sys.argv[1]
if len(sys.argv) > 1:
n = int(sys.argv[1])
- run(test_linear, n=n, quiet=quiet)
- run(test_quadratic, n=n, quiet=quiet)
+ run(_test_linear, n=n, quiet=quiet)
+ run(_test_quadratic, n=n, quiet=quiet)
diff --git a/Tests/varLib/instancer/data/PartialInstancerTest-VF.ttx b/Tests/varLib/instancer/data/PartialInstancerTest-VF.ttx
index 268b5068..2f1754b0 100644
--- a/Tests/varLib/instancer/data/PartialInstancerTest-VF.ttx
+++ b/Tests/varLib/instancer/data/PartialInstancerTest-VF.ttx
@@ -728,7 +728,7 @@
<AxisOrdering value="2"/>
</Axis>
</DesignAxisRecord>
- <!-- AxisValueCount=5 -->
+ <!-- AxisValueCount=7 -->
<AxisValueArray>
<AxisValue index="0" Format="1">
<AxisIndex value="0"/>
@@ -743,7 +743,13 @@
<Value value="400.0"/>
<LinkedValue value="700.0"/>
</AxisValue>
- <AxisValue index="2" Format="2">
+ <AxisValue index="2" Format="1">
+ <AxisIndex value="0"/>
+ <Flags value="0"/>
+ <ValueNameID value="262"/> <!-- Medium -->
+ <Value value="500.0"/>
+ </AxisValue>
+ <AxisValue index="3" Format="2">
<AxisIndex value="0"/>
<Flags value="0"/>
<ValueNameID value="266"/> <!-- Black -->
@@ -751,7 +757,7 @@
<RangeMinValue value="801.0"/>
<RangeMaxValue value="900.0"/>
</AxisValue>
- <AxisValue index="3" Format="4">
+ <AxisValue index="4" Format="4">
<!-- AxisCount=1 -->
<Flags value="0"/>
<ValueNameID value="279"/> <!-- Condensed -->
@@ -760,14 +766,14 @@
<Value value="79.0"/>
</AxisValueRecord>
</AxisValue>
- <AxisValue index="4" Format="3">
+ <AxisValue index="5" Format="3">
<AxisIndex value="2"/>
<Flags value="2"/>
<ValueNameID value="295"/> <!-- Upright -->
<Value value="0.0"/>
<LinkedValue value="1.0"/>
</AxisValue>
- <AxisValue index="3" Format="4">
+ <AxisValue index="6" Format="4">
<!-- AxisCount=1 -->
<Flags value="2"/>
<ValueNameID value="297"/> <!-- Normal -->
@@ -781,6 +787,7 @@
</STAT>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="-0.6667" to="-0.7969"/>
diff --git a/Tests/varLib/instancer/data/PartialInstancerTest2-VF.ttx b/Tests/varLib/instancer/data/PartialInstancerTest2-VF.ttx
index cd7ffa05..3acbf56d 100644
--- a/Tests/varLib/instancer/data/PartialInstancerTest2-VF.ttx
+++ b/Tests/varLib/instancer/data/PartialInstancerTest2-VF.ttx
@@ -1139,6 +1139,7 @@
</STAT>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="-0.6667" to="-0.7969"/>
diff --git a/Tests/varLib/instancer/data/STATInstancerTest.ttx b/Tests/varLib/instancer/data/STATInstancerTest.ttx
index eee24d82..e4506cec 100644
--- a/Tests/varLib/instancer/data/STATInstancerTest.ttx
+++ b/Tests/varLib/instancer/data/STATInstancerTest.ttx
@@ -1336,6 +1336,7 @@
</STAT>
<avar>
+ <version major="1" minor="0"/>
<segment axis="wght">
<mapping from="-1.0" to="-1.0"/>
<mapping from="-0.6667" to="-0.74194"/>
diff --git a/Tests/varLib/instancer/data/SinglePos.ttx b/Tests/varLib/instancer/data/SinglePos.ttx
index 64ffd9f5..dda441e3 100644
--- a/Tests/varLib/instancer/data/SinglePos.ttx
+++ b/Tests/varLib/instancer/data/SinglePos.ttx
@@ -213,6 +213,7 @@
</GPOS>
<avar>
+ <version major="1" minor="0"/>
<segment axis="opsz">
<mapping from="-1.0" to="-1.0"/>
<mapping from="-0.01" to="-0.9"/>
diff --git a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,100.ttx b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,100.ttx
index 776a92f1..c89949c2 100644
--- a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,100.ttx
+++ b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,100.ttx
@@ -74,7 +74,7 @@
<!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
will be recalculated by the compiler -->
<version value="4"/>
- <xAvgCharWidth value="577"/>
+ <xAvgCharWidth value="502"/>
<usWeightClass value="100"/>
<usWidthClass value="5"/>
<fsType value="00000000 00000000"/>
diff --git a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,62.5.ttx b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,62.5.ttx
index 61bc41cc..a78019f8 100644
--- a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,62.5.ttx
+++ b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-100,62.5.ttx
@@ -74,7 +74,7 @@
<!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
will be recalculated by the compiler -->
<version value="4"/>
- <xAvgCharWidth value="577"/>
+ <xAvgCharWidth value="383"/>
<usWeightClass value="100"/>
<usWidthClass value="2"/>
<fsType value="00000000 00000000"/>
diff --git a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,100.ttx b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,100.ttx
index c2d20571..635acd71 100644
--- a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,100.ttx
+++ b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,100.ttx
@@ -74,7 +74,7 @@
<!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
will be recalculated by the compiler -->
<version value="4"/>
- <xAvgCharWidth value="577"/>
+ <xAvgCharWidth value="543"/>
<usWeightClass value="400"/>
<usWidthClass value="5"/>
<fsType value="00000000 00000000"/>
diff --git a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,62.5.ttx b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,62.5.ttx
index 63eeb0e7..fcafe91c 100644
--- a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,62.5.ttx
+++ b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-400,62.5.ttx
@@ -74,7 +74,7 @@
<!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
will be recalculated by the compiler -->
<version value="4"/>
- <xAvgCharWidth value="577"/>
+ <xAvgCharWidth value="428"/>
<usWeightClass value="400"/>
<usWidthClass value="2"/>
<fsType value="00000000 00000000"/>
diff --git a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,100.ttx b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,100.ttx
index 013ba1e7..61c3b2bb 100644
--- a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,100.ttx
+++ b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,100.ttx
@@ -74,7 +74,7 @@
<!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
will be recalculated by the compiler -->
<version value="4"/>
- <xAvgCharWidth value="577"/>
+ <xAvgCharWidth value="609"/>
<usWeightClass value="900"/>
<usWidthClass value="5"/>
<fsType value="00000000 00000000"/>
diff --git a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,62.5.ttx b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,62.5.ttx
index 45e34cbf..fa31886a 100644
--- a/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,62.5.ttx
+++ b/Tests/varLib/instancer/data/test_results/PartialInstancerTest2-VF-instance-900,62.5.ttx
@@ -74,7 +74,7 @@
<!-- The fields 'usFirstCharIndex' and 'usLastCharIndex'
will be recalculated by the compiler -->
<version value="4"/>
- <xAvgCharWidth value="577"/>
+ <xAvgCharWidth value="506"/>
<usWeightClass value="900"/>
<usWidthClass value="2"/>
<fsType value="00000000 00000000"/>
diff --git a/Tests/varLib/instancer/instancer_test.py b/Tests/varLib/instancer/instancer_test.py
index db224cca..20d9194f 100644
--- a/Tests/varLib/instancer/instancer_test.py
+++ b/Tests/varLib/instancer/instancer_test.py
@@ -1,4 +1,5 @@
from fontTools.misc.fixedTools import floatToFixedToFloat
+from fontTools.misc.roundTools import noRound
from fontTools.misc.testTools import stripVariableItemsFromTTX
from fontTools.misc.textTools import Tag
from fontTools import ttLib
@@ -51,7 +52,15 @@ def fvarAxes():
def _get_coordinates(varfont, glyphname):
# converts GlyphCoordinates to a list of (x, y) tuples, so that pytest's
# assert will give us a nicer diff
- return list(varfont["glyf"].getCoordinatesAndControls(glyphname, varfont)[0])
+ return list(
+ varfont["glyf"]._getCoordinatesAndControls(
+ glyphname,
+ varfont["hmtx"].metrics,
+ varfont["vmtx"].metrics,
+ # the tests expect float coordinates
+ round=noRound,
+ )[0]
+ )
class InstantiateGvarTest(object):
@@ -112,6 +121,8 @@ class InstantiateGvarTest(object):
],
)
def test_pin_and_drop_axis(self, varfont, glyph_name, location, expected, optimize):
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateGvar(varfont, location, optimize=optimize)
assert _get_coordinates(varfont, glyph_name) == expected[glyph_name]
@@ -124,9 +135,9 @@ class InstantiateGvarTest(object):
)
def test_full_instance(self, varfont, optimize):
- instancer.instantiateGvar(
- varfont, {"wght": 0.0, "wdth": -0.5}, optimize=optimize
- )
+ location = instancer.NormalizedAxisLimits(wght=0.0, wdth=-0.5)
+
+ instancer.instantiateGvar(varfont, location, optimize=optimize)
assert _get_coordinates(varfont, "hyphen") == [
(33.5, 229),
@@ -169,7 +180,7 @@ class InstantiateGvarTest(object):
assert hmtx["minus"] == (422, 40)
assert vmtx["minus"] == (536, 229)
- location = {"wght": -1.0, "wdth": -1.0}
+ location = instancer.NormalizedAxisLimits(wght=-1.0, wdth=-1.0)
instancer.instantiateGvar(varfont, location)
@@ -206,6 +217,8 @@ class InstantiateCvarTest(object):
],
)
def test_pin_and_drop_axis(self, varfont, location, expected):
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateCvar(varfont, location)
assert list(varfont["cvt "].values) == expected
@@ -217,7 +230,9 @@ class InstantiateCvarTest(object):
)
def test_full_instance(self, varfont):
- instancer.instantiateCvar(varfont, {"wght": -0.5, "wdth": -0.5})
+ location = instancer.NormalizedAxisLimits(wght=-0.5, wdth=-0.5)
+
+ instancer.instantiateCvar(varfont, location)
assert list(varfont["cvt "].values) == [500, -400, 165, 225]
@@ -272,6 +287,8 @@ class InstantiateMVARTest(object):
assert mvar.VarStore.VarData[1].VarRegionCount == 1
assert all(len(item) == 1 for item in mvar.VarStore.VarData[1].Item)
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateMVAR(varfont, location)
for mvar_tag, expected_value in expected.items():
@@ -312,6 +329,8 @@ class InstantiateMVARTest(object):
],
)
def test_full_instance(self, varfont, location, expected):
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateMVAR(varfont, location)
for mvar_tag, expected_value in expected.items():
@@ -344,6 +363,8 @@ class InstantiateHVARTest(object):
],
)
def test_partial_instance(self, varfont, location, expectedRegions, expectedDeltas):
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateHVAR(varfont, location)
assert "HVAR" in varfont
@@ -376,7 +397,9 @@ class InstantiateHVARTest(object):
assert varStore.VarData[varIdx >> 16].Item[varIdx & 0xFFFF] == expectedDeltas
def test_full_instance(self, varfont):
- instancer.instantiateHVAR(varfont, {"wght": 0, "wdth": 0})
+ location = instancer.NormalizedAxisLimits(wght=0, wdth=0)
+
+ instancer.instantiateHVAR(varfont, location)
assert "HVAR" not in varfont
@@ -390,7 +413,9 @@ class InstantiateHVARTest(object):
axis.axisTag = "TEST"
fvar.axes.append(axis)
- instancer.instantiateHVAR(varfont, {"wght": 0, "wdth": 0})
+ location = instancer.NormalizedAxisLimits(wght=0, wdth=0)
+
+ instancer.instantiateHVAR(varfont, location)
assert "HVAR" in varfont
@@ -452,6 +477,8 @@ class InstantiateItemVariationStoreTest(object):
def test_instantiate_default_deltas(
self, varStore, fvarAxes, location, expected_deltas, num_regions
):
+ location = instancer.NormalizedAxisLimits(location)
+
defaultDeltas = instancer.instantiateItemVariationStore(
varStore, fvarAxes, location
)
@@ -504,8 +531,9 @@ class TupleVarStoreAdapterTest(object):
adapter = instancer._TupleVarStoreAdapter(
regions, axisOrder, tupleVarData, itemCounts=[2, 2]
)
+ location = instancer.NormalizedAxisLimits(wght=0.5)
- defaultDeltaArray = adapter.instantiate({"wght": 0.5})
+ defaultDeltaArray = adapter.instantiate(location)
assert defaultDeltaArray == [[15, 45], [0, 0]]
assert adapter.regions == [{"wdth": (-1.0, -1.0, 0)}]
@@ -747,6 +775,8 @@ class InstantiateOTLTest(object):
vf = varfontGDEF
assert "GDEF" in vf
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateOTL(vf, location)
assert "GDEF" in vf
@@ -778,6 +808,8 @@ class InstantiateOTLTest(object):
vf = varfontGDEF
assert "GDEF" in vf
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateOTL(vf, location)
assert "GDEF" in vf
@@ -806,6 +838,8 @@ class InstantiateOTLTest(object):
assert "GDEF" in vf
assert "GPOS" in vf
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateOTL(vf, location)
gdef = vf["GDEF"].table
@@ -839,6 +873,8 @@ class InstantiateOTLTest(object):
assert "GDEF" in vf
assert "GPOS" in vf
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateOTL(vf, location)
assert "GDEF" not in vf
@@ -870,6 +906,8 @@ class InstantiateOTLTest(object):
assert "GDEF" in vf
assert "GPOS" in vf
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateOTL(vf, location)
v1, v2 = expected
@@ -915,6 +953,8 @@ class InstantiateOTLTest(object):
assert "GDEF" in vf
assert "GPOS" in vf
+ location = instancer.NormalizedAxisLimits(location)
+
instancer.instantiateOTL(vf, location)
v1, v2 = expected
@@ -955,7 +995,7 @@ class InstantiateOTLTest(object):
# check that MutatorMerger for ValueRecord doesn't raise AttributeError
# when XAdvDevice is present but there's no corresponding XAdvance.
- instancer.instantiateOTL(vf, {"wght": 0.5})
+ instancer.instantiateOTL(vf, instancer.NormalizedAxisLimits(wght=0.5))
pairPos = vf["GPOS"].table.LookupList.Lookup[0].SubTable[0]
assert pairPos.ValueFormat1 == 0x4
@@ -967,12 +1007,16 @@ class InstantiateOTLTest(object):
class InstantiateAvarTest(object):
@pytest.mark.parametrize("location", [{"wght": 0.0}, {"wdth": 0.0}])
def test_pin_and_drop_axis(self, varfont, location):
+ location = instancer.AxisLimits(location)
+
instancer.instantiateAvar(varfont, location)
assert set(varfont["avar"].segments).isdisjoint(location)
def test_full_instance(self, varfont):
- instancer.instantiateAvar(varfont, {"wght": 0.0, "wdth": 0.0})
+ location = instancer.AxisLimits(wght=0.0, wdth=0.0)
+
+ instancer.instantiateAvar(varfont, location)
assert "avar" not in varfont
@@ -1139,6 +1183,8 @@ class InstantiateAvarTest(object):
],
)
def test_limit_axes(self, varfont, axisLimits, expectedSegments):
+ axisLimits = instancer.AxisLimits(axisLimits)
+
instancer.instantiateAvar(varfont, axisLimits)
newSegments = varfont["avar"].segments
@@ -1162,8 +1208,10 @@ class InstantiateAvarTest(object):
def test_drop_invalid_segment_map(self, varfont, invalidSegmentMap, caplog):
varfont["avar"].segments["wght"] = invalidSegmentMap
+ axisLimits = instancer.AxisLimits(wght=(100, 400))
+
with caplog.at_level(logging.WARNING, logger="fontTools.varLib.instancer"):
- instancer.instantiateAvar(varfont, {"wght": (100, 400)})
+ instancer.instantiateAvar(varfont, axisLimits)
assert "Invalid avar" in caplog.text
assert "wght" not in varfont["avar"].segments
@@ -1210,6 +1258,8 @@ class InstantiateFvarTest(object):
],
)
def test_pin_and_drop_axis(self, varfont, location, instancesLeft):
+ location = instancer.AxisLimits(location)
+
instancer.instantiateFvar(varfont, location)
fvar = varfont["fvar"]
@@ -1224,20 +1274,51 @@ class InstantiateFvarTest(object):
] == instancesLeft
def test_full_instance(self, varfont):
- instancer.instantiateFvar(varfont, {"wght": 0.0, "wdth": 0.0})
+ location = instancer.AxisLimits({"wght": 0.0, "wdth": 0.0})
+
+ instancer.instantiateFvar(varfont, location)
assert "fvar" not in varfont
+ @pytest.mark.parametrize(
+ "location, expected",
+ [
+ ({"wght": (30, 40, 700)}, (100, 100, 700)),
+ ({"wght": (30, 40, None)}, (100, 100, 900)),
+ ({"wght": (30, None, 700)}, (100, 400, 700)),
+ ({"wght": (None, 200, 700)}, (100, 200, 700)),
+ ({"wght": (40, None, None)}, (100, 400, 900)),
+ ({"wght": (None, 40, None)}, (100, 100, 900)),
+ ({"wght": (None, None, 700)}, (100, 400, 700)),
+ ({"wght": (None, None, None)}, (100, 400, 900)),
+ ],
+ )
+ def test_axis_limits(self, varfont, location, expected):
+ location = instancer.AxisLimits(location)
+
+ varfont = instancer.instantiateVariableFont(varfont, location)
+
+ fvar = varfont["fvar"]
+ axes = {a.axisTag: a for a in fvar.axes}
+ assert axes["wght"].minValue == expected[0]
+ assert axes["wght"].defaultValue == expected[1]
+ assert axes["wght"].maxValue == expected[2]
+
class InstantiateSTATTest(object):
@pytest.mark.parametrize(
"location, expected",
[
({"wght": 400}, ["Regular", "Condensed", "Upright", "Normal"]),
- ({"wdth": 100}, ["Thin", "Regular", "Black", "Upright", "Normal"]),
+ (
+ {"wdth": 100},
+ ["Thin", "Regular", "Medium", "Black", "Upright", "Normal"],
+ ),
],
)
def test_pin_and_drop_axis(self, varfont, location, expected):
+ location = instancer.AxisLimits(location)
+
instancer.instantiateSTAT(varfont, location)
stat = varfont["STAT"].table
@@ -1256,7 +1337,7 @@ class InstantiateSTATTest(object):
def test_skip_table_no_axis_value_array(self, varfont):
varfont["STAT"].table.AxisValueArray = None
- instancer.instantiateSTAT(varfont, {"wght": 100})
+ instancer.instantiateSTAT(varfont, instancer.AxisLimits(wght=100))
assert len(varfont["STAT"].table.DesignAxisRecord.Axis) == 3
assert varfont["STAT"].table.AxisValueArray is None
@@ -1318,7 +1399,9 @@ class InstantiateSTATTest(object):
return result
def test_limit_axes(self, varfont2):
- instancer.instantiateSTAT(varfont2, {"wght": (400, 500), "wdth": (75, 100)})
+ axisLimits = instancer.AxisLimits({"wght": (400, 500), "wdth": (75, 100)})
+
+ instancer.instantiateSTAT(varfont2, axisLimits)
assert len(varfont2["STAT"].table.AxisValueArray.AxisValue) == 5
assert self.get_STAT_axis_values(varfont2["STAT"].table) == [
@@ -1344,11 +1427,11 @@ class InstantiateSTATTest(object):
axisValue.AxisValueRecord.append(rec)
stat.AxisValueArray.AxisValue.append(axisValue)
- instancer.instantiateSTAT(varfont2, {"wght": (100, 600)})
+ instancer.instantiateSTAT(varfont2, instancer.AxisLimits(wght=(100, 600)))
assert axisValue in varfont2["STAT"].table.AxisValueArray.AxisValue
- instancer.instantiateSTAT(varfont2, {"wdth": (62.5, 87.5)})
+ instancer.instantiateSTAT(varfont2, instancer.AxisLimits(wdth=(62.5, 87.5)))
assert axisValue not in varfont2["STAT"].table.AxisValueArray.AxisValue
@@ -1359,7 +1442,7 @@ class InstantiateSTATTest(object):
stat.AxisValueArray.AxisValue.append(axisValue)
with caplog.at_level(logging.WARNING, logger="fontTools.varLib.instancer"):
- instancer.instantiateSTAT(varfont2, {"wght": 400})
+ instancer.instantiateSTAT(varfont2, instancer.AxisLimits(wght=400))
assert "Unknown AxisValue table format (5)" in caplog.text
assert axisValue in varfont2["STAT"].table.AxisValueArray.AxisValue
@@ -1452,6 +1535,18 @@ class InstantiateVariableFontTest(object):
assert _dump_ttx(instance) == expected
+ def test_move_weight_width_axis_default(self, varfont2):
+ # https://github.com/fonttools/fonttools/issues/2885
+ assert varfont2["OS/2"].usWeightClass == 400
+ assert varfont2["OS/2"].usWidthClass == 5
+
+ varfont = instancer.instantiateVariableFont(
+ varfont2, {"wght": (100, 500, 900), "wdth": 87.5}
+ )
+
+ assert varfont["OS/2"].usWeightClass == 500
+ assert varfont["OS/2"].usWidthClass == 4
+
@pytest.mark.parametrize(
"overlap, wght",
[
@@ -1482,20 +1577,39 @@ class InstantiateVariableFontTest(object):
location = {"wght": 280, "opsz": 18}
instance = instancer.instantiateVariableFont(
- varfont, location,
+ varfont,
+ location,
)
- expected = _get_expected_instance_ttx(
- "SinglePos", *location.values()
- )
+ expected = _get_expected_instance_ttx("SinglePos", *location.values())
assert _dump_ttx(instance) == expected
+ def test_varComposite(self):
+ input_path = os.path.join(
+ TESTDATA, "..", "..", "..", "ttLib", "data", "varc-ac00-ac01.ttf"
+ )
+ varfont = ttLib.TTFont(input_path)
+
+ location = {"wght": 600}
+
+ instance = instancer.instantiateVariableFont(
+ varfont,
+ location,
+ )
+
+ location = {"0000": 0.5}
+
+ instance = instancer.instantiateVariableFont(
+ varfont,
+ location,
+ )
def _conditionSetAsDict(conditionSet, axisOrder):
result = {}
- for cond in conditionSet.ConditionTable:
+ conditionSets = conditionSet.ConditionTable if conditionSet is not None else []
+ for cond in conditionSets:
assert cond.Format == 1
axisTag = axisOrder[cond.AxisIndex]
result[axisTag] = (cond.FilterRangeMinValue, cond.FilterRangeMaxValue)
@@ -1541,10 +1655,11 @@ class InstantiateFeatureVariationsTest(object):
({"wght": 0}, {}, [({"cntr": (0.75, 1.0)}, {"uni0041": "uni0061"})]),
(
{"wght": -1.0},
- {},
+ {"uni0061": "uni0041"},
[
({"cntr": (0, 0.25)}, {"uni0061": "uni0041"}),
({"cntr": (0.75, 1.0)}, {"uni0041": "uni0061"}),
+ ({}, {}),
],
),
(
@@ -1554,7 +1669,8 @@ class InstantiateFeatureVariationsTest(object):
(
{"cntr": (0.75, 1.0)},
{"uni0024": "uni0024.nostroke", "uni0041": "uni0061"},
- )
+ ),
+ ({}, {}),
],
),
(
@@ -1572,7 +1688,66 @@ class InstantiateFeatureVariationsTest(object):
(
{"wght": (0.20886, 1.0)},
{"uni0024": "uni0024.nostroke", "uni0041": "uni0061"},
- )
+ ),
+ ({}, {}),
+ ],
+ ),
+ (
+ {"cntr": (-0.5, 0, 1.0)},
+ {},
+ [
+ (
+ {"wght": (0.20886, 1.0), "cntr": (0.75, 1)},
+ {"uni0024": "uni0024.nostroke", "uni0041": "uni0061"},
+ ),
+ (
+ {"wght": (-1.0, -0.45654), "cntr": (0, 0.25)},
+ {"uni0061": "uni0041"},
+ ),
+ (
+ {"cntr": (0.75, 1.0)},
+ {"uni0041": "uni0061"},
+ ),
+ (
+ {"wght": (0.20886, 1.0)},
+ {"uni0024": "uni0024.nostroke"},
+ ),
+ ],
+ ),
+ (
+ {"cntr": (0.8, 0.9, 1.0)},
+ {"uni0041": "uni0061"},
+ [
+ (
+ {"wght": (0.20886, 1.0)},
+ {"uni0024": "uni0024.nostroke", "uni0041": "uni0061"},
+ ),
+ (
+ {},
+ {"uni0041": "uni0061"},
+ ),
+ ],
+ ),
+ (
+ {"cntr": (0.7, 0.9, 1.0)},
+ {"uni0041": "uni0061"},
+ [
+ (
+ {"cntr": (-0.7499999999999999, 1.0), "wght": (0.20886, 1.0)},
+ {"uni0024": "uni0024.nostroke", "uni0041": "uni0061"},
+ ),
+ (
+ {"cntr": (-0.7499999999999999, 1.0)},
+ {"uni0041": "uni0061"},
+ ),
+ (
+ {"wght": (0.20886, 1.0)},
+ {"uni0024": "uni0024.nostroke"},
+ ),
+ (
+ {},
+ {},
+ ),
],
),
],
@@ -1589,25 +1764,30 @@ class InstantiateFeatureVariationsTest(object):
]
)
- instancer.instantiateFeatureVariations(font, location)
+ limits = instancer.NormalizedAxisLimits(location)
+ instancer.instantiateFeatureVariations(font, limits)
gsub = font["GSUB"].table
featureVariations = gsub.FeatureVariations
assert featureVariations.FeatureVariationCount == len(expectedRecords)
- axisOrder = [a.axisTag for a in font["fvar"].axes if a.axisTag not in location]
+ axisOrder = [
+ a.axisTag
+ for a in font["fvar"].axes
+ if a.axisTag not in location or isinstance(location[a.axisTag], tuple)
+ ]
for i, (expectedConditionSet, expectedSubs) in enumerate(expectedRecords):
rec = featureVariations.FeatureVariationRecord[i]
conditionSet = _conditionSetAsDict(rec.ConditionSet, axisOrder)
- assert conditionSet == expectedConditionSet
+ assert conditionSet == expectedConditionSet, i
subsRecord = rec.FeatureTableSubstitution.SubstitutionRecord[0]
lookupIndices = subsRecord.Feature.LookupListIndex
substitutions = _getSubstitutions(gsub, lookupIndices)
- assert substitutions == expectedSubs
+ assert substitutions == expectedSubs, i
appliedLookupIndices = gsub.FeatureList.FeatureRecord[0].Feature.LookupListIndex
@@ -1638,11 +1818,16 @@ class InstantiateFeatureVariationsTest(object):
),
]
)
+ gsub = font["GSUB"].table
+ assert gsub.FeatureVariations
+ assert gsub.Version == 0x00010001
+
+ location = instancer.NormalizedAxisLimits(location)
instancer.instantiateFeatureVariations(font, location)
- gsub = font["GSUB"].table
assert not hasattr(gsub, "FeatureVariations")
+ assert gsub.Version == 0x00010000
if appliedSubs:
lookupIndices = gsub.FeatureList.FeatureRecord[0].Feature.LookupListIndex
@@ -1650,6 +1835,24 @@ class InstantiateFeatureVariationsTest(object):
else:
assert not gsub.FeatureList.FeatureRecord
+ def test_null_conditionset(self):
+ # A null ConditionSet offset should be treated like an empty ConditionTable, i.e.
+ # all contexts are matched; see https://github.com/fonttools/fonttools/issues/3211
+ font = makeFeatureVarsFont(
+ [([{"wght": (-1.0, 1.0)}], {"uni0024": "uni0024.nostroke"})]
+ )
+ gsub = font["GSUB"].table
+ gsub.FeatureVariations.FeatureVariationRecord[0].ConditionSet = None
+
+ location = instancer.NormalizedAxisLimits({"wght": 0.5})
+ instancer.instantiateFeatureVariations(font, location)
+
+ assert not hasattr(gsub, "FeatureVariations")
+ assert gsub.Version == 0x00010000
+
+ lookupIndices = gsub.FeatureList.FeatureRecord[0].Feature.LookupListIndex
+ assert _getSubstitutions(gsub, lookupIndices) == {"uni0024": "uni0024.nostroke"}
+
def test_unsupported_condition_format(self, caplog):
font = makeFeatureVarsFont(
[
@@ -1665,7 +1868,9 @@ class InstantiateFeatureVariationsTest(object):
rec1.ConditionSet.ConditionTable[0].Format = 2
with caplog.at_level(logging.WARNING, logger="fontTools.varLib.instancer"):
- instancer.instantiateFeatureVariations(font, {"wdth": 0})
+ instancer.instantiateFeatureVariations(
+ font, instancer.NormalizedAxisLimits(wdth=0)
+ )
assert (
"Condition table 0 of FeatureVariationRecord 0 "
@@ -1695,7 +1900,7 @@ class InstantiateFeatureVariationsTest(object):
class LimitTupleVariationAxisRangesTest:
def check_limit_single_var_axis_range(self, var, axisTag, axisRange, expected):
- result = instancer.limitTupleVariationAxisRange(var, axisTag, axisRange)
+ result = instancer.changeTupleVariationAxisLimit(var, axisTag, axisRange)
print(result)
assert len(result) == len(expected)
@@ -1758,8 +1963,8 @@ class LimitTupleVariationAxisRangesTest:
"wght",
0.4,
[
- TupleVariation({"wght": (0.0, 0.5, 1.99994)}, [100, 100]),
- TupleVariation({"wght": (0.5, 1.0, 1.0)}, [8.33333, 8.33333]),
+ TupleVariation({"wght": (0.0, 0.5, 1.0)}, [100, 100]),
+ TupleVariation({"wght": (0.5, 1.0, 1.0)}, [75, 75]),
],
),
(
@@ -1777,7 +1982,7 @@ class LimitTupleVariationAxisRangesTest:
],
)
def test_positive_var(self, var, axisTag, newMax, expected):
- axisRange = instancer.NormalizedAxisRange(0, newMax)
+ axisRange = instancer.NormalizedAxisTripleAndDistances(0, 0, newMax)
self.check_limit_single_var_axis_range(var, axisTag, axisRange, expected)
@pytest.mark.parametrize(
@@ -1837,8 +2042,8 @@ class LimitTupleVariationAxisRangesTest:
"wght",
-0.4,
[
- TupleVariation({"wght": (-2.0, -0.5, -0.0)}, [100, 100]),
- TupleVariation({"wght": (-1.0, -1.0, -0.5)}, [8.33333, 8.33333]),
+ TupleVariation({"wght": (-1.0, -0.5, -0.0)}, [100, 100]),
+ TupleVariation({"wght": (-1.0, -1.0, -0.5)}, [75, 75]),
],
),
(
@@ -1856,30 +2061,30 @@ class LimitTupleVariationAxisRangesTest:
],
)
def test_negative_var(self, var, axisTag, newMin, expected):
- axisRange = instancer.NormalizedAxisRange(newMin, 0)
+ axisRange = instancer.NormalizedAxisTripleAndDistances(newMin, 0, 0, 1, 1)
self.check_limit_single_var_axis_range(var, axisTag, axisRange, expected)
@pytest.mark.parametrize(
- "oldRange, newRange, expected",
+ "oldRange, newLimit, expected",
[
- ((1.0, -1.0), (-1.0, 1.0), None), # invalid oldRange min > max
- ((0.6, 1.0), (0, 0.5), None),
- ((-1.0, -0.6), (-0.5, 0), None),
- ((0.4, 1.0), (0, 0.5), (0.8, 1.0)),
- ((-1.0, -0.4), (-0.5, 0), (-1.0, -0.8)),
- ((0.4, 1.0), (0, 0.4), (1.0, 1.0)),
- ((-1.0, -0.4), (-0.4, 0), (-1.0, -1.0)),
- ((-0.5, 0.5), (-0.4, 0.4), (-1.0, 1.0)),
- ((0, 1.0), (-1.0, 0), (0, 0)), # or None?
- ((-1.0, 0), (0, 1.0), (0, 0)), # or None?
+ ((1.0, -1.0), (-1.0, 0, 1.0), None), # invalid oldRange min > max
+ ((0.6, 1.0), (0, 0, 0.5), None),
+ ((-1.0, -0.6), (-0.5, 0, 0), None),
+ ((0.4, 1.0), (0, 0, 0.5), (0.8, 1.0)),
+ ((-1.0, -0.4), (-0.5, 0, 0), (-1.0, -0.8)),
+ ((0.4, 1.0), (0, 0, 0.4), (1.0, 1.0)),
+ ((-1.0, -0.4), (-0.4, 0, 0), (-1.0, -1.0)),
+ ((-0.5, 0.5), (-0.4, 0, 0.4), (-1.0, 1.0)),
+ ((0, 1.0), (-1.0, 0, 0), (0, 0)), # or None?
+ ((-1.0, 0), (0, 0, 1.0), (0, 0)), # or None?
],
)
-def test_limitFeatureVariationConditionRange(oldRange, newRange, expected):
+def test_limitFeatureVariationConditionRange(oldRange, newLimit, expected):
condition = featureVars.buildConditionTable(0, *oldRange)
- result = instancer._limitFeatureVariationConditionRange(
- condition, instancer.NormalizedAxisRange(*newRange)
+ result = instancer.featureVars._limitFeatureVariationConditionRange(
+ condition, instancer.NormalizedAxisTripleAndDistances(*newLimit, 1, 1)
)
assert result == expected
@@ -1890,12 +2095,33 @@ def test_limitFeatureVariationConditionRange(oldRange, newRange, expected):
[
(["wght=400", "wdth=100"], {"wght": 400, "wdth": 100}),
(["wght=400:900"], {"wght": (400, 900)}),
- (["slnt=11.4"], {"slnt": pytest.approx(11.399994)}),
+ (["wght=400:700:900"], {"wght": (400, 700, 900)}),
+ (["slnt=11.4"], {"slnt": 11.399994}),
(["ABCD=drop"], {"ABCD": None}),
+ (["wght=:500:"], {"wght": (None, 500, None)}),
+ (["wght=::700"], {"wght": (None, None, 700)}),
+ (["wght=200::"], {"wght": (200, None, None)}),
+ (["wght=200:300:"], {"wght": (200, 300, None)}),
+ (["wght=:300:500"], {"wght": (None, 300, 500)}),
+ (["wght=300::700"], {"wght": (300, None, 700)}),
+ (["wght=300:700"], {"wght": (300, None, 700)}),
+ (["wght=:700"], {"wght": (None, None, 700)}),
+ (["wght=200:"], {"wght": (200, None, None)}),
],
)
def test_parseLimits(limits, expected):
- assert instancer.parseLimits(limits) == expected
+ limits = instancer.parseLimits(limits)
+ expected = instancer.AxisLimits(expected)
+
+ assert limits.keys() == expected.keys()
+ for axis, triple in limits.items():
+ expected_triple = expected[axis]
+ if expected_triple is None:
+ assert triple is None
+ else:
+ assert isinstance(triple, instancer.AxisTriple)
+ assert isinstance(expected_triple, instancer.AxisTriple)
+ assert triple == pytest.approx(expected_triple)
@pytest.mark.parametrize(
@@ -1906,27 +2132,35 @@ def test_parseLimits_invalid(limits):
instancer.parseLimits(limits)
-def test_normalizeAxisLimits_tuple(varfont):
- normalized = instancer.normalizeAxisLimits(varfont, {"wght": (100, 400)})
- assert normalized == {"wght": (-1.0, 0)}
+@pytest.mark.parametrize(
+ "limits, expected",
+ [
+ # 300, 500 come from the font having 100,400,900 fvar axis limits.
+ ({"wght": (100, 400)}, {"wght": (-1.0, 0, 0, 300, 500)}),
+ ({"wght": (100, 400, 400)}, {"wght": (-1.0, 0, 0, 300, 500)}),
+ ({"wght": (100, 300, 400)}, {"wght": (-1.0, -0.5, 0, 300, 500)}),
+ ],
+)
+def test_normalizeAxisLimits(varfont, limits, expected):
+ limits = instancer.AxisLimits(limits)
+ normalized = limits.normalize(varfont)
-def test_normalizeAxisLimits_unsupported_range(varfont):
- with pytest.raises(NotImplementedError, match="Unsupported range"):
- instancer.normalizeAxisLimits(varfont, {"wght": (401, 700)})
+ assert normalized == instancer.NormalizedAxisLimits(expected)
def test_normalizeAxisLimits_no_avar(varfont):
del varfont["avar"]
- normalized = instancer.normalizeAxisLimits(varfont, {"wght": (400, 500)})
+ limits = instancer.AxisLimits(wght=(400, 400, 500))
+ normalized = limits.normalize(varfont)
- assert normalized["wght"] == pytest.approx((0, 0.2), 1e-4)
+ assert normalized["wght"] == pytest.approx((0, 0, 0.2, 300, 500), 1e-4)
def test_normalizeAxisLimits_missing_from_fvar(varfont):
with pytest.raises(ValueError, match="not present in fvar"):
- instancer.normalizeAxisLimits(varfont, {"ZZZZ": 1000})
+ instancer.AxisLimits({"ZZZZ": 1000}).normalize(varfont)
def test_sanityCheckVariableTables(varfont):
diff --git a/Tests/varLib/instancer/names_test.py b/Tests/varLib/instancer/names_test.py
index 9774458a..0d7ef1a8 100644
--- a/Tests/varLib/instancer/names_test.py
+++ b/Tests/varLib/instancer/names_test.py
@@ -115,7 +115,7 @@ def _test_name_records(varfont, expected, isNonRIBBI, platforms=[0x409]):
),
# Condensed with unpinned weights
(
- {"wdth": 79, "wght": instancer.AxisRange(400, 900)},
+ {"wdth": 79, "wght": (400, 900)},
{
(1, 3, 1, 0x409): "Test Variable Font Condensed",
(2, 3, 1, 0x409): "Regular",
@@ -126,6 +126,19 @@ def _test_name_records(varfont, expected, isNonRIBBI, platforms=[0x409]):
},
True,
),
+ # Restrict weight and move default, new minimum (500) > old default (400)
+ (
+ {"wght": (500, 900)},
+ {
+ (1, 3, 1, 0x409): "Test Variable Font Medium",
+ (2, 3, 1, 0x409): "Regular",
+ (3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Medium",
+ (6, 3, 1, 0x409): "TestVariableFont-Medium",
+ (16, 3, 1, 0x409): "Test Variable Font",
+ (17, 3, 1, 0x409): "Medium",
+ },
+ True,
+ ),
],
)
def test_updateNameTable_with_registered_axes_ribbi(
@@ -215,7 +228,7 @@ def test_updateNameTable_with_multilingual_names(varfont, limits, expected, isNo
def test_updateNameTable_missing_axisValues(varfont):
- with pytest.raises(ValueError, match="Cannot find Axis Values \['wght=200'\]"):
+ with pytest.raises(ValueError, match="Cannot find Axis Values {'wght': 200}"):
instancer.names.updateNameTable(varfont, {"wght": 200})
@@ -257,7 +270,7 @@ def test_updateNameTable_missing_stat(varfont):
def test_updateNameTable_vf_with_italic_attribute(
varfont, limits, expected, isNonRIBBI
):
- font_link_axisValue = varfont["STAT"].table.AxisValueArray.AxisValue[4]
+ font_link_axisValue = varfont["STAT"].table.AxisValueArray.AxisValue[5]
# Unset ELIDABLE_AXIS_VALUE_NAME flag
font_link_axisValue.Flags &= ~instancer.names.ELIDABLE_AXIS_VALUE_NAME
font_link_axisValue.ValueNameID = 294 # Roman --> Italic
@@ -320,3 +333,21 @@ def test_updateNameTable_existing_subfamily_name_is_not_regular(varfont):
instancer.names.updateNameTable(varfont, {"wght": 100})
expected = {(2, 3, 1, 0x409): "Regular", (17, 3, 1, 0x409): "Thin"}
_test_name_records(varfont, expected, isNonRIBBI=True)
+
+
+def test_name_irrelevant_axes(varfont):
+ # Cannot update name table if not on a named axis value location
+ with pytest.raises(ValueError) as excinfo:
+ location = {"wght": 400, "wdth": 90}
+ instance = instancer.instantiateVariableFont(
+ varfont, location, updateFontNames=True
+ )
+ assert "Cannot find Axis Values" in str(excinfo.value)
+
+ # Now let's make the wdth axis "irrelevant" to naming (no axis values)
+ varfont["STAT"].table.AxisValueArray.AxisValue.pop(6)
+ varfont["STAT"].table.AxisValueArray.AxisValue.pop(4)
+ location = {"wght": 400, "wdth": 90}
+ instance = instancer.instantiateVariableFont(
+ varfont, location, updateFontNames=True
+ )
diff --git a/Tests/varLib/instancer/solver_test.py b/Tests/varLib/instancer/solver_test.py
new file mode 100644
index 00000000..b9acf82f
--- /dev/null
+++ b/Tests/varLib/instancer/solver_test.py
@@ -0,0 +1,300 @@
+from fontTools.varLib.instancer import solver
+from fontTools.varLib.instancer import NormalizedAxisTripleAndDistances
+import pytest
+
+
+class RebaseTentTest(object):
+ @pytest.mark.parametrize(
+ "tent, axisRange, expected",
+ [
+ # Case 1: # Pin at default
+ pytest.param((0, 1, 1), (0.0, 0.0, 0.0), []),
+ # Case 1:
+ pytest.param((0.3, 0.5, 0.8), (0.1, 0.2, 0.3), []),
+ # Pin axis
+ pytest.param(
+ (0, 1, 1),
+ (0.5, 0.5, 0.5),
+ [
+ (0.5, None),
+ ],
+ ),
+ # Case 2:
+ pytest.param(
+ (0, 1, 1),
+ (-1, 0, 0.5),
+ [
+ (0.5, (0, 1, 1)),
+ ],
+ ),
+ # Case 2:
+ pytest.param(
+ (0, 1, 1),
+ (-1, 0, 0.75),
+ [
+ (0.75, (0, 1, 1)),
+ ],
+ ),
+ #
+ # Without gain:
+ #
+ # Case 3
+ pytest.param(
+ (0, 0.2, 1),
+ (-1, 0, 0.8),
+ [
+ (1, (0, 0.25, 1.25)),
+ ],
+ ),
+ # Case 3 boundary
+ pytest.param(
+ (0, 0.4, 1),
+ (-1, 0, 0.5),
+ [
+ (1, (0, 0.8, 1.99994)),
+ ],
+ ),
+ # Case 4
+ pytest.param(
+ (0, 0.25, 1),
+ (-1, 0, 0.4),
+ [
+ (1, (0, 0.625, 1)),
+ (0.8, (0.625, 1, 1)),
+ ],
+ ),
+ pytest.param(
+ (0.25, 0.3, 1.05),
+ (0, 0.2, 0.4),
+ [
+ (1, (0.25, 0.5, 1)),
+ (2.6 / 3, (0.5, 1, 1)),
+ ],
+ ),
+ # Case 4 boundary
+ pytest.param(
+ (0.25, 0.5, 1),
+ (0, 0.25, 0.5),
+ [
+ (1, (0, 1, 1)),
+ ],
+ ),
+ #
+ # With gain:
+ #
+ # Case 3a/1neg
+ pytest.param(
+ (0.0, 0.5, 1),
+ (0, 0.5, 1),
+ [
+ (1, None),
+ (-1, (0, 1, 1)),
+ (-1, (-1, -1, 0)),
+ ],
+ ),
+ pytest.param(
+ (0.0, 0.5, 1),
+ (0, 0.5, 0.75),
+ [
+ (1, None),
+ (-0.5, (0, 1, 1)),
+ (-1, (-1, -1, 0)),
+ ],
+ ),
+ pytest.param(
+ (0.0, 0.5, 1),
+ (0, 0.25, 0.8),
+ [
+ (0.5, None),
+ (0.5, (0, 0.45454545, 0.9090909090)),
+ (-0.1, (0.9090909090, 1.0, 1.0)),
+ (-0.5, (-1, -1, 0)),
+ ],
+ ),
+ # Case 3a/1neg
+ pytest.param(
+ (0.0, 0.5, 2),
+ (0.2, 0.5, 0.8),
+ [
+ (1, None),
+ (-0.2, (0, 1, 1)),
+ (-0.6, (-1, -1, 0)),
+ ],
+ ),
+ # Case 3a/1neg
+ pytest.param(
+ (0.0, 0.5, 2),
+ (0.2, 0.5, 1),
+ [
+ (1, None),
+ (-1 / 3, (0, 1, 1)),
+ (-0.6, (-1, -1, 0)),
+ ],
+ ),
+ # Case 3
+ pytest.param(
+ (0, 0.5, 1),
+ (0.25, 0.25, 0.75),
+ [
+ (0.5, None),
+ (0.5, (0, 0.5, 1.0)),
+ ],
+ ),
+ # Case 1neg
+ pytest.param(
+ (0.0, 0.5, 1),
+ (0, 0.25, 0.5),
+ [
+ (0.5, None),
+ (0.5, (0, 1, 1)),
+ (-0.5, (-1, -1, 0)),
+ ],
+ ),
+ # Case 2neg
+ pytest.param(
+ (0.05, 0.55, 1),
+ (0, 0.25, 0.5),
+ [
+ (0.4, None),
+ (0.5, (0, 1, 1)),
+ (-0.4, (-1, -0.8, 0)),
+ (-0.4, (-1, -1, -0.8)),
+ ],
+ ),
+ # Case 2neg, other side
+ pytest.param(
+ (-1, -0.55, -0.05),
+ (-0.5, -0.25, 0),
+ [
+ (0.4, None),
+ (0.5, (-1, -1, 0)),
+ (-0.4, (0, 0.8, 1)),
+ (-0.4, (0.8, 1, 1)),
+ ],
+ ),
+ #
+ # Misc corner cases
+ #
+ pytest.param(
+ (0.5, 0.5, 0.5),
+ (0.5, 0.5, 0.5),
+ [
+ (1, None),
+ ],
+ ),
+ pytest.param(
+ (0.3, 0.5, 0.7),
+ (0.1, 0.5, 0.9),
+ [
+ (1, None),
+ (-1, (0, 0.5, 1)),
+ (-1, (0.5, 1, 1)),
+ (-1, (-1, -0.5, 0)),
+ (-1, (-1, -1, -0.5)),
+ ],
+ ),
+ pytest.param(
+ (0.5, 0.5, 0.5),
+ (0.25, 0.25, 0.5),
+ [
+ (1, (1, 1, 1)),
+ ],
+ ),
+ pytest.param(
+ (0.5, 0.5, 0.5),
+ (0.25, 0.35, 0.5),
+ [
+ (1, (1, 1, 1)),
+ ],
+ ),
+ pytest.param(
+ (0.5, 0.5, 0.55),
+ (0.25, 0.35, 0.5),
+ [
+ (1, (1, 1, 1)),
+ ],
+ ),
+ pytest.param(
+ (0.5, 0.5, 1),
+ (0.5, 0.5, 1),
+ [
+ (1, None),
+ (-1, (0, 1, 1)),
+ ],
+ ),
+ pytest.param(
+ (0.25, 0.5, 1),
+ (0.5, 0.5, 1),
+ [
+ (1, None),
+ (-1, (0, 1, 1)),
+ ],
+ ),
+ pytest.param(
+ (0, 0.2, 1),
+ (0, 0, 0.5),
+ [
+ (1, (0, 0.4, 1.99994)),
+ ],
+ ),
+ # https://github.com/fonttools/fonttools/issues/3139
+ pytest.param(
+ (0, 0.5, 1),
+ (-1, 0.25, 1),
+ [
+ (0.5, None),
+ (0.5, (0.0, 1 / 3, 2 / 3)),
+ (-0.5, (2 / 3, 1, 1)),
+ (-0.5, (-1, -0.2, 0)),
+ (-0.5, (-1, -1, -0.2)),
+ ],
+ ),
+ # Dirac delta at new default. Fancy!
+ pytest.param(
+ (0.5, 0.5, 0.5),
+ (0, 0.5, 1),
+ [
+ (1, None),
+ (-1, (0, 0.0001220703, 1)),
+ (-1, (0.0001220703, 1, 1)),
+ (-1, (-1, -0.0001220703, 0)),
+ (-1, (-1, -1, -0.0001220703)),
+ ],
+ ),
+ # https://github.com/fonttools/fonttools/issues/3177
+ pytest.param(
+ (0, 1, 1),
+ (-1, -0.5, +1, 1, 1),
+ [
+ (1.0, (1 / 3, 1.0, 1.0)),
+ ],
+ ),
+ pytest.param(
+ (0, 1, 1),
+ (-1, -0.5, +1, 2, 1),
+ [
+ (1.0, (0.5, 1.0, 1.0)),
+ ],
+ ),
+ # https://github.com/fonttools/fonttools/issues/3291
+ pytest.param(
+ (0.6, 0.7, 0.8),
+ (-1, 0.2, +1, 1, 1),
+ [
+ (1.0, (0.5, 0.625, 0.75)),
+ ],
+ ),
+ ],
+ )
+ def test_rebaseTent(self, tent, axisRange, expected):
+ axisRange = NormalizedAxisTripleAndDistances(*axisRange)
+
+ sol = solver.rebaseTent(tent, axisRange)
+
+ a = pytest.approx
+ expected = [
+ (a(scalar), (a(v[0]), a(v[1]), a(v[2])) if v is not None else None)
+ for scalar, v in expected
+ ]
+
+ assert sol == expected, (tent, axisRange)
diff --git a/Tests/varLib/interpolatable_test.py b/Tests/varLib/interpolatable_test.py
index a30be71e..10b9cc30 100644
--- a/Tests/varLib/interpolatable_test.py
+++ b/Tests/varLib/interpolatable_test.py
@@ -5,6 +5,7 @@ import shutil
import sys
import tempfile
import unittest
+import pytest
try:
import scipy
@@ -35,12 +36,12 @@ class InterpolatableTest(unittest.TestCase):
shutil.rmtree(self.tempdir)
@staticmethod
- def get_test_input(test_file_or_folder):
+ def get_test_input(*test_file_or_folder):
path, _ = os.path.split(__file__)
- return os.path.join(path, "data", test_file_or_folder)
+ return os.path.join(path, "data", *test_file_or_folder)
@staticmethod
- def get_file_list(folder, suffix, prefix=''):
+ def get_file_list(folder, suffix, prefix=""):
all_files = os.listdir(folder)
file_list = []
for p in all_files:
@@ -51,8 +52,7 @@ class InterpolatableTest(unittest.TestCase):
def temp_path(self, suffix):
self.temp_dir()
self.num_tempfiles += 1
- return os.path.join(self.tempdir,
- "tmp%d%s" % (self.num_tempfiles, suffix))
+ return os.path.join(self.tempdir, "tmp%d%s" % (self.num_tempfiles, suffix))
def temp_dir(self):
if not self.tempdir:
@@ -60,41 +60,201 @@ class InterpolatableTest(unittest.TestCase):
def compile_font(self, path, suffix, temp_dir):
ttx_filename = os.path.basename(path)
- savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix))
+ savepath = os.path.join(temp_dir, ttx_filename.replace(".ttx", suffix))
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(path)
font.save(savepath, reorderTables=None)
return font, savepath
-# -----
-# Tests
-# -----
+ # -----
+ # Tests
+ # -----
def test_interpolatable_ttf(self):
- suffix = '.ttf'
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ suffix = ".ttf"
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
ttf_paths = self.get_file_list(self.tempdir, suffix)
self.assertIsNone(interpolatable_main(ttf_paths))
-
def test_interpolatable_otf(self):
- suffix = '.otf'
- ttx_dir = self.get_test_input('master_ttx_interpolatable_otf')
+ suffix = ".otf"
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_otf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
otf_paths = self.get_file_list(self.tempdir, suffix)
self.assertIsNone(interpolatable_main(otf_paths))
+ def test_interpolatable_ufo(self):
+ ttx_dir = self.get_test_input("master_ufo")
+ ufo_paths = self.get_file_list(ttx_dir, ".ufo", "TestFamily2-")
+ self.assertIsNone(interpolatable_main(ufo_paths))
+
+ def test_designspace(self):
+ designspace_path = self.get_test_input("InterpolateLayout.designspace")
+ self.assertIsNone(interpolatable_main([designspace_path]))
+
+ def test_glyphsapp(self):
+ pytest.importorskip("glyphsLib")
+ glyphsapp_path = self.get_test_input("InterpolateLayout.glyphs")
+ self.assertIsNone(interpolatable_main([glyphsapp_path]))
+
+ def test_VF(self):
+ suffix = ".ttf"
+ ttx_dir = self.get_test_input("master_ttx_varfont_ttf")
+
+ self.temp_dir()
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "SparseMasters-")
+ for path in ttx_paths:
+ self.compile_font(path, suffix, self.tempdir)
+
+ ttf_paths = self.get_file_list(self.tempdir, suffix)
+
+ problems = interpolatable_main(["--quiet"] + ttf_paths)
+ self.assertIsNone(problems)
+
+ def test_sparse_interpolatable_ttfs(self):
+ suffix = ".ttf"
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
+
+ self.temp_dir()
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "SparseMasters-")
+ for path in ttx_paths:
+ self.compile_font(path, suffix, self.tempdir)
+
+ ttf_paths = self.get_file_list(self.tempdir, suffix)
+
+ # without --ignore-missing
+ problems = interpolatable_main(["--quiet"] + ttf_paths)
+ self.assertEqual(
+ problems["a"], [{"type": "missing", "master": "SparseMasters-Medium"}]
+ )
+ self.assertEqual(
+ problems["s"], [{"type": "missing", "master": "SparseMasters-Medium"}]
+ )
+ self.assertEqual(
+ problems["edotabove"],
+ [{"type": "missing", "master": "SparseMasters-Medium"}],
+ )
+ self.assertEqual(
+ problems["dotabovecomb"],
+ [{"type": "missing", "master": "SparseMasters-Medium"}],
+ )
+
+ # normal order, with --ignore-missing
+ self.assertIsNone(interpolatable_main(["--ignore-missing"] + ttf_paths))
+ # purposely putting the sparse master (medium) first
+ self.assertIsNone(
+ interpolatable_main(
+ ["--ignore-missing"] + [ttf_paths[1]] + [ttf_paths[0]] + [ttf_paths[2]]
+ )
+ )
+ # purposely putting the sparse master (medium) last
+ self.assertIsNone(
+ interpolatable_main(
+ ["--ignore-missing"] + [ttf_paths[0]] + [ttf_paths[2]] + [ttf_paths[1]]
+ )
+ )
+
+ def test_sparse_interpolatable_ufos(self):
+ ttx_dir = self.get_test_input("master_ufo")
+ ufo_paths = self.get_file_list(ttx_dir, ".ufo", "SparseMasters-")
+
+ # without --ignore-missing
+ problems = interpolatable_main(["--quiet"] + ufo_paths)
+ self.assertEqual(
+ problems["a"], [{"type": "missing", "master": "SparseMasters-Medium"}]
+ )
+ self.assertEqual(
+ problems["s"], [{"type": "missing", "master": "SparseMasters-Medium"}]
+ )
+ self.assertEqual(
+ problems["edotabove"],
+ [{"type": "missing", "master": "SparseMasters-Medium"}],
+ )
+ self.assertEqual(
+ problems["dotabovecomb"],
+ [{"type": "missing", "master": "SparseMasters-Medium"}],
+ )
+
+ # normal order, with --ignore-missing
+ self.assertIsNone(interpolatable_main(["--ignore-missing"] + ufo_paths))
+ # purposely putting the sparse master (medium) first
+ self.assertIsNone(
+ interpolatable_main(
+ ["--ignore-missing"] + [ufo_paths[1]] + [ufo_paths[0]] + [ufo_paths[2]]
+ )
+ )
+ # purposely putting the sparse master (medium) last
+ self.assertIsNone(
+ interpolatable_main(
+ ["--ignore-missing"] + [ufo_paths[0]] + [ufo_paths[2]] + [ufo_paths[1]]
+ )
+ )
+
+ def test_sparse_designspace(self):
+ designspace_path = self.get_test_input("SparseMasters_ufo.designspace")
+
+ problems = interpolatable_main(["--quiet", designspace_path])
+ self.assertEqual(
+ problems["a"], [{"type": "missing", "master": "SparseMasters-Medium"}]
+ )
+ self.assertEqual(
+ problems["s"], [{"type": "missing", "master": "SparseMasters-Medium"}]
+ )
+ self.assertEqual(
+ problems["edotabove"],
+ [{"type": "missing", "master": "SparseMasters-Medium"}],
+ )
+ self.assertEqual(
+ problems["dotabovecomb"],
+ [{"type": "missing", "master": "SparseMasters-Medium"}],
+ )
+
+ # normal order, with --ignore-missing
+ self.assertIsNone(interpolatable_main(["--ignore-missing", designspace_path]))
+
+ def test_sparse_glyphsapp(self):
+ pytest.importorskip("glyphsLib")
+ glyphsapp_path = self.get_test_input("SparseMasters.glyphs")
+
+ problems = interpolatable_main(["--quiet", glyphsapp_path])
+ self.assertEqual(
+ problems["a"], [{"type": "missing", "master": "Sparse Masters-Medium"}]
+ )
+ self.assertEqual(
+ problems["s"], [{"type": "missing", "master": "Sparse Masters-Medium"}]
+ )
+ self.assertEqual(
+ problems["edotabove"],
+ [{"type": "missing", "master": "Sparse Masters-Medium"}],
+ )
+ self.assertEqual(
+ problems["dotabovecomb"],
+ [{"type": "missing", "master": "Sparse Masters-Medium"}],
+ )
+
+ # normal order, with --ignore-missing
+ self.assertIsNone(interpolatable_main(["--ignore-missing", glyphsapp_path]))
+
+ def test_interpolatable_varComposite(self):
+ input_path = self.get_test_input(
+ "..", "..", "ttLib", "data", "varc-ac00-ac01.ttf"
+ )
+ # This particular test font which was generated by machine-learning
+ # exhibits an "error" in one of the masters; it's a false-positive.
+ # Just make sure the code runs.
+ interpolatable_main((input_path,))
+
if __name__ == "__main__":
sys.exit(unittest.main())
diff --git a/Tests/varLib/interpolate_layout_test.py b/Tests/varLib/interpolate_layout_test.py
index 219f087f..1844e3b1 100644
--- a/Tests/varLib/interpolate_layout_test.py
+++ b/Tests/varLib/interpolate_layout_test.py
@@ -39,7 +39,7 @@ class InterpolateLayoutTest(unittest.TestCase):
return os.path.join(path, "data", "test_results", test_file_or_folder)
@staticmethod
- def get_file_list(folder, suffix, prefix=''):
+ def get_file_list(folder, suffix, prefix=""):
all_files = os.listdir(folder)
file_list = []
for p in all_files:
@@ -50,8 +50,7 @@ class InterpolateLayoutTest(unittest.TestCase):
def temp_path(self, suffix):
self.temp_dir()
self.num_tempfiles += 1
- return os.path.join(self.tempdir,
- "tmp%d%s" % (self.num_tempfiles, suffix))
+ return os.path.join(self.tempdir, "tmp%d%s" % (self.num_tempfiles, suffix))
def temp_dir(self):
if not self.tempdir:
@@ -75,7 +74,8 @@ class InterpolateLayoutTest(unittest.TestCase):
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
- expected, actual, fromfile=expected_ttx, tofile=path):
+ expected, actual, fromfile=expected_ttx, tofile=path
+ ):
sys.stdout.write(line)
self.fail("TTX output is different from expected")
@@ -85,19 +85,21 @@ class InterpolateLayoutTest(unittest.TestCase):
font.save(path)
self.expect_ttx(TTFont(path), expected_ttx, tables)
- def compile_font(self, path, suffix, temp_dir, features=None):
+ def compile_font(self, path, suffix, temp_dir, features=None, cfg=None):
ttx_filename = os.path.basename(path)
- savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix))
+ savepath = os.path.join(temp_dir, ttx_filename.replace(".ttx", suffix))
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
+ if cfg:
+ font.cfg.update(cfg)
font.importXML(path)
if features:
addOpenTypeFeaturesFromString(font, features)
font.save(savepath, reorderTables=None)
return font, savepath
-# -----
-# Tests
-# -----
+ # -----
+ # Tests
+ # -----
def test_varlib_interpolate_layout_GSUB_only_ttf(self):
"""Only GSUB, and only in the base master.
@@ -105,49 +107,47 @@ class InterpolateLayoutTest(unittest.TestCase):
The variable font will inherit the GSUB table from the
base master.
"""
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GSUB']
- expected_ttx_path = self.get_test_output('InterpolateLayout.ttx')
+ tables = ["GSUB"]
+ expected_ttx_path = self.get_test_output("InterpolateLayout.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_no_GSUB_ttf(self):
"""The base master has no GSUB table.
The variable font will end up without a GSUB table.
"""
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout2.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout2.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GSUB']
- expected_ttx_path = self.get_test_output('InterpolateLayout2.ttx')
+ tables = ["GSUB"]
+ expected_ttx_path = self.get_test_output("InterpolateLayout2.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GSUB_only_no_axes_ttf(self):
"""Only GSUB, and only in the base master.
Designspace file has no <axes> element.
@@ -155,17 +155,16 @@ class InterpolateLayoutTest(unittest.TestCase):
The variable font will inherit the GSUB table from the
base master.
"""
- ds_path = self.get_test_input('InterpolateLayout3.designspace')
+ ds_path = self.get_test_input("InterpolateLayout3.designspace")
with self.assertRaisesRegex(DesignSpaceDocumentError, "No axes defined"):
- instfont = interpolate_layout(ds_path, {'weight': 500})
+ instfont = interpolate_layout(ds_path, {"weight": 500})
def test_varlib_interpolate_layout_GPOS_only_size_feat_same_val_ttf(self):
- """Only GPOS; 'size' feature; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; 'size' feature; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
feature size {
@@ -175,26 +174,26 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_size_feat_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output(
+ "InterpolateLayoutGPOS_size_feat_same.ttx"
+ )
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_1_same_val_ttf(self):
- """Only GPOS; LookupType 1; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 1; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
feature xxxx {
@@ -204,26 +203,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_1_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_1_same.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_1_diff_val_ttf(self):
- """Only GPOS; LookupType 1; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 1; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
feature xxxx {
@@ -238,26 +235,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_1_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_1_diff.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_1_diff2_val_ttf(self):
- """Only GPOS; LookupType 1; different values and items in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 1; different values and items in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
feature xxxx {
@@ -273,26 +268,26 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_1_diff2.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_1_diff2.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
- def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_same_val_ttf(self):
- """Only GPOS; LookupType 2 specific pairs; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_same_val_ttf(
+ self,
+ ):
+ """Only GPOS; LookupType 2 specific pairs; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
feature xxxx {
@@ -302,26 +297,28 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_spec_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output(
+ "InterpolateLayoutGPOS_2_spec_same.ttx"
+ )
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
- def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_diff_val_ttf(self):
- """Only GPOS; LookupType 2 specific pairs; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_diff_val_ttf(
+ self,
+ ):
+ """Only GPOS; LookupType 2 specific pairs; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
feature xxxx {
@@ -336,26 +333,28 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_spec_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output(
+ "InterpolateLayoutGPOS_2_spec_diff.ttx"
+ )
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
- def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_diff2_val_ttf(self):
- """Only GPOS; LookupType 2 specific pairs; different values and items in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_diff2_val_ttf(
+ self,
+ ):
+ """Only GPOS; LookupType 2 specific pairs; different values and items in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
feature xxxx {
@@ -371,26 +370,28 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_spec_diff2.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output(
+ "InterpolateLayoutGPOS_2_spec_diff2.ttx"
+ )
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
- def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_same_val_ttf(self):
- """Only GPOS; LookupType 2 class pairs; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_same_val_ttf(
+ self,
+ ):
+ """Only GPOS; LookupType 2 class pairs; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
feature xxxx {
@@ -400,26 +401,28 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_class_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output(
+ "InterpolateLayoutGPOS_2_class_same.ttx"
+ )
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
- def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_diff_val_ttf(self):
- """Only GPOS; LookupType 2 class pairs; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_diff_val_ttf(
+ self,
+ ):
+ """Only GPOS; LookupType 2 class pairs; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
feature xxxx {
@@ -434,26 +437,28 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_class_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output(
+ "InterpolateLayoutGPOS_2_class_diff.ttx"
+ )
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
- def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_diff2_val_ttf(self):
- """Only GPOS; LookupType 2 class pairs; different values and items in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_diff2_val_ttf(
+ self,
+ ):
+ """Only GPOS; LookupType 2 class pairs; different values and items in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
feature xxxx {
@@ -469,26 +474,26 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_class_diff2.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output(
+ "InterpolateLayoutGPOS_2_class_diff2.ttx"
+ )
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_3_same_val_ttf(self):
- """Only GPOS; LookupType 3; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 3; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
feature xxxx {
@@ -498,26 +503,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_3_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_3_same.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_3_diff_val_ttf(self):
- """Only GPOS; LookupType 3; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 3; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
feature xxxx {
@@ -532,26 +535,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_3_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_3_diff.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_4_same_val_ttf(self):
- """Only GPOS; LookupType 4; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 4; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
@@ -562,26 +563,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_4_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_4_same.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_4_diff_val_ttf(self):
- """Only GPOS; LookupType 4; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 4; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
@@ -598,26 +597,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_4_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_4_diff.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_5_same_val_ttf(self):
- """Only GPOS; LookupType 5; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 5; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
markClass uni0330 <anchor 0 -50> @MARKS_BELOW;
@@ -629,26 +626,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_5_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_5_same.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_5_diff_val_ttf(self):
- """Only GPOS; LookupType 5; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 5; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
markClass uni0330 <anchor 0 -50> @MARKS_BELOW;
@@ -667,26 +662,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_5_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_5_diff.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_6_same_val_ttf(self):
- """Only GPOS; LookupType 6; same values in all masters.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 6; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
@@ -697,26 +690,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_6_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_6_same.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_6_diff_val_ttf(self):
- """Only GPOS; LookupType 6; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 6; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
@@ -733,26 +724,112 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_6_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_6_diff.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_7_same_val_ttf(self):
+ """Only GPOS; LookupType 7; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
- def test_varlib_interpolate_layout_GPOS_only_LookupType_8_same_val_ttf(self):
- """Only GPOS; LookupType 8; same values in all masters.
+ fea_str = """
+ markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
+ lookup CNTXT_PAIR_POS {
+ pos A a -23;
+ } CNTXT_PAIR_POS;
+
+ lookup CNTXT_MARK_TO_BASE {
+ pos base a <anchor 260 500> mark @MARKS_ABOVE;
+ } CNTXT_MARK_TO_BASE;
+
+ feature xxxx {
+ pos A' lookup CNTXT_PAIR_POS a' @MARKS_ABOVE' lookup CNTXT_MARK_TO_BASE;
+ } xxxx;
"""
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ features = [fea_str] * 2
+
+ self.temp_dir()
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
+ cfg = {"fontTools.otlLib.builder:WRITE_GPOS7": True}
+ for i, path in enumerate(ttx_paths):
+ self.compile_font(path, suffix, self.tempdir, features[i], cfg)
+
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
+
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_7_same.ttx")
+ self.expect_ttx(instfont, expected_ttx_path, tables)
+ self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
+
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_7_diff_val_ttf(self):
+ """Only GPOS; LookupType 7; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
+
+ fea_str_0 = """
+ markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
+ lookup CNTXT_PAIR_POS {
+ pos A a -23;
+ } CNTXT_PAIR_POS;
+
+ lookup CNTXT_MARK_TO_BASE {
+ pos base a <anchor 260 500> mark @MARKS_ABOVE;
+ } CNTXT_MARK_TO_BASE;
+
+ feature xxxx {
+ pos A' lookup CNTXT_PAIR_POS a' @MARKS_ABOVE' lookup CNTXT_MARK_TO_BASE;
+ } xxxx;
+ """
+ fea_str_1 = """
+ markClass uni0303 <anchor 0 520> @MARKS_ABOVE;
+ lookup CNTXT_PAIR_POS {
+ pos A a 57;
+ } CNTXT_PAIR_POS;
+
+ lookup CNTXT_MARK_TO_BASE {
+ pos base a <anchor 285 520> mark @MARKS_ABOVE;
+ } CNTXT_MARK_TO_BASE;
+
+ feature xxxx {
+ pos A' lookup CNTXT_PAIR_POS a' @MARKS_ABOVE' lookup CNTXT_MARK_TO_BASE;
+ } xxxx;
+ """
+ features = [fea_str_0, fea_str_1]
+
+ self.temp_dir()
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
+ cfg = {"fontTools.otlLib.builder:WRITE_GPOS7": True}
+ for i, path in enumerate(ttx_paths):
+ self.compile_font(path, suffix, self.tempdir, features[i], cfg)
+
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
+
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_7_diff.ttx")
+ self.expect_ttx(instfont, expected_ttx_path, tables)
+ self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
+
+ def test_varlib_interpolate_layout_GPOS_only_LookupType_8_same_val_ttf(self):
+ """Only GPOS; LookupType 8; same values in all masters."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str = """
markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
@@ -771,26 +848,24 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str] * 2
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_8_same.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_8_same.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_GPOS_only_LookupType_8_diff_val_ttf(self):
- """Only GPOS; LookupType 8; different values in each master.
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('InterpolateLayout.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Only GPOS; LookupType 8; different values in each master."""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("InterpolateLayout.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
fea_str_0 = """
markClass uni0303 <anchor 0 500> @MARKS_ABOVE;
@@ -823,49 +898,47 @@ class InterpolateLayoutTest(unittest.TestCase):
features = [fea_str_0, fea_str_1]
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily2-")
for i, path in enumerate(ttx_paths):
self.compile_font(path, suffix, self.tempdir, features[i])
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
- instfont = interpolate_layout(ds_path, {'weight': 500}, finder)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
+ instfont = interpolate_layout(ds_path, {"weight": 500}, finder)
- tables = ['GPOS']
- expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_8_diff.ttx')
+ tables = ["GPOS"]
+ expected_ttx_path = self.get_test_output("InterpolateLayoutGPOS_8_diff.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix)
-
def test_varlib_interpolate_layout_main_ttf(self):
- """Mostly for testing varLib.interpolate_layout.main()
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('Build.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Mostly for testing varLib.interpolate_layout.main()"""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("Build.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
self.temp_dir()
- ttf_dir = os.path.join(self.tempdir, 'master_ttf_interpolatable')
+ ttf_dir = os.path.join(self.tempdir, "master_ttf_interpolatable")
os.makedirs(ttf_dir)
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily-")
for path in ttx_paths:
self.compile_font(path, suffix, ttf_dir)
- finder = lambda s: s.replace(ufo_dir, ttf_dir).replace('.ufo', suffix)
+ finder = lambda s: s.replace(ufo_dir, ttf_dir).replace(".ufo", suffix)
varfont, _, _ = build(ds_path, finder)
- varfont_name = 'InterpolateLayoutMain'
+ varfont_name = "InterpolateLayoutMain"
varfont_path = os.path.join(self.tempdir, varfont_name + suffix)
varfont.save(varfont_path)
- ds_copy = os.path.splitext(varfont_path)[0] + '.designspace'
+ ds_copy = os.path.splitext(varfont_path)[0] + ".designspace"
shutil.copy2(ds_path, ds_copy)
- args = [ds_copy, 'weight=500', 'contrast=50']
+ args = [ds_copy, "weight=500", "contrast=50"]
interpolate_layout_main(args)
- instfont_path = os.path.splitext(varfont_path)[0] + '-instance' + suffix
+ instfont_path = os.path.splitext(varfont_path)[0] + "-instance" + suffix
instfont = TTFont(instfont_path)
- tables = [table_tag for table_tag in instfont.keys() if table_tag != 'head']
- expected_ttx_path = self.get_test_output(varfont_name + '.ttx')
+ tables = [table_tag for table_tag in instfont.keys() if table_tag != "head"]
+ expected_ttx_path = self.get_test_output(varfont_name + ".ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
diff --git a/Tests/varLib/iup_test.py b/Tests/varLib/iup_test.py
index 76b2af51..36f63e0e 100644
--- a/Tests/varLib/iup_test.py
+++ b/Tests/varLib/iup_test.py
@@ -4,40 +4,109 @@ import pytest
class IupTest:
-
-# -----
-# Tests
-# -----
+ # -----
+ # Tests
+ # -----
@pytest.mark.parametrize(
"delta, coords, forced",
[
- (
- [(0, 0)],
- [(1, 2)],
- set()
- ),
- (
- [(0, 0), (0, 0), (0, 0)],
- [(1, 2), (3, 2), (2, 3)],
- set()
- ),
+ ([(0, 0)], [(1, 2)], set()),
+ ([(0, 0), (0, 0), (0, 0)], [(1, 2), (3, 2), (2, 3)], set()),
(
[(1, 1), (-1, 1), (-1, -1), (1, -1)],
[(0, 0), (2, 0), (2, 2), (0, 2)],
- set()
+ set(),
),
(
- [(-1, 0), (-1, 0), (-1, 0), (-1, 0), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (-1, 0)],
- [(-35, -152), (-86, -101), (-50, -65), (0, -116), (51, -65), (86, -99), (35, -151), (87, -202), (51, -238), (-1, -187), (-53, -239), (-88, -205)],
- {11}
+ [
+ (-1, 0),
+ (-1, 0),
+ (-1, 0),
+ (-1, 0),
+ (-1, 0),
+ (0, 0),
+ (0, 0),
+ (0, 0),
+ (0, 0),
+ (0, 0),
+ (0, 0),
+ (-1, 0),
+ ],
+ [
+ (-35, -152),
+ (-86, -101),
+ (-50, -65),
+ (0, -116),
+ (51, -65),
+ (86, -99),
+ (35, -151),
+ (87, -202),
+ (51, -238),
+ (-1, -187),
+ (-53, -239),
+ (-88, -205),
+ ],
+ {11},
),
(
- [(0, 0), (1, 0), (2, 0), (2, 0), (0, 0), (1, 0), (3, 0), (3, 0), (2, 0), (2, 0), (0, 0), (0, 0), (-1, 0), (-1, 0), (-1, 0), (-3, 0), (-1, 0), (0, 0), (0, 0), (-2, 0), (-2, 0), (-1, 0), (-1, 0), (-1, 0), (-4, 0)],
- [(330, 65), (401, 65), (499, 117), (549, 225), (549, 308), (549, 422), (549, 500), (497, 600), (397, 648), (324, 648), (271, 648), (200, 620), (165, 570), (165, 536), (165, 473), (252, 407), (355, 407), (396, 407), (396, 333), (354, 333), (249, 333), (141, 268), (141, 203), (141, 131), (247, 65)],
- {5, 15, 24}
+ [
+ (0, 0),
+ (1, 0),
+ (2, 0),
+ (2, 0),
+ (0, 0),
+ (1, 0),
+ (3, 0),
+ (3, 0),
+ (2, 0),
+ (2, 0),
+ (0, 0),
+ (0, 0),
+ (-1, 0),
+ (-1, 0),
+ (-1, 0),
+ (-3, 0),
+ (-1, 0),
+ (0, 0),
+ (0, 0),
+ (-2, 0),
+ (-2, 0),
+ (-1, 0),
+ (-1, 0),
+ (-1, 0),
+ (-4, 0),
+ ],
+ [
+ (330, 65),
+ (401, 65),
+ (499, 117),
+ (549, 225),
+ (549, 308),
+ (549, 422),
+ (549, 500),
+ (497, 600),
+ (397, 648),
+ (324, 648),
+ (271, 648),
+ (200, 620),
+ (165, 570),
+ (165, 536),
+ (165, 473),
+ (252, 407),
+ (355, 407),
+ (396, 407),
+ (396, 333),
+ (354, 333),
+ (249, 333),
+ (141, 268),
+ (141, 203),
+ (141, 131),
+ (247, 65),
+ ],
+ {5, 15, 24},
),
- ]
+ ],
)
def test_forced_set(self, delta, coords, forced):
f = iup._iup_contour_bound_forced_set(delta, coords)
@@ -49,5 +118,6 @@ class IupTest:
assert chain1 == chain2, f
assert costs1 == costs2, f
+
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
diff --git a/Tests/varLib/merger_test.py b/Tests/varLib/merger_test.py
index aa7a6998..e44d466d 100644
--- a/Tests/varLib/merger_test.py
+++ b/Tests/varLib/merger_test.py
@@ -7,6 +7,7 @@ from fontTools.varLib.models import VariationModel
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import OTTableReader, OTTableWriter
+from io import BytesIO
import pytest
@@ -1842,3 +1843,102 @@ class COLRVariationMergerTest:
if colr.table.LayerList:
assert len({id(p) for p in colr.table.LayerList.Paint}) == after_layer_count
+
+
+class SparsePositioningMergerTest:
+ def test_zero_kern_at_default(self):
+ # https://github.com/fonttools/fonttools/issues/3111
+
+ pytest.importorskip("ufo2ft")
+ pytest.importorskip("ufoLib2")
+
+ from fontTools.designspaceLib import DesignSpaceDocument
+ from ufo2ft import compileVariableTTF
+ from ufoLib2 import Font
+
+ ds = DesignSpaceDocument()
+ ds.addAxisDescriptor(
+ name="wght", tag="wght", minimum=100, maximum=900, default=400
+ )
+ ds.addSourceDescriptor(font=Font(), location=dict(wght=100))
+ ds.addSourceDescriptor(font=Font(), location=dict(wght=400))
+ ds.addSourceDescriptor(font=Font(), location=dict(wght=900))
+
+ ds.sources[0].font.newGlyph("a").unicode = ord("a")
+ ds.sources[0].font.newGlyph("b").unicode = ord("b")
+ ds.sources[0].font.features.text = "feature kern { pos a b b' 100; } kern;"
+
+ ds.sources[1].font.newGlyph("a").unicode = ord("a")
+ ds.sources[1].font.newGlyph("b").unicode = ord("b")
+ ds.sources[1].font.features.text = "feature kern { pos a b b' 0; } kern;"
+
+ ds.sources[2].font.newGlyph("a").unicode = ord("a")
+ ds.sources[2].font.newGlyph("b").unicode = ord("b")
+ ds.sources[2].font.features.text = "feature kern { pos a b b' -100; } kern;"
+
+ font = compileVariableTTF(ds, inplace=True)
+ b = BytesIO()
+ font.save(b)
+
+ assert font["GDEF"].table.VarStore.VarData[0].Item[0] == [100, -100]
+
+ def test_sparse_cursive(self):
+ # https://github.com/fonttools/fonttools/issues/3168
+
+ pytest.importorskip("ufo2ft")
+ pytest.importorskip("ufoLib2")
+
+ from fontTools.designspaceLib import DesignSpaceDocument
+ from ufo2ft import compileVariableTTF
+ from ufoLib2 import Font
+
+ ds = DesignSpaceDocument()
+ ds.addAxisDescriptor(
+ name="wght", tag="wght", minimum=100, maximum=900, default=400
+ )
+ ds.addSourceDescriptor(font=Font(), location=dict(wght=100))
+ ds.addSourceDescriptor(font=Font(), location=dict(wght=400))
+ ds.addSourceDescriptor(font=Font(), location=dict(wght=900))
+
+ ds.sources[0].font.newGlyph("a").unicode = ord("a")
+ ds.sources[0].font.newGlyph("b").unicode = ord("b")
+ ds.sources[0].font.newGlyph("c").unicode = ord("c")
+ ds.sources[
+ 0
+ ].font.features.text = """
+ feature curs {
+ position cursive a <anchor 400 20> <anchor 0 -20>;
+ position cursive c <anchor NULL> <anchor 0 -20>;
+ } curs;
+ """
+
+ ds.sources[1].font.newGlyph("a").unicode = ord("a")
+ ds.sources[1].font.newGlyph("b").unicode = ord("b")
+ ds.sources[1].font.newGlyph("c").unicode = ord("c")
+ ds.sources[
+ 1
+ ].font.features.text = """
+ feature curs {
+ position cursive a <anchor 500 20> <anchor 0 -20>;
+ position cursive b <anchor 50 22> <anchor 0 -10>;
+ position cursive c <anchor NULL> <anchor 0 -20>;
+ } curs;
+ """
+
+ ds.sources[2].font.newGlyph("a").unicode = ord("a")
+ ds.sources[2].font.newGlyph("b").unicode = ord("b")
+ ds.sources[2].font.newGlyph("c").unicode = ord("c")
+ ds.sources[
+ 2
+ ].font.features.text = """
+ feature curs {
+ position cursive b <anchor 100 40> <anchor 0 -30>;
+ position cursive c <anchor NULL> <anchor 0 -20>;
+ } curs;
+ """
+
+ font = compileVariableTTF(ds, inplace=True)
+ b = BytesIO()
+ font.save(b)
+
+ assert font["GDEF"].table.VarStore.VarData[0].Item[0] == [-100, 0]
diff --git a/Tests/varLib/models_test.py b/Tests/varLib/models_test.py
index e0080129..11ec1a1e 100644
--- a/Tests/varLib/models_test.py
+++ b/Tests/varLib/models_test.py
@@ -31,15 +31,130 @@ def test_normalizeLocation():
assert normalizeLocation({"wght": 1001}, axes) == {"wght": 0.0}
+@pytest.mark.parametrize(
+ "axes, location, expected",
+ [
+ # lower != default != upper
+ ({"wght": (100, 400, 900)}, {"wght": 1000}, {"wght": 1.2}),
+ ({"wght": (100, 400, 900)}, {"wght": 900}, {"wght": 1.0}),
+ ({"wght": (100, 400, 900)}, {"wght": 650}, {"wght": 0.5}),
+ ({"wght": (100, 400, 900)}, {"wght": 400}, {"wght": 0.0}),
+ ({"wght": (100, 400, 900)}, {"wght": 250}, {"wght": -0.5}),
+ ({"wght": (100, 400, 900)}, {"wght": 100}, {"wght": -1.0}),
+ ({"wght": (100, 400, 900)}, {"wght": 25}, {"wght": -1.25}),
+ # lower == default != upper
+ (
+ {"wght": (400, 400, 900), "wdth": (100, 100, 150)},
+ {"wght": 1000, "wdth": 200},
+ {"wght": 1.2, "wdth": 2.0},
+ ),
+ (
+ {"wght": (400, 400, 900), "wdth": (100, 100, 150)},
+ {"wght": 25, "wdth": 25},
+ {"wght": -0.75, "wdth": -1.5},
+ ),
+ # lower != default == upper
+ (
+ {"wght": (100, 400, 400), "wdth": (50, 100, 100)},
+ {"wght": 700, "wdth": 150},
+ {"wght": 1.0, "wdth": 1.0},
+ ),
+ (
+ {"wght": (100, 400, 400), "wdth": (50, 100, 100)},
+ {"wght": -50, "wdth": 25},
+ {"wght": -1.5, "wdth": -1.5},
+ ),
+ # degenerate case with lower == default == upper, normalized location always 0
+ ({"wght": (400, 400, 400)}, {"wght": 100}, {"wght": 0.0}),
+ ({"wght": (400, 400, 400)}, {"wght": 400}, {"wght": 0.0}),
+ ({"wght": (400, 400, 400)}, {"wght": 700}, {"wght": 0.0}),
+ ],
+)
+def test_normalizeLocation_extrapolate(axes, location, expected):
+ assert normalizeLocation(location, axes, extrapolate=True) == expected
+
+
def test_supportScalar():
assert supportScalar({}, {}) == 1.0
assert supportScalar({"wght": 0.2}, {}) == 1.0
assert supportScalar({"wght": 0.2}, {"wght": (0, 2, 3)}) == 0.1
assert supportScalar({"wght": 2.5}, {"wght": (0, 2, 4)}) == 0.75
- assert supportScalar({"wght": 4}, {"wght": (0, 2, 2)}) == 0.0
- assert supportScalar({"wght": 4}, {"wght": (0, 2, 2)}, extrapolate=True) == 2.0
- assert supportScalar({"wght": 4}, {"wght": (0, 2, 3)}, extrapolate=True) == 2.0
- assert supportScalar({"wght": 2}, {"wght": (0, 0.75, 1)}, extrapolate=True) == -4.0
+ assert supportScalar({"wght": 3}, {"wght": (0, 2, 2)}) == 0.0
+ assert (
+ supportScalar(
+ {"wght": 3},
+ {"wght": (0, 2, 2)},
+ extrapolate=True,
+ axisRanges={"wght": (0, 2)},
+ )
+ == 1.5
+ )
+ assert (
+ supportScalar(
+ {"wght": -1},
+ {"wght": (0, 2, 2)},
+ extrapolate=True,
+ axisRanges={"wght": (0, 2)},
+ )
+ == -0.5
+ )
+ assert (
+ supportScalar(
+ {"wght": 3},
+ {"wght": (0, 1, 2)},
+ extrapolate=True,
+ axisRanges={"wght": (0, 2)},
+ )
+ == -1.0
+ )
+ assert (
+ supportScalar(
+ {"wght": -1},
+ {"wght": (0, 1, 2)},
+ extrapolate=True,
+ axisRanges={"wght": (0, 2)},
+ )
+ == -1.0
+ )
+ assert (
+ supportScalar(
+ {"wght": 2},
+ {"wght": (0, 0.75, 1)},
+ extrapolate=True,
+ axisRanges={"wght": (0, 1)},
+ )
+ == -4.0
+ )
+ with pytest.raises(TypeError):
+ supportScalar(
+ {"wght": 2}, {"wght": (0, 0.75, 1)}, extrapolate=True, axisRanges=None
+ )
+
+
+def test_model_extrapolate():
+ locations = [{}, {"a": 1}, {"b": 1}, {"a": 1, "b": 1}]
+ model = VariationModel(locations, extrapolate=True)
+ masterValues = [100, 200, 300, 400]
+ testLocsAndValues = [
+ ({"a": -1, "b": -1}, -200),
+ ({"a": -1, "b": 0}, 0),
+ ({"a": -1, "b": 1}, 200),
+ ({"a": -1, "b": 2}, 400),
+ ({"a": 0, "b": -1}, -100),
+ ({"a": 0, "b": 0}, 100),
+ ({"a": 0, "b": 1}, 300),
+ ({"a": 0, "b": 2}, 500),
+ ({"a": 1, "b": -1}, 0),
+ ({"a": 1, "b": 0}, 200),
+ ({"a": 1, "b": 1}, 400),
+ ({"a": 1, "b": 2}, 600),
+ ({"a": 2, "b": -1}, 100),
+ ({"a": 2, "b": 0}, 300),
+ ({"a": 2, "b": 1}, 500),
+ ({"a": 2, "b": 2}, 700),
+ ]
+ for loc, expectedValue in testLocsAndValues:
+ assert expectedValue == model.interpolateFromMasters(loc, masterValues)
@pytest.mark.parametrize(
diff --git a/Tests/varLib/mutator_test.py b/Tests/varLib/mutator_test.py
index 03ad870f..a3149c95 100644
--- a/Tests/varLib/mutator_test.py
+++ b/Tests/varLib/mutator_test.py
@@ -37,7 +37,7 @@ class MutatorTest(unittest.TestCase):
return os.path.join(path, "data", "test_results", test_file_or_folder)
@staticmethod
- def get_file_list(folder, suffix, prefix=''):
+ def get_file_list(folder, suffix, prefix=""):
all_files = os.listdir(folder)
file_list = []
for p in all_files:
@@ -48,8 +48,7 @@ class MutatorTest(unittest.TestCase):
def temp_path(self, suffix):
self.temp_dir()
self.num_tempfiles += 1
- return os.path.join(self.tempdir,
- "tmp%d%s" % (self.num_tempfiles, suffix))
+ return os.path.join(self.tempdir, "tmp%d%s" % (self.num_tempfiles, suffix))
def temp_dir(self):
if not self.tempdir:
@@ -73,110 +72,111 @@ class MutatorTest(unittest.TestCase):
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
- expected, actual, fromfile=expected_ttx, tofile=path):
+ expected, actual, fromfile=expected_ttx, tofile=path
+ ):
sys.stdout.write(line)
self.fail("TTX output is different from expected")
def compile_font(self, path, suffix, temp_dir):
ttx_filename = os.path.basename(path)
- savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix))
+ savepath = os.path.join(temp_dir, ttx_filename.replace(".ttx", suffix))
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(path)
font.save(savepath, reorderTables=None)
return font, savepath
-# -----
-# Tests
-# -----
+ # -----
+ # Tests
+ # -----
def test_varlib_mutator_ttf(self):
- suffix = '.ttf'
- ds_path = self.get_test_input('Build.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ suffix = ".ttf"
+ ds_path = self.get_test_input("Build.designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily-")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
varfont, _, _ = build(ds_path, finder)
- varfont_name = 'Mutator'
+ varfont_name = "Mutator"
varfont_path = os.path.join(self.tempdir, varfont_name + suffix)
varfont.save(varfont_path)
- args = [varfont_path, 'wght=500', 'cntr=50']
+ args = [varfont_path, "wght=500", "cntr=50"]
mutator(args)
- instfont_path = os.path.splitext(varfont_path)[0] + '-instance' + suffix
+ instfont_path = os.path.splitext(varfont_path)[0] + "-instance" + suffix
instfont = TTFont(instfont_path)
- tables = [table_tag for table_tag in instfont.keys() if table_tag != 'head']
- expected_ttx_path = self.get_test_output(varfont_name + '.ttx')
+ tables = [table_tag for table_tag in instfont.keys() if table_tag != "head"]
+ expected_ttx_path = self.get_test_output(varfont_name + ".ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
def test_varlib_mutator_getvar_ttf(self):
- suffix = '.ttf'
- ttx_dir = self.get_test_input('master_ttx_getvar_ttf')
+ suffix = ".ttf"
+ ttx_dir = self.get_test_input("master_ttx_getvar_ttf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'Mutator_Getvar')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "Mutator_Getvar")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
- varfont_name = 'Mutator_Getvar'
+ varfont_name = "Mutator_Getvar"
varfont_path = os.path.join(self.tempdir, varfont_name + suffix)
- args = [varfont_path, 'wdth=80', 'ASCN=628']
+ args = [varfont_path, "wdth=80", "ASCN=628"]
mutator(args)
- instfont_path = os.path.splitext(varfont_path)[0] + '-instance' + suffix
+ instfont_path = os.path.splitext(varfont_path)[0] + "-instance" + suffix
instfont = TTFont(instfont_path)
- tables = [table_tag for table_tag in instfont.keys() if table_tag != 'head']
- expected_ttx_path = self.get_test_output(varfont_name + '-instance.ttx')
+ tables = [table_tag for table_tag in instfont.keys() if table_tag != "head"]
+ expected_ttx_path = self.get_test_output(varfont_name + "-instance.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
def test_varlib_mutator_iup_ttf(self):
- suffix = '.ttf'
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_varfont_ttf')
+ suffix = ".ttf"
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_varfont_ttf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'Mutator_IUP')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "Mutator_IUP")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
- varfont_name = 'Mutator_IUP'
+ varfont_name = "Mutator_IUP"
varfont_path = os.path.join(self.tempdir, varfont_name + suffix)
-
- args = [varfont_path, 'wdth=80', 'ASCN=628']
+
+ args = [varfont_path, "wdth=80", "ASCN=628"]
mutator(args)
- instfont_path = os.path.splitext(varfont_path)[0] + '-instance' + suffix
+ instfont_path = os.path.splitext(varfont_path)[0] + "-instance" + suffix
instfont = TTFont(instfont_path)
- tables = [table_tag for table_tag in instfont.keys() if table_tag != 'head']
- expected_ttx_path = self.get_test_output(varfont_name + '-instance.ttx')
+ tables = [table_tag for table_tag in instfont.keys() if table_tag != "head"]
+ expected_ttx_path = self.get_test_output(varfont_name + "-instance.ttx")
self.expect_ttx(instfont, expected_ttx_path, tables)
def test_varlib_mutator_CFF2(self):
- suffix = '.otf'
- ttx_dir = self.get_test_input('master_ttx_varfont_otf')
+ suffix = ".otf"
+ ttx_dir = self.get_test_input("master_ttx_varfont_otf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestCFF2VF')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestCFF2VF")
for path in ttx_paths:
self.compile_font(path, suffix, self.tempdir)
- varfont_name = 'TestCFF2VF'
+ varfont_name = "TestCFF2VF"
varfont_path = os.path.join(self.tempdir, varfont_name + suffix)
- expected_ttx_name = 'InterpolateTestCFF2VF'
+ expected_ttx_name = "InterpolateTestCFF2VF"
tables = ["hmtx", "CFF2"]
- loc = {'wght':float(200)}
+ loc = {"wght": float(200)}
varfont = TTFont(varfont_path)
new_font = make_instance(varfont, loc)
- expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
+ expected_ttx_path = self.get_test_output(expected_ttx_name + ".ttx")
self.expect_ttx(new_font, expected_ttx_path, tables)
diff --git a/Tests/varLib/stat_test.py b/Tests/varLib/stat_test.py
index 6def990e..ce04423a 100644
--- a/Tests/varLib/stat_test.py
+++ b/Tests/varLib/stat_test.py
@@ -65,6 +65,18 @@ def test_getStatAxes(datadir):
"rangeMaxValue": 900.0,
"rangeMinValue": 850.0,
},
+ {
+ "flags": 2,
+ "name": {"en": "Regular"},
+ "value": 400.0,
+ "linkedValue": 700.0,
+ },
+ {
+ "flags": 0,
+ "name": {"en": "Bold"},
+ "value": 700.0,
+ "linkedValue": 400.0,
+ },
],
"name": {"en": "Wéíght", "fa-IR": "قطر"},
"ordering": 2,
@@ -120,6 +132,18 @@ def test_getStatAxes(datadir):
"rangeMaxValue": 850.0,
"rangeMinValue": 650.0,
},
+ {
+ "flags": 2,
+ "name": {"en": "Regular"},
+ "value": 400.0,
+ "linkedValue": 700.0,
+ },
+ {
+ "flags": 0,
+ "name": {"en": "Bold"},
+ "value": 700.0,
+ "linkedValue": 400.0,
+ },
],
"name": {"en": "Wéíght", "fa-IR": "قطر"},
"ordering": 2,
diff --git a/Tests/varLib/varLib_test.py b/Tests/varLib/varLib_test.py
index 29f909ae..87616ae2 100644
--- a/Tests/varLib/varLib_test.py
+++ b/Tests/varLib/varLib_test.py
@@ -1,15 +1,21 @@
+from fontTools.colorLib.builder import buildCOLR
from fontTools.ttLib import TTFont, newTable
-from fontTools.varLib import build, load_designspace
+from fontTools.ttLib.tables import otTables as ot
+from fontTools.varLib import build, build_many, load_designspace, _add_COLR
from fontTools.varLib.errors import VarLibValidationError
import fontTools.varLib.errors as varLibErrors
+from fontTools.varLib.models import VariationModel
from fontTools.varLib.mutator import instantiateVariableFont
from fontTools.varLib import main as varLib_main, load_masters
from fontTools.varLib import set_default_weight_width_slant
from fontTools.designspaceLib import (
- DesignSpaceDocumentError, DesignSpaceDocument, SourceDescriptor,
+ DesignSpaceDocumentError,
+ DesignSpaceDocument,
+ SourceDescriptor,
)
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
import difflib
+from copy import deepcopy
from io import BytesIO
import os
import shutil
@@ -62,7 +68,7 @@ class BuildTest(unittest.TestCase):
return os.path.join(path, "data", "test_results", test_file_or_folder)
@staticmethod
- def get_file_list(folder, suffix, prefix=''):
+ def get_file_list(folder, suffix, prefix=""):
all_files = os.listdir(folder)
file_list = []
for p in all_files:
@@ -73,8 +79,7 @@ class BuildTest(unittest.TestCase):
def temp_path(self, suffix):
self.temp_dir()
self.num_tempfiles += 1
- return os.path.join(self.tempdir,
- "tmp%d%s" % (self.num_tempfiles, suffix))
+ return os.path.join(self.tempdir, "tmp%d%s" % (self.num_tempfiles, suffix))
def temp_dir(self):
if not self.tempdir:
@@ -98,7 +103,8 @@ class BuildTest(unittest.TestCase):
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
- expected, actual, fromfile=expected_ttx, tofile=path):
+ expected, actual, fromfile=expected_ttx, tofile=path
+ ):
sys.stdout.write(line)
self.fail("TTX output is different from expected")
@@ -110,28 +116,34 @@ class BuildTest(unittest.TestCase):
def compile_font(self, path, suffix, temp_dir):
ttx_filename = os.path.basename(path)
- savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix))
+ savepath = os.path.join(temp_dir, ttx_filename.replace(".ttx", suffix))
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(path)
font.save(savepath, reorderTables=None)
return font, savepath
- def _run_varlib_build_test(self, designspace_name, font_name, tables,
- expected_ttx_name, save_before_dump=False,
- post_process_master=None):
- suffix = '.ttf'
- ds_path = self.get_test_input(designspace_name + '.designspace')
- ufo_dir = self.get_test_input('master_ufo')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ def _run_varlib_build_test(
+ self,
+ designspace_name,
+ font_name,
+ tables,
+ expected_ttx_name,
+ save_before_dump=False,
+ post_process_master=None,
+ ):
+ suffix = ".ttf"
+ ds_path = self.get_test_input(designspace_name + ".designspace")
+ ufo_dir = self.get_test_input("master_ufo")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
self.temp_dir()
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', font_name + '-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", font_name + "-")
for path in ttx_paths:
font, savepath = self.compile_font(path, suffix, self.tempdir)
if post_process_master is not None:
post_process_master(font, savepath)
- finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
+ finder = lambda s: s.replace(ufo_dir, self.tempdir).replace(".ufo", suffix)
varfont, model, _ = build(ds_path, finder)
if save_before_dump:
@@ -140,25 +152,26 @@ class BuildTest(unittest.TestCase):
# dumps we need to save to a temporary stream, and realod the font
varfont = reload_font(varfont)
- expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
+ expected_ttx_path = self.get_test_output(expected_ttx_name + ".ttx")
self.expect_ttx(varfont, expected_ttx_path, tables)
self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix)
-# -----
-# Tests
-# -----
+
+ # -----
+ # Tests
+ # -----
def test_varlib_build_ttf(self):
"""Designspace file contains <axes> element."""
self._run_varlib_build_test(
- designspace_name='Build',
- font_name='TestFamily',
- tables=['GDEF', 'HVAR', 'MVAR', 'fvar', 'gvar'],
- expected_ttx_name='Build'
+ designspace_name="Build",
+ font_name="TestFamily",
+ tables=["GDEF", "HVAR", "MVAR", "fvar", "gvar"],
+ expected_ttx_name="Build",
)
def test_varlib_build_no_axes_ttf(self):
"""Designspace file does not contain an <axes> element."""
- ds_path = self.get_test_input('InterpolateLayout3.designspace')
+ ds_path = self.get_test_input("InterpolateLayout3.designspace")
with self.assertRaisesRegex(DesignSpaceDocumentError, "No axes defined"):
build(ds_path)
@@ -166,12 +179,12 @@ class BuildTest(unittest.TestCase):
"""Designspace file contains a 'weight' axis with <map> elements
modifying the normalization mapping. An 'avar' table is generated.
"""
- test_name = 'BuildAvarSingleAxis'
+ test_name = "BuildAvarSingleAxis"
self._run_varlib_build_test(
designspace_name=test_name,
- font_name='TestFamily3',
- tables=['avar'],
- expected_ttx_name=test_name
+ font_name="TestFamily3",
+ tables=["avar"],
+ expected_ttx_name=test_name,
)
def test_varlib_avar_with_identity_maps(self):
@@ -186,12 +199,12 @@ class BuildTest(unittest.TestCase):
https://github.com/googlei18n/fontmake/issues/295
https://github.com/fonttools/fonttools/issues/1011
"""
- test_name = 'BuildAvarIdentityMaps'
+ test_name = "BuildAvarIdentityMaps"
self._run_varlib_build_test(
designspace_name=test_name,
- font_name='TestFamily3',
- tables=['avar'],
- expected_ttx_name=test_name
+ font_name="TestFamily3",
+ tables=["avar"],
+ expected_ttx_name=test_name,
)
def test_varlib_avar_empty_axis(self):
@@ -206,12 +219,25 @@ class BuildTest(unittest.TestCase):
https://github.com/googlei18n/fontmake/issues/295
https://github.com/fonttools/fonttools/issues/1011
"""
- test_name = 'BuildAvarEmptyAxis'
+ test_name = "BuildAvarEmptyAxis"
self._run_varlib_build_test(
designspace_name=test_name,
- font_name='TestFamily3',
- tables=['avar'],
- expected_ttx_name=test_name
+ font_name="TestFamily3",
+ tables=["avar"],
+ expected_ttx_name=test_name,
+ )
+
+ def test_varlib_avar2(self):
+ """Designspace file contains a 'weight' axis with <map> elements
+ modifying the normalization mapping as well as <mappings> element
+ modifying it post-normalization. An 'avar' table is generated.
+ """
+ test_name = "BuildAvar2"
+ self._run_varlib_build_test(
+ designspace_name=test_name,
+ font_name="TestFamily3",
+ tables=["avar"],
+ expected_ttx_name=test_name,
)
def test_varlib_build_feature_variations(self):
@@ -274,6 +300,7 @@ class BuildTest(unittest.TestCase):
The multiple languages are done to verify whether multiple existing
'rclt' features are updated correctly.
"""
+
def add_rclt(font, savepath):
features = """
languagesystem DFLT dflt;
@@ -294,6 +321,7 @@ class BuildTest(unittest.TestCase):
"""
addOpenTypeFeaturesFromString(font, features)
font.save(savepath)
+
self._run_varlib_build_test(
designspace_name="FeatureVars",
font_name="TestFamily",
@@ -310,22 +338,22 @@ class BuildTest(unittest.TestCase):
https://github.com/fonttools/fonttools/issues/1381
"""
- test_name = 'BuildGvarCompositeExplicitDelta'
+ test_name = "BuildGvarCompositeExplicitDelta"
self._run_varlib_build_test(
designspace_name=test_name,
- font_name='TestFamily4',
- tables=['gvar'],
- expected_ttx_name=test_name
+ font_name="TestFamily4",
+ tables=["gvar"],
+ expected_ttx_name=test_name,
)
def test_varlib_nonmarking_CFF2(self):
self.temp_dir()
- ds_path = self.get_test_input('TestNonMarkingCFF2.designspace', copy=True)
+ ds_path = self.get_test_input("TestNonMarkingCFF2.designspace", copy=True)
ttx_dir = self.get_test_input("master_non_marking_cff2")
expected_ttx_path = self.get_test_output("TestNonMarkingCFF2.ttx")
- for path in self.get_file_list(ttx_dir, '.ttx', 'TestNonMarkingCFF2_'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "TestNonMarkingCFF2_"):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -344,11 +372,11 @@ class BuildTest(unittest.TestCase):
def test_varlib_build_CFF2(self):
self.temp_dir()
- ds_path = self.get_test_input('TestCFF2.designspace', copy=True)
+ ds_path = self.get_test_input("TestCFF2.designspace", copy=True)
ttx_dir = self.get_test_input("master_cff2")
expected_ttx_path = self.get_test_output("BuildTestCFF2.ttx")
- for path in self.get_file_list(ttx_dir, '.ttx', 'TestCFF2_'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "TestCFF2_"):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -367,11 +395,11 @@ class BuildTest(unittest.TestCase):
def test_varlib_build_CFF2_from_CFF2(self):
self.temp_dir()
- ds_path = self.get_test_input('TestCFF2Input.designspace', copy=True)
+ ds_path = self.get_test_input("TestCFF2Input.designspace", copy=True)
ttx_dir = self.get_test_input("master_cff2_input")
expected_ttx_path = self.get_test_output("BuildTestCFF2.ttx")
- for path in self.get_file_list(ttx_dir, '.ttx', 'TestCFF2_'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "TestCFF2_"):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -390,11 +418,11 @@ class BuildTest(unittest.TestCase):
def test_varlib_build_sparse_CFF2(self):
self.temp_dir()
- ds_path = self.get_test_input('TestSparseCFF2VF.designspace', copy=True)
+ ds_path = self.get_test_input("TestSparseCFF2VF.designspace", copy=True)
ttx_dir = self.get_test_input("master_sparse_cff2")
expected_ttx_path = self.get_test_output("TestSparseCFF2VF.ttx")
- for path in self.get_file_list(ttx_dir, '.ttx', 'MasterSet_Kanji-'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "MasterSet_Kanji-"):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -413,11 +441,11 @@ class BuildTest(unittest.TestCase):
def test_varlib_build_vpal(self):
self.temp_dir()
- ds_path = self.get_test_input('test_vpal.designspace', copy=True)
+ ds_path = self.get_test_input("test_vpal.designspace", copy=True)
ttx_dir = self.get_test_input("master_vpal_test")
expected_ttx_path = self.get_test_output("test_vpal.ttx")
- for path in self.get_file_list(ttx_dir, '.ttx', 'master_vpal_test_'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "master_vpal_test_"):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -434,20 +462,19 @@ class BuildTest(unittest.TestCase):
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_main_ttf(self):
- """Mostly for testing varLib.main()
- """
- suffix = '.ttf'
- ds_path = self.get_test_input('Build.designspace')
- ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
+ """Mostly for testing varLib.main()"""
+ suffix = ".ttf"
+ ds_path = self.get_test_input("Build.designspace")
+ ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
self.temp_dir()
- ttf_dir = os.path.join(self.tempdir, 'master_ttf_interpolatable')
+ ttf_dir = os.path.join(self.tempdir, "master_ttf_interpolatable")
os.makedirs(ttf_dir)
- ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-')
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily-")
for path in ttx_paths:
self.compile_font(path, suffix, ttf_dir)
- ds_copy = os.path.join(self.tempdir, 'BuildMain.designspace')
+ ds_copy = os.path.join(self.tempdir, "BuildMain.designspace")
shutil.copy2(ds_path, ds_copy)
# by default, varLib.main finds master TTFs inside a
@@ -459,7 +486,7 @@ class BuildTest(unittest.TestCase):
finally:
os.chdir(cwd)
- varfont_path = os.path.splitext(ds_copy)[0] + '-VF' + suffix
+ varfont_path = os.path.splitext(ds_copy)[0] + "-VF" + suffix
self.assertTrue(os.path.exists(varfont_path))
# try again passing an explicit --master-finder
@@ -475,17 +502,110 @@ class BuildTest(unittest.TestCase):
self.assertTrue(os.path.exists(varfont_path))
varfont = TTFont(varfont_path)
- tables = [table_tag for table_tag in varfont.keys() if table_tag != 'head']
- expected_ttx_path = self.get_test_output('BuildMain.ttx')
+ tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
+ expected_ttx_path = self.get_test_output("BuildMain.ttx")
+ self.expect_ttx(varfont, expected_ttx_path, tables)
+
+ def test_varLib_main_output_dir(self):
+ self.temp_dir()
+ outdir = os.path.join(self.tempdir, "output_dir_test")
+ self.assertFalse(os.path.exists(outdir))
+
+ ds_path = os.path.join(self.tempdir, "BuildMain.designspace")
+ shutil.copy2(self.get_test_input("Build.designspace"), ds_path)
+
+ shutil.copytree(
+ self.get_test_input("master_ttx_interpolatable_ttf"),
+ os.path.join(outdir, "master_ttx"),
+ )
+
+ finder = "%s/output_dir_test/master_ttx/{stem}.ttx" % self.tempdir
+
+ varLib_main([ds_path, "--output-dir", outdir, "--master-finder", finder])
+
+ self.assertTrue(os.path.isdir(outdir))
+ self.assertTrue(os.path.exists(os.path.join(outdir, "BuildMain-VF.ttf")))
+
+ def test_varLib_main_filter_variable_fonts(self):
+ self.temp_dir()
+ outdir = os.path.join(self.tempdir, "filter_variable_fonts_test")
+ self.assertFalse(os.path.exists(outdir))
+
+ ds_path = os.path.join(self.tempdir, "BuildMain.designspace")
+ shutil.copy2(self.get_test_input("Build.designspace"), ds_path)
+
+ shutil.copytree(
+ self.get_test_input("master_ttx_interpolatable_ttf"),
+ os.path.join(outdir, "master_ttx"),
+ )
+
+ finder = "%s/filter_variable_fonts_test/master_ttx/{stem}.ttx" % self.tempdir
+
+ cmd = [ds_path, "--output-dir", outdir, "--master-finder", finder]
+
+ with pytest.raises(SystemExit):
+ varLib_main(cmd + ["--variable-fonts", "FooBar"]) # no font matches
+
+ varLib_main(cmd + ["--variable-fonts", "Build.*"]) # this does match
+
+ self.assertTrue(os.path.isdir(outdir))
+ self.assertTrue(os.path.exists(os.path.join(outdir, "BuildMain-VF.ttf")))
+
+ def test_varLib_main_drop_implied_oncurves(self):
+ self.temp_dir()
+ outdir = os.path.join(self.tempdir, "drop_implied_oncurves_test")
+ self.assertFalse(os.path.exists(outdir))
+
+ ttf_dir = os.path.join(outdir, "master_ttf_interpolatable")
+ os.makedirs(ttf_dir)
+ ttx_dir = self.get_test_input("master_ttx_drop_oncurves")
+ ttx_paths = self.get_file_list(ttx_dir, ".ttx", "TestFamily-")
+ for path in ttx_paths:
+ self.compile_font(path, ".ttf", ttf_dir)
+
+ ds_copy = os.path.join(outdir, "DropOnCurves.designspace")
+ ds_path = self.get_test_input("DropOnCurves.designspace")
+ shutil.copy2(ds_path, ds_copy)
+
+ finder = "%s/master_ttf_interpolatable/{stem}.ttf" % outdir
+ varLib_main([ds_copy, "--master-finder", finder, "--drop-implied-oncurves"])
+
+ vf_path = os.path.join(outdir, "DropOnCurves-VF.ttf")
+ varfont = TTFont(vf_path)
+ tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
+ expected_ttx_path = self.get_test_output("DropOnCurves.ttx")
self.expect_ttx(varfont, expected_ttx_path, tables)
+ def test_varLib_build_many_no_overwrite_STAT(self):
+ # Ensure that varLib.build_many doesn't overwrite a pre-existing STAT table,
+ # e.g. one built by feaLib from features.fea; the VF simply should inherit the
+ # STAT from the base master: https://github.com/googlefonts/fontmake/issues/985
+ base_master = TTFont()
+ base_master.importXML(
+ self.get_test_input("master_no_overwrite_stat/Test-CondensedThin.ttx")
+ )
+ assert "STAT" in base_master
+
+ vf = next(
+ iter(
+ build_many(
+ DesignSpaceDocument.fromfile(
+ self.get_test_input("TestNoOverwriteSTAT.designspace")
+ )
+ ).values()
+ )
+ )
+ assert "STAT" in vf
+
+ assert vf["STAT"].table == base_master["STAT"].table
+
def test_varlib_build_from_ds_object_in_memory_ttfonts(self):
ds_path = self.get_test_input("Build.designspace")
ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
expected_ttx_path = self.get_test_output("BuildMain.ttx")
self.temp_dir()
- for path in self.get_file_list(ttx_dir, '.ttx', 'TestFamily-'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "TestFamily-"):
self.compile_font(path, ".ttf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -510,7 +630,7 @@ class BuildTest(unittest.TestCase):
ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
expected_ttx_path = self.get_test_output("BuildMain.ttx")
- for path in self.get_file_list(ttx_dir, '.ttx', 'TestFamily-'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "TestFamily-"):
self.compile_font(path, ".ttf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -656,12 +776,12 @@ class BuildTest(unittest.TestCase):
def test_varlib_build_VVAR_CFF2(self):
self.temp_dir()
- ds_path = self.get_test_input('TestVVAR.designspace', copy=True)
+ ds_path = self.get_test_input("TestVVAR.designspace", copy=True)
ttx_dir = self.get_test_input("master_vvar_cff2")
- expected_ttx_name = 'TestVVAR'
- suffix = '.otf'
+ expected_ttx_name = "TestVVAR"
+ suffix = ".otf"
- for path in self.get_file_list(ttx_dir, '.ttx', 'TestVVAR'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "TestVVAR"):
font, savepath = self.compile_font(path, suffix, self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -674,7 +794,7 @@ class BuildTest(unittest.TestCase):
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
- expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
+ expected_ttx_path = self.get_test_output(expected_ttx_name + ".ttx")
tables = ["VVAR"]
self.expect_ttx(varfont, expected_ttx_path, tables)
self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix)
@@ -682,12 +802,12 @@ class BuildTest(unittest.TestCase):
def test_varlib_build_BASE(self):
self.temp_dir()
- ds_path = self.get_test_input('TestBASE.designspace', copy=True)
+ ds_path = self.get_test_input("TestBASE.designspace", copy=True)
ttx_dir = self.get_test_input("master_base_test")
- expected_ttx_name = 'TestBASE'
- suffix = '.otf'
+ expected_ttx_name = "TestBASE"
+ suffix = ".otf"
- for path in self.get_file_list(ttx_dir, '.ttx', 'TestBASE'):
+ for path in self.get_file_list(ttx_dir, ".ttx", "TestBASE"):
font, savepath = self.compile_font(path, suffix, self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
@@ -700,17 +820,17 @@ class BuildTest(unittest.TestCase):
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
- expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
+ expected_ttx_path = self.get_test_output(expected_ttx_name + ".ttx")
tables = ["BASE"]
self.expect_ttx(varfont, expected_ttx_path, tables)
self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix)
def test_varlib_build_single_master(self):
self._run_varlib_build_test(
- designspace_name='SingleMaster',
- font_name='TestFamily',
- tables=['GDEF', 'HVAR', 'MVAR', 'STAT', 'fvar', 'cvar', 'gvar', 'name'],
- expected_ttx_name='SingleMaster',
+ designspace_name="SingleMaster",
+ font_name="TestFamily",
+ tables=["GDEF", "HVAR", "MVAR", "STAT", "fvar", "cvar", "gvar", "name"],
+ expected_ttx_name="SingleMaster",
save_before_dump=True,
)
@@ -718,7 +838,7 @@ class BuildTest(unittest.TestCase):
"""Test the correct merging of class-based pair kerning.
Problem description at https://github.com/fonttools/fonttools/pull/1638.
- Test font and Designspace generated by
+ Test font and Designspace generated by
https://gist.github.com/madig/183d0440c9f7d05f04bd1280b9664bd1.
"""
ds_path = self.get_test_input("KerningMerging.designspace")
@@ -756,7 +876,7 @@ class BuildTest(unittest.TestCase):
assert getattr(class2_zero.Value1, "XAdvDevice", None) is None
# Assert the variable font's kerning table (without deltas) is equal to the
- # default font's kerning table. The bug fixed in
+ # default font's kerning table. The bug fixed in
# https://github.com/fonttools/fonttools/pull/1638 caused rogue kerning
# values to be written to the variable font.
assert _extract_flat_kerning(varfont, class_kerning_table) == {
@@ -816,7 +936,7 @@ class BuildTest(unittest.TestCase):
def test_varlib_build_incompatible_features(self):
with pytest.raises(
varLibErrors.ShouldBeConstant,
- match = """
+ match="""
Couldn't merge the fonts, because some values were different, but should have
been the same. This happened while performing the following operation:
@@ -828,8 +948,8 @@ Expected to see .FeatureCount==2, instead saw 1
Incompatible features between masters.
Expected: kern, mark.
Got: kern.
-"""):
-
+""",
+ ):
self._run_varlib_build_test(
designspace_name="IncompatibleFeatures",
font_name="IncompatibleFeatures",
@@ -840,8 +960,7 @@ Got: kern.
def test_varlib_build_incompatible_lookup_types(self):
with pytest.raises(
- varLibErrors.MismatchedTypes,
- match = r"'MarkBasePos', instead saw 'PairPos'"
+ varLibErrors.MismatchedTypes, match=r"'MarkBasePos', instead saw 'PairPos'"
):
self._run_varlib_build_test(
designspace_name="IncompatibleLookupTypes",
@@ -854,14 +973,14 @@ Got: kern.
def test_varlib_build_incompatible_arrays(self):
with pytest.raises(
varLibErrors.ShouldBeConstant,
- match = """
+ match="""
Couldn't merge the fonts, because some values were different, but should have
been the same. This happened while performing the following operation:
GPOS.table.ScriptList.ScriptCount
The problem is likely to be in Simple Two Axis Bold:
-Expected to see .ScriptCount==1, instead saw 0"""
+Expected to see .ScriptCount==1, instead saw 0""",
):
self._run_varlib_build_test(
designspace_name="IncompatibleArrays",
@@ -873,13 +992,24 @@ Expected to see .ScriptCount==1, instead saw 0"""
def test_varlib_build_variable_colr(self):
self._run_varlib_build_test(
- designspace_name='TestVariableCOLR',
- font_name='TestVariableCOLR',
+ designspace_name="TestVariableCOLR",
+ font_name="TestVariableCOLR",
tables=["GlyphOrder", "fvar", "glyf", "COLR", "CPAL"],
- expected_ttx_name='TestVariableCOLR-VF',
+ expected_ttx_name="TestVariableCOLR-VF",
save_before_dump=True,
)
+ def test_varlib_build_variable_cff2_with_empty_sparse_glyph(self):
+ # https://github.com/fonttools/fonttools/issues/3233
+ self._run_varlib_build_test(
+ designspace_name="SparseCFF2",
+ font_name="SparseCFF2",
+ tables=["GlyphOrder", "CFF2", "fvar", "hmtx", "HVAR"],
+ expected_ttx_name="SparseCFF2-VF",
+ save_before_dump=True,
+ )
+
+
def test_load_masters_layerName_without_required_font():
ds = DesignSpaceDocument()
s = SourceDescriptor()
@@ -985,5 +1115,38 @@ class SetDefaultWeightWidthSlantTest(object):
assert ttFont["post"].italicAngle == -12.0
+def test_variable_COLR_without_VarIndexMap():
+ # test we don't add a no-op VarIndexMap to variable COLR when not needed
+ # https://github.com/fonttools/fonttools/issues/2800
+
+ font1 = TTFont()
+ font1.setGlyphOrder([".notdef", "A"])
+ font1["COLR"] = buildCOLR({"A": (ot.PaintFormat.PaintSolid, 0, 1.0)})
+ # font2 == font1 except for PaintSolid.Alpha
+ font2 = deepcopy(font1)
+ font2["COLR"].table.BaseGlyphList.BaseGlyphPaintRecord[0].Paint.Alpha = 0.0
+ master_fonts = [font1, font2]
+
+ varfont = deepcopy(font1)
+ axis_order = ["XXXX"]
+ model = VariationModel([{}, {"XXXX": 1.0}], axis_order)
+
+ _add_COLR(varfont, model, master_fonts, axis_order)
+
+ colr = varfont["COLR"].table
+
+ assert len(colr.BaseGlyphList.BaseGlyphPaintRecord) == 1
+ baserec = colr.BaseGlyphList.BaseGlyphPaintRecord[0]
+ assert baserec.Paint.Format == ot.PaintFormat.PaintVarSolid
+ assert baserec.Paint.VarIndexBase == 0
+
+ assert colr.VarStore is not None
+ assert len(colr.VarStore.VarData) == 1
+ assert len(colr.VarStore.VarData[0].Item) == 1
+ assert colr.VarStore.VarData[0].Item[0] == [-16384]
+
+ assert colr.VarIndexMap is None
+
+
if __name__ == "__main__":
sys.exit(unittest.main())
diff --git a/Tests/varLib/varStore_test.py b/Tests/varLib/varStore_test.py
index cad8ac73..7eb9d740 100644
--- a/Tests/varLib/varStore_test.py
+++ b/Tests/varLib/varStore_test.py
@@ -1,4 +1,7 @@
import pytest
+from io import StringIO
+from fontTools.misc.xmlWriter import XMLWriter
+from fontTools.misc.roundTools import noRound
from fontTools.varLib.models import VariationModel
from fontTools.varLib.varStore import OnlineVarStoreBuilder, VarStoreInstancer
from fontTools.ttLib import TTFont, newTable
@@ -13,7 +16,7 @@ from fontTools.ttLib.tables.otTables import VarStore
(
[{}, {"a": 1}],
[
- [10, 10], # Test NO_VARIATION_INDEX
+ [10, 10], # Test NO_VARIATION_INDEX
[100, 2000],
[100, 22000],
],
@@ -80,3 +83,204 @@ def buildAxis(axisTag):
axis = Axis()
axis.axisTag = axisTag
return axis
+
+
+@pytest.mark.parametrize(
+ "numRegions, varData, expectedNumVarData, expectedBytes",
+ [
+ (
+ 5,
+ [
+ [10, 10, 0, 0, 20],
+ {3: 300},
+ ],
+ 1,
+ 126,
+ ),
+ (
+ 5,
+ [
+ [10, 10, 0, 0, 20],
+ [10, 11, 0, 0, 20],
+ [10, 12, 0, 0, 20],
+ [10, 13, 0, 0, 20],
+ {3: 300},
+ ],
+ 1,
+ 175,
+ ),
+ (
+ 5,
+ [
+ [10, 11, 0, 0, 20],
+ [10, 300, 0, 0, 20],
+ [10, 301, 0, 0, 20],
+ [10, 302, 0, 0, 20],
+ [10, 303, 0, 0, 20],
+ [10, 304, 0, 0, 20],
+ ],
+ 1,
+ 180,
+ ),
+ (
+ 5,
+ [
+ [0, 11, 12, 0, 20],
+ [0, 13, 12, 0, 20],
+ [0, 14, 12, 0, 20],
+ [0, 15, 12, 0, 20],
+ [0, 16, 12, 0, 20],
+ [10, 300, 0, 0, 20],
+ [10, 301, 0, 0, 20],
+ [10, 302, 0, 0, 20],
+ [10, 303, 0, 0, 20],
+ [10, 304, 0, 0, 20],
+ ],
+ 1,
+ 200,
+ ),
+ (
+ 5,
+ [
+ [0, 11, 12, 0, 20],
+ [0, 13, 12, 0, 20],
+ [0, 14, 12, 0, 20],
+ [0, 15, 12, 0, 20],
+ [0, 16, 12, 0, 20],
+ [0, 17, 12, 0, 20],
+ [0, 18, 12, 0, 20],
+ [0, 19, 12, 0, 20],
+ [0, 20, 12, 0, 20],
+ [10, 300, 0, 0, 20],
+ [10, 301, 0, 0, 20],
+ [10, 302, 0, 0, 20],
+ [10, 303, 0, 0, 20],
+ [10, 304, 0, 0, 20],
+ ],
+ 2,
+ 218,
+ ),
+ (
+ 3,
+ [
+ [10, 10, 10],
+ ],
+ 0,
+ 12,
+ ),
+ ],
+)
+def test_optimize(numRegions, varData, expectedNumVarData, expectedBytes):
+ locations = [{i: i / 16384.0} for i in range(numRegions)]
+ axisTags = sorted({k for loc in locations for k in loc})
+
+ model = VariationModel(locations)
+ builder = OnlineVarStoreBuilder(axisTags)
+ builder.setModel(model)
+
+ for data in varData:
+ if type(data) is dict:
+ newData = [0] * numRegions
+ for k, v in data.items():
+ newData[k] = v
+ data = newData
+
+ builder.storeMasters(data)
+
+ varStore = builder.finish()
+ varStore.optimize()
+
+ dummyFont = TTFont()
+
+ writer = XMLWriter(StringIO())
+ varStore.toXML(writer, dummyFont)
+ xml = writer.file.getvalue()
+
+ assert len(varStore.VarData) == expectedNumVarData, xml
+
+ writer = OTTableWriter()
+ varStore.compile(writer, dummyFont)
+ data = writer.getAllData()
+
+ assert len(data) == expectedBytes, xml
+
+
+@pytest.mark.parametrize(
+ "quantization, expectedBytes",
+ [
+ (1, 200),
+ (2, 180),
+ (3, 170),
+ (4, 175),
+ (8, 170),
+ (32, 92),
+ (64, 56),
+ ],
+)
+def test_quantize(quantization, expectedBytes):
+ varData = [
+ [0, 11, 12, 0, 20],
+ [0, 13, 12, 0, 20],
+ [0, 14, 12, 0, 20],
+ [0, 15, 12, 0, 20],
+ [0, 16, 12, 0, 20],
+ [10, 300, 0, 0, 20],
+ [10, 301, 0, 0, 20],
+ [10, 302, 0, 0, 20],
+ [10, 303, 0, 0, 20],
+ [10, 304, 0, 0, 20],
+ ]
+
+ numRegions = 5
+ locations = [{i: i / 16384.0} for i in range(numRegions)]
+ axisTags = sorted({k for loc in locations for k in loc})
+
+ model = VariationModel(locations)
+
+ builder = OnlineVarStoreBuilder(axisTags)
+ builder.setModel(model)
+
+ for data in varData:
+ builder.storeMasters(data)
+
+ varStore = builder.finish()
+ varStore.optimize(quantization=quantization)
+
+ dummyFont = TTFont()
+
+ writer = XMLWriter(StringIO())
+ varStore.toXML(writer, dummyFont)
+ xml = writer.file.getvalue()
+
+ writer = OTTableWriter()
+ varStore.compile(writer, dummyFont)
+ data = writer.getAllData()
+
+ assert len(data) == expectedBytes, xml
+
+
+def test_optimize_overflow():
+ numRegions = 1
+ locations = [{"wght": 0}, {"wght": 0.5}]
+ axisTags = ["wght"]
+
+ model = VariationModel(locations)
+ builder = OnlineVarStoreBuilder(axisTags)
+ builder.setModel(model)
+
+ for data in range(0, 0xFFFF * 2):
+ data = [0, data]
+ builder.storeMasters(data, round=noRound)
+
+ varStore = builder.finish()
+ varStore.optimize()
+
+ for s in varStore.VarData:
+ print(len(s.Item))
+
+ # 5 data-sets:
+ # - 0..127: 1-byte dataset
+ # - 128..32767: 2-byte dataset
+ # - 32768..32768+65535-1: 4-byte dataset
+ # - 32768+65535..65535+65535-1: 4-byte dataset
+ assert len(varStore.VarData) == 4
diff --git a/Tests/voltLib/data/Empty.ttf b/Tests/voltLib/data/Empty.ttf
new file mode 100644
index 00000000..4eb6d88a
--- /dev/null
+++ b/Tests/voltLib/data/Empty.ttf
Binary files differ
diff --git a/Tests/voltLib/data/NamdhinggoSIL1006.fea b/Tests/voltLib/data/NamdhinggoSIL1006.fea
new file mode 100644
index 00000000..aa8ab1a5
--- /dev/null
+++ b/Tests/voltLib/data/NamdhinggoSIL1006.fea
@@ -0,0 +1,506 @@
+# Glyph classes
+@Cons = [uni1901 uni1902 uni1903 uni1904 uni1905 uni1906 uni1907 uni1908 uni1909 uni190A uni190B uni190C uni190D uni190E uni190F uni1910 uni1911 uni1912 uni1913 uni1914 uni1915 uni1916 uni1917 uni1918 uni1919 uni191A uni191B uni191C uni1940];
+@ConsRaU = [uni1901192A1922 uni1902192A1922 uni1903192A1922 uni1904192A1922 uni1905192A1922 uni1906192A1922 uni1907192A1922 uni1908192A1922 uni1909192A1922 uni190A192A1922 uni190B192A1922 uni190C192A1922 uni190D192A1922 uni190192AE1922 uni190F192A1922 uni1910192A1922 uni1911192A1922 uni1912192A1922 uni1913192A1922 uni1914192A1922 uni1915192A1922 uni1916192A1922 uni1917192A1922 uni1918192A1922 uni1919192A1922 uni1919192A1922 uni191A192A1922 uni191B192A1922 uni191C192A1922 uni1940192A1922];
+@ConsU = [uni19011922 uni19021922 uni19031922 uni19041922 uni19051922 uni19061922 uni19071922 uni19081922 uni19091922 uni190A1922 uni190B1922 uni190C1922 uni190D1922 uni190E1922 uni190F1922 uni19101922 uni19111922 uni19121922 uni19131922 uni19141922 uni19151922 uni19161922 uni19171922 uni19181922 uni19191922 uni191A1922 uni191B1922 uni191C1922 uni19401922];
+@Ikar = [uni1921 uni1921193A];
+@Vowels = [uni1920 uni1927 uni1928];
+@YaWa = [uni1929 uni192B];
+@AllCons = [@Cons @ConsU @ConsRaU];
+@VowelsKem = [@Vowels uni193A];
+
+# Mark classes
+markClass uni1920 <anchor -500 1050> @Aabove;
+markClass uni1922 <anchor -150 -15> @U;
+markClass uni1927 <anchor -300 1050> @eo;
+markClass uni1928 <anchor -190 1050> @eo;
+markClass uni193A <anchor -260 1250> @K;
+markClass uni193A <anchor -260 1250> @VK;
+
+# Lookups
+lookup EEAIDecomp {
+ sub uni1925 by uni1920 uni1923;
+ sub uni1926 by uni1920 uni1924;
+} EEAIDecomp;
+
+lookup OoAuKComp {
+ sub uni1923 uni193A by uni1923193A;
+ sub uni1924 uni193A by uni1924193A;
+} OoAuKComp;
+
+lookup OoAuKDecomp {
+ # The OoAuDecomp substitution rule replaces the OO and AU vowels with their visually constitutent components A plus EE or AI respectively. This is so that the 'A' portion can be positioned independently over the consonant when a Glide occurs between the consonant and the vowel.
+ sub uni1923193A by uni193A uni1923;
+ sub uni1924193A by uni193A uni1924;
+} OoAuKDecomp;
+
+lookup GlideVowelComp {
+ sub uni1929 uni1920 uni193A by uni19291920193A;
+ sub uni1929 uni1922 uni193A by uni19291922193A;
+ sub uni1929 uni1927 uni193A by uni19291927193A;
+ sub uni1929 uni1928 uni193A by uni19291928193A;
+ sub uni1929 uni193A by uni1929193A;
+ sub uni1929 uni1920 by uni19291920;
+ sub uni1929 uni1922 by uni19291922;
+ sub uni1929 uni1927 by uni19291927;
+ sub uni1929 uni1928 by uni19291928;
+ sub uni192B uni1920 uni193A by uni192B1920193A;
+ sub uni192B uni1922 uni193A by uni192B1922193A;
+ sub uni192B uni1927 uni193A by uni192B1927193A;
+ sub uni192B uni1928 uni193A by uni192B1928193A;
+ sub uni192B uni193A by uni192B193A;
+ sub uni192B uni1920 by uni192B1920;
+ sub uni192B uni1922 by uni192B1922;
+ sub uni192B uni1927 by uni192B1927;
+ sub uni192B uni1928 by uni192B1928;
+} GlideVowelComp;
+
+lookup GlideVowelDecomp {
+ sub uni19291920193A by uni1920 uni193A uni1929;
+ sub uni19291922193A by uni1922 uni193A uni1929;
+ sub uni19291927193A by uni1927 uni193A uni1929;
+ sub uni19291928193A by uni1928 uni193A uni1929;
+ sub uni1929193A by uni193A uni1929;
+ sub uni19291920 by uni1920 uni1929;
+ sub uni19291922 by uni1922 uni1929;
+ sub uni19291927 by uni1927 uni1929;
+ sub uni19291928 by uni1928 uni1929;
+ sub uni192B1920193A by uni1920 uni193A uni192B;
+ sub uni192B1922193A by uni1922 uni193A uni192B;
+ sub uni192B1927193A by uni1927 uni193A uni192B;
+ sub uni192B1928193A by uni1928 uni193A uni192B;
+ sub uni192B193A by uni193A uni192B;
+ sub uni192B1920 by uni1920 uni192B;
+ sub uni192B1922 by uni1922 uni192B;
+ sub uni192B1927 by uni1927 uni192B;
+ sub uni192B1928 by uni1928 uni192B;
+} GlideVowelDecomp;
+
+lookup RaUkar {
+ # The RaUkar substitution rule replaces Consonant, Ra, Ukar with a ligature.
+ sub @Cons uni192A uni1922 by @ConsRaU;
+} RaUkar;
+
+lookup Ukar {
+ # The Ukar substitution rule replaces Consonant + Ukar with a ligature. It also applies to the Vowel-Carrier, which has its own ligature with ukar.
+ sub @Cons uni1922 by @ConsU;
+ sub uni1900 uni1922 by uni19001922;
+} Ukar;
+
+lookup IkarK {
+ # The IkarK substitution rule replaces Ikar + Kemphreng with a ligature. The ligature is then positioned properly on the base consonant via the positioning rule IEO.
+ sub uni1921 uni193A by uni1921193A;
+} IkarK;
+
+lookup GlideIkar_target {
+ pos @YaWa -475;
+} GlideIkar_target;
+
+lookup GlideIkar {
+ pos [@YaWa]' lookup GlideIkar_target @Ikar;
+} GlideIkar;
+
+lookup IkarKWid_target {
+ pos uni1921193A 110;
+} IkarKWid_target;
+
+lookup IkarKWid {
+ # The IkarKWid lookup, applied to the Kern feature, adds 110 units of width to the IkarKemphreng ligature when followed by a consonant with akar on it. This prevents the akar from overprinting the rightmost dot of the kemphreng. (The dot overhangs to the right slightly, which is OK unless the following character has akar on it).
+ pos [uni1921193A]' lookup IkarKWid_target @Cons uni1920;
+} IkarKWid;
+
+lookup Akar {
+ # The Akar positioning rule positions the Akar on all consonants.
+ pos base uni1901
+ <anchor 487 1050> mark @Aabove;
+ pos base uni1902
+ <anchor 622 1050> mark @Aabove;
+ pos base uni1903
+ <anchor 475 1050> mark @Aabove;
+ pos base uni1904
+ <anchor 460 1050> mark @Aabove;
+ pos base uni1905
+ <anchor 590 1050> mark @Aabove;
+ pos base uni1906
+ <anchor 519 1050> mark @Aabove;
+ pos base uni1907
+ <anchor 570 1050> mark @Aabove;
+ pos base uni1908
+ <anchor 564 1050> mark @Aabove;
+ pos base uni1909
+ <anchor 430 1050> mark @Aabove;
+ pos base uni190A
+ <anchor 575 1050> mark @Aabove;
+ pos base uni190B
+ <anchor 450 1050> mark @Aabove;
+ pos base uni190C
+ <anchor 556 1050> mark @Aabove;
+ pos base uni190D
+ <anchor 515 1050> mark @Aabove;
+ pos base uni190E
+ <anchor 510 1050> mark @Aabove;
+ pos base uni190F
+ <anchor 497 1050> mark @Aabove;
+ pos base uni1910
+ <anchor 657 1050> mark @Aabove;
+ pos base uni1911
+ <anchor 690 1050> mark @Aabove;
+ pos base uni1912
+ <anchor 538 1050> mark @Aabove;
+ pos base uni1913
+ <anchor 571 1050> mark @Aabove;
+ pos base uni1914
+ <anchor 538 1050> mark @Aabove;
+ pos base uni1915
+ <anchor 470 1050> mark @Aabove;
+ pos base uni1916
+ <anchor 503 1050> mark @Aabove;
+ pos base uni1917
+ <anchor 548 1050> mark @Aabove;
+ pos base uni1918
+ <anchor 511 1050> mark @Aabove;
+ pos base uni1919
+ <anchor 560 1050> mark @Aabove;
+ pos base uni191A
+ <anchor 420 1050> mark @Aabove;
+ pos base uni191B
+ <anchor 580 1050> mark @Aabove;
+ pos base uni191C
+ <anchor 540 1050> mark @Aabove;
+ pos base uni1940
+ <anchor 480 1050> mark @Aabove;
+} Akar;
+
+lookup Kemphreng {
+ # The Kemphreng positioning rule positions the Kemphreng on all consonants, including the vowel carrier.
+ pos base uni1901
+ <anchor 500 1050> mark @K;
+ pos base uni1902
+ <anchor 680 1050> mark @K;
+ pos base uni1903
+ <anchor 540 1050> mark @K;
+ pos base uni1904
+ <anchor 500 1050> mark @K;
+ pos base uni1905
+ <anchor 590 1050> mark @K;
+ pos base uni1906
+ <anchor 540 1050> mark @K;
+ pos base uni1907
+ <anchor 620 1050> mark @K;
+ pos base uni1908
+ <anchor 580 1050> mark @K;
+ pos base uni1909
+ <anchor 450 1050> mark @K;
+ pos base uni190A
+ <anchor 580 1050> mark @K;
+ pos base uni190B
+ <anchor 450 1050> mark @K;
+ pos base uni190C
+ <anchor 656 1050> mark @K;
+ pos base uni190D
+ <anchor 570 1050> mark @K;
+ pos base uni190E
+ <anchor 530 1050> mark @K;
+ pos base uni190F
+ <anchor 515 1050> mark @K;
+ pos base uni1910
+ <anchor 680 1050> mark @K;
+ pos base uni1911
+ <anchor 720 1050> mark @K;
+ pos base uni1912
+ <anchor 580 1050> mark @K;
+ pos base uni1913
+ <anchor 600 1050> mark @K;
+ pos base uni1914
+ <anchor 560 1050> mark @K;
+ pos base uni1915
+ <anchor 480 1050> mark @K;
+ pos base uni1916
+ <anchor 520 1050> mark @K;
+ pos base uni1917
+ <anchor 585 1050> mark @K;
+ pos base uni1918
+ <anchor 610 1050> mark @K;
+ pos base uni1919
+ <anchor 520 1050> mark @K;
+ pos base uni191A
+ <anchor 440 1050> mark @K;
+ pos base uni191B
+ <anchor 600 1050> mark @K;
+ pos base uni191C
+ <anchor 600 1050> mark @K;
+ pos base uni1940
+ <anchor 490 1050> mark @K;
+ pos base uni19011922
+ <anchor 500 1050> mark @K;
+ pos base uni19021922
+ <anchor 680 1050> mark @K;
+ pos base uni19031922
+ <anchor 540 1050> mark @K;
+ pos base uni19041922
+ <anchor 500 1050> mark @K;
+ pos base uni19051922
+ <anchor 590 1050> mark @K;
+ pos base uni19061922
+ <anchor 540 1050> mark @K;
+ pos base uni19071922
+ <anchor 620 1050> mark @K;
+ pos base uni19081922
+ <anchor 580 1050> mark @K;
+ pos base uni19091922
+ <anchor 450 1050> mark @K;
+ pos base uni190A1922
+ <anchor 580 1050> mark @K;
+ pos base uni190B1922
+ <anchor 450 1050> mark @K;
+ pos base uni190C1922
+ <anchor 656 1050> mark @K;
+ pos base uni190D1922
+ <anchor 570 1050> mark @K;
+ pos base uni190E1922
+ <anchor 530 1050> mark @K;
+ pos base uni190F1922
+ <anchor 515 1050> mark @K;
+ pos base uni19101922
+ <anchor 680 1050> mark @K;
+ pos base uni19111922
+ <anchor 720 1050> mark @K;
+ pos base uni19121922
+ <anchor 580 1050> mark @K;
+ pos base uni19131922
+ <anchor 600 1050> mark @K;
+ pos base uni19141922
+ <anchor 560 1050> mark @K;
+ pos base uni19151922
+ <anchor 480 1050> mark @K;
+ pos base uni19161922
+ <anchor 520 1050> mark @K;
+ pos base uni19171922
+ <anchor 585 1050> mark @K;
+ pos base uni19181922
+ <anchor 610 1050> mark @K;
+ pos base uni19191922
+ <anchor 520 1050> mark @K;
+ pos base uni191A1922
+ <anchor 440 1050> mark @K;
+ pos base uni191B1922
+ <anchor 600 1050> mark @K;
+ pos base uni191C1922
+ <anchor 600 1050> mark @K;
+ pos base uni19401922
+ <anchor 490 1050> mark @K;
+ pos base uni1901192A1922
+ <anchor 500 1050> mark @K;
+ pos base uni1902192A1922
+ <anchor 680 1050> mark @K;
+ pos base uni1903192A1922
+ <anchor 540 1050> mark @K;
+ pos base uni1904192A1922
+ <anchor 500 1050> mark @K;
+ pos base uni1905192A1922
+ <anchor 590 1050> mark @K;
+ pos base uni1906192A1922
+ <anchor 540 1050> mark @K;
+ pos base uni1907192A1922
+ <anchor 620 1050> mark @K;
+ pos base uni1908192A1922
+ <anchor 580 1050> mark @K;
+ pos base uni1909192A1922
+ <anchor 450 1050> mark @K;
+ pos base uni190A192A1922
+ <anchor 580 1050> mark @K;
+ pos base uni190B192A1922
+ <anchor 450 1050> mark @K;
+ pos base uni190C192A1922
+ <anchor 656 1050> mark @K;
+ pos base uni190D192A1922
+ <anchor 570 1050> mark @K;
+ pos base uni190192AE1922
+ <anchor 530 1050> mark @K;
+ pos base uni190F192A1922
+ <anchor 515 1050> mark @K;
+ pos base uni1910192A1922
+ <anchor 680 1050> mark @K;
+ pos base uni1911192A1922
+ <anchor 720 1050> mark @K;
+ pos base uni1912192A1922
+ <anchor 580 1050> mark @K;
+ pos base uni1913192A1922
+ <anchor 600 1050> mark @K;
+ pos base uni1914192A1922
+ <anchor 560 1050> mark @K;
+ pos base uni1915192A1922
+ <anchor 480 1050> mark @K;
+ pos base uni1916192A1922
+ <anchor 520 1050> mark @K;
+ pos base uni1917192A1922
+ <anchor 585 1050> mark @K;
+ pos base uni1918192A1922
+ <anchor 610 1050> mark @K;
+ pos base uni1919192A1922
+ <anchor 520 1050> mark @K;
+ pos base uni191A192A1922
+ <anchor 440 1050> mark @K;
+ pos base uni191B192A1922
+ <anchor 600 1050> mark @K;
+ pos base uni191C192A1922
+ <anchor 600 1050> mark @K;
+ pos base uni1940192A1922
+ <anchor 490 1050> mark @K;
+ pos base uni1900
+ <anchor 525 1050> mark @K;
+} Kemphreng;
+
+lookup EO {
+ # The IEO positioning rule positions ikar (including the ligature with kemphreng), e and o on all consonants plus the vowel carrier.
+ pos base uni1901
+ <anchor 755 1050> mark @eo;
+ pos base uni1902
+ <anchor 943 1050> mark @eo;
+ pos base uni1903
+ <anchor 790 1050> mark @eo;
+ pos base uni1904
+ <anchor 780 1050> mark @eo;
+ pos base uni1905
+ <anchor 790 1050> mark @eo;
+ pos base uni1906
+ <anchor 878 1050> mark @eo;
+ pos base uni1907
+ <anchor 825 1050> mark @eo;
+ pos base uni1908
+ <anchor 968 1050> mark @eo;
+ pos base uni1909
+ <anchor 660 1050> mark @eo;
+ pos base uni190A
+ <anchor 569 1050> mark @eo;
+ pos base uni190B
+ <anchor 690 1050> mark @eo;
+ pos base uni190C
+ <anchor 649 1050> mark @eo;
+ pos base uni190D
+ <anchor 682 1050> mark @eo;
+ pos base uni190E
+ <anchor 680 1050> mark @eo;
+ pos base uni190F
+ <anchor 778 1050> mark @eo;
+ pos base uni1910
+ <anchor 920 1050> mark @eo;
+ pos base uni1911
+ <anchor 894 1050> mark @eo;
+ pos base uni1912
+ <anchor 782 1050> mark @eo;
+ pos base uni1913
+ <anchor 982 1050> mark @eo;
+ pos base uni1914
+ <anchor 917 1050> mark @eo;
+ pos base uni1915
+ <anchor 730 1050> mark @eo;
+ pos base uni1916
+ <anchor 767 1050> mark @eo;
+ pos base uni1917
+ <anchor 937 1050> mark @eo;
+ pos base uni1918
+ <anchor 862 1050> mark @eo;
+ pos base uni1919
+ <anchor 670 1050> mark @eo;
+ pos base uni191A
+ <anchor 682 1050> mark @eo;
+ pos base uni191B
+ <anchor 921 1050> mark @eo;
+ pos base uni191C
+ <anchor 870 1050> mark @eo;
+ pos base uni1940
+ <anchor 650 1050> mark @eo;
+ pos base uni1900
+ <anchor 810 1050> mark @eo;
+} EO;
+
+lookup VKem {
+ lookupflag MarkAttachmentType @VowelsKem;
+ # The VKem positioning rule positions the kemphreng on all upper vowels (except ikar, which has its own ligature). The vowel itself is positioned on the consonant with the Akar or IEO positioning rule.
+ pos mark uni1920
+ <anchor -260 1250> mark @VK;
+ pos mark uni1927
+ <anchor -300 1250> mark @VK;
+ pos mark uni1928
+ <anchor -150 1455> mark @VK;
+} VKem;
+
+lookup GlideU {
+ # The GlideU positioning rule positions the ukar on the glides Ya and Wa. (There is already a ligature for each consonant with the Ra+Ukar combination).
+ pos base uni1929
+ <anchor -135 -40> mark @U;
+ pos base uni192B
+ <anchor -135 -40> mark @U;
+} GlideU;
+
+# Features
+feature ccmp {
+ script latn;
+ language dflt;
+ lookup EEAIDecomp;
+ lookup OoAuKComp;
+ lookup OoAuKDecomp;
+ lookup GlideVowelComp;
+ lookup GlideVowelDecomp;
+ script limb;
+ language dflt;
+ lookup EEAIDecomp;
+ lookup OoAuKComp;
+ lookup OoAuKDecomp;
+ lookup GlideVowelComp;
+ lookup GlideVowelDecomp;
+} ccmp;
+
+feature kern {
+ script latn;
+ language dflt;
+ lookup GlideIkar;
+ lookup IkarKWid;
+ script limb;
+ language dflt;
+ lookup GlideIkar;
+ lookup IkarKWid;
+} kern;
+
+feature mark {
+ script latn;
+ language dflt;
+ lookup Akar;
+ lookup Kemphreng;
+ lookup EO;
+ script limb;
+ language dflt;
+ lookup Akar;
+ lookup Kemphreng;
+ lookup EO;
+} mark;
+
+feature mkmk {
+ script latn;
+ language dflt;
+ lookup VKem;
+ lookup GlideU;
+ script limb;
+ language dflt;
+ lookup VKem;
+ lookup GlideU;
+} mkmk;
+
+feature liga {
+ script latn;
+ language dflt;
+ lookup RaUkar;
+ lookup Ukar;
+ lookup IkarK;
+ script limb;
+ language dflt;
+ lookup RaUkar;
+ lookup Ukar;
+ lookup IkarK;
+} liga;
+
+@GDEF_base = [glyph0 .null CR space exclam quotedbl numbersign dollar percent quotesingle parenleft parenright asterisk plus comma hyphen period slash zero one two three four five six seven eight nine colon semicolon less equal greater question at A B C D E F G H I J K L M N O P Q R S T U V W X Y Z bracketleft backslash bracketright asciicircum underscore grave a b c d e f g h i j k l m n o p q r s t u v w x y z braceleft bar braceright asciitilde uni0965 uni1900 uni19001922 uni1901 uni19011922 uni1901192A1922 uni1902 uni19021922 uni1902192A1922 uni1903 uni19031922 uni1903192A1922 uni1904 uni19041922 uni1904192A1922 uni1905 uni19051922 uni1905192A1922 uni1906 uni19061922 uni1906192A1922 uni1907 uni19071922 uni1907192A1922 uni1908 uni19081922 uni1908192A1922 uni1909 uni19091922 uni1909192A1922 uni190A uni190A1922 uni190A192A1922 uni190B uni190B1922 uni190B192A1922 uni190C uni190C1922 uni190C192A1922 uni190D uni190D1922 uni190D192A1922 uni190E uni190E1922 uni190192AE1922 uni190F uni190F1922 uni190F192A1922 uni1910 uni19101922 uni1910192A1922 uni1911 uni19111922 uni1911192A1922 uni1912 uni19121922 uni1912192A1922 uni1913 uni19131922 uni1913192A1922 uni1914 uni19141922 uni1914192A1922 uni1915 uni19151922 uni1915192A1922 uni1916 uni19161922 uni1916192A1922 uni1917 uni19171922 uni1917192A1922 uni1918 uni19181922 uni1918192A1922 uni1919 uni19191922 uni1919192A1922 uni191A uni191A1922 uni191A192A1922 uni191B uni191B1922 uni191B192A1922 uni191C uni191C1922 uni191C192A1922 uni1921 uni1923 uni1924 uni1929 uni192B uni1930 uni1931 uni1932 uni1933 uni1934 uni1935 uni1936 uni1937 uni1938 uni1939 uni1940 uni19401922 uni1940192A1922 uni1944 uni1945 uni1946 uni1947 uni1948 uni1949 uni194A uni194B uni194C uni194D uni194E uni194F quoteleft quoteright quotedblleft quotedblright uni1921193A ampersand uni2009 endash emdash uni202F uni1923193A uni1924193A uni19291920 uni19291922 uni19291927 uni19291928 uni1929193A uni19291920193A uni19291922193A uni19291927193A uni19291928193A 
uni192B1920 uni192B1922 uni192B1927 uni192B1928 uni192B193A uni192B1920193A uni192B1922193A uni192B1927193A uni192B1928193A uni25CC uni191E uni191E1922 uni191E192A1922 uni191D uni191D1922 uni191D192A1922];
+@GDEF_mark = [uni1920 uni1920.widC uni1920.widD uni1922 uni1922.altA uni1922.altB uni1922.altC uni1925 uni1926 uni1927 uni1928 uni192A uni193A uni193A.widC uni193B uni193B.widA uni193B.widB uni193B.widC uni192A1922];
+table GDEF {
+ GlyphClassDef @GDEF_base, , @GDEF_mark, ;
+} GDEF;
diff --git a/Tests/voltLib/data/NamdhinggoSIL1006.vtp b/Tests/voltLib/data/NamdhinggoSIL1006.vtp
new file mode 100644
index 00000000..7f2072b0
--- /dev/null
+++ b/Tests/voltLib/data/NamdhinggoSIL1006.vtp
@@ -0,0 +1 @@
+ DEF_GLYPH "glyph0" ID 0 TYPE BASE END_GLYPH DEF_GLYPH ".null" ID 1 TYPE BASE END_GLYPH DEF_GLYPH "CR" ID 2 TYPE BASE END_GLYPH DEF_GLYPH "space" ID 3 UNICODE 32 TYPE BASE END_GLYPH DEF_GLYPH "exclam" ID 4 UNICODE 33 TYPE BASE END_GLYPH DEF_GLYPH "quotedbl" ID 5 UNICODE 34 TYPE BASE END_GLYPH DEF_GLYPH "numbersign" ID 6 UNICODE 35 TYPE BASE END_GLYPH DEF_GLYPH "dollar" ID 7 UNICODE 36 TYPE BASE END_GLYPH DEF_GLYPH "percent" ID 8 UNICODE 37 TYPE BASE END_GLYPH DEF_GLYPH "quotesingle" ID 9 UNICODE 39 TYPE BASE END_GLYPH DEF_GLYPH "parenleft" ID 10 UNICODE 40 TYPE BASE END_GLYPH DEF_GLYPH "parenright" ID 11 UNICODE 41 TYPE BASE END_GLYPH DEF_GLYPH "asterisk" ID 12 UNICODE 42 TYPE BASE END_GLYPH DEF_GLYPH "plus" ID 13 UNICODE 43 TYPE BASE END_GLYPH DEF_GLYPH "comma" ID 14 UNICODE 44 TYPE BASE END_GLYPH DEF_GLYPH "hyphen" ID 15 UNICODE 45 TYPE BASE END_GLYPH DEF_GLYPH "period" ID 16 UNICODE 46 TYPE BASE END_GLYPH DEF_GLYPH "slash" ID 17 UNICODE 47 TYPE BASE END_GLYPH DEF_GLYPH "zero" ID 18 UNICODE 48 TYPE BASE END_GLYPH DEF_GLYPH "one" ID 19 UNICODE 49 TYPE BASE END_GLYPH DEF_GLYPH "two" ID 20 UNICODE 50 TYPE BASE END_GLYPH DEF_GLYPH "three" ID 21 UNICODE 51 TYPE BASE END_GLYPH DEF_GLYPH "four" ID 22 UNICODE 52 TYPE BASE END_GLYPH DEF_GLYPH "five" ID 23 UNICODE 53 TYPE BASE END_GLYPH DEF_GLYPH "six" ID 24 UNICODE 54 TYPE BASE END_GLYPH DEF_GLYPH "seven" ID 25 UNICODE 55 TYPE BASE END_GLYPH DEF_GLYPH "eight" ID 26 UNICODE 56 TYPE BASE END_GLYPH DEF_GLYPH "nine" ID 27 UNICODE 57 TYPE BASE END_GLYPH DEF_GLYPH "colon" ID 28 UNICODE 58 TYPE BASE END_GLYPH DEF_GLYPH "semicolon" ID 29 UNICODE 59 TYPE BASE END_GLYPH DEF_GLYPH "less" ID 30 UNICODE 60 TYPE BASE END_GLYPH DEF_GLYPH "equal" ID 31 UNICODE 61 TYPE BASE END_GLYPH DEF_GLYPH "greater" ID 32 UNICODE 62 TYPE BASE END_GLYPH DEF_GLYPH "question" ID 33 UNICODE 63 TYPE BASE END_GLYPH DEF_GLYPH "at" ID 34 UNICODE 64 TYPE BASE END_GLYPH DEF_GLYPH "A" ID 35 UNICODE 65 TYPE BASE END_GLYPH DEF_GLYPH "B" ID 36 UNICODE 66 TYPE BASE 
END_GLYPH DEF_GLYPH "C" ID 37 UNICODE 67 TYPE BASE END_GLYPH DEF_GLYPH "D" ID 38 UNICODE 68 TYPE BASE END_GLYPH DEF_GLYPH "E" ID 39 UNICODE 69 TYPE BASE END_GLYPH DEF_GLYPH "F" ID 40 UNICODE 70 TYPE BASE END_GLYPH DEF_GLYPH "G" ID 41 UNICODE 71 TYPE BASE END_GLYPH DEF_GLYPH "H" ID 42 UNICODE 72 TYPE BASE END_GLYPH DEF_GLYPH "I" ID 43 UNICODE 73 TYPE BASE END_GLYPH DEF_GLYPH "J" ID 44 UNICODE 74 TYPE BASE END_GLYPH DEF_GLYPH "K" ID 45 UNICODE 75 TYPE BASE END_GLYPH DEF_GLYPH "L" ID 46 UNICODE 76 TYPE BASE END_GLYPH DEF_GLYPH "M" ID 47 UNICODE 77 TYPE BASE END_GLYPH DEF_GLYPH "N" ID 48 UNICODE 78 TYPE BASE END_GLYPH DEF_GLYPH "O" ID 49 UNICODE 79 TYPE BASE END_GLYPH DEF_GLYPH "P" ID 50 UNICODE 80 TYPE BASE END_GLYPH DEF_GLYPH "Q" ID 51 UNICODE 81 TYPE BASE END_GLYPH DEF_GLYPH "R" ID 52 UNICODE 82 TYPE BASE END_GLYPH DEF_GLYPH "S" ID 53 UNICODE 83 TYPE BASE END_GLYPH DEF_GLYPH "T" ID 54 UNICODE 84 TYPE BASE END_GLYPH DEF_GLYPH "U" ID 55 UNICODE 85 TYPE BASE END_GLYPH DEF_GLYPH "V" ID 56 UNICODE 86 TYPE BASE END_GLYPH DEF_GLYPH "W" ID 57 UNICODE 87 TYPE BASE END_GLYPH DEF_GLYPH "X" ID 58 UNICODE 88 TYPE BASE END_GLYPH DEF_GLYPH "Y" ID 59 UNICODE 89 TYPE BASE END_GLYPH DEF_GLYPH "Z" ID 60 UNICODE 90 TYPE BASE END_GLYPH DEF_GLYPH "bracketleft" ID 61 UNICODE 91 TYPE BASE END_GLYPH DEF_GLYPH "backslash" ID 62 UNICODE 92 TYPE BASE END_GLYPH DEF_GLYPH "bracketright" ID 63 UNICODE 93 TYPE BASE END_GLYPH DEF_GLYPH "asciicircum" ID 64 UNICODE 94 TYPE BASE END_GLYPH DEF_GLYPH "underscore" ID 65 UNICODE 95 TYPE BASE END_GLYPH DEF_GLYPH "grave" ID 66 UNICODE 96 TYPE BASE END_GLYPH DEF_GLYPH "a" ID 67 UNICODE 97 TYPE BASE END_GLYPH DEF_GLYPH "b" ID 68 UNICODE 98 TYPE BASE END_GLYPH DEF_GLYPH "c" ID 69 UNICODE 99 TYPE BASE END_GLYPH DEF_GLYPH "d" ID 70 UNICODE 100 TYPE BASE END_GLYPH DEF_GLYPH "e" ID 71 UNICODE 101 TYPE BASE END_GLYPH DEF_GLYPH "f" ID 72 UNICODE 102 TYPE BASE END_GLYPH DEF_GLYPH "g" ID 73 UNICODE 103 TYPE BASE END_GLYPH DEF_GLYPH "h" ID 74 UNICODE 104 TYPE BASE 
END_GLYPH DEF_GLYPH "i" ID 75 UNICODE 105 TYPE BASE END_GLYPH DEF_GLYPH "j" ID 76 UNICODE 106 TYPE BASE END_GLYPH DEF_GLYPH "k" ID 77 UNICODE 107 TYPE BASE END_GLYPH DEF_GLYPH "l" ID 78 UNICODE 108 TYPE BASE END_GLYPH DEF_GLYPH "m" ID 79 UNICODE 109 TYPE BASE END_GLYPH DEF_GLYPH "n" ID 80 UNICODE 110 TYPE BASE END_GLYPH DEF_GLYPH "o" ID 81 UNICODE 111 TYPE BASE END_GLYPH DEF_GLYPH "p" ID 82 UNICODE 112 TYPE BASE END_GLYPH DEF_GLYPH "q" ID 83 UNICODE 113 TYPE BASE END_GLYPH DEF_GLYPH "r" ID 84 UNICODE 114 TYPE BASE END_GLYPH DEF_GLYPH "s" ID 85 UNICODE 115 TYPE BASE END_GLYPH DEF_GLYPH "t" ID 86 UNICODE 116 TYPE BASE END_GLYPH DEF_GLYPH "u" ID 87 UNICODE 117 TYPE BASE END_GLYPH DEF_GLYPH "v" ID 88 UNICODE 118 TYPE BASE END_GLYPH DEF_GLYPH "w" ID 89 UNICODE 119 TYPE BASE END_GLYPH DEF_GLYPH "x" ID 90 UNICODE 120 TYPE BASE END_GLYPH DEF_GLYPH "y" ID 91 UNICODE 121 TYPE BASE END_GLYPH DEF_GLYPH "z" ID 92 UNICODE 122 TYPE BASE END_GLYPH DEF_GLYPH "braceleft" ID 93 UNICODE 123 TYPE BASE END_GLYPH DEF_GLYPH "bar" ID 94 UNICODE 124 TYPE BASE END_GLYPH DEF_GLYPH "braceright" ID 95 UNICODE 125 TYPE BASE END_GLYPH DEF_GLYPH "asciitilde" ID 96 UNICODE 126 TYPE BASE END_GLYPH DEF_GLYPH "uni0965" ID 97 UNICODE 2405 TYPE BASE END_GLYPH DEF_GLYPH "uni1900" ID 98 UNICODE 6400 TYPE BASE END_GLYPH DEF_GLYPH "uni19001922" ID 99 TYPE BASE END_GLYPH DEF_GLYPH "uni1901" ID 100 UNICODE 6401 TYPE BASE END_GLYPH DEF_GLYPH "uni19011922" ID 101 TYPE BASE END_GLYPH DEF_GLYPH "uni1901192A1922" ID 102 TYPE BASE END_GLYPH DEF_GLYPH "uni1902" ID 103 UNICODE 6402 TYPE BASE END_GLYPH DEF_GLYPH "uni19021922" ID 104 TYPE BASE END_GLYPH DEF_GLYPH "uni1902192A1922" ID 105 TYPE BASE END_GLYPH DEF_GLYPH "uni1903" ID 106 UNICODE 6403 TYPE BASE END_GLYPH DEF_GLYPH "uni19031922" ID 107 TYPE BASE END_GLYPH DEF_GLYPH "uni1903192A1922" ID 108 TYPE BASE END_GLYPH DEF_GLYPH "uni1904" ID 109 UNICODE 6404 TYPE BASE END_GLYPH DEF_GLYPH "uni19041922" ID 110 TYPE BASE END_GLYPH DEF_GLYPH "uni1904192A1922" ID 111 TYPE 
BASE END_GLYPH DEF_GLYPH "uni1905" ID 112 UNICODE 6405 TYPE BASE END_GLYPH DEF_GLYPH "uni19051922" ID 113 TYPE BASE END_GLYPH DEF_GLYPH "uni1905192A1922" ID 114 TYPE BASE END_GLYPH DEF_GLYPH "uni1906" ID 115 UNICODE 6406 TYPE BASE END_GLYPH DEF_GLYPH "uni19061922" ID 116 TYPE BASE END_GLYPH DEF_GLYPH "uni1906192A1922" ID 117 TYPE BASE END_GLYPH DEF_GLYPH "uni1907" ID 118 UNICODE 6407 TYPE BASE END_GLYPH DEF_GLYPH "uni19071922" ID 119 TYPE BASE END_GLYPH DEF_GLYPH "uni1907192A1922" ID 120 TYPE BASE END_GLYPH DEF_GLYPH "uni1908" ID 121 UNICODE 6408 TYPE BASE END_GLYPH DEF_GLYPH "uni19081922" ID 122 TYPE BASE END_GLYPH DEF_GLYPH "uni1908192A1922" ID 123 TYPE BASE END_GLYPH DEF_GLYPH "uni1909" ID 124 UNICODE 6409 TYPE BASE END_GLYPH DEF_GLYPH "uni19091922" ID 125 TYPE BASE END_GLYPH DEF_GLYPH "uni1909192A1922" ID 126 TYPE BASE END_GLYPH DEF_GLYPH "uni190A" ID 127 UNICODE 6410 TYPE BASE END_GLYPH DEF_GLYPH "uni190A1922" ID 128 TYPE BASE END_GLYPH DEF_GLYPH "uni190A192A1922" ID 129 TYPE BASE END_GLYPH DEF_GLYPH "uni190B" ID 130 UNICODE 6411 TYPE BASE END_GLYPH DEF_GLYPH "uni190B1922" ID 131 TYPE BASE END_GLYPH DEF_GLYPH "uni190B192A1922" ID 132 TYPE BASE END_GLYPH DEF_GLYPH "uni190C" ID 133 UNICODE 6412 TYPE BASE END_GLYPH DEF_GLYPH "uni190C1922" ID 134 TYPE BASE END_GLYPH DEF_GLYPH "uni190C192A1922" ID 135 TYPE BASE END_GLYPH DEF_GLYPH "uni190D" ID 136 UNICODE 6413 TYPE BASE END_GLYPH DEF_GLYPH "uni190D1922" ID 137 TYPE BASE END_GLYPH DEF_GLYPH "uni190D192A1922" ID 138 TYPE BASE END_GLYPH DEF_GLYPH "uni190E" ID 139 UNICODE 6414 TYPE BASE END_GLYPH DEF_GLYPH "uni190E1922" ID 140 TYPE BASE END_GLYPH DEF_GLYPH "uni190192AE1922" ID 141 TYPE BASE END_GLYPH DEF_GLYPH "uni190F" ID 142 UNICODE 6415 TYPE BASE END_GLYPH DEF_GLYPH "uni190F1922" ID 143 TYPE BASE END_GLYPH DEF_GLYPH "uni190F192A1922" ID 144 TYPE BASE END_GLYPH DEF_GLYPH "uni1910" ID 145 UNICODE 6416 TYPE BASE END_GLYPH DEF_GLYPH "uni19101922" ID 146 TYPE BASE END_GLYPH DEF_GLYPH "uni1910192A1922" ID 147 TYPE BASE 
END_GLYPH DEF_GLYPH "uni1911" ID 148 UNICODE 6417 TYPE BASE END_GLYPH DEF_GLYPH "uni19111922" ID 149 TYPE BASE END_GLYPH DEF_GLYPH "uni1911192A1922" ID 150 TYPE BASE END_GLYPH DEF_GLYPH "uni1912" ID 151 UNICODE 6418 TYPE BASE END_GLYPH DEF_GLYPH "uni19121922" ID 152 TYPE BASE END_GLYPH DEF_GLYPH "uni1912192A1922" ID 153 TYPE BASE END_GLYPH DEF_GLYPH "uni1913" ID 154 UNICODE 6419 TYPE BASE END_GLYPH DEF_GLYPH "uni19131922" ID 155 TYPE BASE END_GLYPH DEF_GLYPH "uni1913192A1922" ID 156 TYPE BASE END_GLYPH DEF_GLYPH "uni1914" ID 157 UNICODE 6420 TYPE BASE END_GLYPH DEF_GLYPH "uni19141922" ID 158 TYPE BASE END_GLYPH DEF_GLYPH "uni1914192A1922" ID 159 TYPE BASE END_GLYPH DEF_GLYPH "uni1915" ID 160 UNICODE 6421 TYPE BASE END_GLYPH DEF_GLYPH "uni19151922" ID 161 TYPE BASE END_GLYPH DEF_GLYPH "uni1915192A1922" ID 162 TYPE BASE END_GLYPH DEF_GLYPH "uni1916" ID 163 UNICODE 6422 TYPE BASE END_GLYPH DEF_GLYPH "uni19161922" ID 164 TYPE BASE END_GLYPH DEF_GLYPH "uni1916192A1922" ID 165 TYPE BASE END_GLYPH DEF_GLYPH "uni1917" ID 166 UNICODE 6423 TYPE BASE END_GLYPH DEF_GLYPH "uni19171922" ID 167 TYPE BASE END_GLYPH DEF_GLYPH "uni1917192A1922" ID 168 TYPE BASE END_GLYPH DEF_GLYPH "uni1918" ID 169 UNICODE 6424 TYPE BASE END_GLYPH DEF_GLYPH "uni19181922" ID 170 TYPE BASE END_GLYPH DEF_GLYPH "uni1918192A1922" ID 171 TYPE BASE END_GLYPH DEF_GLYPH "uni1919" ID 172 UNICODE 6425 TYPE BASE END_GLYPH DEF_GLYPH "uni19191922" ID 173 TYPE BASE END_GLYPH DEF_GLYPH "uni1919192A1922" ID 174 TYPE BASE END_GLYPH DEF_GLYPH "uni191A" ID 175 UNICODE 6426 TYPE BASE END_GLYPH DEF_GLYPH "uni191A1922" ID 176 TYPE BASE END_GLYPH DEF_GLYPH "uni191A192A1922" ID 177 TYPE BASE END_GLYPH DEF_GLYPH "uni191B" ID 178 UNICODE 6427 TYPE BASE END_GLYPH DEF_GLYPH "uni191B1922" ID 179 TYPE BASE END_GLYPH DEF_GLYPH "uni191B192A1922" ID 180 TYPE BASE END_GLYPH DEF_GLYPH "uni191C" ID 181 UNICODE 6428 TYPE BASE END_GLYPH DEF_GLYPH "uni191C1922" ID 182 TYPE BASE END_GLYPH DEF_GLYPH "uni191C192A1922" ID 183 TYPE BASE 
END_GLYPH DEF_GLYPH "uni1920" ID 184 UNICODE 6432 TYPE MARK END_GLYPH DEF_GLYPH "uni1920.widC" ID 185 TYPE MARK END_GLYPH DEF_GLYPH "uni1920.widD" ID 186 TYPE MARK END_GLYPH DEF_GLYPH "uni1921" ID 187 UNICODE 6433 TYPE BASE END_GLYPH DEF_GLYPH "uni1922" ID 188 UNICODE 6434 TYPE MARK END_GLYPH DEF_GLYPH "uni1922.altA" ID 189 TYPE MARK END_GLYPH DEF_GLYPH "uni1922.altB" ID 190 TYPE MARK END_GLYPH DEF_GLYPH "uni1922.altC" ID 191 TYPE MARK END_GLYPH DEF_GLYPH "uni1923" ID 192 UNICODE 6435 TYPE BASE END_GLYPH DEF_GLYPH "uni1924" ID 193 UNICODE 6436 TYPE BASE END_GLYPH DEF_GLYPH "uni1925" ID 194 UNICODE 6437 TYPE MARK END_GLYPH DEF_GLYPH "uni1926" ID 195 UNICODE 6438 TYPE MARK END_GLYPH DEF_GLYPH "uni1927" ID 196 UNICODE 6439 TYPE MARK END_GLYPH DEF_GLYPH "uni1928" ID 197 UNICODE 6440 TYPE MARK END_GLYPH DEF_GLYPH "uni1929" ID 198 UNICODE 6441 TYPE BASE END_GLYPH DEF_GLYPH "uni192A" ID 199 UNICODE 6442 TYPE MARK END_GLYPH DEF_GLYPH "uni192B" ID 200 UNICODE 6443 TYPE BASE END_GLYPH DEF_GLYPH "uni1930" ID 201 UNICODE 6448 TYPE BASE END_GLYPH DEF_GLYPH "uni1931" ID 202 UNICODE 6449 TYPE BASE END_GLYPH DEF_GLYPH "uni1932" ID 203 UNICODE 6450 TYPE BASE END_GLYPH DEF_GLYPH "uni1933" ID 204 UNICODE 6451 TYPE BASE END_GLYPH DEF_GLYPH "uni1934" ID 205 UNICODE 6452 TYPE BASE END_GLYPH DEF_GLYPH "uni1935" ID 206 UNICODE 6453 TYPE BASE END_GLYPH DEF_GLYPH "uni1936" ID 207 UNICODE 6454 TYPE BASE END_GLYPH DEF_GLYPH "uni1937" ID 208 UNICODE 6455 TYPE BASE END_GLYPH DEF_GLYPH "uni1938" ID 209 UNICODE 6456 TYPE BASE END_GLYPH DEF_GLYPH "uni1939" ID 210 UNICODE 6457 TYPE BASE END_GLYPH DEF_GLYPH "uni193A" ID 211 UNICODE 6458 TYPE MARK END_GLYPH DEF_GLYPH "uni193A.widC" ID 212 TYPE MARK END_GLYPH DEF_GLYPH "uni193B" ID 213 UNICODE 6459 TYPE MARK END_GLYPH DEF_GLYPH "uni193B.widA" ID 214 TYPE MARK END_GLYPH DEF_GLYPH "uni193B.widB" ID 215 TYPE MARK END_GLYPH DEF_GLYPH "uni193B.widC" ID 216 TYPE MARK END_GLYPH DEF_GLYPH "uni1940" ID 217 UNICODE 6464 TYPE BASE END_GLYPH DEF_GLYPH 
"uni19401922" ID 218 TYPE BASE END_GLYPH DEF_GLYPH "uni1940192A1922" ID 219 TYPE BASE END_GLYPH DEF_GLYPH "uni1944" ID 220 UNICODE 6468 TYPE BASE END_GLYPH DEF_GLYPH "uni1945" ID 221 UNICODE 6469 TYPE BASE END_GLYPH DEF_GLYPH "uni1946" ID 222 UNICODE 6470 TYPE BASE END_GLYPH DEF_GLYPH "uni1947" ID 223 UNICODE 6471 TYPE BASE END_GLYPH DEF_GLYPH "uni1948" ID 224 UNICODE 6472 TYPE BASE END_GLYPH DEF_GLYPH "uni1949" ID 225 UNICODE 6473 TYPE BASE END_GLYPH DEF_GLYPH "uni194A" ID 226 UNICODE 6474 TYPE BASE END_GLYPH DEF_GLYPH "uni194B" ID 227 UNICODE 6475 TYPE BASE END_GLYPH DEF_GLYPH "uni194C" ID 228 UNICODE 6476 TYPE BASE END_GLYPH DEF_GLYPH "uni194D" ID 229 UNICODE 6477 TYPE BASE END_GLYPH DEF_GLYPH "uni194E" ID 230 UNICODE 6478 TYPE BASE END_GLYPH DEF_GLYPH "uni194F" ID 231 UNICODE 6479 TYPE BASE END_GLYPH DEF_GLYPH "quoteleft" ID 232 UNICODE 8216 TYPE BASE END_GLYPH DEF_GLYPH "quoteright" ID 233 UNICODE 8217 TYPE BASE END_GLYPH DEF_GLYPH "quotedblleft" ID 234 UNICODE 8220 TYPE BASE END_GLYPH DEF_GLYPH "quotedblright" ID 235 UNICODE 8221 TYPE BASE END_GLYPH DEF_GLYPH "uni1921193A" ID 236 TYPE BASE END_GLYPH DEF_GLYPH "uni192A1922" ID 237 TYPE MARK END_GLYPH DEF_GLYPH "ampersand" ID 238 UNICODE 38 TYPE BASE END_GLYPH DEF_GLYPH "uni2009" ID 239 UNICODE 8201 TYPE BASE END_GLYPH DEF_GLYPH "endash" ID 240 UNICODE 8211 TYPE BASE END_GLYPH DEF_GLYPH "emdash" ID 241 UNICODE 8212 TYPE BASE END_GLYPH DEF_GLYPH "uni202F" ID 242 UNICODE 8239 TYPE BASE END_GLYPH DEF_GLYPH "uni1923193A" ID 243 TYPE BASE END_GLYPH DEF_GLYPH "uni1924193A" ID 244 TYPE BASE END_GLYPH DEF_GLYPH "uni19291920" ID 245 TYPE BASE END_GLYPH DEF_GLYPH "uni19291922" ID 246 TYPE BASE END_GLYPH DEF_GLYPH "uni19291927" ID 247 TYPE BASE END_GLYPH DEF_GLYPH "uni19291928" ID 248 TYPE BASE END_GLYPH DEF_GLYPH "uni1929193A" ID 249 TYPE BASE END_GLYPH DEF_GLYPH "uni19291920193A" ID 250 TYPE BASE END_GLYPH DEF_GLYPH "uni19291922193A" ID 251 TYPE BASE END_GLYPH DEF_GLYPH "uni19291927193A" ID 252 TYPE BASE END_GLYPH 
DEF_GLYPH "uni19291928193A" ID 253 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1920" ID 254 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1922" ID 255 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1927" ID 256 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1928" ID 257 TYPE BASE END_GLYPH DEF_GLYPH "uni192B193A" ID 258 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1920193A" ID 259 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1922193A" ID 260 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1927193A" ID 261 TYPE BASE END_GLYPH DEF_GLYPH "uni192B1928193A" ID 262 TYPE BASE END_GLYPH DEF_GLYPH "uni25CC" ID 263 UNICODE 9676 TYPE BASE END_GLYPH DEF_GLYPH "uni191E" ID 264 UNICODE 6430 TYPE BASE END_GLYPH DEF_GLYPH "uni191E1922" ID 265 TYPE BASE END_GLYPH DEF_GLYPH "uni191E192A1922" ID 266 TYPE BASE END_GLYPH DEF_GLYPH "uni191D" ID 267 UNICODE 6429 TYPE BASE END_GLYPH DEF_GLYPH "uni191D1922" ID 268 TYPE BASE END_GLYPH DEF_GLYPH "uni191D192A1922" ID 269 TYPE BASE END_GLYPH DEF_SCRIPT NAME "Latin" TAG "latn" DEF_LANGSYS NAME "Default" TAG "dflt" DEF_FEATURE NAME "Glyph Composition/Decomposition" TAG "ccmp" LOOKUP "EEAIDecomp" LOOKUP "OoAuKComp" LOOKUP "OoAuKDecomp" LOOKUP "GlideVowelComp" LOOKUP "GlideVowelDecomp" END_FEATURE DEF_FEATURE NAME "Kerning" TAG "kern" LOOKUP "GlideIkar" LOOKUP "IkarKWid" END_FEATURE DEF_FEATURE NAME "Mark Positioning" TAG "mark" LOOKUP "Akar" LOOKUP "Kemphreng" LOOKUP "EO" END_FEATURE DEF_FEATURE NAME "Mark to Mark Positioning" TAG "mkmk" LOOKUP "VKem" LOOKUP "GlideU" END_FEATURE DEF_FEATURE NAME "Standard Ligatures" TAG "liga" LOOKUP "RaUkar" LOOKUP "Ukar" LOOKUP "IkarK" END_FEATURE END_LANGSYS END_SCRIPT DEF_SCRIPT NAME "Limbu" TAG "limb" DEF_LANGSYS NAME "Default" TAG "dflt" DEF_FEATURE NAME "Glyph Composition/Decomposition" TAG "ccmp" LOOKUP "EEAIDecomp" LOOKUP "OoAuKComp" LOOKUP "OoAuKDecomp" LOOKUP "GlideVowelComp" LOOKUP "GlideVowelDecomp" END_FEATURE DEF_FEATURE NAME "Kerning" TAG "kern" LOOKUP "GlideIkar" LOOKUP "IkarKWid" END_FEATURE DEF_FEATURE NAME "Mark Positioning" TAG "mark" LOOKUP 
"Akar" LOOKUP "Kemphreng" LOOKUP "EO" END_FEATURE DEF_FEATURE NAME "Mark to Mark Positioning" TAG "mkmk" LOOKUP "VKem" LOOKUP "GlideU" END_FEATURE DEF_FEATURE NAME "Standard Ligatures" TAG "liga" LOOKUP "RaUkar" LOOKUP "Ukar" LOOKUP "IkarK" END_FEATURE END_LANGSYS END_SCRIPT DEF_GROUP "AllCons" ENUM GROUP "cons" GROUP "ConsU" GROUP "ConsRaU" END_ENUM END_GROUP DEF_GROUP "Cons" ENUM GLYPH "uni1901" GLYPH "uni1902" GLYPH "uni1903" GLYPH "uni1904" GLYPH "uni1905" GLYPH "uni1906" GLYPH "uni1907" GLYPH "uni1908" GLYPH "uni1909" GLYPH "uni190A" GLYPH "uni190B" GLYPH "uni190C" GLYPH "uni190D" GLYPH "uni190E" GLYPH "uni190F" GLYPH "uni1910" GLYPH "uni1911" GLYPH "uni1912" GLYPH "uni1913" GLYPH "uni1914" GLYPH "uni1915" GLYPH "uni1916" GLYPH "uni1917" GLYPH "uni1918" GLYPH "uni1919" GLYPH "uni191A" GLYPH "uni191B" GLYPH "uni191C" GLYPH "uni1940" END_ENUM END_GROUP DEF_GROUP "ConsRaU" ENUM GLYPH "uni1901192A1922" GLYPH "uni1902192A1922" GLYPH "uni1903192A1922" GLYPH "uni1904192A1922" GLYPH "uni1905192A1922" GLYPH "uni1906192A1922" GLYPH "uni1907192A1922" GLYPH "uni1908192A1922" GLYPH "uni1909192A1922" GLYPH "uni190A192A1922" GLYPH "uni190B192A1922" GLYPH "uni190C192A1922" GLYPH "uni190D192A1922" GLYPH "uni190192AE1922" GLYPH "uni190F192A1922" GLYPH "uni1910192A1922" GLYPH "uni1911192A1922" GLYPH "uni1912192A1922" GLYPH "uni1913192A1922" GLYPH "uni1914192A1922" GLYPH "uni1915192A1922" GLYPH "uni1916192A1922" GLYPH "uni1917192A1922" GLYPH "uni1918192A1922" GLYPH "uni1919192A1922" GLYPH "uni1919192A1922" GLYPH "uni191A192A1922" GLYPH "uni191B192A1922" GLYPH "uni191C192A1922" GLYPH "uni1940192A1922" END_ENUM END_GROUP DEF_GROUP "ConsU" ENUM GLYPH "uni19011922" GLYPH "uni19021922" GLYPH "uni19031922" GLYPH "uni19041922" GLYPH "uni19051922" GLYPH "uni19061922" GLYPH "uni19071922" GLYPH "uni19081922" GLYPH "uni19091922" GLYPH "uni190A1922" GLYPH "uni190B1922" GLYPH "uni190C1922" GLYPH "uni190D1922" GLYPH "uni190E1922" GLYPH "uni190F1922" GLYPH "uni19101922" GLYPH "uni19111922" 
GLYPH "uni19121922" GLYPH "uni19131922" GLYPH "uni19141922" GLYPH "uni19151922" GLYPH "uni19161922" GLYPH "uni19171922" GLYPH "uni19181922" GLYPH "uni19191922" GLYPH "uni191A1922" GLYPH "uni191B1922" GLYPH "uni191C1922" GLYPH "uni19401922" END_ENUM END_GROUP DEF_GROUP "Ikar" ENUM GLYPH "uni1921" GLYPH "uni1921193A" END_ENUM END_GROUP DEF_GROUP "Vowels" ENUM GLYPH "uni1920" GLYPH "uni1927" GLYPH "uni1928" END_ENUM END_GROUP DEF_GROUP "VowelsKem" ENUM GROUP "Vowels" GLYPH "uni193A" END_ENUM END_GROUP DEF_GROUP "YaWa" ENUM GLYPH "uni1929" GLYPH "uni192B" END_ENUM END_GROUP DEF_LOOKUP "EEAIDecomp" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GLYPH "uni1925" WITH GLYPH "uni1920" GLYPH "uni1923" END_SUB SUB GLYPH "uni1926" WITH GLYPH "uni1920" GLYPH "uni1924" END_SUB END_SUBSTITUTION DEF_LOOKUP "OoAuKComp" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GLYPH "uni1923" GLYPH "uni193A" WITH GLYPH "uni1923193A" END_SUB SUB GLYPH "uni1924" GLYPH "uni193A" WITH GLYPH "uni1924193A" END_SUB END_SUBSTITUTION DEF_LOOKUP "OoAuKDecomp" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The OoAuDecomp substitution rule replaces the OO and AU vowels with their visually constitutent components A plus EE or AI respectively. This is so that the 'A' portion can be positioned independently over the consonant when a Glide occurs between the consonant and the vowel." 
IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GLYPH "uni1923193A" WITH GLYPH "uni193A" GLYPH "uni1923" END_SUB SUB GLYPH "uni1924193A" WITH GLYPH "uni193A" GLYPH "uni1924" END_SUB END_SUBSTITUTION DEF_LOOKUP "GlideVowelComp" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GLYPH "uni1929" GLYPH "uni1920" GLYPH "uni193A" WITH GLYPH "uni19291920193A" END_SUB SUB GLYPH "uni1929" GLYPH "uni1922" GLYPH "uni193A" WITH GLYPH "uni19291922193A" END_SUB SUB GLYPH "uni1929" GLYPH "uni1927" GLYPH "uni193A" WITH GLYPH "uni19291927193A" END_SUB SUB GLYPH "uni1929" GLYPH "uni1928" GLYPH "uni193A" WITH GLYPH "uni19291928193A" END_SUB SUB GLYPH "uni1929" GLYPH "uni193A" WITH GLYPH "uni1929193A" END_SUB SUB GLYPH "uni1929" GLYPH "uni1920" WITH GLYPH "uni19291920" END_SUB SUB GLYPH "uni1929" GLYPH "uni1922" WITH GLYPH "uni19291922" END_SUB SUB GLYPH "uni1929" GLYPH "uni1927" WITH GLYPH "uni19291927" END_SUB SUB GLYPH "uni1929" GLYPH "uni1928" WITH GLYPH "uni19291928" END_SUB SUB GLYPH "uni192B" GLYPH "uni1920" GLYPH "uni193A" WITH GLYPH "uni192B1920193A" END_SUB SUB GLYPH "uni192B" GLYPH "uni1922" GLYPH "uni193A" WITH GLYPH "uni192B1922193A" END_SUB SUB GLYPH "uni192B" GLYPH "uni1927" GLYPH "uni193A" WITH GLYPH "uni192B1927193A" END_SUB SUB GLYPH "uni192B" GLYPH "uni1928" GLYPH "uni193A" WITH GLYPH "uni192B1928193A" END_SUB SUB GLYPH "uni192B" GLYPH "uni193A" WITH GLYPH "uni192B193A" END_SUB SUB GLYPH "uni192B" GLYPH "uni1920" WITH GLYPH "uni192B1920" END_SUB SUB GLYPH "uni192B" GLYPH "uni1922" WITH GLYPH "uni192B1922" END_SUB SUB GLYPH "uni192B" GLYPH "uni1927" WITH GLYPH "uni192B1927" END_SUB SUB GLYPH "uni192B" GLYPH "uni1928" WITH GLYPH "uni192B1928" END_SUB END_SUBSTITUTION DEF_LOOKUP "GlideVowelDecomp" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GLYPH "uni19291920193A" WITH GLYPH "uni1920" GLYPH "uni193A" GLYPH "uni1929" END_SUB SUB GLYPH "uni19291922193A" WITH GLYPH "uni1922" GLYPH "uni193A" GLYPH 
"uni1929" END_SUB SUB GLYPH "uni19291927193A" WITH GLYPH "uni1927" GLYPH "uni193A" GLYPH "uni1929" END_SUB SUB GLYPH "uni19291928193A" WITH GLYPH "uni1928" GLYPH "uni193A" GLYPH "uni1929" END_SUB SUB GLYPH "uni1929193A" WITH GLYPH "uni193A" GLYPH "uni1929" END_SUB SUB GLYPH "uni19291920" WITH GLYPH "uni1920" GLYPH "uni1929" END_SUB SUB GLYPH "uni19291922" WITH GLYPH "uni1922" GLYPH "uni1929" END_SUB SUB GLYPH "uni19291927" WITH GLYPH "uni1927" GLYPH "uni1929" END_SUB SUB GLYPH "uni19291928" WITH GLYPH "uni1928" GLYPH "uni1929" END_SUB SUB GLYPH "uni192B1920193A" WITH GLYPH "uni1920" GLYPH "uni193A" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B1922193A" WITH GLYPH "uni1922" GLYPH "uni193A" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B1927193A" WITH GLYPH "uni1927" GLYPH "uni193A" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B1928193A" WITH GLYPH "uni1928" GLYPH "uni193A" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B193A" WITH GLYPH "uni193A" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B1920" WITH GLYPH "uni1920" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B1922" WITH GLYPH "uni1922" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B1927" WITH GLYPH "uni1927" GLYPH "uni192B" END_SUB SUB GLYPH "uni192B1928" WITH GLYPH "uni1928" GLYPH "uni192B" END_SUB END_SUBSTITUTION DEF_LOOKUP "RaUkar" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The RaUkar substitution rule replaces Consonant, Ra, Ukar with a ligature." IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GROUP "Cons" GLYPH "uni192A" GLYPH "uni1922" WITH GROUP "ConsRaU" END_SUB SUB WITH END_SUB END_SUBSTITUTION DEF_LOOKUP "Ukar" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The Ukar substitution rule replaces Consonant + Ukar with a ligature. It also applies to the Vowel-Carrier, which has its own ligature with ukar." 
IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GROUP "Cons" GLYPH "uni1922" WITH GROUP "ConsU" END_SUB SUB GLYPH "uni1900" GLYPH "uni1922" WITH GLYPH "uni19001922" END_SUB END_SUBSTITUTION DEF_LOOKUP "IkarK" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The IkarK substitution rule replaces Ikar + Kemphreng with a ligature. The ligature is then positioned properly on the base consonant via the positioning rule IEO." IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GLYPH "uni1921" GLYPH "uni193A" WITH GLYPH "uni1921193A" END_SUB END_SUBSTITUTION DEF_LOOKUP "GlideIkar" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "Ikar" END_CONTEXT AS_POSITION ADJUST_SINGLE GROUP "YaWa" BY POS ADV -475 END_POS END_ADJUST END_POSITION DEF_LOOKUP "IkarKWid" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The IkarKWid lookup, applied to the Kern feature, adds 110 units of width to the IkarKemphreng ligature when followed by a consonant with akar on it. This prevents the akar from overprinting the rightmost dot of the kemphreng. (The dot overhangs to the right slightly, which is OK unless the following character has akar on it)." IN_CONTEXT RIGHT GROUP "cons" RIGHT GLYPH "uni1920" END_CONTEXT AS_POSITION ADJUST_SINGLE GLYPH "uni1921193A" BY POS ADV 110 END_POS END_ADJUST END_POSITION DEF_LOOKUP "Akar" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The Akar positioning rule positions the Akar on all consonants." IN_CONTEXT END_CONTEXT AS_POSITION ATTACH GROUP "cons" TO GLYPH "uni1920" AT ANCHOR "Aabove" END_ATTACH END_POSITION DEF_LOOKUP "Kemphreng" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The Kemphreng positioning rule positions the Kemphreng on all consonants, including the vowel carrier." 
IN_CONTEXT END_CONTEXT AS_POSITION ATTACH GROUP "AllCons" GLYPH "uni1900" TO GLYPH "uni193A" AT ANCHOR "K" GLYPH "uni193A" AT ANCHOR "K" END_ATTACH END_POSITION DEF_LOOKUP "EO" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The IEO positioning rule positions ikar (including the ligature with kemphreng), e and o on all consonants plus the vowel carrier." IN_CONTEXT END_CONTEXT AS_POSITION ATTACH GROUP "cons" GLYPH "uni1900" TO GLYPH "uni1927" AT ANCHOR "eo" GLYPH "uni1928" AT ANCHOR "eo" END_ATTACH END_POSITION DEF_LOOKUP "VKem" PROCESS_BASE PROCESS_MARKS "VowelsKem" DIRECTION LTR COMMENTS "The VKem positioning rule positions the kemphreng on all upper vowels (except ikar, which has its own ligature). The vowel itself is positioned on the consonant with the Akar or IEO positioning rule." IN_CONTEXT END_CONTEXT AS_POSITION ATTACH GROUP "Vowels" TO GLYPH "uni193A" AT ANCHOR "VK" END_ATTACH END_POSITION DEF_LOOKUP "GlideU" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR COMMENTS "The GlideU positioning rule positions the ukar on the glides Ya and Wa. (There is already a ligature for each consonant with the Ra+Ukar combination)." 
IN_CONTEXT END_CONTEXT AS_POSITION ATTACH GROUP "YaWa" TO GLYPH "uni1922" AT ANCHOR "U" END_ATTACH END_POSITION DEF_ANCHOR "MARK_Aabove" ON 184 GLYPH uni1920 COMPONENT 1 LOCKED AT POS DX -500 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 100 GLYPH uni1901 COMPONENT 1 AT POS DX 487 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 103 GLYPH uni1902 COMPONENT 1 AT POS DX 622 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 106 GLYPH uni1903 COMPONENT 1 AT POS DX 475 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 109 GLYPH uni1904 COMPONENT 1 AT POS DX 460 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 112 GLYPH uni1905 COMPONENT 1 AT POS DX 590 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 115 GLYPH uni1906 COMPONENT 1 AT POS DX 519 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 118 GLYPH uni1907 COMPONENT 1 AT POS DX 570 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 121 GLYPH uni1908 COMPONENT 1 AT POS DX 564 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 124 GLYPH uni1909 COMPONENT 1 AT POS DX 430 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 127 GLYPH uni190A COMPONENT 1 AT POS DX 575 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 130 GLYPH uni190B COMPONENT 1 AT POS DX 450 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 133 GLYPH uni190C COMPONENT 1 AT POS DX 556 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 136 GLYPH uni190D COMPONENT 1 AT POS DX 515 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 139 GLYPH uni190E COMPONENT 1 AT POS DX 510 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 142 GLYPH uni190F COMPONENT 1 AT POS DX 497 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 145 GLYPH uni1910 COMPONENT 1 AT POS DX 657 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 148 GLYPH uni1911 COMPONENT 1 AT POS DX 690 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 151 GLYPH uni1912 COMPONENT 1 AT POS DX 538 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 154 GLYPH uni1913 COMPONENT 1 AT POS DX 571 DY 1050 END_POS 
END_ANCHOR DEF_ANCHOR "Aabove" ON 157 GLYPH uni1914 COMPONENT 1 AT POS DX 538 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 160 GLYPH uni1915 COMPONENT 1 AT POS DX 470 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 163 GLYPH uni1916 COMPONENT 1 AT POS DX 503 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 166 GLYPH uni1917 COMPONENT 1 AT POS DX 548 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 169 GLYPH uni1918 COMPONENT 1 AT POS DX 511 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 172 GLYPH uni1919 COMPONENT 1 AT POS DX 560 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 175 GLYPH uni191A COMPONENT 1 AT POS DX 420 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 178 GLYPH uni191B COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 181 GLYPH uni191C COMPONENT 1 AT POS DX 540 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "Aabove" ON 217 GLYPH uni1940 COMPONENT 1 AT POS DX 480 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "MARK_K" ON 211 GLYPH uni193A COMPONENT 1 LOCKED AT POS DX -260 DY 1250 END_POS END_ANCHOR DEF_ANCHOR "K" ON 100 GLYPH uni1901 COMPONENT 1 AT POS DX 500 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 101 GLYPH uni19011922 COMPONENT 1 AT POS DX 500 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 102 GLYPH uni1901192A1922 COMPONENT 1 AT POS DX 500 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 103 GLYPH uni1902 COMPONENT 1 AT POS DX 680 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 104 GLYPH uni19021922 COMPONENT 1 AT POS DX 680 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 105 GLYPH uni1902192A1922 COMPONENT 1 AT POS DX 680 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 106 GLYPH uni1903 COMPONENT 1 AT POS DX 540 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 107 GLYPH uni19031922 COMPONENT 1 AT POS DX 540 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 108 GLYPH uni1903192A1922 COMPONENT 1 AT POS DX 540 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 109 GLYPH uni1904 COMPONENT 1 AT POS DX 500 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 
110 GLYPH uni19041922 COMPONENT 1 AT POS DX 500 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 111 GLYPH uni1904192A1922 COMPONENT 1 AT POS DX 500 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 112 GLYPH uni1905 COMPONENT 1 AT POS DX 590 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 113 GLYPH uni19051922 COMPONENT 1 AT POS DX 590 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 114 GLYPH uni1905192A1922 COMPONENT 1 AT POS DX 590 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 115 GLYPH uni1906 COMPONENT 1 AT POS DX 540 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 116 GLYPH uni19061922 COMPONENT 1 AT POS DX 540 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 117 GLYPH uni1906192A1922 COMPONENT 1 AT POS DX 540 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 118 GLYPH uni1907 COMPONENT 1 AT POS DX 620 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 119 GLYPH uni19071922 COMPONENT 1 AT POS DX 620 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 120 GLYPH uni1907192A1922 COMPONENT 1 AT POS DX 620 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 121 GLYPH uni1908 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 122 GLYPH uni19081922 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 123 GLYPH uni1908192A1922 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 124 GLYPH uni1909 COMPONENT 1 AT POS DX 450 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 125 GLYPH uni19091922 COMPONENT 1 AT POS DX 450 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 126 GLYPH uni1909192A1922 COMPONENT 1 AT POS DX 450 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 127 GLYPH uni190A COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 128 GLYPH uni190A1922 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 129 GLYPH uni190A192A1922 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 130 GLYPH uni190B COMPONENT 1 AT POS DX 450 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 131 GLYPH uni190B1922 COMPONENT 1 AT POS DX 
450 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 132 GLYPH uni190B192A1922 COMPONENT 1 AT POS DX 450 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 133 GLYPH uni190C COMPONENT 1 AT POS DX 656 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 134 GLYPH uni190C1922 COMPONENT 1 AT POS DX 656 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 135 GLYPH uni190C192A1922 COMPONENT 1 AT POS DX 656 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 136 GLYPH uni190D COMPONENT 1 AT POS DX 570 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 137 GLYPH uni190D1922 COMPONENT 1 AT POS DX 570 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 138 GLYPH uni190D192A1922 COMPONENT 1 AT POS DX 570 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 139 GLYPH uni190E COMPONENT 1 AT POS DX 530 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 140 GLYPH uni190E1922 COMPONENT 1 AT POS DX 530 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 141 GLYPH uni190192AE1922 COMPONENT 1 AT POS DX 530 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 142 GLYPH uni190F COMPONENT 1 AT POS DX 515 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 143 GLYPH uni190F1922 COMPONENT 1 AT POS DX 515 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 144 GLYPH uni190F192A1922 COMPONENT 1 AT POS DX 515 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 145 GLYPH uni1910 COMPONENT 1 AT POS DX 680 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 146 GLYPH uni19101922 COMPONENT 1 AT POS DX 680 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 147 GLYPH uni1910192A1922 COMPONENT 1 AT POS DX 680 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 148 GLYPH uni1911 COMPONENT 1 AT POS DX 720 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 149 GLYPH uni19111922 COMPONENT 1 AT POS DX 720 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 150 GLYPH uni1911192A1922 COMPONENT 1 AT POS DX 720 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 151 GLYPH uni1912 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 152 GLYPH uni19121922 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" 
ON 153 GLYPH uni1912192A1922 COMPONENT 1 AT POS DX 580 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 154 GLYPH uni1913 COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 155 GLYPH uni19131922 COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 156 GLYPH uni1913192A1922 COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 157 GLYPH uni1914 COMPONENT 1 AT POS DX 560 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 158 GLYPH uni19141922 COMPONENT 1 AT POS DX 560 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 159 GLYPH uni1914192A1922 COMPONENT 1 AT POS DX 560 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 160 GLYPH uni1915 COMPONENT 1 AT POS DX 480 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 161 GLYPH uni19151922 COMPONENT 1 AT POS DX 480 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 162 GLYPH uni1915192A1922 COMPONENT 1 AT POS DX 480 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 163 GLYPH uni1916 COMPONENT 1 AT POS DX 520 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 164 GLYPH uni19161922 COMPONENT 1 AT POS DX 520 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 165 GLYPH uni1916192A1922 COMPONENT 1 AT POS DX 520 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 166 GLYPH uni1917 COMPONENT 1 AT POS DX 585 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 167 GLYPH uni19171922 COMPONENT 1 AT POS DX 585 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 168 GLYPH uni1917192A1922 COMPONENT 1 AT POS DX 585 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 169 GLYPH uni1918 COMPONENT 1 AT POS DX 610 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 170 GLYPH uni19181922 COMPONENT 1 AT POS DX 610 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 171 GLYPH uni1918192A1922 COMPONENT 1 AT POS DX 610 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 172 GLYPH uni1919 COMPONENT 1 AT POS DX 520 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 173 GLYPH uni19191922 COMPONENT 1 AT POS DX 520 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 174 GLYPH uni1919192A1922 COMPONENT 1 AT 
POS DX 520 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 175 GLYPH uni191A COMPONENT 1 AT POS DX 440 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 176 GLYPH uni191A1922 COMPONENT 1 AT POS DX 440 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 177 GLYPH uni191A192A1922 COMPONENT 1 AT POS DX 440 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 178 GLYPH uni191B COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 179 GLYPH uni191B1922 COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 180 GLYPH uni191B192A1922 COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 181 GLYPH uni191C COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 182 GLYPH uni191C1922 COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 183 GLYPH uni191C192A1922 COMPONENT 1 AT POS DX 600 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 217 GLYPH uni1940 COMPONENT 1 AT POS DX 490 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 218 GLYPH uni19401922 COMPONENT 1 AT POS DX 490 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 219 GLYPH uni1940192A1922 COMPONENT 1 AT POS DX 490 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "K" ON 98 GLYPH uni1900 COMPONENT 1 AT POS DX 525 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "MARK_eo" ON 196 GLYPH uni1927 COMPONENT 1 AT POS DX -300 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 100 GLYPH uni1901 COMPONENT 1 AT POS DX 755 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 103 GLYPH uni1902 COMPONENT 1 AT POS DX 943 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 106 GLYPH uni1903 COMPONENT 1 AT POS DX 790 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 109 GLYPH uni1904 COMPONENT 1 AT POS DX 780 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 112 GLYPH uni1905 COMPONENT 1 AT POS DX 790 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 115 GLYPH uni1906 COMPONENT 1 AT POS DX 878 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 118 GLYPH uni1907 COMPONENT 1 AT POS DX 825 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 121 GLYPH 
uni1908 COMPONENT 1 AT POS DX 968 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 124 GLYPH uni1909 COMPONENT 1 AT POS DX 660 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 127 GLYPH uni190A COMPONENT 1 AT POS DX 569 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 130 GLYPH uni190B COMPONENT 1 AT POS DX 690 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 133 GLYPH uni190C COMPONENT 1 AT POS DX 649 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 136 GLYPH uni190D COMPONENT 1 AT POS DX 682 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 139 GLYPH uni190E COMPONENT 1 AT POS DX 680 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 142 GLYPH uni190F COMPONENT 1 AT POS DX 778 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 145 GLYPH uni1910 COMPONENT 1 AT POS DX 920 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 148 GLYPH uni1911 COMPONENT 1 AT POS DX 894 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 151 GLYPH uni1912 COMPONENT 1 AT POS DX 782 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 154 GLYPH uni1913 COMPONENT 1 AT POS DX 982 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 157 GLYPH uni1914 COMPONENT 1 AT POS DX 917 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 160 GLYPH uni1915 COMPONENT 1 AT POS DX 730 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 163 GLYPH uni1916 COMPONENT 1 AT POS DX 767 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 166 GLYPH uni1917 COMPONENT 1 AT POS DX 937 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 169 GLYPH uni1918 COMPONENT 1 AT POS DX 862 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 172 GLYPH uni1919 COMPONENT 1 AT POS DX 670 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 175 GLYPH uni191A COMPONENT 1 AT POS DX 682 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 178 GLYPH uni191B COMPONENT 1 AT POS DX 921 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 181 GLYPH uni191C COMPONENT 1 AT POS DX 870 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 217 GLYPH uni1940 COMPONENT 1 AT POS DX 650 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "eo" ON 98 GLYPH uni1900 COMPONENT 1 
AT POS DX 810 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "MARK_eo" ON 197 GLYPH uni1928 COMPONENT 1 AT POS DX -190 DY 1050 END_POS END_ANCHOR DEF_ANCHOR "MARK_VK" ON 211 GLYPH uni193A COMPONENT 1 LOCKED AT POS DX -260 DY 1250 END_POS END_ANCHOR DEF_ANCHOR "VK" ON 184 GLYPH uni1920 COMPONENT 1 AT POS DX -260 DY 1250 END_POS END_ANCHOR DEF_ANCHOR "VK" ON 196 GLYPH uni1927 COMPONENT 1 AT POS DX -300 DY 1250 END_POS END_ANCHOR DEF_ANCHOR "VK" ON 197 GLYPH uni1928 COMPONENT 1 AT POS DX -150 DY 1455 END_POS END_ANCHOR DEF_ANCHOR "MARK_U" ON 188 GLYPH uni1922 COMPONENT 1 LOCKED AT POS DX -150 DY -15 END_POS END_ANCHOR DEF_ANCHOR "U" ON 198 GLYPH uni1929 COMPONENT 1 AT POS DX -135 DY -40 END_POS END_ANCHOR DEF_ANCHOR "U" ON 200 GLYPH uni192B COMPONENT 1 AT POS DX -135 DY -40 END_POS END_ANCHOR GRID_PPEM 20 PRESENTATION_PPEM 72 PPOSITIONING_PPEM 144 CMAP_FORMAT 0 3 4 CMAP_FORMAT 1 0 6 CMAP_FORMAT 3 1 4 END \ No newline at end of file
diff --git a/Tests/voltLib/data/Nutso.fea b/Tests/voltLib/data/Nutso.fea
new file mode 100644
index 00000000..7a2c44bb
--- /dev/null
+++ b/Tests/voltLib/data/Nutso.fea
@@ -0,0 +1,328 @@
+# Glyph classes
+@dnom = [zero.dnom one.dnom two.dnom three.dnom four.dnom five.dnom six.dnom seven.dnom eight.dnom nine.dnom];
+@numerals = [zero one two three four five six seven eight nine];
+@numr = [zero.numr one.numr two.numr three.numr four.numr five.numr six.numr seven.numr eight.numr nine.numr];
+@slash = [slash fraction];
+
+# Mark classes
+markClass eight.numr <anchor 0 0> @INIT.1.10;
+markClass eight.numr <anchor 0 0> @INIT.2.10;
+markClass eight.numr <anchor 0 0> @INIT.3.10;
+markClass eight.numr <anchor 0 0> @INIT.4.10;
+markClass eight.numr <anchor 0 0> @INIT.5.10;
+markClass eight.numr <anchor 0 0> @INIT.6.10;
+markClass eight.numr <anchor 0 0> @INIT.7.10;
+markClass eight.numr <anchor 0 0> @INIT.8.10;
+markClass eight.numr <anchor 0 0> @INIT.9.10;
+markClass eight.numr <anchor 0 0> @NUMRNUMR;
+markClass five.numr <anchor 0 0> @INIT.1.10;
+markClass five.numr <anchor 0 0> @INIT.2.10;
+markClass five.numr <anchor 0 0> @INIT.3.10;
+markClass five.numr <anchor 0 0> @INIT.4.10;
+markClass five.numr <anchor 0 0> @INIT.5.10;
+markClass five.numr <anchor 0 0> @INIT.6.10;
+markClass five.numr <anchor 0 0> @INIT.7.10;
+markClass five.numr <anchor 0 0> @INIT.8.10;
+markClass five.numr <anchor 0 0> @INIT.9.10;
+markClass five.numr <anchor 0 0> @NUMRNUMR;
+markClass four.numr <anchor 0 0> @INIT.1.10;
+markClass four.numr <anchor 0 0> @INIT.2.10;
+markClass four.numr <anchor 0 0> @INIT.3.10;
+markClass four.numr <anchor 0 0> @INIT.4.10;
+markClass four.numr <anchor 0 0> @INIT.5.10;
+markClass four.numr <anchor 0 0> @INIT.6.10;
+markClass four.numr <anchor 0 0> @INIT.7.10;
+markClass four.numr <anchor 0 0> @INIT.8.10;
+markClass four.numr <anchor 0 0> @INIT.9.10;
+markClass four.numr <anchor 0 0> @NUMRNUMR;
+markClass nine.numr <anchor 0 0> @INIT.1.10;
+markClass nine.numr <anchor 0 0> @INIT.2.10;
+markClass nine.numr <anchor 0 0> @INIT.3.10;
+markClass nine.numr <anchor 0 0> @INIT.4.10;
+markClass nine.numr <anchor 0 0> @INIT.5.10;
+markClass nine.numr <anchor 0 0> @INIT.6.10;
+markClass nine.numr <anchor 0 0> @INIT.7.10;
+markClass nine.numr <anchor 0 0> @INIT.8.10;
+markClass nine.numr <anchor 0 0> @INIT.9.10;
+markClass nine.numr <anchor 0 0> @NUMRNUMR;
+markClass one.numr <anchor 0 0> @INIT.1.10;
+markClass one.numr <anchor 0 0> @INIT.2.10;
+markClass one.numr <anchor 0 0> @INIT.3.10;
+markClass one.numr <anchor 0 0> @INIT.4.10;
+markClass one.numr <anchor 0 0> @INIT.5.10;
+markClass one.numr <anchor 0 0> @INIT.6.10;
+markClass one.numr <anchor 0 0> @INIT.7.10;
+markClass one.numr <anchor 0 0> @INIT.8.10;
+markClass one.numr <anchor 0 0> @INIT.9.10;
+markClass one.numr <anchor 0 0> @NUMRNUMR;
+markClass seven.numr <anchor 0 0> @INIT.1.10;
+markClass seven.numr <anchor 0 0> @INIT.2.10;
+markClass seven.numr <anchor 0 0> @INIT.3.10;
+markClass seven.numr <anchor 0 0> @INIT.4.10;
+markClass seven.numr <anchor 0 0> @INIT.5.10;
+markClass seven.numr <anchor 0 0> @INIT.6.10;
+markClass seven.numr <anchor 0 0> @INIT.7.10;
+markClass seven.numr <anchor 0 0> @INIT.8.10;
+markClass seven.numr <anchor 0 0> @INIT.9.10;
+markClass seven.numr <anchor 0 0> @NUMRNUMR;
+markClass six.numr <anchor 0 0> @INIT.1.10;
+markClass six.numr <anchor 0 0> @INIT.2.10;
+markClass six.numr <anchor 0 0> @INIT.3.10;
+markClass six.numr <anchor 0 0> @INIT.4.10;
+markClass six.numr <anchor 0 0> @INIT.5.10;
+markClass six.numr <anchor 0 0> @INIT.6.10;
+markClass six.numr <anchor 0 0> @INIT.7.10;
+markClass six.numr <anchor 0 0> @INIT.8.10;
+markClass six.numr <anchor 0 0> @INIT.9.10;
+markClass six.numr <anchor 0 0> @NUMRNUMR;
+markClass three.numr <anchor 0 0> @INIT.1.10;
+markClass three.numr <anchor 0 0> @INIT.2.10;
+markClass three.numr <anchor 0 0> @INIT.3.10;
+markClass three.numr <anchor 0 0> @INIT.4.10;
+markClass three.numr <anchor 0 0> @INIT.5.10;
+markClass three.numr <anchor 0 0> @INIT.6.10;
+markClass three.numr <anchor 0 0> @INIT.7.10;
+markClass three.numr <anchor 0 0> @INIT.8.10;
+markClass three.numr <anchor 0 0> @INIT.9.10;
+markClass three.numr <anchor 0 0> @NUMRNUMR;
+markClass two.numr <anchor 0 0> @INIT.1.10;
+markClass two.numr <anchor 0 0> @INIT.2.10;
+markClass two.numr <anchor 0 0> @INIT.3.10;
+markClass two.numr <anchor 0 0> @INIT.4.10;
+markClass two.numr <anchor 0 0> @INIT.5.10;
+markClass two.numr <anchor 0 0> @INIT.6.10;
+markClass two.numr <anchor 0 0> @INIT.7.10;
+markClass two.numr <anchor 0 0> @INIT.8.10;
+markClass two.numr <anchor 0 0> @INIT.9.10;
+markClass two.numr <anchor 0 0> @NUMRNUMR;
+markClass zero.numr <anchor 0 0> @INIT.1.10;
+markClass zero.numr <anchor 0 0> @INIT.2.10;
+markClass zero.numr <anchor 0 0> @INIT.3.10;
+markClass zero.numr <anchor 0 0> @INIT.4.10;
+markClass zero.numr <anchor 0 0> @INIT.5.10;
+markClass zero.numr <anchor 0 0> @INIT.6.10;
+markClass zero.numr <anchor 0 0> @INIT.7.10;
+markClass zero.numr <anchor 0 0> @INIT.8.10;
+markClass zero.numr <anchor 0 0> @INIT.9.10;
+markClass zero.numr <anchor 0 0> @NUMRNUMR;
+
+# Lookups
+lookup frac.numr {
+ sub @numerals by @numr;
+} frac.numr;
+
+lookup frac.dnom {
+ sub [@slash @dnom] @numr' by @dnom;
+} frac.dnom;
+
+lookup frac.noslash {
+ sub @numr slash by @numr;
+ sub @numr fraction by @numr;
+} frac.noslash;
+
+lookup frac.fracinit {
+ ignore sub @numr @numr';
+ sub @numr' by fracinit @numr;
+} frac.fracinit;
+
+lookup kern.numeral_to_fraction {
+ enum pos @numerals fracinit 140;
+ pos @dnom @numerals 140;
+} kern.numeral_to_fraction;
+
+lookup fracmark.init_1.10_target {
+ pos base fracinit
+ <anchor 3150 0> mark @INIT.1.10;
+} fracmark.init_1.10_target;
+
+lookup fracmark.init_2.10_target {
+ pos base fracinit
+ <anchor 2800 0> mark @INIT.2.10;
+} fracmark.init_2.10_target;
+
+lookup fracmark.init_3.10_target {
+ pos base fracinit
+ <anchor 2450 0> mark @INIT.3.10;
+} fracmark.init_3.10_target;
+
+lookup fracmark.init_4.10_target {
+ pos base fracinit
+ <anchor 2100 0> mark @INIT.4.10;
+} fracmark.init_4.10_target;
+
+lookup fracmark.init_5.10_target {
+ pos base fracinit
+ <anchor 1750 0> mark @INIT.5.10;
+} fracmark.init_5.10_target;
+
+lookup fracmark.init_6.10_target {
+ pos base fracinit
+ <anchor 1400 0> mark @INIT.6.10;
+} fracmark.init_6.10_target;
+
+lookup fracmark.init_7.10_target {
+ pos base fracinit
+ <anchor 1050 0> mark @INIT.7.10;
+} fracmark.init_7.10_target;
+
+lookup fracmark.init_8.10_target {
+ pos base fracinit
+ <anchor 700 0> mark @INIT.8.10;
+} fracmark.init_8.10_target;
+
+lookup fracmark.init_9.10_target {
+ pos base fracinit
+ <anchor 350 0> mark @INIT.9.10;
+} fracmark.init_9.10_target;
+
+lookup fracmark.init {
+ # fracmark.init\1.10
+ pos [@numr]' lookup fracmark.init_1.10_target @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\2.10
+ pos [@numr]' lookup fracmark.init_2.10_target @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_2.10_target @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\3.10
+ pos [@numr]' lookup fracmark.init_3.10_target @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_3.10_target @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_3.10_target @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\4.10
+ pos [@numr]' lookup fracmark.init_4.10_target @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_4.10_target @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_4.10_target @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_4.10_target @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\5.10
+ pos [@numr]' lookup fracmark.init_5.10_target @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_5.10_target @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_5.10_target @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_5.10_target @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_5.10_target @dnom @dnom @dnom @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\6.10
+ pos [@numr]' lookup fracmark.init_6.10_target @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_6.10_target @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_6.10_target @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_6.10_target @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_6.10_target @numr @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_6.10_target @dnom @dnom @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\7.10
+ pos [@numr]' lookup fracmark.init_7.10_target @numr @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_7.10_target @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_7.10_target @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_7.10_target @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_7.10_target @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_7.10_target @numr @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_7.10_target @dnom @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\8.10
+ pos [@numr]' lookup fracmark.init_8.10_target @numr @numr @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_8.10_target @numr @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_8.10_target @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_8.10_target @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_8.10_target @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_8.10_target @numr @numr @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_8.10_target @numr @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_8.10_target @dnom @dnom @dnom;
+ subtable;
+ # fracmark.init\9.10
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @numr @numr @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @numr @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @numr @numr @numr @dnom @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @numr @numr @dnom @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @numr @dnom @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @numr @dnom @dnom @dnom;
+ pos [@numr]' lookup fracmark.init_9.10_target @dnom @dnom;
+} fracmark.init;
+
+lookup fracmkmk.numrspacing {
+ pos mark zero.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark one.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark two.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark three.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark four.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark five.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark six.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark seven.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark eight.numr
+ <anchor 700 0> mark @NUMRNUMR;
+ pos mark nine.numr
+ <anchor 700 0> mark @NUMRNUMR;
+} fracmkmk.numrspacing;
+
+# Features
+feature afrc {
+ script DFLT;
+ language dflt;
+ lookup frac.numr;
+ lookup frac.dnom;
+ lookup frac.noslash;
+ lookup frac.fracinit;
+ script latn;
+ language dflt;
+ lookup frac.numr;
+ lookup frac.dnom;
+ lookup frac.noslash;
+ lookup frac.fracinit;
+} afrc;
+
+feature frac {
+ script DFLT;
+ language dflt;
+ lookup frac.numr;
+ lookup frac.dnom;
+ lookup frac.noslash;
+ lookup frac.fracinit;
+ script latn;
+ language dflt;
+ lookup frac.numr;
+ lookup frac.dnom;
+ lookup frac.noslash;
+ lookup frac.fracinit;
+} frac;
+
+feature kern {
+ script DFLT;
+ language dflt;
+ lookup kern.numeral_to_fraction;
+ script latn;
+ language dflt;
+ lookup kern.numeral_to_fraction;
+} kern;
+
+feature mark {
+ script DFLT;
+ language dflt;
+ lookup fracmark.init;
+ script latn;
+ language dflt;
+ lookup fracmark.init;
+} mark;
+
+feature mkmk {
+ script DFLT;
+ language dflt;
+ lookup fracmkmk.numrspacing;
+ script latn;
+ language dflt;
+ lookup fracmkmk.numrspacing;
+} mkmk;
+
+@GDEF_base = [glyph0 \NULL CR space zero one two three four five six seven eight nine slash fraction fracinit zero.dnom one.dnom two.dnom three.dnom four.dnom five.dnom six.dnom seven.dnom eight.dnom nine.dnom];
+@GDEF_mark = [zero.numr one.numr two.numr three.numr four.numr five.numr six.numr seven.numr eight.numr nine.numr];
+table GDEF {
+ GlyphClassDef @GDEF_base, , @GDEF_mark, ;
+} GDEF;
diff --git a/Tests/voltLib/data/Nutso.ttf b/Tests/voltLib/data/Nutso.ttf
new file mode 100644
index 00000000..5efec568
--- /dev/null
+++ b/Tests/voltLib/data/Nutso.ttf
Binary files differ
diff --git a/Tests/voltLib/data/Nutso.vtp b/Tests/voltLib/data/Nutso.vtp
new file mode 100644
index 00000000..9572a002
--- /dev/null
+++ b/Tests/voltLib/data/Nutso.vtp
@@ -0,0 +1 @@
+ DEF_GLYPH "glyph0" ID 0 TYPE BASE END_GLYPH DEF_GLYPH "NULL" ID 1 UNICODE 0 TYPE BASE END_GLYPH DEF_GLYPH "CR" ID 2 UNICODE 13 TYPE BASE END_GLYPH DEF_GLYPH "space" ID 3 UNICODE 32 TYPE BASE END_GLYPH DEF_GLYPH "zero" ID 4 UNICODE 48 TYPE BASE END_GLYPH DEF_GLYPH "one" ID 5 UNICODE 49 TYPE BASE END_GLYPH DEF_GLYPH "two" ID 6 UNICODE 50 TYPE BASE END_GLYPH DEF_GLYPH "three" ID 7 UNICODE 51 TYPE BASE END_GLYPH DEF_GLYPH "four" ID 8 UNICODE 52 TYPE BASE END_GLYPH DEF_GLYPH "five" ID 9 UNICODE 53 TYPE BASE END_GLYPH DEF_GLYPH "six" ID 10 UNICODE 54 TYPE BASE END_GLYPH DEF_GLYPH "seven" ID 11 UNICODE 55 TYPE BASE END_GLYPH DEF_GLYPH "eight" ID 12 UNICODE 56 TYPE BASE END_GLYPH DEF_GLYPH "nine" ID 13 UNICODE 57 TYPE BASE END_GLYPH DEF_GLYPH "slash" ID 14 UNICODE 47 TYPE BASE END_GLYPH DEF_GLYPH "fraction" ID 15 UNICODE 8260 TYPE BASE END_GLYPH DEF_GLYPH "fracinit" ID 16 TYPE BASE END_GLYPH DEF_GLYPH "zero.numr" ID 17 TYPE MARK END_GLYPH DEF_GLYPH "one.numr" ID 18 TYPE MARK END_GLYPH DEF_GLYPH "two.numr" ID 19 TYPE MARK END_GLYPH DEF_GLYPH "three.numr" ID 20 TYPE MARK END_GLYPH DEF_GLYPH "four.numr" ID 21 TYPE MARK END_GLYPH DEF_GLYPH "five.numr" ID 22 TYPE MARK END_GLYPH DEF_GLYPH "six.numr" ID 23 TYPE MARK END_GLYPH DEF_GLYPH "seven.numr" ID 24 TYPE MARK END_GLYPH DEF_GLYPH "eight.numr" ID 25 TYPE MARK END_GLYPH DEF_GLYPH "nine.numr" ID 26 TYPE MARK END_GLYPH DEF_GLYPH "zero.dnom" ID 27 TYPE BASE END_GLYPH DEF_GLYPH "one.dnom" ID 28 TYPE BASE END_GLYPH DEF_GLYPH "two.dnom" ID 29 TYPE BASE END_GLYPH DEF_GLYPH "three.dnom" ID 30 TYPE BASE END_GLYPH DEF_GLYPH "four.dnom" ID 31 TYPE BASE END_GLYPH DEF_GLYPH "five.dnom" ID 32 TYPE BASE END_GLYPH DEF_GLYPH "six.dnom" ID 33 TYPE BASE END_GLYPH DEF_GLYPH "seven.dnom" ID 34 TYPE BASE END_GLYPH DEF_GLYPH "eight.dnom" ID 35 TYPE BASE END_GLYPH DEF_GLYPH "nine.dnom" ID 36 TYPE BASE END_GLYPH DEF_SCRIPT NAME "Default" TAG "DFLT" DEF_LANGSYS NAME "Default" TAG "dflt" DEF_FEATURE NAME "Alternative Fractions" TAG "afrc" LOOKUP 
"frac.numr" LOOKUP "frac.dnom" LOOKUP "frac.noslash" LOOKUP "frac.fracinit" END_FEATURE DEF_FEATURE NAME "Fractions" TAG "frac" LOOKUP "frac.numr" LOOKUP "frac.dnom" LOOKUP "frac.noslash" LOOKUP "frac.fracinit" END_FEATURE DEF_FEATURE NAME "Kerning" TAG "kern" LOOKUP "kern.numeral-to-fraction" END_FEATURE DEF_FEATURE NAME "Mark Positioning" TAG "mark" LOOKUP "fracmark.init\1.10" LOOKUP "fracmark.init\2.10" LOOKUP "fracmark.init\3.10" LOOKUP "fracmark.init\4.10" LOOKUP "fracmark.init\5.10" LOOKUP "fracmark.init\6.10" LOOKUP "fracmark.init\7.10" LOOKUP "fracmark.init\8.10" LOOKUP "fracmark.init\9.10" END_FEATURE DEF_FEATURE NAME "Mark to Mark Positioning" TAG "mkmk" LOOKUP "fracmkmk.numrspacing" END_FEATURE END_LANGSYS END_SCRIPT DEF_SCRIPT NAME "Latin" TAG "latn" DEF_LANGSYS NAME "Default" TAG "dflt" DEF_FEATURE NAME "Alternative Fractions" TAG "afrc" LOOKUP "frac.numr" LOOKUP "frac.dnom" LOOKUP "frac.noslash" LOOKUP "frac.fracinit" END_FEATURE DEF_FEATURE NAME "Fractions" TAG "frac" LOOKUP "frac.numr" LOOKUP "frac.dnom" LOOKUP "frac.noslash" LOOKUP "frac.fracinit" END_FEATURE DEF_FEATURE NAME "Kerning" TAG "kern" LOOKUP "kern.numeral-to-fraction" END_FEATURE DEF_FEATURE NAME "Mark Positioning" TAG "mark" LOOKUP "fracmark.init\1.10" LOOKUP "fracmark.init\2.10" LOOKUP "fracmark.init\3.10" LOOKUP "fracmark.init\4.10" LOOKUP "fracmark.init\5.10" LOOKUP "fracmark.init\6.10" LOOKUP "fracmark.init\7.10" LOOKUP "fracmark.init\8.10" LOOKUP "fracmark.init\9.10" END_FEATURE DEF_FEATURE NAME "Mark to Mark Positioning" TAG "mkmk" LOOKUP "fracmkmk.numrspacing" END_FEATURE END_LANGSYS END_SCRIPT DEF_GROUP "dnom" ENUM GLYPH "zero.dnom" GLYPH "one.dnom" GLYPH "two.dnom" GLYPH "three.dnom" GLYPH "four.dnom" GLYPH "five.dnom" GLYPH "six.dnom" GLYPH "seven.dnom" GLYPH "eight.dnom" GLYPH "nine.dnom" END_ENUM END_GROUP DEF_GROUP "numerals" ENUM GLYPH "zero" GLYPH "one" GLYPH "two" GLYPH "three" GLYPH "four" GLYPH "five" GLYPH "six" GLYPH "seven" GLYPH "eight" GLYPH "nine" END_ENUM 
END_GROUP DEF_GROUP "numr" ENUM GLYPH "zero.numr" GLYPH "one.numr" GLYPH "two.numr" GLYPH "three.numr" GLYPH "four.numr" GLYPH "five.numr" GLYPH "six.numr" GLYPH "seven.numr" GLYPH "eight.numr" GLYPH "nine.numr" END_ENUM END_GROUP DEF_GROUP "slash" ENUM GLYPH "slash" GLYPH "fraction" END_ENUM END_GROUP DEF_LOOKUP "frac.numr" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GROUP "numerals" WITH GROUP "numr" END_SUB END_SUBSTITUTION DEF_LOOKUP "frac.dnom" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT LEFT ENUM GROUP "slash" GROUP "dnom" END_ENUM END_CONTEXT AS_SUBSTITUTION SUB GROUP "numr" WITH GROUP "dnom" END_SUB END_SUBSTITUTION DEF_LOOKUP "frac.noslash" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_SUBSTITUTION SUB GROUP "numr" GLYPH "slash" WITH GROUP "numr" END_SUB SUB GROUP "numr" GLYPH "fraction" WITH GROUP "numr" END_SUB END_SUBSTITUTION DEF_LOOKUP "frac.fracinit" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR EXCEPT_CONTEXT LEFT GROUP "numr" END_CONTEXT AS_SUBSTITUTION SUB GROUP "numr" WITH GLYPH "fracinit" GROUP "numr" END_SUB END_SUBSTITUTION DEF_LOOKUP "kern.numeral-to-fraction" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_POSITION ADJUST_PAIR FIRST GROUP "numerals" FIRST GROUP "dnom" SECOND GROUP "numerals" SECOND GLYPH "fracinit" 1 2 BY POS ADV 140 END_POS POS END_POS 2 1 BY POS ADV 140 END_POS POS END_POS END_ADJUST END_POSITION DEF_LOOKUP "fracmark.init\1.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.1.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\2.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" 
RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.2.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\3.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.3.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\4.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT 
GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.4.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\5.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.5.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\6.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT 
GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.6.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\7.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP 
"numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.7.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\8.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" 
RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.8.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmark.init\9.10" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT 
GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "numr" RIGHT GROUP "dnom" RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT IN_CONTEXT RIGHT GROUP "dnom" RIGHT GROUP "dnom" END_CONTEXT AS_POSITION ATTACH GLYPH "fracinit" TO GROUP "numr" AT ANCHOR "INIT.9.10" END_ATTACH END_POSITION DEF_LOOKUP "fracmkmk.numrspacing" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR IN_CONTEXT END_CONTEXT AS_POSITION ATTACH GROUP "numr" TO GROUP "numr" AT ANCHOR "NUMRNUMR" END_ATTACH END_POSITION DEF_ANCHOR "NUMRNUMR" ON 17 GLYPH zero.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_NUMRNUMR" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 18 GLYPH one.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 19 GLYPH two.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 20 GLYPH 
three.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 21 GLYPH four.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 22 GLYPH five.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 23 GLYPH six.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 24 GLYPH seven.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 25 GLYPH eight.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "NUMRNUMR" ON 26 GLYPH nine.numr COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "INIT.1.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 3150 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.1.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.2.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 2800 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS 
END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.2.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.3.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 2450 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.3.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.4.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 2100 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 21 GLYPH 
four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.4.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.5.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 1750 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.5.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.6.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 1400 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR 
"MARK_INIT.6.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.6.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.7.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 1050 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.7.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.8.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 700 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS 
END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.8.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "INIT.9.10" ON 16 GLYPH fracinit COMPONENT 1 AT POS DX 350 END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 17 GLYPH zero.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 18 GLYPH one.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 19 GLYPH two.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 20 GLYPH three.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 21 GLYPH four.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 22 GLYPH five.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 23 GLYPH six.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 24 GLYPH seven.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 25 GLYPH eight.numr COMPONENT 1 AT POS END_POS END_ANCHOR DEF_ANCHOR "MARK_INIT.9.10" ON 26 GLYPH nine.numr COMPONENT 1 AT POS END_POS END_ANCHOR GRID_PPEM 20 PRESENTATION_PPEM 144 PPOSITIONING_PPEM 205 CMAP_FORMAT 0 3 4 CMAP_FORMAT 1 0 6 CMAP_FORMAT 3 1 4 END \ No newline at end of file
diff --git a/Tests/voltLib/lexer_test.py b/Tests/voltLib/lexer_test.py
index 2145d079..a8e849b1 100644
--- a/Tests/voltLib/lexer_test.py
+++ b/Tests/voltLib/lexer_test.py
@@ -16,18 +16,21 @@ class LexerTest(unittest.TestCase):
self.assertEqual(lex("\t"), [])
def test_string(self):
- self.assertEqual(lex('"foo" "bar"'),
- [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")])
+ self.assertEqual(
+ lex('"foo" "bar"'), [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]
+ )
self.assertRaises(VoltLibError, lambda: lex('"foo\n bar"'))
def test_name(self):
- self.assertEqual(lex('DEF_FOO bar.alt1'),
- [(Lexer.NAME, "DEF_FOO"), (Lexer.NAME, "bar.alt1")])
+ self.assertEqual(
+ lex("DEF_FOO bar.alt1"), [(Lexer.NAME, "DEF_FOO"), (Lexer.NAME, "bar.alt1")]
+ )
def test_number(self):
- self.assertEqual(lex("123 -456"),
- [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)])
+ self.assertEqual(lex("123 -456"), [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)])
+
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/voltLib/parser_test.py b/Tests/voltLib/parser_test.py
index 0e0191fc..abc02d3b 100644
--- a/Tests/voltLib/parser_test.py
+++ b/Tests/voltLib/parser_test.py
@@ -24,98 +24,175 @@ class ParserTest(unittest.TestCase):
[def_glyph] = self.parse(
'DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH'
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- (".notdef", 0, None, "BASE", None))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ (".notdef", 0, None, "BASE", None),
+ )
def test_def_glyph_base_with_unicode(self):
[def_glyph] = self.parse(
'DEF_GLYPH "space" ID 3 UNICODE 32 TYPE BASE END_GLYPH'
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("space", 3, [0x0020], "BASE", None))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("space", 3, [0x0020], "BASE", None),
+ )
def test_def_glyph_base_with_unicodevalues(self):
[def_glyph] = self.parse_(
- 'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009" '
- 'TYPE BASE END_GLYPH'
+ 'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009" ' "TYPE BASE END_GLYPH"
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("CR", 2, [0x0009], "BASE", None))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("CR", 2, [0x0009], "BASE", None),
+ )
def test_def_glyph_base_with_mult_unicodevalues(self):
[def_glyph] = self.parse(
- 'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009,U+000D" '
- 'TYPE BASE END_GLYPH'
+ 'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009,U+000D" ' "TYPE BASE END_GLYPH"
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("CR", 2, [0x0009, 0x000D], "BASE", None))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("CR", 2, [0x0009, 0x000D], "BASE", None),
+ )
def test_def_glyph_base_with_empty_unicodevalues(self):
[def_glyph] = self.parse_(
- 'DEF_GLYPH "i.locl" ID 269 UNICODEVALUES "" '
- 'TYPE BASE END_GLYPH'
+ 'DEF_GLYPH "i.locl" ID 269 UNICODEVALUES "" ' "TYPE BASE END_GLYPH"
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("i.locl", 269, None, "BASE", None))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("i.locl", 269, None, "BASE", None),
+ )
def test_def_glyph_base_2_components(self):
[def_glyph] = self.parse(
'DEF_GLYPH "glyphBase" ID 320 TYPE BASE COMPONENTS 2 END_GLYPH'
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("glyphBase", 320, None, "BASE", 2))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("glyphBase", 320, None, "BASE", 2),
+ )
def test_def_glyph_ligature_2_components(self):
[def_glyph] = self.parse(
'DEF_GLYPH "f_f" ID 320 TYPE LIGATURE COMPONENTS 2 END_GLYPH'
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("f_f", 320, None, "LIGATURE", 2))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("f_f", 320, None, "LIGATURE", 2),
+ )
def test_def_glyph_mark(self):
[def_glyph] = self.parse(
'DEF_GLYPH "brevecomb" ID 320 TYPE MARK END_GLYPH'
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("brevecomb", 320, None, "MARK", None))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("brevecomb", 320, None, "MARK", None),
+ )
def test_def_glyph_component(self):
[def_glyph] = self.parse(
'DEF_GLYPH "f.f_f" ID 320 TYPE COMPONENT END_GLYPH'
).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("f.f_f", 320, None, "COMPONENT", None))
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("f.f_f", 320, None, "COMPONENT", None),
+ )
def test_def_glyph_no_type(self):
- [def_glyph] = self.parse(
- 'DEF_GLYPH "glyph20" ID 20 END_GLYPH'
- ).statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- ("glyph20", 20, None, None, None))
+ [def_glyph] = self.parse('DEF_GLYPH "glyph20" ID 20 END_GLYPH').statements
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ ("glyph20", 20, None, None, None),
+ )
def test_def_glyph_case_sensitive(self):
def_glyphs = self.parse(
'DEF_GLYPH "A" ID 3 UNICODE 65 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "a" ID 4 UNICODE 97 TYPE BASE END_GLYPH'
).statements
- self.assertEqual((def_glyphs[0].name, def_glyphs[0].id,
- def_glyphs[0].unicode, def_glyphs[0].type,
- def_glyphs[0].components),
- ("A", 3, [0x41], "BASE", None))
- self.assertEqual((def_glyphs[1].name, def_glyphs[1].id,
- def_glyphs[1].unicode, def_glyphs[1].type,
- def_glyphs[1].components),
- ("a", 4, [0x61], "BASE", None))
+ self.assertEqual(
+ (
+ def_glyphs[0].name,
+ def_glyphs[0].id,
+ def_glyphs[0].unicode,
+ def_glyphs[0].type,
+ def_glyphs[0].components,
+ ),
+ ("A", 3, [0x41], "BASE", None),
+ )
+ self.assertEqual(
+ (
+ def_glyphs[1].name,
+ def_glyphs[1].id,
+ def_glyphs[1].unicode,
+ def_glyphs[1].type,
+ def_glyphs[1].components,
+ ),
+ ("a", 4, [0x61], "BASE", None),
+ )
def test_def_group_glyphs(self):
[def_group] = self.parse(
@@ -123,61 +200,70 @@ class ParserTest(unittest.TestCase):
' ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" '
'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" '
'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n'
- 'END_GROUP'
+ "END_GROUP"
).statements
- self.assertEqual((def_group.name, def_group.enum.glyphSet()),
- ("aaccented",
- ("aacute", "abreve", "acircumflex", "adieresis",
- "ae", "agrave", "amacron", "aogonek", "aring",
- "atilde")))
+ self.assertEqual(
+ (def_group.name, def_group.enum.glyphSet()),
+ (
+ "aaccented",
+ (
+ "aacute",
+ "abreve",
+ "acircumflex",
+ "adieresis",
+ "ae",
+ "agrave",
+ "amacron",
+ "aogonek",
+ "aring",
+ "atilde",
+ ),
+ ),
+ )
def test_def_group_groups(self):
[group1, group2, test_group] = self.parse(
'DEF_GROUP "Group1"\n'
' ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "Group2"\n'
' ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "TestGroup"\n'
' ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n'
- 'END_GROUP'
+ "END_GROUP"
).statements
groups = [g.group for g in test_group.enum.enum]
- self.assertEqual((test_group.name, groups),
- ("TestGroup", ["Group1", "Group2"]))
+ self.assertEqual((test_group.name, groups), ("TestGroup", ["Group1", "Group2"]))
def test_def_group_groups_not_yet_defined(self):
- [group1, test_group1, test_group2, test_group3, group2] = \
- self.parse(
+ [group1, test_group1, test_group2, test_group3, group2] = self.parse(
'DEF_GROUP "Group1"\n'
' ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "TestGroup1"\n'
' ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "TestGroup2"\n'
' ENUM GROUP "Group2" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "TestGroup3"\n'
' ENUM GROUP "Group2" GROUP "Group1" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "Group2"\n'
' ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n'
- 'END_GROUP'
+ "END_GROUP"
).statements
groups = [g.group for g in test_group1.enum.enum]
self.assertEqual(
- (test_group1.name, groups),
- ("TestGroup1", ["Group1", "Group2"]))
+ (test_group1.name, groups), ("TestGroup1", ["Group1", "Group2"])
+ )
groups = [g.group for g in test_group2.enum.enum]
- self.assertEqual(
- (test_group2.name, groups),
- ("TestGroup2", ["Group2"]))
+ self.assertEqual((test_group2.name, groups), ("TestGroup2", ["Group2"]))
groups = [g.group for g in test_group3.enum.enum]
self.assertEqual(
- (test_group3.name, groups),
- ("TestGroup3", ["Group2", "Group1"]))
+ (test_group3.name, groups), ("TestGroup3", ["Group2", "Group1"])
+ )
# def test_def_group_groups_undefined(self):
# with self.assertRaisesRegex(
@@ -198,14 +284,16 @@ class ParserTest(unittest.TestCase):
' ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" '
'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" '
'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "KERN_lc_a_2ND"\n'
' ENUM GLYPH "a" GROUP "aaccented" END_ENUM\n'
- 'END_GROUP'
+ "END_GROUP"
).statements
items = def_group2.enum.enum
- self.assertEqual((def_group2.name, items[0].glyphSet(), items[1].group),
- ("KERN_lc_a_2ND", ("a",), "aaccented"))
+ self.assertEqual(
+ (def_group2.name, items[0].glyphSet(), items[1].group),
+ ("KERN_lc_a_2ND", ("a",), "aaccented"),
+ )
def test_def_group_range(self):
def_group = self.parse(
@@ -220,169 +308,166 @@ class ParserTest(unittest.TestCase):
'DEF_GLYPH "cdotaccent" ID 210 UNICODE 267 TYPE BASE END_GLYPH\n'
'DEF_GROUP "KERN_lc_a_2ND"\n'
' ENUM RANGE "a" TO "atilde" GLYPH "b" RANGE "c" TO "cdotaccent" '
- 'END_ENUM\n'
- 'END_GROUP'
+ "END_ENUM\n"
+ "END_GROUP"
).statements[-1]
- self.assertEqual((def_group.name, def_group.enum.glyphSet()),
- ("KERN_lc_a_2ND",
- ("a", "agrave", "aacute", "acircumflex", "atilde",
- "b", "c", "ccaron", "ccedilla", "cdotaccent")))
+ self.assertEqual(
+ (def_group.name, def_group.enum.glyphSet()),
+ (
+ "KERN_lc_a_2ND",
+ (
+ "a",
+ "agrave",
+ "aacute",
+ "acircumflex",
+ "atilde",
+ "b",
+ "c",
+ "ccaron",
+ "ccedilla",
+ "cdotaccent",
+ ),
+ ),
+ )
def test_group_duplicate(self):
self.assertRaisesRegex(
VoltLibError,
- 'Glyph group "dupe" already defined, '
- 'group names are case insensitive',
- self.parse, 'DEF_GROUP "dupe"\n'
- 'ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
- 'END_GROUP\n'
- 'DEF_GROUP "dupe"\n'
- 'ENUM GLYPH "x" END_ENUM\n'
- 'END_GROUP'
+ 'Glyph group "dupe" already defined, ' "group names are case insensitive",
+ self.parse,
+ 'DEF_GROUP "dupe"\n'
+ 'ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "dupe"\n'
+ 'ENUM GLYPH "x" END_ENUM\n'
+ "END_GROUP",
)
def test_group_duplicate_case_insensitive(self):
self.assertRaisesRegex(
VoltLibError,
- 'Glyph group "Dupe" already defined, '
- 'group names are case insensitive',
- self.parse, 'DEF_GROUP "dupe"\n'
- 'ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
- 'END_GROUP\n'
- 'DEF_GROUP "Dupe"\n'
- 'ENUM GLYPH "x" END_ENUM\n'
- 'END_GROUP'
+ 'Glyph group "Dupe" already defined, ' "group names are case insensitive",
+ self.parse,
+ 'DEF_GROUP "dupe"\n'
+ 'ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "Dupe"\n'
+ 'ENUM GLYPH "x" END_ENUM\n'
+ "END_GROUP",
)
def test_script_without_langsys(self):
[script] = self.parse(
- 'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
- 'END_SCRIPT'
+ 'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n' "END_SCRIPT"
).statements
- self.assertEqual((script.name, script.tag, script.langs),
- ("Latin", "latn", []))
+ self.assertEqual((script.name, script.tag, script.langs), ("Latin", "latn", []))
def test_langsys_normal(self):
[def_script] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n\n'
- 'END_LANGSYS\n'
+ "END_LANGSYS\n"
'DEF_LANGSYS NAME "Moldavian" TAG "MOL "\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
- self.assertEqual((def_script.name, def_script.tag),
- ("Latin",
- "latn"))
+ self.assertEqual((def_script.name, def_script.tag), ("Latin", "latn"))
def_lang = def_script.langs[0]
- self.assertEqual((def_lang.name, def_lang.tag),
- ("Romanian",
- "ROM "))
+ self.assertEqual((def_lang.name, def_lang.tag), ("Romanian", "ROM "))
def_lang = def_script.langs[1]
- self.assertEqual((def_lang.name, def_lang.tag),
- ("Moldavian",
- "MOL "))
+ self.assertEqual((def_lang.name, def_lang.tag), ("Moldavian", "MOL "))
def test_langsys_no_script_name(self):
[langsys] = self.parse(
'DEF_SCRIPT TAG "latn"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
- self.assertEqual((langsys.name, langsys.tag),
- (None,
- "latn"))
+ self.assertEqual((langsys.name, langsys.tag), (None, "latn"))
lang = langsys.langs[0]
- self.assertEqual((lang.name, lang.tag),
- ("Default",
- "dflt"))
+ self.assertEqual((lang.name, lang.tag), ("Default", "dflt"))
def test_langsys_no_script_tag_fails(self):
- with self.assertRaisesRegex(
- VoltLibError,
- r'.*Expected "TAG"'):
+ with self.assertRaisesRegex(VoltLibError, r'.*Expected "TAG"'):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Latin"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
def test_langsys_duplicate_script(self):
with self.assertRaisesRegex(
- VoltLibError,
- 'Script "DFLT" already defined, '
- 'script tags are case insensitive'):
+ VoltLibError,
+ 'Script "DFLT" already defined, ' "script tags are case insensitive",
+ ):
[langsys1, langsys2] = self.parse(
'DEF_SCRIPT NAME "Default" TAG "DFLT"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT\n'
+ "END_LANGSYS\n"
+ "END_SCRIPT\n"
'DEF_SCRIPT TAG "DFLT"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
def test_langsys_duplicate_lang(self):
with self.assertRaisesRegex(
- VoltLibError,
- 'Language "dflt" already defined in script "DFLT", '
- 'language tags are case insensitive'):
+ VoltLibError,
+ 'Language "dflt" already defined in script "DFLT", '
+ "language tags are case insensitive",
+ ):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Default" TAG "DFLT"\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n'
- 'END_LANGSYS\n'
+ "END_LANGSYS\n"
'DEF_LANGSYS NAME "Default" TAG "dflt"\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
def test_langsys_lang_in_separate_scripts(self):
[langsys1, langsys2] = self.parse(
'DEF_SCRIPT NAME "Default" TAG "DFLT"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
- 'END_LANGSYS\n'
+ "END_LANGSYS\n"
'DEF_LANGSYS NAME "Default" TAG "ROM "\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT\n'
+ "END_LANGSYS\n"
+ "END_SCRIPT\n"
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
- 'END_LANGSYS\n'
+ "END_LANGSYS\n"
'DEF_LANGSYS NAME "Default" TAG "ROM "\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
- self.assertEqual((langsys1.langs[0].tag, langsys1.langs[1].tag),
- ("dflt", "ROM "))
- self.assertEqual((langsys2.langs[0].tag, langsys2.langs[1].tag),
- ("dflt", "ROM "))
+ self.assertEqual(
+ (langsys1.langs[0].tag, langsys1.langs[1].tag), ("dflt", "ROM ")
+ )
+ self.assertEqual(
+ (langsys2.langs[0].tag, langsys2.langs[1].tag), ("dflt", "ROM ")
+ )
def test_langsys_no_lang_name(self):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS TAG "dflt"\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
- self.assertEqual((langsys.name, langsys.tag),
- ("Latin",
- "latn"))
+ self.assertEqual((langsys.name, langsys.tag), ("Latin", "latn"))
lang = langsys.langs[0]
- self.assertEqual((lang.name, lang.tag),
- (None,
- "dflt"))
+ self.assertEqual((lang.name, lang.tag), (None, "dflt"))
def test_langsys_no_langsys_tag_fails(self):
- with self.assertRaisesRegex(
- VoltLibError,
- r'.*Expected "TAG"'):
+ with self.assertRaisesRegex(VoltLibError, r'.*Expected "TAG"'):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Default"\n\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
def test_feature(self):
@@ -391,181 +476,168 @@ class ParserTest(unittest.TestCase):
'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n\n'
'DEF_FEATURE NAME "Fractions" TAG "frac"\n'
' LOOKUP "fraclookup"\n'
- 'END_FEATURE\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_FEATURE\n"
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
def_feature = def_script.langs[0].features[0]
- self.assertEqual((def_feature.name, def_feature.tag,
- def_feature.lookups),
- ("Fractions",
- "frac",
- ["fraclookup"]))
+ self.assertEqual(
+ (def_feature.name, def_feature.tag, def_feature.lookups),
+ ("Fractions", "frac", ["fraclookup"]),
+ )
[def_script] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n\n'
'DEF_FEATURE NAME "Kerning" TAG "kern"\n'
' LOOKUP "kern1" LOOKUP "kern2"\n'
- 'END_FEATURE\n'
- 'END_LANGSYS\n'
- 'END_SCRIPT'
+ "END_FEATURE\n"
+ "END_LANGSYS\n"
+ "END_SCRIPT"
).statements
def_feature = def_script.langs[0].features[0]
- self.assertEqual((def_feature.name, def_feature.tag,
- def_feature.lookups),
- ("Kerning",
- "kern",
- ["kern1", "kern2"]))
+ self.assertEqual(
+ (def_feature.name, def_feature.tag, def_feature.lookups),
+ ("Kerning", "kern", ["kern1", "kern2"]),
+ )
def test_lookup_duplicate(self):
with self.assertRaisesRegex(
VoltLibError,
- 'Lookup "dupe" already defined, '
- 'lookup names are case insensitive',
+ 'Lookup "dupe" already defined, ' "lookup names are case insensitive",
):
[lookup1, lookup2] = self.parse(
'DEF_LOOKUP "dupe"\n'
- 'AS_SUBSTITUTION\n'
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "a"\n'
'WITH GLYPH "a.alt"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION\n"
'DEF_LOOKUP "dupe"\n'
- 'AS_SUBSTITUTION\n'
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "b"\n'
'WITH GLYPH "b.alt"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION\n"
).statements
def test_lookup_duplicate_insensitive_case(self):
with self.assertRaisesRegex(
VoltLibError,
- 'Lookup "Dupe" already defined, '
- 'lookup names are case insensitive',
+ 'Lookup "Dupe" already defined, ' "lookup names are case insensitive",
):
[lookup1, lookup2] = self.parse(
'DEF_LOOKUP "dupe"\n'
- 'AS_SUBSTITUTION\n'
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "a"\n'
'WITH GLYPH "a.alt"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION\n"
'DEF_LOOKUP "Dupe"\n'
- 'AS_SUBSTITUTION\n'
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "b"\n'
'WITH GLYPH "b.alt"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION\n"
).statements
def test_lookup_name_starts_with_letter(self):
with self.assertRaisesRegex(
- VoltLibError,
- r'Lookup name "\\lookupname" must start with a letter'
+ VoltLibError, r'Lookup name "\\lookupname" must start with a letter'
):
[lookup] = self.parse(
'DEF_LOOKUP "\\lookupname"\n'
- 'AS_SUBSTITUTION\n'
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "a"\n'
'WITH GLYPH "a.alt"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION\n"
).statements
def test_lookup_comments(self):
[lookup] = self.parse(
'DEF_LOOKUP "test" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR\n'
'COMMENTS "Hello\\nWorld"\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "a"\n'
'WITH GLYPH "b"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
self.assertEqual(lookup.name, "test")
self.assertEqual(lookup.comments, "Hello\nWorld")
def test_substitution_empty(self):
- with self.assertRaisesRegex(
- VoltLibError,
- r'Expected SUB'):
+ with self.assertRaisesRegex(VoltLibError, r"Expected SUB"):
[lookup] = self.parse(
'DEF_LOOKUP "empty_substitution" PROCESS_BASE PROCESS_MARKS '
- 'ALL DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
- 'END_SUBSTITUTION'
+ "ALL DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ "END_SUBSTITUTION"
).statements
def test_substitution_invalid_many_to_many(self):
- with self.assertRaisesRegex(
- VoltLibError,
- r'Invalid substitution type'):
+ with self.assertRaisesRegex(VoltLibError, r"Invalid substitution type"):
[lookup] = self.parse(
'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS '
- 'ALL DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "ALL DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "f" GLYPH "i"\n'
'WITH GLYPH "f.alt" GLYPH "i.alt"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
def test_substitution_invalid_reverse_chaining_single(self):
- with self.assertRaisesRegex(
- VoltLibError,
- r'Invalid substitution type'):
+ with self.assertRaisesRegex(VoltLibError, r"Invalid substitution type"):
[lookup] = self.parse(
'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS '
- 'ALL DIRECTION LTR REVERSAL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "ALL DIRECTION LTR REVERSAL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "f" GLYPH "i"\n'
'WITH GLYPH "f_i"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
def test_substitution_invalid_mixed(self):
- with self.assertRaisesRegex(
- VoltLibError,
- r'Invalid substitution type'):
+ with self.assertRaisesRegex(VoltLibError, r"Invalid substitution type"):
[lookup] = self.parse(
'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS '
- 'ALL DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "ALL DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "fi"\n'
'WITH GLYPH "f" GLYPH "i"\n'
- 'END_SUB\n'
+ "END_SUB\n"
'SUB GLYPH "f" GLYPH "l"\n'
'WITH GLYPH "f_l"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
def test_substitution_single(self):
[lookup] = self.parse(
'DEF_LOOKUP "smcp" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "a"\n'
'WITH GLYPH "a.sc"\n'
- 'END_SUB\n'
+ "END_SUB\n"
'SUB GLYPH "b"\n'
'WITH GLYPH "b.sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
self.assertEqual(lookup.name, "smcp")
self.assertSubEqual(lookup.sub, [["a"], ["b"]], [["a.sc"], ["b.sc"]])
@@ -574,20 +646,20 @@ class ParserTest(unittest.TestCase):
[group, lookup] = self.parse(
'DEF_GROUP "Denominators"\n'
' ENUM GLYPH "one.dnom" GLYPH "two.dnom" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "fracdnom" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
' LEFT ENUM GROUP "Denominators" GLYPH "fraction" END_ENUM\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "one"\n'
'WITH GLYPH "one.dnom"\n'
- 'END_SUB\n'
+ "END_SUB\n"
'SUB GLYPH "two"\n'
'WITH GLYPH "two.dnom"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
context = lookup.context[0]
@@ -599,29 +671,30 @@ class ParserTest(unittest.TestCase):
self.assertEqual(context.left[0][0].enum[0].group, "Denominators")
self.assertEqual(context.left[0][0].enum[1].glyph, "fraction")
self.assertEqual(context.right, [])
- self.assertSubEqual(lookup.sub, [["one"], ["two"]],
- [["one.dnom"], ["two.dnom"]])
+ self.assertSubEqual(
+ lookup.sub, [["one"], ["two"]], [["one.dnom"], ["two.dnom"]]
+ )
def test_substitution_single_in_contexts(self):
[group, lookup] = self.parse(
'DEF_GROUP "Hebrew"\n'
' ENUM GLYPH "uni05D0" GLYPH "uni05D1" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "HebrewCurrency" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
' RIGHT GROUP "Hebrew"\n'
' RIGHT GLYPH "one.Hebr"\n'
- 'END_CONTEXT\n'
- 'IN_CONTEXT\n'
+ "END_CONTEXT\n"
+ "IN_CONTEXT\n"
' LEFT GROUP "Hebrew"\n'
' LEFT GLYPH "one.Hebr"\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "dollar"\n'
'WITH GLYPH "dollar.Hebr"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
context1 = lookup.context[0]
context2 = lookup.context[1]
@@ -648,273 +721,251 @@ class ParserTest(unittest.TestCase):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "SomeSub" SKIP_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_base),
- ("SomeSub", False))
+ self.assertEqual((lookup.name, lookup.process_base), ("SomeSub", False))
def test_substitution_process_base(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_base),
- ("SomeSub", True))
+ self.assertEqual((lookup.name, lookup.process_base), ("SomeSub", True))
def test_substitution_process_marks(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS "SomeMarks"\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_marks),
- ("SomeSub", 'SomeMarks'))
+ self.assertEqual((lookup.name, lookup.process_marks), ("SomeSub", "SomeMarks"))
def test_substitution_process_marks_all(self):
[lookup] = self.parse(
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_marks),
- ("SomeSub", True))
+ self.assertEqual((lookup.name, lookup.process_marks), ("SomeSub", True))
def test_substitution_process_marks_none(self):
[lookup] = self.parse_(
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS "NONE"\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_marks),
- ("SomeSub", False))
+ self.assertEqual((lookup.name, lookup.process_marks), ("SomeSub", False))
def test_substitution_process_marks_bad(self):
with self.assertRaisesRegex(
- VoltLibError,
- 'Expected ALL, NONE, MARK_GLYPH_SET or an ID'):
+ VoltLibError, "Expected ALL, NONE, MARK_GLYPH_SET or an ID"
+ ):
self.parse(
'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" '
- 'END_ENUM END_GROUP\n'
+ "END_ENUM END_GROUP\n"
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS SomeMarks '
- 'AS_SUBSTITUTION\n'
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A" WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
)
def test_substitution_skip_marks(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "SomeSub" PROCESS_BASE SKIP_MARKS DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_marks),
- ("SomeSub", False))
+ self.assertEqual((lookup.name, lookup.process_marks), ("SomeSub", False))
def test_substitution_mark_attachment(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "acutecmb" GLYPH "gravecmb" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "SomeSub" PROCESS_BASE '
'PROCESS_MARKS "SomeMarks" DIRECTION RTL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_marks),
- ("SomeSub", "SomeMarks"))
+ self.assertEqual((lookup.name, lookup.process_marks), ("SomeSub", "SomeMarks"))
def test_substitution_mark_glyph_set(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "acutecmb" GLYPH "gravecmb" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "SomeSub" PROCESS_BASE '
'PROCESS_MARKS MARK_GLYPH_SET "SomeMarks" DIRECTION RTL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.mark_glyph_set),
- ("SomeSub", "SomeMarks"))
+ self.assertEqual((lookup.name, lookup.mark_glyph_set), ("SomeSub", "SomeMarks"))
def test_substitution_process_all_marks(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "acutecmb" GLYPH "gravecmb" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION RTL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.process_marks),
- ("SomeSub", True))
+ self.assertEqual((lookup.name, lookup.process_marks), ("SomeSub", True))
def test_substitution_no_reversal(self):
# TODO: check right context with no reversal
[lookup] = self.parse(
'DEF_LOOKUP "Lookup" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
' RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "a"\n'
'WITH GLYPH "a.alt"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
- self.assertEqual(
- (lookup.name, lookup.reversal),
- ("Lookup", None)
- )
+ self.assertEqual((lookup.name, lookup.reversal), ("Lookup", None))
def test_substitution_reversal(self):
lookup = self.parse(
'DEF_GROUP "DFLT_Num_standardFigures"\n'
' ENUM GLYPH "zero" GLYPH "one" GLYPH "two" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_GROUP "DFLT_Num_numerators"\n'
' ENUM GLYPH "zero.numr" GLYPH "one.numr" GLYPH "two.numr" END_ENUM\n'
- 'END_GROUP\n'
+ "END_GROUP\n"
'DEF_LOOKUP "RevLookup" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR REVERSAL\n'
- 'IN_CONTEXT\n'
+ "DIRECTION LTR REVERSAL\n"
+ "IN_CONTEXT\n"
' RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GROUP "DFLT_Num_standardFigures"\n'
'WITH GROUP "DFLT_Num_numerators"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements[-1]
- self.assertEqual(
- (lookup.name, lookup.reversal),
- ("RevLookup", True)
- )
+ self.assertEqual((lookup.name, lookup.reversal), ("RevLookup", True))
def test_substitution_single_to_multiple(self):
[lookup] = self.parse(
'DEF_LOOKUP "ccmp" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "aacute"\n'
'WITH GLYPH "a" GLYPH "acutecomb"\n'
- 'END_SUB\n'
+ "END_SUB\n"
'SUB GLYPH "agrave"\n'
'WITH GLYPH "a" GLYPH "gravecomb"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
self.assertEqual(lookup.name, "ccmp")
- self.assertSubEqual(lookup.sub, [["aacute"], ["agrave"]],
- [["a", "acutecomb"], ["a", "gravecomb"]])
+ self.assertSubEqual(
+ lookup.sub,
+ [["aacute"], ["agrave"]],
+ [["a", "acutecomb"], ["a", "gravecomb"]],
+ )
def test_substitution_multiple_to_single(self):
[lookup] = self.parse(
'DEF_LOOKUP "liga" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB GLYPH "f" GLYPH "i"\n'
'WITH GLYPH "f_i"\n'
- 'END_SUB\n'
+ "END_SUB\n"
'SUB GLYPH "f" GLYPH "t"\n'
'WITH GLYPH "f_t"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
self.assertEqual(lookup.name, "liga")
- self.assertSubEqual(lookup.sub, [["f", "i"], ["f", "t"]],
- [["f_i"], ["f_t"]])
+ self.assertSubEqual(lookup.sub, [["f", "i"], ["f", "t"]], [["f_i"], ["f_t"]])
def test_substitution_reverse_chaining_single(self):
[lookup] = self.parse(
'DEF_LOOKUP "numr" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR REVERSAL\n'
- 'IN_CONTEXT\n'
- ' RIGHT ENUM '
+ "DIRECTION LTR REVERSAL\n"
+ "IN_CONTEXT\n"
+ " RIGHT ENUM "
'GLYPH "fraction" '
'RANGE "zero.numr" TO "nine.numr" '
- 'END_ENUM\n'
- 'END_CONTEXT\n'
- 'AS_SUBSTITUTION\n'
+ "END_ENUM\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
'SUB RANGE "zero" TO "nine"\n'
'WITH RANGE "zero.numr" TO "nine.numr"\n'
- 'END_SUB\n'
- 'END_SUBSTITUTION'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
).statements
mapping = lookup.sub.mapping
@@ -922,16 +973,17 @@ class ParserTest(unittest.TestCase):
replacement = [[(r.start, r.end) for r in v] for v in mapping.values()]
self.assertEqual(lookup.name, "numr")
- self.assertEqual(glyphs, [[('zero', 'nine')]])
- self.assertEqual(replacement, [[('zero.numr', 'nine.numr')]])
+ self.assertEqual(glyphs, [[("zero", "nine")]])
+ self.assertEqual(replacement, [[("zero.numr", "nine.numr")]])
self.assertEqual(len(lookup.context[0].right), 1)
self.assertEqual(len(lookup.context[0].right[0]), 1)
enum = lookup.context[0].right[0][0]
self.assertEqual(len(enum.enum), 2)
self.assertEqual(enum.enum[0].glyph, "fraction")
- self.assertEqual((enum.enum[1].start, enum.enum[1].end),
- ('zero.numr', 'nine.numr'))
+ self.assertEqual(
+ (enum.enum[1].start, enum.enum[1].end), ("zero.numr", "nine.numr")
+ )
# GPOS
# ATTACH_CURSIVE
@@ -940,296 +992,365 @@ class ParserTest(unittest.TestCase):
# ADJUST_SINGLE
def test_position_empty(self):
with self.assertRaisesRegex(
- VoltLibError,
- 'Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE'):
+ VoltLibError, "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE"
+ ):
[lookup] = self.parse(
'DEF_LOOKUP "empty_position" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'EXCEPT_CONTEXT\n'
+ "DIRECTION LTR\n"
+ "EXCEPT_CONTEXT\n"
' LEFT GLYPH "glyph"\n'
- 'END_CONTEXT\n'
- 'AS_POSITION\n'
- 'END_POSITION'
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ "END_POSITION"
).statements
def test_position_attach(self):
[lookup, anchor1, anchor2, anchor3, anchor4] = self.parse(
'DEF_LOOKUP "anchor_top" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION RTL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_POSITION\n'
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
'ATTACH GLYPH "a" GLYPH "e"\n'
'TO GLYPH "acutecomb" AT ANCHOR "top" '
'GLYPH "gravecomb" AT ANCHOR "top"\n'
- 'END_ATTACH\n'
- 'END_POSITION\n'
+ "END_ATTACH\n"
+ "END_POSITION\n"
'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb COMPONENT 1 '
- 'AT POS DX 0 DY 450 END_POS END_ANCHOR\n'
+ "AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
'DEF_ANCHOR "MARK_top" ON 121 GLYPH gravecomb COMPONENT 1 '
- 'AT POS DX 0 DY 450 END_POS END_ANCHOR\n'
+ "AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
'DEF_ANCHOR "top" ON 31 GLYPH a COMPONENT 1 '
- 'AT POS DX 210 DY 450 END_POS END_ANCHOR\n'
+ "AT POS DX 210 DY 450 END_POS END_ANCHOR\n"
'DEF_ANCHOR "top" ON 35 GLYPH e COMPONENT 1 '
- 'AT POS DX 215 DY 450 END_POS END_ANCHOR'
+ "AT POS DX 215 DY 450 END_POS END_ANCHOR"
).statements
pos = lookup.pos
coverage = [g.glyph for g in pos.coverage]
coverage_to = [[[g.glyph for g in e], a] for (e, a) in pos.coverage_to]
self.assertEqual(
(lookup.name, coverage, coverage_to),
- ("anchor_top", ["a", "e"],
- [[["acutecomb"], "top"], [["gravecomb"], "top"]])
+ (
+ "anchor_top",
+ ["a", "e"],
+ [[["acutecomb"], "top"], [["gravecomb"], "top"]],
+ ),
)
self.assertEqual(
- (anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component,
- anchor1.locked, anchor1.pos),
- ("MARK_top", 120, "acutecomb", 1, False, (None, 0, 450, {}, {},
- {}))
+ (
+ anchor1.name,
+ anchor1.gid,
+ anchor1.glyph_name,
+ anchor1.component,
+ anchor1.locked,
+ anchor1.pos,
+ ),
+ ("MARK_top", 120, "acutecomb", 1, False, (None, 0, 450, {}, {}, {})),
)
self.assertEqual(
- (anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component,
- anchor2.locked, anchor2.pos),
- ("MARK_top", 121, "gravecomb", 1, False, (None, 0, 450, {}, {},
- {}))
+ (
+ anchor2.name,
+ anchor2.gid,
+ anchor2.glyph_name,
+ anchor2.component,
+ anchor2.locked,
+ anchor2.pos,
+ ),
+ ("MARK_top", 121, "gravecomb", 1, False, (None, 0, 450, {}, {}, {})),
)
self.assertEqual(
- (anchor3.name, anchor3.gid, anchor3.glyph_name, anchor3.component,
- anchor3.locked, anchor3.pos),
- ("top", 31, "a", 1, False, (None, 210, 450, {}, {}, {}))
+ (
+ anchor3.name,
+ anchor3.gid,
+ anchor3.glyph_name,
+ anchor3.component,
+ anchor3.locked,
+ anchor3.pos,
+ ),
+ ("top", 31, "a", 1, False, (None, 210, 450, {}, {}, {})),
)
self.assertEqual(
- (anchor4.name, anchor4.gid, anchor4.glyph_name, anchor4.component,
- anchor4.locked, anchor4.pos),
- ("top", 35, "e", 1, False, (None, 215, 450, {}, {}, {}))
+ (
+ anchor4.name,
+ anchor4.gid,
+ anchor4.glyph_name,
+ anchor4.component,
+ anchor4.locked,
+ anchor4.pos,
+ ),
+ ("top", 35, "e", 1, False, (None, 215, 450, {}, {}, {})),
)
def test_position_attach_cursive(self):
[lookup] = self.parse(
'DEF_LOOKUP "SomeLookup" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION RTL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_POSITION\n'
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
'ATTACH_CURSIVE\nEXIT GLYPH "a" GLYPH "b"\nENTER GLYPH "c"\n'
- 'END_ATTACH\n'
- 'END_POSITION'
+ "END_ATTACH\n"
+ "END_POSITION"
).statements
exit = [[g.glyph for g in v] for v in lookup.pos.coverages_exit]
enter = [[g.glyph for g in v] for v in lookup.pos.coverages_enter]
self.assertEqual(
- (lookup.name, exit, enter),
- ("SomeLookup", [["a", "b"]], [["c"]])
+ (lookup.name, exit, enter), ("SomeLookup", [["a", "b"]], [["c"]])
)
def test_position_adjust_pair(self):
[lookup] = self.parse(
'DEF_LOOKUP "kern1" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION RTL\n'
- 'IN_CONTEXT\n'
- 'END_CONTEXT\n'
- 'AS_POSITION\n'
- 'ADJUST_PAIR\n'
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ "ADJUST_PAIR\n"
' FIRST GLYPH "A"\n'
' SECOND GLYPH "V"\n'
- ' 1 2 BY POS ADV -30 END_POS POS END_POS\n'
- ' 2 1 BY POS ADV -30 END_POS POS END_POS\n\n'
- 'END_ADJUST\n'
- 'END_POSITION'
+ " 1 2 BY POS ADV -30 END_POS POS END_POS\n"
+ " 2 1 BY POS ADV -30 END_POS POS END_POS\n\n"
+ "END_ADJUST\n"
+ "END_POSITION"
).statements
coverages_1 = [[g.glyph for g in v] for v in lookup.pos.coverages_1]
coverages_2 = [[g.glyph for g in v] for v in lookup.pos.coverages_2]
self.assertEqual(
- (lookup.name, coverages_1, coverages_2,
- lookup.pos.adjust_pair),
- ("kern1", [["A"]], [["V"]],
- {(1, 2): ((-30, None, None, {}, {}, {}),
- (None, None, None, {}, {}, {})),
- (2, 1): ((-30, None, None, {}, {}, {}),
- (None, None, None, {}, {}, {}))})
+ (lookup.name, coverages_1, coverages_2, lookup.pos.adjust_pair),
+ (
+ "kern1",
+ [["A"]],
+ [["V"]],
+ {
+ (1, 2): (
+ (-30, None, None, {}, {}, {}),
+ (None, None, None, {}, {}, {}),
+ ),
+ (2, 1): (
+ (-30, None, None, {}, {}, {}),
+ (None, None, None, {}, {}, {}),
+ ),
+ },
+ ),
)
def test_position_adjust_single(self):
[lookup] = self.parse(
'DEF_LOOKUP "TestLookup" PROCESS_BASE PROCESS_MARKS ALL '
- 'DIRECTION LTR\n'
- 'IN_CONTEXT\n'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
# ' LEFT GLYPH "leftGlyph"\n'
# ' RIGHT GLYPH "rightGlyph"\n'
- 'END_CONTEXT\n'
- 'AS_POSITION\n'
- 'ADJUST_SINGLE'
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ "ADJUST_SINGLE"
' GLYPH "glyph1" BY POS ADV 0 DX 123 END_POS'
' GLYPH "glyph2" BY POS ADV 0 DX 456 END_POS\n'
- 'END_ADJUST\n'
- 'END_POSITION'
+ "END_ADJUST\n"
+ "END_POSITION"
).statements
pos = lookup.pos
adjust = [[[g.glyph for g in a], b] for (a, b) in pos.adjust_single]
self.assertEqual(
(lookup.name, adjust),
- ("TestLookup",
- [[["glyph1"], (0, 123, None, {}, {}, {})],
- [["glyph2"], (0, 456, None, {}, {}, {})]])
+ (
+ "TestLookup",
+ [
+ [["glyph1"], (0, 123, None, {}, {}, {})],
+ [["glyph2"], (0, 456, None, {}, {}, {})],
+ ],
+ ),
)
def test_def_anchor(self):
[anchor1, anchor2, anchor3] = self.parse(
'DEF_ANCHOR "top" ON 120 GLYPH a '
- 'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n'
+ "COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n"
'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb '
- 'COMPONENT 1 AT POS DX 0 DY 450 END_POS END_ANCHOR\n'
+ "COMPONENT 1 AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
'DEF_ANCHOR "bottom" ON 120 GLYPH a '
- 'COMPONENT 1 AT POS DX 250 DY 0 END_POS END_ANCHOR'
+ "COMPONENT 1 AT POS DX 250 DY 0 END_POS END_ANCHOR"
).statements
self.assertEqual(
- (anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component,
- anchor1.locked, anchor1.pos),
- ("top", 120, "a", 1,
- False, (None, 250, 450, {}, {}, {}))
+ (
+ anchor1.name,
+ anchor1.gid,
+ anchor1.glyph_name,
+ anchor1.component,
+ anchor1.locked,
+ anchor1.pos,
+ ),
+ ("top", 120, "a", 1, False, (None, 250, 450, {}, {}, {})),
)
self.assertEqual(
- (anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component,
- anchor2.locked, anchor2.pos),
- ("MARK_top", 120, "acutecomb", 1,
- False, (None, 0, 450, {}, {}, {}))
+ (
+ anchor2.name,
+ anchor2.gid,
+ anchor2.glyph_name,
+ anchor2.component,
+ anchor2.locked,
+ anchor2.pos,
+ ),
+ ("MARK_top", 120, "acutecomb", 1, False, (None, 0, 450, {}, {}, {})),
)
self.assertEqual(
- (anchor3.name, anchor3.gid, anchor3.glyph_name, anchor3.component,
- anchor3.locked, anchor3.pos),
- ("bottom", 120, "a", 1,
- False, (None, 250, 0, {}, {}, {}))
+ (
+ anchor3.name,
+ anchor3.gid,
+ anchor3.glyph_name,
+ anchor3.component,
+ anchor3.locked,
+ anchor3.pos,
+ ),
+ ("bottom", 120, "a", 1, False, (None, 250, 0, {}, {}, {})),
)
def test_def_anchor_multi_component(self):
[anchor1, anchor2] = self.parse(
'DEF_ANCHOR "top" ON 120 GLYPH a '
- 'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n'
+ "COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n"
'DEF_ANCHOR "top" ON 120 GLYPH a '
- 'COMPONENT 2 AT POS DX 250 DY 450 END_POS END_ANCHOR'
+ "COMPONENT 2 AT POS DX 250 DY 450 END_POS END_ANCHOR"
).statements
self.assertEqual(
(anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component),
- ("top", 120, "a", 1)
+ ("top", 120, "a", 1),
)
self.assertEqual(
(anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component),
- ("top", 120, "a", 2)
+ ("top", 120, "a", 2),
)
def test_def_anchor_duplicate(self):
self.assertRaisesRegex(
VoltLibError,
- 'Anchor "dupe" already defined, '
- 'anchor names are case insensitive',
+ 'Anchor "dupe" already defined, ' "anchor names are case insensitive",
self.parse,
'DEF_ANCHOR "dupe" ON 120 GLYPH a '
- 'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n'
+ "COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n"
'DEF_ANCHOR "dupe" ON 120 GLYPH a '
- 'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR'
+ "COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR",
)
def test_def_anchor_locked(self):
[anchor] = self.parse(
'DEF_ANCHOR "top" ON 120 GLYPH a '
- 'COMPONENT 1 LOCKED AT POS DX 250 DY 450 END_POS END_ANCHOR'
+ "COMPONENT 1 LOCKED AT POS DX 250 DY 450 END_POS END_ANCHOR"
).statements
self.assertEqual(
- (anchor.name, anchor.gid, anchor.glyph_name, anchor.component,
- anchor.locked, anchor.pos),
- ("top", 120, "a", 1,
- True, (None, 250, 450, {}, {}, {}))
+ (
+ anchor.name,
+ anchor.gid,
+ anchor.glyph_name,
+ anchor.component,
+ anchor.locked,
+ anchor.pos,
+ ),
+ ("top", 120, "a", 1, True, (None, 250, 450, {}, {}, {})),
)
def test_anchor_adjust_device(self):
[anchor] = self.parse(
'DEF_ANCHOR "MARK_top" ON 123 GLYPH diacglyph '
- 'COMPONENT 1 AT POS DX 0 DY 456 ADJUST_BY 12 AT 34 '
- 'ADJUST_BY 56 AT 78 END_POS END_ANCHOR'
+ "COMPONENT 1 AT POS DX 0 DY 456 ADJUST_BY 12 AT 34 "
+ "ADJUST_BY 56 AT 78 END_POS END_ANCHOR"
).statements
self.assertEqual(
(anchor.name, anchor.pos),
- ("MARK_top", (None, 0, 456, {}, {}, {34: 12, 78: 56}))
+ ("MARK_top", (None, 0, 456, {}, {}, {34: 12, 78: 56})),
)
def test_ppem(self):
[grid_ppem, pres_ppem, ppos_ppem] = self.parse(
- 'GRID_PPEM 20\n'
- 'PRESENTATION_PPEM 72\n'
- 'PPOSITIONING_PPEM 144'
+ "GRID_PPEM 20\n" "PRESENTATION_PPEM 72\n" "PPOSITIONING_PPEM 144"
).statements
self.assertEqual(
- ((grid_ppem.name, grid_ppem.value),
- (pres_ppem.name, pres_ppem.value),
- (ppos_ppem.name, ppos_ppem.value)),
- (("GRID_PPEM", 20), ("PRESENTATION_PPEM", 72),
- ("PPOSITIONING_PPEM", 144))
+ (
+ (grid_ppem.name, grid_ppem.value),
+ (pres_ppem.name, pres_ppem.value),
+ (ppos_ppem.name, ppos_ppem.value),
+ ),
+ (("GRID_PPEM", 20), ("PRESENTATION_PPEM", 72), ("PPOSITIONING_PPEM", 144)),
)
def test_compiler_flags(self):
[setting1, setting2] = self.parse(
- 'COMPILER_USEEXTENSIONLOOKUPS\n'
- 'COMPILER_USEPAIRPOSFORMAT2'
+ "COMPILER_USEEXTENSIONLOOKUPS\n" "COMPILER_USEPAIRPOSFORMAT2"
).statements
self.assertEqual(
- ((setting1.name, setting1.value),
- (setting2.name, setting2.value)),
- (("COMPILER_USEEXTENSIONLOOKUPS", True),
- ("COMPILER_USEPAIRPOSFORMAT2", True))
+ ((setting1.name, setting1.value), (setting2.name, setting2.value)),
+ (
+ ("COMPILER_USEEXTENSIONLOOKUPS", True),
+ ("COMPILER_USEPAIRPOSFORMAT2", True),
+ ),
)
def test_cmap(self):
[cmap_format1, cmap_format2, cmap_format3] = self.parse(
- 'CMAP_FORMAT 0 3 4\n'
- 'CMAP_FORMAT 1 0 6\n'
- 'CMAP_FORMAT 3 1 4'
+ "CMAP_FORMAT 0 3 4\n" "CMAP_FORMAT 1 0 6\n" "CMAP_FORMAT 3 1 4"
).statements
self.assertEqual(
- ((cmap_format1.name, cmap_format1.value),
- (cmap_format2.name, cmap_format2.value),
- (cmap_format3.name, cmap_format3.value)),
- (("CMAP_FORMAT", (0, 3, 4)),
- ("CMAP_FORMAT", (1, 0, 6)),
- ("CMAP_FORMAT", (3, 1, 4)))
+ (
+ (cmap_format1.name, cmap_format1.value),
+ (cmap_format2.name, cmap_format2.value),
+ (cmap_format3.name, cmap_format3.value),
+ ),
+ (
+ ("CMAP_FORMAT", (0, 3, 4)),
+ ("CMAP_FORMAT", (1, 0, 6)),
+ ("CMAP_FORMAT", (3, 1, 4)),
+ ),
)
def test_do_not_touch_cmap(self):
[option1, option2, option3, option4] = self.parse(
- 'DO_NOT_TOUCH_CMAP\n'
- 'CMAP_FORMAT 0 3 4\n'
- 'CMAP_FORMAT 1 0 6\n'
- 'CMAP_FORMAT 3 1 4'
+ "DO_NOT_TOUCH_CMAP\n"
+ "CMAP_FORMAT 0 3 4\n"
+ "CMAP_FORMAT 1 0 6\n"
+ "CMAP_FORMAT 3 1 4"
).statements
self.assertEqual(
- ((option1.name, option1.value),
- (option2.name, option2.value),
- (option3.name, option3.value),
- (option4.name, option4.value)),
- (("DO_NOT_TOUCH_CMAP", True),
- ("CMAP_FORMAT", (0, 3, 4)),
- ("CMAP_FORMAT", (1, 0, 6)),
- ("CMAP_FORMAT", (3, 1, 4)))
+ (
+ (option1.name, option1.value),
+ (option2.name, option2.value),
+ (option3.name, option3.value),
+ (option4.name, option4.value),
+ ),
+ (
+ ("DO_NOT_TOUCH_CMAP", True),
+ ("CMAP_FORMAT", (0, 3, 4)),
+ ("CMAP_FORMAT", (1, 0, 6)),
+ ("CMAP_FORMAT", (3, 1, 4)),
+ ),
)
def test_stop_at_end(self):
- doc = self.parse_(
- 'DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH END\0\0\0\0'
- )
+ doc = self.parse_('DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH END\0\0\0\0')
[def_glyph] = doc.statements
- self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
- def_glyph.type, def_glyph.components),
- (".notdef", 0, None, "BASE", None))
- self.assertEqual(str(doc),
- '\nDEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH END\n')
+ self.assertEqual(
+ (
+ def_glyph.name,
+ def_glyph.id,
+ def_glyph.unicode,
+ def_glyph.type,
+ def_glyph.components,
+ ),
+ (".notdef", 0, None, "BASE", None),
+ )
+ self.assertEqual(
+ str(doc), '\nDEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH END\n'
+ )
def parse_(self, text):
return Parser(StringIO(text)).parse()
def parse(self, text):
doc = self.parse_(text)
- self.assertEqual('\n'.join(str(s) for s in doc.statements), text)
+ self.assertEqual("\n".join(str(s) for s in doc.statements), text)
return Parser(StringIO(text)).parse()
+
if __name__ == "__main__":
import sys
+
sys.exit(unittest.main())
diff --git a/Tests/voltLib/volttofea_test.py b/Tests/voltLib/volttofea_test.py
new file mode 100644
index 00000000..0d8d8d28
--- /dev/null
+++ b/Tests/voltLib/volttofea_test.py
@@ -0,0 +1,1253 @@
+import pathlib
+import shutil
+import tempfile
+import unittest
+from io import StringIO
+
+from fontTools.voltLib.voltToFea import VoltToFea
+
+DATADIR = pathlib.Path(__file__).parent / "data"
+
+
+class ToFeaTest(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ cls.tempdir = None
+ cls.num_tempfiles = 0
+
+ @classmethod
+ def teardown_class(cls):
+ if cls.tempdir:
+ shutil.rmtree(cls.tempdir, ignore_errors=True)
+
+ @classmethod
+ def temp_path(cls):
+ if not cls.tempdir:
+ cls.tempdir = pathlib.Path(tempfile.mkdtemp())
+ cls.num_tempfiles += 1
+ return cls.tempdir / f"tmp{cls.num_tempfiles}"
+
+ def test_def_glyph_base(self):
+ fea = self.parse('DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH')
+ self.assertEqual(
+ fea,
+ "@GDEF_base = [.notdef];\n"
+ "table GDEF {\n"
+ " GlyphClassDef @GDEF_base, , , ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_def_glyph_base_2_components(self):
+ fea = self.parse(
+ 'DEF_GLYPH "glyphBase" ID 320 TYPE BASE COMPONENTS 2 END_GLYPH'
+ )
+ self.assertEqual(
+ fea,
+ "@GDEF_base = [glyphBase];\n"
+ "table GDEF {\n"
+ " GlyphClassDef @GDEF_base, , , ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_def_glyph_ligature_2_components(self):
+ fea = self.parse('DEF_GLYPH "f_f" ID 320 TYPE LIGATURE COMPONENTS 2 END_GLYPH')
+ self.assertEqual(
+ fea,
+ "@GDEF_ligature = [f_f];\n"
+ "table GDEF {\n"
+ " GlyphClassDef , @GDEF_ligature, , ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_def_glyph_mark(self):
+ fea = self.parse('DEF_GLYPH "brevecomb" ID 320 TYPE MARK END_GLYPH')
+ self.assertEqual(
+ fea,
+ "@GDEF_mark = [brevecomb];\n"
+ "table GDEF {\n"
+ " GlyphClassDef , , @GDEF_mark, ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_def_glyph_component(self):
+ fea = self.parse('DEF_GLYPH "f.f_f" ID 320 TYPE COMPONENT END_GLYPH')
+ self.assertEqual(
+ fea,
+ "@GDEF_component = [f.f_f];\n"
+ "table GDEF {\n"
+ " GlyphClassDef , , , @GDEF_component;\n"
+ "} GDEF;\n",
+ )
+
+ def test_def_glyph_no_type(self):
+ fea = self.parse('DEF_GLYPH "glyph20" ID 20 END_GLYPH')
+ self.assertEqual(fea, "")
+
+ def test_def_glyph_case_sensitive(self):
+ fea = self.parse(
+ 'DEF_GLYPH "A" ID 3 UNICODE 65 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "a" ID 4 UNICODE 97 TYPE BASE END_GLYPH\n'
+ )
+ self.assertEqual(
+ fea,
+ "@GDEF_base = [A a];\n"
+ "table GDEF {\n"
+ " GlyphClassDef @GDEF_base, , , ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_def_group_glyphs(self):
+ fea = self.parse(
+ 'DEF_GROUP "aaccented"\n'
+ 'ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" '
+ 'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" '
+ 'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n'
+ "END_GROUP\n"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@aaccented = [aacute abreve acircumflex adieresis ae"
+ " agrave amacron aogonek aring atilde];",
+ )
+
+ def test_def_group_groups(self):
+ fea = self.parse(
+ 'DEF_GROUP "Group1"\n'
+ 'ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "Group2"\n'
+ 'ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "TestGroup"\n'
+ 'ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n'
+ "END_GROUP\n"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@Group1 = [a b c d];\n"
+ "@Group2 = [e f g h];\n"
+ "@TestGroup = [@Group1 @Group2];",
+ )
+
+ def test_def_group_groups_not_yet_defined(self):
+ fea = self.parse(
+ 'DEF_GROUP "Group1"\n'
+ 'ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "TestGroup1"\n'
+ 'ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "TestGroup2"\n'
+ 'ENUM GROUP "Group2" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "TestGroup3"\n'
+ 'ENUM GROUP "Group2" GROUP "Group1" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "Group2"\n'
+ 'ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n'
+ "END_GROUP\n"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@Group1 = [a b c d];\n"
+ "@Group2 = [e f g h];\n"
+ "@TestGroup1 = [@Group1 @Group2];\n"
+ "@TestGroup2 = [@Group2];\n"
+ "@TestGroup3 = [@Group2 @Group1];",
+ )
+
+ def test_def_group_glyphs_and_group(self):
+ fea = self.parse(
+ 'DEF_GROUP "aaccented"\n'
+ 'ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" '
+ 'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" '
+ 'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "KERN_lc_a_2ND"\n'
+ 'ENUM GLYPH "a" GROUP "aaccented" END_ENUM\n'
+ "END_GROUP"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@aaccented = [aacute abreve acircumflex adieresis ae"
+ " agrave amacron aogonek aring atilde];\n"
+ "@KERN_lc_a_2ND = [a @aaccented];",
+ )
+
+ def test_def_group_range(self):
+ fea = self.parse(
+ 'DEF_GLYPH "a" ID 163 UNICODE 97 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "agrave" ID 194 UNICODE 224 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "aacute" ID 195 UNICODE 225 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "acircumflex" ID 196 UNICODE 226 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "atilde" ID 197 UNICODE 227 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "c" ID 165 UNICODE 99 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "ccaron" ID 209 UNICODE 269 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "ccedilla" ID 210 UNICODE 231 TYPE BASE END_GLYPH\n'
+ 'DEF_GLYPH "cdotaccent" ID 210 UNICODE 267 TYPE BASE END_GLYPH\n'
+ 'DEF_GROUP "KERN_lc_a_2ND"\n'
+ 'ENUM RANGE "a" TO "atilde" GLYPH "b" RANGE "c" TO "cdotaccent" '
+ "END_ENUM\n"
+ "END_GROUP"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@KERN_lc_a_2ND = [a - atilde b c - cdotaccent];\n"
+ "@GDEF_base = [a agrave aacute acircumflex atilde c"
+ " ccaron ccedilla cdotaccent];\n"
+ "table GDEF {\n"
+ " GlyphClassDef @GDEF_base, , , ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_script_without_langsys(self):
+ fea = self.parse('DEF_SCRIPT NAME "Latin" TAG "latn"\n' "END_SCRIPT")
+ self.assertEqual(fea, "")
+
+ def test_langsys_normal(self):
+ fea = self.parse(
+ 'DEF_SCRIPT NAME "Latin" TAG "latn"\n'
+ 'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n'
+ "END_LANGSYS\n"
+ 'DEF_LANGSYS NAME "Moldavian" TAG "MOL "\n'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
+ )
+ self.assertEqual(fea, "")
+
+ def test_langsys_no_script_name(self):
+ fea = self.parse(
+ 'DEF_SCRIPT TAG "latn"\n'
+ 'DEF_LANGSYS NAME "Default" TAG "dflt"\n'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
+ )
+ self.assertEqual(fea, "")
+
+ def test_langsys_lang_in_separate_scripts(self):
+ fea = self.parse(
+ 'DEF_SCRIPT NAME "Default" TAG "DFLT"\n'
+ 'DEF_LANGSYS NAME "Default" TAG "dflt"\n'
+ "END_LANGSYS\n"
+ 'DEF_LANGSYS NAME "Default" TAG "ROM "\n'
+ "END_LANGSYS\n"
+ "END_SCRIPT\n"
+ 'DEF_SCRIPT NAME "Latin" TAG "latn"\n'
+ 'DEF_LANGSYS NAME "Default" TAG "dflt"\n'
+ "END_LANGSYS\n"
+ 'DEF_LANGSYS NAME "Default" TAG "ROM "\n'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
+ )
+ self.assertEqual(fea, "")
+
+ def test_langsys_no_lang_name(self):
+ fea = self.parse(
+ 'DEF_SCRIPT NAME "Latin" TAG "latn"\n'
+ 'DEF_LANGSYS TAG "dflt"\n'
+ "END_LANGSYS\n"
+ "END_SCRIPT"
+ )
+ self.assertEqual(fea, "")
+
+ def test_feature(self):
+ fea = self.parse(
+ 'DEF_SCRIPT NAME "Latin" TAG "latn"\n'
+ 'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n'
+ 'DEF_FEATURE NAME "Fractions" TAG "frac"\n'
+ 'LOOKUP "fraclookup"\n'
+ "END_FEATURE\n"
+ "END_LANGSYS\n"
+ "END_SCRIPT\n"
+ 'DEF_LOOKUP "fraclookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "one" GLYPH "slash" GLYPH "two"\n'
+ 'WITH GLYPH "one_slash_two.frac"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup fraclookup {\n"
+ " sub one slash two by one_slash_two.frac;\n"
+ "} fraclookup;\n"
+ "\n"
+ "# Features\n"
+ "feature frac {\n"
+ " script latn;\n"
+ " language ROM exclude_dflt;\n"
+ " lookup fraclookup;\n"
+ "} frac;\n",
+ )
+
+ def test_feature_sub_lookups(self):
+ fea = self.parse(
+ 'DEF_SCRIPT NAME "Latin" TAG "latn"\n'
+ 'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n'
+ 'DEF_FEATURE NAME "Fractions" TAG "frac"\n'
+ 'LOOKUP "fraclookup\\1"\n'
+ 'LOOKUP "fraclookup\\1"\n'
+ "END_FEATURE\n"
+ "END_LANGSYS\n"
+ "END_SCRIPT\n"
+ 'DEF_LOOKUP "fraclookup\\1" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "one" GLYPH "slash" GLYPH "two"\n'
+ 'WITH GLYPH "one_slash_two.frac"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION\n"
+ 'DEF_LOOKUP "fraclookup\\2" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "one" GLYPH "slash" GLYPH "three"\n'
+ 'WITH GLYPH "one_slash_three.frac"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup fraclookup {\n"
+ " lookupflag RightToLeft;\n"
+ " # fraclookup\\1\n"
+ " sub one slash two by one_slash_two.frac;\n"
+ " subtable;\n"
+ " # fraclookup\\2\n"
+ " sub one slash three by one_slash_three.frac;\n"
+ "} fraclookup;\n"
+ "\n"
+ "# Features\n"
+ "feature frac {\n"
+ " script latn;\n"
+ " language ROM exclude_dflt;\n"
+ " lookup fraclookup;\n"
+ "} frac;\n",
+ )
+
+ def test_lookup_comment(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "smcp" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ 'COMMENTS "Smallcaps lookup for testing"\n'
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "a"\n'
+ 'WITH GLYPH "a.sc"\n'
+ "END_SUB\n"
+ 'SUB GLYPH "b"\n'
+ 'WITH GLYPH "b.sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup smcp {\n"
+ " # Smallcaps lookup for testing\n"
+ " sub a by a.sc;\n"
+ " sub b by b.sc;\n"
+ "} smcp;\n",
+ )
+
+ def test_substitution_single(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "smcp" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "a"\n'
+ 'WITH GLYPH "a.sc"\n'
+ "END_SUB\n"
+ 'SUB GLYPH "b"\n'
+ 'WITH GLYPH "b.sc"\n'
+ "END_SUB\n"
+ "SUB WITH\n" # Empty substitution, will be ignored
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup smcp {\n"
+ " sub a by a.sc;\n"
+ " sub b by b.sc;\n"
+ "} smcp;\n",
+ )
+
+ def test_substitution_single_in_context(self):
+ fea = self.parse(
+ 'DEF_GROUP "Denominators" ENUM GLYPH "one.dnom" GLYPH "two.dnom" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "fracdnom" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ 'IN_CONTEXT LEFT ENUM GROUP "Denominators" GLYPH "fraction" '
+ "END_ENUM\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "one"\n'
+ 'WITH GLYPH "one.dnom"\n'
+ "END_SUB\n"
+ 'SUB GLYPH "two"\n'
+ 'WITH GLYPH "two.dnom"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@Denominators = [one.dnom two.dnom];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup fracdnom {\n"
+ " sub [@Denominators fraction] one' by one.dnom;\n"
+ " sub [@Denominators fraction] two' by two.dnom;\n"
+ "} fracdnom;\n",
+ )
+
+ def test_substitution_single_in_contexts(self):
+ fea = self.parse(
+ 'DEF_GROUP "Hebrew" ENUM GLYPH "uni05D0" GLYPH "uni05D1" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "HebrewCurrency" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ 'RIGHT GROUP "Hebrew"\n'
+ 'RIGHT GLYPH "one.Hebr"\n'
+ "END_CONTEXT\n"
+ "IN_CONTEXT\n"
+ 'LEFT GROUP "Hebrew"\n'
+ 'LEFT GLYPH "one.Hebr"\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "dollar"\n'
+ 'WITH GLYPH "dollar.Hebr"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@Hebrew = [uni05D0 uni05D1];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup HebrewCurrency {\n"
+ " sub dollar' @Hebrew one.Hebr by dollar.Hebr;\n"
+ " sub @Hebrew one.Hebr dollar' by dollar.Hebr;\n"
+ "} HebrewCurrency;\n",
+ )
+
+ def test_substitution_single_except_context(self):
+ fea = self.parse(
+ 'DEF_GROUP "Hebrew" ENUM GLYPH "uni05D0" GLYPH "uni05D1" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "HebrewCurrency" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "EXCEPT_CONTEXT\n"
+ 'RIGHT GROUP "Hebrew"\n'
+ 'RIGHT GLYPH "one.Hebr"\n'
+ "END_CONTEXT\n"
+ "IN_CONTEXT\n"
+ 'LEFT GROUP "Hebrew"\n'
+ 'LEFT GLYPH "one.Hebr"\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "dollar"\n'
+ 'WITH GLYPH "dollar.Hebr"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@Hebrew = [uni05D0 uni05D1];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup HebrewCurrency {\n"
+ " ignore sub dollar' @Hebrew one.Hebr;\n"
+ " sub @Hebrew one.Hebr dollar' by dollar.Hebr;\n"
+ "} HebrewCurrency;\n",
+ )
+
+ def test_substitution_skip_base(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" SKIP_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [marka markb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " lookupflag IgnoreBaseGlyphs;\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_process_base(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [marka markb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_process_marks_all(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS "ALL"'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [marka markb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_process_marks_none(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS "NONE"'
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [marka markb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " lookupflag IgnoreMarks;\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_skip_marks(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" PROCESS_BASE SKIP_MARKS '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [marka markb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " lookupflag IgnoreMarks;\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_mark_attachment(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "acutecmb" GLYPH "gravecmb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" PROCESS_BASE '
+ 'PROCESS_MARKS "SomeMarks" \n'
+ "DIRECTION RTL\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [acutecmb gravecmb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " lookupflag RightToLeft MarkAttachmentType"
+ " @SomeMarks;\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_mark_glyph_set(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "acutecmb" GLYPH "gravecmb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" PROCESS_BASE '
+ 'PROCESS_MARKS MARK_GLYPH_SET "SomeMarks" \n'
+ "DIRECTION RTL\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [acutecmb gravecmb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " lookupflag RightToLeft UseMarkFilteringSet"
+ " @SomeMarks;\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_process_all_marks(self):
+ fea = self.parse(
+ 'DEF_GROUP "SomeMarks" ENUM GLYPH "acutecmb" GLYPH "gravecmb" '
+ "END_ENUM END_GROUP\n"
+ 'DEF_LOOKUP "SomeSub" PROCESS_BASE '
+ "PROCESS_MARKS ALL \n"
+ "DIRECTION RTL\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "A"\n'
+ 'WITH GLYPH "A.c2sc"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@SomeMarks = [acutecmb gravecmb];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup SomeSub {\n"
+ " lookupflag RightToLeft;\n"
+ " sub A by A.c2sc;\n"
+ "} SomeSub;\n",
+ )
+
+ def test_substitution_no_reversal(self):
+ # TODO: check right context with no reversal
+ fea = self.parse(
+ 'DEF_LOOKUP "Lookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ 'RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "a"\n'
+ 'WITH GLYPH "a.alt"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup Lookup {\n"
+ " sub a' [a b] by a.alt;\n"
+ "} Lookup;\n",
+ )
+
+ def test_substitution_reversal(self):
+ fea = self.parse(
+ 'DEF_GROUP "DFLT_Num_standardFigures"\n'
+ 'ENUM GLYPH "zero" GLYPH "one" GLYPH "two" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "DFLT_Num_numerators"\n'
+ 'ENUM GLYPH "zero.numr" GLYPH "one.numr" GLYPH "two.numr" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_LOOKUP "RevLookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR REVERSAL\n"
+ "IN_CONTEXT\n"
+ 'RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GROUP "DFLT_Num_standardFigures"\n'
+ 'WITH GROUP "DFLT_Num_numerators"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@DFLT_Num_standardFigures = [zero one two];\n"
+ "@DFLT_Num_numerators = [zero.numr one.numr two.numr];\n"
+ "\n"
+ "# Lookups\n"
+ "lookup RevLookup {\n"
+ " rsub @DFLT_Num_standardFigures' [a b] by @DFLT_Num_numerators;\n"
+ "} RevLookup;\n",
+ )
+
+ def test_substitution_single_to_multiple(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "ccmp" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "aacute"\n'
+ 'WITH GLYPH "a" GLYPH "acutecomb"\n'
+ "END_SUB\n"
+ 'SUB GLYPH "agrave"\n'
+ 'WITH GLYPH "a" GLYPH "gravecomb"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup ccmp {\n"
+ " sub aacute by a acutecomb;\n"
+ " sub agrave by a gravecomb;\n"
+ "} ccmp;\n",
+ )
+
+ def test_substitution_multiple_to_single(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "liga" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB GLYPH "f" GLYPH "i"\n'
+ 'WITH GLYPH "f_i"\n'
+ "END_SUB\n"
+ 'SUB GLYPH "f" GLYPH "t"\n'
+ 'WITH GLYPH "f_t"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup liga {\n"
+ " sub f i by f_i;\n"
+ " sub f t by f_t;\n"
+ "} liga;\n",
+ )
+
+ def test_substitution_reverse_chaining_single(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "numr" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR REVERSAL\n"
+ "IN_CONTEXT\n"
+ "RIGHT ENUM "
+ 'GLYPH "fraction" '
+ 'RANGE "zero.numr" TO "nine.numr" '
+ "END_ENUM\n"
+ "END_CONTEXT\n"
+ "AS_SUBSTITUTION\n"
+ 'SUB RANGE "zero" TO "nine"\n'
+ 'WITH RANGE "zero.numr" TO "nine.numr"\n'
+ "END_SUB\n"
+ "END_SUBSTITUTION"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup numr {\n"
+ " rsub zero - nine' [fraction zero.numr - nine.numr] by zero.numr - nine.numr;\n"
+ "} numr;\n",
+ )
+
+ # GPOS
+ # ATTACH_CURSIVE
+ # ATTACH
+ # ADJUST_PAIR
+ # ADJUST_SINGLE
+ def test_position_attach(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "anchor_top" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ 'ATTACH GLYPH "a" GLYPH "e"\n'
+ 'TO GLYPH "acutecomb" AT ANCHOR "top" '
+ 'GLYPH "gravecomb" AT ANCHOR "top"\n'
+ "END_ATTACH\n"
+ "END_POSITION\n"
+ 'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb COMPONENT 1 '
+ "AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "MARK_top" ON 121 GLYPH gravecomb COMPONENT 1 '
+ "AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "top" ON 31 GLYPH a COMPONENT 1 '
+ "AT POS DX 210 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "top" ON 35 GLYPH e COMPONENT 1 '
+ "AT POS DX 215 DY 450 END_POS END_ANCHOR\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Mark classes\n"
+ "markClass acutecomb <anchor 0 450> @top;\n"
+ "markClass gravecomb <anchor 0 450> @top;\n"
+ "\n"
+ "# Lookups\n"
+ "lookup anchor_top {\n"
+ " lookupflag RightToLeft;\n"
+ " pos base a\n"
+ " <anchor 210 450> mark @top;\n"
+ " pos base e\n"
+ " <anchor 215 450> mark @top;\n"
+ "} anchor_top;\n",
+ )
+
+ def test_position_attach_mkmk(self):
+ fea = self.parse(
+ 'DEF_GLYPH "brevecomb" ID 1 TYPE MARK END_GLYPH\n'
+ 'DEF_GLYPH "gravecomb" ID 2 TYPE MARK END_GLYPH\n'
+ 'DEF_LOOKUP "anchor_top" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ 'ATTACH GLYPH "gravecomb"\n'
+ 'TO GLYPH "acutecomb" AT ANCHOR "top"\n'
+ "END_ATTACH\n"
+ "END_POSITION\n"
+ 'DEF_ANCHOR "MARK_top" ON 1 GLYPH acutecomb COMPONENT 1 '
+ "AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "top" ON 2 GLYPH gravecomb COMPONENT 1 '
+ "AT POS DX 210 DY 450 END_POS END_ANCHOR\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Mark classes\n"
+ "markClass acutecomb <anchor 0 450> @top;\n"
+ "\n"
+ "# Lookups\n"
+ "lookup anchor_top {\n"
+ " lookupflag RightToLeft;\n"
+ " pos mark gravecomb\n"
+ " <anchor 210 450> mark @top;\n"
+ "} anchor_top;\n"
+ "\n"
+ "@GDEF_mark = [brevecomb gravecomb];\n"
+ "table GDEF {\n"
+ " GlyphClassDef , , @GDEF_mark, ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_position_attach_in_context(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "test" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION RTL\n"
+ 'EXCEPT_CONTEXT LEFT GLYPH "a" END_CONTEXT\n'
+ "AS_POSITION\n"
+ 'ATTACH GLYPH "a"\n'
+ 'TO GLYPH "acutecomb" AT ANCHOR "top" '
+ 'GLYPH "gravecomb" AT ANCHOR "top"\n'
+ "END_ATTACH\n"
+ "END_POSITION\n"
+ 'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb COMPONENT 1 '
+ "AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "MARK_top" ON 121 GLYPH gravecomb COMPONENT 1 '
+ "AT POS DX 0 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "top" ON 31 GLYPH a COMPONENT 1 '
+ "AT POS DX 210 DY 450 END_POS END_ANCHOR\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Mark classes\n"
+ "markClass acutecomb <anchor 0 450> @top;\n"
+ "markClass gravecomb <anchor 0 450> @top;\n"
+ "\n"
+ "# Lookups\n"
+ "lookup test_target {\n"
+ " pos base a\n"
+ " <anchor 210 450> mark @top;\n"
+ "} test_target;\n"
+ "\n"
+ "lookup test {\n"
+ " lookupflag RightToLeft;\n"
+ " ignore pos a [acutecomb gravecomb]';\n"
+ " pos [acutecomb gravecomb]' lookup test_target;\n"
+ "} test;\n",
+ )
+
+ def test_position_attach_cursive(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "SomeLookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ 'ATTACH_CURSIVE EXIT GLYPH "a" GLYPH "b" '
+ 'ENTER GLYPH "a" GLYPH "c"\n'
+ "END_ATTACH\n"
+ "END_POSITION\n"
+ 'DEF_ANCHOR "exit" ON 1 GLYPH a COMPONENT 1 AT POS END_POS END_ANCHOR\n'
+ 'DEF_ANCHOR "entry" ON 1 GLYPH a COMPONENT 1 AT POS END_POS END_ANCHOR\n'
+ 'DEF_ANCHOR "exit" ON 2 GLYPH b COMPONENT 1 AT POS END_POS END_ANCHOR\n'
+ 'DEF_ANCHOR "entry" ON 3 GLYPH c COMPONENT 1 AT POS END_POS END_ANCHOR\n'
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup SomeLookup {\n"
+ " lookupflag RightToLeft;\n"
+ " pos cursive a <anchor 0 0> <anchor 0 0>;\n"
+ " pos cursive c <anchor 0 0> <anchor NULL>;\n"
+ " pos cursive b <anchor NULL> <anchor 0 0>;\n"
+ "} SomeLookup;\n",
+ )
+
+ def test_position_adjust_pair(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "kern1" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION RTL\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ "ADJUST_PAIR\n"
+ ' FIRST GLYPH "A" FIRST GLYPH "V"\n'
+ ' SECOND GLYPH "A" SECOND GLYPH "V"\n'
+ " 1 2 BY POS ADV -30 END_POS POS END_POS\n"
+ " 2 1 BY POS ADV -25 END_POS POS END_POS\n"
+ "END_ADJUST\n"
+ "END_POSITION\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup kern1 {\n"
+ " lookupflag RightToLeft;\n"
+ " enum pos A V -30;\n"
+ " enum pos V A -25;\n"
+ "} kern1;\n",
+ )
+
+ def test_position_adjust_pair_in_context(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "kern1" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ 'EXCEPT_CONTEXT LEFT GLYPH "A" END_CONTEXT\n'
+ "AS_POSITION\n"
+ "ADJUST_PAIR\n"
+ ' FIRST GLYPH "A" FIRST GLYPH "V"\n'
+ ' SECOND GLYPH "A" SECOND GLYPH "V"\n'
+ " 2 1 BY POS ADV -25 END_POS POS END_POS\n"
+ "END_ADJUST\n"
+ "END_POSITION\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup kern1_target {\n"
+ " enum pos V A -25;\n"
+ "} kern1_target;\n"
+ "\n"
+ "lookup kern1 {\n"
+ " ignore pos A V' A';\n"
+ " pos V' lookup kern1_target A' lookup kern1_target;\n"
+ "} kern1;\n",
+ )
+
+ def test_position_adjust_single(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "TestLookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ "ADJUST_SINGLE"
+ ' GLYPH "glyph1" BY POS ADV 0 DX 123 END_POS\n'
+ ' GLYPH "glyph2" BY POS ADV 0 DX 456 END_POS\n'
+ "END_ADJUST\n"
+ "END_POSITION\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup TestLookup {\n"
+ " pos glyph1 <123 0 0 0>;\n"
+ " pos glyph2 <456 0 0 0>;\n"
+ "} TestLookup;\n",
+ )
+
+ def test_position_adjust_single_in_context(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "TestLookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "EXCEPT_CONTEXT\n"
+ 'LEFT GLYPH "leftGlyph"\n'
+ 'RIGHT GLYPH "rightGlyph"\n'
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ "ADJUST_SINGLE"
+ ' GLYPH "glyph1" BY POS ADV 0 DX 123 END_POS\n'
+ ' GLYPH "glyph2" BY POS ADV 0 DX 456 END_POS\n'
+ "END_ADJUST\n"
+ "END_POSITION\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup TestLookup_target {\n"
+ " pos glyph1 <123 0 0 0>;\n"
+ " pos glyph2 <456 0 0 0>;\n"
+ "} TestLookup_target;\n"
+ "\n"
+ "lookup TestLookup {\n"
+ " ignore pos leftGlyph [glyph1 glyph2]' rightGlyph;\n"
+ " pos [glyph1 glyph2]' lookup TestLookup_target;\n"
+ "} TestLookup;\n",
+ )
+
+ def test_def_anchor(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "TestLookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ 'ATTACH GLYPH "a"\n'
+ 'TO GLYPH "acutecomb" AT ANCHOR "top"\n'
+ "END_ATTACH\n"
+ "END_POSITION\n"
+ 'DEF_ANCHOR "top" ON 120 GLYPH a '
+ "COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb '
+ "COMPONENT 1 AT POS DX 0 DY 450 END_POS END_ANCHOR"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Mark classes\n"
+ "markClass acutecomb <anchor 0 450> @top;\n"
+ "\n"
+ "# Lookups\n"
+ "lookup TestLookup {\n"
+ " pos base a\n"
+ " <anchor 250 450> mark @top;\n"
+ "} TestLookup;\n",
+ )
+
+ def test_def_anchor_multi_component(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "TestLookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ 'ATTACH GLYPH "f_f"\n'
+ 'TO GLYPH "acutecomb" AT ANCHOR "top"\n'
+ "END_ATTACH\n"
+ "END_POSITION\n"
+ 'DEF_GLYPH "f_f" ID 120 TYPE LIGATURE COMPONENTS 2 END_GLYPH\n'
+ 'DEF_ANCHOR "top" ON 120 GLYPH f_f '
+ "COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "top" ON 120 GLYPH f_f '
+ "COMPONENT 2 AT POS DX 450 DY 450 END_POS END_ANCHOR\n"
+ 'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb '
+ "COMPONENT 1 AT POS END_POS END_ANCHOR"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Mark classes\n"
+ "markClass acutecomb <anchor 0 0> @top;\n"
+ "\n"
+ "# Lookups\n"
+ "lookup TestLookup {\n"
+ " pos ligature f_f\n"
+ " <anchor 250 450> mark @top\n"
+ " ligComponent\n"
+ " <anchor 450 450> mark @top;\n"
+ "} TestLookup;\n"
+ "\n"
+ "@GDEF_ligature = [f_f];\n"
+ "table GDEF {\n"
+ " GlyphClassDef , @GDEF_ligature, , ;\n"
+ "} GDEF;\n",
+ )
+
+ def test_anchor_adjust_device(self):
+ fea = self.parse(
+ 'DEF_ANCHOR "MARK_top" ON 123 GLYPH diacglyph '
+ "COMPONENT 1 AT POS DX 0 DY 456 ADJUST_BY 12 AT 34 "
+ "ADJUST_BY 56 AT 78 END_POS END_ANCHOR"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Mark classes\n"
+ "#markClass diacglyph <anchor 0 456 <device NULL>"
+ " <device 34 12, 78 56>> @top;",
+ )
+
+ def test_use_extension(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "kern1" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR\n"
+ "IN_CONTEXT\n"
+ "END_CONTEXT\n"
+ "AS_POSITION\n"
+ "ADJUST_PAIR\n"
+ ' FIRST GLYPH "A" FIRST GLYPH "V"\n'
+ ' SECOND GLYPH "A" SECOND GLYPH "V"\n'
+ " 1 2 BY POS ADV -30 END_POS POS END_POS\n"
+ " 2 1 BY POS ADV -25 END_POS POS END_POS\n"
+ "END_ADJUST\n"
+ "END_POSITION\n"
+ "COMPILER_USEEXTENSIONLOOKUPS\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup kern1 useExtension {\n"
+ " enum pos A V -30;\n"
+ " enum pos V A -25;\n"
+ "} kern1;\n",
+ )
+
+ def test_unsupported_compiler_flags(self):
+ with self.assertLogs(level="WARNING") as logs:
+ fea = self.parse("CMAP_FORMAT 0 3 4")
+ self.assertEqual(fea, "")
+ self.assertEqual(
+ logs.output,
+ [
+ "WARNING:fontTools.voltLib.voltToFea:Unsupported setting ignored: CMAP_FORMAT"
+ ],
+ )
+
+ def test_sanitize_lookup_name(self):
+ fea = self.parse(
+ 'DEF_LOOKUP "Test Lookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR IN_CONTEXT END_CONTEXT\n"
+ "AS_POSITION ADJUST_PAIR END_ADJUST END_POSITION\n"
+ 'DEF_LOOKUP "Test-Lookup" PROCESS_BASE PROCESS_MARKS ALL '
+ "DIRECTION LTR IN_CONTEXT END_CONTEXT\n"
+ "AS_POSITION ADJUST_PAIR END_ADJUST END_POSITION\n"
+ )
+ self.assertEqual(
+ fea,
+ "\n# Lookups\n"
+ "lookup Test_Lookup {\n"
+ " \n"
+ "} Test_Lookup;\n"
+ "\n"
+ "lookup Test_Lookup_ {\n"
+ " \n"
+ "} Test_Lookup_;\n",
+ )
+
+ def test_sanitize_group_name(self):
+ fea = self.parse(
+ 'DEF_GROUP "aaccented glyphs"\n'
+ 'ENUM GLYPH "aacute" GLYPH "abreve" END_ENUM\n'
+ "END_GROUP\n"
+ 'DEF_GROUP "aaccented+glyphs"\n'
+ 'ENUM GLYPH "aacute" GLYPH "abreve" END_ENUM\n'
+ "END_GROUP\n"
+ )
+ self.assertEqual(
+ fea,
+ "# Glyph classes\n"
+ "@aaccented_glyphs = [aacute abreve];\n"
+ "@aaccented_glyphs_ = [aacute abreve];",
+ )
+
+ def test_cli_vtp(self):
+ vtp = DATADIR / "Nutso.vtp"
+ fea = DATADIR / "Nutso.fea"
+ self.cli(vtp, fea)
+
+ def test_group_order(self):
+ vtp = DATADIR / "NamdhinggoSIL1006.vtp"
+ fea = DATADIR / "NamdhinggoSIL1006.fea"
+ self.cli(vtp, fea)
+
+ def test_cli_ttf(self):
+ ttf = DATADIR / "Nutso.ttf"
+ fea = DATADIR / "Nutso.fea"
+ self.cli(ttf, fea)
+
+ def test_cli_ttf_no_TSIV(self):
+ from fontTools.voltLib.voltToFea import main as cli
+
+ ttf = DATADIR / "Empty.ttf"
+ temp = self.temp_path()
+ self.assertEqual(1, cli([str(ttf), str(temp)]))
+
+ def cli(self, source, fea):
+ from fontTools.voltLib.voltToFea import main as cli
+
+ temp = self.temp_path()
+ cli([str(source), str(temp)])
+ with temp.open() as f:
+ res = f.read()
+ with fea.open() as f:
+ ref = f.read()
+ self.assertEqual(ref, res)
+
+ def parse(self, text):
+ return VoltToFea(StringIO(text)).convert()
+
+
+if __name__ == "__main__":
+ import sys
+
+ sys.exit(unittest.main())
diff --git a/dev-requirements.txt b/dev-requirements.txt
index 73eae680..69601f35 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -3,3 +3,6 @@ tox>=2.5
bump2version>=0.5.6
sphinx>=1.5.5
mypy>=0.782
+
+# Pin black as each version could change formatting, breaking CI randomly.
+black==23.10.0
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..5a8e741b
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,2 @@
+[tool.black]
+target-version = ["py37"]
diff --git a/requirements.txt b/requirements.txt
index 1bab6d77..8a764102 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,16 +1,19 @@
# we use the official Brotli module on CPython and the CFFI-based
# extension 'brotlipy' on PyPy
-brotli==1.0.9; platform_python_implementation != "PyPy"
-brotlicffi==1.0.9.2; platform_python_implementation == "PyPy"
-unicodedata2==14.0.0; python_version < '3.11'
-scipy==1.7.3; platform_python_implementation != "PyPy" and python_version <= '3.7' # pyup: ignore
-scipy==1.9.0; platform_python_implementation != "PyPy" and python_version > '3.7'
+brotli==1.1.0; platform_python_implementation != "PyPy"
+brotlicffi==1.1.0.0; platform_python_implementation == "PyPy"
+unicodedata2==15.1.0; python_version <= '3.11'
+scipy==1.10.0; platform_python_implementation != "PyPy" and python_version <= '3.8' # pyup: ignore
+scipy==1.11.3; platform_python_implementation != "PyPy" and python_version >= '3.9'
munkres==1.1.4; platform_python_implementation == "PyPy"
-zopfli==0.2.1
+zopfli==0.2.3
fs==2.4.16
-skia-pathops==0.7.2; platform_python_implementation != "PyPy"
+skia-pathops==0.8.0.post1; platform_python_implementation != "PyPy"
# this is only required to run Tests/cu2qu/{ufo,cli}_test.py
-ufoLib2==0.13.1
-pyobjc==8.5; sys_platform == "darwin"
-freetype-py==2.3.0
-uharfbuzz==0.30.0
+ufoLib2==0.16.0
+ufo2ft==2.33.4
+pyobjc==10.0; sys_platform == "darwin"
+freetype-py==2.4.0
+uharfbuzz==0.37.3
+glyphsLib==6.4.1 # this is only required to run Tests/varLib/interpolatable_test.py
+lxml==4.9.3
diff --git a/setup.cfg b/setup.cfg
index 69bfde14..3c41de2f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 4.37.1
+current_version = 4.44.0
commit = True
tag = False
tag_name = {new_version}
@@ -24,14 +24,11 @@ replace = __version__ = "{new_version}"
search = version="{current_version}"
replace = version="{new_version}"
-[sdist]
-formats = zip
-
[metadata]
-license_file = LICENSE
+license_files = LICENSE
[tool:pytest]
-minversion = 3.0
+minversion = 7.0.0
testpaths =
Tests
fontTools
diff --git a/setup.py b/setup.py
index 03395ac5..da616903 100755
--- a/setup.py
+++ b/setup.py
@@ -17,429 +17,447 @@ import re
# Force distutils to use py_compile.compile() function with 'doraise' argument
# set to True, in order to raise an exception on compilation errors
import py_compile
+
orig_py_compile = py_compile.compile
+
def doraise_py_compile(file, cfile=None, dfile=None, doraise=False):
- orig_py_compile(file, cfile=cfile, dfile=dfile, doraise=True)
+ orig_py_compile(file, cfile=cfile, dfile=dfile, doraise=True)
+
py_compile.compile = doraise_py_compile
setup_requires = []
-if {'bdist_wheel'}.intersection(sys.argv):
- setup_requires.append('wheel')
+if {"bdist_wheel"}.intersection(sys.argv):
+ setup_requires.append("wheel")
-if {'release'}.intersection(sys.argv):
- setup_requires.append('bump2version')
+if {"release"}.intersection(sys.argv):
+ setup_requires.append("bump2version")
try:
- __import__("cython")
+ __import__("cython")
except ImportError:
- has_cython = False
+ has_cython = False
else:
- has_cython = True
+ has_cython = True
env_with_cython = os.environ.get("FONTTOOLS_WITH_CYTHON")
with_cython = (
- True if env_with_cython in {"1", "true", "yes"}
- else False if env_with_cython in {"0", "false", "no"}
- else None
+ True
+ if env_with_cython in {"1", "true", "yes"}
+ else False
+ if env_with_cython in {"0", "false", "no"}
+ else None
)
# --with-cython/--without-cython options override environment variables
-opt_with_cython = {'--with-cython'}.intersection(sys.argv)
-opt_without_cython = {'--without-cython'}.intersection(sys.argv)
+opt_with_cython = {"--with-cython"}.intersection(sys.argv)
+opt_without_cython = {"--without-cython"}.intersection(sys.argv)
if opt_with_cython and opt_without_cython:
- sys.exit(
- "error: the options '--with-cython' and '--without-cython' are "
- "mutually exclusive"
- )
+ sys.exit(
+ "error: the options '--with-cython' and '--without-cython' are "
+ "mutually exclusive"
+ )
elif opt_with_cython:
- sys.argv.remove("--with-cython")
- with_cython = True
+ sys.argv.remove("--with-cython")
+ with_cython = True
elif opt_without_cython:
- sys.argv.remove("--without-cython")
- with_cython = False
+ sys.argv.remove("--without-cython")
+ with_cython = False
if with_cython and not has_cython:
- setup_requires.append("cython")
+ setup_requires.append("cython")
ext_modules = []
if with_cython is True or (with_cython is None and has_cython):
- ext_modules.append(
- Extension("fontTools.cu2qu.cu2qu", ["Lib/fontTools/cu2qu/cu2qu.py"]),
- )
- ext_modules.append(
- Extension("fontTools.pens.momentsPen", ["Lib/fontTools/pens/momentsPen.py"]),
- )
- ext_modules.append(
- Extension("fontTools.varLib.iup", ["Lib/fontTools/varLib/iup.py"]),
- )
+ ext_modules.append(
+ Extension("fontTools.cu2qu.cu2qu", ["Lib/fontTools/cu2qu/cu2qu.py"]),
+ )
+ ext_modules.append(
+ Extension("fontTools.qu2cu.qu2cu", ["Lib/fontTools/qu2cu/qu2cu.py"]),
+ )
+ ext_modules.append(
+ Extension("fontTools.misc.bezierTools", ["Lib/fontTools/misc/bezierTools.py"]),
+ )
+ ext_modules.append(
+ Extension("fontTools.pens.momentsPen", ["Lib/fontTools/pens/momentsPen.py"]),
+ )
+ ext_modules.append(
+ Extension("fontTools.varLib.iup", ["Lib/fontTools/varLib/iup.py"]),
+ )
+ ext_modules.append(
+ Extension("fontTools.feaLib.lexer", ["Lib/fontTools/feaLib/lexer.py"]),
+ )
extras_require = {
- # for fontTools.ufoLib: to read/write UFO fonts
- "ufo": [
- "fs >= 2.2.0, < 3",
- ],
- # for fontTools.misc.etree and fontTools.misc.plistlib: use lxml to
- # read/write XML files (faster/safer than built-in ElementTree)
- "lxml": [
- "lxml >= 4.0, < 5",
- ],
- # for fontTools.sfnt and fontTools.woff2: to compress/uncompress
- # WOFF 1.0 and WOFF 2.0 webfonts.
- "woff": [
- "brotli >= 1.0.1; platform_python_implementation == 'CPython'",
- "brotlicffi >= 0.8.0; platform_python_implementation != 'CPython'",
- "zopfli >= 0.1.4",
- ],
- # for fontTools.unicode and fontTools.unicodedata: to use the latest version
- # of the Unicode Character Database instead of the built-in unicodedata
- # which varies between python versions and may be outdated.
- "unicode": [
- # Python 3.11 already has Unicode 14.0, so the backport is not needed.
- (
- "unicodedata2 >= 14.0.0; python_version < '3.11'"
- ),
- ],
- # for graphite type tables in ttLib/tables (Silf, Glat, Gloc)
- "graphite": [
- "lz4 >= 1.7.4.2"
- ],
- # for fontTools.interpolatable: to solve the "minimum weight perfect
- # matching problem in bipartite graphs" (aka Assignment problem)
- "interpolatable": [
- # use pure-python alternative on pypy
- "scipy; platform_python_implementation != 'PyPy'",
- "munkres; platform_python_implementation == 'PyPy'",
- ],
- # for fontTools.varLib.plot, to visualize DesignSpaceDocument and resulting
- # VariationModel
- "plot": [
- # TODO: figure out the minimum version of matplotlib that we need
- "matplotlib",
- ],
- # for fontTools.misc.symfont, module for symbolic font statistics analysis
- "symfont": [
- "sympy",
- ],
- # To get file creator and type of Macintosh PostScript Type 1 fonts (macOS only)
- "type1": [
- "xattr; sys_platform == 'darwin'",
- ],
- # for fontTools.ttLib.removeOverlaps, to remove overlaps in TTF fonts
- "pathops": [
- "skia-pathops >= 0.5.0",
- ],
- # for packing GSUB/GPOS tables with Harfbuzz repacker
- "repacker": [
- "uharfbuzz >= 0.23.0",
- ],
+ # for fontTools.ufoLib: to read/write UFO fonts
+ "ufo": [
+ "fs >= 2.2.0, < 3",
+ ],
+ # for fontTools.misc.etree and fontTools.misc.plistlib: use lxml to
+ # read/write XML files (faster/safer than built-in ElementTree)
+ "lxml": [
+ "lxml >= 4.0, < 5",
+ ],
+ # for fontTools.sfnt and fontTools.woff2: to compress/uncompress
+ # WOFF 1.0 and WOFF 2.0 webfonts.
+ "woff": [
+ "brotli >= 1.0.1; platform_python_implementation == 'CPython'",
+ "brotlicffi >= 0.8.0; platform_python_implementation != 'CPython'",
+ "zopfli >= 0.1.4",
+ ],
+ # for fontTools.unicode and fontTools.unicodedata: to use the latest version
+ # of the Unicode Character Database instead of the built-in unicodedata
+ # which varies between python versions and may be outdated.
+ "unicode": [
+ ("unicodedata2 >= 15.1.0; python_version <= '3.12'"),
+ ],
+ # for graphite type tables in ttLib/tables (Silf, Glat, Gloc)
+ "graphite": ["lz4 >= 1.7.4.2"],
+ # for fontTools.interpolatable: to solve the "minimum weight perfect
+ # matching problem in bipartite graphs" (aka Assignment problem)
+ "interpolatable": [
+ # use pure-python alternative on pypy
+ "scipy; platform_python_implementation != 'PyPy'",
+ "munkres; platform_python_implementation == 'PyPy'",
+ ],
+ # for fontTools.varLib.plot, to visualize DesignSpaceDocument and resulting
+ # VariationModel
+ "plot": [
+ # TODO: figure out the minimum version of matplotlib that we need
+ "matplotlib",
+ ],
+ # for fontTools.misc.symfont, module for symbolic font statistics analysis
+ "symfont": [
+ "sympy",
+ ],
+ # To get file creator and type of Macintosh PostScript Type 1 fonts (macOS only)
+ "type1": [
+ "xattr; sys_platform == 'darwin'",
+ ],
+ # for fontTools.ttLib.removeOverlaps, to remove overlaps in TTF fonts
+ "pathops": [
+ "skia-pathops >= 0.5.0",
+ ],
+ # for packing GSUB/GPOS tables with Harfbuzz repacker
+ "repacker": [
+ "uharfbuzz >= 0.23.0",
+ ],
}
# use a special 'all' key as shorthand to includes all the extra dependencies
extras_require["all"] = sum(extras_require.values(), [])
# Trove classifiers for PyPI
-classifiers = {"classifiers": [
- "Development Status :: 5 - Production/Stable",
- "Environment :: Console",
- "Environment :: Other Environment",
- "Intended Audience :: Developers",
- "Intended Audience :: End Users/Desktop",
- "License :: OSI Approved :: MIT License",
- "Natural Language :: English",
- "Operating System :: OS Independent",
- "Programming Language :: Python",
- "Programming Language :: Python :: 2",
- "Programming Language :: Python :: 3",
- "Topic :: Text Processing :: Fonts",
- "Topic :: Multimedia :: Graphics",
- "Topic :: Multimedia :: Graphics :: Graphics Conversion",
-]}
+classifiers = {
+ "classifiers": [
+ "Development Status :: 5 - Production/Stable",
+ "Environment :: Console",
+ "Environment :: Other Environment",
+ "Intended Audience :: Developers",
+ "Intended Audience :: End Users/Desktop",
+ "License :: OSI Approved :: MIT License",
+ "Natural Language :: English",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3",
+ "Topic :: Text Processing :: Fonts",
+ "Topic :: Multimedia :: Graphics",
+ "Topic :: Multimedia :: Graphics :: Graphics Conversion",
+ ]
+}
# concatenate README.rst and NEWS.rest into long_description so they are
# displayed on the FontTols project page on PyPI
with io.open("README.rst", "r", encoding="utf-8") as readme:
- long_description = readme.read()
+ long_description = readme.read()
long_description += "\nChangelog\n~~~~~~~~~\n\n"
with io.open("NEWS.rst", "r", encoding="utf-8") as changelog:
- long_description += changelog.read()
+ long_description += changelog.read()
@contextlib.contextmanager
def capture_logger(name):
- """ Context manager to capture a logger output with a StringIO stream.
- """
- import logging
-
- logger = logging.getLogger(name)
- try:
- import StringIO
- stream = StringIO.StringIO()
- except ImportError:
- stream = io.StringIO()
- handler = logging.StreamHandler(stream)
- logger.addHandler(handler)
- try:
- yield stream
- finally:
- logger.removeHandler(handler)
+ """Context manager to capture a logger output with a StringIO stream."""
+ import logging
+
+ logger = logging.getLogger(name)
+ try:
+ import StringIO
+
+ stream = StringIO.StringIO()
+ except ImportError:
+ stream = io.StringIO()
+ handler = logging.StreamHandler(stream)
+ logger.addHandler(handler)
+ try:
+ yield stream
+ finally:
+ logger.removeHandler(handler)
class release(Command):
- """
- Tag a new release with a single command, using the 'bumpversion' tool
- to update all the version strings in the source code.
- The version scheme conforms to 'SemVer' and PEP 440 specifications.
-
- Firstly, the pre-release '.devN' suffix is dropped to signal that this is
- a stable release. If '--major' or '--minor' options are passed, the
- the first or second 'semver' digit is also incremented. Major is usually
- for backward-incompatible API changes, while minor is used when adding
- new backward-compatible functionalities. No options imply 'patch' or bug-fix
- release.
-
- A new header is also added to the changelog file ("NEWS.rst"), containing
- the new version string and the current 'YYYY-MM-DD' date.
-
- All changes are committed, and an annotated git tag is generated. With the
- --sign option, the tag is GPG-signed with the user's default key.
-
- Finally, the 'patch' part of the version string is bumped again, and a
- pre-release suffix '.dev0' is appended to mark the opening of a new
- development cycle.
-
- Links:
- - http://semver.org/
- - https://www.python.org/dev/peps/pep-0440/
- - https://github.com/c4urself/bump2version
- """
-
- description = "update version strings for release"
-
- user_options = [
- ("major", None, "bump the first digit (incompatible API changes)"),
- ("minor", None, "bump the second digit (new backward-compatible features)"),
- ("sign", "s", "make a GPG-signed tag, using the default key"),
- ("allow-dirty", None, "don't abort if working directory is dirty"),
- ]
-
- changelog_name = "NEWS.rst"
- version_RE = re.compile("^[0-9]+\.[0-9]+")
- date_fmt = u"%Y-%m-%d"
- header_fmt = u"%s (released %s)"
- commit_message = "Release {new_version}"
- tag_name = "{new_version}"
- version_files = [
- "setup.cfg",
- "setup.py",
- "Lib/fontTools/__init__.py",
- ]
-
- def initialize_options(self):
- self.minor = False
- self.major = False
- self.sign = False
- self.allow_dirty = False
-
- def finalize_options(self):
- if all([self.major, self.minor]):
- from distutils.errors import DistutilsOptionError
- raise DistutilsOptionError("--major/--minor are mutually exclusive")
- self.part = "major" if self.major else "minor" if self.minor else None
-
- def run(self):
- if self.part is not None:
- log.info("bumping '%s' version" % self.part)
- self.bumpversion(self.part, commit=False)
- release_version = self.bumpversion(
- "release", commit=False, allow_dirty=True)
- else:
- log.info("stripping pre-release suffix")
- release_version = self.bumpversion("release")
- log.info(" version = %s" % release_version)
-
- changes = self.format_changelog(release_version)
-
- self.git_commit(release_version)
- self.git_tag(release_version, changes, self.sign)
-
- log.info("bumping 'patch' version and pre-release suffix")
- next_dev_version = self.bumpversion('patch', commit=True)
- log.info(" version = %s" % next_dev_version)
-
- def git_commit(self, version):
- """ Stage and commit all relevant version files, and format the commit
- message with specified 'version' string.
- """
- files = self.version_files + [self.changelog_name]
-
- log.info("committing changes")
- for f in files:
- log.info(" %s" % f)
- if self.dry_run:
- return
- sp.check_call(["git", "add"] + files)
- msg = self.commit_message.format(new_version=version)
- sp.check_call(["git", "commit", "-m", msg], stdout=sp.PIPE)
-
- def git_tag(self, version, message, sign=False):
- """ Create annotated git tag with given 'version' and 'message'.
- Optionally 'sign' the tag with the user's GPG key.
- """
- log.info("creating %s git tag '%s'" % (
- "signed" if sign else "annotated", version))
- if self.dry_run:
- return
- # create an annotated (or signed) tag from the new version
- tag_opt = "-s" if sign else "-a"
- tag_name = self.tag_name.format(new_version=version)
- proc = sp.Popen(
- ["git", "tag", tag_opt, "-F", "-", tag_name], stdin=sp.PIPE)
- # use the latest changes from the changelog file as the tag message
- tag_message = u"%s\n\n%s" % (tag_name, message)
- proc.communicate(tag_message.encode('utf-8'))
- if proc.returncode != 0:
- sys.exit(proc.returncode)
-
- def bumpversion(self, part, commit=False, message=None, allow_dirty=None):
- """ Run bumpversion.main() with the specified arguments, and return the
- new computed version string (cf. 'bumpversion --help' for more info)
- """
- import bumpversion.cli
-
- args = (
- (['--verbose'] if self.verbose > 1 else []) +
- (['--dry-run'] if self.dry_run else []) +
- (['--allow-dirty'] if (allow_dirty or self.allow_dirty) else []) +
- (['--commit'] if commit else ['--no-commit']) +
- (['--message', message] if message is not None else []) +
- ['--list', part]
- )
- log.debug("$ bumpversion %s" % " ".join(a.replace(" ", "\\ ") for a in args))
-
- with capture_logger("bumpversion.list") as out:
- bumpversion.cli.main(args)
-
- last_line = out.getvalue().splitlines()[-1]
- new_version = last_line.replace("new_version=", "")
- return new_version
-
- def format_changelog(self, version):
- """ Write new header at beginning of changelog file with the specified
- 'version' and the current date.
- Return the changelog content for the current release.
- """
- from datetime import datetime
-
- log.info("formatting changelog")
-
- changes = []
- with io.open(self.changelog_name, "r+", encoding="utf-8") as f:
- for ln in f:
- if self.version_RE.match(ln):
- break
- else:
- changes.append(ln)
- if not self.dry_run:
- f.seek(0)
- content = f.read()
- date = datetime.today().strftime(self.date_fmt)
- f.seek(0)
- header = self.header_fmt % (version, date)
- f.write(header + u"\n" + u"-"*len(header) + u"\n\n" + content)
-
- return u"".join(changes)
+ """
+ Tag a new release with a single command, using the 'bumpversion' tool
+ to update all the version strings in the source code.
+ The version scheme conforms to 'SemVer' and PEP 440 specifications.
+
+ Firstly, the pre-release '.devN' suffix is dropped to signal that this is
+ a stable release. If '--major' or '--minor' options are passed, the
+ the first or second 'semver' digit is also incremented. Major is usually
+ for backward-incompatible API changes, while minor is used when adding
+ new backward-compatible functionalities. No options imply 'patch' or bug-fix
+ release.
+
+ A new header is also added to the changelog file ("NEWS.rst"), containing
+ the new version string and the current 'YYYY-MM-DD' date.
+
+ All changes are committed, and an annotated git tag is generated. With the
+ --sign option, the tag is GPG-signed with the user's default key.
+
+ Finally, the 'patch' part of the version string is bumped again, and a
+ pre-release suffix '.dev0' is appended to mark the opening of a new
+ development cycle.
+
+ Links:
+ - http://semver.org/
+ - https://www.python.org/dev/peps/pep-0440/
+ - https://github.com/c4urself/bump2version
+ """
+
+ description = "update version strings for release"
+
+ user_options = [
+ ("major", None, "bump the first digit (incompatible API changes)"),
+ ("minor", None, "bump the second digit (new backward-compatible features)"),
+ ("sign", "s", "make a GPG-signed tag, using the default key"),
+ ("allow-dirty", None, "don't abort if working directory is dirty"),
+ ]
+
+ changelog_name = "NEWS.rst"
+ version_RE = re.compile("^[0-9]+\.[0-9]+")
+ date_fmt = "%Y-%m-%d"
+ header_fmt = "%s (released %s)"
+ commit_message = "Release {new_version}"
+ tag_name = "{new_version}"
+ version_files = [
+ "setup.cfg",
+ "setup.py",
+ "Lib/fontTools/__init__.py",
+ ]
+
+ def initialize_options(self):
+ self.minor = False
+ self.major = False
+ self.sign = False
+ self.allow_dirty = False
+
+ def finalize_options(self):
+ if all([self.major, self.minor]):
+ from distutils.errors import DistutilsOptionError
+
+ raise DistutilsOptionError("--major/--minor are mutually exclusive")
+ self.part = "major" if self.major else "minor" if self.minor else None
+
+ def run(self):
+ if self.part is not None:
+ log.info("bumping '%s' version" % self.part)
+ self.bumpversion(self.part, commit=False)
+ release_version = self.bumpversion(
+ "release", commit=False, allow_dirty=True
+ )
+ else:
+ log.info("stripping pre-release suffix")
+ release_version = self.bumpversion("release")
+ log.info(" version = %s" % release_version)
+
+ changes = self.format_changelog(release_version)
+
+ self.git_commit(release_version)
+ self.git_tag(release_version, changes, self.sign)
+
+ log.info("bumping 'patch' version and pre-release suffix")
+ next_dev_version = self.bumpversion("patch", commit=True)
+ log.info(" version = %s" % next_dev_version)
+
+ def git_commit(self, version):
+ """Stage and commit all relevant version files, and format the commit
+ message with specified 'version' string.
+ """
+ files = self.version_files + [self.changelog_name]
+
+ log.info("committing changes")
+ for f in files:
+ log.info(" %s" % f)
+ if self.dry_run:
+ return
+ sp.check_call(["git", "add"] + files)
+ msg = self.commit_message.format(new_version=version)
+ sp.check_call(["git", "commit", "-m", msg], stdout=sp.PIPE)
+
+ def git_tag(self, version, message, sign=False):
+ """Create annotated git tag with given 'version' and 'message'.
+ Optionally 'sign' the tag with the user's GPG key.
+ """
+ log.info(
+ "creating %s git tag '%s'" % ("signed" if sign else "annotated", version)
+ )
+ if self.dry_run:
+ return
+ # create an annotated (or signed) tag from the new version
+ tag_opt = "-s" if sign else "-a"
+ tag_name = self.tag_name.format(new_version=version)
+ proc = sp.Popen(["git", "tag", tag_opt, "-F", "-", tag_name], stdin=sp.PIPE)
+ # use the latest changes from the changelog file as the tag message
+ tag_message = "%s\n\n%s" % (tag_name, message)
+ proc.communicate(tag_message.encode("utf-8"))
+ if proc.returncode != 0:
+ sys.exit(proc.returncode)
+
+ def bumpversion(self, part, commit=False, message=None, allow_dirty=None):
+ """Run bumpversion.main() with the specified arguments, and return the
+ new computed version string (cf. 'bumpversion --help' for more info)
+ """
+ import bumpversion.cli
+
+ args = (
+ (["--verbose"] if self.verbose > 1 else [])
+ + (["--dry-run"] if self.dry_run else [])
+ + (["--allow-dirty"] if (allow_dirty or self.allow_dirty) else [])
+ + (["--commit"] if commit else ["--no-commit"])
+ + (["--message", message] if message is not None else [])
+ + ["--list", part]
+ )
+ log.debug("$ bumpversion %s" % " ".join(a.replace(" ", "\\ ") for a in args))
+
+ with capture_logger("bumpversion.list") as out:
+ bumpversion.cli.main(args)
+
+ last_line = out.getvalue().splitlines()[-1]
+ new_version = last_line.replace("new_version=", "")
+ return new_version
+
+ def format_changelog(self, version):
+ """Write new header at beginning of changelog file with the specified
+ 'version' and the current date.
+ Return the changelog content for the current release.
+ """
+ from datetime import datetime
+
+ log.info("formatting changelog")
+
+ changes = []
+ with io.open(self.changelog_name, "r+", encoding="utf-8") as f:
+ for ln in f:
+ if self.version_RE.match(ln):
+ break
+ else:
+ changes.append(ln)
+ if not self.dry_run:
+ f.seek(0)
+ content = f.read()
+ date = datetime.today().strftime(self.date_fmt)
+ f.seek(0)
+ header = self.header_fmt % (version, date)
+ f.write(header + "\n" + "-" * len(header) + "\n\n" + content)
+
+ return "".join(changes)
def find_data_files(manpath="share/man"):
- """ Find FontTools's data_files (just man pages at this point).
+ """Find FontTools's data_files (just man pages at this point).
- By default, we install man pages to "share/man" directory relative to the
- base installation directory for data_files. The latter can be changed with
- the --install-data option of 'setup.py install' sub-command.
+ By default, we install man pages to "share/man" directory relative to the
+ base installation directory for data_files. The latter can be changed with
+ the --install-data option of 'setup.py install' sub-command.
- E.g., if the data files installation directory is "/usr", the default man
- page installation directory will be "/usr/share/man".
+ E.g., if the data files installation directory is "/usr", the default man
+ page installation directory will be "/usr/share/man".
- You can override this via the $FONTTOOLS_MANPATH environment variable.
+ You can override this via the $FONTTOOLS_MANPATH environment variable.
- E.g., on some BSD systems man pages are installed to 'man' instead of
- 'share/man'; you can export $FONTTOOLS_MANPATH variable just before
- installing:
+ E.g., on some BSD systems man pages are installed to 'man' instead of
+ 'share/man'; you can export $FONTTOOLS_MANPATH variable just before
+ installing:
- $ FONTTOOLS_MANPATH="man" pip install -v .
- [...]
- running install_data
- copying Doc/man/ttx.1 -> /usr/man/man1
+ $ FONTTOOLS_MANPATH="man" pip install -v .
+ [...]
+ running install_data
+ copying Doc/man/ttx.1 -> /usr/man/man1
- When installing from PyPI, for this variable to have effect you need to
- force pip to install from the source distribution instead of the wheel
- package (otherwise setup.py is not run), by using the --no-binary option:
+ When installing from PyPI, for this variable to have effect you need to
+ force pip to install from the source distribution instead of the wheel
+ package (otherwise setup.py is not run), by using the --no-binary option:
- $ FONTTOOLS_MANPATH="man" pip install --no-binary=fonttools fonttools
+ $ FONTTOOLS_MANPATH="man" pip install --no-binary=fonttools fonttools
- Note that you can only override the base man path, i.e. without the
- section number (man1, man3, etc.). The latter is always implied to be 1,
- for "general commands".
- """
+ Note that you can only override the base man path, i.e. without the
+ section number (man1, man3, etc.). The latter is always implied to be 1,
+ for "general commands".
+ """
- # get base installation directory for man pages
- manpagebase = os.environ.get('FONTTOOLS_MANPATH', convert_path(manpath))
- # all our man pages go to section 1
- manpagedir = pjoin(manpagebase, 'man1')
+ # get base installation directory for man pages
+ manpagebase = os.environ.get("FONTTOOLS_MANPATH", convert_path(manpath))
+ # all our man pages go to section 1
+ manpagedir = pjoin(manpagebase, "man1")
- manpages = [f for f in glob(pjoin('Doc', 'man', 'man1', '*.1')) if isfile(f)]
+ manpages = [f for f in glob(pjoin("Doc", "man", "man1", "*.1")) if isfile(f)]
- data_files = [(manpagedir, manpages)]
- return data_files
+ data_files = [(manpagedir, manpages)]
+ return data_files
class cython_build_ext(_build_ext):
- """Compile *.pyx source files to *.c using cythonize if Cython is
- installed and there is a working C compiler, else fall back to pure python dist.
- """
-
- def finalize_options(self):
- from Cython.Build import cythonize
-
- # optionally enable line tracing for test coverage support
- linetrace = os.environ.get("CYTHON_TRACE") == "1"
-
- self.distribution.ext_modules[:] = cythonize(
- self.distribution.ext_modules,
- force=linetrace or self.force,
- annotate=os.environ.get("CYTHON_ANNOTATE") == "1",
- quiet=not self.verbose,
- compiler_directives={
- "linetrace": linetrace,
- "language_level": 3,
- "embedsignature": True,
- },
- )
-
- _build_ext.finalize_options(self)
-
- def build_extensions(self):
- try:
- _build_ext.build_extensions(self)
- except Exception as e:
- if with_cython:
- raise
- from distutils.errors import DistutilsModuleError
-
- # optional compilation failed: we delete 'ext_modules' and make sure
- # the generated wheel is 'pure'
- del self.distribution.ext_modules[:]
- try:
- bdist_wheel = self.get_finalized_command("bdist_wheel")
- except DistutilsModuleError:
- # 'bdist_wheel' command not available as wheel is not installed
- pass
- else:
- bdist_wheel.root_is_pure = True
- log.error('error: building extensions failed: %s' % e)
+ """Compile *.pyx source files to *.c using cythonize if Cython is
+ installed and there is a working C compiler, else fall back to pure python dist.
+ """
+
+ def finalize_options(self):
+ from Cython.Build import cythonize
+
+ # optionally enable line tracing for test coverage support
+ linetrace = os.environ.get("CYTHON_TRACE") == "1"
+
+ self.distribution.ext_modules[:] = cythonize(
+ self.distribution.ext_modules,
+ force=linetrace or self.force,
+ annotate=os.environ.get("CYTHON_ANNOTATE") == "1",
+ quiet=not self.verbose,
+ compiler_directives={
+ "linetrace": linetrace,
+ "language_level": 3,
+ "embedsignature": True,
+ },
+ )
+
+ _build_ext.finalize_options(self)
+
+ def build_extensions(self):
+ try:
+ _build_ext.build_extensions(self)
+ except Exception as e:
+ if with_cython:
+ raise
+ from distutils.errors import DistutilsModuleError
+
+ # optional compilation failed: we delete 'ext_modules' and make sure
+ # the generated wheel is 'pure'
+ del self.distribution.ext_modules[:]
+ try:
+ bdist_wheel = self.get_finalized_command("bdist_wheel")
+ except DistutilsModuleError:
+ # 'bdist_wheel' command not available as wheel is not installed
+ pass
+ else:
+ bdist_wheel.root_is_pure = True
+ log.error("error: building extensions failed: %s" % e)
+
cmdclass = {"release": release}
@@ -448,37 +466,37 @@ if ext_modules:
setup_params = dict(
- name="fonttools",
- version="4.37.1",
- description="Tools to manipulate font files",
- author="Just van Rossum",
- author_email="just@letterror.com",
- maintainer="Behdad Esfahbod",
- maintainer_email="behdad@behdad.org",
- url="http://github.com/fonttools/fonttools",
- license="MIT",
- platforms=["Any"],
- python_requires=">=3.7",
- long_description=long_description,
- package_dir={'': 'Lib'},
- packages=find_packages("Lib"),
- include_package_data=True,
- data_files=find_data_files(),
- ext_modules=ext_modules,
- setup_requires=setup_requires,
- extras_require=extras_require,
- entry_points={
- 'console_scripts': [
- "fonttools = fontTools.__main__:main",
- "ttx = fontTools.ttx:main",
- "pyftsubset = fontTools.subset:main",
- "pyftmerge = fontTools.merge:main",
- ]
- },
- cmdclass=cmdclass,
- **classifiers
+ name="fonttools",
+ version="4.44.0",
+ description="Tools to manipulate font files",
+ author="Just van Rossum",
+ author_email="just@letterror.com",
+ maintainer="Behdad Esfahbod",
+ maintainer_email="behdad@behdad.org",
+ url="http://github.com/fonttools/fonttools",
+ license="MIT",
+ platforms=["Any"],
+ python_requires=">=3.8",
+ long_description=long_description,
+ package_dir={"": "Lib"},
+ packages=find_packages("Lib"),
+ include_package_data=True,
+ data_files=find_data_files(),
+ ext_modules=ext_modules,
+ setup_requires=setup_requires,
+ extras_require=extras_require,
+ entry_points={
+ "console_scripts": [
+ "fonttools = fontTools.__main__:main",
+ "ttx = fontTools.ttx:main",
+ "pyftsubset = fontTools.subset:main",
+ "pyftmerge = fontTools.merge:main",
+ ]
+ },
+ cmdclass=cmdclass,
+ **classifiers,
)
if __name__ == "__main__":
- setup(**setup_params)
+ setup(**setup_params)
diff --git a/tox.ini b/tox.ini
index 22b1de74..ae8b2167 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
minversion = 3.0
-envlist = mypy, py3{7,8,9}-cov, htmlcov
+envlist = lint, py3{8,9,10,11,12}-cov, htmlcov
skip_missing_interpreters=true
[testenv]
@@ -11,18 +11,15 @@ setenv =
# relatively recent 'manylinux2014' platform tag.
# https://github.com/tox-dev/tox/issues/791#issuecomment-518713438
download = true
+# building lxml from source takes too long on CI, force it to download pre-compiled
+# wheel or fail if none is found with --only-binary=lxml
+install_command = python -m pip install --only-binary=lxml {opts} {packages}
deps =
cov: coverage>=4.3
- pytest
+ pytest>=7.0.0
pytest-randomly
- -rrequirements.txt
- !nolxml: lxml==4.6.4
-extras =
- ufo
- woff
- unicode
- interpolatable
- !nolxml: lxml
+ # add -noextra to tox -e to skip installing extras and only test the core fonttools
+ !noextra: -rrequirements.txt
commands =
cy: python -c "from fontTools.cu2qu.cu2qu import COMPILED; assert COMPILED"
!cy: python -c "from fontTools.cu2qu.cu2qu import COMPILED; assert not COMPILED"
@@ -38,11 +35,12 @@ commands =
coverage combine
coverage html
-[testenv:mypy]
+[testenv:lint]
deps =
-r dev-requirements.txt
skip_install = true
commands =
+ black --check --diff .
mypy
[testenv:codecov]