author    Daniel Baumann <daniel.baumann@progress-linux.org>  2023-03-02 20:01:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2023-03-02 20:01:10 +0000
commit    da875fcb62c801b8d19b3d4d984ad963574fb356 (patch)
tree      3d85503747c56c2a387b291524442946f4bebb73 /lib
parent    Initial commit. (diff)
Adding upstream version 1.6.0. (upstream/1.6.0, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/silfont/__init__.py  5
-rw-r--r--  lib/silfont/comp.py  358
-rwxr-xr-x  lib/silfont/core.py  748
-rw-r--r--  lib/silfont/data/required_chars.csv  308
-rw-r--r--  lib/silfont/data/required_chars.md  32
-rw-r--r--  lib/silfont/etutil.py  270
-rwxr-xr-x  lib/silfont/fbtests/__init__.py  0
-rw-r--r--  lib/silfont/fbtests/silnotcjk.py  230
-rw-r--r--  lib/silfont/fbtests/silttfchecks.py  250
-rw-r--r--  lib/silfont/fbtests/ttfchecks.py  305
-rw-r--r--  lib/silfont/feax_ast.py  445
-rw-r--r--  lib/silfont/feax_lexer.py  105
-rw-r--r--  lib/silfont/feax_parser.py  727
-rw-r--r--  lib/silfont/ftml.py  433
-rw-r--r--  lib/silfont/ftml_builder.py  750
-rwxr-xr-x  lib/silfont/harfbuzz.py  71
-rw-r--r--  lib/silfont/ipython.py  135
-rwxr-xr-x  lib/silfont/scripts/__init__.py  0
-rwxr-xr-x  lib/silfont/scripts/psfaddanchors.py  73
-rwxr-xr-x  lib/silfont/scripts/psfbuildcomp.py  309
-rw-r--r--  lib/silfont/scripts/psfbuildcompgc.py  45
-rwxr-xr-x  lib/silfont/scripts/psfbuildfea.py  89
-rwxr-xr-x  lib/silfont/scripts/psfchangegdlnames.py  160
-rw-r--r--  lib/silfont/scripts/psfchangettfglyphnames.py  35
-rw-r--r--  lib/silfont/scripts/psfcheckbasicchars.py  68
-rw-r--r--  lib/silfont/scripts/psfcheckclassorders.py  142
-rw-r--r--  lib/silfont/scripts/psfcheckftml.py  65
-rw-r--r--  lib/silfont/scripts/psfcheckglyphinventory.py  125
-rw-r--r--  lib/silfont/scripts/psfcheckinterpolatable.py  75
-rwxr-xr-x  lib/silfont/scripts/psfcompdef2xml.py  65
-rwxr-xr-x  lib/silfont/scripts/psfcompressgr.py  100
-rw-r--r--  lib/silfont/scripts/psfcopyglyphs.py  243
-rwxr-xr-x  lib/silfont/scripts/psfcopymeta.py  148
-rw-r--r--  lib/silfont/scripts/psfcreateinstances.py  228
-rw-r--r--  lib/silfont/scripts/psfcsv2comp.py  129
-rwxr-xr-x  lib/silfont/scripts/psfdeflang.py  47
-rw-r--r--  lib/silfont/scripts/psfdeleteglyphs.py  144
-rw-r--r--  lib/silfont/scripts/psfdupglyphs.py  48
-rwxr-xr-x  lib/silfont/scripts/psfexportanchors.py  101
-rwxr-xr-x  lib/silfont/scripts/psfexportmarkcolors.py  55
-rwxr-xr-x  lib/silfont/scripts/psfexportpsnames.py  47
-rwxr-xr-x  lib/silfont/scripts/psfexportunicodes.py  43
-rwxr-xr-x  lib/silfont/scripts/psffixffglifs.py  48
-rwxr-xr-x  lib/silfont/scripts/psffixfontlab.py  169
-rwxr-xr-x  lib/silfont/scripts/psfftml2TThtml.py  389
-rwxr-xr-x  lib/silfont/scripts/psfftml2odt.py  453
-rwxr-xr-x  lib/silfont/scripts/psfgetglyphnames.py  87
-rw-r--r--  lib/silfont/scripts/psfglyphs2ufo.py  275
-rw-r--r--  lib/silfont/scripts/psfmakedeprecated.py  74
-rwxr-xr-x  lib/silfont/scripts/psfmakefea.py  369
-rw-r--r--  lib/silfont/scripts/psfmakescaledshifted.py  117
-rwxr-xr-x  lib/silfont/scripts/psfmakewoffmetadata.py  225
-rwxr-xr-x  lib/silfont/scripts/psfnormalize.py  31
-rw-r--r--  lib/silfont/scripts/psfremovegliflibkeys.py  71
-rw-r--r--  lib/silfont/scripts/psfrenameglyphs.py  587
-rwxr-xr-x  lib/silfont/scripts/psfrunfbchecks.py  249
-rwxr-xr-x  lib/silfont/scripts/psfsetassocfeat.py  54
-rwxr-xr-x  lib/silfont/scripts/psfsetassocuids.py  51
-rw-r--r--  lib/silfont/scripts/psfsetdummydsig.py  36
-rw-r--r--  lib/silfont/scripts/psfsetglyphdata.py  143
-rwxr-xr-x  lib/silfont/scripts/psfsetglyphorder.py  91
-rwxr-xr-x  lib/silfont/scripts/psfsetkeys.py  100
-rwxr-xr-x  lib/silfont/scripts/psfsetmarkcolors.py  106
-rwxr-xr-x  lib/silfont/scripts/psfsetpsnames.py  86
-rwxr-xr-x  lib/silfont/scripts/psfsetunicodes.py  79
-rwxr-xr-x  lib/silfont/scripts/psfsetversion.py  85
-rwxr-xr-x  lib/silfont/scripts/psfshownames.py  235
-rw-r--r--  lib/silfont/scripts/psfsubset.py  112
-rw-r--r--  lib/silfont/scripts/psfsyncmasters.py  293
-rwxr-xr-x  lib/silfont/scripts/psfsyncmeta.py  303
-rw-r--r--  lib/silfont/scripts/psftuneraliases.py  121
-rw-r--r--  lib/silfont/scripts/psfufo2glyphs.py  69
-rw-r--r--  lib/silfont/scripts/psfufo2ttf.py  108
-rwxr-xr-x  lib/silfont/scripts/psfversion.py  64
-rw-r--r--  lib/silfont/scripts/psfwoffit.py  99
-rwxr-xr-x  lib/silfont/scripts/psfxml2compdef.py  33
-rwxr-xr-x  lib/silfont/ufo.py  1386
-rwxr-xr-x  lib/silfont/util.py  374
78 files changed, 14928 insertions, 0 deletions
diff --git a/lib/silfont/__init__.py b/lib/silfont/__init__.py
new file mode 100644
index 0000000..aa2763c
--- /dev/null
+++ b/lib/silfont/__init__.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2014-2022 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__version__ = '1.6.0'
diff --git a/lib/silfont/comp.py b/lib/silfont/comp.py
new file mode 100644
index 0000000..d99985a
--- /dev/null
+++ b/lib/silfont/comp.py
@@ -0,0 +1,358 @@
+#!/usr/bin/env python
+'Composite glyph definition'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Rowe'
+
+import re
+from xml.etree import ElementTree as ET
+
+# REs to parse (from right to left) comment, SIL extension parameters, markinfo, UID, metrics,
+# and (from left) glyph name
+
+# Extract comment from end of line (NB: Doesn't use re.VERBOSE because it contains #.)
+# beginning of line, optional whitespace, remainder, optional whitespace, comment to end of line
+inputline=re.compile(r"""^\s*(?P<remainder>.*?)(\s*#\s*(?P<commenttext>.*))?$""")
+
+# Parse SIL extension parameters in [...], but only after |
+paraminfo=re.compile(r"""^\s*
+ (?P<remainder>[^|]*
+ ($|
+ \|[^[]*$|
+ \|[^[]*\[(?P<paraminfo>[^]]*)\]))
+ \s*$""",re.VERBOSE)
+
+# Parse markinfo
+markinfo=re.compile(r"""^\s*
+ (?P<remainder>[^!]*?)
+ \s*
+ (?:!\s*(?P<markinfo>[.0-9]+(?:,[ .0-9]+){3}))? # ! markinfo
+ (?P<remainder2>[^!]*?)
+ \s*$""",re.VERBOSE)
+
+# Parse uid
+uidinfo=re.compile(r"""^\s*
+ (?P<remainder>[^|]*?)
+ \s*
+ (?:\|\s*(?P<UID>[^^!]*)?)? # | followed by nothing, or 4- to 6-digit UID
+ (?P<remainder2>[^|]*?)
+ \s*$""",re.VERBOSE)
+
+# Parse metrics
+metricsinfo=re.compile(r"""^\s*
+ (?P<remainder>[^^]*?)
+ \s*
+ (?:\^\s*(?P<metrics>[-0-9]+\s*(?:,\s*[-0-9]+)?))? # metrics (either ^x,y or ^a)
+ (?P<remainder2>[^^]*?)
+ \s*$""",re.VERBOSE)
+
+# Parse glyph information (up to =)
+glyphdef=re.compile(r"""^\s*
+ (?P<PSName>[._A-Za-z][._A-Za-z0-9-]*) # glyphname
+ \s*=\s*
+ (?P<remainder>.*?)
+ \s*$""",re.VERBOSE)
+
+# break tokens off the right hand side from right to left and finally off left hand side (up to =)
+initialtokens=[ (inputline, 'commenttext', ""),
+ (paraminfo, 'paraminfo', "Error parsing parameters in [...]"),
+ (markinfo, 'markinfo', "Error parsing information after !"),
+ (uidinfo, 'UID', "Error parsing information after |"),
+ (metricsinfo, 'metrics', "Error parsing information after ^"),
+ (glyphdef, 'PSName', "Error parsing glyph name before =") ]
+
+# Parse base and diacritic information
+compdef=re.compile(r"""^\s*
+ (?P<compname>[._A-Za-z][._A-Za-z0-9-]*) # name of base or diacritic in composite definition
+ (?:@ # @ precedes position information
+ (?:(?:\s*(?P<base>[^: ]+)):)? # optional base glyph followed by :
+ \s*
+ (?P<position>(?:[^ +&[])+) # position information (delimited by space + & [ or end of line)
+ \s*)? # end of @ clause
+ \s*
+ (?:\[(?P<params>[^]]*)\])? # parameters inside [..]
+ \s*
+ (?P<remainder>.*)$
+ """,re.VERBOSE)
+
+# Parse metrics
+lsb_rsb=re.compile(r"""^\s*
+ (?P<lsb>[-0-9]+)\s*(?:,\s*(?P<rsb>[-0-9]+))? # optional metrics (either ^lsb,rsb or ^adv)
+ \s*$""",re.VERBOSE)
+
+# RE to break off one key=value parameter from text inside [key=value;key=value;key=value]
+paramdef=re.compile(r"""^\s*
+ (?P<paramname>[a-z0-9]+) # paramname
+ \s*=\s* # = (with optional white space before/after)
+ (?P<paramval>[^;]+?) # any text up to ; or end of string
+ \s* # optional whitespace
+ (?:;\s*(?P<rest>.+)$|\s*$) # either ; and (non-empty) rest of parameters, or end of line
+ """,re.VERBOSE)
+
+class CompGlyph(object):
+
+ def __init__(self, CDelement=None, CDline=None):
+ self.CDelement = CDelement
+ self.CDline = CDline
+
+ def _parseparams(self, rest):
+ """Parse a parameter line such as:
+ key1=value1;key2=value2
+ and return a dictionary with key:value pairs.
+ """
+ params = {}
+ while rest:
+ matchparam=re.match(paramdef,rest)
+ if matchparam == None:
+ raise ValueError("Parameter error: " + rest)
+ params[matchparam.group('paramname')] = matchparam.group('paramval')
+ rest = matchparam.group('rest')
+ return(params)
+
+ def parsefromCDline(self):
+ """Parse the composite glyph information (in self.CDline) such as:
+ LtnCapADiear = LtnCapA + CombDiaer@U |00C4 ! 1, 0, 0, 1 # comment
+ and return a <glyph> element (in self.CDelement)
+ <glyph PSName="LtnCapADiear" UID="00C4">
+ <note>comment</note>
+ <property name="mark" value="1, 0, 0, 1"/>
+ <base PSName="LtnCapA">
+ <attach PSName="CombDiaer" with="_U" at="U"/>
+ </base>
+ </glyph>
+ Position info after @ can include optional base glyph name followed by colon.
+ """
+ line = self.CDline
+ results = {}
+ for parseinfo in initialtokens:
+ if len(line) > 0:
+ regex, groupname, errormsg = parseinfo
+ matchresults = re.match(regex,line)
+ if matchresults == None:
+ raise ValueError(errormsg)
+ line = matchresults.group('remainder')
+ resultsval = matchresults.group(groupname)
+ if resultsval != None:
+ results[groupname] = resultsval.strip()
+ if groupname == 'paraminfo': # paraminfo match needs to be removed from remainder
+ line = line.rstrip('['+resultsval+']')
+ if 'remainder2' in matchresults.groupdict().keys(): line += ' ' + matchresults.group('remainder2')
+# At this point results may optionally contain entries for any of 'commenttext', 'paraminfo', 'markinfo', 'UID', or 'metrics',
+# but it must have 'PSName' if any of 'paraminfo', 'markinfo', 'UID', or 'metrics' are present
+ note = results.pop('commenttext', None)
+ if 'PSName' not in results:
+ if len(results) > 0:
+ raise ValueError("Missing glyph name")
+ else: # comment only, or blank line
+ return None
+ dic = {}
+ UIDpresent = 'UID' in results
+ if UIDpresent and results['UID'] == '':
+ results.pop('UID')
+ if 'paraminfo' in results:
+ paramdata = results.pop('paraminfo')
+ if UIDpresent:
+ dic = self._parseparams(paramdata)
+ else:
+ line += " [" + paramdata + "]"
+ mark = results.pop('markinfo', None)
+ if 'metrics' in results:
+ m = results.pop('metrics')
+ matchmetrics = re.match(lsb_rsb,m)
+ if matchmetrics == None:
+ raise ValueError("Error in parameters: " + m)
+ elif matchmetrics.group('rsb'):
+ metricdic = {'lsb': matchmetrics.group('lsb'), 'rsb': matchmetrics.group('rsb')}
+ else:
+ metricdic = {'advance': matchmetrics.group('lsb')}
+ else:
+ metricdic = None
+
+ # Create <glyph> element and assign attributes
+ g = ET.Element('glyph',attrib=results)
+ if note: # note from commenttext becomes <note> subelement
+ n = ET.SubElement(g,'note')
+ n.text = note.rstrip()
+ # markinfo becomes <property> subelement
+ if mark:
+ p = ET.SubElement(g, 'property', name = 'mark', value = mark)
+ # paraminfo parameters (now in dic) become <property> subelements
+ if dic:
+ for key in dic:
+ p = ET.SubElement(g, 'property', name = key, value = dic[key])
+ # metrics parameters (now in metricdic) become subelements
+ if metricdic:
+ for key in metricdic:
+ k = ET.SubElement(g, key, width=metricdic[key])
+
+ # Prepare to parse remainder of line
+ prevbase = None
+ prevdiac = None
+ remainder = line
+ expectingdiac = False
+
+ # top of loop to process remainder of line, breaking off base or diacritics from left to right
+ while remainder != "":
+ matchresults=re.match(compdef,remainder)
+ if matchresults == None or matchresults.group('compname') == "" :
+ raise ValueError("Error parsing glyph name: " + remainder)
+ propdic = {}
+ if matchresults.group('params'):
+ propdic = self._parseparams(matchresults.group('params'))
+ base = matchresults.group('base')
+ position = matchresults.group('position')
+ if expectingdiac:
+ # Determine parent element, based on previous base and diacritic glyphs and optional
+ # matchresults.group('base'), indicating diacritic attaches to a different glyph
+ if base == None:
+ if prevdiac != None:
+ parent = prevdiac
+ else:
+ parent = prevbase
+ elif base != prevbase.attrib['PSName']:
+ raise ValueError("Error in diacritic alternate base glyph: " + base)
+ else:
+ parent = prevbase
+ if prevdiac == None:
+ raise ValueError("Unnecessary diacritic alternate base glyph: " + base)
+ # Because 'with' is a Python reserved word, passing it directly as a parameter
+ # causes Python syntax error, so build dictionary to pass to SubElement
+ att = {'PSName': matchresults.group('compname')}
+ if position:
+ if 'with' in propdic:
+ withval = propdic.pop('with')
+ else:
+ withval = "_" + position
+ att['at'] = position
+ att['with'] = withval
+ # Create <attach> subelement
+ e = ET.SubElement(parent, 'attach', attrib=att)
+ prevdiac = e
+ elif (base or position):
+ raise ValueError("Position information on base glyph not supported")
+ else:
+ # Create <base> subelement
+ e = ET.SubElement(g, 'base', PSName=matchresults.group('compname'))
+ prevbase = e
+ prevdiac = None
+ if 'shift' in propdic:
+ xval, yval = propdic.pop('shift').split(',')
+ s = ET.SubElement(e, 'shift', x=xval, y=yval)
+ # whatever parameters are left in propdic become <property> subelements
+ for key, val in propdic.items():
+ p = ET.SubElement(e, 'property', name=key, value=val)
+
+ remainder = matchresults.group('remainder').lstrip()
+ nextchar = remainder[:1]
+ remainder = remainder[1:].lstrip()
+ expectingdiac = nextchar == '+'
+ if nextchar == '&' or nextchar == '+':
+ if len(remainder) == 0:
+ raise ValueError("Expecting glyph name after & or +")
+ elif len(nextchar) > 0:
+ raise ValueError("Expecting & or + and found " + nextchar)
+ self.CDelement = g
+
+ def _diacinfo(self, node, parent, lastglyph):
+ """receives attach element, PSName of its parent, PSName of most recent glyph
+ returns a string equivalent of this node (and all its descendants)
+ and a string with the name of the most recent glyph
+ """
+ diacname = node.get('PSName')
+ atstring = node.get('at')
+ withstring = node.get('with')
+ propdic = {}
+ if withstring != "_" + atstring:
+ propdic['with'] = withstring
+ subattachlist = []
+ attachglyph = ""
+ if parent != lastglyph:
+ attachglyph = parent + ":"
+ for subelement in node:
+ if subelement.tag == 'property':
+ propdic[subelement.get('name')] = subelement.get('value')
+ elif subelement.tag == 'attach':
+ subattachlist.append(subelement)
+ elif subelement.tag == 'shift':
+ propdic['shift'] = subelement.get('x') + "," + subelement.get('y')
+ # else flag error/warning?
+ propstring = ""
+ if propdic:
+ propstring += " [" + ";".join( [k + "=" + v for k,v in propdic.items()] ) + "]"
+ returnstring = " + " + diacname + "@" + attachglyph + atstring + propstring
+ prevglyph = diacname
+ for s in subattachlist:
+ string, prevglyph = self._diacinfo(s, diacname, prevglyph)
+ returnstring += string
+ return returnstring, prevglyph
+
+ def _basediacinfo(self, baseelement):
+ """receives base element and returns a string equivalent of this node (and all its desendants)"""
+ basename = baseelement.get('PSName')
+ returnstring = basename
+ prevglyph = basename
+ bpropdic = {}
+ for child in baseelement:
+ if child.tag == 'attach':
+ string, prevglyph = self._diacinfo(child, basename, prevglyph)
+ returnstring += string
+ elif child.tag == 'shift':
+ bpropdic['shift'] = child.get('x') + "," + child.get('y')
+ if bpropdic:
+ returnstring += " [" + ";".join( [k + "=" + v for k,v in bpropdic.items()] ) + "]"
+ return returnstring
+
+ def parsefromCDelement(self):
+ """Parse a glyph element such as:
+ <glyph PSName="LtnSmITildeGraveDotBlw" UID="E000">
+ <note>i tilde grave dot-below</note>
+ <base PSName="LtnSmDotlessI">
+ <attach PSName="CombDotBlw" at="L" with="_L" />
+ <attach PSName="CombTilde" at="U" with="_U">
+ <attach PSName="CombGrave" at="U" with="_U" />
+ </attach>
+ </base>
+ </glyph>
+ and produce the equivalent CDline in format:
+ LtnSmITildeGraveDotBlw = LtnSmDotlessI + CombDotBlw@L + CombTilde@LtnSmDotlessI:U + CombGrave@U | E000 # i tilde grave dot-below
+ """
+ g = self.CDelement
+ lsb = None
+ rsb = None
+ adv = None
+ markinfo = None
+ note = None
+ paramdic = {}
+ outputline = [g.get('PSName')]
+ resultUID = g.get('UID')
+ basesep = " = "
+
+ for child in g:
+ if child.tag == 'note': note = child.text
+ elif child.tag == 'property':
+ if child.get('name') == 'mark': markinfo = child.get('value')
+ else: paramdic[child.get('name')] = child.get('value')
+ elif child.tag == 'lsb': lsb = child.get('width')
+ elif child.tag == 'rsb': rsb = child.get('width')
+ elif child.tag == 'advance': adv = child.get('width')
+ elif child.tag == 'base':
+ outputline.extend([basesep, self._basediacinfo(child)])
+ basesep = " & "
+
+ if paramdic and resultUID == None:
+ resultUID = " " # to force output of |
+ if adv: outputline.extend([' ^', adv])
+ if lsb and rsb: outputline.extend([' ^', lsb, ',', rsb])
+ if resultUID: outputline.extend([' |', resultUID])
+ if markinfo: outputline.extend([' !', markinfo])
+ if paramdic:
+ paramsep = " ["
+ for k in paramdic:
+ outputline.extend([paramsep, k, "=", paramdic[k]])
+ paramsep = ";"
+ outputline.append("]")
+ if note:
+ outputline.extend([" # ", note])
+ self.CDline = "".join(outputline)
+
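As a quick illustration of the round trip that comp.py implements (a minimal sketch based only on the class above; the sample CD line is the one from the parsefromCDline docstring):

    from xml.etree import ElementTree as ET
    from silfont.comp import CompGlyph

    # Text -> XML: parse a composite definition line into a <glyph> element
    cg = CompGlyph(CDline="LtnCapADiear = LtnCapA + CombDiaer@U |00C4 ! 1, 0, 0, 1 # comment")
    cg.parsefromCDline()
    print(ET.tostring(cg.CDelement, encoding="unicode"))

    # XML -> text: regenerate the CD line from the <glyph> element
    cg2 = CompGlyph(CDelement=cg.CDelement)
    cg2.parsefromCDelement()
    print(cg2.CDline)

Judging by their names in the diffstat, psfcompdef2xml.py and psfxml2compdef.py expose these two conversions as command-line scripts.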
diff --git a/lib/silfont/core.py b/lib/silfont/core.py
new file mode 100755
index 0000000..fae37ec
--- /dev/null
+++ b/lib/silfont/core.py
@@ -0,0 +1,748 @@
+#!/usr/bin/env python
+'General classes and functions for use in pysilfont scripts'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2014-2022 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from glob import glob
+from collections import OrderedDict
+import sys, os, argparse, datetime, shutil, csv, configparser
+
+import silfont
+
+class loggerobj(object):
+ # For handling log messages.
+ # Use S for severe errors caused by data, parameters supplied by user etc
+ # Use X for severe errors caused by bad code to get traceback exception
+
+ def __init__(self, logfile=None, loglevels="", leveltext="", loglevel="W", scrlevel="P"):
+ self.logfile = logfile
+ self.loglevels = loglevels
+ self.leveltext = leveltext
+ self.errorcount = 0
+ self.warningcount = 0
+ if not self.loglevels: self.loglevels = {'X': 0, 'S': 1, 'E': 2, 'P': 3, 'W': 4, 'I': 5, 'V': 6}
+ if not self.leveltext: self.leveltext = ('Exception ', 'Severe: ', 'Error: ', 'Progress: ', 'Warning: ', 'Info: ', 'Verbose: ')
+ super(loggerobj, self).__setattr__("loglevel", "E") # Temp values so invalid log levels can be reported
+ super(loggerobj, self).__setattr__("scrlevel", "E") #
+ self.loglevel = loglevel
+ self.scrlevel = scrlevel
+
+ def __setattr__(self, name, value):
+ if name in ("loglevel", "scrlevel"):
+ if value in self.loglevels:
+ (minlevel, minnum) = ("E",2) if name == "loglevel" else ("S", 1)
+ if self.loglevels[value] < minnum:
+ value = minlevel
+ self.log(name + " increased to minimum level of " + minlevel, "E")
+ else:
+ self.log("Invalid " + name + " value: " + value, "S")
+ super(loggerobj, self).__setattr__(name, value)
+ if name == "scrlevel" : self._basescrlevel = value # Used by resetscrlevel
+
+ def log(self, logmessage, msglevel="W"):
+ levelval = self.loglevels[msglevel]
+ message = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ") + self.leveltext[levelval] + str(logmessage)
+ #message = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[0:22] +" "+ self.leveltext[levelval] + logmessage ## added milliseconds for timing tests
+ if levelval <= self.loglevels[self.scrlevel]: print(message)
+ if self.logfile and levelval <= self.loglevels[self.loglevel]: self.logfile.write(message + "\n")
+ if msglevel == "S":
+ print("\n **** Fatal error - exiting ****")
+ sys.exit(1)
+ if msglevel == "X": assert False, message
+ if msglevel == "E": self.errorcount += 1
+ if msglevel == "W": self.warningcount += 1
+
+ def raisescrlevel(self, level): # Temporarily increase screen logging
+ if level not in self.loglevels or level == "X" : self.log("Invalid scrlevel: " + level, "X")
+ if self.loglevels[level] > self.loglevels[self.scrlevel]:
+ current = self.scrlevel
+ self.scrlevel = level
+ self._basescrlevel = current
+ self.log("scrlevel raised to " + level, "I")
+
+ def resetscrlevel(self):
+ self.scrlevel = self._basescrlevel
+
+
+class parameters(object):
+ # Object for holding parameters information, organised by class (eg logging)
+
+ # Default parameters for use in pysilfont modules
+ # Names must be case-insensitively unique across all parameter classes
+ # Parameter types are deduced from the default values
+
+ def __init__(self):
+ # Default parameters for all modules
+ defparams = {}
+ defparams['system'] = {'version': silfont.__version__, 'copyright': silfont.__copyright__} # Code treats these as read-only
+ defparams['logging'] = {'scrlevel': 'P', 'loglevel': 'W'}
+ defparams['backups'] = {'backup': True, 'backupdir': 'backups', 'backupkeep': 5}
+ # Default parameters for UFO module
+ defparams['outparams'] = OrderedDict([ # Use ordered dict so parameters show in logical order with -h p
+ ("UFOversion", ""), # UFOversion - defaults to existing unless a value is supplied
+ ("indentIncr", " "), # XML Indent increment
+ ("indentFirst", " "), # First XML indent
+ ("indentML", False), # Should multi-line string values be indented?
+ ("plistIndentFirst", ""), # First indent amount for plists
+ ('precision', 6), # Decimal precision to use in XML output - both for real values and for attributes if float
+ ("floatAttribs", ['xScale', 'xyScale', 'yxScale', 'yScale', 'angle']), # Used with precision above
+ ("intAttribs", ['pos', 'width', 'height', 'xOffset', 'yOffset', 'x', 'y']),
+ ("sortDicts", True), # Should dict elements be sorted alphabetically?
+ ("renameGlifs", True), # Rename glifs based on UFO3 suggested algorithm
+ ("format1Glifs", False), # Force output format 1 glifs including UFO2-style anchors (for use with FontForge
+ ("glifElemOrder", ['advance', 'unicode', 'note', 'image', 'guideline', 'anchor', 'outline', 'lib']), # Order to output glif elements
+ ("attribOrders.glif",['pos', 'width', 'height', 'fileName', 'base', 'xScale', 'xyScale', 'yxScale', 'yScale', 'xOffset', 'yOffset',
+ 'x', 'y', 'angle', 'type', 'smooth', 'name', 'format', 'color', 'identifier'])
+ ])
+ defparams['ufometadata'] = {"checkfix": "check"} # Apply metadata fixes when reading UFOs
+
+ self.paramshelp = {} # Info used when outputting help about parameter options
+ self.paramshelp["classdesc"] = {
+ "logging": "controls the level of log messages go to screen or log files.",
+ "backups": "controls backup settings for scripts that output fonts - by default backups are made if the output font is overwriting the input font",
+ "outparams": "Output options for UFOs - cover UFO version and normalization",
+ "ufometadata": "controls if UFO metadata be checked, or checked and fixed"
+ }
+ self.paramshelp["paramsdesc"] = {
+ "scrlevel": "Logging level for screen messages - one of S,E,P.W,I or V",
+ "loglevel": "Logging level for log file messages - one of E,P.W,I or V",
+ "backup": "Should font backups be made",
+ "backupdir": "Directory to use for font backups",
+ "backupkeep": "How many backups to keep",
+ "indentIncr": "XML Indent increment",
+ "indentFirst": "First XML indent",
+ "indentML": "Should multi-line string values be indented?",
+ "plistIndentFirst": "First indent amount for plists",
+ "sortDicts": "Should dict elements be sorted alphabetically?",
+ "precision": "Decimal precision to use in XML output - both for real values and for attributes if numeric",
+ "renameGlifs": "Rename glifs based on UFO3 suggested algorithm",
+ "UFOversion": "UFOversion to output - defaults to version of the input UFO",
+ "format1Glifs": "Force output format 1 glifs including UFO2-style anchors (was used with FontForge; no longer needed)",
+ "glifElemOrder": "Order to output glif elements",
+ "floatAttribs": "List of float attributes - used when setting decimal precision",
+ "intAttribs": "List of attributes that should be integers",
+ "attribOrders.glif": "Order in which to output glif attributes",
+ "checkfix": "Should check & fix tests be done - one of None, Check or Fix"
+ }
+ self.paramshelp["defaultsdesc"] = { # For use where default needs clarifying with text
+ "indentIncr" : "<two spaces>",
+ "indentFirst": "<two spaces>",
+ "plistIndentFirst": "<No indent>",
+ "UFOversion": "<Existing version>"
+ }
+
+ self.classes = {} # Dictionary containing a list of parameters in each class
+ self.paramclass = {} # Dictionary of class name for each parameter name
+ self.types = {} # Python type for each parameter deduced from initial values supplied
+ self.listtypes = {} # If type is dict, the type of values in the dict
+ self.logger = loggerobj()
+ defset = _paramset(self, "default", "defaults")
+ self.sets = {"default": defset}
+ self.lcase = {} # Lower case index of parameters names
+ for classn in defparams:
+ self.classes[classn] = []
+ for parn in defparams[classn]:
+ value = defparams[classn][parn]
+ self.classes[classn].append(parn)
+ self.paramclass[parn] = classn
+ self.types[parn] = type(value)
+ if type(value) is list: self.listtypes[parn] = type(value[0])
+ super(_paramset, defset).__setitem__(parn, value) # __setitem__ in paramset does not allow new values!
+ self.lcase[parn.lower()] = parn
+
+ def addset(self, name, sourcedesc=None, inputdict=None, configfile=None, copyset=None):
+ # Create a subset from one of a dict, config file or existing set
+ # Only one option should be used per call
+ # sourcedesc should be added for user-supplied data (eg config file) for reporting purposes
+ dict = {}
+ if configfile:
+ config = configparser.ConfigParser()
+ config.read_file(open(configfile, encoding="utf-8"))
+ if sourcedesc is None: sourcedesc = configfile
+ for classn in config.sections():
+ for item in config.items(classn):
+ parn = item[0]
+ if self.paramclass[parn] == "system":
+ self.logger.log("Can't change " + parn + " parameter via config file", "S")
+ val = item[1].strip('"').strip("'")
+ dict[parn] = val
+ elif copyset:
+ if sourcedesc is None: sourcedesc = "Copy of " + copyset
+ for parn in self.sets[copyset]:
+ dict[parn] = self.sets[copyset][parn]
+ elif inputdict:
+ dict = inputdict
+ if sourcedesc is None: sourcedesc = "unspecified source"
+ self.sets[name] = _paramset(self, name, sourcedesc, dict)
+
+ def printhelp(self):
+ phelp = self.paramshelp
+ print("\nMost pysilfont scripts have -p, --params options which can be used to change default behaviour of scripts. For example '-p scrlevel=w' will log warning messages to screen \n")
+ print("Listed below are all such parameters, grouped by purpose. Not all apply to all scripts - "
+ "in partucular outparams and ufometadata only apply to scripts using pysilfont's own UFO code")
+ for classn in ("logging", "backups", "ufometadata", "outparams"):
+ print("\n" + classn[0].upper() + classn[1:] + " - " + phelp["classdesc"][classn])
+ for param in self.classes[classn]:
+ if param == "format1Glifs": continue # Param due to be phased out
+ paramdesc = phelp["paramsdesc"][param]
+ paramtype = self.types[param].__name__
+ defaultdesc = phelp["defaultsdesc"][param] if param in phelp["defaultsdesc"] else self.sets["default"][param]
+ print(' {:<20}: {}'.format(param, paramdesc))
+ print(' (Type: {:<6} Default: {})'.format(paramtype + ",", defaultdesc))
+ print("\nNote parameter names are case-insensitive\n")
+ print("For more help see https://github.com/silnrsi/pysilfont/blob/master/docs/parameters.md\n")
+
+class _paramset(dict):
+ # Set of parameter values
+ def __init__(self, params, name, sourcedesc, inputdict=None):
+ if inputdict is None: inputdict = {}
+ self.name = name
+ self.sourcedesc = sourcedesc # Description of source for reporting
+ self.params = params # Parent parameters object
+ for parn in inputdict:
+ if params.paramclass[parn] == "system": # system values can't be changed
+ if inputdict[parn] != params.sets["default"][parn]:
+ self.params.logger.log("Can't change " + parn + " - system parameters can't be changed", "X")
+ else:
+ super(_paramset, self).__setitem__(parn, inputdict[parn])
+ else:
+ self[parn] = inputdict[parn]
+
+ def __setitem__(self, parn, value):
+ origvalue = value
+ origparn = parn
+ parn = parn.lower()
+ if self.params.paramclass[origparn] == "system":
+ self.params.logger.log("Can't change " + parn + " - system parameters are read-only", "X")
+ if parn not in self.params.lcase:
+ self.params.logger.log("Invalid parameter " + origparn + " from " + self.sourcedesc, "S")
+ else:
+ parn = self.params.lcase[parn]
+ ptyp = self.params.types[parn]
+ if ptyp is bool:
+ value = str2bool(value)
+ if value is None: self.params.logger.log(self.sourcedesc+" parameter "+origparn+" must be boolean: " + origvalue, "S")
+ if ptyp is list:
+ if type(value) is not list: value = value.split(",") # Convert csv string into list
+ if len(value) < 2: self.params.logger.log(self.sourcedesc+" parameter "+origparn+" must have a list of values: " + origvalue, "S")
+ valuesOK = True
+ listtype = self.params.listtypes[parn]
+ for i, val in enumerate(value):
+ if listtype is bool:
+ val = str2bool(val)
+ if val is None: self.params.logger.log (self.sourcedesc+" parameter "+origparn+" must contain boolean values: " + origvalue, "S")
+ value[i] = val
+ if type(val) != listtype:
+ valuesOK = False
+ badtype = str(type(val))
+ if not valuesOK: self.params.logger.log("Invalid "+badtype+" parameter type for "+origparn+": "+self.params.types[parn], "S")
+ if parn in ("loglevel", "scrlevel"): # Need to check log level is valid before setting it since otherwise logging will fail
+ value = value.upper()
+ if value not in self.params.logger.loglevels: self.params.logger.log (self.sourcedesc+" parameter "+parn+" invalid", "S")
+ super(_paramset, self).__setitem__(parn, value)
+
+ def updatewith(self, update, sourcedesc=None, log=True):
+ # Update a set with values from another set
+ if sourcedesc is None: sourcedesc = self.params.sets[update].sourcedesc
+ for parn in self.params.sets[update]:
+ oldval = self[parn] if parn in self else ""
+ self[parn] = self.params.sets[update][parn]
+ if log and oldval != "" and self[parn] != oldval:
+ old = str(oldval)
+ new = str(self[parn])
+ if old != old.strip() or new != new.strip(): # Add quotes if there are leading or trailing spaces
+ old = '"'+old+'"'
+ new = '"'+new+'"'
+ self.params.logger.log(sourcedesc + " parameters: changing "+parn+" from " + old + " to " + new, "I")
+
+
+class csvreader(object): # Iterator for csv files, skipping comments and checking number of fields
+ def __init__(self, filename, minfields=0, maxfields=999, numfields=None, logger=None):
+ self.filename = filename
+ self.minfields = minfields
+ self.maxfields = maxfields
+ self.numfields = numfields
+ self.logger = logger if logger else loggerobj() # If no logger supplied, will just log to screen
+ # Open the file and create reader
+ try:
+ file = open(filename, "rt", encoding="utf-8")
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ self.file = file
+ self.reader = csv.reader(file)
+ # Find the first non-comment line then reset so __iter__ still returns the first line
+ # This is so scripts can analyse the first line (eg to look for headers) before starting to iterate
+ self.firstline = None
+ self._commentsbeforefirstline = -1
+ while not self.firstline:
+ row = next(self.reader, None)
+ if row is None: logger.log("Input csv is empty or all lines are comments or blank", "S")
+ self._commentsbeforefirstline += 1
+ if row == []: continue # Skip blank lines
+ if row[0].lstrip().startswith("#"): continue # Skip comments - ie lines starting with #
+ self.firstline = row
+ file.seek(0) # Reset the csv and skip comments
+ for i in range(self._commentsbeforefirstline): next(self.reader, None)
+
+ def __setattr__(self, name, value):
+ if name == "numfields" and value is not None: # If numfields is changed, reset min and max fields
+ self.minfields = value
+ self.maxfields = value
+ super(csvreader, self).__setattr__(name, value)
+
+ def __iter__(self):
+ for row in self.reader:
+ self.line_num = self.reader.line_num - 1 - self._commentsbeforefirstline # Adjust count since the first line was already read in __init__
+ if row == []: continue # Skip blank lines
+ if row[0].lstrip().startswith("#"): continue # Skip comments - ie lines starting with #
+ if len(row) < self.minfields or len(row) > self.maxfields:
+ self.logger.log("Invalid number of fields on line " + str(self.line_num) + " in "+self.filename, "E" )
+ continue
+ yield row
+
+
+def execute(tool, fn, scriptargspec, chain = None):
+ # Function to handle parameter parsing, font and file opening etc in command-line scripts
+ # Supports opening (and saving) fonts using Pysilfont UFO (UFO), fontParts (FP) or fontTools (FT)
+ # Special handling for:
+ # -d variation on -h to print extra info about defaults
+ # -q quiet mode - only output a single line with count of errors (if there are any)
+ # -l opens log file and also creates a logger function to write to the log file
+ # -p other parameters. Includes backup settings and loglevel/scrlevel settings for logger
+ # for UFOlib scripts, also includes all outparams keys and ufometadata settings
+
+ argspec = list(scriptargspec)
+
+ chainfirst = False
+ if chain == "first": # If first call to execute has this set, only do the final return part of chaining
+ chainfirst = True
+ chain = None
+
+ params = chain["params"] if chain else parameters()
+ logger = chain["logger"] if chain else params.logger # paramset has already created a basic logger
+ argv = chain["argv"] if chain else sys.argv
+
+ if tool == "UFO":
+ from silfont.ufo import Ufont
+ elif tool == "FT":
+ from fontTools import ttLib
+ elif tool == "FP":
+ from fontParts.world import OpenFont
+ elif tool == "" or tool is None:
+ tool = None
+ else:
+ logger.log("Invalid tool in call to execute()", "X")
+ return
+ basemodule = sys.modules[fn.__module__]
+ poptions = {}
+ poptions['prog'] = splitfn(argv[0])[1]
+ poptions['description'] = basemodule.__doc__
+ poptions['formatter_class'] = argparse.RawDescriptionHelpFormatter
+ epilog = "For more help options use -h ?. For more documentation see https://github.com/silnrsi/pysilfont/blob/master/docs/scripts.md#" + poptions['prog'] + "\n\n"
+ poptions['epilog'] = epilog + "Version: " + params.sets['default']['version'] + "\n" + params.sets['default']['copyright']
+
+ parser = argparse.ArgumentParser(**poptions)
+ parser._optionals.title = "other arguments"
+
+
+ # Add standard arguments
+ standardargs = {
+ 'quiet': ('-q', '--quiet', {'help': 'Quiet mode - only display severe errors', 'action': 'store_true'}, {}),
+ 'log': ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile'}),
+ 'params': ('-p', '--params', {'help': 'Other parameters - see parameters.md for details', 'action': 'append'}, {'type': 'optiondict'}),
+ 'nq': ('--nq', {'help': argparse.SUPPRESS, 'action': 'store_true'}, {})}
+
+ suppliedargs = []
+ for a in argspec:
+ argn = a[:-2][-1] # a[:-2] gives the 1 or 2 argument name entries; the last is the full argument name
+ if argn[0:2] == "--": argn = argn[2:] # Will start with -- for options
+ suppliedargs.append(argn)
+ for arg in sorted(standardargs):
+ if arg not in suppliedargs: argspec.append(standardargs[arg])
+
+ defhelp = False
+ if "-h" in argv: # Look for help option supplied
+ pos = argv.index("-h")
+ if pos < len(argv)-1: # There is something following -h!
+ opt = argv[pos+1]
+ if opt in ("d", "defaults"):
+ defhelp = True # Normal help will be displayed with default info displayed by the epilog
+ deffiles = []
+ defother = []
+ elif opt in ("p", "params"):
+ params.printhelp()
+ sys.exit(0)
+ else:
+ if opt != "?":
+ print("Invalid -h value")
+ print("-h ? displays help options")
+ print("-h d (or -h defaults) lists details of default values for arguments and parameters")
+ print("-h p (or -h params) gives help on parameters that can be set with -p or --params")
+ sys.exit(0)
+
+ quiet = True if "-q" in argv and '--nq' not in argv else False
+ if quiet: logger.scrlevel = "S"
+
+ # Process the supplied argument specs, add args to parser, store other info in arginfo
+ arginfo = []
+ logdef = None
+ for a in argspec:
+ # Process all but last tuple entry as argparse arguments
+ nonkwds = a[:-2]
+ kwds = a[-2]
+ parser.add_argument(*nonkwds, **kwds)
+ # Create ainfo, a dict of framework keywords using argument name
+ argn = nonkwds[-1] # Find the argument name from first 1 or 2 tuple entries
+ if argn[0:2] == "--": # Will start with -- for options
+ argn = argn[2:].replace("-", "_") # Strip the -- and replace any - in name with _
+ ainfo=dict(a[-1]) #Make a copy so original argspec is not changed
+ for key in ainfo: # Check all keys are valid
+ if key not in ("def", "type", "optlog") : logger.log("Invalid argspec framework key: " + key, "X")
+ ainfo['name']=argn
+ if argn == 'log':
+ logdef = ainfo['def'] if 'def' in ainfo else None
+ optlog = ainfo['optlog'] if 'optlog' in ainfo else False
+ arginfo.append(ainfo)
+ if defhelp:
+ arg = nonkwds[0]
+ if 'def' in ainfo:
+ defval = ainfo['def']
+ if argn == 'log' and logdef: defval += " in logs subdirectory"
+ deffiles.append([arg, defval])
+ elif 'default' in kwds:
+ defother.append([arg, kwds['default']])
+
+ # if -h d specified, change the help epilog to info about argument defaults
+ if defhelp:
+ if not (deffiles or defother):
+ deftext = "No defaults for parameters/options"
+ else:
+ deftext = "Defaults for parameters/options - see user docs for details\n"
+ if deffiles:
+ deftext = deftext + "\n Font/file names\n"
+ for (param, defv) in deffiles:
+ deftext = deftext + ' {:<20}{}\n'.format(param, defv)
+ if defother:
+ deftext = deftext + "\n Other parameters\n"
+ for (param, defv) in defother:
+ deftext = deftext + ' {:<20}{}\n'.format(param, defv)
+ parser.epilog = deftext + "\n\n" + parser.epilog
+
+ # Parse the command-line arguments. If errors or -h used, procedure will exit here
+ args = parser.parse_args(argv[1:])
+
+ # Process the first positional parameter to get defaults for file names
+ fppval = getattr(args, arginfo[0]['name'])
+ if isinstance(fppval, list): # When nargs="+" or nargs="*" is used a list is returned
+ (fppath, fpbase, fpext) = splitfn(fppval[0])
+ if len(fppval) > 1 : fpbase = "wildcard"
+ else:
+ if fppval is None: fppval = "" # For scripts that can be run with no positional parameters
+ (fppath, fpbase, fpext) = splitfn(fppval) # First pos param use for defaulting
+
+ # Process parameters
+ if chain:
+ execparams = params.sets["main"]
+ args.params = {} # clparams not used when chaining
+ else:
+ # Read config file from disk if it exists
+ configname = os.path.join(fppath, "pysilfont.cfg")
+ if os.path.exists(configname):
+ params.addset("config file", configname, configfile=configname)
+ else:
+ params.addset("config file") # Create empty set
+ if not quiet and "scrlevel" in params.sets["config file"]: logger.scrlevel = params.sets["config file"]["scrlevel"]
+
+ # Process command-line parameters
+ clparams = {}
+ if 'params' in args.__dict__:
+ if args.params is not None:
+ for param in args.params:
+ x = param.split("=", 1)
+ if len(x) != 2:
+ logger.log("params must be of the form 'param=value'", "S")
+ if x[1] == "\\t": x[1] = "\t" # Special handling for tab characters
+ clparams[x[0]] = x[1]
+
+ args.params = clparams
+ params.addset("command line", "command line", inputdict=clparams)
+ if not quiet and "scrlevel" in params.sets["command line"]: logger.scrlevel = params.sets["command line"]["scrlevel"]
+
+ # Create main set of parameters based on defaults then update with config file values and command line values
+ params.addset("main", copyset="default")
+ params.sets["main"].updatewith("config file")
+ params.sets["main"].updatewith("command line")
+ execparams = params.sets["main"]
+
+ # Set up logging
+ if chain:
+ setattr(args, 'logger', logger)
+ args.logfile = logger.logfile
+ else:
+ logfile = None
+ logname = args.log if 'log' in args.__dict__ and args.log is not None else ""
+ if 'log' in args.__dict__:
+ if logdef is not None and (logname != "" or optlog == False):
+ (path, base, ext) = splitfn(logname)
+ (dpath, dbase, dext) = splitfn(logdef)
+ if not path:
+ if base and ext: # If both specified then use cwd, ie no path
+ path = ""
+ else:
+ path = (fppath if dpath == "" else os.path.join(fppath, dpath))
+ path = os.path.join(path, "logs")
+ if not base:
+ if dbase == "":
+ base = fpbase
+ elif dbase[0] == "_": # Append to font name if starts with _
+ base = fpbase + dbase
+ else:
+ base = dbase
+ if not ext and dext: ext = dext
+ logname = os.path.join(path, base+ext)
+ if logname == "":
+ logfile = None
+ else:
+ (logname, logpath, exists) = fullpath(logname)
+ if not exists:
+ (parent,subd) = os.path.split(logpath)
+ if subd == "logs" and os.path.isdir(parent): # Create directory if just logs subdir missing
+ logger.log("Creating logs subdirectory in " + parent, "P")
+ os.mkdir(logpath)
+ else: # Fails, since missing dir is probably a typo!
+ logger.log("Directory " + parent + " does not exist", "S")
+ logger.log('Opening log file for output: ' + logname, "P")
+ try:
+ logfile = open(logname, "w", encoding="utf-8")
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ args.log = logfile
+ # Set up logger details
+ logger.loglevel = execparams['loglevel'].upper()
+ logger.logfile = logfile
+ if not quiet: logger.scrlevel = "E" # suppress next log message from screen
+ logger.log("Running: " + " ".join(argv), "P")
+ if not quiet: logger.scrlevel = execparams['scrlevel'].upper()
+ setattr(args, 'logger', logger)
+
+# Process the argument values returned from argparse
+
+ outfont = None
+ infontlist = []
+ for c, ainfo in enumerate(arginfo):
+ aval = getattr(args, ainfo['name'])
+ if ainfo['name'] in ('params', 'log'): continue # params and log already processed
+ atype = None
+ adef = None
+ if 'type' in ainfo:
+ atype = ainfo['type']
+ if atype not in ('infont', 'outfont', 'infile', 'outfile', 'incsv', 'filename', 'optiondict'):
+ logger.log("Invalid type of " + atype + " supplied in argspec", "X")
+ if atype != 'optiondict': # All other types are file types, so adef must be set, even if just to ""
+ adef = ainfo['def'] if 'def' in ainfo else ""
+ if adef is None and aval is None: # If def explicitly set to None then this is optional
+ setattr(args, ainfo['name'], None)
+ continue
+
+ if c == 0:
+ if aval is None : logger.log("Invalid first positional parameter spec", "X")
+ if aval[-1] in ("\\","/"): aval = aval[0:-1] # Remove trailing slashes
+ else: #Handle defaults for all but first positional parameter
+ if adef is not None:
+ if not aval: aval = ""
+# if aval == "" and adef == "": # Only valid for output font parameter
+# if atype != "outfont":
+# logger.log("No value suppiled for " + ainfo['name'], "S")
+# ## Not sure why this needs to fail - we need to cope with other optional file or filename parameters
+ (apath, abase, aext) = splitfn(aval)
+ (dpath, dbase, dext) = splitfn(adef) # dpath should be None
+ if not apath:
+ if abase and aext: # If both specified then use cwd, ie no path
+ apath = ""
+ else:
+ apath = fppath
+ if not abase:
+ if dbase == "":
+ abase = fpbase
+ elif dbase[0] == "_": # Append to font name if starts with _
+ abase = fpbase + dbase
+ else:
+ abase = dbase
+ if not aext:
+ if dext:
+ aext = dext
+ elif (atype == 'outfont' or atype == 'infont'): aext = fpext
+ aval = os.path.join(apath, abase+aext)
+
+ # Open files/fonts
+ if atype == 'infont':
+ if tool is None:
+ logger.log("Can't specify a font without a font tool", "X")
+ infontlist.append((ainfo['name'], aval)) # Build list of fonts to open when other args processed
+ elif atype == 'infile':
+ logger.log('Opening file for input: '+aval, "P")
+ try:
+ aval = open(aval, "r", encoding="utf-8")
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ elif atype == 'incsv':
+ logger.log('Opening file for input: '+aval, "P")
+ aval = csvreader(aval, logger=logger)
+ elif atype == 'outfile':
+ (aval, path, exists) = fullpath(aval)
+ if not exists:
+ logger.log("Output file directory " + path + " does not exist", "S")
+ logger.log('Opening file for output: ' + aval, "P")
+ try:
+ aval = open(aval, 'w', encoding="utf-8")
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ elif atype == 'outfont':
+ if tool is None:
+ logger.log("Can't specify a font without a font tool", "X")
+ outfont = aval
+ outfontpath = apath
+ outfontbase = abase
+ outfontext = aext
+
+ elif atype == 'optiondict': # Turn multiple options in the form ['opt1=a', 'opt2=b'] into a dictionary
+ avaldict={}
+ if aval is not None:
+ for option in aval:
+ x = option.split("=", 1)
+ if len(x) != 2:
+ logger.log("options must be of the form 'param=value'", "S")
+ if x[1] == "\\t": x[1] = "\t" # Special handling for tab characters
+ avaldict[x[0]] = x[1]
+ aval = avaldict
+
+ setattr(args, ainfo['name'], aval)
+
+# Open fonts - needs to be done after processing other arguments so logger and params are defined
+
+ for name, aval in infontlist:
+ if chain and name == 'ifont':
+ aval = chain["font"]
+ else:
+ if tool == "UFO": aval = Ufont(aval, params=params)
+ if tool == "FT" : aval = ttLib.TTFont(aval)
+ if tool == "FP" : aval = OpenFont(aval)
+ setattr(args, name, aval) # Assign the font object to args attribute
+
+# All arguments processed, now call the main function
+ setattr(args, "paramsobj", params)
+ setattr(args, "cmdlineargs", argv)
+ newfont = fn(args)
+# If an output font is expected and one is returned, output the font
+ if chainfirst: chain = True # Special handling for first call of chaining
+ if newfont:
+ if chain: # return font to be handled by chain()
+ return (args, newfont)
+ else:
+ if outfont:
+ # Backup the font if output is overwriting original input font
+ if outfont == infontlist[0][1]:
+ backupdir = os.path.join(outfontpath, execparams['backupdir'])
+ backupmax = int(execparams['backupkeep'])
+ backup = str2bool(execparams['backup'])
+
+ if backup:
+ if not os.path.isdir(backupdir): # Create backup directory if not present
+ try:
+ os.mkdir(backupdir)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ backupbase = os.path.join(backupdir, outfontbase+outfontext)
+ # Work out backup name based on existing backups
+ nums = sorted([int(i[len(backupbase)+1-len(i):-1]) for i in glob(backupbase+".*~")]) # Extract list of backup numbers from existing backups
+ newnum = max(nums)+1 if nums else 1
+ backupname = backupbase+"."+str(newnum)+"~"
+ # Backup the font
+ logger.log("Backing up input font to "+backupname, "P")
+ shutil.copytree(outfont, backupname)
+ # Purge old backups
+ for i in range(0, len(nums) - backupmax + 1):
+ backupname = backupbase+"."+str(nums[i])+"~"
+ logger.log("Purging old backup "+backupname, "I")
+ shutil.rmtree(backupname)
+ else:
+ logger.log("No font backup done due to backup parameter setting", "W")
+ # Output the font
+ if tool in ("FT", "FP"):
+ logger.log("Saving font to " + outfont, "P")
+ newfont.save(outfont)
+ else: # Must be Pysilfont Ufont
+ newfont.write(outfont)
+ else:
+ logger.log("Font returned to execute() but no output font is specified in arg spec", "X")
+ elif chain: # ) When chaining return just args - the font can be accessed by args.ifont
+ return (args, None) # ) assuming that the script has not changed the input font
+
+ if logger.errorcount or logger.warningcount:
+ message = "Command completed with " + str(logger.errorcount) + " errors and " + str(logger.warningcount) + " warnings"
+ if logger.scrlevel in ("S", "E") and logname != "":
+ if logger.scrlevel == "S" or logger.warningcount: message = message + " - see " + logname
+ if logger.errorcount:
+ if quiet: logger.raisescrlevel("E")
+ logger.log(message, "E")
+ logger.resetscrlevel()
+ else:
+ logger.log(message, "P")
+ if logger.scrlevel == "P" and logger.warningcount: logger.log("See log file for warning messages or rerun with '-p scrlevel=w'", "P")
+ else:
+ logger.log("Command completed with no warnings", "P")
+
+ return (args, newfont)
+
+
+def chain(argv, function, argspec, font, params, logger, quiet): # Chain multiple command-line scripts using UFO module together without writing font to disk
+ ''' argv is a command-line call to a script in sys.argv format. function and argspec are from the script being called.
+ Although input font name must be supplied for the command line to be parsed correctly by execute() it is not used - instead the supplied
+ font object is used. Similarly -params, logfile and quiet settings in argv are not used by execute() when chaining is used'''
+ if quiet and "-q" not in argv: argv.append("-q")
+ logger.log("Chaining to " + argv[0], "P")
+ font = execute("UFO", function, argspec,
+ {'argv' : argv,
+ 'font' : font,
+ 'params': params,
+ 'logger': logger,
+ 'quiet' : quiet})
+ logger.log("Returning from " + argv[0], "P")
+ return font
+
+
+def splitfn(fn): # Split filename into path, base and extension
+ if fn: # Remove trailing slashes
+ if fn[-1] in ("\\","/"): fn = fn[0:-1]
+ (path, base) = os.path.split(fn)
+ (base, ext) = os.path.splitext(base)
+ # Handle special case where just a directory is supplied
+ if ext == "": # If there's an extension, treat as file name, eg a ufo directory
+ if os.path.isdir(fn):
+ path = fn
+ base = ""
+ return (path, base, ext)
+
+
+def str2bool(v): # If v is not a boolean, convert from string to boolean
+ if type(v) == bool: return v
+ v = v.lower()
+ if v in ("yes", "y", "true", "t", "1"):
+ v = True
+ elif v in ("no", "n", "false", "f", "0"):
+ v = False
+ else:
+ v = None
+ return v
+
+def fullpath(filen): # Changes file name to one with full path and checks directory exists
+ fullname = os.path.abspath(filen)
+ (fpath,dummy) = os.path.split(fullname)
+ return fullname, fpath, os.path.isdir(fpath)
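core.py's execute() is the framework behind the psf* scripts in lib/silfont/scripts. A hypothetical minimal script sketch follows; the argument names ifont/ofont, the log default _example.log and the logged message are illustrative assumptions rather than anything in this diff, but the argspec shape (argparse name(s), argparse kwargs, then a framework dict limited to the keys 'def', 'type' and 'optlog') and the 'infont'/'outfont'/'outfile' types follow the code above:

    #!/usr/bin/env python
    'Hypothetical example script built on silfont.core.execute'
    from silfont.core import execute

    argspec = [
        ('ifont', {'help': 'Input UFO'}, {'type': 'infont'}),
        ('ofont', {'help': 'Output UFO', 'nargs': '?'}, {'type': 'outfont'}),
        ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_example.log'}),
    ]

    def doit(args):
        font = args.ifont                      # already opened as a Ufont by execute()
        args.logger.log("Example script running", "P")
        return font                            # a returned font is written to the outfont argument

    if __name__ == '__main__':
        execute('UFO', doit, argspec)

execute() itself adds the standard -q, -l/--log and -p/--params options when the script does not supply them, defaults output and log names from the first positional argument, and backs up the output font when it would overwrite the input.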
diff --git a/lib/silfont/data/required_chars.csv b/lib/silfont/data/required_chars.csv
new file mode 100644
index 0000000..939c371
--- /dev/null
+++ b/lib/silfont/data/required_chars.csv
@@ -0,0 +1,308 @@
+USV,ps_name,glyph_name,sil_set,rationale,additional_notes
+U+0020,space,space,basic,A,
+U+0021,exclam,exclam,basic,A,
+U+0022,quotedbl,quotedbl,basic,A,
+U+0023,numbersign,numbersign,basic,A,
+U+0024,dollar,dollar,basic,A,
+U+0025,percent,percent,basic,A,
+U+0026,ampersand,ampersand,basic,A,
+U+0027,quotesingle,quotesingle,basic,A,
+U+0028,parenleft,parenleft,basic,A,
+U+0029,parenright,parenright,basic,A,
+U+002A,asterisk,asterisk,basic,A,
+U+002B,plus,plus,basic,A,
+U+002C,comma,comma,basic,A,
+U+002D,hyphen,hyphen,basic,A,
+U+002E,period,period,basic,A,
+U+002F,slash,slash,basic,A,
+U+0030,zero,zero,basic,A,
+U+0031,one,one,basic,A,
+U+0032,two,two,basic,A,
+U+0033,three,three,basic,A,
+U+0034,four,four,basic,A,
+U+0035,five,five,basic,A,
+U+0036,six,six,basic,A,
+U+0037,seven,seven,basic,A,
+U+0038,eight,eight,basic,A,
+U+0039,nine,nine,basic,A,
+U+003A,colon,colon,basic,A,
+U+003B,semicolon,semicolon,basic,A,
+U+003C,less,less,basic,A,
+U+003D,equal,equal,basic,A,
+U+003E,greater,greater,basic,A,
+U+003F,question,question,basic,A,
+U+0040,at,at,basic,A,
+U+0041,A,A,basic,A,
+U+0042,B,B,basic,A,
+U+0043,C,C,basic,A,
+U+0044,D,D,basic,A,
+U+0045,E,E,basic,A,
+U+0046,F,F,basic,A,
+U+0047,G,G,basic,A,
+U+0048,H,H,basic,A,
+U+0049,I,I,basic,A,
+U+004A,J,J,basic,A,
+U+004B,K,K,basic,A,
+U+004C,L,L,basic,A,
+U+004D,M,M,basic,A,
+U+004E,N,N,basic,A,
+U+004F,O,O,basic,A,
+U+0050,P,P,basic,A,
+U+0051,Q,Q,basic,A,
+U+0052,R,R,basic,A,
+U+0053,S,S,basic,A,
+U+0054,T,T,basic,A,
+U+0055,U,U,basic,A,
+U+0056,V,V,basic,A,
+U+0057,W,W,basic,A,
+U+0058,X,X,basic,A,
+U+0059,Y,Y,basic,A,
+U+005A,Z,Z,basic,A,
+U+005B,bracketleft,bracketleft,basic,A,
+U+005C,backslash,backslash,basic,A,
+U+005D,bracketright,bracketright,basic,A,
+U+005E,asciicircum,asciicircum,basic,A,
+U+005F,underscore,underscore,basic,A,
+U+0060,grave,grave,basic,A,
+U+0061,a,a,basic,A,
+U+0062,b,b,basic,A,
+U+0063,c,c,basic,A,
+U+0064,d,d,basic,A,
+U+0065,e,e,basic,A,
+U+0066,f,f,basic,A,
+U+0067,g,g,basic,A,
+U+0068,h,h,basic,A,
+U+0069,i,i,basic,A,
+U+006A,j,j,basic,A,
+U+006B,k,k,basic,A,
+U+006C,l,l,basic,A,
+U+006D,m,m,basic,A,
+U+006E,n,n,basic,A,
+U+006F,o,o,basic,A,
+U+0070,p,p,basic,A,
+U+0071,q,q,basic,A,
+U+0072,r,r,basic,A,
+U+0073,s,s,basic,A,
+U+0074,t,t,basic,A,
+U+0075,u,u,basic,A,
+U+0076,v,v,basic,A,
+U+0077,w,w,basic,A,
+U+0078,x,x,basic,A,
+U+0079,y,y,basic,A,
+U+007A,z,z,basic,A,
+U+007B,braceleft,braceleft,basic,A,
+U+007C,bar,bar,basic,A,
+U+007D,braceright,braceright,basic,A,
+U+007E,asciitilde,asciitilde,basic,A,
+U+00A0,uni00A0,nbspace,basic,A,
+U+00A1,exclamdown,exclamdown,basic,A,
+U+00A2,cent,cent,basic,A,
+U+00A3,sterling,sterling,basic,A,
+U+00A4,currency,currency,basic,A,
+U+00A5,yen,yen,basic,A,
+U+00A6,brokenbar,brokenbar,basic,A,
+U+00A7,section,section,basic,A,
+U+00A8,dieresis,dieresis,basic,A,
+U+00A9,copyright,copyright,basic,A,
+U+00AA,ordfeminine,ordfeminine,basic,A,
+U+00AB,guillemotleft,guillemetleft,basic,A,
+U+00AC,logicalnot,logicalnot,basic,A,
+U+00AD,uni00AD,softhyphen,basic,A,
+U+00AE,registered,registered,basic,A,
+U+00AF,macron,macron,basic,A,
+U+00B0,degree,degree,basic,A,
+U+00B1,plusminus,plusminus,basic,A,
+U+00B2,uni00B2,twosuperior,basic,A,
+U+00B3,uni00B3,threesuperior,basic,A,
+U+00B4,acute,acute,basic,A,
+U+00B5,mu,micro,basic,A,
+U+00B6,paragraph,paragraph,basic,A,
+U+00B7,periodcentered,periodcentered,basic,A,
+U+00B8,cedilla,cedilla,basic,A,
+U+00B9,uni00B9,onesuperior,basic,A,
+U+00BA,ordmasculine,ordmasculine,basic,A,
+U+00BB,guillemotright,guillemetright,basic,A,
+U+00BC,onequarter,onequarter,basic,A,
+U+00BD,onehalf,onehalf,basic,A,
+U+00BE,threequarters,threequarters,basic,A,
+U+00BF,questiondown,questiondown,basic,A,
+U+00C0,Agrave,Agrave,basic,A,
+U+00C1,Aacute,Aacute,basic,A,
+U+00C2,Acircumflex,Acircumflex,basic,A,
+U+00C3,Atilde,Atilde,basic,A,
+U+00C4,Adieresis,Adieresis,basic,A,
+U+00C5,Aring,Aring,basic,A,
+U+00C6,AE,AE,basic,A,
+U+00C7,Ccedilla,Ccedilla,basic,A,
+U+00C8,Egrave,Egrave,basic,A,
+U+00C9,Eacute,Eacute,basic,A,
+U+00CA,Ecircumflex,Ecircumflex,basic,A,
+U+00CB,Edieresis,Edieresis,basic,A,
+U+00CC,Igrave,Igrave,basic,A,
+U+00CD,Iacute,Iacute,basic,A,
+U+00CE,Icircumflex,Icircumflex,basic,A,
+U+00CF,Idieresis,Idieresis,basic,A,
+U+00D0,Eth,Eth,basic,A,
+U+00D1,Ntilde,Ntilde,basic,A,
+U+00D2,Ograve,Ograve,basic,A,
+U+00D3,Oacute,Oacute,basic,A,
+U+00D4,Ocircumflex,Ocircumflex,basic,A,
+U+00D5,Otilde,Otilde,basic,A,
+U+00D6,Odieresis,Odieresis,basic,A,
+U+00D7,multiply,multiply,basic,A,
+U+00D8,Oslash,Oslash,basic,A,
+U+00D9,Ugrave,Ugrave,basic,A,
+U+00DA,Uacute,Uacute,basic,A,
+U+00DB,Ucircumflex,Ucircumflex,basic,A,
+U+00DC,Udieresis,Udieresis,basic,A,
+U+00DD,Yacute,Yacute,basic,A,
+U+00DE,Thorn,Thorn,basic,A,
+U+00DF,germandbls,germandbls,basic,A,
+U+00E0,agrave,agrave,basic,A,
+U+00E1,aacute,aacute,basic,A,
+U+00E2,acircumflex,acircumflex,basic,A,
+U+00E3,atilde,atilde,basic,A,
+U+00E4,adieresis,adieresis,basic,A,
+U+00E5,aring,aring,basic,A,
+U+00E6,ae,ae,basic,A,
+U+00E7,ccedilla,ccedilla,basic,A,
+U+00E8,egrave,egrave,basic,A,
+U+00E9,eacute,eacute,basic,A,
+U+00EA,ecircumflex,ecircumflex,basic,A,
+U+00EB,edieresis,edieresis,basic,A,
+U+00EC,igrave,igrave,basic,A,
+U+00ED,iacute,iacute,basic,A,
+U+00EE,icircumflex,icircumflex,basic,A,
+U+00EF,idieresis,idieresis,basic,A,
+U+00F0,eth,eth,basic,A,
+U+00F1,ntilde,ntilde,basic,A,
+U+00F2,ograve,ograve,basic,A,
+U+00F3,oacute,oacute,basic,A,
+U+00F4,ocircumflex,ocircumflex,basic,A,
+U+00F5,otilde,otilde,basic,A,
+U+00F6,odieresis,odieresis,basic,A,
+U+00F7,divide,divide,basic,A,
+U+00F8,oslash,oslash,basic,A,
+U+00F9,ugrave,ugrave,basic,A,
+U+00FA,uacute,uacute,basic,A,
+U+00FB,ucircumflex,ucircumflex,basic,A,
+U+00FC,udieresis,udieresis,basic,A,
+U+00FD,yacute,yacute,basic,A,
+U+00FE,thorn,thorn,basic,A,
+U+00FF,ydieresis,ydieresis,basic,A,
+U+0131,dotlessi,idotless,basic,B,
+U+0152,OE,OE,basic,A,
+U+0153,oe,oe,basic,A,
+U+0160,Scaron,Scaron,basic,A,
+U+0161,scaron,scaron,basic,A,
+U+0178,Ydieresis,Ydieresis,basic,A,
+U+017D,Zcaron,Zcaron,basic,A,
+U+017E,zcaron,zcaron,basic,A,
+U+0192,florin,florin,basic,A,
+U+02C6,circumflex,circumflex,basic,A,
+U+02C7,caron,caron,basic,B,
+U+02D8,breve,breve,basic,B,
+U+02D9,dotaccent,dotaccent,basic,B,
+U+02DA,ring,ring,basic,B,
+U+02DB,ogonek,ogonek,basic,B,
+U+02DC,tilde,tilde,basic,A,
+U+02DD,hungarumlaut,hungarumlaut,basic,B,
+U+034F,uni034F,graphemejoinercomb,basic,D,
+U+03C0,pi,pi,basic,B,
+U+2000,uni2000,enquad,basic,C,
+U+2001,uni2001,emquad,basic,C,
+U+2002,uni2002,enspace,basic,C,
+U+2003,uni2003,emspace,basic,C,
+U+2004,uni2004,threeperemspace,basic,C,
+U+2005,uni2005,fourperemspace,basic,C,
+U+2006,uni2006,sixperemspace,basic,C,
+U+2007,uni2007,figurespace,basic,C,
+U+2008,uni2008,punctuationspace,basic,C,
+U+2009,uni2009,thinspace,basic,C,
+U+200A,uni200A,hairspace,basic,C,
+U+200B,uni200B,zerowidthspace,basic,C,
+U+200C,uni200C,zerowidthnonjoiner,basic,D,
+U+200D,uni200D,zerowidthjoiner,basic,D,
+U+200E,uni200E,lefttorightmark,rtl,D,
+U+200F,uni200F,righttoleftmark,rtl,D,
+U+2010,uni2010,hyphentwo,basic,C,
+U+2011,uni2011,nonbreakinghyphen,basic,C,
+U+2012,figuredash,figuredash,basic,C,
+U+2013,endash,endash,basic,A,
+U+2014,emdash,emdash,basic,A,
+U+2015,uni2015,horizontalbar,basic,C,
+U+2018,quoteleft,quoteleft,basic,A,
+U+2019,quoteright,quoteright,basic,A,
+U+201A,quotesinglbase,quotesinglbase,basic,A,
+U+201C,quotedblleft,quotedblleft,basic,A,
+U+201D,quotedblright,quotedblright,basic,A,
+U+201E,quotedblbase,quotedblbase,basic,A,
+U+2020,dagger,dagger,basic,A,
+U+2021,daggerdbl,daggerdbl,basic,A,
+U+2022,bullet,bullet,basic,A,
+U+2026,ellipsis,ellipsis,basic,A,
+U+2027,uni2027,hyphenationpoint,basic,C,
+U+2028,uni2028,lineseparator,basic,C,
+U+2029,uni2029,paragraphseparator,basic,C,
+U+202A,uni202A,lefttorightembedding,rtl,D,
+U+202B,uni202B,righttoleftembedding,rtl,D,
+U+202C,uni202C,popdirectionalformatting,rtl,D,
+U+202D,uni202D,lefttorightoverride,rtl,D,
+U+202E,uni202E,righttoleftoverride,rtl,D,
+U+202F,uni202F,narrownbspace,basic,C,
+U+2030,perthousand,perthousand,basic,A,
+U+2039,guilsinglleft,guilsinglleft,basic,A,
+U+203A,guilsinglright,guilsinglright,basic,A,
+U+2044,fraction,fraction,basic,B,
+U+2060,uni2060,wordjoiner,basic,D,
+U+2066,uni2066,lefttorightisolate,rtl,D,
+U+2067,uni2067,righttoleftisolate,rtl,D,
+U+2068,uni2068,firststrongisolate,rtl,D,
+U+2069,uni2069,popdirectionalisolate,rtl,D,
+U+206C,uni206C,inhibitformshaping-ar,rtl,D,
+U+206D,uni206D,activateformshaping-ar,rtl,D,
+U+2074,uni2074,foursuperior,basic,E,
+U+20AC,Euro,euro,basic,A,
+U+2122,trademark,trademark,basic,A,
+U+2126,Omega,Ohm,basic,B,
+U+2202,partialdiff,partialdiff,basic,B,
+U+2206,Delta,Delta,basic,B,
+U+220F,product,product,basic,B,
+U+2211,summation,summation,basic,B,
+U+2212,minus,minus,basic,E,
+U+2215,uni2215,divisionslash,basic,E,
+U+2219,uni2219,bulletoperator,basic,C,Some applications use this instead of 00B7
+U+221A,radical,radical,basic,B,
+U+221E,infinity,infinity,basic,B,
+U+222B,integral,integral,basic,B,
+U+2248,approxequal,approxequal,basic,B,
+U+2260,notequal,notequal,basic,B,
+U+2264,lessequal,lessequal,basic,B,
+U+2265,greaterequal,greaterequal,basic,B,
+U+2423,uni2423,blank,basic,F,Advance width should probably be the same as a space.
+U+25CA,lozenge,lozenge,basic,B,
+U+25CC,uni25CC,dottedCircle,basic,J,"If your OpenType font supports combining diacritics, be sure to include U+25CC DOTTED CIRCLE in your font, and optionally include this in your positioning rules for all your combining marks. This is because Uniscribe will insert U+25CC between ""illegal"" diacritic sequences (such as two U+064E characters in a row) to make the mistake more visible. (https://docs.microsoft.com/en-us/typography/script-development/arabic#handling-invalid-combining-marks)"
+U+F130,uniF130,FontBslnSideBrngMrkrLft,sil,K,
+U+F131,uniF131,FontBslnSideBrngMrkrRt,sil,K,
+U+FB01,uniFB01,fi,basic,B,
+U+FB02,uniFB02,fl,basic,B,
+U+FE00,uniFE00,VS1,basic,H,Add this to the cmap and point them to null glyphs
+U+FE01,uniFE01,VS2,basic,H,Add this to the cmap and point them to null glyphs
+U+FE02,uniFE02,VS3,basic,H,Add this to the cmap and point them to null glyphs
+U+FE03,uniFE03,VS4,basic,H,Add this to the cmap and point them to null glyphs
+U+FE04,uniFE04,VS5,basic,H,Add this to the cmap and point them to null glyphs
+U+FE05,uniFE05,VS6,basic,H,Add this to the cmap and point them to null glyphs
+U+FE06,uniFE06,VS7,basic,H,Add this to the cmap and point them to null glyphs
+U+FE07,uniFE07,VS8,basic,H,Add this to the cmap and point them to null glyphs
+U+FE08,uniFE08,VS9,basic,H,Add this to the cmap and point them to null glyphs
+U+FE09,uniFE09,VS10,basic,H,Add this to the cmap and point them to null glyphs
+U+FE0A,uniFE0A,VS11,basic,H,Add this to the cmap and point them to null glyphs
+U+FE0B,uniFE0B,VS12,basic,H,Add this to the cmap and point them to null glyphs
+U+FE0C,uniFE0C,VS13,basic,H,Add this to the cmap and point them to null glyphs
+U+FE0D,uniFE0D,VS14,basic,H,Add this to the cmap and point them to null glyphs
+U+FE0E,uniFE0E,VS15,basic,H,Add this to the cmap and point them to null glyphs
+U+FE0F,uniFE0F,VS16,basic,H,Add this to the cmap and point them to null glyphs
+U+FEFF,uniFEFF,zeroWidthNoBreakSpace,basic,I,Making this visible might be helpful
+U+FFFC,uniFFFC,objectReplacementCharacter,basic,G,It is easier for someone looking at the converted text to figure out what's going on if these have a visual representation.
+U+FFFD,uniFFFD,replacementCharacter,basic,G,It is easier for someone looking at the converted text to figure out what's going on if these have a visual representation.
+,,,,,
diff --git a/lib/silfont/data/required_chars.md b/lib/silfont/data/required_chars.md
new file mode 100644
index 0000000..444f2e6
--- /dev/null
+++ b/lib/silfont/data/required_chars.md
@@ -0,0 +1,32 @@
+# required_chars - recommended characters for Non-Roman fonts
+
+For optimal compatibility with a variety of operating systems, all Non-Roman fonts should include
+a set of glyphs for basic Roman characters and punctuation. Ideally this should include all the
+following characters, although some depend on other considerations (see the notes). The basis
+for this list is a union of the Windows Codepage 1252 and MacRoman character sets plus additional
+useful characters.
+
+The CSV includes the following headers:
+
+* USV - Unicode Scalar Value
+* ps_name - PostScript name of the glyph that will end up in production
+* glyph_name - Glyphs app name that will be used in the UFO
+* sil_set - set to include in a font
+ * basic - should be included in any Non-Roman font
+ * rtl - should be included in any right-to-left script font
+ * sil - should be included in any SIL font
+* rationale - worded to complete the phrase: "This character is needed ..."
+ * A - in Codepage 1252
+ * B - in MacRoman
+ * C - for publishing
+ * D - for Non-Roman fonts and publishing
+ * E - by Google Fonts
+ * F - by TeX for visible space
+ * G - for encoding conversion utilities
+ * H - in case Variation Sequences are defined in future
+ * I - to detect byte order
+ * J - to render combining marks in isolation
+ * K - to view sidebearings for every glyph using these characters
+* additional_notes - how the character might be used
+
+The list was previously maintained here: https://scriptsource.org/entry/gg5wm9hhd3
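As a quick illustration of the headers documented above, the following sketch reads required_chars.csv with Python's csv module and lists the USVs belonging to one sil_set. The file path and the chosen set are placeholders, and it assumes the CSV's header row uses the column names listed above.

import csv

def required_usvs(csv_path="lib/silfont/data/required_chars.csv", sil_set="basic"):
    # Return the USV column for every row whose sil_set matches (e.g. "basic", "rtl", "sil")
    with open(csv_path, newline="", encoding="utf-8") as f:
        return [row["USV"] for row in csv.DictReader(f) if row["sil_set"] == sil_set]

# Example: count the characters recommended for right-to-left fonts
# print(len(required_usvs(sil_set="rtl")))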
diff --git a/lib/silfont/etutil.py b/lib/silfont/etutil.py
new file mode 100644
index 0000000..3587231
--- /dev/null
+++ b/lib/silfont/etutil.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+'Classes and functions for handling XML files in pysilfont scripts'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from xml.etree import ElementTree as ET
+import silfont.core
+
+import re, os, codecs, io, collections
+
+_elementprotect = {
+ '&' : '&amp;',
+ '<' : '&lt;',
+ '>' : '&gt;' }
+_attribprotect = dict(_elementprotect)
+_attribprotect['"'] = '&quot;' # Copy of element protect with double quote added
+
+class ETWriter(object) :
+    """ General-purpose ElementTree pretty printer, with options for attribute ordering
+        beyond simple sorting and for specifying which elements should use CDATA
+
+ Note there is no support for namespaces. Originally there was, and if it is needed in the future look at
+ commits from 10th May 2018 or earlier. The code there would need reworking!"""
+
+ def __init__(self, etree, attributeOrder = {}, takesCData = set(),
+ indentIncr = " ", indentFirst = " ", indentML = False, inlineelem=[], precision = None, floatAttribs = [], intAttribs = []):
+ self.root = etree
+ self.attributeOrder = attributeOrder # Sort order for attributes - just one list for all elements
+ self.takesCData = takesCData
+ self.indentIncr = indentIncr # Incremental increase in indent
+ self.indentFirst = indentFirst # Indent for first level
+ self.indentML = indentML # Add indent to multi-line strings
+ self.inlineelem = inlineelem # For supporting in-line elements. Does not work with mix of inline and other subelements in same element
+ self.precision = precision # Precision to use outputting numeric attribute values
+ self.floatAttribs = floatAttribs # List of float/real attributes used with precision
+ self.intAttribs = intAttribs
+
+ def _protect(self, txt, base=_attribprotect) :
+ return re.sub(r'['+r"".join(base.keys())+r"]", lambda m: base[m.group(0)], txt)
+
+ def serialize_xml(self, base = None, indent = '') :
+ # Create the xml and return as a string
+ outstrings = []
+ outstr=""
+ if base is None :
+ base = self.root
+ outstr += '<?xml version="1.0" encoding="UTF-8"?>\n'
+ if '.pi' in base.attrib : # Processing instructions
+ for pi in base.attrib['.pi'].split(",") : outstr += '<?{}?>\n'.format(pi)
+
+ if '.doctype' in base.attrib : outstr += '<!DOCTYPE {}>\n'.format(base.attrib['.doctype'])
+
+ tag = base.tag
+ attribs = base.attrib
+
+ if '.comments' in attribs :
+ for c in attribs['.comments'].split(",") : outstr += '{}<!--{}-->\n'.format(indent, c)
+
+ i = indent if tag not in self.inlineelem else ""
+ outstr += '{}<{}'.format(i, tag)
+
+ for k in sorted(list(attribs.keys()), key=lambda x: self.attributeOrder.get(x, x)):
+ if k[0] != '.' :
+ att = attribs[k]
+ if self.precision is not None and k in self.floatAttribs :
+ if "." in att:
+ num = round(float(att), self.precision)
+ att = int(num) if num == int(num) else num
+ elif k in self.intAttribs :
+ att = int(round(float(att)))
+ else:
+ att = self._protect(att)
+ outstr += ' {}="{}"'.format(k, att)
+
+ if len(base) or (base.text and base.text.strip()) :
+ outstr += '>'
+ if base.text and base.text.strip() :
+ if tag not in self.takesCData :
+ t = base.text
+ if self.indentML : t = t.replace('\n', '\n' + indent)
+ t = self._protect(t, base=_elementprotect)
+ else :
+ t = "<![CDATA[\n\t" + indent + base.text.replace('\n', '\n\t' + indent) + "\n" + indent + "]]>"
+ outstr += t
+ if len(base) :
+ if base[0].tag not in self.inlineelem : outstr += '\n'
+ if base == self.root:
+ incr = self.indentFirst
+ else:
+ incr = self.indentIncr
+ outstrings.append(outstr); outstr=""
+ for b in base : outstrings.append(self.serialize_xml(base=b, indent=indent + incr))
+ if base[-1].tag not in self.inlineelem : outstr += indent
+ outstr += '</{}>'.format(tag)
+ else :
+ outstr += '/>'
+ if base.tail and base.tail.strip() :
+ outstr += self._protect(base.tail, base=_elementprotect)
+ if tag not in self.inlineelem : outstr += "\n"
+
+ if '.commentsafter' in base.attrib :
+ for c in base.attrib['.commentsafter'].split(",") : outstr += '{}<!--{}-->\n'.format(indent, c)
+
+ outstrings.append(outstr)
+ return "".join(outstrings)
+
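As a minimal sketch of how this serializer might be driven (the element, attribute names and attribute order below are illustrative only, not taken from the source):

from xml.etree import ElementTree as ET
from silfont.etutil import ETWriter, makeAttribOrder

# Build a small tree, then pretty-print it with a chosen attribute order
root = ET.fromstring('<glyph name="a" format="2"><advance width="500"/></glyph>')
writer = ETWriter(root, attributeOrder=makeAttribOrder(["name", "format", "width"]))
print(writer.serialize_xml())  # includes the XML declaration and indented child elements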
+class _container(object) :
+ # Parent class for other objects
+    def __init__(self) :
+        self._contents = {}
+    # Define methods so it acts like an immutable container
+ # (changes should be made via object functions etc)
+ def __len__(self):
+ return len(self._contents)
+ def __getitem__(self, key):
+ return self._contents[key]
+ def __iter__(self):
+ return iter(self._contents)
+ def keys(self) :
+ return self._contents.keys()
+
+class xmlitem(_container):
+ """ The xml data item for an xml file"""
+
+ def __init__(self, dirn = None, filen = None, parse = True, logger=None) :
+ self.logger = logger if logger else silfont.core.loggerobj()
+ self._contents = {}
+ self.dirn = dirn
+ self.filen = filen
+ self.inxmlstr = ""
+ self.outxmlstr = ""
+ self.etree = None
+ self.type = None
+ if filen and dirn :
+ fulln = os.path.join( dirn, filen)
+ self.inxmlstr = io.open(fulln, "rt", encoding="utf-8").read()
+ if parse :
+ try:
+ self.etree = ET.fromstring(self.inxmlstr)
+ except:
+ try:
+ self.etree = ET.fromstring(self.inxmlstr.encode("utf-8"))
+ except Exception as e:
+ self.logger.log("Failed to parse xml for " + fulln, "E")
+ self.logger.log(str(e), "S")
+
+ def write_to_file(self,dirn,filen) :
+ outfile = io.open(os.path.join(dirn,filen),'w', encoding="utf-8")
+ outfile.write(self.outxmlstr)
+
+class ETelement(_container):
+ # Class for an etree element. Mainly used as a parent class
+ # For each tag in the element, ETelement[tag] returns a list of sub-elements with that tag
+ # process_subelements can set attributes for each tag based on a supplied spec
+ def __init__(self,element) :
+ self.element = element
+ self._contents = {}
+ self.reindex()
+
+ def reindex(self) :
+ self._contents = collections.defaultdict(list)
+ for e in self.element :
+ self._contents[e.tag].append(e)
+
+ def remove(self,subelement) :
+ self._contents[subelement.tag].remove(subelement)
+ self.element.remove(subelement)
+
+ def append(self,subelement) :
+ self._contents[subelement.tag].append(subelement)
+ self.element.append(subelement)
+
+ def insert(self,index,subelement) :
+ self._contents[subelement.tag].insert(index,subelement)
+ self.element.insert(index,subelement)
+
+ def replace(self,index,subelement) :
+ self._contents[subelement.tag][index] = subelement
+ self.element[index] = subelement
+
+ def process_attributes(self, attrspec, others = False) :
+ # Process attributes based on list of attributes in the format:
+ # (element attr name, object attr name, required)
+ # If attr does not exist and is not required, set to None
+ # If others is True, attributes not in the list are allowed
+ # Attributes should be listed in the order they should be output if writing xml out
+
+ if not hasattr(self,"parseerrors") or self.parseerrors is None: self.parseerrors=[]
+
+ speclist = {}
+ for (i,spec) in enumerate(attrspec) : speclist[spec[0]] = attrspec[i]
+
+ for eaname in speclist :
+ (eaname,oaname,req) = speclist[eaname]
+ setattr(self, oaname, getattrib(self.element,eaname))
+ if req and getattr(self, oaname) is None : self.parseerrors.append("Required attribute " + eaname + " missing")
+
+ # check for any other attributes
+ for att in self.element.attrib :
+ if att not in speclist :
+ if others:
+ setattr(self, att, getattrib(self.element,att))
+ else :
+ self.parseerrors.append("Invalid attribute " + att)
+
+ def process_subelements(self,subspec, offspec = False) :
+ # Process all subelements based on spec of expected elements
+ # subspec is a list of elements, with each list in the format:
+    #    (element name, attribute name, class name, required, multiple values allowed)
+ # If cl is set, attribute is set to an object made with that class; otherwise just text of the element
+
+ if not hasattr(self,"parseerrors") or self.parseerrors is None : self.parseerrors=[]
+
+ def make_obj(self,cl,element) : # Create object from element and cascade parse errors down
+ if cl is None : return element.text
+ if cl is ETelement :
+ obj = cl(element) # ETelement does not require parent object, ie self
+ else :
+ obj = cl(self,element)
+ if hasattr(obj,"parseerrors") and obj.parseerrors != [] :
+ if hasattr(obj,"name") and obj.name is not None : # Try to find a name for error reporting
+ name = obj.name
+ elif hasattr(obj,"label") and obj.label is not None :
+ name = obj.label
+ else :
+ name = ""
+
+ self.parseerrors.append("Errors parsing " + element.tag + " element: " + name)
+ for error in obj.parseerrors :
+ self.parseerrors.append(" " + error)
+ return obj
+
+ speclist = {}
+ for (i,spec) in enumerate(subspec) : speclist[spec[0]] = subspec[i]
+
+ for ename in speclist :
+ (ename,aname,cl,req,multi) = speclist[ename]
+ initval = [] if multi else None
+ setattr(self,aname,initval)
+
+ for ename in self : # Process all elements
+ if ename in speclist :
+ (ename,aname,cl,req,multi) = speclist[ename]
+ elements = self[ename]
+ if multi :
+ for elem in elements : getattr(self,aname).append(make_obj(self,cl,elem))
+ else :
+ setattr(self,aname,make_obj(self,cl,elements[0]))
+ if len(elements) > 1 : self.parseerrors.append("Multiple " + ename + " elements not allowed")
+ else:
+                if offspec: # Elements not in spec are allowed so create list of sub-elements.
+ setattr(self,ename,[])
+ for elem in elements : getattr(self,ename).append(ETelement(elem))
+ else :
+ self.parseerrors.append("Invalid element: " + ename)
+
+ for ename in speclist : # Check values exist for required elements etc
+ (ename,aname,cl,req,multi) = speclist[ename]
+
+ val = getattr(self,aname)
+ if req :
+ if multi and val == [] : self.parseerrors.append("No " + ename + " elements ")
+ if not multi and val == None : self.parseerrors.append("No " + ename + " element")
+
+def makeAttribOrder(attriblist) : # Turn a list of attrib names into an attributeOrder dict for ETWriter
+ return dict(map(lambda x:(x[1], x[0]), enumerate(attriblist)))
+
+def getattrib(element,attrib) : return element.attrib[attrib] if attrib in element.attrib else None \ No newline at end of file
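To make the attrspec format described in process_attributes() concrete, here is a hypothetical ETelement subclass; the element and attribute names are invented for illustration.

from xml.etree import ElementTree as ET
from silfont.etutil import ETelement

class Anchor(ETelement):
    # Hypothetical subclass using the (element attr name, object attr name, required) spec
    def __init__(self, element):
        super(Anchor, self).__init__(element)
        self.process_attributes((
            ("name", "name", True),
            ("x",    "x",    True),
            ("y",    "y",    False)), others=False)

a = Anchor(ET.fromstring('<anchor name="top" x="250"/>'))
print(a.name, a.x, a.y, a.parseerrors)  # -> top 250 None [] (y is optional, so no error)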
diff --git a/lib/silfont/fbtests/__init__.py b/lib/silfont/fbtests/__init__.py
new file mode 100755
index 0000000..e69de29
--- /dev/null
+++ b/lib/silfont/fbtests/__init__.py
diff --git a/lib/silfont/fbtests/silnotcjk.py b/lib/silfont/fbtests/silnotcjk.py
new file mode 100644
index 0000000..9ebdafd
--- /dev/null
+++ b/lib/silfont/fbtests/silnotcjk.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+'''These are copies of checks that have the "not is_cjk" condition, but these versions have that condition removed.
+The is_cjk condition was being matched by multiple fonts that are not CJK fonts but do have some CJK punctuation characters.
+These checks are based on examples from Font Bakery, copyright 2017 The Font Bakery Authors, licensed under the Apache 2.0 license'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2022 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from fontbakery.checkrunner import Section, PASS, FAIL, WARN, ERROR, INFO, SKIP
+from fontbakery.callable import condition, check, disable
+from fontbakery.message import Message
+from fontbakery.profiles.shared_conditions import typo_metrics_enabled
+import os
+from fontbakery.constants import NameID, PlatformID, WindowsEncodingID
+
+@check(
+ id = 'org.sil/check/family/win_ascent_and_descent',
+ conditions = ['vmetrics'],
+ rationale = """
+ Based on com.google.fonts/check/family/win_ascent_and_descent but with the 'not is_cjk' condition removed
+ """
+)
+def org_sil_check_family_win_ascent_and_descent(ttFont, vmetrics):
+ """Checking OS/2 usWinAscent & usWinDescent."""
+
+ if "OS/2" not in ttFont:
+ yield FAIL,\
+ Message("lacks-OS/2",
+ "Font file lacks OS/2 table")
+ return
+
+ failed = False
+ os2_table = ttFont['OS/2']
+ win_ascent = os2_table.usWinAscent
+ win_descent = os2_table.usWinDescent
+ y_max = vmetrics['ymax']
+ y_min = vmetrics['ymin']
+
+ # OS/2 usWinAscent:
+ if win_ascent < y_max:
+ failed = True
+ yield FAIL,\
+ Message("ascent",
+ f"OS/2.usWinAscent value should be"
+ f" equal or greater than {y_max},"
+ f" but got {win_ascent} instead")
+ if win_ascent > y_max * 2:
+ failed = True
+ yield FAIL,\
+ Message("ascent",
+ f"OS/2.usWinAscent value"
+ f" {win_ascent} is too large."
+ f" It should be less than double the yMax."
+ f" Current yMax value is {y_max}")
+ # OS/2 usWinDescent:
+ if win_descent < abs(y_min):
+ failed = True
+ yield FAIL,\
+ Message("descent",
+ f"OS/2.usWinDescent value should be equal or"
+ f" greater than {abs(y_min)}, but got"
+ f" {win_descent} instead.")
+
+ if win_descent > abs(y_min) * 2:
+ failed = True
+ yield FAIL,\
+ Message("descent",
+ f"OS/2.usWinDescent value"
+ f" {win_descent} is too large."
+ f" It should be less than double the yMin."
+ f" Current absolute yMin value is {abs(y_min)}")
+ if not failed:
+ yield PASS, "OS/2 usWinAscent & usWinDescent values look good!"
+
+
+@check(
+ id = 'org.sil/check/os2_metrics_match_hhea',
+ rationale="""
+ Based on com.google.fonts/check/os2_metrics_match_hhea but with the 'not is_cjk' condition removed
+ """
+)
+def org_sil_check_os2_metrics_match_hhea(ttFont):
+ """Checking OS/2 Metrics match hhea Metrics."""
+
+ filename = os.path.basename(ttFont.reader.file.name)
+
+ # Check both OS/2 and hhea are present.
+ missing_tables = False
+
+ required = ["OS/2", "hhea"]
+ for key in required:
+ if key not in ttFont:
+ missing_tables = True
+ yield FAIL,\
+ Message(f'lacks-{key}',
+ f"{filename} lacks a '{key}' table.")
+
+ if missing_tables:
+ return
+
+ # OS/2 sTypoAscender and sTypoDescender match hhea ascent and descent
+ if ttFont["OS/2"].sTypoAscender != ttFont["hhea"].ascent:
+ yield FAIL,\
+ Message("ascender",
+ f"OS/2 sTypoAscender ({ttFont['OS/2'].sTypoAscender})"
+ f" and hhea ascent ({ttFont['hhea'].ascent})"
+ f" must be equal.")
+ elif ttFont["OS/2"].sTypoDescender != ttFont["hhea"].descent:
+ yield FAIL,\
+ Message("descender",
+ f"OS/2 sTypoDescender ({ttFont['OS/2'].sTypoDescender})"
+ f" and hhea descent ({ttFont['hhea'].descent})"
+ f" must be equal.")
+ elif ttFont["OS/2"].sTypoLineGap != ttFont["hhea"].lineGap:
+ yield FAIL,\
+ Message("lineGap",
+ f"OS/2 sTypoLineGap ({ttFont['OS/2'].sTypoLineGap})"
+ f" and hhea lineGap ({ttFont['hhea'].lineGap})"
+ f" must be equal.")
+ else:
+ yield PASS, ("OS/2.sTypoAscender/Descender values"
+ " match hhea.ascent/descent.")
+
+@check(
+ id = "org.sil/check/os2/use_typo_metrics",
+ rationale="""
+ Based on com.google.fonts/check/os2/use_typo_metrics but with the 'not is_cjk' condition removed
+ """
+ )
+def org_sil_check_os2_fsselectionbit7(ttFonts):
+ """OS/2.fsSelection bit 7 (USE_TYPO_METRICS) is set in all fonts."""
+
+ bad_fonts = []
+ for ttFont in ttFonts:
+ if not ttFont["OS/2"].fsSelection & (1 << 7):
+ bad_fonts.append(ttFont.reader.file.name)
+
+ if bad_fonts:
+ yield FAIL,\
+ Message('missing-os2-fsselection-bit7',
+ f"OS/2.fsSelection bit 7 (USE_TYPO_METRICS) was"
+                      f" NOT set in the following fonts: {bad_fonts}.")
+ else:
+ yield PASS, "OK"
+
+
+@check(
+ id = 'org.sil/check/vertical_metrics',
+# conditions = ['not remote_styles'],
+ rationale="""
+ Based on com.google.fonts/check/vertical_metrics but with the 'not is_cjk' condition removed
+ """
+)
+def org_sil_check_vertical_metrics(ttFont):
+ """Check font follows the Google Fonts vertical metric schema"""
+ filename = os.path.basename(ttFont.reader.file.name)
+
+ # Check necessary tables are present.
+ missing_tables = False
+ required = ["OS/2", "hhea", "head"]
+ for key in required:
+ if key not in ttFont:
+ missing_tables = True
+ yield FAIL,\
+ Message(f'lacks-{key}',
+ f"{filename} lacks a '{key}' table.")
+
+ if missing_tables:
+ return
+
+ font_upm = ttFont['head'].unitsPerEm
+ font_metrics = {
+ 'OS/2.sTypoAscender': ttFont['OS/2'].sTypoAscender,
+ 'OS/2.sTypoDescender': ttFont['OS/2'].sTypoDescender,
+ 'OS/2.sTypoLineGap': ttFont['OS/2'].sTypoLineGap,
+ 'hhea.ascent': ttFont['hhea'].ascent,
+ 'hhea.descent': ttFont['hhea'].descent,
+ 'hhea.lineGap': ttFont['hhea'].lineGap,
+ 'OS/2.usWinAscent': ttFont['OS/2'].usWinAscent,
+ 'OS/2.usWinDescent': ttFont['OS/2'].usWinDescent
+ }
+ expected_metrics = {
+ 'OS/2.sTypoLineGap': 0,
+ 'hhea.lineGap': 0,
+ }
+
+ failed = False
+ warn = False
+
+ # Check typo metrics and hhea lineGap match our expected values
+ for k in expected_metrics:
+ if font_metrics[k] != expected_metrics[k]:
+ failed = True
+ yield FAIL,\
+ Message(f'bad-{k}',
+ f'{k} is "{font_metrics[k]}" it should be {expected_metrics[k]}')
+
+ hhea_sum = (font_metrics['hhea.ascent'] +
+ abs(font_metrics['hhea.descent']) +
+ font_metrics['hhea.lineGap']) / font_upm
+
+ # Check the sum of the hhea metrics is not below 1.2
+ # (120% of upm or 1200 units for 1000 upm font)
+ if hhea_sum < 1.2:
+ failed = True
+ yield FAIL,\
+ Message('bad-hhea-range',
+ 'The sum of hhea.ascender+abs(hhea.descender)+hhea.lineGap '
+ f'is {int(hhea_sum*font_upm)} when it should be at least {int(font_upm*1.2)}')
+
+ # Check the sum of the hhea metrics is below 2.0
+ elif hhea_sum > 2.0:
+ failed = True
+ yield FAIL,\
+ Message('bad-hhea-range',
+ 'The sum of hhea.ascender+abs(hhea.descender)+hhea.lineGap '
+ f'is {int(hhea_sum*font_upm)} when it should be at most {int(font_upm*2.0)}')
+
+    # Warn if the sum of the hhea metrics is above 1.5x of the font's upm (recommended range is 1.2-1.5x)
+ elif hhea_sum > 1.5:
+ warn = True
+ yield WARN,\
+ Message('bad-hhea-range',
+ "We recommend the absolute sum of the hhea metrics should be"
+ f" between 1.2-1.5x of the font's upm. This font has {hhea_sum}x ({int(hhea_sum*font_upm)})")
+
+ if not failed and not warn:
+ yield PASS, 'Vertical metrics are good'
+
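These checks are FontBakery-style generators that yield (status, message) pairs. In normal use they are run through the profile in ttfchecks.py, but as a rough sketch they can also be exercised directly on a fontTools TTFont loaded from disk (this assumes FontBakery's check wrapper forwards calls to the underlying function; the font path is a placeholder):

from fontTools.ttLib import TTFont
from silfont.fbtests.silnotcjk import org_sil_check_vertical_metrics

font = TTFont("MyFont-Regular.ttf")  # placeholder path; must be loaded from a file
for status, message in org_sil_check_vertical_metrics(font):
    print(status, message)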
diff --git a/lib/silfont/fbtests/silttfchecks.py b/lib/silfont/fbtests/silttfchecks.py
new file mode 100644
index 0000000..d38ab77
--- /dev/null
+++ b/lib/silfont/fbtests/silttfchecks.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+'''Checks to be imported by ttfchecks.py
+Some checks based on examples from Font Bakery, copyright 2017 The Font Bakery Authors, licensed under the Apache 2.0 license'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2022 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from fontbakery.checkrunner import Section, PASS, FAIL, WARN, ERROR, INFO, SKIP
+from fontbakery.callable import condition, check, disable
+from fontbakery.message import Message
+from fontbakery.constants import NameID, PlatformID, WindowsEncodingID
+
+@check(
+ id = 'org.sil/check/name/version_format',
+ rationale = """
+ Based on com.google.fonts/check/name/version_format but:
+ - Checks for two valid formats:
+ - Production: exactly 3 digits after decimal point
+ - Allows major version to be 0
+ - Allows extra info after numbers, eg for beta or dev versions
+ """
+)
+def org_sil_version_format(ttFont):
+ "Version format is correct in 'name' table?"
+
+ from fontbakery.utils import get_name_entry_strings
+ import re
+
+ failed = False
+ version_entries = get_name_entry_strings(ttFont, NameID.VERSION_STRING)
+ if len(version_entries) == 0:
+ failed = True
+ yield FAIL,\
+ Message("no-version-string",
+ f"Font lacks a NameID.VERSION_STRING"
+ f" (nameID={NameID.VERSION_STRING}) entry")
+
+ for ventry in version_entries:
+ if not re.match(r'Version [0-9]+\.\d{3}( .+)*$', ventry):
+ failed = True
+ yield FAIL,\
+ Message("bad-version-strings",
+ f'The NameID.VERSION_STRING'
+ f' (nameID={NameID.VERSION_STRING}) value must'
+ f' follow the pattern "Version X.nnn devstring" with X.nnn'
+ f' greater than or equal to 0.000.'
+ f' Current version string is: "{ventry}"')
+ if not failed:
+ yield PASS, "Version format in NAME table entries is correct."
+
+@check(
+ id = 'org.sil/check/whitespace_widths'
+)
+def org_sil_whitespace_widths(ttFont):
+    """Checks the widths of space characters in the font against best practice"""
+ from fontbakery.utils import get_glyph_name
+
+ allok = True
+ space_data = {
+ 0x0020: ['Space'],
+ 0x00A0: ['No-break space'],
+ 0x2008: ['Punctuation space'],
+ 0x2003: ['Em space'],
+ 0x2002: ['En space'],
+ 0x2000: ['En quad'],
+ 0x2001: ['Em quad'],
+ 0x2004: ['Three-per-em space'],
+ 0x2005: ['Four-per-em space'],
+ 0x2006: ['Six-per-em space'],
+ 0x2009: ['Thin space'],
+ 0x200A: ['Hair space'],
+ 0x202F: ['Narrow no-break space'],
+ 0x002E: ['Full stop'], # Non-space character where the width is needed for comparison
+ }
+ for sp in space_data:
+ spname = get_glyph_name(ttFont, sp)
+ if spname is None:
+ spwidth = None
+ else:
+ spwidth = ttFont['hmtx'][spname][0]
+ space_data[sp].append(spname)
+ space_data[sp].append(spwidth)
+
+ # Other width info needed from the font
+ upm = ttFont['head'].unitsPerEm
+ fullstopw = space_data[46][2]
+
+ # Widths used for comparisons
+ spw = space_data[32][2]
+ if spw is None:
+ allok = False
+ yield WARN, "No space in the font so No-break space (if present) can't be checked"
+ emw = space_data[0x2003][2]
+ if emw is None:
+ allok = False
+ yield WARN, f'No em space in the font. Will be assumed to be units per em ({upm}) for other checking'
+ emw = upm
+ enw = space_data[0x2002][2]
+ if enw is None:
+ allok = False
+ yield WARN, f'No en space in the font. Will be assumed to be 1/2 em space width ({emw/2}) for checking en quad (if present)'
+ enw = emw/2
+
+ # Now check all the specific space widths. Only check if the space exists in the font
+ def checkspace(spacechar, minwidth, maxwidth=None):
+ sdata = space_data[spacechar]
+ if sdata[1]: # Name is set to None if not in font
+ # Allow for width(s) not being integer (eg em/6) so test against rounding up or down
+ minw = int(minwidth)
+ if maxwidth:
+ maxw = int(maxwidth)
+ if maxwidth > maxw: maxw += 1 # Had been rounded down, so round up
+ else:
+ maxw = minw if minw == minwidth else minw +1 # Had been rounded down, so allow rounded up as well
+ charw = sdata[2]
+ if not(minw <= charw <= maxw):
+ return (f'Width of {sdata[0]} ({spacechar:#04x}) is {str(charw)}: ', minw, maxw)
+ return (None,0,0)
+
+ # No-break space
+ (message, minw, maxw) = checkspace(0x00A0, spw)
+ if message: allok = False; yield FAIL, message + f"Should match width of space ({spw})"
+ # Punctuation space
+ (message, minw, maxw) = checkspace(0x2008, fullstopw)
+ if message: allok = False; yield FAIL, message + f"Should match width of full stop ({fullstopw})"
+ # Em space
+ (message, minw, maxw) = checkspace(0x2003, upm)
+ if message: allok = False; yield WARN, message + f"Should match units per em ({upm})"
+ # En space
+ (message, minw, maxw) = checkspace(0x2002, emw/2)
+ if message:
+ allok = False
+ widths = f'{minw}' if minw == maxw else f'{minw} or {maxw}'
+ yield WARN, message + f"Should be half the width of em ({widths})"
+ # En quad
+ (message, minw, maxw) = checkspace(0x2000, enw)
+ if message: allok = False; yield WARN, message + f"Should be the same width as en ({enw})"
+ # Em quad
+ (message, minw, maxw) = checkspace(0x2001, emw)
+ if message: allok = False; yield WARN, message + f"Should be the same width as em ({emw})"
+ # Three-per-em space
+ (message, minw, maxw) = checkspace(0x2004, emw/3)
+ if message:
+ allok = False
+ widths = f'{minw}' if minw == maxw else f'{minw} or {maxw}'
+ yield WARN, message + f"Should be 1/3 the width of em ({widths})"
+ # Four-per-em space
+ (message, minw, maxw) = checkspace(0x2005, emw/4)
+ if message:
+ allok = False
+ widths = f'{minw}' if minw == maxw else f'{minw} or {maxw}'
+        yield WARN, message + f"Should be 1/4 the width of em ({widths})"
+ # Six-per-em space
+ (message, minw, maxw) = checkspace(0x2006, emw/6)
+ if message:
+ allok = False
+ widths = f'{minw}' if minw == maxw else f'{minw} or {maxw}'
+        yield WARN, message + f"Should be 1/6 the width of em ({widths})"
+ # Thin space
+ (message, minw, maxw) = checkspace(0x2009, emw/6, emw/5)
+ if message:
+ allok = False
+ yield WARN, message + f"Should be between 1/6 and 1/5 the width of em ({minw} and {maxw})"
+ # Hair space
+    (message, minw, maxw) = checkspace(0x200A, emw/16, emw/10)
+ if message:
+ allok = False
+ yield WARN, message + f"Should be between 1/16 and 1/10 the width of em ({minw} and {maxw})"
+ # Narrow no-break space
+    (message, minw, maxw) = checkspace(0x202F, emw/6, emw/5)
+ if message:
+ allok = False
+ yield WARN, message + f"Should be between 1/6 and 1/5 the width of em ({minw} and {maxw})"
+
+ if allok:
+ yield PASS, "Space widths all match expected values"
+
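A worked example of the rounding tolerance applied by checkspace() above: fractional targets such as em/6 are accepted when the glyph width is either the rounded-down or rounded-up value (the 2048 upm is just an example):

upm = 2048
minwidth = upm / 6          # 341.33..., the ideal six-per-em width
minw = int(minwidth)        # 341 (rounded down)
maxw = minw if minw == minwidth else minw + 1   # 342, since the target was not an integer
print(minw, maxw)           # a six-per-em space of 341 or 342 units would pass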
+@check(
+ id = 'org.sil/check/number_widths'
+)
+def org_sil_number_widths(ttFont, config):
+ """Check widths of latin digits 0-9 are equal and match that of figure space"""
+ from fontbakery.utils import get_glyph_name
+
+ num_data = {
+ 0x0030: ['zero'],
+ 0x0031: ['one'],
+ 0x0032: ['two'],
+ 0x0033: ['three'],
+ 0x0034: ['four'],
+ 0x0035: ['five'],
+ 0x0036: ['six'],
+ 0x0037: ['seven'],
+ 0x0038: ['eight'],
+ 0x0039: ['nine'],
+ 0x2007: ['figurespace'] # Figure space should be the same as numerals
+ }
+
+ fontnames = []
+ for x in (ttFont['name'].names[1].string, ttFont['name'].names[2].string):
+ txt=""
+ for i in range(1,len(x),2): txt += x.decode()[i]
+ fontnames.append(txt)
+
+ for num in num_data:
+ name = get_glyph_name(ttFont, num)
+ if name is None:
+ width = -1 # So different from Zero!
+ else:
+ width = ttFont['hmtx'][name][0]
+ num_data[num].append(name)
+ num_data[num].append(width)
+
+ zerowidth = num_data[48][2]
+ if zerowidth ==-1:
+ yield FAIL, "No zero in font - remainder of check not run"
+ return
+
+ # Check non-zero digits are present and have same width as zero
+ digitsdiff = ""
+ digitsmissing = ""
+ for i in range(49,58):
+ ndata = num_data[i]
+ width = ndata[2]
+ if width != zerowidth:
+ if width == -1:
+ digitsmissing += ndata[1] + " "
+ else:
+ digitsdiff += ndata[1] + " "
+
+ # Check figure space
+ figuremess = ""
+ ndata = num_data[0x2007]
+ width = ndata[2]
+ if width != zerowidth:
+ if width == -1:
+ figuremess = "No figure space in font"
+ else:
+ figuremess = f'The width of figure space ({ndata[1]}) does not match the width of zero'
+ if digitsmissing or digitsdiff or figuremess:
+ if digitsmissing: yield FAIL, f"Digits missing: {digitsmissing}"
+ if digitsdiff: yield WARN, f"Digits with different width from Zero: {digitsdiff}"
+ if figuremess: yield WARN, figuremess
+ else:
+ yield PASS, "All number widths are OK"
diff --git a/lib/silfont/fbtests/ttfchecks.py b/lib/silfont/fbtests/ttfchecks.py
new file mode 100644
index 0000000..e05ecb1
--- /dev/null
+++ b/lib/silfont/fbtests/ttfchecks.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python
+'Support for use of Fontbakery ttf checks'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2020 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from fontbakery.checkrunner import Section, PASS, FAIL, WARN, ERROR, INFO, SKIP
+from fontbakery.callable import condition, check, disable
+from fontbakery.message import Message
+from fontbakery.fonts_profile import profile_factory
+from fontbakery.constants import NameID, PlatformID, WindowsEncodingID
+from fontbakery.profiles.googlefonts import METADATA_CHECKS, REPO_CHECKS, DESCRIPTION_CHECKS
+from fontbakery.profiles.ufo_sources import UFO_PROFILE_CHECKS
+from fontbakery.profiles.universal import DESIGNSPACE_CHECKS
+from silfont.fbtests.silttfchecks import *
+from silfont.fbtests.silnotcjk import *
+
+from collections import OrderedDict
+
+# Set imports of standard ttf tests
+
+profile_imports = ("fontbakery.profiles.universal",
+ "fontbakery.profiles.googlefonts",
+ "fontbakery.profiles.adobefonts",
+ "fontbakery.profiles.notofonts",
+ "fontbakery.profiles.fontval")
+
+def make_base_profile():
+ profile = profile_factory(default_section=Section("SIL Fonts"))
+ profile.auto_register(globals())
+
+ # Exclude groups of checks that check files other than ttfs
+ for checkid in DESCRIPTION_CHECKS + DESIGNSPACE_CHECKS + METADATA_CHECKS + REPO_CHECKS + UFO_PROFILE_CHECKS:
+ if checkid in profile._check_registry: profile.remove_check(checkid)
+
+ return profile
+
+def make_profile(check_list, variable_font=False):
+ profile = make_base_profile()
+
+ # Exclude all the checks we don't want to run
+ for checkid in check_list:
+ if checkid in profile._check_registry:
+ check_item = check_list[checkid]
+ exclude = check_item["exclude"] if "exclude" in check_item else False
+ if exclude: profile.remove_check(checkid)
+
+    # Exclude further sets of checks to reduce the number of skips and so have less clutter in the html results
+ for checkid in sorted(set(profile._check_registry.keys())):
+ section = profile._check_registry[checkid]
+ check = section.get_check(checkid)
+ conditions = getattr(check, "conditions")
+ exclude = False
+ if variable_font and "not is_variable_font" in conditions: exclude = True
+ if not variable_font and "is_variable_font" in conditions: exclude = True
+ if "noto" in checkid.lower(): exclude = True # These will be specific to Noto fonts
+ if ":adobefonts" in checkid.lower(): exclude = True # Copy of standard test with overridden results so no new info
+
+ if exclude: profile.remove_check(checkid)
+ # Remove further checks that are only relevant for variable fonts but don't use the is_variable_font condition
+ if not variable_font:
+ for checkid in (
+ "com.adobe.fonts/check/stat_has_axis_value_tables",
+ "com.google.fonts/check/STAT_strings",
+ "com.google.fonts/check/STAT/axis_order"):
+ if checkid in profile._check_registry.keys(): profile.remove_check(checkid)
+ return profile
+
+def all_checks_dict(): # An ordered dict of all checks designed for exporting the data
+ profile = make_base_profile()
+ check_dict=OrderedDict()
+
+ for checkid in sorted(set(profile._check_registry.keys()), key=str.casefold):
+        if "noto" in checkid.lower(): continue # We exclude these in make_profile()
+        if ":adobefonts" in checkid.lower(): continue # We exclude these in make_profile()
+
+ section = profile._check_registry[checkid]
+ check = section.get_check(checkid)
+
+ conditions = getattr(check, "conditions")
+ conditionstxt=""
+ for condition in conditions:
+ conditionstxt += condition + "\n"
+ conditionstxt = conditionstxt.strip()
+
+ rationale = getattr(check,"rationale")
+ rationale = "" if rationale is None else rationale.strip().replace("\n ", "\n") # Remove extraneous whitespace
+
+ psfaction = psfcheck_list[checkid] if checkid in psfcheck_list else "Not in psfcheck_list"
+
+ item = {"psfaction": psfaction,
+ "section": section.name,
+ "description": getattr(check, "description"),
+ "rationale": rationale,
+ "conditions": conditionstxt
+ }
+ check_dict[checkid] = item
+
+ for checkid in psfcheck_list: # Look for checks no longer in Font Bakery
+ if checkid not in check_dict:
+ check_dict[checkid] = {"psfaction": psfcheck_list[checkid],
+ "section": "Missing",
+ "description": "Check not found",
+ "rationale": "",
+ "conditions": ""
+ }
+
+ return check_dict
+
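Since all_checks_dict() is intended for exporting the data, one possible use is dumping it to CSV for review; this is only an illustrative sketch and the output filename is a placeholder.

import csv
from silfont.fbtests.ttfchecks import all_checks_dict

checks = all_checks_dict()
with open("fbchecks.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=["checkid", "psfaction", "section", "description", "rationale", "conditions"])
    writer.writeheader()
    for checkid, item in checks.items():
        writer.writerow(dict(item, checkid=checkid))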
+psfcheck_list = {}
+psfcheck_list['com.adobe.fonts/check/cff_call_depth'] = {'exclude': True}
+psfcheck_list['com.adobe.fonts/check/cff_deprecated_operators'] = {'exclude': True}
+psfcheck_list['com.adobe.fonts/check/cff2_call_depth'] = {'exclude': True}
+psfcheck_list['com.adobe.fonts/check/family/bold_italic_unique_for_nameid1'] = {}
+psfcheck_list['com.adobe.fonts/check/family/consistent_upm'] = {}
+psfcheck_list['com.adobe.fonts/check/family/max_4_fonts_per_family_name'] = {}
+psfcheck_list['com.adobe.fonts/check/find_empty_letters'] = {}
+psfcheck_list['com.adobe.fonts/check/freetype_rasterizer'] = {'exclude': True}
+psfcheck_list['com.adobe.fonts/check/fsselection_matches_macstyle'] = {}
+psfcheck_list['com.adobe.fonts/check/name/empty_records'] = {}
+psfcheck_list['com.adobe.fonts/check/name/postscript_name_consistency'] = {}
+psfcheck_list['com.adobe.fonts/check/nameid_1_win_english'] = {}
+psfcheck_list['com.adobe.fonts/check/name/postscript_vs_cff'] = {'exclude': True}
+psfcheck_list['com.adobe.fonts/check/sfnt_version'] = {}
+psfcheck_list['com.adobe.fonts/check/stat_has_axis_value_tables'] = {}
+psfcheck_list['com.adobe.fonts/check/varfont/distinct_instance_records'] = {}
+psfcheck_list['com.adobe.fonts/check/varfont/same_size_instance_records'] = {}
+psfcheck_list['com.adobe.fonts/check/varfont/valid_axis_nameid'] = {}
+psfcheck_list['com.adobe.fonts/check/varfont/valid_default_instance_nameids'] = {}
+psfcheck_list['com.adobe.fonts/check/varfont/valid_postscript_nameid'] = {}
+psfcheck_list['com.adobe.fonts/check/varfont/valid_subfamily_nameid'] = {}
+psfcheck_list['com.fontwerk/check/inconsistencies_between_fvar_stat'] = {}
+psfcheck_list['com.fontwerk/check/weight_class_fvar'] = {}
+psfcheck_list['com.google.fonts/check/aat'] = {}
+psfcheck_list['com.google.fonts/check/all_glyphs_have_codepoints'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/canonical_filename'] = {}
+psfcheck_list['com.google.fonts/check/cjk_chws_feature'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/cjk_not_enough_glyphs'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/cjk_vertical_metrics'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/cjk_vertical_metrics_regressions'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/cmap/alien_codepoints'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/cmap/format_12'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/cmap/unexpected_subtables'] = {}
+psfcheck_list['com.google.fonts/check/code_pages'] = {}
+psfcheck_list['com.google.fonts/check/contour_count'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/dotted_circle'] = {}
+psfcheck_list['com.google.fonts/check/dsig'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/epar'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/family/control_chars'] = {}
+psfcheck_list['com.google.fonts/check/family/equal_font_versions'] = {}
+psfcheck_list['com.google.fonts/check/family/equal_unicode_encodings'] = {}
+psfcheck_list['com.google.fonts/check/gpos7'] = {}
+psfcheck_list['com.google.fonts/check/family/has_license'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/family/italics_have_roman_counterparts'] = {}
+psfcheck_list['com.google.fonts/check/family/panose_familytype'] = {}
+psfcheck_list['com.google.fonts/check/family/panose_proportion'] = {}
+psfcheck_list['com.google.fonts/check/family/single_directory'] = {}
+psfcheck_list['com.google.fonts/check/family/tnum_horizontal_metrics'] = {}
+psfcheck_list['com.google.fonts/check/family/underline_thickness'] = {}
+psfcheck_list['com.google.fonts/check/family/vertical_metrics'] = {}
+psfcheck_list['com.google.fonts/check/family/win_ascent_and_descent'] = {'exclude': True}
+# {'change_status': {'FAIL': 'WARN', 'reason': 'Under review'}}
+psfcheck_list['com.google.fonts/check/family_naming_recommendations'] = {}
+psfcheck_list['com.google.fonts/check/file_size'] = {}
+psfcheck_list['com.google.fonts/check/font_copyright'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/font_version'] = {}
+psfcheck_list['com.google.fonts/check/fontbakery_version'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/fontdata_namecheck'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/fontv'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/fontvalidator'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/fsselection'] = {}
+psfcheck_list['com.google.fonts/check/fstype'] = {}
+psfcheck_list['com.google.fonts/check/fvar_name_entries'] = {}
+psfcheck_list['com.google.fonts/check/gasp'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/gdef_mark_chars'] = {}
+psfcheck_list['com.google.fonts/check/gdef_non_mark_chars'] = {}
+psfcheck_list['com.google.fonts/check/gdef_spacing_marks'] = {}
+psfcheck_list['com.google.fonts/check/gf-axisregistry/fvar_axis_defaults'] = {}
+psfcheck_list['com.google.fonts/check/glyf_nested_components'] = {}
+psfcheck_list['com.google.fonts/check/glyf_non_transformed_duplicate_components'] = {}
+psfcheck_list['com.google.fonts/check/glyf_unused_data'] = {}
+psfcheck_list['com.google.fonts/check/glyph_coverage'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/gpos_kerning_info'] = {}
+psfcheck_list['com.google.fonts/check/has_ttfautohint_params'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/hinting_impact'] = {}
+psfcheck_list['com.google.fonts/check/hmtx/comma_period'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/hmtx/encoded_latin_digits'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/hmtx/whitespace_advances'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/integer_ppem_if_hinted'] = {}
+psfcheck_list['com.google.fonts/check/italic_angle'] = {}
+psfcheck_list['com.google.fonts/check/kern_table'] = {}
+psfcheck_list['com.google.fonts/check/kerning_for_non_ligated_sequences'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/layout_valid_feature_tags'] = {}
+psfcheck_list['com.google.fonts/check/layout_valid_language_tags'] = \
+ {'change_status': {'FAIL': 'WARN', 'reason': 'The "invalid" ones are used by Harfbuzz'}}
+psfcheck_list['com.google.fonts/check/layout_valid_script_tags'] = {}
+psfcheck_list['com.google.fonts/check/ligature_carets'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/linegaps'] = {}
+psfcheck_list['com.google.fonts/check/loca/maxp_num_glyphs'] = {}
+psfcheck_list['com.google.fonts/check/mac_style'] = {}
+psfcheck_list['com.google.fonts/check/mandatory_avar_table'] = {}
+psfcheck_list['com.google.fonts/check/mandatory_glyphs'] = {}
+psfcheck_list['com.google.fonts/check/maxadvancewidth'] = {}
+psfcheck_list['com.google.fonts/check/meta/script_lang_tags'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/missing_small_caps_glyphs'] = {}
+psfcheck_list['com.google.fonts/check/monospace'] = {}
+psfcheck_list['com.google.fonts/check/name/ascii_only_entries'] = {}
+psfcheck_list['com.google.fonts/check/name/copyright_length'] = {}
+psfcheck_list['com.google.fonts/check/name/description_max_length'] = {}
+psfcheck_list['com.google.fonts/check/name/family_and_style_max_length'] = {}
+psfcheck_list['com.google.fonts/check/name/familyname'] = {}
+psfcheck_list['com.google.fonts/check/name/familyname_first_char'] = {}
+psfcheck_list['com.google.fonts/check/name/fullfontname'] = {}
+psfcheck_list['com.google.fonts/check/name/license'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/name/license_url'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/name/line_breaks'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/name/mandatory_entries'] = {}
+psfcheck_list['com.google.fonts/check/name/match_familyname_fullfont'] = {}
+psfcheck_list['com.google.fonts/check/name/no_copyright_on_description'] = {}
+psfcheck_list['com.google.fonts/check/name/postscriptname'] = {}
+psfcheck_list['com.google.fonts/check/name/rfn'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/name/subfamilyname'] = {}
+psfcheck_list['com.google.fonts/check/name/trailing_spaces'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/name/typographicfamilyname'] = {}
+psfcheck_list['com.google.fonts/check/name/typographicsubfamilyname'] = {}
+psfcheck_list['com.google.fonts/check/name/unwanted_chars'] = {}
+psfcheck_list['com.google.fonts/check/name/version_format'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/no_debugging_tables'] = {}
+psfcheck_list['com.google.fonts/check/old_ttfautohint'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/os2/use_typo_metrics'] = {'exclude': True}
+# Example of an override, kept here commented out for reference:
+# psfcheck_list['com.google.fonts/check/os2/use_typo_metrics'] = \
+#     {'change_status': {'FAIL': 'WARN', 'reason': 'Under review'}}
+psfcheck_list['com.google.fonts/check/os2_metrics_match_hhea'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/ots'] = {}
+psfcheck_list['com.google.fonts/check/outline_alignment_miss'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/outline_colinear_vectors'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/outline_jaggy_segments'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/outline_semi_vertical'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/outline_short_segments'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/points_out_of_bounds'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/post_table_version'] = {}
+psfcheck_list['com.google.fonts/check/production_glyphs_similarity'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/render_own_name'] = {}
+psfcheck_list['com.google.fonts/check/required_tables'] = {}
+psfcheck_list['com.google.fonts/check/rupee'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/shaping/collides'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/shaping/forbidden'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/shaping/regression'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/smart_dropout'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/STAT/axis_order'] = {}
+psfcheck_list['com.google.fonts/check/STAT/gf-axisregistry'] = {}
+psfcheck_list['com.google.fonts/check/STAT_strings'] = {}
+psfcheck_list['com.google.fonts/check/stylisticset_description'] = {}
+psfcheck_list['com.google.fonts/check/superfamily/list'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/superfamily/vertical_metrics'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/transformed_components'] = {}
+psfcheck_list['com.google.fonts/check/ttx-roundtrip'] = {}
+psfcheck_list['com.google.fonts/check/unicode_range_bits'] = {}
+psfcheck_list['com.google.fonts/check/unique_glyphnames'] = {}
+psfcheck_list['com.google.fonts/check/unitsperem'] = {}
+psfcheck_list['com.google.fonts/check/unitsperem_strict'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/unreachable_glyphs'] = {}
+psfcheck_list['com.google.fonts/check/unwanted_tables'] = {}
+psfcheck_list['com.google.fonts/check/usweightclass'] = {}
+psfcheck_list['com.google.fonts/check/valid_glyphnames'] = {}
+psfcheck_list['com.google.fonts/check/varfont_duplicate_instance_names'] = {}
+psfcheck_list['com.google.fonts/check/varfont_has_instances'] = {}
+psfcheck_list['com.google.fonts/check/varfont_instance_coordinates'] = {}
+psfcheck_list['com.google.fonts/check/varfont_instance_names'] = {}
+psfcheck_list['com.google.fonts/check/varfont_weight_instances'] = {}
+psfcheck_list['com.google.fonts/check/varfont/bold_wght_coord'] = {}
+psfcheck_list['com.google.fonts/check/varfont/consistent_axes'] = {}
+psfcheck_list['com.google.fonts/check/varfont/generate_static'] = {}
+psfcheck_list['com.google.fonts/check/varfont/grade_reflow'] = {}
+psfcheck_list['com.google.fonts/check/varfont/has_HVAR'] = {}
+psfcheck_list['com.google.fonts/check/varfont/regular_ital_coord'] = {}
+psfcheck_list['com.google.fonts/check/varfont/regular_opsz_coord'] = {}
+psfcheck_list['com.google.fonts/check/varfont/regular_slnt_coord'] = {}
+psfcheck_list['com.google.fonts/check/varfont/regular_wdth_coord'] = {}
+psfcheck_list['com.google.fonts/check/varfont/regular_wght_coord'] = {}
+psfcheck_list['com.google.fonts/check/varfont/slnt_range'] = {}
+psfcheck_list['com.google.fonts/check/varfont/stat_axis_record_for_each_axis'] = {}
+psfcheck_list['com.google.fonts/check/varfont/unsupported_axes'] = {}
+psfcheck_list['com.google.fonts/check/varfont/wdth_valid_range'] = {}
+psfcheck_list['com.google.fonts/check/varfont/wght_valid_range'] = {}
+psfcheck_list['com.google.fonts/check/vendor_id'] = {}
+psfcheck_list['com.google.fonts/check/version_bump'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/vertical_metrics'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/vertical_metrics_regressions'] = {'exclude': True}
+psfcheck_list['com.google.fonts/check/vttclean'] = {}
+psfcheck_list['com.google.fonts/check/whitespace_glyphnames'] = {}
+psfcheck_list['com.google.fonts/check/whitespace_glyphs'] = {}
+psfcheck_list['com.google.fonts/check/whitespace_ink'] = {}
+psfcheck_list['com.google.fonts/check/whitespace_widths'] = {}
+psfcheck_list['com.google.fonts/check/xavgcharwidth'] = {}
+psfcheck_list['org.sil/check/family/win_ascent_and_descent'] = {}
+psfcheck_list['org.sil/check/os2/use_typo_metrics'] = {}
+psfcheck_list['org.sil/check/os2_metrics_match_hhea'] = {}
+psfcheck_list['org.sil/check/vertical_metrics'] = {}
+psfcheck_list['org.sil/check/number_widths'] = {}
+psfcheck_list['org.sil/check/name/version_format'] = {}
+psfcheck_list['org.sil/check/whitespace_widths'] = {}
+
+profile = make_profile(check_list=psfcheck_list)
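One way to sanity-check the resulting profile is to compare what it registers against the explicit exclusions in psfcheck_list; this is only an inspection sketch, and the profile itself would normally be run via FontBakery (for example through its check-profile command, if the installed version provides it).

from silfont.fbtests.ttfchecks import profile, psfcheck_list

registered = set(profile._check_registry.keys())
excluded = [cid for cid, item in psfcheck_list.items() if item.get("exclude")]
print(f"{len(registered)} checks registered; {len(excluded)} explicitly excluded via psfcheck_list")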
diff --git a/lib/silfont/feax_ast.py b/lib/silfont/feax_ast.py
new file mode 100644
index 0000000..67d45b1
--- /dev/null
+++ b/lib/silfont/feax_ast.py
@@ -0,0 +1,445 @@
+import ast as pyast
+from fontTools.feaLib import ast
+from fontTools.feaLib.ast import asFea
+from fontTools.feaLib.error import FeatureLibError
+import re, math
+
+def asFea(g):
+ if hasattr(g, 'asClassFea'):
+ return g.asClassFea()
+ elif hasattr(g, 'asFea'):
+ return g.asFea()
+ elif isinstance(g, tuple) and len(g) == 2:
+ return asFea(g[0]) + "-" + asFea(g[1]) # a range
+ elif g.lower() in ast.fea_keywords:
+ return "\\" + g
+ else:
+ return g
+
+ast.asFea = asFea
+SHIFT = ast.SHIFT
+
+def asLiteralFea(self, indent=""):
+    ast.Element.mode = 'literal'
+    try:
+        return self.asFea(indent=indent)
+    finally:
+        ast.Element.mode = 'flat'
+
+ast.Element.asLiteralFea = asLiteralFea
+ast.Element.mode = 'flat'
+
+class ast_Comment(ast.Comment):
+ def __init__(self, text, location=None):
+ super(ast_Comment, self).__init__(text, location=location)
+ self.pretext = ""
+ self.posttext = ""
+
+ def asFea(self, indent=""):
+ return self.pretext + self.text + self.posttext
+
+class ast_MarkClass(ast.MarkClass):
+ # This is better fixed upstream in parser.parse_glyphclass_ to handle MarkClasses
+ def asClassFea(self, indent=""):
+ return "[" + " ".join(map(asFea, self.glyphs)) + "]"
+
+class ast_BaseClass(ast_MarkClass) :
+ def asFea(self, indent="") :
+ return "@" + self.name + " = [" + " ".join(map(asFea, self.glyphs.keys())) + "];"
+
+class ast_BaseClassDefinition(ast.MarkClassDefinition):
+ def asFea(self, indent="") :
+ # like base class asFea
+ return ("# " if self.mode != 'literal' else "") + \
+ "{}baseClass {} {} @{};".format(indent, self.glyphs.asFea(),
+ self.anchor.asFea(), self.markClass.name)
+
+class ast_MarkBasePosStatement(ast.MarkBasePosStatement):
+ def asFea(self, indent=""):
+ # handles members added by parse_position_base_ with feax syntax
+ if isinstance(self.base, ast.MarkClassName): # flattens pos @BASECLASS mark @MARKCLASS
+ res = ""
+ if self.mode == 'literal':
+ res += "pos base @{} ".format(self.base.markClass.name)
+ res += " ".join("mark @{}".format(m.name) for m in self.marks)
+ res += ";"
+ else:
+ for bcd in self.base.markClass.definitions:
+ if res != "":
+ res += "\n{}".format(indent)
+ res += "pos base {} {}".format(bcd.glyphs.asFea(), bcd.anchor.asFea())
+ res += "".join(" mark @{}".format(m.name) for m in self.marks)
+ res += ";"
+ else: # like base class method
+ res = "pos base {}".format(self.base.asFea())
+ res += "".join(" {} mark @{}".format(a.asFea(), m.name) for a, m in self.marks)
+ res += ";"
+ return res
+
+ def build(self, builder) :
+ #TODO: do the right thing here (write to ttf?)
+ pass
+
+class ast_MarkMarkPosStatement(ast.MarkMarkPosStatement):
+ # super class __init__() for reference
+ # def __init__(self, location, baseMarks, marks):
+ # Statement.__init__(self, location)
+ # self.baseMarks, self.marks = baseMarks, marks
+
+ def asFea(self, indent=""):
+ # handles members added by parse_position_base_ with feax syntax
+ if isinstance(self.baseMarks, ast.MarkClassName): # flattens pos @MARKCLASS mark @MARKCLASS
+ res = ""
+ if self.mode == 'literal':
+                res += "pos mark @{} ".format(self.baseMarks.markClass.name)
+ res += " ".join("mark @{}".format(m.name) for m in self.marks)
+ res += ";"
+ else:
+ for mcd in self.baseMarks.markClass.definitions:
+ if res != "":
+ res += "\n{}".format(indent)
+ res += "pos mark {} {}".format(mcd.glyphs.asFea(), mcd.anchor.asFea())
+ for m in self.marks:
+ res += " mark @{}".format(m.name)
+ res += ";"
+ else: # like base class method
+ res = "pos mark {}".format(self.baseMarks.asFea())
+ for a, m in self.marks:
+ res += " {} mark @{}".format(a.asFea() if a else "<anchor NULL>", m.name)
+ res += ";"
+ return res
+
+ def build(self, builder):
+ # builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks)
+ #TODO: do the right thing
+ pass
+
+class ast_CursivePosStatement(ast.CursivePosStatement):
+ # super class __init__() for reference
+ # def __init__(self, location, glyphclass, entryAnchor, exitAnchor):
+ # Statement.__init__(self, location)
+ # self.glyphclass = glyphclass
+ # self.entryAnchor, self.exitAnchor = entryAnchor, exitAnchor
+
+ def asFea(self, indent=""):
+ if isinstance(self.exitAnchor, ast.MarkClass): # pos cursive @BASE1 @BASE2
+ res = ""
+ if self.mode == 'literal':
+ res += "pos cursive @{} @{};".format(self.glyphclass.name, self.exitAnchor.name)
+ else:
+ allglyphs = set(self.glyphclass.glyphSet())
+ allglyphs.update(self.exitAnchor.glyphSet())
+ for g in sorted(allglyphs):
+ entry = self.glyphclass.glyphs.get(g, None)
+ exit = self.exitAnchor.glyphs.get(g, None)
+ if res != "":
+ res += "\n{}".format(indent)
+ res += "pos cursive {} {} {};".format(g,
+ (entry.anchor.asFea() if entry else "<anchor NULL>"),
+ (exit.anchor.asFea() if exit else "<anchor NULL>"))
+ else:
+ res = super(ast_CursivePosStatement, self).asFea(indent)
+ return res
+
+ def build(self, builder) :
+ #TODO: do the right thing here (write to ttf?)
+ pass
+
+class ast_MarkLigPosStatement(ast.MarkLigPosStatement):
+ def __init__(self, ligatures, marks, location=None):
+ ast.MarkLigPosStatement.__init__(self, ligatures, marks, location)
+ self.classBased = False
+ for l in marks:
+ if l is not None:
+ for m in l:
+ if m is not None and not isinstance(m[0], ast.Anchor):
+ self.classBased = True
+ break
+
+ def build(self, builder):
+ builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks)
+
+ def asFea(self, indent=""):
+ if not self.classBased or self.mode == "literal":
+ return super(ast_MarkLigPosStatement, self).asFea(indent)
+
+ res = []
+ for g in self.ligatures.glyphSet():
+ comps = []
+ for l in self.marks:
+ onecomp = []
+ if l is not None and len(l):
+ for a, m in l:
+ if not isinstance(a, ast.Anchor):
+ if g not in a.markClass.glyphs:
+ continue
+ left = a.markClass.glyphs[g].anchor.asFea()
+ else:
+ left = a.asFea()
+ onecomp.append("{} mark @{}".format(left, m.name))
+ if not len(onecomp):
+ onecomp = ["<anchor NULL>"]
+ comps.append(" ".join(onecomp))
+ res.append("pos ligature {} ".format(asFea(g)) + ("\n"+indent+SHIFT+"ligComponent ").join(comps))
+ return (";\n"+indent).join(res) + ";"
+
+# Similar to ast.MultipleSubstStatement.
+# One-to-many substitution: one glyph class on the LHS; multiple glyph classes may be on the RHS.
+# Equivalent to generating one statement for each glyph in the LHS class,
+# matched to the corresponding glyphs in the RHS classes.
+# Prefix and suffix are for contextual lookups and need no processing.
+# The replacement may contain multiple slots.
+# TODO: does the code below only support one RHS class?
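+# Illustrative expansion (hypothetical glyph/class names): with @cons = [ka kha],
+#   sub @cons by @cons nukta;
+# is written out as:
+#   sub ka by ka nukta;
+#   sub kha by kha nukta;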
+class ast_MultipleSubstStatement(ast.Statement):
+ def __init__(self, prefix, glyph, suffix, replacement, forceChain, location=None):
+ ast.Statement.__init__(self, location)
+ self.prefix, self.glyph, self.suffix = prefix, glyph, suffix
+ self.replacement = replacement
+ self.forceChain = forceChain
+ lenglyphs = len(self.glyph.glyphSet())
+ for i, r in enumerate(self.replacement) :
+ if len(r.glyphSet()) == lenglyphs:
+ self.multindex = i #first RHS slot with a glyph class
+ break
+ else:
+ if lenglyphs > 1:
+ raise FeatureLibError("No replacement class is of the same length as the matching class",
+ location)
+ else:
+                self.multindex = 0
+
+ def build(self, builder):
+ prefix = [p.glyphSet() for p in self.prefix]
+ suffix = [s.glyphSet() for s in self.suffix]
+ glyphs = self.glyph.glyphSet()
+ replacements = self.replacement[self.multindex].glyphSet()
+ lenglyphs = len(glyphs)
+ for i in range(max(lenglyphs, len(replacements))) :
+ builder.add_multiple_subst(
+ self.location, prefix, glyphs[i if lenglyphs > 1 else 0], suffix,
+ self.replacement[0:self.multindex] + [replacements[i]] + self.replacement[self.multindex+1:],
+ self.forceChain)
+
+ def asFea(self, indent=""):
+ res = ""
+ pres = (" ".join(map(asFea, self.prefix)) + " ") if len(self.prefix) else ""
+ sufs = (" " + " ".join(map(asFea, self.suffix))) if len(self.suffix) else ""
+ mark = "'" if len(self.prefix) or len(self.suffix) or self.forceChain else ""
+ if self.mode == 'literal':
+ res += "sub " + pres + self.glyph.asFea() + mark + sufs + " by "
+ res += " ".join(asFea(g) for g in self.replacement) + ";"
+ return res
+ glyphs = self.glyph.glyphSet()
+ replacements = self.replacement[self.multindex].glyphSet()
+ lenglyphs = len(glyphs)
+ count = max(lenglyphs, len(replacements))
+ for i in range(count) :
+ res += ("\n" + indent if i > 0 else "") + "sub " + pres
+ res += asFea(glyphs[i if lenglyphs > 1 else 0]) + mark + sufs
+ res += " by "
+ res += " ".join(asFea(g) for g in self.replacement[0:self.multindex] + [replacements[i]] + self.replacement[self.multindex+1:])
+ res += ";"
+ return res
+
+
+# Similar to ast.LigatureSubstStatement.
+# Many-to-one substitution: one glyph class on the RHS; multiple glyph classes may be on the LHS.
+# Equivalent to generating one statement for each glyph in the RHS class,
+# matched to the corresponding glyphs in the LHS classes.
+# It is unclear which LHS class should correspond to the RHS class.
+# Prefix and suffix are for contextual lookups and need no processing.
+# The replacement could contain multiple slots.
+# TODO: does the code below only support one LHS class?
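+# Illustrative expansion (hypothetical glyph/class names): with @cons = [ka kha] and
+# @cons_asp = [k_ha kh_ha],
+#   sub @cons ha by @cons_asp;
+# is written out as:
+#   sub ka ha by k_ha;
+#   sub kha ha by kh_ha;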
+class ast_LigatureSubstStatement(ast.Statement):
+ def __init__(self, prefix, glyphs, suffix, replacement,
+ forceChain, location=None):
+ ast.Statement.__init__(self, location)
+ self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix)
+ self.replacement, self.forceChain = replacement, forceChain
+ lenreplace = len(self.replacement.glyphSet())
+ for i, g in enumerate(self.glyphs):
+ if len(g.glyphSet()) == lenreplace:
+ self.multindex = i #first LHS slot with a glyph class
+ break
+ else:
+ if lenreplace > 1:
+ raise FeatureLibError("No class matches replacement class length", location)
+ else:
+ self.multindex = 0
+
+    def build(self, builder):
+        prefix = [p.glyphSet() for p in self.prefix]
+        suffix = [s.glyphSet() for s in self.suffix]
+        replacements = self.replacement.glyphSet()
+        lenreplace = len(replacements)
+        glyphs = self.glyphs[self.multindex].glyphSet()
+        for i in range(max(len(glyphs), len(replacements))):
+            builder.add_ligature_subst(
+                self.location, prefix,
+                self.glyphs[:self.multindex] + [glyphs[i]] + self.glyphs[self.multindex+1:],
+                suffix, replacements[i if lenreplace > 1 else 0], self.forceChain)
+
+ def asFea(self, indent=""):
+ res = ""
+ pres = (" ".join(map(asFea, self.prefix)) + " ") if len(self.prefix) else ""
+ sufs = (" " + " ".join(map(asFea, self.suffix))) if len(self.suffix) else ""
+ mark = "'" if len(self.prefix) or len(self.suffix) or self.forceChain else ""
+ if self.mode == 'literal':
+ res += "sub " + pres + " ".join(asFea(g)+mark for g in self.glyphs) + sufs + " by "
+            res += self.replacement.asFea() + ";"
+ return res
+ glyphs = self.glyphs[self.multindex].glyphSet()
+ replacements = self.replacement.glyphSet()
+ lenreplace = len(replacements)
+ count = max(len(glyphs), len(replacements))
+ for i in range(count) :
+ res += ("\n" + indent if i > 0 else "") + "sub " + pres
+ res += " ".join(asFea(g)+mark for g in self.glyphs[:self.multindex] + [glyphs[i]] + self.glyphs[self.multindex+1:])
+ res += sufs + " by "
+ res += asFea(replacements[i if lenreplace > 1 else 0])
+ res += ";"
+ return res
+
+class ast_AlternateSubstStatement(ast.Statement):
+ def __init__(self, prefix, glyphs, suffix, replacements, location=None):
+ ast.Statement.__init__(self, location)
+ self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix)
+ self.replacements = replacements
+
+ def build(self, builder):
+ prefix = [p.glyphSet() for p in self.prefix]
+ suffix = [s.glyphSet() for s in self.suffix]
+ l = len(self.glyphs.glyphSet())
+ for i, glyph in enumerate(self.glyphs.glyphSet()):
+ replacement = self.replacements.glyphSet()[i::l]
+ builder.add_alternate_subst(self.location, prefix, glyph, suffix,
+ replacement)
+
+ def asFea(self, indent=""):
+ res = ""
+ l = len(self.glyphs.glyphSet())
+ for i, glyph in enumerate(self.glyphs.glyphSet()):
+ if i > 0:
+ res += "\n" + indent
+ res += "sub "
+ if len(self.prefix) or len(self.suffix):
+ if len(self.prefix):
+ res += " ".join(map(asFea, self.prefix)) + " "
+ res += asFea(glyph) + "'" # even though we really only use 1
+ if len(self.suffix):
+ res += " " + " ".join(map(asFea, self.suffix))
+ else:
+ res += asFea(glyph)
+ res += " from "
+ replacements = ast.GlyphClass(glyphs=self.replacements.glyphSet()[i::l], location=self.location)
+ res += asFea(replacements)
+ res += ";"
+ return res
+
+class ast_IfBlock(ast.Block):
+    def __init__(self, testfn, name, cond, location=None):
+        ast.Block.__init__(self, location=location)
+        self.testfn = testfn
+        self.name = name
+        self.cond = cond
+
+ def asFea(self, indent=""):
+ if self.mode == 'literal':
+            res = "{}{}({}) {{".format(indent, self.name, self.cond)
+ res += ast.Block.asFea(self, indent=indent)
+ res += indent + "}\n"
+ return res
+ elif self.testfn():
+ return ast.Block.asFea(self, indent=indent)
+ else:
+ return ""
+
+
+class ast_DoSubStatement(ast.Statement):
+ def __init__(self, varnames, location=None):
+ ast.Statement.__init__(self, location=location)
+ self.names = varnames
+
+ def items(self, variables):
+ yield ((None, None),)
+
+class ast_DoForSubStatement(ast_DoSubStatement):
+ def __init__(self, varname, glyphs, location=None):
+ ast_DoSubStatement.__init__(self, [varname], location=location)
+ self.glyphs = glyphs.glyphSet()
+
+ def items(self, variables):
+ for g in self.glyphs:
+ yield((self.names[0], g),)
+
+def safeeval(exp):
+    # reject any expression that references a name containing a double underscore (e.g. __import__)
+ for n in pyast.walk(pyast.parse(exp)):
+ v = getattr(n, 'id', "")
+ # if v in ('_getiter_', '__next__'):
+ # continue
+ if "__" in v:
+ return False
+ return True
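+# e.g. safeeval("APx('ka', 'top') + 5") is True (hypothetical helper/glyph names), while
+# safeeval("__import__('os').system('x')") is False because the name contains '__'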
+
+class ast_DoLetSubStatement(ast_DoSubStatement):
+ def __init__(self, varnames, expression, parser, location=None):
+ ast_DoSubStatement.__init__(self, varnames, location=location)
+ self.parser = parser
+ if not safeeval(expression):
+ expression='"Unsafe Expression"'
+ self.expr = expression
+
+ def items(self, variables):
+ gbls = dict(self.parser.fns, **variables)
+ try:
+ v = eval(self.expr, gbls)
+ except Exception as e:
+ raise FeatureLibError(str(e) + " in " + self.expr, self.location)
+ if self.names is None: # in an if
+ yield((None, v),)
+ elif len(self.names) == 1:
+ yield((self.names[0], v),)
+ else:
+ yield(zip(self.names, list(v) + [None] * (len(self.names) - len(v))))
+
+class ast_DoIfSubStatement(ast_DoLetSubStatement):
+ def __init__(self, expression, parser, block, location=None):
+        ast_DoLetSubStatement.__init__(self, None, expression, parser, location=location)
+ self.block = block
+
+ def items(self, variables):
+ (_, v) = list(ast_DoLetSubStatement.items(self, variables))[0][0]
+ yield (None, (v if v else None),)
+
+class ast_KernPairsStatement(ast.Statement):
+ def __init__(self, kerninfo, location=None):
+ super(ast_KernPairsStatement, self).__init__(location)
+ self.kerninfo = kerninfo
+
+ def asFea(self, indent=""):
+ # return ("\n"+indent).join("pos {} {} {};".format(k1, round(v), k2) \
+ # for k1, x in self.kerninfo.items() for k2, v in x.items())
+ coverage = set()
+ rules = dict()
+
+ # first sort into lists by type of rule
+ for k1, x in self.kerninfo.items():
+ for k2, v in x.items():
+ # Determine pair kern type, where:
+ # 'gg' = glyph-glyph, 'gc' = glyph-class', 'cg' = class-glyph, 'cc' = class-class
+ ruleType = 'gc'[k1[0]=='@'] + 'gc'[k2[0]=='@']
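+                # ('gc'[False] == 'g' and 'gc'[True] == 'c', so e.g. a class '@A' paired
+                #  with a glyph 'b' yields ruleType 'cg')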
+ rules.setdefault(ruleType, list()).append([k1, round(v), k2])
+ # for glyph-glyph rules, make list of first glyphs:
+ if ruleType == 'gg':
+ coverage.add(k1)
+
+ # Now assemble lines in order and convert gc rules to gg where possible:
+ res = []
+ for ruleType in filter(lambda x: x in rules, ('gg', 'gc', 'cg', 'cc')):
+ if ruleType != 'gc':
+ res.extend(['pos {} {} {};'.format(k1, v, k2) for k1,v,k2 in rules[ruleType]])
+ else:
+ res.extend(['enum pos {} {} {};'.format(k1, v, k2) for k1, v, k2 in rules[ruleType] if k1 not in coverage])
+ res.extend(['pos {} {} {};'.format(k1, v, k2) for k1, v, k2 in rules[ruleType] if k1 in coverage])
+
+ return ("\n"+indent).join(res)
+
diff --git a/lib/silfont/feax_lexer.py b/lib/silfont/feax_lexer.py
new file mode 100644
index 0000000..58ea72d
--- /dev/null
+++ b/lib/silfont/feax_lexer.py
@@ -0,0 +1,105 @@
+from fontTools.feaLib.lexer import IncludingLexer, Lexer
+from fontTools.feaLib.error import FeatureLibError
+import re, io
+
+VARIABLE = "VARIABLE"
+
+class feax_Lexer(Lexer):
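+    """Lexer adding feax $variable support on top of fontTools.feaLib's Lexer.
+
+    Pending input is kept on a stack of ('text', ...) / ('tokens', ...) entries so that
+    variable expansions and pushed-back tokens can be interleaved with the original text.
+    """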
+
+ def __init__(self, *a):
+ Lexer.__init__(self, *a)
+ self.tokens_ = None
+ self.stack_ = []
+ self.empty_ = False
+
+ def next_(self, recurse=False):
+ while (not self.empty_):
+ if self.tokens_ is not None:
+ res = self.tokens_.pop(0)
+ if not len(self.tokens_):
+ self.popstack()
+ if res[0] != VARIABLE:
+ return (res[0], res[1], self.location_())
+ return self.parse_variable(res[1])
+
+ try:
+                res = Lexer.next_(self)
+                return res
+ except IndexError as e:
+ self.popstack()
+ continue
+ except StopIteration as e:
+ self.popstack()
+ continue
+ except FeatureLibError as e:
+ if u"Unexpected character" not in str(e):
+ raise e
+
+ # only executes if exception occurred
+ location = self.location_()
+ text = self.text_
+ start = self.pos_
+ cur_char = text[start]
+ if cur_char == '$':
+ self.pos_ += 1
+ self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
+ varname = text[start+1:self.pos_]
+ if len(varname) < 1 or len(varname) > 63:
+ raise FeatureLibError("Bad variable name length", location)
+ res = (VARIABLE, varname, location)
+ else:
+ raise FeatureLibError("Unexpected character: %r" % cur_char, location)
+ return res
+ raise StopIteration
+
+ def __repr__(self):
+ if self.tokens_ is not None:
+ return str(self.tokens_)
+ else:
+ return str((self.text_[self.pos_:self.pos_+20], self.pos_, self.text_length_))
+
+ def popstack(self):
+ if len(self.stack_) == 0:
+ self.empty_ = True
+ return
+ t = self.stack_.pop()
+ if t[0] == 'tokens':
+ self.tokens_ = t[1]
+ else:
+ self.text_, self.pos_, self.text_length_ = t[1]
+ self.tokens_ = None
+
+ def pushstack(self, v):
+ if self.tokens_ is None:
+ self.stack_.append(('text', (self.text_, self.pos_, self.text_length_)))
+ else:
+ self.stack_.append(('tokens', self.tokens_))
+ self.stack_.append(v)
+ self.popstack()
+
+ def pushback(self, token_type, token):
+ if self.tokens_ is not None:
+ self.tokens_.append((token_type, token))
+ else:
+ self.pushstack(('tokens', [(token_type, token)]))
+
+ def parse_variable(self, vname):
+ t = str(self.scope.get(vname, ''))
+ if t != '':
+ self.pushstack(['text', (t + " ", 0, len(t)+1)])
+ return self.next_()
+
+class feax_IncludingLexer(IncludingLexer):
+
+ @staticmethod
+ def make_lexer_(file_or_path):
+ if hasattr(file_or_path, "read"):
+ fileobj, closing = file_or_path, False
+ else:
+ filename, closing = file_or_path, True
+ fileobj = io.open(filename, mode="r", encoding="utf-8")
+ data = fileobj.read()
+ filename = getattr(fileobj, "name", None)
+ if closing:
+ fileobj.close()
+ return feax_Lexer(data, filename)
+
diff --git a/lib/silfont/feax_parser.py b/lib/silfont/feax_parser.py
new file mode 100644
index 0000000..aea3619
--- /dev/null
+++ b/lib/silfont/feax_parser.py
@@ -0,0 +1,727 @@
+from fontTools.feaLib import ast
+from fontTools.feaLib.parser import Parser
+from fontTools.feaLib.lexer import IncludingLexer, Lexer
+import silfont.feax_lexer as feax_lexer
+from fontTools.feaLib.error import FeatureLibError
+import silfont.feax_ast as astx
+import io, re, math, os
+import logging
+
+class feaplus_ast(object) :
+ MarkBasePosStatement = astx.ast_MarkBasePosStatement
+ MarkMarkPosStatement = astx.ast_MarkMarkPosStatement
+ MarkLigPosStatement = astx.ast_MarkLigPosStatement
+ CursivePosStatement = astx.ast_CursivePosStatement
+ BaseClass = astx.ast_BaseClass
+ MarkClass = astx.ast_MarkClass
+ BaseClassDefinition = astx.ast_BaseClassDefinition
+ MultipleSubstStatement = astx.ast_MultipleSubstStatement
+ LigatureSubstStatement = astx.ast_LigatureSubstStatement
+ IfBlock = astx.ast_IfBlock
+ DoForSubStatement = astx.ast_DoForSubStatement
+ DoLetSubStatement = astx.ast_DoLetSubStatement
+ DoIfSubStatement = astx.ast_DoIfSubStatement
+ AlternateSubstStatement = astx.ast_AlternateSubstStatement
+ Comment = astx.ast_Comment
+ KernPairsStatement = astx.ast_KernPairsStatement
+
+ def __getattr__(self, name):
+ return getattr(ast, name) # retrieve undefined attrs from imported fontTools.feaLib ast module
+
+class feaplus_parser(Parser) :
+ extensions = {
+ 'baseClass': lambda s: s.parseBaseClass(),
+ 'ifclass': lambda s: s.parseIfClass(),
+ 'ifinfo': lambda s: s.parseIfInfo(),
+ 'do': lambda s: s.parseDoStatement_(),
+ 'def': lambda s: s.parseDefStatement_(),
+ 'kernpairs': lambda s: s.parseKernPairsStatement_()
+ }
+ ast = feaplus_ast()
+
+ def __init__(self, filename, glyphmap, fontinfo, kerninfo, defines) :
+ if filename is None :
+ empty_file = io.StringIO("")
+ super(feaplus_parser, self).__init__(empty_file, glyphmap)
+ else :
+ super(feaplus_parser, self).__init__(filename, glyphmap)
+ self.fontinfo = fontinfo
+ self.kerninfo = kerninfo
+ self.glyphs = glyphmap
+ self.defines = defines
+ self.fns = {
+ '__builtins__': None,
+ 're' : re,
+ 'math' : math,
+ 'APx': lambda g, a, d=0: int(self.glyphs[g].anchors.get(a, [d])[0]),
+ 'APy': lambda g, a, d=0: int(self.glyphs[g].anchors.get(a, [0,d])[1]),
+ 'ADVx': lambda g: int(self.glyphs[g].advance),
+ 'MINx': lambda g: int(self.glyphs[g].bbox[0]),
+ 'MINy': lambda g: int(self.glyphs[g].bbox[1]),
+ 'MAXx': lambda g: int(self.glyphs[g].bbox[2]),
+ 'MAXy': lambda g: int(self.glyphs[g].bbox[3]),
+ 'feaclass': lambda c: self.resolve_glyphclass(c).glyphSet(),
+ 'allglyphs': lambda : self.glyphs.keys(),
+ 'lf': lambda : "\n",
+ 'info': lambda s: self.fontinfo.get(s, ""),
+ 'fileexists': lambda s: os.path.exists(s),
+ 'kerninfo': lambda s:[(k1, k2, v) for k1, x in self.kerninfo.items() for k2, v in x.items()],
+ 'opt': lambda s: self.defines.get(s, "")
+ }
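+        # These helpers are callable from feax do/let expressions, e.g. something like
+        #   do let y = APy("ka", "top") + 20;  (glyph and AP names hypothetical)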
+ # Document which builtins we really need. Of course still insecure.
+ for x in ('True', 'False', 'None', 'int', 'float', 'str', 'abs', 'all', 'any', 'bool',
+ 'dict', 'enumerate', 'filter', 'hasattr', 'hex', 'len', 'list', 'map', 'print',
+ 'max', 'min', 'ord', 'range', 'set', 'sorted', 'sum', 'tuple', 'zip'):
+ self.fns[x] = __builtins__[x]
+
+ def parse(self, filename=None) :
+ if filename is not None :
+ self.lexer_ = feax_lexer.feax_IncludingLexer(filename)
+ self.advance_lexer_(comments=True)
+ return super(feaplus_parser, self).parse()
+
+ def back_lexer_(self):
+ self.lexer_.lexers_[-1].pushback(self.next_token_type_, self.next_token_)
+ self.next_token_type_ = self.cur_token_type_
+ self.next_token_ = self.cur_token_
+ self.next_token_location_ = self.cur_token_location_
+
+ # methods to limit layer violations
+ def define_glyphclass(self, ap_nm, gc) :
+ self.glyphclasses_.define(ap_nm, gc)
+
+ def resolve_glyphclass(self, ap_nm):
+ try:
+ return self.glyphclasses_.resolve(ap_nm)
+ except KeyError:
+ raise FeatureLibError("Glyphclass '{}' missing".format(ap_nm), self.lexer_.location_())
+ return None
+
+ def add_statement(self, val) :
+ self.doc_.statements.append(val)
+
+ def set_baseclass(self, ap_nm) :
+ gc = self.ast.BaseClass(ap_nm)
+ if not hasattr(self.doc_, 'baseClasses') :
+ self.doc_.baseClasses = {}
+ self.doc_.baseClasses[ap_nm] = gc
+ self.define_glyphclass(ap_nm, gc)
+ return gc
+
+ def set_markclass(self, ap_nm) :
+ gc = self.ast.MarkClass(ap_nm)
+ if not hasattr(self.doc_, 'markClasses') :
+ self.doc_.markClasses = {}
+ self.doc_.markClasses[ap_nm] = gc
+ self.define_glyphclass(ap_nm, gc)
+ return gc
+
+
+ # like base class parse_position_base_ & overrides it
+ def parse_position_base_(self, enumerated, vertical):
+ location = self.cur_token_location_
+ self.expect_keyword_("base")
+ if enumerated:
+ raise FeatureLibError(
+ '"enumerate" is not allowed with '
+ 'mark-to-base attachment positioning',
+ location)
+ base = self.parse_glyphclass_(accept_glyphname=True)
+ if self.next_token_ == "<": # handle pos base [glyphs] <anchor> mark @MARKCLASS
+ marks = self.parse_anchor_marks_()
+ else: # handle pos base @BASECLASS mark @MARKCLASS; like base class parse_anchor_marks_
+ marks = []
+ while self.next_token_ == "mark": #TODO: is more than one 'mark' meaningful?
+ self.expect_keyword_("mark")
+ m = self.expect_markClass_reference_()
+ marks.append(m)
+ self.expect_symbol_(";")
+ return self.ast.MarkBasePosStatement(base, marks, location=location)
+
+ # like base class parse_position_mark_ & overrides it
+ def parse_position_mark_(self, enumerated, vertical):
+ location = self.cur_token_location_
+ self.expect_keyword_("mark")
+ if enumerated:
+ raise FeatureLibError(
+ '"enumerate" is not allowed with '
+ 'mark-to-mark attachment positioning',
+ location)
+ baseMarks = self.parse_glyphclass_(accept_glyphname=True)
+ if self.next_token_ == "<": # handle pos mark [glyphs] <anchor> mark @MARKCLASS
+ marks = self.parse_anchor_marks_()
+ else: # handle pos mark @MARKCLASS mark @MARKCLASS; like base class parse_anchor_marks_
+ marks = []
+ while self.next_token_ == "mark": #TODO: is more than one 'mark' meaningful?
+ self.expect_keyword_("mark")
+ m = self.expect_markClass_reference_()
+ marks.append(m)
+ self.expect_symbol_(";")
+ return self.ast.MarkMarkPosStatement(baseMarks, marks, location=location)
+
+ def parse_position_cursive_(self, enumerated, vertical):
+ location = self.cur_token_location_
+ self.expect_keyword_("cursive")
+ if enumerated:
+ raise FeatureLibError(
+ '"enumerate" is not allowed with '
+ 'cursive attachment positioning',
+ location)
+ glyphclass = self.parse_glyphclass_(accept_glyphname=True)
+ if self.next_token_ == "<": # handle pos cursive @glyphClass <anchor entry> <anchor exit>
+ entryAnchor = self.parse_anchor_()
+ exitAnchor = self.parse_anchor_()
+ self.expect_symbol_(";")
+ return self.ast.CursivePosStatement(
+ glyphclass, entryAnchor, exitAnchor, location=location)
+ else: # handle pos cursive @baseClass @baseClass;
+ mc = self.expect_markClass_reference_()
+ return self.ast.CursivePosStatement(glyphclass.markClass, None, mc, location=location)
+
+ def parse_position_ligature_(self, enumerated, vertical):
+ location = self.cur_token_location_
+ self.expect_keyword_("ligature")
+ if enumerated:
+ raise FeatureLibError(
+ '"enumerate" is not allowed with '
+ 'mark-to-ligature attachment positioning',
+ location)
+ ligatures = self.parse_glyphclass_(accept_glyphname=True)
+ marks = [self._parse_anchorclass_marks_()]
+ while self.next_token_ == "ligComponent":
+ self.expect_keyword_("ligComponent")
+ marks.append(self._parse_anchorclass_marks_())
+ self.expect_symbol_(";")
+ return self.ast.MarkLigPosStatement(ligatures, marks, location=location)
+
+ def _parse_anchorclass_marks_(self):
+ """Parses a sequence of [<anchor> | @BASECLASS mark @MARKCLASS]*."""
+ anchorMarks = [] # [(self.ast.Anchor, markClassName)*]
+ while True:
+ if self.next_token_ == "<":
+ anchor = self.parse_anchor_()
+ else:
+ anchor = self.parse_glyphclass_(accept_glyphname=False)
+ if anchor is not None:
+ self.expect_keyword_("mark")
+ markClass = self.expect_markClass_reference_()
+ anchorMarks.append((anchor, markClass))
+ if self.next_token_ == "ligComponent" or self.next_token_ == ";":
+ break
+ return anchorMarks
+
+ # like base class parseMarkClass
+ # but uses BaseClass and BaseClassDefinition which subclass Mark counterparts
+ def parseBaseClass(self):
+ if not hasattr(self.doc_, 'baseClasses'):
+ self.doc_.baseClasses = {}
+ location = self.cur_token_location_
+ glyphs = self.parse_glyphclass_(accept_glyphname=True)
+ anchor = self.parse_anchor_()
+ name = self.expect_class_name_()
+ self.expect_symbol_(";")
+ baseClass = self.doc_.baseClasses.get(name)
+ if baseClass is None:
+ baseClass = self.ast.BaseClass(name)
+ self.doc_.baseClasses[name] = baseClass
+ self.glyphclasses_.define(name, baseClass)
+ bcdef = self.ast.BaseClassDefinition(baseClass, anchor, glyphs, location=location)
+ baseClass.addDefinition(bcdef)
+ return bcdef
+
+ #similar to and overrides parser.parse_substitute_
+ def parse_substitute_(self):
+ assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
+ location = self.cur_token_location_
+ reverse = self.cur_token_ in {"reversesub", "rsub"}
+ old_prefix, old, lookups, values, old_suffix, hasMarks = \
+ self.parse_glyph_pattern_(vertical=False)
+ if any(values):
+ raise FeatureLibError(
+ "Substitution statements cannot contain values", location)
+ new = []
+ if self.next_token_ == "by":
+ keyword = self.expect_keyword_("by")
+ while self.next_token_ != ";":
+ gc = self.parse_glyphclass_(accept_glyphname=True)
+ new.append(gc)
+ elif self.next_token_ == "from":
+ keyword = self.expect_keyword_("from")
+ new = [self.parse_glyphclass_(accept_glyphname=False)]
+ else:
+ keyword = None
+ self.expect_symbol_(";")
+ if len(new) == 0 and not any(lookups):
+ raise FeatureLibError(
+ 'Expected "by", "from" or explicit lookup references',
+ self.cur_token_location_)
+
+ # GSUB lookup type 3: Alternate substitution.
+ # Format: "substitute a from [a.1 a.2 a.3];"
+ if keyword == "from":
+ if reverse:
+ raise FeatureLibError(
+ 'Reverse chaining substitutions do not support "from"',
+ location)
+ # allow classes on lhs
+ if len(old) != 1:
+ raise FeatureLibError(
+ 'Expected single glyph or glyph class before "from"',
+ location)
+ if len(new) != 1:
+ raise FeatureLibError(
+ 'Expected a single glyphclass after "from"',
+ location)
+ if len(old[0].glyphSet()) == 0 or len(new[0].glyphSet()) % len(old[0].glyphSet()) != 0:
+ raise FeatureLibError(
+                    'The length of the glyphclass after "from" must be a multiple of the length of the glyphclass before it',
+ location)
+ return self.ast.AlternateSubstStatement(
+ old_prefix, old[0], old_suffix, new[0], location=location)
+
+ num_lookups = len([l for l in lookups if l is not None])
+
+ # GSUB lookup type 1: Single substitution.
+ # Format A: "substitute a by a.sc;"
+ # Format B: "substitute [one.fitted one.oldstyle] by one;"
+ # Format C: "substitute [a-d] by [A.sc-D.sc];"
+ if (not reverse and len(old) == 1 and len(new) == 1 and
+ num_lookups == 0):
+ glyphs = list(old[0].glyphSet())
+ replacements = list(new[0].glyphSet())
+ if len(replacements) == 1:
+ replacements = replacements * len(glyphs)
+ if len(glyphs) != len(replacements):
+ raise FeatureLibError(
+ 'Expected a glyph class with %d elements after "by", '
+ 'but found a glyph class with %d elements' %
+ (len(glyphs), len(replacements)), location)
+ return self.ast.SingleSubstStatement(
+ old, new,
+ old_prefix, old_suffix,
+ forceChain=hasMarks, location=location
+ )
+
+ # GSUB lookup type 2: Multiple substitution.
+ # Format: "substitute f_f_i by f f i;"
+ if (not reverse and
+ len(old) == 1 and len(new) > 1 and num_lookups == 0):
+ return self.ast.MultipleSubstStatement(old_prefix, old[0], old_suffix, new,
+ hasMarks, location=location)
+
+ # GSUB lookup type 4: Ligature substitution.
+ # Format: "substitute f f i by f_f_i;"
+ if (not reverse and
+ len(old) > 1 and len(new) == 1 and num_lookups == 0):
+ return self.ast.LigatureSubstStatement(old_prefix, old, old_suffix, new[0],
+ forceChain=hasMarks, location=location)
+
+ # GSUB lookup type 8: Reverse chaining substitution.
+ if reverse:
+ if len(old) != 1:
+ raise FeatureLibError(
+ "In reverse chaining single substitutions, "
+ "only a single glyph or glyph class can be replaced",
+ location)
+ if len(new) != 1:
+ raise FeatureLibError(
+ 'In reverse chaining single substitutions, '
+ 'the replacement (after "by") must be a single glyph '
+ 'or glyph class', location)
+ if num_lookups != 0:
+ raise FeatureLibError(
+ "Reverse chaining substitutions cannot call named lookups",
+ location)
+ glyphs = sorted(list(old[0].glyphSet()))
+ replacements = sorted(list(new[0].glyphSet()))
+ if len(replacements) == 1:
+ replacements = replacements * len(glyphs)
+ if len(glyphs) != len(replacements):
+ raise FeatureLibError(
+ 'Expected a glyph class with %d elements after "by", '
+ 'but found a glyph class with %d elements' %
+ (len(glyphs), len(replacements)), location)
+ return self.ast.ReverseChainSingleSubstStatement(
+ old_prefix, old_suffix, old, new, location=location)
+
+ # GSUB lookup type 6: Chaining contextual substitution.
+ assert len(new) == 0, new
+ rule = self.ast.ChainContextSubstStatement(
+ old_prefix, old, old_suffix, lookups, location=location)
+ return rule
+
+ def parse_glyphclass_(self, accept_glyphname):
+ if (accept_glyphname and
+ self.next_token_type_ in (Lexer.NAME, Lexer.CID)):
+ glyph = self.expect_glyph_()
+ return self.ast.GlyphName(glyph, location=self.cur_token_location_)
+ if self.next_token_type_ is Lexer.GLYPHCLASS:
+ self.advance_lexer_()
+ gc = self.glyphclasses_.resolve(self.cur_token_)
+ if gc is None:
+ raise FeatureLibError(
+ "Unknown glyph class @%s" % self.cur_token_,
+ self.cur_token_location_)
+ if isinstance(gc, self.ast.MarkClass):
+ return self.ast.MarkClassName(gc, location=self.cur_token_location_)
+ else:
+ return self.ast.GlyphClassName(gc, location=self.cur_token_location_)
+
+ self.expect_symbol_("[")
+ location = self.cur_token_location_
+ glyphs = self.ast.GlyphClass(location=location)
+ while self.next_token_ != "]":
+ if self.next_token_type_ is Lexer.NAME:
+ glyph = self.expect_glyph_()
+ location = self.cur_token_location_
+ if '-' in glyph and glyph not in self.glyphNames_:
+ start, limit = self.split_glyph_range_(glyph, location)
+ glyphs.add_range(
+ start, limit,
+ self.make_glyph_range_(location, start, limit))
+ elif self.next_token_ == "-":
+ start = glyph
+ self.expect_symbol_("-")
+ limit = self.expect_glyph_()
+ glyphs.add_range(
+ start, limit,
+ self.make_glyph_range_(location, start, limit))
+ else:
+ glyphs.append(glyph)
+ elif self.next_token_type_ is Lexer.CID:
+ glyph = self.expect_glyph_()
+ if self.next_token_ == "-":
+ range_location = self.cur_token_location_
+ range_start = self.cur_token_
+ self.expect_symbol_("-")
+ range_end = self.expect_cid_()
+ glyphs.add_cid_range(range_start, range_end,
+ self.make_cid_range_(range_location,
+ range_start, range_end))
+ else:
+ glyphs.append("cid%05d" % self.cur_token_)
+ elif self.next_token_type_ is Lexer.GLYPHCLASS:
+ self.advance_lexer_()
+ gc = self.glyphclasses_.resolve(self.cur_token_)
+ if gc is None:
+ raise FeatureLibError(
+ "Unknown glyph class @%s" % self.cur_token_,
+ self.cur_token_location_)
+ # fix bug don't output class definition, just the name.
+ if isinstance(gc, self.ast.MarkClass):
+ gcn = self.ast.MarkClassName(gc, location=self.cur_token_location_)
+ else:
+ gcn = self.ast.GlyphClassName(gc, location=self.cur_token_location_)
+ glyphs.add_class(gcn)
+ else:
+ raise FeatureLibError(
+ "Expected glyph name, glyph range, "
+ "or glyph class reference. Found %s" % self.next_token_,
+ self.next_token_location_)
+ self.expect_symbol_("]")
+ return glyphs
+
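+    # Implements the feax "ifclass" extension: the enclosed statements are kept only if the
+    # named glyph class exists and is non-empty, e.g. (hypothetical class name):
+    #   ifclass(@nukta) { ... }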
+ def parseIfClass(self):
+ location = self.cur_token_location_
+ self.expect_symbol_("(")
+ if self.next_token_type_ is Lexer.GLYPHCLASS:
+ self.advance_lexer_()
+ def ifClassTest():
+ gc = self.glyphclasses_.resolve(self.cur_token_)
+ return gc is not None and len(gc.glyphSet())
+ block = self.ast.IfBlock(ifClassTest, 'ifclass', '@'+self.cur_token_, location=location)
+ self.expect_symbol_(")")
+ import inspect # oh this is so ugly!
+ calledby = inspect.stack()[2][3] # called through lambda since extension
+ if calledby == 'parse_block_':
+ self.parse_subblock_(block, False)
+ else:
+ self.parse_statements_block_(block)
+ return block
+ else:
+ raise FeatureLibError("Syntax error missing glyphclass", location)
+
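+    # Implements the feax "ifinfo" extension: the enclosed statements are kept only if the
+    # named fontinfo value matches the regular expression, e.g. (hypothetical key/pattern):
+    #   ifinfo(familyName, "Italic") { ... }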
+ def parseIfInfo(self):
+ location = self.cur_token_location_
+ self.expect_symbol_("(")
+ name = self.expect_name_()
+ self.expect_symbol_(",")
+ reg = self.expect_string_()
+ self.expect_symbol_(")")
+ def ifInfoTest():
+ s = self.fontinfo.get(name, "")
+ return re.search(reg, s)
+ block = self.ast.IfBlock(ifInfoTest, 'ifinfo', '{}, "{}"'.format(name, reg), location=location)
+ import inspect # oh this is so ugly! Instead caller should pass in context
+ calledby = inspect.stack()[2][3] # called through a lambda since extension
+ if calledby == 'parse_block_':
+ self.parse_subblock_(block, False)
+ else:
+ self.parse_statements_block_(block)
+ return block
+
+ def parseKernPairsStatement_(self):
+ location = self.cur_token_location_
+ res = self.ast.KernPairsStatement(self.kerninfo, location)
+ return res
+
+ def parse_statements_block_(self, block):
+ self.expect_symbol_("{")
+ statements = block.statements
+ while self.next_token_ != "}" or self.cur_comments_:
+ self.advance_lexer_(comments=True)
+ if self.cur_token_type_ is Lexer.COMMENT:
+ statements.append(
+ self.ast.Comment(self.cur_token_,
+ location=self.cur_token_location_))
+ elif self.is_cur_keyword_("include"):
+ statements.append(self.parse_include_())
+ elif self.cur_token_type_ is Lexer.GLYPHCLASS:
+ statements.append(self.parse_glyphclass_definition_())
+ elif self.is_cur_keyword_(("anon", "anonymous")):
+ statements.append(self.parse_anonymous_())
+ elif self.is_cur_keyword_("anchorDef"):
+ statements.append(self.parse_anchordef_())
+ elif self.is_cur_keyword_("languagesystem"):
+ statements.append(self.parse_languagesystem_())
+ elif self.is_cur_keyword_("lookup"):
+ statements.append(self.parse_lookup_(vertical=False))
+ elif self.is_cur_keyword_("markClass"):
+ statements.append(self.parse_markClass_())
+ elif self.is_cur_keyword_("feature"):
+ statements.append(self.parse_feature_block_())
+ elif self.is_cur_keyword_("table"):
+ statements.append(self.parse_table_())
+ elif self.is_cur_keyword_("valueRecordDef"):
+ statements.append(
+ self.parse_valuerecord_definition_(vertical=False))
+ elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions:
+ statements.append(self.extensions[self.cur_token_](self))
+ elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";":
+ continue
+ else:
+ raise FeatureLibError(
+ "Expected feature, languagesystem, lookup, markClass, "
+ "table, or glyph class definition, got {} \"{}\"".format(self.cur_token_type_, self.cur_token_),
+ self.cur_token_location_)
+
+ self.expect_symbol_("}")
+ # self.expect_symbol_(";") # can't have }; since tokens are space separated
+
+ def parse_subblock_(self, block, vertical, stylisticset=False,
+ size_feature=None, cv_feature=None):
+ self.expect_symbol_("{")
+ for symtab in self.symbol_tables_:
+ symtab.enter_scope()
+
+ statements = block.statements
+ while self.next_token_ != "}" or self.cur_comments_:
+ self.advance_lexer_(comments=True)
+ if self.cur_token_type_ is Lexer.COMMENT:
+ statements.append(self.ast.Comment(
+ self.cur_token_, location=self.cur_token_location_))
+ elif self.cur_token_type_ is Lexer.GLYPHCLASS:
+ statements.append(self.parse_glyphclass_definition_())
+ elif self.is_cur_keyword_("anchorDef"):
+ statements.append(self.parse_anchordef_())
+ elif self.is_cur_keyword_({"enum", "enumerate"}):
+ statements.append(self.parse_enumerate_(vertical=vertical))
+ elif self.is_cur_keyword_("feature"):
+ statements.append(self.parse_feature_reference_())
+ elif self.is_cur_keyword_("ignore"):
+ statements.append(self.parse_ignore_())
+ elif self.is_cur_keyword_("language"):
+ statements.append(self.parse_language_())
+ elif self.is_cur_keyword_("lookup"):
+ statements.append(self.parse_lookup_(vertical))
+ elif self.is_cur_keyword_("lookupflag"):
+ statements.append(self.parse_lookupflag_())
+ elif self.is_cur_keyword_("markClass"):
+ statements.append(self.parse_markClass_())
+ elif self.is_cur_keyword_({"pos", "position"}):
+ statements.append(
+ self.parse_position_(enumerated=False, vertical=vertical))
+ elif self.is_cur_keyword_("script"):
+ statements.append(self.parse_script_())
+ elif (self.is_cur_keyword_({"sub", "substitute",
+ "rsub", "reversesub"})):
+ statements.append(self.parse_substitute_())
+ elif self.is_cur_keyword_("subtable"):
+ statements.append(self.parse_subtable_())
+ elif self.is_cur_keyword_("valueRecordDef"):
+ statements.append(self.parse_valuerecord_definition_(vertical))
+ elif stylisticset and self.is_cur_keyword_("featureNames"):
+ statements.append(self.parse_featureNames_(stylisticset))
+ elif cv_feature and self.is_cur_keyword_("cvParameters"):
+ statements.append(self.parse_cvParameters_(cv_feature))
+ elif size_feature and self.is_cur_keyword_("parameters"):
+ statements.append(self.parse_size_parameters_())
+ elif size_feature and self.is_cur_keyword_("sizemenuname"):
+ statements.append(self.parse_size_menuname_())
+ elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions:
+ statements.append(self.extensions[self.cur_token_](self))
+ elif self.cur_token_ == ";":
+ continue
+ else:
+ raise FeatureLibError(
+ "Expected glyph class definition or statement: got {} {}".format(self.cur_token_type_, self.cur_token_),
+ self.cur_token_location_)
+
+ self.expect_symbol_("}")
+ for symtab in self.symbol_tables_:
+ symtab.exit_scope()
+
+ def collect_block_(self):
+ self.expect_symbol_("{")
+ tokens = [(self.cur_token_type_, self.cur_token_)]
+ count = 1
+ while count > 0:
+ self.advance_lexer_()
+ if self.cur_token_ == "{":
+ count += 1
+ elif self.cur_token_ == "}":
+ count -= 1
+ tokens.append((self.cur_token_type_, self.cur_token_))
+ return tokens
+
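+    # Implements the feax "do" extension, which re-runs its block over generated values,
+    # substituting $variables in the block. A rough sketch (hypothetical names and values):
+    #   do  for g = @cons;
+    #       let a = APx(g, "top");
+    #       if a > 500; { pos base $g <anchor $a 700> mark @TOP_MARKS; }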
+ def parseDoStatement_(self):
+ location = self.cur_token_location_
+ substatements = []
+ ifs = []
+ while True:
+ self.advance_lexer_()
+ if self.is_cur_keyword_("for"):
+ substatements.append(self.parseDoFor_())
+ elif self.is_cur_keyword_("let"):
+ substatements.append(self.parseDoLet_())
+ elif self.is_cur_keyword_("if"):
+ ifs.append(self.parseDoIf_())
+ elif self.cur_token_ == '{':
+ self.back_lexer_()
+ ifs.append(self.parseEmptyIf_())
+ break
+ elif self.cur_token_type_ == Lexer.COMMENT:
+ continue
+ else:
+ self.back_lexer_()
+ break
+ res = self.ast.Block()
+ lex = self.lexer_.lexers_[-1]
+ for s in self.DoIterateValues_(substatements):
+ for i in ifs:
+ (_, v) = next(i.items(s))
+ if v:
+ lex.scope = s
+ #import pdb; pdb.set_trace()
+ lex.pushstack(('tokens', i.block[:]))
+ self.advance_lexer_()
+ self.advance_lexer_()
+ try:
+ import inspect # oh this is so ugly!
+ calledby = inspect.stack()[2][3] # called through lambda since extension
+ if calledby == 'parse_block_':
+ self.parse_subblock_(res, False)
+ else:
+ self.parse_statements_block_(res)
+ except Exception as e:
+ logging.warning("In do context: " + str(s) + " lexer: " + repr(lex) + " at: " + str((self.cur_token_, self.next_token_)))
+ raise
+ return res
+
+ def DoIterateValues_(self, substatements):
+ def updated(d, *a, **kw):
+ d.update(*a, **kw)
+ return d
+ results = [{}]
+ #import pdb; pdb.set_trace()
+ for s in substatements:
+ newresults = []
+ for x in results:
+ for r in s.items(x):
+ c = x.copy()
+ c.update(r)
+ newresults.append(c)
+ results = newresults
+ for r in results:
+ yield r
+
+ def parseDoFor_(self):
+ location = self.cur_token_location_
+ self.advance_lexer_()
+ if self.cur_token_type_ is Lexer.NAME:
+ name = self.cur_token_
+ else:
+ raise FeatureLibError("Bad name in do for statement", location)
+ self.expect_symbol_("=")
+ glyphs = self.parse_glyphclass_(True)
+ self.expect_symbol_(";")
+ res = self.ast.DoForSubStatement(name, glyphs, location=location)
+ return res
+
+ def parseDoLet_(self):
+ # import pdb; pdb.set_trace()
+ location = self.cur_token_location_
+ self.advance_lexer_()
+ names = []
+ while self.cur_token_type_ == Lexer.NAME:
+ names.append(self.cur_token_)
+ if self.next_token_type_ is Lexer.SYMBOL:
+ if self.next_token_ == ",":
+ self.advance_lexer_()
+ elif self.next_token_ == "=":
+ break
+ self.advance_lexer_()
+ else:
+ raise FeatureLibError("Expected '=', found '%s'" % self.cur_token_,
+ self.cur_token_location_)
+ lex = self.lexer_.lexers_[-1]
+ lex.scan_over_(Lexer.CHAR_WHITESPACE_)
+ start = lex.pos_
+ lex.scan_until_(";")
+ expr = lex.text_[start:lex.pos_]
+ self.advance_lexer_()
+ self.expect_symbol_(";")
+ return self.ast.DoLetSubStatement(names, expr, self, location=location)
+
+ def parseDoIf_(self):
+ location = self.cur_token_location_
+ lex = self.lexer_.lexers_[-1]
+ start = lex.pos_
+ lex.scan_until_(";")
+ expr = self.next_token_ + " " + lex.text_[start:lex.pos_]
+ self.advance_lexer_()
+ self.expect_symbol_(";")
+ block = self.collect_block_()
+ keep = (self.next_token_type_, self.next_token_)
+ block = [keep] + block + [keep]
+ return self.ast.DoIfSubStatement(expr, self, block, location=location)
+
+ def parseEmptyIf_(self):
+ location = self.cur_token_location_
+ lex = self.lexer_.lexers_[-1]
+ start = lex.pos_
+ expr = "True"
+ block = self.collect_block_()
+ keep = (self.next_token_type_, self.next_token_)
+ block = [keep] + block + [keep]
+ return self.ast.DoIfSubStatement(expr, self, block, location=location)
+
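+    # Parses feax "def" blocks, which embed a Python function definition, e.g. (hypothetical):
+    #   def halfwidth(g) {
+    #       return ADVx(g) // 2
+    #   } halfwidth;
+    # The body is exec'd (after safeeval screening) and made available to do/let expressions.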
+ def parseDefStatement_(self):
+ lex = self.lexer_.lexers_[-1]
+ start = lex.pos_
+ lex.scan_until_("{")
+ fname = self.next_token_
+ fsig = fname + lex.text_[start:lex.pos_].strip()
+ tag = re.escape(fname)
+ _, content, location = lex.scan_anonymous_block(tag)
+ self.advance_lexer_()
+ start = lex.pos_
+ lex.scan_until_(";")
+ endtag = lex.text_[start:lex.pos_].strip()
+ assert(fname == endtag)
+ self.advance_lexer_()
+ self.advance_lexer_()
+ funcstr = "def " + fsig + ":\n" + content
+ if astx.safeeval(funcstr):
+ exec(funcstr, self.fns)
+ return self.ast.Comment("# def " + fname)
diff --git a/lib/silfont/ftml.py b/lib/silfont/ftml.py
new file mode 100644
index 0000000..9f63cab
--- /dev/null
+++ b/lib/silfont/ftml.py
@@ -0,0 +1,433 @@
+#!/usr/bin/env python
+'Classes and functions for use handling FTML objects in pysilfont scripts'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from xml.etree import ElementTree as ET
+from fontTools import ttLib
+import re
+from xml.sax.saxutils import quoteattr
+import silfont.core
+import silfont.etutil as ETU
+
+# Regular expression for parsing font name
+fontspec = re.compile(r"""^ # beginning of string
+ (?P<rest>[A-Za-z ]+?) # Font Family Name
+ \s*(?P<bold>Bold)? # Bold
+ \s*(?P<italic>Italic)? # Italic
+ \s*(?P<regular>Regular)? # Regular
+ $""", re.VERBOSE) # end of string
+
+class Fxml(ETU.ETelement) :
+ def __init__(self, file = None, xmlstring = None, testgrouplabel = None, logger = None, params = None) :
+ self.logger = logger if logger is not None else silfont.core.loggerobj()
+ self.params = params if params is not None else silfont.core.parameters()
+ self.parseerrors=None
+ if not exactlyoneof(file, xmlstring, testgrouplabel) : self.logger.log("Must supply exactly one of file, xmlstring and testgrouplabel","X")
+
+ if testgrouplabel : # Create minimal valid ftml
+ xmlstring = '<ftml version="1.0"><head></head><testgroup label=' + quoteattr(testgrouplabel) +'></testgroup></ftml>'
+
+ if file and not hasattr(file, 'read') : self.logger.log("'file' is not a file object", "X") # ET.parse would also work on file name, but other code assumes file object
+
+ try :
+ if file :
+ self.element = ET.parse(file).getroot()
+ else :
+ self.element = ET.fromstring(xmlstring)
+ except Exception as e :
+ self.logger.log("Error parsing FTML input: " + str(e), "S")
+
+ super(Fxml,self).__init__(self.element)
+
+ self.version = getattrib(self.element,"version")
+ if self.version != "1.0" : self.logger.log("ftml items must have a version of 1.0", "S")
+
+ self.process_subelements((
+ ("head", "head" , Fhead, True, False),
+ ("testgroup", "testgroups", Ftestgroup, True, True )),
+ offspec = False)
+
+ self.stylesheet = {}
+ if file : # If reading from file, look to see if a stylesheet is present in xml processing instructions
+ file.seek(0) # Have to re-read file since ElementTree does not support processing instructions
+ for line in file :
+ if line[0:2] == "<?" :
+                    line = line.strip()[:-2] # Strip white space and remove trailing ?>
+ parts = line.split(" ")
+ if parts[0] == "<?xml-stylesheet" :
+ for part in parts[1:] :
+ (name,value) = part.split("=")
+ self.stylesheet[name] = value[1:-1] # Strip quotes
+ break
+ else :
+ break
+
+ self.filename = file if file else None
+
+ if self.parseerrors:
+ self.logger.log("Errors parsing ftml element:","E")
+ for error in self.parseerrors : self.logger.log(" " + error,"E")
+ self.logger.log("Invalid FTML", "S")
+
+ def save(self, file) :
+ self.outxmlstr=""
+ element = self.create_element()
+ etw = ETU.ETWriter(element, inlineelem = ["em"])
+ self.outxmlstr = etw.serialize_xml()
+ file.write(self.outxmlstr)
+
+ def create_element(self) : # Create a new Elementtree element based on current object contents
+ element = ET.Element('ftml', version = str(self.version))
+ if self.stylesheet : # Create dummy .pi attribute for style sheet processing instruction
+ pi = "xml-stylesheet"
+ for attrib in sorted(self.stylesheet) : pi = pi + ' ' + attrib + '="' + self.stylesheet[attrib] + '"' ## Spec is not clear about what order attributes should be in
+ element.attrib['.pi'] = pi
+ element.append(self.head.create_element())
+ for testgroup in self.testgroups : element.append(testgroup.create_element())
+ return element
+
+class Fhead(ETU.ETelement) :
+ def __init__(self, parent, element) :
+ self.parent = parent
+ self.logger = parent.logger
+ super(Fhead,self).__init__(element)
+
+ self.process_subelements((
+ ("comment", "comment", None, False, False),
+ ("fontscale", "fontscale", None, False, False),
+ ("fontsrc", "fontsrc", Ffontsrc, False, True),
+ ("styles", "styles", ETU.ETelement, False, False ), # Initially just basic elements; Fstyles created below
+ ("title", "title", None, False, False),
+ ("widths", "widths", _Fwidth, False, False)),
+ offspec = True)
+
+ if self.fontscale is not None : self.fontscale = int(self.fontscale)
+ if self.styles is not None :
+ styles = {}
+ for styleelem in self.styles["style"] :
+ style = Fstyle(self, element = styleelem)
+ styles[style.name] = style
+ if style.parseerrors:
+ name = "" if style.name is None else style.name
+ self.parseerrors.append("Errors parsing style element: " + name)
+ for error in style.parseerrors : self.parseerrors.append(" " + error)
+ self.styles = styles
+ if self.widths is not None : self.widths = self.widths.widthsdict # Convert _Fwidths object into dict
+
+ self.elements = dict(self._contents) # Dictionary of all elements, particularly for handling non-standard elements
+
+ def findstyle(self, name = None, feats = None, lang = None) :
+ if self.styles is not None:
+ for s in self.styles :
+ style = self.styles[s]
+ if style.feats == feats and style.lang == lang :
+ if name is None or name == style.name : return style # if name is supplied it must match
+ return None
+
+ def addstyle(self, name, feats = None, lang = None) : # Return style if it exists otherwise create new style with newname
+ s = self.findstyle(name, feats, lang)
+ if s is None :
+ if self.styles is None:
+ self.styles = {}
+ if name in self.styles : self.logger.log("Adding duplicate style name " + name, "X")
+ s = Fstyle(self, name = name, feats = feats, lang = lang)
+ self.styles[name] = s
+ return s
+
+ def create_element(self) :
+ element = ET.Element('head')
+ # Add in-spec sub-elements in alphabetic order
+ if self.comment : x = ET.SubElement(element, 'comment') ; x.text = self.comment
+ if self.fontscale : x = ET.SubElement(element, 'fontscale') ; x.text = str(self.fontscale)
+ if isinstance(self.fontsrc, list):
+ # Allow multiple fontsrc
+ for fontsrc in self.fontsrc:
+ element.append(fontsrc.create_element())
+ elif self.fontsrc is not None:
+ element.append(self.fontsrc.create_element())
+ if self.styles :
+ x = ET.SubElement(element, 'styles')
+ for style in sorted(self.styles) : x.append(self.styles[style].create_element())
+ if self.title : y = ET.SubElement(element, 'title') ; y.text = self.title
+ if not self.widths is None :
+ x = ET.SubElement(element, 'widths')
+ for width in sorted(self.widths) :
+ if self.widths[width] is not None: x.set(width, self.widths[width])
+
+ # Add any non-spec elements
+ for el in sorted(self.elements) :
+ if el not in ("comment", "fontscale", "fontsrc", "styles", "title", "widths") :
+ for elem in self.elements[el] : element.append(elem)
+
+ return element
+
+class Ffontsrc(ETU.ETelement) :
+ # This library only supports a single font in the fontsrc as recommended by the FTML spec
+ # Currently it only supports simple url() and local() values
+
+ def __init__(self, parent, element = None, text = None, label=None) :
+ self.parent = parent
+ self.logger = parent.logger
+ self.parseerrors = []
+
+ if not exactlyoneof(element, text) : self.logger.log("Must supply exactly one of element and text","X")
+
+ try:
+ (txt, url, local) = parsefontsrc(text, allowplain=True) if text else parsefontsrc(element.text)
+ except ValueError as e :
+ txt = text if text else element.text
+ self.parseerrors.append(str(e) + ": " + txt)
+ else :
+ if text : element = ET.Element("fontsrc") ; element.text = txt
+ if label : element.set('label', label)
+ super(Ffontsrc,self).__init__(element)
+ self.process_attributes((
+ ("label", "label", False),),
+ others=False)
+ self.text = txt
+ self.url = url
+ self.local = local
+ if self.local : # Parse font name to find if bold, italic etc
+                results = re.match(fontspec, self.local) ## Does not cope with -, e.g. Gentium-Bold. Should it?
+ self.fontfamily = results.group('rest')
+ self.bold = results.group('bold') != None
+ self.italic = results.group('italic') != None
+ else :
+ self.fontfamily = None # If details are needed call getweights()
+
+ def addfontinfo(self) : # set fontfamily, bold and italic by looking inside font
+ (ff, bold, italic) = getfontinfo(self.url)
+ self.fontfamily = ff
+ self.bold = bold
+ self.italic = italic
+
+ def create_element(self) :
+ element = ET.Element("fontsrc")
+ element.text = self.text
+ if self.label : element.set("label", self.label)
+ return element
+
+class Fstyle(ETU.ETelement) :
+ def __init__(self, parent, element = None, name = None, feats = None, lang = None) :
+ self.parent = parent
+ self.logger = parent.logger
+ if element is not None :
+            if name or feats or lang : self.logger.log("Can't supply element and other parameters", "X")
+ else :
+ if name is None : self.logger.log("Must supply element or name to Fstyle", "X")
+ element = self.element = ET.Element("style", name = name)
+ if feats is not None :
+ if type(feats) is dict : feats = self.dict_to_string(feats)
+ element.set('feats',feats)
+ if lang is not None : element.set('lang', lang)
+ super(Fstyle,self).__init__(element)
+
+ self.process_attributes((
+ ("feats", "feats", False),
+ ("lang", "lang", False),
+ ("name", "name", True)),
+ others = False)
+
+ if type(self.feats) is str : self.feats = self.string_to_dict(self.feats)
+
+ def string_to_dict(self, string) : # Split string on ',', then add to dict splitting on " " and removing quotes
+ dict={}
+ for f in string.split(','):
+ f = f.strip()
+ m = re.match(r'''(?P<quote>['"])(\w{4})(?P=quote)\s+(\d+|on|off)$''', f)
+ if m:
+ dict[m.group(2)] = m.group(3)
+ else:
+ self.logger.log(f'Invalid feature syntax "{f}"', 'E')
+ return dict
+
+ def dict_to_string(self, dict) :
+ str=""
+ for name in sorted(dict) :
+ if dict[name] is not None : str += "'" + name + "' " + dict[name] + ", "
+ str = str[0:-2] # remove final ", "
+ return str
+
+ def create_element(self) :
+ element = ET.Element("style", name = self.name)
+ if self.feats : element.set("feats", self.dict_to_string(self.feats))
+ if self.lang : element.set("lang", self.lang)
+ return element
+
+
+class _Fwidth(ETU.ETelement) : # Only used temporarily whilst parsing xml
+ def __init__(self, parent, element) :
+ super(_Fwidth,self).__init__(element)
+ self.parent = parent
+ self.logger = parent.logger
+
+ self.process_attributes((
+ ("comment", "comment", False),
+ ("label", "label", False),
+ ("string", "string", False),
+ ("stylename", "stylename", False),
+ ("table", "table", False)),
+ others = False)
+ self.widthsdict = {
+ "comment": self.comment,
+ "label": self.label,
+ "string": self.string,
+ "stylename": self.stylename,
+ "table": self.table}
+
+class Ftestgroup(ETU.ETelement) :
+ def __init__(self, parent, element = None, label = None) :
+ self.parent = parent
+ self.logger = parent.logger
+ if not exactlyoneof(element, label) : self.logger.log("Must supply exactly one of element and label","X")
+
+ if label : element = ET.Element("testgroup", label = label)
+
+ super(Ftestgroup,self).__init__(element)
+
+ self.subgroup = True if type(parent) is Ftestgroup else False
+ self.process_attributes((
+ ("background", "background", False),
+ ("label", "label", True)),
+ others = False)
+ self.process_subelements((
+ ("comment", "comment", None, False, False),
+ ("test", "tests", Ftest, False, True),
+ ("testgroup", "testgroups", Ftestgroup, False, True)),
+ offspec = False)
+ if self.subgroup and self.testgroups != [] : parent.parseerrors.append("Only one level of testgroup nesting permitted")
+
+ # Merge any sub-testgroups into tests
+ if self.testgroups != [] :
+ tests = []
+ tg = list(self.testgroups) # Want to preserve original list
+ for elem in self.element :
+ if elem.tag == "test":
+ tests.append(self.tests.pop(0))
+ elif elem.tag == "testgroup" :
+ tests.append(tg.pop(0))
+ self.tests = tests
+
+ def create_element(self) :
+ element = ET.Element("testgroup")
+ if self.background : element.set("background", self.background)
+ element.set("label", self.label)
+ if self.comment : x = ET.SubElement(element, 'comment') ; x.text = self.comment
+ for test in self.tests : element.append(test.create_element())
+ return element
+
+class Ftest(ETU.ETelement) :
+ def __init__(self, parent, element = None, label = None, string = None) :
+ self.parent = parent
+ self.logger = parent.logger
+ if not exactlyoneof(element, (label, string)) : self.logger.log("Must supply exactly one of element and label/string","X")
+
+ if label :
+ element = ET.Element("test", label = label)
+ x = ET.SubElement(element,"string") ; x.text = string
+
+ super(Ftest,self).__init__(element)
+
+ self.process_attributes((
+ ("background", "background", False),
+ ("label", "label", True),
+ ("rtl", "rtl", False),
+ ("stylename", "stylename", False)),
+ others = False)
+
+ self.process_subelements((
+ ("comment", "comment", None, False, False),
+ ("string", "string", _Fstring, True, False)),
+ offspec = False)
+
+ self.string = self.string.string # self.string initially a temporary _Fstring element
+
+ def str(self, noems = False) : # Return formatted version of string
+ string = self.string
+ if noems :
+ string = string.replace("<em>","")
+ string = string.replace("</em>","")
+ return string ## Other formatting options to be added as needed cf ftml2odt
+
+ def create_element(self) :
+ element = ET.Element("test")
+ if self.background : element.set("background", self.background)
+ element.set("label", self.label)
+ if self.rtl : element.set("rtl", self.rtl)
+ if self.stylename : element.set("stylename", self.stylename)
+ if self.comment : x = ET.SubElement(element, "comment") ; x.text = self.comment
+ x = ET.SubElement(element, "string") ; x.text = self.string
+
+ return element
+
+class _Fstring(ETU.ETelement) : # Only used temporarily whilst parsing xml
+ def __init__(self, parent, element = None) :
+ self.parent = parent
+ self.logger = parent.logger
+ super(_Fstring,self).__init__(element)
+ self.process_subelements((("em", "em", ETU.ETelement,False, True),), offspec = False)
+ # Need to build text of string to include <em> subelements
+ self.string = element.text if element.text else ""
+ for em in self.em :
+ self.string += "<em>{}</em>{}".format(em.element.text, em.element.tail)
+
+def getattrib(element,attrib) :
+ return element.attrib[attrib] if attrib in element.attrib else None
+
+def exactlyoneof( *args ) : # Check one and only one of args is not None
+
+ last = args[-1] # Check if last argument is a tuple - in which case
+ if type(last) is tuple : # either all or none of list must be None
+ for test in last[1:] :
+            if (test is None) != (last[0] is None) : return False
+ args = list(args) # Convert to list so last val can be changed
+ args[-1] = last[0] # Now valid to test on any item in tuple
+
+ one = False
+ for test in args :
+ if test is not None :
+ if one : return False # already have found one not None
+ one = True
+ if one : return True
+ return False
+
+def parsefontsrc(text, allowplain = False) : # Check fontsrc text is valid and return normalised text, url and local values
+ ''' - if multiple (fallback) fonts are specified, just process the first one
+ - just handles simple url() or local() formats
+ - if allowplain is set, allows text without url() or local() and decides which based on "." in text '''
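+    # e.g. (hypothetical path) parsefontsrc("url(results/Font-Regular.ttf)")
+    #      returns ("url(results/Font-Regular.ttf)", "results/Font-Regular.ttf", None)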
+ text = text.split(",")[0] # If multiple (fallback) fonts are specified, just process the first one
+ #if allowplain and not re.match(r"^(url|local)[(][^)]+[)]",text) : # Allow for text without url() or local() form
+ if allowplain and not "(" in text : # Allow for text without url() or local() form
+ plain = True
+ if "." in text :
+ type = "url"
+ else :
+ type = "local"
+ else :
+ type = text.split("(")[0]
+ if type == "url" :
+ text = text.split("(")[1][:-1].strip()
+ elif type == "local" :
+ text = text.split("(")[1][:-1].strip()
+ else : raise ValueError("Invalid fontsrc string")
+ if type == "url" :
+ return ("url("+text+")", text, None)
+ else :
+ return ("local("+text+")", None , text)
+
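+# Illustrative results (assumed example inputs):
+#   parsefontsrc("url(../results/Font-Regular.ttf)") -> ("url(../results/Font-Regular.ttf)", "../results/Font-Regular.ttf", None)
+#   parsefontsrc("local(Andika)")                     -> ("local(Andika)", None, "Andika")
+#   parsefontsrc("Font-Regular.ttf", allowplain=True) -> ("url(Font-Regular.ttf)", "Font-Regular.ttf", None)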
+
+def getfontinfo(filename) : # peek inside the font for the name, weight, style
+ f = ttLib.TTFont(filename)
+ # take name from name table, NameID 1, platform ID 3, Encoding ID 1 (possible fallback platformID 1, EncodingID =0)
+ n = f['name'] # name table from font
+ fontname = n.getName(1,3,1).toUnicode() # nameID 1 = Font Family name
+ # take bold and italic info from OS/2 table, fsSelection bits 0 and 5
+ o = f['OS/2'] # OS/2 table
+ italic = (o.fsSelection & 1) > 0
+ bold = (o.fsSelection & 32) > 0
+ return (fontname, bold, italic)
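+# Minimal usage sketch for getfontinfo() (the path is an assumed example):
+#   familyname, bold, italic = getfontinfo("results/Font-Regular.ttf")
+#   # familyname - family name from the name table (nameID 1, platform 3, encoding 1)
+#   # bold       - True if OS/2 fsSelection bit 5 is set
+#   # italic     - True if OS/2 fsSelection bit 0 is set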
+
diff --git a/lib/silfont/ftml_builder.py b/lib/silfont/ftml_builder.py
new file mode 100644
index 0000000..eb7e73b
--- /dev/null
+++ b/lib/silfont/ftml_builder.py
@@ -0,0 +1,750 @@
+#!/usr/bin/env python
+"""classes and functions for building ftml tests from glyph_data.csv and UFO"""
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from silfont.ftml import Fxml, Ftestgroup, Ftest, Ffontsrc
+from palaso.unicode.ucd import get_ucd
+from itertools import product
+import re
+import collections.abc
+
+# This module comprises two related functionalities:
+# 1. The FTML object which acts as a staging object for ftml test data. The methods of this class
+# permit a gradual build-up of an ftml file, e.g.,
+#
+# startTestGroup(...)
+# setFeatures(...)
+# addToTest(...)
+# addToTest(...)
+# clearFeatures(...)
+# setLang(...)
+# addToTest(...)
+# closeTestGroup(...)
+# ...
+# writeFile(...)
+#
+# The module is clever enough, for example, to automatically close a test when changing features, languages or direction.
+#
+# 2. The FTMLBuilder object which reads and processes glyph_data.csv and provides assistance in iterating over
+# the characters, features, and languages that should be supported by the font, e.g.:
+#
+# ftml.startTestGroup('Encoded characters')
+# for uid in sorted(builder.uids()):
+# if uid < 32: continue
+# c = builder.char(uid)
+# for featlist in builder.permuteFeatures(uids=[uid]):
+# ftml.setFeatures(featlist)
+# builder.render([uid], ftml)
+# ftml.clearFeatures()
+# for langID in sorted(c.langs):
+# ftml.setLang(langID)
+# builder.render([uid], ftml)
+# ftml.clearLang()
+#
+# See examples/psfgenftml.py for ideas
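+#
+# A concrete sketch of the staging flow above (argument values are illustrative only):
+#
+#   ftml = FTML("Example test doc", logger, fontsrc="url(../results/Font-Regular.ttf)")
+#   ftml.startTestGroup("Sample group")
+#   ftml.setFeatures([["cv02", "1"]])
+#   ftml.addToTest(0x0041, "A", label="U+0041", comment="LATIN CAPITAL LETTER A")
+#   ftml.clearFeatures()
+#   ftml.closeTestGroup()
+#   ftml.writeFile("Sample.xml")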
+
+class FTML(object):
+ """a staging class for collecting ftml content and finally writing the xml"""
+
+ # Assumes no nesting of test groups
+
+ def __init__(self, title, logger, comment = None, fontsrc = None, fontlabel = None, fontscale = None,
+ widths = None, rendercheck = True, xslfn = None, defaultrtl = False):
+ self.logger = logger
+ # Initialize an Fxml object
+ fxml = Fxml(testgrouplabel = "dummy")
+ fxml.stylesheet = {'type': 'text/xsl', 'href': xslfn if xslfn is not None else 'ftml.xsl'}
+ fxml.head.title = title
+ fxml.head.comment = comment
+ if isinstance(fontsrc, (tuple, list)):
+ # Allow multiple fontsrc
+ fxml.head.fontsrc = [Ffontsrc(fxml.head, text=fontsrc,
+ label=fontlabel[i] if fontlabel is not None and i < len(fontlabel) else None)
+ for i, fontsrc in enumerate(fontsrc)]
+ elif fontsrc:
+ fxml.head.fontsrc = Ffontsrc(fxml.head, text=fontsrc, label=fontlabel)
+
+ if fontscale: fxml.head.fontscale = int(fontscale)
+ if widths: fxml.head.widths = widths
+ fxml.testgroups.pop() # Remove dummy test group
+ # Save object
+ self._fxml = fxml
+ # Initialize state
+ self._curTest = None
+ self.closeTestGroup()
+ self.defaultRTL = defaultrtl
+ # Add first testgroup if requested
+ if rendercheck:
+ self.startTestGroup("Rendering Check", background="#F0F0F0")
+ self.addToTest(None, "RenderingUnknown", "check", rtl = False)
+ self.closeTest()
+ self.closeTestGroup()
+
+ _colorMap = {
+ 'aqua': '#00ffff',
+ 'black': '#000000',
+ 'blue': '#0000ff',
+ 'fuchsia': '#ff00ff',
+ 'green': '#008000',
+ 'grey': '#808080',
+ 'lime': '#00ff00',
+ 'maroon': '#800000',
+ 'navy': '#000080',
+ 'olive': '#808000',
+ 'purple': '#800080',
+ 'red': '#ff0000',
+ 'silver': '#c0c0c0',
+ 'teal': '#008080',
+ 'white': '#ffffff',
+ 'yellow': '#ffff00',
+ 'orange': '#ffa500'
+ }
+
+ def _getColor(self, color): # instance method so that self.logger is available for warnings
+ if color is None or len(color) == 0:
+ return None
+ color = color.lower()
+ if color in FTML._colorMap:
+ return FTML._colorMap[color]
+ if re.match(r'#[0-9a-f]{6}$', color):
+ return color
+ self.logger.log(f'Color "{color}" not understood; ignored', 'W')
+ return None
+
+ def closeTest(self, comment = None):
+ if self._curTest:
+ if comment is not None:
+ self._curTest.comment = comment
+ if self._curColor:
+ self._curTest.background = self._curColor
+ self._curTest = None
+ self._lastUID = None
+ self._lastRTL = None
+
+ def addToTest(self, uid, s = "", label = None, comment = None, rtl = None):
+ if rtl is None: rtl = self.defaultRTL
+ if (self._lastUID and uid and uid not in range(self._lastUID, self._lastUID + 2))\
+ or (self._lastRTL is not None and rtl != self._lastRTL):
+ self.closeTest()
+ self._lastUID = uid
+ self._lastRTL = rtl
+ if self._curTestGroup is None:
+ # Create a new Ftestgroup
+ self.startTestGroup("Group")
+ if self._curTest is None:
+ # Create a new Ftest
+ if label is None:
+ label = "U+{0:04X}".format(uid) if uid is not None else "test"
+ test = Ftest(self._curTestGroup, label = label, string = '')
+ if comment:
+ test.comment = comment
+ if rtl: test.rtl = "True"
+ # Construct stylename and add style if needed:
+ x = ['{}_{}'.format(t,v) for t,v in self._curFeatures.items()] if self._curFeatures else []
+ if self._curLang:
+ x.insert(0,self._curLang)
+ if len(x):
+ test.stylename = '_'.join(x)
+ self._fxml.head.addstyle(test.stylename, feats = self._curFeatures, lang = self._curLang)
+ # Append to current test group
+ self._curTestGroup.tests.append(test)
+ self._curTest = test
+ if len(self._curTest.string): self._curTest.string += ' '
+ # Special hack until we get to python3 with full unicode support
+ self._curTest.string += ''.join([ c if ord(c) < 128 else '\\u{0:06X}'.format(ord(c)) for c in s ])
+ # self._curTest.string += s
+
+ def setFeatures(self, features):
+ # features can be None or a list; list elements can be:
+ # None
+ # a feature setting in the form [tag,value]
+ if features is None:
+ return self.clearFeatures()
+ features = [x for x in features if x]
+ if len(features) == 0:
+ return self.clearFeatures()
+ features = dict(features) # Convert to a dictionary -- this is what we'll keep.
+ if features != self._curFeatures:
+ self.closeTest()
+ self._curFeatures = features
+
+ def clearFeatures(self):
+ if self._curFeatures is not None:
+ self.closeTest()
+ self._curFeatures = None
+
+ def setLang(self, langID):
+ if langID != self._curLang:
+ self.closeTest()
+ self._curLang = langID
+
+ def clearLang(self):
+ if self._curLang:
+ self.closeTest()
+ self._curLang = None
+
+ def setBackground(self, color):
+ color = self._getColor(color)
+ if color != self._curColor:
+ self.closeTest()
+ self._curColor = color
+
+ def clearBackground(self):
+ if self._curColor is not None:
+ self.closeTest()
+ self._curColor = None
+
+ def closeTestGroup(self):
+ self.closeTest()
+ self._curTestGroup = None
+ self._curFeatures = None
+ self._curLang = None
+ self._curColor = None
+
+ def startTestGroup(self, label, background = None):
+ if self._curTestGroup is not None:
+ if label == self._curTestGroup.label:
+ return
+ self.closeTestGroup()
+ # Add new test group
+ self._curTestGroup = Ftestgroup(self._fxml, label = label)
+ background = self._getColor(background)
+ if background is not None:
+ self._curTestGroup.background = background
+
+ # append to root test groups
+ self._fxml.testgroups.append(self._curTestGroup)
+
+ def writeFile(self, output):
+ self.closeTestGroup()
+ self._fxml.save(output)
+
+
+class Feature(object):
+ """abstraction of a feature"""
+
+ def __init__(self, tag):
+ self.tag = tag
+ self.default = 0
+ self.maxval = 1
+ self._tvlist = None
+
+ def __getattr__(self,name):
+ if name == "tvlist":
+ # tvlist is a list of all possible tag,value pairs (except the default but including None) for this feature
+ # This attribute shouldn't be needed until all the possible feature values are known,
+ # therefore we'll generate this the first time we need it and save it
+ if self._tvlist is None:
+ self._tvlist = [ None ]
+ for v in range (0, self.maxval+1):
+ if v != self.default:
+ self._tvlist.append( [self.tag, str(v)])
+ return self._tvlist
+ # Anything other than tvlist is a genuinely missing attribute
+ raise AttributeError(name)
+
+
+class FChar(object):
+ """abstraction of an encoded glyph in the font"""
+
+ def __init__(self, uids, basename, logger):
+ self.logger = logger
+ # uids can be a singleton integer or, for multiple-encoded glyphs, some kind of sequence of integers
+ if isinstance(uids,collections.abc.Sequence):
+ uids1 = uids
+ else:
+ uids1 = (uids,)
+ # test each uid to make sure valid; remove if not.
+ uids2=[]
+ self.general = "unknown"
+ for uid in uids1:
+ try:
+ gc = get_ucd(uid,'gc')
+ if self.general == "unknown":
+ self.general = gc
+ uids2.append(uid)
+ except (TypeError, IndexError):
+ self.logger.log(f'Invalid USV "{uid}" -- ignored.', 'E')
+ continue
+ except KeyError:
+ self.logger.log('USV %04X not defined; no properties known' % uid, 'W')
+ # make sure there's at least one left
+ assert len(uids2) > 0, f'No valid USVs found in {repr(uids)}'
+ self._uids = tuple(uids2)
+ self.basename = basename
+ self.feats = set() # feat tags that affect this char
+ self.langs = set() # lang tags that affect this char
+ self.aps = set()
+ self.altnames = {} # alternate glyph names.
+ # the above is a dict keyed by either:
+ # lang tag e.g., 'ur', or
+ # feat tag and value, e.g., 'cv24=3'
+ # and returns the glyph name for that alternate.
+ # Additional info from UFO:
+ self.takesMarks = self.isMark = self.isBase = self.notInUFO = False
+
+ # Most callers don't need to support or care about multiple-encoded glyphs, so we
+ # support the old .uid attribute by returning the first (considered primary) uid.
+ def __getattr__(self,name):
+ if name == 'uids':
+ return self._uids
+ elif name == 'uid':
+ return self._uids[0]
+ else:
+ raise AttributeError
+
+ # the static method FTMLBuilder.checkGlyph is likely preferred
+ # but leave this instance method for backwards compatibility
+ def checkGlyph(self, gname, font, apRE):
+ # glean info from UFO if glyph is present
+ if gname in font.deflayer:
+ self.notInUFO = False
+ for a in font.deflayer[gname]['anchor'] :
+ name = a.element.get('name')
+ if apRE.match(name) is None:
+ continue
+ self.aps.add(name)
+ if name.startswith("_") :
+ self.isMark = True
+ else:
+ self.takesMarks = True
+ self.isBase = self.takesMarks and not self.isMark
+ else:
+ self.notInUFO = True
+
+
+class FSpecial(object):
+ """abstraction of a ligature or other interesting sequence"""
+
+ # Similar to FChar but takes a uid list rather than a single uid
+ def __init__(self, uids, basename, logger):
+ self.logger = logger
+ self.uids = uids
+ self.basename = basename
+ # a couple of properties based on the first uid:
+ try:
+ self.general = get_ucd(uids[0],'gc')
+ except KeyError:
+ self.logger.log('USV %04X not defined; no properties known' % uids[0], 'W')
+ self.feats = set() # feat tags that affect this char
+ self.aps = set()
+ self.langs = set() # lang tags that affect this char
+ self.altnames = {} # alternate glyph names.
+ self.takesMarks = self.isMark = self.isBase = self.notInUFO = False
+
+class FTMLBuilder(object):
+ """glyph_data and UFO processing for building FTML"""
+
+ def __init__(self, logger, incsv = None, fontcode = None, font = None, langs = None, rtlenable = False, ap = None ):
+ self.logger = logger
+ self.rtlEnable = rtlenable
+
+ # Default diacritic base:
+ self.diacBase = 0x25CC
+
+ # Default joinBefore and joinAfter sequence
+ self.joinBefore = '\u200D' # put before a sequence to force joining shape; def = zwj
+ self.joinAfter = '\u200D' # put after a sequence to force joining shape; def = zwj
+
+ # Dict mapping tag to Feature
+ self.features = {}
+
+ # Set of all languages seen
+ if langs is not None:
+ # Use a list so we keep the order (assuming the caller wouldn't give us dups)
+ self.allLangs = list(re.split(r'\s*[\s,]\s*', langs)) # Allow comma- or space-separated tags
+ self._langsComplete = True # We have all the lang tags desired
+ else:
+ # use a set because the langtags are going to dribble in and be repeated.
+ self.allLangs = set()
+ self._langsComplete = False # Add lang_tags from glyph_data
+
+ # Be able to find chars and specials:
+ self._charFromUID = {}
+ self._charFromBasename = {}
+ self._specialFromUIDs = {}
+ self._specialFromBasename = {}
+
+ # list of USVs that are in the CSV but whose glyphs are not in the UFO
+ self.uidsMissingFromUFO = set()
+
+ # DummyUSV (see charAuto())
+ self.curDummyUSV = 0x100000 # Supplemental Private Use Area B
+
+ # Compile --ap parameter
+ if ap is None:
+ ap = "."
+ try:
+ self.apRE = re.compile(ap)
+ except re.error as e:
+ logger.log("--ap parameter '{}' doesn't compile as regular expression: {}".format(ap, e), "S")
+
+ if incsv is not None:
+ self.readGlyphData(incsv, fontcode, font)
+
+ def addChar(self, uids, basename):
+ # Add an FChar
+ # assume parameters are OK:
+ c = FChar(uids, basename, self.logger)
+ # fatal error if the basename or any of uids have already been seen
+ fatal = False
+ for uid in c.uids:
+ if uid in self._charFromUID:
+ self.logger.log('Attempt to add duplicate USV %04X' % uid, 'E')
+ fatal = True
+ self._charFromUID[uid] = c
+ if basename in self._charFromBasename:
+ self.logger.log('Attempt to add duplicate basename %s' % basename, 'E')
+ fatal = True
+ self._charFromBasename[basename] = c
+ if fatal:
+ self.logger.log('Cannot continue due to previous errors', 'S')
+ return c
+
+ def uids(self):
+ """ returns list of uids in glyph_data """
+ return self._charFromUID.keys()
+
+ def char(self, x):
+ """ finds an FChar based either basename or uid;
+ generates KeyError if not found."""
+ return self._charFromBasename[x] if isinstance(x, str) else self._charFromUID[x]
+
+ def charAuto(self, x):
+ """ Like char() but will issue a warning and add a dummy """
+ try:
+ return self._charFromBasename[x] if isinstance(x, str) else self._charFromUID[x]
+ except KeyError:
+ # Issue error message and create dummy Char object for this character
+ if isinstance(x, str):
+ self.logger.log(f'Glyph "{x}" isn\'t in glyph_data.csv - adding dummy', 'E')
+ while self.curDummyUSV in self._charFromUID:
+ self.curDummyUSV += 1
+ c = self.addChar(self.curDummyUSV, x)
+ else:
+ self.logger.log(f'Char U+{x:04x} isn\'t in glyph_data.csv - adding dummy', 'E')
+ c = self.addChar(x, f'U+{x:04x}')
+ return c
+
+ def addSpecial(self, uids, basename):
+ # Add an FSpecial:
+ # fatal error if basename has already been seen:
+ if basename in self._specialFromBasename:
+ self.logger.log('Attempt to add duplicate basename %s' % basename, 'S')
+ c = FSpecial(uids, basename, self.logger)
+ # remember it:
+ self._specialFromUIDs[tuple(uids)] = c
+ self._specialFromBasename[basename] = c
+ return c
+
+ def specials(self):
+ """returns a list of the basenames of specials"""
+ return self._specialFromBasename.keys()
+
+ def special(self, x):
+ """ finds an FSpecial based either basename or uid sequence;
+ generates KeyError if not found."""
+ return self._specialFromBasename[x] if isinstance(x, str) else self._specialFromUIDs[tuple(x)]
+
+ def _csvWarning(self, msg, exception = None):
+ m = "glyph_data line {1}: {0}".format(msg, self.incsv.line_num)
+ if exception is not None:
+ m += '; ' + str(exception)
+ self.logger.log(m, 'W')
+
+ def readGlyphData(self, incsv, fontcode = None, font = None):
+ # Remember csv file for other methods:
+ self.incsv = incsv
+
+ # Validate fontcode, if provided
+ if fontcode is not None:
+ whichfont = fontcode.strip().lower()
+ if len(whichfont) != 1:
+ self.logger.log('fontcode must be a single letter', 'S')
+ else:
+ whichfont = None
+
+ # Get headings from csvfile:
+ fl = incsv.firstline
+ if fl is None: self.logger.log("Empty input file", "S")
+ # required columns:
+ try:
+ nameCol = fl.index('glyph_name')
+ usvCol = fl.index('USV')
+ except ValueError as e:
+ self.logger.log('Missing csv input field: ' + str(e), 'S')
+ except Exception as e:
+ self.logger.log('Error reading csv input field: ' + str(e), 'S')
+ # optional columns:
+ # If -f specified, make sure we have the fonts column
+ if whichfont is not None:
+ if 'Fonts' not in fl: self.logger.log('-f requires "Fonts" column in glyph_data', 'S')
+ fontsCol = fl.index('Fonts')
+ # Allow for projects that use only production glyph names (ps_name same as glyph_name)
+ psCol = fl.index('ps_name') if 'ps_name' in fl else nameCol
+ # Allow for projects that have no feature and/or lang-specific behaviors
+ featCol = fl.index('Feat') if 'Feat' in fl else None
+ bcp47Col = fl.index('bcp47tags') if 'bcp47tags' in fl else None
+
+ next(incsv.reader, None) # Skip first line with headers
+
+ # RE that matches names of glyphs we don't care about
+ namesToSkipRE = re.compile('^(?:[._].*|null|cr|nonmarkingreturn|tab|glyph_name)$',re.IGNORECASE)
+
+ # RE that matches things like 'cv23' or 'cv23=4' or 'cv23=2,3'
+ featRE = re.compile(r'^(\w{2,4})(?:=([\d,]+))?$')
+
+ # RE that matches USV sequences for ligatures
+ ligatureRE = re.compile('^[0-9A-Fa-f]{4,6}(?:_[0-9A-Fa-f]{4,6})+$')
+
+ # RE that matches space-separated USV sequences
+ USVsRE = re.compile(r'^[0-9A-Fa-f]{4,6}(?:\s+[0-9A-Fa-f]{4,6})*$')
+
+ # keep track of glyph names we've seen to detect duplicates
+ namesSeen = set()
+ psnamesSeen = set()
+
+ # OK, process all records in glyph_data
+ for line in incsv:
+ gname = line[nameCol].strip()
+
+ # things to ignore:
+ if namesToSkipRE.match(gname):
+ continue
+ if whichfont is not None and line[fontsCol] != '*' and line[fontsCol].lower().find(whichfont) < 0:
+ continue
+ if len(gname) == 0:
+ self._csvWarning('empty glyph name in glyph_data; ignored')
+ continue
+ if gname.startswith('#'):
+ continue
+ if gname in namesSeen:
+ self._csvWarning('glyph name %s previously seen in glyph_data; ignored' % gname)
+ continue
+
+ psname = line[psCol].strip() or gname # If psname absent, working name will be production name
+ if psname in psnamesSeen:
+ self._csvWarning('psname %s previously seen; ignored' % psname)
+ continue
+ namesSeen.add(gname)
+ psnamesSeen.add(psname)
+
+ # compute basename-- the glyph name without extensions:
+ basename = gname.split('.',1)[0]
+
+ # Process USV(s)
+ # could be empty string, a single USV, space-separated list of USVs for multiple encoding,
+ # or underscore-connected USVs indicating ligatures.
+
+ usvs = line[usvCol].strip()
+ if len(usvs) == 0:
+ # Empty USV field, unencoded glyph
+ usvs = ()
+ elif USVsRE.match(usvs):
+ # space-separated hex values:
+ usvs = usvs.split()
+ isLigature = False
+ elif ligatureRE.match(usvs):
+ # '_' separated hex values (ligatures)
+ usvs = usvs.split('_')
+ isLigature = True
+ else:
+ self._csvWarning(f"invalid USV field '{usvs}'; ignored")
+ usvs = ()
+ uids = [int(x, 16) for x in usvs]
+
+ if len(uids) == 0:
+ # Handle unencoded glyphs
+ uids = None # Prevents using this record to set default feature values
+ if basename in self._charFromBasename:
+ c = self._charFromBasename[basename]
+ # Check for additional AP info (only possible when a UFO was supplied)
+ if font is not None:
+ c.checkGlyph(gname, font, self.apRE)
+ elif basename in self._specialFromBasename:
+ c = self._specialFromBasename[basename]
+ else:
+ self._csvWarning('unencoded variant %s found before encoded glyph' % gname)
+ c = None
+ elif isLigature:
+ # Handle ligatures
+ c = self.addSpecial(uids, basename)
+ uids = None # Prevents using this record to set default feature values (TODO: Research this)
+ else:
+ # Handle simple encoded glyphs (could be multiple uids!)
+ # Create character object
+ c = self.addChar(uids, basename)
+ if font is not None:
+ # Examine APs to determine if this character takes marks:
+ c.checkGlyph(gname, font, self.apRE)
+ if c.notInUFO:
+ self.uidsMissingFromUFO.update(uids)
+
+ if featCol is not None:
+ feats = line[featCol].strip()
+ if len(feats) > 0 and not(feats.startswith('#')):
+ feats = feats.split(';')
+ for feat in feats:
+ m = featRE.match(feat)
+ if m is None:
+ self._csvWarning('incorrectly formed feature specification "%s"; ignored' % feat)
+ else:
+ # find/create structure for this feature:
+ tag = m.group(1)
+ try:
+ feature = self.features[tag]
+ except KeyError:
+ feature = Feature(tag)
+ self.features[tag] = feature
+ # if values supplied, collect default and maximum values for this feature:
+ if m.group(2) is not None:
+ vals = [int(i) for i in m.group(2).split(',')]
+ if len(vals) > 0:
+ if uids is not None:
+ feature.default = vals[0]
+ elif len(feats) == 1: # TODO: This seems like wrong test.
+ for v in vals:
+ # remember the glyph name for this feature/value combination:
+ feat = '{}={}'.format(tag,v)
+ if c is not None and feat not in c.altnames:
+ c.altnames[feat] = gname
+ vals.append(feature.maxval)
+ feature.maxval = max(vals)
+ if c is not None:
+ # Record that this feature affects this character:
+ c.feats.add(tag)
+ else:
+ self._csvWarning('untestable feature "%s" : no known USV' % tag)
+
+ if bcp47Col is not None:
+ bcp47 = line[bcp47Col].strip()
+ if len(bcp47) > 0 and not(bcp47.startswith('#')):
+ if c is not None:
+ for tag in re.split(r'\s*[\s,]\s*', bcp47): # Allow comma- or space-separated tags
+ c.langs.add(tag) # lang-tags mentioned for this character
+ if not self._langsComplete:
+ self.allLangs.add(tag) # keep track of all possible lang-tags
+ else:
+ self._csvWarning('untestable langs: no known USV')
+
+ # We're finally done, but if allLangs is a set, let's order it (for lack of anything better) and make a list:
+ if not self._langsComplete:
+ self.allLangs = list(sorted(self.allLangs))
+
+ def permuteFeatures(self, uids = None, feats = None):
+ """ returns an iterator that provides all combinations of feature/value pairs, for a list of uids and/or a specific list of feature tags"""
+ feats = set(feats) if feats is not None else set()
+ if uids is not None:
+ for uid in uids:
+ if uid in self._charFromUID:
+ feats.update(self._charFromUID[uid].feats)
+ l = [self.features[tag].tvlist for tag in sorted(feats)]
+ return product(*l)
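+ # Illustrative sketch: if U+0041 is affected by features 'cv02' and 'cv44' (each with values
+ # 0 and 1, default 0), permuteFeatures(uids=[0x0041]) yields
+ #   (None, None), (None, ['cv44','1']), (['cv02','1'], None), (['cv02','1'], ['cv44','1'])
+ # and each tuple can be passed directly to FTML.setFeatures().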
+
+ @staticmethod
+ def checkGlyph(obj, gname, font, apRE):
+ # glean info from UFO if glyph is present
+ if gname in font.deflayer:
+ obj.notInUFO = False
+ for a in font.deflayer[gname]['anchor']:
+ name = a.element.get('name')
+ if apRE.match(name) is None:
+ continue
+ obj.aps.add(name)
+ if name.startswith("_"):
+ obj.isMark = True
+ else:
+ obj.takesMarks = True
+ obj.isBase = obj.takesMarks and not obj.isMark
+ else:
+ obj.notInUFO = True
+
+ @staticmethod
+ def matchMarkBase(c_mark, c_base):
+ """ test whether an _AP on c_mark matches an AP on c_base """
+ for apM in c_mark.aps:
+ if apM.startswith("_"):
+ ap = apM[1:]
+ for apB in c_base.aps:
+ if apB == ap:
+ return True
+ return False
+
+ def render(self, uids, ftml, keyUID = 0, addBreaks = True, rtl = None, dualJoinMode = 3, label = None, comment = None):
+ """ general purpose (but not required) function to generate ftml for a character sequence """
+ if len(uids) == 0:
+ return
+ # Make a copy so we don't affect caller
+ uids = list(uids)
+ # Remember first uid and original length for later
+ startUID = uids[0]
+ uidLen = len(uids)
+ # if keyUID wasn't supplied, use startUID
+ if keyUID == 0: keyUID = startUID
+ if label is None:
+ # Construct label from uids:
+ label = '\n'.join(['U+{0:04X}'.format(u) for u in uids])
+ if comment is None:
+ # Construct comment from glyph names:
+ comment = ' '.join([self._charFromUID[u].basename for u in uids])
+ # see if uid list includes a mirrored char
+ hasMirrored = bool(len([x for x in uids if get_ucd(x,'Bidi_M')]))
+ # Analyze first and last joining char
+ joiningChars = [x for x in uids if get_ucd(x, 'jt') != 'T']
+ if len(joiningChars):
+ # If first or last non-TRANSPARENT char is a joining char, then we need to emit examples with zwj
+ # Assumes any non-TRANSPARENT char that is bc != L must be a rtl character of some sort
+ uid = joiningChars[0]
+ zwjBefore = (get_ucd(uid,'jt') == 'D'
+ or (get_ucd(uid,'bc') == 'L' and get_ucd(uid,'jt') == 'L')
+ or (get_ucd(uid,'bc') != 'L' and get_ucd(uid,'jt') == 'R'))
+ uid = joiningChars[-1]
+ zwjAfter = (get_ucd(uid,'jt') == 'D'
+ or (get_ucd(uid,'bc') == 'L' and get_ucd(uid,'jt') == 'R')
+ or (get_ucd(uid,'bc') != 'L' and get_ucd(uid,'jt') == 'L'))
+ else:
+ zwjBefore = zwjAfter = False
+ if get_ucd(startUID,'gc') == 'Mn':
+ # First char is a NSM... prefix a suitable base
+ uids.insert(0, self.diacBase)
+ zwjBefore = False # No longer any need to put zwj before
+ elif get_ucd(startUID, 'WSpace'):
+ # First char is whitespace -- prefix with baseline brackets:
+ uids.insert(0, 0xF130)
+ lastNonMark = [x for x in uids if get_ucd(x,'gc') != 'Mn'][-1]
+ if get_ucd(lastNonMark, 'WSpace'):
+ # Last non-mark is whitespace -- append baseline brackets:
+ uids.append(0xF131)
+ s = ''.join([chr(uid) for uid in uids])
+ if zwjBefore or zwjAfter:
+ # Show contextual forms:
+ # Start with isolate
+ t = u'{0} '.format(s)
+ if zwjBefore and zwjAfter:
+ # For sequences that show dual-joining behavior, what we show depends on dualJoinMode:
+ if dualJoinMode & 1:
+ # show initial, medial, final separated by space:
+ t += u'{0}{2} {1}{0}{2} {1}{0} '.format(s, self.joinBefore, self.joinAfter)
+ if dualJoinMode & 2:
+ # show 3 joined forms in sequence:
+ t += u'{0}{0}{0} '.format(s)
+ elif zwjAfter:
+ t += u'{0}{1} '.format(s, self.joinAfter)
+ elif zwjBefore:
+ t += u'{1}{0} '.format(s, self.joinBefore)
+ if addBreaks: ftml.closeTest()
+ ftml.addToTest(keyUID, t, label = label, comment = comment, rtl = rtl)
+ if addBreaks: ftml.closeTest()
+ elif hasMirrored and self.rtlEnable:
+ # Contains mirrored and rtl enabled:
+ if addBreaks: ftml.closeTest()
+ ftml.addToTest(keyUID, u'{0} LTR: \u202A{0}\u202C RTL: \u202B{0}\u202C'.format(s), label = label, comment = comment, rtl = rtl)
+ if addBreaks: ftml.closeTest()
+ # elif is LRE, RLE, PDF
+ # elif is LRI, RLI, FSI, PDI
+ elif uidLen > 1:
+ ftml.addToTest(keyUID, s , label = label, comment = comment, rtl = rtl)
+ else:
+ ftml.addToTest(keyUID, s , comment = comment, rtl = rtl)
+
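+# Typical (illustrative) use of FTMLBuilder.render() together with an FTML object, along the lines
+# of examples/psfgenftml.py:
+#   ftml.startTestGroup('Encoded characters')
+#   for uid in sorted(builder.uids()):
+#       builder.render([uid], ftml, rtl=False)
+#   ftml.writeFile("AllChars.xml")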
diff --git a/lib/silfont/harfbuzz.py b/lib/silfont/harfbuzz.py
new file mode 100755
index 0000000..b02b478
--- /dev/null
+++ b/lib/silfont/harfbuzz.py
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+'Harfbuzz support for fonttools'
+
+import gi
+gi.require_version('HarfBuzz', '0.0')
+from gi.repository import HarfBuzz as hb
+from gi.repository import GLib
+
+class Glyph(object):
+ def __init__(self, gid, **kw):
+ self.gid = gid
+ for k,v in kw.items():
+ setattr(self, k, v)
+
+ def __repr__(self):
+ return "[{gid}@({offset[0]},{offset[1]})+({advance[0]},{advance[1]})]".format(**self.__dict__)
+
+def shape_text(f, text, features = [], lang=None, dir="", script="", shapers=""):
+ fontfile = f.reader.file
+ fontfile.seek(0, 0)
+ fontdata = fontfile.read()
+ blob = hb.glib_blob_create(GLib.Bytes.new(fontdata))
+ face = hb.face_create(blob, 0)
+ del blob
+ font = hb.font_create(face)
+ upem = hb.face_get_upem(face)
+ del face
+ hb.font_set_scale(font, upem, upem)
+ hb.ot_font_set_funcs(font)
+
+ buf = hb.buffer_create()
+ t = text.encode('utf-8')
+ hb.buffer_add_utf8(buf, t, 0, -1)
+ hb.buffer_guess_segment_properties(buf)
+ if dir:
+ hb.buffer_set_direction(buf, hb.direction_from_string(dir))
+ if script:
+ hb.buffer_set_script(buf, hb.script_from_string(script))
+ if lang:
+ hb.buffer_set_language(buf, hb.language_from_string(lang))
+
+ feats = []
+ if len(features):
+ for feat_string in features:
+ if hb.feature_from_string(feat_string, -1, aFeats):
+ feats.append(aFeats)
+ if shapers:
+ hb.shape_full(font, buf, feats, shapers)
+ else:
+ hb.shape(font, buf, feats)
+
+ num_glyphs = hb.buffer_get_length(buf)
+ info = hb.buffer_get_glyph_infos(buf)
+ pos = hb.buffer_get_glyph_positions(buf)
+
+ glyphs = []
+ for i in range(num_glyphs):
+ glyphs.append(Glyph(info[i].codepoint, cluster = info[i].cluster,
+ offset = (pos[i].x_offset, pos[i].y_offset),
+ advance = (pos[i].x_advance, pos[i].y_advance),
+ flags = info[i].mask))
+ return glyphs
+
+if __name__ == '__main__':
+ import sys
+ from fontTools.ttLib import TTFont
+ font = sys.argv[1]
+ text = sys.argv[2]
+ f = TTFont(font)
+ glyphs = shape_text(f, text)
+ print(glyphs)
diff --git a/lib/silfont/ipython.py b/lib/silfont/ipython.py
new file mode 100644
index 0000000..150aa97
--- /dev/null
+++ b/lib/silfont/ipython.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+'IPython support for fonttools'
+
+__all__ = ['displayGlyphs', 'loadFont', 'displayText', 'displayRaw']
+
+from fontTools import ttLib
+from fontTools.pens.basePen import BasePen
+from fontTools.misc import arrayTools
+from IPython.display import SVG, HTML
+from defcon import Font
+from ufo2ft import compileTTF
+
+class SVGPen(BasePen) :
+
+ def __init__(self, glyphSet, scale=1.0) :
+ super(SVGPen, self).__init__(glyphSet);
+ self.__commands = []
+ self.__scale = scale
+
+ def __str__(self) :
+ return " ".join(self.__commands)
+
+ def scale(self, pt) :
+ return ((pt[0] or 0) * self.__scale, (pt[1] or 0) * self.__scale)
+
+ def _moveTo(self, pt):
+ self.__commands.append("M {0[0]} {0[1]}".format(self.scale(pt)))
+
+ def _lineTo(self, pt):
+ self.__commands.append("L {0[0]} {0[1]}".format(self.scale(pt)))
+
+ def _curveToOne(self, pt1, pt2, pt3) :
+ self.__commands.append("C {0[0]} {0[1]} {1[0]} {1[1]} {2[0]} {2[1]}".format(self.scale(pt1), self.scale(pt2), self.scale(pt3)))
+
+ def _closePath(self) :
+ self.__commands.append("Z")
+
+ def clear(self) :
+ self.__commands = []
+
+def _svgheader():
+ return '''<?xml version="1.0"?>
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">
+'''
+
+def _bbox(f, gnames, points, scale=1):
+ gset = f.glyphSet
+ bbox = (0, 0, 0, 0)
+ for i, gname in enumerate(gnames):
+ if hasattr(points, '__len__') and i == len(points):
+ points.append((bbox[2] / scale, 0))
+ pt = points[i] if i < len(points) else (0, 0)
+ g = gset[gname]._glyph
+ if g is None or not hasattr(g, 'xMin') :
+ gbox = (0, 0, 0, 0)
+ else :
+ gbox = (g.xMin * scale, g.yMin * scale, g.xMax * scale, g.yMax * scale)
+ bbox = arrayTools.unionRect(bbox, arrayTools.offsetRect(gbox, pt[0] * scale, pt[1] * scale))
+ return bbox
+
+glyphsetcount = 0
+def _defglyphs(f, gnames, scale=1):
+ global glyphsetcount
+ glyphsetcount += 1
+ gset = f.glyphSet
+ p = SVGPen(gset, scale)
+ res = "<defs><g>\n"
+ for gname in sorted(set(gnames)):
+ res += '<symbol overflow="visible" id="{}_{}">\n'.format(gname, glyphsetcount)
+ g = gset[gname]
+ p.clear()
+ g.draw(p)
+ res += '<path style="stroke:none;" d="' + str(p) + '"/>\n</symbol>\n'
+ res += "</g></defs>\n"
+ return res
+
+def loadFont(fname):
+ if fname.lower().endswith(".ufo"):
+ ufo = Font(fname)
+ f = compileTTF(ufo)
+ else:
+ f = ttLib.TTFont(fname)
+ return f
+
+def displayGlyphs(f, gnames, points=None, scale=None):
+ if not hasattr(gnames, '__len__') or isinstance(gnames, str):
+ gnames = [gnames]
+ if not hasattr(points, '__len__'):
+ points = []
+ if not hasattr(f, 'glyphSet'):
+ f.glyphSet = f.getGlyphSet()
+ res = _svgheader()
+ if points is None:
+ points = []
+ bbox = _bbox(f, gnames, points, scale or 1)
+ maxh = 100.
+ height = bbox[3] - (bbox[1] if bbox[1] < 0 else 0)
+ if scale is None:
+ scale = maxh / height if height > maxh else 1.0
+ bbox = [x * scale for x in bbox]
+ res += _defglyphs(f, gnames, scale)
+ res += '<g id="surface1" transform="matrix(1,0,0,-1,{},{})">\n'.format(-bbox[0], bbox[3])
+ res += ' <rect x="{}" y="{}" width="{}" height="{}" style="fill:white;stroke:none"/>\n'.format(
+ bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3])
+ res += ' <g style="fill:black">\n'
+ for i, gname in enumerate(gnames):
+ pt = points[i] if i < len(points) else (0, 0)
+ res += ' <use xlink:href="#{0}_{3}" x="{1}" y="{2}"/>\n'.format(gname, pt[0] * scale, pt[1] * scale, glyphsetcount)
+ res += ' </g></g>\n</svg>\n'
+ return SVG(data=res)
+ #return res
+
+def displayText(f, text, features = [], lang=None, dir="", script="", shapers="", size=0):
+ import harfbuzz
+ glyphs = harfbuzz.shape_text(f, text, features, lang, dir, script, shapers)
+ gnames = []
+ points = []
+ x = 0
+ y = 0
+ for g in glyphs:
+ gnames.append(f.getGlyphName(g.gid))
+ points.append((x+g.offset[0], y+g.offset[1]))
+ x += g.advance[0]
+ y += g.advance[1]
+ if size == 0:
+ scale = None
+ else:
+ upem = f['head'].unitsPerEm
+ scale = 4. * size / (upem * 3.)
+ return displayGlyphs(f, gnames, points, scale=scale)
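+# Illustrative notebook usage (the font path and glyph name are assumed examples):
+#   f = loadFont("Font-Regular.ttf")
+#   displayText(f, "abc", size=40)   # shape with HarfBuzz, then render the result as SVG
+#   displayGlyphs(f, "Aacute")       # render a single glyph by name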
+
+def displayRaw(text):
+ # res = "<html><body>"+text.encode('utf-8')+"</body></html>"
+ res = u"<html><body><p>"+text+u"</p></body></html>"
+ return HTML(data=res)
diff --git a/lib/silfont/scripts/__init__.py b/lib/silfont/scripts/__init__.py
new file mode 100755
index 0000000..e69de29
--- /dev/null
+++ b/lib/silfont/scripts/__init__.py
diff --git a/lib/silfont/scripts/psfaddanchors.py b/lib/silfont/scripts/psfaddanchors.py
new file mode 100755
index 0000000..88f595f
--- /dev/null
+++ b/lib/silfont/scripts/psfaddanchors.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+__doc__ = 'read anchor data from XML file and apply to UFO'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Rowe'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('ifont',{'help': 'Input UFO'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output UFO','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--anchorinfo',{'help': 'XML file with anchor data'}, {'type': 'infile', 'def': '_anc.xml'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_anc.log'}),
+ ('-a','--analysis',{'help': 'Analysis only; no output font generated', 'action': 'store_true'},{}),
+ # 'choices' for -r should correspond to infont.logger.loglevels.keys()
+ ('-r','--report',{'help': 'Set reporting level for log', 'type':str, 'choices':['X','S','E','P','W','I','V']},{})
+ ]
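+# The anchor data file is expected to look roughly like this (the root element name is not
+# significant; only glyph/@PSName, point/@type and the x/y of each point's first child are read):
+#   <anchorinfo>
+#     <glyph PSName="LtnCapA">
+#       <point type="U"><location x="625" y="1450"/></point>
+#     </glyph>
+#   </anchorinfo>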
+
+def doit(args) :
+ infont = args.ifont
+ if args.report: infont.logger.loglevel = args.report
+ glyphcount = 0
+
+ try:
+ for g in ET.parse(args.anchorinfo).getroot().findall('glyph'): ###
+ glyphcount += 1
+ gname = g.get('PSName')
+ if gname not in infont.deflayer.keys():
+ infont.logger.log("glyph element number " + str(glyphcount) + ": " + gname + " not in font, so skipping anchor data", "W")
+ continue
+ # anchors currently in font for this glyph
+ glyph = infont.deflayer[gname]
+ anchorsinfont = set([ ( a.element.get('name'),a.element.get('x'),a.element.get('y') ) for a in glyph['anchor']])
+ # anchors in XML file to be added
+ anchorstoadd = set()
+ for p in g.findall('point'):
+ name = p.get('type')
+ x = p[0].get('x') # assume subelement location is first child
+ y = p[0].get('y')
+ if name and x and y:
+ anchorstoadd.add( (name,x,y) )
+ else:
+ infont.logger.log("Incomplete information for anchor '" + name + "' for glyph " + gname, "E")
+ # compare sets
+ if anchorstoadd == anchorsinfont:
+ if len(anchorstoadd) > 0:
+ infont.logger.log("Anchors in file already in font for glyph " + gname + ": " + str(anchorstoadd), "V")
+ else:
+ infont.logger.log("No anchors in file or in font for glyph " + gname, "V")
+ else:
+ infont.logger.log("Anchors in file for glyph " + gname + ": " + str(anchorstoadd), "I")
+ infont.logger.log("Anchors in font for glyph " + gname + ": " + str(anchorsinfont), "I")
+ for name,x,y in anchorstoadd:
+ # if anchor being added exists in font already, delete it first
+ ancnames = [a.element.get('name') for a in glyph['anchor']]
+ infont.logger.log(str(ancnames), "V") ###
+ if name in ancnames:
+ infont.logger.log("removing anchor " + name + ", index " + str(ancnames.index(name)), "V") ###
+ glyph.remove('anchor', ancnames.index(name))
+ infont.logger.log("adding anchor " + name + ": (" + x + ", " + y + ")", "V") ###
+ glyph.add('anchor', {'name': name, 'x': x, 'y': y})
+ # If analysis only, return without writing output font
+ if args.analysis: return
+ # Return changed font and let execute() write it out
+ return infont
+ except ET.ParseError as mess:
+ infont.logger.log("Error parsing XML input file: " + str(mess), "S")
+ return # but really should terminate after logging Severe error above
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfbuildcomp.py b/lib/silfont/scripts/psfbuildcomp.py
new file mode 100755
index 0000000..0c5d790
--- /dev/null
+++ b/lib/silfont/scripts/psfbuildcomp.py
@@ -0,0 +1,309 @@
+#!/usr/bin/env python
+__doc__ = '''Read Composite Definitions and add glyphs to a UFO font'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Rowe'
+
+try:
+ xrange
+except NameError:
+ xrange = range
+from xml.etree import ElementTree as ET
+import re
+from silfont.core import execute
+import silfont.ufo as ufo
+from silfont.comp import CompGlyph
+from silfont.etutil import ETWriter
+from silfont.util import parsecolors
+
+argspec = [
+ ('ifont',{'help': 'Input UFO'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output UFO','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--cdfile',{'help': 'Composite Definitions input file'}, {'type': 'infile', 'def': '_CD.txt'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_CD.log'}),
+ ('-a','--analysis',{'help': 'Analysis only; no output font generated', 'action': 'store_true'},{}),
+ ('-c','--color',{'help': 'Color cells of generated glyphs', 'action': 'store_true'},{}),
+ ('--colors', {'help': 'Color(s) to use when marking generated glyphs'},{}),
+ ('-f','--force',{'help': 'Force overwrite of glyphs having outlines', 'action': 'store_true'},{}),
+ ('-n','--noflatten',{'help': 'Do not flatten component references', 'action': 'store_true'},{}),
+ ('--remove',{'help': 'a regex matching anchor names that should always be removed from composites'},{}),
+ ('--preserve', {'help': 'a regex matching anchor names that, if present in glyphs about to be replaced, should not be overwritten'}, {})
+ ]
+
+glyphlist = [] # accessed as global by recursive function addtolist() and main function doit()
+
+def doit(args):
+ global glyphlist
+ infont = args.ifont
+ logger = args.logger
+ params = infont.outparams
+
+ removeRE = re.compile(args.remove) if args.remove else None
+ preserveRE = re.compile(args.preserve) if args.preserve else None
+
+ colors = None
+ if args.color or args.colors:
+ colors = args.colors if args.colors else "(0.04,0.57,0.04,1)"
+ colors = parsecolors(colors, allowspecial=True)
+ invalid = False
+ for color in colors:
+ if color[0] is None:
+ invalid = True
+ logger.log(color[2], "E")
+ if len(colors) > 3:
+ logger.log("A maximum of three colors can be supplied: " + str(len(colors)) + " supplied", "E")
+ invalid = True
+ if invalid: logger.log("Re-run with valid colors", "S")
+ if len(colors) == 1: colors.append(colors[0])
+ if len(colors) == 2: colors.append(colors[1])
+ logstatuses = ("Glyph unchanged", "Glyph changed", "New glyph")
+
+ ### temp section (these may someday be passed as optional parameters)
+ RemoveUsedAnchors = True
+ ### end of temp section
+
+ cgobj = CompGlyph()
+
+ for linenum, rawCDline in enumerate(args.cdfile):
+ CDline=rawCDline.strip()
+ if len(CDline) == 0 or CDline[0] == "#": continue
+ logger.log("Processing line " + str(linenum+1) + ": " + CDline,"I")
+ cgobj.CDline=CDline
+ try:
+ cgobj.parsefromCDline()
+ except ValueError as mess:
+ logger.log("Parsing error: " + str(mess), "E")
+ continue
+ g = cgobj.CDelement
+
+ # Collect target glyph information and construct list of component glyphs
+ targetglyphname = g.get("PSName")
+ targetglyphunicode = g.get("UID")
+ glyphlist = [] # list of component glyphs
+ lsb = rsb = 0
+ adv = None
+ for e in g:
+ if e.tag == 'note': pass
+ elif e.tag == 'property': pass # ignore mark info
+ elif e.tag == 'lsb': lsb = int(e.get('width'))
+ elif e.tag == 'rsb': rsb = int(e.get('width'))
+ elif e.tag == 'advance': adv = int(e.get('width'))
+ elif e.tag == 'base':
+ addtolist(e,None)
+ logger.log(str(glyphlist),"V")
+
+ # find each component glyph and compute x,y position
+ xadvance = lsb
+ componentlist = []
+ targetglyphanchors = {} # dictionary of {name: (xOffset,yOffset)}
+ for currglyph, prevglyph, baseAP, diacAP, shiftx, shifty in glyphlist:
+ # get current glyph and its anchor names from font
+ if currglyph not in infont.deflayer:
+ logger.log(currglyph + " not found in font", "E")
+ continue
+ cg = infont.deflayer[currglyph]
+ cganc = [x.element.get('name') for x in cg['anchor']]
+ diacAPx = diacAPy = 0
+ baseAPx = baseAPy = 0
+ if prevglyph is None: # this is new 'base'
+ xOffset = xadvance
+ yOffset = 0
+ # Find advance width of currglyph and add to xadvance
+ if 'advance' in cg:
+ cgadvance = cg['advance']
+ if cgadvance is not None and cgadvance.element.get('width') is not None:
+ xadvance += int(float(cgadvance.element.get('width')))
+ else: # this is 'attach'
+ if diacAP is not None: # find diacritic Attachment Point in currglyph
+ if diacAP not in cganc:
+ logger.log("The AP '" + diacAP + "' does not exist on diacritic glyph " + currglyph, "E")
+ else:
+ i = cganc.index(diacAP)
+ diacAPx = int(float(cg['anchor'][i].element.get('x')))
+ diacAPy = int(float(cg['anchor'][i].element.get('y')))
+ else:
+ logger.log("No AP specified for diacritic " + currglyph, "E")
+ if baseAP is not None: # find base character Attachment Point in targetglyph
+ if baseAP not in targetglyphanchors.keys():
+ logger.log("The AP '" + baseAP + "' does not exist on base glyph when building " + targetglyphname, "E")
+ else:
+ baseAPx = targetglyphanchors[baseAP][0]
+ baseAPy = targetglyphanchors[baseAP][1]
+ if RemoveUsedAnchors:
+ logger.log("Removing used anchor " + baseAP, "V")
+ del targetglyphanchors[baseAP]
+ xOffset = baseAPx - diacAPx
+ yOffset = baseAPy - diacAPy
+
+ if shiftx is not None: xOffset += int(shiftx)
+ if shifty is not None: yOffset += int(shifty)
+
+ componentdic = {'base': currglyph}
+ if xOffset != 0: componentdic['xOffset'] = str(xOffset)
+ if yOffset != 0: componentdic['yOffset'] = str(yOffset)
+ componentlist.append( componentdic )
+
+ # Move anchor information to targetglyphanchors
+ for a in cg['anchor']:
+ dic = a.element.attrib
+ thisanchorname = dic['name']
+ if RemoveUsedAnchors and thisanchorname == diacAP:
+ logger.log("Skipping used anchor " + diacAP, "V")
+ continue # skip this anchor
+ # add anchor (adjusted for position in targetglyph)
+ targetglyphanchors[thisanchorname] = ( int( dic['x'] ) + xOffset, int( dic['y'] ) + yOffset )
+ logger.log("Adding anchor " + thisanchorname + ": " + str(targetglyphanchors[thisanchorname]), "V")
+ logger.log(str(targetglyphanchors),"V")
+
+ if adv is not None:
+ xadvance = adv ### if adv specified, then this advance value overrides calculated value
+ else:
+ xadvance += rsb ### adjust with rsb
+
+ logger.log("Glyph: " + targetglyphname + ", " + str(targetglyphunicode) + ", " + str(xadvance), "V")
+ for c in componentlist:
+ logger.log(str(c), "V")
+
+ # Flatten components unless -n set
+ if not args.noflatten:
+ newcomponentlist = []
+ for compdic in componentlist:
+ c = compdic['base']
+ x = compdic.get('xOffset')
+ y = compdic.get('yOffset')
+ # look up component glyph
+ g=infont.deflayer[c]
+ # check if it has only components (that is, no contours) in outline
+ if g['outline'] and g['outline'].components and not g['outline'].contours:
+ # for each component, get base, x1, y1 and create new entry with base, x+x1, y+y1
+ for subcomp in g['outline'].components:
+ componentdic = subcomp.element.attrib.copy()
+ x1 = componentdic.pop('xOffset', 0)
+ y1 = componentdic.pop('yOffset', 0)
+ xOffset = addtwo(x, x1)
+ yOffset = addtwo(y, y1)
+ if xOffset != 0: componentdic['xOffset'] = str(xOffset)
+ if yOffset != 0: componentdic['yOffset'] = str(yOffset)
+ newcomponentlist.append( componentdic )
+ else:
+ newcomponentlist.append( compdic )
+ if componentlist == newcomponentlist:
+ logger.log("No changes to flatten components", "V")
+ else:
+ componentlist = newcomponentlist
+ logger.log("Components flattened", "V")
+ for c in componentlist:
+ logger.log(str(c), "V")
+
+ # Check if this new glyph exists in the font already; if so, decide whether to replace, or issue warning
+ preservedAPs = set()
+ if targetglyphname in infont.deflayer.keys():
+ logger.log("Target glyph, " + targetglyphname + ", already exists in font.", "V")
+ targetglyph = infont.deflayer[targetglyphname]
+ if targetglyph['outline'] and targetglyph['outline'].contours and not args.force: # don't replace glyph with contours, unless -f set
+ logger.log("Not replacing existing glyph, " + targetglyphname + ", because it has contours.", "W")
+ continue
+ else:
+ logger.log("Replacing information in existing glyph, " + targetglyphname, "I")
+ glyphstatus = "Replace"
+ # delete information from existing glyph
+ targetglyph.remove('outline')
+ targetglyph.remove('advance')
+ for i in xrange(len(targetglyph['anchor'])-1,-1,-1):
+ aname = targetglyph['anchor'][i].element.attrib['name']
+ if preserveRE is not None and preserveRE.match(aname):
+ preservedAPs.add(aname)
+ logger.log("Preserving anchor " + aname, "V")
+ else:
+ targetglyph.remove('anchor',index=i)
+ else:
+ logger.log("Adding new glyph, " + targetglyphname, "I")
+ glyphstatus = "New"
+ # create glyph, using targetglyphname, targetglyphunicode
+ targetglyph = ufo.Uglif(layer=infont.deflayer, name=targetglyphname)
+ # actually add the glyph to the font
+ infont.deflayer.addGlyph(targetglyph)
+
+ if xadvance != 0: targetglyph.add('advance',{'width': str(xadvance)} )
+ if targetglyphunicode: # remove any existing unicode value(s) before adding unicode value
+ for i in xrange(len(targetglyph['unicode'])-1,-1,-1):
+ targetglyph.remove('unicode',index=i)
+ targetglyph.add('unicode',{'hex': targetglyphunicode} )
+ targetglyph.add('outline')
+ # to the outline element, add a component element for every entry in componentlist
+ for compdic in componentlist:
+ comp = ufo.Ucomponent(targetglyph['outline'],ET.Element('component',compdic))
+ targetglyph['outline'].appendobject(comp,'component')
+ # copy anchors to new glyph from targetglyphanchors which has format {'U': (500,1000), 'L': (500,0)}
+ for a in sorted(targetglyphanchors):
+ if removeRE is not None and removeRE.match(a):
+ logger.log("Skipping unwanted anchor " + a, "V")
+ continue # skip this anchor
+ if a not in preservedAPs:
+ targetglyph.add('anchor', {'name': a, 'x': str(targetglyphanchors[a][0]), 'y': str(targetglyphanchors[a][1])} )
+ # mark glyphs as being generated by setting cell mark color if -c or --colors set
+ if colors:
+ # Need to see if the target glyph has changed.
+ if glyphstatus == "Replace":
+ # Need to recreate the xml element then normalize it for comparison with original
+ targetglyph["anchor"].sort(key=lambda anchor: anchor.element.get("name"))
+ targetglyph.rebuildET()
+ attribOrder = params['attribOrders']['glif'] if 'glif' in params['attribOrders'] else {}
+ if params["sortDicts"] or params["precision"] is not None: ufo.normETdata(targetglyph.etree, params, 'glif')
+ etw = ETWriter(targetglyph.etree, attributeOrder=attribOrder, indentIncr=params["indentIncr"],
+ indentFirst=params["indentFirst"], indentML=params["indentML"], precision=params["precision"],
+ floatAttribs=params["floatAttribs"], intAttribs=params["intAttribs"])
+ newxml = etw.serialize_xml()
+ if newxml == targetglyph.inxmlstr: glyphstatus = 'Unchanged'
+
+ x = 0 if glyphstatus == "Unchanged" else 1 if glyphstatus == "Replace" else 2
+
+ color = colors[x]
+ lib = targetglyph["lib"]
+ if color[0]: # Need to set actual color
+ if lib is None: targetglyph.add("lib")
+ targetglyph["lib"].setval("public.markColor", "string", color[0])
+ logger.log(logstatuses[x] + " - setting markColor to " + color[2], "I")
+ elif x < 2: # No need to log for new glyphs
+ if color[1] == "none": # Remove existing color
+ if lib is not None and "public.markColor" in lib: lib.remove("public.markColor")
+ logger.log(logstatuses[x] + " - Removing existing markColor", "I")
+ else:
+ logger.log(logstatuses[x] + " - Leaving existing markColor (if any)", "I")
+
+ # If analysis only, return without writing output font
+ if args.analysis: return
+ # Return changed font and let execute() write it out
+ return infont
+
+def addtolist(e, prevglyph):
+ """Given an element ('base' or 'attach') and the name of previous glyph,
+ add a tuple to the list of glyphs in this composite, including
+ "at" and "with" attachment point information, and x and y shift values
+ """
+ global glyphlist
+ subelementlist = []
+ thisglyphname = e.get('PSName')
+ atvalue = e.get("at")
+ withvalue = e.get("with")
+ shiftx = shifty = None
+ for se in e:
+ if se.tag == 'property': pass
+ elif se.tag == 'shift':
+ shiftx = se.get('x')
+ shifty = se.get('y')
+ elif se.tag == 'attach':
+ subelementlist.append( se )
+ glyphlist.append( ( thisglyphname, prevglyph, atvalue, withvalue, shiftx, shifty ) )
+ for se in subelementlist:
+ addtolist(se, thisglyphname)
+
+def addtwo(a1, a2):
+ """Take two items (string, number or None), convert to integer and return sum"""
+ b1 = int(a1) if a1 is not None else 0
+ b2 = int(a2) if a2 is not None else 0
+ return b1 + b2
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfbuildcompgc.py b/lib/silfont/scripts/psfbuildcompgc.py
new file mode 100644
index 0000000..e3d0a3e
--- /dev/null
+++ b/lib/silfont/scripts/psfbuildcompgc.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+'''Uses the GlyphConstruction library to build composite glyphs.'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+from glyphConstruction import ParseGlyphConstructionListFromString, GlyphConstructionBuilder
+
+argspec = [
+ ('ifont', {'help': 'Input font filename'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--cdfile',{'help': 'Composite Definitions input file'}, {'type': 'infile', 'def': 'constructions.txt'}),
+ ('-l','--log',{'help': 'Set log file name'}, {'type': 'outfile', 'def': '_gc.log'})]
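+# constructions.txt uses glyphConstruction syntax; a line looks roughly like (illustrative only):
+#   Agrave = A + grave@top
+# See the glyphConstruction project documentation for the full syntax.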
+
+def doit(args) :
+ font = args.ifont
+ logger = args.logger
+
+ constructions = ParseGlyphConstructionListFromString(args.cdfile)
+
+ for construction in constructions :
+ # Create a new constructed glyph object
+ try:
+ constructionGlyph = GlyphConstructionBuilder(construction, font)
+ except ValueError as e:
+ logger.log("Invalid CD line '" + construction + "' - " + str(e), "E")
+ else:
+ # Make a new glyph in target font with the new glyph name
+ glyph = font.newGlyph(constructionGlyph.name)
+ # Draw the constructed object onto the new glyph
+ # This is rather odd in how it works
+ constructionGlyph.draw(glyph.getPen())
+ # Copy glyph metadata from constructed object
+ glyph.name = constructionGlyph.name
+ glyph.unicode = constructionGlyph.unicode
+ glyph.note = constructionGlyph.note
+ #glyph.markColor = constructionGlyph.mark
+ glyph.width = constructionGlyph.width
+
+ return font
+
+def cmd() : execute("FP",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfbuildfea.py b/lib/silfont/scripts/psfbuildfea.py
new file mode 100755
index 0000000..41cf099
--- /dev/null
+++ b/lib/silfont/scripts/psfbuildfea.py
@@ -0,0 +1,89 @@
+#!/usr/bin/python3
+__doc__ = 'Build features.fea file into a ttf font'
+# TODO: add conditional compilation, compare to fea, compile to ttf
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Martin Hosken'
+
+from fontTools.feaLib.builder import Builder
+from fontTools import configLogger
+from fontTools.ttLib import TTFont
+from fontTools.ttLib.tables.otTables import lookupTypes
+from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo
+
+from silfont.core import execute
+
+class MyBuilder(Builder):
+
+ def __init__(self, font, featurefile, lateSortLookups=False, fronts=None):
+ super(MyBuilder, self).__init__(font, featurefile)
+ self.lateSortLookups = lateSortLookups
+ self.fronts = fronts if fronts is not None else []
+
+ def buildLookups_(self, tag):
+ assert tag in ('GPOS', 'GSUB'), tag
+ countFeatureLookups = 0
+ fronts = set([l for k, l in self.named_lookups_.items() if k in self.fronts])
+ for bldr in self.lookups_:
+ bldr.lookup_index = None
+ if bldr.table == tag and getattr(bldr, '_feature', "") != "":
+ countFeatureLookups += 1
+ lookups = []
+ latelookups = []
+ for bldr in self.lookups_:
+ if bldr.table != tag:
+ continue
+ if self.lateSortLookups and getattr(bldr, '_feature', "") == "":
+ if bldr in fronts:
+ latelookups.insert(0, bldr)
+ else:
+ latelookups.append(bldr)
+ else:
+ bldr.lookup_index = len(lookups)
+ lookups.append(bldr)
+ bldr.map_index = bldr.lookup_index
+ numl = len(lookups)
+ for i, l in enumerate(latelookups):
+ l.lookup_index = numl + i
+ l.map_index = l.lookup_index
+ for l in lookups + latelookups:
+ self.lookup_locations[tag][str(l.lookup_index)] = LookupDebugInfo(
+ location=str(l.location),
+ name=self.get_lookup_name_(l),
+ feature=None)
+ return [b.build() for b in lookups + latelookups]
+
+ def add_lookup_to_feature_(self, lookup, feature_name):
+ super(MyBuilder, self).add_lookup_to_feature_(lookup, feature_name)
+ lookup._feature = feature_name
+
+
+#TODO: provide more argument info
+argspec = [
+ ('input_fea', {'help': 'Input fea file'}, {}),
+ ('input_font', {'help': 'Input font file'}, {}),
+ ('-o', '--output', {'help': 'Output font file'}, {}),
+ ('-v', '--verbose', {'help': 'Repeat to increase verbosity', 'action': 'count', 'default': 0}, {}),
+ ('-m', '--lookupmap', {'help': 'File into which place lookup map'}, {}),
+ ('-l','--log',{'help': 'Optional log file'}, {'type': 'outfile', 'def': '_buildfea.log', 'optlog': True}),
+ ('-e','--end',{'help': 'Push lookups not in features to the end', 'action': 'store_true'}, {}),
+ ('-F','--front',{'help': 'Pull named lookups to the front of unnamed list', 'action': 'append'}, {}),
+]
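+# Typical invocation (paths are illustrative; the console-script name follows the module name):
+#   psfbuildfea -o results/Font-Regular.ttf source/features.fea results/Font-Regular-nofea.ttf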
+
+def doit(args) :
+ levels = ["WARNING", "INFO", "DEBUG"]
+ configLogger(level=levels[min(len(levels) - 1, args.verbose)])
+
+ font = TTFont(args.input_font)
+ builder = MyBuilder(font, args.input_fea, lateSortLookups=args.end, fronts=args.front)
+ builder.build()
+ if args.lookupmap:
+ with open(args.lookupmap, "w") as outf:
+ for n, l in sorted(builder.named_lookups_.items()):
+ if l is not None:
+ outf.write("{},{},{}\n".format(n, l.table, l.map_index))
+ font.save(args.output)
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == '__main__': cmd()
diff --git a/lib/silfont/scripts/psfchangegdlnames.py b/lib/silfont/scripts/psfchangegdlnames.py
new file mode 100755
index 0000000..2a10869
--- /dev/null
+++ b/lib/silfont/scripts/psfchangegdlnames.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+__doc__ = '''Change graphite names within GDL based on a csv list in the format
+ old name, new name
+ Logs any names not in the list.
+ Also updates postscript names in postscript() statements based on the psnames csv.'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+import os, re
+
+argspec = [
+ ('input',{'help': 'Input file or folder'}, {'type': 'filename'}),
+ ('output',{'help': 'Output file or folder', 'nargs': '?'}, {}),
+ ('-n','--names',{'help': 'Names csv file'}, {'type': 'incsv', 'def': 'gdlmap.csv'}),
+ ('--names2',{'help': '2nd names csv file', 'nargs': '?'}, {'type': 'incsv', 'def': None}),
+ ('--psnames',{'help': 'PS names csv file'}, {'type': 'incsv', 'def': 'psnames.csv'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': 'GDLchangeNames.log'})]
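+# The names csv (and optional names2 csv) has two fields per line, old name then new name,
+# for example (illustrative names):
+#   gA,g_a
+#   gAMatraDeva,g_aa_matra_deva
+# The psnames csv also has two fields per line; where the second field matches a name inside a
+# postscript("...") statement, it is replaced by the first field.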
+
+def doit(args) :
+ logger = args.paramsobj.logger
+
+ exceptions = ("glyph", "gamma", "greek_circ")
+
+ # Process input which may be a single file or a directory
+ input = args.input
+ gdlfiles = []
+
+ if os.path.isdir(input) :
+ inputisdir = True
+ indir = input
+ for name in os.listdir(input) :
+ ext = os.path.splitext(name)[1]
+ if ext in ('.gdl','.gdh') :
+ gdlfiles.append(name)
+ else :
+ inputisdir = False
+ indir,inname = os.path.split(input)
+ gdlfiles = [inname]
+
+ # Process output file name - execute() will not have processed file/dir name at all
+ output = "" if args.output is None else args.output
+ outdir,outfile = os.path.split(output)
+ if outfile != "" and os.path.splitext(outfile)[1] == "" : # if no extension on outfile, assume a dir was meant
+ outdir = os.path.join(outdir,outfile)
+ outfile = None
+ if outfile == "" : outfile = None
+ if outfile and inputisdir : logger.log("Can't specify an output file when input is a directory", "S")
+ outappend = None
+ if outdir == "" :
+ if outfile is None :
+ outappend = "_out"
+ else :
+ if outfile == gdlfiles[0] : logger.log("Specify a different output file", "S")
+ outdir = indir
+ else:
+ if indir == outdir :
+ if outfile :
+ if outfile == gdlfiles[0] : logger.log("Specify a different output file", "S")
+ else:
+ logger.log("Specify a different output dir", "S")
+ if not os.path.isdir(outdir) : logger.log("Output directory does not exist", "S")
+
+ # Process names csv file
+ args.names.numfields = 2
+ names = {}
+ for line in args.names : names[line[0]] = line[1]
+
+ # Process names2 csv if present
+ names2 = args.names2
+ if names2 is not None :
+ names2.numfields = 2
+ for line in names2 :
+ n1 = line[0]
+ n2 = line[1]
+ if n1 in names and n2 != names[n1] :
+ logger.log(n1 + " in both names and names2 with different values","E")
+ else :
+ names[n1] = n2
+
+ # Process psnames csv file
+ args.psnames.numfields = 2
+ psnames = {}
+ for line in args.psnames : psnames[line[1]] = line[0]
+
+ missed = []
+ psmissed = []
+ for filen in gdlfiles:
+ file = open(os.path.join(indir,filen),"r")
+ if outappend :
+ base,ext = os.path.splitext(filen)
+ outfilen = base+outappend+ext
+ else :
+ outfilen = filen
+ outfile = open(os.path.join(outdir,outfilen),"w")
+ commentblock = False
+ for line in file:
+ line = line.rstrip()
+ # Skip comment blocks
+ if line[0:2] == "/*" :
+ outfile.write(line + "\n")
+ if line.find("*/") == -1 : commentblock = True
+ continue
+ if commentblock :
+ outfile.write(line + "\n")
+ if line.find("*/") != -1 : commentblock = False
+ continue
+ # Scan for graphite names
+ cpos = line.find("//")
+ if cpos == -1 :
+ scan = line
+ comment = ""
+ else :
+ scan = line[0:cpos]
+ comment = line[cpos:]
+            tmpline = ""
+            m = re.search(r'[\s(\[,]g\w+?[\s)\],?:;=]', " " + scan + " ")
+            while m :
+                gname = m.group(0)[1:-1]
+                if gname in names :
+                    gname = names[gname]
+                else :
+                    if gname not in missed and gname not in exceptions :
+                        logger.log(gname + " from '" + line.strip() + "' in " + filen + " missing from csv", "W")
+                        missed.append(gname) # only log each missed name once
+                tmpline = tmpline + scan[0:m.start()] + gname
+                scan = scan[m.end()-2:]
+                m = re.search(r'[\s(\[,]g\w+?[\s)\],?:;=]', " " + scan + " ")
+ tmpline = tmpline + scan + comment
+
+ # Scan for postscript statements
+ scan = tmpline[0:tmpline.find("//")] if tmpline.find("//") != -1 else tmpline
+ newline = ""
+ lastend = 0
+
+            for m in re.finditer(r'postscript\(.+?\)', scan) :
+ psname = m.group(0)[12:-2]
+ if psname in psnames :
+ psname = psnames[psname]
+ else :
+ if psname not in psmissed :
+ logger.log(psname + " from '" + line.strip() + "' in " + filen + " missing from ps csv", "W")
+ psmissed.append(psname) # only log each missed name once
+ newline = newline + scan[lastend:m.start()+12] + psname
+ lastend = m.end()-2
+
+ newline = newline + tmpline[lastend:]
+ outfile.write(newline + "\n")
+ file.close()
+ outfile.close()
+ if missed != [] : logger.log("Names were missed from the csv file - see log file for details","E")
+ return
+
+def cmd() : execute(None,doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfchangettfglyphnames.py b/lib/silfont/scripts/psfchangettfglyphnames.py
new file mode 100644
index 0000000..2c9fa37
--- /dev/null
+++ b/lib/silfont/scripts/psfchangettfglyphnames.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+__doc__ = 'Rename the glyphs in a ttf file based on production names in a UFO'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Alan Ward'
+
+# Rename the glyphs in a ttf file based on production names in a UFO
+# using same technique as fontmake.
+# Production names are taken from the public.postscriptNames mapping
+# in the UFO's lib.plist (see the ufo2ft PostProcessor comments)
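+#
+# Illustrative invocation (hypothetical file names):
+#   psfchangettfglyphnames MyFont-Regular.ufo MyFont-Regular.ttf MyFont-Regular-prod.ttf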
+
+from silfont.core import execute
+import defcon, fontTools.ttLib, ufo2ft
+
+argspec = [
+ ('iufo', {'help': 'Input UFO folder'}, {}),
+ ('ittf', {'help': 'Input ttf file name'}, {}),
+ ('ottf', {'help': 'Output ttf file name'}, {})]
+
+def doit(args):
+ ufo = defcon.Font(args.iufo)
+ ttf = fontTools.ttLib.TTFont(args.ittf)
+
+ args.logger.log('Renaming the input ttf glyphs based on production names in the UFO', 'P')
+ postProcessor = ufo2ft.PostProcessor(ttf, ufo)
+ ttf = postProcessor.process(useProductionNames=True, optimizeCFF=False)
+
+ args.logger.log('Saving the output ttf file', 'P')
+ ttf.save(args.ottf)
+
+ args.logger.log('Done', 'P')
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == '__main__': cmd()
diff --git a/lib/silfont/scripts/psfcheckbasicchars.py b/lib/silfont/scripts/psfcheckbasicchars.py
new file mode 100644
index 0000000..c86beac
--- /dev/null
+++ b/lib/silfont/scripts/psfcheckbasicchars.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+__doc__ = '''Checks a UFO for the presence of glyphs that represent the
+Recommended characters for Non-Roman fonts and warns if any are missing.
+http://scriptsource.org/entry/gg5wm9hhd3'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+from silfont.util import required_chars
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('-r', '--rtl', {'help': 'Also include characters just for RTL scripts', 'action': 'store_true'}, {}),
+ ('-s', '--silpua', {'help': 'Also include characters in SIL PUA block', 'action': 'store_true'}, {}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_checkbasicchars.log'})]
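+
+# Illustrative invocation (hypothetical UFO name), also checking the RTL and SIL PUA sets:
+#   psfcheckbasicchars -r -s MyFont-Regular.ufo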
+
+def doit(args) :
+ font = args.ifont
+ logger = args.logger
+
+ rationales = {
+ "A": "in Codepage 1252",
+ "B": "in MacRoman",
+ "C": "for publishing",
+ "D": "for Non-Roman fonts and publishing",
+ "E": "by Google Fonts",
+ "F": "by TeX for visible space",
+ "G": "for encoding conversion utilities",
+ "H": "in case Variation Sequences are defined in future",
+ "I": "to detect byte order",
+ "J": "to render combining marks in isolation",
+ "K": "to view sidebearings for every glyph using these characters"}
+
+ charsets = ["basic"]
+ if args.rtl: charsets.append("rtl")
+ if args.silpua: charsets.append("sil")
+
+ req_chars = required_chars(charsets)
+
+ glyphlist = font.deflayer.keys()
+
+ for glyphn in glyphlist :
+ glyph = font.deflayer[glyphn]
+ if len(glyph["unicode"]) == 1 :
+ unival = glyph["unicode"][0].hex
+ if unival in req_chars:
+ del req_chars[unival]
+
+ cnt = len(req_chars)
+ if cnt > 0:
+ for usv in sorted(req_chars.keys()):
+ item = req_chars[usv]
+ psname = item["ps_name"]
+ gname = item["glyph_name"]
+ name = psname if psname == gname else psname + ", " + gname
+ logger.log("U+" + usv + " from the " + item["sil_set"] +
+ " set has no representative glyph (" + name + ")", "W")
+ logger.log("Rationale: This character is needed " + rationales[item["rationale"]], "I")
+ if item["notes"]:
+ logger.log(item["notes"], "I")
+ logger.log("There are " + str(cnt) + " required characters missing", "E")
+
+ return
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfcheckclassorders.py b/lib/silfont/scripts/psfcheckclassorders.py
new file mode 100644
index 0000000..12654c9
--- /dev/null
+++ b/lib/silfont/scripts/psfcheckclassorders.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+'''verify classes defined in xml have correct ordering where needed
+
+Looks for comment lines in the classes.xml file that match the string:
+ *NEXT n CLASSES MUST MATCH*
+where n is the number of upcoming class definitions that must result in the
+same glyph alignment when glyph names are sorted by TTF order (as described
+in the glyph_data.csv file).
+'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+import re
+import types
+from xml.etree import ElementTree as ET
+from silfont.core import execute
+
+argspec = [
+ ('classes', {'help': 'class definition in XML format', 'nargs': '?', 'default': 'classes.xml'}, {'type': 'infile'}),
+ ('glyphdata', {'help': 'Glyph info csv file', 'nargs': '?', 'default': 'glyph_data.csv'}, {'type': 'incsv'}),
+ ('--gname', {'help': 'Column header for glyph name', 'default': 'glyph_name'}, {}),
+ ('--sort', {'help': 'Column header(s) for sort order', 'default': 'sort_final'}, {}),
+]
+
+# Dictionary of glyphName : sortValue
+sorts = dict()
+
+# Keep track of glyphs mentioned in classes but not in glyph_data.csv
+missingGlyphs = set()
+
+def doit(args):
+ logger = args.logger
+
+ # Read input csv to get glyph sort order
+ incsv = args.glyphdata
+ fl = incsv.firstline
+ if fl is None: logger.log("Empty input file", "S")
+ if args.gname in fl:
+ glyphnpos = fl.index(args.gname)
+ else:
+ logger.log("No" + args.gname + "field in csv headers", "S")
+ if args.sort in fl:
+ sortpos = fl.index(args.sort)
+ else:
+ logger.log('No "' + args.sort + '" heading in csv headers"', "S")
+ next(incsv.reader, None) # Skip first line with containing headers
+ for line in incsv:
+ glyphn = line[glyphnpos]
+ if len(glyphn) == 0:
+ continue # No need to include cases where name is blank
+ sorts[glyphn] = float(line[sortpos])
+
+ # RegEx we are looking for in comments
+    matchCountRE = re.compile(r"\*NEXT ([1-9]\d*) CLASSES MUST MATCH\*")
+
+ # parse classes.xml but include comments
+ class MyTreeBuilder(ET.TreeBuilder):
+ def comment(self, data):
+ res = matchCountRE.search(data)
+ if res:
+ # record the count of classes that must match
+ self.start(ET.Comment, {})
+ self.data(res.group(1))
+ self.end(ET.Comment)
+ doc = ET.parse(args.classes, parser=ET.XMLParser(target=MyTreeBuilder())).getroot()
+
+ # process results looking for both class elements and specially formatted comments
+ matchCount = 0
+ refClassList = None
+ refClassName = None
+
+ for child in doc:
+ if isinstance(child.tag, types.FunctionType):
+ # Special type used for comments
+            if matchCount > 0:
+                logger.log("Unexpected match request '{}': matching {} is not yet complete".format(child.text, refClassName), "E")
+            refClassName = None
+            matchCount = int(child.text)
+
+ elif child.tag == 'class':
+ l = orderClass(child, logger) # Do this so we record classes whether we match them or not.
+ if matchCount > 0:
+ matchCount -= 1
+ className = child.attrib['name']
+ if refClassName is None:
+ refClassList = l
+ refLen = len(refClassList)
+ refClassName = className
+ else:
+ # compare ref list and l
+ if len(l) != refLen:
+ logger.log("Class {} (length {}) and {} (length {}) have unequal length".format(refClassName, refLen, className, len(l)), "E")
+ else:
+ errCount = 0
+ for i in range(refLen):
+ if l[i][0] != refClassList[i][0]:
+ logger.log ("Class {} and {} inconsistent order glyphs {} and {}".format(refClassName, className, refClassList[i][2], l[i][2]), "E")
+ errCount += 1
+ if errCount > 5:
+ logger.log ("Abandoning compare between Classes {} and {}".format(refClassName, className), "E")
+ break
+ if matchCount == 0:
+ refClassName = None
+
+ # List glyphs mentioned in classes.xml but not present in glyph_data:
+ if len(missingGlyphs):
+ logger.log('Glyphs mentioned in classes.xml but not present in glyph_data: ' + ', '.join(sorted(missingGlyphs)), 'W')
+
+
+classes = {} # Keep record of all classes we've seen so we can flatten references
+
+def orderClass(classElement, logger):
+ # returns a list of tuples, each containing (indexWithinClass, sortOrder, glyphName)
+ # list is sorted by sortOrder
+ glyphList = classElement.text.split()
+ res = []
+ for i in range(len(glyphList)):
+ token = glyphList[i]
+ if token.startswith('@'):
+ # Nested class
+ cname = token[1:]
+ if cname in classes:
+ res.extend(classes[cname])
+ else:
+ logger.log("Invalid fea: class {} referenced before being defined".format(cname),"S")
+ else:
+ # simple glyph name -- make sure it is in glyph_data:
+ if token in sorts:
+ res.append((i, sorts[token], token))
+ else:
+ missingGlyphs.add(token)
+
+ classes[classElement.attrib['name']] = res
+ return sorted(res, key=lambda x: x[1])
+
+
+
+def cmd() : execute(None,doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfcheckftml.py b/lib/silfont/scripts/psfcheckftml.py
new file mode 100644
index 0000000..84df95f
--- /dev/null
+++ b/lib/silfont/scripts/psfcheckftml.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+'''Test structural integrity of one or more ftml files
+
+Assumes ftml files have already validated against FTML.dtd, for example by using:
+ xmllint --noout --dtdvalid FTML.dtd inftml.ftml
+
+Verifies that:
+ - silfont.ftml can parse the file
+  - every stylename is defined in the <styles> list '''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2021 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+import glob
+from silfont.ftml import Fxml, Ftest
+from silfont.core import execute
+
+argspec = [
+ ('inftml', {'help': 'Input ftml filename pattern (default: *.ftml) ', 'nargs' : '?', 'default' : '*.ftml'}, {}),
+]
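+
+# Illustrative invocation (hypothetical path), checking every ftml file under tests/:
+#   psfcheckftml "tests/*.ftml"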
+
+def doit(args):
+ logger = args.logger
+    fnames = glob.glob(args.inftml)
+    if len(fnames) == 0:
+        logger.log(f'No files matching "{args.inftml}" found.','E')
+    for fname in fnames:
+ logger.log(f'checking {fname}', 'P')
+ unknownStyles = set()
+ usedStyles = set()
+
+ # recursively find and check all <test> elements in a <testsgroup>
+ def checktestgroup(testgroup):
+ for test in testgroup.tests:
+ # Not sure why, but sub-testgroups are also included in tests, so filter those out for now
+ if isinstance(test, Ftest) and test.stylename:
+ sname = test.stylename
+ usedStyles.add(sname)
+ if sname is not None and sname not in unknownStyles and \
+ not (hasStyles and sname in ftml.head.styles):
+ logger.log(f' stylename "{sname}" not defined in head/styles', 'E')
+ unknownStyles.add(sname)
+ # recurse to nested testgroups if any:
+ if testgroup.testgroups is not None:
+ for subgroup in testgroup.testgroups:
+ checktestgroup(subgroup)
+
+ with open(fname,encoding='utf8') as f:
+ # Attempt to parse the ftml file
+ ftml = Fxml(f)
+ hasStyles = ftml.head.styles is not None # Whether or not any styles are defined in head element
+
+ # Look through all tests for undefined styles:
+ for testgroup in ftml.testgroups:
+ checktestgroup(testgroup)
+
+ if hasStyles:
+ # look for unused styles:
+ for style in ftml.head.styles:
+ if style not in usedStyles:
+ logger.log(f' defined style "{style}" not used in any test', 'W')
+
+def cmd() : execute(None,doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfcheckglyphinventory.py b/lib/silfont/scripts/psfcheckglyphinventory.py
new file mode 100644
index 0000000..37a5ffc
--- /dev/null
+++ b/lib/silfont/scripts/psfcheckglyphinventory.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+__doc__ = '''Warn for differences in glyph inventory and encoding between UFO and input file (e.g., glyph_data.csv).
+Input file can be:
+ - simple text file with one glyph name per line
+ - csv file with headers, using headers "glyph_name" and, if present, "USV"'''
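+# A minimal sketch of a csv-style input (illustrative values):
+#   glyph_name,USV
+#   space,0020
+#   zwj,200D
+#   dollar.alt,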
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2020 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from silfont.core import execute
+
+argspec = [
+ ('ifont', {'help': 'Input UFO'}, {'type': 'infont'}),
+ ('-i', '--input', {'help': 'Input text file, default glyph_data.csv in current directory', 'default': 'glyph_data.csv'}, {'type': 'incsv'}),
+ ('--indent', {'help': 'size of indent (default 10)', 'type': int, 'default': 10}, {}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_checkinventory.log'})]
+
+def doit(args):
+ font = args.ifont
+ incsv = args.input
+ logger = args.logger
+ indent = ' '*args.indent
+
+ if not (args.quiet or 'scrlevel' in args.paramsobj.sets['command line']):
+ logger.raisescrlevel('W') # Raise level to W if not already W or higher
+
+ def csvWarning(msg, exception=None):
+ m = f'glyph_data line {incsv.line_num}: {msg}'
+ if exception is not None:
+            m += '; ' + str(exception)
+ logger.log(m, 'W')
+
+ # Get glyph names and encoding from input file
+ glyphFromCSVuid = {}
+ uidFromCSVglyph = {}
+
+ # Identify file format (plain text or csv) from first line
+ # If csv file, it must have headers for "glyph_name" and "USV"
+ fl = incsv.firstline
+ if fl is None: logger.log('Empty input file', 'S')
+ numfields = len(fl)
+ incsv.numfields = numfields
+ usvCol = None # Use this as a flag later to determine whether to check USV inventory
+ if numfields > 1: # More than 1 column, so must have headers
+ # Required columns:
+ try:
+            nameCol = fl.index('glyph_name')
+        except ValueError as e:
+            logger.log('Missing csv input field: ' + str(e), 'S')
+        except Exception as e:
+            logger.log('Error reading csv input field: ' + str(e), 'S')
+ # Optional columns:
+ usvCol = fl.index('USV') if 'USV' in fl else None
+
+ next(incsv.reader, None) # Skip first line with headers in
+
+ glyphList = set()
+ for line in incsv:
+ gname = line[nameCol]
+ if len(gname) == 0 or line[0].strip().startswith('#'):
+ continue # No need to include cases where name is blank or comment
+ glyphList.add(gname)
+
+            # Process USV, which may be an empty string, a single USV or a space-separated list of USVs
+            uidList = []
+            if usvCol is not None:
+                try:
+                    uidList = [int(x, 16) for x in line[usvCol].split()]
+                except ValueError as e:
+                    csvWarning("invalid USV '%s' (%s); ignored" % (line[usvCol], str(e)))
+ if len(uidList) == 1:
+ # Handle simple encoded glyphs
+ uid = uidList[0]
+ if uid in glyphFromCSVuid:
+ csvWarning('USV %04X previously seen; ignored' % uid)
+ uidList = []
+ else:
+ # Remember this glyph
+ glyphFromCSVuid[uid] = gname
+ uidFromCSVglyph[gname] = uid
+ elif numfields == 1: # Simple text file.
+ glyphList = set(line[0] for line in incsv)
+ else:
+ logger.log('Invalid csv file', 'S')
+
+ # Get the list of glyphs in the UFO
+ ufoList = set(font.deflayer.keys())
+
+ notInUFO = glyphList - ufoList
+ notInGlyphData = ufoList - glyphList
+
+ if len(notInUFO):
+ logger.log('Glyphs present in glyph_data but missing from UFO:\n' + '\n'.join(indent + g for g in sorted(notInUFO)), 'W')
+
+ if len(notInGlyphData):
+ logger.log('Glyphs present in UFO but missing from glyph_data:\n' + '\n'.join(indent + g for g in sorted(notInGlyphData)), 'W')
+
+ if len(notInUFO) == 0 and len(notInGlyphData) == 0:
+ logger.log('No glyph inventory differences found', 'P')
+
+ if usvCol:
+        # We can check USV inventory of glyphs in common
+ inBoth = glyphList & ufoList # Glyphs we want to examine
+ csvEncodings = set(f'{gname}|{uidFromCSVglyph[gname]:04X}' for gname in filter(lambda x: x in uidFromCSVglyph, inBoth))
+ ufoEncodings = set(f'{gname}|{int(x.hex, 16):04X}' for gname in inBoth for x in font.deflayer[gname]['unicode'])
+
+ notInUFO = csvEncodings - ufoEncodings
+ notInGlyphData = ufoEncodings - csvEncodings
+
+ if len(notInUFO):
+ logger.log('Encodings present in glyph_data but missing from UFO:\n' + '\n'.join(indent + g for g in sorted(notInUFO)), 'W')
+
+ if len(notInGlyphData):
+ logger.log('Encodings present in UFO but missing from glyph_data:\n' + '\n'.join(indent + g for g in sorted(notInGlyphData)), 'W')
+
+ if len(notInUFO) == 0 and len(notInGlyphData) == 0:
+ logger.log('No glyph encoding differences found', 'P')
+
+ else:
+ logger.log('Glyph encodings not compared', 'P')
+
+
+def cmd(): execute('UFO', doit, argspec)
+if __name__ == '__main__': cmd()
diff --git a/lib/silfont/scripts/psfcheckinterpolatable.py b/lib/silfont/scripts/psfcheckinterpolatable.py
new file mode 100644
index 0000000..abcf526
--- /dev/null
+++ b/lib/silfont/scripts/psfcheckinterpolatable.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+__doc__ = '''Check that the ufos in a designspace file are interpolatable'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2021 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+from fontParts.world import OpenFont
+import fontTools.designspaceLib as DSD
+
+argspec = [
+ ('designspace', {'help': 'Design space file'}, {'type': 'filename'}),
+ ('-l','--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_checkinterp.log'}),
+ ]
+
+def doit(args) :
+ logger = args.logger
+
+ ds = DSD.DesignSpaceDocument()
+ ds.read(args.designspace)
+ if len(ds.sources) == 1: logger.log("The design space file has only one source UFO", "S")
+
+    # Find all the UFOs from the DS Sources. Where there is more than one, the primary one will be considered to be
+    # the one where info copy="1" is set (as per psfsyncmasters). If not set for any, use the first ufo.
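+    # An illustrative designspace source entry marking the primary master might look like:
+    #   <source filename="MyFont-Regular.ufo" ...> <info copy="1"/> ... </source>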
+ pufo = None
+ otherfonts = {}
+ for source in ds.sources:
+ ufo = source.path
+ try:
+ font = OpenFont(ufo)
+ except Exception as e:
+ logger.log("Unable to open " + ufo, "S")
+ if source.copyInfo:
+ if pufo: logger.log('Multiple fonts with <info copy="1" />', "S")
+ pufo = ufo
+ pfont = font
+ else:
+ otherfonts[ufo] = font
+    if pufo is None: # If we can't identify the primary font by copyInfo, just use the first one
+ pufo = ds.sources[0].path
+ pfont = otherfonts[pufo]
+ del otherfonts[pufo]
+
+ pinventory = set(glyph.name for glyph in pfont)
+
+ for oufo in otherfonts:
+ logger.log(f'Comparing {pufo} with {oufo}', 'P')
+ ofont = otherfonts[oufo]
+ oinventory = set(glyph.name for glyph in ofont)
+
+ if pinventory != oinventory:
+ logger.log("The glyph inventories in the two UFOs differ", "E")
+ for glyphn in sorted(pinventory - oinventory):
+ logger.log(f'{glyphn} is only in {pufo}', "W")
+ for glyphn in sorted(oinventory - pinventory):
+ logger.log(f'{glyphn} is only in {oufo}', "W")
+ else:
+ logger.log("The UFOs have the same glyph inventories", "P")
+ # Are glyphs compatible for interpolation
+ incompatibles = {}
+ for glyphn in pinventory & oinventory:
+ compatible, report = pfont[glyphn].isCompatible(ofont[glyphn])
+ if not compatible: incompatibles[glyphn] = report
+ if incompatibles:
+ logger.log(f'{len(incompatibles)} glyphs are not interpolatable', 'E')
+ for glyphn in sorted(incompatibles):
+ logger.log(f'{glyphn} is not interpolatable', 'W')
+ logger.log(incompatibles[glyphn], "I")
+ if logger.scrlevel == "W": logger.log("To see detailed reports run with scrlevel and/or loglevel set to I")
+ else:
+ logger.log("All the glyphs are interpolatable", "P")
+
+def cmd() : execute(None,doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfcompdef2xml.py b/lib/silfont/scripts/psfcompdef2xml.py
new file mode 100755
index 0000000..62f9972
--- /dev/null
+++ b/lib/silfont/scripts/psfcompdef2xml.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+__doc__ = 'convert composite definition file to XML format'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Rowe'
+
+from silfont.core import execute
+from silfont.etutil import ETWriter
+from silfont.comp import CompGlyph
+from xml.etree import ElementTree as ET
+
+# specify three parameters: input file (single line format), output file (XML format), log file
+# and optional -p indentFirst " " -p indentIncr " " -p "PSName,UID,with,at,x,y" for XML formatting.
+argspec = [
+ ('input',{'help': 'Input file of CD in single line format'}, {'type': 'infile'}),
+ ('output',{'help': 'Output file of CD in XML format'}, {'type': 'outfile', 'def': '_out.xml'}),
+ ('log',{'help': 'Log file'},{'type': 'outfile', 'def': '_log.txt'}),
+ ('-p','--params',{'help': 'XML formatting parameters: indentFirst, indentIncr, attOrder','action': 'append'}, {'type': 'optiondict'})]
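+
+# Illustrative invocation (hypothetical file names); -p values are name=value pairs:
+#   psfcompdef2xml comps.txt comps.xml comps.log -p attOrder=PSName,UID,with,at,x,y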
+
+def doit(args) :
+ ofile = args.output
+ lfile = args.log
+ filelinecount = 0
+ linecount = 0
+ elementcount = 0
+ cgobj = CompGlyph()
+ f = ET.Element('font')
+ for line in args.input.readlines():
+ filelinecount += 1
+ testline = line.strip()
+ if len(testline) > 0 and testline[0:1] != '#': # not whitespace or comment
+ linecount += 1
+ cgobj.CDline=line
+ cgobj.CDelement=None
+ try:
+ cgobj.parsefromCDline()
+ if cgobj.CDelement != None:
+ f.append(cgobj.CDelement)
+ elementcount += 1
+ except ValueError as e:
+ lfile.write("Line "+str(filelinecount)+": "+str(e)+'\n')
+ if linecount != elementcount:
+ lfile.write("Lines read from input file: " + str(filelinecount)+'\n')
+ lfile.write("Lines parsed (excluding blank and comment lines): " + str(linecount)+'\n')
+ lfile.write("Valid glyphs found: " + str(elementcount)+'\n')
+# instead of simple serialization with: ofile.write(ET.tostring(f))
+# create ETWriter object and specify indentation and attribute order to get normalized output
+ indentFirst = " "
+ indentIncr = " "
+ attOrder = "PSName,UID,with,at,x,y"
+ for k in args.params:
+ if k == 'indentIncr': indentIncr = args.params['indentIncr']
+ elif k == 'indentFirst': indentFirst = args.params['indentFirst']
+ elif k == 'attOrder': attOrder = args.params['attOrder']
+ x = attOrder.split(',')
+ attributeOrder = dict(zip(x,range(len(x))))
+ etwobj=ETWriter(f, indentFirst=indentFirst, indentIncr=indentIncr, attributeOrder=attributeOrder)
+ ofile.write(etwobj.serialize_xml())
+
+ return
+
+def cmd() : execute(None,doit,argspec)
+if __name__ == "__main__": cmd()
+
diff --git a/lib/silfont/scripts/psfcompressgr.py b/lib/silfont/scripts/psfcompressgr.py
new file mode 100755
index 0000000..5802cce
--- /dev/null
+++ b/lib/silfont/scripts/psfcompressgr.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+__doc__ = 'Compress Graphite tables in a font'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Martin Hosken'
+
+argspec = [
+ ('ifont',{'help': 'Input TTF'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output TTF','nargs': '?' }, {'type': 'outfont'}),
+ ('-l','--log',{'help': 'Optional log file'}, {'type': 'outfile', 'def': '_compressgr', 'optlog': True})
+]
+
+from silfont.core import execute
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
+import lz4.block
+import sys, struct
+
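+# For reference, an LZ4 block is a series of "sequences". Each starts with a token
+# byte: the high nibble is the literal length and the low nibble the extra match
+# length beyond the 4-byte minimum (a nibble value of 15 is extended by further
+# bytes, each 255 meaning "add 255 and read another"). The literal bytes follow,
+# then a 2-byte little-endian match offset and any extended match length bytes.
+# lz4tuple and parseTuple below walk these sequences so that compressGr can rewrite
+# the final, literals-only sequence to carry the table's last 4 bytes uncompressed.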
+class lz4tuple(object) :
+ def __init__(self, start) :
+ self.start = start
+ self.literal = start
+ self.literal_len = 0
+ self.match_dist = 0
+ self.match_len = 0
+ self.end = 0
+
+ def __str__(self) :
+ return "lz4tuple(@{},{}+{},-{}+{})={}".format(self.start, self.literal, self.literal_len, self.match_dist, self.match_len, self.end)
+
+def read_literal(t, dat, start, datlen) :
+ if t == 15 and start < datlen :
+ v = ord(dat[start:start+1])
+ t += v
+ while v == 0xFF and start < datlen :
+ start += 1
+ v = ord(dat[start:start+1])
+ t += v
+ start += 1
+ return (t, start)
+
+def write_literal(num, shift) :
+ res = []
+ if num > 14 :
+ res.append(15 << shift)
+ num -= 15
+ while num > 255 :
+ res.append(255)
+ num -= 255
+ res.append(num)
+ else :
+ res.append(num << shift)
+ return bytearray(res)
+
+def parseTuple(dat, start, datlen) :
+ res = lz4tuple(start)
+ token = ord(dat[start:start+1])
+ (res.literal_len, start) = read_literal(token >> 4, dat, start+1, datlen)
+ res.literal = start
+ start += res.literal_len
+ res.end = start
+ if start > datlen - 2 :
+ return res
+ res.match_dist = ord(dat[start:start+1]) + (ord(dat[start+1:start+2]) << 8)
+ start += 2
+ (res.match_len, start) = read_literal(token & 0xF, dat, start, datlen)
+ res.end = start
+ return res
+
+def compressGr(dat, version) :
+ if ord(dat[1:2]) < version :
+ vstr = bytes([version]) if sys.version_info.major > 2 else chr(version)
+ dat = dat[0:1] + vstr + dat[2:]
+ datc = lz4.block.compress(dat[:-4], mode='high_compression', compression=16, store_size=False)
+ # now find the final tuple
+ end = len(datc)
+ start = 0
+ curr = lz4tuple(start)
+ while curr.end < end :
+ start = curr.end
+ curr = parseTuple(datc, start, end)
+ if curr.end > end :
+ print("Sync error: {!s}".format(curr))
+ newend = write_literal(curr.literal_len + 4, 4) + datc[curr.literal:curr.literal+curr.literal_len+1] + dat[-4:]
+ lz4hdr = struct.pack(">L", (1 << 27) + (len(dat) & 0x7FFFFFF))
+ return dat[0:4] + lz4hdr + datc[0:curr.start] + newend
+
+def doit(args) :
+ infont = args.ifont
+ for tag, version in (('Silf', 5), ('Glat', 3)) :
+ dat = infont.getTableData(tag)
+ newdat = bytes(compressGr(dat, version))
+ table = DefaultTable(tag)
+ table.decompile(newdat, infont)
+ infont[tag] = table
+ return infont
+
+def cmd() : execute('FT', doit, argspec)
+if __name__ == "__main__" : cmd()
+
diff --git a/lib/silfont/scripts/psfcopyglyphs.py b/lib/silfont/scripts/psfcopyglyphs.py
new file mode 100644
index 0000000..fe37bf7
--- /dev/null
+++ b/lib/silfont/scripts/psfcopyglyphs.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+__doc__ = """Copy glyphs from one UFO to another"""
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from xml.etree import ElementTree as ET
+from silfont.core import execute
+from silfont.ufo import makeFileName, Uglif
+import re
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-s','--source',{'help': 'Font to get glyphs from'}, {'type': 'infont'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': 'glyphlist.csv'}),
+ ('-f','--force',{'help' : 'Overwrite existing glyphs in the font', 'action' : 'store_true'}, {}),
+ ('-l','--log',{'help': 'Set log file name'}, {'type': 'outfile', 'def': '_copy.log'}),
+ ('-n', '--name', {'help': 'Include glyph named name', 'action': 'append'}, {}),
+ ('--rename',{'help' : 'Rename glyphs to names in this column'}, {}),
+ ('--unicode', {'help': 'Re-encode glyphs to USVs in this column'}, {}),
+ ('--scale',{'type' : float, 'help' : 'Scale glyphs by this factor'}, {})
+]
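+
+# A minimal sketch of an input csv (illustrative names), as used with
+# "--rename new_name --unicode USV":
+#   glyph_name,ps_name,new_name,USV
+#   dollar,dollar,dollar.alt,0024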
+
+class Glyph:
+ """details about a glyph we have, or need to, copy; mostly just for syntactic sugar"""
+
+ # Glyphs that are used *only* as component glyphs may have to be renamed if there already exists a glyph
+ # by the same name in the target font. we compute a new name by appending .copy1, .copy2, etc until we get a
+ # unique name. We keep track of the mapping from source font glyphname to target font glyphname using a dictionary.
+ # For ease of use, glyphs named by the input file (which won't have their names changed, see --force) will also
+ # be added to this dictionary because they can also be used as components.
+ nameMap = dict()
+
+ def __init__(self, oldname, newname="", psname="", dusv=None):
+ self.oldname = oldname
+ self.newname = newname or oldname
+ self.psname = psname or None
+ self.dusv = dusv or None
+ # Keep track of old-to-new name mapping
+ Glyph.nameMap[oldname] = self.newname
+
+
+# Mapping from decimal USV to glyphname in target font
+dusv2gname = None
+
+# RE for parsing glyph names and peeling off the .copyX if present in order to search for a unique name to use:
+gcopyRE = re.compile(r'(^.+?)(?:\.copy(\d+))?$')
+
+
+def copyglyph(sfont, tfont, g, args):
+ """copy glyph from source font to target font"""
+ # Generally, 't' variables are target, 's' are source. E.g., tfont is target font.
+
+ global dusv2gname
+ if not dusv2gname:
+        # Create mappings to find existing glyph name from decimal usv:
+ dusv2gname = {int(unicode.hex, 16): gname for gname in tfont.deflayer for unicode in tfont.deflayer[gname]['unicode']}
+ # NB: Assumes font is well-formed and has at most one glyph with any particular Unicode value.
+
+ # The layer where we want the copied glyph:
+ tlayer = tfont.deflayer
+
+ # if new name present in target layer, delete it.
+ if g.newname in tlayer:
+ # New name is already in font:
+ tfont.logger.log("Replacing glyph '{0}' with new glyph".format(g.newname), "V")
+ glyph = tlayer[g.newname]
+ # While here, remove from our mapping any Unicodes from the old glyph:
+ for unicode in glyph["unicode"]:
+ dusv = int(unicode.hex, 16)
+ if dusv in dusv2gname:
+ del dusv2gname[dusv]
+ # Ok, remove old glyph from the layer
+ tlayer.delGlyph(g.newname)
+ else:
+ # New name is not in the font:
+ tfont.logger.log("Adding glyph '{0}'".format(g.newname), "V")
+
+ # Create new glyph
+ glyph = Uglif(layer = tlayer)
+ # Set etree from source glyph
+ glyph.etree = ET.fromstring(sfont.deflayer[g.oldname].inxmlstr)
+ glyph.process_etree()
+ # Rename the glyph if needed
+ if glyph.name != g.newname:
+ # Use super to bypass normal glyph renaming logic since it isn't yet in the layer
+ super(Uglif, glyph).__setattr__("name", g.newname)
+ # add new glyph to layer:
+ tlayer.addGlyph(glyph)
+ tfont.logger.log("Added glyph '{0}'".format(g.newname), "V")
+
+ # todo: set psname if requested; adjusting any other glyphs in the font as needed.
+
+ # Adjust encoding of new glyph
+ if args.unicode:
+ # First remove any encodings the copied glyph had in the source font:
+ for i in range(len(glyph['unicode']) - 1, -1, -1):
+ glyph.remove('unicode', index=i)
+ if g.dusv:
+ # we want this glyph to be encoded.
+ # First remove this Unicode from any other glyph in the target font
+ if g.dusv in dusv2gname:
+ oglyph = tlayer[dusv2gname[g.dusv]]
+ for unicode in oglyph["unicode"]:
+ if int(unicode.hex,16) == g.dusv:
+ oglyph.remove("unicode", object=unicode)
+ tfont.logger.log("Removed USV {0:04X} from existing glyph '{1}'".format(g.dusv,dusv2gname[g.dusv]), "V")
+ break
+ # Now add and record it:
+ glyph.add("unicode", {"hex": '{:04X}'.format(g.dusv)})
+ dusv2gname[g.dusv] = g.newname
+ tfont.logger.log("Added USV {0:04X} to glyph '{1}'".format(g.dusv, g.newname), "V")
+
+ # Scale glyph if desired
+ if args.scale:
+ for e in glyph.etree.iter():
+ for attr in ('width', 'height', 'x', 'y', 'xOffset', 'yOffset'):
+ if attr in e.attrib: e.set(attr, str(int(float(e.get(attr))* args.scale)))
+
+ # Look through components, adjusting names and finding out if we need to copy some.
+ for component in glyph.etree.findall('./outline/component[@base]'):
+ oldname = component.get('base')
+ # Note: the following will cause recursion:
+ component.set('base', copyComponent(sfont, tfont, oldname ,args))
+
+
+
+def copyComponent(sfont, tfont, oldname, args):
+ """copy component glyph if not already copied; make sure name and psname are unique; return its new name"""
+ if oldname in Glyph.nameMap:
+ # already copied
+ return Glyph.nameMap[oldname]
+
+ # if oldname is already in the target font, make up a new name by adding ".copy1", incrementing as necessary
+ if oldname not in tfont.deflayer:
+ newname = oldname
+ tfont.logger.log("Copying component '{0}' with existing name".format(oldname), "V")
+ else:
+ x = gcopyRE.match(oldname)
+ base = x.group(1)
+ try: i = int(x.group(2))
+ except: i = 1
+ while "{0}.copy{1}".format(base,i) in tfont.deflayer:
+ i += 1
+ newname = "{0}.copy{1}".format(base,i)
+ tfont.logger.log("Copying component '{0}' with new name '{1}'".format(oldname, newname), "V")
+
+ # todo: something similar to above but for psname
+
+ # Now copy the glyph, giving it new name if needed.
+ copyglyph(sfont, tfont, Glyph(oldname, newname), args)
+
+ return newname
+
+def doit(args) :
+ sfont = args.source # source UFO
+ tfont = args.ifont # target UFO
+ incsv = args.input
+ logger = args.logger
+
+ # Get headings from csvfile:
+ fl = incsv.firstline
+ if fl is None: logger.log("Empty input file", "S")
+ numfields = len(fl)
+ incsv.numfields = numfields
+ # defaults for single column csv (no headers):
+ nameCol = 0
+ renameCol = None
+ psCol = None
+ usvCol = None
+ if numfields > 1 or args.rename or args.unicode:
+ # required columns:
+ try:
+            nameCol = fl.index('glyph_name')
+            if args.rename:
+                renameCol = fl.index(args.rename)
+            if args.unicode:
+                usvCol = fl.index(args.unicode)
+        except ValueError as e:
+            logger.log('Missing csv input field: ' + str(e), 'S')
+        except Exception as e:
+            logger.log('Error reading csv input field: ' + str(e), 'S')
+ # optional columns
+ psCol = fl.index('ps_name') if 'ps_name' in fl else None
+ if 'glyph_name' in fl:
+ next(incsv.reader, None) # Skip first line with headers in
+
+ # list of glyphs to copy
+ glist = list()
+
+ def checkname(oldname, newname = None):
+ if not newname: newname = oldname
+ if oldname in Glyph.nameMap:
+ logger.log("Line {0}: Glyph '{1}' specified more than once; only the first kept".format(incsv.line_num, oldname), 'W')
+ elif oldname not in sfont.deflayer:
+ logger.log("Line {0}: Glyph '{1}' is not in source font; skipping".format(incsv.line_num, oldname),"W")
+ elif newname in tfont.deflayer and not args.force:
+ logger.log("Line {0}: Glyph '{1}' already present; skipping".format(incsv.line_num, newname), "W")
+ else:
+ return True
+ return False
+
+ # glyphs specified in csv file
+ for r in incsv:
+ oldname = r[nameCol]
+ newname = r[renameCol] if args.rename else oldname
+ psname = r[psCol] if psCol is not None else None
+ if args.unicode and r[usvCol]:
+ # validate USV:
+ try:
+ dusv = int(r[usvCol],16)
+ except ValueError:
+ logger.log("Line {0}: Invalid USV '{1}'; ignored.".format(incsv.line_num, r[usvCol]), "W")
+ dusv = None
+ else:
+ dusv = None
+
+ if checkname(oldname, newname):
+ glist.append(Glyph(oldname, newname, psname, dusv))
+
+ # glyphs specified on the command line
+ if args.name:
+ for gname in args.name:
+ if checkname(gname):
+ glist.append(Glyph(gname))
+
+ # Ok, now process them:
+ if len(glist) == 0:
+ logger.log("No glyphs to copy", "S")
+
+ # copy glyphs by name
+ while len(glist) :
+ g = glist.pop(0)
+ tfont.logger.log("Copying source glyph '{0}' as '{1}'{2}".format(g.oldname, g.newname,
+ " (U+{0:04X})".format(g.dusv) if g.dusv else ""), "I")
+ copyglyph(sfont, tfont, g, args)
+
+ return tfont
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfcopymeta.py b/lib/silfont/scripts/psfcopymeta.py
new file mode 100755
index 0000000..1e031fd
--- /dev/null
+++ b/lib/silfont/scripts/psfcopymeta.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+__doc__ = '''Copy metadata between fonts in different (related) families
+Usually run against the master (regular) font in each family then data synced within family afterwards'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+import silfont.ufo as UFO
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('fromfont',{'help': 'From font file'}, {'type': 'infont'}),
+ ('tofont',{'help': 'To font file'}, {'type': 'infont'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_copymeta.log'}),
+ ('-r','--reportonly', {'help': 'Report issues but no updating', 'action': 'store_true', 'default': False},{})
+ ]
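+
+# Illustrative invocation (hypothetical UFO names), report-only comparison:
+#   psfcopymeta -r SourceFamily-Regular.ufo TargetFamily-Regular.ufo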
+
+def doit(args) :
+
+ fields = ["copyright", "openTypeNameDescription", "openTypeNameDesigner", "openTypeNameDesignerURL", "openTypeNameLicense", # General feilds
+ "openTypeNameLicenseURL", "openTypeNameManufacturer", "openTypeNameManufacturerURL", "openTypeOS2CodePageRanges",
+ "openTypeOS2UnicodeRanges", "openTypeOS2VendorID", "trademark",
+ "openTypeNameVersion", "versionMajor", "versionMinor", # Version fields
+ "ascender", "descender", "openTypeHheaAscender", "openTypeHheaDescender", "openTypeHheaLineGap", # Design fields
+ "openTypeOS2TypoAscender", "openTypeOS2TypoDescender", "openTypeOS2TypoLineGap", "openTypeOS2WinAscent", "openTypeOS2WinDescent"]
+ libfields = ["public.postscriptNames", "public.glyphOrder", "com.schriftgestaltung.glyphOrder"]
+
+ fromfont = args.fromfont
+ tofont = args.tofont
+ logger = args.logger
+ reportonly = args.reportonly
+
+ updatemessage = " to be updated: " if reportonly else " updated: "
+ precision = fromfont.paramset["precision"]
+ # Increase screen logging level to W unless specific level supplied on command-line
+ if not(args.quiet or "scrlevel" in args.paramsobj.sets["command line"]) : logger.scrlevel = "W"
+
+ # Process fontinfo.plist
+ ffi = fromfont.fontinfo
+ tfi = tofont.fontinfo
+ fupdated = False
+ for field in fields:
+ if field in ffi :
+ felem = ffi[field][1]
+ ftag = felem.tag
+ ftext = felem.text
+ if ftag == 'real' : ftext = processnum(ftext,precision)
+ message = field + updatemessage
+
+ if field in tfi : # Need to compare values to see if update is needed
+ telem = tfi[field][1]
+ ttag = telem.tag
+ ttext = telem.text
+ if ttag == 'real' : ttext = processnum(ttext,precision)
+
+ if ftag in ("real", "integer", "string") :
+ if ftext != ttext :
+ if field == "openTypeNameLicense" : # Too long to display all
+ addmess = " Old: '" + ttext[0:80] + "...' New: '" + ftext[0:80] + "...'"
+ else: addmess = " Old: '" + ttext + "' New: '" + str(ftext) + "'"
+ telem.text = ftext
+ logger.log(message + addmess, "W")
+ fupdated = True
+                elif ftag in ("true", "false") :
+                    if ftag != ttag :
+                        tfi.setelem(field, ET.fromstring("<" + ftag + "/>"))
+                        logger.log(message + " Old: '" + ttag + "' New: '" + str(ftag) + "'", "W")
+ fupdated = True
+ elif ftag == "array" : # Assume simple array with just values to compare
+ farray = []
+ for subelem in felem : farray.append(subelem.text)
+ tarray = []
+ for subelem in telem : tarray.append(subelem.text)
+ if farray != tarray :
+ tfi.setelem(field, ET.fromstring(ET.tostring(felem)))
+ logger.log(message + "Some values different Old: " + str(tarray) + " New: " + str(farray), "W")
+ fupdated = True
+                else : logger.log("Non-standard fontinfo field type: " + ftag + " for field " + field, "S")
+ else :
+ tfi.addelem(field, ET.fromstring(ET.tostring(felem)))
+ logger.log(message + "is missing from destination font so will be copied from source font", "W")
+ fupdated = True
+ else: # Field not in from font
+ if field in tfi :
+ logger.log( field + " is missing from source font but present in destination font", "E")
+ else :
+ logger.log( field + " is in neither font", "W")
+
+ # Process lib.plist - currently just public.postscriptNames and glyph order fields which are all simple dicts or arrays
+ flib = fromfont.lib
+ tlib = tofont.lib
+ lupdated = False
+ for field in libfields:
+ action = None
+ if field in flib:
+ if field in tlib: # Need to compare values to see if update is needed
+ if flib.getval(field) != tlib.getval(field):
+ action = "Updatefield"
+ else:
+ action = "Copyfield"
+ else:
+ action = "Error" if field == ("public.GlyphOrder", "public.postscriptNames") else "Warn"
+ issue = field + " not in source font lib.plist"
+
+ # Process the actions, create log messages etc
+ if action is None or action == "Ignore":
+ pass
+ elif action == "Warn":
+ logger.log(field + " needs manual correction: " + issue, "W")
+ elif action == "Error":
+ logger.log(field + " needs manual correction: " + issue, "E")
+ elif action in ("Updatefield", "Copyfield"): # Updating actions
+ lupdated = True
+ message = field + updatemessage
+ if action == "Copyfield":
+ message = message + "is missing so will be copied from source font"
+ tlib.addelem(field, ET.fromstring(ET.tostring(flib[field][1])))
+ elif action == "Updatefield":
+ message = message + "Some values different"
+ tlib.setelem(field, ET.fromstring(ET.tostring(flib[field][1])))
+ logger.log(message, "W")
+ else:
+ logger.log("Uncoded action: " + action + " - oops", "X")
+
+ # Now update on disk
+ if not reportonly:
+ if fupdated:
+ logger.log("Writing updated fontinfo.plist", "P")
+ UFO.writeXMLobject(tfi, tofont.outparams, tofont.ufodir, "fontinfo.plist", True, fobject=True)
+ if lupdated:
+ logger.log("Writing updated lib.plist", "P")
+ UFO.writeXMLobject(tlib, tofont.outparams, tofont.ufodir, "lib.plist", True, fobject=True)
+
+ return
+
+
+def processnum(text, precision) : # Apply same processing to real numbers that normalization will
+ if precision is not None:
+ val = round(float(text), precision)
+ if val == int(val) : val = int(val) # Removed trailing decimal .0
+ text = str(val)
+ return text
+
+
+def cmd(): execute("UFO",doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfcreateinstances.py b/lib/silfont/scripts/psfcreateinstances.py
new file mode 100644
index 0000000..468435f
--- /dev/null
+++ b/lib/silfont/scripts/psfcreateinstances.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+__doc__ = 'Generate instance UFOs from a designspace document and master UFOs'
+
+# Script to build instance UFOs from a designspace document
+# If a file is given, all instances are built
+# A particular instance to build can be specified using the -i option
+# and the 'name' attribute value for an 'instance' element in the designspace file
+# Or it can be specified using the -a and -v options
+# to specify any attribute and value pair for an 'instance' in the designspace file
+# If more than one instance matches, all will be built
+# A prefix for the output path can be specified (for smith processing)
+# If the location of an instance UFO matches a master's location,
+# glyphs are copied instead of calculated
+# This allows instances to build with glyphs that are not interpolatable
+# An option exists to calculate glyphs instead of copying them
+# If a folder is given using an option, all instances in all designspace files are built
+# Specifying an instance to build or an output path prefix is not supported with a folder
+# Also, all glyphs will be calculated
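+#
+# Illustrative invocation (hypothetical names), building one named instance with
+# output written under an "instances" prefix:
+#   psfcreateinstances -i "MyFamily SemiBold" -o instances MyFamily.designspace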
+
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Alan Ward'
+
+import os, re
+from mutatorMath.ufo.document import DesignSpaceDocumentReader
+from mutatorMath.ufo.instance import InstanceWriter
+from fontMath.mathGlyph import MathGlyph
+from mutatorMath.ufo import build as build_designspace
+from silfont.core import execute
+
+argspec = [
+ ('designspace_path', {'help': 'Path to designspace document (or folder of them)'}, {}),
+ ('-i', '--instanceName', {'help': 'Font name for instance to build'}, {}),
+ ('-a', '--instanceAttr', {'help': 'Attribute used to specify instance to build'}, {}),
+ ('-v', '--instanceVal', {'help': 'Value of attribute specifying instance to build'}, {}),
+ ('-f', '--folder', {'help': 'Build all designspace files in a folder','action': 'store_true'}, {}),
+ ('-o', '--output', {'help': 'Prepend path to all output paths'}, {}),
+ ('--forceInterpolation', {'help': 'If an instance matches a master, calculate glyphs instead of copying them',
+ 'action': 'store_true'}, {}),
+ ('--roundInstances', {'help': 'Apply integer rounding to all geometry when interpolating',
+ 'action': 'store_true'}, {}),
+ ('-l','--log',{'help': 'Log file (default: *_createinstances.log)'}, {'type': 'outfile', 'def': '_createinstances.log'}),
+ ('-W','--weightfix',{'help': 'Enable RIBBI style weight fixing', 'action': 'store_true'}, {}),
+]
+
+# Class factory to wrap a subclass in a closure to store values not defined in the original class
+# that our method overrides will utilize
+# The class methods will fail unless the class is generated by the factory, which is enforced by scoping
+# Using class attribs or global variables would violate encapsulation even more
+# and would only allow for one instance of the class
+
+weightClasses = {
+ 'bold': 700
+}
+
+def InstanceWriterCF(output_path_prefix, calc_glyphs, fix_weight):
+
+ class LocalInstanceWriter(InstanceWriter):
+ fixWeight = fix_weight
+
+ def __init__(self, path, *args, **kw):
+ if output_path_prefix:
+ path = os.path.join(output_path_prefix, path)
+ return super(LocalInstanceWriter, self).__init__(path, *args, **kw)
+
+ # Override the method used to calculate glyph geometry
+ # If copy_glyphs is true and the glyph being processed is in the same location
+ # (has all the same axes values) as a master UFO,
+ # then extract the glyph geometry directly into the target glyph.
+ # FYI, in the superclass method, m = buildMutator(); m.makeInstance() returns a MathGlyph
+ def _calculateGlyph(self, targetGlyphObject, instanceLocationObject, glyphMasters):
+ # Search for a glyphMaster with the same location as instanceLocationObject
+ found = False
+ if not calc_glyphs: # i.e. if copying glyphs
+ for item in glyphMasters:
+ locationObject = item['location'] # mutatorMath Location
+ if locationObject.sameAs(instanceLocationObject) == 0:
+ found = True
+ fontObject = item['font'] # defcon Font
+ glyphName = item['glyphName'] # string
+ glyphObject = MathGlyph(fontObject[glyphName])
+ glyphObject.extractGlyph(targetGlyphObject, onlyGeometry=True)
+ break
+
+ if not found: # includes case of calc_glyphs == True
+ super(LocalInstanceWriter, self)._calculateGlyph(targetGlyphObject,
+ instanceLocationObject,
+ glyphMasters)
+
+ def _copyFontInfo(self, targetInfo, sourceInfo):
+ super(LocalInstanceWriter, self)._copyFontInfo(targetInfo, sourceInfo)
+
+ if getattr(self, 'fixWeight', False):
+ # fixWeight is True since the --weightfix (or -W) option was specified
+
+ # This mode is used for RIBBI font builds,
+ # therefore the weight class can be determined
+ # by the style name
+ if self.font.info.styleMapStyleName.lower().startswith("bold"):
+ weight_class = 700
+ else:
+ weight_class = 400
+ else:
+ # fixWeight is False (or None)
+
+ # This mode is used for non-RIBBI font builds,
+ # therefore the weight class can be determined
+ # by the weight axis map in the Designspace file
+ foundmap = False
+ weight = int(self.locationObject["weight"])
+ for map_space in self.axes["weight"]["map"]:
+ userspace = int(map_space[0]) # called input in the Designspace file
+ designspace = int(map_space[1]) # called output in the Designspace file
+ if designspace == weight:
+ weight_class = userspace
+ foundmap = True
+ if not foundmap:
+ weight_class = 399 # Dummy value designed to look non-standard
+ logger.log(f'No entry in designspace axis mapping for {weight}; set to 399', 'W')
+ setattr(targetInfo, 'openTypeOS2WeightClass', weight_class)
+
+ localinfo = {}
+ for k in (('openTypeNameManufacturer', None),
+ ('styleMapFamilyName', 'familyName'),
+ ('styleMapStyleName', 'styleName')):
+ localinfo[k[0]] = getattr(targetInfo, k[0], (getattr(targetInfo, k[1]) if k[1] is not None else ""))
+ localinfo['styleMapStyleName'] = localinfo['styleMapStyleName'].title()
+ localinfo['year'] = re.sub(r'^.*?([0-9]+)\s*$', r'\1', getattr(targetInfo, 'openTypeNameUniqueID'))
+ uniqueID = "{openTypeNameManufacturer}: {styleMapFamilyName} {styleMapStyleName} {year}".format(**localinfo)
+ setattr(targetInfo, 'openTypeNameUniqueID', uniqueID)
+
+ return LocalInstanceWriter
+
+logger = None
+severe_error = False
+def progress_func(state="update", action=None, text=None, tick=0):
+ global severe_error
+ if logger:
+ if state == 'error':
+ if str(action) == 'unicodes':
+ logger.log("%s: %s\n%s" % (state, str(action), str(text)), 'W')
+ else:
+ logger.log("%s: %s\n%s" % (state, str(action), str(text)), 'E')
+ severe_error = True
+ else:
+ logger.log("%s: %s\n%s" % (state, str(action), str(text)), 'I')
+
+def doit(args):
+ global logger
+ logger = args.logger
+
+ designspace_path = args.designspace_path
+ instance_font_name = args.instanceName
+ instance_attr = args.instanceAttr
+ instance_val = args.instanceVal
+ output_path_prefix = args.output
+ calc_glyphs = args.forceInterpolation
+ build_folder = args.folder
+ round_instances = args.roundInstances
+
+ if instance_font_name and (instance_attr or instance_val):
+ args.logger.log('--instanceName is mutually exclusive with --instanceAttr or --instanceVal','S')
+ if (instance_attr and not instance_val) or (instance_val and not instance_attr):
+ args.logger.log('--instanceAttr and --instanceVal must be used together', 'S')
+ if (build_folder and (instance_font_name or instance_attr or instance_val
+ or output_path_prefix or calc_glyphs)):
+ args.logger.log('--folder cannot be used with options: -i, -a, -v, -o, --forceInterpolation', 'S')
+
+ args.logger.log('Interpolating master UFOs from designspace', 'P')
+ if not build_folder:
+ if not os.path.isfile(designspace_path):
+ args.logger.log('A designspace file (not a folder) is required', 'S')
+ reader = DesignSpaceDocumentReader(designspace_path, ufoVersion=3,
+ roundGeometry=round_instances,
+ progressFunc=progress_func)
+ # assignment to an internal object variable is a kludge, probably should use subclassing instead
+ reader._instanceWriterClass = InstanceWriterCF(output_path_prefix, calc_glyphs, args.weightfix)
+ if calc_glyphs:
+ args.logger.log('Interpolating glyphs where an instance font location matches a master', 'P')
+ if instance_font_name or instance_attr:
+ key_attr = instance_attr if instance_val else 'name'
+ key_val = instance_val if instance_attr else instance_font_name
+ reader.readInstance((key_attr, key_val))
+ else:
+ reader.readInstances()
+ else:
+ # The below uses a utility function that's part of mutatorMath
+ # It will accept a folder and processes all designspace files there
+ args.logger.log('Interpolating glyphs where an instance font location matches a master', 'P')
+ build_designspace(designspace_path,
+ outputUFOFormatVersion=3, roundGeometry=round_instances,
+ progressFunc=progress_func)
+
+ if not severe_error:
+ args.logger.log('Done', 'P')
+ else:
+ args.logger.log('Done with severe error', 'S')
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == '__main__': cmd()
+
+# Future development might use: fonttools\Lib\fontTools\designspaceLib to read
+# the designspace file (which is the most up-to-date approach)
+# then pass that object to mutatorMath, but there's no way to do that today.
+
+
+# For reference:
+# from mutatorMath/ufo/__init__.py:
+# build() is a convenience function for reading and executing a designspace file.
+# documentPath: filepath to the .designspace document
+# outputUFOFormatVersion: ufo format for output
+# verbose: True / False for lots or no feedback [to log file]
+# logPath: filepath to a log file
+# progressFunc: an optional callback to report progress.
+# see mutatorMath.ufo.tokenProgressFunc
+#
+# class DesignSpaceDocumentReader(object):
+# def __init__(self, documentPath,
+# ufoVersion,
+# roundGeometry=False,
+# verbose=False,
+# logPath=None,
+# progressFunc=None
+# ):
+#
+# def readInstance(self, key, makeGlyphs=True, makeKerning=True, makeInfo=True):
+# def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
diff --git a/lib/silfont/scripts/psfcsv2comp.py b/lib/silfont/scripts/psfcsv2comp.py
new file mode 100644
index 0000000..7a4a960
--- /dev/null
+++ b/lib/silfont/scripts/psfcsv2comp.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+__doc__ = '''generate composite definitions from csv file'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+import re
+from silfont.core import execute
+
+argspec = [
+ ('output',{'help': 'Output file containing composite definitions'}, {'type': 'outfile'}),
+ ('-i','--input',{'help': 'Glyph info csv file'}, {'type': 'incsv', 'def': 'glyph_data.csv'}),
+ ('-f','--fontcode',{'help': 'letter to filter for glyph_data'},{}),
+ ('--gname', {'help': 'Column header for glyph name', 'default': 'glyph_name'}, {}),
+ ('--base', {'help': 'Column header for name of base', 'default': 'base'}, {}),
+ ('--usv', {'help': 'Column header for USV'}, {}),
+ ('--anchors', {'help': 'Column header(s) for APs to compose', 'default': 'above,below'}, {}),
+ ('-r','--report',{'help': 'Set reporting level for log', 'type':str, 'choices':['X','S','E','P','W','I','V']},{}),
+ ('-l','--log',{'help': 'Set log file name'}, {'type': 'outfile', 'def': 'csv2comp.log'}),
+ ]
+
+def doit(args):
+ logger = args.logger
+ if args.report: logger.loglevel = args.report
+ # infont = args.ifont
+ incsv = args.input
+ output = args.output
+
+ def csvWarning(msg, exception = None):
+ m = "glyph_data warning: %s at line %d" % (msg, incsv.line_num)
+ if exception is not None:
+            m += '; ' + str(exception)
+ logger.log(m, 'W')
+
+ if args.fontcode is not None:
+ whichfont = args.fontcode.strip().lower()
+ if len(whichfont) != 1:
+ logger.log('-f parameter must be a single letter', 'S')
+ else:
+ whichfont = None
+
+ # Which headers represent APs to use:
+ apList = args.anchors.split(',')
+ if len(apList) == 0:
+ logger.log('--anchors option value "%s" is invalid' % args.anchors, 'S')
+
+ # Get headings from csvfile:
+ fl = incsv.firstline
+ if fl is None: logger.log("Empty input file", "S")
+ # required columns:
+ try:
+ nameCol = fl.index(args.gname)
+ baseCol = fl.index(args.base)
+ apCols = [fl.index(ap) for ap in apList]
+ if args.usv is not None:
+ usvCol = fl.index(args.usv)
+ else:
+ usvCol = None
+    except ValueError as e:
+        logger.log('Missing csv input field: ' + str(e), 'S')
+    except Exception as e:
+        logger.log('Error reading csv input field: ' + str(e), 'S')
+
+    # Now strip the AP names and pair them with their columns so they are easy to iterate:
+ apInfo = list(zip(apCols, [x.strip() for x in apList]))
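+
+    # Each composite definition written below has the form (glyph and AP names purely illustrative):
+    #   nya = na + dot@above + wa@na:below | 0999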
+
+ # If -f specified, make sure we have the fonts column
+ if whichfont is not None:
+ if 'fonts' not in fl: logger.log('-f requires "fonts" column in glyph_data', 'S')
+ fontsCol = fl.index('fonts')
+
+ # RE that matches names of glyphs we don't care about
+ namesToSkipRE = re.compile('^(?:[._].*|null|cr|nonmarkingreturn|tab|glyph_name)$',re.IGNORECASE)
+
+ # keep track of glyph names we've seen to detect duplicates
+ namesSeen = set()
+
+ # OK, process all records in glyph_data
+ for line in incsv:
+ base = line[baseCol].strip()
+ if len(base) == 0:
+ # No composites specified
+ continue
+
+ gname = line[nameCol].strip()
+ # things to ignore:
+ if namesToSkipRE.match(gname): continue
+ if whichfont is not None and line[fontsCol] != '*' and line[fontsCol].lower().find(whichfont) < 0:
+ continue
+
+ if len(gname) == 0:
+ csvWarning('empty glyph name in glyph_data; ignored')
+ continue
+ if gname.startswith('#'): continue
+ if gname in namesSeen:
+ csvWarning('glyph name %s previously seen in glyph_data; ignored' % gname)
+ continue
+ namesSeen.add(gname)
+
+ # Ok, start building the composite
+ composite = '%s = %s' %(gname, base)
+
+ # The first component must *not* reference the base; all others *must*:
+ seenfirst = False
+ for apCol, apName in apInfo:
+ component = line[apCol].strip()
+ if len(component):
+ if not seenfirst:
+ composite += ' + %s@%s' % (component, apName)
+ seenfirst = True
+ else:
+ composite += ' + %s@%s:%s' % (component, base, apName)
+
+ # Add USV if present
+ if usvCol is not None:
+ usv = line[usvCol].strip()
+ if len(usv):
+ composite += ' | %s' % usv
+
+ # Output this one
+ output.write(composite + '\n')
+
+ output.close()
+
+def cmd() : execute("",doit,argspec)
+if __name__ == "__main__": cmd()
+
diff --git a/lib/silfont/scripts/psfdeflang.py b/lib/silfont/scripts/psfdeflang.py
new file mode 100755
index 0000000..3a1ac26
--- /dev/null
+++ b/lib/silfont/scripts/psfdeflang.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+__doc__ = '''Switch default language in a font'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Martin Hosken'
+
+from silfont.core import execute
+
+argspec = [
+ ('ifont',{'help': 'Input TTF'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output TTF','nargs': '?' }, {'type': 'outfont'}),
+ ('-L','--lang', {'help': 'Language to switch to'}, {}),
+ ('-l','--log',{'help': 'Optional log file'}, {'type': 'outfile', 'def': '_deflang.log', 'optlog': True}),
+]
+
+def long2tag(x):
+ res = []
+ while x:
+ res.append(chr(x & 0xFF))
+ x >>= 8
+ return "".join(reversed(res))
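+# e.g. long2tag(0x6C616E67) unpacks the bytes high-to-low and returns 'lang'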
+
+def doit(args):
+ infont = args.ifont
+ ltag = args.lang.lower()
+ if 'Sill' in infont and 'Feat' in infont:
+ if ltag in infont['Sill'].langs:
+ changes = dict((long2tag(x[0]), x[1]) for x in infont['Sill'].langs[ltag])
+ for g, f in infont['Feat'].features.items():
+ if g in changes:
+ f.default = changes[g]
+ otltag = ltag + (" " * (4 - len(ltag)))
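+    # e.g. ltag 'sd' becomes 'sd  ' so it compares equal to a lowercased 4-character OpenType langsys tag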
+ for k in ('GSUB', 'GPOS'):
+ try:
+ t = infont[k].table
+ except KeyError:
+ continue
+ for srec in t.ScriptList.ScriptRecord:
+ for lrec in srec.Script.LangSysRecord:
+ if lrec.LangSysTag.lower() == otltag:
+ srec.Script.DefaultLangSys = lrec.LangSys
+ return infont
+
+def cmd() : execute('FT', doit, argspec)
+if __name__ == "__main__" : cmd()
+
diff --git a/lib/silfont/scripts/psfdeleteglyphs.py b/lib/silfont/scripts/psfdeleteglyphs.py
new file mode 100644
index 0000000..94caa36
--- /dev/null
+++ b/lib/silfont/scripts/psfdeleteglyphs.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+__doc__ = '''Deletes glyphs from a UFO based on list. Can instead delete glyphs not in list.'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('ifont', {'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont', {'help': 'Output font file', 'nargs': '?'}, {'type': 'outfont'}),
+ ('-i', '--input', {'help': 'Input text file, one glyphname per line'}, {'type': 'infile', 'def': 'glyphlist.txt'}),
+ ('--reverse',{'help': 'Remove glyphs not in list instead', 'action': 'store_true', 'default': False},{}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': 'deletedglyphs.log'})]
+
+def doit(args) :
+ font = args.ifont
+ listinput = args.input
+ logger = args.logger
+
+ glyphlist = []
+ for line in listinput.readlines():
+ glyphlist.append(line.strip())
+
+ deletelist = []
+
+ if args.reverse:
+ for glyphname in font.deflayer:
+ if glyphname not in glyphlist:
+ deletelist.append(glyphname)
+ else:
+ for glyphname in font.deflayer:
+ if glyphname in glyphlist:
+ deletelist.append(glyphname)
+
+ secondarylayers = [x for x in font.layers if x.layername != "public.default"]
+
+ liststocheck = ('public.glyphOrder', 'public.postscriptNames', 'com.schriftgestaltung.glyphOrder')
+ liblists = [[],[],[]]; inliblists = [[],[],[]]
+ if hasattr(font, 'lib'):
+ for (i,listn) in enumerate(liststocheck):
+ if listn in font.lib:
+ liblists[i] = font.lib.getval(listn)
+ else:
+ logger.log("No lib.plist found in font", "W")
+
+ # Now loop round deleting the glyphs etc
+ logger.log("Deleted glyphs:", "I")
+
+    # With groups and kerning, create dicts representing the plists (to make deletion of members easier) and indexes keyed by glyph/member name
+ kgroupprefixes = {"public.kern1.": 1, "public.kern2.": 2}
+ gdict = {}
+ kdict = {}
+ groupsbyglyph = {}
+ ksetsbymember = {}
+
+ groups = font.groups if hasattr(font, "groups") else []
+ kerning = font.kerning if hasattr(font, "kerning") else []
+ if groups:
+ for gname in groups:
+ group = groups.getval(gname)
+ gdict[gname] = group
+ for glyph in group:
+ if glyph in groupsbyglyph:
+ groupsbyglyph[glyph].append(gname)
+ else:
+ groupsbyglyph[glyph] = [gname]
+ if kerning:
+ for setname in kerning:
+ kset = kerning.getval(setname)
+ kdict[setname] = kset
+ for member in kset:
+ if member in ksetsbymember:
+ ksetsbymember[member].append(setname)
+ else:
+ ksetsbymember[member] = [setname]
+
+ # Loop round doing the deleting
+ for glyphn in sorted(deletelist):
+ # Delete from all layers
+ font.deflayer.delGlyph(glyphn)
+ deletedfrom = "Default layer"
+ for layer in secondarylayers:
+ if glyphn in layer:
+ deletedfrom += ", " + layer.layername
+ layer.delGlyph(glyphn)
+ # Check to see if the deleted glyph is in any of liststocheck
+ stillin = None
+ for (i, liblist) in enumerate(liblists):
+ if glyphn in liblist:
+ inliblists[i].append(glyphn)
+ stillin = stillin + ", " + liststocheck[i] if stillin else liststocheck[i]
+
+ logger.log(" " + glyphn + " deleted from: " + deletedfrom, "I")
+ if stillin: logger.log(" " + glyphn + " is still in " + stillin, "I")
+
+ # Process groups.plist and kerning.plist
+
+ tocheck = (glyphn, "public.kern1." + glyphn, "public.kern2." + glyphn)
+ # First delete whole groups and kern pair sets
+ for kerngroup in tocheck[1:]: # Don't check glyphn when deleting groups:
+ if kerngroup in gdict: gdict.pop(kerngroup)
+ for setn in tocheck:
+ if setn in kdict: kdict.pop(setn)
+ # Now delete members within groups and kern pair sets
+ if glyphn in groupsbyglyph:
+ for groupn in groupsbyglyph[glyphn]:
+ if groupn in gdict: # Need to check still there, since whole group may have been deleted above
+ group = gdict[groupn]
+ del group[group.index(glyphn)]
+ for member in tocheck:
+ if member in ksetsbymember:
+ for setn in ksetsbymember[member]:
+ if setn in kdict: del kdict[setn][member]
+ # Now need to recreate groups.plist and kerning.plist
+ if groups:
+ for group in list(groups): groups.remove(group) # Empty existing contents
+ for gname in gdict:
+ elem = ET.Element("array")
+ if gdict[gname]: # Only create if group is not empty
+ for glyph in gdict[gname]:
+ ET.SubElement(elem, "string").text = glyph
+ groups.setelem(gname, elem)
+ if kerning:
+ for kset in list(kerning): kerning.remove(kset) # Empty existing contents
+ for kset in kdict:
+ elem = ET.Element("dict")
+ if kdict[kset]:
+ for member in kdict[kset]:
+ ET.SubElement(elem, "key").text = member
+ ET.SubElement(elem, "integer").text = str(kdict[kset][member])
+ kerning.setelem(kset, elem)
+
+ logger.log(str(len(deletelist)) + " glyphs deleted. Set logging to I to see details", "P")
+ inalist = set(inliblists[0] + inliblists[1] + inliblists[2])
+ if inalist: logger.log(str(len(inalist)) + " of the deleted glyphs are still in some lib.plist entries.", "W")
+
+ return font
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
+
diff --git a/lib/silfont/scripts/psfdupglyphs.py b/lib/silfont/scripts/psfdupglyphs.py
new file mode 100644
index 0000000..1b68bee
--- /dev/null
+++ b/lib/silfont/scripts/psfdupglyphs.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+'''Duplicates glyphs in a UFO based on a csv definition: source,target.
+Duplicates everything except unicodes.'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+
+argspec = [
+ ('ifont', {'help': 'Input font filename'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': 'duplicates.csv'}),
+ ('-l','--log',{'help': 'Set log file name'}, {'type': 'outfile', 'def': '_duplicates.log'})]
+
+def doit(args) :
+ font = args.ifont
+ logger = args.logger
+
+ # Process duplicates csv file into a dictionary structure
+ args.input.numfields = 2
+ duplicates = {}
+ for line in args.input :
+ duplicates[line[0]] = line[1]
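+    # e.g. a csv row "a,a.alt" (hypothetical names) duplicates glyph 'a' into a new glyph 'a.alt'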
+
+ # Iterate through dictionary (unsorted)
+ for source, target in duplicates.items() :
+ # Check if source glyph is in font
+ if source in font.keys() :
+ # Give warning if target is already in font, but overwrite anyway
+ if target in font.keys() :
+ logger.log("Warning: " + target + " already in font and will be replaced")
+ sourceglyph = font[source]
+ # Make a copy of source into a new glyph object
+ newglyph = sourceglyph.copy()
+ # Modify that glyph object
+ newglyph.unicodes = []
+ # Add the new glyph object to the font with name target
+ font.__setitem__(target,newglyph)
+ logger.log(source + " duplicated to " + target)
+ else :
+ logger.log("Warning: " + source + " not in font")
+
+ return font
+
+def cmd() : execute("FP",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfexportanchors.py b/lib/silfont/scripts/psfexportanchors.py
new file mode 100755
index 0000000..fd27a47
--- /dev/null
+++ b/lib/silfont/scripts/psfexportanchors.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+__doc__ = 'export anchor data from UFO to XML file'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015,2016 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Rowe'
+
+from silfont.core import execute
+from silfont.etutil import ETWriter
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('ifont',{'help': 'Input UFO'}, {'type': 'infont'}),
+ ('output',{'help': 'Output file exported anchor data in XML format', 'nargs': '?'}, {'type': 'outfile', 'def': '_anc.xml'}),
+ ('-r','--report',{'help': 'Set reporting level for log', 'type':str, 'choices':['X','S','E','P','W','I','V']},{}),
+ ('-l','--log',{'help': 'Set log file name'}, {'type': 'outfile', 'def': '_anc.log'}),
+ ('-g','--gid',{'help': 'Include GID attribute in <glyph> elements', 'action': 'store_true'},{}),
+ ('-s','--sort',{'help': 'Sort by public.glyphOrder in lib.plist', 'action': 'store_true'},{}),
+ ('-u','--Uprefix',{'help': 'Include U+ prefix on UID attribute in <glyph> elements', 'action': 'store_true'},{}),
+ ('-p','--params',{'help': 'XML formatting parameters: indentFirst, indentIncr, attOrder','action': 'append'}, {'type': 'optiondict'})
+ ]
+
+def doit(args) :
+ logfile = args.logger
+ if args.report: logfile.loglevel = args.report
+ infont = args.ifont
+ prefix = "U+" if args.Uprefix else ""
+
+ if hasattr(infont, 'lib') and 'public.glyphOrder' in infont.lib:
+ glyphorderlist = [s.text for s in infont.lib['public.glyphOrder'][1].findall('string')]
+ else:
+ glyphorderlist = []
+ if args.gid:
+ logfile.log("public.glyphOrder is absent; ignoring --gid option", "E")
+ args.gid = False
+ glyphorderset = set(glyphorderlist)
+ if len(glyphorderlist) != len(glyphorderset):
+ logfile.log("At least one duplicate name in public.glyphOrder", "W")
+ # count of duplicate names is len(glyphorderlist) - len(glyphorderset)
+ actualglyphlist = [g for g in infont.deflayer.keys()]
+ actualglyphset = set(actualglyphlist)
+ listorder = []
+ gid = 0
+ for g in glyphorderlist:
+ if g in actualglyphset:
+ listorder.append( (g, gid) )
+ gid += 1
+ actualglyphset.remove(g)
+ glyphorderset.remove(g)
+ else:
+ logfile.log(g + " in public.glyphOrder list but absent from UFO", "W")
+ if args.sort: listorder.sort()
+ for g in sorted(actualglyphset): # if any glyphs remaining
+ listorder.append( (g, None) )
+ logfile.log(g + " in UFO but not in public.glyphOrder list", "W")
+
+ if 'postscriptFontName' in infont.fontinfo:
+ postscriptFontName = infont.fontinfo['postscriptFontName'][1].text
+ else:
+ if 'styleMapFamilyName' in infont.fontinfo:
+ family = infont.fontinfo['styleMapFamilyName'][1].text
+ elif 'familyName' in infont.fontinfo:
+ family = infont.fontinfo['familyName'][1].text
+ else:
+ family = "UnknownFamily"
+ if 'styleMapStyleName' in infont.fontinfo:
+ style = infont.fontinfo['styleMapStyleName'][1].text.capitalize()
+ elif 'styleName' in infont.fontinfo:
+ style = infont.fontinfo['styleName'][1].text
+ else:
+ style = "UnknownStyle"
+
+ postscriptFontName = '-'.join((family,style)).replace(' ','')
+ fontElement= ET.Element('font', upem=infont.fontinfo['unitsPerEm'][1].text, name=postscriptFontName)
+ for g, i in listorder:
+ attrib = {'PSName': g}
+ if args.gid and i != None: attrib['GID'] = str(i)
+ u = infont.deflayer[g]['unicode']
+ if len(u)>0: attrib['UID'] = prefix + u[0].element.get('hex')
+ glyphElement = ET.SubElement(fontElement, 'glyph', attrib)
+ anchorlist = []
+ for a in infont.deflayer[g]['anchor']:
+ anchorlist.append( (a.element.get('name'), int(float(a.element.get('x'))), int(float(a.element.get('y'))) ) )
+ anchorlist.sort()
+ for a, x, y in anchorlist:
+ anchorElement = ET.SubElement(glyphElement, 'point', attrib = {'type': a})
+ locationElement = ET.SubElement(anchorElement, 'location', attrib = {'x': str(x), 'y': str(y)})
+
+# instead of simple serialization with: ofile.write(ET.tostring(fontElement))
+# create ETWriter object and specify indentation and attribute order to get normalized output
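+# The serialized output has roughly this shape (names and values illustrative):
+#   <font name="SomeFont-Regular" upem="1000">
+#     <glyph PSName="a" GID="1" UID="0061">
+#       <point type="above">
+#         <location x="520" y="1350"/>
+#       </point>
+#     </glyph>
+#   </font>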
+ ofile = args.output
+ indentFirst = args.params.get('indentFirst', "")
+ indentIncr = args.params.get('indentIncr', " ")
+ attOrder = args.params.get('attOrder', "name,upem,PSName,GID,UID,type,x,y")
+ x = attOrder.split(',')
+ attributeOrder = dict(zip(x,range(len(x))))
+ etwobj=ETWriter(fontElement, indentFirst=indentFirst, indentIncr=indentIncr, attributeOrder=attributeOrder)
+ ofile.write(etwobj.serialize_xml())
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfexportmarkcolors.py b/lib/silfont/scripts/psfexportmarkcolors.py
new file mode 100755
index 0000000..98beae1
--- /dev/null
+++ b/lib/silfont/scripts/psfexportmarkcolors.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+__doc__ = '''Write mapping of glyph name to cell mark color to a csv file
+- csv format glyphname,colordef'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+from silfont.util import parsecolors, colortoname
+import datetime
+
+suffix = "_colormap"
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('-o','--output',{'help': 'Output csv file'}, {'type': 'outfile', 'def': suffix+'.csv'}),
+ ('-c','--color',{'help': 'Export list of glyphs that match color'},{}),
+ ('-n','--names',{'help': 'Export colors as names', 'action': 'store_true', 'default': False},{}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'}),
+ ('--nocomments',{'help': 'No comments in output files', 'action': 'store_true', 'default': False},{})]
+
+def doit(args) :
+ font = args.ifont
+ outfile = args.output
+ logger = args.logger
+ color = args.color
+
+ # Add initial comments to outfile
+ if not args.nocomments :
+ outfile.write("# " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ") + args.cmdlineargs[0] + "\n")
+ outfile.write("# "+" ".join(args.cmdlineargs[1:])+"\n\n")
+
+ if color :
+ (colorfilter, colorname, logcolor, splitcolor) = parsecolors(color, single=True)
+ if colorfilter is None : logger.log(logcolor, "S") # If color not parsed, parsecolors() puts error in logcolor
+
+ glyphlist = font.deflayer.keys()
+
+ for glyphn in sorted(glyphlist) :
+ glyph = font.deflayer[glyphn]
+ colordefraw = ""
+ colordef = ""
+ if glyph["lib"] :
+ lib = glyph["lib"]
+ if "public.markColor" in lib :
+ colordefraw = lib["public.markColor"][1].text
+ colordef = '"' + colordefraw + '"'
+ if args.names : colordef = colortoname(colordefraw, colordef)
+ if color :
+ if colorfilter == colordefraw : outfile.write(glyphn + "\n")
+ if not color : outfile.write(glyphn + "," + colordef + "\n")
+ return
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfexportpsnames.py b/lib/silfont/scripts/psfexportpsnames.py
new file mode 100755
index 0000000..5995e14
--- /dev/null
+++ b/lib/silfont/scripts/psfexportpsnames.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+__doc__ = '''Write mapping of glyph name to postscript name to a csv file
+- csv format glyphname,postscriptname'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+import datetime
+
+suffix = "_psnamesmap"
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+    ('-o','--output',{'help': 'Output csv file'}, {'type': 'outfile', 'def': suffix+'.csv'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'}),
+ ('--nocomments',{'help': 'No comments in output files', 'action': 'store_true', 'default': False},{})]
+
+def doit(args) :
+ font = args.ifont
+ outfile = args.output
+
+ # Add initial comments to outfile
+ if not args.nocomments :
+ outfile.write("# " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ") + args.cmdlineargs[0] + "\n")
+ outfile.write("# "+" ".join(args.cmdlineargs[1:])+"\n\n")
+
+ glyphlist = font.deflayer.keys()
+ missingnames = False
+
+ for glyphn in glyphlist :
+ glyph = font.deflayer[glyphn]
+ # Find PSname if present
+ PSname = None
+ if "lib" in glyph :
+ lib = glyph["lib"]
+ if "public.postscriptname" in lib : PSname = lib["public.postscriptname"][1].text
+ if PSname:
+ outfile.write(glyphn + "," + PSname + "\n")
+ else :
+            font.logger.log("No psname for " + glyphn, "W")
+            missingnames = True
+    if missingnames : font.logger.log("Some glyphs had no psnames - see log file","E")
+ return
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfexportunicodes.py b/lib/silfont/scripts/psfexportunicodes.py
new file mode 100755
index 0000000..c9e1be3
--- /dev/null
+++ b/lib/silfont/scripts/psfexportunicodes.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+__doc__ = '''Export the name and unicode of glyphs that have a defined unicode to a csv file. Does not support double-encoded glyphs.
+- csv format glyphname,unicode'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2016-2020 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney, based on UFOexportPSname.py'
+
+from silfont.core import execute
+import datetime
+
+suffix = "_unicodes"
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('-o','--output',{'help': 'Output csv file'}, {'type': 'outfile', 'def': suffix+'.csv'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'}),
+ ('--nocomments',{'help': 'No comments in output files', 'action': 'store_true', 'default': False},{}),
+    ('--allglyphs',{'help': 'Export names of all glyphs, even those without unicode values', 'action': 'store_true', 'default': False},{})]
+
+def doit(args) :
+ font = args.ifont
+ outfile = args.output
+
+ # Add initial comments to outfile
+ if not args.nocomments :
+ outfile.write("# " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ") + args.cmdlineargs[0] + "\n")
+ outfile.write("# "+" ".join(args.cmdlineargs[1:])+"\n\n")
+
+ glyphlist = sorted(font.deflayer.keys())
+
+ for glyphn in glyphlist :
+ glyph = font.deflayer[glyphn]
+ if len(glyph["unicode"]) == 1 :
+ unival = glyph["unicode"][0].hex
+ outfile.write(glyphn + "," + unival + "\n")
+ else :
+ if args.allglyphs :
+ outfile.write(glyphn + "," + "\n")
+
+ return
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psffixffglifs.py b/lib/silfont/scripts/psffixffglifs.py
new file mode 100755
index 0000000..f496737
--- /dev/null
+++ b/lib/silfont/scripts/psffixffglifs.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+__doc__ = '''Make changes needed to a UFO following processing by FontForge.
+'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_postff.log'})]
+
+def doit(args) :
+
+ font = args.ifont
+ logger = args.logger
+
+ advances_removed = 0
+ unicodes_removed = 0
+ for layer in font.layers:
+ if layer.layername == "public.background":
+ for g in layer:
+ glyph = layer[g]
+ # Remove advance and unicode fields from background layer
+ # (FF currently copies some from default layer)
+ if "advance" in glyph:
+ glyph.remove("advance")
+ advances_removed += 1
+ logger.log("Removed <advance> from " + g, "I")
+ uc = glyph["unicode"]
+ if uc != []:
+ while glyph["unicode"] != []: glyph.remove("unicode",0)
+ unicodes_removed += 1
+ logger.log("Removed unicode value(s) from " + g, "I")
+
+ if advances_removed + unicodes_removed > 0 :
+        logger.log("Advance removed from " + str(advances_removed) + " glyphs and unicode value(s) removed from "
+ + str(unicodes_removed) + " glyphs", "P")
+ else:
+ logger.log("No advances or unicodes removed from glyphs", "P")
+
+ return args.ifont
+
+def cmd() : execute("UFO",doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psffixfontlab.py b/lib/silfont/scripts/psffixfontlab.py
new file mode 100755
index 0000000..c83868f
--- /dev/null
+++ b/lib/silfont/scripts/psffixfontlab.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+__doc__ = '''Make changes needed to a UFO following processing by FontLab 7.
+Various items are reset using the backup of the original font that Fontlab creates
+'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2021 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute, splitfn
+from silfont.ufo import Ufont
+import os, shutil, glob
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'filename'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_fixfontlab.log'})]
+
+def doit(args) :
+
+ fontname = args.ifont
+ logger = args.logger
+ params = args.paramsobj
+
+ # Locate the oldest backup
+ (path, base, ext) = splitfn(fontname)
+ backuppath = os.path.join(path, base + ".*-*" + ext) # Backup has date/time added in format .yymmdd-hhmm
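+    # e.g. for 'MyFont.ufo' this matches backups such as 'MyFont.210302-2001.ufo' (illustrative timestamp)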
+ backups = glob.glob(backuppath)
+ if len(backups) == 0:
+ logger.log("No backups found matching %s so no changes made to the font" % backuppath, "P")
+ return
+ backupname = sorted(backups)[0] # Choose the oldest backup - date/time format sorts alphabetically
+
+ # Reset groups.plist, kerning.plist and any layerinfo.plist(s) from backup ufo
+ for filename in ["groups.plist", "kerning.plist"]:
+ bufullname = os.path.join(backupname, filename)
+ ufofullname = os.path.join(fontname, filename)
+ if os.path.exists(bufullname):
+ try:
+ shutil.copy(bufullname, fontname)
+ logger.log(filename + " restored from backup", "P")
+ except Exception as e:
+ logger.log("Failed to copy %s to %s: %s" % (bufullname, fontname, str(e)), "S")
+ elif os.path.exists(ufofullname):
+ os.remove(ufofullname)
+ logger.log(filename + " removed from ufo", "P")
+ lifolders = []
+ for ufoname in (fontname, backupname): # Find any layerinfo files in either ufo
+ lis = glob.glob(os.path.join(ufoname, "*/layerinfo.plist"))
+ for li in lis:
+ (lifolder, dummy) = os.path.split(li) # Get full path name for folder
+ (dummy, lifolder) = os.path.split(lifolder) # Now take ufo name off the front
+ if lifolder not in lifolders: lifolders.append(lifolder)
+ for folder in lifolders:
+ filename = os.path.join(folder, "layerinfo.plist")
+ bufullname = os.path.join(backupname, filename)
+ ufofullname = os.path.join(fontname, filename)
+ if os.path.exists(bufullname):
+ try:
+ shutil.copy(bufullname, os.path.join(fontname, folder))
+ logger.log(filename + " restored from backup", "P")
+ except Exception as e:
+ logger.log("Failed to copy %s to %s: %s" % (bufullname, fontname, str(e)), "S")
+ elif os.path.exists(ufofullname):
+ os.remove(ufofullname)
+ logger.log(filename + " removed from ufo", "P")
+
+ # Now open the fonts
+ font = Ufont(fontname, params = params)
+ backupfont = Ufont(backupname, params = params)
+
+ fidel = ("openTypeGaspRangeRecords", "openTypeHeadFlags", "openTypeHheaCaretOffset",
+ "postscriptBlueFuzz", "postscriptBlueScale", "postscriptBlueShift", "postscriptForceBold",
+ "postscriptIsFixedPitch", "postscriptWeightName")
+ libdel = ("com.fontlab.v2.tth", "com.typemytype.robofont.italicSlantOffset")
+ fontinfo = font.fontinfo
+ libplist = font.lib
+ backupfi = backupfont.fontinfo
+ backuplib = backupfont.lib
+
+ # Delete keys that are not needed
+ for key in fidel:
+ if key in fontinfo:
+ old = fontinfo.getval(key)
+ fontinfo.remove(key)
+ logchange(logger, " removed from fontinfo.plist. ", key, old, None)
+ for key in libdel:
+ if key in libplist:
+ old = libplist.getval(key)
+ libplist.remove(key)
+ logchange(logger, " removed from lib.plist. ", key, old, None)
+
+ # Correct other metadata:
+ if "guidelines" in backupfi:
+ fontinfo.setelem("guidelines",backupfi["guidelines"][1])
+ logger.log("fontinfo guidelines copied from backup ufo", "I")
+ elif "guidelines" in fontinfo:
+ fontinfo.remove("guidelines")
+ logger.log("fontinfo guidelines deleted - not in backup ufo", "I")
+ if "italicAngle" in fontinfo and fontinfo.getval("italicAngle") == 0:
+ fontinfo.remove("italicAngle")
+ logger.log("fontinfo italicAngle removed since it was 0", "I")
+ if "openTypeOS2VendorID" in fontinfo:
+ old = fontinfo.getval("openTypeOS2VendorID")
+ if len(old) < 4:
+ new = "%-4s" % (old,)
+ fontinfo.setval("openTypeOS2VendorID", "string", new)
+ logchange(logger, " padded to 4 characters ", "openTypeOS2VendorID", "'%s'" % (old,) , "'%s'" % (new,))
+ if "woffMetadataCredits" in backupfi:
+ fontinfo.setelem("woffMetadataCredits",backupfi["woffMetadataCredits"][1])
+ logger.log("fontinfo woffMetadataCredits copied from backup ufo", "I")
+ elif "woffMetadataCredits" in fontinfo:
+ fontinfo.remove("woffMetadataCredits")
+ logger.log("fontinfo woffMetadataCredits deleted - not in backup ufo", "I")
+ if "woffMetadataDescription" in backupfi:
+ fontinfo.setelem("woffMetadataDescription",backupfi["woffMetadataDescription"][1])
+ logger.log("fontinfo woffMetadataDescription copied from backup ufo", "I")
+ elif "woffMetadataDescription" in fontinfo:
+ fontinfo.remove("woffMetadataDescription")
+ logger.log("fontinfo woffMetadataDescription deleted - not in backup ufo", "I")
+ if "public.glyphOrder" in backuplib:
+ libplist.setelem("public.glyphOrder",backuplib["public.glyphOrder"][1])
+ logger.log("lib.plist public.glyphOrder copied from backup ufo", "I")
+ elif "public.glyphOrder" in libplist:
+ libplist.remove("public.glyphOrder")
+ logger.log("libplist public.glyphOrder deleted - not in backup ufo", "I")
+
+
+
+ # Now process glif level data
+ updates = False
+ for gname in font.deflayer:
+ glyph = font.deflayer[gname]
+ glines = glyph["guideline"]
+ if glines:
+ for gl in list(glines): glines.remove(gl) # Remove any existing glines
+ updates = True
+ buglines = backupfont.deflayer[gname]["guideline"] if gname in backupfont.deflayer else []
+ if buglines:
+ for gl in buglines: glines.append(gl) # Add in those from backup
+ updates = True
+ if updates:
+ logger.log("Some updates to glif guidelines may have been made", "I")
+ updates = False
+ for layer in font.layers:
+ if layer.layername == "public.background":
+ for gname in layer:
+ glyph = layer[gname]
+ if glyph["advance"] is not None:
+ glyph.remove("advance")
+ updates = True
+ if updates: logger.log("Some advance elements removed from public.background glifs", "I")
+ font.write(fontname)
+ return
+
+def logchange(logger, logmess, key, old, new):
+ oldstr = str(old) if len(str(old)) < 22 else str(old)[0:20] + "..."
+ newstr = str(new) if len(str(new)) < 22 else str(new)[0:20] + "..."
+ logmess = key + logmess
+ if old is None:
+ logmess = logmess + " New value: " + newstr
+ else:
+ if new is None:
+ logmess = logmess + " Old value: " + oldstr
+ else:
+ logmess = logmess + " Old value: " + oldstr + ", new value: " + newstr
+ logger.log(logmess, "I")
+
+def cmd() : execute(None,doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfftml2TThtml.py b/lib/silfont/scripts/psfftml2TThtml.py
new file mode 100755
index 0000000..e9e1128
--- /dev/null
+++ b/lib/silfont/scripts/psfftml2TThtml.py
@@ -0,0 +1,389 @@
+#! /usr/bin/python3
+'''Build fonts for all combinations of TypeTuner features needed for specific ftml then build html that uses those fonts'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from silfont.core import execute
+from fontTools import ttLib
+from lxml import etree as ET # using this because it supports xslt and HTML
+from collections import OrderedDict
+from subprocess import check_output, CalledProcessError
+import os, re
+import gzip
+from glob import glob
+
+
+argspec = [
+ ('ttfont', {'help': 'Input Tunable TTF file'}, {'type': 'filename'}),
+ ('map', {'help': 'Feature mapping CSV file'}, {'type': 'incsv'}),
+ ('-o', '--outputdir', {'help': 'Output directory. Default: tests/typetuner', 'default': 'tests/typetuner'}, {}),
+ ('--ftml', {'help': 'ftml file(s) to process. Can be used multiple times and can contain filename patterns.', 'action': 'append'}, {}),
+ ('--xsl', {'help': 'standard xsl file. Default: ../tools/ftml.xsl', 'default': '../tools/ftml.xsl'}, {'type': 'filename'}),
+ ('--norebuild', {'help': 'assume existing fonts are good', 'action': 'store_true'}, {}),
+ ]
+
+# Define globals needed everywhere:
+
+logger = None
+sourcettf = None
+outputdir = None
+fontdir = None
+
+
+# Dictionary of TypeTuner features, derived from 'feat_all.xml', indexed by feature name
+feat_all = dict()
+
+class feat(object):
+ 'TypeTuner feature'
+ def __init__(self, elem, sortkey):
+ self.name = elem.attrib.get('name')
+ self.tag = elem.attrib.get('tag')
+ self.default = elem.attrib.get('value')
+ self.values = OrderedDict()
+ self.sortkey = sortkey
+ for v in elem.findall('value'):
+ # Only add those values which aren't importing line metrics
+ if v.find("./cmd[@name='line_metrics_scaled']") is None:
+ self.values[v.attrib.get('name')] = v.attrib.get('tag')
+
+
+# Dictionaries of mappings from OpenType tags to TypeTuner names, derived from map csv
+feat_maps = dict()
+lang_maps = dict()
+
+class feat_map(object):
+ 'mapping from OpenType feature tag to TypeTuner feature name, default value, and all values'
+ def __init__(self, r):
+ self.ottag, self.ttfeature, self.default = r[0:3]
+ self.ttvalues = r[3:]
+
+class lang_map(object):
+ 'mapping from OpenType language tag to TypeTuner language feature name and value'
+ def __init__(self,r):
+ self.ottag, self.ttfeature, self.ttvalue = r
+
+# About font_tag values
+#
+# In this code, a font_tag uniquely identifies a font we've built.
+#
+# Because different ftml files could use different style names for the same set of features and language, and we
+# want to build only one font for any given combination of features and language, we don't depend on the name of the
+# ftml style for identifying and caching the fonts we build. Rather we build a font_tag, which is the
+# concatenation of the ftml feature/value tags and the ftml lang feature/value tag.
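+#
+# For example (the TypeTuner tags here are hypothetical), an ftml style with feats "'cv02' 1"
+# and lang "sd" might map to tag/value pairs "cv2"+"alt" and "lng"+"SD", giving the
+# font_tag "cv2alt_lngSD".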
+
+# Font object used to cache information about a tuned font we've created
+
+class font(object):
+ 'Cache of tuned font information'
+
+ def __init__(self, font_tag, feats, lang, fontface):
+ self.font_tag = font_tag
+ self.feats = feats
+ self.lang = lang
+ self.fontface = fontface
+
+
+# Dictionaries for finding font objects
+
+# Finding font from font_tag:
+font_tag2font = dict()
+
+# If an ftml style contains no feats, only the lang tag will show up in the html. Special mapping for those cases:
+lang2font = dict()
+
+# RE to match strings like: # "'cv02' 1"
+feature_settingRE = re.compile(r"^'(\w{4,4})'(?:\s+(\w+))?$")
+# RE to split strings of multiple features around comma (with optional whitespace)
+features_splitRE = re.compile(r"\s*,\s*")
+
+
+def cache_font(feats, lang, norebuild):
+ 'Create (and cache) a TypeTuned font and @fontface for this combination of features and lang'
+
+ # feats is either None or a css font-feature-settings string using single quotes (according to ftml spec), e.g. "'cv02' 1, 'cv60' 1"
+ # lang is either None or bcp47 langtag
+ # norebuild is a debugging aid that causes the code to skip building a .ttf if it is already present thus making the
+ # program run faster but with the risk that the built TTFs don't match the current build.
+
+ # First step is to construct a name for this set of languages and features, something we'll call the "font tag"
+
+ parts = []
+ ttsettings = dict() # place to save TT setting name and value in case we need to build the font
+ fatal_errors = False
+
+ if feats:
+ # Need to split the font-feature-settings around commas and parse each part, mapping css tag and value to
+ # typetuner tag and value
+ for setting in features_splitRE.split(feats):
+ m = feature_settingRE.match(setting)
+ if m is None:
+ logger.log('Invalid CSS feature setting in ftml: {}'.format(setting), 'E')
+ fatal_errors = True
+ continue
+ f,v = m.groups() # Feature tag and value
+ if v in ['normal','off']:
+ v = '0'
+ elif v == 'on':
+ v = '1'
+ try:
+ v = int(v)
+ assert v >= 0
+ except:
+                logger.log('Invalid feature value in ftml setting: {}'.format(setting), 'E')
+ fatal_errors = True
+ continue
+ if not v:
+ continue # No need to include 0/off values
+
+ # we need this one... so translate to TypeTuner feature & value using the map file
+ try:
+ fmap = feat_maps[f]
+ except KeyError:
+ logger.log('Font feature "{}" not found in map file'.format(f), 'E')
+ fatal_errors = True
+ continue
+
+ f = fmap.ttfeature
+
+ try:
+ v = fmap.ttvalues[v - 1]
+ except IndexError:
+ logger.log('TypeTuner feature "{}" doesn\'t have a value index {}'.format(f, v), 'E')
+ fatal_errors = True
+ continue
+
+ # Now translate to TypeTuner tags using feat_all info
+ if f not in feat_all:
+ logger.log('Tunable font doesn\'t contain a feature "{}"'.format(f), 'E')
+ fatal_errors = True
+ elif v not in feat_all[f].values:
+ logger.log('Tunable font feature "{}" doesn\'t have a value {}'.format(f, v), 'E')
+ fatal_errors = True
+ else:
+ ttsettings[f] = v # Save TT setting name and value name in case we need to build the font
+ ttfeat = feat_all[f]
+ f = ttfeat.tag
+ v = ttfeat.values[v]
+ # Finally!
+ parts.append(f+v)
+ if lang:
+ if lang not in lang_maps:
+ logger.log('Language tag "{}" not found in map file'.format(lang), 'E')
+ fatal_errors = True
+ else:
+ # Translate to TypeTuner feature & value using the map file
+ lmap = lang_maps[lang]
+ f = lmap.ttfeature
+ v = lmap.ttvalue
+ # Translate to TypeTuner tags using feat_all info
+ if f not in feat_all:
+ logger.log('Tunable font doesn\'t contain a feature "{}"'.format(f), 'E')
+ fatal_errors = True
+ elif v not in feat_all[f].values:
+ logger.log('Tunable font feature "{}" doesn\'t have a value {}'.format(f, v), 'E')
+ fatal_errors = True
+ else:
+ ttsettings[f] = v # Save TT setting name and value in case we need to build the font
+ ttfeat = feat_all[f]
+ f = ttfeat.tag
+ v = ttfeat.values[v]
+ # Finally!
+ parts.append(f+v)
+ if fatal_errors:
+ return None
+
+ if len(parts) == 0:
+        logger.log('No features or languages found', 'E')
+ return None
+
+ # the Font Tag is how we name everything (the ttf, the xml, etc)
+ font_tag = '_'.join(sorted(parts))
+
+ # See if we've had this combination before:
+ if font_tag in font_tag2font:
+ logger.log('Found cached font {}'.format(font_tag), 'I')
+ return font_tag
+
+ # Path to font, which may already exist, and @fontface
+ ttfname = os.path.join(fontdir, font_tag + '.ttf')
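+    # The CSS below is built with str.replace (rather than str.format) because the rule contains literal { } braces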
+ fontface = '@font-face { font-family: {}; src: url(fonts/{}.ttf); } .{} {font-family: {}; }'.replace('{}',font_tag)
+
+ # Create new font object and remember how to find it:
+ thisfont = font(font_tag, feats, lang, fontface)
+ font_tag2font[font_tag] = thisfont
+ if lang and not feats:
+ lang2font[lang] = thisfont
+
+ # Debugging shortcut: use the existing fonts without rebuilding
+ if norebuild and os.path.isfile(ttfname):
+ logger.log('Blindly using existing font {}'.format(font_tag), 'I')
+ return font_tag
+
+ # Ok, need to build the font
+ logger.log('Building font {}'.format(font_tag), 'I')
+
+ # Create and save the TypeTuner feature settings file
+ sfname = os.path.join(fontdir, font_tag + '.xml')
+ root = ET.XML('''\
+<?xml version = "1.0"?>
+<!DOCTYPE features_set SYSTEM "feat_set.dtd">
+<features_set version = "1.0"/>
+''')
+ # Note: Order of elements in settings file should be same as order in feat_all
+ # (because this is the way TypeTuner does it and some fonts may expect this)
+ for name, ttfeat in sorted(feat_all.items(), key=lambda x: x[1].sortkey):
+ if name in ttsettings:
+ # Output the non-default value for this one:
+ ET.SubElement(root, 'feature',{'name': name, 'value': ttsettings[name]})
+ else:
+ ET.SubElement(root, 'feature', {'name': name, 'value': ttfeat.default})
+ xml = ET.tostring(root,pretty_print = True, encoding='UTF-8', xml_declaration=True)
+    with open(sfname, '+wb') as f:
+ f.write(xml)
+
+ # Now invoke TypeTuner to create the tuned font
+ try:
+ cmd = ['typetuner', '-o', ttfname, '-n', font_tag, sfname, sourcettf]
+ res = check_output(cmd)
+ if len(res):
+ print('\n', res)
+ except CalledProcessError as err:
+ logger.log("couldn't tune font: {}".format(err.output), 'S')
+
+ return font_tag
+
+def doit(args) :
+
+ global logger, sourcettf, outputdir, fontdir
+
+ logger = args.logger
+ sourcettf = args.ttfont
+
+ # Create output directory, including fonts subdirectory, if not present
+ outputdir = args.outputdir
+ os.makedirs(outputdir, exist_ok = True)
+ fontdir = os.path.join(outputdir, 'fonts')
+ os.makedirs(fontdir, exist_ok = True)
+
+ # Read and save feature mapping
+ for r in args.map:
+ # remove empty cells from the end
+ while len(r) and len(r[-1]) == 0:
+ r.pop()
+ if len(r) == 0 or r[0].startswith('#'):
+ continue
+ elif r[0].startswith('lang='):
+ if len(r[0]) < 7 or len(r) != 3:
+ logger.log("Invalid lang mapping: '" + ','.join(r) + "' ignored", "W")
+ else:
+ r[0] = r[0][5:]
+ lang_maps[r[0]] = lang_map(r)
+ else:
+ if len(r) < 4:
+ logger.log("Invalid feature mapping: '" + ','.join(r) + "' ignored", "W")
+ else:
+ feat_maps[r[0]] = feat_map(r)
+
+ # Open and verify input file is a tunable font; extract and parse feat_all from font.
+ font = ttLib.TTFont(sourcettf)
+ raw_data = font.getTableData('Silt')
+ feat_xml = gzip.decompress(raw_data) # .decode('utf-8')
+ root = ET.fromstring(feat_xml)
+ if root.tag != 'all_features':
+ logger.log("Invalid TypeTuner feature file: missing root element", "S")
+ for i, f in enumerate(root.findall('.//feature')):
+ # add to dictionary
+ ttfeat = feat(f,i)
+ feat_all[ttfeat.name] = ttfeat
+
+ # Open and prepare the xslt file to transform the ftml:
+ xslt = ET.parse(args.xsl)
+ xslt_transform = ET.XSLT(xslt)
+
+
+ # Process all ftml files:
+
+ for arg in args.ftml:
+ for infname in glob(arg):
+ # based on input filename, construct output name
+ # find filename and change extension to html:
+ outfname = os.path.join(outputdir, os.path.splitext(os.path.basename(infname))[0] + '.html')
+ logger.log('Processing: {} -> {}'.format(infname, outfname), 'P')
+
+ # Each named style in the FTML ultimately maps to a TypeTuned font that will be added via @fontface.
+ # We need to remember the names of the styles and their associated fonts so we can hack the html.
+ sname2font = dict() # Indexed by ftml stylename; result is a font object
+
+ # Parse the FTML
+ ftml_doc = ET.parse(infname)
+
+ # Adjust <title> to show this is from TypeTuner
+ head = ftml_doc.find('head')
+ title = head.find('title')
+ title.text += " - TypeTuner"
+            # Replace all <fontsrc> elements with two identical ones referencing the input font:
+            # One will remain unchanged; the other will eventually be changed to a typetuned font.
+ ET.strip_elements(head, 'fontsrc')
+ fpathname = os.path.relpath(sourcettf, outputdir).replace('\\','/') # for css make sure all slashes are forward!
+ head.append(ET.fromstring('<fontsrc>url({})</fontsrc>'.format(fpathname))) # First font
+ head.append(ET.fromstring('<fontsrc>url({})</fontsrc>'.format(fpathname))) # Second font, same as the first
+
+ # iterate over all the styles in this ftml file, building tuned fonts to match if not already done.
+ for style in head.iter('style'):
+ sname = style.get('name') # e.g. "some_style"
+                feats = style.get('feats') # e.g. "'cv02' 1, 'cv60' 1" -- this we'll parse to get the needed TT features
+ lang = style.get('lang') # e.g., "sd"
+ font_tag = cache_font(feats, lang, args.norebuild)
+ # font_tag could be None due to errors, but messages should already have been logged
+ # If it is valid, remember how to find this font from the ftml stylename
+ if font_tag:
+ sname2font[sname] = font_tag2font[font_tag]
+
+ # convert to html via supplied xslt
+ html_doc = xslt_transform(ftml_doc)
+
+ # Two modifications to make in the html:
+ # 1) add all @fontface specs to the <style> element
+ # 2) Fix up all occurrences of <td> elements referencing font2
+
+ # Add @fontface to <style>
+ style = html_doc.find('//style')
+ style.text = style.text + '\n' + '\n'.join([x.fontface for x in sname2font.values()])
+
+ # Iterate over all <td> elements looking for font2 and a style or lang indicating feature settings
+
+ classRE = re.compile(r'string\s+(?:(\w+)\s+)?font2$')
+
+ for td in html_doc.findall('//td'):
+ tdclass = td.get('class')
+ tdlang = td.get('lang')
+ m = classRE.match(tdclass)
+ if m:
+ sname = m.group(1)
+ if sname:
+ # stylename will get us directly to the font
+ try:
+ td.set('class', 'string {}'.format(sname2font[sname].font_tag))
+ if tdlang: # If there is also a lang attribute, we no longer need it.
+ del td.attrib['lang']
+ except KeyError:
+ logger.log("Style name {} not available.".format(sname), "W")
+ elif tdlang:
+ # Otherwise we'll assume there is only the lang attribute
+ try:
+ td.set('class', 'string {}'.format(lang2font[tdlang].font_tag))
+ del td.attrib['lang'] # lang attribute no longer needed.
+ except KeyError:
+ logger.log("Style for langtag {} not available.".format(tdlang), "W")
+
+
+ # Ok -- write the html out!
+ html = ET.tostring(html_doc, pretty_print=True, method='html', encoding='UTF-8')
+            with open(outfname, '+wb') as f:
+ f.write(html)
+
+
+def cmd() : execute(None,doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfftml2odt.py b/lib/silfont/scripts/psfftml2odt.py
new file mode 100755
index 0000000..49777e4
--- /dev/null
+++ b/lib/silfont/scripts/psfftml2odt.py
@@ -0,0 +1,453 @@
+#!/usr/bin/env python
+__doc__ = 'read FTML file and generate LO writer .odt file'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015, SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Rowe'
+
+from silfont.core import execute
+from fontTools import ttLib
+from xml.etree import ElementTree as ET ### used to parse input FTML (may not be needed if FTML parser used)
+import re
+import os
+import io
+from odf.opendocument import OpenDocumentText, OpaqueObject
+from odf.config import ConfigItem, ConfigItemSet
+from odf.office import FontFaceDecls
+from odf.style import FontFace, ParagraphProperties, Style, TableCellProperties, TableColumnProperties, TableProperties, TextProperties
+from odf.svg import FontFaceSrc, FontFaceUri, FontFaceFormat
+from odf.table import Table, TableCell, TableColumn, TableRow
+from odf.text import H, P, SequenceDecl, SequenceDecls, Span
+
+# specify two parameters: input file (FTML/XML format), output file (ODT format)
+# preceded by optional log file plus zero or more font strings
+argspec = [
+ ('input',{'help': 'Input file in FTML format'}, {'type': 'infile'}),
+ ('output',{'help': 'Output file (LO writer .odt)', 'nargs': '?'}, {'type': 'filename', 'def': '_out.odt'}),
+ ('-l','--log',{'help': 'Log file', 'required': False},{'type': 'outfile', 'def': '_ftml2odt_log.txt'}),
+ ('-r','--report',{'help': 'Set reporting level for log', 'type':str, 'choices':['X','S','E','P','W','I','V']},{}),
+ ('-f','--font',{'help': 'font specification','action': 'append', 'required': False}, {}),
+ ]
+
+# RegExs for extracting font name from fontsrc element
+findfontnamelocal = re.compile(r"""local\( # begin with local(
+ (["']?) # optional open quote
+ (?P<fontstring>[^)]+) # font name
+ \1 # optional matching close quote
+ \)""", re.VERBOSE) # and end with )
+findfontnameurl = re.compile(r"""url\(     # begin with url(
+ (["']?) # optional open quote
+ (?P<fontstring>[^)]+) # font name
+ \1 # optional matching close quote
+ \)""", re.VERBOSE) # and end with )
+fontspec = re.compile(r"""^ # beginning of string
+ (?P<rest>[A-Za-z ]+?) # Font Family Name
+ \s*(?P<bold>Bold)? # Bold
+ \s*(?P<italic>Italic)? # Italic
+ \s*(?P<regular>Regular)? # Regular
+ $""", re.VERBOSE) # end of string
+# RegEx for extracting feature(s) from feats attribute of style element
+onefeat = re.compile(r"""^\s*
+ '(?P<featname>[^']+)'\s* # feature tag
+ (?P<featval>[^', ]+)\s* # feature value
+ ,?\s* # optional comma
+ (?P<remainder>.*) # rest of line (with zero or more tag-value pairs)
+ $""", re.VERBOSE)
+# RegEx for extracting language (and country) from lang attribute of style element
+langcode = re.compile(r"""^
+ (?P<langname>[A-Za-z]+) # language name
+ (- # (optional) hyphen and
+ (?P<countryname>[A-Za-z]+) # country name
+ (-[A-Za-z0-9][-A-Za-z0-9]*)? # (optional) hyphen and other codes
+ )?$""", re.VERBOSE)
+# RegEx to extract hex value from \uxxxxxx and function to generate Unicode character
+# use to change string to newstring:
+# newstring = re.sub(backu, hextounichr, string)
+# or newstring = re.sub(backu, lambda m: unichr(int(m.group(1),16)), string)
+backu = re.compile(r"\\u([0-9a-fA-F]{4,6})")
+def hextounichr(match):
+ return chr(int(match.group(1),16))
+
+def BoldItalic(bold, italic):
+ rs = ""
+ if bold:
+ rs += " Bold"
+ if italic:
+ rs += " Italic"
+ return rs
+
+def parsefeats(inputline):
+ featdic = {}
+ while inputline != "":
+ results = re.match(onefeat, inputline)
+ if results:
+ featdic[results.group('featname')] = results.group('featval')
+ inputline = results.group('remainder')
+ else:
+ break ### warning about unrecognized feature string: inputline
+ return ":" + "&".join( [f + '=' + featdic[f] for f in sorted(featdic)])
+
+def getfonts(fontsourcestrings, logfile, fromcommandline=True):
+ fontlist = []
+ checkfontfamily = []
+ checkembeddedfont = []
+ for fs in fontsourcestrings:
+ if not fromcommandline: # from FTML <fontsrc> either local() or url()
+ installed = True # Assume locally installed font
+ results = re.match(findfontnamelocal, fs)
+ fontstring = results.group('fontstring') if results else None
+ if fontstring == None:
+ installed = False
+ results = re.match(findfontnameurl, fs)
+ fontstring = results.group('fontstring') if results else None
+ if fontstring == None:
+ logfile.log("Invalid font specification: " + fs, "S")
+ else: # from command line
+ fontstring = fs
+ if "." in fs: # must be a filename
+ installed = False
+ else: # must be an installed font
+ installed = True
+ if installed:
+ # get name, bold and italic info from string
+ results = re.match(fontspec, fontstring.strip())
+ if results:
+ fontname = results.group('rest')
+ bold = results.group('bold') != None
+ italic = results.group('italic') != None
+ fontlist.append( (fontname, bold, italic, None) )
+ if (fontname, bold, italic) in checkfontfamily:
+ logfile.log("Duplicate font specification: " + fs, "W") ### or more severe?
+ else:
+ checkfontfamily.append( (fontname, bold, italic) )
+ else:
+ logfile.log("Invalid font specification: " + fontstring.strip(), "E")
+ else:
+ try:
+ # peek inside the font for the name, weight, style
+ f = ttLib.TTFont(fontstring)
+ # take name from name table, NameID 1, platform ID 3, Encoding ID 1 (possible fallback platformID 1, EncodingID =0)
+ n = f['name'] # name table from font
+ fontname = n.getName(1,3,1).toUnicode() # nameID 1 = Font Family name
+ # take bold and italic info from OS/2 table, fsSelection bits 0 and 5
+ o = f['OS/2'] # OS/2 table
+ italic = (o.fsSelection & 1) > 0
+ bold = (o.fsSelection & 32) > 0
+ fontlist.append( (fontname, bold, italic, fontstring) )
+ if (fontname, bold, italic) in checkfontfamily:
+ logfile.log("Duplicate font specification: " + fs + BoldItalic(bold, italic), "W") ### or more severe?
+ else:
+ checkfontfamily.append( (fontname, bold, italic) )
+ if (os.path.basename(fontstring)) in checkembeddedfont:
+ logfile.log("Duplicate embedded font: " + fontstring, "W") ### or more severe?
+ else:
+ checkembeddedfont.append(os.path.basename(fontstring))
+ except IOError:
+ logfile.log("Unable to find font file to embed: " + fontstring, "E")
+            except ttLib.TTLibError:
+ logfile.log("File is not a valid font: " + fontstring, "E")
+ except:
+ logfile.log("Error occurred while checking font: " + fontstring, "E") # some other error
+ return fontlist
+
+def init(LOdoc, numfonts=1):
+ totalwid = 6800 #6.8inches
+
+ #compute column widths
+ f = min(numfonts,4)
+ ashare = 4*(6-f)
+ dshare = 2*(6-f)
+ bshare = 100 - 2*ashare - dshare
+ awid = totalwid * ashare // 100
+ dwid = totalwid * dshare // 100
+ bwid = totalwid * bshare // (numfonts * 100)
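+    # e.g. numfonts=2 gives ashare=16, dshare=8, bshare=60,
+    # so awid=1088, dwid=544, bwid=2040 (2*awid + 2*bwid + dwid = 6800)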
+
+ # create styles for table, for columns (one style for each column width)
+ # and for one cell (used for everywhere except where background changed)
+ tstyle = Style(name="Table1", family="table")
+ tstyle.addElement(TableProperties(attributes={'width':str(totalwid/1000.)+"in", 'align':"left"}))
+ LOdoc.automaticstyles.addElement(tstyle)
+ tastyle = Style(name="Table1.A", family="table-column")
+ tastyle.addElement(TableColumnProperties(attributes={'columnwidth':str(awid/1000.)+"in"}))
+ LOdoc.automaticstyles.addElement(tastyle)
+ tbstyle = Style(name="Table1.B", family="table-column")
+ tbstyle.addElement(TableColumnProperties(attributes={'columnwidth':str(bwid/1000.)+"in"}))
+ LOdoc.automaticstyles.addElement(tbstyle)
+ tdstyle = Style(name="Table1.D", family="table-column")
+ tdstyle.addElement(TableColumnProperties(attributes={'columnwidth':str(dwid/1000.)+"in"}))
+ LOdoc.automaticstyles.addElement(tdstyle)
+ ta1style = Style(name="Table1.A1", family="table-cell")
+ ta1style.addElement(TableCellProperties(attributes={'padding':"0.035in", 'border':"0.05pt solid #000000"}))
+ LOdoc.automaticstyles.addElement(ta1style)
+ # text style used with non-<em> text
+ t1style = Style(name="T1", family="text")
+ t1style.addElement(TextProperties(attributes={'color':"#999999" }))
+ LOdoc.automaticstyles.addElement(t1style)
+ # create styles for Title, Subtitle
+ tstyle = Style(name="Title", family="paragraph")
+ tstyle.addElement(TextProperties(attributes={'fontfamily':"Arial",'fontsize':"24pt",'fontweight':"bold" }))
+ LOdoc.styles.addElement(tstyle)
+ ststyle = Style(name="Subtitle", family="paragraph")
+ ststyle.addElement(TextProperties(attributes={'fontfamily':"Arial",'fontsize':"18pt",'fontweight':"bold" }))
+ LOdoc.styles.addElement(ststyle)
+
+def doit(args) :
+ logfile = args.logger
+ if args.report: logfile.loglevel = args.report
+
+ try:
+ root = ET.parse(args.input).getroot()
+ except:
+ logfile.log("Error parsing FTML input", "S")
+
+ if args.font: # font(s) specified on command line
+ fontlist = getfonts( args.font, logfile )
+ else: # get font spec from FTML fontsrc element
+ fontlist = getfonts( [root.find("./head/fontsrc").text], logfile, False )
+ #fontlist = getfonts( [fs.text for fs in root.findall("./head/fontsrc")], False ) ### would allow multiple fontsrc elements
+ numfonts = len(fontlist)
+ if numfonts == 0:
+ logfile.log("No font(s) specified", "S")
+ if numfonts > 1:
+ formattedfontnum = ["{0:02d}".format(n) for n in range(numfonts)]
+ else:
+ formattedfontnum = [""]
+ logfile.log("Font(s) specified:", "V")
+ for n, (fontname, bold, italic, embeddedfont) in enumerate(fontlist):
+ logfile.log(" " + formattedfontnum[n] + " " + fontname + BoldItalic(bold, italic) + " " + str(embeddedfont), "V")
+
+ # get optional fontscale; compute pointsize as int(12*fontscale/100). If result xx is not 12, then add "fo:font-size=xxpt" in Px styles
+ pointsize = 12
+ fontscaleel = root.find("./head/fontscale")
+ if fontscaleel != None:
+ fontscale = fontscaleel.text
+ try:
+ pointsize = int(int(fontscale)*12/100)
+ except ValueError:
+ # any problem leaves pointsize 12
+ logfile.log("Problem with fontscale value; defaulting to 12 point", "W")
+
+ # Get FTML styles and generate LO writer styles
+ # P2 is paragraph style for string element when no features specified
+ # each Px (for P3...) corresponds to an FTML style, which specifies lang or feats or both
+ # if numfonts > 1, two-digit font number is appended to make an LO writer style for each FTML style + font combo
+ # When LO writer style is used with attribute rtl="True", "R" appended to style name
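+    # e.g. with two fonts and some rtl="True" tests, FTML style "P3" yields LO styles P300, P300R, P301, P301R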
+ LOstyles = {}
+ ftmlstyles = {}
+ Pstylenum = 2
+ LOstyles["P2"] = ("", None, None)
+ ftmlstyles[0] = "P2"
+ for s in root.findall("./head/styles/style"):
+ Pstylenum += 1
+ Pnum = "P" + str(Pstylenum)
+ featstring = ""
+ if s.get('feats'):
+ featstring = parsefeats(s.get('feats'))
+ langname = None
+ countryname = None
+ lang = s.get('lang')
+ if lang != None:
+ x = re.match(langcode, lang)
+ langname = x.group('langname')
+ countryname = x.group('countryname')
+ # FTML <test> element @stylename attribute references this <style> element @name attribute
+ ftmlstyles[s.get('name')] = Pnum
+ LOstyles[Pnum] = (featstring, langname, countryname)
+
+ # create LOwriter file and construct styles for tables, column widths, etc.
+ LOdoc = OpenDocumentText()
+ init(LOdoc, numfonts)
+ # Initialize sequence counters
+ sds = SequenceDecls()
+ sd = sds.addElement(SequenceDecl(displayoutlinelevel = '0', name = 'Illustration'))
+ sd = sds.addElement(SequenceDecl(displayoutlinelevel = '0', name = 'Table'))
+ sd = sds.addElement(SequenceDecl(displayoutlinelevel = '0', name = 'Text'))
+ sd = sds.addElement(SequenceDecl(displayoutlinelevel = '0', name = 'Drawing'))
+ LOdoc.text.addElement(sds)
+
+ # Create Px style for each (featstring, langname, countryname) tuple in LOstyles
+ # and for each font (if >1 font, append to Px style name a two-digit number corresponding to the font in fontlist)
+ # and (if at least one rtl attribute) suffix of nothing or "R"
+ # At the same time, collect info for creating FontFace elements (and any embedded fonts)
+ suffixlist = ["", "R"] if root.find(".//test/[@rtl='True']") != None else [""]
+ fontfaces = {}
+    for p in sorted(LOstyles, key = lambda x : int(x[1:])): # numeric sort on the digits so that e.g. "P10" sorts after "P9" rather than before "P2"
+ featstring, langname, countryname = LOstyles[p]
+ for n, (fontname, bold, italic, embeddedfont) in enumerate(fontlist): # embeddedfont = None if no embedding needed
+ fontnum = formattedfontnum[n]
+ # Collect fontface info: need one for each font family + feature combination
+ # Put embedded font in list only under fontname with empty featstring
+ if (fontname, featstring) not in fontfaces:
+ fontfaces[ (fontname, featstring) ] = []
+ if embeddedfont:
+ if (fontname, "") not in fontfaces:
+ fontfaces[ (fontname, "") ] = []
+ if embeddedfont not in fontfaces[ (fontname, "") ]:
+ fontfaces[ (fontname, "") ].append(embeddedfont)
+ # Generate paragraph styles
+ for s in suffixlist:
+ pstyle = Style(name=p+fontnum+s, family="paragraph")
+ if s == "R":
+ pstyle.addElement(ParagraphProperties(textalign="end", justifysingleword="false", writingmode="rl-tb"))
+ pstyledic = {}
+ pstyledic['fontnamecomplex'] = \
+ pstyledic['fontnameasian'] =\
+ pstyledic['fontname'] = fontname + featstring
+ pstyledic['fontsizecomplex'] = \
+ pstyledic['fontsizeasian'] = \
+ pstyledic['fontsize'] = str(pointsize) + "pt"
+ if bold:
+ pstyledic['fontweightcomplex'] = \
+ pstyledic['fontweightasian'] = \
+ pstyledic['fontweight'] = 'bold'
+ if italic:
+ pstyledic['fontstylecomplex'] = \
+ pstyledic['fontstyleasian'] = \
+ pstyledic['fontstyle'] = 'italic'
+ if langname != None:
+ pstyledic['languagecomplex'] = \
+ pstyledic['languageasian'] = \
+ pstyledic['language'] = langname
+ if countryname != None:
+ pstyledic['countrycomplex'] = \
+ pstyledic['countryasian'] = \
+ pstyledic['country'] = countryname
+ pstyle.addElement(TextProperties(attributes=pstyledic))
+# LOdoc.styles.addElement(pstyle) ### tried this, but when saving the generated odt, LO changed them to automatic styles
+ LOdoc.automaticstyles.addElement(pstyle)
+
+ fontstoembed = []
+ for fontname, featstring in sorted(fontfaces): ### Or find a way to keep order of <style> elements from original FTML?
+ ff = FontFace(name=fontname + featstring, fontfamily=fontname + featstring, fontpitch="variable")
+ LOdoc.fontfacedecls.addElement(ff)
+ if fontfaces[ (fontname, featstring) ]: # embedding needed for this combination
+ for fontfile in fontfaces[ (fontname, featstring) ]:
+ fontstoembed.append(fontfile) # make list for embedding
+ ffsrc = FontFaceSrc()
+ ffuri = FontFaceUri( **{'href': "Fonts/" + os.path.basename(fontfile), 'type': "simple"} )
+ ffformat = FontFaceFormat( **{'string': 'truetype'} )
+ ff.addElement(ffsrc)
+ ffsrc.addElement(ffuri)
+ ffuri.addElement(ffformat)
+
+ basename = "Table1.B"
+ colorcount = 0
+ colordic = {} # record color #rrggbb as key and "Table1.Bx" as stylename (where x is current color count)
+ tablenum = 0
+
+ # get title and comment and use as title and subtitle
+ titleel = root.find("./head/title")
+ if titleel != None:
+ LOdoc.text.addElement(H(outlinelevel=1, stylename="Title", text=titleel.text))
+ commentel = root.find("./head/comment")
+ if commentel != None:
+ LOdoc.text.addElement(P(stylename="Subtitle", text=commentel.text))
+
+ # Each testgroup element begins a new table
+ for tg in root.findall("./testgroup"):
+ # insert label attribute of testgroup element as subtitle
+ tglabel = tg.get('label')
+ if tglabel != None:
+ LOdoc.text.addElement(H(outlinelevel=1, stylename="Subtitle", text=tglabel))
+
+ # insert text from comment subelement of testgroup element
+ tgcommentel = tg.find("./comment")
+ if tgcommentel != None:
+ #print("commentel found")
+ LOdoc.text.addElement(P(text=tgcommentel.text))
+
+ tgbg = tg.get('background') # background attribute of testgroup element
+ tablenum += 1
+ table = Table(name="Table" + str(tablenum), stylename="Table1")
+ table.addElement(TableColumn(stylename="Table1.A"))
+ for n in range(numfonts):
+ table.addElement(TableColumn(stylename="Table1.B"))
+ table.addElement(TableColumn(stylename="Table1.A"))
+ table.addElement(TableColumn(stylename="Table1.D"))
+ for t in tg.findall("./test"): # Each test element begins a new row
+ # stuff to start the row
+ labeltext = t.get('label')
+ stylename = t.get('stylename')
+ stringel = t.find('./string')
+ commentel = t.find('./comment')
+ rtlsuffix = "R" if t.get('rtl') == 'True' else ""
+ comment = commentel.text if commentel != None else None
+ colBstyle = "Table1.A1"
+ tbg = t.get('background') # get background attribute of test group (if one exists)
+ if tbg == None: tbg = tgbg
+ if tbg != None: # if background attribute for test element (or background attribute for testgroup element)
+ if tbg not in colordic: # if color not found in color dic, create new style
+ colorcount += 1
+ newname = basename + str(colorcount)
+ colordic[tbg] = newname
+ tb1style = Style(name=newname, family="table-cell")
+ tb1style.addElement(TableCellProperties(attributes={'padding':"0.0382in", 'border':"0.05pt solid #000000", 'backgroundcolor':tbg}))
+ LOdoc.automaticstyles.addElement(tb1style)
+ colBstyle = colordic[tbg]
+
+ row = TableRow()
+ table.addElement(row)
+ # fill cells
+ # column A (label)
+ cell = TableCell(stylename="Table1.A1", valuetype="string")
+ if labeltext:
+ cell.addElement(P(stylename="Table_20_Contents", text = labeltext))
+ row.addElement(cell)
+
+ # column B (string)
+ for n in range(numfonts):
+ Pnum = ftmlstyles[stylename] if stylename != None else "P2"
+ Pnum = Pnum + formattedfontnum[n] + rtlsuffix
+ ### not clear if any of the following can be moved outside loop and reused
+ cell = TableCell(stylename=colBstyle, valuetype="string")
+ par = P(stylename=Pnum)
+ if len(stringel) == 0: # no <em> subelements
+ par.addText(re.sub(backu, hextounichr, stringel.text))
+ else: # handle <em> subelement(s)
+ if stringel.text != None:
+ par.addElement(Span(stylename="T1", text = re.sub(backu, hextounichr, stringel.text)))
+ for e in stringel.findall("em"):
+ if e.text != None:
+ par.addText(re.sub(backu, hextounichr, e.text))
+ if e.tail != None:
+ par.addElement(Span(stylename="T1", text = re.sub(backu, hextounichr, e.tail)))
+ cell.addElement(par)
+ row.addElement(cell)
+
+ # column C (comment)
+ cell = TableCell(stylename="Table1.A1", valuetype="string")
+ if comment:
+ cell.addElement(P(stylename="Table_20_Contents", text = comment))
+ row.addElement(cell)
+
+ # column D (stylename)
+ cell = TableCell(stylename="Table1.A1", valuetype="string")
+ if comment:
+ cell.addElement(P(stylename="Table_20_Contents", text = stylename))
+ row.addElement(cell)
+ LOdoc.text.addElement(table)
+
+ LOdoc.text.addElement(P(stylename="Subtitle", text="")) # Empty paragraph to end ### necessary?
+
+ try:
+ if fontstoembed: logfile.log("Embedding fonts in document", "V")
+ for f in fontstoembed:
+ LOdoc._extra.append(
+ OpaqueObject(filename = "Fonts/" + os.path.basename(f),
+ mediatype = "application/x-font-ttf", ### should be "application/font-woff" or "/font-woff2" for WOFF fonts, "/font-opentype" for ttf
+ content = io.open(f, "rb").read() ))
+ ci = ConfigItem(**{'name':'EmbedFonts', 'type': 'boolean'}) ### (name = 'EmbedFonts', type = 'boolean')
+ ci.addText('true')
+ cis=ConfigItemSet(**{'name':'ooo:configuration-settings'}) ### (name = 'ooo:configuration-settings')
+ cis.addElement(ci)
+ LOdoc.settings.addElement(cis)
+ except:
+ logfile.log("Error embedding fonts in document", "E")
+ logfile.log("Writing output file: " + args.output, "P")
+ LOdoc.save(args.output)
+ return
+
+def cmd() : execute("",doit, argspec)
+
+if __name__ == "__main__": cmd()
+
diff --git a/lib/silfont/scripts/psfgetglyphnames.py b/lib/silfont/scripts/psfgetglyphnames.py
new file mode 100755
index 0000000..da685ed
--- /dev/null
+++ b/lib/silfont/scripts/psfgetglyphnames.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+__doc__ = '''Create a list of glyphs to import from a list of characters.'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019-2020 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bobby de Vos'
+
+from silfont.core import execute
+
+suffix = "_psfgetglyphnames"
+argspec = [
+ ('ifont',{'help': 'Font file to copy from'}, {'type': 'infont'}),
+ ('glyphs',{'help': 'List of glyphs for psfcopyglyphs'}, {'type': 'outfile'}),
+ ('-i', '--input', {'help': 'List of characters to import'}, {'type': 'infile', 'def': None}),
+ ('-a','--aglfn',{'help': 'AGLFN list'}, {'type': 'incsv', 'def': None}),
+ ('-u','--uni',{'help': 'Generate uni or u glyph names if not in AGLFN', 'action': 'store_true', 'default': False}, {}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'})
+ ]
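+# Example invocation (illustrative file names):
+#   psfgetglyphnames -i chars.txt -a aglfn.txt MyFont.ufo glyphs.csv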
+
+def doit(args) :
+
+ font = args.ifont
+
+ aglfn = dict()
+ if args.aglfn:
+ # Load Adobe Glyph List For New Fonts (AGLFN)
+ incsv = args.aglfn
+ incsv.numfields = 3
+
+ for line in incsv:
+ usv = line[0]
+ aglfn_name = line[1]
+
+ codepoint = int(usv, 16)
+ aglfn[codepoint] = aglfn_name
+
+ # Gather data from the UFO
+ cmap = dict()
+ for glyph in font:
+ for codepoint in glyph.unicodes:
+ cmap[codepoint] = glyph.name
+
+ # Determine list of glyphs that need to be copied
+ header = ('glyph_name', 'rename', 'usv')
+ glyphs = args.glyphs
+ row = ','.join(header)
+ glyphs.write(row + '\n')
+
+ for line in args.input:
+
+ # Ignore comments
+ line = line.partition('#')[0]
+ line = line.strip()
+
+ # Ignore blank lines
+ if line == '':
+ continue
+
+ # Specify the glyph to copy
+ codepoint = int(line, 16)
+ usv = f'{codepoint:04X}'
+
+ # Specify how to construct default AGLFN name
+ # if codepoint is not listed in the AGLFN file
+ glyph_prefix = 'uni'
+ if codepoint > 0xFFFF:
+ glyph_prefix = 'u'
+
+ if codepoint in cmap:
+ # By default codepoints not listed in the AGLFN file
+ # will be imported with the glyph name of the source UFO
+ default_aglfn = ''
+ if args.uni:
+ # Provide AGLFN compatible names if requested
+ default_aglfn = f'{glyph_prefix}{usv}'
+
+ # Create control file for use with psfcopyglyphs
+ aglfn_name = aglfn.get(codepoint, default_aglfn)
+ glyph_name = cmap[codepoint]
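+            # If the source working name contains underscores and no AGLFN name was found, strip the underscores to form the rename target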
+ if '_' in glyph_name and aglfn_name == '':
+ aglfn_name = glyph_name.replace('_', '')
+ row = ','.join((glyph_name, aglfn_name, usv))
+ glyphs.write(row + '\n')
+
+
+def cmd() : execute("FP",doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfglyphs2ufo.py b/lib/silfont/scripts/psfglyphs2ufo.py
new file mode 100644
index 0000000..3af2ddd
--- /dev/null
+++ b/lib/silfont/scripts/psfglyphs2ufo.py
@@ -0,0 +1,275 @@
+#!/usr/bin/env python
+__doc__ = '''Export fonts in a GlyphsApp file to UFOs'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+from silfont.ufo import obsoleteLibKeys
+
+import glyphsLib
+import silfont.ufo
+import silfont.etutil
+from io import open
+import os, shutil
+
+argspec = [
+ ('glyphsfont', {'help': 'Input font file'}, {'type': 'filename'}),
+ ('masterdir', {'help': 'Output directory for masters'}, {}),
+ ('--nofixes', {'help': 'Bypass code fixing data', 'action': 'store_true', 'default': False}, {}),
+ ('--nofea', {'help': "Don't output features.fea", 'action': 'store_true', 'default': False}, {}),
+ ('--preservefea', {'help': "Retain the original features.fea in the UFO", 'action': 'store_true', 'default': False}, {}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_glyphs2ufo.log'}),
+ ('-r', '--restore', {'help': 'List of extra keys to restore to fontinfo.plist or lib.plist'}, {})]
+
+def doit(args):
+ logger = args.logger
+ logger.log("Creating UFO objects from GlyphsApp file", "I")
+ with open(args.glyphsfont, 'r', encoding='utf-8') as gfile:
+ gfont = glyphsLib.parser.load(gfile)
+ ufos = glyphsLib.to_ufos(gfont, include_instances=False, family_name=None, propagate_anchors=False, generate_GDEF=False)
+
+ # Extract directory name for use with restores
+ (glyphsdir, filen) = os.path.split(args.glyphsfont)
+
+ keylists = {
+
+ "librestorekeys": ["org.sil.pysilfontparams", "org.sil.altLineMetrics", "org.sil.lcg.toneLetters",
+ "org.sil.lcg.transforms", "public.glyphOrder", "public.postscriptNames",
+ "com.schriftgestaltung.disablesLastChange", "com.schriftgestaltung.disablesAutomaticAlignment",
+ "public.skipExportGlyphs"],
+ "libdeletekeys": ("com.schriftgestaltung.customParameter.GSFont.copyright",
+ "com.schriftgestaltung.customParameter.GSFont.designer",
+ "com.schriftgestaltung.customParameter.GSFont.manufacturer",
+ "com.schriftgestaltung.customParameter.GSFont.note",
+ "com.schriftgestaltung.customParameter.GSFont.Axes",
+ "com.schriftgestaltung.customParameter.GSFont.Axis Mappings",
+ "com.schriftgestaltung.customParameter.GSFontMaster.Master Name"),
+ "libdeleteempty": ("com.schriftgestaltung.DisplayStrings",),
+ "inforestorekeys": ["openTypeHeadCreated", "openTypeNamePreferredFamilyName", "openTypeNamePreferredSubfamilyName",
+ "openTypeNameUniqueID", "openTypeOS2WeightClass", "openTypeOS2WidthClass", "postscriptFontName",
+ "postscriptFullName", "styleMapFamilyName", "styleMapStyleName", "note",
+ "woffMetadataCredits", "woffMetadataDescription"],
+ "integerkeys": ("openTypeOS2WeightClass", "openTypeOS2WidthClass"),
+ "infodeletekeys": ("openTypeVheaVertTypoAscender", "openTypeVheaVertTypoDescender", "openTypeVheaVertTypoLineGap"),
+ # "infodeleteempty": ("openTypeOS2Selection",)
+ }
+
+    if args.restore: # Extra keys to restore. Add to both lists, since key names should never be duplicated between them
+        keylist = args.restore.split(",")
+        keylists["librestorekeys"] += keylist
+        keylists["inforestorekeys"] += keylist  # extend (not append) so each key is added individually
+
+ loglists = []
+ obskeysfound={}
+ for ufo in ufos:
+ loglists.append(process_ufo(ufo, keylists, glyphsdir, args, obskeysfound))
+ for loglist in loglists:
+ for logitem in loglist: logger.log(logitem[0], logitem[1])
+ if obskeysfound:
+ logmess = "The following obsolete keys were found. They may have been in the original UFO or you may have an old version of glyphsLib installed\n"
+ for fontname in obskeysfound:
+ keys = obskeysfound[fontname]
+ logmess += " " + fontname + ": "
+ for key in keys:
+ logmess += key + ", "
+ logmess += "\n"
+ logger.log(logmess, "E")
+
+def process_ufo(ufo, keylists, glyphsdir, args, obskeysfound):
+ loglist=[]
+# sn = ufo.info.styleName # )
+# sn = sn.replace("Italic Italic", "Italic") # ) Temp fixes due to glyphLib incorrectly
+# sn = sn.replace("Italic Bold Italic", "Bold Italic") # ) forming styleName
+# sn = sn.replace("Extra Italic Light Italic", "Extra Light Italic") # )
+# ufo.info.styleName = sn # )
+ fontname = ufo.info.familyName.replace(" ", "") + "-" + ufo.info.styleName.replace(" ", "")
+
+ # Fixes to the data
+ if not args.nofixes:
+ loglist.append(("Fixing data in " + fontname, "P"))
+ # lib.plist processing
+ loglist.append(("Checking lib.plist", "P"))
+
+ # Restore values from original UFOs, assuming named as <fontname>.ufo in same directory as input .gylphs file
+
+ ufodir = os.path.join(glyphsdir, fontname + ".ufo")
+ try:
+ origlibplist = silfont.ufo.Uplist(font=None, dirn=ufodir, filen="lib.plist")
+ except Exception as e:
+ loglist.append(("Unable to open lib.plist in " + ufodir + "; values will not be restored", "E"))
+ origlibplist = None
+
+ if origlibplist is not None:
+
+ for key in keylists["librestorekeys"]:
+ current = None if key not in ufo.lib else ufo.lib[key]
+ if key in origlibplist:
+ new = origlibplist.getval(key)
+ if current == new:
+ continue
+ else:
+ ufo.lib[key] = new
+ logchange(loglist, " restored from backup ufo. ", key, current, new)
+ elif current:
+ ufo.lib[key] = None
+ logchange(loglist, " removed since not in backup ufo. ", key, current, None)
+
+ # Delete unneeded keys
+
+ for key in keylists["libdeletekeys"]:
+ if key in ufo.lib:
+ current = ufo.lib[key]
+ del ufo.lib[key]
+ logchange(loglist, " deleted. ", key, current, None)
+
+ for key in keylists["libdeleteempty"]:
+ if key in ufo.lib and (ufo.lib[key] == "" or ufo.lib[key] == []):
+ current = ufo.lib[key]
+ del ufo.lib[key]
+ logchange(loglist, " empty field deleted. ", key, current, None)
+
+ # Check for obsolete keys
+ for key in obsoleteLibKeys:
+ if key in ufo.lib:
+ if fontname not in obskeysfound: obskeysfound[fontname] = []
+ obskeysfound[fontname].append(key)
+
+ # Special processing for Axis Mappings
+ #key = "com.schriftgestaltung.customParameter.GSFont.Axis Mappings"
+ #if key in ufo.lib:
+ # current =ufo.lib[key]
+ # new = dict(current)
+ # for x in current:
+ # val = current[x]
+ # k = list(val.keys())[0]
+ # if k[-2:] == ".0": new[x] = {k[0:-2]: val[k]}
+ # if current != new:
+ # ufo.lib[key] = new
+ # logchange(loglist, " key names set to integers. ", key, current, new)
+
+ # Special processing for ufo2ft filters
+ key = "com.github.googlei18n.ufo2ft.filters"
+ if key in ufo.lib:
+ current = ufo.lib[key]
+ new = list(current)
+ for x in current:
+ if x["name"] == "eraseOpenCorners":
+ new.remove(x)
+
+ if current != new:
+ if new == []:
+ del ufo.lib[key]
+ else:
+ ufo.lib[key] = new
+ logchange(loglist, " eraseOpenCorners filter removed ", key, current, new)
+
+ # fontinfo.plist processing
+
+ loglist.append(("Checking fontinfo.plist", "P"))
+
+ try:
+ origfontinfo = silfont.ufo.Uplist(font=None, dirn=ufodir, filen="fontinfo.plist")
+ except Exception as e:
+ loglist.append(("Unable to open fontinfo.plist in " + ufodir + "; values will not be restored", "E"))
+ origfontinfo = None
+
+ if origfontinfo is not None:
+ for key in keylists["inforestorekeys"]:
+ current = None if not hasattr(ufo.info, key) else getattr(ufo.info, key)
+ if key in origfontinfo:
+ new = origfontinfo.getval(key)
+ if key in keylists["integerkeys"]: new = int(new)
+ if current == new:
+ continue
+ else:
+ setattr(ufo.info, key, new)
+ logchange(loglist, " restored from backup ufo. ", key, current, new)
+ elif current:
+ setattr(ufo.info, key, None)
+ logchange(loglist, " removed since not in backup ufo. ", key, current, None)
+
+ if getattr(ufo.info, "italicAngle") == 0: # Remove italicAngle if 0
+ setattr(ufo.info, "italicAngle", None)
+ logchange(loglist, " removed", "italicAngle", 0, None)
+
+ # Delete unneeded keys
+
+ for key in keylists["infodeletekeys"]:
+ if hasattr(ufo.info, key):
+ current = getattr(ufo.info, key)
+ setattr(ufo.info, key, None)
+ logchange(loglist, " deleted. ", key, current, None)
+
+# for key in keylists["infodeleteempty"]:
+# if hasattr(ufo.info, key) and getattr(ufo.info, key) == "":
+# setattr(ufo.info, key, None)
+# logchange(loglist, " empty field deleted. ", key, current, None)
+ if args.nofea or args.preservefea: ufo.features.text = "" # Suppress output of features.fea
+
+ for layer in ufo.layers:
+ for glyph in layer:
+ lib = glyph.lib
+ if "public.verticalOrigin" in lib: del lib["public.verticalOrigin"]
+
+ # Write ufo out
+ ufopath = os.path.join(args.masterdir, fontname + ".ufo")
+ if args.preservefea: # Move features.fea out of the ufo so that it can be restored afterward
+ origfea = os.path.join(ufopath, "features.fea")
+ hiddenfea = os.path.join(args.masterdir, fontname + "features.tmp")
+ if os.path.exists(origfea):
+ loglist.append((f'Renaming {origfea} to {hiddenfea}', "I"))
+ os.rename(origfea, hiddenfea)
+ else:
+ loglist.append((f"{origfea} does not exists so can't be restored", "E"))
+ origfea = None
+ loglist.append(("Writing out " + ufopath, "P"))
+ if os.path.exists(ufopath): shutil.rmtree(ufopath)
+ ufo.save(ufopath)
+ if args.preservefea and origfea:
+ loglist.append((f'Renaming {hiddenfea} back to {origfea}', "I"))
+ os.rename(hiddenfea, origfea)
+
+ # Now correct the newly-written fontinfo.plist with changes that can't be made via glyphsLib
+ if not args.nofixes:
+ fontinfo = silfont.ufo.Uplist(font=None, dirn=ufopath, filen="fontinfo.plist")
+ changes = False
+ for key in ("guidelines", "postscriptBlueValues", "postscriptFamilyBlues", "postscriptFamilyOtherBlues",
+ "postscriptOtherBlues"):
+ if key in fontinfo and fontinfo.getval(key) == []:
+ fontinfo.remove(key)
+ changes = True
+                logchange(loglist, " empty list deleted", key, [], None)
+ if changes:
+ # Create outparams. Just need any valid values, since font will need normalizing later
+ params = args.paramsobj
+ paramset = params.sets["main"]
+ outparams = {"attribOrders": {}}
+ for parn in params.classes["outparams"]: outparams[parn] = paramset[parn]
+ loglist.append(("Writing updated fontinfo.plist", "I"))
+ silfont.ufo.writeXMLobject(fontinfo, params=outparams, dirn=ufopath, filen="fontinfo.plist", exists=True,
+ fobject=True)
+ return loglist
+
+def logchange(loglist, logmess, key, old, new):
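+    # Append an "I" log entry describing the change to 'key', truncating long values; full values and their types are logged at "V" level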
+ oldstr = str(old) if len(str(old)) < 22 else str(old)[0:20] + "..."
+ newstr = str(new) if len(str(new)) < 22 else str(new)[0:20] + "..."
+ logmess = key + logmess
+ if old is None:
+ logmess = logmess + " New value: " + newstr
+ else:
+ if new is None:
+ logmess = logmess + " Old value: " + oldstr
+ else:
+ logmess = logmess + " Old value: " + oldstr + ", new value: " + newstr
+ loglist.append((logmess, "I"))
+ # Extra verbose logging
+ if len(str(old)) > 21 :
+ loglist.append(("Full old value: " + str(old), "V"))
+ if len(str(new)) > 21 :
+ loglist.append(("Full new value: " + str(new), "V"))
+ loglist.append(("Types: Old - " + str(type(old)) + ", New - " + str(type(new)), "V"))
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfmakedeprecated.py b/lib/silfont/scripts/psfmakedeprecated.py
new file mode 100644
index 0000000..b86ed4c
--- /dev/null
+++ b/lib/silfont/scripts/psfmakedeprecated.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+'''Creates deprecated versions of glyphs: takes the specified glyph and creates a
+duplicate with an additional box surrounding it so that it becomes reversed,
+and assigns a new unicode encoding to it.
+Input is a csv with three fields: original,new,unicode'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+
+argspec = [
+ ('ifont', {'help': 'Input font filename'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': 'todeprecate.csv'}),
+ ('-l','--log',{'help': 'Set log file name'}, {'type': 'outfile', 'def': '_deprecated.log'})]
+
+offset = 30
+
+def doit(args) :
+ font = args.ifont
+ logger = args.logger
+
+ # Process csv list into a dictionary structure
+ args.input.numfields = 3
+ deps = {}
+ for line in args.input :
+ deps[line[0]] = {"newname": line[1], "newuni": line[2]}
+
+ # Iterate through dictionary (unsorted)
+ for source, target in deps.items() :
+ # Check if source glyph is in font
+ if source in font.keys() :
+ # Give warning if target is already in font, but overwrite anyway
+ targetname = target["newname"]
+ targetuni = int(target["newuni"], 16)
+ if targetname in font.keys() :
+ logger.log("Warning: " + targetname + " already in font and will be replaced")
+
+ # Make a copy of source into a new glyph object
+ sourceglyph = font[source]
+ newglyph = sourceglyph.copy()
+
+ # Draw box around it
+ xmin, ymin, xmax, ymax = sourceglyph.bounds
+ pen = newglyph.getPen()
+ pen.moveTo((xmax + offset, ymin - offset))
+ pen.lineTo((xmax + offset, ymax + offset))
+ pen.lineTo((xmin - offset, ymax + offset))
+ pen.lineTo((xmin - offset, ymin - offset))
+ pen.closePath()
+
+ # Set unicode
+ newglyph.unicodes = []
+ newglyph.unicode = targetuni
+
+ # Add the new glyph object to the font with name target
+ font.__setitem__(targetname,newglyph)
+
+ # Decompose glyph in case there may be components
+            # It seems you can't decompose a glyph that hasn't yet been added to a font
+ font[targetname].decompose()
+ # Correct path direction
+ font[targetname].correctDirection()
+
+ logger.log(source + " duplicated to " + targetname)
+ else :
+ logger.log("Warning: " + source + " not in font")
+
+ return font
+
+def cmd() : execute("FP",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfmakefea.py b/lib/silfont/scripts/psfmakefea.py
new file mode 100755
index 0000000..6ef0613
--- /dev/null
+++ b/lib/silfont/scripts/psfmakefea.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python3
+__doc__ = 'Make features.fea file'
+# TODO: add conditional compilation, compare to fea, compile to ttf
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Martin Hosken, Alan Ward'
+
+import silfont.ufo as ufo
+from collections import OrderedDict
+from silfont.feax_parser import feaplus_parser
+from xml.etree import ElementTree as et
+import re
+
+from silfont.core import execute
+
+def getbbox(g):
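+    # Return (xmin, ymin, xmax, ymax) over the glyph's on-curve points (those with a 'type' attribute), or (0, 0, 0, 0) if there is no outline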
+ res = (65536, 65536, -65536, -65536)
+ if g['outline'] is None:
+ return (0, 0, 0, 0)
+ for c in g['outline'].contours:
+ for p in c['point']:
+ if 'type' in p.attrib: # any actual point counts
+ x = float(p.get('x', '0'))
+ y = float(p.get('y', '0'))
+ res = (min(x, res[0]), min(y, res[1]), max(x, res[2]), max(y, res[3]))
+ return res
+
+class Glyph(object) :
+ def __init__(self, name, advance=0, bbox=None) :
+ self.name = name
+ self.anchors = {}
+ self.is_mark = False
+ self.advance = int(float(advance))
+ self.bbox = bbox or (0, 0, 0, 0)
+
+ def add_anchor(self, info) :
+ self.anchors[info['name']] = (int(float(info['x'])), int(float(info['y'])))
+
+ def decide_if_mark(self) :
+ for a in self.anchors.keys() :
+ if a.startswith("_") :
+ self.is_mark = True
+ break
+
+def decode_element(e):
+ '''Convert plist element into python structures'''
+ res = None
+ if e.tag == 'string':
+ return e.text
+ elif e.tag == 'integer':
+ return int(e.text)
+    elif e.tag == 'real':
+ return float(e.text)
+ elif e.tag == 'array':
+ res = [decode_element(x) for x in e]
+ elif e.tag == 'dict':
+ res = {}
+ for p in zip(e[::2], e[1::2]):
+ res[p[0].text] = decode_element(p[1])
+ return res
+
+class Font(object) :
+ def __init__(self, defines = None):
+ self.glyphs = OrderedDict()
+ self.classes = OrderedDict()
+ self.all_aps = OrderedDict()
+ self.fontinfo = {}
+ self.kerns = {}
+ self.defines = {} if defines is None else defines
+
+ def readaps(self, filename, omitaps='', params = None) :
+ omittedaps = set(omitaps.replace(',',' ').split()) # allow comma- and/or space-separated list
+ if filename.endswith('.ufo') :
+ f = ufo.Ufont(filename, params = params)
+ self.fontinfo = {}
+ for k, v in f.fontinfo._contents.items():
+ self.fontinfo[k] = decode_element(v[1])
+ skipglyphs = set(f.lib.getval('public.skipExportGlyphs', []))
+ for g in f.deflayer :
+ if g in skipglyphs:
+ continue
+ ufo_g = f.deflayer[g]
+ advb = ufo_g['advance']
+ adv = advb.width if advb is not None else 0
+ bbox = getbbox(ufo_g)
+ glyph = Glyph(g, advance=adv, bbox=bbox)
+ self.glyphs[g] = glyph
+ if 'anchor' in ufo_g._contents :
+ for a in ufo_g._contents['anchor'] :
+ if a.element.attrib['name'] not in omittedaps:
+ glyph.add_anchor(a.element.attrib)
+ self.all_aps.setdefault(a.element.attrib['name'], []).append(glyph)
+ if hasattr(f, 'groups'):
+ for k, v in f.groups._contents.items():
+ self.classes[k.lstrip('@')] = decode_element(v[1])
+ if hasattr(f, 'kerning'):
+ for k, v in f.kerning._contents.items():
+ key = k.lstrip('@')
+ if key in self.classes:
+ key = "@" + key
+ subelements = decode_element(v[1])
+ kerndict = {}
+ for s, n in subelements.items():
+ skey = s.lstrip('@')
+ if skey in self.classes:
+ skey = "@" + skey
+ kerndict[skey] = n
+ self.kerns[key] = kerndict
+ elif filename.endswith('.xml') :
+ currGlyph = None
+ currPoint = None
+ self.fontinfo = {}
+ for event, elem in et.iterparse(filename, events=('start', 'end')):
+ if event == 'start':
+ if elem.tag == 'glyph':
+ name = elem.get('PSName', '')
+ if name:
+ currGlyph = Glyph(name)
+ self.glyphs[name] = currGlyph
+ currPoint = None
+ elif elem.tag == 'point':
+ currPoint = {'name' : elem.get('type', '')}
+ elif elem.tag == 'location' and currPoint is not None:
+ currPoint['x'] = int(elem.get('x', 0))
+ currPoint['y'] = int(elem.get('y', 0))
+ elif elem.tag == 'font':
+ n = elem.get('name', '')
+ x = n.split('-')
+ if len(x) == 2:
+ self.fontinfo['familyName'] = x[0]
+ self.fontinfo['openTypeNamePreferredFamilyName'] = x[0]
+ self.fontinfo['styleMapFamilyName'] = x[0]
+ self.fontinfo['styleName'] = x[1]
+ self.fontinfo['openTypeNamePreferredSubfamilyName'] = x[1]
+ self.fontinfo['postscriptFullName'] = "{0} {1}".format(*x)
+ self.fontinfo['postscriptFontName'] = n
+ elif event == 'end':
+ if elem.tag == 'point':
+ if currGlyph and currPoint['name'] not in omittedaps:
+ currGlyph.add_anchor(currPoint)
+ self.all_aps.setdefault(currPoint['name'], []).append(currGlyph)
+ currPoint = None
+ elif elem.tag == 'glyph':
+ currGlyph = None
+
+ def read_classes(self, fname, classproperties=False):
+ doc = et.parse(fname)
+ for c in doc.findall('.//class'):
+ class_name = c.get('name')
+            m = re.search(r'\[(\d+)\]$', class_name)
+ # support fixedclasses like make_gdl.pl via AP.pm
+ if m:
+ class_nm = class_name[0:m.start()]
+ ix = int(m.group(1))
+ else:
+ class_nm = class_name
+ ix = None
+ cl = self.classes.setdefault(class_nm, [])
+ for e in c.get('exts', '').split() + [""]:
+ for g in c.text.split():
+ if g+e in self.glyphs or (e == '' and g.startswith('@')):
+ if ix:
+ cl.insert(ix, g+e)
+ else:
+ cl.append(g+e)
+ if not classproperties:
+ return
+ for c in doc.findall('.//property'):
+ for e in c.get('exts', '').split() + [""]:
+ for g in c.text.split():
+ if g+e in self.glyphs:
+ cname = c.get('name') + "_" + c.get('value')
+ self.classes.setdefault(cname, []).append(g+e)
+
+ def make_classes(self, ligmode) :
+ for name, g in self.glyphs.items() :
+ # pull off suffix and make classes
+ # TODO: handle ligatures
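+        # e.g. if both "a" and "a.alt" exist, "a.alt" is added to class c_alt and "a" to cno_alt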
+ base = name
+ if ligmode is None or 'comp' not in ligmode or "_" not in name:
+ pos = base.rfind('.')
+ while pos > 0 :
+ old_base = base
+ ext = base[pos+1:]
+ base = base[:pos]
+ ext_class_nm = "c_" + ext
+ if base in self.glyphs and old_base in self.glyphs:
+ glyph_lst = self.classes.setdefault(ext_class_nm, [])
+ if not old_base in glyph_lst:
+ glyph_lst.append(old_base)
+ self.classes.setdefault("cno_" + ext, []).append(base)
+ pos = base.rfind('.')
+ if ligmode is not None and "_" in name:
+ comps = name.split("_")
+ if "comp" in ligmode or "." not in comps[-1]:
+ base = comps.pop(-1 if "last" in ligmode else 0)
+ cname = base.replace(".", "_")
+ noname = "_".join(comps)
+ if base in self.glyphs and noname in self.glyphs:
+ glyph_lst = self.classes.setdefault("clig_"+cname, [])
+ if name not in glyph_lst:
+ glyph_lst.append(name)
+ self.classes.setdefault("cligno_"+cname, []).append(noname)
+ if g.is_mark :
+ self.classes.setdefault('GDEF_marks', []).append(name)
+ else :
+ self.classes.setdefault('GDEF_bases', []).append(name)
+
+ def make_marks(self) :
+ for name, g in self.glyphs.items() :
+ g.decide_if_mark()
+
+ def order_classes(self):
+ # return ordered list of classnames as desired for FEA
+
+ # Start with alphabetical then correct:
+ # 1. Put classes like "cno_whatever" adjacent to "c_whatever"
+ # 2. Classes can be defined in terms of other classes but FEA requires that
+ # classes be defined before they can be referenced.
+
+ def sortkey(x):
+ key1 = 'c_' + x[4:] if x.startswith('cno_') else x
+ return (key1, x)
+
+ classes = sorted(self.classes.keys(), key=sortkey)
+ links = {} # key = classname; value = list of other classes that include this one
+ counts = {} # key = classname; value = count of un-output classes that this class includes
+ for name in classes:
+ y = [c[1:] for c in self.classes[name] if c.startswith('@')] #list of included classes
+ counts[name] = len(y)
+ for c in y:
+ links.setdefault(c, []).append(name)
+
+ outclasses = []
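+        # Simple topological sort: repeatedly output any class whose referenced classes have all already been output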
+ while len(classes) > 0:
+ foundone = False
+ for name in classes:
+ if counts[name] == 0:
+ foundone = True
+ # output this class
+ outclasses.append(name)
+ classes.remove(name)
+ # adjust counts of classes that include this one
+ if name in links:
+ for n in links[name]:
+ counts[n] -= 1
+ # It may now be possible to output some we skipped earlier,
+ # so start over from the beginning of the list
+ break
+ if not foundone:
+ # all remaining classes include un-output classes and thus there is a loop somewhere
+ raise ValueError("Class reference loop(s) found: " + ", ".join(classes))
+ return outclasses
+
+ def addComment(self, parser, text):
+ cmt = parser.ast.Comment("# " + text, location=None)
+ cmt.pretext = "\n"
+ parser.add_statement(cmt)
+
+ def append_classes(self, parser) :
+ # normal glyph classes
+ self.addComment(parser, "Main Classes")
+ for name in self.order_classes():
+ gc = parser.ast.GlyphClass(None, location=None)
+ for g in self.classes[name] :
+ gc.append(g)
+ gcd = parser.ast.GlyphClassDefinition(name, gc, location=None)
+ parser.add_statement(gcd)
+ parser.define_glyphclass(name, gcd)
+
+ def _addGlyphsToClass(self, parser, glyphs, gc, anchor, definer):
+ if len(glyphs) > 1 :
+ val = parser.ast.GlyphClass(glyphs, location=None)
+ else :
+ val = parser.ast.GlyphName(glyphs[0], location=None)
+ classdef = definer(gc, anchor, val, location=None)
+ gc.addDefinition(classdef)
+ parser.add_statement(classdef)
+
+ def append_positions(self, parser):
+ # create base and mark classes, add to fea file dicts and parser symbol table
+ bclassdef_lst = []
+ mclassdef_lst = []
+ self.addComment(parser, "Positioning classes and statements")
+ for ap_nm, glyphs_w_ap in self.all_aps.items() :
+ self.addComment(parser, "AP: " + ap_nm)
+ # e.g. all glyphs with U AP
+ if not ap_nm.startswith("_"):
+ if any(not x.is_mark for x in glyphs_w_ap):
+ gcb = parser.set_baseclass(ap_nm)
+ parser.add_statement(gcb)
+ if any(x.is_mark for x in glyphs_w_ap):
+ gcm = parser.set_baseclass(ap_nm + "_MarkBase")
+ parser.add_statement(gcm)
+ else:
+ gc = parser.set_markclass(ap_nm)
+
+ # create lists of glyphs that use the same point (name and coordinates)
+ # that can share a class definition
+ anchor_cache = OrderedDict()
+ markanchor_cache = OrderedDict()
+ for g in glyphs_w_ap :
+ p = g.anchors[ap_nm]
+ if g.is_mark and not ap_nm.startswith("_"):
+ markanchor_cache.setdefault(p, []).append(g.name)
+ else:
+ anchor_cache.setdefault(p, []).append(g.name)
+
+ if ap_nm.startswith("_"):
+ for p, glyphs_w_pt in anchor_cache.items():
+ anchor = parser.ast.Anchor(p[0], p[1], location=None)
+ self._addGlyphsToClass(parser, glyphs_w_pt, gc, anchor, parser.ast.MarkClassDefinition)
+ else:
+ for p, glyphs_w_pt in anchor_cache.items():
+ anchor = parser.ast.Anchor(p[0], p[1], location=None)
+ self._addGlyphsToClass(parser, glyphs_w_pt, gcb, anchor, parser.ast.BaseClassDefinition)
+ for p, glyphs_w_pt in markanchor_cache.items():
+ anchor = parser.ast.Anchor(p[0], p[1], location=None)
+ self._addGlyphsToClass(parser, glyphs_w_pt, gcm, anchor, parser.ast.BaseClassDefinition)
+
+#TODO: provide more argument info
+argspec = [
+ ('infile', {'nargs': '?', 'help': 'Input UFO or file'}, {'def': None, 'type': 'filename'}),
+    ('-i', '--input', {'required': True, 'help': 'Fea file to merge'}, {}),
+ ('-o', '--output', {'help': 'Output fea file'}, {}),
+ ('-c', '--classfile', {'help': 'Classes file'}, {}),
+ ('-L', '--ligmode', {'help': 'Parse ligatures: last - use last element as class name, first - use first element as class name, lastcomp, firstcomp - final variants are part of the component not the whole ligature'}, {}),
+ ('-D', '--define', {'action': 'append', 'help': 'Add option definition to pass to fea code --define=var=val'}, {}),
+ # ('--debug', {'help': 'Drop into pdb', 'action': 'store_true'}, {}),
+ ('--classprops', {'help': 'Include property elements from classes file', 'action': 'store_true'}, {}),
+ ('--omitaps', {'help': 'names of attachment points to omit (comma- or space-separated)', 'default': '', 'action': 'store'}, {})
+]
+
+def doit(args) :
+ defines = dict(x.split('=') for x in args.define) if args.define else {}
+ font = Font(defines)
+ # if args.debug:
+ # import pdb; pdb.set_trace()
+ if "checkfix" not in args.params:
+ args.paramsobj.sets["main"]["checkfix"] = "None"
+ if args.infile is not None:
+ font.readaps(args.infile, args.omitaps, args.paramsobj)
+
+ font.make_marks()
+ font.make_classes(args.ligmode)
+ if args.classfile:
+ font.read_classes(args.classfile, classproperties = args.classprops)
+
+ p = feaplus_parser(None, font.glyphs, font.fontinfo, font.kerns, font.defines)
+ doc_ufo = p.parse() # returns an empty ast.FeatureFile
+
+ # Add goodies from the font
+ font.append_classes(p)
+ font.append_positions(p)
+
+ # parse the input fea file
+ if args.input :
+ doc_fea = p.parse(args.input)
+ else:
+ doc_fea = doc_ufo
+
+ # output as doc.asFea()
+ if args.output :
+ with open(args.output, "w") as of :
+ of.write(doc_fea.asFea())
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == '__main__': cmd()
diff --git a/lib/silfont/scripts/psfmakescaledshifted.py b/lib/silfont/scripts/psfmakescaledshifted.py
new file mode 100644
index 0000000..69bbbb9
--- /dev/null
+++ b/lib/silfont/scripts/psfmakescaledshifted.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+'''Creates duplicate versions of glyphs that are scaled and shifted.
+Input is a csv with three fields: original,new,unicode'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney'
+
+from silfont.core import execute
+from silfont.util import parsecolors
+from ast import literal_eval as make_tuple
+
+argspec = [
+ ('ifont', {'help': 'Input font filename'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'Input csv file', 'required': True}, {'type': 'incsv', 'def': 'scaledshifted.csv'}),
+ ('-c', '--colorcells', {'help': 'Color cells of generated glyphs', 'action': 'store_true'}, {}),
+ ('--color', {'help': 'Color to use when marking generated glyphs'},{}),
+ ('-t','--transform',{'help': 'Transform matrix or type', 'required': True}, {}),
+ ('-l','--log',{'help': 'Set log file name'}, {'type': 'outfile', 'def': '_scaledshifted.log'})]
+
+def doit(args) :
+ font = args.ifont
+ logger = args.logger
+ transform = args.transform
+
+    if transform.lstrip().startswith("("):
+        # Set transform from matrix - example: "(0.72, 0, 0, 0.6, 10, 806)"
+        # (xx, xy, yx, yy, x, y)
+        trans = make_tuple(transform.strip())
+        adjM = 0  # no adjustMetrics value when an explicit matrix is given; adjM is used below when setting the width
+ else:
+ # Set transformation specs from UFO lib.plist org.sil.lcg.transforms
+ # Will need to be enhanced to support adjustMetrics, boldX, boldY parameters for smallcaps
+ try:
+ trns = font.lib["org.sil.lcg.transforms"][transform]
+ except KeyError:
+ logger.log("Error: transform type not found in lib.plist org.sil.lcg.transforms", "S")
+ else:
+ try:
+ adjM = trns["adjustMetrics"]
+ except KeyError:
+ adjM = 0
+ try:
+ skew = trns["skew"]
+ except KeyError:
+ skew = 0
+ try:
+ shiftX = trns["shiftX"]
+ except KeyError:
+ shiftX = 0
+ try:
+ shiftY = trns["shiftY"]
+ except KeyError:
+ shiftY = 0
+ trans = (trns["scaleX"], 0, skew, trns["scaleY"], shiftX+adjM, shiftY)
+
+
+ # Process csv list into a dictionary structure
+ args.input.numfields = 3
+ deps = {}
+ for (source, newname, newuni) in args.input :
+ if source in deps:
+ deps[source].append({"newname": newname, "newuni": newuni})
+ else:
+ deps[source] = [({"newname": newname, "newuni": newuni})]
+
+ # Iterate through dictionary (unsorted)
+ for source in deps:
+ # Check if source glyph is in font
+ if source in font.keys() :
+ for target in deps[source]:
+ # Give warning if target is already in font, but overwrite anyway
+ targetname = target["newname"]
+ if targetname in font.keys() :
+ logger.log("Warning: " + targetname + " already in font and will be replaced")
+
+ # Make a copy of source into a new glyph object
+ sourceglyph = font[source]
+ newglyph = sourceglyph.copy()
+
+ newglyph.transformBy(trans)
+                # Set width explicitly because transformBy does not seem to adjust the advance width properly
+ newglyph.width = (int(newglyph.width * trans[0])) + (adjM * 2)
+
+ # Set unicode
+ newglyph.unicodes = []
+ if target["newuni"]:
+ newglyph.unicode = int(target["newuni"], 16)
+
+ # mark glyphs as being generated by setting cell mark color (defaults to blue if args.color not set)
+ if args.colorcells or args.color:
+ if args.color:
+ (color, name, logcolor, splitcolor) = parsecolors(args.color, single=True)
+ if color is None: logger.log(logcolor, "S") # If color not parsed, parsecolors() puts error in logcolor
+ color = color.split(",") # Need to convert string to tuple
+ color = (float(color[0]), float(color[1]), float(color[2]), float(color[3]))
+ else:
+ color = (0.18, 0.16, 0.78, 1)
+ newglyph.markColor = color
+
+ # Add the new glyph object to the font with name target
+ font.__setitem__(targetname, newglyph)
+
+ # Decompose glyph in case there may be components
+                # It seems you can't decompose a glyph that hasn't yet been added to a font
+ font[targetname].decompose()
+ # Correct path direction
+ font[targetname].correctDirection()
+
+ logger.log(source + " duplicated to " + targetname)
+ else :
+ logger.log("Warning: " + source + " not in font")
+
+ return font
+
+def cmd() : execute("FP",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfmakewoffmetadata.py b/lib/silfont/scripts/psfmakewoffmetadata.py
new file mode 100755
index 0000000..c7d8128
--- /dev/null
+++ b/lib/silfont/scripts/psfmakewoffmetadata.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python
+__doc__ = 'Make the WOFF metadata xml file based on input UFO (and optionally FONTLOG.txt)'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+import silfont.ufo as UFO
+
+import re, os, datetime
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('font', {'help': 'Source font file'}, {'type': 'infont'}),
+ ('-n', '--primaryname', {'help': 'Primary Font Name', 'required': True}, {}),
+ ('-i', '--orgid', {'help': 'orgId', 'required': True}, {}),
+ ('-f', '--fontlog', {'help': 'FONTLOG.txt file', 'default': 'FONTLOG.txt'}, {'type': 'filename'}),
+ ('-o', '--output', {'help': 'Override output file'}, {'type': 'filename', 'def': None}),
+ ('--populateufowoff', {'help': 'Copy info from FONTLOG.txt to UFO', 'action': 'store_true', 'default': False},{}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_makewoff.log'})]
+
+def doit(args):
+ font = args.font
+ pfn = args.primaryname
+ orgid = args.orgid
+ logger = args.logger
+ ofn = args.output
+
+ # Find & process info required in the UFO
+
+ fi = font.fontinfo
+
+ ufofields = {}
+ missing = None
+ for field in ("versionMajor", "versionMinor", "openTypeNameManufacturer", "openTypeNameManufacturerURL",
+ "openTypeNameLicense", "copyright", "trademark"):
+ if field in fi:
+ ufofields[field] = fi[field][1].text
+ elif field != 'trademark': # trademark is no longer required
+ missing = field if missing is None else missing + ", " + field
+ if missing is not None: logger.log("Field(s) missing from fontinfo.plist: " + missing, "S")
+
+ version = ufofields["versionMajor"] + "." + ufofields["versionMinor"].zfill(3)
+
+ # Find & process WOFF fields if present in the UFO
+
+ missing = None
+ ufofields["woffMetadataDescriptionurl"] = None
+ ufowoff = {"woffMetadataCredits": "credits", "woffMetadataDescription": "text"} # Field, dict name
+ for field in ufowoff:
+ fival = fi.getval(field) if field in fi else None
+ if fival is None:
+ missing = field if missing is None else missing + ", " + field
+ ufofields[field] = None
+ else:
+ ufofields[field] = fival[ufowoff[field]]
+ if field == "woffMetadataDescription" and "url" in fival:
+ ufofields["woffMetadataDescriptionurl"] = fival["url"]
+
+ # Process --populateufowoff setting, if present
+ if args.populateufowoff:
+ if missing != "woffMetadataCredits, woffMetadataDescription":
+ logger.log("Data exists in the UFO for woffMetadata - remove manually to reuse --populateufowoff", "S")
+
+ if args.populateufowoff or missing is not None:
+ if missing: logger.log("WOFF field(s) missing from fontinfo.plist will be generated from FONTLOG.txt: " + missing, "W")
+ # Open the fontlog file
+ try:
+ fontlog = open(args.fontlog)
+ except Exception as e:
+ logger.log(f"Unable to open {args.fontlog}: {str(e)}", "S")
+ # Parse the fontlog file
+ (section, match) = readuntil(fontlog, ("Basic Font Information",)) # Skip until start of "Basic Font Information" section
+ if match is None: logger.log("No 'Basic Font Information' section in fontlog", "S")
+ (fldescription, match) = readuntil(fontlog, ("Information for C", "Acknowledgements")) # Description ends when first of these sections is found
+ fldescription = [{"text": fldescription}]
+ if match == "Information for C": (section, match) = readuntil(fontlog, ("Acknowledgements",)) # If Info... section present then skip on to Acknowledgements
+ if match is None: logger.log("No 'Acknowledgements' section in fontlog", "S")
+ (acksection, match) = readuntil(fontlog, ("No match needed!!",))
+
+ flcredits = []
+ credit = {}
+ acktype = ""
+ flog2woff = {"N": "name", "E": "Not used", "W": "url", "D": "role"}
+ for line in acksection.splitlines():
+ if line == "":
+ if acktype != "": # Must be at the end of a credit section
+ if "name" in credit:
+ flcredits.append(credit)
+ else:
+ logger.log("Credit section found with no N: entry", "E")
+ credit = {}
+ acktype = ""
+ else:
+ match = re.match("^([NEWD]): (.*)", line)
+ if match is None:
+ if acktype == "N": credit["name"] = credit["name"] + line # Name entries can be multiple lines
+ else:
+ acktype = match.group(1)
+ if acktype in credit:
+ logger.log("Multiple " + acktype + " entries found in a credit section", "E")
+ else:
+ credit[flog2woff[acktype]] = match.group(2)
+ if flcredits == []: logger.log("No credits found in fontlog", "S")
+ if args.populateufowoff:
+ ufofields["woffMetadataDescription"] = fldescription # Force fontlog values to be used writing metadata.xml later
+ ufofields["woffMetadataCredits"] = flcredits
+ # Create xml strings and update fontinfo
+ xmlstring = "<dict>" + \
+ "<key>text</key><array><dict>" + \
+ "<key>text</key><string>" + textprotect(fldescription[0]["text"]) + "</string>" + \
+ "</dict></array>" + \
+ "<key>url</key><string>https://software.sil.org/</string>"\
+ "</dict>"
+ fi.setelem("woffMetadataDescription", ET.fromstring(xmlstring))
+
+ xmlstring = "<dict><key>credits</key><array>"
+ for credit in flcredits:
+ xmlstring += '<dict><key>name</key><string>' + textprotect(credit["name"]) + '</string>'
+ if "url" in credit: xmlstring += '<key>url</key><string>' + textprotect(credit["url"]) + '</string>'
+ if "role" in credit: xmlstring += '<key>role</key><string>' + textprotect(credit["role"]) + '</string>'
+ xmlstring += '</dict>'
+ xmlstring += '</array></dict>'
+ fi.setelem("woffMetadataCredits", ET.fromstring(xmlstring))
+
+ fi.setval("openTypeHeadCreated", "string", datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
+ logger.log("Writing updated fontinfo.plist with values from FONTLOG.txt", "P")
+        exists = os.path.isfile(os.path.join(font.ufodir, "fontinfo.plist"))
+ UFO.writeXMLobject(fi, font.outparams, font.ufodir, "fontinfo.plist", exists, fobject=True)
+
+ description = ufofields["woffMetadataDescription"]
+ if description == None: description = fldescription
+ credits = ufofields["woffMetadataCredits"]
+ if credits == None : credits = flcredits
+
+ # Construct output file name
+ (folder, ufoname) = os.path.split(font.ufodir)
+ filename = os.path.join(folder, pfn + "-WOFF-metadata.xml") if ofn is None else ofn
+ try:
+ file = open(filename, "w")
+ except Exception as e:
+ logger.log("Unable to open " + filename + " for writing:\n" + str(e), "S")
+ logger.log("Writing to : " + filename, "P")
+
+ file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
+ file.write('<metadata version="1.0">\n')
+ file.write(' <uniqueid id="' + orgid + '.' + pfn + '.' + version + '" />\n')
+ file.write(' <vendor name="' + attrprotect(ufofields["openTypeNameManufacturer"]) + '" url="'
+ + attrprotect(ufofields["openTypeNameManufacturerURL"]) + '" />\n')
+ file.write(' <credits>\n')
+ for credit in credits:
+ file.write(' <credit\n')
+ file.write(' name="' + attrprotect(credit["name"]) + '"\n')
+ if "url" in credit: file.write(' url="' + attrprotect(credit["url"]) + '"\n')
+ if "role" in credit: file.write(' role="' + attrprotect(credit["role"]) + '"\n')
+ file.write(' />\n')
+ file.write(' </credits>\n')
+
+ if ufofields["woffMetadataDescriptionurl"]:
+ file.write(f' <description url="{ufofields["woffMetadataDescriptionurl"]}">\n')
+ else:
+ file.write(' <description>\n')
+ file.write(' <text lang="en">\n')
+ for entry in description:
+ for line in entry["text"].splitlines():
+ file.write(' ' + textprotect(line) + '\n')
+ file.write(' </text>\n')
+ file.write(' </description>\n')
+
+ file.write(' <license url="http://scripts.sil.org/OFL" id="org.sil.ofl.1.1">\n')
+ file.write(' <text lang="en">\n')
+ for line in ufofields["openTypeNameLicense"].splitlines(): file.write(' ' + textprotect(line) + '\n')
+ file.write(' </text>\n')
+ file.write(' </license>\n')
+
+ file.write(' <copyright>\n')
+ file.write(' <text lang="en">\n')
+ for line in ufofields["copyright"].splitlines(): file.write(' ' + textprotect(line) + '\n')
+ file.write(' </text>\n')
+ file.write(' </copyright>\n')
+
+ if 'trademark' in ufofields:
+ file.write(' <trademark>\n')
+ file.write(' <text lang="en">' + textprotect(ufofields["trademark"]) + '</text>\n')
+ file.write(' </trademark>\n')
+
+ file.write('</metadata>')
+
+ file.close()
+
+def readuntil(file, texts): # Read through file until line is in texts. Return section up to there and the text matched
+ skip = True
+ match = None
+ for line in file:
+ line = line.strip()
+ if skip: # Skip underlines and blank lines at start of section
+ if line == "" or line[0:5] == "-----":
+ pass
+ else:
+ section = line
+ skip = False
+ else:
+ for text in texts:
+ if line[0:len(text)] == text: match = text
+ if match: break
+ section = section + "\n" + line
+ while section[-1] == "\n": section = section[:-1] # Strip blank lines at end
+ return (section, match)
+
+def textprotect(txt): # Switch special characters in text to use &...; format
+ txt = re.sub(r'&', '&amp;', txt)
+ txt = re.sub(r'<', '&lt;', txt)
+ txt = re.sub(r'>', '&gt;', txt)
+ return txt
+
+def attrprotect(txt): # Switch special characters in text to use &...; format
+ txt = re.sub(r'&', '&amp;', txt)
+ txt = re.sub(r'<', '&lt;', txt)
+ txt = re.sub(r'>', '&gt;', txt)
+ txt = re.sub(r'"', '&quot;', txt)
+ return txt
+
+def cmd(): execute("UFO", doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfnormalize.py b/lib/silfont/scripts/psfnormalize.py
new file mode 100755
index 0000000..82b132e
--- /dev/null
+++ b/lib/silfont/scripts/psfnormalize.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+__doc__ = '''Normalize a UFO and optionally convert between UFO2 and UFO3'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_normalize.log'}),
+ ('-v','--version',{'help': 'UFO version to convert to (2, 3 or 3ff)'},{})]
+
+def doit(args) :
+
+ if args.version is not None :
+ v = args.version.lower()
+ if v in ("2","3","3ff") :
+ if v == "3ff": # Special action for testing with FontForge import
+ v = "3"
+ args.ifont.outparams['format1Glifs'] = True
+ args.ifont.outparams['UFOversion'] = v
+ else:
+ args.logger.log("-v, --version must be one of 2,3 or 3ff", "S")
+
+ return args.ifont
+
+def cmd() : execute("UFO",doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfremovegliflibkeys.py b/lib/silfont/scripts/psfremovegliflibkeys.py
new file mode 100644
index 0000000..5d59ad7
--- /dev/null
+++ b/lib/silfont/scripts/psfremovegliflibkeys.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+__doc__ = '''Remove the specified key(s) from glif libs'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('key',{'help': 'Key(s) to remove','nargs': '*' }, {}),
+ ('-b', '--begins', {'help': 'Remove keys beginning with','nargs': '*' }, {}),
+ ('-o', '--ofont',{'help': 'Output font file' }, {'type': 'outfont'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_removegliflibkeys.log'})]
+
+def doit(args) :
+ font = args.ifont
+ logger = args.logger
+ keys = args.key
+ bkeys=args.begins if args.begins is not None else []
+ keycounts = {}
+ bkeycounts = {}
+ for key in keys : keycounts[key] = 0
+ for key in bkeys:
+ if key in keycounts: logger.log("--begins key can't be the same as a standard key", "S")
+ bkeycounts[key] = 0
+
+ for glyphn in font.deflayer :
+ glyph = font.deflayer[glyphn]
+ if glyph["lib"] :
+ for key in keys :
+ if key in glyph["lib"] :
+ val = str( glyph["lib"].getval(key))
+ glyph["lib"].remove(key)
+ keycounts[key] += 1
+ logger.log(key + " removed from " + glyphn + ". Value was " + val, "I" )
+ if key == "com.schriftgestaltung.Glyphs.originalWidth": # Special fix re glyphLib bug
+ if glyph["advance"] is None: glyph.add("advance")
+ adv = (glyph["advance"])
+ if adv.width is None:
+ adv.width = int(float(val))
+ logger.log("Advance width for " + glyphn + " set to " + val, "I")
+ else:
+ logger.log("Advance width for " + glyphn + " is already set to " + str(adv.width) + " so originalWidth not copied", "E")
+ for key in bkeys:
+ gkeys = list(glyph["lib"])
+ for gkey in gkeys:
+ if gkey[:len(key)] == key:
+ val = str(glyph["lib"].getval(gkey))
+ glyph["lib"].remove(gkey)
+ if gkey in keycounts:
+ keycounts[gkey] += 1
+ else:
+ keycounts[gkey] = 1
+ bkeycounts[key] += 1
+ logger.log(gkey + " removed from " + glyphn + ". Value was " + val, "I")
+
+ for key in keycounts :
+ count = keycounts[key]
+ if count > 0 :
+ logger.log(key + " removed from " + str(count) + " glyphs", "P")
+ else :
+ logger.log("No lib entries found for " + key, "E")
+ for key in bkeycounts:
+ if bkeycounts[key] == 0: logger.log("No lib entries found beginning with " + key, "E")
+
+ return font
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
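
The --begins option removes every glif lib key that starts with one of the given prefixes by comparing each key's leading characters against the prefix. A minimal sketch of that matching, using a plain dict in place of a pysilfont glif lib (the key names here are purely illustrative):

# Sketch: delete every key starting with a given prefix, as --begins does above.
lib = {
    "org.example.keep": 1,
    "com.example.tool.lastChange": "2021-01-01",
    "com.example.tool.originalWidth": 600,
}
prefix = "com.example.tool."
for key in list(lib):                    # list() so we can delete while iterating
    if key[:len(prefix)] == prefix:
        del lib[key]
print(sorted(lib))                       # ['org.example.keep']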
diff --git a/lib/silfont/scripts/psfrenameglyphs.py b/lib/silfont/scripts/psfrenameglyphs.py
new file mode 100644
index 0000000..ff8ef73
--- /dev/null
+++ b/lib/silfont/scripts/psfrenameglyphs.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+__doc__ = '''Assign new working names to glyphs based on csv input file
+- csv format oldname,newname'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+import re
+import os
+from glob import glob
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-c', '--classfile', {'help': 'Classes file'}, {}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': 'namemap.csv'}),
+ ('--mergecomps',{'help': 'turn on component merge', 'action': 'store_true', 'default': False},{}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_renameglyphs.log'})]
+
+csvmap = "" # Variable used globally
+
+def doit(args) :
+ global csvmap, ksetsbymember
+ font = args.ifont
+ incsv = args.input
+ logger = args.logger
+ mergemode = args.mergecomps
+
+ failerrors = 0 # Keep count of errors that should cause the script to fail
+ csvmap = {} # Dict of all real maps in incsv, so excluding headers, blank lines, comments and identity maps
+ nameMap = {} # remember all glyphs actually renamed
+ kerngroupsrenamed = {} # Dict of all kern groups actually renamed
+
+ # List of secondary layers (ie layers other than the default)
+ secondarylayers = [x for x in font.layers if x.layername != "public.default"]
+
+ # Obtain lib.plist glyph order(s) and psnames if they exist:
+ publicGlyphOrder = csGlyphOrder = psnames = displayStrings = None
+ if hasattr(font, 'lib'):
+ if 'public.glyphOrder' in font.lib:
+ publicGlyphOrder = font.lib.getval('public.glyphOrder') # This is an array
+ if 'com.schriftgestaltung.glyphOrder' in font.lib:
+ csGlyphOrder = font.lib.getval('com.schriftgestaltung.glyphOrder') # This is an array
+ if 'public.postscriptNames' in font.lib:
+ psnames = font.lib.getval('public.postscriptNames') # This is a dict keyed by glyphnames
+ if 'com.schriftgestaltung.customParameter.GSFont.DisplayStrings' in font.lib:
+ displayStrings = font.lib.getval('com.schriftgestaltung.customParameter.GSFont.DisplayStrings')
+ else:
+ logger.log("no lib.plist found in font", "W")
+
+ # Renaming within the UFO is done in two passes to make sure we can handle circular renames such as:
+ # someglyph.alt = someglyph
+ # someglyph = someglyph.alt
+
+ # Note that the various objects with glyph names are all done independently since
+ # the same glyph names are not necessarily in all structures.
+
+ # First pass: process all records of csv, and for each glyph that is to be renamed:
+ # If the new glyphname is not already present, go ahead and rename it now.
+ # If the new glyph name already exists, rename the glyph to a temporary name
+ # and put relevant details in saveforlater[]
+
+ saveforlaterFont = [] # For the font itself
+ saveforlaterPGO = [] # For public.GlyphOrder
+ saveforlaterCSGO = [] # For GlyphsApp GlyphOrder (com.schriftgestaltung.glyphOrder)
+ saveforlaterPSN = [] # For public.postscriptNames
+ deletelater = [] # Glyphs we'll delete after merging
+
+ for r in incsv:
+ oldname = r[0].strip()
+ newname = r[1].strip()
+ # ignore header row and rows where the newname is blank or a comment marker
+ if oldname == "Name" or oldname.startswith('#') or newname == "" or oldname == newname:
+ continue
+ if len(oldname)==0:
+ logger.log('empty glyph oldname in glyph_data; ignored (newname: %s)' % newname, 'W')
+ continue
+ csvmap[oldname]=newname
+
+ # Handle font first:
+ if oldname not in font.deflayer:
+ logger.log("glyph name not in font: " + oldname , "I")
+ elif newname not in font.deflayer:
+ inseclayers = False
+ for layer in secondarylayers:
+ if newname in layer:
+ logger.log("Glyph %s is already in non-default layers; can't rename %s" % (newname, oldname), "E")
+ failerrors += 1
+ inseclayers = True
+ continue
+ if not inseclayers:
+ # Ok, this case is easy: just rename the glyph in all layers
+ for layer in font.layers:
+ if oldname in layer: layer[oldname].name = newname
+ nameMap[oldname] = newname
+ logger.log("Pass 1 (Font): Renamed %s to %s" % (oldname, newname), "I")
+ elif mergemode:
+ mergeglyphs(font.deflayer[oldname], font.deflayer[newname])
+ for layer in secondarylayers:
+ if oldname in layer:
+ if newname in layer:
+ mergeglyphs(layer[oldname], layer[newname])
+ else:
+ layer[oldname].name = newname
+
+ nameMap[oldname] = newname
+ deletelater.append(oldname)
+ logger.log("Pass 1 (Font): merged %s to %s" % (oldname, newname), "I")
+ else:
+ # newname already in font -- but it might get renamed later in which case this isn't actually a problem.
+ # For now, then, rename glyph to a temporary name and remember it for second pass
+ tempname = gettempname(lambda n : n not in font.deflayer)
+ for layer in font.layers:
+ if oldname in layer:
+ layer[oldname].name = tempname
+ saveforlaterFont.append( (tempname, oldname, newname) )
+
+ # Similar algorithm for public.glyphOrder, if present:
+ if publicGlyphOrder:
+ if oldname not in publicGlyphOrder:
+ logger.log("glyph name not in publicGlyphorder: " + oldname , "I")
+ else:
+ x = publicGlyphOrder.index(oldname)
+ if newname not in publicGlyphOrder:
+ publicGlyphOrder[x] = newname
+ nameMap[oldname] = newname
+ logger.log("Pass 1 (PGO): Renamed %s to %s" % (oldname, newname), "I")
+ elif mergemode:
+ del publicGlyphOrder[x]
+ nameMap[oldname] = newname
+ logger.log("Pass 1 (PGO): Removed %s (now using %s)" % (oldname, newname), "I")
+ else:
+ tempname = gettempname(lambda n : n not in publicGlyphOrder)
+ publicGlyphOrder[x] = tempname
+ saveforlaterPGO.append( (x, oldname, newname) )
+
+ # And for GlyphsApp glyph order, if present:
+ if csGlyphOrder:
+ if oldname not in csGlyphOrder:
+ logger.log("glyph name not in csGlyphorder: " + oldname , "I")
+ else:
+ x = csGlyphOrder.index(oldname)
+ if newname not in csGlyphOrder:
+ csGlyphOrder[x] = newname
+ nameMap[oldname] = newname
+ logger.log("Pass 1 (csGO): Renamed %s to %s" % (oldname, newname), "I")
+ elif mergemode:
+ del csGlyphOrder[x]
+ nameMap[oldname] = newname
+ logger.log("Pass 1 (csGO): Removed %s (now using %s)" % (oldname, newname), "I")
+ else:
+ tempname = gettempname(lambda n : n not in csGlyphOrder)
+ csGlyphOrder[x] = tempname
+ saveforlaterCSGO.append( (x, oldname, newname) )
+
+ # And for psnames
+ if psnames:
+ if oldname not in psnames:
+ logger.log("glyph name not in psnames: " + oldname , "I")
+ elif newname not in psnames:
+ psnames[newname] = psnames.pop(oldname)
+ nameMap[oldname] = newname
+ logger.log("Pass 1 (psn): Renamed %s to %s" % (oldname, newname), "I")
+ elif mergemode:
+ del psnames[oldname]
+ nameMap[oldname] = newname
+ logger.log("Pass 1 (psn): Removed %s (now using %s)" % (oldname, newname), "I")
+ else:
+ tempname = gettempname(lambda n: n not in psnames)
+ psnames[tempname] = psnames.pop(oldname)
+ saveforlaterPSN.append( (tempname, oldname, newname))
+
+ # Second pass: now we can reprocess those things we saved for later:
+ # If the new glyphname is no longer present, we can complete the renaming
+ # Otherwise we've got a fatal error
+
+ for j in saveforlaterFont:
+ tempname, oldname, newname = j
+ if newname in font.deflayer: # Only need to check deflayer, since (if present) it would have been renamed in all
+ # Ok, this really is a problem
+ logger.log("Glyph %s already in font; can't rename %s" % (newname, oldname), "E")
+ failerrors += 1
+ else:
+ for layer in font.layers:
+ if tempname in layer:
+ layer[tempname].name = newname
+ nameMap[oldname] = newname
+ logger.log("Pass 2 (Font): Renamed %s to %s" % (oldname, newname), "I")
+
+ for j in saveforlaterPGO:
+ x, oldname, newname = j
+ if newname in publicGlyphOrder:
+ # Ok, this really is a problem
+ logger.log("Glyph %s already in public.GlyphOrder; can't rename %s" % (newname, oldname), "E")
+ failerrors += 1
+ else:
+ publicGlyphOrder[x] = newname
+ nameMap[oldname] = newname
+ logger.log("Pass 2 (PGO): Renamed %s to %s" % (oldname, newname), "I")
+
+ for j in saveforlaterCSGO:
+ x, oldname, newname = j
+ if newname in csGlyphOrder:
+ # Ok, this really is a problem
+ logger.log("Glyph %s already in com.schriftgestaltung.glyphOrder; can't rename %s" % (newname, oldname), "E")
+ failerrors += 1
+ else:
+ csGlyphOrder[x] = newname
+ nameMap[oldname] = newname
+ logger.log("Pass 2 (csGO): Renamed %s to %s" % (oldname, newname), "I")
+
+ for tempname, oldname, newname in saveforlaterPSN:
+ if newname in psnames:
+ # Ok, this really is a problem
+ logger.log("Glyph %s already in public.postscriptNames; can't rename %s" % (newname, oldname), "E")
+ failerrors += 1
+ else:
+ psnames[newname] = psnames.pop(tempname)
+ nameMap[oldname] = newname
+ logger.log("Pass 2 (psn): Renamed %s to %s" % (oldname, newname), "I")
+
+ # Rebuild font structures from the modified lists we have:
+
+ # Rebuild glyph order elements:
+ if publicGlyphOrder:
+ array = ET.Element("array")
+ for name in publicGlyphOrder:
+ ET.SubElement(array, "string").text = name
+ font.lib.setelem("public.glyphOrder", array)
+
+ if csGlyphOrder:
+ array = ET.Element("array")
+ for name in csGlyphOrder:
+ ET.SubElement(array, "string").text = name
+ font.lib.setelem("com.schriftgestaltung.glyphOrder", array)
+
+ # Rebuild postscriptNames:
+ if psnames:
+ dict = ET.Element("dict")
+ for n in psnames:
+ ET.SubElement(dict, "key").text = n
+ ET.SubElement(dict, "string").text = psnames[n]
+ font.lib.setelem("public.postscriptNames", dict)
+
+ # Iterate over all glyphs, and fix up any components that reference renamed glyphs
+ for layer in font.layers:
+ for name in layer:
+ glyph = layer[name]
+ for component in glyph.etree.findall('./outline/component[@base]'):
+ oldname = component.get('base')
+ if oldname in nameMap:
+ component.set('base', nameMap[oldname])
+ logger.log(f'renamed component base {oldname} to {component.get("base")} in glyph {name} layer {layer.layername}', 'I')
+ lib = glyph['lib']
+ if lib:
+ if 'com.schriftgestaltung.Glyphs.ComponentInfo' in lib:
+ cielem = lib['com.schriftgestaltung.Glyphs.ComponentInfo'][1]
+ for component in cielem:
+ for i in range(0,len(component),2):
+ if component[i].text == 'name':
+ oldname = component[i+1].text
+ if oldname in nameMap:
+ component[i+1].text = nameMap[oldname]
+ logger.log(f'renamed component info {oldname} to {nameMap[oldname]} in glyph {name} layer {layer.layername}', 'I')
+
+ # Delete anything we no longer need:
+ for name in deletelater:
+ for layer in font.layers:
+ if name in layer: layer.delGlyph(name)
+ logger.log("glyph %s removed" % name, "I")
+
+ # Other structures with glyphs in are handled by looping round the structures replacing glyphs rather than
+ # looping round incsv
+
+ # Update Display Strings
+
+ if displayStrings:
+ changed = False
+ glyphRE = re.compile(r'/([a-zA-Z0-9_.-]+)') # regex to match / followed by a glyph name
+ for i, dispstr in enumerate(displayStrings): # Passing the glyphsub function to .sub() causes it to be called
+ displayStrings[i] = glyphRE.sub(glyphsub, dispstr) # for every non-overlapping occurrence of the pattern
+ if displayStrings[i] != dispstr:
+ changed = True
+ if changed:
+ array = ET.Element("array")
+ for dispstr in displayStrings:
+ ET.SubElement(array, "string").text = dispstr
+ font.lib.setelem('com.schriftgestaltung.customParameter.GSFont.DisplayStrings', array)
+ logger.log("com.schriftgestaltung.customParameter.GSFont.DisplayStrings updated", "I")
+
+ # Process groups.plist and kerning.plist
+ # group names in the form public.kern[1|2].<glyph name> will automatically be renamed if the glyph name is in the csvmap
+ #
+ groups = kerning = None
+ kgroupprefixes = {"public.kern1.": 1, "public.kern2.": 2}
+
+ if "groups" in font.__dict__: groups = font.groups
+ if "kerning" in font.__dict__: kerning = font.kerning
+
+ if (groups or kerning) and mergemode:
+ logger.log("Note - Kerning and group data not processed when using mergecomps", "P")
+ elif groups or kerning:
+
+ kgroupsmap = ["", {}, {}] # Dicts of kern1/kern2 group renames. Outside the groups if statement, since also used with kerning.plist
+ if groups:
+ # Analyse existing data, building dict from existing data and building some indexes
+ gdict = {}
+ kgroupsbyglyph = ["", {}, {}] # First entry dummy, so index is 1 or 2 for kern1 and kern2
+ kgroupduplicates = ["", [], []] #
+ for gname in groups:
+ group = groups.getval(gname)
+ gdict[gname] = group
+ kprefix = gname[0:13]
+ if kprefix in kgroupprefixes:
+ ktype = kgroupprefixes[kprefix]
+ for glyph in group:
+ if glyph in kgroupsbyglyph[ktype]:
+ kgroupduplicates[ktype].append(glyph)
+ logger.log("In existing kern groups, %s is in more than one kern%s group" % (glyph, str(ktype)), "E")
+ failerrors += 1
+ else:
+ kgroupsbyglyph[ktype][glyph] = gname
+ # Now process the group data
+ glyphsrenamed = []
+ saveforlaterKgroups = []
+ for gname in list(gdict): # Loop round groups renaming glyphs within groups and kern group names
+ group = gdict[gname]
+
+ # Rename group if kern1 or kern2 group
+ kprefix = gname[:13]
+ if kprefix in kgroupprefixes:
+ ktype = kgroupprefixes[kprefix]
+ ksuffix = gname[13:]
+ if ksuffix in csvmap: # This is a kern group that we should rename
+ newgname = kprefix + csvmap[ksuffix]
+ if newgname in gdict: # Will need to be renamed in second pass
+ tempname = gettempname(lambda n : n not in gdict)
+ gdict[tempname] = gdict.pop(gname)
+ saveforlaterKgroups.append((tempname, gname, newgname))
+ else:
+ gdict[newgname] = gdict.pop(gname)
+ kerngroupsrenamed[gname] = newgname
+ logger.log("Pass 1 (Kern groups): Renamed %s to %s" % (gname, newgname), "I")
+ kgroupsmap[ktype][gname] = newgname
+
+ # Now rename glyphs within the group
+ # - This could lead to duplicate names, but that might be valid for arbitrary groups so not checked
+ # - kern group validity will be checked after all renaming is done
+
+ for (i, glyph) in enumerate(group):
+ if glyph in csvmap:
+ group[i] = csvmap[glyph]
+ if glyph not in glyphsrenamed: glyphsrenamed.append(glyph)
+
+ # Need to report glyphs renamed after the loop, since otherwise could report multiple times
+ for oldname in glyphsrenamed:
+ nameMap[oldname] = csvmap[oldname]
+ logger.log("Glyphs in groups: Renamed %s to %s" % (oldname, csvmap[oldname]), "I")
+
+ # Second pass for renaming kern groups. (All glyph renaming is done in first pass)
+
+ for (tempname, oldgname, newgname) in saveforlaterKgroups:
+ if newgname in gdict: # Can't rename
+ logger.log("Kern group %s already in groups.plist; can't rename %s" % (newgname, oldgname), "E")
+ failerrors += 1
+ else:
+ gdict[newgname] = gdict.pop(tempname)
+ kerngroupsrenamed[oldgname] = newgname
+ logger.log("Pass 2 (Kern groups): Renamed %s to %s" % (oldgname, newgname), "I")
+
+ # Finally check kern groups follow the UFO rules!
+ kgroupsbyglyph = ["", {}, {}] # Reset for new analysis
+ for gname in gdict:
+ group = gdict[gname]
+ kprefix = gname[:13]
+ if kprefix in kgroupprefixes:
+ ktype = kgroupprefixes[kprefix]
+ for glyph in group:
+ if glyph in kgroupsbyglyph[ktype]: # Glyph already in a kern group so we have a duplicate
+ if glyph not in kgroupduplicates[ktype]: # This is a newly-created duplicate so report
+ logger.log("After renaming, %s is in more than one kern%s group" % (glyph, str(ktype)), "E")
+ failerrors += 1
+ kgroupduplicates[ktype].append(glyph)
+ else:
+ kgroupsbyglyph[ktype][glyph] = gname
+
+ # Now need to recreate groups.plist from gdict
+
+ for group in list(groups): groups.remove(group) # Empty existing contents
+ for gname in gdict:
+ elem = ET.Element("array")
+ for glyph in gdict[gname]:
+ ET.SubElement(elem, "string").text = glyph
+ groups.setelem(gname, elem)
+
+ # Now process kerning data
+ if kerning:
+ k1map = kgroupsmap[1]
+ k2map = kgroupsmap[2]
+ kdict = {}
+ for setname in kerning: kdict[setname] = kerning.getval(setname) # Create a working dict from plist
+ saveforlaterKsets = []
+ # First pass on set names
+ for setname in list(kdict): # setname could be a glyph in csvmap or a kern1 group name in k1map
+ if setname in csvmap or setname in k1map:
+ newname = csvmap[setname] if setname in csvmap else k1map[setname]
+ if newname in kdict:
+ tempname = gettempname(lambda n : n not in kdict)
+ kdict[tempname] = kdict.pop(setname)
+ saveforlaterKsets.append((tempname, setname, newname))
+ else:
+ kdict[newname] = kdict.pop(setname)
+ if setname in csvmap: nameMap[setname] = newname # Change to kern set name will have been logged previously
+ logger.log("Pass 1 (Kern sets): Renamed %s to %s" % (setname, newname), "I")
+
+ # Now do second pass for set names
+ for (tempname, oldname, newname) in saveforlaterKsets:
+ if newname in kdict: # Can't rename
+ logger.log("Kern set %s already in kerning.plist; can't rename %s" % (newname, oldname), "E")
+ failerrors += 1
+ else:
+ kdict[newname] = kdict.pop(tempname)
+ if oldname in csvmap: nameMap[oldname] = newname
+ logger.log("Pass 1 (Kern sets): Renamed %s to %s" % (oldname, newname), "I")
+
+ # Rename kern set members next.
+
+ # Here, since a member could be in more than one set, take different approach to two passes.
+ # - In first pass, rename to a temp (and invalid) name so duplicates are not possible. Name to include
+ # old name for reporting purposes
+ # - In second pass, set to correct new name after checking for duplicates
+
+ # Do first pass for set names
+ tempnames = []
+ for setname in list(kdict):
+ kset = kdict[setname]
+
+ for mname in list(kset): # mname could be a glyph in csvmap or a kern2 group name in k2map
+ if mname in csvmap or mname in k2map:
+ newname = csvmap[mname] if mname in csvmap else k2map[mname]
+ newname = "^" + newname + "^" + mname
+ if newname not in tempnames: tempnames.append(newname)
+ kset[newname] = kset.pop(mname)
+
+ # Second pass to change temp names to correct final names
+ # We need an index of which sets each member is in
+ ksetsbymember = {}
+ for setname in kdict:
+ kset = kdict[setname]
+ for member in kset:
+ if member not in ksetsbymember:
+ ksetsbymember[member] = [setname]
+ else:
+ ksetsbymember[member].append(setname)
+ # Now do the renaming
+ for tname in tempnames:
+ (newname, oldname) = tname[1:].split("^")
+ if newname in ksetsbymember: # Can't rename
+ logger.log("Kern set %s already in kerning.plist; can't rename %s" % (newname, oldname), "E")
+ failerrors += 1
+ else:
+ for ksetname in ksetsbymember[tname]:
+ kset = kdict[ksetname]
+ kset[newname] = kset.pop(tname)
+ ksetsbymember[newname] = ksetsbymember.pop(tname)
+ if oldname in csvmap: nameMap[oldname] = newname
+ logger.log("Kern set members: Renamed %s to %s" % (oldname, newname), "I")
+
+ # Now need to recreate kerning.plist from kdict
+ for kset in list(kerning): kerning.remove(kset) # Empty existing contents
+ for kset in kdict:
+ elem = ET.Element("dict")
+ for member in kdict[kset]:
+ ET.SubElement(elem, "key").text = member
+ ET.SubElement(elem, "integer").text = str(kdict[kset][member])
+ kerning.setelem(kset, elem)
+
+ if failerrors:
+ logger.log(str(failerrors) + " issues detected - see errors reported above", "S")
+
+ logger.log("%d glyphs renamed in UFO" % (len(nameMap)), "P")
+ if kerngroupsrenamed: logger.log("%d kern groups renamed in UFO" % (len(kerngroupsrenamed)), "P")
+
+ # If a classfile was provided, change names within it also
+ #
+ if args.classfile:
+
+ logger.log("Processing classfile {}".format(args.classfile), "P")
+
+ # In order to preserve comments we use our own TreeBuilder
+ class MyTreeBuilder(ET.TreeBuilder):
+ def comment(self, data):
+ self.start(ET.Comment, {})
+ self.data(data)
+ self.end(ET.Comment)
+
+ # RE to match separators between glyph names (whitespace):
+ notGlyphnameRE = re.compile(r'(\s+)')
+
+ # Keep a list of glyphnames that were / were not changed
+ changed = set()
+ notChanged = set()
+
+ # Process one token (might be whitespace separator, glyph name, or embedded classname starting with @):
+ def dochange(gname, logErrors = True):
+ if len(gname) == 0 or gname.isspace() or gname.startswith('@'):
+ # No change
+ return gname
+ try:
+ newgname = csvmap[gname]
+ changed.add(gname)
+ return newgname
+ except KeyError:
+ if logErrors: notChanged.add(gname)
+ return gname
+
+ doc = ET.parse(args.classfile, parser=ET.XMLParser(target=MyTreeBuilder()))
+ for e in doc.iter(None):
+ if e.tag in ('class', 'property'):
+ if 'exts' in e.attrib:
+ logger.log("{} '{}' has 'exts' attribute which may need editing".format(e.tag.title(), e.get('name')), "W")
+ # Rather than just split() the text, we'll use re and thus try to preserve whitespace
+ e.text = ''.join([dochange(x) for x in notGlyphnameRE.split(e.text)])
+ elif e.tag is ET.Comment:
+ # Go ahead and look for glyph names in comment text but don't flag as error
+ e.text = ''.join([dochange(x, False) for x in notGlyphnameRE.split(e.text)])
+ # and process the tail as this might be valid part of class or property
+ e.tail = ''.join([dochange(x) for x in notGlyphnameRE.split(e.tail)])
+
+
+ if len(changed):
+ # Something in classes changed so rewrite it... saving backup
+ (dn,fn) = os.path.split(args.classfile)
+ dn = os.path.join(dn, args.paramsobj.sets['main']['backupdir'])
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ # Work out backup name based on existing backups
+ backupname = os.path.join(dn,fn)
+ nums = [int(re.search(r'\.(\d+)~$',n).group(1)) for n in glob(backupname + ".*~")]
+ backupname += ".{}~".format(max(nums) + 1 if nums else 1)
+ logger.log("Backing up input classfile to {}".format(backupname), "P")
+ # Move the original file to backupname
+ os.rename(args.classfile, backupname)
+ # Write the output file
+ doc.write(args.classfile)
+
+ if len(notChanged):
+ logger.log("{} glyphs renamed, {} NOT renamed in {}: {}".format(len(changed), len(notChanged), args.classfile, ' '.join(notChanged)), "W")
+ else:
+ logger.log("All {} glyphs renamed in {}".format(len(changed), args.classfile), "P")
+
+ return font
+
+def mergeglyphs(mergefrom, mergeto): # Merge any "moving" anchors (i.e., those starting with '_') into the glyph we're keeping
+ # Assumption: we are merging one or more component references to just one component; deleting the others
+ for a in mergefrom['anchor']:
+ aname = a.element.get('name')
+ if aname.startswith('_'):
+ # We want to copy this anchor to the glyph being kept:
+ for i, a2 in enumerate(mergeto['anchor']):
+ if a2.element.get('name') == aname:
+ # Overwrite existing anchor of same name
+ mergeto['anchor'][i] = a
+ break
+ else:
+ # Append anchor to glyph
+ mergeto['anchor'].append(a)
+
+def gettempname(f):
+ ''' return a temporary glyph name that, when passed to function f(), returns true'''
+ # Initialize function attribute for use as counter
+ if not hasattr(gettempname, "counter"): gettempname.counter = 0
+ while True:
+ name = "tempglyph%d" % gettempname.counter
+ gettempname.counter += 1
+ if f(name): return name
+
+def glyphsub(m): # Function passed to re.sub() when updating display strings
+ global csvmap
+ gname = m.group(1)
+ return '/' + csvmap[gname] if gname in csvmap else m.group(0)
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
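
The two-pass strategy in doit() — park a glyph under a generated temporary name when its target name is still occupied, then complete the rename once the first pass has freed that name — is what makes circular renames such as a <-> a.alt safe. A simplified sketch of the idea using a plain dict rather than the script's font and layer objects:

# Sketch: swap two names that map to each other without ever clashing.
glyphs = {"a": "outline-a", "a.alt": "outline-a-alt"}
renames = {"a": "a.alt", "a.alt": "a"}            # circular rename

pending = []                                      # (tempname, oldname, newname)
for i, (old, new) in enumerate(renames.items()):
    if new in glyphs:                             # target still occupied: park under a temp name
        temp = "tempglyph%d" % i
        glyphs[temp] = glyphs.pop(old)
        pending.append((temp, old, new))
    else:                                         # target free: rename immediately
        glyphs[new] = glyphs.pop(old)

for temp, old, new in pending:                    # second pass: targets freed by pass one
    if new in glyphs:
        raise ValueError("%s still present; cannot rename %s" % (new, old))
    glyphs[new] = glyphs.pop(temp)

print(glyphs)    # {'a': 'outline-a-alt', 'a.alt': 'outline-a'}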
diff --git a/lib/silfont/scripts/psfrunfbchecks.py b/lib/silfont/scripts/psfrunfbchecks.py
new file mode 100755
index 0000000..eddf8bc
--- /dev/null
+++ b/lib/silfont/scripts/psfrunfbchecks.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python
+'''Run Font Bakery tests using a standard profile with option to specify an alternative profile
+It defaults to ttfchecks.py - ufo checks are not supported yet'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2020 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+import glob, os, csv
+
+from textwrap import TextWrapper
+
+from fontbakery.reporters.serialize import SerializeReporter
+from fontbakery.reporters.html import HTMLReporter
+from fontbakery.checkrunner import distribute_generator, CheckRunner, get_module_profile, SKIP, INFO
+from fontbakery.configuration import Configuration
+from fontbakery.commands.check_profile import get_module
+from fontbakery import __version__ as version
+
+from silfont.core import execute
+
+argspec = [
+ ('fonts',{'help': 'font(s) to run checks against; wildcards allowed', 'nargs': "+"}, {'type': 'filename'}),
+ ('--profile', {'help': 'profile to use instead of Pysilfont default'}, {}),
+ ('--html', {'help': 'Write html report to htmlfile', 'metavar': "HTMLFILE"}, {}),
+ ('--csv',{'help': 'Write results to csv file'}, {'type': 'filename', 'def': None}),
+ ('-F', '--full-lists',{'help': "Don't truncate lists of items" ,'action': 'store_true', 'default': False}, {}),
+ ('--ttfaudit', {'help': 'Compare the list of ttf checks in pysilfont with those in Font Bakery and output a csv to "fonts". No checks are actually run',
+ 'action': 'store_true', 'default': False}, {}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_runfbchecks.log'})]
+
+def doit(args):
+ global version
+ v = version.split(".")
+ version = f'{v[0]}.{v[1]}.{v[2]}' # Set version to just the number part - ie without .dev...
+
+ logger = args.logger
+ htmlfile = args.html
+
+ if args.ttfaudit: # Special action to compare checks in profile against check_list values
+ audit(args.fonts, logger) # args.fonts used as output file name for audit
+ return
+
+ if args.csv:
+ try:
+ csvfile = open(args.csv, 'w')
+ csvwriter = csv.writer(csvfile)
+ csvlines = []
+ except Exception as e:
+ logger.log("Failed to open " + args.csv + ": " + str(e), "S")
+ else:
+ csvfile = None
+
+ # Process list of fonts supplied, expanding wildcards using glob if needed
+ fonts = []
+ fontstype = None
+ for pattern in args.fonts:
+ for fullpath in glob.glob(pattern):
+ ftype = fullpath.lower().rsplit(".", 1)[-1]
+ if ftype == "otf": ftype = "ttf"
+ if ftype not in ("ttf", "ufo"):
+ logger.log("Fonts must be OpenType or UFO - " + fullpath + " invalid", "S")
+ if fontstype is None:
+ fontstype = ftype
+ else:
+ if ftype != fontstype:
+ logger.log("All fonts must be of the same type - both UFO and ttf/otf fonts supplied", "S")
+ fonts.append(fullpath)
+
+ if fonts == [] : logger.log("No files match the filespec provided for fonts: " + str(args.fonts), "S")
+
+ # Find the main folder name for ttf files - strips "results" if present
+ (path, ttfdir) = os.path.split(os.path.dirname(fonts[0]))
+ if ttfdir == ("results"): ttfdir = os.path.basename(path)
+
+ # Create the profile object
+ if args.profile:
+ proname = args.profile
+ else:
+ if fontstype == "ttf":
+ proname = "silfont.fbtests.ttfchecks"
+ else:
+ logger.log("UFO fonts not yet supported", "S")
+
+ try:
+ module = get_module(proname)
+ except Exception as e:
+ logger.log("Failed to import profile: " + proname + "\n" + str(e), "S")
+
+ profile = get_module_profile(module)
+ profile.configuration_defaults = {
+ "com.google.fonts/check/file_size": {
+ "WARN_SIZE": 1 * 1024 * 1024,
+ "FAIL_SIZE": 9 * 1024 * 1024
+ }
+ }
+
+ psfcheck_list = module.psfcheck_list
+
+ # Create the runner and reporter objects, then run the tests
+ configuration = Configuration(full_lists = args.full_lists)
+ runner = CheckRunner(profile, values={
+ "fonts": fonts, 'ufos': [], 'designspaces': [], 'glyphs_files': [], 'readme_md': [], 'metadata_pb': []}
+ , config=configuration)
+
+ if version == "0.8.6":
+ sr = SerializeReporter(runner=runner) # This produces results from all the tests in sr.getdoc for later analysis
+ else:
+ sr = SerializeReporter(runner=runner, loglevels = [INFO]) # loglevels was added with 0.8.7
+ reporters = [sr.receive]
+
+ if htmlfile:
+ hr = HTMLReporter(runner=runner, loglevels = [SKIP])
+ reporters.append(hr.receive)
+
+ distribute_generator(runner.run(), reporters)
+
+ # Process the results
+ results = sr.getdoc()
+ sections = results["sections"]
+
+ checks = {}
+ maxname = 11
+ somedebug = False
+ overrides = {}
+ tempoverrides = False
+
+ for section in sections:
+ secchecks = section["checks"]
+ for check in secchecks:
+ checkid = check["key"][1][17:-1]
+ fontfile = check["filename"] if "filename" in check else "Family-wide"
+ path, fontname = os.path.split(fontfile)
+ if fontname not in checks:
+ checks[fontname] = {"ERROR": [], "FAIL": [], "WARN": [], "INFO": [], "SKIP": [], "PASS": [], "DEBUG": []}
+ if len(fontname) > maxname: maxname = len(fontname)
+ status = check["result"]
+ if checkid in psfcheck_list:
+ # Look for status overrides
+ (changetype, temp) = ("temp_change_status", True) if "temp_change_status" in psfcheck_list[checkid]\
+ else ("change_status", False)
+ if changetype in psfcheck_list[checkid]:
+ change_status = psfcheck_list[checkid][changetype]
+ if status in change_status:
+ reason = change_status["reason"] if "reason" in change_status else None
+ overrides[fontname + ", " + checkid] = (status + " to " + change_status[status], temp, reason)
+ if temp: tempoverrides = True
+ status = change_status[status] ## Should validate new status is one of FAIL, WARN or PASS
+ checks[fontname][status].append(check)
+ if status == "DEBUG": somedebug = True
+
+ if htmlfile:
+ logger.log("Writing results to " + htmlfile, "P")
+ with open(htmlfile, 'w') as hfile:
+ hfile.write(hr.get_html())
+
+ fbstats = ["ERROR", "FAIL", "WARN", "INFO", "SKIP", "PASS"]
+ psflevels = ["E", "E", "W", "I", "I", "V"]
+ if somedebug: # Only have debug column if some debug statuses are present
+ fbstats.append("DEBUG")
+ psflevels.append("W")
+ wrapper = TextWrapper(width=120, initial_indent=" ", subsequent_indent=" ")
+ errorcnt = 0
+ failcnt = 0
+ summarymess = "Check status summary:\n"
+ summarymess += "{:{pad}}ERROR FAIL WARN INFO SKIP PASS".format("", pad=maxname+4)
+ if somedebug: summarymess += " DEBUG"
+ fontlist = list(sorted(x for x in checks if x != "Family-wide")) # Alphabetic list of fonts
+ if "Family-wide" in checks: fontlist.append("Family-wide") # Add Family-wide last
+ for fontname in fontlist:
+ summarymess += "\n {:{pad}}".format(fontname, pad=maxname)
+ for i, status in enumerate(fbstats):
+ psflevel = psflevels[i]
+ checklist = checks[fontname][status]
+ cnt = len(checklist)
+ if cnt > 0 or status != "DEBUG": summarymess += "{:6d}".format(cnt) # Suppress 0 for DEBUG
+ if cnt:
+ if status == "ERROR": errorcnt += cnt
+ if status == "FAIL": failcnt += cnt
+ messparts = ["Checks with status {} for {}".format(status, fontname)]
+ for check in checklist:
+ checkid = check["key"][1][17:-1]
+ csvline = [ttfdir, fontname, check["key"][1][17:-1], status, check["description"]]
+ messparts.append(" > {}".format(checkid))
+ for record in check["logs"]:
+ message = record["message"]
+ if record["status"] != status: message = record["status"] + " " + message
+ messparts += wrapper.wrap(message)
+ csvline.append(message)
+ if csvfile: csvlines.append(csvline)
+ logger.log("\n".join(messparts) , psflevel)
+ if csvfile: # Output to csv file, sorted by font then checkID
+ for line in sorted(csvlines, key = lambda x: (x[1],x[2])): csvwriter.writerow(line)
+ if overrides != {}:
+ summarymess += "\n Note: " + str(len(overrides)) + " Fontbakery statuses were overridden - see log file for details"
+ if tempoverrides: summarymess += "\n ******** Some of the overrides were temporary overrides ********"
+ logger.log(summarymess, "P")
+
+ if overrides != {}:
+ for oname in overrides:
+ override = overrides[oname]
+ mess = "Status override for " + oname + ": " + override[0]
+ if override[1]: mess += " (Temporary override)"
+ logger.log(mess, "W")
+ if override[2] is not None: logger.log("Override reason: " + override[2], "I")
+
+ if errorcnt + failcnt > 0:
+ mess = str(failcnt) + " test(s) gave a status of FAIL" if failcnt > 0 else ""
+ if errorcnt > 0:
+ if failcnt > 0: mess += "\n "
+ mess += str(errorcnt) + " test(s) gave a status of ERROR which means they failed to execute properly." \
+ "\n " \
+ " ERROR probably indicates a software issue rather than font issue"
+ logger.log(mess, "E")
+
+def audit(fonts, logger):
+ if len(fonts) != 1: logger.log("For audit, specify output csv file instead of list of fonts", "S")
+ csvname = fonts[0]
+ from silfont.fbtests.ttfchecks import all_checks_dict
+ missingfromprofile=[]
+ missingfromchecklist=[]
+ checks = all_checks_dict()
+ logger.log("Opening " + csvname + " for audit output csv", "P")
+ with open(csvname, 'w', newline='') as csvfile:
+ csvwriter = csv.writer(csvfile, dialect='excel')
+ fields = ["id", "psfaction", "section", "description", "rationale", "conditions"]
+ csvwriter.writerow(fields)
+
+ for checkid in checks:
+ check = checks[checkid]
+ row = [checkid]
+ for field in fields:
+ if field != "id": row.append(check[field])
+ if check["section"] == "Missing": missingfromprofile.append(checkid)
+ if check["psfaction"] == "Not in psfcheck_list": missingfromchecklist.append(checkid)
+ csvwriter.writerow(row)
+ if missingfromprofile != []:
+ mess = "The following checks are in psfcheck_list but not in the ttfchecks.py profile:"
+ for checkid in missingfromprofile: mess += "\n " + checkid
+ logger.log(mess, "E")
+ if missingfromchecklist != []:
+ mess = "The following checks are in the ttfchecks.py profile but not in psfcheck_list:"
+ for checkid in missingfromchecklist: mess += "\n " + checkid
+ logger.log(mess, "E")
+
+ return
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == "__main__": cmd()
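
The status-override handling above expects psfcheck_list (taken from the profile module, normally silfont.fbtests.ttfchecks) to optionally give a check a change_status or temp_change_status dict mapping a Font Bakery status to a replacement status, plus an optional reason string. A hypothetical entry showing only the shape the code reads — the check id and reason below are invented for illustration:

# Hypothetical psfcheck_list entry; only the structure matters here.
psfcheck_list = {
    "com.google.fonts/check/some_example_check": {
        "temp_change_status": {
            "FAIL": "WARN",                       # downgrade FAIL to WARN...
            "reason": "known issue, fix planned"  # ...and explain why in the log
        }
    }
}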
diff --git a/lib/silfont/scripts/psfsetassocfeat.py b/lib/silfont/scripts/psfsetassocfeat.py
new file mode 100755
index 0000000..da33bb4
--- /dev/null
+++ b/lib/silfont/scripts/psfsetassocfeat.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+__doc__ = '''Add associate feature info to glif lib based on a csv file
+csv format glyphname,featurename[,featurevalue]'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+
+suffix = "_AssocFeat"
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': suffix+'.csv'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'})]
+
+def doit(args) :
+ font = args.ifont
+ incsv = args.input
+ incsv.minfields = 2
+ incsv.maxfields = 3
+ incsv.logger = font.logger
+ glyphlist = list(font.deflayer.keys()) # Identify which glifs have not got an AssocFeat set
+
+ for line in incsv :
+ glyphn = line[0]
+ feature = line[1]
+ value = line[2] if len(line) == 3 else ""
+
+ if glyphn in glyphlist :
+ glyph = font.deflayer[glyphn]
+ if glyph["lib"] is None : glyph.add("lib")
+ glyph["lib"].setval("org.sil.assocFeature","string",feature)
+ if value != "" :
+ glyph["lib"].setval("org.sil.assocFeatureValue","integer",value)
+ else :
+ if "org.sil.assocFeatureValue" in glyph["lib"] : glyph["lib"].remove("org.sil.assocFeatureValue")
+ glyphlist.remove(glyphn)
+ else :
+ font.logger.log("No glyph in font for " + glyphn + " on line " + str(incsv.line_num),"E")
+
+ for glyphn in glyphlist : # Remove any values from remaining glyphs
+ glyph = font.deflayer[glyphn]
+ if glyph["lib"] :
+ if "org.sil.assocFeatureValue" in glyph["lib"] : glyph["lib"].remove("org.sil.assocFeatureValue")
+ if "org.sil.assocFeature" in glyph["lib"] :
+ glyph["lib"].remove("org.sil.assocFeature")
+ font.logger.log("Feature info removed for " + glyphn,"I")
+
+ return font
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfsetassocuids.py b/lib/silfont/scripts/psfsetassocuids.py
new file mode 100755
index 0000000..e8be848
--- /dev/null
+++ b/lib/silfont/scripts/psfsetassocuids.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+__doc__ = '''Add associate UID info to glif lib based on a csv file
+- Could be one value for variant UIDs and multiple for ligatures'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+
+suffix = "_AssocUIDs"
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': suffix+'.csv'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'})]
+
+def doit(args) :
+ font = args.ifont
+ incsv = args.input
+ incsv.minfields = 2
+ incsv.logger = font.logger
+ glyphlist = list(font.deflayer.keys()) # Identify which glifs have not got AssocUIDs set
+
+ for line in incsv :
+ glyphn = line.pop(0)
+ if glyphn in glyphlist :
+ glyph = font.deflayer[glyphn]
+ if glyph["lib"] is None : glyph.add("lib")
+ # Create an array element for the UID value(s)
+ array = ET.Element("array")
+ for UID in line:
+ sub = ET.SubElement(array,"string")
+ sub.text = UID
+ glyph["lib"].setelem("org.sil.assocUIDs",array)
+ glyphlist.remove(glyphn)
+ else :
+ font.logger.log("No glyph in font for " + glyphn + " on line " + str(incsv.line_num),"E")
+
+ for glyphn in glyphlist : # Remove any values from remaining glyphs
+ glyph = font.deflayer[glyphn]
+ if glyph["lib"] :
+ if "org.sil.assocUIDs" in glyph["lib"] :
+ glyph["lib"].remove("org.sil.assocUIDs")
+ font.logger.log("UID info removed for " + glyphn,"I")
+
+ return font
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfsetdummydsig.py b/lib/silfont/scripts/psfsetdummydsig.py
new file mode 100644
index 0000000..02896a0
--- /dev/null
+++ b/lib/silfont/scripts/psfsetdummydsig.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python3
+
+__doc__ = 'Put a dummy DSIG table into a ttf font'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Nicolas Spalinger'
+
+from silfont.core import execute
+from fontTools import ttLib
+
+argspec = [
+ ('-i', '--ifont', {'help': 'Input ttf font file'}, {}),
+ ('-o', '--ofont', {'help': 'Output font file'}, {}),
+ ('-l', '--log', {'help': 'Optional log file'}, {'type': 'outfile', 'def': 'dummydsig.log', 'optlog': True})]
+
+
+def doit(args):
+
+ ttf = ttLib.TTFont(args.ifont)
+
+ newDSIG = ttLib.newTable("DSIG")
+ newDSIG.ulVersion = 1
+ newDSIG.usFlag = 0
+ newDSIG.usNumSigs = 0
+ newDSIG.signatureRecords = []
+ ttf.tables["DSIG"] = newDSIG
+
+ args.logger.log('Saving the output ttf file with dummy DSIG table', 'P')
+ ttf.save(args.ofont)
+
+ args.logger.log('Done', 'P')
+
+
+def cmd(): execute("FT", doit, argspec)
+if __name__ == '__main__': cmd()
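
A short sketch of checking the result with fontTools, assuming "out.ttf" is a file written by this script (the path is illustrative):

from fontTools import ttLib

ttf = ttLib.TTFont("out.ttf")    # hypothetical output from psfsetdummydsig
dsig = ttf["DSIG"]
assert dsig.ulVersion == 1
assert dsig.usNumSigs == 0 and dsig.signatureRecords == []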
diff --git a/lib/silfont/scripts/psfsetglyphdata.py b/lib/silfont/scripts/psfsetglyphdata.py
new file mode 100644
index 0000000..9aa2162
--- /dev/null
+++ b/lib/silfont/scripts/psfsetglyphdata.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+__doc__ = '''Update and/or sort glyph_data.csv based on input file(s)'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2021 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+import csv
+
+argspec = [
+ ('glyphdata', {'help': 'glyph_data csv file to update'}, {'type': 'incsv', 'def': 'glyph_data.csv'}),
+ ('outglyphdata', {'help': 'Alternative output file name', 'nargs': '?'}, {'type': 'filename', 'def': None}),
+ ('-a','--addcsv',{'help': 'Records to add to glyphdata'}, {'type': 'incsv', 'def': None}),
+ ('-d', '--deletions', {'help': 'Records to delete from glyphdata'}, {'type': 'incsv', 'def': None}),
+ ('-s', '--sortheader', {'help': 'Column header to sort by'}, {}),
+ ('--sortalpha', {'help': 'Use with sortheader to sort alphabetically not numerically', 'action': 'store_true', 'default': False}, {}),
+ ('-f', '--force', {'help': 'When adding, if glyph exists, overwrite existing data', 'action': 'store_true', 'default': False}, {}),
+ ('-l','--log',{'help': 'Log file name'}, {'type': 'outfile', 'def': 'setglyphdata.log'}),
+ ]
+
+def doit(args):
+ logger = args.logger
+ gdcsv = args.glyphdata
+ addcsv = args.addcsv
+ dellist = args.deletions
+ sortheader = args.sortheader
+ force = args.force
+
+ # Check arguments are valid
+ if not(addcsv or dellist or sortheader): logger.log("At least one of -a, -d or -s must be specified", "S")
+ if force and not addcsv: logger.log("-f should only be used with -a", "S")
+
+ #
+ # Process the glyph_data.csv
+ #
+
+ # Process the headers line
+ gdheaders = gdcsv.firstline
+ if 'glyph_name' not in gdheaders: logger.log("No glyph_name header in glyph data csv", "S")
+ gdcsv.numfields = len(gdheaders)
+ gdheaders = {header: col for col, header in enumerate(gdheaders)} # Turn into dict of form header: column
+ gdnamecol = gdheaders["glyph_name"]
+ if sortheader and sortheader not in gdheaders:
+ logger.log(sortheader + " not in glyph data headers", "S")
+ next(gdcsv.reader, None) # Skip first line with headers in
+
+ # Read the data in
+ logger.log("Reading in exisitng glyph data file", "P")
+ gddata = {}
+ gdorder = []
+ for line in gdcsv:
+ gname = line[gdnamecol]
+ gddata[gname] = line
+ gdorder.append(gname)
+
+ # Delete records from dellist
+
+ if dellist:
+ logger.log("Deleting items from glyph data based on deletions file", "P")
+ dellist.numfields = 1
+ for line in dellist:
+ gname = line[0]
+ if gname in gdorder:
+ del gddata[gname]
+ gdorder.remove(gname)
+ logger.log(gname + " deleted from glyph data", "I")
+ else:
+ logger.log(gname + "not in glyph data", "W")
+
+ #
+ # Process the addcsv, if present
+ #
+
+ if addcsv:
+ # Check if addcsv has headers; if not use gdheaders
+ addheaders = addcsv.firstline
+ headerssame = True
+ if 'glyph_name' in addheaders:
+ if addheaders != gdcsv.firstline: headerssame = False
+ next(addcsv.reader)
+ else:
+ addheaders = gdheaders
+
+ addcsv.numfields = len(addheaders)
+ addheaders = {header: col for col, header in enumerate(addheaders)} # Turn into dict of form header: column
+ addnamecol = addheaders["glyph_name"]
+
+ logger.log("Adding new records from add csv file", "P")
+ for line in addcsv:
+ gname = line[addnamecol]
+ logtype = "added to"
+ if gname in gdorder:
+ if force: # Remove existing line
+ logtype = "replaced in"
+ del gddata[gname]
+ gdorder.remove(gname)
+ else:
+ logger.log(gname + " already in glyphdata so new data not added", "W")
+ continue
+ logger.log(f'{gname} {logtype} glyphdata', "I")
+
+ if not headerssame: # need to construct new line based on addheaders
+ newline = []
+ for header in gdheaders:
+ val = line[addheaders[header]] if header in addheaders else ""
+ newline.append(val)
+ line = newline
+
+ gddata[gname] = line
+ gdorder.append(gname)
+
+ # Finally sort the data if sortheader supplied
+ def numeric(x):
+ try:
+ numx = float(x)
+ except ValueError:
+ logger.log(f'Non-numeric value "{x}" in sort column; 0 used for sorting', "E")
+ numx = 0
+ return numx
+
+ if sortheader:
+ sortheaderpos = gdheaders[sortheader]
+ if args.sortalpha:
+ gdorder = sorted(gdorder, key=lambda x: gddata[x][sortheaderpos])
+ else:
+ gdorder = sorted(gdorder, key=lambda x: numeric(gddata[x][sortheaderpos]))
+
+ # Now write the data out
+ outfile = args.outglyphdata
+ if not outfile:
+ gdcsv.file.close()
+ outfile = gdcsv.filename
+ logger.log(f'Writing glyph data out to {outfile}', "P")
+ with open(outfile, "w", newline="") as f:
+ writer = csv.writer(f)
+ writer.writerow(gdcsv.firstline)
+ for glyphn in gdorder:
+ writer.writerow(gddata[glyphn])
+
+def cmd() : execute("",doit,argspec)
+if __name__ == "__main__": cmd()
+
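
When the add csv carries its own header row and it differs from the glyph_data headers, each incoming record is rebuilt in the glyph_data column order, blank-filling any column the add csv lacks. A stand-alone sketch of that remapping (the column names are illustrative only):

# Sketch: rebuild a row in the target header order, blank-filling missing columns.
gdheaders  = {"glyph_name": 0, "USV": 1, "sort_final": 2}    # target column order
addheaders = {"glyph_name": 0, "sort_final": 1}              # incoming column order
line = ["a.alt", "12.5"]

newline = []
for header in gdheaders:     # dicts keep insertion order, i.e. the original column order
    newline.append(line[addheaders[header]] if header in addheaders else "")
print(newline)               # ['a.alt', '', '12.5']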
diff --git a/lib/silfont/scripts/psfsetglyphorder.py b/lib/silfont/scripts/psfsetglyphorder.py
new file mode 100755
index 0000000..e2448e7
--- /dev/null
+++ b/lib/silfont/scripts/psfsetglyphorder.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+__doc__ = '''Load glyph order data into public.glyphOrder in lib.plist based on a text file in one of two formats:
+ - simple text file with one glyph name per line
+ - csv file with headers, using headers "glyph_name" and "sort_final" where the latter contains
+ numeric values used to sort the glyph names by'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('ifont', {'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont', {'help': 'Output font file', 'nargs': '?'}, {'type': 'outfont'}),
+ ('--gname', {'help': 'Column header for glyph name', 'default': 'glyph_name'}, {}),
+ ('--header', {'help': 'Column header(s) for sort order', 'default': 'sort_final'}, {}),
+ ('--field', {'help': 'Field(s) in lib.plist to update', 'default': 'public.glyphOrder'}, {}),
+ ('-i', '--input', {'help': 'Input text file, one glyphname per line'}, {'type': 'incsv', 'def': 'glyph_data.csv'}),
+ ('-x', '--removemissing', {'help': 'Remove from list if glyph not in font', 'action': 'store_true', 'default': False}, {}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_gorder.log'})]
+
+
+def doit(args):
+ font = args.ifont
+ incsv = args.input
+ logger = args.logger
+ removemissing = args.removemissing
+
+ fields = args.field.split(",")
+ fieldcount = len(fields)
+ headers = args.header.split(",")
+ if fieldcount != len(headers): logger.log("Must specify same number of values in --field and --header", "S")
+ gname = args.gname
+
+ # Identify file format from first line then create glyphdata[] with glyph name then one column per header
+ glyphdata = {}
+ fl = incsv.firstline
+ if fl is None: logger.log("Empty input file", "S")
+ numfields = len(fl)
+ incsv.numfields = numfields
+ fieldpos = []
+ if numfields > 1: # More than 1 column, so must have headers
+ if gname in fl:
+ glyphnpos = fl.index(gname)
+ else:
+ logger.log("No" + gname + "field in csv headers", "S")
+ for header in headers:
+ if header in fl:
+ pos = fl.index(header)
+ fieldpos.append(pos)
+ else:
+ logger.log('No "' + header + '" heading in csv headers"', "S")
+ next(incsv.reader, None) # Skip first line with headers in
+ for line in incsv:
+ glyphn = line[glyphnpos]
+ if len(glyphn) == 0:
+ continue # No need to include cases where name is blank
+ glyphdata[glyphn]=[]
+ for pos in fieldpos: glyphdata[glyphn].append(float(line[pos]))
+ elif numfields == 1: # Simple text file. Create glyphdata in same format as for csv files
+ for i, line in enumerate(incsv): glyphdata[line[0]]=(i,)
+ else:
+ logger.log("Invalid csv file", "S")
+
+ # Now process the data
+ if "lib" not in font.__dict__: font.addfile("lib")
+ glyphlist = list(font.deflayer.keys())
+
+ for i in range(0,fieldcount):
+ array = ET.Element("array")
+ for glyphn, vals in sorted(glyphdata.items(), key=lambda item: item[1][i]):
+ if glyphn in glyphlist:
+ sub = ET.SubElement(array, "string")
+ sub.text = glyphn
+ else:
+ font.logger.log("No glyph in font for " + glyphn, "I")
+ if not removemissing:
+ sub = ET.SubElement(array, "string")
+ sub.text = glyphn
+ font.lib.setelem(fields[i],array)
+
+ for glyphn in sorted(glyphlist): # Remaining glyphs were not in the input file
+ if glyphn not in glyphdata: font.logger.log("No entry in input file for font glyph " + glyphn, "I")
+
+ return font
+
+
+def cmd(): execute("UFO", doit, argspec)
+if __name__ == "__main__": cmd()
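
The order written back to lib.plist is a plist <array> of <string> elements built with ElementTree from the glyph data sorted on the chosen column; setelem() is a pysilfont plist method, so this sketch only constructs and serialises the element itself:

from xml.etree import ElementTree as ET

glyphdata = {"b": (2.0,), "a": (1.0,), "c": (3.0,)}    # glyph name -> sort value(s)

array = ET.Element("array")
for glyphn, vals in sorted(glyphdata.items(), key=lambda item: item[1][0]):
    ET.SubElement(array, "string").text = glyphn

print(ET.tostring(array, encoding="unicode"))
# <array><string>a</string><string>b</string><string>c</string></array>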
diff --git a/lib/silfont/scripts/psfsetkeys.py b/lib/silfont/scripts/psfsetkeys.py
new file mode 100755
index 0000000..2cee4ab
--- /dev/null
+++ b/lib/silfont/scripts/psfsetkeys.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+__doc__ = '''Set keys with given values in a UFO plist file.'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bobby de Vos'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+import codecs
+
+suffix = "_setkeys"
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('--plist',{'help': 'Select plist to modify'}, {'def': 'fontinfo'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': None}),
+ ('-k','--key',{'help': 'Name of key to set'},{}),
+ ('-v','--value',{'help': 'Value to set key to'},{}),
+ ('--file',{'help': 'Use contents of file to set key to'},{}),
+ ('--filepart',{'help': 'Use contents of part of the file to set key to'},{}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'})
+ ]
+
+def doit(args) :
+
+ font = args.ifont
+ logger = args.logger
+ plist = args.plist
+ if plist is None: plist = "fontinfo"
+ if plist not in ("lib", "fontinfo"):
+ logger.log("--plist must be either fontinfo or lib", "S")
+ else:
+ if plist not in font.__dict__: font.addfile(plist)
+ logger.log("Adding keys to " + plist, "I")
+ font_plist = getattr(font, plist)
+
+ # Ensure enough options were specified
+ value = args.value or args.file or args.filepart
+ if args.key and not value:
+ logger.log('Value needs to be specified', "S")
+ if not args.key and value:
+ logger.log('Key needs to be specified', "S")
+
+ # Use a one line string to set the key
+ if args.key and args.value:
+ set_key_value(font_plist, args.key, args.value)
+
+ # Use entire file contents to set the key
+ if args.key and args.file:
+ fh = codecs.open(args.file, 'r', 'utf-8')
+ contents = ''.join(fh.readlines())
+ set_key_value(font_plist, args.key, contents)
+ fh.close()
+
+ # Use some of the file contents to set the key
+ if args.key and args.filepart:
+ fh = codecs.open(args.filepart, 'r', 'utf-8')
+ lines = list()
+ for line in fh:
+ if line == '\n':
+ break
+ lines.append(line)
+ contents = ''.join(lines)
+ set_key_value(font_plist, args.key, contents)
+ fh.close()
+
+ # Set many keys
+ if args.input:
+ incsv = args.input
+ incsv.numfields = 2
+
+ for line in incsv:
+ key = line[0]
+ value = line[1]
+ set_key_value(font_plist, key, value)
+
+ return font
+
+def set_key_value(font_plist, key, value):
+ """Set key to value in font."""
+
+ # Currently setval() only works for integer, real or string.
+ # For other items you need to construct an elementtree element and use setelem()
+
+ if value == 'true' or value == 'false':
+ # Handle boolean values
+ font_plist.setelem(key, ET.Element(value))
+ else:
+ try:
+ # Handle integer values
+ number = int(value)
+ font_plist.setval(key, 'integer', number)
+ except ValueError:
+ # Handle string (including multi-line strings) values
+ font_plist.setval(key, 'string', value)
+ font_plist.font.logger.log(key + " added, value: " + str(value), "I")
+
+def cmd() : execute("UFO",doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfsetmarkcolors.py b/lib/silfont/scripts/psfsetmarkcolors.py
new file mode 100755
index 0000000..c436f4c
--- /dev/null
+++ b/lib/silfont/scripts/psfsetmarkcolors.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+__doc__ = ''' Sets the cell mark color of glyphs in a UFO
+- Input file is a list of glyph names (or unicode values if -u is specified)
+- Color can be numeric or certain names, eg "0.85,0.26,0.06,1" or "g_red"
+'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute, splitfn
+from silfont.util import parsecolors
+import io
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'input file'}, {'type': 'filename', 'def': 'nodefault.txt'}),
+ ('-c','--color',{'help': 'Color to set'},{}),
+ ('-u','--unicodes',{'help': 'Use unicode values in input file', 'action': 'store_true', 'default': False},{}),
+ ('-x','--deletecolors',{'help': 'Delete existing mark colors', 'action': 'store_true', 'default': False},{}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_setmarkcolors.log'})]
+
+def doit(args) :
+ font = args.ifont
+ logger = args.logger
+ infile = args.input
+ color = args.color
+ unicodes = args.unicodes
+ deletecolors = args.deletecolors
+
+ if not ((color is not None) ^ deletecolors): logger.log("Must specify one and only one of -c and -x", "S")
+
+ if color is not None:
+ (color, colorname, logcolor, splitcolor) = parsecolors(color, single=True)
+ if color is None: logger.log(logcolor, "S") # If color not parsed, parsecolors() puts error in logcolor
+
+ # Process the input file. It needs to be done in script rather than by execute() since, if -x is used, there might not be one
+ (ibase, iname, iext) = splitfn(infile)
+ if iname == "nodefault": # Indicates no file was specified
+ infile = None
+ if (color is not None) or unicodes or (not deletecolors): logger.log("If no input file, -x must be used and neither -c nor -u can be used", "S")
+ else:
+ logger.log('Opening file for input: ' + infile, "P")
+ try:
+ infile = io.open(infile, "r", encoding="utf-8")
+ except Exception as e:
+ logger.log("Failed to open file: " + str(e), "S")
+
+ # Create list of glyphs to process
+ if deletecolors and infile is None: # Need to delete colors from all glyphs
+ glyphlist = sorted(font.deflayer.keys())
+ else:
+ inlist = [x.strip() for x in infile.readlines()]
+ glyphlist = []
+ if unicodes:
+ unicodesfound = []
+ for glyphn in sorted(font.deflayer.keys()):
+ glyph = font.deflayer[glyphn]
+ for unicode in [x.hex for x in glyph["unicode"]]:
+ if unicode in inlist:
+ glyphlist.append(glyphn)
+ unicodesfound.append(unicode)
+ for unicode in inlist:
+ if unicode not in unicodesfound: logger.log("No glyphs with unicode '" + unicode + "' in the font", "I")
+ else:
+ for glyphn in inlist:
+ if glyphn in font.deflayer:
+ glyphlist.append(glyphn)
+ else:
+ logger.log(glyphn + " is not in the font", "I")
+
+ changecnt = 0
+ for glyphn in glyphlist:
+ glyph = font.deflayer[glyphn]
+ oldcolor = None
+ lib = glyph["lib"]
+ if lib:
+ if "public.markColor" in lib: oldcolor = str(glyph["lib"].getval("public.markColor"))
+ if oldcolor != color:
+ if oldcolor is not None:
+ (temp, oldname, oldlogcolor, splitcolor) = parsecolors(oldcolor, single=True)
+ if temp is None: oldlogcolor = oldcolor # Failed to parse old color, so just report what it was
+
+ changecnt += 1
+ if deletecolors:
+ glyph["lib"].remove("public.markColor")
+ logger.log(glyphn + ": " + oldlogcolor + " removed", "I")
+ else:
+ if oldcolor is None:
+ if lib is None: glyph.add("lib")
+ glyph["lib"].setval("public.markColor","string",color)
+ logger.log(glyphn+ ": " + logcolor + " added", "I")
+ else:
+ glyph["lib"].setval("public.markColor", "string", color)
+ logger.log(glyphn + ": " + oldlogcolor + " changed to " + logcolor, "I")
+
+ if deletecolors:
+ logger.log(str(changecnt) + " colors removed", "P")
+ else:
+ logger.log(str(changecnt) + " colors changed or added", "P")
+
+ return font
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfsetpsnames.py b/lib/silfont/scripts/psfsetpsnames.py
new file mode 100755
index 0000000..55528c7
--- /dev/null
+++ b/lib/silfont/scripts/psfsetpsnames.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+__doc__ = '''Add public.postscriptNames to lib.plist based on a csv file in one of two formats:
+ - simple glyphname, postscriptname with no headers
+ - with headers, where the headers for glyph name and postscript name are "glyph_name" and "ps_name"'''
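+# Illustrative (hypothetical) input rows for the two forms:
+#   simple two-column csv:                 Aacute,uni00C1
+#   with headers (more than two columns):  glyph_name,ps_name,comment   then   Aacute,uni00C1,acute A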
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('ifont', {'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont', {'help': 'Output font file', 'nargs': '?'}, {'type': 'outfont'}),
+ ('--gname', {'help': 'Column header for glyph name', 'default': 'glyph_name'}, {}),
+ ('-i', '--input', {'help': 'Input csv file'}, {'type': 'incsv', 'def': 'glyph_data.csv'}),
+ ('-x', '--removemissing', {'help': 'Remove from list if glyph not in font', 'action': 'store_true', 'default': False}, {}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': 'setpsnames.log'})]
+
+
+def doit(args):
+ font = args.ifont
+ logger = args.logger
+ incsv = args.input
+ gname = args.gname
+ removemissing = args.removemissing
+
+ glyphlist = list(font.deflayer.keys()) # List to check every glyph has a psname supplied
+
+ # Identify file format from first line
+ fl = incsv.firstline
+ if fl is None: logger.log("Empty input file", "S")
+ numfields = len(fl)
+ incsv.numfields = numfields
+ if numfields == 2:
+ glyphnpos = 0
+ psnamepos = 1 # Default for plain csv
+ elif numfields > 2: # More than 2 columns, so must have standard headers
+ if gname in fl:
+ glyphnpos = fl.index(gname)
+ else:
+ logger.log("No " + gname + " field in csv headers", "S")
+ if "ps_name" in fl:
+ psnamepos = fl.index("ps_name")
+ else:
+ logger.log("No ps_name field in csv headers", "S")
+ next(incsv.reader, None) # Skip first line with headers in
+ else:
+ logger.log("Invalid csv file", "S")
+
+ # Now process the data
+ dict = ET.Element("dict")
+ for line in incsv:
+ glyphn = line[glyphnpos]
+ psname = line[psnamepos]
+ if len(psname) == 0 or glyphn == psname:
+ continue # No need to include cases where production name is blank or same as working name
+ # Check if in font
+ infont = False
+ if glyphn in glyphlist:
+ glyphlist.remove(glyphn)
+ infont = True
+ else:
+ if not removemissing: logger.log("No glyph in font for " + glyphn + " on line " + str(incsv.line_num), "I")
+ if not removemissing or infont:
+ # Add to dict
+ sub = ET.SubElement(dict, "key")
+ sub.text = glyphn
+ sub = ET.SubElement(dict, "string")
+ sub.text = psname
+ # Add to lib.plist
+ if len(dict) > 0:
+ if "lib" not in font.__dict__: font.addfile("lib")
+ font.lib.setelem("public.postscriptNames", dict)
+ else:
+ if "lib" in font.__dict__ and "public.postscriptNames" in font.lib:
+ font.lib.remove("public.postscriptNames")
+
+ for glyphn in sorted(glyphlist): logger.log("No PS name in input file for font glyph " + glyphn, "I")
+
+ return font
+
+
+def cmd(): execute("UFO", doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfsetunicodes.py b/lib/silfont/scripts/psfsetunicodes.py
new file mode 100755
index 0000000..aac1778
--- /dev/null
+++ b/lib/silfont/scripts/psfsetunicodes.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+__doc__ = '''Set the unicodes of glyphs in a font based on an external csv file.
+- csv format: glyphname,unicode[,unicode2[,unicode3]]'''
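+# e.g. (hypothetical) rows:  afii57664,05D0   or   space,0020,00A0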
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Victor Gaultney, based on UFOsetPSnames.py'
+
+from silfont.core import execute
+
+suffix = "_setunicodes"
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': suffix+'.csv'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': suffix+'.log'})]
+
+def doit(args) :
+ font = args.ifont
+ incsv = args.input
+ logger = args.logger
+ # Allow for up to 3 unicode values per glyph
+ incsv.minfields = 2
+ incsv.maxfields = 4
+
+ # List of glyphnames actually in the font:
+ glyphlist = list(font.deflayer.keys())
+
+ # Create mapping to find glyph name from decimal usv:
+ dusv2gname = {int(unicode.hex, 16): gname for gname in glyphlist for unicode in font.deflayer[gname]['unicode']}
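+    # e.g. dusv2gname could end up as {0x0041: "A", 0x05D0: "afii57664"} (illustrative values)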
+
+ # Remember what glyphnames we've processed:
+ processed = set()
+
+ for line in incsv :
+ glyphn = line[0]
+ # Allow for up to 3 unicode values
+ dusvs = []
+ for col in range(1,len(line)):
+ try:
+ dusv = int(line[col],16) # sanity check and convert to decimal
+ except ValueError:
+                logger.log("Invalid USV '%s' on line %d; value ignored." % (line[col], incsv.line_num), "W")
+ continue
+ dusvs.append(dusv)
+
+ if glyphn in glyphlist :
+
+ if glyphn in processed:
+                logger.log(f"Glyph {glyphn} in csv more than once; line {incsv.line_num} ignored.", "W")
+                continue
+
+ glyph = font.deflayer[glyphn]
+ # Remove existing unicodes
+ for unicode in list(glyph["unicode"]):
+ del dusv2gname[int(unicode.hex, 16)]
+ glyph.remove("unicode",index = 0)
+
+ # Add the new unicode(s) in
+ for dusv in dusvs:
+ # See if any glyph already encodes this unicode value:
+ if dusv in dusv2gname:
+ # Remove this encoding from the other glyph:
+ oglyph = font.deflayer[dusv2gname[dusv]]
+ for unicode in oglyph["unicode"]:
+ if int(unicode.hex,16) == dusv:
+ oglyph.remove("unicode", object=unicode)
+ break
+ # Add this unicode value and update dusv2gname
+ dusv2gname[dusv] = glyphn
+ glyph.add("unicode",{"hex": ("%04X" % dusv)}) # Standardize to 4 (or more) digits and caps
+ # Record that we processed this glyphname,
+ processed.add(glyphn)
+ else :
+ logger.log("Glyph '%s' not in font; line %d ignored." % (glyphn, incsv.line_num), "I")
+
+ return font
+
+def cmd() : execute("UFO",doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfsetversion.py b/lib/silfont/scripts/psfsetversion.py
new file mode 100755
index 0000000..764271e
--- /dev/null
+++ b/lib/silfont/scripts/psfsetversion.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+__doc__ = '''Update the various font version fields'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+import silfont.ufo as UFO
+import re
+
+argspec = [
+ ('font',{'help': 'From font file'}, {'type': 'infont'}),
+ ('newversion',{'help': 'Version string or increment', 'nargs': '?'}, {}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_setversion.log'})
+ ]
+
+otnvre = re.compile(r'Version (\d)\.(\d\d\d)( .+)?$')
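+# e.g. matches "Version 1.042" or "Version 2.103 beta2"; groups give major, mpp (minor+patch) and optional extra info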
+
+def doit(args) :
+
+ font = args.font
+ logger = args.logger
+ newversion = args.newversion
+
+
+ fi = font.fontinfo
+ otelem = fi["openTypeNameVersion"][1] if "openTypeNameVersion" in fi else None
+ majelem = fi["versionMajor"][1] if "versionMajor" in fi else None
+ minelem = fi["versionMinor"][1] if "versionMinor" in fi else None
+ otnv = None if otelem is None else otelem.text
+ vmaj = None if majelem is None else majelem.text
+ vmin = None if minelem is None else minelem.text
+
+ if otnv is None or vmaj is None or vmin is None : logger.log("At least one of openTypeNameVersion, versionMajor or versionMinor missing from fontinfo.plist", "S")
+
+ if newversion is None:
+ if otnvre.match(otnv) is None:
+ logger.log("Current version is '" + otnv + "' which is non-standard", "E")
+ else :
+ logger.log("Current version is '" + otnv + "'", "P")
+ (otmaj,otmin,otextrainfo) = parseotnv(otnv)
+ if (otmaj, int(otmin)) != (vmaj,int(vmin)) :
+ logger.log("openTypeNameVersion values don't match versionMajor (" + vmaj + ") and versionMinor (" + vmin + ")", "E")
+ else:
+ if newversion[0:1] == "+" :
+ if otnvre.match(otnv) is None:
+ logger.log("Current openTypeNameVersion is non-standard so can't be incremented: " + otnv , "S")
+ else :
+ (otmaj,otmin,otextrainfo) = parseotnv(otnv)
+ if (otmaj, int(otmin)) != (vmaj,int(vmin)) :
+ logger.log("openTypeNameVersion (" + otnv + ") doesn't match versionMajor (" + vmaj + ") and versionMinor (" + vmin + ")", "S")
+ # Process increment to versionMinor. Note vmin is treated as 3 digit mpp where m and pp are minor and patch versions respectively
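+            # e.g. (illustrative) with versionMinor 204, an increment of "+0.001" gives 205 and "+0.1" gives 304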
+ increment = newversion[1:]
+ if increment not in ("1", "0.001", ".001", "0.1", ".1") :
+ logger.log("Invalid increment value - must be one of 1, 0.001, .001, 0.1 or .1", "S")
+ increment = 100 if increment in ("0.1", ".1") else 1
+            if (increment == 100 and vmin[0:1] == "9") or (increment == 1 and vmin[1:3] == "99") :
+ logger.log("Version already at maximum so can't be incremented", "S")
+ otmin = str(int(otmin) + increment).zfill(3)
+ else :
+ newversion = "Version " + newversion
+ if otnvre.match(newversion) is None:
+ logger.log("newversion format invalid - should be 'M.mpp' or 'M.mpp extrainfo'", "S")
+ else :
+ (otmaj,otmin,otextrainfo) = parseotnv(newversion)
+    newotnv = "Version " + otmaj + "." + otmin + otextrainfo # Extrainfo already has a leading space
+ logger.log("Updating version from '" + otnv + "' to '" + newotnv + "'","P")
+
+ # Update and write to disk
+ otelem.text = newotnv
+ majelem.text = otmaj
+ minelem.text = otmin
+ UFO.writeXMLobject(fi,font.outparams,font.ufodir, "fontinfo.plist" , True, fobject = True)
+
+ return
+
+def parseotnv(string) : # Returns maj, min and extrainfo
+ m = otnvre.match(string) # Assumes string has already been tested for a match
+ extrainfo = "" if m.group(3) is None else m.group(3)
+ return (m.group(1), m.group(2), extrainfo)
+
+
+def cmd() : execute("UFO",doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfshownames.py b/lib/silfont/scripts/psfshownames.py
new file mode 100755
index 0000000..191e0f8
--- /dev/null
+++ b/lib/silfont/scripts/psfshownames.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python3
+__doc__ = 'Display name fields and other bits for linking fonts into families'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2021 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bobby de Vos'
+
+from silfont.core import execute, splitfn
+from fontTools.ttLib import TTFont
+import glob
+from operator import attrgetter, methodcaller
+import tabulate
+
+WINDOWS_ENGLISH_IDS = 3, 1, 0x409
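+# i.e. (platformID, encodingID, languageID) for Windows, Unicode BMP, US English name records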
+
+FAMILY_RELATED_IDS = {
+ 1: 'Family',
+ 2: 'Subfamily',
+ 4: 'Full name',
+ 6: 'PostScript name',
+ 16: 'Typographic/Preferred family',
+ 17: 'Typographic/Preferred subfamily',
+ 21: 'WWS family',
+ 22: 'WWS subfamily',
+ 25: 'Variations PostScript Name Prefix',
+}
+
+
+class FontInfo:
+ def __init__(self):
+ self.filename = ''
+ self.name_table = dict()
+ self.weight_class = 0
+ self.regular = ''
+ self.bold = ''
+ self.italic = ''
+ self.width = ''
+ self.width_name = ''
+ self.width_class = 0
+ self.wws = ''
+
+ def sort_fullname(self):
+ return self.name_table[4]
+
+
+argspec = [
+ ('font', {'help': 'ttf font(s) to run report against; wildcards allowed', 'nargs': "+"}, {'type': 'filename'}),
+ ('-b', '--bits', {'help': 'Show bits', 'action': 'store_true'}, {}),
+ ('-m', '--multiline', {'help': 'Output multi-line key:values instead of a table', 'action': 'store_true'}, {}),
+]
+
+
+def doit(args):
+ logger = args.logger
+
+ font_infos = []
+ for pattern in args.font:
+ for fullpath in glob.glob(pattern):
+ logger.log(f'Processing {fullpath}', 'P')
+ try:
+ font = TTFont(fullpath)
+ except Exception as e:
+ logger.log(f'Error opening {fullpath}: {e}', 'E')
+                continue
+
+ font_info = FontInfo()
+ font_info.filename = fullpath
+ get_names(font, font_info)
+ get_bits(font, font_info)
+ font_infos.append(font_info)
+
+ if not font_infos:
+        logger.log("No files match the filespec provided for fonts: " + str(args.font), "S")
+
+ font_infos.sort(key=methodcaller('sort_fullname'))
+ font_infos.sort(key=attrgetter('width_class'), reverse=True)
+ font_infos.sort(key=attrgetter('weight_class'))
+
+ rows = list()
+ if args.multiline:
+ # Multi-line mode
+ for font_info in font_infos:
+ for line in multiline_names(font_info):
+ rows.append(line)
+ if args.bits:
+ for line in multiline_bits(font_info):
+ rows.append(line)
+ align = ['left', 'right']
+ if len(font_infos) == 1:
+ del align[0]
+ for row in rows:
+ del row[0]
+ output = tabulate.tabulate(rows, tablefmt='plain', colalign=align)
+ output = output.replace(': ', ':')
+ output = output.replace('#', '')
+ else:
+ # Table mode
+
+ # Record information for headers
+ headers = table_headers(args.bits)
+
+ # Record information for each instance.
+ for font_info in font_infos:
+ record = table_records(font_info, args.bits)
+ rows.append(record)
+
+        # Not all fonts in a family will have the same name ids present;
+        # for instance 16: Typographic/Preferred family is only needed in
+        # non-RIBBI families, and even then only for the non-RIBBI instances.
+ # Also, not all the bit fields are present in each instance.
+ # Therefore, columns with no data in any instance are removed.
+ indices = list(range(len(headers)))
+ indices.reverse()
+ for index in indices:
+ empty = True
+ for row in rows:
+ data = row[index]
+ if data:
+ empty = False
+ if empty:
+ for row in rows + [headers]:
+ del row[index]
+
+ # Format 'pipe' is nicer for GitHub, but is wider on a command line
+ output = tabulate.tabulate(rows, headers, tablefmt='simple')
+
+ # Print output from either mode
+ if args.quiet:
+ print(output)
+ else:
+ logger.log('The following family-related values were found in the name, head, and OS/2 tables\n' + output, 'P')
+
+
+def get_names(font, font_info):
+ table = font['name']
+ (platform_id, encoding_id, language_id) = WINDOWS_ENGLISH_IDS
+
+ for name_id in FAMILY_RELATED_IDS:
+ record = table.getName(
+ nameID=name_id,
+ platformID=platform_id,
+ platEncID=encoding_id,
+ langID=language_id
+ )
+ if record:
+ font_info.name_table[name_id] = str(record)
+
+
+def get_bits(font, font_info):
+ os2 = font['OS/2']
+ head = font['head']
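+    # Bit positions used below (per the OpenType spec): fsSelection 0=ITALIC, 5=BOLD, 6=REGULAR, 8=WWS;
+    # macStyle 0=Bold, 1=Italic, 5=Condensed, 6=Extended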
+ font_info.weight_class = os2.usWeightClass
+ font_info.regular = bit2code(os2.fsSelection, 6, 'W-')
+ font_info.bold = bit2code(os2.fsSelection, 5, 'W')
+ font_info.bold += bit2code(head.macStyle, 0, 'M')
+ font_info.italic = bit2code(os2.fsSelection, 0, 'W')
+ font_info.italic += bit2code(head.macStyle, 1, 'M')
+ font_info.width_class = os2.usWidthClass
+ font_info.width = str(font_info.width_class)
+ if font_info.width_class == 5:
+ font_info.width_name = 'Width-Normal'
+ if font_info.width_class < 5:
+ font_info.width_name = 'Width-Condensed'
+ font_info.width += bit2code(head.macStyle, 5, 'M')
+ if font_info.width_class > 5:
+ font_info.width_name = 'Width-Extended'
+ font_info.width += bit2code(head.macStyle, 6, 'M')
+ font_info.wws = bit2code(os2.fsSelection, 8, '8')
+
+
+def bit2code(bit_field, bit, code_letter):
+ code = ''
+ if bit_field & 1 << bit:
+ code = code_letter
+ return code
+
+
+def multiline_names(font_info):
+ for name_id in sorted(font_info.name_table):
+ line = [font_info.filename + ':',
+ str(name_id) + ':',
+ FAMILY_RELATED_IDS[name_id] + ':',
+ font_info.name_table[name_id]
+ ]
+ yield line
+
+
+def multiline_bits(font_info):
+ labels = ('usWeightClass', 'Regular', 'Bold', 'Italic', font_info.width_name, 'WWS')
+ values = (font_info.weight_class, font_info.regular, font_info.bold, font_info.italic, font_info.width, font_info.wws)
+ for label, value in zip(labels, values):
+ if not value:
+ continue
+ line = [font_info.filename + ':',
+ '#',
+ str(label) + ':',
+ value
+ ]
+ yield line
+
+
+def table_headers(bits):
+ headers = ['filename']
+ for name_id in sorted(FAMILY_RELATED_IDS):
+ name_id_key = FAMILY_RELATED_IDS[name_id]
+ header = f'{name_id}: {name_id_key}'
+ if len(header) > 20:
+ header = header.replace(' ', '\n')
+ header = header.replace('/', '\n')
+ headers.append(header)
+ if bits:
+ headers.extend(['wght', 'R', 'B', 'I', 'wdth', 'WWS'])
+ return headers
+
+
+def table_records(font_info, bits):
+ record = [font_info.filename]
+ for name_id in sorted(FAMILY_RELATED_IDS):
+ name_id_value = font_info.name_table.get(name_id, '')
+ record.append(name_id_value)
+ if bits:
+ record.append(font_info.weight_class)
+ record.append(font_info.regular)
+ record.append(font_info.bold)
+ record.append(font_info.italic)
+ record.append(font_info.width)
+ record.append(font_info.wws)
+ return record
+
+
+def cmd(): execute('FT', doit, argspec)
+
+
+if __name__ == '__main__':
+ cmd()
diff --git a/lib/silfont/scripts/psfsubset.py b/lib/silfont/scripts/psfsubset.py
new file mode 100644
index 0000000..b207c3b
--- /dev/null
+++ b/lib/silfont/scripts/psfsubset.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+__doc__ = '''Subset an existing UFO based on a csv or text list of glyph names or USVs to keep.
+'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+import re
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
+ ('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv'}),
+ ('--header', {'help': 'Column header for glyphlist', 'default': 'glyph_name'}, {}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_subset.log'})]
+
+def doit(args) :
+ font = args.ifont
+ incsv = args.input
+ logger = args.logger
+ deflayer = font.deflayer
+
+ # Create mappings to find glyph name from decimal usv:
+ dusv2gname = {int(ucode.hex, 16): gname for gname in deflayer for ucode in deflayer[gname]['unicode']}
+
+ # check for headers in the csv
+ fl = incsv.firstline
+ if fl is None: logger.log("Empty input file", "S")
+ numfields = len(fl)
+ if numfields == 1 and args.header not in fl:
+ dataCol = 0 # Default for plain csv
+ elif numfields >= 1: # Must have headers
+ try:
+ dataCol = fl.index(args.header)
+ except ValueError as e:
+        logger.log('Missing csv input field: ' + str(e), 'S')
+ except Exception as e:
+        logger.log('Error reading csv input field: ' + str(e), 'S')
+ next(incsv.reader, None) # Skip first line with headers in
+ else:
+ logger.log("Invalid csv file", "S")
+
+ # From the csv, assemble a list of glyphs to process:
+ toProcess = set()
+ usvRE = re.compile('[0-9a-f]{4,6}',re.IGNORECASE) # matches 4-6 digit hex
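+    # e.g. "0254" or "1D4B0" are taken as USVs, while "uni0254" or "a.alt" fall through to the glyph-name check (illustrative)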
+ for r in incsv:
+ gname = r[dataCol].strip()
+ if usvRE.match(gname):
+ # data is USV, not glyph name
+ dusv = int(gname,16)
+ if dusv in dusv2gname:
+ toProcess.add(dusv2gname[dusv])
+ continue
+ # The USV wasn't in the font... try it as a glyph name
+ if gname not in deflayer:
+ logger.log("Glyph '%s' not in font; line %d ignored" % (gname, incsv.line_num), 'W')
+ continue
+ toProcess.add(gname)
+
+ # Generate a complete list of glyphs to keep:
+ toKeep = set()
+ while len(toProcess):
+ gname = toProcess.pop() # retrieves a random item from the set
+ if gname in toKeep:
+ continue # Already processed this one
+ toKeep.add(gname)
+
+ # If it has any components we haven't already processed, add them to the toProcess list
+ for component in deflayer[gname].etree.findall('./outline/component[@base]'):
+ cname = component.get('base')
+ if cname not in toKeep:
+ toProcess.add(cname)
+
+ # Generate a complete list of glyphs to delete:
+ toDelete = set(deflayer).difference(toKeep)
+
+ # Remove any glyphs not in the toKeep set
+ for gname in toDelete:
+ logger.log("Deleting " + gname, "V")
+ deflayer.delGlyph(gname)
+ assert len(deflayer) == len(toKeep), "len(deflayer) != len(toKeep)"
+ logger.log("Retained %d glyphs, deleted %d glyphs." % (len(toKeep), len(toDelete)), "P")
+
+ # Clean up and rebuild sort orders
+ libexists = True if "lib" in font.__dict__ else False
+ for orderName in ('public.glyphOrder', 'com.schriftgestaltung.glyphOrder'):
+ if libexists and orderName in font.lib:
+ glyphOrder = font.lib.getval(orderName) # This is an array
+ array = ET.Element("array")
+ for gname in glyphOrder:
+ if gname in toKeep:
+ ET.SubElement(array, "string").text = gname
+ font.lib.setelem(orderName, array)
+
+ # Clean up and rebuild psnames
+ if libexists and 'public.postscriptNames' in font.lib:
+ psnames = font.lib.getval('public.postscriptNames') # This is a dict keyed by glyphnames
+ dict = ET.Element("dict")
+ for gname in psnames:
+ if gname in toKeep:
+ ET.SubElement(dict, "key").text = gname
+ ET.SubElement(dict, "string").text = psnames[gname]
+ font.lib.setelem("public.postscriptNames", dict)
+
+ return font
+
+def cmd() : execute("UFO",doit,argspec)
+
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfsyncmasters.py b/lib/silfont/scripts/psfsyncmasters.py
new file mode 100644
index 0000000..4302d50
--- /dev/null
+++ b/lib/silfont/scripts/psfsyncmasters.py
@@ -0,0 +1,293 @@
+#!/usr/bin/env python
+__doc__ = '''Sync metadata across a family of fonts based on designspace files'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+import silfont.ufo as UFO
+import silfont.etutil as ETU
+import os, datetime
+import fontTools.designspaceLib as DSD
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('primaryds', {'help': 'Primary design space file'}, {'type': 'filename'}),
+ ('secondds', {'help': 'Second design space file', 'nargs': '?', 'default': None}, {'type': 'filename', 'def': None}),
+ ('--complex', {'help': 'Indicates complex set of fonts rather than RIBBI', 'action': 'store_true', 'default': False},{}),
+ ('-l','--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_sync.log'}),
+ ('-n','--new', {'help': 'append "_new" to file names', 'action': 'store_true', 'default': False},{}) # For testing/debugging
+ ]
+
+def doit(args) :
+ ficopyreq = ("ascender", "copyright", "descender", "familyName", "openTypeHheaAscender",
+ "openTypeHheaDescender", "openTypeHheaLineGap", "openTypeNameDescription", "openTypeNameDesigner",
+ "openTypeNameDesignerURL", "openTypeNameLicense", "openTypeNameLicenseURL",
+ "openTypeNameManufacturer", "openTypeNameManufacturerURL", "openTypeNamePreferredFamilyName",
+ "openTypeNameVersion", "openTypeOS2CodePageRanges", "openTypeOS2TypoAscender",
+ "openTypeOS2TypoDescender", "openTypeOS2TypoLineGap", "openTypeOS2UnicodeRanges",
+ "openTypeOS2VendorID", "openTypeOS2WinAscent", "openTypeOS2WinDescent", "versionMajor",
+ "versionMinor")
+ ficopyopt = ("openTypeNameSampleText", "postscriptFamilyBlues", "postscriptFamilyOtherBlues", "trademark",
+ "woffMetadataCredits", "woffMetadataDescription")
+ fispecial = ("italicAngle", "openTypeOS2WeightClass", "openTypeNamePreferredSubfamilyName", "openTypeNameUniqueID",
+ "styleMapFamilyName", "styleMapStyleName", "styleName", "unitsPerEm")
+ fiall = sorted(set(ficopyreq) | set(ficopyopt) | set(fispecial))
+ required = ficopyreq + ("openTypeOS2WeightClass", "styleName", "unitsPerEm")
+ libcopy = ("com.schriftgestaltung.glyphOrder", "public.glyphOrder", "public.postscriptNames")
+ logger = args.logger
+ complex = args.complex
+
+ pds = DSD.DesignSpaceDocument()
+ pds.read(args.primaryds)
+ if args.secondds is not None:
+ sds = DSD.DesignSpaceDocument()
+ sds.read(args.secondds)
+ else:
+ sds = None
+
+ # Process all the sources
+ psource = None
+ dsources = []
+ for source in pds.sources:
+ if source.copyInfo:
+ if psource: logger.log('Multiple fonts with <info copy="1" />', "S")
+ psource = Dsource(pds, source, logger, frompds=True, psource = True, args = args)
+ else:
+ dsources.append(Dsource(pds, source, logger, frompds=True, psource = False, args = args))
+ if sds is not None:
+ for source in sds.sources:
+ dsources.append(Dsource(sds, source, logger, frompds=False, psource = False, args=args))
+
+ # Process values in psource
+ fipval = {}
+ libpval = {}
+ changes = False
+ reqmissing = False
+
+ for field in fiall:
+ pval = psource.fontinfo.getval(field) if field in psource.fontinfo else None
+ oval = pval
+ # Set values or do other checks for special cases
+ if field == "italicAngle":
+ if "italic" in psource.source.filename.lower():
+ if pval is None or pval == 0 :
+ logger.log("Primary font: Italic angle must be non-zero for italic fonts", "E")
+ else:
+ if pval is not None and pval != 0 :
+ logger.log("Primary font: Italic angle must be zero for non-italic fonts", "E")
+ pval = None
+ elif field == "openTypeOS2WeightClass":
+ pval = int(psource.source.location["weight"])
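+            # e.g. a designspace weight location of 700.0 becomes usWeightClass 700 (illustrative)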
+ elif field == "styleMapFamilyName":
+ if not complex and pval is None: logger.log("styleMapFamilyName missing from primary font", "E")
+ elif field == "styleMapStyleName":
+ if not complex and pval not in ('regular', 'bold', 'italic', 'bold italic'):
+ logger.log("styleMapStyleName must be 'regular', 'bold', 'italic', 'bold italic'", "E")
+ elif field in ("styleName", "openTypeNamePreferredSubfamilyName"):
+ pval = psource.source.styleName
+ elif field == "openTypeNameUniqueID":
+ nm = str(fipval["openTypeNameManufacturer"]) # Need to wrap with str() just in case missing from
+ fn = str(fipval["familyName"]) # fontinfo so would have been set to None
+ sn = psource.source.styleName
+ pval = nm + ": " + fn + " " + sn + ": " + datetime.datetime.now().strftime("%Y")
+        elif field == "unitsPerEm":
+            if pval is None or pval <= 0: logger.log("unitsPerEm must be non-zero", "S")
+ # After processing special cases, all required fields should have values
+ if pval is None and field in required:
+ reqmissing = True
+ logger.log("Required fontinfo field " + field + " missing from " + psource.source.filename, "E")
+ elif oval != pval:
+ changes = True
+ if pval is None:
+ if field in psource.fontinfo: psource.fontinfo.remove(field)
+ else:
+ psource.fontinfo[field][1].text = str(pval)
+ logchange(logger, "Primary font: " + field + " updated:", oval, pval)
+ fipval[field] = pval
+ if reqmissing: logger.log("Required fontinfo fields missing from " + psource.source.filename, "S")
+ if changes:
+ psource.fontinfo.setval("openTypeHeadCreated", "string",
+ datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
+ psource.write("fontinfo")
+
+ for field in libcopy:
+ pval = psource.lib.getval(field) if field in psource.lib else None
+ if pval is None:
+ logtype = "W" if field[0:7] == "public." else "I"
+ logger.log("lib.plist field " + field + " missing from " + psource.source.filename, logtype)
+ libpval[field] = pval
+
+ # Now update values in other source fonts
+
+ for dsource in dsources:
+ logger.log("Processing " + dsource.ufodir, "I")
+ fchanges = False
+ for field in fiall:
+ sval = dsource.fontinfo.getval(field) if field in dsource.fontinfo else None
+ oval = sval
+ pval = fipval[field]
+ # Set values or do other checks for special cases
+ if field == "italicAngle":
+ if "italic" in dsource.source.filename.lower():
+ if sval is None or sval == 0:
+ logger.log(dsource.source.filename + ": Italic angle must be non-zero for italic fonts", "E")
+ else:
+ if sval is not None and sval != 0:
+ logger.log(dsource.source.filename + ": Italic angle must be zero for non-italic fonts", "E")
+ sval = None
+ elif field == "openTypeOS2WeightClass":
+ sval = int(dsource.source.location["weight"])
+ elif field == "styleMapStyleName":
+ if not complex and sval not in ('regular', 'bold', 'italic', 'bold italic'):
+ logger.log(dsource.source.filename + ": styleMapStyleName must be 'regular', 'bold', 'italic', 'bold italic'", "E")
+ elif field in ("styleName", "openTypeNamePreferredSubfamilyName"):
+ sval = dsource.source.styleName
+ elif field == "openTypeNameUniqueID":
+ sn = dsource.source.styleName
+ sval = nm + ": " + fn + " " + sn + ": " + datetime.datetime.now().strftime("%Y")
+ else:
+ sval = pval
+ if oval != sval:
+ if field == "unitsPerEm": logger.log("unitsPerEm inconsistent across fonts", "S")
+ fchanges = True
+ if sval is None:
+ dsource.fontinfo.remove(field)
+ logmess = " removed: "
+ else:
+ logmess = " added: " if oval is None else " updated: "
+ # Copy value from primary. This will add if missing.
+ dsource.fontinfo.setelem(field, ET.fromstring(ET.tostring(psource.fontinfo[field][1])))
+ # For fields where it is not a copy from primary...
+ if field in ("italicAngle", "openTypeNamePreferredSubfamilyName", "openTypeNameUniqueID",
+ "openTypeOS2WeightClass", "styleName"):
+ dsource.fontinfo[field][1].text = str(sval)
+
+ logchange(logger, dsource.source.filename + " " + field + logmess, oval, sval)
+
+ lchanges = False
+ for field in libcopy:
+ oval = dsource.lib.getval(field) if field in dsource.lib else None
+ pval = libpval[field]
+ if oval != pval:
+ lchanges = True
+ if pval is None:
+ dsource.lib.remove(field)
+ logmess = " removed: "
+ else:
+ dsource.lib.setelem(field, ET.fromstring(ET.tostring(psource.lib[field][1])))
+ logmess = " updated: "
+ logchange(logger, dsource.source.filename + " " + field + logmess, oval, pval)
+
+ if lchanges:
+ dsource.write("lib")
+ fchanges = True # Force fontinfo to update so openTypeHeadCreated is set
+ if fchanges:
+ dsource.fontinfo.setval("openTypeHeadCreated", "string",
+ datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
+ dsource.write("fontinfo")
+
+ logger.log("psfsyncmasters completed", "P")
+
+class Dsource(object):
+ def __init__(self, ds, source, logger, frompds, psource, args):
+ self.ds = ds
+ self.source = source
+ self.logger = logger
+ self.frompds = frompds # Boolean to say if came from pds
+ self.newfile = "_new" if args.new else ""
+ self.ufodir = source.path
+ if not os.path.isdir(self.ufodir): logger.log(self.ufodir + " in designspace doc does not exist", "S")
+ try:
+ self.fontinfo = UFO.Uplist(font=None, dirn=self.ufodir, filen="fontinfo.plist")
+ except Exception as e:
+ logger.log("Unable to open fontinfo.plist in " + self.ufodir, "S")
+ try:
+ self.lib = UFO.Uplist(font=None, dirn=self.ufodir, filen="lib.plist")
+ except Exception as e:
+ if psource:
+ logger.log("Unable to open lib.plist in " + self.ufodir, "E")
+ self.lib = {} # Just need empty dict, so all vals will be set to None
+ else:
+ logger.log("Unable to open lib.plist in " + self.ufodir + "; creating empty one", "E")
+ self.lib = UFO.Uplist()
+ self.lib.logger=logger
+ self.lib.etree = ET.fromstring("<plist>\n<dict/>\n</plist>")
+ self.lib.populate_dict()
+ self.lib.dirn = self.ufodir
+ self.lib.filen = "lib.plist"
+
+        # Process parameters with similar logic to that in ufo.py, primarily to create outparams for writeXMLobject
+ libparams = {}
+ params = args.paramsobj
+ if "org.sil.pysilfontparams" in self.lib:
+ elem = self.lib["org.sil.pysilfontparams"][1]
+ if elem.tag != "array":
+ logger.log("Invalid parameter XML lib.plist - org.sil.pysilfontparams must be an array", "S")
+ for param in elem:
+ parn = param.tag
+ if not (parn in params.paramclass) or params.paramclass[parn] not in ("outparams", "ufometadata"):
+ logger.log(
+ "lib.plist org.sil.pysilfontparams must only contain outparams or ufometadata values: " + parn + " invalid",
+ "S")
+ libparams[parn] = param.text
+        # Create font-specific parameter set (with updates from lib.plist). Prepend names with ufodir to ensure uniqueness if multiple fonts are open
+ params.addset(self.ufodir + "lib", "lib.plist in " + self.ufodir, inputdict=libparams)
+ if "command line" in params.sets:
+ params.sets[self.ufodir + "lib"].updatewith("command line", log=False) # Command line parameters override lib.plist ones
+ copyset = "main" if "main" in params.sets else "default"
+ params.addset(self.ufodir, copyset=copyset)
+ params.sets[self.ufodir].updatewith(self.ufodir + "lib", sourcedesc="lib.plist")
+ self.paramset = params.sets[self.ufodir]
+ # Validate specific parameters
+ if sorted(self.paramset["glifElemOrder"]) != sorted(params.sets["default"]["glifElemOrder"]):
+ logger.log("Invalid values for glifElemOrder", "S")
+ # Create outparams based on values in paramset, building attriborders from separate attriborders.<type> parameters.
+ self.outparams = {"attribOrders": {}}
+ for parn in params.classes["outparams"]:
+ value = self.paramset[parn]
+ if parn[0:12] == 'attribOrders':
+ elemname = parn.split(".")[1]
+ self.outparams["attribOrders"][elemname] = ETU.makeAttribOrder(value)
+ else:
+ self.outparams[parn] = value
+ self.outparams["UFOversion"] = 9 # Dummy value since not currently needed
+
+ def write(self, plistn):
+ filen = plistn + self.newfile + ".plist"
+ self.logger.log("Writing updated " + plistn + ".plist to " + filen, "P")
+ exists = True if os.path.isfile(os.path.join(self.ufodir, filen)) else False
+ plist = getattr(self, plistn)
+ UFO.writeXMLobject(plist, self.outparams, self.ufodir, filen, exists, fobject=True)
+
+
+def logchange(logger, logmess, old, new):
+ oldstr = str(old) if len(str(old)) < 22 else str(old)[0:20] + "..."
+ newstr = str(new) if len(str(new)) < 22 else str(new)[0:20] + "..."
+ if old is None:
+ logmess = logmess + " New value: " + newstr
+ else:
+ if new is None:
+ logmess = logmess + " Old value: " + oldstr
+ else:
+ logmess = logmess + " Old value: " + oldstr + ", new value: " + newstr
+ logger.log(logmess, "W")
+ # Extra verbose logging
+ if len(str(old)) > 21 :
+ logger.log("Full old value: " + str(old), "V")
+ if len(str(new)) > 21 :
+ logger.log("Full new value: " + str(new), "V")
+ logger.log("Types: Old - " + str(type(old)) + ", New - " + str(type(new)), "V")
+
+
+def cmd() : execute(None,doit, argspec)
+if __name__ == "__main__": cmd()
+
+
+''' *** Code notes ***
+
+Does not check precision for float, since no float values are currently processed
+ - see processnum in psfsyncmeta if needed later
+
+'''
diff --git a/lib/silfont/scripts/psfsyncmeta.py b/lib/silfont/scripts/psfsyncmeta.py
new file mode 100755
index 0000000..e925fde
--- /dev/null
+++ b/lib/silfont/scripts/psfsyncmeta.py
@@ -0,0 +1,303 @@
+#!/usr/bin/env python
+__doc__ = '''Sync metadata across a family of fonts assuming standard UFO file naming'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute
+from datetime import datetime
+import silfont.ufo as UFO
+import os
+from xml.etree import ElementTree as ET
+
+argspec = [
+ ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
+ ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_sync.log'}),
+ ('-s','--single', {'help': 'Sync single UFO against master', 'action': 'store_true', 'default': False},{}),
+ ('-m','--master', {'help': 'Master UFO to sync single UFO against', 'nargs': '?' },{'type': 'infont', 'def': None}),
+ ('-r','--reportonly', {'help': 'Report issues but no updating', 'action': 'store_true', 'default': False},{}),
+ ('-n','--new', {'help': 'append "_new" to file/ufo names', 'action': 'store_true', 'default': False},{}),
+ ('--normalize', {'help': 'output all the fonts to normalize them', 'action': 'store_true', 'default': False},{}),
+ ]
+
+def doit(args) :
+ standardstyles = ["Regular", "Italic", "Bold", "BoldItalic"]
+ finfoignore = ["openTypeHeadCreated", "openTypeOS2Panose", "postscriptBlueScale", "postscriptBlueShift",
+ "postscriptBlueValues", "postscriptOtherBlues", "postscriptStemSnapH", "postscriptStemSnapV", "postscriptForceBold"]
+ libfields = ["public.postscriptNames", "public.glyphOrder", "com.schriftgestaltung.glyphOrder"]
+
+ font = args.ifont
+ logger = args.logger
+ singlefont = args.single
+ mfont = args.master
+ newfile = "_new" if args.new else ""
+ reportonly = args.reportonly
+ updatemessage = " to be updated: " if reportonly else " updated: "
+ params = args.paramsobj
+ precision = font.paramset["precision"]
+
+ # Increase screen logging level to W unless specific level supplied on command-line
+ if not(args.quiet or "scrlevel" in params.sets["command line"]) : logger.scrlevel = "W"
+
+ # Process UFO name
+ (path,base) = os.path.split(font.ufodir)
+ (base,ext) = os.path.splitext(base)
+ if '-' not in base : logger.log("Non-standard UFO name - must be <family>-<style>", "S")
+ (family,style) = base.split('-')
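+    # e.g. "GentiumPlus-Regular.ufo" gives family "GentiumPlus" and style "Regular" (illustrative)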
+
+ styles = [style]
+ fonts = {}
+ fonts[style] = font
+
+ # Process single and master settings
+ if singlefont :
+ if mfont :
+ mastertext = "Master" # Used in log messages
+ else : # Check against Regular font from same family
+ mfont = openfont(params, path, family, "Regular")
+ if mfont is None : logger.log("No regular font to check against - use -m to specify master font", "S")
+ mastertext = "Regular"
+ fonts["Regular"] =mfont
+ else : # Supplied font must be Regular
+ if mfont : logger.log("-m --master must only be used with -s --single", "S")
+ if style != "Regular" : logger.log("Must specify a Regular font unless -s is used", "S")
+ mastertext = "Regular"
+ mfont = font
+
+ # Check for required fields in master font
+ mfinfo = mfont.fontinfo
+ if "familyName" in mfinfo :
+ spacedfamily = mfinfo["familyName"][1].text
+ else:
+ logger.log("No familyName field in " + mastertext, "S")
+ if "openTypeNameManufacturer" in mfinfo :
+ manufacturer = mfinfo["openTypeNameManufacturer"][1].text
+ else:
+ logger.log("No openTypeNameManufacturer field in " + mastertext, "S")
+ mlib = mfont.lib
+
+ # Open the remaining fonts in the family
+ if not singlefont :
+ for style in standardstyles :
+ if not style in fonts :
+ fonts[style] = openfont(params, path, family, style) # Will return None if font does not exist
+ if fonts[style] is not None : styles.append(style)
+
+ # Process fonts
+ psuniqueidlist = []
+ fieldscopied = False
+ for style in styles :
+ font = fonts[style]
+ if font.UFOversion != "2" : logger.log("This script only works with UFO 2 format fonts","S")
+
+ fontname = family + "-" + style
+ spacedstyle = "Bold Italic" if style == "BoldItalic" else style
+ spacedname = spacedfamily + " " + spacedstyle
+ logger.log("************ Processing " + fontname, "P")
+
+ ital = True if "Italic" in style else False
+ bold = True if "Bold" in style else False
+
+ # Process fontinfo.plist
+ finfo=font.fontinfo
+ fieldlist = list(set(finfo) | set(mfinfo)) # Need all fields from both to detect missing fields
+ fchanged = False
+
+ for field in fieldlist:
+ action = None; issue = ""; newval = ""
+ if field in finfo :
+ elem = finfo[field][1]
+ tag = elem.tag
+ text = elem.text
+ if text is None : text = ""
+ if tag == "real" : text = processnum(text,precision)
+ # Field-specific actions
+
+ if field not in finfo :
+ if field not in finfoignore : action = "Copyfield"
+ elif field == "italicAngle" :
+ if ital and text == "0" :
+ issue = "is zero"
+ action = "Warn"
+ if not ital and text != "0" :
+ issue = "is non-zero"
+ newval = 0
+ action = "Update"
+ elif field == "openTypeNameUniqueID" :
+ newval = manufacturer + ": " + spacedname + ": " + datetime.now().strftime("%Y")
+ if text != newval :
+ issue = "Incorrect value"
+ action = "Update"
+ elif field == "openTypeOS2WeightClass" :
+ if bold and text != "700" :
+ issue = "is not 700"
+ newval = 700
+ action = "Update"
+ if not bold and text != "400" :
+ issue = "is not 400"
+ newval = 400
+ action = "Update"
+ elif field == "postscriptFontName" :
+ if text != fontname :
+ newval = fontname
+ issue = "Incorrect value"
+ action = "Update"
+ elif field == "postscriptFullName" :
+ if text != spacedname :
+ newval = spacedname
+ issue = "Incorrect value"
+ action = "Update"
+ elif field == "postscriptUniqueID" :
+ if text in psuniqueidlist :
+ issue = "has same value as another font: " + text
+ action = "Warn"
+ else :
+ psuniqueidlist.append(text)
+ elif field == "postscriptWeightName" :
+ newval = 'bold' if bold else 'regular'
+ if text != newval :
+ issue = "Incorrect value"
+ action = 'Update'
+ elif field == "styleMapStyleName" :
+ if text != spacedstyle.lower() :
+ newval = spacedstyle.lower()
+ issue = "Incorrect value"
+ action = "Update"
+ elif field in ("styleName", "openTypeNamePreferredSubfamilyName") :
+ if text != spacedstyle :
+ newval = spacedstyle
+ issue = "Incorrect value"
+ action = "Update"
+ elif field in finfoignore :
+ action = "Ignore"
+ # Warn for fields in this font but not master
+ elif field not in mfinfo :
+ issue = "is in " + spacedstyle + " but not in " + mastertext
+ action = "Warn"
+ # for all other fields, sync values from master
+ else :
+ melem = mfinfo[field][1]
+ mtag = melem.tag
+ mtext = melem.text
+ if mtext is None : mtext = ""
+ if mtag == 'real' : mtext = processnum(mtext,precision)
+ if tag in ("real", "integer", "string") :
+ if mtext != text :
+ issue = "does not match " + mastertext + " value"
+ newval = mtext
+ action = "Update"
+                elif tag in ("true", "false") :
+ if tag != mtag :
+ issue = "does not match " + mastertext + " value"
+ action = "FlipBoolean"
+ elif tag == "array" : # Assume simple array with just values to compare
+ marray = mfinfo.getval(field)
+ array = finfo.getval(field)
+ if array != marray: action = "CopyArray"
+ else : logger.log("Non-standard fontinfo field type in " + fontname, "X")
+
+ # Now process the actions, create log messages etc
+ if action is None or action == "Ignore" :
+ pass
+ elif action == "Warn" :
+ logger.log(field + " needs manual correction: " + issue, "W")
+ elif action == "Error" :
+ logger.log(field + " needs manual correction: " + issue, "E")
+ elif action in ("Update", "FlipBoolean", "Copyfield", "CopyArray") : # Updating actions
+ fchanged = True
+ message = field + updatemessage
+ if action == "Update" :
+ message = message + issue + " Old: '" + text + "' New: '" + str(newval) + "'"
+ elem.text = newval
+ elif action == "FlipBoolean" :
+ newval = "true" if tag == "false" else "false"
+ message = message + issue + " Old: '" + tag + "' New: '" + newval + "'"
+ finfo.setelem(field, ET.fromstring("<" + newval + "/>"))
+ elif action == "Copyfield" :
+ message = message + "is missing so will be copied from " + mastertext
+ fieldscopied = True
+ finfo.addelem(field, ET.fromstring(ET.tostring(mfinfo[field][1])))
+ elif action == "CopyArray" :
+ message = message + "Some values different Old: " + str(array) + " New: " + str(marray)
+ finfo.setelem(field, ET.fromstring(ET.tostring(melem)))
+ logger.log(message, "W")
+ else:
+ logger.log("Uncoded action: " + action + " - oops", "X")
+
+ # Process lib.plist - currently just public.postscriptNames and glyph order fields which are all simple dicts or arrays
+ lib = font.lib
+ lchanged = False
+
+ for field in libfields:
+ # Check the values
+ action = None; issue = ""; newval = ""
+ if field in mlib:
+ if field in lib:
+ if lib.getval(field) != mlib.getval(field): # will only work for arrays or dicts with simple values
+ action = "Updatefield"
+ else:
+ action = "Copyfield"
+ else:
+                action = "Error" if field in ("public.glyphOrder", "public.postscriptNames") else "Warn"
+ issue = field + " not in " + mastertext + " lib.plist"
+
+ # Process the actions, create log messages etc
+ if action is None or action == "Ignore":
+ pass
+ elif action == "Warn":
+ logger.log(field + " needs manual correction: " + issue, "W")
+ elif action == "Error":
+ logger.log(field + " needs manual correction: " + issue, "E")
+ elif action in ("Updatefield", "Copyfield"): # Updating actions
+ lchanged = True
+ message = field + updatemessage
+ if action == "Copyfield":
+ message = message + "is missing so will be copied from " + mastertext
+ lib.addelem(field, ET.fromstring(ET.tostring(mlib[field][1])))
+ elif action == "Updatefield":
+ message = message + "Some values different"
+ lib.setelem(field, ET.fromstring(ET.tostring(mlib[field][1])))
+ logger.log(message, "W")
+ else:
+ logger.log("Uncoded action: " + action + " - oops", "X")
+
+ # Now update on disk
+ if not reportonly:
+ if args.normalize:
+ font.write(os.path.join(path, family + "-" + style + newfile + ".ufo"))
+ else: # Just update fontinfo and lib
+ if fchanged:
+ filen = "fontinfo" + newfile + ".plist"
+ logger.log("Writing updated fontinfo to " + filen, "P")
+ exists = True if os.path.isfile(os.path.join(font.ufodir, filen)) else False
+ UFO.writeXMLobject(finfo, font.outparams, font.ufodir, filen, exists, fobject=True)
+ if lchanged:
+ filen = "lib" + newfile + ".plist"
+ logger.log("Writing updated lib.plist to " + filen, "P")
+ exists = True if os.path.isfile(os.path.join(font.ufodir, filen)) else False
+ UFO.writeXMLobject(lib, font.outparams, font.ufodir, filen, exists, fobject=True)
+
+ if fieldscopied :
+        message = "After updating, psfsyncmeta will need to be re-run to validate these fields" if reportonly else "Re-run psfsyncmeta to validate these fields"
+ logger.log("*** Some fields were missing and so copied from " + mastertext + ". " + message, "P")
+
+ return
+
+
+def openfont(params, path, family, style) : # Only try if directory exists
+ ufodir = os.path.join(path,family+"-"+style+".ufo")
+ font = UFO.Ufont(ufodir, params=params) if os.path.isdir(ufodir) else None
+ return font
+
+
+def processnum(text, precision) : # Apply same processing to numbers that normalization will
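+    # e.g. with precision 2, "0.333333" becomes "0.33" and "2.00" becomes "2" (illustrative)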
+ if precision is not None:
+ val = round(float(text), precision)
+ if val == int(val) : val = int(val) # Removed trailing decimal .0
+ text = str(val)
+ return text
+
+
+def cmd() : execute("UFO",doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psftuneraliases.py b/lib/silfont/scripts/psftuneraliases.py
new file mode 100644
index 0000000..f71d097
--- /dev/null
+++ b/lib/silfont/scripts/psftuneraliases.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+__doc__ = '''Merge lookup and feature aliases into TypeTuner feature file'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2019 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from silfont.core import execute
+from xml.etree import ElementTree as ET
+from fontTools import ttLib
+import csv
+import struct
+
+argspec = [
+ ('input', {'help': 'Input TypeTuner feature file'}, {'type': 'infile'}),
+ ('output', {'help': 'Output TypeTuner feature file'}, {}),
+ ('-m','--mapping', {'help': 'Input csv mapping file'}, {'type': 'incsv'}),
+ ('-f','--ttf', {'help': 'Compiled TTF file'}, {}),
+ ('-l','--log',{'help': 'Optional log file'}, {'type': 'outfile', 'def': '_tuneraliases.log', 'optlog': True}),
+ ]
+
+def doit(args) :
+ logger = args.logger
+
+ if args.mapping is None and args.ttf is None:
+ logger.log("One or both of -m and -f must be provided", "S")
+ featdoc = ET.parse(args.input)
+ root = featdoc.getroot()
+ if root.tag != 'all_features':
+ logger.log("Invalid TypeTuner feature file: missing root element", "S")
+
+ # Whitespace to add after each new alias:
+ tail = '\n\t\t'
+
+    # Find or add aliases element
+ aliases = root.find('aliases')
+ if aliases is None:
+ aliases = ET.SubElement(root,'aliases')
+ aliases.tail = '\n'
+
+ added = set()
+ duplicates = set()
+ def setalias(name, value):
+ # detect duplicate names in input
+ if name in added:
+ duplicates.add(name)
+ else:
+ added.add(name)
+ # modify existing or add new alias
+ alias = aliases.find('alias[@name="{}"]'.format(name))
+ if alias is None:
+ alias = ET.SubElement(aliases, 'alias', {'name': name, 'value': value})
+ alias.tail = tail
+ else:
+ alias.set('value', value)
+
+ # Process mapping file if present:
+ if args.mapping:
+ # Mapping file is assumed to come from psfbuildfea, and should look like:
+ # lookupname,table,index
+ # e.g. DigitAlternates,GSUB,51
+ for (name,table,value) in args.mapping:
+ setalias(name, value)
+
+ # Process the ttf file if present
+ if args.ttf:
+ # Generate aliases for features.
+ # In this code featureID means the key used in FontUtils for finding the feature, e.g., "calt _2"
+ def dotable(t): # Common routine for GPOS and GSUB
+ currtag = None
+ currtagindex = None
+ flist = [] # list, in order, of (featureTag, featureID), per Font::TTF
+ for i in range(0,t.FeatureList.FeatureCount):
+ newtag = str(t.FeatureList.FeatureRecord[i].FeatureTag)
+ if currtag is None or currtag != newtag:
+ flist.append((newtag, newtag))
+ currtag = newtag
+ currtagindex = 0
+ else:
+ flist.append( (currtag, '{} _{}'.format(currtag, currtagindex)))
+ currtagindex += 1
+ fslList = {} # dictionary keyed by feature_script_lang values returning featureID
+ for s in t.ScriptList.ScriptRecord:
+ currtag = str(s.ScriptTag)
+ # At present only looking at the dflt lang entries
+ for findex in s.Script.DefaultLangSys.FeatureIndex:
+ fslList['{}_{}_dflt'.format(flist[findex][0],currtag)] = flist[findex][1]
+ # Now that we have them all, add them in sorted order.
+ for name, value in sorted(fslList.items()):
+ setalias(name,value)
+
+ # Open the TTF for processing
+ try:
+ f = ttLib.TTFont(args.ttf)
+ except Exception as e:
+ logger.log("Couldn't open font '{}' for reading : {}".format(args.ttf, str(e)),"S")
+ # Grab features from GSUB and GPOS
+ for tag in ('GSUB', 'GPOS'):
+ try:
+ dotable(f[tag].table)
+ except Exception as e:
+ logger.log("Failed to process {} table: {}".format(tag, str(e)), "W")
+ # Grab features from Graphite:
+ try:
+ for tag in sorted(f['Feat'].features.keys()):
+ if tag == '1':
+ continue
+ name = 'gr_' + tag
+ value = str(struct.unpack('>L', tag.encode())[0])
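+            # e.g. a 4-character tag "abcd" packs big-endian to 1633837924 (0x61626364) - illustrative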
+ setalias(name,value)
+ except Exception as e:
+ logger.log("Failed to process Feat table: {}".format(str(e)), "W")
+
+ if len(duplicates):
+ logger.log("The following aliases defined more than once in input: {}".format(", ".join(sorted(duplicates))), "S")
+
+ # Success. Write the result
+ featdoc.write(args.output, encoding='UTF-8', xml_declaration=True)
+
+def cmd() : execute(None,doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfufo2glyphs.py b/lib/silfont/scripts/psfufo2glyphs.py
new file mode 100644
index 0000000..b40afcc
--- /dev/null
+++ b/lib/silfont/scripts/psfufo2glyphs.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+__doc__ = '''Reads a designSpace file and create a Glyphs file from its linked ufos'''
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from silfont.core import execute, splitfn
+
+from glyphsLib import to_glyphs
+from fontTools.designspaceLib import DesignSpaceDocument
+import os
+
+argspec = [
+ ('designspace', {'help': 'Input designSpace file'}, {'type': 'filename'}),
+ ('glyphsfile', {'help': 'Output glyphs file name', 'nargs': '?' }, {'type': 'filename', 'def': None}),
+ ('--glyphsformat', {'help': "Format for glyphs file (2 or 3)", 'default': "2"}, {}),
+ ('--nofea', {'help': 'Do not process features.fea', 'action': 'store_true', 'default': False}, {}),
+ # ('--nofixes', {'help': 'Bypass code fixing data', 'action': 'store_true', 'default': False}, {}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_ufo2glyphs.log'})]
+
+# This is just bare-bones code at present so does the same as glyphsLib's ufo2glyphs!
+# It is designed so that data could be massaged, if necessary, on the way. No such need has been found so far
+
+def doit(args):
+ glyphsfile = args.glyphsfile
+ logger = args.logger
+ gformat = args.glyphsformat
+ if gformat in ("2","3"):
+ gformat = int(gformat)
+ else:
+ logger.log("--glyphsformat must be 2 or 3", 'S')
+ if glyphsfile is None:
+ (path,base,ext) = splitfn(args.designspace)
+ glyphsfile = os.path.join(path, base + ".glyphs" )
+ else:
+ (path, base, ext) = splitfn(glyphsfile)
+ backupname = os.path.join(path, base + "-backup.glyphs" )
+ logger.log("Opening designSpace file", "I")
+ ds = DesignSpaceDocument()
+ ds.read(args.designspace)
+ if args.nofea: # Need to rename the features.fea files so they are not processed
+ origfeas = []; hiddenfeas = []
+ for source in ds.sources:
+ origfea = os.path.join(source.path, "features.fea")
+ hiddenfea = os.path.join(source.path, "features.tmp")
+ if os.path.exists(origfea):
+ logger.log(f'Renaming {origfea} to {hiddenfea}', "I")
+ os.rename(origfea, hiddenfea)
+ origfeas.append(origfea)
+ hiddenfeas.append(hiddenfea)
+ else:
+                logger.log(f'No features.fea found in {source.path}', "I")
+ logger.log("Now creating glyphs object", "I")
+ glyphsfont = to_glyphs(ds)
+    if args.nofea: # Now need to reverse renaming of features.fea files
+ for i, origfea in enumerate(origfeas):
+ logger.log(f'Renaming {hiddenfeas[i]} back to {origfea}', "I")
+ os.rename(hiddenfeas[i], origfea)
+ glyphsfont.format_version = gformat
+
+ if os.path.exists(glyphsfile): # Create a backup
+ logger.log("Renaming existing glyphs file to " + backupname, "I")
+ os.renames(glyphsfile, backupname)
+ logger.log("Writing glyphs file: " + glyphsfile, "I")
+ glyphsfont.save(glyphsfile)
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfufo2ttf.py b/lib/silfont/scripts/psfufo2ttf.py
new file mode 100644
index 0000000..832ef88
--- /dev/null
+++ b/lib/silfont/scripts/psfufo2ttf.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+__doc__ = 'Generate a ttf file without OpenType tables from a UFO'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Alan Ward'
+
+# Compared to fontmake it does not decompose glyphs or remove overlaps
+# and curve conversion seems to happen in a different way.
+
+from silfont.core import execute
+import defcon, ufo2ft.outlineCompiler, ufo2ft.preProcessor, ufo2ft.filters
+
+argspec = [
+ ('iufo', {'help': 'Input UFO folder'}, {}),
+ ('ottf', {'help': 'Output ttf file name'}, {}),
+ ('--removeOverlaps', {'help': 'Merge overlapping contours', 'action': 'store_true'}, {}),
+    ('--decomposeComponents', {'help': 'Decompose components', 'action': 'store_true'}, {}),
+ ('-l', '--log', {'help': 'Optional log file'}, {'type': 'outfile', 'def': '_ufo2ttf.log', 'optlog': True})]
+
+PUBLIC_PREFIX = 'public.'
+
+def doit(args):
+ ufo = defcon.Font(args.iufo)
+
+ # if style is Regular and there are no openTypeNameRecords defining the full name (ID=4), then
+ # add one so that "Regular" is omitted from the fullname
+ if ufo.info.styleName == 'Regular':
+ if ufo.info.openTypeNameRecords is None:
+ ufo.info.openTypeNameRecords = []
+ fullNameRecords = [ nr for nr in ufo.info.openTypeNameRecords if nr['nameID'] == 4]
+ if not len(fullNameRecords):
+ ufo.info.openTypeNameRecords.append( { 'nameID': 4, 'platformID': 3, 'encodingID': 1, 'languageID': 1033, 'string': ufo.info.familyName } )
+
+# args.logger.log('Converting UFO to ttf and compiling fea')
+# font = ufo2ft.compileTTF(ufo,
+# glyphOrder = ufo.lib.get(PUBLIC_PREFIX + 'glyphOrder'),
+# useProductionNames = False)
+
+ args.logger.log('Converting UFO to ttf without OT', 'P')
+
+ # default arg value for TTFPreProcessor class: removeOverlaps = False, convertCubics = True
+ preProcessor = ufo2ft.preProcessor.TTFPreProcessor(ufo, removeOverlaps = args.removeOverlaps, convertCubics=True,
+ flattenComponents = True,
+ skipExportGlyphs = ufo.lib.get("public.skipExportGlyphs", []))
+
+    # Handle cases where the filters used are set via com.github.googlei18n.ufo2ft.filters in lib.plist
+ dc = dtc = ftpos = None
+ for (i,filter) in enumerate(preProcessor.preFilters):
+ if isinstance(filter, ufo2ft.filters.decomposeComponents.DecomposeComponentsFilter):
+ dc = True
+ if isinstance(filter, ufo2ft.filters.decomposeTransformedComponents.DecomposeTransformedComponentsFilter):
+ dtc = True
+ if isinstance(filter, ufo2ft.filters.flattenComponents.FlattenComponentsFilter):
+ ftpos = i
+ # Add decomposeComponents if --decomposeComponents is used
+ if args.decomposeComponents and not dc: preProcessor.preFilters.append(
+ ufo2ft.filters.decomposeComponents.DecomposeComponentsFilter())
+ # Add decomposeTransformedComponents if not already set via lib.plist
+ if not dtc: preProcessor.preFilters.append(ufo2ft.filters.decomposeTransformedComponents.DecomposeTransformedComponentsFilter())
+ # Remove flattenComponents if set via lib.plist since we set it via flattenComponents = True when setting up the preprocessor
+    if ftpos is not None: preProcessor.preFilters.pop(ftpos) # 'is not None' so a filter at index 0 is also removed
+
+ glyphSet = preProcessor.process()
+ outlineCompiler = ufo2ft.outlineCompiler.OutlineTTFCompiler(ufo,
+ glyphSet=glyphSet,
+ glyphOrder=ufo.lib.get(PUBLIC_PREFIX + 'glyphOrder'))
+ font = outlineCompiler.compile()
+
+ # handle uvs glyphs until ufo2ft does it for us.
+ uvsdict = getuvss(ufo)
+ if len(uvsdict):
+ from fontTools.ttLib.tables._c_m_a_p import cmap_format_14
+ cmap_uvs = cmap_format_14(14)
+ cmap_uvs.platformID = 0
+ cmap_uvs.platEncID = 5
+ cmap_uvs.cmap = {}
+ cmap_uvs.uvsDict = uvsdict
+ font['cmap'].tables.append(cmap_uvs)
+
+ args.logger.log('Saving ttf file', 'P')
+ font.save(args.ottf)
+
+ args.logger.log('Done', 'P')
+
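+# Build the uvsDict for a cmap format 14 subtable, either from the font-level
+# org.sil.variationSequences lib key or, failing that, from per-glyph org.sil.uvs lib keys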
+def getuvss(ufo):
+ uvsdict = {}
+ uvs = ufo.lib.get('org.sil.variationSequences', None)
+ if uvs is not None:
+ for usv, dat in uvs.items():
+ usvc = int(usv, 16)
+ pairs = []
+ uvsdict[usvc] = pairs
+ for k, v in dat.items():
+ pairs.append((int(k, 16), v))
+ return uvsdict
+ for g in ufo:
+ uvs = getattr(g, 'lib', {}).get("org.sil.uvs", None)
+ if uvs is None:
+ continue
+ codes = [int(x, 16) for x in uvs.split()]
+ if codes[1] not in uvsdict:
+ uvsdict[codes[1]] = []
+ uvsdict[codes[1]].append((codes[0], (g.name if codes[0] not in g.unicodes else None)))
+ return uvsdict
+
+def cmd(): execute(None, doit, argspec)
+if __name__ == '__main__': cmd()
diff --git a/lib/silfont/scripts/psfversion.py b/lib/silfont/scripts/psfversion.py
new file mode 100755
index 0000000..12cdae9
--- /dev/null
+++ b/lib/silfont/scripts/psfversion.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+__doc__ = 'Display version info for pysilfont and dependencies'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+import sys, importlib
+import silfont
+
+def cmd() :
+
+ deps = ( # (module, used by, min recommended version)
+ ('defcon', '?', ''),
+ ('fontbakery', '?', ''),
+ ('fontMath', '?', ''),
+ ('fontParts', '?', ''),
+ ('fontTools', '?', ''),
+ ('glyphConstruction', '?', ''),
+ ('glyphsLib', '?', ''),
+ ('lxml','?', ''),
+ ('lz4', '?', ''),
+ ('mutatorMath', '?', ''),
+ ('odf', '?', ''),
+ ('palaso', '?', ''),
+ ('tabulate', '?', ''),
+ ('ufo2ft', '?', ''),
+ ('ufoLib2', '?', ''),
+ )
+
+ # Pysilfont info
+ print("Pysilfont " + silfont.__copyright__ + "\n")
+ print(" Version: " + silfont.__version__)
+ print(" Commands in: " + sys.argv[0][:-10])
+ print(" Code running from: " + silfont.__file__[:-12])
+ print(" using: Python " + sys.version.split(" \n")[0] + "\n")
+
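+    # For each dependency, report the version found (trying common version attributes) and where it is installed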
+ for dep in deps:
+ name = dep[0]
+
+ try:
+ module = importlib.import_module(name)
+ path = module.__file__
+ # Remove .py file name from end
+ pyname = path.split("/")[-1]
+ path = path[:-len(pyname)-1]
+ version = "No version info"
+ for attr in ("__version__", "version", "VERSION"):
+ if hasattr(module, attr):
+ version = getattr(module, attr)
+ break
+ except Exception as e:
+ etext = str(e)
+ if etext == "No module named '" + name + "'":
+ version = "Module is not installed"
+ else:
+ version = "Module import failed with " + etext
+ path = ""
+
+ print('{:20} {:15} {}'.format(name + ":", version, path))
+
+ return
+
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/scripts/psfwoffit.py b/lib/silfont/scripts/psfwoffit.py
new file mode 100644
index 0000000..678cb19
--- /dev/null
+++ b/lib/silfont/scripts/psfwoffit.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+__doc__ = 'Convert font between ttf, woff, woff2'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2021 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'Bob Hallissy'
+
+from silfont.core import execute
+from fontTools.ttLib import TTFont
+from fontTools.ttLib.sfnt import WOFFFlavorData
+from fontTools.ttLib.woff2 import WOFF2FlavorData
+import os.path
+
+argspec = [
+ ('infont', {'help': 'Source font file (can be ttf, woff, or woff2)'}, {}),
+ ('-m', '--metadata', {'help': 'file containing XML WOFF metadata', 'default': None}, {}),
+ ('--privatedata', {'help': 'file containing WOFF privatedata', 'default': None}, {}),
+ ('-v', '--version', {'help': 'woff font version number in major.minor', 'default': None}, {}),
+ ('--ttf', {'help': 'name of ttf file to be written', 'nargs': '?', 'default': None, 'const': '-'}, {}),
+ ('--woff', {'help': 'name of woff file to be written', 'nargs': '?', 'default': None, 'const': '-'}, {}),
+ ('--woff2', {'help': 'name of woff2 file to be written', 'nargs': '?', 'default': None, 'const': '-'}, {}),
+ ('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_woffit.log'})]
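+# Example invocation (illustrative file name; assumes the psfwoffit entry point is installed):
+#   psfwoffit font.ttf --woff --woff2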
+
+def doit(args):
+ logger = args.logger
+ infont = args.infont
+ font = TTFont(args.infont)
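+    # defaultpath (input path minus extension) names an output when --ttf/--woff/--woff2 is given without a value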
+ defaultpath = os.path.splitext(infont)[0]
+ inFlavor = font.flavor or 'ttf'
+ logger.log(f'input font {infont} is a {inFlavor}', 'I')
+
+ # Read & parse version, if provided
+ flavorData = WOFFFlavorData() # Initializes all fields to None
+
+ if args.version:
+ try:
+ version = float(args.version)
+ if version < 0:
+ raise ValueError('version cannot be negative')
+ flavorData.majorVersion, flavorData.minorVersion = map(int, format(version, '.3f').split('.'))
+ except:
+ logger.log(f'invalid version syntax "{args.version}": should be MM.mmm', 'S')
+ else:
+ try:
+ flavorData.majorVersion = font.flavorData.majorVersion
+ flavorData.minorVersion = font.flavorData.minorVersion
+ except:
+ # Pull version from head table
+ head = font['head']
+            flavorData.majorVersion, flavorData.minorVersion = map(int, format(head.fontRevision, '.3f').split('.'))
+
+ # Read metadata if provided, else get value from input font
+ if args.metadata:
+ try:
+ with open(args.metadata, 'rb') as f:
+ flavorData.metaData = f.read()
+ except:
+ logger.log(f'Unable to read file "{args.metadata}"', 'S')
+ elif inFlavor != 'ttf':
+ flavorData.metaData = font.flavorData.metaData
+
+ # Same process for private data
+ if args.privatedata:
+ try:
+ with open(args.privatedata, 'rb') as f:
+                flavorData.privData = f.read() # WOFFFlavorData stores private data in .privData (as used below)
+ except:
+ logger.log(f'Unable to read file "{args.privatedata}"', 'S')
+ elif inFlavor != 'ttf':
+ flavorData.privData = font.flavorData.privData
+
+ if args.woff:
+ font.flavor = 'woff'
+ font.flavorData = flavorData
+        fname = f'{defaultpath}.{font.flavor}' if args.woff == '-' else args.woff
+ logger.log(f'Writing {font.flavor} font to "{fname}"', 'P')
+ font.save(fname)
+
+ if args.woff2:
+ font.flavor = 'woff2'
+ font.flavorData = WOFF2FlavorData(data=flavorData)
+ fname = f'{defaultpath}.{font.flavor}' if args.woff2 == '-' else args.woff2
+ logger.log(f'Writing {font.flavor} font to "{fname}"', 'P')
+ font.save(fname)
+
+ if args.ttf:
+ font.flavor = None
+ font.flavorData = None
+ fname = f'{defaultpath}.ttf' if args.ttf == '-' else args.ttf
+ logger.log(f'Writing ttf font to "{fname}"', 'P')
+ font.save(fname)
+
+ font.close()
+
+def cmd() : execute('FT',doit, argspec)
+if __name__ == "__main__": cmd()
+
+
+
diff --git a/lib/silfont/scripts/psfxml2compdef.py b/lib/silfont/scripts/psfxml2compdef.py
new file mode 100755
index 0000000..f5b5b35
--- /dev/null
+++ b/lib/silfont/scripts/psfxml2compdef.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+__doc__ = 'Convert composite definition file from XML format to single-line format'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Rowe'
+
+from silfont.core import execute
+from silfont.comp import CompGlyph
+from xml.etree import ElementTree as ET
+
+# specify two parameters: input file (XML format), output file (single line format).
+argspec = [
+ ('input',{'help': 'Input file of CD in XML format'}, {'type': 'infile'}),
+ ('output',{'help': 'Output file of CD in single line format'}, {'type': 'outfile'}),
+ ('-l', '--log', {'help': 'Optional log file'}, {'type': 'outfile', 'def': '_xml2compdef.log', 'optlog': True})]
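+# Example invocation (illustrative file names; assumes the psfxml2compdef entry point is installed):
+#   psfxml2compdef composites.xml composites.txt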
+
+def doit(args) :
+ cgobj = CompGlyph()
+ glyphcount = 0
+ for g in ET.parse(args.input).getroot().findall('glyph'):
+ glyphcount += 1
+ cgobj.CDelement = g
+ cgobj.CDline = None
+ cgobj.parsefromCDelement()
+        if cgobj.CDline is not None:
+            args.output.write(cgobj.CDline + '\n')
+        else:
+            pass # TODO: report an error identifying which glyph (number glyphcount) failed to parse
+ return
+
+def cmd() : execute(None,doit,argspec)
+if __name__ == "__main__": cmd()
diff --git a/lib/silfont/ufo.py b/lib/silfont/ufo.py
new file mode 100755
index 0000000..e6917c8
--- /dev/null
+++ b/lib/silfont/ufo.py
@@ -0,0 +1,1386 @@
+#!/usr/bin/env python
+'Classes and functions for handling Ufont UFO font objects in pysilfont scripts'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+from xml.etree import ElementTree as ET
+import sys, os, shutil, filecmp, io, re
+import warnings
+import collections
+import datetime
+import silfont.core
+import silfont.util as UT
+import silfont.etutil as ETU
+
+_glifElemMulti = ('unicode', 'guideline', 'anchor') # glif elements that can occur multiple times
+_glifElemF1 = ('advance', 'unicode', 'outline', 'lib') # glif elements valid in format 1 glifs (ie UFO2 glifs)
+
+# Define illegal characters and reserved names for makeFileName
+_illegalChars = "\"*+/:><?[\]|" + chr(0x7F)
+for i in range(0, 32): _illegalChars += chr(i)
+_illegalChars = list(_illegalChars)
+_reservedNames = "CON PRN AUX CLOCK$ NUL COM1 COM2 COM3 COM4 PT1 LPT2 LPT3".lower().split(" ")
+
+obsoleteLibKeys = [ # Used by "check and fix" + some scripts
+ "com.schriftgestaltung.blueFuzz",
+ "com.schriftgestaltung.blueScale",
+ "com.schriftgestaltung.blueShift",
+ "com.schriftgestaltung.customValue",
+ "com.schriftgestaltung.Disable Last Change",
+ "com.schriftgestaltung.disablesAutomaticAlignment",
+ "com.schriftgestaltung.disablesLastChange",
+ "com.schriftgestaltung.DisplayStrings",
+ "com.schriftgestaltung.font.Disable Last Change",
+ "com.schriftgestaltung.font.glyphOrder",
+ "com.schriftgestaltung.font.license",
+ "com.schriftgestaltung.useNiceNames",
+ "org.sil.glyphsappversion",
+ "UFOFormat"]
+
+class _Ucontainer(object):
+ # Parent class for other objects (eg Ulayer)
+ def __init__(self):
+ self._contents = {}
+
+ # Define methods so it acts like an immutable container
+ # (changes should be made via object functions etc)
+ def __len__(self):
+ return len(self._contents)
+
+ def __getitem__(self, key):
+ return self._contents[key]
+
+ def __iter__(self):
+ return iter(self._contents)
+
+ def get(self, key, default=None):
+ return self._contents.get(key, default=default)
+
+ def keys(self):
+ return self._contents.keys()
+
+
+class _plist(object):
+ # Used for common plist methods inherited by Uplist and Ulib classes
+
+ def addval(self, key, valuetype, value): # For simple single-value elements - use addelem for dicts or arrays
+ if valuetype not in ("integer", "real", "string"):
+ self.font.logger.log("addval() can only be used with simple elements", "X")
+ if key in self._contents: self.font.logger.log("Attempt to add duplicate key " + key + " to plist", "X")
+ dict = self.etree[0]
+
+ keyelem = ET.Element("key")
+ keyelem.text = key
+ dict.append(keyelem)
+
+ valelem = ET.Element(valuetype)
+ valelem.text = str(value)
+ dict.append(valelem)
+
+ self._contents[key] = [keyelem, valelem]
+
+ def setval(self, key, valuetype, value): # For simple single-value elements - use setelem for dicts or arrays
+ if valuetype not in ("integer", "real", "string"):
+ self.font.logger.log("setval() can only be used with simple elements", "X")
+ if key in self._contents:
+ self._contents[key][1].text = str(value)
+ else:
+ self.addval(key, valuetype, value)
+
+ def getval(self, key, default=None): # Returns a value for integer, real, string, true, false, dict or array keys or None for other keys
+ elem = self._contents.get(key, [None, None])[1]
+ if elem is None:
+ return default
+ return self._valelem(elem)
+
+ def _valelem(self, elem): # Used by getval to recursively process dict and array elements
+ if elem.tag == "integer": return int(elem.text)
+ elif elem.tag == "real": return float(elem.text)
+ elif elem.tag == "string": return elem.text
+ elif elem.tag == "true": return True
+ elif elem.tag == "false": return False
+ elif elem.tag == "array":
+ array = []
+ for subelem in elem: array.append(self._valelem(subelem))
+ return array
+ elif elem.tag == "dict":
+ dict = {}
+ for i in range(0, len(elem), 2): dict[elem[i].text] = self._valelem(elem[i + 1])
+ return dict
+ else:
+ return None
+
+ def remove(self, key):
+ item = self._contents[key]
+ self.etree[0].remove(item[0])
+ self.etree[0].remove(item[1])
+ del self._contents[key]
+
+ def addelem(self, key, element): # For non-simple elements (eg arrays) the calling script needs to build the etree element
+ if key in self._contents: self.font.logger.log("Attempt to add duplicate key " + key + " to plist", "X")
+ dict = self.etree[0]
+
+ keyelem = ET.Element("key")
+ keyelem.text = key
+ dict.append(keyelem)
+ dict.append(element)
+
+ self._contents[key] = [keyelem, element]
+
+ def setelem(self, key, element):
+ if key in self._contents: self.remove(key)
+ self.addelem(key, element)
+
+
+class Uelement(_Ucontainer):
+ # Class for an etree element. Mainly used as a parent class
+ # For each tag in the element, returns list of sub-elements with that tag
+ def __init__(self, element):
+ self.element = element
+ self.reindex()
+
+ def reindex(self):
+ self._contents = collections.defaultdict(list)
+ for e in self.element:
+ self._contents[e.tag].append(e)
+
+ def remove(self, subelement):
+ self._contents[subelement.tag].remove(subelement)
+ self.element.remove(subelement)
+
+ def append(self, subelement):
+ self._contents[subelement.tag].append(subelement)
+ self.element.append(subelement)
+
+ def insert(self, index, subelement):
+ self._contents[subelement.tag].insert(index, subelement)
+ self.element.insert(index, subelement)
+
+ def replace(self, index, subelement):
+ self._contents[subelement.tag][index] = subelement
+ self.element[index] = subelement
+
+
+class UtextFile(object):
+ # Generic object for handling non-xml text files
+ def __init__(self, font, dirn, filen):
+ self.type = "textfile"
+ self.font = font
+ self.filen = filen
+ self.dirn = dirn
+ if dirn == font.ufodir:
+ dtree = font.dtree
+ else:
+            dtree = font.dtree.subTree(dirn) # subTree, matching the dirTree method name used elsewhere in this module
+ if not dtree: font.logger.log("Missing directory " + dirn, "X")
+ if filen not in dtree:
+ dtree[filen] = UT.dirTreeItem(added=True)
+ dtree[filen].setinfo(read=True)
+ dtree[filen].fileObject = self
+ dtree[filen].fileType = "text"
+
+ def write(self, dtreeitem, dir, ofilen, exists):
+ # For now just copies source to destination if changed
+ inpath = os.path.join(self.dirn, self.filen)
+ changed = True
+ if exists: changed = not (filecmp.cmp(inpath, os.path.join(dir, self.filen)))
+ if changed:
+ try:
+ shutil.copy2(inpath, dir)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ dtreeitem.written = True
+
+class Udirectory(object):
+ # Generic object for handling directories - used for data and images
+ def __init__(self, font, parentdir, dirn):
+ self.type = "directory"
+ self.font = font
+ self.parentdir = parentdir
+ self.dirn = dirn
+ if parentdir != font.ufodir:
+ self.font.logger.log("Currently Udir only supports top-level directories", "X")
+ dtree = font.dtree
+ if dirn not in dtree:
+ self.font.logger.log("Udir directory " + dirn + " does not exist", "X")
+ dtree[dirn].setinfo(read=True)
+ dtree[dirn].fileObject = self
+ dtree[dirn].fileType = "directory"
+
+ def write(self, dtreeitem, oparentdir):
+ # For now just copies source to destination
+ if self.parentdir == oparentdir: return # No action needed
+ inpath = os.path.join(self.parentdir, self.dirn)
+ outpath = os.path.join(oparentdir, self.dirn)
+ try:
+ if os.path.isdir(outpath):
+ shutil.rmtree(outpath)
+ shutil.copytree(inpath, outpath)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ dtreeitem.written = True
+
+class Ufont(object):
+ """ Object to hold all the data from a UFO"""
+
+ def __init__(self, ufodir, logger=None, params=None):
+ if logger is not None and params is not None:
+ params.logger.log("Only supply a logger if params not set (since that has one)", "X")
+ if params is None:
+ params = silfont.core.parameters()
+ if logger is not None: params.logger = logger
+ self.params = params
+ self.logger = params.logger
+ logger = self.logger
+ self.ufodir = ufodir
+ logger.log('Reading UFO: ' + ufodir, 'P')
+ if not os.path.isdir(ufodir):
+ logger.log(ufodir + " is not a directory", "S")
+ # Read list of files and folders
+ self.dtree = UT.dirTree(ufodir)
+ # Read metainfo (which must exist)
+ self.metainfo = self._readPlist("metainfo.plist")
+ self.UFOversion = self.metainfo["formatVersion"][1].text
+ # Read lib.plist then process pysilfont parameters if present
+ libparams = {}
+ if "lib.plist" in self.dtree:
+ self.lib = self._readPlist("lib.plist")
+ if "org.sil.pysilfontparams" in self.lib:
+ elem = self.lib["org.sil.pysilfontparams"][1]
+ if elem.tag != "array":
+ logger.log("Invalid parameter XML lib.plist - org.sil.pysilfontparams must be an array", "S")
+ for param in elem:
+ parn = param.tag
+ if not (parn in params.paramclass) or params.paramclass[parn] not in ("outparams", "ufometadata"):
+ logger.log("lib.plist org.sil.pysilfontparams must only contain outparams or ufometadata values: " + parn + " invalid", "S")
+ libparams[parn] = param.text
+        # Create a font-specific parameter set (with updates from lib.plist). Prepend names with ufodir to ensure uniqueness if multiple fonts are open
+ params.addset(ufodir + "lib", "lib.plist in " + ufodir, inputdict=libparams)
+ if "command line" in params.sets:
+ params.sets[ufodir + "lib"].updatewith("command line", log=False) # Command line parameters override lib.plist ones
+ copyset = "main" if "main" in params.sets else "default"
+ params.addset(ufodir, copyset=copyset)
+ params.sets[ufodir].updatewith(ufodir + "lib", sourcedesc="lib.plist")
+ self.paramset = params.sets[ufodir]
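+        # Resulting precedence: command line > lib.plist > main/default parameter set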
+ # Validate specific parameters
+ if self.paramset["UFOversion"] not in ("", "2", "3"): logger.log("UFO version must be 2 or 3", "S")
+ if sorted(self.paramset["glifElemOrder"]) != sorted(self.params.sets["default"]["glifElemOrder"]):
+ logger.log("Invalid values for glifElemOrder", "S")
+
+ # Create outparams based on values in paramset, building attriborders from separate attriborders.<type> parameters.
+ self.outparams = {"attribOrders": {}}
+ for parn in params.classes["outparams"]:
+ value = self.paramset[parn]
+ if parn[0:12] == 'attribOrders':
+ elemname = parn.split(".")[1]
+ self.outparams["attribOrders"][elemname] = ETU.makeAttribOrder(value)
+ else:
+ self.outparams[parn] = value
+ if self.outparams["UFOversion"] == "": self.outparams["UFOversion"] = self.UFOversion
+
+ # Set flags for checking and fixing metadata
+ cf = self.paramset["checkfix"].lower()
+ if cf not in ("check", "fix", "none", ""): logger.log("Invalid value '" + cf + "' for checkfix parameter", "S")
+
+ self.metacheck = True if cf in ("check", "fix") else False
+ self.metafix = True if cf == "fix" else False
+ if "fontinfo.plist" not in self.dtree:
+ logger.log("fontinfo.plist missing so checkfix routines can't be run", "E")
+ self.metacheck = False
+ self.metafix = False
+
+ # Read other top-level plists
+ if "fontinfo.plist" in self.dtree: self.fontinfo = self._readPlist("fontinfo.plist")
+ if "groups.plist" in self.dtree: self.groups = self._readPlist("groups.plist")
+ if "kerning.plist" in self.dtree: self.kerning = self._readPlist("kerning.plist")
+ createlayercontents = False
+ if self.UFOversion == "2": # Create a dummy layer contents so 2 & 3 can be handled the same
+ createlayercontents = True
+ else:
+ if "layercontents.plist" in self.dtree:
+ self.layercontents = self._readPlist("layercontents.plist")
+ else:
+ logger.log("layercontents.plist missing - one will be created", "W")
+ createlayercontents = True
+ if createlayercontents:
+ if "glyphs" not in self.dtree: logger.log('No glyphs directory in font', "S")
+ self.layercontents = Uplist(font=self)
+ self.dtree['layercontents.plist'] = UT.dirTreeItem(read=True, added=True, fileObject=self.layercontents,
+ fileType="xml")
+ dummylc = "<plist>\n<array>\n<array>\n<string>public.default</string>\n<string>glyphs</string>\n</array>\n</array>\n</plist>"
+ self.layercontents.etree = ET.fromstring(dummylc)
+ self.layercontents.populate_dict()
+
+ # Process features.fea
+ if "features.fea" in self.dtree:
+ self.features = UfeatureFile(self, ufodir, "features.fea")
+        # Process the glyphs directories
+ self.layers = []
+ self.deflayer = None
+ for i in sorted(self.layercontents.keys()):
+ layername = self.layercontents[i][0].text
+ layerdir = self.layercontents[i][1].text
+ logger.log("Processing Glyph Layer " + str(i) + ": " + layername + layerdir, "I")
+ layer = Ulayer(layername, layerdir, self)
+ if layer:
+ self.layers.append(layer)
+ if layername == "public.default": self.deflayer = layer
+ else:
+ logger.log("Glyph directory " + layerdir + " missing", "S")
+ if self.deflayer is None: logger.log("No public.default layer", "S")
+ # Process other directories
+ if "images" in self.dtree:
+ self.images = Udirectory(self,ufodir, "images")
+ if "data" in self.dtree:
+ self.data = Udirectory(self, ufodir, "data")
+
+ # Run best practices check and fix routines
+ if self.metacheck:
+ initwarnings = logger.warningcount
+ initerrors = logger.errorcount
+
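+            # The fi*/lib* structures below drive the fontinfo.plist and lib.plist checks:
+            # *req = required, *warnif* = warn only, *del/*delifempty = delete, *setto/*settoother/*setifmissing = force a value,
+            # *int = coerce to integer, *capitalize = capitalize words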
+ fireq = ("ascender", "copyright", "descender", "familyName", "openTypeNameManufacturer",
+ "styleName", "unitsPerEm", "versionMajor", "versionMinor")
+ fiwarnifmiss = ("capHeight", "copyright", "openTypeNameDescription", "openTypeNameDesigner",
+ "openTypeNameDesignerURL", "openTypeNameLicense", "openTypeNameLicenseURL",
+ "openTypeNameManufacturerURL", "openTypeOS2CodePageRanges",
+ "openTypeOS2UnicodeRanges", "openTypeOS2VendorID","styleMapFamilyName", "styleMapStyleName",
+ "openTypeOS2WeightClass", "openTypeOS2WinAscent", "openTypeOS2WinDescent")
+ fiwarnifnot = {"unitsPerEm": (1000, 2048),
+ "styleMapStyleName": ("regular", "bold", "italic", "bold italic")},
+ fiwarnifpresent = ("note",)
+ fidel = ("macintoshFONDFamilyID", "macintoshFONDName", "openTypeNameCompatibleFullName",
+ "openTypeGaspRangeRecords", "openTypeHeadFlags", "openTypeHheaCaretOffset",
+ "openTypeOS2FamilyClass", "postscriptForceBold", "postscriptIsFixedPitch",
+ "postscriptBlueFuzz", "postscriptBlueScale", "postscriptBlueShift", "postscriptWeightName",
+ "year")
+ fidelifempty = ("guidelines", "postscriptBlueValues", "postscriptFamilyBlues", "postscriptFamilyOtherBlues",
+ "postscriptOtherBlues")
+ fiint = ("ascender", "capHeight", "descender", "postscriptUnderlinePosition",
+ "postscriptUnderlineThickness", "unitsPerEm", "xHeight")
+ ficapitalize = ("styleMapFamilyName", "styleName")
+ fisetifmissing = {}
+ fisettoother = {"openTypeHheaAscender": "ascender", "openTypeHheaDescender": "descender",
+ "openTypeNamePreferredFamilyName": "familyName",
+ "openTypeNamePreferredSubfamilyName": "styleName", "openTypeOS2TypoAscender": "ascender",
+ "openTypeOS2TypoDescender": "descender"}
+ fisetto = {"openTypeHheaLineGap": 0, "openTypeOS2TypoLineGap": 0, "openTypeOS2WidthClass": 5,
+ "openTypeOS2Selection": [7], "openTypeOS2Type": []} # Other values are added below
+
+ libdel = ("com.fontlab.v2.tth", "com.typemytype.robofont.italicSlantOffset")
+ libsetto = {"com.schriftgestaltung.customParameter.GSFont.disablesAutomaticAlignment": True,
+ "com.schriftgestaltung.customParameter.GSFont.disablesLastChange": True}
+ libwarnifnot = {"com.schriftgestaltung.customParameter.GSFont.useNiceNames": False}
+ libwarnifmissing = ("public.glyphOrder",)
+
+ # fontinfo.plist checks
+ logger.log("Checking fontinfo.plist metadata", "P")
+
+ # Check required fields, some of which are needed for remaining checks
+ missing = []
+ for key in fireq:
+ if key not in self.fontinfo or self.fontinfo.getval(key) is None: missing.append(key)
+ # Collect values for constructing other fields, setting dummy values when missing and in check-only mode
+ dummies = False
+ storedvals = {}
+ for key in ("ascender", "copyright", "descender", "familyName", "styleName", "openTypeNameManufacturer", "versionMajor", "versionMinor"):
+ if key in self.fontinfo and self.fontinfo.getval(key) is not None:
+ storedvals[key] = self.fontinfo.getval(key)
+ if key == "styleName":
+ sn = storedvals[key]
+ sn = re.sub(r"(\w)(Italic)", r"\1 \2", sn) # Add a space before Italic if missing
+ # Capitalise first letter of words
+ sep = b' ' if type(sn) is bytes else ' '
+ sn = sep.join(s[:1].upper() + s[1:] for s in sn.split(sep))
+ if sn != storedvals[key]:
+ if self.metafix:
+ self.fontinfo.setval(key, "string", sn)
+ logmess = " updated "
+ else:
+ logmess = " would be updated "
+ self.logchange(logmess, key, storedvals[key], sn)
+ storedvals[key] = sn
+ if key in ("ascender", "descender"):
+ storedvals[key] = int(storedvals[key])
+ else:
+ dummies = True
+ if key in ("ascender", "descender", "versionMajor", "versionMinor"):
+ storedvals[key] = 999
+ else:
+ storedvals[key] = "Dummy"
+ if missing:
+ logtype = "S" if self.metafix else "W"
+ logger.log("Required fields missing from fontinfo.plist: " + str(missing), logtype)
+ if dummies:
+ logger.log("Checking will continue with values of 'Dummy' or 999 for missing fields", "W")
+ # Construct values for certain fields
+ value = storedvals["openTypeNameManufacturer"] + ": " + storedvals["familyName"] + " "
+ value = value + storedvals["styleName"] + ": " + datetime.datetime.now().strftime("%Y")
+ fisetto["openTypeNameUniqueID"] = value
+# fisetto["openTypeOS2WinDescent"] = -storedvals["descender"]
+ if "openTypeNameVersion" not in self.fontinfo:
+ fisetto["openTypeNameVersion"] = "Version " + str(storedvals["versionMajor"]) + "."\
+ + str(storedvals["versionMinor"])
+ if "openTypeOS2WeightClass" not in self.fontinfo:
+ sn = storedvals["styleName"]
+ sn2wc = {"Regular": 400, "Italic": 400, "Bold": 700, "BoldItalic": 700}
+ if sn in sn2wc: fisetto["openTypeOS2WeightClass"] = sn2wc[sn]
+ if "xHeight" not in self.fontinfo:
+ fisetto["xHeight"] = int(storedvals["ascender"] * 0.6)
+ if "openTypeOS2Selection" in self.fontinfo: # If already present, need to ensure bit 7 is set
+ fisetto["openTypeOS2Selection"] = sorted(list(set(self.fontinfo.getval("openTypeOS2Selection") + [7])))
+
+ for key in fisetifmissing:
+ if key not in self.fontinfo:
+ fisetto[key] = fisetifmissing[key]
+
+ changes = 0
+ # Warn about missing fields
+ for key in fiwarnifmiss:
+ if key not in self.fontinfo:
+ logmess = key + " is missing from fontinfo.plist"
+ if key in ("styleMapFamilyName", "styleMapStyleName") :
+ logmess = logmess + " (not needed for complex masters)"
+ logger.log(logmess, "W")
+ # Warn about bad values
+ for key in fiwarnifnot:
+ if key in self.fontinfo:
+ value = self.fontinfo.getval(key)
+ if value not in fiwarnifnot[key]:
+ logger.log(key + " should be one of " + str(fiwarnifnot[key]), "W")
+            # Warn about keys whose use is discouraged
+ for key in fiwarnifpresent:
+ if key in self.fontinfo:
+ logger.log(key + " is present - it's use is discouraged")
+
+ # Now do all remaining checks - which will lead to values being changed
+ for key in fidel + fidelifempty:
+ if key in self.fontinfo:
+ old = self.fontinfo.getval(key)
+ if not(key in fidelifempty and old != []): # Delete except for non-empty fidelifempty
+ if self.metafix:
+ self.fontinfo.remove(key)
+ logmess = " removed from fontinfo. "
+ else:
+ logmess = " would be removed from fontinfo "
+ self.logchange(logmess, key, old, None)
+ changes += 1
+
+ # Set to integer values
+ for key in fiint:
+ if key in self.fontinfo:
+ old = self.fontinfo.getval(key)
+ if old != int(old):
+ new = int(old)
+ if self.metafix:
+ self.fontinfo.setval(key, "integer", new)
+ logmess = " updated "
+ else:
+ logmess = " would be updated "
+ self.logchange(logmess, key, old, new)
+ changes += 1
+ # Capitalize words
+ for key in ficapitalize:
+ if key in self.fontinfo:
+ old = self.fontinfo.getval(key)
+ sep = b' ' if type(old) is bytes else ' '
+ new = sep.join(s[:1].upper() + s[1:] for s in old.split(sep)) # Capitalise words
+ if new != old:
+ if self.metafix:
+ self.fontinfo.setval(key, "string", new)
+                            logmess = " updated "
+                        else:
+                            logmess = " would be updated "
+ self.logchange(logmess, key, old, new)
+ changes += 1
+ # Set to specific values
+ for key in list(fisetto.keys()) + list(fisettoother.keys()):
+ if key in self.fontinfo:
+ old = self.fontinfo.getval(key)
+ logmess = " updated "
+ else:
+ old = None
+ logmess = " added "
+ if key in fisetto:
+ new = fisetto[key]
+ else:
+ new = storedvals[fisettoother[key]]
+ if new != old:
+ if self.metafix:
+ if isinstance(new, list): # Currently only integer arrays
+ array = ET.Element("array")
+ for val in new: # Only covers integer at present for openTypeOS2Selection
+                                ET.SubElement(array, "integer").text = str(val) # element text must be a string
+ self.fontinfo.setelem(key, array)
+ else: # Does not cover real at present
+ valtype = "integer" if isinstance(new, int) else "string"
+ self.fontinfo.setval(key, valtype, new)
+ else:
+ logmess = " would be" + logmess
+ self.logchange(logmess, key, old, new)
+ changes += 1
+ # Specific checks
+ if "italicAngle" in self.fontinfo:
+ old = self.fontinfo.getval("italicAngle")
+ if old == 0: # Should be deleted if 0
+ logmess = " removed since it is 0 "
+ if self.metafix:
+ self.fontinfo.remove("italicAngle")
+ else:
+ logmess = " would be" + logmess
+ self.logchange(logmess, "italicAngle", old, None)
+ changes += 1
+ if "versionMajor" in self.fontinfo: # If missing, an error will already have been reported...
+ vm = self.fontinfo.getval("versionMajor")
+ if vm == 0: logger.log("versionMajor is 0", "W")
+
+ # lib.plist checks
+ if "lib" not in self.__dict__:
+ logger.log("lib.plist missing so not checked by check & fix routines", "E")
+ else:
+ logger.log("Checking lib.plist metadata", "P")
+
+ for key in libdel:
+ if key in self.lib:
+ old = self.lib.getval(key)
+ if self.metafix:
+ self.lib.remove(key)
+ logmess = " removed from lib.plist. "
+ else:
+ logmess = " would be removed from lib.plist "
+ self.logchange(logmess, key, old, None)
+ changes += 1
+
+ for key in libsetto:
+ if key in self.lib:
+ old = self.lib.getval(key)
+ logmess = " updated "
+ else:
+ old = None
+ logmess = " added "
+ new = libsetto[key]
+ if new != old:
+ if self.metafix:
+ # Currently just supports True. See fisetto for adding other types
+ if new == True:
+ self.lib.setelem(key, ET.fromstring("<true/>"))
+ else: # Does not cover real at present
+ logger.log("Invalid value type for libsetto", "X")
+ else:
+ logmess = " would be" + logmess
+ self.logchange(logmess, key, old, new)
+ changes += 1
+ for key in libwarnifnot:
+ value = self.lib.getval(key) if key in self.lib else None
+ if value != libwarnifnot[key]:
+ addmess = "; currently missing" if value is None else "; currently set to " + str(value)
+ logger.log(key + " should normally be " + str(libwarnifnot[key]) + addmess, "W")
+
+ for key in libwarnifmissing:
+ if key not in self.lib:
+ logger.log(key + " is missing from lib.plist", "W")
+
+ logmess = " deleted - obsolete key" if self.metafix else " would be deleted - obsolete key"
+ for key in obsoleteLibKeys: # For obsolete keys that have been added historically by some tools
+ if key in self.lib:
+ old = self.lib.getval(key)
+ if self.metafix: self.lib.remove(key)
+ self.logchange(logmess,key,old,None)
+ changes += 1
+
+ # Show check&fix summary
+ warnings = logger.warningcount - initwarnings - changes
+ errors = logger.errorcount - initerrors
+ if errors or warnings or changes:
+ changemess = ", Changes made: " if self.metafix else ", Changes to make: "
+ logger.log("Check & fix results:- Errors: " + str(errors) + changemess + str(changes) +
+ ", Other warnings: " + str(warnings), "P")
+ if logger.scrlevel not in "WIV": logger.log("See log file for details", "P")
+ if missing and not self.metafix:
+ logger.log("**** Since some required fields were missing, checkfix=fix would fail", "P")
+ else:
+ logger.log("Check & Fix ran cleanly", "P")
+
+ def _readPlist(self, filen):
+ if filen in self.dtree:
+ plist = Uplist(font=self, filen=filen)
+ self.dtree[filen].setinfo(read=True, fileObject=plist, fileType="xml")
+ return plist
+ else:
+ self.logger.log(filen + " does not exist", "S")
+
+ def write(self, outdir):
+ # Write UFO out to disk, based on values set in self.outparams
+ self.logger.log("Processing font for output", "P")
+ if not os.path.exists(outdir):
+ try:
+ os.mkdir(outdir)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ if not os.path.isdir(outdir):
+ self.logger.log(outdir + " not a directory", "S")
+
+ # If output UFO already exists, need to open so only changed files are updated and redundant files deleted
+        if outdir == self.ufodir: # In the special case of output and input being the same directory, the existing output tree is just the input's tree
+ odtree = UT.dirTree(outdir)
+ else:
+ if not os.path.exists(outdir): # If outdir does not exist, create it
+ try:
+ os.mkdir(outdir)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ odtree = {}
+ else:
+ if not os.path.isdir(outdir): self.logger.log(outdir + " not a directory", "S")
+ dirlist = os.listdir(outdir)
+ if dirlist == []: # Outdir is empty
+ odtree = {}
+ else:
+ self.logger.log("Output UFO already exists - reading for comparison", "P")
+ odtree = UT.dirTree(outdir)
+ # Update version info etc
+ UFOversion = self.outparams["UFOversion"]
+ self.metainfo["formatVersion"][1].text = str(UFOversion)
+ self.metainfo["creator"][1].text = "org.sil.scripts.pysilfont"
+
+ # Set standard UFO files for output
+ dtree = self.dtree
+ setFileForOutput(dtree, "metainfo.plist", self.metainfo, "xml")
+ if "fontinfo" in self.__dict__: setFileForOutput(dtree, "fontinfo.plist", self.fontinfo, "xml")
+ if "groups" in self.__dict__: # With groups, sort by glyph name
+ for gname in list(self.groups):
+ group = self.groups.getval(gname)
+ elem = ET.Element("array")
+ for glyph in sorted(group):
+ ET.SubElement(elem, "string").text = glyph
+ self.groups.setelem(gname, elem)
+ setFileForOutput(dtree, "groups.plist", self.groups, "xml")
+ if "kerning" in self.__dict__: setFileForOutput(dtree, "kerning.plist", self.kerning, "xml")
+ if "lib" in self.__dict__: setFileForOutput(dtree, "lib.plist", self.lib, "xml")
+ if UFOversion == "3":
+ # Sort layer contents by layer name
+ lc = self.layercontents
+ lcindex = {lc[x][0].text: lc[x] for x in lc} # index on layer name
+ for (x, name) in enumerate(sorted(lcindex)):
+ lc.etree[0][x] = lcindex[name] # Replace array elements in new order
+ setFileForOutput(dtree, "layercontents.plist", self.layercontents, "xml")
+ if "features" in self.__dict__: setFileForOutput(dtree, "features.fea", self.features, "text")
+ # Set glyph layers for output
+ for layer in self.layers: layer.setForOutput()
+
+ # Write files to disk
+
+ self.logger.log("Writing font to " + outdir, "P")
+
+ changes = writeToDisk(dtree, outdir, self, odtree)
+ if changes: # Need to update openTypeHeadCreated if there have been any changes to the font
+ if "fontinfo" in self.__dict__:
+ self.fontinfo.setval("openTypeHeadCreated", "string",
+ datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
+ self.fontinfo.outxmlstr="" # Need to reset since writeXMLobject has already run once
+ writeXMLobject(self.fontinfo, self.outparams, outdir, "fontinfo.plist", True, fobject=True)
+
+ def addfile(self, filetype): # Add empty plist file for optional files
+ if filetype not in ("fontinfo", "groups", "kerning", "lib"): self.logger.log("Invalid file type to add", "X")
+ if filetype in self.__dict__: self.logger.log("File already in font", "X")
+ obj = Uplist(font=self)
+ setattr(self, filetype, obj)
+ self.dtree[filetype + '.plist'] = UT.dirTreeItem(read=True, added=True, fileObject=obj, fileType="xml")
+ obj.etree = ET.fromstring("<plist>\n<dict/>\n</plist>")
+
+ def logchange(self, logmess, key, old, new):
+ oldstr = str(old) if len(str(old)) < 22 else str(old)[0:20] + "..."
+ newstr = str(new) if len(str(new)) < 22 else str(new)[0:20] + "..."
+ logmess = key + logmess
+ if old is None:
+ logmess = logmess + " New value: " + newstr
+ else:
+ if new is None:
+ logmess = logmess + " Old value: " + oldstr
+ else:
+ logmess = logmess + " Old value: " + oldstr + ", new value: " + newstr
+ self.logger.log(logmess, "W")
+ # Extra verbose logging
+ if len(str(old)) > 21:
+ self.logger.log("Full old value: " + str(old), "I")
+ if len(str(new)) > 21:
+ self.logger.log("Full new value: " + str(new), "I")
+ otype = "string" if isinstance(old, (bytes, str)) else type(old).__name__ # To produce consistent reporting
+ ntype = "string" if isinstance(new, (bytes, str)) else type(new).__name__ # with Python 2 & 3
+ self.logger.log("Types: Old - " + otype + ", New - " + ntype, "I")
+
+class Ulayer(_Ucontainer):
+ def __init__(self, layername, layerdir, font):
+ self._contents = collections.OrderedDict()
+ self.dtree = font.dtree.subTree(layerdir)
+ font.dtree[layerdir].read = True
+ self.layername = layername
+ self.layerdir = layerdir
+ self.font = font
+ fulldir = os.path.join(font.ufodir, layerdir)
+ self.contents = Uplist(font=font, dirn=fulldir, filen="contents.plist")
+ self.dtree["contents.plist"].setinfo(read=True, fileObject=self.contents, fileType="xml")
+
+ if font.UFOversion == "3":
+ if 'layerinfo.plist' in self.dtree:
+ self.layerinfo = Uplist(font=font, dirn=fulldir, filen="layerinfo.plist")
+ self.dtree["layerinfo.plist"].setinfo(read=True, fileObject=self.layerinfo, fileType="xml")
+
+ for glyphn in sorted(self.contents.keys()):
+ glifn = self.contents[glyphn][1].text
+ if glifn in self.dtree:
+ glyph = Uglif(layer=self, filen=glifn)
+ self._contents[glyphn] = glyph
+ self.dtree[glifn].setinfo(read=True, fileObject=glyph, fileType="xml")
+ if glyph.name != glyphn:
+ super(Uglif, glyph).__setattr__("name", glyphn) # Need to use super to bypass normal glyph renaming logic
+ self.font.logger.log("Glyph names in glif and contents.plist did not match for " + glyphn + "; corrected", "W")
+ else:
+ self.font.logger.log("Missing glif " + glifn + " in " + fulldir, "S")
+
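+    # Prepare the layer for writing: optionally rename glifs, register the layer plists for output, convert glifs to format 1 if outputting UFO2, and ensure components precede contours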
+ def setForOutput(self):
+
+ UFOversion = self.font.outparams["UFOversion"]
+ convertg2f1 = True if UFOversion == "2" or self.font.outparams["format1Glifs"] else False
+ dtree = self.font.dtree.subTree(self.layerdir)
+ if self.font.outparams["renameGlifs"]: self.renameGlifs()
+
+ setFileForOutput(dtree, "contents.plist", self.contents, "xml")
+ if "layerinfo" in self.__dict__ and UFOversion == "3":
+ setFileForOutput(dtree, "layerinfo.plist", self.layerinfo, "xml")
+
+ for glyphn in self:
+ glyph = self._contents[glyphn]
+ if convertg2f1: glyph.convertToFormat1()
+ if glyph["advance"] is not None:
+ if glyph["advance"].width is None and glyph["advance"].height is None: glyph.remove("advance")
+ # Normalize so that, when both exist, components come before contour
+ outline = glyph["outline"]
+ if len(outline.components) > 0 and list(outline)[0] == "contour":
+ # Need to move components to the front...
+ contours = outline.contours
+ components = outline.components
+                oldcontours = list(contours) # Easiest way to move the components to the front is to remove all contours and append them back after the components
+ for contour in oldcontours: outline.removeobject(contour, "contour")
+ for contour in oldcontours: outline.appendobject(contour, "contour")
+
+ setFileForOutput(dtree, glyph.filen, glyph, "xml")
+
+ def renameGlifs(self):
+ namelist = []
+ for glyphn in sorted(self.keys()):
+ glyph = self._contents[glyphn]
+ filename = makeFileName(glyphn, namelist)
+ namelist.append(filename.lower())
+ filename += ".glif"
+ if filename != glyph.filen:
+ self.renameGlif(glyphn, glyph, filename)
+
+ def renameGlif(self, glyphn, glyph, newname):
+ self.font.logger.log("Renaming glif for " + glyphn + " from " + glyph.filen + " to " + newname, "I")
+ self.dtree.removedfiles[glyph.filen] = newname # Track so original glif does not get reported as invalid
+ glyph.filen = newname
+ self.contents[glyphn][1].text = newname
+
+ def addGlyph(self, glyph):
+ glyphn = glyph.name
+ if glyphn in self._contents: self.font.logger.log(glyphn + " already in font", "X")
+ self._contents[glyphn] = glyph
+ # Set glif name
+ glifn = makeFileName(glyphn)
+ names = []
+ while glifn in self.contents: # need to check for duplicate glif names
+ names.append(glifn)
+ glifn = makeFileName(glyphn, names)
+ glifn += ".glif"
+ glyph.filen = glifn
+ # Add to contents.plist and dtree
+ self.contents.addval(glyphn, "string", glifn)
+ self.dtree[glifn] = UT.dirTreeItem(read=False, added=True, fileObject=glyph, fileType="xml")
+
+ def delGlyph(self, glyphn):
+ self.dtree.removedfiles[self[glyphn].filen] = "deleted" # Track so original glif does not get reported as invalid
+ del self._contents[glyphn]
+ self.contents.remove(glyphn)
+
+
+class Uplist(ETU.xmlitem, _plist):
+ def __init__(self, font=None, dirn=None, filen=None, parse=True):
+ if dirn is None and font: dirn = font.ufodir
+ logger = font.logger if font else silfont.core.loggerobj()
+ ETU.xmlitem.__init__(self, dirn, filen, parse, logger)
+ self.type = "plist"
+ self.font = font
+ self.outparams = None
+ if filen and dirn: self.populate_dict()
+
+ def populate_dict(self):
+ self._contents.clear() # Clear existing contents, if any
+ pl = self.etree[0]
+ if pl.tag == "dict":
+ for i in range(0, len(pl), 2):
+ key = pl[i].text
+ self._contents[key] = [pl[i], pl[i + 1]] # The two elements for the item
+ else: # Assume array of 2 element arrays (eg layercontents.plist)
+ for i in range(len(pl)):
+ self._contents[i] = pl[i]
+
+
+class Uglif(ETU.xmlitem):
+ # Unlike plists, glifs can have multiples of some sub-elements (eg anchors) so create lists for those
+
+ def __init__(self, layer, filen=None, parse=True, name=None, format=None):
+ dirn = os.path.join(layer.font.ufodir, layer.layerdir)
+ ETU.xmlitem.__init__(self, dirn, filen, parse, layer.font.logger) # Will read item from file if dirn and filen both present
+ self.type = "glif"
+ self.layer = layer
+ self.format = format if format else '2'
+ self.name = name
+ self.outparams = None
+ self.glifElemOrder = self.layer.font.outparams["glifElemOrder"]
+ # Set initial values for sub-objects
+ for elem in self.glifElemOrder:
+ if elem in _glifElemMulti:
+ self._contents[elem] = []
+ else:
+ self._contents[elem] = None
+ if self.etree is not None: self.process_etree()
+
+ def __setattr__(self, name, value):
+ if name == "name" and getattr(self, "name", None): # Existing glyph name is being changed
+ oname = self.name
+            if value in self.layer._contents: self.layer.font.logger.log(value + " already in font", "X")
+ # Update the _contents dictionary
+ del self.layer._contents[oname]
+ self.layer._contents[value] = self
+ # Set glif name
+ glifn = makeFileName(value)
+ names = []
+ while glifn in self.layer.contents: # need to check for duplicate glif names
+ names.append(glifn)
+ glifn = makeFileName(value, names)
+ glifn += ".glif"
+
+ # Update to contents.plist, filen and dtree
+ self.layer.contents.remove(oname)
+ self.layer.contents.addval(value, "string", glifn)
+ self.layer.dtree.removedfiles[self.filen] = glifn # Track so original glif does not get reported as invalid
+ self.filen = glifn
+ self.layer.dtree[glifn] = UT.dirTreeItem(read=False, added=True, fileObject=self, fileType="xml")
+ super(Uglif, self).__setattr__(name, value)
+
+ def process_etree(self):
+ et = self.etree
+ self.name = getattrib(et, "name")
+ self.format = getattrib(et, "format")
+ if self.format is None:
+ if self.layer.font.UFOversion == "3":
+ self.format = '2'
+ else:
+ self.format = '1'
+ for i in range(len(et)):
+ element = et[i]
+ tag = element.tag
+ if not tag in self.glifElemOrder: self.layer.font.logger.log(
+ "Invalid element " + tag + " in glif " + self.name, "E")
+ if tag in _glifElemF1 or self.format == '2':
+ if tag in _glifElemMulti:
+ self._contents[tag].append(self.makeObject(tag, element))
+ else:
+ self._contents[tag] = self.makeObject(tag, element)
+
+ # Convert UFO2 style anchors to UFO3 anchors
+ if self._contents['outline'] is not None and self.format == "1":
+ for contour in self._contents['outline'].contours[:]:
+ if contour.UFO2anchor:
+ del contour.UFO2anchor["type"] # remove type="move"
+ self.add('anchor', contour.UFO2anchor)
+ self._contents['outline'].removeobject(contour, "contour")
+ if self._contents['outline'] is None: self.add('outline')
+
+ self.format = "2"
+
+ def rebuildET(self):
+ self.etree = ET.Element("glyph")
+ et = self.etree
+ et.attrib["name"] = self.name
+ et.attrib["format"] = self.format
+ # Insert sub-elements
+ for elem in self.glifElemOrder:
+ if elem in _glifElemF1 or self.format == "2": # Check element is valid for glif format
+ item = self._contents[elem]
+ if item is not None:
+ if elem in _glifElemMulti:
+ for object in item:
+ et.append(object.element)
+ else:
+ et.append(item.element)
+
+ def add(self, ename, attrib=None):
+ # Add an element and corresponding object to a glif
+ element = ET.Element(ename)
+ if attrib: element.attrib = attrib
+ if ename == "lib": ET.SubElement(element, "dict")
+ multi = True if ename in _glifElemMulti else False
+
+ if multi and ename not in self._contents:
+ self._contents[ename] = []
+
+ # Check element does not already exist for single elements
+ if ename in self._contents and not multi:
+ if self._contents[ename] is not None: self.layer.font.logger.log("Already an " + ename + " in glif", "X")
+
+ # Add new object
+ if multi:
+ self._contents[ename].append(self.makeObject(ename, element))
+ else:
+ self._contents[ename] = self.makeObject(ename, element)
+
+ def remove(self, ename, index=None, object=None):
+ # Remove object from a glif
+ # For multi objects, an index or object must be supplied to identify which
+ # to delete
+ if ename in _glifElemMulti:
+ item = self._contents[ename]
+ if index is None: index = item.index(object)
+ del item[index]
+ else:
+ self._contents[ename] = None
+
+ def convertToFormat1(self):
+ # Convert to a glif format of 1 (for UFO2) prior to writing out
+ self.format = "1"
+ # Change anchors to UFO2 style anchors. Sort anchors by anchor name first
+ anchororder = sorted(self._contents['anchor'], key=lambda x: x.element.attrib['name'])
+ for anchor in anchororder:
+ element = anchor.element
+            for attrn in ('color', 'identifier'): # Remove format 2 attributes ('color' and 'identifier' in glif format 2)
+ if attrn in element.attrib: del element.attrib[attrn]
+ element.attrib['type'] = 'move'
+ contelement = ET.Element("contour")
+ contelement.append(ET.Element("point", element.attrib))
+ self._contents['outline'].appendobject(Ucontour(self._contents['outline'], contelement), "contour")
+ self.remove('anchor', object=anchor)
+
+ def makeObject(self, type, element):
+ if type == 'advance': return Uadvance(self, element)
+ if type == 'unicode': return Uunicode(self, element)
+ if type == 'outline': return Uoutline(self, element)
+ if type == 'lib': return Ulib(self, element)
+ if type == 'note': return Unote(self, element)
+ if type == 'image': return Uimage(self, element)
+ if type == 'guideline': return Uguideline(self, element)
+ if type == 'anchor': return Uanchor(self, element)
+
+
+class Uadvance(Uelement):
+ def __init__(self, glif, element):
+ super(Uadvance, self).__init__(element)
+ self.glif = glif
+ if 'width' in element.attrib:
+ self.width = element.attrib[str('width')]
+ else:
+ self.width = None
+ if 'height' in element.attrib:
+ self.height = element.attrib[str('height')]
+ else:
+ self.height = None
+
+ def __setattr__(self, name, value):
+ if name in ('width', 'height'):
+ if value == "0" : value = None
+ if value is None:
+ if name in self.element.attrib: del self.element.attrib[name]
+ else:
+ value = str(value)
+ self.element.attrib[name] = value
+ super(Uadvance, self).__setattr__(name, value)
+
+class Uunicode(Uelement):
+ def __init__(self, glif, element):
+ super(Uunicode, self).__init__(element)
+ self.glif = glif
+ if 'hex' in element.attrib:
+ self.hex = element.attrib['hex']
+ else:
+ self.hex = ""
+ self.glif.logger.log("No unicode hex attribute for " + glif.name, "E")
+
+ def __setattr__(self, name, value):
+ if name == "hex": self.element.attrib['hex'] = value
+ super(Uunicode, self).__setattr__(name, value)
+
+
+class Unote(Uelement):
+ def __init__(self, glif, element):
+ self.glif = glif
+ super(Unote, self).__init__(element)
+
+
+class Uimage(Uelement):
+ def __init__(self, glif, element):
+ self.glif = glif
+ super(Uimage, self).__init__(element)
+
+
+class Uguideline(Uelement):
+ def __init__(self, glif, element):
+ self.glif = glif
+ super(Uguideline, self).__init__(element)
+
+
+class Uanchor(Uelement):
+ def __init__(self, glif, element):
+ self.glif = glif
+ super(Uanchor, self).__init__(element)
+
+
+class Uoutline(Uelement):
+ def __init__(self, glif, element):
+ super(Uoutline, self).__init__(element)
+ self.glif = glif
+ self.components = []
+ self.contours = []
+ for tag in self._contents:
+ if tag == "component":
+ for component in self._contents[tag]:
+ self.components.append(Ucomponent(self, component))
+ if tag == "contour":
+ for contour in self._contents[tag]:
+ self.contours.append(Ucontour(self, contour))
+
+ def removeobject(self, obj, typ):
+ super(Uoutline, self).remove(obj.element)
+ if typ == "component": self.components.remove(obj)
+ if typ == "contour": self.contours.remove(obj)
+
+ def replaceobject(self, oldobj, newobj, typ):
+ eindex = list(self.element).index(oldobj.element)
+ super(Uoutline, self).replace(eindex, newobj.element)
+ if typ == "component":
+ cindex = self.components.index(oldobj)
+ self.components[cindex]= newobj
+ if typ == "contour":
+ cindex = self.contours.index(oldobj)
+ self.contours[cindex]= newobj
+
+    def appendobject(self, item, typ): # Item can be a contour/component object, an element or an attribute dict
+ if isinstance(item, (Ucontour, Ucomponent)):
+ obj = item
+ else:
+ if isinstance(item, dict):
+ elem = ET.Element(typ)
+ elem.attrib = item
+ elif isinstance(item, ET.Element):
+ elem = item
+ else:
+ self.glif.logger.log("item should be dict, element, Ucontour or Ucomponent", "S")
+ if typ == 'component':
+ obj = Ucomponent(self,elem)
+ else:
+ obj = Ucontour(self,elem)
+ super(Uoutline, self).append(obj.element)
+ if typ == "component": self.components.append(obj)
+ if typ == "contour": self.contours.append(obj)
+
+ def insertobject(self, index, item, typ): # Needs updating to match appendobject
+ self.glif.logger.log("insertobject currently buggy so don't use!", "X")
+ # Bug is that index for super... should be different than components/contours.
+ # need to think through logic to sort this out...
+ # May need to take some logic from appendobject and some from replaceobj
+
+ #super(Uoutline, self).insert(index, obj.element)
+ #if typ == "component": self.components.insert(index, obj)
+ #if typ == "contour": self.contours.insert(index, obj)
+
+
+class Ucomponent(Uelement):
+ def __init__(self, outline, element):
+ super(Ucomponent, self).__init__(element)
+ self.outline = outline
+
+
+class Ucontour(Uelement):
+ def __init__(self, outline, element):
+ super(Ucontour, self).__init__(element)
+ self.outline = outline
+ self.UFO2anchor = None
+ points = self._contents['point']
+ # Identify UFO2-style anchor points
+ if len(points) == 1 and "type" in points[0].attrib:
+ if points[0].attrib["type"] == "move":
+ if "name" in points[0].attrib:
+ self.UFO2anchor = points[0].attrib
+ else:
+ self.outline.glif.layer.font.logger.log(
+ "Glyph " + self.outline.glif.name + " contains a single-point contour with no anchor name", "E")
+
+
+class Ulib(_Ucontainer, _plist):
+ # For glif lib elements; top-level lib files use Uplist
+ def __init__(self, glif, element):
+ self.glif = glif
+ self.element = element # needs both element and etree for compatibility
+ self.etree = element # with other glif components and _plist methods
+ self._contents = {}
+ self.reindex()
+
+ def reindex(self):
+ self._contents.clear() # Clear existing contents, if any
+ pl = self.element[0]
+ if pl.tag == "dict":
+ for i in range(0, len(pl), 2):
+ key = pl[i].text
+ self._contents[key] = [pl[i], pl[i + 1]] # The two elements for the item
+
+
+class UfeatureFile(UtextFile):
+ def __init__(self, font, dirn, filen):
+ super(UfeatureFile, self).__init__(font, dirn, filen)
+
+
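+# Serialize an object's etree using the supplied output parameters and write it to disk
+# only if the result differs from what is already there; returns True if the file was (re)written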
+def writeXMLobject(dtreeitem, params, dirn, filen, exists, fobject=False):
+    object = dtreeitem if fobject else dtreeitem.fileObject # Set fobject to True if a file object is passed rather than a dtreeitem
+ if object.outparams: params = object.outparams # override default params with object-specific ones
+ indentFirst = params["indentFirst"]
+ attribOrder = {}
+ if object.type in params['attribOrders']: attribOrder = params['attribOrders'][object.type]
+ if object.type == "plist":
+ indentFirst = params["plistIndentFirst"]
+ object.etree.attrib[".doctype"] = 'plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"'
+
+ # Format ET data if any data parameters are set
+ if params["sortDicts"] or params["precision"] is not None: normETdata(object.etree, params, type=object.type)
+
+ etw = ETU.ETWriter(object.etree, attributeOrder=attribOrder, indentIncr=params["indentIncr"],
+ indentFirst=indentFirst, indentML=params["indentML"], precision=params["precision"],
+ floatAttribs=params["floatAttribs"], intAttribs=params["intAttribs"])
+ object.outxmlstr=etw.serialize_xml()
+ # Now we have the output xml, need to compare with existing item's xml, if present
+ changed = True
+
+ if exists: # File already on disk
+ if exists == "same": # Output and input locations the same
+ oxmlstr = object.inxmlstr
+ else: # Read existing XML from disk
+ oxmlstr = ""
+ try:
+ oxml = io.open(os.path.join(dirn, filen), "r", encoding="utf-8")
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ for line in oxml.readlines():
+ oxmlstr += line
+ oxml.close()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", UnicodeWarning)
+ if oxmlstr == object.outxmlstr: changed = False
+
+ if changed: object.write_to_file(dirn, filen)
+ if not fobject: dtreeitem.written = True # Mark as True, even if not changed - the file should still be there!
+ return changed # Boolean to indicate file updated on disk
+
+
+def setFileForOutput(dtree, filen, fileObject, fileType): # Put details in dtree, creating item if needed
+ if filen not in dtree:
+ dtree[filen] = UT.dirTreeItem()
+ dtree[filen].added = True
+ dtree[filen].setinfo(fileObject=fileObject, fileType=fileType, towrite=True)
+
+
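+# Walk the desired directory tree and any existing output tree in parallel (keys sorted with a type
+# prefix), writing changed files, copying directories and deleting items that are no longer wanted;
+# returns True if anything on disk changed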
+def writeToDisk(dtree, outdir, font, odtree=None, logindent="", changes = False):
+ if odtree is None: odtree = {}
+ # Make lists of items in dtree and odtree with type prepended for sorting and comparison purposes
+ dtreelist = []
+ for filen in dtree: dtreelist.append(dtree[filen].type + filen)
+ dtreelist.sort()
+ odtreelist = []
+ if odtree == {}:
+ locationtype = "Empty"
+ else:
+ if outdir == font.ufodir:
+ locationtype = "Same"
+ else:
+ locationtype = "Different"
+ for filen in odtree: odtreelist.append(odtree[filen].type + filen)
+ odtreelist.sort()
+
+ okey = odtreelist.pop(0) if odtreelist != [] else None
+
+ for key in dtreelist:
+ type = key[0:1]
+ filen = key[1:]
+ dtreeitem = dtree[filen]
+
+ while okey and okey < key: # Item in output UFO no longer needed
+ ofilen = okey[1:]
+ if okey[0:1] == "f":
+ logmess = 'Deleting ' + ofilen + ' from existing output UFO'
+ os.remove(os.path.join(outdir, ofilen))
+ else:
+ logmess = 'Deleting directory ' + ofilen + ' from existing output UFO'
+ shutil.rmtree(os.path.join(outdir, ofilen))
+            if ofilen not in dtree.removedfiles: font.logger.log(logmess, "W") # No need to log for renamed files
+ okey = odtreelist.pop(0) if odtreelist != [] else None
+
+ if key == okey:
+ exists = locationtype
+ okey = odtreelist.pop(0) if odtreelist != [] else None # Ready for next loop
+ else:
+ exists = False
+
+ if dtreeitem.type == "f":
+ if dtreeitem.towrite:
+ font.logger.log(logindent + filen, "V")
+ if dtreeitem.fileType == "xml":
+ if dtreeitem.fileObject: # Only write if object has items
+ if dtreeitem.fileObject.type == "glif":
+ glif = dtreeitem.fileObject
+ if glif["lib"] is not None: # Delete lib if no items in it
+ if glif["lib"].__len__() == 0:
+ glif.remove("lib")
+ # Sort UFO3 anchors by name (UFO2 anchors will have been sorted on conversion)
+ glif["anchor"].sort(key=lambda anchor: anchor.element.get("name"))
+ glif.rebuildET()
+ result = writeXMLobject(dtreeitem, font.outparams, outdir, filen, exists)
+ if result: changes = True
+ else: # Delete existing item if the current object is empty
+ if exists:
+ font.logger.log('Deleting empty item ' + filen + ' from existing output UFO', "I")
+ os.remove(os.path.join(outdir, filen))
+ changes = True
+ elif dtreeitem.fileType == "text":
+ dtreeitem.fileObject.write(dtreeitem, outdir, filen, exists)
+ ## Need to add code for other file types
+ else:
+ if filen in dtree.removedfiles:
+ if exists:
+ os.remove(os.path.join(outdir, filen)) # Silently remove old file for renamed files
+ changes = True
+ exists = False
+ else: # File should not have been in original UFO
+ if exists == "same":
+ font.logger.log('Deleting ' + filen + ' from existing UFO', "W")
+ os.remove(os.path.join(outdir, filen))
+ changes = True
+ exists = False
+ else:
+ if not dtreeitem.added:
+ font.logger.log('Skipping invalid file ' + filen + ' from input UFO', "W")
+ if exists:
+ font.logger.log('Deleting ' + filen + ' from existing output UFO', "W")
+ os.remove(os.path.join(outdir, filen))
+ changes = True
+
+ else: # Must be directory
+ if not dtreeitem.read:
+ font.logger.log(logindent + "Skipping invalid input directory " + filen, "W")
+ if exists:
+ font.logger.log('Deleting directory ' + filen + ' from existing output UFO', "W")
+ shutil.rmtree(os.path.join(outdir, filen))
+ changes = True
+ continue
+ font.logger.log(logindent + "Processing " + filen + " directory", "I")
+ subdir = os.path.join(outdir, filen)
+ if isinstance(dtreeitem.fileObject, Udirectory):
+ dtreeitem.fileObject.write(dtreeitem, outdir)
+ else:
+ if not os.path.exists(subdir): # If subdir does not exist, create it
+ try:
+ os.mkdir(subdir)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+ changes = True
+
+ if exists:
+ subodtree = odtree[filen].dirtree
+ else:
+ subodtree = {}
+ subindent = logindent + " "
+ changes = writeToDisk(dtreeitem.dirtree, subdir, font, subodtree, subindent, changes)
+ if os.listdir(subdir) == []:
+ os.rmdir(subdir) # Delete directory if empty
+ changes = True
+
+ while okey: # Any remaining items in the odtree list are no longer needed
+ ofilen = okey[1:]
+ if okey[0:1] == "f":
+ logmess = 'Deleting ' + ofilen + ' from existing output UFO'
+ os.remove(os.path.join(outdir, ofilen))
+ changes = True
+ else:
+ logmess = 'Deleting directory ' + ofilen + ' from existing output UFO'
+ shutil.rmtree(os.path.join(outdir, ofilen))
+ changes = True
+ if ofilen not in dtree.removedfiles: font.logger.log(logmess, "W") # No need to log warning for removed files
+ okey = odtreelist.pop(0) if odtreelist != [] else None
+ return changes
+
+def normETdata(element, params, type):
+ # Recursively normalise the data in an ElementTree element
+ for subelem in element:
+ normETdata(subelem, params, type)
+
+ precision = params["precision"]
+ if precision is not None:
+ if element.tag in ("integer", "real"):
+ num = round(float(element.text), precision)
+ if num == int(num):
+ element.tag = "integer"
+ element.text = "{}".format(int(num))
+ else:
+ element.tag = "real"
+ element.text = "{}".format(num)
+
+ if params["sortDicts"] and element.tag == "dict":
+ edict = {}
+ elist = []
+ for i in range(0, len(element), 2):
+ edict[element[i].text] = [element[i], element[i + 1]]
+ elist.append(element[i].text)
+ keylist = sorted(edict.keys())
+ if elist != keylist:
+ i = 0
+ for key in keylist:
+ element[i] = edict[key][0]
+ element[i + 1] = edict[key][1]
+ i = i + 2
+
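+# A minimal, hypothetical illustration of the precision handling above: plist <real>/<integer>
+# values are rounded and whole numbers are re-tagged as <integer> (standalone sketch, not
+# part of this module):
+#
+#   from xml.etree import ElementTree as ET
+#   el = ET.fromstring("<real>2.0000001</real>")
+#   num = round(float(el.text), 6)                      # precision = 6
+#   if num == int(num): el.tag, el.text = "integer", str(int(num))
+#   else: el.text = str(num)
+#   ET.tostring(el)                                     # -> b'<integer>2</integer>'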
+
+def getattrib(element, attrib): return element.attrib[attrib] if attrib in element.attrib else None
+
+
+def makeFileName(name, namelist=None):
+ if namelist is None: namelist = []
+ # Replace illegal characters and add _ after UC letters
+ newname = ""
+ for x in name:
+ if x in _illegalChars:
+ x = "_"
+ else:
+ if x != x.lower(): x += "_"
+ newname += x
+ # Replace initial . if present
+ if newname[0] == ".": newname = "_" + newname[1:]
+ parts = []
+ for part in newname.split("."):
+ if part in _reservedNames:
+ part = "_" + part
+ parts.append(part)
+ name = ".".join(parts)
+ if name.lower() in namelist: # case-insensitive name already used, so add a suffix
+ newname = None
+ i = 1
+ while newname is None:
+ test = name + '{0:015d}'.format(i)
+ if not (test.lower() in namelist): newname = test
+ i += 1
+ name = newname
+ return name
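+
+# Illustrative examples only, assuming the usual UFO name-mangling data in _illegalChars
+# and _reservedNames defined earlier in this module:
+#
+#   makeFileName("A.alt")      # -> 'A_.alt'            (underscore added after the upper-case letter)
+#   makeFileName(".notdef")    # -> '_notdef'           (leading dot replaced)
+#   makeFileName("a", ["a"])   # -> 'a000000000000001'  (suffix added to avoid a case-insensitive clash)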
diff --git a/lib/silfont/util.py b/lib/silfont/util.py
new file mode 100755
index 0000000..e8b020d
--- /dev/null
+++ b/lib/silfont/util.py
@@ -0,0 +1,374 @@
+#!/usr/bin/env python
+'General classes and functions for use in pysilfont scripts'
+__url__ = 'http://github.com/silnrsi/pysilfont'
+__copyright__ = 'Copyright (c) 2014 SIL International (http://www.sil.org)'
+__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
+__author__ = 'David Raymond'
+
+import os, subprocess, difflib, sys, io
+from silfont.core import execute
+from pkg_resources import resource_filename
+from csv import reader as csvreader
+
+try:
+ from fontTools.ttLib import TTFont
+except Exception as e:
+ TTFont = None
+
+class dirTree(dict) :
+ """ An object to hold list of all files and directories in a directory
+ with option to read sub-directory contents into dirTree objects.
+ Iterates through readSub levels of subfolders
+ Flags to keep track of changes to files etc"""
+ def __init__(self,dirn,readSub = 9999) :
+ self.removedfiles = {} # Dict of files that have been renamed or deleted since reading from disk
+ for name in os.listdir(dirn) :
+ if name[-1:] == "~" : continue
+ item=dirTreeItem()
+ if os.path.isdir(os.path.join(dirn, name)) :
+ item.type = "d"
+ if readSub :
+ item.dirtree = dirTree(os.path.join(dirn,name),readSub-1)
+ self[name] = item
+
+ def subTree(self,path) : # Returns dirTree object for a subtree based on subfolder name(s)
+ # 'path' can be supplied as either a relative path (eg "subf/subsubf") or an array (eg ['subf','subsubf'])
+ if type(path) in (bytes, str): path = self._split(path)
+ subf=path[0]
+ if subf in self:
+ dtree = self[subf].dirtree
+ else : return None
+
+ if len(path) == 1 :
+ return dtree
+ else :
+ path.pop(0)
+ return dtree.subTree(path)
+
+ def _split(self,path) : # Turn a relative path into an array of subfolders
+ npath = [os.path.split(path)[1]]
+ while os.path.split(path)[0] :
+ path = os.path.split(path)[0]
+ npath.insert(0,os.path.split(path)[1])
+ return npath
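+
+# A hypothetical usage sketch (the path is a placeholder): build a tree for a UFO on disk,
+# then navigate to its glyphs sub-directory:
+#
+#   dtree = dirTree("path/to/font.ufo")       # Read the whole directory tree
+#   glyphtree = dtree.subTree("glyphs")       # dirTree for the glyphs/ sub-directory
+#   filenames = [n for n in glyphtree if glyphtree[n].type == "f"]   # Files within glyphs/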
+
+class dirTreeItem(object) :
+
+ def __init__(self, type = "f", dirtree = None, read = False, added = False, changed = False, towrite = False, written = False, fileObject = None, fileType = None, flags = {}) :
+ self.type = type # "d" or "f"
+ self.dirtree = dirtree # dirtree for a sub-directory
+ # Remaining properties are for calling scripts to use as they choose to track actions etc
+ self.read = read # Item has been read by the script
+ self.added = added # Item has been added to dirtree, so does not exist on disk
+ self.changed = changed # Item has been changed, so may need updating on disk
+ self.towrite = towrite # Item should be written out to disk
+ self.written = written # Item has been written to disk
+ self.fileObject = fileObject # An object representing the file
+ self.fileType = fileType # The type of the file object
+ self.flags = {} # Any other flags a script might need
+
+ def setinfo(self, read = None, added = None, changed = None, towrite = None, written = None, fileObject = None, fileType = None, flags = None) :
+ if read : self.read = read
+ if added : self.added = added
+ if changed : self.changed = changed
+ if towrite: self.towrite = towrite
+ if written : self.written = written
+ if fileObject is not None : self.fileObject = fileObject
+ if fileType : self.fileType = fileType
+ if flags : self.flags = flags
+
+class ufo_diff(object): # For diffing 2 ufos as part of testing
+ # returncodes:
+ # 0 - ufos are the same
+ # 1 - Differences were found
+ # 2 - Errors running the difference (eg can't open file)
+ # diff - text of the differences
+ # errors - text of the errors
+
+ def __init__(self, ufo1, ufo2, ignoreOHCtime=True):
+
+ diffcommand = ["diff", "-r", "-c1", ufo1, ufo2]
+
+ # By default, if the only difference in fontinfo is the openTypeHeadCreated timestamp, ignore it
+
+ if ignoreOHCtime: # Exclude fontinfo if only diff is openTypeHeadCreated
+ # Otherwise leave it in so differences are reported by main diff
+ fi1 = os.path.join(ufo1,"fontinfo.plist")
+ fi2 = os.path.join(ufo2, "fontinfo.plist")
+ fitest = subprocess.Popen(["diff", fi1, fi2, "-c1"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ text = fitest.communicate()
+ if fitest.returncode == 1:
+ difftext = text[0].decode("utf-8").split("\n")
+ if difftext[4].strip() == "<key>openTypeHeadCreated</key>" and len(difftext) == 12:
+ diffcommand.append("--exclude=fontinfo.plist")
+
+ # Now do the main diff
+ test = subprocess.Popen(diffcommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ text = test.communicate()
+ self.returncode = test.returncode
+ self.diff = text[0].decode("utf-8")
+ self.errors = text[1]
+
+ def print_text(self): # Print diff info or errors from the diffcommand
+ if self.returncode == 0:
+ print("UFOs are the same")
+ elif self.returncode == 1:
+ print("UFOs are different")
+ print(self.diff)
+ elif self.returncode == 2:
+ print("Failed to compare UFOs")
+ print(self.errors)
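+
+# A hypothetical usage sketch (paths are placeholders):
+#
+#   diff = ufo_diff("local/testresults/font.ufo", "tests/reference/font.ufo")
+#   if diff.returncode: diff.print_text()     # Reports the differences, or errors from diff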
+
+class text_diff(object): # For diffing 2 text files with option to ignore common timestamps
+ # See ufo_diff for class attribute details
+
+ def __init__(self, file1, file2, ignore_chars=0, ignore_firstlinechars = 0):
+ # ignore_chars - characters to ignore from left of each line; typically 20 for timestamps
+ # ignore_firstlinechars - as above, but just for first line, eg for initial comment in csv files, typically 22
+ errors = []
+ try:
+ f1 = [x[ignore_chars:-1].replace('\\','/') for x in io.open(file1, "r", encoding="utf-8").readlines()]
+ except IOError:
+ errors.append("Can't open " + file1)
+ try:
+ f2 = [x[ignore_chars:-1].replace('\\','/') for x in io.open(file2, "r", encoding="utf-8").readlines()]
+ except IOError:
+ errors.append("Can't open " + file2)
+ if errors == []: # Indicates both files were opened OK
+ if ignore_firstlinechars: # Ignore first line for files with first line comment with timestamp
+ f1[0] = f1[0][ignore_firstlinechars:-1]
+ f2[0] = f2[0][ignore_firstlinechars:-1]
+ self.errors = ""
+ self.diff = "\n".join([x for x in difflib.unified_diff(f1, f2, file1, file2, n=0)])
+ self.returncode = 0 if self.diff == "" else 1
+ else:
+ self.diff = ""
+ self.errors = "\n".join(errors)
+ self.returncode = 2
+
+ def print_text(self): # Print diff info or errors from the unified_diff command
+ if self.returncode == 0:
+ print("Files are the same")
+ elif self.returncode == 1:
+ print("Files are different")
+ print(self.diff)
+ elif self.returncode == 2:
+ print("Failed to compare Files")
+ print(self.errors)
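+
+# A hypothetical usage sketch (file names are placeholders) - compare two log files,
+# ignoring the 20-character timestamp at the start of each line:
+#
+#   diff = text_diff("local/testresults/test.log", "tests/reference/test.log", ignore_chars=20)
+#   if diff.returncode: diff.print_text()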
+
+class ttf_diff(object): # For diffing 2 ttf files. Differences are not listed
+ # See ufo_diff for class attribute details
+
+ def __init__(self, file1, file2):
+ errors=[]
+ if TTFont is None:
+ self.diff=""
+ self.errors="Testing failed - class ttf_diff requires fontTools to be installed"
+ self.returncode = 2
+ return
+
+ # Open the ttf files
+ try:
+ font1 = TTFont(file1)
+ except Exception as e:
+ errors.append("Can't open " + file1)
+ errors.append(e.__str__())
+ try:
+ font2 = TTFont(file2)
+ except Exception as e:
+ errors.append("Can't open " + file2)
+ errors.append(e.__str__())
+ if errors:
+ self.diff = ""
+ self.errors = "\n".join(errors)
+ self.returncode = 2
+ return
+
+ # Create ttx xml strings from each font
+ ttx1 = _ttx()
+ ttx2 = _ttx()
+ font1.saveXML(ttx1)
+ font2.saveXML(ttx2)
+
+ if ttx1.txt() == ttx2.txt():
+ self.diff = ""
+ self.errors = ""
+ self.returncode = 0
+ else:
+ self.diff = file1 + " and " + file2 + " are different - compare with external tools"
+ self.errors = ""
+ self.returncode = 1
+
+ def print_text(self): # Print diff info or errors from the ttx comparison
+ if self.returncode == 0:
+ print("Files are the same")
+ elif self.returncode == 1:
+ print("Files are different")
+ print(self.diff)
+ elif self.returncode == 2:
+ print("Failed to compare Files")
+ print(self.errors)
+
+def test_run(tool, commandline, testcommand, outfont, exp_errors, exp_warnings): # Used by tests to run commands
+ sys.argv = commandline.split(" ")
+ (args, font) = execute(tool, testcommand.doit, testcommand.argspec, chain="first")
+ if outfont:
+ if tool in ("FT", "FP"):
+ font.save(outfont)
+ else: # Must be a Pysilfont Ufont
+ font.write(outfont)
+ args.logger.logfile.close() # Need to close the log so that the diff test can be run
+ exp_counts = (exp_errors, exp_warnings)
+ actual_counts = (args.logger.errorcount, args.logger.warningcount)
+ result = exp_counts == actual_counts
+ if not result: print("Mis-match of logger errors/warnings: " + str(exp_counts) + " vs " + str(actual_counts))
+ return result
+
+def test_diffs(dirname, testname, extensions): # Used by tests to run diffs on results files based on extensions
+ result = True
+ for ext in extensions:
+ resultfile = os.path.join("local/testresults", dirname, testname + ext)
+ referencefile = os.path.join("tests/reference", dirname, testname + ext)
+ if ext == ".ufo":
+ diff = ufo_diff(resultfile, referencefile)
+ elif ext == ".csv":
+ diff = text_diff(resultfile, referencefile, ignore_firstlinechars=22)
+ elif ext in (".log", ".lg"):
+ diff = text_diff(resultfile, referencefile, ignore_chars=20)
+ elif ext == ".ttf":
+ diff = ttf_diff(resultfile, referencefile)
+ else:
+ diff = text_diff(resultfile, referencefile)
+
+ if diff.returncode:
+ diff.print_text()
+ result = False
+ return result
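+
+# A hypothetical sketch of how a pytest-style test might use the two helpers above
+# (the script module, command line and paths are placeholders for illustration):
+#
+#   import silfont.scripts.psfnormalize as testcommand
+#
+#   def test_run_normalize():
+#       assert test_run("UFO", "psfnormalize tests/input/font.ufo", testcommand,
+#                       "local/testresults/normalize/font.ufo", 0, 0)
+#
+#   def test_diffs_normalize():
+#       assert test_diffs("normalize", "font", [".ufo", ".log"])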
+
+class _ttx(object): # Used by ttf_diff()
+
+ def __init__(self):
+ self.lines = []
+
+ def write(self, line):
+ if not("<checkSumAdjustment value=" in line or "<modified value=" in line) :
+ self.lines.append(line)
+
+ def txt(self):
+ return "".join(self.lines)
+
+# Functions for mapping color definitions to names, based on the colors provided by app UIs
+namestocolorslist = {
+ 'g_red': '0.85,0.26,0.06,1', # g_ names refer to colors definable using the Glyphs UI
+ 'g_orange': '0.99,0.62,0.11,1',
+ 'g_brown': '0.65,0.48,0.2,1',
+ 'g_yellow': '0.97,1,0,1',
+ 'g_light_green': '0.67,0.95,0.38,1',
+ 'g_dark_green': '0.04,0.57,0.04,1',
+ 'g_cyan': '0,0.67,0.91,1',
+ 'g_blue': '0.18,0.16,0.78,1',
+ 'g_purple': '0.5,0.09,0.79,1',
+ 'g_pink': '0.98,0.36,0.67,1',
+ 'g_light_gray': '0.75,0.75,0.75,1',
+ 'g_dark_gray': '0.25,0.25,0.25,1'
+}
+colorstonameslist = {v: k for k, v in namestocolorslist.items()}
+
+def nametocolor(color, default=None):
+ global namestocolorslist
+ if default is not None:
+ return namestocolorslist.get(color,default)
+ else:
+ return namestocolorslist.get(color)
+
+def colortoname(color, default=None):
+ global colorstonameslist
+ if default:
+ return colorstonameslist.get(color,default)
+ else:
+ return colorstonameslist.get(color)
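+
+# Illustrative examples, using values from the table above:
+#
+#   nametocolor("g_red")                # -> '0.85,0.26,0.06,1'
+#   colortoname("0.85,0.26,0.06,1")     # -> 'g_red'
+#   colortoname("1,1,1,1", "unnamed")   # -> 'unnamed' (not in the table, so the default is returned)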
+
+def parsecolors(colors, single = False, allowspecial = False): # Process a list of colors - designed for handling command-line input
+ # Colors can be in RGBA format (eg (0.25,0.25,0.25,1)) or text name (eg g_dark_gray), separated by commas.
+ # Function returns a list of tuples, one per color, (RGBA, name, logcolor, original color after splitting)
+ # If the color can't be parsed, RGBA will be None and logcolor will contain an error message
+ # If single is set, just return one tuple rather than a list of tuples
+ # The special values 'none' and 'leave' are also accepted if allowspecial is set
+
+ # First tidy up the input string
+ cols = colors.lower().replace(" ", "")
+
+ if single: # If just one color, don't need to split the string and can allow for RGBA without brackets
+ splitcols = ["(" + cols + ")"] if cols[0] in ("0", "1") else [cols]
+ else:
+ # RGBA colors contain commas inside their parentheses, so we can't just split on commas; insert @ markers and split on those first
+ cols = cols.replace(",(", "@(").replace("),", ")@").split("@")
+ splitcols = []
+ for color in cols:
+ if color[0] == "(":
+ splitcols.append(color)
+ else:
+ splitcols = splitcols + color.split(',')
+ parsed = []
+ for splitcol in splitcols:
+ if allowspecial and splitcol in ("none", "leave"):
+ RGBA = ""
+ name = splitcol
+ logcolor = splitcol
+ else:
+ errormess = ""
+ name = ""
+ RGBA = ""
+ if splitcol[0] == '(':
+ values = splitcol[1:-1].split(',') # Remove parentheses then split on commas
+ if len(values) != 4:
+ errormess = "RGBA colours must have 4 values"
+ else:
+ for i in (0, 1, 2, 3):
+ values[i] = float(values[i])
+ if values[i] < 0 or values[i] > 1: errormess = "RGBA values must be between 0 and 1"
+ if values[0] + values[1] + values[2] == 0: errormess = "At least one RGB value must be non-zero"
+ if values[3] == 0: errormess = "With RGBA, A must not be zero"
+ if errormess == "":
+ for i in (0, 1, 2, 3):
+ v = values[i]
+ if v == int(v): v = int(v) # Convert integers to int type for correct formatting with str()
+ RGBA += str(v) + ","
+ RGBA = RGBA[0:-1] # Strip trailing comma
+ name = colortoname(RGBA, "")
+ else:
+ name = splitcol
+ RGBA = nametocolor(name)
+ if RGBA is None: errormess = "Invalid color name"
+ if errormess:
+ logcolor = "Invalid color: " + splitcol + " - " + errormess
+ RGBA = None
+ name = None
+ else:
+ logcolor = RGBA
+ if name: logcolor += " (" + name + ")"
+ parsed.append((RGBA, name, logcolor,splitcol))
+ if single: parsed = parsed[0]
+
+ return parsed
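+
+# Illustrative examples of parsecolors(), with expected results based on the logic above:
+#
+#   parsecolors("g_red", single=True)
+#   # -> ('0.85,0.26,0.06,1', 'g_red', '0.85,0.26,0.06,1 (g_red)', 'g_red')
+#   parsecolors("g_red,(0.18,0.16,0.78,1)")
+#   # -> [('0.85,0.26,0.06,1', 'g_red', '0.85,0.26,0.06,1 (g_red)', 'g_red'),
+#   #     ('0.18,0.16,0.78,1', 'g_blue', '0.18,0.16,0.78,1 (g_blue)', '(0.18,0.16,0.78,1)')]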
+
+# Provide dict of required characters which match the supplied list of sets - sets can be basic, rtl or sil
+def required_chars(sets="basic"):
+ if type(sets) == str: sets = (sets,) # Convert single string to a tuple
+ rcfile = open(resource_filename('silfont','data/required_chars.csv'))
+ rcreader = csvreader(rcfile)
+ next(rcreader) # Skip the first line, which contains the headers
+ rcdict = {}
+ for line in rcreader:
+ unicode = line[0][2:]
+ item = {
+ "ps_name": line[1],
+ "glyph_name": line[2],
+ "sil_set": line[3],
+ "rationale": line[4],
+ "notes": line[5]
+ }
+ if item["sil_set"] in sets: rcdict[unicode] = item
+ return rcdict
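+
+# A hypothetical usage sketch - fetch the basic and RTL sets and look items up by USV
+# (keys are the CSV's first column with its first two characters, eg "U+", stripped):
+#
+#   rcdict = required_chars(("basic", "rtl"))
+#   usvs = sorted(rcdict.keys())
+#   spacename = rcdict["0020"]["glyph_name"] if "0020" in rcdict else None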
+