diff --git a/ConnectionSearcher.py b/ConnectionSearcher.py index d7c9b8e..6474cee 100755 --- a/ConnectionSearcher.py +++ b/ConnectionSearcher.py @@ -1,48 +1,48 @@ -#! /usr/bin/python +#!/usr/local/bin/python3 # -*- coding: utf-8 -*- -from lib import biplist import os import json import sys import glob +import plistlib from workflow import Workflow, ICON_WARNING, MATCH_SUBSTRING -reload(sys) -sys.setdefaultencoding('utf8') - def read_connections(): # Read preferences file - preferencesPath = os.path.join(os.environ["HOME"], "Library", "Containers", "com.p5sys.jump.mac.viewer", "Data", "Library", "Preferences", "com.p5sys.jump.mac.viewer.plist") + preferences_path = os.path.join(os.environ["HOME"], "Library", "Containers", "com.p5sys.jump.mac.viewer", "Data", "Library", "Preferences", "com.p5sys.jump.mac.viewer.plist") # if preference path not exists, try this location: - if not os.path.isfile(preferencesPath): - preferencesPath = os.path.join(os.environ["HOME"], "Library", "Preferences", "com.p5sys.jump.mac.viewer.web.plist") - plist = biplist.readPlist(preferencesPath) - # Extract profile data from plist - connectionPath = plist.get('path where JSON .jump files are stored') - if connectionPath.startswith('~'): - connectionPath = os.environ["HOME"] + connectionPath[1:] - jumps = glob.glob(connectionPath + "/Computer - *.jump") + if not os.path.isfile(preferences_path): + preferences_path = os.path.join(os.environ["HOME"], "Library", "Preferences", "com.p5sys.jump.mac.viewer.web.plist") + connections = [] - for jump in jumps: - f = open(jump) - json_content = f.read() - f.close() - dict_content = json.loads(json_content) - icon = None - if dict_content['Icon']: - icon = "/Applications/Jump Desktop.app/Contents/Resources/%s.png" % dict_content['Icon'] - command = 'jump://?protocol=%s&host=%s&username=%s' % \ - (protocol_switch(dict_content['ProtocolTypeCode']), dict_content['TcpHostName'], dict_content['Username']) - - connections.append({ - 'name': 
dict_content['DisplayName'], - 'command': command, - 'path': jump, - 'icon': icon, - 'tags': dict_content['Tags'] - }) + with open(preferences_path, 'rb') as fp: + plist = plistlib.load(fp) + # Extract profile data from plist + connection_path = plist.get('path where JSON .jump files are stored') + if connection_path.startswith('~'): + connection_path = os.environ["HOME"] + connection_path[1:] + jumps = glob.glob(connection_path + "/Computer - *.jump") + connections = [] + for jump in jumps: + f = open(jump) + json_content = f.read() + f.close() + dict_content = json.loads(json_content) + icon = None + if dict_content['Icon']: + icon = "/Applications/Jump Desktop.app/Contents/Resources/%s.png" % dict_content['Icon'] + command = 'jump://?protocol=%s&host=%s&username=%s' % \ + (protocol_switch(dict_content['ProtocolTypeCode']), dict_content['TcpHostName'], dict_content['Username']) + + connections.append({ + 'name': dict_content['DisplayName'], + 'command': command, + 'path': jump, + 'icon': icon, + 'tags': dict_content['Tags'] + }) return connections @@ -91,4 +91,3 @@ def main(wf): if __name__ == u"__main__": wf = Workflow() sys.exit(wf.run(main)) - diff --git a/lib/__init__.py b/lib/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/lib/biplist-1.0.1.dist-info/DESCRIPTION.rst b/lib/biplist-1.0.1.dist-info/DESCRIPTION.rst deleted file mode 100644 index 24476cf..0000000 --- a/lib/biplist-1.0.1.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,8 +0,0 @@ -`biplist` is a binary plist parser/generator for Python. - -Binary Property List (plist) files provide a faster and smaller serialization -format for property lists on OS X. This is a library for generating binary -plists which can be read by OS X, iOS, or other clients. - -This module requires Python 2.6 or higher or Python 3.4 or higher. 
- diff --git a/lib/biplist-1.0.1.dist-info/INSTALLER b/lib/biplist-1.0.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/lib/biplist-1.0.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/lib/biplist-1.0.1.dist-info/METADATA b/lib/biplist-1.0.1.dist-info/METADATA deleted file mode 100644 index 8eb9431..0000000 --- a/lib/biplist-1.0.1.dist-info/METADATA +++ /dev/null @@ -1,26 +0,0 @@ -Metadata-Version: 2.0 -Name: biplist -Version: 1.0.1 -Summary: biplist is a library for reading/writing binary plists. -Home-page: https://bitbucket.org/wooster/biplist -Author: Andrew Wooster -Author-email: andrew@planetaryscale.com -License: BSD -Download-URL: https://bitbucket.org/wooster/biplist/downloads/biplist-1.0.1.tar.gz -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup - -`biplist` is a binary plist parser/generator for Python. - -Binary Property List (plist) files provide a faster and smaller serialization -format for property lists on OS X. This is a library for generating binary -plists which can be read by OS X, iOS, or other clients. - -This module requires Python 2.6 or higher or Python 3.4 or higher. 
- diff --git a/lib/biplist-1.0.1.dist-info/RECORD b/lib/biplist-1.0.1.dist-info/RECORD deleted file mode 100644 index 5245292..0000000 --- a/lib/biplist-1.0.1.dist-info/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -biplist/__init__.py,sha256=EJYSOaxRsEs2E2VpmfgljCs7yL3Gzrvak-p3QH2Wvx4,32518 -biplist-1.0.1.dist-info/DESCRIPTION.rst,sha256=kCcGPDJWuzI_Us6YiFsVARgWFr4YRM3yP2d5AeeoBik,337 -biplist-1.0.1.dist-info/METADATA,sha256=1_sRrs-YUCMlt92W6B05_lmsyLgkF5wQcyKZGixDttk,1041 -biplist-1.0.1.dist-info/RECORD,, -biplist-1.0.1.dist-info/WHEEL,sha256=BtVfdXUcEYLcFjOkbIrCFRyXU4qszVPt-E9o3RWkSNw,93 -biplist-1.0.1.dist-info/metadata.json,sha256=5jp34A8el0ckFeToroyL3FWBkFvhueEdVGzR0B06lDA,904 -biplist-1.0.1.dist-info/top_level.txt,sha256=TT2vZtYziYVpLawg8KXvf6vAOlwDVwsHk0OpMih9Mk8,8 -biplist-1.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -biplist/__init__.pyc,, diff --git a/lib/biplist-1.0.1.dist-info/WHEEL b/lib/biplist-1.0.1.dist-info/WHEEL deleted file mode 100644 index 5a93381..0000000 --- a/lib/biplist-1.0.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp27-none-any - diff --git a/lib/biplist-1.0.1.dist-info/metadata.json b/lib/biplist-1.0.1.dist-info/metadata.json deleted file mode 100644 index 2488b55..0000000 --- a/lib/biplist-1.0.1.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup"], "download_url": "https://bitbucket.org/wooster/biplist/downloads/biplist-1.0.1.tar.gz", "extensions": {"python.details": {"contacts": [{"email": "andrew@planetaryscale.com", "name": "Andrew Wooster", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, 
"project_urls": {"Home": "https://bitbucket.org/wooster/biplist"}}}, "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "biplist", "summary": "biplist is a library for reading/writing binary plists.", "test_requires": [{"requires": ["coverage", "nose"]}], "version": "1.0.1"} \ No newline at end of file diff --git a/lib/biplist-1.0.1.dist-info/top_level.txt b/lib/biplist-1.0.1.dist-info/top_level.txt deleted file mode 100644 index cbcb28a..0000000 --- a/lib/biplist-1.0.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -biplist diff --git a/lib/biplist/__init__.py b/lib/biplist/__init__.py deleted file mode 100644 index 9cab05e..0000000 --- a/lib/biplist/__init__.py +++ /dev/null @@ -1,870 +0,0 @@ -"""biplist -- a library for reading and writing binary property list files. - -Binary Property List (plist) files provide a faster and smaller serialization -format for property lists on OS X. This is a library for generating binary -plists which can be read by OS X, iOS, or other clients. - -The API models the plistlib API, and will call through to plistlib when -XML serialization or deserialization is required. - -To generate plists with UID values, wrap the values with the Uid object. The -value must be an int. - -To generate plists with NSData/CFData values, wrap the values with the -Data object. The value must be a string. - -Date values can only be datetime.datetime objects. - -The exceptions InvalidPlistException and NotBinaryPlistException may be -thrown to indicate that the data cannot be serialized or deserialized as -a binary plist. 
- -Plist generation example: - - from biplist import * - from datetime import datetime - plist = {'aKey':'aValue', - '0':1.322, - 'now':datetime.now(), - 'list':[1,2,3], - 'tuple':('a','b','c') - } - try: - writePlist(plist, "example.plist") - except (InvalidPlistException, NotBinaryPlistException), e: - print "Something bad happened:", e - -Plist parsing example: - - from biplist import * - try: - plist = readPlist("example.plist") - print plist - except (InvalidPlistException, NotBinaryPlistException), e: - print "Not a plist:", e -""" - -from collections import namedtuple -import datetime -import io -import math -import plistlib -from struct import pack, unpack, unpack_from -from struct import error as struct_error -import sys -import time - -try: - unicode - unicodeEmpty = r'' -except NameError: - unicode = str - unicodeEmpty = '' -try: - long -except NameError: - long = int -try: - {}.iteritems - iteritems = lambda x: x.iteritems() -except AttributeError: - iteritems = lambda x: x.items() - -__all__ = [ - 'Uid', 'Data', 'readPlist', 'writePlist', 'readPlistFromString', - 'writePlistToString', 'InvalidPlistException', 'NotBinaryPlistException' -] - -# Apple uses Jan 1, 2001 as a base for all plist date/times. -apple_reference_date = datetime.datetime.utcfromtimestamp(978307200) - -class Uid(object): - """Wrapper around integers for representing UID values. 
This - is used in keyed archiving.""" - integer = 0 - def __init__(self, integer): - self.integer = integer - - def __repr__(self): - return "Uid(%d)" % self.integer - - def __eq__(self, other): - if isinstance(self, Uid) and isinstance(other, Uid): - return self.integer == other.integer - return False - - def __cmp__(self, other): - return self.integer - other.integer - - def __lt__(self, other): - return self.integer < other.integer - - def __hash__(self): - return self.integer - - def __int__(self): - return int(self.integer) - -class Data(bytes): - """Wrapper around bytes to distinguish Data values.""" - -class InvalidPlistException(Exception): - """Raised when the plist is incorrectly formatted.""" - -class NotBinaryPlistException(Exception): - """Raised when a binary plist was expected but not encountered.""" - -def readPlist(pathOrFile): - """Raises NotBinaryPlistException, InvalidPlistException""" - didOpen = False - result = None - if isinstance(pathOrFile, (bytes, unicode)): - pathOrFile = open(pathOrFile, 'rb') - didOpen = True - try: - reader = PlistReader(pathOrFile) - result = reader.parse() - except NotBinaryPlistException as e: - try: - pathOrFile.seek(0) - result = None - if hasattr(plistlib, 'loads'): - contents = None - if isinstance(pathOrFile, (bytes, unicode)): - with open(pathOrFile, 'rb') as f: - contents = f.read() - else: - contents = pathOrFile.read() - result = plistlib.loads(contents) - else: - result = plistlib.readPlist(pathOrFile) - result = wrapDataObject(result, for_binary=True) - except Exception as e: - raise InvalidPlistException(e) - finally: - if didOpen: - pathOrFile.close() - return result - -def wrapDataObject(o, for_binary=False): - if isinstance(o, Data) and not for_binary: - v = sys.version_info - if not (v[0] >= 3 and v[1] >= 4): - o = plistlib.Data(o) - elif isinstance(o, (bytes, plistlib.Data)) and for_binary: - if hasattr(o, 'data'): - o = Data(o.data) - elif isinstance(o, tuple): - o = wrapDataObject(list(o), 
for_binary) - o = tuple(o) - elif isinstance(o, list): - for i in range(len(o)): - o[i] = wrapDataObject(o[i], for_binary) - elif isinstance(o, dict): - for k in o: - o[k] = wrapDataObject(o[k], for_binary) - return o - -def writePlist(rootObject, pathOrFile, binary=True): - if not binary: - rootObject = wrapDataObject(rootObject, binary) - if hasattr(plistlib, "dump"): - if isinstance(pathOrFile, (bytes, unicode)): - with open(pathOrFile, 'wb') as f: - return plistlib.dump(rootObject, f) - else: - return plistlib.dump(rootObject, pathOrFile) - else: - return plistlib.writePlist(rootObject, pathOrFile) - else: - didOpen = False - if isinstance(pathOrFile, (bytes, unicode)): - pathOrFile = open(pathOrFile, 'wb') - didOpen = True - writer = PlistWriter(pathOrFile) - result = writer.writeRoot(rootObject) - if didOpen: - pathOrFile.close() - return result - -def readPlistFromString(data): - return readPlist(io.BytesIO(data)) - -def writePlistToString(rootObject, binary=True): - if not binary: - rootObject = wrapDataObject(rootObject, binary) - if hasattr(plistlib, "dumps"): - return plistlib.dumps(rootObject) - elif hasattr(plistlib, "writePlistToBytes"): - return plistlib.writePlistToBytes(rootObject) - else: - return plistlib.writePlistToString(rootObject) - else: - ioObject = io.BytesIO() - writer = PlistWriter(ioObject) - writer.writeRoot(rootObject) - return ioObject.getvalue() - -def is_stream_binary_plist(stream): - stream.seek(0) - header = stream.read(7) - if header == b'bplist0': - return True - else: - return False - -PlistTrailer = namedtuple('PlistTrailer', 'offsetSize, objectRefSize, offsetCount, topLevelObjectNumber, offsetTableOffset') -PlistByteCounts = namedtuple('PlistByteCounts', 'nullBytes, boolBytes, intBytes, realBytes, dateBytes, dataBytes, stringBytes, uidBytes, arrayBytes, setBytes, dictBytes') - -class PlistReader(object): - file = None - contents = '' - offsets = None - trailer = None - currentOffset = 0 - - def __init__(self, fileOrStream): 
- """Raises NotBinaryPlistException.""" - self.reset() - self.file = fileOrStream - - def parse(self): - return self.readRoot() - - def reset(self): - self.trailer = None - self.contents = '' - self.offsets = [] - self.currentOffset = 0 - - def readRoot(self): - result = None - self.reset() - # Get the header, make sure it's a valid file. - if not is_stream_binary_plist(self.file): - raise NotBinaryPlistException() - self.file.seek(0) - self.contents = self.file.read() - if len(self.contents) < 32: - raise InvalidPlistException("File is too short.") - trailerContents = self.contents[-32:] - try: - self.trailer = PlistTrailer._make(unpack("!xxxxxxBBQQQ", trailerContents)) - offset_size = self.trailer.offsetSize * self.trailer.offsetCount - offset = self.trailer.offsetTableOffset - offset_contents = self.contents[offset:offset+offset_size] - offset_i = 0 - while offset_i < self.trailer.offsetCount: - begin = self.trailer.offsetSize*offset_i - tmp_contents = offset_contents[begin:begin+self.trailer.offsetSize] - tmp_sized = self.getSizedInteger(tmp_contents, self.trailer.offsetSize) - self.offsets.append(tmp_sized) - offset_i += 1 - self.setCurrentOffsetToObjectNumber(self.trailer.topLevelObjectNumber) - result = self.readObject() - except TypeError as e: - raise InvalidPlistException(e) - return result - - def setCurrentOffsetToObjectNumber(self, objectNumber): - self.currentOffset = self.offsets[objectNumber] - - def readObject(self): - result = None - tmp_byte = self.contents[self.currentOffset:self.currentOffset+1] - marker_byte = unpack("!B", tmp_byte)[0] - format = (marker_byte >> 4) & 0x0f - extra = marker_byte & 0x0f - self.currentOffset += 1 - - def proc_extra(extra): - if extra == 0b1111: - #self.currentOffset += 1 - extra = self.readObject() - return extra - - # bool, null, or fill byte - if format == 0b0000: - if extra == 0b0000: - result = None - elif extra == 0b1000: - result = False - elif extra == 0b1001: - result = True - elif extra == 0b1111: - pass 
# fill byte - else: - raise InvalidPlistException("Invalid object found at offset: %d" % (self.currentOffset - 1)) - # int - elif format == 0b0001: - extra = proc_extra(extra) - result = self.readInteger(pow(2, extra)) - # real - elif format == 0b0010: - extra = proc_extra(extra) - result = self.readReal(extra) - # date - elif format == 0b0011 and extra == 0b0011: - result = self.readDate() - # data - elif format == 0b0100: - extra = proc_extra(extra) - result = self.readData(extra) - # ascii string - elif format == 0b0101: - extra = proc_extra(extra) - result = self.readAsciiString(extra) - # Unicode string - elif format == 0b0110: - extra = proc_extra(extra) - result = self.readUnicode(extra) - # uid - elif format == 0b1000: - result = self.readUid(extra) - # array - elif format == 0b1010: - extra = proc_extra(extra) - result = self.readArray(extra) - # set - elif format == 0b1100: - extra = proc_extra(extra) - result = set(self.readArray(extra)) - # dict - elif format == 0b1101: - extra = proc_extra(extra) - result = self.readDict(extra) - else: - raise InvalidPlistException("Invalid object found: {format: %s, extra: %s}" % (bin(format), bin(extra))) - return result - - def readInteger(self, byteSize): - result = 0 - original_offset = self.currentOffset - data = self.contents[self.currentOffset:self.currentOffset + byteSize] - result = self.getSizedInteger(data, byteSize, as_number=True) - self.currentOffset = original_offset + byteSize - return result - - def readReal(self, length): - result = 0.0 - to_read = pow(2, length) - data = self.contents[self.currentOffset:self.currentOffset+to_read] - if length == 2: # 4 bytes - result = unpack('>f', data)[0] - elif length == 3: # 8 bytes - result = unpack('>d', data)[0] - else: - raise InvalidPlistException("Unknown real of length %d bytes" % to_read) - return result - - def readRefs(self, count): - refs = [] - i = 0 - while i < count: - fragment = 
self.contents[self.currentOffset:self.currentOffset+self.trailer.objectRefSize] - ref = self.getSizedInteger(fragment, len(fragment)) - refs.append(ref) - self.currentOffset += self.trailer.objectRefSize - i += 1 - return refs - - def readArray(self, count): - result = [] - values = self.readRefs(count) - i = 0 - while i < len(values): - self.setCurrentOffsetToObjectNumber(values[i]) - value = self.readObject() - result.append(value) - i += 1 - return result - - def readDict(self, count): - result = {} - keys = self.readRefs(count) - values = self.readRefs(count) - i = 0 - while i < len(keys): - self.setCurrentOffsetToObjectNumber(keys[i]) - key = self.readObject() - self.setCurrentOffsetToObjectNumber(values[i]) - value = self.readObject() - result[key] = value - i += 1 - return result - - def readAsciiString(self, length): - result = unpack("!%ds" % length, self.contents[self.currentOffset:self.currentOffset+length])[0] - self.currentOffset += length - return str(result.decode('ascii')) - - def readUnicode(self, length): - actual_length = length*2 - data = self.contents[self.currentOffset:self.currentOffset+actual_length] - # unpack not needed?!! data = unpack(">%ds" % (actual_length), data)[0] - self.currentOffset += actual_length - return data.decode('utf_16_be') - - def readDate(self): - result = unpack(">d", self.contents[self.currentOffset:self.currentOffset+8])[0] - # Use timedelta to workaround time_t size limitation on 32-bit python. 
- result = datetime.timedelta(seconds=result) + apple_reference_date - self.currentOffset += 8 - return result - - def readData(self, length): - result = self.contents[self.currentOffset:self.currentOffset+length] - self.currentOffset += length - return Data(result) - - def readUid(self, length): - return Uid(self.readInteger(length+1)) - - def getSizedInteger(self, data, byteSize, as_number=False): - """Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise.""" - result = 0 - # 1, 2, and 4 byte integers are unsigned - if byteSize == 1: - result = unpack('>B', data)[0] - elif byteSize == 2: - result = unpack('>H', data)[0] - elif byteSize == 4: - result = unpack('>L', data)[0] - elif byteSize == 8: - if as_number: - result = unpack('>q', data)[0] - else: - result = unpack('>Q', data)[0] - elif byteSize <= 16: - # Handle odd-sized or integers larger than 8 bytes - # Don't naively go over 16 bytes, in order to prevent infinite loops. - result = 0 - if hasattr(int, 'from_bytes'): - result = int.from_bytes(data, 'big') - else: - for byte in data: - if not isinstance(byte, int): # Python3.0-3.1.x return ints, 2.x return str - byte = unpack_from('>B', byte)[0] - result = (result << 8) | byte - else: - raise InvalidPlistException("Encountered integer longer than 16 bytes.") - return result - -class HashableWrapper(object): - def __init__(self, value): - self.value = value - def __repr__(self): - return "" % [self.value] - -class BoolWrapper(object): - def __init__(self, value): - self.value = value - def __repr__(self): - return "" % self.value - -class FloatWrapper(object): - _instances = {} - def __new__(klass, value): - # Ensure FloatWrapper(x) for a given float x is always the same object - wrapper = klass._instances.get(value) - if wrapper is None: - wrapper = object.__new__(klass) - wrapper.value = value - klass._instances[value] = wrapper - return wrapper - def __repr__(self): - return "" % self.value - -class 
StringWrapper(object): - __instances = {} - - encodedValue = None - encoding = None - - def __new__(cls, value): - '''Ensure we only have a only one instance for any string, - and that we encode ascii as 1-byte-per character when possible''' - - encodedValue = None - - for encoding in ('ascii', 'utf_16_be'): - try: - encodedValue = value.encode(encoding) - except: pass - if encodedValue is not None: - if encodedValue not in cls.__instances: - cls.__instances[encodedValue] = super(StringWrapper, cls).__new__(cls) - cls.__instances[encodedValue].encodedValue = encodedValue - cls.__instances[encodedValue].encoding = encoding - return cls.__instances[encodedValue] - - raise ValueError('Unable to get ascii or utf_16_be encoding for %s' % repr(value)) - - def __len__(self): - '''Return roughly the number of characters in this string (half the byte length)''' - if self.encoding == 'ascii': - return len(self.encodedValue) - else: - return len(self.encodedValue)//2 - - @property - def encodingMarker(self): - if self.encoding == 'ascii': - return 0b0101 - else: - return 0b0110 - - def __repr__(self): - return '' % (self.encoding, self.encodedValue) - -class PlistWriter(object): - header = b'bplist00bybiplist1.0' - file = None - byteCounts = None - trailer = None - computedUniques = None - writtenReferences = None - referencePositions = None - wrappedTrue = None - wrappedFalse = None - - def __init__(self, file): - self.reset() - self.file = file - self.wrappedTrue = BoolWrapper(True) - self.wrappedFalse = BoolWrapper(False) - - def reset(self): - self.byteCounts = PlistByteCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) - self.trailer = PlistTrailer(0, 0, 0, 0, 0) - - # A set of all the uniques which have been computed. - self.computedUniques = set() - # A list of all the uniques which have been written. - self.writtenReferences = {} - # A dict of the positions of the written uniques. 
- self.referencePositions = {} - - def positionOfObjectReference(self, obj): - """If the given object has been written already, return its - position in the offset table. Otherwise, return None.""" - return self.writtenReferences.get(obj) - - def writeRoot(self, root): - """ - Strategy is: - - write header - - wrap root object so everything is hashable - - compute size of objects which will be written - - need to do this in order to know how large the object refs - will be in the list/dict/set reference lists - - write objects - - keep objects in writtenReferences - - keep positions of object references in referencePositions - - write object references with the length computed previously - - computer object reference length - - write object reference positions - - write trailer - """ - output = self.header - wrapped_root = self.wrapRoot(root) - self.computeOffsets(wrapped_root, asReference=True, isRoot=True) - self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))}) - self.writeObjectReference(wrapped_root, output) - output = self.writeObject(wrapped_root, output, setReferencePosition=True) - - # output size at this point is an upper bound on how big the - # object reference offsets need to be. 
- self.trailer = self.trailer._replace(**{ - 'offsetSize':self.intSize(len(output)), - 'offsetCount':len(self.computedUniques), - 'offsetTableOffset':len(output), - 'topLevelObjectNumber':0 - }) - - output = self.writeOffsetTable(output) - output += pack('!xxxxxxBBQQQ', *self.trailer) - self.file.write(output) - - def wrapRoot(self, root): - if isinstance(root, bool): - if root is True: - return self.wrappedTrue - else: - return self.wrappedFalse - elif isinstance(root, float): - return FloatWrapper(root) - elif isinstance(root, set): - n = set() - for value in root: - n.add(self.wrapRoot(value)) - return HashableWrapper(n) - elif isinstance(root, dict): - n = {} - for key, value in iteritems(root): - n[self.wrapRoot(key)] = self.wrapRoot(value) - return HashableWrapper(n) - elif isinstance(root, list): - n = [] - for value in root: - n.append(self.wrapRoot(value)) - return HashableWrapper(n) - elif isinstance(root, tuple): - n = tuple([self.wrapRoot(value) for value in root]) - return HashableWrapper(n) - elif isinstance(root, (str, unicode)) and not isinstance(root, Data): - return StringWrapper(root) - elif isinstance(root, bytes): - return Data(root) - else: - return root - - def incrementByteCount(self, field, incr=1): - self.byteCounts = self.byteCounts._replace(**{field:self.byteCounts.__getattribute__(field) + incr}) - - def computeOffsets(self, obj, asReference=False, isRoot=False): - def check_key(key): - if key is None: - raise InvalidPlistException('Dictionary keys cannot be null in plists.') - elif isinstance(key, Data): - raise InvalidPlistException('Data cannot be dictionary keys in plists.') - elif not isinstance(key, StringWrapper): - raise InvalidPlistException('Keys must be strings.') - - def proc_size(size): - if size > 0b1110: - size += self.intSize(size) - return size - # If this should be a reference, then we keep a record of it in the - # uniques table. 
- if asReference: - if obj in self.computedUniques: - return - else: - self.computedUniques.add(obj) - - if obj is None: - self.incrementByteCount('nullBytes') - elif isinstance(obj, BoolWrapper): - self.incrementByteCount('boolBytes') - elif isinstance(obj, Uid): - size = self.intSize(obj.integer) - self.incrementByteCount('uidBytes', incr=1+size) - elif isinstance(obj, (int, long)): - size = self.intSize(obj) - self.incrementByteCount('intBytes', incr=1+size) - elif isinstance(obj, FloatWrapper): - size = self.realSize(obj) - self.incrementByteCount('realBytes', incr=1+size) - elif isinstance(obj, datetime.datetime): - self.incrementByteCount('dateBytes', incr=2) - elif isinstance(obj, Data): - size = proc_size(len(obj)) - self.incrementByteCount('dataBytes', incr=1+size) - elif isinstance(obj, StringWrapper): - size = proc_size(len(obj)) - self.incrementByteCount('stringBytes', incr=1+size) - elif isinstance(obj, HashableWrapper): - obj = obj.value - if isinstance(obj, set): - size = proc_size(len(obj)) - self.incrementByteCount('setBytes', incr=1+size) - for value in obj: - self.computeOffsets(value, asReference=True) - elif isinstance(obj, (list, tuple)): - size = proc_size(len(obj)) - self.incrementByteCount('arrayBytes', incr=1+size) - for value in obj: - asRef = True - self.computeOffsets(value, asReference=True) - elif isinstance(obj, dict): - size = proc_size(len(obj)) - self.incrementByteCount('dictBytes', incr=1+size) - for key, value in iteritems(obj): - check_key(key) - self.computeOffsets(key, asReference=True) - self.computeOffsets(value, asReference=True) - else: - raise InvalidPlistException("Unknown object type: %s (%s)" % (type(obj).__name__, repr(obj))) - - def writeObjectReference(self, obj, output): - """Tries to write an object reference, adding it to the references - table. Does not write the actual object bytes or set the reference - position. 
Returns a tuple of whether the object was a new reference - (True if it was, False if it already was in the reference table) - and the new output. - """ - position = self.positionOfObjectReference(obj) - if position is None: - self.writtenReferences[obj] = len(self.writtenReferences) - output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize) - return (True, output) - else: - output += self.binaryInt(position, byteSize=self.trailer.objectRefSize) - return (False, output) - - def writeObject(self, obj, output, setReferencePosition=False): - """Serializes the given object to the output. Returns output. - If setReferencePosition is True, will set the position the - object was written. - """ - def proc_variable_length(format, length): - result = b'' - if length > 0b1110: - result += pack('!B', (format << 4) | 0b1111) - result = self.writeObject(length, result) - else: - result += pack('!B', (format << 4) | length) - return result - - def timedelta_total_seconds(td): - # Shim for Python 2.6 compatibility, which doesn't have total_seconds. - # Make one argument a float to ensure the right calculation. 
- return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10.0**6) / 10.0**6 - - if setReferencePosition: - self.referencePositions[obj] = len(output) - - if obj is None: - output += pack('!B', 0b00000000) - elif isinstance(obj, BoolWrapper): - if obj.value is False: - output += pack('!B', 0b00001000) - else: - output += pack('!B', 0b00001001) - elif isinstance(obj, Uid): - size = self.intSize(obj.integer) - output += pack('!B', (0b1000 << 4) | size - 1) - output += self.binaryInt(obj.integer) - elif isinstance(obj, (int, long)): - byteSize = self.intSize(obj) - root = math.log(byteSize, 2) - output += pack('!B', (0b0001 << 4) | int(root)) - output += self.binaryInt(obj, as_number=True) - elif isinstance(obj, FloatWrapper): - # just use doubles - output += pack('!B', (0b0010 << 4) | 3) - output += self.binaryReal(obj) - elif isinstance(obj, datetime.datetime): - try: - timestamp = (obj - apple_reference_date).total_seconds() - except AttributeError: - timestamp = timedelta_total_seconds(obj - apple_reference_date) - output += pack('!B', 0b00110011) - output += pack('!d', float(timestamp)) - elif isinstance(obj, Data): - output += proc_variable_length(0b0100, len(obj)) - output += obj - elif isinstance(obj, StringWrapper): - output += proc_variable_length(obj.encodingMarker, len(obj)) - output += obj.encodedValue - elif isinstance(obj, bytes): - output += proc_variable_length(0b0101, len(obj)) - output += obj - elif isinstance(obj, HashableWrapper): - obj = obj.value - if isinstance(obj, (set, list, tuple)): - if isinstance(obj, set): - output += proc_variable_length(0b1100, len(obj)) - else: - output += proc_variable_length(0b1010, len(obj)) - - objectsToWrite = [] - for objRef in obj: - (isNew, output) = self.writeObjectReference(objRef, output) - if isNew: - objectsToWrite.append(objRef) - for objRef in objectsToWrite: - output = self.writeObject(objRef, output, setReferencePosition=True) - elif isinstance(obj, dict): - output += 
proc_variable_length(0b1101, len(obj)) - keys = [] - values = [] - objectsToWrite = [] - for key, value in iteritems(obj): - keys.append(key) - values.append(value) - for key in keys: - (isNew, output) = self.writeObjectReference(key, output) - if isNew: - objectsToWrite.append(key) - for value in values: - (isNew, output) = self.writeObjectReference(value, output) - if isNew: - objectsToWrite.append(value) - for objRef in objectsToWrite: - output = self.writeObject(objRef, output, setReferencePosition=True) - return output - - def writeOffsetTable(self, output): - """Writes all of the object reference offsets.""" - all_positions = [] - writtenReferences = list(self.writtenReferences.items()) - writtenReferences.sort(key=lambda x: x[1]) - for obj,order in writtenReferences: - # Porting note: Elsewhere we deliberately replace empty unicdoe strings - # with empty binary strings, but the empty unicode string - # goes into writtenReferences. This isn't an issue in Py2 - # because u'' and b'' have the same hash; but it is in - # Py3, where they don't. - if bytes != str and obj == unicodeEmpty: - obj = b'' - position = self.referencePositions.get(obj) - if position is None: - raise InvalidPlistException("Error while writing offsets table. Object not found. 
%s" % obj) - output += self.binaryInt(position, self.trailer.offsetSize) - all_positions.append(position) - return output - - def binaryReal(self, obj): - # just use doubles - result = pack('>d', obj.value) - return result - - def binaryInt(self, obj, byteSize=None, as_number=False): - result = b'' - if byteSize is None: - byteSize = self.intSize(obj) - if byteSize == 1: - result += pack('>B', obj) - elif byteSize == 2: - result += pack('>H', obj) - elif byteSize == 4: - result += pack('>L', obj) - elif byteSize == 8: - if as_number: - result += pack('>q', obj) - else: - result += pack('>Q', obj) - elif byteSize <= 16: - try: - result = pack('>Q', 0) + pack('>Q', obj) - except struct_error as e: - raise InvalidPlistException("Unable to pack integer %d: %s" % (obj, e)) - else: - raise InvalidPlistException("Core Foundation can't handle integers with size greater than 16 bytes.") - return result - - def intSize(self, obj): - """Returns the number of bytes necessary to store the given integer.""" - # SIGNED - if obj < 0: # Signed integer, always 8 bytes - return 8 - # UNSIGNED - elif obj <= 0xFF: # 1 byte - return 1 - elif obj <= 0xFFFF: # 2 bytes - return 2 - elif obj <= 0xFFFFFFFF: # 4 bytes - return 4 - # SIGNED - # 0x7FFFFFFFFFFFFFFF is the max. 
- elif obj <= 0x7FFFFFFFFFFFFFFF: # 8 bytes signed - return 8 - elif obj <= 0xffffffffffffffff: # 8 bytes unsigned - return 16 - else: - raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.") - - def realSize(self, obj): - return 8 diff --git a/workflow/__init__.py b/workflow/__init__.py old mode 100644 new mode 100755 index 17636a4..f93fb60 --- a/workflow/__init__.py +++ b/workflow/__init__.py @@ -12,14 +12,10 @@ import os -# Workflow objects -from .workflow import Workflow, manager -from .workflow3 import Variables, Workflow3 - -# Exceptions -from .workflow import PasswordNotFound, KeychainError - +# Filter matching rules # Icons +# Exceptions +# Workflow objects from .workflow import ( ICON_ACCOUNT, ICON_BURN, @@ -44,10 +40,6 @@ ICON_USER, ICON_WARNING, ICON_WEB, -) - -# Filter matching rules -from .workflow import ( MATCH_ALL, MATCH_ALLCHARS, MATCH_ATOM, @@ -57,52 +49,56 @@ MATCH_INITIALS_STARTSWITH, MATCH_STARTSWITH, MATCH_SUBSTRING, + KeychainError, + PasswordNotFound, + Workflow, + manager, ) +from .workflow3 import Variables, Workflow3 - -__title__ = 'Alfred-Workflow' -__version__ = open(os.path.join(os.path.dirname(__file__), 'version')).read() -__author__ = 'Dean Jackson' -__licence__ = 'MIT' -__copyright__ = 'Copyright 2014-2019 Dean Jackson' +__title__ = "Alfred-Workflow" +__version__ = open(os.path.join(os.path.dirname(__file__), "version")).read() +__author__ = "Dean Jackson" +__licence__ = "MIT" +__copyright__ = "Copyright 2014-2019 Dean Jackson" __all__ = [ - 'Variables', - 'Workflow', - 'Workflow3', - 'manager', - 'PasswordNotFound', - 'KeychainError', - 'ICON_ACCOUNT', - 'ICON_BURN', - 'ICON_CLOCK', - 'ICON_COLOR', - 'ICON_COLOUR', - 'ICON_EJECT', - 'ICON_ERROR', - 'ICON_FAVORITE', - 'ICON_FAVOURITE', - 'ICON_GROUP', - 'ICON_HELP', - 'ICON_HOME', - 'ICON_INFO', - 'ICON_NETWORK', - 'ICON_NOTE', - 'ICON_SETTINGS', - 'ICON_SWIRL', - 'ICON_SWITCH', - 'ICON_SYNC', - 'ICON_TRASH', - 'ICON_USER', - 
'ICON_WARNING', - 'ICON_WEB', - 'MATCH_ALL', - 'MATCH_ALLCHARS', - 'MATCH_ATOM', - 'MATCH_CAPITALS', - 'MATCH_INITIALS', - 'MATCH_INITIALS_CONTAIN', - 'MATCH_INITIALS_STARTSWITH', - 'MATCH_STARTSWITH', - 'MATCH_SUBSTRING', + "Variables", + "Workflow", + "Workflow3", + "manager", + "PasswordNotFound", + "KeychainError", + "ICON_ACCOUNT", + "ICON_BURN", + "ICON_CLOCK", + "ICON_COLOR", + "ICON_COLOUR", + "ICON_EJECT", + "ICON_ERROR", + "ICON_FAVORITE", + "ICON_FAVOURITE", + "ICON_GROUP", + "ICON_HELP", + "ICON_HOME", + "ICON_INFO", + "ICON_NETWORK", + "ICON_NOTE", + "ICON_SETTINGS", + "ICON_SWIRL", + "ICON_SWITCH", + "ICON_SYNC", + "ICON_TRASH", + "ICON_USER", + "ICON_WARNING", + "ICON_WEB", + "MATCH_ALL", + "MATCH_ALLCHARS", + "MATCH_ATOM", + "MATCH_CAPITALS", + "MATCH_INITIALS", + "MATCH_INITIALS_CONTAIN", + "MATCH_INITIALS_STARTSWITH", + "MATCH_STARTSWITH", + "MATCH_SUBSTRING", ] diff --git a/workflow/background.py b/workflow/background.py index ba5c52a..2001856 100644 --- a/workflow/background.py +++ b/workflow/background.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # encoding: utf-8 # # Copyright (c) 2014 deanishe@deanishe.net @@ -17,17 +16,16 @@ and examples. 
""" -from __future__ import print_function, unicode_literals -import signal -import sys import os -import subprocess import pickle +import signal +import subprocess +import sys from workflow import Workflow -__all__ = ['is_running', 'run_in_background'] +__all__ = ["is_running", "run_in_background"] _wf = None @@ -52,7 +50,7 @@ def _arg_cache(name): :rtype: ``unicode`` filepath """ - return wf().cachefile(name + '.argcache') + return wf().cachefile(name + ".argcache") def _pid_file(name): @@ -64,7 +62,7 @@ def _pid_file(name): :rtype: ``unicode`` filepath """ - return wf().cachefile(name + '.pid') + return wf().cachefile(name + ".pid") def _process_exists(pid): @@ -96,16 +94,16 @@ def _job_pid(name): if not os.path.exists(pidfile): return - with open(pidfile, 'rb') as fp: - pid = int(fp.read()) + with open(pidfile, "rb") as fp: + read = fp.read() + # print(str(read)) + pid = int.from_bytes(read, sys.byteorder) + # print(pid) if _process_exists(pid): return pid - try: - os.unlink(pidfile) - except Exception: # pragma: no cover - pass + os.unlink(pidfile) def is_running(name): @@ -123,8 +121,9 @@ def is_running(name): return False -def _background(pidfile, stdin='/dev/null', stdout='/dev/null', - stderr='/dev/null'): # pragma: no cover +def _background( + pidfile, stdin="/dev/null", stdout="/dev/null", stderr="/dev/null" +): # pragma: no cover """Fork the current process into a background daemon. :param pidfile: file to write PID of daemon process to. 
@@ -137,42 +136,43 @@ def _background(pidfile, stdin='/dev/null', stdout='/dev/null', :type stderr: filepath """ + def _fork_and_exit_parent(errmsg, wait=False, write=False): try: pid = os.fork() if pid > 0: if write: # write PID of child process to `pidfile` - tmp = pidfile + '.tmp' - with open(tmp, 'wb') as fp: - fp.write(str(pid)) + tmp = pidfile + ".tmp" + with open(tmp, "wb") as fp: + fp.write(pid.to_bytes(4, sys.byteorder)) os.rename(tmp, pidfile) if wait: # wait for child process to exit os.waitpid(pid, 0) os._exit(0) except OSError as err: - _log().critical('%s: (%d) %s', errmsg, err.errno, err.strerror) + _log().critical("%s: (%d) %s", errmsg, err.errno, err.strerror) raise err # Do first fork and wait for second fork to finish. - _fork_and_exit_parent('fork #1 failed', wait=True) + _fork_and_exit_parent("fork #1 failed", wait=True) # Decouple from parent environment. os.chdir(wf().workflowdir) os.setsid() # Do second fork and write PID to pidfile. - _fork_and_exit_parent('fork #2 failed', write=True) + _fork_and_exit_parent("fork #2 failed", write=True) # Now I am a daemon! # Redirect standard file descriptors. 
- si = open(stdin, 'r', 0) - so = open(stdout, 'a+', 0) - se = open(stderr, 'a+', 0) - if hasattr(sys.stdin, 'fileno'): + si = open(stdin, "r", 1) + so = open(stdout, "a+", 1) + se = open(stderr, "a+", 1) + if hasattr(sys.stdin, "fileno"): os.dup2(si.fileno(), sys.stdin.fileno()) - if hasattr(sys.stdout, 'fileno'): + if hasattr(sys.stdout, "fileno"): os.dup2(so.fileno(), sys.stdout.fileno()) - if hasattr(sys.stderr, 'fileno'): + if hasattr(sys.stderr, "fileno"): os.dup2(se.fileno(), sys.stderr.fileno()) @@ -222,25 +222,25 @@ def run_in_background(name, args, **kwargs): """ if is_running(name): - _log().info('[%s] job already running', name) + _log().info("[%s] job already running", name) return argcache = _arg_cache(name) # Cache arguments - with open(argcache, 'wb') as fp: - pickle.dump({'args': args, 'kwargs': kwargs}, fp) - _log().debug('[%s] command cached: %s', name, argcache) + with open(argcache, "wb") as fp: + pickle.dump({"args": args, "kwargs": kwargs}, fp) + _log().debug("[%s] command cached: %s", name, argcache) # Call this script - cmd = ['/usr/bin/python', __file__, name] - _log().debug('[%s] passing job to background runner: %r', name, cmd) + cmd = [sys.executable, "-m", "workflow.background", name] + _log().debug("[%s] passing job to background runner: %r", name, cmd) retcode = subprocess.call(cmd) if retcode: # pragma: no cover - _log().error('[%s] background runner failed with %d', name, retcode) + _log().error("[%s] background runner failed with %d", name, retcode) else: - _log().debug('[%s] background job started', name) + _log().debug("[%s] background job started", name) return retcode @@ -256,7 +256,7 @@ def main(wf): # pragma: no cover name = wf.args[0] argcache = _arg_cache(name) if not os.path.exists(argcache): - msg = '[{0}] command cache not found: {1}'.format(name, argcache) + msg = "[{0}] command cache not found: {1}".format(name, argcache) log.critical(msg) raise IOError(msg) @@ -265,29 +265,29 @@ def main(wf): # pragma: no cover 
_background(pidfile) # Load cached arguments - with open(argcache, 'rb') as fp: + with open(argcache, "rb") as fp: data = pickle.load(fp) # Cached arguments - args = data['args'] - kwargs = data['kwargs'] + args = data["args"] + kwargs = data["kwargs"] # Delete argument cache file os.unlink(argcache) try: # Run the command - log.debug('[%s] running command: %r', name, args) + log.debug("[%s] running command: %r", name, args) retcode = subprocess.call(args, **kwargs) if retcode: - log.error('[%s] command failed with status %d', name, retcode) + log.error("[%s] command failed with status %d", name, retcode) finally: os.unlink(pidfile) - log.debug('[%s] job complete', name) + log.debug("[%s] job complete", name) -if __name__ == '__main__': # pragma: no cover +if __name__ == "__main__": # pragma: no cover wf().run(main) diff --git a/workflow/notify.py b/workflow/notify.py old mode 100644 new mode 100755 index a4b7f40..fa582f6 --- a/workflow/notify.py +++ b/workflow/notify.py @@ -23,7 +23,6 @@ icon and then calls the application to post notifications. """ -from __future__ import print_function, unicode_literals import os import plistlib @@ -33,9 +32,9 @@ import tarfile import tempfile import uuid +from typing import List -import workflow - +from . import workflow _wf = None _log = None @@ -43,20 +42,20 @@ #: Available system sounds from System Preferences > Sound > Sound Effects SOUNDS = ( - 'Basso', - 'Blow', - 'Bottle', - 'Frog', - 'Funk', - 'Glass', - 'Hero', - 'Morse', - 'Ping', - 'Pop', - 'Purr', - 'Sosumi', - 'Submarine', - 'Tink', + "Basso", + "Blow", + "Bottle", + "Frog", + "Funk", + "Glass", + "Hero", + "Morse", + "Ping", + "Pop", + "Purr", + "Sosumi", + "Submarine", + "Tink", ) @@ -90,7 +89,7 @@ def notifier_program(): Returns: unicode: Path to Notify.app ``applet`` executable. 
""" - return wf().datafile('Notify.app/Contents/MacOS/applet') + return wf().datafile("Notify.app/Contents/MacOS/applet") def notifier_icon_path(): @@ -99,7 +98,7 @@ def notifier_icon_path(): Returns: unicode: Path to ``applet.icns`` within the app bundle. """ - return wf().datafile('Notify.app/Contents/Resources/applet.icns') + return wf().datafile("Notify.app/Contents/Resources/applet.icns") def install_notifier(): @@ -108,21 +107,21 @@ def install_notifier(): Changes the bundle ID of the installed app and gives it the workflow's icon. """ - archive = os.path.join(os.path.dirname(__file__), 'Notify.tgz') + archive = os.path.join(os.path.dirname(__file__), "Notify.tgz") destdir = wf().datadir - app_path = os.path.join(destdir, 'Notify.app') + app_path = os.path.join(destdir, "Notify.app") n = notifier_program() - log().debug('installing Notify.app to %r ...', destdir) + log().debug("installing Notify.app to %r ...", destdir) # z = zipfile.ZipFile(archive, 'r') # z.extractall(destdir) - tgz = tarfile.open(archive, 'r:gz') + tgz = tarfile.open(archive, "r:gz") tgz.extractall(destdir) - assert os.path.exists(n), \ - 'Notify.app could not be installed in %s' % destdir + if not os.path.exists(n): # pragma: nocover + raise RuntimeError("Notify.app could not be installed in " + destdir) # Replace applet icon icon = notifier_icon_path() - workflow_icon = wf().workflowfile('icon.png') + workflow_icon = wf().workflowfile("icon.png") if os.path.exists(icon): os.unlink(icon) @@ -134,7 +133,7 @@ def install_notifier(): # until I figure out a better way of excluding this module # from coverage in py2.6. 
if sys.version_info >= (2, 7): # pragma: no cover - from AppKit import NSWorkspace, NSImage + from AppKit import NSImage, NSWorkspace ws = NSWorkspace.sharedWorkspace() img = NSImage.alloc().init() @@ -142,11 +141,11 @@ def install_notifier(): ws.setIcon_forFile_options_(img, app_path, 0) # Change bundle ID of installed app - ip_path = os.path.join(app_path, 'Contents/Info.plist') - bundle_id = '{0}.{1}'.format(wf().bundleid, uuid.uuid4().hex) + ip_path = os.path.join(app_path, "Contents/Info.plist") + bundle_id = "{0}.{1}".format(wf().bundleid, uuid.uuid4().hex) data = plistlib.readPlist(ip_path) - log().debug('changing bundle ID to %r', bundle_id) - data['CFBundleIdentifier'] = bundle_id + log().debug("changing bundle ID to %r", bundle_id) + data["CFBundleIdentifier"] = bundle_id plistlib.writePlist(data, ip_path) @@ -172,7 +171,7 @@ def validate_sound(sound): return None -def notify(title='', text='', sound=None): +def notify(title="", text="", sound=None): """Post notification via Notify.app helper. Args: @@ -186,10 +185,10 @@ def notify(title='', text='', sound=None): Returns: bool: ``True`` if notification was posted, else ``False``. 
""" - if title == text == '': - raise ValueError('Empty notification') + if title == text == "": + raise ValueError("Empty notification") - sound = validate_sound(sound) or '' + sound = validate_sound(sound) or "" n = notifier_program() @@ -197,19 +196,23 @@ def notify(title='', text='', sound=None): install_notifier() env = os.environ.copy() - enc = 'utf-8' - env['NOTIFY_TITLE'] = title.encode(enc) - env['NOTIFY_MESSAGE'] = text.encode(enc) - env['NOTIFY_SOUND'] = sound.encode(enc) + enc = "utf-8" + env["NOTIFY_TITLE"] = title.encode(enc) + env["NOTIFY_MESSAGE"] = text.encode(enc) + env["NOTIFY_SOUND"] = sound.encode(enc) cmd = [n] retcode = subprocess.call(cmd, env=env) if retcode == 0: return True - log().error('Notify.app exited with status {0}.'.format(retcode)) + log().error("Notify.app exited with status {0}.".format(retcode)) return False +def usr_bin_env(*args: str) -> List[str]: + return ["/usr/bin/env", f'PATH={os.environ["PATH"]}'] + list(args) + + def convert_image(inpath, outpath, size): """Convert an image file using ``sips``. @@ -221,17 +224,15 @@ def convert_image(inpath, outpath, size): Raises: RuntimeError: Raised if ``sips`` exits with non-zero status. """ - cmd = [ - b'sips', - b'-z', str(size), str(size), - inpath, - b'--out', outpath] + cmd = ["sips", "-z", str(size), str(size), inpath, "--out", outpath] # log().debug(cmd) - with open(os.devnull, 'w') as pipe: - retcode = subprocess.call(cmd, stdout=pipe, stderr=subprocess.STDOUT) + with open(os.devnull, "w") as pipe: + retcode = subprocess.call( + cmd, shell=True, stdout=pipe, stderr=subprocess.STDOUT + ) if retcode != 0: - raise RuntimeError('sips exited with %d' % retcode) + raise RuntimeError("sips exited with %d" % retcode) def png_to_icns(png_path, icns_path): @@ -248,24 +249,25 @@ def png_to_icns(png_path, icns_path): Raises: RuntimeError: Raised if ``iconutil`` or ``sips`` fail. 
""" - tempdir = tempfile.mkdtemp(prefix='aw-', dir=wf().datadir) + tempdir = tempfile.mkdtemp(prefix="aw-", dir=wf().datadir) try: - iconset = os.path.join(tempdir, 'Icon.iconset') + iconset = os.path.join(tempdir, "Icon.iconset") + + if os.path.exists(iconset): # pragma: nocover + raise RuntimeError("iconset already exists: " + iconset) - assert not os.path.exists(iconset), \ - 'iconset already exists: ' + iconset os.makedirs(iconset) # Copy source icon to icon set and generate all the other # sizes needed configs = [] for i in (16, 32, 128, 256, 512): - configs.append(('icon_{0}x{0}.png'.format(i), i)) - configs.append((('icon_{0}x{0}@2x.png'.format(i), i * 2))) + configs.append(("icon_{0}x{0}.png".format(i), i)) + configs.append((("icon_{0}x{0}@2x.png".format(i), i * 2))) - shutil.copy(png_path, os.path.join(iconset, 'icon_256x256.png')) - shutil.copy(png_path, os.path.join(iconset, 'icon_128x128@2x.png')) + shutil.copy(png_path, os.path.join(iconset, "icon_256x256.png")) + shutil.copy(png_path, os.path.join(iconset, "icon_128x128@2x.png")) for name, size in configs: outpath = os.path.join(iconset, name) @@ -273,18 +275,14 @@ def png_to_icns(png_path, icns_path): continue convert_image(png_path, outpath, size) - cmd = [ - b'iconutil', - b'-c', b'icns', - b'-o', icns_path, - iconset] + cmd = ["iconutil", "-c", "icns", "-o", icns_path, iconset] retcode = subprocess.call(cmd) if retcode != 0: - raise RuntimeError('iconset exited with %d' % retcode) + raise RuntimeError("iconset exited with %d" % retcode) - assert os.path.exists(icns_path), \ - 'generated ICNS file not found: ' + repr(icns_path) + if not os.path.exists(icns_path): # pragma: nocover + raise ValueError("generated ICNS file not found: " + repr(icns_path)) finally: try: shutil.rmtree(tempdir) @@ -292,29 +290,29 @@ def png_to_icns(png_path, icns_path): pass -if __name__ == '__main__': # pragma: nocover +if __name__ == "__main__": # pragma: nocover # Simple command-line script to test module with # This 
won't work on 2.6, as `argparse` isn't available # by default. import argparse - from unicodedata import normalize def ustr(s): """Coerce `s` to normalised Unicode.""" - return normalize('NFD', s.decode('utf-8')) + return normalize("NFD", s.decode("utf-8")) p = argparse.ArgumentParser() - p.add_argument('-p', '--png', help="PNG image to convert to ICNS.") - p.add_argument('-l', '--list-sounds', help="Show available sounds.", - action='store_true') - p.add_argument('-t', '--title', - help="Notification title.", type=ustr, - default='') - p.add_argument('-s', '--sound', type=ustr, - help="Optional notification sound.", default='') - p.add_argument('text', type=ustr, - help="Notification body text.", default='', nargs='?') + p.add_argument("-p", "--png", help="PNG image to convert to ICNS.") + p.add_argument( + "-l", "--list-sounds", help="Show available sounds.", action="store_true" + ) + p.add_argument("-t", "--title", help="Notification title.", type=ustr, default="") + p.add_argument( + "-s", "--sound", type=ustr, help="Optional notification sound.", default="" + ) + p.add_argument( + "text", type=ustr, help="Notification body text.", default="", nargs="?" 
+ ) o = p.parse_args() # List available sounds @@ -327,20 +325,20 @@ def ustr(s): if o.png: icns = os.path.join( os.path.dirname(o.png), - os.path.splitext(os.path.basename(o.png))[0] + '.icns') + os.path.splitext(os.path.basename(o.png))[0] + ".icns", + ) - print('converting {0!r} to {1!r} ...'.format(o.png, icns), - file=sys.stderr) + print("converting {0!r} to {1!r} ...".format(o.png, icns), file=sys.stderr) - assert not os.path.exists(icns), \ - 'destination file already exists: ' + icns + if os.path.exists(icns): + raise ValueError("destination file already exists: " + icns) png_to_icns(o.png, icns) sys.exit(0) # Post notification - if o.title == o.text == '': - print('ERROR: empty notification.', file=sys.stderr) + if o.title == o.text == "": + print("ERROR: empty notification.", file=sys.stderr) sys.exit(1) else: notify(o.title, o.text, o.sound) diff --git a/workflow/update.py b/workflow/update.py old mode 100644 new mode 100755 index ffc6353..dc40c4b --- a/workflow/update.py +++ b/workflow/update.py @@ -21,24 +21,26 @@ """ -from __future__ import print_function, unicode_literals -from collections import defaultdict -from functools import total_ordering import json import os -import tempfile import re import subprocess +import tempfile +from collections import defaultdict +from functools import total_ordering +from itertools import zip_longest +from urllib import request -import workflow -import web +from workflow.util import atomic_writer + +from . 
import workflow # __all__ = [] -RELEASES_BASE = 'https://api.github.com/repos/{}/releases' -match_workflow = re.compile(r'\.alfred(\d+)?workflow$').search +RELEASES_BASE = "https://api.github.com/repos/{}/releases" +match_workflow = re.compile(r"\.alfred(\d+)?workflow$").search _wf = None @@ -70,9 +72,12 @@ class Download(object): @classmethod def from_dict(cls, d): """Create a `Download` from a `dict`.""" - return cls(url=d['url'], filename=d['filename'], - version=Version(d['version']), - prerelease=d['prerelease']) + return cls( + url=d["url"], + filename=d["filename"], + version=Version(d["version"]), + prerelease=d["prerelease"], + ) @classmethod def from_releases(cls, js): @@ -95,34 +100,35 @@ def from_releases(cls, js): releases = json.loads(js) downloads = [] for release in releases: - tag = release['tag_name'] + tag = release["tag_name"] dupes = defaultdict(int) try: version = Version(tag) except ValueError as err: - wf().logger.debug('ignored release: bad version "%s": %s', - tag, err) + wf().logger.debug('ignored release: bad version "%s": %s', tag, err) continue dls = [] - for asset in release.get('assets', []): - url = asset.get('browser_download_url') + for asset in release.get("assets", []): + url = asset.get("browser_download_url") filename = os.path.basename(url) m = match_workflow(filename) if not m: - wf().logger.debug('unwanted file: %s', filename) + wf().logger.debug("unwanted file: %s", filename) continue ext = m.group(0) dupes[ext] = dupes[ext] + 1 - dls.append(Download(url, filename, version, - release['prerelease'])) + dls.append(Download(url, filename, version, release["prerelease"])) valid = True - for ext, n in dupes.items(): + for ext, n in list(dupes.items()): if n > 1: - wf().logger.debug('ignored release "%s": multiple assets ' - 'with extension "%s"', tag, ext) + wf().logger.debug( + 'ignored release "%s": multiple assets ' 'with extension "%s"', + tag, + ext, + ) valid = False break @@ -143,7 +149,7 @@ def __init__(self, url, 
filename, version, prerelease=False): pre-release. Defaults to False. """ - if isinstance(version, basestring): + if isinstance(version, str): version = Version(version) self.url = url @@ -156,23 +162,29 @@ def alfred_version(self): """Minimum Alfred version based on filename extension.""" m = match_workflow(self.filename) if not m or not m.group(1): - return Version('0') + return Version("0") return Version(m.group(1)) @property def dict(self): """Convert `Download` to `dict`.""" - return dict(url=self.url, filename=self.filename, - version=str(self.version), prerelease=self.prerelease) + return dict( + url=self.url, + filename=self.filename, + version=str(self.version), + prerelease=self.prerelease, + ) def __str__(self): """Format `Download` for printing.""" - u = ('Download(url={dl.url!r}, ' - 'filename={dl.filename!r}, ' - 'version={dl.version!r}, ' - 'prerelease={dl.prerelease!r})'.format(dl=self)) - - return u.encode('utf-8') + return ( + "Download(" + "url={dl.url!r}, " + "filename={dl.filename!r}, " + "version={dl.version!r}, " + "prerelease={dl.prerelease!r}" + ")" + ).format(dl=self) def __repr__(self): """Code-like representation of `Download`.""" @@ -180,10 +192,12 @@ def __repr__(self): def __eq__(self, other): """Compare Downloads based on version numbers.""" - if self.url != other.url \ - or self.filename != other.filename \ - or self.version != other.version \ - or self.prerelease != other.prerelease: + if ( + self.url != other.url + or self.filename != other.filename + or self.version != other.version + or self.prerelease != other.prerelease + ): return False return True @@ -222,7 +236,7 @@ class Version(object): """ #: Match version and pre-release/build information in version strings - match_version = re.compile(r'([0-9\.]+)(.+)?').match + match_version = re.compile(r"([0-9][0-9\.]*)(.+)?").match def __init__(self, vstr): """Create new `Version` object. @@ -231,23 +245,24 @@ def __init__(self, vstr): vstr (basestring): Semantic version string. 
""" if not vstr: - raise ValueError('invalid version number: {!r}'.format(vstr)) + raise ValueError("invalid version number: {!r}".format(vstr)) self.vstr = vstr self.major = 0 self.minor = 0 self.patch = 0 - self.suffix = '' - self.build = '' + self.suffix = "" + self.build = "" self._parse(vstr) def _parse(self, vstr): - if vstr.startswith('v'): + vstr = str(vstr) + if vstr.startswith("v"): m = self.match_version(vstr[1:]) else: m = self.match_version(vstr) if not m: - raise ValueError('invalid version number: {!r}'.format(vstr)) + raise ValueError("invalid version number: " + vstr) version, suffix = m.groups() parts = self._parse_dotted_string(version) @@ -257,26 +272,23 @@ def _parse(self, vstr): if len(parts): self.patch = parts.pop(0) if not len(parts) == 0: - raise ValueError('version number too long: {!r}'.format(vstr)) + raise ValueError("version number too long: " + vstr) if suffix: # Build info - idx = suffix.find('+') + idx = suffix.find("+") if idx > -1: - self.build = suffix[idx+1:] + self.build = suffix[idx + 1 :] suffix = suffix[:idx] if suffix: - if not suffix.startswith('-'): - raise ValueError( - 'suffix must start with - : {0}'.format(suffix)) + if not suffix.startswith("-"): + raise ValueError("suffix must start with - : " + suffix) self.suffix = suffix[1:] - # wf().logger.debug('version str `{}` -> {}'.format(vstr, repr(self))) - def _parse_dotted_string(self, s): """Parse string ``s`` into list of ints and strings.""" parsed = [] - parts = s.split('.') + parts = s.split(".") for p in parts: if p.isdigit(): p = int(p) @@ -291,7 +303,7 @@ def tuple(self): def __lt__(self, other): """Implement comparison.""" if not isinstance(other, Version): - raise ValueError('not a Version instance: {0!r}'.format(other)) + raise ValueError("not a Version instance: {0!r}".format(other)) t = self.tuple[:3] o = other.tuple[:3] if t < o: @@ -301,15 +313,27 @@ def __lt__(self, other): return True if other.suffix and not self.suffix: return False - return 
self._parse_dotted_string(self.suffix) \ - < self._parse_dotted_string(other.suffix) + + self_suffix = self._parse_dotted_string(self.suffix) + other_suffix = self._parse_dotted_string(other.suffix) + + for s, o in zip_longest(self_suffix, other_suffix): + if s is None: # shorter value wins + return True + elif o is None: # longer value loses + return False + elif type(s) != type(o): # type coersion + s, o = str(s), str(o) + if s == o: # next if the same compare + continue + return s < o # finally compare # t > o return False def __eq__(self, other): """Implement comparison.""" if not isinstance(other, Version): - raise ValueError('not a Version instance: {0!r}'.format(other)) + raise ValueError("not a Version instance: {0!r}".format(other)) return self.tuple == other.tuple def __ne__(self, other): @@ -319,13 +343,13 @@ def __ne__(self, other): def __gt__(self, other): """Implement comparison.""" if not isinstance(other, Version): - raise ValueError('not a Version instance: {0!r}'.format(other)) + raise ValueError("not a Version instance: {0!r}".format(other)) return other.__lt__(self) def __le__(self, other): """Implement comparison.""" if not isinstance(other, Version): - raise ValueError('not a Version instance: {0!r}'.format(other)) + raise ValueError("not a Version instance: {0!r}".format(other)) return not other.__lt__(self) def __ge__(self, other): @@ -334,11 +358,11 @@ def __ge__(self, other): def __str__(self): """Return semantic version string.""" - vstr = '{0}.{1}.{2}'.format(self.major, self.minor, self.patch) + vstr = "{0}.{1}.{2}".format(self.major, self.minor, self.patch) if self.suffix: - vstr = '{0}-{1}'.format(vstr, self.suffix) + vstr = "{0}-{1}".format(vstr, self.suffix) if self.build: - vstr = '{0}+{1}'.format(vstr, self.build) + vstr = "{0}+{1}".format(vstr, self.build) return vstr def __repr__(self): @@ -359,16 +383,15 @@ def retrieve_download(dl): """ if not match_workflow(dl.filename): - raise ValueError('attachment not a workflow: ' + 
dl.filename) + raise ValueError("attachment not a workflow: " + dl.filename) path = os.path.join(tempfile.gettempdir(), dl.filename) - wf().logger.debug('downloading update from ' - '%r to %r ...', dl.url, path) + wf().logger.debug("downloading update from " "%r to %r ...", dl.url, path) - r = web.get(dl.url) - r.raise_for_status() + r = request.urlopen(dl.url) - r.save_to_path(path) + with atomic_writer(path, "wb") as file_obj: + file_obj.write(r.read()) return path @@ -383,8 +406,8 @@ def build_api_url(repo): unicode: URL to the API endpoint for the repo's releases """ - if len(repo.split('/')) != 2: - raise ValueError('invalid GitHub repo: {!r}'.format(repo)) + if len(repo.split("/")) != 2: + raise ValueError("invalid GitHub repo: {!r}".format(repo)) return RELEASES_BASE.format(repo) @@ -403,12 +426,11 @@ def get_downloads(repo): url = build_api_url(repo) def _fetch(): - wf().logger.info('retrieving releases for %r ...', repo) - r = web.get(url) - r.raise_for_status() - return r.content + wf().logger.info("retrieving releases for %r ...", repo) + r = request.urlopen(url) + return r.read() - key = 'github-releases-' + repo.replace('/', '-') + key = "github-releases-" + repo.replace("/", "-") js = wf().cached_data(key, _fetch, max_age=60) return Download.from_releases(js) @@ -416,7 +438,7 @@ def _fetch(): def latest_download(dls, alfred_version=None, prereleases=False): """Return newest `Download`.""" - alfred_version = alfred_version or os.getenv('alfred_version') + alfred_version = alfred_version or os.getenv("alfred_version") version = None if alfred_version: version = Version(alfred_version) @@ -424,21 +446,24 @@ def latest_download(dls, alfred_version=None, prereleases=False): dls.sort(reverse=True) for dl in dls: if dl.prerelease and not prereleases: - wf().logger.debug('ignored prerelease: %s', dl.version) + wf().logger.debug("ignored prerelease: %s", dl.version) continue if version and dl.alfred_version > version: - wf().logger.debug('ignored incompatible 
(%s > %s): %s', - dl.alfred_version, version, dl.filename) + wf().logger.debug( + "ignored incompatible (%s > %s): %s", + dl.alfred_version, + version, + dl.filename, + ) continue - wf().logger.debug('latest version: %s (%s)', dl.version, dl.filename) + wf().logger.debug("latest version: %s (%s)", dl.version, dl.filename) return dl return None -def check_update(repo, current_version, prereleases=False, - alfred_version=None): +def check_update(repo, current_version, prereleases=False, alfred_version=None): """Check whether a newer release is available on GitHub. Args: @@ -456,38 +481,32 @@ def check_update(repo, current_version, prereleases=False, be cached. """ - key = '__workflow_latest_version' + key = "__workflow_latest_version" # data stored when no update is available - no_update = { - 'available': False, - 'download': None, - 'version': None, - } + no_update = {"available": False, "download": None, "version": None} current = Version(current_version) dls = get_downloads(repo) if not len(dls): - wf().logger.warning('no valid downloads for %s', repo) + wf().logger.warning("no valid downloads for %s", repo) wf().cache_data(key, no_update) return False - wf().logger.info('%d download(s) for %s', len(dls), repo) + wf().logger.info("%d download(s) for %s", len(dls), repo) dl = latest_download(dls, alfred_version, prereleases) if not dl: - wf().logger.warning('no compatible downloads for %s', repo) + wf().logger.warning("no compatible downloads for %s", repo) wf().cache_data(key, no_update) return False - wf().logger.debug('latest=%r, installed=%r', dl.version, current) + wf().logger.debug("latest=%r, installed=%r", dl.version, current) if dl.version > current: - wf().cache_data(key, { - 'version': str(dl.version), - 'download': dl.dict, - 'available': True, - }) + wf().cache_data( + key, {"version": str(dl.version), "download": dl.dict, "available": True} + ) return True wf().cache_data(key, no_update) @@ -500,50 +519,45 @@ def install_update(): :returns: ``True`` 
if an update is installed, else ``False`` """ - key = '__workflow_latest_version' + key = "__workflow_latest_version" # data stored when no update is available - no_update = { - 'available': False, - 'download': None, - 'version': None, - } + no_update = {"available": False, "download": None, "version": None} status = wf().cached_data(key, max_age=0) - if not status or not status.get('available'): - wf().logger.info('no update available') + if not status or not status.get("available"): + wf().logger.info("no update available") return False - dl = status.get('download') + dl = status.get("download") if not dl: - wf().logger.info('no download information') + wf().logger.info("no download information") return False path = retrieve_download(Download.from_dict(dl)) - wf().logger.info('installing updated workflow ...') - subprocess.call(['open', path]) + wf().logger.info("installing updated workflow ...") + subprocess.call(["open", path]) # nosec wf().cache_data(key, no_update) return True -if __name__ == '__main__': # pragma: nocover +if __name__ == "__main__": # pragma: nocover import sys prereleases = False def show_help(status=0): """Print help message.""" - print('usage: update.py (check|install) ' - '[--prereleases] ') + print("usage: update.py (check|install) " "[--prereleases] ") sys.exit(status) argv = sys.argv[:] - if '-h' in argv or '--help' in argv: + if "-h" in argv or "--help" in argv: show_help() - if '--prereleases' in argv: - argv.remove('--prereleases') + if "--prereleases" in argv: + argv.remove("--prereleases") prereleases = True if len(argv) != 4: @@ -555,9 +569,9 @@ def show_help(status=0): try: - if action == 'check': + if action == "check": check_update(repo, version, prereleases) - elif action == 'install': + elif action == "install": install_update() else: show_help(1) diff --git a/workflow/util.py b/workflow/util.py old mode 100644 new mode 100755 index 27209d8..998456b --- a/workflow/util.py +++ b/workflow/util.py @@ -10,11 +10,8 @@ """A 
selection of helper functions useful for building workflows.""" -from __future__ import print_function, absolute_import import atexit -from collections import namedtuple -from contextlib import contextmanager import errno import fcntl import functools @@ -23,8 +20,10 @@ import signal import subprocess import sys -from threading import Event import time +from collections import namedtuple +from contextlib import contextmanager +from threading import Event # JXA scripts to call Alfred's API via the Scripting Bridge # {app} is automatically replaced with "Alfred 3" or @@ -44,13 +43,15 @@ JXA_SET_CONFIG = "Application({app}).setConfiguration({arg}, {opts});" # Delete a variable from the workflow configuration sheet/info.plist JXA_UNSET_CONFIG = "Application({app}).removeConfiguration({arg}, {opts});" +# Tell Alfred to reload a workflow from disk +JXA_RELOAD_WORKFLOW = "Application({app}).reloadWorkflow({arg});" class AcquisitionError(Exception): """Raised if a lock cannot be acquired.""" -AppInfo = namedtuple('AppInfo', ['name', 'path', 'bundleid']) +AppInfo = namedtuple("AppInfo", ["name", "path", "bundleid"]) """Information about an installed application. Returned by :func:`appinfo`. All attributes are Unicode. @@ -84,14 +85,14 @@ def jxa_app_name(): unicode: Application name or ID. """ - if os.getenv('alfred_version', '').startswith('3'): + if os.getenv("alfred_version", "").startswith("3"): # Alfred 3 - return u'Alfred 3' + return "Alfred 3" # Alfred 4+ - return u'com.runningwithcrayons.Alfred' + return "com.runningwithcrayons.Alfred" -def unicodify(s, encoding='utf-8', norm=None): +def unicodify(s, encoding="utf-8", norm=None): """Ensure string is Unicode. .. versionadded:: 1.31 @@ -108,11 +109,12 @@ def unicodify(s, encoding='utf-8', norm=None): unicode: Decoded, optionally normalised, Unicode string. 
""" - if not isinstance(s, unicode): - s = unicode(s, encoding) + if not isinstance(s, str): + s = str(s, encoding) if norm: from unicodedata import normalize + s = normalize(norm, s) return s @@ -136,8 +138,8 @@ def utf8ify(s): if isinstance(s, str): return s - if isinstance(s, unicode): - return s.encode('utf-8') + if isinstance(s, str): + return s.encode("utf-8") return str(s) @@ -148,20 +150,19 @@ def applescriptify(s): .. versionadded:: 1.31 Replaces ``"`` with `"& quote &"`. Use this function if you want - to insert a string into an AppleScript script: - >>> query = 'g "python" test' - >>> applescriptify(query) + + >>> applescriptify('g "python" test') 'g " & quote & "python" & quote & "test' Args: s (unicode): Unicode string to escape. Returns: - unicode: Escaped string + unicode: Escaped string. """ - return s.replace(u'"', u'" & quote & "') + return s.replace('"', '" & quote & "') def run_command(cmd, **kwargs): @@ -173,15 +174,15 @@ def run_command(cmd, **kwargs): all arguments are encoded to UTF-8 first. Args: - cmd (list): Command arguments to pass to ``check_output``. - **kwargs: Keyword arguments to pass to ``check_output``. + cmd (list): Command arguments to pass to :func:`~subprocess.check_output`. + **kwargs: Keyword arguments to pass to :func:`~subprocess.check_output`. Returns: - str: Output returned by ``check_output``. + str: Output returned by :func:`~subprocess.check_output`. """ - cmd = [utf8ify(s) for s in cmd] - return subprocess.check_output(cmd, **kwargs) + cmd = [str(s) for s in cmd] + return subprocess.check_output(cmd, **kwargs).decode() def run_applescript(script, *args, **kwargs): @@ -197,22 +198,23 @@ def run_applescript(script, *args, **kwargs): script (str, optional): Filepath of script or code to run. *args: Optional command-line arguments to pass to the script. **kwargs: Pass ``lang`` to run a language other than AppleScript. + Any other keyword arguments are passed to :func:`run_command`. Returns: str: Output of run command. 
""" - lang = 'AppleScript' - if 'lang' in kwargs: - lang = kwargs['lang'] - del kwargs['lang'] + lang = "AppleScript" + if "lang" in kwargs: + lang = kwargs["lang"] + del kwargs["lang"] - cmd = ['/usr/bin/osascript', '-l', lang] + cmd = ["/usr/bin/osascript", "-l", lang] if os.path.exists(script): cmd += [script] else: - cmd += ['-e', script] + cmd += ["-e", script] cmd.extend(args) @@ -234,7 +236,7 @@ def run_jxa(script, *args): str: Output of script. """ - return run_applescript(script, *args, lang='JavaScript') + return run_applescript(script, *args, lang="JavaScript") def run_trigger(name, bundleid=None, arg=None): @@ -242,8 +244,8 @@ def run_trigger(name, bundleid=None, arg=None): .. versionadded:: 1.31 - If ``bundleid`` is not specified, reads the bundle ID of the current - workflow from Alfred's environment variables. + If ``bundleid`` is not specified, the bundle ID of the calling + workflow is used. Args: name (str): Name of External Trigger to call. @@ -251,17 +253,33 @@ def run_trigger(name, bundleid=None, arg=None): arg (str, optional): Argument to pass to trigger. """ - bundleid = bundleid or os.getenv('alfred_workflow_bundleid') + bundleid = bundleid or os.getenv("alfred_workflow_bundleid") appname = jxa_app_name() - opts = {'inWorkflow': bundleid} + opts = {"inWorkflow": bundleid} if arg: - opts['withArgument'] = arg + opts["withArgument"] = arg + + script = JXA_TRIGGER.format( + app=json.dumps(appname), + arg=json.dumps(name), + opts=json.dumps(opts, sort_keys=True), + ) + + run_applescript(script, lang="JavaScript") + + +def set_theme(theme_name): + """Change Alfred's theme. - script = JXA_TRIGGER.format(app=json.dumps(appname), - arg=json.dumps(name), - opts=json.dumps(opts, sort_keys=True)) + .. versionadded:: 1.39.0 - run_applescript(script, lang='JavaScript') + Args: + theme_name (unicode): Name of theme Alfred should use. 
+ + """ + appname = jxa_app_name() + script = JXA_SET_THEME.format(app=json.dumps(appname), arg=json.dumps(theme_name)) + run_applescript(script, lang="JavaScript") def set_config(name, value, bundleid=None, exportable=False): @@ -269,6 +287,9 @@ def set_config(name, value, bundleid=None, exportable=False): .. versionadded:: 1.33 + If ``bundleid`` is not specified, the bundle ID of the calling + workflow is used. + Args: name (str): Name of variable to set. value (str): Value to set variable to. @@ -277,19 +298,17 @@ def set_config(name, value, bundleid=None, exportable=False): as exportable (Don't Export checkbox). """ - bundleid = bundleid or os.getenv('alfred_workflow_bundleid') + bundleid = bundleid or os.getenv("alfred_workflow_bundleid") appname = jxa_app_name() - opts = { - 'toValue': value, - 'inWorkflow': bundleid, - 'exportable': exportable, - } + opts = {"toValue": value, "inWorkflow": bundleid, "exportable": exportable} - script = JXA_SET_CONFIG.format(app=json.dumps(appname), - arg=json.dumps(name), - opts=json.dumps(opts, sort_keys=True)) + script = JXA_SET_CONFIG.format( + app=json.dumps(appname), + arg=json.dumps(name), + opts=json.dumps(opts, sort_keys=True), + ) - run_applescript(script, lang='JavaScript') + run_applescript(script, lang="JavaScript") def unset_config(name, bundleid=None): @@ -297,20 +316,91 @@ def unset_config(name, bundleid=None): .. versionadded:: 1.33 + If ``bundleid`` is not specified, the bundle ID of the calling + workflow is used. + Args: name (str): Name of variable to delete. bundleid (str, optional): Bundle ID of workflow variable belongs to. 
""" - bundleid = bundleid or os.getenv('alfred_workflow_bundleid') + bundleid = bundleid or os.getenv("alfred_workflow_bundleid") appname = jxa_app_name() - opts = {'inWorkflow': bundleid} + opts = {"inWorkflow": bundleid} + + script = JXA_UNSET_CONFIG.format( + app=json.dumps(appname), + arg=json.dumps(name), + opts=json.dumps(opts, sort_keys=True), + ) + + run_applescript(script, lang="JavaScript") + + +def search_in_alfred(query=None): + """Open Alfred with given search query. + + .. versionadded:: 1.39.0 + + Omit ``query`` to simply open Alfred's main window. + + Args: + query (unicode, optional): Search query. - script = JXA_UNSET_CONFIG.format(app=json.dumps(appname), - arg=json.dumps(name), - opts=json.dumps(opts, sort_keys=True)) + """ + query = query or "" + appname = jxa_app_name() + script = JXA_SEARCH.format(app=json.dumps(appname), arg=json.dumps(query)) + run_applescript(script, lang="JavaScript") + + +def browse_in_alfred(path): + """Open Alfred's filesystem navigation mode at ``path``. + + .. versionadded:: 1.39.0 + + Args: + path (unicode): File or directory path. + + """ + appname = jxa_app_name() + script = JXA_BROWSE.format(app=json.dumps(appname), arg=json.dumps(path)) + run_applescript(script, lang="JavaScript") + + +def action_in_alfred(paths): + """Action the give filepaths in Alfred. + + .. versionadded:: 1.39.0 + + Args: + paths (list): Unicode paths to files/directories to action. + + """ + appname = jxa_app_name() + script = JXA_ACTION.format(app=json.dumps(appname), arg=json.dumps(paths)) + run_applescript(script, lang="JavaScript") + + +def reload_workflow(bundleid=None): + """Tell Alfred to reload a workflow from disk. + + .. versionadded:: 1.39.0 + + If ``bundleid`` is not specified, the bundle ID of the calling + workflow is used. + + Args: + bundleid (unicode, optional): Bundle ID of workflow to reload. 
+ + """ + bundleid = bundleid or os.getenv("alfred_workflow_bundleid") + appname = jxa_app_name() + script = JXA_RELOAD_WORKFLOW.format( + app=json.dumps(appname), arg=json.dumps(bundleid) + ) - run_applescript(script, lang='JavaScript') + run_applescript(script, lang="JavaScript") def appinfo(name): @@ -325,24 +415,30 @@ def appinfo(name): AppInfo: :class:`AppInfo` tuple or ``None`` if app isn't found. """ - cmd = ['mdfind', '-onlyin', '/Applications', - '-onlyin', os.path.expanduser('~/Applications'), - '(kMDItemContentTypeTree == com.apple.application &&' - '(kMDItemDisplayName == "{0}" || kMDItemFSName == "{0}.app"))' - .format(name)] + cmd = [ + "mdfind", + "-onlyin", + "/Applications", + "-onlyin", + "/System/Applications", + "-onlyin", + os.path.expanduser("~/Applications"), + "(kMDItemContentTypeTree == com.apple.application &&" + '(kMDItemDisplayName == "{0}" || kMDItemFSName == "{0}.app"))'.format(name), + ] output = run_command(cmd).strip() if not output: return None - path = output.split('\n')[0] + path = output.split("\n")[0] - cmd = ['mdls', '-raw', '-name', 'kMDItemCFBundleIdentifier', path] + cmd = ["mdls", "-raw", "-name", "kMDItemCFBundleIdentifier", path] bid = run_command(cmd).strip() if not bid: # pragma: no cover return None - return AppInfo(unicodify(name), unicodify(path), unicodify(bid)) + return AppInfo(name, path, bid) @contextmanager @@ -360,7 +456,7 @@ def atomic_writer(fpath, mode): :type mode: string """ - suffix = '.{}.tmp'.format(os.getpid()) + suffix = ".{}.tmp".format(os.getpid()) temppath = fpath + suffix with open(temppath, mode) as fp: try: @@ -369,7 +465,7 @@ def atomic_writer(fpath, mode): finally: try: os.remove(temppath) - except (OSError, IOError): + except OSError: pass @@ -383,7 +479,7 @@ class LockFile(object): >>> path = '/path/to/file' >>> with LockFile(path): - >>> with open(path, 'wb') as fp: + >>> with open(path, 'w') as fp: >>> fp.write(data) Args: @@ -404,7 +500,7 @@ class LockFile(object): def __init__(self, 
protected_path, timeout=0.0, delay=0.05): """Create new :class:`LockFile` object.""" - self.lockfile = protected_path + '.lock' + self.lockfile = protected_path + ".lock" self._lockfile = None self.timeout = timeout self.delay = delay @@ -433,7 +529,7 @@ def acquire(self, blocking=True): while True: # Raise error if we've been waiting too long to acquire the lock if self.timeout and (time.time() - start) >= self.timeout: - raise AcquisitionError('lock acquisition timed out') + raise AcquisitionError("lock acquisition timed out") # If already locked, wait then try again if self.locked: @@ -442,7 +538,7 @@ def acquire(self, blocking=True): # Create in append mode so we don't lose any contents if self._lockfile is None: - self._lockfile = open(self.lockfile, 'a') + self._lockfile = open(self.lockfile, "a") # Try to acquire the lock try: @@ -476,10 +572,10 @@ def release(self): self._lockfile = None try: os.unlink(self.lockfile) - except (IOError, OSError): # pragma: no cover + except OSError: # pragma: no cover pass - return True + return True # noqa: B012 def __enter__(self): """Acquire lock.""" @@ -516,7 +612,7 @@ class uninterruptible(object): """ - def __init__(self, func, class_name=''): + def __init__(self, func, class_name=""): """Decorate `func`.""" self.func = func functools.update_wrapper(self, func) @@ -548,5 +644,4 @@ def __call__(self, *args, **kwargs): def __get__(self, obj=None, klass=None): """Decorator API.""" - return self.__class__(self.func.__get__(obj, klass), - klass.__name__) + return self.__class__(self.func.__get__(obj, klass), klass.__name__) diff --git a/workflow/version b/workflow/version index a537514..ebc91b4 100644 --- a/workflow/version +++ b/workflow/version @@ -1 +1 @@ -1.37.1 \ No newline at end of file +1.40.0 \ No newline at end of file diff --git a/workflow/web.py b/workflow/web.py deleted file mode 100644 index 0781911..0000000 --- a/workflow/web.py +++ /dev/null @@ -1,685 +0,0 @@ -# encoding: utf-8 -# -# Copyright (c) 2014 Dean 
Jackson -# -# MIT Licence. See http://opensource.org/licenses/MIT -# -# Created on 2014-02-15 -# - -"""Lightweight HTTP library with a requests-like interface.""" - -import codecs -import json -import mimetypes -import os -import random -import re -import socket -import string -import unicodedata -import urllib -import urllib2 -import urlparse -import zlib - - -USER_AGENT = u'Alfred-Workflow/1.36 (+http://www.deanishe.net/alfred-workflow)' - -# Valid characters for multipart form data boundaries -BOUNDARY_CHARS = string.digits + string.ascii_letters - -# HTTP response codes -RESPONSES = { - 100: 'Continue', - 101: 'Switching Protocols', - 200: 'OK', - 201: 'Created', - 202: 'Accepted', - 203: 'Non-Authoritative Information', - 204: 'No Content', - 205: 'Reset Content', - 206: 'Partial Content', - 300: 'Multiple Choices', - 301: 'Moved Permanently', - 302: 'Found', - 303: 'See Other', - 304: 'Not Modified', - 305: 'Use Proxy', - 307: 'Temporary Redirect', - 400: 'Bad Request', - 401: 'Unauthorized', - 402: 'Payment Required', - 403: 'Forbidden', - 404: 'Not Found', - 405: 'Method Not Allowed', - 406: 'Not Acceptable', - 407: 'Proxy Authentication Required', - 408: 'Request Timeout', - 409: 'Conflict', - 410: 'Gone', - 411: 'Length Required', - 412: 'Precondition Failed', - 413: 'Request Entity Too Large', - 414: 'Request-URI Too Long', - 415: 'Unsupported Media Type', - 416: 'Requested Range Not Satisfiable', - 417: 'Expectation Failed', - 500: 'Internal Server Error', - 501: 'Not Implemented', - 502: 'Bad Gateway', - 503: 'Service Unavailable', - 504: 'Gateway Timeout', - 505: 'HTTP Version Not Supported' -} - - -def str_dict(dic): - """Convert keys and values in ``dic`` into UTF-8-encoded :class:`str`. 
- - :param dic: Mapping of Unicode strings - :type dic: dict - :returns: Dictionary containing only UTF-8 strings - :rtype: dict - - """ - if isinstance(dic, CaseInsensitiveDictionary): - dic2 = CaseInsensitiveDictionary() - else: - dic2 = {} - for k, v in dic.items(): - if isinstance(k, unicode): - k = k.encode('utf-8') - if isinstance(v, unicode): - v = v.encode('utf-8') - dic2[k] = v - return dic2 - - -class NoRedirectHandler(urllib2.HTTPRedirectHandler): - """Prevent redirections.""" - - def redirect_request(self, *args): - """Ignore redirect.""" - return None - - -# Adapted from https://gist.github.com/babakness/3901174 -class CaseInsensitiveDictionary(dict): - """Dictionary with caseless key search. - - Enables case insensitive searching while preserving case sensitivity - when keys are listed, ie, via keys() or items() methods. - - Works by storing a lowercase version of the key as the new key and - stores the original key-value pair as the key's value - (values become dictionaries). 
- - """ - - def __init__(self, initval=None): - """Create new case-insensitive dictionary.""" - if isinstance(initval, dict): - for key, value in initval.iteritems(): - self.__setitem__(key, value) - - elif isinstance(initval, list): - for (key, value) in initval: - self.__setitem__(key, value) - - def __contains__(self, key): - return dict.__contains__(self, key.lower()) - - def __getitem__(self, key): - return dict.__getitem__(self, key.lower())['val'] - - def __setitem__(self, key, value): - return dict.__setitem__(self, key.lower(), {'key': key, 'val': value}) - - def get(self, key, default=None): - """Return value for case-insensitive key or default.""" - try: - v = dict.__getitem__(self, key.lower()) - except KeyError: - return default - else: - return v['val'] - - def update(self, other): - """Update values from other ``dict``.""" - for k, v in other.items(): - self[k] = v - - def items(self): - """Return ``(key, value)`` pairs.""" - return [(v['key'], v['val']) for v in dict.itervalues(self)] - - def keys(self): - """Return original keys.""" - return [v['key'] for v in dict.itervalues(self)] - - def values(self): - """Return all values.""" - return [v['val'] for v in dict.itervalues(self)] - - def iteritems(self): - """Iterate over ``(key, value)`` pairs.""" - for v in dict.itervalues(self): - yield v['key'], v['val'] - - def iterkeys(self): - """Iterate over original keys.""" - for v in dict.itervalues(self): - yield v['key'] - - def itervalues(self): - """Interate over values.""" - for v in dict.itervalues(self): - yield v['val'] - - -class Response(object): - """ - Returned by :func:`request` / :func:`get` / :func:`post` functions. - - Simplified version of the ``Response`` object in the ``requests`` library. - - >>> r = request('http://www.google.com') - >>> r.status_code - 200 - >>> r.encoding - ISO-8859-1 - >>> r.content # bytes - ... - >>> r.text # unicode, decoded according to charset in HTTP header/meta tag - u' ...' 
- >>> r.json() # content parsed as JSON - - """ - - def __init__(self, request, stream=False): - """Call `request` with :mod:`urllib2` and process results. - - :param request: :class:`urllib2.Request` instance - :param stream: Whether to stream response or retrieve it all at once - :type stream: bool - - """ - self.request = request - self._stream = stream - self.url = None - self.raw = None - self._encoding = None - self.error = None - self.status_code = None - self.reason = None - self.headers = CaseInsensitiveDictionary() - self._content = None - self._content_loaded = False - self._gzipped = False - - # Execute query - try: - self.raw = urllib2.urlopen(request) - except urllib2.HTTPError as err: - self.error = err - try: - self.url = err.geturl() - # sometimes (e.g. when authentication fails) - # urllib can't get a URL from an HTTPError - # This behaviour changes across Python versions, - # so no test cover (it isn't important). - except AttributeError: # pragma: no cover - pass - self.status_code = err.code - else: - self.status_code = self.raw.getcode() - self.url = self.raw.geturl() - self.reason = RESPONSES.get(self.status_code) - - # Parse additional info if request succeeded - if not self.error: - headers = self.raw.info() - self.transfer_encoding = headers.getencoding() - self.mimetype = headers.gettype() - for key in headers.keys(): - self.headers[key.lower()] = headers.get(key) - - # Is content gzipped? - # Transfer-Encoding appears to not be used in the wild - # (contrary to the HTTP standard), but no harm in testing - # for it - if 'gzip' in headers.get('content-encoding', '') or \ - 'gzip' in headers.get('transfer-encoding', ''): - self._gzipped = True - - @property - def stream(self): - """Whether response is streamed. - - Returns: - bool: `True` if response is streamed. 
- - """ - return self._stream - - @stream.setter - def stream(self, value): - if self._content_loaded: - raise RuntimeError("`content` has already been read from " - "this Response.") - - self._stream = value - - def json(self): - """Decode response contents as JSON. - - :returns: object decoded from JSON - :rtype: list, dict or unicode - - """ - return json.loads(self.content, self.encoding or 'utf-8') - - @property - def encoding(self): - """Text encoding of document or ``None``. - - :returns: Text encoding if found. - :rtype: str or ``None`` - - """ - if not self._encoding: - self._encoding = self._get_encoding() - - return self._encoding - - @property - def content(self): - """Raw content of response (i.e. bytes). - - :returns: Body of HTTP response - :rtype: str - - """ - if not self._content: - - # Decompress gzipped content - if self._gzipped: - decoder = zlib.decompressobj(16 + zlib.MAX_WBITS) - self._content = decoder.decompress(self.raw.read()) - - else: - self._content = self.raw.read() - - self._content_loaded = True - - return self._content - - @property - def text(self): - """Unicode-decoded content of response body. - - If no encoding can be determined from HTTP headers or the content - itself, the encoded response body will be returned instead. - - :returns: Body of HTTP response - :rtype: unicode or str - - """ - if self.encoding: - return unicodedata.normalize('NFC', unicode(self.content, - self.encoding)) - return self.content - - def iter_content(self, chunk_size=4096, decode_unicode=False): - """Iterate over response data. - - .. 
versionadded:: 1.6 - - :param chunk_size: Number of bytes to read into memory - :type chunk_size: int - :param decode_unicode: Decode to Unicode using detected encoding - :type decode_unicode: bool - :returns: iterator - - """ - if not self.stream: - raise RuntimeError("You cannot call `iter_content` on a " - "Response unless you passed `stream=True`" - " to `get()`/`post()`/`request()`.") - - if self._content_loaded: - raise RuntimeError( - "`content` has already been read from this Response.") - - def decode_stream(iterator, r): - dec = codecs.getincrementaldecoder(r.encoding)(errors='replace') - - for chunk in iterator: - data = dec.decode(chunk) - if data: - yield data - - data = dec.decode(b'', final=True) - if data: # pragma: no cover - yield data - - def generate(): - if self._gzipped: - decoder = zlib.decompressobj(16 + zlib.MAX_WBITS) - - while True: - chunk = self.raw.read(chunk_size) - if not chunk: - break - - if self._gzipped: - chunk = decoder.decompress(chunk) - - yield chunk - - chunks = generate() - - if decode_unicode and self.encoding: - chunks = decode_stream(chunks, self) - - return chunks - - def save_to_path(self, filepath): - """Save retrieved data to file at ``filepath``. - - .. versionadded: 1.9.6 - - :param filepath: Path to save retrieved data. - - """ - filepath = os.path.abspath(filepath) - dirname = os.path.dirname(filepath) - if not os.path.exists(dirname): - os.makedirs(dirname) - - self.stream = True - - with open(filepath, 'wb') as fileobj: - for data in self.iter_content(): - fileobj.write(data) - - def raise_for_status(self): - """Raise stored error if one occurred. - - error will be instance of :class:`urllib2.HTTPError` - """ - if self.error is not None: - raise self.error - return - - def _get_encoding(self): - """Get encoding from HTTP headers or content. 
- - :returns: encoding or `None` - :rtype: unicode or ``None`` - - """ - headers = self.raw.info() - encoding = None - - if headers.getparam('charset'): - encoding = headers.getparam('charset') - - # HTTP Content-Type header - for param in headers.getplist(): - if param.startswith('charset='): - encoding = param[8:] - break - - if not self.stream: # Try sniffing response content - # Encoding declared in document should override HTTP headers - if self.mimetype == 'text/html': # sniff HTML headers - m = re.search(r"""""", - self.content) - if m: - encoding = m.group(1) - - elif ((self.mimetype.startswith('application/') - or self.mimetype.startswith('text/')) - and 'xml' in self.mimetype): - m = re.search(r"""]*\?>""", - self.content) - if m: - encoding = m.group(1) - - # Format defaults - if self.mimetype == 'application/json' and not encoding: - # The default encoding for JSON - encoding = 'utf-8' - - elif self.mimetype == 'application/xml' and not encoding: - # The default for 'application/xml' - encoding = 'utf-8' - - if encoding: - encoding = encoding.lower() - - return encoding - - -def request(method, url, params=None, data=None, headers=None, cookies=None, - files=None, auth=None, timeout=60, allow_redirects=False, - stream=False): - """Initiate an HTTP(S) request. Returns :class:`Response` object. - - :param method: 'GET' or 'POST' - :type method: unicode - :param url: URL to open - :type url: unicode - :param params: mapping of URL parameters - :type params: dict - :param data: mapping of form data ``{'field_name': 'value'}`` or - :class:`str` - :type data: dict or str - :param headers: HTTP headers - :type headers: dict - :param cookies: cookies to send to server - :type cookies: dict - :param files: files to upload (see below). 
- :type files: dict - :param auth: username, password - :type auth: tuple - :param timeout: connection timeout limit in seconds - :type timeout: int - :param allow_redirects: follow redirections - :type allow_redirects: bool - :param stream: Stream content instead of fetching it all at once. - :type stream: bool - :returns: Response object - :rtype: :class:`Response` - - - The ``files`` argument is a dictionary:: - - {'fieldname' : { 'filename': 'blah.txt', - 'content': '', - 'mimetype': 'text/plain'} - } - - * ``fieldname`` is the name of the field in the HTML form. - * ``mimetype`` is optional. If not provided, :mod:`mimetypes` will - be used to guess the mimetype, or ``application/octet-stream`` - will be used. - - """ - # TODO: cookies - socket.setdefaulttimeout(timeout) - - # Default handlers - openers = [] - - if not allow_redirects: - openers.append(NoRedirectHandler()) - - if auth is not None: # Add authorisation handler - username, password = auth - password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() - password_manager.add_password(None, url, username, password) - auth_manager = urllib2.HTTPBasicAuthHandler(password_manager) - openers.append(auth_manager) - - # Install our custom chain of openers - opener = urllib2.build_opener(*openers) - urllib2.install_opener(opener) - - if not headers: - headers = CaseInsensitiveDictionary() - else: - headers = CaseInsensitiveDictionary(headers) - - if 'user-agent' not in headers: - headers['user-agent'] = USER_AGENT - - # Accept gzip-encoded content - encodings = [s.strip() for s in - headers.get('accept-encoding', '').split(',')] - if 'gzip' not in encodings: - encodings.append('gzip') - - headers['accept-encoding'] = ', '.join(encodings) - - # Force POST by providing an empty data string - if method == 'POST' and not data: - data = '' - - if files: - if not data: - data = {} - new_headers, data = encode_multipart_formdata(data, files) - headers.update(new_headers) - elif data and isinstance(data, dict): - 
data = urllib.urlencode(str_dict(data)) - - # Make sure everything is encoded text - headers = str_dict(headers) - - if isinstance(url, unicode): - url = url.encode('utf-8') - - if params: # GET args (POST args are handled in encode_multipart_formdata) - - scheme, netloc, path, query, fragment = urlparse.urlsplit(url) - - if query: # Combine query string and `params` - url_params = urlparse.parse_qs(query) - # `params` take precedence over URL query string - url_params.update(params) - params = url_params - - query = urllib.urlencode(str_dict(params), doseq=True) - url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) - - req = urllib2.Request(url, data, headers) - return Response(req, stream) - - -def get(url, params=None, headers=None, cookies=None, auth=None, - timeout=60, allow_redirects=True, stream=False): - """Initiate a GET request. Arguments as for :func:`request`. - - :returns: :class:`Response` instance - - """ - return request('GET', url, params, headers=headers, cookies=cookies, - auth=auth, timeout=timeout, allow_redirects=allow_redirects, - stream=stream) - - -def post(url, params=None, data=None, headers=None, cookies=None, files=None, - auth=None, timeout=60, allow_redirects=False, stream=False): - """Initiate a POST request. Arguments as for :func:`request`. - - :returns: :class:`Response` instance - - """ - return request('POST', url, params, data, headers, cookies, files, auth, - timeout, allow_redirects, stream) - - -def encode_multipart_formdata(fields, files): - """Encode form data (``fields``) and ``files`` for POST request. - - :param fields: mapping of ``{name : value}`` pairs for normal form fields. - :type fields: dict - :param files: dictionary of fieldnames/files elements for file data. - See below for details. 
- :type files: dict of :class:`dict` - :returns: ``(headers, body)`` ``headers`` is a - :class:`dict` of HTTP headers - :rtype: 2-tuple ``(dict, str)`` - - The ``files`` argument is a dictionary:: - - {'fieldname' : { 'filename': 'blah.txt', - 'content': '', - 'mimetype': 'text/plain'} - } - - - ``fieldname`` is the name of the field in the HTML form. - - ``mimetype`` is optional. If not provided, :mod:`mimetypes` will - be used to guess the mimetype, or ``application/octet-stream`` - will be used. - - """ - def get_content_type(filename): - """Return or guess mimetype of ``filename``. - - :param filename: filename of file - :type filename: unicode/str - :returns: mime-type, e.g. ``text/html`` - :rtype: str - - """ - return mimetypes.guess_type(filename)[0] or 'application/octet-stream' - - boundary = '-----' + ''.join(random.choice(BOUNDARY_CHARS) - for i in range(30)) - CRLF = '\r\n' - output = [] - - # Normal form fields - for (name, value) in fields.items(): - if isinstance(name, unicode): - name = name.encode('utf-8') - if isinstance(value, unicode): - value = value.encode('utf-8') - output.append('--' + boundary) - output.append('Content-Disposition: form-data; name="%s"' % name) - output.append('') - output.append(value) - - # Files to upload - for name, d in files.items(): - filename = d[u'filename'] - content = d[u'content'] - if u'mimetype' in d: - mimetype = d[u'mimetype'] - else: - mimetype = get_content_type(filename) - if isinstance(name, unicode): - name = name.encode('utf-8') - if isinstance(filename, unicode): - filename = filename.encode('utf-8') - if isinstance(mimetype, unicode): - mimetype = mimetype.encode('utf-8') - output.append('--' + boundary) - output.append('Content-Disposition: form-data; ' - 'name="%s"; filename="%s"' % (name, filename)) - output.append('Content-Type: %s' % mimetype) - output.append('') - output.append(content) - - output.append('--' + boundary + '--') - output.append('') - body = CRLF.join(output) - headers = { - 
'Content-Type': 'multipart/form-data; boundary=%s' % boundary, - 'Content-Length': str(len(body)), - } - return (headers, body) diff --git a/workflow/workflow.py b/workflow/workflow.py index 2a057b0..22a8da0 100644 --- a/workflow/workflow.py +++ b/workflow/workflow.py @@ -19,11 +19,8 @@ """ -from __future__ import print_function, unicode_literals import binascii -import cPickle -from copy import deepcopy import json import logging import logging.handlers @@ -37,6 +34,9 @@ import sys import time import unicodedata +from contextlib import contextmanager +from copy import deepcopy +from typing import Optional try: import xml.etree.cElementTree as ET @@ -44,12 +44,10 @@ import xml.etree.ElementTree as ET # imported to maintain API -from util import AcquisitionError # noqa: F401 -from util import ( - atomic_writer, - LockFile, - uninterruptible, -) +from workflow.util import AcquisitionError # noqa: F401 +from workflow.util import LockFile, atomic_writer, uninterruptible + +assert sys.version_info[0] == 3 #: Sentinel for properties that haven't been set yet (that might #: correctly have the value ``None``) @@ -68,32 +66,32 @@ # The system icons are all in this directory. 
There are many more than # are listed here -ICON_ROOT = '/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources' +ICON_ROOT = "/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources" -ICON_ACCOUNT = os.path.join(ICON_ROOT, 'Accounts.icns') -ICON_BURN = os.path.join(ICON_ROOT, 'BurningIcon.icns') -ICON_CLOCK = os.path.join(ICON_ROOT, 'Clock.icns') -ICON_COLOR = os.path.join(ICON_ROOT, 'ProfileBackgroundColor.icns') +ICON_ACCOUNT = os.path.join(ICON_ROOT, "Accounts.icns") +ICON_BURN = os.path.join(ICON_ROOT, "BurningIcon.icns") +ICON_CLOCK = os.path.join(ICON_ROOT, "Clock.icns") +ICON_COLOR = os.path.join(ICON_ROOT, "ProfileBackgroundColor.icns") ICON_COLOUR = ICON_COLOR # Queen's English, if you please -ICON_EJECT = os.path.join(ICON_ROOT, 'EjectMediaIcon.icns') +ICON_EJECT = os.path.join(ICON_ROOT, "EjectMediaIcon.icns") # Shown when a workflow throws an error -ICON_ERROR = os.path.join(ICON_ROOT, 'AlertStopIcon.icns') -ICON_FAVORITE = os.path.join(ICON_ROOT, 'ToolbarFavoritesIcon.icns') +ICON_ERROR = os.path.join(ICON_ROOT, "AlertStopIcon.icns") +ICON_FAVORITE = os.path.join(ICON_ROOT, "ToolbarFavoritesIcon.icns") ICON_FAVOURITE = ICON_FAVORITE -ICON_GROUP = os.path.join(ICON_ROOT, 'GroupIcon.icns') -ICON_HELP = os.path.join(ICON_ROOT, 'HelpIcon.icns') -ICON_HOME = os.path.join(ICON_ROOT, 'HomeFolderIcon.icns') -ICON_INFO = os.path.join(ICON_ROOT, 'ToolbarInfo.icns') -ICON_NETWORK = os.path.join(ICON_ROOT, 'GenericNetworkIcon.icns') -ICON_NOTE = os.path.join(ICON_ROOT, 'AlertNoteIcon.icns') -ICON_SETTINGS = os.path.join(ICON_ROOT, 'ToolbarAdvanced.icns') -ICON_SWIRL = os.path.join(ICON_ROOT, 'ErasingIcon.icns') -ICON_SWITCH = os.path.join(ICON_ROOT, 'General.icns') -ICON_SYNC = os.path.join(ICON_ROOT, 'Sync.icns') -ICON_TRASH = os.path.join(ICON_ROOT, 'TrashIcon.icns') -ICON_USER = os.path.join(ICON_ROOT, 'UserIcon.icns') -ICON_WARNING = os.path.join(ICON_ROOT, 'AlertCautionIcon.icns') -ICON_WEB = os.path.join(ICON_ROOT, 'BookmarkIcon.icns') 
+ICON_GROUP = os.path.join(ICON_ROOT, "GroupIcon.icns") +ICON_HELP = os.path.join(ICON_ROOT, "HelpIcon.icns") +ICON_HOME = os.path.join(ICON_ROOT, "HomeFolderIcon.icns") +ICON_INFO = os.path.join(ICON_ROOT, "ToolbarInfo.icns") +ICON_NETWORK = os.path.join(ICON_ROOT, "GenericNetworkIcon.icns") +ICON_NOTE = os.path.join(ICON_ROOT, "AlertNoteIcon.icns") +ICON_SETTINGS = os.path.join(ICON_ROOT, "ToolbarAdvanced.icns") +ICON_SWIRL = os.path.join(ICON_ROOT, "ErasingIcon.icns") +ICON_SWITCH = os.path.join(ICON_ROOT, "General.icns") +ICON_SYNC = os.path.join(ICON_ROOT, "Sync.icns") +ICON_TRASH = os.path.join(ICON_ROOT, "TrashIcon.icns") +ICON_USER = os.path.join(ICON_ROOT, "UserIcon.icns") +ICON_WARNING = os.path.join(ICON_ROOT, "AlertCautionIcon.icns") +ICON_WEB = os.path.join(ICON_ROOT, "BookmarkIcon.icns") #################################################################### # non-ASCII to ASCII diacritic folding. @@ -101,241 +99,241 @@ #################################################################### ASCII_REPLACEMENTS = { - 'À': 'A', - 'Á': 'A', - 'Â': 'A', - 'Ã': 'A', - 'Ä': 'A', - 'Å': 'A', - 'Æ': 'AE', - 'Ç': 'C', - 'È': 'E', - 'É': 'E', - 'Ê': 'E', - 'Ë': 'E', - 'Ì': 'I', - 'Í': 'I', - 'Î': 'I', - 'Ï': 'I', - 'Ð': 'D', - 'Ñ': 'N', - 'Ò': 'O', - 'Ó': 'O', - 'Ô': 'O', - 'Õ': 'O', - 'Ö': 'O', - 'Ø': 'O', - 'Ù': 'U', - 'Ú': 'U', - 'Û': 'U', - 'Ü': 'U', - 'Ý': 'Y', - 'Þ': 'Th', - 'ß': 'ss', - 'à': 'a', - 'á': 'a', - 'â': 'a', - 'ã': 'a', - 'ä': 'a', - 'å': 'a', - 'æ': 'ae', - 'ç': 'c', - 'è': 'e', - 'é': 'e', - 'ê': 'e', - 'ë': 'e', - 'ì': 'i', - 'í': 'i', - 'î': 'i', - 'ï': 'i', - 'ð': 'd', - 'ñ': 'n', - 'ò': 'o', - 'ó': 'o', - 'ô': 'o', - 'õ': 'o', - 'ö': 'o', - 'ø': 'o', - 'ù': 'u', - 'ú': 'u', - 'û': 'u', - 'ü': 'u', - 'ý': 'y', - 'þ': 'th', - 'ÿ': 'y', - 'Ł': 'L', - 'ł': 'l', - 'Ń': 'N', - 'ń': 'n', - 'Ņ': 'N', - 'ņ': 'n', - 'Ň': 'N', - 'ň': 'n', - 'Ŋ': 'ng', - 'ŋ': 'NG', - 'Ō': 'O', - 'ō': 'o', - 'Ŏ': 'O', - 'ŏ': 'o', - 'Ő': 'O', - 'ő': 'o', - 'Œ': 'OE', - 'œ': 
'oe', - 'Ŕ': 'R', - 'ŕ': 'r', - 'Ŗ': 'R', - 'ŗ': 'r', - 'Ř': 'R', - 'ř': 'r', - 'Ś': 'S', - 'ś': 's', - 'Ŝ': 'S', - 'ŝ': 's', - 'Ş': 'S', - 'ş': 's', - 'Š': 'S', - 'š': 's', - 'Ţ': 'T', - 'ţ': 't', - 'Ť': 'T', - 'ť': 't', - 'Ŧ': 'T', - 'ŧ': 't', - 'Ũ': 'U', - 'ũ': 'u', - 'Ū': 'U', - 'ū': 'u', - 'Ŭ': 'U', - 'ŭ': 'u', - 'Ů': 'U', - 'ů': 'u', - 'Ű': 'U', - 'ű': 'u', - 'Ŵ': 'W', - 'ŵ': 'w', - 'Ŷ': 'Y', - 'ŷ': 'y', - 'Ÿ': 'Y', - 'Ź': 'Z', - 'ź': 'z', - 'Ż': 'Z', - 'ż': 'z', - 'Ž': 'Z', - 'ž': 'z', - 'ſ': 's', - 'Α': 'A', - 'Β': 'B', - 'Γ': 'G', - 'Δ': 'D', - 'Ε': 'E', - 'Ζ': 'Z', - 'Η': 'E', - 'Θ': 'Th', - 'Ι': 'I', - 'Κ': 'K', - 'Λ': 'L', - 'Μ': 'M', - 'Ν': 'N', - 'Ξ': 'Ks', - 'Ο': 'O', - 'Π': 'P', - 'Ρ': 'R', - 'Σ': 'S', - 'Τ': 'T', - 'Υ': 'U', - 'Φ': 'Ph', - 'Χ': 'Kh', - 'Ψ': 'Ps', - 'Ω': 'O', - 'α': 'a', - 'β': 'b', - 'γ': 'g', - 'δ': 'd', - 'ε': 'e', - 'ζ': 'z', - 'η': 'e', - 'θ': 'th', - 'ι': 'i', - 'κ': 'k', - 'λ': 'l', - 'μ': 'm', - 'ν': 'n', - 'ξ': 'x', - 'ο': 'o', - 'π': 'p', - 'ρ': 'r', - 'ς': 's', - 'σ': 's', - 'τ': 't', - 'υ': 'u', - 'φ': 'ph', - 'χ': 'kh', - 'ψ': 'ps', - 'ω': 'o', - 'А': 'A', - 'Б': 'B', - 'В': 'V', - 'Г': 'G', - 'Д': 'D', - 'Е': 'E', - 'Ж': 'Zh', - 'З': 'Z', - 'И': 'I', - 'Й': 'I', - 'К': 'K', - 'Л': 'L', - 'М': 'M', - 'Н': 'N', - 'О': 'O', - 'П': 'P', - 'Р': 'R', - 'С': 'S', - 'Т': 'T', - 'У': 'U', - 'Ф': 'F', - 'Х': 'Kh', - 'Ц': 'Ts', - 'Ч': 'Ch', - 'Ш': 'Sh', - 'Щ': 'Shch', - 'Ъ': "'", - 'Ы': 'Y', - 'Ь': "'", - 'Э': 'E', - 'Ю': 'Iu', - 'Я': 'Ia', - 'а': 'a', - 'б': 'b', - 'в': 'v', - 'г': 'g', - 'д': 'd', - 'е': 'e', - 'ж': 'zh', - 'з': 'z', - 'и': 'i', - 'й': 'i', - 'к': 'k', - 'л': 'l', - 'м': 'm', - 'н': 'n', - 'о': 'o', - 'п': 'p', - 'р': 'r', - 'с': 's', - 'т': 't', - 'у': 'u', - 'ф': 'f', - 'х': 'kh', - 'ц': 'ts', - 'ч': 'ch', - 'ш': 'sh', - 'щ': 'shch', - 'ъ': "'", - 'ы': 'y', - 'ь': "'", - 'э': 'e', - 'ю': 'iu', - 'я': 'ia', + "À": "A", + "Á": "A", + "Â": "A", + "Ã": "A", + "Ä": "A", + "Å": "A", + "Æ": "AE", + "Ç": "C", + "È": 
"E", + "É": "E", + "Ê": "E", + "Ë": "E", + "Ì": "I", + "Í": "I", + "Î": "I", + "Ï": "I", + "Ð": "D", + "Ñ": "N", + "Ò": "O", + "Ó": "O", + "Ô": "O", + "Õ": "O", + "Ö": "O", + "Ø": "O", + "Ù": "U", + "Ú": "U", + "Û": "U", + "Ü": "U", + "Ý": "Y", + "Þ": "Th", + "ß": "ss", + "à": "a", + "á": "a", + "â": "a", + "ã": "a", + "ä": "a", + "å": "a", + "æ": "ae", + "ç": "c", + "è": "e", + "é": "e", + "ê": "e", + "ë": "e", + "ì": "i", + "í": "i", + "î": "i", + "ï": "i", + "ð": "d", + "ñ": "n", + "ò": "o", + "ó": "o", + "ô": "o", + "õ": "o", + "ö": "o", + "ø": "o", + "ù": "u", + "ú": "u", + "û": "u", + "ü": "u", + "ý": "y", + "þ": "th", + "ÿ": "y", + "Ł": "L", + "ł": "l", + "Ń": "N", + "ń": "n", + "Ņ": "N", + "ņ": "n", + "Ň": "N", + "ň": "n", + "Ŋ": "ng", + "ŋ": "NG", + "Ō": "O", + "ō": "o", + "Ŏ": "O", + "ŏ": "o", + "Ő": "O", + "ő": "o", + "Œ": "OE", + "œ": "oe", + "Ŕ": "R", + "ŕ": "r", + "Ŗ": "R", + "ŗ": "r", + "Ř": "R", + "ř": "r", + "Ś": "S", + "ś": "s", + "Ŝ": "S", + "ŝ": "s", + "Ş": "S", + "ş": "s", + "Š": "S", + "š": "s", + "Ţ": "T", + "ţ": "t", + "Ť": "T", + "ť": "t", + "Ŧ": "T", + "ŧ": "t", + "Ũ": "U", + "ũ": "u", + "Ū": "U", + "ū": "u", + "Ŭ": "U", + "ŭ": "u", + "Ů": "U", + "ů": "u", + "Ű": "U", + "ű": "u", + "Ŵ": "W", + "ŵ": "w", + "Ŷ": "Y", + "ŷ": "y", + "Ÿ": "Y", + "Ź": "Z", + "ź": "z", + "Ż": "Z", + "ż": "z", + "Ž": "Z", + "ž": "z", + "ſ": "s", + "Α": "A", + "Β": "B", + "Γ": "G", + "Δ": "D", + "Ε": "E", + "Ζ": "Z", + "Η": "E", + "Θ": "Th", + "Ι": "I", + "Κ": "K", + "Λ": "L", + "Μ": "M", + "Ν": "N", + "Ξ": "Ks", + "Ο": "O", + "Π": "P", + "Ρ": "R", + "Σ": "S", + "Τ": "T", + "Υ": "U", + "Φ": "Ph", + "Χ": "Kh", + "Ψ": "Ps", + "Ω": "O", + "α": "a", + "β": "b", + "γ": "g", + "δ": "d", + "ε": "e", + "ζ": "z", + "η": "e", + "θ": "th", + "ι": "i", + "κ": "k", + "λ": "l", + "μ": "m", + "ν": "n", + "ξ": "x", + "ο": "o", + "π": "p", + "ρ": "r", + "ς": "s", + "σ": "s", + "τ": "t", + "υ": "u", + "φ": "ph", + "χ": "kh", + "ψ": "ps", + "ω": "o", + "А": "A", + "Б": "B", + "В": 
"V", + "Г": "G", + "Д": "D", + "Е": "E", + "Ж": "Zh", + "З": "Z", + "И": "I", + "Й": "I", + "К": "K", + "Л": "L", + "М": "M", + "Н": "N", + "О": "O", + "П": "P", + "Р": "R", + "С": "S", + "Т": "T", + "У": "U", + "Ф": "F", + "Х": "Kh", + "Ц": "Ts", + "Ч": "Ch", + "Ш": "Sh", + "Щ": "Shch", + "Ъ": "'", + "Ы": "Y", + "Ь": "'", + "Э": "E", + "Ю": "Iu", + "Я": "Ia", + "а": "a", + "б": "b", + "в": "v", + "г": "g", + "д": "d", + "е": "e", + "ж": "zh", + "з": "z", + "и": "i", + "й": "i", + "к": "k", + "л": "l", + "м": "m", + "н": "n", + "о": "o", + "п": "p", + "р": "r", + "с": "s", + "т": "t", + "у": "u", + "ф": "f", + "х": "kh", + "ц": "ts", + "ч": "ch", + "ш": "sh", + "щ": "shch", + "ъ": "'", + "ы": "y", + "ь": "'", + "э": "e", + "ю": "iu", + "я": "ia", # 'ᴀ': '', # 'ᴁ': '', # 'ᴂ': '', @@ -374,18 +372,18 @@ # 'ᴣ': '', # 'ᴤ': '', # 'ᴥ': '', - 'ᴦ': 'G', - 'ᴧ': 'L', - 'ᴨ': 'P', - 'ᴩ': 'R', - 'ᴪ': 'PS', - 'ẞ': 'Ss', - 'Ỳ': 'Y', - 'ỳ': 'y', - 'Ỵ': 'Y', - 'ỵ': 'y', - 'Ỹ': 'Y', - 'ỹ': 'y', + "ᴦ": "G", + "ᴧ": "L", + "ᴨ": "P", + "ᴩ": "R", + "ᴪ": "PS", + "ẞ": "Ss", + "Ỳ": "Y", + "ỳ": "y", + "Ỵ": "Y", + "ỵ": "y", + "Ỹ": "Y", + "ỹ": "y", } #################################################################### @@ -393,14 +391,14 @@ #################################################################### DUMB_PUNCTUATION = { - '‘': "'", - '’': "'", - '‚': "'", - '“': '"', - '”': '"', - '„': '"', - '–': '-', - '—': '-' + "‘": "'", + "’": "'", + "‚": "'", + "“": '"', + "”": '"', + "„": '"', + "–": "-", + "—": "-", } @@ -413,7 +411,7 @@ INITIALS = string.ascii_uppercase + string.digits #: Split on non-letters, numbers -split_on_delimiters = re.compile('[^a-zA-Z0-9]').split +split_on_delimiters = re.compile("[^a-zA-Z0-9]").split # Match filter flags #: Match items that start with ``query`` @@ -483,6 +481,7 @@ class PasswordExists(KeychainError): # Helper functions #################################################################### + def isascii(text): """Test if ``text`` contains only ASCII 
characters. @@ -493,7 +492,7 @@ def isascii(text): """ try: - text.encode('ascii') + text.encode("ascii") except UnicodeEncodeError: return False return True @@ -503,6 +502,7 @@ def isascii(text): # Implementation classes #################################################################### + class SerializerManager(object): """Contains registered serializers. @@ -540,8 +540,8 @@ def register(self, name, serializer): """ # Basic validation - getattr(serializer, 'load') - getattr(serializer, 'dump') + serializer.load + serializer.dump self._serializers[name] = serializer @@ -568,8 +568,7 @@ def unregister(self, name): """ if name not in self._serializers: - raise ValueError('No such serializer registered : {0}'.format( - name)) + raise ValueError("No such serializer registered : {0}".format(name)) serializer = self._serializers[name] del self._serializers[name] @@ -582,86 +581,72 @@ def serializers(self): return sorted(self._serializers.keys()) -class JSONSerializer(object): - """Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``. - - .. versionadded:: 1.8 - - Use this serializer if you need readable data files. JSON doesn't - support Python objects as well as ``cPickle``/``pickle``, so be - careful which data you try to serialize as JSON. - - """ +class BaseSerializer: + is_binary: Optional[bool] = None @classmethod - def load(cls, file_obj): - """Load serialized object from open JSON file. - - .. versionadded:: 1.8 - - :param file_obj: file handle - :type file_obj: ``file`` object - :returns: object loaded from JSON file - :rtype: object - - """ - return json.load(file_obj) + def binary_mode(cls): + return "b" if cls.is_binary else "" @classmethod - def dump(cls, obj, file_obj): - """Serialize object ``obj`` to open JSON file. + def _opener(cls, opener, path, mode="r"): + with opener(path, mode + cls.binary_mode()) as fp: + yield fp - .. 
versionadded:: 1.8 - - :param obj: Python object to serialize - :type obj: JSON-serializable data structure - :param file_obj: file handle - :type file_obj: ``file`` object + @classmethod + @contextmanager + def atomic_writer(cls, path, mode): + yield from cls._opener(atomic_writer, path, mode) - """ - return json.dump(obj, file_obj, indent=2, encoding='utf-8') + @classmethod + @contextmanager + def open(cls, path, mode): + yield from cls._opener(open, path, mode) -class CPickleSerializer(object): - """Wrapper around :mod:`cPickle`. Sets ``protocol``. +class JSONSerializer(BaseSerializer): + """Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``. .. versionadded:: 1.8 - This is the default serializer and the best combination of speed and - flexibility. + Use this serializer if you need readable data files. JSON doesn't + support Python objects as well as ``pickle``, so be + careful which data you try to serialize as JSON. """ + is_binary = False + @classmethod def load(cls, file_obj): - """Load serialized object from open pickle file. + """Load serialized object from open JSON file. .. versionadded:: 1.8 :param file_obj: file handle :type file_obj: ``file`` object - :returns: object loaded from pickle file + :returns: object loaded from JSON file :rtype: object """ - return cPickle.load(file_obj) + return json.load(file_obj) @classmethod def dump(cls, obj, file_obj): - """Serialize object ``obj`` to open pickle file. + """Serialize object ``obj`` to open JSON file. .. versionadded:: 1.8 :param obj: Python object to serialize - :type obj: Python object + :type obj: JSON-serializable data structure :param file_obj: file handle :type file_obj: ``file`` object """ - return cPickle.dump(obj, file_obj, protocol=-1) + return json.dump(obj, file_obj, indent=2) -class PickleSerializer(object): +class PickleSerializer(BaseSerializer): """Wrapper around :mod:`pickle`. Sets ``protocol``. .. 
versionadded:: 1.8 @@ -670,6 +655,8 @@ class PickleSerializer(object): """ + is_binary = True + @classmethod def load(cls, file_obj): """Load serialized object from open pickle file. @@ -701,9 +688,8 @@ def dump(cls, obj, file_obj): # Set up default manager and register built-in serializers manager = SerializerManager() -manager.register('cpickle', CPickleSerializer) -manager.register('pickle', PickleSerializer) -manager.register('json', JSONSerializer) +manager.register("pickle", PickleSerializer) +manager.register("json", JSONSerializer) class Item(object): @@ -717,10 +703,22 @@ class Item(object): """ - def __init__(self, title, subtitle='', modifier_subtitles=None, - arg=None, autocomplete=None, valid=False, uid=None, - icon=None, icontype=None, type=None, largetext=None, - copytext=None, quicklookurl=None): + def __init__( + self, + title, + subtitle="", + modifier_subtitles=None, + arg=None, + autocomplete=None, + valid=False, + uid=None, + icon=None, + icontype=None, + type=None, + largetext=None, + copytext=None, + quicklookurl=None, + ): """Same arguments as :meth:`Workflow.add_item`.""" self.title = title self.subtitle = subtitle @@ -747,35 +745,36 @@ def elem(self): # Attributes on element attr = {} if self.valid: - attr['valid'] = 'yes' + attr["valid"] = "yes" else: - attr['valid'] = 'no' + attr["valid"] = "no" # Allow empty string for autocomplete. 
This is a useful value, # as TABing the result will revert the query back to just the # keyword if self.autocomplete is not None: - attr['autocomplete'] = self.autocomplete + attr["autocomplete"] = self.autocomplete # Optional attributes - for name in ('uid', 'type'): + for name in ("uid", "type"): value = getattr(self, name, None) if value: attr[name] = value - root = ET.Element('item', attr) - ET.SubElement(root, 'title').text = self.title - ET.SubElement(root, 'subtitle').text = self.subtitle + root = ET.Element("item", attr) + ET.SubElement(root, "title").text = self.title + ET.SubElement(root, "subtitle").text = self.subtitle # Add modifier subtitles - for mod in ('cmd', 'ctrl', 'alt', 'shift', 'fn'): + for mod in ("cmd", "ctrl", "alt", "shift", "fn"): if mod in self.modifier_subtitles: - ET.SubElement(root, 'subtitle', - {'mod': mod}).text = self.modifier_subtitles[mod] + ET.SubElement( + root, "subtitle", {"mod": mod} + ).text = self.modifier_subtitles[mod] # Add arg as element instead of attribute on , as it's more # flexible (newlines aren't allowed in attributes) if self.arg: - ET.SubElement(root, 'arg').text = self.arg + ET.SubElement(root, "arg").text = self.arg # Add icon if there is one if self.icon: @@ -783,18 +782,16 @@ def elem(self): attr = dict(type=self.icontype) else: attr = {} - ET.SubElement(root, 'icon', attr).text = self.icon + ET.SubElement(root, "icon", attr).text = self.icon if self.largetext: - ET.SubElement(root, 'text', - {'type': 'largetype'}).text = self.largetext + ET.SubElement(root, "text", {"type": "largetype"}).text = self.largetext if self.copytext: - ET.SubElement(root, 'text', - {'type': 'copy'}).text = self.copytext + ET.SubElement(root, "text", {"type": "copy"}).text = self.copytext if self.quicklookurl: - ET.SubElement(root, 'quicklookurl').text = self.quicklookurl + ET.SubElement(root, "quicklookurl").text = self.quicklookurl return root @@ -826,7 +823,7 @@ def __init__(self, filepath, defaults=None): if 
os.path.exists(self._filepath): self._load() elif defaults: - for key, val in defaults.items(): + for key, val in list(defaults.items()): self[key] = val self.save() # save default settings @@ -834,7 +831,7 @@ def _load(self): """Load cached settings from JSON file `self._filepath`.""" data = {} with LockFile(self._filepath, 0.5): - with open(self._filepath, 'rb') as fp: + with open(self._filepath, "r") as fp: data.update(json.load(fp)) self._original = deepcopy(data) @@ -858,9 +855,8 @@ def save(self): data.update(self) with LockFile(self._filepath, 0.5): - with atomic_writer(self._filepath, 'wb') as fp: - json.dump(data, fp, sort_keys=True, indent=2, - encoding='utf-8') + with atomic_writer(self._filepath, "w") as fp: + json.dump(data, fp, sort_keys=True, indent=2) # dict methods def __setitem__(self, key, value): @@ -936,11 +932,20 @@ class Workflow(object): # won't want to change this item_class = Item - def __init__(self, default_settings=None, update_settings=None, - input_encoding='utf-8', normalization='NFC', - capture_args=True, libraries=None, - help_url=None): + def __init__( + self, + default_settings=None, + update_settings=None, + input_encoding="utf-8", + normalization="NFC", + capture_args=True, + libraries=None, + help_url=None, + ): """Create new :class:`Workflow` object.""" + + serializer = "pickle" + self._default_settings = default_settings or {} self._update_settings = update_settings or {} self._input_encoding = input_encoding @@ -953,8 +958,8 @@ def __init__(self, default_settings=None, update_settings=None, self._bundleid = None self._debugging = None self._name = None - self._cache_serializer = 'cpickle' - self._data_serializer = 'cpickle' + self._cache_serializer = serializer + self._data_serializer = serializer self._info = None self._info_loaded = False self._logger = None @@ -969,7 +974,7 @@ def __init__(self, default_settings=None, update_settings=None, #: Prefix for all magic arguments. 
#: The default value is ``workflow:`` so keyword #: ``config`` would match user query ``workflow:config``. - self.magic_prefix = 'workflow:' + self.magic_prefix = "workflow:" #: Mapping of available magic arguments. The built-in magic #: arguments are registered by default. To add your own magic arguments #: (or override built-ins), add a key:value pair where the key is @@ -996,8 +1001,9 @@ def __init__(self, default_settings=None, update_settings=None, @property def alfred_version(self): """Alfred version as :class:`~workflow.update.Version` object.""" - from update import Version - return Version(self.alfred_env.get('version')) + from .update import Version + + return Version(self.alfred_env.get("version")) @property def alfred_env(self): @@ -1052,26 +1058,30 @@ def alfred_env(self): data = {} for key in ( - 'debug', - 'preferences', - 'preferences_localhash', - 'theme', - 'theme_background', - 'theme_subtext', - 'version', - 'version_build', - 'workflow_bundleid', - 'workflow_cache', - 'workflow_data', - 'workflow_name', - 'workflow_uid', - 'workflow_version'): - - value = os.getenv('alfred_' + key, '') + "debug", + "preferences", + "preferences_localhash", + "theme", + "theme_background", + "theme_subtext", + "version", + "version_build", + "workflow_bundleid", + "workflow_cache", + "workflow_data", + "workflow_name", + "workflow_uid", + "workflow_version", + ): + + value = os.getenv("alfred_" + key, "") if value: - if key in ('debug', 'version_build', 'theme_subtext'): - value = int(value) + if key in ("debug", "version_build", "theme_subtext"): + if value.isdigit(): + value = int(value) + else: + value = False else: value = self.decode(value) @@ -1097,10 +1107,10 @@ def bundleid(self): """ if not self._bundleid: - if self.alfred_env.get('workflow_bundleid'): - self._bundleid = self.alfred_env.get('workflow_bundleid') + if self.alfred_env.get("workflow_bundleid"): + self._bundleid = self.alfred_env.get("workflow_bundleid") else: - self._bundleid = 
unicode(self.info['bundleid'], 'utf-8') + self._bundleid = self.info["bundleid"] return self._bundleid @@ -1112,7 +1122,9 @@ def debugging(self): :rtype: ``bool`` """ - return self.alfred_env.get('debug') == 1 + return bool( + self.alfred_env.get("debug") == 1 or os.environ.get("PYTEST_RUNNING") + ) @property def name(self): @@ -1123,10 +1135,10 @@ def name(self): """ if not self._name: - if self.alfred_env.get('workflow_name'): - self._name = self.decode(self.alfred_env.get('workflow_name')) + if self.alfred_env.get("workflow_name"): + self._name = self.decode(self.alfred_env.get("workflow_name")) else: - self._name = self.decode(self.info['name']) + self._name = self.decode(self.info["name"]) return self._name @@ -1151,27 +1163,28 @@ def version(self): version = None # environment variable has priority - if self.alfred_env.get('workflow_version'): - version = self.alfred_env['workflow_version'] + if self.alfred_env.get("workflow_version"): + version = self.alfred_env["workflow_version"] # Try `update_settings` elif self._update_settings: - version = self._update_settings.get('version') + version = self._update_settings.get("version") # `version` file if not version: - filepath = self.workflowfile('version') + filepath = self.workflowfile("version") if os.path.exists(filepath): - with open(filepath, 'rb') as fileobj: + with open(filepath, "r") as fileobj: version = fileobj.read() # info.plist if not version: - version = self.info.get('version') + version = self.info.get("version") if version: - from update import Version + from .update import Version + version = Version(version) self._version = version @@ -1204,7 +1217,7 @@ def args(self): # Handle magic args if len(args) and self._capture_args: for name in self.magic_arguments: - key = '{0}{1}'.format(self.magic_prefix, name) + key = "{0}{1}".format(self.magic_prefix, name) if key in args: msg = self.magic_arguments[name]() @@ -1235,8 +1248,8 @@ def cachedir(self): unicode: full path to workflow's cache directory 
""" - if self.alfred_env.get('workflow_cache'): - dirpath = self.alfred_env.get('workflow_cache') + if self.alfred_env.get("workflow_cache"): + dirpath = self.alfred_env.get("workflow_cache") else: dirpath = self._default_cachedir @@ -1248,9 +1261,10 @@ def _default_cachedir(self): """Alfred 2's default cache directory.""" return os.path.join( os.path.expanduser( - '~/Library/Caches/com.runningwithcrayons.Alfred-2/' - 'Workflow Data/'), - self.bundleid) + "~/Library/Caches/com.runningwithcrayons.Alfred-2/" "Workflow Data/" + ), + self.bundleid, + ) @property def datadir(self): @@ -1271,8 +1285,8 @@ def datadir(self): unicode: full path to workflow data directory """ - if self.alfred_env.get('workflow_data'): - dirpath = self.alfred_env.get('workflow_data') + if self.alfred_env.get("workflow_data"): + dirpath = self.alfred_env.get("workflow_data") else: dirpath = self._default_datadir @@ -1282,9 +1296,10 @@ def datadir(self): @property def _default_datadir(self): """Alfred 2's default data directory.""" - return os.path.join(os.path.expanduser( - '~/Library/Application Support/Alfred 2/Workflow Data/'), - self.bundleid) + return os.path.join( + os.path.expanduser("~/Library/Application Support/Alfred 2/Workflow Data/"), + self.bundleid, + ) @property def workflowdir(self): @@ -1299,8 +1314,9 @@ def workflowdir(self): # the library is in. 
CWD will be the workflow root if # a workflow is being run in Alfred candidates = [ - os.path.abspath(os.getcwdu()), - os.path.dirname(os.path.abspath(os.path.dirname(__file__)))] + os.path.abspath(os.getcwd()), + os.path.dirname(os.path.abspath(os.path.dirname(__file__))), + ] # climb the directory tree until we find `info.plist` for dirpath in candidates: @@ -1309,11 +1325,11 @@ def workflowdir(self): dirpath = self.decode(dirpath) while True: - if os.path.exists(os.path.join(dirpath, 'info.plist')): + if os.path.exists(os.path.join(dirpath, "info.plist")): self._workflowdir = dirpath break - elif dirpath == '/': + elif dirpath == "/": # no `info.plist` found break @@ -1376,7 +1392,7 @@ def logfile(self): :rtype: ``unicode`` """ - return self.cachefile('%s.log' % self.bundleid) + return self.cachefile("%s.log" % self.bundleid) @property def logger(self): @@ -1394,7 +1410,7 @@ def logger(self): return self._logger # Initialise new logger and optionally handlers - logger = logging.getLogger('') + logger = logging.getLogger("") # Only add one set of handlers # Exclude from coverage, as pytest will have configured the @@ -1402,14 +1418,13 @@ def logger(self): if not len(logger.handlers): # pragma: no cover fmt = logging.Formatter( - '%(asctime)s %(filename)s:%(lineno)s' - ' %(levelname)-8s %(message)s', - datefmt='%H:%M:%S') + "%(asctime)s %(filename)s:%(lineno)s" " %(levelname)-8s %(message)s", + datefmt="%H:%M:%S", + ) logfile = logging.handlers.RotatingFileHandler( - self.logfile, - maxBytes=1024 * 1024, - backupCount=1) + self.logfile, maxBytes=1024 * 1024, backupCount=1 + ) logfile.setFormatter(fmt) logger.addHandler(logfile) @@ -1445,7 +1460,7 @@ def settings_path(self): """ if not self._settings_path: - self._settings_path = self.datafile('settings.json') + self._settings_path = self.datafile("settings.json") return self._settings_path @property @@ -1465,9 +1480,8 @@ def settings(self): """ if not self._settings: - self.logger.debug('reading settings from %s', 
self.settings_path) - self._settings = Settings(self.settings_path, - self._default_settings) + self.logger.debug("reading settings from %s", self.settings_path) + self._settings = Settings(self.settings_path, self._default_settings) return self._settings @property @@ -1506,10 +1520,11 @@ def cache_serializer(self, serializer_name): """ if manager.serializer(serializer_name) is None: raise ValueError( - 'Unknown serializer : `{0}`. Register your serializer ' - 'with `manager` first.'.format(serializer_name)) + "Unknown serializer : `{0}`. Register your serializer " + "with `manager` first.".format(serializer_name) + ) - self.logger.debug('default cache serializer: %s', serializer_name) + self.logger.debug("default cache serializer: %s", serializer_name) self._cache_serializer = serializer_name @@ -1548,10 +1563,11 @@ def data_serializer(self, serializer_name): """ if manager.serializer(serializer_name) is None: raise ValueError( - 'Unknown serializer : `{0}`. Register your serializer ' - 'with `manager` first.'.format(serializer_name)) + "Unknown serializer : `{0}`. Register your serializer " + "with `manager` first.".format(serializer_name) + ) - self.logger.debug('default data serializer: %s', serializer_name) + self.logger.debug("default data serializer: %s", serializer_name) self._data_serializer = serializer_name @@ -1565,39 +1581,40 @@ def stored_data(self, name): :param name: name of datastore """ - metadata_path = self.datafile('.{0}.alfred-workflow'.format(name)) + metadata_path = self.datafile(".{0}.alfred-workflow".format(name)) if not os.path.exists(metadata_path): - self.logger.debug('no data stored for `%s`', name) + self.logger.debug("no data stored for `%s`", name) return None - with open(metadata_path, 'rb') as file_obj: + with open(metadata_path, "r") as file_obj: serializer_name = file_obj.read().strip() serializer = manager.serializer(serializer_name) if serializer is None: raise ValueError( - 'Unknown serializer `{0}`. 
Register a corresponding ' - 'serializer with `manager.register()` ' - 'to load this data.'.format(serializer_name)) + "Unknown serializer `{0}`. Register a corresponding " + "serializer with `manager.register()` " + "to load this data.".format(serializer_name) + ) - self.logger.debug('data `%s` stored as `%s`', name, serializer_name) + self.logger.debug("data `%s` stored as `%s`", name, serializer_name) - filename = '{0}.{1}'.format(name, serializer_name) + filename = "{0}.{1}".format(name, serializer_name) data_path = self.datafile(filename) if not os.path.exists(data_path): - self.logger.debug('no data stored: %s', name) + self.logger.debug("no data stored: %s", name) if os.path.exists(metadata_path): os.unlink(metadata_path) return None - with open(data_path, 'rb') as file_obj: + with open(data_path, "rb") as file_obj: data = serializer.load(file_obj) - self.logger.debug('stored data loaded: %s', data_path) + self.logger.debug("stored data loaded: %s", data_path) return data @@ -1626,47 +1643,52 @@ def delete_paths(paths): for path in paths: if os.path.exists(path): os.unlink(path) - self.logger.debug('deleted data file: %s', path) + self.logger.debug("deleted data file: %s", path) serializer_name = serializer or self.data_serializer # In order for `stored_data()` to be able to load data stored with # an arbitrary serializer, yet still have meaningful file extensions, # the format (i.e. extension) is saved to an accompanying file - metadata_path = self.datafile('.{0}.alfred-workflow'.format(name)) - filename = '{0}.{1}'.format(name, serializer_name) + metadata_path = self.datafile(".{0}.alfred-workflow".format(name)) + filename = "{0}.{1}".format(name, serializer_name) data_path = self.datafile(filename) if data_path == self.settings_path: raise ValueError( - 'Cannot save data to' + - '`{0}` with format `{1}`. '.format(name, serializer_name) + - "This would overwrite Alfred-Workflow's settings file.") + "Cannot save data to" + + "`{0}` with format `{1}`. 
".format(name, serializer_name) + + "This would overwrite Alfred-Workflow's settings file." + ) serializer = manager.serializer(serializer_name) if serializer is None: raise ValueError( - 'Invalid serializer `{0}`. Register your serializer with ' - '`manager.register()` first.'.format(serializer_name)) + "Invalid serializer `{0}`. Register your serializer with " + "`manager.register()` first.".format(serializer_name) + ) if data is None: # Delete cached data delete_paths((metadata_path, data_path)) return + if isinstance(data, str): + data = bytearray(data, "utf-8") + # Ensure write is not interrupted by SIGTERM @uninterruptible def _store(): # Save file extension - with atomic_writer(metadata_path, 'wb') as file_obj: + with atomic_writer(metadata_path, "w") as file_obj: file_obj.write(serializer_name) - with atomic_writer(data_path, 'wb') as file_obj: + with serializer.atomic_writer(data_path, "w") as file_obj: serializer.dump(data, file_obj) _store() - self.logger.debug('saved data: %s', data_path) + self.logger.debug("saved data: %s", data_path) def cached_data(self, name, data_func=None, max_age=60): """Return cached data if younger than ``max_age`` seconds. 
@@ -1686,13 +1708,13 @@ def cached_data(self, name, data_func=None, max_age=60): """ serializer = manager.serializer(self.cache_serializer) - cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer)) + cache_path = self.cachefile("%s.%s" % (name, self.cache_serializer)) age = self.cached_data_age(name) if (age < max_age or max_age == 0) and os.path.exists(cache_path): - with open(cache_path, 'rb') as file_obj: - self.logger.debug('loading cached data: %s', cache_path) + with open(cache_path, "rb") as file_obj: + self.logger.debug("loading cached data: %s", cache_path) return serializer.load(file_obj) if not data_func: @@ -1716,18 +1738,18 @@ def cache_data(self, name, data): """ serializer = manager.serializer(self.cache_serializer) - cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer)) + cache_path = self.cachefile("%s.%s" % (name, self.cache_serializer)) if data is None: if os.path.exists(cache_path): os.unlink(cache_path) - self.logger.debug('deleted cache file: %s', cache_path) + self.logger.debug("deleted cache file: %s", cache_path) return - with atomic_writer(cache_path, 'wb') as file_obj: + with serializer.atomic_writer(cache_path, "w") as file_obj: serializer.dump(data, file_obj) - self.logger.debug('cached data: %s', cache_path) + self.logger.debug("cached data: %s", cache_path) def cached_data_fresh(self, name, max_age): """Whether cache `name` is less than `max_age` seconds old. 
@@ -1755,16 +1777,25 @@ def cached_data_age(self, name): :rtype: ``int`` """ - cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer)) + cache_path = self.cachefile("%s.%s" % (name, self.cache_serializer)) if not os.path.exists(cache_path): return 0 return time.time() - os.stat(cache_path).st_mtime - def filter(self, query, items, key=lambda x: x, ascending=False, - include_score=False, min_score=0, max_results=0, - match_on=MATCH_ALL, fold_diacritics=True): + def filter( + self, + query, + items, + key=lambda x: x, + ascending=False, + include_score=False, + min_score=0, + max_results=0, + match_on=MATCH_ALL, + fold_diacritics=True, + ): """Fuzzy search filter. Returns list of ``items`` that match ``query``. ``query`` is case-insensitive. Any item that does not contain the @@ -1873,23 +1904,23 @@ def filter(self, query, items, key=lambda x: x, ascending=False, return items # Use user override if there is one - fold_diacritics = self.settings.get('__workflow_diacritic_folding', - fold_diacritics) + fold_diacritics = self.settings.get( + "__workflow_diacritic_folding", fold_diacritics + ) results = [] for item in items: skip = False score = 0 - words = [s.strip() for s in query.split(' ')] + words = [s.strip() for s in query.split(" ")] value = key(item).strip() - if value == '': + if value == "": continue for word in words: - if word == '': + if word == "": continue - s, rule = self._filter_item(value, word, match_on, - fold_diacritics) + s, rule = self._filter_item(value, word, match_on, fold_diacritics) if not s: # Skip items that don't match part of the query skip = True @@ -1902,8 +1933,9 @@ def filter(self, query, items, key=lambda x: x, ascending=False, # use "reversed" `score` (i.e. highest becomes lowest) and # `value` as sort key. 
This means items with the same score # will be sorted in alphabetical not reverse alphabetical order - results.append(((100.0 / score, value.lower(), score), - (item, score, rule))) + results.append( + ((100.0 / score, value.lower(), score), (item, score, rule)) + ) # sort on keys, then discard the keys results.sort(reverse=ascending) @@ -1950,7 +1982,7 @@ def _filter_item(self, value, query, match_on, fold_diacritics): # query matches capitalised letters in item, # e.g. of = OmniFocus if match_on & MATCH_CAPITALS: - initials = ''.join([c for c in value if c in INITIALS]) + initials = "".join([c for c in value if c in INITIALS]) if initials.lower().startswith(query): score = 100.0 - (len(initials) / len(query)) @@ -1958,13 +1990,15 @@ def _filter_item(self, value, query, match_on, fold_diacritics): # split the item into "atoms", i.e. words separated by # spaces or other non-word characters - if (match_on & MATCH_ATOM or - match_on & MATCH_INITIALS_CONTAIN or - match_on & MATCH_INITIALS_STARTSWITH): + if ( + match_on & MATCH_ATOM + or match_on & MATCH_INITIALS_CONTAIN + or match_on & MATCH_INITIALS_STARTSWITH + ): atoms = [s.lower() for s in split_on_delimiters(value)] # print('atoms : %s --> %s' % (value, atoms)) # initials of the atoms - initials = ''.join([s[0] for s in atoms if s]) + initials = "".join([s[0] for s in atoms if s]) if match_on & MATCH_ATOM: # is `query` one of the atoms in item? @@ -1979,16 +2013,14 @@ def _filter_item(self, value, query, match_on, fold_diacritics): # atoms, e.g. ``himym`` matches "How I Met Your Mother" # *and* "how i met your mother" (the ``capitals`` rule only # matches the former) - if (match_on & MATCH_INITIALS_STARTSWITH and - initials.startswith(query)): + if match_on & MATCH_INITIALS_STARTSWITH and initials.startswith(query): score = 100.0 - (len(initials) / len(query)) return (score, MATCH_INITIALS_STARTSWITH) # `query` is a substring of initials, e.g. 
``doh`` matches # "The Dukes of Hazzard" - elif (match_on & MATCH_INITIALS_CONTAIN and - query in initials): + elif match_on & MATCH_INITIALS_CONTAIN and query in initials: score = 95.0 - (len(initials) / len(query)) return (score, MATCH_INITIALS_CONTAIN) @@ -2005,8 +2037,9 @@ def _filter_item(self, value, query, match_on, fold_diacritics): search = self._search_for_query(query) match = search(value) if match: - score = 100.0 / ((1 + match.start()) * - (match.end() - match.start() + 1)) + score = 100.0 / ( + (1 + match.start()) * (match.end() - match.start() + 1) + ) return (score, MATCH_ALLCHARS) @@ -2021,8 +2054,8 @@ def _search_for_query(self, query): pattern = [] for c in query: # pattern.append('[^{0}]*{0}'.format(re.escape(c))) - pattern.append('.*?{0}'.format(re.escape(c))) - pattern = ''.join(pattern) + pattern.append(".*?{0}".format(re.escape(c))) + pattern = "".join(pattern) search = re.compile(pattern, re.IGNORECASE).search self._search_pattern_cache[query] = search @@ -2051,16 +2084,17 @@ def run(self, func, text_errors=False): start = time.time() # Write to debugger to ensure "real" output starts on a new line - print('.', file=sys.stderr) + print(".", file=sys.stderr) # Call workflow's entry function/method within a try-except block # to catch any errors and display an error message in Alfred try: if self.version: - self.logger.debug('---------- %s (%s) ----------', - self.name, self.version) + self.logger.debug( + "---------- %s (%s) ----------", self.name, self.version + ) else: - self.logger.debug('---------- %s ----------', self.name) + self.logger.debug("---------- %s ----------", self.name) # Run update check if configured for self-updates. 
# This call has to go in the `run` try-except block, as it will @@ -2079,11 +2113,11 @@ def run(self, func, text_errors=False): except Exception as err: self.logger.exception(err) if self.help_url: - self.logger.info('for assistance, see: %s', self.help_url) + self.logger.info("for assistance, see: %s", self.help_url) if not sys.stdout.isatty(): # Show error in Alfred if text_errors: - print(unicode(err).encode('utf-8'), end='') + print(str(err).encode("utf-8"), end="") else: self._items = [] if self._name: @@ -2092,24 +2126,37 @@ def run(self, func, text_errors=False): name = self._bundleid else: # pragma: no cover name = os.path.dirname(__file__) - self.add_item("Error in workflow '%s'" % name, - unicode(err), - icon=ICON_ERROR) + self.add_item( + "Error in workflow '%s'" % name, str(err), icon=ICON_ERROR + ) self.send_feedback() return 1 finally: - self.logger.debug('---------- finished in %0.3fs ----------', - time.time() - start) + self.logger.debug( + "---------- finished in %0.3fs ----------", time.time() - start + ) return 0 # Alfred feedback methods ------------------------------------------ - def add_item(self, title, subtitle='', modifier_subtitles=None, arg=None, - autocomplete=None, valid=False, uid=None, icon=None, - icontype=None, type=None, largetext=None, copytext=None, - quicklookurl=None): + def add_item( + self, + title, + subtitle="", + modifier_subtitles=None, + arg=None, + autocomplete=None, + valid=False, + uid=None, + icon=None, + icontype=None, + type=None, + largetext=None, + copytext=None, + quicklookurl=None, + ): """Add an item to be output to Alfred. :param title: Title shown in Alfred @@ -2167,19 +2214,31 @@ def add_item(self, title, subtitle='', modifier_subtitles=None, arg=None, edit it or do something with it other than send it to Alfred. 
""" - item = self.item_class(title, subtitle, modifier_subtitles, arg, - autocomplete, valid, uid, icon, icontype, type, - largetext, copytext, quicklookurl) + item = self.item_class( + title, + subtitle, + modifier_subtitles, + arg, + autocomplete, + valid, + uid, + icon, + icontype, + type, + largetext, + copytext, + quicklookurl, + ) self._items.append(item) return item def send_feedback(self): """Print stored items to console/Alfred as XML.""" - root = ET.Element('items') + root = ET.Element("items") for item in self._items: root.append(item.elem) sys.stdout.write('\n') - sys.stdout.write(ET.tostring(root).encode('utf-8')) + sys.stdout.write(ET.tostring(root, encoding="unicode")) sys.stdout.flush() #################################################################### @@ -2196,7 +2255,7 @@ def first_run(self): """ if not self.version: - raise ValueError('No workflow version set') + raise ValueError("No workflow version set") if not self.last_version_run: return True @@ -2215,14 +2274,15 @@ def last_version_run(self): """ if self._last_version_run is UNSET: - version = self.settings.get('__workflow_last_version') + version = self.settings.get("__workflow_last_version") if version: - from update import Version + from .update import Version + version = Version(version) self._last_version_run = version - self.logger.debug('last run version: %s', self._last_version_run) + self.logger.debug("last run version: %s", self._last_version_run) return self._last_version_run @@ -2239,19 +2299,19 @@ def set_last_version(self, version=None): """ if not version: if not self.version: - self.logger.warning( - "Can't save last version: workflow has no version") + self.logger.warning("Can't save last version: workflow has no version") return False version = self.version - if isinstance(version, basestring): - from update import Version + if isinstance(version, str): + from .update import Version + version = Version(version) - self.settings['__workflow_last_version'] = str(version) + 
self.settings["__workflow_last_version"] = str(version) - self.logger.debug('set last run version: %s', version) + self.logger.debug("set last run version: %s", version) return True @@ -2267,16 +2327,16 @@ def update_available(self): :returns: ``True`` if an update is available, else ``False`` """ - key = '__workflow_latest_version' + key = "__workflow_latest_version" # Create a new workflow object to ensure standard serialiser # is used (update.py is called without the user's settings) status = Workflow().cached_data(key, max_age=0) # self.logger.debug('update status: %r', status) - if not status or not status.get('available'): + if not status or not status.get("available"): return False - return status['available'] + return status["available"] @property def prereleases(self): @@ -2289,10 +2349,10 @@ def prereleases(self): ``False``. """ - if self._update_settings.get('prereleases'): + if self._update_settings.get("prereleases"): return True - return self.settings.get('__workflow_prereleases') or False + return self.settings.get("__workflow_prereleases") or False def check_update(self, force=False): """Call update script if it's time to check for a new release. 
@@ -2309,38 +2369,34 @@ def check_update(self, force=False): :type force: ``Boolean`` """ - key = '__workflow_latest_version' - frequency = self._update_settings.get('frequency', - DEFAULT_UPDATE_FREQUENCY) + key = "__workflow_latest_version" + frequency = self._update_settings.get("frequency", DEFAULT_UPDATE_FREQUENCY) - if not force and not self.settings.get('__workflow_autoupdate', True): - self.logger.debug('Auto update turned off by user') + if not force and not self.settings.get("__workflow_autoupdate", True): + self.logger.debug("Auto update turned off by user") return # Check for new version if it's time - if (force or not self.cached_data_fresh(key, frequency * 86400)): - - repo = self._update_settings['github_slug'] + if force or not self.cached_data_fresh(key, frequency * 86400): + repo = self._update_settings["github_slug"] # version = self._update_settings['version'] version = str(self.version) - from background import run_in_background + from .background import run_in_background # update.py is adjacent to this file - update_script = os.path.join(os.path.dirname(__file__), - b'update.py') - - cmd = ['/usr/bin/python', update_script, 'check', repo, version] + update_script = os.path.join(os.path.dirname(__file__), "update.py") + cmd = [sys.executable, update_script, "check", repo, version] if self.prereleases: - cmd.append('--prereleases') + cmd.append("--prereleases") - self.logger.info('checking for update ...') + self.logger.info("checking for update ...") - run_in_background('__workflow_update_check', cmd) + run_in_background("__workflow_update_check", cmd) else: - self.logger.debug('update check not due') + self.logger.debug("update check not due") def start_update(self): """Check for update and download and install new workflow file. @@ -2354,28 +2410,27 @@ def start_update(self): installed, else ``False`` """ - import update + from . 
import update - repo = self._update_settings['github_slug'] + repo = self._update_settings["github_slug"] # version = self._update_settings['version'] version = str(self.version) if not update.check_update(repo, version, self.prereleases): return False - from background import run_in_background + from .background import run_in_background # update.py is adjacent to this file - update_script = os.path.join(os.path.dirname(__file__), - b'update.py') + update_script = os.path.join(os.path.dirname(__file__), "update.py") - cmd = ['/usr/bin/python', update_script, 'install', repo, version] + cmd = [sys.executable, update_script, "install", repo, version] if self.prereleases: - cmd.append('--prereleases') + cmd.append("--prereleases") - self.logger.debug('downloading update ...') - run_in_background('__workflow_update_install', cmd) + self.logger.debug("downloading update ...") + run_in_background("__workflow_update_install", cmd) return True @@ -2406,22 +2461,24 @@ def save_password(self, account, password, service=None): service = self.bundleid try: - self._call_security('add-generic-password', service, account, - '-w', password) - self.logger.debug('saved password : %s:%s', service, account) + self._call_security( + "add-generic-password", service, account, "-w", password + ) + self.logger.debug("saved password : %s:%s", service, account) except PasswordExists: - self.logger.debug('password exists : %s:%s', service, account) + self.logger.debug("password exists : %s:%s", service, account) current_password = self.get_password(account, service) if current_password == password: - self.logger.debug('password unchanged') + self.logger.debug("password unchanged") else: self.delete_password(account, service) - self._call_security('add-generic-password', service, - account, '-w', password) - self.logger.debug('save_password : %s:%s', service, account) + self._call_security( + "add-generic-password", service, account, "-w", password + ) + self.logger.debug("save_password : 
%s:%s", service, account) def get_password(self, account, service=None): """Retrieve the password saved at ``service/account``. @@ -2441,24 +2498,23 @@ def get_password(self, account, service=None): if not service: service = self.bundleid - output = self._call_security('find-generic-password', service, - account, '-g') + output = self._call_security("find-generic-password", service, account, "-g") # Parsing of `security` output is adapted from python-keyring # by Jason R. Coombs # https://pypi.python.org/pypi/keyring m = re.search( - r'password:\s*(?:0x(?P[0-9A-F]+)\s*)?(?:"(?P.*)")?', - output) + r'password:\s*(?:0x(?P[0-9A-F]+)\s*)?(?:"(?P.*)")?', output + ) if m: groups = m.groupdict() - h = groups.get('hex') - password = groups.get('pw') + h = groups.get("hex") + password = groups.get("pw") if h: - password = unicode(binascii.unhexlify(h), 'utf-8') + password = str(binascii.unhexlify(h), "utf-8") - self.logger.debug('got password : %s:%s', service, account) + self.logger.debug("got password : %s:%s", service, account) return password @@ -2478,15 +2534,15 @@ def delete_password(self, account, service=None): if not service: service = self.bundleid - self._call_security('delete-generic-password', service, account) + self._call_security("delete-generic-password", service, account) - self.logger.debug('deleted password : %s:%s', service, account) + self.logger.debug("deleted password : %s:%s", service, account) #################################################################### # Methods for workflow:* magic args #################################################################### - def _register_default_magic(self): + def _register_default_magic(self): # noqa: C901 """Register the built-in magic arguments.""" # TODO: refactor & simplify # Wrap callback and message with callable @@ -2497,91 +2553,98 @@ def wrapper(): return wrapper - self.magic_arguments['delcache'] = callback(self.clear_cache, - 'Deleted workflow cache') - self.magic_arguments['deldata'] = 
callback(self.clear_data, - 'Deleted workflow data') - self.magic_arguments['delsettings'] = callback( - self.clear_settings, 'Deleted workflow settings') - self.magic_arguments['reset'] = callback(self.reset, - 'Reset workflow') - self.magic_arguments['openlog'] = callback(self.open_log, - 'Opening workflow log file') - self.magic_arguments['opencache'] = callback( - self.open_cachedir, 'Opening workflow cache directory') - self.magic_arguments['opendata'] = callback( - self.open_datadir, 'Opening workflow data directory') - self.magic_arguments['openworkflow'] = callback( - self.open_workflowdir, 'Opening workflow directory') - self.magic_arguments['openterm'] = callback( - self.open_terminal, 'Opening workflow root directory in Terminal') + self.magic_arguments["delcache"] = callback( + self.clear_cache, "Deleted workflow cache" + ) + self.magic_arguments["deldata"] = callback( + self.clear_data, "Deleted workflow data" + ) + self.magic_arguments["delsettings"] = callback( + self.clear_settings, "Deleted workflow settings" + ) + self.magic_arguments["reset"] = callback(self.reset, "Reset workflow") + self.magic_arguments["openlog"] = callback( + self.open_log, "Opening workflow log file" + ) + self.magic_arguments["opencache"] = callback( + self.open_cachedir, "Opening workflow cache directory" + ) + self.magic_arguments["opendata"] = callback( + self.open_datadir, "Opening workflow data directory" + ) + self.magic_arguments["openworkflow"] = callback( + self.open_workflowdir, "Opening workflow directory" + ) + self.magic_arguments["openterm"] = callback( + self.open_terminal, "Opening workflow root directory in Terminal" + ) # Diacritic folding def fold_on(): - self.settings['__workflow_diacritic_folding'] = True - return 'Diacritics will always be folded' + self.settings["__workflow_diacritic_folding"] = True + return "Diacritics will always be folded" def fold_off(): - self.settings['__workflow_diacritic_folding'] = False - return 'Diacritics will never be 
folded' + self.settings["__workflow_diacritic_folding"] = False + return "Diacritics will never be folded" def fold_default(): - if '__workflow_diacritic_folding' in self.settings: - del self.settings['__workflow_diacritic_folding'] - return 'Diacritics folding reset' + if "__workflow_diacritic_folding" in self.settings: + del self.settings["__workflow_diacritic_folding"] + return "Diacritics folding reset" - self.magic_arguments['foldingon'] = fold_on - self.magic_arguments['foldingoff'] = fold_off - self.magic_arguments['foldingdefault'] = fold_default + self.magic_arguments["foldingon"] = fold_on + self.magic_arguments["foldingoff"] = fold_off + self.magic_arguments["foldingdefault"] = fold_default # Updates def update_on(): - self.settings['__workflow_autoupdate'] = True - return 'Auto update turned on' + self.settings["__workflow_autoupdate"] = True + return "Auto update turned on" def update_off(): - self.settings['__workflow_autoupdate'] = False - return 'Auto update turned off' + self.settings["__workflow_autoupdate"] = False + return "Auto update turned off" def prereleases_on(): - self.settings['__workflow_prereleases'] = True - return 'Prerelease updates turned on' + self.settings["__workflow_prereleases"] = True + return "Prerelease updates turned on" def prereleases_off(): - self.settings['__workflow_prereleases'] = False - return 'Prerelease updates turned off' + self.settings["__workflow_prereleases"] = False + return "Prerelease updates turned off" def do_update(): if self.start_update(): - return 'Downloading and installing update ...' + return "Downloading and installing update ..." 
else: - return 'No update available' + return "No update available" - self.magic_arguments['autoupdate'] = update_on - self.magic_arguments['noautoupdate'] = update_off - self.magic_arguments['prereleases'] = prereleases_on - self.magic_arguments['noprereleases'] = prereleases_off - self.magic_arguments['update'] = do_update + self.magic_arguments["autoupdate"] = update_on + self.magic_arguments["noautoupdate"] = update_off + self.magic_arguments["prereleases"] = prereleases_on + self.magic_arguments["noprereleases"] = prereleases_off + self.magic_arguments["update"] = do_update # Help def do_help(): if self.help_url: self.open_help() - return 'Opening workflow help URL in browser' + return "Opening workflow help URL in browser" else: - return 'Workflow has no help URL' + return "Workflow has no help URL" def show_version(): if self.version: - return 'Version: {0}'.format(self.version) + return "Version: {0}".format(self.version) else: - return 'This workflow has no version number' + return "This workflow has no version number" def list_magic(): """Display all available magic args in Alfred.""" isatty = sys.stderr.isatty() for name in sorted(self.magic_arguments.keys()): - if name == 'magic': + if name == "magic": continue arg = self.magic_prefix + name self.logger.debug(arg) @@ -2592,9 +2655,9 @@ def list_magic(): if not isatty: self.send_feedback() - self.magic_arguments['help'] = do_help - self.magic_arguments['magic'] = list_magic - self.magic_arguments['version'] = show_version + self.magic_arguments["help"] = do_help + self.magic_arguments["magic"] = list_magic + self.magic_arguments["version"] = show_version def clear_cache(self, filter_func=lambda f: True): """Delete all files in workflow's :attr:`cachedir`. 
@@ -2624,7 +2687,7 @@ def clear_settings(self): """Delete workflow's :attr:`settings_path`.""" if os.path.exists(self.settings_path): os.unlink(self.settings_path) - self.logger.debug('deleted : %r', self.settings_path) + self.logger.debug("deleted : %r", self.settings_path) def reset(self): """Delete workflow settings, cache and data. @@ -2639,30 +2702,29 @@ def reset(self): def open_log(self): """Open :attr:`logfile` in default app (usually Console.app).""" - subprocess.call(['open', self.logfile]) + subprocess.call(["open", self.logfile]) # nosec def open_cachedir(self): """Open the workflow's :attr:`cachedir` in Finder.""" - subprocess.call(['open', self.cachedir]) + subprocess.call(["open", self.cachedir]) # nosec def open_datadir(self): """Open the workflow's :attr:`datadir` in Finder.""" - subprocess.call(['open', self.datadir]) + subprocess.call(["open", self.datadir]) # nosec def open_workflowdir(self): """Open the workflow's :attr:`workflowdir` in Finder.""" - subprocess.call(['open', self.workflowdir]) + subprocess.call(["open", self.workflowdir]) # nosec def open_terminal(self): """Open a Terminal window at workflow's :attr:`workflowdir`.""" - subprocess.call(['open', '-a', 'Terminal', - self.workflowdir]) + subprocess.call(["open", "-a", "Terminal", self.workflowdir]) # nosec def open_help(self): """Open :attr:`help_url` in default browser.""" - subprocess.call(['open', self.help_url]) + subprocess.call(["open", self.help_url]) # nosec - return 'Opening workflow help URL in browser' + return "Opening workflow help URL in browser" #################################################################### # Helper methods @@ -2698,8 +2760,8 @@ def decode(self, text, encoding=None, normalization=None): """ encoding = encoding or self._input_encoding normalization = normalization or self._normalizsation - if not isinstance(text, unicode): - text = unicode(text, encoding) + if not isinstance(text, str): + text = str(text, encoding) return 
unicodedata.normalize(normalization, text) def fold_to_ascii(self, text): @@ -2717,9 +2779,8 @@ def fold_to_ascii(self, text): """ if isascii(text): return text - text = ''.join([ASCII_REPLACEMENTS.get(c, c) for c in text]) - return unicode(unicodedata.normalize('NFKD', - text).encode('ascii', 'ignore')) + text = "".join([ASCII_REPLACEMENTS.get(c, c) for c in text]) + return unicodedata.normalize("NFKD", text) def dumbify_punctuation(self, text): """Convert non-ASCII punctuation to closest ASCII equivalent. @@ -2739,7 +2800,7 @@ def dumbify_punctuation(self, text): if isascii(text): return text - text = ''.join([DUMB_PUNCTUATION.get(c, c) for c in text]) + text = "".join([DUMB_PUNCTUATION.get(c, c) for c in text]) return text def _delete_directory_contents(self, dirpath, filter_func): @@ -2761,12 +2822,13 @@ def _delete_directory_contents(self, dirpath, filter_func): shutil.rmtree(path) else: os.unlink(path) - self.logger.debug('deleted : %r', path) + self.logger.debug("deleted : %r", path) def _load_info_plist(self): """Load workflow info from ``info.plist``.""" # info.plist should be in the directory above this one - self._info = plistlib.readPlist(self.workflowfile('info.plist')) + with open(self.workflowfile("info.plist"), "rb") as file_obj: + self._info = plistlib.load(file_obj) self._info_loaded = True def _create(self, dirpath): @@ -2806,16 +2868,15 @@ def _call_security(self, action, service, account, *args): :rtype: `tuple` (`int`, ``unicode``) """ - cmd = ['security', action, '-s', service, '-a', account] + list(args) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + cmd = ["security", action, "-s", service, "-a", account] + list(args) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, _ = p.communicate() if p.returncode == 44: # password does not exist raise PasswordNotFound() elif p.returncode == 45: # password already exists raise PasswordExists() elif p.returncode > 0: - err = 
KeychainError('Unknown Keychain error : %s' % stdout) + err = KeychainError("Unknown Keychain error : %s" % stdout) err.retcode = p.returncode raise err - return stdout.strip().decode('utf-8') + return stdout.strip().decode("utf-8") diff --git a/workflow/workflow3.py b/workflow/workflow3.py index b92c4be..3a06e33 100644 --- a/workflow/workflow3.py +++ b/workflow/workflow3.py @@ -23,7 +23,6 @@ """ -from __future__ import print_function, unicode_literals, absolute_import import json import os @@ -50,12 +49,16 @@ class Variables(dict): information. Args: - arg (unicode, optional): Main output/``{query}``. + arg (unicode or list, optional): Main output/``{query}``. **variables: Workflow variables to set. + In Alfred 4.1+ and Alfred-Workflow 1.40+, ``arg`` may also be a + :class:`list` or :class:`tuple`. Attributes: - arg (unicode): Output value (``{query}``). + arg (unicode or list): Output value (``{query}``). + In Alfred 4.1+ and Alfred-Workflow 1.40+, ``arg`` may also be a + :class:`list` or :class:`tuple`. config (dict): Configuration for downstream workflow element. """ @@ -68,23 +71,23 @@ def __init__(self, arg=None, **variables): @property def obj(self): - """Return ``alfredworkflow`` `dict`.""" + """``alfredworkflow`` :class:`dict`.""" o = {} if self: d2 = {} - for k, v in self.items(): + for k, v in list(self.items()): d2[k] = v - o['variables'] = d2 + o["variables"] = d2 if self.config: - o['config'] = self.config + o["config"] = self.config if self.arg is not None: - o['arg'] = self.arg + o["arg"] = self.arg - return {'alfredworkflow': o} + return {"alfredworkflow": o} - def __unicode__(self): + def __str__(self): """Convert to ``alfredworkflow`` JSON object. Returns: @@ -92,22 +95,13 @@ def __unicode__(self): """ if not self and not self.config: - if self.arg: + if not self.arg: + return "" + if isinstance(self.arg, str): return self.arg - else: - return u'' return json.dumps(self.obj) - def __str__(self): - """Convert to ``alfredworkflow`` JSON object. 
- - Returns: - str: UTF-8 encoded ``alfredworkflow`` JSON object - - """ - return unicode(self).encode('utf-8') - class Modifier(object): """Modify :class:`Item3` arg/icon/variables when modifier key is pressed. @@ -149,8 +143,9 @@ class Modifier(object): """ - def __init__(self, key, subtitle=None, arg=None, valid=None, icon=None, - icontype=None): + def __init__( + self, key, subtitle=None, arg=None, valid=None, icon=None, icontype=None + ): """Create a new :class:`Modifier`. Don't use this class directly (as it won't be associated with any @@ -212,23 +207,23 @@ def obj(self): o = {} if self.subtitle is not None: - o['subtitle'] = self.subtitle + o["subtitle"] = self.subtitle if self.arg is not None: - o['arg'] = self.arg + o["arg"] = self.arg if self.valid is not None: - o['valid'] = self.valid + o["valid"] = self.valid if self.variables: - o['variables'] = self.variables + o["variables"] = self.variables if self.config: - o['config'] = self.config + o["config"] = self.config icon = self._icon() if icon: - o['icon'] = icon + o["icon"] = icon return o @@ -241,10 +236,10 @@ def _icon(self): """ icon = {} if self.icon is not None: - icon['path'] = self.icon + icon["path"] = self.icon if self.icontype is not None: - icon['type'] = self.icontype + icon["type"] = self.icontype return icon @@ -261,9 +256,22 @@ class Item3(object): """ - def __init__(self, title, subtitle='', arg=None, autocomplete=None, - match=None, valid=False, uid=None, icon=None, icontype=None, - type=None, largetext=None, copytext=None, quicklookurl=None): + def __init__( + self, + title, + subtitle="", + arg=None, + autocomplete=None, + match=None, + valid=False, + uid=None, + icon=None, + icontype=None, + type=None, + largetext=None, + copytext=None, + quicklookurl=None, + ): """Create a new :class:`Item3` object. 
Use same arguments as for @@ -314,8 +322,9 @@ def getvar(self, name, default=None): """ return self.variables.get(name, default) - def add_modifier(self, key, subtitle=None, arg=None, valid=None, icon=None, - icontype=None): + def add_modifier( + self, key, subtitle=None, arg=None, valid=None, icon=None, icontype=None + ): """Add alternative values for a modifier key. Args: @@ -328,6 +337,9 @@ def add_modifier(self, key, subtitle=None, arg=None, valid=None, icon=None, :meth:`Workflow.add_item() ` for valid values. + In Alfred 4.1+ and Alfred-Workflow 1.40+, ``arg`` may also be a + :class:`list` or :class:`tuple`. + Returns: Modifier: Configured :class:`Modifier`. @@ -350,50 +362,46 @@ def obj(self): """ # Required values - o = { - 'title': self.title, - 'subtitle': self.subtitle, - 'valid': self.valid, - } + o = {"title": self.title, "subtitle": self.subtitle, "valid": self.valid} # Optional values if self.arg is not None: - o['arg'] = self.arg + o["arg"] = self.arg if self.autocomplete is not None: - o['autocomplete'] = self.autocomplete + o["autocomplete"] = self.autocomplete if self.match is not None: - o['match'] = self.match + o["match"] = self.match if self.uid is not None: - o['uid'] = self.uid + o["uid"] = self.uid if self.type is not None: - o['type'] = self.type + o["type"] = self.type if self.quicklookurl is not None: - o['quicklookurl'] = self.quicklookurl + o["quicklookurl"] = self.quicklookurl if self.variables: - o['variables'] = self.variables + o["variables"] = self.variables if self.config: - o['config'] = self.config + o["config"] = self.config # Largetype and copytext text = self._text() if text: - o['text'] = text + o["text"] = text icon = self._icon() if icon: - o['icon'] = icon + o["icon"] = icon # Modifiers mods = self._modifiers() if mods: - o['mods'] = mods + o["mods"] = mods return o @@ -406,10 +414,10 @@ def _icon(self): """ icon = {} if self.icon is not None: - icon['path'] = self.icon + icon["path"] = self.icon if self.icontype is not 
None: - icon['type'] = self.icontype + icon["type"] = self.icontype return icon @@ -422,10 +430,10 @@ def _text(self): """ text = {} if self.largetext is not None: - text['largetype'] = self.largetext + text["largetype"] = self.largetext if self.copytext is not None: - text['copy'] = self.copytext + text["copy"] = self.copytext return text @@ -438,7 +446,7 @@ def _modifiers(self): """ if self.modifiers: mods = {} - for k, mod in self.modifiers.items(): + for k, mod in list(self.modifiers.items()): mods[k] = mod.obj return mods @@ -470,25 +478,27 @@ def __init__(self, **kwargs): self.variables = {} self._rerun = 0 # Get session ID from environment if present - self._session_id = os.getenv('_WF_SESSION_ID') or None + self._session_id = os.getenv("_WF_SESSION_ID") or None if self._session_id: - self.setvar('_WF_SESSION_ID', self._session_id) + self.setvar("_WF_SESSION_ID", self._session_id) @property def _default_cachedir(self): """Alfred 4's default cache directory.""" return os.path.join( os.path.expanduser( - '~/Library/Caches/com.runningwithcrayons.Alfred/' - 'Workflow Data/'), - self.bundleid) + "~/Library/Caches/com.runningwithcrayons.Alfred/" "Workflow Data/" + ), + self.bundleid, + ) @property def _default_datadir(self): """Alfred 4's default data directory.""" - return os.path.join(os.path.expanduser( - '~/Library/Application Support/Alfred/Workflow Data/'), - self.bundleid) + return os.path.join( + os.path.expanduser("~/Library/Application Support/Alfred/Workflow Data/"), + self.bundleid, + ) @property def rerun(self): @@ -517,8 +527,9 @@ def session_id(self): """ if not self._session_id: from uuid import uuid4 + self._session_id = uuid4().hex - self.setvar('_WF_SESSION_ID', self._session_id) + self.setvar("_WF_SESSION_ID", self._session_id) return self._session_id @@ -541,9 +552,11 @@ def setvar(self, name, value, persist=False): self.variables[name] = value if persist: from .util import set_config + set_config(name, value, self.bundleid) - 
self.logger.debug('saved variable %r with value %r to info.plist', - name, value) + self.logger.debug( + "saved variable %r with value %r to info.plist", name, value + ) def getvar(self, name, default=None): """Return value of workflow variable for ``name`` or ``default``. @@ -558,9 +571,22 @@ def getvar(self, name, default=None): """ return self.variables.get(name, default) - def add_item(self, title, subtitle='', arg=None, autocomplete=None, - valid=False, uid=None, icon=None, icontype=None, type=None, - largetext=None, copytext=None, quicklookurl=None, match=None): + def add_item( + self, + title, + subtitle="", + arg=None, + autocomplete=None, + valid=False, + uid=None, + icon=None, + icontype=None, + type=None, + largetext=None, + copytext=None, + quicklookurl=None, + match=None, + ): """Add an item to be output to Alfred. Args: @@ -568,6 +594,9 @@ def add_item(self, title, subtitle='', arg=None, autocomplete=None, turned on for your Script Filter, Alfred (version 3.5 and above) will filter against this field, not ``title``. + In Alfred 4.1+ and Alfred-Workflow 1.40+, ``arg`` may also be a + :class:`list` or :class:`tuple`. + See :meth:`Workflow.add_item() ` for the main documentation and other parameters. @@ -579,9 +608,21 @@ def add_item(self, title, subtitle='', arg=None, autocomplete=None, Item3: Alfred feedback item. 
""" - item = self.item_class(title, subtitle, arg, autocomplete, - match, valid, uid, icon, icontype, type, - largetext, copytext, quicklookurl) + item = self.item_class( + title, + subtitle, + arg, + autocomplete, + match, + valid, + uid, + icon, + icontype, + type, + largetext, + copytext, + quicklookurl, + ) # Add variables to child item item.variables.update(self.variables) @@ -592,7 +633,7 @@ def add_item(self, title, subtitle='', arg=None, autocomplete=None, @property def _session_prefix(self): """Filename prefix for current session.""" - return '_wfsess-{0}-'.format(self.session_id) + return "_wfsess-{0}-".format(self.session_id) def _mk_session_name(self, name): """New cache name/key based on session ID.""" @@ -662,11 +703,13 @@ def clear_session_cache(self, current=False): current session. """ + def _is_session_file(filename): if current: - return filename.startswith('_wfsess-') - return filename.startswith('_wfsess-') \ - and not filename.startswith(self._session_prefix) + return filename.startswith("_wfsess-") + return filename.startswith("_wfsess-") and not filename.startswith( + self._session_prefix + ) self.clear_cache(_is_session_file) @@ -682,14 +725,14 @@ def obj(self): for item in self._items: items.append(item.obj) - o = {'items': items} + o = {"items": items} if self.variables: - o['variables'] = self.variables + o["variables"] = self.variables if self.rerun: - o['rerun'] = self.rerun + o["rerun"] = self.rerun return o - def warn_empty(self, title, subtitle=u'', icon=None): + def warn_empty(self, title, subtitle="", icon=None): """Add a warning to feedback if there are no items. .. versionadded:: 1.31 @@ -717,5 +760,8 @@ def warn_empty(self, title, subtitle=u'', icon=None): def send_feedback(self): """Print stored items to console/Alfred as JSON.""" - json.dump(self.obj, sys.stdout) + if self.debugging: + json.dump(self.obj, sys.stdout, indent=2, separators=(",", ": ")) + else: + json.dump(self.obj, sys.stdout) sys.stdout.flush()