diff --git a/appveyor.yml b/appveyor.yml
index 25cb9454c..4b5e4fd38 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -8,7 +8,7 @@ environment:
   matrix:
     # Since appveyor is quite slow, we only use a single configuration
-    - PYTHON: "3.8"
+    - PYTHON: "3.6"
      ARCH: "64"
      CONDA_ENV: testenv
diff --git a/docs/source/user-guide/ir/ir-builder.rst b/docs/source/user-guide/ir/ir-builder.rst
index d8c3bc35f..78b067ec2 100644
--- a/docs/source/user-guide/ir/ir-builder.rst
+++ b/docs/source/user-guide/ir/ir-builder.rst
@@ -459,26 +459,44 @@ Memory
   *typ*. If *size* is not given, a stack slot for 1 value is allocated.
 
-* .. method:: IRBuilder.load(ptr, name='', align=None)
+* .. method:: IRBuilder.load(ptr, name='', align=None, volatile=False, atomic_ordering=None, sync_scope=None)
 
   Load value from pointer *ptr*. If *align* is passed, it should be a
   Python integer specifying the guaranteed pointer alignment.
+
+  If *volatile* is truthy, the load is marked as volatile.
 
-* .. method:: IRBuilder.store(value, ptr, align=None)
+  If both *atomic_ordering* and *align* are specified, the load is marked as
+  atomic. For an atomic load, a *sync_scope* may optionally be specified.
+
+* .. method:: IRBuilder.store(value, ptr, align=None, volatile=False, atomic_ordering=None, sync_scope=None)
 
   Store *value* to pointer *ptr*. If *align* is passed, it should be a
   Python integer specifying the guaranteed pointer alignment.
+
+  If *volatile* is truthy, the store is marked as volatile.
+
+  If both *atomic_ordering* and *align* are specified, the store is marked as
+  atomic. For an atomic store, a *sync_scope* may optionally be specified.
 
* .. method:: IRBuilder.load_atomic(ptr, ordering, align, name='')
 
+  .. deprecated:: 0.33.0
+
+     Use :func:`IRBuilder.load` with the `atomic_ordering` parameter instead.
+
   Load value from pointer *ptr* as an atomic operation with the given
   *ordering*. *align* must be a Python integer specifying the guaranteed
   pointer alignment.
 
* .. method:: IRBuilder.store_atomic(value, ptr, ordering, align)
 
+  .. deprecated:: 0.33.0
+
+     Use :func:`IRBuilder.store` with the `atomic_ordering` parameter instead.
+
   Store *value* to pointer *ptr* as an atomic operation with the given
   *ordering*. *align* must be a Python integer specifying the guaranteed
   pointer alignment.
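A minimal usage sketch of the new keyword arguments documented above, assuming a
build of llvmlite that includes this change; module and function names such as
"demo_fn" are illustrative only:

    from llvmlite import ir

    int32 = ir.IntType(32)
    module = ir.Module(name="demo")
    fnty = ir.FunctionType(ir.VoidType(), [])
    func = ir.Function(module, fnty, name="demo_fn")
    builder = ir.IRBuilder(func.append_basic_block(name="entry"))

    slot = builder.alloca(int32, name="slot")
    # Volatile store; emits roughly: store volatile i32 42, i32* %"slot", align 1
    builder.store(ir.Constant(int32, 42), slot, align=1, volatile=True)
    # Atomic load with an explicit synchronization scope; align is required
    # whenever atomic_ordering is given.
    builder.load(slot, name="val", align=4,
                 atomic_ordering="seq_cst", sync_scope="singlethread")
    builder.ret_void()

    # The emitted IR should contain: syncscope("singlethread") seq_cst, align 4
    print(module)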
diff --git a/llvmlite/ir/builder.py b/llvmlite/ir/builder.py
index bddf102a8..68cc1cf94 100644
--- a/llvmlite/ir/builder.py
+++ b/llvmlite/ir/builder.py
@@ -1,5 +1,6 @@
 import contextlib
 import functools
+import warnings
 
 from llvmlite.ir import instructions, types, values
 
@@ -723,7 +724,8 @@ def alloca(self, typ, size=None, name=''):
         self._insert(al)
         return al
 
-    def load(self, ptr, name='', align=None):
+    def load(self, ptr, name='', align=None, volatile=False,
+             atomic_ordering=None, sync_scope=None):
         """
         Load value from pointer, with optional guaranteed alignment:
             name = *ptr
@@ -731,12 +733,13 @@ def load(self, ptr, name='', align=None):
         if not isinstance(ptr.type, types.PointerType):
             msg = "cannot load from value of type %s (%r): not a pointer"
             raise TypeError(msg % (ptr.type, str(ptr)))
-        ld = instructions.LoadInstr(self.block, ptr, name)
-        ld.align = align
+        ld = instructions.LoadInstr(self.block, ptr, name, align,
+                                    volatile, atomic_ordering, sync_scope)
         self._insert(ld)
         return ld
 
-    def store(self, value, ptr, align=None):
+    def store(self, value, ptr, align=None, volatile=False,
+              atomic_ordering=None, sync_scope=None):
         """
         Store value to pointer, with optional guaranteed alignment:
             *ptr = name
@@ -747,8 +750,8 @@ def store(self, value, ptr, align=None):
         if ptr.type.pointee != value.type:
             raise TypeError("cannot store %s to %s: mismatching types"
                             % (value.type, ptr.type))
-        st = instructions.StoreInstr(self.block, value, ptr)
-        st.align = align
+        st = instructions.StoreInstr(self.block, value, ptr, align,
+                                     volatile, atomic_ordering, sync_scope)
         self._insert(st)
         return st
 
@@ -756,30 +759,35 @@ def load_atomic(self, ptr, ordering, align, name=''):
         """
         Load value from pointer, with optional guaranteed alignment:
             name = *ptr
+
+        Deprecated. Please use the `atomic_ordering` keyword of load().
         """
-        if not isinstance(ptr.type, types.PointerType):
-            msg = "cannot load from value of type %s (%r): not a pointer"
-            raise TypeError(msg % (ptr.type, str(ptr)))
-        ld = instructions.LoadAtomicInstr(
-            self.block, ptr, ordering, align, name)
-        self._insert(ld)
-        return ld
+        msg = ("load_atomic() is being deprecated. Please use the "
+               "atomic_ordering parameter of load() instead.")
+        warnings.warn(msg, DeprecationWarning)
+
+        return self.load(
+            ptr=ptr,
+            name=name,
+            align=align,
+            atomic_ordering=ordering)
 
     def store_atomic(self, value, ptr, ordering, align):
         """
         Store value to pointer, with optional guaranteed alignment:
             *ptr = name
+
+        Deprecated. Please use the `atomic_ordering` keyword of store().
         """
-        if not isinstance(ptr.type, types.PointerType):
-            msg = "cannot store to value of type %s (%r): not a pointer"
-            raise TypeError(msg % (ptr.type, str(ptr)))
-        if ptr.type.pointee != value.type:
-            raise TypeError("cannot store %s to %s: mismatching types"
-                            % (value.type, ptr.type))
-        st = instructions.StoreAtomicInstr(
-            self.block, value, ptr, ordering, align)
-        self._insert(st)
-        return st
+        msg = ("store_atomic() is being deprecated. Please use the "
+               "atomic_ordering parameter of store() instead.")
+        warnings.warn(msg, DeprecationWarning)
+
+        return self.store(
+            value=value,
+            ptr=ptr,
+            align=align,
+            atomic_ordering=ordering)
 
     #
     # Terminators APIs
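With the shim above, the legacy helpers keep working but emit a
DeprecationWarning and simply delegate to load()/store(). A small check of that
behaviour, assuming a build that includes this change (variable names are
illustrative):

    import warnings
    from llvmlite import ir

    int32 = ir.IntType(32)
    module = ir.Module(name="deprecation_demo")
    func = ir.Function(module, ir.FunctionType(ir.VoidType(), []), name="f")
    builder = ir.IRBuilder(func.append_basic_block())
    slot = builder.alloca(int32)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # Old spelling: now forwards to builder.load(..., atomic_ordering=...)
        builder.load_atomic(slot, ordering="seq_cst", align=4, name="old")
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    # New spelling; renders the same "load atomic ... seq_cst, align 4" text.
    builder.load(slot, atomic_ordering="seq_cst", align=4, name="new")
    builder.ret_void()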
""" +import warnings from llvmlite.ir import types from llvmlite.ir.values import (Block, Function, Value, NamedValue, Constant, @@ -389,85 +390,105 @@ def descr(self, buf): class LoadInstr(Instruction): - - def __init__(self, parent, ptr, name=''): + def __init__(self, parent, ptr, name='', align=None, volatile=False, + atomic_ordering=None, sync_scope=None): + if atomic_ordering is not None and align is None: + msg = "atomic load requires the align parameter to be specified" + raise ValueError(msg) + if sync_scope is not None and atomic_ordering is None: + msg = ("sync_scope may only be specified in combination with " + "atomic_ordering") + raise ValueError(msg) super(LoadInstr, self).__init__(parent, ptr.type.pointee, "load", [ptr], name=name) - self.align = None + self.align = align + self.atomic_ordering = atomic_ordering + self.sync_scope = sync_scope + self.volatile = volatile def descr(self, buf): [val] = self.operands - if self.align is not None: - align = ', align %d' % (self.align) - else: - align = '' - buf.append("load {0}, {1} {2}{3}{4}\n".format( - val.type.pointee, - val.type, - val.get_reference(), - align, - self._stringify_metadata(leading_comma=True), - )) + + flags = [] + if self.atomic_ordering is not None: + flags.append('atomic') + if self.volatile: + flags.append('volatile') + flags = ' '.join([''] + flags) + + atomic = [] + if self.atomic_ordering is not None: + if self.sync_scope is not None: + atomic.append(f'syncscope("{self.sync_scope}")') + atomic.append(self.atomic_ordering) + atomic_params = " ".join([''] + atomic) + + align = '' if self.align is None else f', align {self.align}' + metadata = self._stringify_metadata(leading_comma=True) + + buf.append(f"load{flags} {val.type.pointee}, {val.type} " + f"{val.get_reference()}{atomic_params}{align}{metadata}\n") class StoreInstr(Instruction): - def __init__(self, parent, val, ptr): + def __init__(self, parent, val, ptr, align=None, volatile=False, + atomic_ordering=None, sync_scope=None): + if atomic_ordering is not None and align is None: + msg = "atomic store requires the align parameter to be specified" + raise ValueError(msg) + if sync_scope is not None and atomic_ordering is None: + msg = ("sync_scope may only be specified in combination with " + "atomic_ordering") + raise ValueError(msg) super(StoreInstr, self).__init__(parent, types.VoidType(), "store", [val, ptr]) + self.align = align + self.atomic_ordering = atomic_ordering + self.sync_scope = sync_scope + self.volatile = volatile def descr(self, buf): val, ptr = self.operands - if self.align is not None: - align = ', align %d' % (self.align) - else: - align = '' - buf.append("store {0} {1}, {2} {3}{4}{5}\n".format( - val.type, - val.get_reference(), - ptr.type, - ptr.get_reference(), - align, - self._stringify_metadata(leading_comma=True), - )) + flags = [] + if self.atomic_ordering is not None: + flags.append('atomic') + if self.volatile: + flags.append('volatile') + flags = ' '.join([''] + flags) + + atomic = [] + if self.atomic_ordering is not None: + if self.sync_scope is not None: + atomic.append(f'syncscope("{self.sync_scope}")') + atomic.append(self.atomic_ordering) + atomic_params = " ".join([''] + atomic) + + align = '' if self.align is None else f', align {self.align}' + metadata = self._stringify_metadata(leading_comma=True) + + buf.append(f"store{flags} {val.type} {val.get_reference()}, {ptr.type}" + f" {ptr.get_reference()}{atomic_params}{align}{metadata}\n") -class LoadAtomicInstr(Instruction): + +class LoadAtomicInstr(LoadInstr): def 
__init__(self, parent, ptr, ordering, align, name=''): - super(LoadAtomicInstr, self).__init__(parent, ptr.type.pointee, - "load atomic", [ptr], name=name) - self.ordering = ordering - self.align = align + msg = ("LoadAtomicInstr() is being deprecated. Please use the " + "atomic_ordering parameter of LoadInstr() instead.") + warnings.warn(msg, DeprecationWarning) - def descr(self, buf): - [val] = self.operands - buf.append("load atomic {0}, {1} {2} {3}, align {4}{5}\n".format( - val.type.pointee, - val.type, - val.get_reference(), - self.ordering, - self.align, - self._stringify_metadata(leading_comma=True), - )) + super(LoadAtomicInstr, self).__init__( + parent, ptr, align=align, atomic_ordering=ordering, name=name) -class StoreAtomicInstr(Instruction): +class StoreAtomicInstr(StoreInstr): def __init__(self, parent, val, ptr, ordering, align): - super(StoreAtomicInstr, self).__init__(parent, types.VoidType(), - "store atomic", [val, ptr]) - self.ordering = ordering - self.align = align + msg = ("StoreAtomicInstr() is being deprecated. Please use the " + "atomic_ordering parameter of StoreInstr() instead.") + warnings.warn(msg, DeprecationWarning) - def descr(self, buf): - val, ptr = self.operands - buf.append("store atomic {0} {1}, {2} {3} {4}, align {5}{6}\n".format( - val.type, - val.get_reference(), - ptr.type, - ptr.get_reference(), - self.ordering, - self.align, - self._stringify_metadata(leading_comma=True), - )) + super(StoreAtomicInstr, self).__init__( + parent, val, ptr, align=align, atomic_ordering=ordering) class AllocaInstr(Instruction): diff --git a/llvmlite/tests/test_ir.py b/llvmlite/tests/test_ir.py index 155510096..b848b6777 100644 --- a/llvmlite/tests/test_ir.py +++ b/llvmlite/tests/test_ir.py @@ -805,11 +805,30 @@ def test_mem_ops(self): self.assertEqual(h.type, ir.VoidType()) i = builder.load(c, 'i', align=1) self.assertEqual(i.type, int32) - # Atomics - j = builder.store_atomic(b, c, ordering="seq_cst", align=4) + # Volatile + j = builder.store(b, c, align=1, volatile=True) self.assertEqual(j.type, ir.VoidType()) - k = builder.load_atomic(c, ordering="seq_cst", align=4, name='k') + k = builder.load(c, 'k', align=1, volatile=True) self.assertEqual(k.type, int32) + # Atomics + l = builder.store(b, c, atomic_ordering="seq_cst", align=4) + self.assertEqual(l.type, ir.VoidType()) + m = builder.load(c, atomic_ordering="seq_cst", align=4, name='m') + self.assertEqual(m.type, int32) + # Atomics Volatile + o = builder.store(b, c, atomic_ordering="seq_cst", + align=4, volatile=True) + self.assertEqual(o.type, ir.VoidType()) + p = builder.load(c, atomic_ordering="seq_cst", + align=4, name='p', volatile=True) + self.assertEqual(p.type, int32) + # Atomics Volatile with syncscope + q = builder.store(b, c, atomic_ordering="seq_cst", + align=4, volatile=True, sync_scope='test_scope') + self.assertEqual(q.type, ir.VoidType()) + r = builder.load(c, atomic_ordering="seq_cst", align=4, name='r', + volatile=True, sync_scope='test_scope') + self.assertEqual(r.type, int32) # Not pointer types with self.assertRaises(TypeError): builder.store(b, a) @@ -820,7 +839,27 @@ def test_mem_ops(self): builder.store(b, e) self.assertEqual(str(cm.exception), "cannot store i32 to double*: mismatching types") - self.check_block(block, """\ + # atomic store without align + with self.assertRaises(ValueError) as cm: + builder.store(b, c, atomic_ordering="seq_cst") + self.assertIn('requires the align parameter', str(cm.exception)) + # atomic load without align + with self.assertRaises(ValueError) as cm: + 
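The rewritten descr() methods above assemble the optional keyword clusters with
the ' '.join([''] + parts) idiom, so a non-empty cluster arrives with its own
leading space and an empty cluster contributes nothing. A standalone sketch of
just that formatting step (plain Python, no llvmlite needed; the helper name is
made up for illustration):

    def _joined(parts):
        # The leading '' produces a leading space only when parts is non-empty.
        return ' '.join([''] + parts)

    assert _joined([]) == ''
    assert _joined(['volatile']) == ' volatile'
    assert _joined(['atomic', 'volatile']) == ' atomic volatile'

    # Mirrors the load case: "load" + flags, then syncscope/ordering, then align.
    flags = _joined(['atomic', 'volatile'])
    ordering = _joined(['syncscope("test_scope")', 'seq_cst'])
    print(f'load{flags} i32, i32* %"c"{ordering}, align 4')
    # -> load atomic volatile i32, i32* %"c" syncscope("test_scope") seq_cst, align 4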
diff --git a/llvmlite/tests/test_ir.py b/llvmlite/tests/test_ir.py
index 155510096..b848b6777 100644
--- a/llvmlite/tests/test_ir.py
+++ b/llvmlite/tests/test_ir.py
@@ -805,11 +805,30 @@ def test_mem_ops(self):
         self.assertEqual(h.type, ir.VoidType())
         i = builder.load(c, 'i', align=1)
         self.assertEqual(i.type, int32)
-        # Atomics
-        j = builder.store_atomic(b, c, ordering="seq_cst", align=4)
+        # Volatile
+        j = builder.store(b, c, align=1, volatile=True)
         self.assertEqual(j.type, ir.VoidType())
-        k = builder.load_atomic(c, ordering="seq_cst", align=4, name='k')
+        k = builder.load(c, 'k', align=1, volatile=True)
         self.assertEqual(k.type, int32)
+        # Atomics
+        l = builder.store(b, c, atomic_ordering="seq_cst", align=4)
+        self.assertEqual(l.type, ir.VoidType())
+        m = builder.load(c, atomic_ordering="seq_cst", align=4, name='m')
+        self.assertEqual(m.type, int32)
+        # Atomics Volatile
+        o = builder.store(b, c, atomic_ordering="seq_cst",
+                          align=4, volatile=True)
+        self.assertEqual(o.type, ir.VoidType())
+        p = builder.load(c, atomic_ordering="seq_cst",
+                         align=4, name='p', volatile=True)
+        self.assertEqual(p.type, int32)
+        # Atomics Volatile with syncscope
+        q = builder.store(b, c, atomic_ordering="seq_cst",
+                          align=4, volatile=True, sync_scope='test_scope')
+        self.assertEqual(q.type, ir.VoidType())
+        r = builder.load(c, atomic_ordering="seq_cst", align=4, name='r',
+                         volatile=True, sync_scope='test_scope')
+        self.assertEqual(r.type, int32)
         # Not pointer types
         with self.assertRaises(TypeError):
             builder.store(b, a)
@@ -820,7 +839,27 @@ def test_mem_ops(self):
             builder.store(b, e)
         self.assertEqual(str(cm.exception),
                          "cannot store i32 to double*: mismatching types")
-        self.check_block(block, """\
+        # atomic store without align
+        with self.assertRaises(ValueError) as cm:
+            builder.store(b, c, atomic_ordering="seq_cst")
+        self.assertIn('requires the align parameter', str(cm.exception))
+        # atomic load without align
+        with self.assertRaises(ValueError) as cm:
+            builder.load(c, atomic_ordering="seq_cst", name='m')
+        self.assertIn('requires the align parameter', str(cm.exception))
+        # store with sync_scope but missing atomic_ordering
+        with self.assertRaises(ValueError) as cm:
+            builder.store(b, c, sync_scope="test_scope")
+        self.assertIn('may only be specified in combination with '
+                      'atomic_ordering', str(cm.exception))
+        # load with sync_scope but missing atomic_ordering
+        with self.assertRaises(ValueError) as cm:
+            builder.load(c, sync_scope="test_scope")
+        self.assertIn('may only be specified in combination with '
+                      'atomic_ordering', str(cm.exception))
+        self.check_block(
+            block,
+            """\
             my_block:
                 %"c" = alloca i32
                 %"d" = alloca i32, i32 42
@@ -830,8 +869,16 @@ def test_mem_ops(self):
                 %"g" = load i32, i32* %"c"
                 store i32 %".2", i32* %"c", align 1
                 %"i" = load i32, i32* %"c", align 1
+                store volatile i32 %".2", i32* %"c", align 1
+                %"k" = load volatile i32, i32* %"c", align 1
                 store atomic i32 %".2", i32* %"c" seq_cst, align 4
-                %"k" = load atomic i32, i32* %"c" seq_cst, align 4
+                %"m" = load atomic i32, i32* %"c" seq_cst, align 4
+                store atomic volatile i32 %".2", i32* %"c" seq_cst, align 4
+                %"p" = load atomic volatile i32, i32* %"c" seq_cst, align 4
+                store atomic volatile i32 %".2", i32* %"c" """
+            """syncscope("test_scope") seq_cst, align 4""" """
+                %"r" = load atomic volatile i32, i32* %"c" """
+            """syncscope("test_scope") seq_cst, align 4""" """
             """)
 
     def test_gep(self):
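The two validation rules exercised by the new ValueError tests above can also be
reproduced directly, assuming a build that includes this change (names are
illustrative):

    from llvmlite import ir

    int32 = ir.IntType(32)
    module = ir.Module()
    func = ir.Function(module, ir.FunctionType(ir.VoidType(), []), name="f")
    builder = ir.IRBuilder(func.append_basic_block())
    slot = builder.alloca(int32)

    try:
        builder.load(slot, atomic_ordering="seq_cst")      # align is missing
    except ValueError as exc:
        print(exc)  # atomic load requires the align parameter to be specified

    try:
        builder.store(ir.Constant(int32, 1), slot, sync_scope="test_scope")
    except ValueError as exc:
        print(exc)  # sync_scope may only be specified in combination with ...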