diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..371ffcb
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,65 @@
+import pytest
+
+try:
+    import tensorflow as tf
+except (ImportError, ModuleNotFoundError):
+    tf_available = False
+else:
+    tf_available = True
+
+try:
+    import torch
+    from torch.autograd import Variable
+
+    torch_available = True
+except ImportError:
+    torch_available = False
+
+try:
+    import jax
+    import jax.numpy as jnp
+
+    jax_available = True
+except ImportError:
+    jax_available = False
+
+
+# pylint: disable=unused-argument
+def pytest_generate_tests(metafunc):
+    if jax_available:
+        jax.config.update("jax_enable_x64", True)
+
+
+def pytest_runtest_setup(item):
+    """Automatically skip tests if the required interfaces are not installed."""
+    # Autograd is assumed to be installed
+    interfaces = {"tf", "torch", "jax"}
+    available_interfaces = {
+        "tf": tf_available,
+        "torch": torch_available,
+        "jax": jax_available,
+    }
+
+    allowed_interfaces = [
+        allowed_interface
+        for allowed_interface in interfaces
+        if available_interfaces[allowed_interface] is True
+    ]
+
+    # collect the markers that specify which interfaces the test requires
+    all_interfaces = {"tf", "torch", "jax", "all_interfaces"}
+    marks = {mark.name for mark in item.iter_markers() if mark.name in all_interfaces}
+
+    for mark_name in marks:
+        if mark_name == "all_interfaces":
+            required_interfaces = {"tf", "torch", "jax"}
+            for interface in required_interfaces:
+                if interface not in allowed_interfaces:
+                    pytest.skip(
+                        f"\nTest {item.nodeid} requires all interfaces, but {interface} is not installed",
+                    )
+        else:
+            if mark_name not in allowed_interfaces:
+                pytest.skip(
+                    f"\nTest {item.nodeid} requires the {mark_name} interface, which is not installed",
+                )
\ No newline at end of file
diff --git a/tests/test_ops.py b/tests/test_ops.py
index ce944d0..fe51db3 100644
--- a/tests/test_ops.py
+++ b/tests/test_ops.py
@@ -25,12 +25,14 @@
 
 
 def get_GPI_matrix(phi):
+    """Return the expected GPI matrix for the angle phi."""
     return np.array([[0, math.exp(-1j * phi)], [math.exp(1j * phi), 0]])
 
 
 class State:
     @staticmethod
     def set_state():
+        """Queue the gates that prepare the reference single-qubit state."""
         qml.RX(0.2, wires=0)
         qml.RY(1.1, wires=0)
         qml.RX(0.3, wires=0)
@@ -38,42 +40,42 @@ def set_state():
     @classmethod
     @qml.qnode(qml.device("default.qubit", wires=1))
     def get_state(cls):
+        """Return the statevector prepared by set_state."""
         cls.set_state()
         return qml.state()
 
     def __init__(self):
+        """Cache the reference state and the inner products used in expected values."""
         self.state = self.get_state()
         self.a_conj_b = self.state[0] * np.conj(self.state[1])
         self.b_conj_a = self.state[1] * np.conj(self.state[0])
 
 
+@pytest.mark.parametrize("phi", [0.0, 0.37 * two_pi, 1.23 * two_pi, two_pi])
 class TestGPI:
     @staticmethod
     def circuit(phi):
+        """Apply GPI to the reference state and measure the PauliY expectation."""
         State.set_state()
         GPI(phi, wires=0)
         return qml.expval(qml.PauliY(wires=0))
 
     @pytest.fixture(autouse=True)
     def state(self):
+        """Provide the reference state to every test."""
         self._state = State()
 
-    @pytest.mark.parametrize("interface, array_method", interfaces_and_array_methods)
-    def test_GPI_compute_matrix(self, interface, array_method):
-        phi_rand = np.random.rand() * two_pi
-        phi_values = [0, phi_rand, two_pi]
-
-        for phi_value in phi_values:
-            phi_interface = array_method(phi_value, interface)
-            gpi_matrix = GPI.compute_matrix(phi_interface)
-
-            check_matrix = get_GPI_matrix(phi_value)
+    def test_GPI_compute_matrix(self, phi):
+        """Test that GPI.compute_matrix matches the expected matrix."""
+        gpi_matrix = GPI.compute_matrix(phi)
+        check_matrix = get_GPI_matrix(phi)
 
-            assert math.allclose(gpi_matrix, check_matrix)
+        assert math.allclose(gpi_matrix, check_matrix)
 
     @pytest.mark.parametrize("interface, array_method", interfaces_and_array_methods)
     def test_GPI_circuit(self, interface, array_method, phi):
-        phi_GPI = array_method(phi, interface)
+        """Test the GPI gate in a circuit for each interface."""
+        phi_GPI = array_method(phi)
         dev = qml.device("default.qubit", wires=1)
 
         qnode_GPI = qml.QNode(self.circuit, dev, interface=interface)
@@ -87,34 +89,92 @@ def test_GPI_circuit(self, interface, array_method, phi):
             val_GPI, expected_val, atol=1e-07
         ), f"Given val: {val_GPI}; Expected val: {expected_val}"
 
-    @pytest.mark.parametrize("interface, array_method", interfaces_and_array_methods)
+    def get_circuit_grad(self, phi):
+        """Return the analytic gradient of the test circuit at phi."""
+        expected_inner_product_1 = 1j * self._state.b_conj_a * np.exp(-2j * phi) * (-2j)
+        expected_inner_product_2 = -1j * self._state.a_conj_b * np.exp(2j * phi) * 2j
+        return np.real(expected_inner_product_1 + expected_inner_product_2)
+
     @pytest.mark.parametrize("diff_method", diff_methods)
-    @pytest.mark.parametrize("phi", [0.37 * two_pi, 1.23 * two_pi, two_pi])
-    def test_GPI_grad(self, diff_method, interface, array_method):
-        phi = np.random.rand() * two_pi
-        phi_GPI = array_method(phi, interface)
+    def test_GPI_grad_qnode(self, diff_method, phi):
+        """Test the GPI gradient for each differentiation method."""
+
+        phi_GPI = np.array(phi)
+        dev = qml.device("default.qubit", wires=1)
+
+        qnode_GPI = qml.QNode(self.circuit, dev, diff_method=diff_method)
+        grad_GPI = qml.grad(qnode_GPI, argnum=0)(phi_GPI)
+
+        expected_grad = self.get_circuit_grad(phi)
+        assert np.isclose(
+            grad_GPI, expected_grad
+        ), f"Given grad: {grad_GPI}; Expected grad: {expected_grad}"
+
+    @pytest.mark.autograd
+    def test_GPI_grad_autograd(self, phi):
+        """Test the GPI gradient with the autograd interface."""
+        phi_GPI = qml.numpy.array(phi, requires_grad=True)
         dev = qml.device("default.qubit", wires=1)
 
-        qnode_GPI = qml.QNode(self.circuit, dev, interface=interface, diff_method=diff_method)
+        qnode_GPI = qml.QNode(self.circuit, dev, interface="autograd")
+        grad_GPI = qml.grad(qnode_GPI, argnum=0)(phi_GPI)
 
-        if interface == "torch":
-            phi_GPI.requires_grad = True
-            result = qnode_GPI(phi_GPI)
-            result.backward()
-            grad_GPI = phi_GPI.grad
-        elif interface == "tf":
-            with tf.GradientTape() as tape:
-                loss = qnode_GPI(phi_GPI)
-            grad_GPI = tape.gradient(loss, phi_GPI)
-        elif interface == "jax":
-            grad_GPI = jax.grad(qnode_GPI)(phi_GPI)
-        else:
-            grad_GPI = qml.grad(qnode_GPI, argnum=0)(phi_GPI)
+        expected_grad = self.get_circuit_grad(phi)
+        assert np.isclose(
+            grad_GPI, expected_grad
+        ), f"Given grad: {grad_GPI}; Expected grad: {expected_grad}"
 
-        expected_inner_product_1 = 1j * self._state.b_conj_a * np.exp(-2j * phi) * (-2j)
-        expected_inner_product_2 = -1j * self._state.a_conj_b * np.exp(2j * phi) * 2j
-        expected_grad = np.real(expected_inner_product_1 + expected_inner_product_2)
+    @pytest.mark.jax
+    def test_GPI_grad_jax(self, phi):
+        """Test the GPI gradient with the JAX interface."""
+        import jax
+
+        phi_GPI = jax.numpy.array(phi, dtype=jax.numpy.complex128)
+        dev = qml.device("default.qubit", wires=1)
+        qnode_GPI = qml.QNode(self.circuit, dev, interface="jax")
+        grad_GPI = jax.grad(qnode_GPI)(phi_GPI)
+
+        expected_grad = self.get_circuit_grad(phi)
         assert np.isclose(
             grad_GPI, expected_grad
         ), f"Given grad: {grad_GPI}; Expected grad: {expected_grad}"
+
+    @pytest.mark.torch
+    def test_GPI_grad_torch(self, phi):
+        """Test the GPI gradient with the Torch interface."""
+        import torch
+
+        phi_GPI = torch.tensor(phi, requires_grad=True)
+        dev = qml.device("default.qubit", wires=1)
+
+        qnode_GPI = qml.QNode(self.circuit, dev, interface="torch")
+
+        result = qnode_GPI(phi_GPI)
+        result.backward()
+        grad_GPI = phi_GPI.grad
+
+        expected_grad = self.get_circuit_grad(phi)
+        assert np.isclose(
+            grad_GPI, expected_grad
+        ), f"Given grad: {grad_GPI}; Expected grad: {expected_grad}"
+
+    @pytest.mark.tf
+    def test_GPI_grad_tensorflow(self, phi):
+        """Test the GPI gradient with the TensorFlow interface."""
+        import tensorflow as tf
+
+        phi_GPI = tf.Variable(phi)
+        dev = qml.device("default.qubit", wires=1)
+
+        qnode_GPI = qml.QNode(self.circuit, dev, interface="tf")
+
+        with tf.GradientTape() as tape:
+            loss = qnode_GPI(phi_GPI)
+        grad_GPI = tape.gradient(loss, phi_GPI)
+
+        expected_grad = self.get_circuit_grad(phi)
+        assert np.isclose(
+            grad_GPI, expected_grad
+        ), f"Given grad: {grad_GPI}; Expected grad: {expected_grad}"
\ No newline at end of file
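
Note: autograd, tf, torch, jax, and all_interfaces are custom pytest marks, and pytest emits a warning for marks it does not know about. If the project does not already register them, a minimal sketch of how they could be registered (a hypothetical addition, not part of the diff above) is:

    # Hypothetical pytest_configure hook for tests/conftest.py: register the
    # custom interface marks so pytest does not emit PytestUnknownMarkWarning.
    def pytest_configure(config):
        for marker in ("autograd", "tf", "torch", "jax", "all_interfaces"):
            config.addinivalue_line("markers", f"{marker}: test requires the {marker} interface")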