Refactor functions in matrixtools.py #442

Merged
merged 11 commits on Sep 24, 2024
12 changes: 12 additions & 0 deletions pygsti/extras/interpygate/__init__.py
@@ -10,3 +10,15 @@

from .core import PhysicalProcess, InterpolatedDenseOp, InterpolatedOpFactory
from .process_tomography import vec, unvec, run_process_tomography

# Note from Riley, September 2024:
#
# vec is deprecated, and shouldn't be called anywhere in the codebase.
#
# unvec is deprecated and replaced with unvec_square; the latter function
# isn't imported here because we don't want people to access it just from
# the pygsti.extras.interpygate namespace.
#
# Ideally we'd remove vec and unvec from the pygsti.extras.interpygate namespace
# and only have them available in pygsti.extras.interpygate.process_tomography.
#
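
As a quick orientation for the deprecations described in the note above, the snippet below shows the replacement calls side by side. It is a minimal sketch: it assumes pyGSTi is importable and that `unvec_square` behaves as defined in `process_tomography.py` later in this diff.

```python
import numpy as np
from pygsti.extras.interpygate.process_tomography import unvec_square

rho = np.arange(9).reshape(3, 3)                 # any square matrix

# Instead of vec(rho): in-line the column-major reshape.
col = rho.reshape((rho.size, 1), order='F')

# Instead of unvec(col): call unvec_square with column-major order.
rho_back = unvec_square(col, order='F')

assert np.array_equal(rho, rho_back)
```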
54 changes: 44 additions & 10 deletions pygsti/extras/interpygate/process_tomography.py
@@ -7,6 +7,7 @@
import numpy.linalg as _lin

from pygsti.tools.basistools import change_basis
from pygsti.tools.legacytools import deprecate


#Helper functions
@@ -15,8 +16,11 @@ def multi_kron(*a):
return reduce(_np.kron, a)


@deprecate("Calls to this function should be replaced with in-lined code: matrix.reshape((matrix.size, 1), 'F')")
def vec(matrix):
"""A function that vectorizes a matrix.
"""
Returns an explicit column-vector representation of a square matrix, obtained by reading
from the square matrix in column-major order.

Args:
matrix (list,numpy.ndarray): NxN matrix
@@ -30,11 +34,12 @@ def vec(matrix):
"""
matrix = _np.array(matrix)
if matrix.shape == (len(matrix), len(matrix)):
return _np.array([_np.concatenate(_np.array(matrix).T)]).T
return matrix.reshape((matrix.size, 1), order='F')
else:
raise ValueError('The input matrix must be square.')
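
Both deprecated wrappers in this file are tagged with `pygsti.tools.legacytools.deprecate`, which takes a replacement message. For readers unfamiliar with the pattern, here is an illustrative sketch of what a message-carrying deprecation decorator typically looks like; it is not pyGSTi's actual implementation.

```python
import functools
import warnings

def deprecate(message):
    """Illustrative sketch only -- not the real pygsti.tools.legacytools.deprecate."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # Emit a DeprecationWarning pointing at the caller, then delegate.
            warnings.warn("%s is deprecated. %s" % (fn.__name__, message),
                          DeprecationWarning, stacklevel=2)
            return fn(*args, **kwargs)
        return wrapper
    return decorator
```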


@deprecate("Calls to this function should be replaced by unvec_square(vectorized, 'F')")
def unvec(vectorized):
"""A function that vectorizes a process in the basis of matrix units, sorted first
by column, then row.
@@ -49,13 +54,42 @@ def unvec(vectorized):
ValueError: If the length of the input is not a perfect square

"""
vectorized = _np.array(vectorized)
length = int(_np.sqrt(max(vectorized.shape)))
if len(vectorized) == length ** 2:
return _np.reshape(vectorized, [length, length]).T
return unvec_square(vectorized, order='F')


def unvec_square(vectorized, order):
"""
Takes a vector whose length is a perfect square and returns a square matrix
representation, reading the vector's entries to fill the matrix in
column-major order (order='F') or row-major order (order='C').

Args:
vectorized: array-like, where np.array(vectorized).size is a perfect square.
order: 'F' or 'C'

Returns:
numpy.ndarray: NxN dimensional array

Raises:
ValueError: If the length of the input is not a perfect square.

"""
assert order == 'F' or order == 'C'
if not isinstance(vectorized, _np.ndarray):
vectorized = _np.array(vectorized)

if vectorized.ndim == 2:
assert min(vectorized.shape) == 1
vectorized = vectorized.ravel()
elif vectorized.ndim > 2:
raise ValueError('vectorized.ndim must be <= 2.')

n = int(_np.sqrt(max(vectorized.shape)))
if len(vectorized) == n ** 2:
return vectorized.reshape((n, n), order=order)
else:
raise ValueError(
'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized))
msg = 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized)
raise ValueError(msg)
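
A short usage example for `unvec_square`, showing how the `order` argument controls whether the vector's entries fill the matrix by columns or by rows (illustrative only):

```python
import numpy as np
from pygsti.extras.interpygate.process_tomography import unvec_square

v = np.array([1, 2, 3, 4])

# order='F': entries fill the matrix down each column first.
np.testing.assert_array_equal(unvec_square(v, 'F'), np.array([[1, 3],
                                                              [2, 4]]))

# order='C': entries fill the matrix across each row first.
np.testing.assert_array_equal(unvec_square(v, 'C'), np.array([[1, 2],
                                                              [3, 4]]))
```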


def split(n, a):
@@ -129,7 +163,7 @@ def run_process_tomography(state_to_density_matrix_fn, n_qubits=1, comm=None,
states = _itertools.product(one_qubit_states, repeat=n_qubits)
states = [multi_kron(*state) for state in states]
in_density_matrices = [_np.outer(state, state.conj()) for state in states]
in_states = _np.column_stack(list([vec(rho) for rho in in_density_matrices]))
in_states = _np.column_stack(list([rho.ravel(order='F') for rho in in_density_matrices]))
my_states = split(size, states)[rank]
if verbose:
print("Process %d of %d evaluating %d input states." % (rank, size, len(my_states)))
@@ -150,7 +184,7 @@ def run_process_tomography(state_to_density_matrix_fn, n_qubits=1, comm=None,
out_density_matrices = _np.array([y for x in gathered_out_density_matrices for y in x])
# Sort the list by time
out_density_matrices = _np.transpose(out_density_matrices, [1, 0, 2, 3])
out_states = [_np.column_stack(list([vec(rho) for rho in density_matrices_at_time]))
out_states = [_np.column_stack(list([rho.ravel(order='F') for rho in density_matrices_at_time]))
for density_matrices_at_time in out_density_matrices]
process_matrices = [_np.dot(out_states_at_time, _lin.inv(in_states)) for out_states_at_time in out_states]
process_matrices = [change_basis(process_matrix_at_time, 'col', basis)
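The two `column_stack` lines above build matrices whose columns are column-major-vectorized density matrices, and the process matrix is then obtained by a linear solve against the input states. Below is a self-contained numpy sketch of that linear algebra, using an ideal X gate as a stand-in for the simulated map; the state list and the `kron` check are illustrative and not taken from pyGSTi.

```python
import numpy as np

# Single-qubit pure states whose density matrices span the space of 2x2 matrices.
s2 = 1.0 / np.sqrt(2.0)
states = [np.array(s, dtype=complex) for s in ([1, 0], [0, 1], [s2, s2], [s2, 1j * s2])]
rhos_in = [np.outer(s, s.conj()) for s in states]

# Toy "process": conjugation by an ideal X gate.
X = np.array([[0, 1], [1, 0]], dtype=complex)
rhos_out = [X @ rho @ X.conj().T for rho in rhos_in]

# Column-stack the column-major-vectorized density matrices, as in the diff above.
in_states = np.column_stack([rho.ravel(order='F') for rho in rhos_in])
out_states = np.column_stack([rho.ravel(order='F') for rho in rhos_out])

# In the column-stacking ('col') convention, out = P @ in, so solve for P.
process_matrix = out_states @ np.linalg.inv(in_states)

# For conjugation by X this equals kron(X.conj(), X) in the same convention.
np.testing.assert_allclose(process_matrix, np.kron(X.conj(), X), atol=1e-12)
```

The real `run_process_tomography` additionally distributes the input states over MPI ranks and converts the result from the 'col' basis with `change_basis`; both steps are omitted from this sketch.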
4 changes: 2 additions & 2 deletions pygsti/modelmembers/operations/experrorgenop.py
@@ -699,9 +699,9 @@ def spam_transform_inplace(self, s, typ):

#just act on postfactor and Lindbladian exponent:
if typ == "prep":
mx = _mt.safe_dot(Uinv, mx)
mx = Uinv @ mx
else:
mx = _mt.safe_dot(mx, U)
mx = mx @ U
self.set_dense(mx) # calls _update_rep() and sets dirty flag
else:
raise ValueError("Invalid transform for this LindbladErrorgen: type %s"
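In this file and the ones that follow, calls to the `_mt.safe_dot` helper are replaced by the `@` operator. The helper's role was to multiply matrices regardless of whether they are dense numpy arrays or scipy sparse matrices; `@` already dispatches correctly for both operand families, which appears to be why the wrapper could be in-lined. A small illustrative check (not pyGSTi code):

```python
import numpy as np
import scipy.sparse as sps

U = np.array([[0.0, 1.0], [1.0, 0.0]])
M = np.array([[1.0, 2.0], [3.0, 4.0]])

dense_result = U @ M                                    # dense @ dense -> dense ndarray
sparse_result = sps.csr_matrix(U) @ sps.csr_matrix(M)   # sparse @ sparse -> sparse matrix

np.testing.assert_allclose(dense_result, sparse_result.toarray())
```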
6 changes: 3 additions & 3 deletions pygsti/modelmembers/operations/fullunitaryop.py
@@ -200,7 +200,7 @@ def transform_inplace(self, s):
Uinv = s.transform_matrix_inverse

my_superop_mx = _ot.unitary_to_superop(self._ptr, self._basis)
my_superop_mx = _mt.safe_dot(Uinv, _mt.safe_dot(my_superop_mx, U))
my_superop_mx = Uinv @ (my_superop_mx @ U)

self._ptr[:, :] = _ot.superop_to_unitary(my_superop_mx, self._basis)
self._ptr_has_changed()
@@ -250,9 +250,9 @@ def spam_transform_inplace(self, s, typ):

#Note: this code may need to be tweaked to work with sparse matrices
if typ == "prep":
my_superop_mx = _mt.safe_dot(Uinv, my_superop_mx)
my_superop_mx = Uinv @ my_superop_mx
else:
my_superop_mx = _mt.safe_dot(my_superop_mx, U)
my_superop_mx = my_superop_mx @ U

self._ptr[:, :] = _ot.superop_to_unitary(my_superop_mx, self._basis)
self._ptr_has_changed()
2 changes: 1 addition & 1 deletion pygsti/modelmembers/operations/lindbladcoefficients.py
@@ -195,7 +195,7 @@ def create_lindblad_term_superoperators(self, mx_basis='pp', sparse="auto", incl
if sparse:
#Note: complex OK here sometimes, as only linear combos of "other" gens
# (like (i,j) + (j,i) terms) need to be real.
superops = [_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)) for mx in superops]
superops = [leftTrans @ (mx @ rightTrans) for mx in superops]
for mx in superops: mx.sort_indices()
else:
#superops = _np.einsum("ik,akl,lj->aij", leftTrans, superops, rightTrans)
137 changes: 2 additions & 135 deletions pygsti/modelmembers/operations/lindbladerrorgen.py
@@ -497,140 +497,7 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis=
self._paramlbls = _np.array(list(_itertools.chain.from_iterable(
[blk.param_labels for blk in self.coefficient_blocks])), dtype=object)
assert(self._onenorm_upbound is not None) # _update_rep should set this
#Done with __init__(...)

#def _init_generators(self, dim):
# #assumes self.dim, self.ham_basis, self.other_basis, and self.matrix_basis are setup...
# sparse_bases = bool(self._rep_type == 'sparse superop')
#
# #HERE TODO - need to update this / MOVE to block class?
# #use caching to increase performance - cache based on all the self.XXX members utilized by this fn
# cache_key = (self._rep_type, self.matrix_basis, self.ham_basis, self.other_basis, self.parameterization)
# #print("cache key = ",self._rep_type, (self.matrix_basis.name, self.matrix_basis.dim),
# # (self.ham_basis.name, self.ham_basis.dim), (self.other_basis.name, self.other_basis.dim),
# # str(self.parameterization))
#
# if cache_key not in self._generators_cache:
#
# d = int(round(_np.sqrt(dim)))
# assert(d * d == dim), "Errorgen dim must be a perfect square"
#
# # Get basis transfer matrix
# mxBasisToStd = self.matrix_basis.create_transform_matrix(
# _BuiltinBasis("std", self.matrix_basis.dim, sparse_bases))
# # use BuiltinBasis("std") instead of just "std" in case matrix_basis is a TensorProdBasis
# leftTrans = _spsl.inv(mxBasisToStd.tocsc()).tocsr() if _sps.issparse(mxBasisToStd) \
# else _np.linalg.inv(mxBasisToStd)
# rightTrans = mxBasisToStd
#
# hamBasisMxs = self.ham_basis.elements
# otherBasisMxs = self.other_basis.elements
#
# hamGens, otherGens = _ot.lindblad_error_generators(
# hamBasisMxs, otherBasisMxs, normalize=False,
# other_mode=self.parameterization.nonham_mode) # in std basis
#
# # Note: lindblad_error_generators will return sparse generators when
# # given a sparse basis (or basis matrices)
#
# if hamGens is not None:
# bsH = len(hamGens) + 1 # projection-basis size (not nec. == dim)
# _ot._assert_shape(hamGens, (bsH - 1, dim, dim), sparse_bases)
#
# # apply basis change now, so we don't need to do so repeatedly later
# if sparse_bases:
# hamGens = [_mt.safe_real(_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)),
# inplace=True, check=True) for mx in hamGens]
# for mx in hamGens: mx.sort_indices()
# # for faster addition ops in _construct_errgen_matrix
# else:
# #hamGens = _np.einsum("ik,akl,lj->aij", leftTrans, hamGens, rightTrans)
# hamGens = _np.transpose(_np.tensordot(
# _np.tensordot(leftTrans, hamGens, (1, 1)), rightTrans, (2, 0)), (1, 0, 2))
# else:
# bsH = 0
# assert(bsH == self.ham_basis_size)
#
# if otherGens is not None:
#
# if self.parameterization.nonham_mode == "diagonal":
# bsO = len(otherGens) + 1 # projection-basis size (not nec. == dim)
# _ot._assert_shape(otherGens, (bsO - 1, dim, dim), sparse_bases)
#
# # apply basis change now, so we don't need to do so repeatedly later
# if sparse_bases:
# otherGens = [_mt.safe_real(_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)),
# inplace=True, check=True) for mx in otherGens]
# for mx in otherGens: mx.sort_indices()
# # for faster addition ops in _construct_errgen_matrix
# else:
# #otherGens = _np.einsum("ik,akl,lj->aij", leftTrans, otherGens, rightTrans)
# otherGens = _np.transpose(_np.tensordot(
# _np.tensordot(leftTrans, otherGens, (1, 1)), rightTrans, (2, 0)), (1, 0, 2))
#
# elif self.parameterization.nonham_mode == "diag_affine":
# # projection-basis size (not nec. == dim) [~shape[1] but works for lists too]
# bsO = len(otherGens[0]) + 1
# _ot._assert_shape(otherGens, (2, bsO - 1, dim, dim), sparse_bases)
#
# # apply basis change now, so we don't need to do so repeatedly later
# if sparse_bases:
# otherGens = [[_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans))
# for mx in mxRow] for mxRow in otherGens]
#
# for mxRow in otherGens:
# for mx in mxRow: mx.sort_indices()
# # for faster addition ops in _construct_errgen_matrix
# else:
# #otherGens = _np.einsum("ik,abkl,lj->abij", leftTrans,
# # otherGens, rightTrans)
# otherGens = _np.transpose(_np.tensordot(
# _np.tensordot(leftTrans, otherGens, (1, 2)), rightTrans, (3, 0)), (1, 2, 0, 3))
#
# else:
# bsO = len(otherGens) + 1 # projection-basis size (not nec. == dim)
# _ot._assert_shape(otherGens, (bsO - 1, bsO - 1, dim, dim), sparse_bases)
#
# # apply basis change now, so we don't need to do so repeatedly later
# if sparse_bases:
# otherGens = [[_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans))
# for mx in mxRow] for mxRow in otherGens]
# #Note: complex OK here, as only linear combos of otherGens (like (i,j) + (j,i)
# # terms) need to be real
#
# for mxRow in otherGens:
# for mx in mxRow: mx.sort_indices()
# # for faster addition ops in _construct_errgen_matrix
# else:
# #otherGens = _np.einsum("ik,abkl,lj->abij", leftTrans,
# # otherGens, rightTrans)
# otherGens = _np.transpose(_np.tensordot(
# _np.tensordot(leftTrans, otherGens, (1, 2)), rightTrans, (3, 0)), (1, 2, 0, 3))
#
# else:
# bsO = 0
# assert(bsO == self.other_basis_size)
#
# if hamGens is not None:
# hamGens_1norms = _np.array([_mt.safe_onenorm(mx) for mx in hamGens], 'd')
# else:
# hamGens_1norms = None
#
# if otherGens is not None:
# if self.parameterization.nonham_mode == "diagonal":
# otherGens_1norms = _np.array([_mt.safe_onenorm(mx) for mx in otherGens], 'd')
# else:
# otherGens_1norms = _np.array([_mt.safe_onenorm(mx)
# for oGenRow in otherGens for mx in oGenRow], 'd')
# else:
# otherGens_1norms = None
#
# self._generators_cache[cache_key] = (hamGens, otherGens, hamGens_1norms, otherGens_1norms)
#
# cached_hamGens, cached_otherGens, cached_h1norms, cached_o1norms = self._generators_cache[cache_key]
# return (_copy.deepcopy(cached_hamGens), _copy.deepcopy(cached_otherGens),
# cached_h1norms.copy() if (cached_h1norms is not None) else None,
# cached_o1norms.copy() if (cached_o1norms is not None) else None)
# Done with __init__(...)

def _init_terms(self, coefficient_blocks, max_polynomial_vars):

@@ -1341,7 +1208,7 @@ def transform_inplace(self, s):

#conjugate Lindbladian exponent by U:
err_gen_mx = self.to_sparse() if self._rep_type == 'sparse superop' else self.to_dense()
err_gen_mx = _mt.safe_dot(Uinv, _mt.safe_dot(err_gen_mx, U))
err_gen_mx = Uinv @ (err_gen_mx @ U)
trunc = 1e-6 if isinstance(s, _gaugegroup.UnitaryGaugeGroupElement) else False
self._set_params_from_matrix(err_gen_mx, truncate=trunc)
self.dirty = True
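The hunk above conjugates the error-generator matrix by the gauge transform (`Uinv @ (err_gen_mx @ U)`) rather than conjugating the exponentiated channel. That is sufficient because the matrix exponential commutes with similarity transforms: exp(U^-1 L U) = U^-1 exp(L) U. A small numerical check of that identity with random stand-ins (not pyGSTi objects):

```python
import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(0)
L = rng.standard_normal((4, 4))      # stand-in for a dense error-generator matrix
A = rng.standard_normal((4, 4))
U = expm(A - A.T)                    # orthogonal, hence well-conditioned "gauge" matrix
Uinv = U.T

# Conjugating the generator and then exponentiating ...
lhs = expm(Uinv @ (L @ U))
# ... matches exponentiating first and conjugating the resulting map.
rhs = Uinv @ expm(L) @ U

np.testing.assert_allclose(lhs, rhs, atol=1e-10)
```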