From 55da605eeab38e9b154ac654218f61b4eb7ac723 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 22 May 2024 17:02:05 -0400 Subject: [PATCH 1/9] main changes (breaks some calling functions elsewhere) --- pygsti/tools/matrixtools.py | 421 +++++++++++++--------------- test/unit/tools/test_matrixtools.py | 40 +-- 2 files changed, 204 insertions(+), 257 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 0e176ca2e..c4998d310 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -30,6 +30,53 @@ #EXPM_DEFAULT_TOL = 1e-7 EXPM_DEFAULT_TOL = 2**-53 # Scipy default +BLAS_FUNCS = { + 'herk': { + 's' : _spl.blas.ssyrk, + 'd' : _spl.blas.dsyrk, + 'c' : _spl.blas.cherk, + 'z': _spl.blas.zherk + } +} + +def gram_matrix(m, adjoint=False): + """ + If adjoint=False, then return m.T.conj() @ m, computed in a more efficient way. + + If adjoint=True, return m @ m.T.conj(), likewise computed in a more efficient way. + """ + assert isinstance(m, _np.ndarray) + prefix_char, _, _ = _spl.blas.find_best_blas_type(dtype=m.dtype) + herk = BLAS_FUNCS["herk"][prefix_char] + + if adjoint: + trans = 0 + elif _np.iscomplexobj(m): + trans = 2 + else: + trans = 1 + out = herk(1.0, m, trans=trans) + i_lower = _np.tril_indices(out.shape[0], -1) + upper_values = out.T[i_lower] + out[i_lower] = upper_values.real + if trans > 0: + out[i_lower] += upper_values.imag + return out + + +def is_normal(m, tol=1e-9): + """ + Test whether m is a normal operator, in the sense that it commutes with its adjoint. + """ + if m.shape[0] != m.shape[1]: + return False + prefix_char, _, _ = _spl.blas.find_best_blas_type(dtype=m.dtype) + herk = BLAS_FUNCS["herk"][prefix_char] + trans = 2 if _np.iscomplexobj(m) else 1 + mdagm = herk( 1.0, m, trans=trans ) + mmdag = herk( -1.0, m, trans=0, c=mdagm, overwrite_c=True ) + return _np.all(_np.abs(mmdag) <= tol) + def is_hermitian(mx, tol=1e-9): """ @@ -49,14 +96,13 @@ def is_hermitian(mx, tol=1e-9): True if mx is hermitian, otherwise False. """ (m, n) = mx.shape - for i in range(m): - if abs(mx[i, i].imag) > tol: return False - for j in range(i + 1, n): - if abs(mx[i, j] - mx[j, i].conjugate()) > tol: return False - return True + if m != n: + return False + else: + return _np.all(_np.abs(mx - mx.T.conj()) <= tol) -def is_pos_def(mx, tol=1e-9): +def is_pos_def(mx, tol=1e-9, attempt_cholesky=False): """ Test whether mx is a positive-definite matrix. @@ -73,7 +119,15 @@ def is_pos_def(mx, tol=1e-9): bool True if mx is positive-semidefinite, otherwise False. """ - evals = _np.linalg.eigvals(mx) + if not is_hermitian(mx, tol): + return False + if attempt_cholesky: + try: + _ = _spl.cholesky(mx) + return True # Cholesky succeeded + except _spl.LinAlgError: + pass # we fall back on eigenvalue decomposition + evals = _np.linalg.eigvalsh(mx) return all([ev > -tol for ev in evals]) @@ -94,7 +148,7 @@ def is_valid_density_mx(mx, tol=1e-9): bool True if mx is a valid density matrix, otherwise False. 
""" - return is_hermitian(mx, tol) and is_pos_def(mx, tol) and abs(_np.trace(mx) - 1.0) < tol + return abs(_np.trace(mx) - 1.0) < tol and is_hermitian(mx, tol) and is_pos_def(mx, tol) def nullspace(m, tol=1e-7): @@ -115,7 +169,7 @@ def nullspace(m, tol=1e-7): """ _, s, vh = _np.linalg.svd(m) rank = (s > tol).sum() - return vh[rank:].T.conjugate().copy() + return vh[rank:].T.conjugate() def nullspace_qr(m, tol=1e-7): @@ -151,15 +205,13 @@ def nullspace_qr(m, tol=1e-7): return q[:, rank:] +#TODO: remove the orthogonalize argument (requires changing functions that call this one) def nice_nullspace(m, tol=1e-7, orthogonalize=False): """ Computes the nullspace of a matrix, and tries to return a "nice" basis for it. Columns of the returned value (a basis for the nullspace) each have a maximum - absolute value of 1.0 and are chosen so as to align with the the original - matrix's basis as much as possible (the basis is found by projecting each - original basis vector onto an arbitrariliy-found nullspace and keeping only - a set of linearly independent projections). + absolute value of 1.0. Parameters ---------- @@ -176,27 +228,30 @@ def nice_nullspace(m, tol=1e-7, orthogonalize=False): ------- An matrix of shape (M,K) whose columns contain nullspace basis vectors. """ - nullsp = nullspace(m, tol) - nullsp_projector = _np.dot(nullsp, nullsp.conj().T) - keepers = []; current_rank = 0 - for i in range(nullsp_projector.shape[1]): # same as mx.shape[1] - rank = _np.linalg.matrix_rank(nullsp_projector[:, 0:i + 1], tol=tol) - if rank > current_rank: - keepers.append(i) - current_rank = rank - ret = _np.take(nullsp_projector, keepers, axis=1) - - if orthogonalize: # and not columns_are_orthogonal(ret): - ret, _ = _np.linalg.qr(ret) # Gram-Schmidt orthogonalization + # + # nullsp = nullspace(m, tol) + # dim_ker = nullsp.shape[1] + # _, _, p = _spl.qr(nullsp.T.conj(), mode='raw', pivoting=True) + # ret = nullsp @ (nullsp.T[:, p[dim_ker]]).conj() + # + ## ^ Equivalent to, but faster than the following + ## + ## nullsp_projector = nullsp @ nullsp.T.conj() + ## ret = nullsp_projector[:, p[:dim_ker]] + ## + # + + ret = nullspace(m, tol) for j in range(ret.shape[1]): # normalize columns so largest element is +1.0 imax = _np.argmax(_np.abs(ret[:, j])) - if abs(ret[imax, j]) > 1e-6: ret[:, j] /= ret[imax, j] + if abs(ret[imax, j]) > 1e-6: + ret[:, j] /= ret[imax, j] return ret -def normalize_columns(m, return_norms=False, ord=None): +def normalize_columns(m, return_norms=False, norm_ord=None): """ Normalizes the columns of a matrix. @@ -209,7 +264,7 @@ def normalize_columns(m, return_norms=False, ord=None): If `True`, also return a 1D array containing the norms of the columns (before they were normalized). - ord : int or list of ints, optional + norm_ord : int or list of ints, optional The order of the norm. See :func:`numpy.linalg.norm`. An array of orders can be given to specify the norm on a per-column basis. @@ -223,13 +278,13 @@ def normalize_columns(m, return_norms=False, ord=None): Only returned when `return_norms=True`, a 1-dimensional array of the pre-normalization norm of each column. """ - norms = column_norms(m, ord) + norms = column_norms(m, norm_ord) norms[norms == 0.0] = 1.0 # avoid division of zero-column by zero normalized_m = scale_columns(m, 1 / norms) return (normalized_m, norms) if return_norms else normalized_m -def column_norms(m, ord=None): +def column_norms(m, norm_ord=None): """ Compute the norms of the columns of a matrix. 
@@ -248,14 +303,16 @@ def column_norms(m, ord=None): numpy.ndarray A 1-dimensional array of the column norms (length is number of columns of `m`). """ - ord_list = [ord] * m.shape[1] if (ord is None or isinstance(ord, int)) else ord - assert(len(ord_list) == m.shape[1]) - if _sps.issparse(m): - #this could be done more efficiently, e.g. by converting to csc and taking column norms directly + ord_list = norm_ord if isinstance(norm_ord, (list, _np.ndarray)) else [norm_ord] * m.shape[1] + assert(len(ord_list) == m.shape[1]) norms = _np.array([_np.linalg.norm(m[:, j].toarray(), ord=o) for j, o in enumerate(ord_list)]) + elif isinstance(norm_ord, (list, _np.ndarray)): + assert(len(norm_ord) == m.shape[1]) + norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(norm_ord)]) else: - norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(ord_list)]) + norms = _np.linalg.norm(m, axis=0, ord=norm_ord) + return norms @@ -311,8 +368,9 @@ def columns_are_orthogonal(m, tol=1e-7): ------- bool """ - if m.size == 0: return True # boundary case - check = _np.dot(m.conj().T, m) + if m.size == 0: + return True # boundary case + check = gram_matrix(m) check[_np.diag_indices_from(check)] = 0.0 return bool(_np.linalg.norm(check) / check.size < tol) @@ -337,9 +395,11 @@ def columns_are_orthonormal(m, tol=1e-7): ------- bool """ - if m.size == 0: return True # boundary case - check = _np.dot(m.conj().T, m) - return bool(_np.allclose(check, _np.identity(check.shape[0], 'd'), atol=tol)) + if m.size == 0: + return True # boundary case + check = gram_matrix(m) + check[_np.diag_indices_from(check)] -= 1.0 + return bool(_np.linalg.norm(check) / check.size < tol) def independent_columns(m, initial_independent_cols=None, tol=1e-7): @@ -369,27 +429,28 @@ def independent_columns(m, initial_independent_cols=None, tol=1e-7): list A list of the independent-column indices of `m`. """ - indep_cols = [] - if not _sps.issparse(m): - running_indep_cols = initial_independent_cols.copy() \ - if (initial_independent_cols is not None) else _np.empty((m.shape[0], 0), m.dtype) - num_indep_cols = running_indep_cols.shape[0] - - for j in range(m.shape[1]): - trial = _np.concatenate((running_indep_cols, m[:, j]), axis=1) - if _np.linalg.matrix_rank(trial, tol=tol) == num_indep_cols + 1: - running_indep_cols = trial - indep_cols.append(j) - num_indep_cols += 1 + if initial_independent_cols is None: + proj_m = m.copy() + else: + assert initial_independent_cols.shape[0] == m.shape[0] + q = _spl.qr(initial_independent_cols, mode='econ')[0] + # proj_m = (I - qq')m + temp1 = q.T.conj() @ m + temp2 = q @ temp1 + proj_m = m - temp2 - else: # sparse case + rank = _np.linalg.matrix_rank(proj_m, tol=tol) + pivots = _spl.qr(proj_m, overwrite_a=True, mode='raw', pivoting=True)[2] + indep_cols = pivots[:rank].tolist() + else: + # TODO: re-implement to avoid unreliable calls to ARPACK's svds. 
+ indep_cols = [] from scipy.sparse.linalg import ArpackNoConvergence as _ArpackNoConvergence from scipy.sparse.linalg import ArpackError as _ArpackError running_indep_cols = initial_independent_cols.copy() \ if (initial_independent_cols is not None) else _sps.csc_matrix((m.shape[0], 0), dtype=m.dtype) - num_indep_cols = running_indep_cols.shape[0] for j in range(m.shape[1]): trial = _sps.hstack((running_indep_cols, m[:, j])) @@ -408,15 +469,33 @@ def independent_columns(m, initial_independent_cols=None, tol=1e-7): def pinv_of_matrix_with_orthogonal_columns(m): - """ TODO: docstring """ - col_scaling = _np.sum(_np.abs(m)**2, axis=0) + """ + Return the matrix "pinv_m" so m @ pinvm and pinv_m @ m are orthogonal projectors + onto subspaces of dimension rank(m). + + Parameters + ---------- + m : numpy.ndarray + + Returns + ---------- + pinv_m : numpy.ndarray + """ + col_scaling = _np.linalg.norm(m, axis=0)**2 m_with_scaled_cols = m.conj() * col_scaling[None, :] return m_with_scaled_cols.T def matrix_sign(m): """ - The "sign" matrix of `m` + Compute the matrix s = sign(m). The eigenvectors of s are the same as those of m. + The eigenvalues of s are +/- 1, corresponding to the signs of m's eigenvalues. + + It's straightforward to compute s when m is a normal operator. If m is not normal, + then the definition of s can be given in terms of m's Jordan form, and s + can be computed by (suitably post-processing) the Schur decomposition of m. + + See https://nhigham.com/2020/12/15/what-is-the-matrix-sign-function/ for background. Parameters ---------- @@ -427,40 +506,45 @@ def matrix_sign(m): ------- numpy.ndarray """ - #Notes: sign(m) defined s.t. eigvecs of sign(m) are evecs of m - # and evals of sign(m) are +/-1 or 0 based on sign of eigenvalues of m + N = m.shape[0] + assert(m.shape == (N, N)), "m must be square!" - #Using the extremely numerically stable (but expensive) Schur method - # see http://www.maths.manchester.ac.uk/~higham/fm/OT104HighamChapter5.pdf - N = m.shape[0]; assert(m.shape == (N, N)), "m must be square!" - T, Z = _spl.schur(m, 'complex') # m = Z T Z^H where Z is unitary and T is upper-triangular - U = _np.zeros(T.shape, 'complex') # will be sign(T), which is easy to compute - # (U is also upper triangular), and then sign(m) = Z U Z^H + if is_hermitian(m): + eigvals, eigvecs = _spl.eigh(m) + sign = (eigvecs * _np.sign(eigvals)[None, :]) @ eigvecs.T.conj() + return sign - # diagonals are easy + T, Z = _spl.schur(m, 'complex') # m = Z T Z^H where Z is unitary and T is upper-triangular + U = _np.zeros(T.shape, 'complex') U[_np.diag_indices_from(U)] = _np.sign(_np.diagonal(T)) + # If T is diagonal, then we're basically done. If T isn't diagonal, then we have work to do. + + if not _np.all(_np.isclose(T[_np.triu_indices(N, 1)], 0.0)): + # Use the extremely numerically stable (but expensive) method from + # N. Higham's book, Functions of Matrices : Theory and Practice, Chapter 5. + + #Off diagonals: use U^2 = I or TU = UT + # Note: Tij = Uij = 0 when i > j and i==j easy so just consider i j and i==j easy so just consider i -i[H, rho] @@ -873,27 +964,9 @@ def vec(matrix_in): return [b for a in _np.transpose(matrix_in) for b in a] -def unvec(vector_in): - """ - Slices a vector into the columns of a matrix. 
- - Parameters - ---------- - vector_in : numpy.ndarray - - Returns - ------- - numpy.ndarray - """ - dim = int(_np.sqrt(len(vector_in))) - return _np.transpose(_np.array(list( - zip(*[_ittls.chain(vector_in, - _ittls.repeat(None, dim - 1))] * dim)))) - - def norm1(m): """ - Returns the 1 norm of a matrix + Returns the Schatten 1-norm of a matrix Parameters ---------- @@ -904,9 +977,13 @@ def norm1(m): ------- numpy.ndarray """ - return float(_np.real(_np.trace(_sqrtm(_np.dot(m.conj().T, m))))) + s = _spl.svdvals(m) + nrm = _np.sum(s) + return nrm +# Riley note: I'd like to rewrite this, but I don't want to mess with reproducibility +# issues. For now I've just made it a teeny bit more efficient. def random_hermitian(dim): """ Generates a random Hermitian matrix @@ -925,12 +1002,13 @@ def random_hermitian(dim): dim = int(dim) a = _np.random.random(size=[dim, dim]) b = _np.random.random(size=[dim, dim]) - c = a + 1.j * b + (a + 1.j * b).conj().T + c = a + 1.j * b + c += c.conj().T my_norm = norm1(c) return c / my_norm -def norm1to1(operator, num_samples=10000, mx_basis="gm", return_list=False): +def norm1to1(operator, num_samples=10000, mx_basis="gm"): """ The Hermitian 1-to-1 norm of a superoperator represented in the standard basis. @@ -948,23 +1026,20 @@ def norm1to1(operator, num_samples=10000, mx_basis="gm", return_list=False): mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis The basis of `operator`. - return_list : bool, optional - Whether the entire list of sampled values is returned or just the maximum. - Returns ------- float or list Depends on the value of `return_list`. """ std_operator = change_basis(operator, mx_basis, 'std') - rand_dim = int(_np.sqrt(float(len(std_operator)))) - vals = [norm1(unvec(_np.dot(std_operator, vec(random_hermitian(rand_dim))))) - for n in range(num_samples)] - if return_list: - return vals - else: - return max(vals) - + dim = int(_np.sqrt(len(std_operator))) + max_val = 0.0 + for _ in range(num_samples): + invec = random_hermitian(dim).ravel(order='F') + outvec = std_operator @ invec + val = norm1(outvec.reshape((dim,dim), order='F')) + max_val = max(val, max_val) + return max_val ## ------------------------ General utility fns ----------------------------------- @@ -1372,6 +1447,9 @@ def _findx(a, inds, always_copy=False): return a_inds +# TODO: reevaluate the need for this function. It seems like we could just in-line @ +# and let operator overloading and implementations of __matmul__ and __rmatmul__ +# handle it. def safe_dot(a, b): """ Performs dot(a,b) correctly when neither, either, or both arguments are sparse matrices. @@ -1398,78 +1476,6 @@ def safe_dot(a, b): return _np.dot(a, b) -def safe_real(a, inplace=False, check=False): - """ - Get the real-part of `a`, where `a` can be either a dense array or a sparse matrix. - - Parameters - ---------- - a : numpy.ndarray or scipy.sparse matrix. - Array to take real part of. - - inplace : bool, optional - Whether this operation should be done in-place. - - check : bool, optional - If True, raise a `ValueError` if `a` has a nonzero imaginary part. 
- - Returns - ------- - numpy.ndarray or scipy.sparse matrix - """ - if check: - assert(safe_norm(a, 'imag') < 1e-6), "Check failed: taking real-part of matrix w/nonzero imaginary part" - if _sps.issparse(a): - if _sps.isspmatrix_csr(a): - if inplace: - ret = _sps.csr_matrix((_np.real(a.data), a.indices, a.indptr), shape=a.shape, dtype='d') - else: # copy - ret = _sps.csr_matrix((_np.real(a.data).copy(), a.indices.copy(), - a.indptr.copy()), shape=a.shape, dtype='d') - ret.eliminate_zeros() - return ret - else: - raise NotImplementedError("safe_real() doesn't work with %s matrices yet" % str(type(a))) - else: - return _np.real(a) - - -def safe_imag(a, inplace=False, check=False): - """ - Get the imaginary-part of `a`, where `a` can be either a dense array or a sparse matrix. - - Parameters - ---------- - a : numpy.ndarray or scipy.sparse matrix. - Array to take imaginary part of. - - inplace : bool, optional - Whether this operation should be done in-place. - - check : bool, optional - If True, raise a `ValueError` if `a` has a nonzero real part. - - Returns - ------- - numpy.ndarray or scipy.sparse matrix - """ - if check: - assert(safe_norm(a, 'real') < 1e-6), "Check failed: taking imag-part of matrix w/nonzero real part" - if _sps.issparse(a): - if _sps.isspmatrix_csr(a): - if inplace: - ret = _sps.csr_matrix((_np.imag(a.data), a.indices, a.indptr), shape=a.shape, dtype='d') - else: # copy - ret = _sps.csr_matrix((_np.imag(a.data).copy(), a.indices.copy(), - a.indptr.copy()), shape=a.shape, dtype='d') - ret.eliminate_zeros() - return ret - else: - raise NotImplementedError("safe_real() doesn't work with %s matrices yet" % str(type(a))) - else: - return _np.imag(a) - - def safe_norm(a, part=None): """ Get the frobenius norm of a matrix or vector, `a`, when it is either a dense array or a sparse matrix. @@ -2044,7 +2050,7 @@ def to_unitary(scaled_unitary): unitary : ndarray Such that `scale * unitary == scaled_unitary`. """ - scaled_identity = _np.dot(scaled_unitary, _np.conjugate(scaled_unitary.T)) + scaled_identity = gram_matrix(scaled_unitary, adjoint=True) scale = _np.sqrt(scaled_identity[0, 0]) assert(_np.allclose(scaled_identity / (scale**2), _np.identity(scaled_identity.shape[0], 'd'))), \ "Given `scaled_unitary` does not appear to be a scaled unitary matrix!" @@ -2243,30 +2249,6 @@ def project_onto_antikite(mx, kite): return mx -def remove_dependent_cols(mx, tol=1e-7): - """ - Removes the linearly dependent columns of a matrix. - - Parameters - ---------- - mx : numpy.ndarray - The input matrix - - Returns - ------- - A linearly independent subset of the columns of `mx`. 
- """ - last_rank = 0; cols_to_remove = [] - for j in range(mx.shape[1]): - rnk = _np.linalg.matrix_rank(mx[:, 0:j + 1], tol) - if rnk == last_rank: - cols_to_remove.append(j) - else: - last_rank = rnk - #print("Removing %d cols" % len(cols_to_remove)) - return _np.delete(mx, cols_to_remove, axis=1) - - def intersection_space(space1, space2, tol=1e-7, use_nice_nullspace=False): """ TODO: docstring @@ -2282,7 +2264,8 @@ def union_space(space1, space2, tol=1e-7): TODO: docstring """ VW = _np.concatenate((space1, space2), axis=1) - return remove_dependent_cols(VW, tol) + indep_cols = independent_columns(VW, None, tol) + return VW[:, indep_cols] #UNUSED diff --git a/test/unit/tools/test_matrixtools.py b/test/unit/tools/test_matrixtools.py index 0bb1601ef..3b2b20a75 100644 --- a/test/unit/tools/test_matrixtools.py +++ b/test/unit/tools/test_matrixtools.py @@ -17,8 +17,8 @@ def test_is_hermitian(self): self.assertFalse(mt.is_hermitian(non_herm_mx)) def test_is_pos_def(self): - pos_mx = np.array([[ 4, 0.2], - [0.1, 3]], 'complex') + pos_mx = np.array([[ 4.0, 0.2], + [0.2, 3.0]], 'complex') non_pos_mx = np.array([[ 0, 1], [1, 0]], 'complex') self.assertTrue(mt.is_pos_def(pos_mx)) @@ -160,42 +160,6 @@ def test_fancy_assignment(self): self.assertEqual(mt._findx(a, ([0, 1], [0, 1], 0)).shape, (2, 2)) self.assertEqual(mt._findx(a, ([], [0, 1], 0)).shape, (0, 2)) - def test_safe_ops(self): - mx = np.array([[1+1j, 0], - [2+2j, 3+3j]], 'complex') - smx = sps.csr_matrix(mx) - smx_lil = sps.lil_matrix(mx) # currently unsupported - - r = mt.safe_real(mx, inplace=False) - self.assertArraysAlmostEqual(r, np.real(mx)) - i = mt.safe_imag(mx, inplace=False) - self.assertArraysAlmostEqual(i, np.imag(mx)) - - r = mt.safe_real(smx, inplace=False) - self.assertArraysAlmostEqual(r.toarray(), np.real(mx)) - i = mt.safe_imag(smx, inplace=False) - self.assertArraysAlmostEqual(i.toarray(), np.imag(mx)) - - with self.assertRaises(NotImplementedError): - mt.safe_real(smx_lil, inplace=False) - with self.assertRaises(NotImplementedError): - mt.safe_imag(smx_lil, inplace=False) - - with self.assertRaises(AssertionError): - mt.safe_real(mx, check=True) - with self.assertRaises(AssertionError): - mt.safe_imag(mx, check=True) - - M = mx.copy(); M = mt.safe_real(M, inplace=True) - self.assertArraysAlmostEqual(M, np.real(mx)) - M = mx.copy(); M = mt.safe_imag(M, inplace=True) - self.assertArraysAlmostEqual(M, np.imag(mx)) - - M = smx.copy(); M = mt.safe_real(M, inplace=True) - self.assertArraysAlmostEqual(M.toarray(), np.real(mx)) - M = smx.copy(); M = mt.safe_imag(M, inplace=True) - self.assertArraysAlmostEqual(M.toarray(), np.imag(mx)) - def test_fast_expm(self): mx = np.array([[1, 2], [2, 3]], 'd') From f82655a215c6341e9192e2a41f348ae536efa85f Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 10:16:27 -0400 Subject: [PATCH 2/9] check in --- pygsti/extras/interpygate/__init__.py | 7 + .../extras/interpygate/process_tomography.py | 8 +- .../operations/lindbladerrorgen.py | 135 +------- pygsti/modelmembers/povms/composedeffect.py | 317 ------------------ pygsti/modelmembers/states/composedstate.py | 317 ------------------ pygsti/models/fogistore.py | 3 +- pygsti/tools/basistools.py | 3 +- pygsti/tools/matrixtools.py | 18 +- pygsti/tools/rbtheory.py | 5 +- test/unit/objects/test_fogi.py | 5 +- 10 files changed, 31 insertions(+), 787 deletions(-) diff --git a/pygsti/extras/interpygate/__init__.py b/pygsti/extras/interpygate/__init__.py index 49fddae7b..f126dee97 100644 --- a/pygsti/extras/interpygate/__init__.py +++ 
b/pygsti/extras/interpygate/__init__.py @@ -10,3 +10,10 @@ from .core import PhysicalProcess, InterpolatedDenseOp, InterpolatedOpFactory from .process_tomography import vec, unvec, run_process_tomography + +# Note from Riley on May 22, 2024: +# +# I wanted to remove the implementations of vec and unvec and just in-line equivalent +# code in the few places they were used. However, the fact that they're included in this +# __init__.py file suggests that they might be used outside of pyGSTi itself. +# diff --git a/pygsti/extras/interpygate/process_tomography.py b/pygsti/extras/interpygate/process_tomography.py index 2b262b1d2..61c625190 100644 --- a/pygsti/extras/interpygate/process_tomography.py +++ b/pygsti/extras/interpygate/process_tomography.py @@ -30,7 +30,7 @@ def vec(matrix): """ matrix = _np.array(matrix) if matrix.shape == (len(matrix), len(matrix)): - return _np.array([_np.concatenate(_np.array(matrix).T)]).T + return matrix.reshape((-1, 1), order='F') else: raise ValueError('The input matrix must be square.') @@ -50,9 +50,9 @@ def unvec(vectorized): """ vectorized = _np.array(vectorized) - length = int(_np.sqrt(max(vectorized.shape))) - if len(vectorized) == length ** 2: - return _np.reshape(vectorized, [length, length]).T + dim = int(_np.sqrt(max(vectorized.shape))) + if len(vectorized) == dim ** 2: + return vectorized.reshape((dim, dim), order='F') else: raise ValueError( 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized)) diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index 68097dd82..d0e310a74 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -497,140 +497,7 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= self._paramlbls = _np.array(list(_itertools.chain.from_iterable( [blk.param_labels for blk in self.coefficient_blocks])), dtype=object) assert(self._onenorm_upbound is not None) # _update_rep should set this - #Done with __init__(...) - - #def _init_generators(self, dim): - # #assumes self.dim, self.ham_basis, self.other_basis, and self.matrix_basis are setup... - # sparse_bases = bool(self._rep_type == 'sparse superop') - # - # #HERE TODO - need to update this / MOVE to block class? 
- # #use caching to increase performance - cache based on all the self.XXX members utilized by this fn - # cache_key = (self._rep_type, self.matrix_basis, self.ham_basis, self.other_basis, self.parameterization) - # #print("cache key = ",self._rep_type, (self.matrix_basis.name, self.matrix_basis.dim), - # # (self.ham_basis.name, self.ham_basis.dim), (self.other_basis.name, self.other_basis.dim), - # # str(self.parameterization)) - # - # if cache_key not in self._generators_cache: - # - # d = int(round(_np.sqrt(dim))) - # assert(d * d == dim), "Errorgen dim must be a perfect square" - # - # # Get basis transfer matrix - # mxBasisToStd = self.matrix_basis.create_transform_matrix( - # _BuiltinBasis("std", self.matrix_basis.dim, sparse_bases)) - # # use BuiltinBasis("std") instead of just "std" in case matrix_basis is a TensorProdBasis - # leftTrans = _spsl.inv(mxBasisToStd.tocsc()).tocsr() if _sps.issparse(mxBasisToStd) \ - # else _np.linalg.inv(mxBasisToStd) - # rightTrans = mxBasisToStd - # - # hamBasisMxs = self.ham_basis.elements - # otherBasisMxs = self.other_basis.elements - # - # hamGens, otherGens = _ot.lindblad_error_generators( - # hamBasisMxs, otherBasisMxs, normalize=False, - # other_mode=self.parameterization.nonham_mode) # in std basis - # - # # Note: lindblad_error_generators will return sparse generators when - # # given a sparse basis (or basis matrices) - # - # if hamGens is not None: - # bsH = len(hamGens) + 1 # projection-basis size (not nec. == dim) - # _ot._assert_shape(hamGens, (bsH - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # hamGens = [_mt.safe_real(_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)), - # inplace=True, check=True) for mx in hamGens] - # for mx in hamGens: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #hamGens = _np.einsum("ik,akl,lj->aij", leftTrans, hamGens, rightTrans) - # hamGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, hamGens, (1, 1)), rightTrans, (2, 0)), (1, 0, 2)) - # else: - # bsH = 0 - # assert(bsH == self.ham_basis_size) - # - # if otherGens is not None: - # - # if self.parameterization.nonham_mode == "diagonal": - # bsO = len(otherGens) + 1 # projection-basis size (not nec. == dim) - # _ot._assert_shape(otherGens, (bsO - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # otherGens = [_mt.safe_real(_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)), - # inplace=True, check=True) for mx in otherGens] - # for mx in otherGens: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #otherGens = _np.einsum("ik,akl,lj->aij", leftTrans, otherGens, rightTrans) - # otherGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, otherGens, (1, 1)), rightTrans, (2, 0)), (1, 0, 2)) - # - # elif self.parameterization.nonham_mode == "diag_affine": - # # projection-basis size (not nec. 
== dim) [~shape[1] but works for lists too] - # bsO = len(otherGens[0]) + 1 - # _ot._assert_shape(otherGens, (2, bsO - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # otherGens = [[_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)) - # for mx in mxRow] for mxRow in otherGens] - # - # for mxRow in otherGens: - # for mx in mxRow: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #otherGens = _np.einsum("ik,abkl,lj->abij", leftTrans, - # # otherGens, rightTrans) - # otherGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, otherGens, (1, 2)), rightTrans, (3, 0)), (1, 2, 0, 3)) - # - # else: - # bsO = len(otherGens) + 1 # projection-basis size (not nec. == dim) - # _ot._assert_shape(otherGens, (bsO - 1, bsO - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # otherGens = [[_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)) - # for mx in mxRow] for mxRow in otherGens] - # #Note: complex OK here, as only linear combos of otherGens (like (i,j) + (j,i) - # # terms) need to be real - # - # for mxRow in otherGens: - # for mx in mxRow: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #otherGens = _np.einsum("ik,abkl,lj->abij", leftTrans, - # # otherGens, rightTrans) - # otherGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, otherGens, (1, 2)), rightTrans, (3, 0)), (1, 2, 0, 3)) - # - # else: - # bsO = 0 - # assert(bsO == self.other_basis_size) - # - # if hamGens is not None: - # hamGens_1norms = _np.array([_mt.safe_onenorm(mx) for mx in hamGens], 'd') - # else: - # hamGens_1norms = None - # - # if otherGens is not None: - # if self.parameterization.nonham_mode == "diagonal": - # otherGens_1norms = _np.array([_mt.safe_onenorm(mx) for mx in otherGens], 'd') - # else: - # otherGens_1norms = _np.array([_mt.safe_onenorm(mx) - # for oGenRow in otherGens for mx in oGenRow], 'd') - # else: - # otherGens_1norms = None - # - # self._generators_cache[cache_key] = (hamGens, otherGens, hamGens_1norms, otherGens_1norms) - # - # cached_hamGens, cached_otherGens, cached_h1norms, cached_o1norms = self._generators_cache[cache_key] - # return (_copy.deepcopy(cached_hamGens), _copy.deepcopy(cached_otherGens), - # cached_h1norms.copy() if (cached_h1norms is not None) else None, - # cached_o1norms.copy() if (cached_o1norms is not None) else None) + # Done with __init__(...) def _init_terms(self, coefficient_blocks, max_polynomial_vars): diff --git a/pygsti/modelmembers/povms/composedeffect.py b/pygsti/modelmembers/povms/composedeffect.py index 845085bad..eabcc2afd 100644 --- a/pygsti/modelmembers/povms/composedeffect.py +++ b/pygsti/modelmembers/povms/composedeffect.py @@ -42,323 +42,6 @@ class ComposedPOVMEffect(_POVMEffect): # , _ErrorMapContainer parameters with other gates and spam vectors.) """ - #@classmethod - #def _from_spamvec_obj(cls, spamvec, typ, param_type="GLND", purevec=None, - # proj_basis="pp", mx_basis="pp", truncate=True, - # lazy=False): - # """ - # Creates a LindbladSPAMVec from an existing SPAMVec object and some additional information. - # - # This function is different from `from_spam_vector` in that it assumes - # that `spamvec` is a :class:`SPAMVec`-derived object, and if `lazy=True` - # and if `spamvec` is already a matching LindbladSPAMVec, it - # is returned directly. 
This routine is primarily used in spam vector - # conversion functions, where conversion is desired only when necessary. - # - # Parameters - # ---------- - # spamvec : SPAMVec - # The spam vector object to "convert" to a - # `LindbladSPAMVec`. - # - # typ : {"prep","effect"} - # Whether this is a state preparation or POVM effect vector. - # - # param_type : str, optional - # The high-level "parameter type" of the gate to create. This - # specifies both which Lindblad parameters are included and what - # type of evolution is used. Examples of valid values are - # `"CPTP"`, `"H+S"`, `"S terms"`, and `"GLND clifford terms"`. - # - # purevec : numpy array or SPAMVec object, optional - # A SPAM vector which represents a pure-state, taken as the "ideal" - # reference state when constructing the error generator of the - # returned `LindbladSPAMVec`. Note that this vector - # still acts on density matrices (if it's a SPAMVec it should have - # a "densitymx", "svterm", or "cterm" evolution type, and if it's - # a numpy array it should have the same dimension as `spamvec`). - # If None, then it is taken to be `spamvec`, and so `spamvec` must - # represent a pure state in this case. - # - # proj_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - # The basis used to construct the Lindblad-term error generators onto - # which the SPAM vector's error generator is projected. Allowed values - # are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt), list of numpy arrays, or a custom basis object. - # - # mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - # The source and destination basis, respectively. Allowed - # values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt) (or a custom basis object). - # - # truncate : bool, optional - # Whether to truncate the projections onto the Lindblad terms in - # order to meet constraints (e.g. to preserve CPTP) when necessary. - # If False, then an error is thrown when the given `spamvec` cannot - # be realized by the specified set of Lindblad projections. - # - # lazy : bool, optional - # If True, then if `spamvec` is already a LindbladSPAMVec - # with the requested details (given by the other arguments), then - # `spamvec` is returned directly and no conversion/copying is - # performed. If False, then a new object is always returned. - # - # Returns - # ------- - # LindbladSPAMVec - # """ - # - # if not isinstance(spamvec, SPAMVec): - # spamvec = StaticSPAMVec(spamvec, typ=typ) # assume spamvec is just a vector - # - # if purevec is None: - # purevec = spamvec # right now, we don't try to extract a "closest pure vec" - # # to spamvec - below will fail if spamvec isn't pure. 
- # elif not isinstance(purevec, SPAMVec): - # purevec = StaticSPAMVec(purevec, typ=typ) # assume spamvec is just a vector - # - # #Break param_type in to a "base" type and an evotype - # from .operation import LindbladOp as _LPGMap - # bTyp, evotype, nonham_mode, param_mode = _LPGMap.decomp_paramtype(param_type) - # - # ham_basis = proj_basis if (("H" == bTyp) or ("H+" in bTyp) or bTyp in ("CPTP", "GLND")) else None - # nonham_basis = None if bTyp == "H" else proj_basis - # - # def beq(b1, b2): - # """ Check if bases have equal names """ - # b1 = b1.name if isinstance(b1, _Basis) else b1 - # b2 = b2.name if isinstance(b2, _Basis) else b2 - # return b1 == b2 - # - # def normeq(a, b): - # if a is None and b is None: return True - # if a is None or b is None: return False - # return _mt.safe_norm(a - b) < 1e-6 # what about possibility of Clifford gates? - # - # if isinstance(spamvec, LindbladSPAMVec) \ - # and spamvec._evotype == evotype and spamvec.typ == typ \ - # and beq(ham_basis, spamvec.error_map.ham_basis) and beq(nonham_basis, spamvec.error_map.other_basis) \ - # and param_mode == spamvec.error_map.param_mode and nonham_mode == spamvec.error_map.nonham_mode \ - # and beq(mx_basis, spamvec.error_map.matrix_basis) and lazy: - # #normeq(gate.pure_state_vec,purevec) \ # TODO: more checks for equality?! - # return spamvec # no creation necessary! - # else: - # #Convert vectors (if possible) to SPAMVecs - # # of the appropriate evotype and 0 params. - # bDiff = spamvec is not purevec - # spamvec = _convert_to_lindblad_base(spamvec, typ, evotype, mx_basis) - # purevec = _convert_to_lindblad_base(purevec, typ, evotype, mx_basis) if bDiff else spamvec - # assert(spamvec._evotype == evotype) - # assert(purevec._evotype == evotype) - # - # return cls.from_spam_vector( - # spamvec, purevec, typ, ham_basis, nonham_basis, - # param_mode, nonham_mode, truncate, mx_basis, evotype) - # - #@classmethod - #def from_spam_vector(cls, spam_vec, pure_vec, typ, - # ham_basis="pp", nonham_basis="pp", param_mode="cptp", - # nonham_mode="all", truncate=True, mx_basis="pp", - # evotype="densitymx"): - # """ - # Creates a Lindblad-parameterized spamvec from a state vector and a basis. - # - # The basis specifies how to decompose (project) the vector's error generator. - # - # Parameters - # ---------- - # spam_vec : SPAMVec - # the SPAM vector to initialize from. The error generator that - # tranforms `pure_vec` into `spam_vec` forms the parameterization - # of the returned LindbladSPAMVec. - # - # pure_vec : numpy array or SPAMVec - # An array or SPAMVec in the *full* density-matrix space (this - # vector will have the same dimension as `spam_vec` - 4 in the case - # of a single qubit) which represents a pure-state preparation or - # projection. This is used as the "base" preparation/projection - # when computing the error generator that will be parameterized. - # Note that this argument must be specified, as there is no natural - # default value (like the identity in the case of gates). - # - # typ : {"prep","effect"} - # Whether this is a state preparation or POVM effect vector. - # - # ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - # The basis is used to construct the Hamiltonian-type lindblad error - # Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt), list of numpy arrays, or a custom basis object. 
- # - # nonham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - # The basis is used to construct the Stochastic-type lindblad error - # Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt), list of numpy arrays, or a custom basis object. - # - # param_mode : {"unconstrained", "cptp", "depol", "reldepol"} - # Describes how the Lindblad coefficients/projections relate to the - # SPAM vector's parameter values. Allowed values are: - # `"unconstrained"` (coeffs are independent unconstrained parameters), - # `"cptp"` (independent parameters but constrained so map is CPTP), - # `"reldepol"` (all non-Ham. diagonal coeffs take the *same* value), - # `"depol"` (same as `"reldepol"` but coeffs must be *positive*) - # - # nonham_mode : {"diagonal", "diag_affine", "all"} - # Which non-Hamiltonian Lindblad projections are potentially non-zero. - # Allowed values are: `"diagonal"` (only the diagonal Lind. coeffs.), - # `"diag_affine"` (diagonal coefficients + affine projections), and - # `"all"` (the entire matrix of coefficients is allowed). - # - # truncate : bool, optional - # Whether to truncate the projections onto the Lindblad terms in - # order to meet constraints (e.g. to preserve CPTP) when necessary. - # If False, then an error is thrown when the given `gate` cannot - # be realized by the specified set of Lindblad projections. - # - # mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - # The source and destination basis, respectively. Allowed - # values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt) (or a custom basis object). - # - # evotype : {"densitymx","svterm","cterm"} - # The evolution type of the spamvec being constructed. `"densitymx"` is - # usual Lioville density-matrix-vector propagation via matrix-vector - # products. `"svterm"` denotes state-vector term-based evolution - # (spamvec is obtained by evaluating the rank-1 terms up to - # some order). `"cterm"` is similar but stabilizer states. - # - # Returns - # ------- - # LindbladSPAMVec - # """ - # #Compute a (errgen, pure_vec) pair from the given - # # (spam_vec, pure_vec) pair. - # - # assert(pure_vec is not None), "Must supply `pure_vec`!" # since there's no good default? 
- # - # if not isinstance(spam_vec, SPAMVec): - # spam_vec = StaticSPAMVec(spam_vec, evotype, typ) # assume spamvec is just a vector - # if not isinstance(pure_vec, SPAMVec): - # pure_vec = StaticSPAMVec(pure_vec, evotype, typ) # assume spamvec is just a vector - # d2 = pure_vec.dim - # - # #Determine whether we're using sparse bases or not - # sparse = None - # if ham_basis is not None: - # if isinstance(ham_basis, _Basis): sparse = ham_basis.sparse - # elif not isinstance(ham_basis, str) and len(ham_basis) > 0: - # sparse = _sps.issparse(ham_basis[0]) - # if sparse is None and nonham_basis is not None: - # if isinstance(nonham_basis, _Basis): sparse = nonham_basis.sparse - # elif not isinstance(nonham_basis, str) and len(nonham_basis) > 0: - # sparse = _sps.issparse(nonham_basis[0]) - # if sparse is None: sparse = False # the default - # - # if spam_vec is None or spam_vec is pure_vec: - # if sparse: errgen = _sps.csr_matrix((d2, d2), dtype='d') - # else: errgen = _np.zeros((d2, d2), 'd') - # else: - # #Construct "spam error generator" by comparing *dense* vectors - # pvdense = pure_vec.to_dense() - # svdense = spam_vec.to_dense() - # errgen = _ot.spam_error_generator(svdense, pvdense, mx_basis) - # if sparse: errgen = _sps.csr_matrix(errgen) - # - # assert(pure_vec._evotype == evotype), "`pure_vec` must have evotype == '%s'" % evotype - # - # from .operation import LindbladErrorgen as _LErrorgen - # from .operation import LindbladOp as _LPGMap - # from .operation import LindbladDenseOp as _LPOp - # - # errgen = _LErrorgen.from_error_generator(errgen, ham_basis, - # nonham_basis, param_mode, nonham_mode, - # mx_basis, truncate, evotype) - # errcls = _LPOp if (pure_vec.dim <= 64 and evotype == "densitymx") else _LPGMap - # errmap = errcls(None, errgen) - # - # return cls(pure_vec, errmap, typ) - - #@classmethod - #def from_lindblad_terms(cls, pure_vec, lindblad_term_dict, typ, basisdict=None, - # param_mode="cptp", nonham_mode="all", truncate=True, - # mx_basis="pp", evotype="densitymx"): - # """ - # Create a Lindblad-parameterized spamvec with a given set of Lindblad terms. - # - # Parameters - # ---------- - # pure_vec : numpy array or SPAMVec - # An array or SPAMVec in the *full* density-matrix space (this - # vector will have dimension 4 in the case of a single qubit) which - # represents a pure-state preparation or projection. This is used as - # the "base" preparation or projection that is followed or preceded - # by, respectively, the parameterized Lindblad-form error generator. - # - # lindblad_term_dict : dict - # A dictionary specifying which Linblad terms are present in the gate - # parameteriztion. Keys are `(termType, basisLabel1, )` - # tuples, where `termType` can be `"H"` (Hamiltonian), `"S"` - # (Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always - # have a single basis label (so key is a 2-tuple) whereas Stochastic - # tuples with 1 basis label indicate a *diagonal* term, and are the - # only types of terms allowed when `nonham_mode != "all"`. Otherwise, - # Stochastic term tuples can include 2 basis labels to specify - # "off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be - # strings or integers. Values are complex coefficients (error rates). - # - # typ : {"prep","effect"} - # Whether this is a state preparation or POVM effect vector. - # - # basisdict : dict, optional - # A dictionary mapping the basis labels (strings or ints) used in the - # keys of `lindblad_term_dict` to basis matrices (numpy arrays or Scipy sparse - # matrices). 
- # - # param_mode : {"unconstrained", "cptp", "depol", "reldepol"} - # Describes how the Lindblad coefficients/projections relate to the - # SPAM vector's parameter values. Allowed values are: - # `"unconstrained"` (coeffs are independent unconstrained parameters), - # `"cptp"` (independent parameters but constrained so map is CPTP), - # `"reldepol"` (all non-Ham. diagonal coeffs take the *same* value), - # `"depol"` (same as `"reldepol"` but coeffs must be *positive*) - # - # nonham_mode : {"diagonal", "diag_affine", "all"} - # Which non-Hamiltonian Lindblad projections are potentially non-zero. - # Allowed values are: `"diagonal"` (only the diagonal Lind. coeffs.), - # `"diag_affine"` (diagonal coefficients + affine projections), and - # `"all"` (the entire matrix of coefficients is allowed). - # - # truncate : bool, optional - # Whether to truncate the projections onto the Lindblad terms in - # order to meet constraints (e.g. to preserve CPTP) when necessary. - # If False, then an error is thrown when the given dictionary of - # Lindblad terms doesn't conform to the constrains. - # - # mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - # The source and destination basis, respectively. Allowed - # values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt) (or a custom basis object). - # - # evotype : {"densitymx","svterm","cterm"} - # The evolution type of the spamvec being constructed. `"densitymx"` is - # usual Lioville density-matrix-vector propagation via matrix-vector - # products. `"svterm"` denotes state-vector term-based evolution - # (spamvec is obtained by evaluating the rank-1 terms up to - # some order). `"cterm"` is similar but stabilizer states. - # - # Returns - # ------- - # LindbladOp - # """ - # #Need a dimension for error map construction (basisdict could be completely empty) - # if not isinstance(pure_vec, SPAMVec): - # pure_vec = StaticSPAMVec(pure_vec, evotype, typ) # assume spamvec is just a vector - # d2 = pure_vec.dim - # - # from .operation import LindbladOp as _LPGMap - # errmap = _LPGMap(d2, lindblad_term_dict, basisdict, param_mode, nonham_mode, - # truncate, mx_basis, evotype) - # return cls(pure_vec, errmap, typ) - def __init__(self, static_effect, errormap): evotype = errormap._evotype #from .operation import LindbladOp as _LPGMap diff --git a/pygsti/modelmembers/states/composedstate.py b/pygsti/modelmembers/states/composedstate.py index 03b555b4f..9826db229 100644 --- a/pygsti/modelmembers/states/composedstate.py +++ b/pygsti/modelmembers/states/composedstate.py @@ -43,323 +43,6 @@ class ComposedState(_State): # , _ErrorMapContainer parameters with other gates and spam vectors.) """ - #@classmethod - #def _from_spamvec_obj(cls, spamvec, typ, param_type="GLND", purevec=None, - # proj_basis="pp", mx_basis="pp", truncate=True, - # lazy=False): - # """ - # Creates a LindbladSPAMVec from an existing SPAMVec object and some additional information. - # - # This function is different from `from_spam_vector` in that it assumes - # that `spamvec` is a :class:`SPAMVec`-derived object, and if `lazy=True` - # and if `spamvec` is already a matching LindbladSPAMVec, it - # is returned directly. This routine is primarily used in spam vector - # conversion functions, where conversion is desired only when necessary. - # - # Parameters - # ---------- - # spamvec : SPAMVec - # The spam vector object to "convert" to a - # `LindbladSPAMVec`. - # - # typ : {"prep","effect"} - # Whether this is a state preparation or POVM effect vector. 
- # - # param_type : str, optional - # The high-level "parameter type" of the gate to create. This - # specifies both which Lindblad parameters are included and what - # type of evolution is used. Examples of valid values are - # `"CPTP"`, `"H+S"`, `"S terms"`, and `"GLND clifford terms"`. - # - # purevec : numpy array or SPAMVec object, optional - # A SPAM vector which represents a pure-state, taken as the "ideal" - # reference state when constructing the error generator of the - # returned `LindbladSPAMVec`. Note that this vector - # still acts on density matrices (if it's a SPAMVec it should have - # a "densitymx", "svterm", or "cterm" evolution type, and if it's - # a numpy array it should have the same dimension as `spamvec`). - # If None, then it is taken to be `spamvec`, and so `spamvec` must - # represent a pure state in this case. - # - # proj_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - # The basis used to construct the Lindblad-term error generators onto - # which the SPAM vector's error generator is projected. Allowed values - # are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt), list of numpy arrays, or a custom basis object. - # - # mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - # The source and destination basis, respectively. Allowed - # values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt) (or a custom basis object). - # - # truncate : bool, optional - # Whether to truncate the projections onto the Lindblad terms in - # order to meet constraints (e.g. to preserve CPTP) when necessary. - # If False, then an error is thrown when the given `spamvec` cannot - # be realized by the specified set of Lindblad projections. - # - # lazy : bool, optional - # If True, then if `spamvec` is already a LindbladSPAMVec - # with the requested details (given by the other arguments), then - # `spamvec` is returned directly and no conversion/copying is - # performed. If False, then a new object is always returned. - # - # Returns - # ------- - # LindbladSPAMVec - # """ - # - # if not isinstance(spamvec, SPAMVec): - # spamvec = StaticSPAMVec(spamvec, typ=typ) # assume spamvec is just a vector - # - # if purevec is None: - # purevec = spamvec # right now, we don't try to extract a "closest pure vec" - # # to spamvec - below will fail if spamvec isn't pure. - # elif not isinstance(purevec, SPAMVec): - # purevec = StaticSPAMVec(purevec, typ=typ) # assume spamvec is just a vector - # - # #Break param_type in to a "base" type and an evotype - # from .operation import LindbladOp as _LPGMap - # bTyp, evotype, nonham_mode, param_mode = _LPGMap.decomp_paramtype(param_type) - # - # ham_basis = proj_basis if (("H" == bTyp) or ("H+" in bTyp) or bTyp in ("CPTP", "GLND")) else None - # nonham_basis = None if bTyp == "H" else proj_basis - # - # def beq(b1, b2): - # """ Check if bases have equal names """ - # b1 = b1.name if isinstance(b1, _Basis) else b1 - # b2 = b2.name if isinstance(b2, _Basis) else b2 - # return b1 == b2 - # - # def normeq(a, b): - # if a is None and b is None: return True - # if a is None or b is None: return False - # return _mt.safe_norm(a - b) < 1e-6 # what about possibility of Clifford gates? 
- # - # if isinstance(spamvec, LindbladSPAMVec) \ - # and spamvec._evotype == evotype and spamvec.typ == typ \ - # and beq(ham_basis, spamvec.error_map.ham_basis) and beq(nonham_basis, spamvec.error_map.other_basis) \ - # and param_mode == spamvec.error_map.param_mode and nonham_mode == spamvec.error_map.nonham_mode \ - # and beq(mx_basis, spamvec.error_map.matrix_basis) and lazy: - # #normeq(gate.pure_state_vec,purevec) \ # TODO: more checks for equality?! - # return spamvec # no creation necessary! - # else: - # #Convert vectors (if possible) to SPAMVecs - # # of the appropriate evotype and 0 params. - # bDiff = spamvec is not purevec - # spamvec = _convert_to_lindblad_base(spamvec, typ, evotype, mx_basis) - # purevec = _convert_to_lindblad_base(purevec, typ, evotype, mx_basis) if bDiff else spamvec - # assert(spamvec._evotype == evotype) - # assert(purevec._evotype == evotype) - # - # return cls.from_spam_vector( - # spamvec, purevec, typ, ham_basis, nonham_basis, - # param_mode, nonham_mode, truncate, mx_basis, evotype) - # - #@classmethod - #def from_spam_vector(cls, spam_vec, pure_vec, typ, - # ham_basis="pp", nonham_basis="pp", param_mode="cptp", - # nonham_mode="all", truncate=True, mx_basis="pp", - # evotype="densitymx"): - # """ - # Creates a Lindblad-parameterized spamvec from a state vector and a basis. - # - # The basis specifies how to decompose (project) the vector's error generator. - # - # Parameters - # ---------- - # spam_vec : SPAMVec - # the SPAM vector to initialize from. The error generator that - # tranforms `pure_vec` into `spam_vec` forms the parameterization - # of the returned LindbladSPAMVec. - # - # pure_vec : numpy array or SPAMVec - # An array or SPAMVec in the *full* density-matrix space (this - # vector will have the same dimension as `spam_vec` - 4 in the case - # of a single qubit) which represents a pure-state preparation or - # projection. This is used as the "base" preparation/projection - # when computing the error generator that will be parameterized. - # Note that this argument must be specified, as there is no natural - # default value (like the identity in the case of gates). - # - # typ : {"prep","effect"} - # Whether this is a state preparation or POVM effect vector. - # - # ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - # The basis is used to construct the Hamiltonian-type lindblad error - # Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt), list of numpy arrays, or a custom basis object. - # - # nonham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - # The basis is used to construct the Stochastic-type lindblad error - # Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt), list of numpy arrays, or a custom basis object. - # - # param_mode : {"unconstrained", "cptp", "depol", "reldepol"} - # Describes how the Lindblad coefficients/projections relate to the - # SPAM vector's parameter values. Allowed values are: - # `"unconstrained"` (coeffs are independent unconstrained parameters), - # `"cptp"` (independent parameters but constrained so map is CPTP), - # `"reldepol"` (all non-Ham. diagonal coeffs take the *same* value), - # `"depol"` (same as `"reldepol"` but coeffs must be *positive*) - # - # nonham_mode : {"diagonal", "diag_affine", "all"} - # Which non-Hamiltonian Lindblad projections are potentially non-zero. - # Allowed values are: `"diagonal"` (only the diagonal Lind. 
coeffs.), - # `"diag_affine"` (diagonal coefficients + affine projections), and - # `"all"` (the entire matrix of coefficients is allowed). - # - # truncate : bool, optional - # Whether to truncate the projections onto the Lindblad terms in - # order to meet constraints (e.g. to preserve CPTP) when necessary. - # If False, then an error is thrown when the given `gate` cannot - # be realized by the specified set of Lindblad projections. - # - # mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - # The source and destination basis, respectively. Allowed - # values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt) (or a custom basis object). - # - # evotype : {"densitymx","svterm","cterm"} - # The evolution type of the spamvec being constructed. `"densitymx"` is - # usual Lioville density-matrix-vector propagation via matrix-vector - # products. `"svterm"` denotes state-vector term-based evolution - # (spamvec is obtained by evaluating the rank-1 terms up to - # some order). `"cterm"` is similar but stabilizer states. - # - # Returns - # ------- - # LindbladSPAMVec - # """ - # #Compute a (errgen, pure_vec) pair from the given - # # (spam_vec, pure_vec) pair. - # - # assert(pure_vec is not None), "Must supply `pure_vec`!" # since there's no good default? - # - # if not isinstance(spam_vec, SPAMVec): - # spam_vec = StaticSPAMVec(spam_vec, evotype, typ) # assume spamvec is just a vector - # if not isinstance(pure_vec, SPAMVec): - # pure_vec = StaticSPAMVec(pure_vec, evotype, typ) # assume spamvec is just a vector - # d2 = pure_vec.dim - # - # #Determine whether we're using sparse bases or not - # sparse = None - # if ham_basis is not None: - # if isinstance(ham_basis, _Basis): sparse = ham_basis.sparse - # elif not isinstance(ham_basis, str) and len(ham_basis) > 0: - # sparse = _sps.issparse(ham_basis[0]) - # if sparse is None and nonham_basis is not None: - # if isinstance(nonham_basis, _Basis): sparse = nonham_basis.sparse - # elif not isinstance(nonham_basis, str) and len(nonham_basis) > 0: - # sparse = _sps.issparse(nonham_basis[0]) - # if sparse is None: sparse = False # the default - # - # if spam_vec is None or spam_vec is pure_vec: - # if sparse: errgen = _sps.csr_matrix((d2, d2), dtype='d') - # else: errgen = _np.zeros((d2, d2), 'd') - # else: - # #Construct "spam error generator" by comparing *dense* vectors - # pvdense = pure_vec.to_dense() - # svdense = spam_vec.to_dense() - # errgen = _ot.spam_error_generator(svdense, pvdense, mx_basis) - # if sparse: errgen = _sps.csr_matrix(errgen) - # - # assert(pure_vec._evotype == evotype), "`pure_vec` must have evotype == '%s'" % evotype - # - # from .operation import LindbladErrorgen as _LErrorgen - # from .operation import LindbladOp as _LPGMap - # from .operation import LindbladDenseOp as _LPOp - # - # errgen = _LErrorgen.from_error_generator(errgen, ham_basis, - # nonham_basis, param_mode, nonham_mode, - # mx_basis, truncate, evotype) - # errcls = _LPOp if (pure_vec.dim <= 64 and evotype == "densitymx") else _LPGMap - # errmap = errcls(None, errgen) - # - # return cls(pure_vec, errmap, typ) - - #@classmethod - #def from_lindblad_terms(cls, pure_vec, lindblad_term_dict, typ, basisdict=None, - # param_mode="cptp", nonham_mode="all", truncate=True, - # mx_basis="pp", evotype="densitymx"): - # """ - # Create a Lindblad-parameterized spamvec with a given set of Lindblad terms. 
- # - # Parameters - # ---------- - # pure_vec : numpy array or SPAMVec - # An array or SPAMVec in the *full* density-matrix space (this - # vector will have dimension 4 in the case of a single qubit) which - # represents a pure-state preparation or projection. This is used as - # the "base" preparation or projection that is followed or preceded - # by, respectively, the parameterized Lindblad-form error generator. - # - # lindblad_term_dict : dict - # A dictionary specifying which Linblad terms are present in the gate - # parameteriztion. Keys are `(termType, basisLabel1, )` - # tuples, where `termType` can be `"H"` (Hamiltonian), `"S"` - # (Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always - # have a single basis label (so key is a 2-tuple) whereas Stochastic - # tuples with 1 basis label indicate a *diagonal* term, and are the - # only types of terms allowed when `nonham_mode != "all"`. Otherwise, - # Stochastic term tuples can include 2 basis labels to specify - # "off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be - # strings or integers. Values are complex coefficients (error rates). - # - # typ : {"prep","effect"} - # Whether this is a state preparation or POVM effect vector. - # - # basisdict : dict, optional - # A dictionary mapping the basis labels (strings or ints) used in the - # keys of `lindblad_term_dict` to basis matrices (numpy arrays or Scipy sparse - # matrices). - # - # param_mode : {"unconstrained", "cptp", "depol", "reldepol"} - # Describes how the Lindblad coefficients/projections relate to the - # SPAM vector's parameter values. Allowed values are: - # `"unconstrained"` (coeffs are independent unconstrained parameters), - # `"cptp"` (independent parameters but constrained so map is CPTP), - # `"reldepol"` (all non-Ham. diagonal coeffs take the *same* value), - # `"depol"` (same as `"reldepol"` but coeffs must be *positive*) - # - # nonham_mode : {"diagonal", "diag_affine", "all"} - # Which non-Hamiltonian Lindblad projections are potentially non-zero. - # Allowed values are: `"diagonal"` (only the diagonal Lind. coeffs.), - # `"diag_affine"` (diagonal coefficients + affine projections), and - # `"all"` (the entire matrix of coefficients is allowed). - # - # truncate : bool, optional - # Whether to truncate the projections onto the Lindblad terms in - # order to meet constraints (e.g. to preserve CPTP) when necessary. - # If False, then an error is thrown when the given dictionary of - # Lindblad terms doesn't conform to the constrains. - # - # mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - # The source and destination basis, respectively. Allowed - # values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - # and Qutrit (qt) (or a custom basis object). - # - # evotype : {"densitymx","svterm","cterm"} - # The evolution type of the spamvec being constructed. `"densitymx"` is - # usual Lioville density-matrix-vector propagation via matrix-vector - # products. `"svterm"` denotes state-vector term-based evolution - # (spamvec is obtained by evaluating the rank-1 terms up to - # some order). `"cterm"` is similar but stabilizer states. 
- # - # Returns - # ------- - # LindbladOp - # """ - # #Need a dimension for error map construction (basisdict could be completely empty) - # if not isinstance(pure_vec, SPAMVec): - # pure_vec = StaticSPAMVec(pure_vec, evotype, typ) # assume spamvec is just a vector - # d2 = pure_vec.dim - # - # from .operation import LindbladOp as _LPGMap - # errmap = _LPGMap(d2, lindblad_term_dict, basisdict, param_mode, nonham_mode, - # truncate, mx_basis, evotype) - # return cls(pure_vec, errmap, typ) - def __init__(self, static_state, errormap): evotype = errormap._evotype #from .operation import LindbladOp as _LPGMap diff --git a/pygsti/models/fogistore.py b/pygsti/models/fogistore.py index 389925321..5281cad13 100644 --- a/pygsti/models/fogistore.py +++ b/pygsti/models/fogistore.py @@ -547,7 +547,8 @@ def create_fogi_aggregate_single_op_space(self, op_label, errorgen_type='H', else: raise ValueError("Invalid intrinsic_or_relational value: `%s`" % str(intrinsic_or_relational)) - space = _mt.remove_dependent_cols(space) + col_indices = _mt.independent_columns(space) + space = space[:, col_indices] return space @classmethod diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index 95471181b..06cf7674f 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -199,7 +199,7 @@ def change_basis(mx, from_basis, to_basis): if _mt.safe_norm(ret, 'imag') > 1e-8: raise ValueError("Array has non-zero imaginary part (%g) after basis change (%s to %s)!\n%s" % (_mt.safe_norm(ret, 'imag'), from_basis, to_basis, ret)) - return _mt.safe_real(ret) + return ret.real #def transform_matrix(from_basis, to_basis, dim_or_block_dims=None, sparse=False): # ''' @@ -507,6 +507,7 @@ def vec_to_stdmx(v, basis, keep_complex=False): """ if not isinstance(basis, _basis.Basis): basis = _basis.BuiltinBasis(basis, len(v)) + v = v.ravel() ret = _np.zeros(basis.elshape, 'complex') for i, mx in enumerate(basis.elements): if keep_complex: diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index c4998d310..eea184f10 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -251,7 +251,7 @@ def nice_nullspace(m, tol=1e-7, orthogonalize=False): return ret -def normalize_columns(m, return_norms=False, norm_ord=None): +def normalize_columns(m, return_norms=False, ord=None): """ Normalizes the columns of a matrix. @@ -264,7 +264,7 @@ def normalize_columns(m, return_norms=False, norm_ord=None): If `True`, also return a 1D array containing the norms of the columns (before they were normalized). - norm_ord : int or list of ints, optional + ord : int or list of ints, optional The order of the norm. See :func:`numpy.linalg.norm`. An array of orders can be given to specify the norm on a per-column basis. @@ -278,13 +278,13 @@ def normalize_columns(m, return_norms=False, norm_ord=None): Only returned when `return_norms=True`, a 1-dimensional array of the pre-normalization norm of each column. """ - norms = column_norms(m, norm_ord) + norms = column_norms(m, ord) norms[norms == 0.0] = 1.0 # avoid division of zero-column by zero normalized_m = scale_columns(m, 1 / norms) return (normalized_m, norms) if return_norms else normalized_m -def column_norms(m, norm_ord=None): +def column_norms(m, ord=None): """ Compute the norms of the columns of a matrix. @@ -304,14 +304,14 @@ def column_norms(m, norm_ord=None): A 1-dimensional array of the column norms (length is number of columns of `m`). 
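    Examples
    --------
    A minimal sketch (illustration only, not part of this patch; assumes
    ``numpy`` is imported as ``_np`` as elsewhere in this module).  Each
    column is measured with :func:`numpy.linalg.norm`, optionally with a
    different order per column::

        m = _np.array([[3., 0.],
                       [4., 1.]])
        column_norms(m)                    # -> array([5., 1.])  (2-norms of the columns)
        column_norms(m, ord=[1, _np.inf])  # -> array([7., 1.])  (per-column orders)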
""" if _sps.issparse(m): - ord_list = norm_ord if isinstance(norm_ord, (list, _np.ndarray)) else [norm_ord] * m.shape[1] + ord_list = ord if isinstance(ord, (list, _np.ndarray)) else [ord] * m.shape[1] assert(len(ord_list) == m.shape[1]) norms = _np.array([_np.linalg.norm(m[:, j].toarray(), ord=o) for j, o in enumerate(ord_list)]) - elif isinstance(norm_ord, (list, _np.ndarray)): - assert(len(norm_ord) == m.shape[1]) - norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(norm_ord)]) + elif isinstance(ord, (list, _np.ndarray)): + assert(len(ord) == m.shape[1]) + norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(ord)]) else: - norms = _np.linalg.norm(m, axis=0, ord=norm_ord) + norms = _np.linalg.norm(m, axis=0, ord=ord) return norms diff --git a/pygsti/tools/rbtheory.py b/pygsti/tools/rbtheory.py index 48cda8f9f..79e23f06c 100644 --- a/pygsti/tools/rbtheory.py +++ b/pygsti/tools/rbtheory.py @@ -218,7 +218,8 @@ def rb_gauge(model, target_model, weights=None, mx_basis=None, eigenvector_weigh vec_l_operator = vec_l_operator.real vec_l_operator[abs(vec_l_operator) < 10**(-15)] = 0. - l_operator = _mtls.unvec(vec_l_operator) + dim = int(_np.sqrt(vec_l_operator.size)) + l_operator = vec_l_operator.reshape((dim, dim), order='F') return l_operator @@ -791,7 +792,7 @@ def gate_dependence_of_errormaps(model, target_model, norm='diamond', mx_basis=N mx_basis=mx_basis)) elif norm == '1to1': gate_dif = error_gs.operations[gate] - error_gs.operations['Gavg'] - delta.append(_optls.norm1to1(gate_dif, num_samples=1000, mx_basis=mx_basis, return_list=False)) + delta.append(_optls.norm1to1(gate_dif, num_samples=1000, mx_basis=mx_basis)) else: raise ValueError("Only diamond or 1to1 norm available.") diff --git a/test/unit/objects/test_fogi.py b/test/unit/objects/test_fogi.py index 783de2390..5676d853f 100644 --- a/test/unit/objects/test_fogi.py +++ b/test/unit/objects/test_fogi.py @@ -170,8 +170,8 @@ def test_crosstalk_free_fogi(self): nprefix = mdl.num_params - nfogi # reparameterization *prefixes* FOGI params with "unused" params self.assertEqual(nprefix, 0) # because include_spam=True above - self.assertArraysAlmostEqual(mdl.fogi_errorgen_components_array(include_fogv=False, normalized_elem_gens=True), - mdl.to_vector()[nprefix:]) + temp = mdl.fogi_errorgen_components_array(include_fogv=False, normalized_elem_gens=True) + self.assertArraysAlmostEqual(temp, mdl.to_vector()[nprefix:]) v = mdl.to_vector() # just test this works @@ -179,6 +179,7 @@ def test_crosstalk_free_fogi(self): w = np.random.rand(mdl.num_params) w[0:nprefix] = 0 # zero out all unused params (these can be SPAM and can't be any value?) 
mdl.from_vector(w) + pass def test_cloud_crosstalk_fogi(self): From d0e1bde22ee23c1399f349a12d55ab7d8ad8459e Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 10:28:37 -0400 Subject: [PATCH 3/9] remove change that wasnt strictly in-scope for the PR --- pygsti/extras/interpygate/process_tomography.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pygsti/extras/interpygate/process_tomography.py b/pygsti/extras/interpygate/process_tomography.py index 61c625190..2b262b1d2 100644 --- a/pygsti/extras/interpygate/process_tomography.py +++ b/pygsti/extras/interpygate/process_tomography.py @@ -30,7 +30,7 @@ def vec(matrix): """ matrix = _np.array(matrix) if matrix.shape == (len(matrix), len(matrix)): - return matrix.reshape((-1, 1), order='F') + return _np.array([_np.concatenate(_np.array(matrix).T)]).T else: raise ValueError('The input matrix must be square.') @@ -50,9 +50,9 @@ def unvec(vectorized): """ vectorized = _np.array(vectorized) - dim = int(_np.sqrt(max(vectorized.shape))) - if len(vectorized) == dim ** 2: - return vectorized.reshape((dim, dim), order='F') + length = int(_np.sqrt(max(vectorized.shape))) + if len(vectorized) == length ** 2: + return _np.reshape(vectorized, [length, length]).T else: raise ValueError( 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized)) From b932571ef82cc8e1d7200bc009d75501dc8b0db3 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 10:36:15 -0400 Subject: [PATCH 4/9] remove changes that werent strictly necessary --- pygsti/tools/matrixtools.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index eea184f10..e486b559a 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -148,7 +148,8 @@ def is_valid_density_mx(mx, tol=1e-9): bool True if mx is a valid density matrix, otherwise False. """ - return abs(_np.trace(mx) - 1.0) < tol and is_hermitian(mx, tol) and is_pos_def(mx, tol) + # is_pos_def includes a check that the matrix is Hermitian. + return abs(_np.trace(mx) - 1.0) < tol and is_pos_def(mx, tol) def nullspace(m, tol=1e-7): @@ -656,7 +657,6 @@ def mx_to_string_complex(m, real_width=9, im_width=9, prec=4): return s -#TODO: revert changes in the function below. def unitary_superoperator_matrix_log(m, mx_basis): """ Construct the logarithm of superoperator matrix `m`. @@ -686,16 +686,11 @@ def unitary_superoperator_matrix_log(m, mx_basis): from . import lindbladtools as _lt # (would create circular imports if at top) from . import optools as _ot # (would create circular imports if at top) - # Riley question: what assumptions do we have for the input m? The call to eigvals - # below is intended for fully-general matrices. I imagine we (typically) have structure - # that makes it preferable to call some other function (li) M_std = change_basis(m, mx_basis, "std") evals = _np.linalg.eigvals(M_std) - assert(_np.allclose(_np.abs(evals), 1.0)) - # ^ simple but technically incomplete check for a unitary superop - # (e.g. could be anti-unitary: diag(1, -1, -1, -1)) - - # ^ Riley question: + assert(_np.allclose(_np.abs(evals), 1.0)) # simple but technically incomplete check for a unitary superop + # (e.g. 
could be anti-unitary: diag(1, -1, -1, -1)) + U = _ot.std_process_mx_to_unitary(M_std) H = _spl.logm(U) / -1j # U = exp(-iH) logM_std = _lt.create_elementary_errorgen('H', H) # rho --> -i[H, rho] From 4f47d1fafd9a421dff35fa7482aff31c259fd92e Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 14:12:03 -0400 Subject: [PATCH 5/9] tests pass --- pygsti/models/fogistore.py | 5 +++-- pygsti/tools/matrixtools.py | 29 +++++++++++++---------------- test/unit/objects/test_fogi.py | 4 ++-- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/pygsti/models/fogistore.py b/pygsti/models/fogistore.py index 5281cad13..ccbd80848 100644 --- a/pygsti/models/fogistore.py +++ b/pygsti/models/fogistore.py @@ -265,8 +265,9 @@ def opcoeffs_to_fogiv_components_array(self, op_coeffs): errorgen_vec = _np.zeros(self.errorgen_space_dim, 'd') for i, (op_label, elem_lbl) in enumerate(self.errorgen_space_op_elem_labels): errorgen_vec[i] += op_coeffs[op_label].get(elem_lbl, 0.0) - return self.errorgen_vec_to_fogi_components_array(errorgen_vec), \ - self.errorgen_vec_to_fogv_components_array(errorgen_vec) + out1 = self.errorgen_vec_to_fogi_components_array(errorgen_vec) + out2 = self.errorgen_vec_to_fogv_components_array(errorgen_vec) + return out1, out2 def fogi_components_array_to_errorgen_vec(self, fogi_components): assert(self._dependent_fogi_action == 'drop'), \ diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index e486b559a..8bcb20fd6 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -206,7 +206,6 @@ def nullspace_qr(m, tol=1e-7): return q[:, rank:] -#TODO: remove the orthogonalize argument (requires changing functions that call this one) def nice_nullspace(m, tol=1e-7, orthogonalize=False): """ Computes the nullspace of a matrix, and tries to return a "nice" basis for it. @@ -229,21 +228,19 @@ def nice_nullspace(m, tol=1e-7, orthogonalize=False): ------- An matrix of shape (M,K) whose columns contain nullspace basis vectors. 
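    Examples
    --------
    A small sketch of the intended behavior (illustration only, not a test
    from this patch): the kernel of ``[[1., 1.]]`` is spanned by ``[1, -1]``,
    and the returned basis column is rescaled so its largest-magnitude entry
    is +1.0::

        nice_nullspace(_np.array([[1., 1.]]))   # ~ array([[ 1.], [-1.]])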
""" - - # - # nullsp = nullspace(m, tol) - # dim_ker = nullsp.shape[1] - # _, _, p = _spl.qr(nullsp.T.conj(), mode='raw', pivoting=True) - # ret = nullsp @ (nullsp.T[:, p[dim_ker]]).conj() - # - ## ^ Equivalent to, but faster than the following - ## - ## nullsp_projector = nullsp @ nullsp.T.conj() - ## ret = nullsp_projector[:, p[:dim_ker]] - ## - # - - ret = nullspace(m, tol) + nullsp = nullspace(m, tol) + dim_ker = nullsp.shape[1] + if dim_ker == 0: + return nullsp # empty 0-by-N array + _, _, p = _spl.qr(nullsp.T.conj(), mode='raw', pivoting=True) + ret = nullsp @ (nullsp.T[:, p[:dim_ker]]).conj() + # ^ That's equivalent to, but faster than: + # nullsp_projector = nullsp @ nullsp.T.conj() + # _, _, p = _spl.qr(nullsp_projector mode='raw', pivoting=True) + # ret = nullsp_projector[:, p[:dim_ker]] + + if orthogonalize: + ret, _ = _spl.qr(ret, mode='economic') for j in range(ret.shape[1]): # normalize columns so largest element is +1.0 imax = _np.argmax(_np.abs(ret[:, j])) if abs(ret[imax, j]) > 1e-6: diff --git a/test/unit/objects/test_fogi.py b/test/unit/objects/test_fogi.py index 5676d853f..d55314aa2 100644 --- a/test/unit/objects/test_fogi.py +++ b/test/unit/objects/test_fogi.py @@ -219,8 +219,8 @@ def test_cloud_crosstalk_fogi(self): nprefix = mdl.num_params - nfogi # reparameterization *prefixes* FOGI params with "unused" params self.assertEqual(nprefix, 0) # because include_spam=True above - self.assertArraysAlmostEqual(mdl.fogi_errorgen_components_array(include_fogv=False, normalized_elem_gens=True), - mdl.to_vector()[nprefix:]) + temp = mdl.fogi_errorgen_components_array(include_fogv=False, normalized_elem_gens=True) + self.assertArraysAlmostEqual(temp, mdl.to_vector()[nprefix:]) v = mdl.to_vector() # just test this works From 2cd29abe5ac108c503f864b4be764d47c0c1b193 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 14:15:26 -0400 Subject: [PATCH 6/9] remove is_normal function --- pygsti/tools/matrixtools.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 8bcb20fd6..42c5a2ea7 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -64,20 +64,6 @@ def gram_matrix(m, adjoint=False): return out -def is_normal(m, tol=1e-9): - """ - Test whether m is a normal operator, in the sense that it commutes with its adjoint. - """ - if m.shape[0] != m.shape[1]: - return False - prefix_char, _, _ = _spl.blas.find_best_blas_type(dtype=m.dtype) - herk = BLAS_FUNCS["herk"][prefix_char] - trans = 2 if _np.iscomplexobj(m) else 1 - mdagm = herk( 1.0, m, trans=trans ) - mmdag = herk( -1.0, m, trans=0, c=mdagm, overwrite_c=True ) - return _np.all(_np.abs(mmdag) <= tol) - - def is_hermitian(mx, tol=1e-9): """ Test whether mx is a hermitian matrix. From 14c444be9b8ffa087fe07643145a7c4093a50049 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 16:35:02 -0400 Subject: [PATCH 7/9] add a comment and remove unused imports --- pygsti/tools/matrixtools.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 42c5a2ea7..db61ed0b3 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -417,6 +417,8 @@ def independent_columns(m, initial_independent_cols=None, tol=1e-7): if initial_independent_cols is None: proj_m = m.copy() else: + # We assume initial_independent_cols is full column-rank. + # This lets us use unpivoted QR instead of pivoted QR or SVD. 
assert initial_independent_cols.shape[0] == m.shape[0] q = _spl.qr(initial_independent_cols, mode='econ')[0] # proj_m = (I - qq')m @@ -900,9 +902,6 @@ def real_matrix_log(m, action_if_imaginary="raise", tol=1e-8): ## ------------------------ Erik : Matrix tools that Tim has moved here ----------- -from scipy.linalg import sqrtm as _sqrtm -import itertools as _ittls - def column_basis_vector(i, dim): """ From ba3868be61ca1c655667392e242873a2d03fdce7 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Tue, 24 Sep 2024 12:38:40 -0400 Subject: [PATCH 8/9] interpygate helpers --- pygsti/extras/interpygate/__init__.py | 13 +++-- .../extras/interpygate/process_tomography.py | 54 +++++++++++++++---- test/test_packages/extras/test_interpygate.py | 15 +++--- 3 files changed, 61 insertions(+), 21 deletions(-) diff --git a/pygsti/extras/interpygate/__init__.py b/pygsti/extras/interpygate/__init__.py index f126dee97..1155ee3f1 100644 --- a/pygsti/extras/interpygate/__init__.py +++ b/pygsti/extras/interpygate/__init__.py @@ -11,9 +11,14 @@ from .core import PhysicalProcess, InterpolatedDenseOp, InterpolatedOpFactory from .process_tomography import vec, unvec, run_process_tomography -# Note from Riley on May 22, 2024: +# Note from Riley on September, 2024: # -# I wanted to remove the implementations of vec and unvec and just in-line equivalent -# code in the few places they were used. However, the fact that they're included in this -# __init__.py file suggests that they might be used outside of pyGSTi itself. +# vec is deprecated, and shouldn't be called anywhere in the codebase. +# +# unvec is deprecated and replaced with unvec_square; the latter function +# isn't imported here because we don't want people to access it just from +# the pygsti.extras.interpygate namespace. +# +# Ideally we'd remove vec and unvec from the pygsti.extras.interpygate namespace +# and only have them available in pygsti.extras.interpygate.process_tomography. # diff --git a/pygsti/extras/interpygate/process_tomography.py b/pygsti/extras/interpygate/process_tomography.py index 2b262b1d2..42908777e 100644 --- a/pygsti/extras/interpygate/process_tomography.py +++ b/pygsti/extras/interpygate/process_tomography.py @@ -7,6 +7,7 @@ import numpy.linalg as _lin from pygsti.tools.basistools import change_basis +from pygsti.tools.legacytools import deprecate #Helper functions @@ -15,8 +16,11 @@ def multi_kron(*a): return reduce(_np.kron, a) +@deprecate("Calls to this function should be replaced with in-lined code: matrix.reshape((matrix.size, 1), 'F')") def vec(matrix): - """A function that vectorizes a matrix. + """ + Returns an explicit column-vector representation of a square matrix, obtained by reading + from the square matrix in column-major order. Args: matrix (list,numpy.ndarray): NxN matrix @@ -30,11 +34,12 @@ def vec(matrix): """ matrix = _np.array(matrix) if matrix.shape == (len(matrix), len(matrix)): - return _np.array([_np.concatenate(_np.array(matrix).T)]).T + return matrix.reshape(shape=(matrix.size, 1), order='F') else: raise ValueError('The input matrix must be square.') +@deprecate("Calls to this function should be replaced by unvec_square(vectorized, 'F')") def unvec(vectorized): """A function that vectorizes a process in the basis of matrix units, sorted first by column, then row. 
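(Aside, not part of the diff: the deprecation messages above point callers at plain
column-major reshape/ravel calls.  A tiny round-trip sketch of the convention being
preserved, using hypothetical variable names:

    import numpy as np

    A = np.array([[1, 2],
                  [3, 4]])
    v = A.reshape((-1, 1), order='F')   # the in-lined replacement suggested for vec(A)
    # v.ravel() is [1, 3, 2, 4]: the columns of A stacked on top of each other
    B = v.reshape((2, 2), order='F')    # roughly what unvec_square(v, 'F') recovers
    assert np.array_equal(A, B)
)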
@@ -49,13 +54,42 @@ def unvec(vectorized): ValueError: If the length of the input is not a perfect square """ - vectorized = _np.array(vectorized) - length = int(_np.sqrt(max(vectorized.shape))) - if len(vectorized) == length ** 2: - return _np.reshape(vectorized, [length, length]).T + return unvec_square(vectorized, order='F') + + +def unvec_square(vectorized, order): + """ + Takes a vector whose length is a perfect square, and returns a square matrix + representation by reading from the vectors entries to define the matrix in + column-major order (order='F') or row-major order (order='C'). + + Args: + vectorized: array-like, where np.array(vectorized).size is a perfect square. + order: 'F' or 'C' + + Returns: + numpy.ndarray: NxN dimensional array + + Raises: + ValueError: If the length of the input is not a perfect square. + + """ + assert order == 'F' or order == 'C' + if not isinstance(vectorized, _np.ndarray): + vectorized = _np.array(vectorized) + + if vectorized.ndim == 2: + assert min(vectorized.shape) == 1 + vectorized = vectorized.ravel() + elif vectorized.ndim > 2: + raise ValueError('vectorized.ndim must be <= 2.') + + n = int(_np.sqrt(max(vectorized.shape))) + if len(vectorized) == n ** 2: + return vectorized.reshape(shape=(n, n), order=order) else: - raise ValueError( - 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized)) + msg = 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized) + raise ValueError(msg) def split(n, a): @@ -129,7 +163,7 @@ def run_process_tomography(state_to_density_matrix_fn, n_qubits=1, comm=None, states = _itertools.product(one_qubit_states, repeat=n_qubits) states = [multi_kron(*state) for state in states] in_density_matrices = [_np.outer(state, state.conj()) for state in states] - in_states = _np.column_stack(list([vec(rho) for rho in in_density_matrices])) + in_states = _np.column_stack(list([rho.ravel(order='F') for rho in in_density_matrices])) my_states = split(size, states)[rank] if verbose: print("Process %d of %d evaluating %d input states." 
% (rank, size, len(my_states))) @@ -150,7 +184,7 @@ def run_process_tomography(state_to_density_matrix_fn, n_qubits=1, comm=None, out_density_matrices = _np.array([y for x in gathered_out_density_matrices for y in x]) # Sort the list by time out_density_matrices = _np.transpose(out_density_matrices, [1, 0, 2, 3]) - out_states = [_np.column_stack(list([vec(rho) for rho in density_matrices_at_time])) + out_states = [_np.column_stack(list([rho.ravel(order='F') for rho in density_matrices_at_time])) for density_matrices_at_time in out_density_matrices] process_matrices = [_np.dot(out_states_at_time, _lin.inv(in_states)) for out_states_at_time in out_states] process_matrices = [change_basis(process_matrix_at_time, 'col', basis) diff --git a/test/test_packages/extras/test_interpygate.py b/test/test_packages/extras/test_interpygate.py index 565e5c396..97e76e936 100644 --- a/test/test_packages/extras/test_interpygate.py +++ b/test/test_packages/extras/test_interpygate.py @@ -3,7 +3,7 @@ import pygsti from pygsti.extras import interpygate as interp -from pygsti.extras.interpygate.process_tomography import run_process_tomography, vec, unvec +from pygsti.extras.interpygate.process_tomography import run_process_tomography, unvec_square from pygsti.tools import change_basis from ..testutils import BaseTestCase @@ -51,7 +51,7 @@ def advance(self, state, v, t): L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator process = change_basis(_expm((H + L) * t), 'pp', 'col') - state = unvec(_np.dot(process, vec(_np.outer(state, state.conj())))) + state = unvec_square(_np.dot(process, _np.outer(state, state.conj()).ravel(order='F')), 'F') return state def create_process_matrix(self, v, comm=None): @@ -102,7 +102,7 @@ def advance(self, state, v, times): L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator processes = [change_basis(_expm((H + L) * t), 'pp', 'col') for t in times] - states = [unvec(_np.dot(process, vec(_np.outer(state, state.conj())))) for process in processes] + states = [unvec_square(_np.dot(process, _np.outer(state, state.conj())).ravel(order='F'),'F') for process in processes] return states @@ -318,12 +318,13 @@ def test_process_tomography(self): test_process = _np.kron(U.conj().T, U) def single_time_test_function(pure_state, test_process=test_process): - rho = vec(_np.outer(pure_state, pure_state.conj())) - return unvec(_np.dot(test_process, rho)) + rho = _np.outer(pure_state, pure_state.conj()).ravel(order='F') + return unvec_square(_np.dot(test_process, rho),'F') def multi_time_test_function(pure_state, test_process=test_process): - rho = vec(_np.outer(pure_state, pure_state.conj())) - return [unvec(_np.dot(test_process, rho)), unvec(_np.dot(_np.linalg.matrix_power(test_process, 2), rho))] + rho = _np.outer(pure_state, pure_state.conj()).ravel(order='F') + temp = _np.dot(_np.linalg.matrix_power(test_process, 2), rho) + return [unvec_square(_np.dot(test_process, rho), 'F'), unvec_square(temp, 'F')] process_matrix = run_process_tomography(single_time_test_function, n_qubits=2, verbose=False) if _rank == 0: From 65d25f98b2629490703577c406f11a036642523a Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Tue, 24 Sep 2024 12:59:51 -0400 Subject: [PATCH 9/9] remove safe_dot --- .../modelmembers/operations/experrorgenop.py | 4 +-- .../modelmembers/operations/fullunitaryop.py | 6 ++-- .../operations/lindbladcoefficients.py | 2 +- .../operations/lindbladerrorgen.py | 2 +- pygsti/tools/basistools.py | 4 +-- pygsti/tools/matrixtools.py | 29 
------------------- scripts/api_names.yaml | 1 - 7 files changed, 9 insertions(+), 39 deletions(-) diff --git a/pygsti/modelmembers/operations/experrorgenop.py b/pygsti/modelmembers/operations/experrorgenop.py index 142ee2c21..d6c4e6200 100644 --- a/pygsti/modelmembers/operations/experrorgenop.py +++ b/pygsti/modelmembers/operations/experrorgenop.py @@ -699,9 +699,9 @@ def spam_transform_inplace(self, s, typ): #just act on postfactor and Lindbladian exponent: if typ == "prep": - mx = _mt.safe_dot(Uinv, mx) + mx = Uinv @ mx else: - mx = _mt.safe_dot(mx, U) + mx = mx @ U self.set_dense(mx) # calls _update_rep() and sets dirty flag else: raise ValueError("Invalid transform for this LindbladErrorgen: type %s" diff --git a/pygsti/modelmembers/operations/fullunitaryop.py b/pygsti/modelmembers/operations/fullunitaryop.py index 728a301bb..4fa3d8514 100644 --- a/pygsti/modelmembers/operations/fullunitaryop.py +++ b/pygsti/modelmembers/operations/fullunitaryop.py @@ -200,7 +200,7 @@ def transform_inplace(self, s): Uinv = s.transform_matrix_inverse my_superop_mx = _ot.unitary_to_superop(self._ptr, self._basis) - my_superop_mx = _mt.safe_dot(Uinv, _mt.safe_dot(my_superop_mx, U)) + my_superop_mx = Uinv @ (my_superop_mx @ U) self._ptr[:, :] = _ot.superop_to_unitary(my_superop_mx, self._basis) self._ptr_has_changed() @@ -250,9 +250,9 @@ def spam_transform_inplace(self, s, typ): #Note: this code may need to be tweaked to work with sparse matrices if typ == "prep": - my_superop_mx = _mt.safe_dot(Uinv, my_superop_mx) + my_superop_mx = Uinv @ my_superop_mx else: - my_superop_mx = _mt.safe_dot(my_superop_mx, U) + my_superop_mx = my_superop_mx @ U self._ptr[:, :] = _ot.superop_to_unitary(my_superop_mx, self._basis) self._ptr_has_changed() diff --git a/pygsti/modelmembers/operations/lindbladcoefficients.py b/pygsti/modelmembers/operations/lindbladcoefficients.py index 25ebcaab2..cbfee77c2 100644 --- a/pygsti/modelmembers/operations/lindbladcoefficients.py +++ b/pygsti/modelmembers/operations/lindbladcoefficients.py @@ -195,7 +195,7 @@ def create_lindblad_term_superoperators(self, mx_basis='pp', sparse="auto", incl if sparse: #Note: complex OK here sometimes, as only linear combos of "other" gens # (like (i,j) + (j,i) terms) need to be real. 
- superops = [_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)) for mx in superops] + superops = [leftTrans @ (mx @ rightTrans) for mx in superops] for mx in superops: mx.sort_indices() else: #superops = _np.einsum("ik,akl,lj->aij", leftTrans, superops, rightTrans) diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index d0e310a74..bbf18ee93 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -1208,7 +1208,7 @@ def transform_inplace(self, s): #conjugate Lindbladian exponent by U: err_gen_mx = self.to_sparse() if self._rep_type == 'sparse superop' else self.to_dense() - err_gen_mx = _mt.safe_dot(Uinv, _mt.safe_dot(err_gen_mx, U)) + err_gen_mx = Uinv @ (err_gen_mx @ U) trunc = 1e-6 if isinstance(s, _gaugegroup.UnitaryGaugeGroupElement) else False self._set_params_from_matrix(err_gen_mx, truncate=trunc) self.dirty = True diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index 25168123d..b87c59f67 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -189,9 +189,9 @@ def change_basis(mx, from_basis, to_basis): if isMx: # want ret = toMx.dot( _np.dot(mx, fromMx)) but need to deal # with some/all args being sparse: - ret = _mt.safe_dot(toMx, _mt.safe_dot(mx, fromMx)) + ret = toMx @ (mx @ fromMx) else: # isVec - ret = _mt.safe_dot(toMx, mx) + ret = toMx @ mx if not to_basis.real: return ret diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index db61ed0b3..9d70f867b 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -1424,35 +1424,6 @@ def _findx(a, inds, always_copy=False): return a_inds -# TODO: reevaluate the need for this function. It seems like we could just in-line @ -# and let operator overloading and implementations of __matmul__ and __rmatmul__ -# handle it. -def safe_dot(a, b): - """ - Performs dot(a,b) correctly when neither, either, or both arguments are sparse matrices. - - Parameters - ---------- - a : numpy.ndarray or scipy.sparse matrix. - First matrix. - - b : numpy.ndarray or scipy.sparse matrix. - Second matrix. - - Returns - ------- - numpy.ndarray or scipy.sparse matrix - """ - if _sps.issparse(a): - return a.dot(b) # sparseMx.dot works for both sparse and dense args - elif _sps.issparse(b): - # to return a sparse mx even when a is dense (asymmetric behavior): - # --> return _sps.csr_matrix(a).dot(b) # numpyMx.dot can't handle sparse argument - return _np.dot(a, b.toarray()) - else: - return _np.dot(a, b) - - def safe_norm(a, part=None): """ Get the frobenius norm of a matrix or vector, `a`, when it is either a dense array or a sparse matrix. diff --git a/scripts/api_names.yaml b/scripts/api_names.yaml index 81f4e0d68..c09dfd954 100644 --- a/scripts/api_names.yaml +++ b/scripts/api_names.yaml @@ -3723,7 +3723,6 @@ tools: random_hermitian: null real_matrix_log: null safe_onenorm: null - safedot: safe_dot safeimag: safe_imag safenorm: safe_norm safereal: safe_real
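For reference, a minimal sketch (not part of the patch series) of why the removed
safe_dot helper can be replaced by the `@` operator, assuming a reasonably recent
NumPy/SciPy where sparse matrices implement __matmul__/__rmatmul__.  It mirrors the
`Uinv @ (err_gen_mx @ U)` conjugation pattern used in the diffs above:

    import numpy as np
    import scipy.sparse as sps

    U = np.array([[0., 1.],
                  [1., 0.]])                  # toy gauge transform (orthogonal)
    Uinv = U.T
    err_gen_mx = sps.csr_matrix([[1., 2.],
                                 [3., 4.]])   # sparse middle operand
    conjugated = Uinv @ (err_gen_mx @ U)      # no safe_dot wrapper needed
    print(conjugated)                         # dense 2x2 result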