diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
index 6a5c9db49..df83b6222 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
@@ -179,7 +179,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (size_t i = 0; i < fused_circuits.size(); i++) {
      int nq = num_qubits[i];
      if (nq > largest_nq) {
        // need to switch to larger statespace.
@@ -191,10 +191,10 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
      // the state if there is a possibility that circuit[i] and
      // circuit[i + 1] produce the same state.
      ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }
-      for (int j = 0; j < other_fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < other_fused_circuits[i].size(); j++) {
        // (#679) Just ignore empty program
        if (fused_circuits[i].size() == 0) {
          (*output_tensor)(i, j) = std::complex<float>(1, 0);
        }
@@ -202,7 +202,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
        }
        ss.SetStateZero(scratch);
-        for (int k = 0; k < other_fused_circuits[i][j].size(); k++) {
+        for (size_t k = 0; k < other_fused_circuits[i][j].size(); k++) {
          qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch);
        }
@@ -260,13 +260,13 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
        // no need to update scratch_state since ComputeExpectation
        // will take care of things for us.
        ss.SetStateZero(sv);
-        for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+        for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
          qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
        }
      }
      ss.SetStateZero(scratch);
-      for (int k = 0;
+      for (size_t k = 0;
           k < other_fused_circuits[cur_batch_index][cur_internal_index].size();
           k++) {
diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
index 198f92c63..1563ef37a 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
@@ -61,9 +61,9 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
                    "other_programs must be rank 2. Got ",
                    context->input(3).dims())));
    // Create the output Tensor.
-    const int output_dim_batch_size = context->input(0).dim_size(0);
-    const int output_dim_internal_size = context->input(3).dim_size(1);
-    const int output_dim_symbol_size = context->input(1).dim_size(0);
+    const size_t output_dim_batch_size = context->input(0).dim_size(0);
+    const size_t output_dim_internal_size = context->input(3).dim_size(1);
+    const size_t output_dim_symbol_size = context->input(1).dim_size(0);
    OP_REQUIRES(context, output_dim_symbol_size > 0,
                tensorflow::errors::InvalidArgument(absl::StrCat(
                    "The number of symbols must be a positive integer, got ",
@@ -403,13 +403,13 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
        // if applicable compute control qubit mask and control value bits.
        uint64_t mask = 0;
        uint64_t cbits = 0;
-        for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+        for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
          uint64_t control_loc = cur_gate.controlled_by[k];
          mask |= uint64_t{1} << control_loc;
          cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
        }
-        for (int k = 0;
+        for (size_t k = 0;
             k < gradient_gates[cur_batch_index][l - 1].grad_gates.size();
             k++) {
          // Copy sv_adj onto scratch2 in anticipation of non-unitary
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
index dabe6ceac..5a13fa1af 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
@@ -181,8 +181,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
    tensorflow::GuardedPhiloxRandom random_gen;
    int max_n_shots = 1;
-    for (int i = 0; i < num_samples.size(); i++) {
-      for (int j = 0; j < num_samples[i].size(); j++) {
+    for (size_t i = 0; i < num_samples.size(); i++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
      }
    }
@@ -194,12 +194,12 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];
      // (#679) Just ignore empty program
      if (ncircuits[i].channels.size() == 0) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (size_t j = 0; j < pauli_sums[i].size(); j++) {
          (*output_tensor)(i, j) = -2.0;
        }
        continue;
      }
@@ -226,7 +226,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
                                    sv, unused_stats);
      // Use this trajectory as a source for all expectation calculations.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        if (run_samples[j] >= num_samples[i][j]) {
          continue;
        }
@@ -238,14 +238,14 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
        run_samples[j]++;
      }
      bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        if (run_samples[j] < num_samples[i][j]) {
          break_loop = false;
          break;
        }
      }
      if (break_loop) {
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (size_t j = 0; j < num_samples[i].size(); j++) {
          rolling_sums[j] /= num_samples[i][j];
          (*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
        }
@@ -286,8 +286,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
    tensorflow::GuardedPhiloxRandom random_gen;
    int max_n_shots = 1;
-    for (int i = 0; i < num_samples.size(); i++) {
-      for (int j = 0; j < num_samples[i].size(); j++) {
+    for (size_t i = 0; i < num_samples.size(); i++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
      }
    }
@@ -310,13 +310,13 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
        random_gen.ReserveSamples128(ncircuits.size() * max_n_shots + 1);
    tensorflow::random::SimplePhilox rand_source(&local_gen);
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];
      int rep_offset = rep_offsets[start][i];
      // (#679) Just ignore empty program
      if (ncircuits[i].channels.size() == 0) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (size_t j = 0; j < pauli_sums[i].size(); j++) {
          (*output_tensor)(i, j) = -2.0;
        }
        continue;
@@ -343,7 +343,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
                                      sim, sv, unused_stats);
      // Compute expectations across all ops using this trajectory.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
        if (run_samples[j] >= p_reps + rep_offset) {
          continue;
        }
@@ -360,7 +360,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
      // Check if we have run enough trajectories for all ops.
      bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
        if (run_samples[j] < p_reps + rep_offset) {
          break_loop = false;
@@ -370,7 +370,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
      if (break_loop) {
        // Lock writing to this batch index in output_tensor.
        batch_locks[i].lock();
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (size_t j = 0; j < num_samples[i].size(); j++) {
          rolling_sums[j] /= num_samples[i][j];
          (*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
        }
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
index 66a0e168c..ed59331b1 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
@@ -183,8 +183,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
    tensorflow::GuardedPhiloxRandom random_gen;
    int max_psum_length = 1;
    int max_n_shots = 1;
-    for (int i = 0; i < pauli_sums.size(); i++) {
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+    for (size_t i = 0; i < pauli_sums.size(); i++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        max_psum_length = std::max(max_psum_length, pauli_sums[i][j].terms().size());
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -198,12 +198,12 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];
      // (#679) Just ignore empty program
      if (ncircuits[i].channels.empty()) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (size_t j = 0; j < pauli_sums[i].size(); j++) {
          (*output_tensor)(i, j) = -2.0;
        }
        continue;
@@ -230,7 +230,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
                                    sv, unused_stats);
      // Use this trajectory as a source for all expectation calculations.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        if (run_samples[j] >= num_samples[i][j]) {
          continue;
        }
@@ -242,14 +242,14 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
        run_samples[j]++;
      }
      bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        if (run_samples[j] < num_samples[i][j]) {
          break_loop = false;
          break;
        }
      }
      if (break_loop) {
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (size_t j = 0; j < num_samples[i].size(); j++) {
          rolling_sums[j] /= num_samples[i][j];
          (*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
        }
@@ -291,8 +291,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
    tensorflow::GuardedPhiloxRandom random_gen;
    int max_psum_length = 1;
    int max_n_shots = 1;
-    for (int i = 0; i < pauli_sums.size(); i++) {
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+    for (size_t i = 0; i < pauli_sums.size(); i++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        max_psum_length = std::max(max_psum_length, pauli_sums[i][j].terms().size());
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -316,13 +316,13 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
    auto local_gen = random_gen.ReserveSamples128(num_rand);
    tensorflow::random::SimplePhilox rand_source(&local_gen);
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];
      int rep_offset = rep_offsets[start][i];
      // (#679) Just ignore empty program
      if (ncircuits[i].channels.empty()) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (size_t j = 0; j < pauli_sums[i].size(); j++) {
          (*output_tensor)(i, j) = -2.0;
        }
        continue;
@@ -349,7 +349,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
                                      sim, sv, unused_stats);
      // Compute expectations across all ops using this trajectory.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
        if (run_samples[j] >= p_reps + rep_offset) {
          continue;
@@ -366,7 +366,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
      // Check if we have run enough trajectories for all ops.
      bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
        if (run_samples[j] < p_reps + rep_offset) {
          break_loop = false;
@@ -376,7 +376,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
      if (break_loop) {
        // Lock writing to this batch index in output_tensor.
        batch_locks[i].lock();
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (size_t j = 0; j < num_samples[i].size(); j++) {
          rolling_sums[j] /= num_samples[i][j];
          (*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
        }
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc
index 78c633f13..a09f826b9 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc
@@ -160,7 +160,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the Statevector as nescessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];
      if (nq > largest_nq) {
@@ -182,7 +182,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
        QTSimulator::RunOnce(param, ncircuits[i], rand_source.Rand64(), ss,
                             sim, sv, gathered_samples);
-        uint64_t q_ind = 0;
+        int q_ind = 0;
        uint64_t mask = 1;
        bool val = 0;
        while (q_ind < nq) {
@@ -253,7 +253,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
    auto local_gen = random_gen.ReserveSamples32(needed_random);
    tensorflow::random::SimplePhilox rand_source(&local_gen);
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];
      int j = start > 0 ? offset_prefix_sum[start - 1][i] : 0;
      int needed_samples = offset_prefix_sum[start][i] - j;
@@ -279,7 +279,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
        QTSimulator::RunOnce(param, ncircuits[i], rand_source.Rand64(), ss,
                             sim, sv, gathered_samples);
-        uint64_t q_ind = 0;
+        int q_ind = 0;
        uint64_t mask = 1;
        bool val = 0;
        while (q_ind < nq) {
diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
index c96a9cb0d..088c6dcde 100644
--- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
@@ -212,7 +212,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
      }
      ss.SetStateZero(sv);
-      for (int j = 0; j < full_fuse[i].size(); j++) {
+      for (size_t j = 0; j < full_fuse[i].size(); j++) {
        qsim::ApplyFusedGate(sim, full_fuse[i][j], sv);
      }
@@ -241,13 +241,14 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
        // if applicable compute control qubit mask and control value bits.
        uint64_t mask = 0;
        uint64_t cbits = 0;
-        for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+        for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
          uint64_t control_loc = cur_gate.controlled_by[k];
          mask |= uint64_t{1} << control_loc;
          cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
        }
-        for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
+        for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size();
+             k++) {
          // Copy sv onto scratch2 in anticipation of non-unitary "gradient
          // gate".
          ss.Copy(sv, scratch2);
@@ -307,7 +308,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
    auto scratch = ss.Create(largest_nq);
    auto scratch2 = ss.Create(largest_nq);
-    for (int i = 0; i < partial_fused_circuits.size(); i++) {
+    for (size_t i = 0; i < partial_fused_circuits.size(); i++) {
      int nq = num_qubits[i];
      if (nq > largest_nq) {
@@ -324,7 +325,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
      }
      ss.SetStateZero(sv);
-      for (int j = 0; j < full_fuse[i].size(); j++) {
+      for (size_t j = 0; j < full_fuse[i].size(); j++) {
        qsim::ApplyFusedGate(sim, full_fuse[i][j], sv);
      }
@@ -352,13 +353,14 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
        // if applicable compute control qubit mask and control value bits.
        uint64_t mask = 0;
        uint64_t cbits = 0;
-        for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+        for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
          uint64_t control_loc = cur_gate.controlled_by[k];
          mask |= uint64_t{1} << control_loc;
          cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
        }
-        for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
+        for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size();
+             k++) {
          // Copy sv onto scratch2 in anticipation of non-unitary "gradient
          // gate".
          ss.Copy(sv, scratch2);
diff --git a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc
index ace5327e1..4f1f662ca 100644
--- a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc
@@ -116,7 +116,7 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the unitary as nescessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (size_t i = 0; i < fused_circuits.size(); i++) {
      int nq = num_qubits[i];
      UCalculator sim = UCalculator(tfq_for);
      UnitarySpace us = UnitarySpace(tfq_for);
@@ -126,7 +126,7 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel {
        u = us.CreateUnitary(nq);
      }
      us.SetIdentity(u);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], u);
      }
diff --git a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc
index 559fbecc9..d5b4ef9a7 100644
--- a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc
@@ -163,12 +163,13 @@ class TfqPsSymbolReplaceOp : public tensorflow::OpKernel {
      for (int i = start; i < end; i++) {
        int sidx = i % n_symbols;
        int pidx = i / n_symbols;
-        for (int j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) {
+
+        for (size_t j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) {
          output_tensor(pidx, sidx, j) = output_programs.at(pidx).at(sidx).at(j);
        }
-        for (int j = output_programs.at(pidx).at(sidx).size(); j < biggest_pad;
-             j++) {
+        for (size_t j = output_programs.at(pidx).at(sidx).size();
+             j < biggest_pad; j++) {
          output_tensor(pidx, sidx, j) = empty_program;
        }
      }
diff --git a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc
index 4a027223e..7ffebfd22 100644
--- a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc
@@ -146,7 +146,7 @@ class TfqPsWeightsFromSymbolOp : public tensorflow::OpKernel {
    auto DoWork2 = [&](int start, int end) {
      for (int i = start; i < end; i++) {
        for (int j = 0; j < n_symbols; j++) {
-          for (int k = 0; k < output_results.at(i).at(j).size(); k++) {
+          for (size_t k = 0; k < output_results.at(i).at(j).size(); k++) {
            output_tensor(i, j, k) = output_results.at(i).at(j).at(k);
          }
          for (int k = output_results.at(i).at(j).size();
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
index 7583437ca..6f0561217 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
@@ -148,7 +148,7 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (size_t i = 0; i < fused_circuits.size(); i++) {
      int nq = num_qubits[i];
      if (nq > largest_nq) {
@@ -161,10 +161,10 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel {
      // the state if there is a possibility that circuit[i] and
      // circuit[i + 1] produce the same state.
      ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        // (#679) Just ignore empty program
        if (fused_circuits[i].size() == 0) {
          (*output_tensor)(i, j) = -2.0;
@@ -226,7 +226,7 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel {
        // no need to update scratch_state since ComputeExpectation
        // will take care of things for us.
        ss.SetStateZero(sv);
-        for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+        for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
          qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
        }
      }
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
index b9f9ee982..552644a43 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
@@ -180,7 +180,7 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (size_t i = 0; i < fused_circuits.size(); i++) {
      int nq = num_qubits[i];
      if (nq > largest_nq) {
@@ -193,10 +193,10 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel {
      // the state if there is a possibility that circuit[i] and
      // circuit[i + 1] produce the same state.
      ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        // (#679) Just ignore empty program
        if (fused_circuits[i].size() == 0) {
          (*output_tensor)(i, j) = -2.0;
@@ -278,7 +278,7 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel {
        // no need to update scratch_state since ComputeExpectation
        // will take care of things for us.
        ss.SetStateZero(sv);
-        for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+        for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
          qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
        }
      }
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc
index 0e68020e9..447f66c70 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc
@@ -154,7 +154,7 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the Statevector as nescessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (size_t i = 0; i < fused_circuits.size(); i++) {
      int nq = num_qubits[i];
      if (nq > largest_nq) {
@@ -163,13 +163,13 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel {
        sv = ss.Create(largest_nq);
      }
      ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }
      auto samples = ss.Sample(sv, num_samples, rand_source.Rand32());
      for (int j = 0; j < num_samples; j++) {
-        uint64_t q_ind = 0;
+        int q_ind = 0;
        uint64_t mask = 1;
        bool val = 0;
        while (q_ind < nq) {
@@ -219,13 +219,13 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel {
        sv = ss.Create(largest_nq);
      }
      ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }
      auto samples = ss.Sample(sv, num_samples, rand_source.Rand32());
      for (int j = 0; j < num_samples; j++) {
-        uint64_t q_ind = 0;
+        int q_ind = 0;
        uint64_t mask = 1;
        bool val = 0;
        while (q_ind < nq) {
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc
index e17c36d56..cbc3756cf 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc
@@ -136,7 +136,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel {
    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // a larger circuit we will grow the Statevector as nescessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (size_t i = 0; i < fused_circuits.size(); i++) {
      int nq = num_qubits[i];
      if (nq > largest_nq) {
@@ -145,7 +145,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel {
        sv = ss.Create(largest_nq);
      }
      ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }
@@ -194,7 +194,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel {
        sv = ss.Create(largest_nq);
      }
      ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }
diff --git a/tensorflow_quantum/core/src/adj_util.cc b/tensorflow_quantum/core/src/adj_util.cc
index ceb76b2c1..e15ff8a8c 100644
--- a/tensorflow_quantum/core/src/adj_util.cc
+++ b/tensorflow_quantum/core/src/adj_util.cc
@@ -38,7 +38,7 @@ void CreateGradientCircuit(
    const QsimCircuit& circuit, const std::vector<GateMetaData>& metadata,
    std::vector<std::vector<qsim::GateFused<QsimGate>>>* partial_fuses,
    std::vector<GradientOfGate>* grad_gates) {
-  for (int i = 0; i < metadata.size(); i++) {
+  for (size_t i = 0; i < metadata.size(); i++) {
    if (metadata[i].symbol_values.empty()) {
      continue;
    }
@@ -78,7 +78,7 @@ void CreateGradientCircuit(
    // PhasedX
    else if (circuit.gates[i].kind == qsim::Cirq::GateKind::kPhasedXPowGate) {
      // Process potentially several symbols.
-      for (int j = 0; j < metadata[i].symbol_values.size(); j++) {
+      for (size_t j = 0; j < metadata[i].symbol_values.size(); j++) {
        if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) {
          PopulateGradientPhasedXPhasedExponent(
@@ -103,7 +103,7 @@ void CreateGradientCircuit(
      // Process potentially several symbols.
      bool swapq = circuit.gates[i].swapped;
-      for (int j = 0; j < metadata[i].symbol_values.size(); j++) {
+      for (size_t j = 0; j < metadata[i].symbol_values.size(); j++) {
        if (metadata[i].placeholder_names[j] == GateParamNames::kTheta) {
          PopulateGradientFsimTheta(
              metadata[i].symbol_values[j], i,
@@ -128,7 +128,7 @@ void CreateGradientCircuit(
             qsim::Cirq::GateKind::kPhasedISwapPowGate) {
      // Process potentially several symbols.
      bool swapq = circuit.gates[i].swapped;
-      for (int j = 0; j < metadata[i].symbol_values.size(); j++) {
+      for (size_t j = 0; j < metadata[i].symbol_values.size(); j++) {
        if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) {
          PopulateGradientPhasedISwapPhasedExponent(
@@ -159,7 +159,7 @@ void CreateGradientCircuit(
  partial_fuses->assign(grad_gates->size() + 1,
                        std::vector<qsim::GateFused<QsimGate>>({}));
-  for (int i = 0; i < grad_gates->size(); i++) {
+  for (size_t i = 0; i < grad_gates->size(); i++) {
    right = circuit.gates.begin() + (*grad_gates)[i].index;
    (*partial_fuses)[i] = fuser.FuseGates(qsim::BasicGateFuser::Parameter(),
diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc
index a1742a205..f5add3c8a 100644
--- a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc
+++ b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc
@@ -96,7 +96,7 @@ inline void AssertChannelEqual(const QsimChannel& a, const QsimChannel& b) {
    auto a_k_ops = a[i].ops;
    auto b_k_ops = b[i].ops;
    EXPECT_EQ(a_k_ops.size(), b_k_ops.size());
-    for (int j = 0; j < a_k_ops.size(); j++) {
+    for (size_t j = 0; j < a_k_ops.size(); j++) {
      AssertOneQubitEqual(a_k_ops[j], b_k_ops[j]);
    }
  }
diff --git a/tensorflow_quantum/core/src/program_resolution.cc b/tensorflow_quantum/core/src/program_resolution.cc
index 0fbda9368..0e9ef28c2 100644
--- a/tensorflow_quantum/core/src/program_resolution.cc
+++ b/tensorflow_quantum/core/src/program_resolution.cc
@@ -361,7 +361,7 @@ Status CheckMPSSupported(const Program& program) {
        control_ids = absl::StrSplit(control_qubits, ',');
      }
    }
-    const int total_num_qubits = qubits.size() + control_ids.size();
+    const size_t total_num_qubits = qubits.size() + control_ids.size();
    if (total_num_qubits > 2) {
      return Status(
          static_cast(
@@ -372,7 +372,7 @@ Status CheckMPSSupported(const Program& program) {
    }
    if (total_num_qubits == 2) {
-      int j = 0;
+      size_t j = 0;
      std::vector qids(2, -1234);
      for (; j < qubits.size(); j++) {
        (void)absl::SimpleAtoi(qubits[j].id(), &qids[j]);
diff --git a/tensorflow_quantum/core/src/util_balance_trajectory.cc b/tensorflow_quantum/core/src/util_balance_trajectory.cc
index 6230e747a..8351e49b1 100644
--- a/tensorflow_quantum/core/src/util_balance_trajectory.cc
+++ b/tensorflow_quantum/core/src/util_balance_trajectory.cc
@@ -29,13 +29,13 @@ void BalanceTrajectory(const std::vector<std::vector<int>>& num_samples,
  std::vector<int> rep_limits(num_samples.size(), -1);
  std::vector<int> height(num_threads, 0);
-  for (int i = 0; i < num_samples.size(); i++) {
-    for (int j = 0; j < num_samples[i].size(); j++) {
+  for (size_t i = 0; i < num_samples.size(); i++) {
+    for (size_t j = 0; j < num_samples[i].size(); j++) {
      rep_limits[i] = std::max(rep_limits[i], num_samples[i][j]);
    }
  }
  int prev_max_height = -1;
-  for (int j = 0; j < num_samples.size(); j++) {
+  for (size_t j = 0; j < num_samples.size(); j++) {
    int run_ceiling = ((rep_limits[j] + num_threads - 1) / num_threads);
    int num_lo = num_threads * run_ceiling - rep_limits[j];
    int num_hi = num_threads - num_lo;
@@ -74,7 +74,7 @@ void BalanceTrajectory(const int& num_samples, const int& num_threads,
  std::vector<int> height(num_threads, 0);
  int prev_max_height = -1;
-  for (int j = 0; j < (*thread_offsets)[0].size(); j++) {
+  for (size_t j = 0; j < (*thread_offsets)[0].size(); j++) {
    int run_ceiling = ((num_samples + num_threads - 1) / num_threads);
    int num_lo = num_threads * run_ceiling - num_samples;
    int num_hi = num_threads - num_lo;
diff --git a/tensorflow_quantum/core/src/util_balance_trajectory_test.cc b/tensorflow_quantum/core/src/util_balance_trajectory_test.cc
index 1656a9acf..f361f5754 100644
--- a/tensorflow_quantum/core/src/util_balance_trajectory_test.cc
+++ b/tensorflow_quantum/core/src/util_balance_trajectory_test.cc
@@ -24,13 +24,13 @@ static void AssertWellBalanced(const std::vector<std::vector<int>>& n_reps,
                               const int& num_threads,
                               const std::vector<std::vector<int>>& offsets) {
  auto max_work = std::vector<int>(n_reps.size(), -1);
-  for (int i = 0; i < n_reps.size(); i++) {
-    for (int j = 0; j < n_reps[0].size(); j++) {
+  for (size_t i = 0; i < n_reps.size(); i++) {
+    for (size_t j = 0; j < n_reps[0].size(); j++) {
      max_work[i] = std::max(max_work[i], n_reps[i][j]);
    }
  }
-  for (int i = 0; i < n_reps.size(); i++) {
+  for (size_t i = 0; i < n_reps.size(); i++) {
    int sum = 0;
    int prev_local_work = 0;
    for (int k = 0; k < num_threads; k++) {
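Taken together, the patch applies one mechanical rule: a loop index compared against an unsigned container size (std::vector::size() returns size_t) becomes size_t, while a counter compared against a signed bound such as the int qubit count nq stays signed, which is why the q_ind counters move the other way, from uint64_t to int. The standalone sketch below (not part of the patch; the variable names are invented for illustration) shows both cases compiling cleanly under -Wall -Wsign-compare:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> num_samples = {100, 250, 50};
  int nq = 3;  // signed bound, analogous to the per-circuit qubit count above

  // Case 1: indexing a container. size() is unsigned, so a size_t index keeps
  // the comparison unsigned on both sides and avoids the -Wsign-compare
  // warning that `for (int i = 0; i < num_samples.size(); ...)` produces.
  for (size_t i = 0; i < num_samples.size(); i++) {
    std::cout << "shots[" << i << "] = " << num_samples[i] << "\n";
  }

  // Case 2: counting up to a signed bound. Here the counter stays signed so
  // that `q_ind < nq` also compares like with like; only the bit mask needs
  // to be 64-bit.
  for (int q_ind = 0; q_ind < nq; q_ind++) {
    uint64_t mask = uint64_t{1} << q_ind;
    std::cout << "qubit " << q_ind << " has mask " << mask << "\n";
  }
  return 0;
}

Either side of such a comparison could instead be silenced with a static_cast, but changing the declared index type is quieter and matches the approach taken throughout the diff.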