Commit

Further updates
cdbf1 committed Aug 13, 2024
1 parent 1932534 commit e944a1d
Showing 3 changed files with 78 additions and 42 deletions.
6 changes: 3 additions & 3 deletions supermarq-benchmarks/examples/qcvv/qcvv_css.ipynb
@@ -99,7 +99,7 @@
" records.append({**sample.data, **sample.probabilities})\n",
" return pd.DataFrame(records)\n",
"\n",
" def analyse_results(self, plot_results: bool = True) -> NaiveExperiment:\n",
" def analyze_results(self, plot_results: bool = True) -> NaiveExperiment:\n",
" \"\"\"To analyse the results to fit a simple exponential decay. This can be done easily\n",
" by fitting a linear model to the logarithm of the equation above.\n",
" \"\"\"\n",
@@ -212,7 +212,7 @@
],
"source": [
"if experiment.collect_data():\n",
" results = experiment.analyse_results(plot_results=True)\n",
" results = experiment.analyze_results(plot_results=True)\n",
" print(results)"
]
},
@@ -324,7 +324,7 @@
],
"source": [
"if experiment.collect_data():\n",
" results = experiment.analyse_results(plot_results=True)\n",
" results = experiment.analyze_results(plot_results=True)\n",
" print(results)"
]
},
58 changes: 37 additions & 21 deletions supermarq-benchmarks/supermarq/qcvv/base_experiment.py
@@ -33,17 +33,17 @@ class Sample:
that is needed for analysis
"""

circuit: cirq.Circuit
"""The sample circuit."""
raw_circuit: cirq.Circuit
"""The raw (i.e. pre-compiled) sample circuit."""
data: dict[str, Any]
"""The corresponding data about the circuit"""
probabilities: dict[str, float] = field(init=False)
"""The probabilities of the computational basis states"""
job: css.Job | None = None
"""The superstaq job corresponding to the sample. Defaults to None if no job is
associated with the sample."""
raw_circuit: cirq.Circuit = field(init=False)
"""The raw (pre-compiled) circuit. Only used if the circuits are compiled for a specific
compiled_circuit: cirq.Circuit = field(init=False)
"""The compiled circuit. Only used if the circuits are compiled for a specific
target."""

@property
@@ -63,6 +63,17 @@ def target(self) -> str:
# Otherwise the experiment hasn't yet been run so there is no target.
return "No target"

@property
def circuit(self) -> cirq.Circuit:
"""Returns:
The circuit used for the experiment. Defaults to the compiled circuit if available
and if not returns the raw circuit.
"""
if hasattr(self, "compiled_circuit"):
return self.compiled_circuit

return self.raw_circuit


@dataclass(frozen=True)
class BenchmarkingResults(ABC):
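For orientation, a minimal runnable sketch (separate from the diff itself) of how the new compiled_circuit field and the circuit property fit together. The import path is assumed from the file location above, and the circuits and data values are illustrative only.

import cirq
from supermarq.qcvv.base_experiment import Sample

qubits = cirq.LineQubit.range(2)
sample = Sample(
    raw_circuit=cirq.Circuit(cirq.CZ(*qubits), cirq.measure(*qubits)),
    data={"circuit": 1},
)

# No compiled circuit has been attached yet, so the property falls back to
# the raw (pre-compiled) circuit.
assert sample.circuit is sample.raw_circuit

# Once a compiled circuit is stored (normally by compile_circuits or
# run_on_device), the property resolves to it instead.
sample.compiled_circuit = cirq.Circuit(cirq.CZ(*qubits), cirq.measure(*qubits))
assert sample.circuit is sample.compiled_circuit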
@@ -111,7 +122,7 @@ class BenchmarkingExperiment(ABC, Generic[ResultsT]):
.. code::
if experiment.collect_data():
results = experiment.analyse_results(<<args>>)
results = experiment.analyze_results(<<args>>)
#. The final results of the experiment will be stored in the :code:`results` attribute as a
:class:`BenchmarkingResults` of values, while all the data from the experiment will be
@@ -146,7 +157,7 @@ class BenchmarkingExperiment(ABC, Generic[ResultsT]):
computational basis resulting from running each circuit and combine the relevant details
into a :class:`pandas.DataFrame`.
#. :meth:`analyse_results`: Analyse the data in the :attr:`raw_data` dataframe and return a
#. :meth:`analyze_results`: Analyse the data in the :attr:`raw_data` dataframe and return a
:class:`BenchmarkingResults` object containing the results of the experiment.
#. :meth:`plot_results`: Produce any relevant plots that are useful for understanding the
@@ -378,7 +389,7 @@ def _validate_circuits(self) -> None:
# Public Methods #
###################
@abstractmethod
def analyse_results(self, plot_results: bool = True) -> ResultsT:
def analyze_results(self, plot_results: bool = True) -> ResultsT:
"""Perform the experiment analysis and store the results in the `results` attribute.
Args:
@@ -450,8 +461,7 @@ def compile_circuits(self, target: str, **kwargs: Any) -> None:
).circuits

for k, sample in enumerate(self.samples):
sample.raw_circuit = sample.circuit.copy()
sample.circuit = compiled_circuits[k] # type: ignore[assignment]
sample.compiled_circuit = compiled_circuits[k] # type: ignore[assignment]

@abstractmethod
def plot_results(self) -> None:
@@ -488,7 +498,7 @@ def prepare_experiment(
def run_on_device(
self,
target: str,
shots: int = 10_000,
repetitions: int = 10_000,
method: str | None = None,
overwrite: bool = False,
**target_options: Any,
@@ -502,7 +512,7 @@
Args:
target: The name of a Superstaq target.
shots: The number of shots to sample. Defaults to 10,000.
repetitions: The number of shots to sample. Defaults to 10,000.
method: Optional method to use on the Superstaq device. Defaults to None corresponding
to normal running.
target_options: Optional configuration dictionary passed when submitting the job.
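An illustrative fragment (separate from the diff, and not runnable on its own) showing the renamed keyword in use. Here `experiment` stands in for an already-prepared instance of a concrete BenchmarkingExperiment subclass, a Superstaq API key is assumed to be configured, and the target name is made up.

# Assumes `experiment` is a prepared concrete BenchmarkingExperiment instance.
job = experiment.run_on_device(
    target="example_qpu",
    repetitions=5_000,   # formerly passed as `shots`
    method="dry-run",    # optional; omit for a normal run
)
# With this change each sample also keeps the circuit compiled for the target.
print(experiment.samples[0].compiled_circuit)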
@@ -519,26 +529,29 @@
[sample.circuit for sample in self.samples],
target=target,
method=method,
repetitions=shots,
repetitions=repetitions,
**target_options,
)
compiled_circuits = experiment_job.compiled_circuits()

for k, sample in enumerate(self.samples):
sample.job = experiment_job[k]
sample.compiled_circuit = compiled_circuits[k]

return experiment_job

def run_with_simulator(
self,
simulator: cirq.Sampler | None = None,
shots: int = 10_000,
repetitions: int = 10_000,
overwrite: bool = False,
) -> None:
"""Use the local simulator to sample the circuits and store the resulting probabilities.
Args:
simulator: A local :class:`~cirq.Sampler` to use. If None then the default
:class:`cirq.Simulator` simulator is used. Defaults to None.
shots: The number of shots to sample. Defaults to 10,000.
repetitions: The number of shots to sample. Defaults to 10,000.
overwrite: Whether to force an experiment run even if there is existing data that would
be over written in the process. Defaults to False.
"""
@@ -549,10 +562,10 @@
simulator = cirq.Simulator()

for sample in tqdm(self.samples, desc="Simulating circuits"):
result = simulator.run(sample.circuit, repetitions=shots)
result = simulator.run(sample.circuit, repetitions=repetitions)
hist = result.histogram(key=cirq.measurement_key_name(sample.circuit))
sample.probabilities = {
f"{i:0{self.num_qubits}b}": count / shots for i, count in hist.items()
f"{i:0{self.num_qubits}b}": count / repetitions for i, count in hist.items()
}

def run_with_callable(
@@ -572,18 +585,21 @@
kwargs: Additional arguments to pass to the custom function.
Raises:
RuntimeError: If the returned probabilities dictionary keys is missing bitstrings.
RuntimeError: If the returned probabilities dictionary keys include
an incorrect number of bits.
RuntimeError: If the returned probabilities dictionary values do not sum to 1.0.
"""
if not overwrite:
self._run_check()
for sample in tqdm(self.samples, desc="Running circuits"):
probability = circuit_eval_func(sample.circuit, **kwargs)
if sorted(probability.keys()) != sorted(
f"{i:0{self.num_qubits}b}" for i in range(2**self.num_qubits)
):
raise RuntimeError("Returned probabilities are missing bitstrings.")
if not all(len(key) == self.num_qubits for key in probability.keys()):
raise RuntimeError("Returned probabilities include an incorrect number of bits.")
if not math.isclose(sum(probability.values()), 1.0):
raise RuntimeError("Returned probabilities do not sum to 1.0.")

for k in range(2**self.num_qubits):
if (bitstring := format(k, f"0{self.num_qubits}b")) not in probability:
probability[bitstring] = 0.0

sample.probabilities = probability
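For clarity, a standalone sketch (separate from the diff) of the new acceptance rule for run_with_callable: every key returned by the callable must be a num_qubits-bit string, the values must sum to 1.0, and any bitstring the callable omits is filled in with probability 0.0 rather than rejected. The example dictionary is illustrative.

import math

num_qubits = 2
probability = {"01": 0.2, "10": 0.7, "11": 0.1}  # "00" deliberately omitted

# Keys must all have the right number of bits and values must sum to 1.0.
if not all(len(key) == num_qubits for key in probability):
    raise RuntimeError("Returned probabilities include an incorrect number of bits.")
if not math.isclose(sum(probability.values()), 1.0):
    raise RuntimeError("Returned probabilities do not sum to 1.0.")

# Missing bitstrings are padded with zero probability.
for k in range(2**num_qubits):
    if (bitstring := format(k, f"0{num_qubits}b")) not in probability:
        probability[bitstring] = 0.0

print(probability)  # {"01": 0.2, "10": 0.7, "11": 0.1, "00": 0.0}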
56 changes: 38 additions & 18 deletions supermarq-benchmarks/supermarq/qcvv/base_experiment_test.py
@@ -55,15 +55,17 @@ def sample_circuits() -> list[Sample]:
qubits = cirq.LineQubit.range(2)
return [
Sample(
circuit=cirq.Circuit(cirq.CZ(*qubits), cirq.CZ(*qubits), cirq.measure(*qubits)),
raw_circuit=cirq.Circuit(cirq.CZ(*qubits), cirq.CZ(*qubits), cirq.measure(*qubits)),
data={"circuit": 1},
),
Sample(circuit=cirq.Circuit(cirq.CX(*qubits), cirq.measure(*qubits)), data={"circuit": 2}),
Sample(
raw_circuit=cirq.Circuit(cirq.CX(*qubits), cirq.measure(*qubits)), data={"circuit": 2}
),
]


def test_sample_target_property() -> None:
sample = Sample(circuit=MagicMock(), data={})
sample = Sample(raw_circuit=MagicMock(), data={})
assert sample.target == "No target"

sample.probabilities = {"0": 0.25, "1": 0.75}
@@ -111,7 +113,7 @@ def test_empty_samples_error(abc_experiment: BenchmarkingExperiment[ExampleResul
def test_prepare_experiment_overwrite_error(
abc_experiment: BenchmarkingExperiment[ExampleResults],
) -> None:
abc_experiment._samples = [Sample(circuit=MagicMock(), data={})]
abc_experiment._samples = [Sample(raw_circuit=MagicMock(), data={})]
abc_experiment._build_circuits = MagicMock()

with pytest.raises(
@@ -125,7 +127,7 @@ def test_prepare_experiment_overwrite(
def test_prepare_experiment_overwrite(
abc_experiment: BenchmarkingExperiment[ExampleResults],
) -> None:
abc_experiment._samples = [Sample(circuit=MagicMock(), data={})]
abc_experiment._samples = [Sample(raw_circuit=MagicMock(), data={})]
abc_experiment._build_circuits = MagicMock()
abc_experiment._validate_circuits = MagicMock()

@@ -154,7 +156,7 @@ def test_run_with_simulator(
mock_result.histogram.return_value = {0: 0, 1: 100, 2: 0, 3: 0}
test_sim.run.return_value = mock_result

abc_experiment.run_with_simulator(simulator=test_sim, shots=100)
abc_experiment.run_with_simulator(simulator=test_sim, repetitions=100)

# Test simulator calls
test_sim.run.assert_has_calls(
@@ -197,7 +199,7 @@ def test_run_with_simulator_default_target(
mock_result.histogram.return_value = {0: 0, 1: 100, 2: 0, 3: 0}
target().run.return_value = mock_result

abc_experiment.run_with_simulator(shots=100)
abc_experiment.run_with_simulator(repetitions=100)

# Test simulator calls
target().run.assert_has_calls(
@@ -220,18 +222,26 @@ def test_run_on_device(
abc_experiment._service = (mock_service := MagicMock())

job = abc_experiment.run_on_device(
target="example_target", shots=100, overwrite=False, **{"some": "options"}
target="example_target", repetitions=100, overwrite=False, **{"some": "options"}
)

mock_service.create_job.assert_called_once_with(
[sample_circuits[0].circuit, sample_circuits[1].circuit],
[sample_circuits[0].raw_circuit, sample_circuits[1].raw_circuit],
target="example_target",
method=None,
repetitions=100,
some="options",
)

assert job == mock_service.create_job.return_value
assert (
sample_circuits[0].compiled_circuit
== mock_service.create_job.return_value.compiled_circuits.return_value[0]
)
assert (
sample_circuits[1].compiled_circuit
== mock_service.create_job.return_value.compiled_circuits.return_value[1]
)


def test_run_on_device_existing_probabilties(
@@ -258,15 +268,23 @@ def test_run_on_device_dry_run(
abc_experiment._samples = sample_circuits
abc_experiment._service = (mock_service := MagicMock())

job = abc_experiment.run_on_device(target="example_target", shots=100, method="dry-run")
job = abc_experiment.run_on_device(target="example_target", repetitions=100, method="dry-run")

mock_service.create_job.assert_called_once_with(
[sample_circuits[0].circuit, sample_circuits[1].circuit],
[sample_circuits[0].raw_circuit, sample_circuits[1].raw_circuit],
target="example_target",
method="dry-run",
repetitions=100,
)
assert job == mock_service.create_job.return_value
assert (
sample_circuits[0].compiled_circuit
== mock_service.create_job.return_value.compiled_circuits.return_value[0]
)
assert (
sample_circuits[1].compiled_circuit
== mock_service.create_job.return_value.compiled_circuits.return_value[1]
)


def test_interleave_circuit() -> None:
@@ -342,14 +360,14 @@ def test_validate_circuits(
abc_experiment._validate_circuits()

# Add a gate so not all measurements are terminal
abc_experiment._samples[0].circuit += cirq.X(abc_experiment.qubits[0])
abc_experiment._samples[0].raw_circuit += cirq.X(abc_experiment.qubits[0])
with pytest.raises(
ValueError, match="QCVV experiment circuits can only contain terminal measurements."
):
abc_experiment._validate_circuits()

# Remove measurements
abc_experiment._samples[0].circuit = abc_experiment._samples[0].circuit[:-2] + cirq.measure(
abc_experiment._samples[0].raw_circuit = abc_experiment._samples[0].circuit[:-2] + cirq.measure(
abc_experiment.qubits[0]
)
with pytest.raises(
Expand All @@ -359,7 +377,7 @@ def test_validate_circuits(
abc_experiment._validate_circuits()

# Remove all measurements
abc_experiment._samples[0].circuit = abc_experiment._samples[0].circuit[:-2]
abc_experiment._samples[0].raw_circuit = abc_experiment._samples[0].circuit[:-2]
with pytest.raises(
ValueError,
match="QCVV experiment circuits must contain measurements.",
@@ -553,7 +571,7 @@ def test_run_with_callable(
) -> None:
abc_experiment._samples = sample_circuits
test_callable = MagicMock()
test_callable.return_value = {"00": 0.0, "01": 0.2, "10": 0.7, "11": 0.1}
test_callable.return_value = {"01": 0.2, "10": 0.7, "11": 0.1}

abc_experiment.run_with_callable(test_callable, some="kwargs")

@@ -567,15 +585,17 @@
assert sample_circuits[1].probabilities == {"00": 0.0, "01": 0.2, "10": 0.7, "11": 0.1}


def test_run_with_callable_missing_bitstring(
def test_run_with_callable_bad_bitstring(
abc_experiment: BenchmarkingExperiment[ExampleResults],
sample_circuits: list[Sample],
) -> None:
abc_experiment._samples = sample_circuits
test_callable = MagicMock()
test_callable.return_value = {"00": 0.0, "01": 0.2, "10": 0.8}
test_callable.return_value = {"000": 0.0, "01": 0.2, "10": 0.8}

with pytest.raises(RuntimeError, match="Returned probabilities are missing bitstrings."):
with pytest.raises(
RuntimeError, match="Returned probabilities include an incorrect number of bits."
):
abc_experiment.run_with_callable(test_callable, some="kwargs")

