OUTPUT 1:
Initial hyperparameters:
%%time
# Get a (pre-processed) training and test set
train_set, test_set = classifier.get_train_test_set(train_size=300, test_size=100)
# Configure model training hyper parameters
config = {
    "epochs": 4,
    "lr": 0.1,
    "batch_size": 25,
    "betas": (0.9, 0.99),
    "weight_decay": 1e-3,
    "clip_grad": True,
    "log_interval": 6,
}
# Train and plot the results
classifier.train_module(train_set, test_set, config)
classifier.plot_training_progress()
This was done with the QCNN ansatz class
Cell 16:
from ionqvision.ansatze.ansatz_library import QCNNAnsatz, QAOAAnsatz  # QAOAAnsatz is imported but not used in this cell
ansatz = QCNNAnsatz(num_qubits=4)
ansatz.draw("mpl")
TEST 2.
# Configure model training hyper parameters
config = {
    "epochs": 30,
    "lr": 0.001,
    "batch_size": 32,
    "betas": (0.9, 0.99),
    "weight_decay": 1e-3,
    "clip_grad": True,
    "log_interval": 6,
}
OUTPUT 2
61% accuracy on the board
OUTPUT 3 - Darssan
Code Box for the QCNN modification
from qiskit.circuit import QuantumCircuit
from math import log

# Base classes are assumed to live in ionqvision.ansatze, alongside ansatz_library
from ionqvision.ansatze import VariationalAnsatz, BrickworkLayoutAnsatz


class QCNNAnsatz(VariationalAnsatz):
    class ConvolutionBrickwork(BrickworkLayoutAnsatz):
        """
        Implement the convolution filters for the :class:`.QCNNAnsatz`.
        """
        def __init__(self, num_qubits, num_layers, prefix=None, qubits=None, initial_state=None):
            super().__init__(num_qubits, num_layers, blk_sz=3, prefix=prefix, qubits=qubits, initial_state=initial_state)

        def two_qubit_block(self, theta, q1, q2):
            conv_op = QuantumCircuit(2, name="CONV")
            conv_op.ry(theta[0], 0)
            conv_op.ry(theta[1], 1)
            conv_op.rxx(theta[2], 0, 1)
            self.append(conv_op.to_instruction(), [q1, q2])

    class PoolingLayer(BrickworkLayoutAnsatz):
        """
        Implement the pooling layer for the :class:`.QCNNAnsatz`.
        """
        def __init__(self, num_qubits, prefix=None, qubits=None):
            super().__init__(num_qubits, 1, blk_sz=1, prefix=prefix, qubits=qubits)

        def two_qubit_block(self, theta, q1, q2):
            pool_op = QuantumCircuit(2, name="POOL")
            pool_op.crz(theta[0], 1, 0)
            self.append(pool_op.to_instruction(), [q1, q2])

    class AdaptiveLayer(BrickworkLayoutAnsatz):
        """
        An adaptive layer intended to adjust the circuit depth based on performance.
        """
        def __init__(self, num_qubits, max_layers, prefix=None, qubits=None):
            # blk_sz=2: the block below uses two parameters (one Ry per qubit)
            super().__init__(num_qubits, max_layers, blk_sz=2, prefix=prefix, qubits=qubits)

        def two_qubit_block(self, theta, q1, q2):
            adaptive_op = QuantumCircuit(2, name="ADAPTIVE")
            adaptive_op.ry(theta[0], 0)
            adaptive_op.ry(theta[1], 1)
            self.append(adaptive_op.to_instruction(), [q1, q2])

    def __init__(self, num_qubits, filter_depth=1, initial_state=None):
        # num_qubits must be a power of 2; check this before limiting the depth
        full_depth = log(num_qubits, 2)
        if abs(full_depth - round(full_depth)) > 1e-6:
            raise ValueError("num_qubits must be a power of 2")
        # Limit the number of convolution/pooling stages to filter_depth
        num_layers = min(int(round(full_depth)), filter_depth)

        super().__init__(num_qubits)
        if initial_state is not None:
            self.compose(initial_state, inplace=True)

        # Add alternating convolution and pooling layers on a shrinking set of qubits
        for k in range(num_layers):
            qubits = list(range(0, num_qubits, 2**k))
            # Convolution layer
            conv = QCNNAnsatz.ConvolutionBrickwork(num_qubits, filter_depth, prefix="C" + str(k), qubits=qubits)
            self.compose(conv, inplace=True)
            # Pooling layer
            pool = QCNNAnsatz.PoolingLayer(num_qubits, prefix="P" + str(k), qubits=qubits)
            self.compose(pool, inplace=True)

        # Optional: add an adaptive layer on the qubits from the final stage
        adaptive = QCNNAnsatz.AdaptiveLayer(num_qubits, filter_depth, prefix="A", qubits=qubits)
        self.compose(adaptive, inplace=True)
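As a quick sanity check, the modified ansatz can be instantiated the same way as the library version; filter_depth below refers to the new constructor argument introduced above, and num_parameters is the usual Qiskit circuit property (assuming VariationalAnsatz exposes the standard QuantumCircuit interface, as the compose/append/draw calls above suggest).
# Sanity check for the modified ansatz (filter_depth is the new constructor argument above)
ansatz = QCNNAnsatz(num_qubits=4, filter_depth=1)
ansatz.draw("mpl")
print(ansatz.num_parameters)  # number of trainable angles in the composed circuit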
box 1:
from ionqvision.ansatze.ansatz_library import AngleEncoder
encoder = AngleEncoder(num_qubits=4)
encoder.draw("mpl")
box 2:
from ionqvision.ansatze.ansatz_library import QCNNAnsatz
ansatz = QCNNAnsatz(num_qubits=4)
ansatz.draw("mpl")
box 3:
from qiskit.quantum_info import SparsePauliOp
# Measure the expectation value of X_0, Y_0, Z_0
quantum_features = [
    SparsePauliOp(["IIIX"]),
    SparsePauliOp(["IIIY"]),
    SparsePauliOp(["IIIZ"]),
]
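A note on the Pauli strings: Qiskit orders the characters with qubit 0 on the right, so "IIIX" really is X on qubit 0, matching the comment above. The snippet below is just an equivalent way to build the same observable with SparsePauliOp.from_sparse_list, handy as a sanity check.
from qiskit.quantum_info import SparsePauliOp

# "IIIX" puts X on qubit 0 because Qiskit labels qubits right-to-left
x0 = SparsePauliOp.from_sparse_list([("X", [0], 1.0)], num_qubits=4)
assert x0 == SparsePauliOp(["IIIX"])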
box 4:
from ionqvision.modules import BinaryMNISTClassifier
# Set up your classifier and inspect its architecture
classifier = BinaryMNISTClassifier(encoder, ansatz, quantum_features); classifier
box 5:
import logging
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
box 6:
# Check out your quantum layer
classifier.quantum_layer.layer_qc.draw("mpl")
# Verify the images loaded correctly
classifier.visualize_batch()
box 7:
%%time
# Get a (pre-processed) training and test set
train_set, test_set = classifier.get_train_test_set(train_size=300, test_size=100)
# Configure model training hyper parameters
config = {
    "epochs": 4,
    "lr": 0.1,
    "batch_size": 50,
    "betas": (0.9, 0.99),
    "weight_decay": 1e-3,
    "clip_grad": True,
    "log_interval": 6,
}
# Train and plot the results
classifier.train_module(train_set, test_set, config)
classifier.plot_training_progress()
Tested parameters:
from ionqvision.ansatze.ansatz_library import AngleEncoder
encoder = AngleEncoder(num_qubits=4)
encoder.draw("mpl")
from ionqvision.ansatze.ansatz_library import QCNNAnsatz
ansatz = QCNNAnsatz(num_qubits=4)
ansatz.draw("mpl")
from qiskit.quantum_info import SparsePauliOp
# Measure the expectation value of X_0, Y_0, Z_0
quantum_features = [
    SparsePauliOp(["IIIX"]),
    SparsePauliOp(["IIIY"]),
    SparsePauliOp(["IIIZ"]),
]
from ionqvision.modules import BinaryMNISTClassifier
# Set up your classifier and inspect its architecture
# BinaryMNISTClassifier uses the encoder, quantum ansatz, and quantum features
# It combines the quantum and classical neural network layers for binary classification
classifier = BinaryMNISTClassifier(encoder, ansatz, quantum_features); classifier
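To make the comment above concrete: this is a hybrid model, where the quantum layer (encoder + ansatz + the three feature observables) maps each image to a small feature vector and a classical layer turns those features into a class probability. The sketch below is not the ionqvision implementation, just a hypothetical PyTorch-style illustration of that wiring.
import torch
import torch.nn as nn

class HybridBinaryClassifierSketch(nn.Module):
    """Hypothetical sketch only; NOT the actual BinaryMNISTClassifier internals."""
    def __init__(self, quantum_layer, num_features=3):
        super().__init__()
        self.quantum_layer = quantum_layer       # encoder + ansatz + expectation values
        self.head = nn.Linear(num_features, 1)   # classical read-out layer

    def forward(self, x):
        features = self.quantum_layer(x)         # shape: (batch, num_features)
        return torch.sigmoid(self.head(features))  # probability of the positive class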
import logging
# Suppress unnecessary logging to focus on the critical outputs during training
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
# Check out your quantum layer
classifier.quantum_layer.layer_qc.draw("mpl")
# Verify the images loaded correctly
classifier.visualize_batch()
%%time
# Get a (pre-processed) training and test set
# In this case, we are using 300 images for training and 100 for testing
train_set, test_set = classifier.get_train_test_set(train_size=300, test_size=100)
# Configure model training hyper parameters
config = {
    "epochs": 10,           # number of passes through the training set; more epochs can help the model learn complex patterns
    "lr": 0.01,             # learning rate for the Adam optimizer
    "batch_size": 55,       # images per training batch; larger batches speed up each epoch but can hurt generalization
    "betas": (0.9, 0.99),   # Adam momentum / smoothing coefficients
    "weight_decay": 1e-3,   # L2 regularization strength to discourage overfitting
    "clip_grad": True,      # clip gradients to prevent them from exploding
    "log_interval": 6,      # how often (in batches) to log progress during training
}
# Train and plot the results
classifier.train_module(train_set, test_set, config)
classifier.plot_training_progress()
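For reference, the config keys above map onto standard PyTorch training settings. The sketch below shows roughly how such a config is typically consumed, assuming classifier behaves like a torch.nn.Module and train_set like a torch Dataset; the actual train_module internals in ionqvision may differ.
import torch
from torch.utils.data import DataLoader

# Hypothetical sketch of a training loop driven by the config above (not ionqvision internals)
optimizer = torch.optim.Adam(
    classifier.parameters(),
    lr=config["lr"],                      # optimizer step size
    betas=config["betas"],                # momentum / smoothing coefficients
    weight_decay=config["weight_decay"],  # L2 regularization against overfitting
)
loader = DataLoader(train_set, batch_size=config["batch_size"], shuffle=True)

for epoch in range(config["epochs"]):
    for images, labels in loader:
        optimizer.zero_grad()
        probs = classifier(images).squeeze()
        loss = torch.nn.functional.binary_cross_entropy(probs, labels.float())
        loss.backward()
        if config["clip_grad"]:
            # keep gradient norms bounded to prevent exploding gradients
            torch.nn.utils.clip_grad_norm_(classifier.parameters(), max_norm=1.0)
        optimizer.step()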