经典量子混合神经网络模拟

1. 基本参数设定

固定随机数。

from isqpy import IsqCircuit
from isqpy.backend import TorchBackend
from isqpy.neural_networks import TorchLayer
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

import torchvision
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt


def setup_seed(seed):
    """Seed every RNG source (python, numpy, torch CPU and CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

setup_seed(222)

设定超参数。

# --- Hyperparameters ---
epochs = 1  # number of training epochs (the training section later reassigns this to 5)
train_samples = 200  # samples kept per digit class (0 and 1) for the training split
batch_size_train = 10

test_samples = 100  # samples kept per digit class for the test split
batch_size_test = 10

device="cpu"  # all tensors stay on CPU

2. 数据加载

下载MNIST数据集。这里为了简便,仅仅取出数字为0,1的部分数据。

# Shared preprocessing pipeline: convert PIL image to tensor, then normalize
# with mean 0.1307 / std 0.3081 (the values used throughout this tutorial).
_mnist_transform = torchvision.transforms.Compose(
    [
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.1307,), (0.3081,)),
    ]
)

# Download (if missing) and load both MNIST splits under ./data/.
train_data = torchvision.datasets.MNIST(
    "./data/",
    train=True,
    download=True,
    transform=_mnist_transform,
)
test_data = torchvision.datasets.MNIST(
    "./data/",
    train=False,
    download=True,
    transform=_mnist_transform,
)

def _restrict_to_binary(dataset, per_class):
    """Keep only the first `per_class` samples of digit 0 and of digit 1, in place."""
    keep = np.append(
        np.where(dataset.targets == 0)[0][:per_class],
        np.where(dataset.targets == 1)[0][:per_class],
    )
    dataset.data = dataset.data[keep]
    dataset.targets = dataset.targets[keep]


# data / labels: X_train, y_train
_restrict_to_binary(train_data, train_samples)
train_dataloader = DataLoader(
    train_data,
    batch_size=batch_size_train,
    shuffle=True,
)

# data / labels: X_test, y_test
_restrict_to_binary(test_data, test_samples)
test_dataloader = DataLoader(
    test_data,
    batch_size=batch_size_test,
    shuffle=True,
)

3. 查看数据集

# Show the first eight test images together with their labels.
test_plot_data, test_plot_targets = next(iter(test_dataloader))

_, preview_axes = plt.subplots(1, 8, figsize=(10, 3))
for ax, image, label in zip(preview_axes, test_plot_data, test_plot_targets):
    ax.imshow(image[0], cmap="gray", interpolation="none")
    ax.set_title(f"Number: {label}")
    ax.set_xticks([])
    ax.set_yticks([])

png

4. 创建量子机器学习量子电路

"hybrid.isq"文件如下。

import std;


// Parameters bound from Python at call time:
//   inputs  -- classical features, angle-encoded via Rz rotations;
//   weights -- trainable angles (two layers x 6 angles = 12 total,
//              matching num_weights=12 on the Python side).
param inputs[], weights[];
qbit q[2];

// Apply a Hadamard to every qubit (uniform superposition).
procedure single_h(qbit q[]) {
    for i in 0:q.length {
        H(q[i]);
    }
}

// Entangle each adjacent qubit pair with a CZ gate.
procedure adjacent_cz(qbit q[]) {
    for i in 0:q.length-1 {
        CZ(q[i], q[i+1]);
    }
}

// Encode one classical feature per qubit with an Rz rotation, reading
// inputs[start_idx .. start_idx + q.length - 1].
procedure encode_inputs(qbit q[], int start_idx) {
    for i in 0:q.length {
        Rz(inputs[i+start_idx], q[i]);
    }
}

// One variational layer: per-qubit Rx, then Ry, then Rz rotations,
// consuming 3 * q.length consecutive weights starting at start_idx.
procedure encode_weights(qbit q[], int start_idx) {
    for i in 0:q.length {
        Rx(weights[i+start_idx], q[i]);
    }
    for i in 0:q.length {
        Ry(weights[i+start_idx+q.length], q[i]);
    }
    for i in 0:q.length {
        Rz(weights[i+start_idx+q.length*2], q[i]);
    }
}


// Full circuit: feature encoding followed by two variational layers
// (weights[0..5], then weights[6..11]), each followed by entanglement,
// then measure both qubits.
procedure main() {

    single_h(q);
    encode_inputs(q, 0);
    adjacent_cz(q);

    encode_weights(q, 0);
    adjacent_cz(q);

    encode_weights(q, 6);
    adjacent_cz(q);

    M(q[0]);
    M(q[1]);
}

使用python创建量子机器学习电路。

# Backend that evaluates the circuit with differentiable torch tensors.
backend = TorchBackend()

# Compile the isQ source; sample=False presumably returns exact measurement
# statistics rather than shot samples -- confirm against isqpy docs.
qc = IsqCircuit(
    file="hybrid.isq",
    backend=backend,
    sample=False,
)


def circuit(inputs, weights):
    """Run the quantum circuit and return a 2-element feature tensor."""
    result = qc.measure(inputs=inputs, weights=weights)
    # Entries 0 and 2 of the measurement result are concatenated into the
    # layer output; presumably these select two basis-state probabilities --
    # verify against isqpy's measure() result ordering.
    return torch.cat((result[0].view(-1), result[2].view(-1)))


# Wrap the circuit as a torch.nn.Module exposing 12 trainable weights.
qnn = TorchLayer(
    circuit=circuit,
    num_weights=12,
    is_vmap=True,
)

电路可视化。

from isqpy.draw import Drawer

# Render the compiled QCIS instruction sequence as a circuit diagram.
drawer = Drawer()
drawer.plot(qc.qcis)

png

5. 创建经典量子混合电路

qnn继承于torch.nn.Module,因此可以很方便地直接创建经典量子神经网络。这里我们使用卷积神经网络和全连接网络处理图片数据,在全连接神经网络中,我们加入qnn,形成量子经典混合神经网络。qnn中inputs的维度为2,测量返回值维度为2,因此可以放在两个线性层的中间。使用量子神经网络的时候一定要注意数据的维度,否则混合神经网络无法正常构建。

class HybridNet(nn.Module):
    """CNN feature extractor followed by a hybrid classical-quantum head.

    The fully connected head squeezes features down to 2 values, feeds them
    through the quantum layer `qnn` (2 inputs -> 2 outputs), then maps to the
    2 class logits.
    """

    def __init__(self, qnn):
        super().__init__()
        # Convolutional feature extractor; for 28x28 inputs the flattened
        # output has 20 * 4 * 4 = 320 features.
        feature_layers = [
            nn.Conv2d(1, 10, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.Dropout2d(),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Flatten(),
        ]
        # Classifier head with the quantum layer sandwiched between two
        # dimension-2 linear layers.
        head_layers = [
            nn.Linear(320, 50),
            nn.ReLU(),
            nn.Linear(50, 2),
            qnn,
            nn.Linear(2, 2),
        ]
        self.conv = nn.Sequential(*feature_layers)
        self.fc = nn.Sequential(*head_layers)

    def forward(self, x):
        """Return class logits of shape (batch, 2) for image batch `x`."""
        return self.fc(self.conv(x))

创建混合神经网络的对象,并且选取损失函数和优化器。

# Instantiate the hybrid network, the classification loss, and an Adam
# optimizer (lr=0.001) over all parameters -- classical and quantum alike.
hybrid_net = HybridNet(qnn)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(hybrid_net.parameters(), lr=0.001)

定义训练函数和测试函数。

def train(dataloader, model, loss_fn, optimizer):
    """Train `model` for one epoch over `dataloader`.

    Prints the running loss every 10 batches; batches are moved to the
    module-level `device` before the forward pass.
    """
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X = X.to(device)
        y = y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update.
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 10 == 0:
            current = (batch + 1) * len(X)
            print(f"loss: {loss.item():>7f}  [{current:>5d}/{size:>5d}]")
def test(dataloader, model, loss_fn):
    """Evaluate `model` on `dataloader` and print accuracy plus average loss."""
    size = len(dataloader.dataset)
    num_batches = len(dataloader)

    model.eval()
    total_loss = 0
    total_correct = 0
    # No gradients needed for evaluation.
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            total_loss += loss_fn(pred, y).item()
            total_correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss = total_loss / num_batches
    correct = total_correct / size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

6. 训练神经网络

训练5个epoch。

# Train for five epochs, evaluating on the test split after each one.
epochs = 5
for epoch in range(epochs):
    print(f"Epoch {epoch + 1}\n-------------------------------")
    train(train_dataloader, hybrid_net, loss_fn, optimizer)
    test(test_dataloader, hybrid_net, loss_fn)
print("Done!")
Epoch 1
-------------------------------
loss: 0.826408  [   10/  400]
loss: 0.665908  [  110/  400]
loss: 0.522112  [  210/  400]
loss: 0.484606  [  310/  400]
Test Error: 
 Accuracy: 99.0%, Avg loss: 0.507890

Epoch 2
-------------------------------
loss: 0.485518  [   10/  400]
loss: 0.514424  [  110/  400]
loss: 0.536301  [  210/  400]
loss: 0.480640  [  310/  400]
Test Error: 
 Accuracy: 100.0%, Avg loss: 0.451690

Epoch 3
-------------------------------
loss: 0.485919  [   10/  400]
loss: 0.488746  [  110/  400]
loss: 0.500073  [  210/  400]
loss: 0.448662  [  310/  400]
Test Error: 
 Accuracy: 100.0%, Avg loss: 0.421000

Epoch 4
-------------------------------
loss: 0.452065  [   10/  400]
loss: 0.406150  [  110/  400]
loss: 0.385825  [  210/  400]
loss: 0.422143  [  310/  400]
Test Error: 
 Accuracy: 100.0%, Avg loss: 0.394240

Epoch 5
-------------------------------
loss: 0.405451  [   10/  400]
loss: 0.409295  [  110/  400]
loss: 0.375203  [  210/  400]
loss: 0.426138  [  310/  400]
Test Error: 
 Accuracy: 100.0%, Avg loss: 0.369067

Done!

7. 模型验证

判断模型的预测准确性。

# Visual check: predict the first image of eight test batches and show each
# image titled with its predicted digit.
count = 0
_, axes = plt.subplots(1, 8, figsize=(10, 3))
hybrid_net.eval()
with torch.no_grad():
    for data, target in test_dataloader:
        if count == 8:
            break
        pred = hybrid_net(data[:1]).argmax(dim=1, keepdim=True)
        ax = axes[count]
        ax.imshow(data[0].numpy().squeeze(), cmap="gray", interpolation="none")
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title(f"Predicted {pred.item()}")
        count += 1

png