Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions papers/QORC/configs/defaults.json
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@

"xp_type" : "qorc",

"n_photons" : 2,
"n_modes" : 10,
"seed" : 42,
"n_photons" : 3,
"n_modes" : 20,
Comment on lines +6 to +7
Copy link

Copilot AI Jan 28, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The default values for n_photons and n_modes have been changed (n_photons from 2 to 3, n_modes from 10 to 20). While this might be intentional for the v0.2 update, please verify these are the intended default values, as they will affect the behavior of experiments that don't explicitly specify these parameters.

Copilot uses AI. Check for mistakes.
"run_seed" : 42,
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

let us keep a ticket to remember to fix that more generally

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sure

"fold_index": 0,

"n_epochs": 100,
Expand Down
2 changes: 1 addition & 1 deletion papers/QORC/configs/xp_qorc.json
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the configs are currently running multiple versions of the same model in order to make the comparison. While this is economical, the rule is to have 1 config PER run. The figures from the paper are then to be reproduced using .sh files that recombine these

Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

"n_photons" : 2,
"n_modes" : [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 130, 160, 200],
"seed" : [42, 47, 53, 59, 67],
"run_seed" : [42, 47, 53, 59, 67],
"fold_index": 0,

"n_epochs": 100,
Expand Down
2 changes: 1 addition & 1 deletion papers/QORC/configs/xp_rff.json
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same here. one config per run

Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
"n_rff_features" : [10, 20, 30, 120, 220, 364, 560, 1140, 2024, 4060, 4960, 9880],
"sigma" : 10,
"regularization_c" : 1.0,
"seed" : [42, 47, 53, 59, 67],
"run_seed" : [42, 47, 53, 59, 67],

"b_optim_via_sgd" : true,
"max_iter_sgd" : 1000,
Expand Down
92 changes: 85 additions & 7 deletions papers/QORC/lib/lib_datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,19 +5,97 @@

import sys
from pathlib import Path
import random

_REPO_ROOT = Path(__file__).resolve().parents[2]
if str(_REPO_ROOT) not in sys.path:
sys.path.insert(0, str(_REPO_ROOT))
import numpy as np
import torch
from torch.utils.data import Dataset

from papers.shared.QORC.datasets import ( # type: ignore
get_dataloader,
get_mnist_variant,
seed_worker,
split_fold_numpy,
tensor_dataset,
)

_REPO_ROOT = Path(__file__).resolve().parents[2]
if str(_REPO_ROOT) not in sys.path:
sys.path.insert(0, str(_REPO_ROOT))


class tensor_dataset(Dataset):
    """In-memory dataset serving (image, label) pairs as torch tensors.

    Features are stored with the caller-supplied dtype on the requested
    device; labels are stored as int64. An optional per-item transform is
    applied in __getitem__; when ``n_side_pixels`` is set, the flat feature
    vector is reshaped to a square image for the transform and flattened
    back afterwards.
    """

    def __init__(self, np_x, np_y, device, dtype, transform=None, n_side_pixels=None):
        def materialize(values, target_dtype):
            # Copy into a fresh tensor on the target device, whether the
            # input is already a tensor or an array-like.
            if isinstance(values, torch.Tensor):
                return values.detach().clone().to(device=device, dtype=target_dtype)
            return torch.tensor(values, device=device, dtype=target_dtype)

        self.np_x = materialize(np_x, dtype)
        self.np_y = materialize(np_y, torch.long)
        self.n_items = self.np_x.shape[0]

        assert self.n_items == self.np_y.shape[0], (
            f"tensor_dataset: x and y do not have the same number of rows. "
            f"self.np_x.shape: {self.np_x.shape}, self.np_y.shape: {self.np_y.shape}"
        )

        self.transform = transform
        self.n_side_pixels = n_side_pixels

    def __getitem__(self, index):
        image, label = self.np_x[index], self.np_y[index]
        if self.transform:
            side = self.n_side_pixels
            if side:
                # Transform expects a 2-D image; restore the flat layout after.
                image = self.transform(image.view(side, side)).view(side * side)
            else:
                image = self.transform(image)
        return image, label

    def __len__(self):
        return self.n_items


def seed_worker(worker_id, seed=42):
    """Deterministically seed the python and numpy RNGs of a DataLoader worker.

    Each worker gets its own derived seed (``seed + worker_id``) so that
    workers produce distinct but reproducible random streams.
    """
    derived_seed = seed + worker_id
    random.seed(derived_seed)
    np.random.seed(derived_seed)


def get_dataloader(dataset, batch_size, shuffle, num_workers, pin_memory, seed=42):
    """Build a torch DataLoader with deterministic shuffling and worker seeding.

    Shuffling order is pinned by a dedicated ``torch.Generator`` seeded with
    ``seed``, and each worker process is seeded via ``seed_worker`` so runs
    are reproducible for a given seed.

    Args:
        dataset: Dataset to iterate over.
        batch_size: Number of items per batch.
        shuffle: Whether to shuffle each epoch (order is seed-deterministic).
        num_workers: Number of worker processes (0 = load in main process).
        pin_memory: Whether to pin host memory for faster GPU transfer.
        seed: Base seed for both the shuffle generator and worker RNGs.

    Returns:
        A configured ``torch.utils.data.DataLoader``.
    """
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        # Lambda parameter renamed from `id`, which shadowed the builtin.
        worker_init_fn=lambda worker_id: seed_worker(worker_id, seed),
        generator=torch.Generator().manual_seed(seed),
    )


def split_fold_numpy(label, data, n_fold, fold_index, split_seed=-1):
    """Split (label, data) arrays into validation/train parts for one CV fold.

    The arrays are cut into ``n_fold`` contiguous folds; fold ``fold_index``
    becomes the validation set and the rest the training set. The last fold
    absorbs any remainder rows. When ``split_seed >= 0`` the rows are first
    shuffled reproducibly (this reseeds numpy's *global* RNG).

    Args:
        label: 1-D array of labels, same length as ``data``'s first axis.
        data: Array of samples, indexed along axis 0.
        n_fold: Total number of folds.
        fold_index: Which fold (0-based) to use for validation.
        split_seed: Seed for the pre-split shuffle; negative disables shuffling.

    Returns:
        Tuple ``(val_label, val_data, train_label, train_data)``.
    """
    if split_seed >= 0:
        np.random.seed(split_seed)
        shuffled_indices = np.random.permutation(len(label))
        label = label[shuffled_indices]
        data = data[shuffled_indices]
    fold_size = len(label) // n_fold
    val_start = fold_index * fold_size
    val_end = (fold_index + 1) * fold_size if fold_index < n_fold - 1 else len(label)
    val_indices = np.arange(val_start, val_end)
    # O(n) sorted complement instead of the quadratic per-element
    # `i not in val_indices` scan; also keeps an integer dtype when the
    # train side would be empty (np.array([]) is float and cannot index).
    train_indices = np.setdiff1d(np.arange(len(label)), val_indices, assume_unique=True)
    return (
        label[val_indices],
        data[val_indices],
        label[train_indices],
        data[train_indices],
    )


__all__ = [
"tensor_dataset",
"seed_worker",
Expand Down
55 changes: 32 additions & 23 deletions papers/QORC/lib/lib_qorc_encoding_and_linear_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,18 +48,18 @@ def get_circuit_physical_depth(circuit: pcvl.Circuit):
return d_current, depths
else:
raise ValueError(
"Erreur dans get_circuit_physical_depth: Le circuit n'est pas composite."
"Error in get_circuit_physical_depth: The circuit is not 'composite' type."
)
case _:
raise ValueError(
f"Erreur dans get_circuit_physical_depth: Type de circuit non géré: {t}"
f"Error in get_circuit_physical_depth: Wrong type of circuit: {t}"
)
raise ValueError("Erreur dans get_circuit_physical_depth (interne).")
raise ValueError("Error in get_circuit_physical_depth (internal).")


def get_PS_name_for_mode_and_depth(circuit: pcvl.Circuit, mode: int, depth: int):
if not circuit.is_composite():
raise ValueError("Erreur: Circuit pas composite")
raise ValueError("Error: The circuit is not 'composite' type.")

depths = [0] * circuit.m
for modes, comp in circuit._components: # type: ignore[attr-defined]
Expand All @@ -72,7 +72,7 @@ def get_PS_name_for_mode_and_depth(circuit: pcvl.Circuit, mode: int, depth: int)
if isinstance(comp, pcvl.components.PS):
add_depth = 0
if add_depth is None:
raise ValueError("Erreur: Composant non reconnu")
raise ValueError("Error: Component not recognized.")

for m in modes:
depths[m] = d_current + add_depth
Expand All @@ -83,7 +83,7 @@ def get_PS_name_for_mode_and_depth(circuit: pcvl.Circuit, mode: int, depth: int)
ps_name = comp.get_variables()["phi"]
return ps_name, depths[mode]

# Pas de Phaseshifter trouvé avec une profondeur en BS suffisante (la depth demandée est trop élevée pour le circuit)
# No Phaseshifter found with requested BS depth (requested depth too high)
return None, None


Expand All @@ -98,20 +98,19 @@ def create_quantum_layer_for_ascella(n_photons, logger):
specs = remote_processor.specs
spec_circuit = specs["specific_circuit"]
d_current, depths = get_circuit_physical_depth(spec_circuit)
print("circuit depths:", d_current, depths)
# print("circuit depths:", d_current, depths)

# Ascella: On cherche les PS du milieu, pour les 11 derniers modes, car le premier mode n'a pas de PhaseShifter
# Ascella: We look for PS in the middle-depth of the circuit, for the last 11 modes, as first mode does not contain any PhaseShifter
input_param_names = []
for mode_cour in range(1, 12):
depth_target = depths[mode_cour] // 2
ps_name, depth_cour = get_PS_name_for_mode_and_depth(
spec_circuit, mode_cour, depth_target
)
print(mode_cour, depth_target, depth_cour, ps_name)
# print(mode_cour, depth_target, depth_cour, ps_name)
input_param_names.append(ps_name)
print("Liste des paramètres d'input:", input_param_names)

# On construit un circuit identique, avec des phases fixes pour les non-input
# Build an identical circuit, with fixed phases for non-inputs
qorc_circuit = pcvl.Circuit(n_modes)
np.random.seed(run_seed)
for modes, comp in spec_circuit._components: # type: ignore[attr-defined]
Expand All @@ -138,15 +137,17 @@ def create_quantum_layer_for_ascella(n_photons, logger):
qorc_input_state[index] = 1

device_name = "cpu"
input_size = (
n_modes - 1
) # Nb input features = 11 for ascella (first mode does not have PS)
measurement_strategy = ML.MeasurementStrategy.PROBABILITIES
qorc_quantum_layer = ML.QuantumLayer(
input_size=n_modes
- 1, # Nb input features = 11 pour ascella (le premier mode n'a pas de PS)
output_size=qorc_output_size, # Nb output classes = nb modes
input_size=input_size,
circuit=qorc_circuit, # QORC quantum circuit
trainable_parameters=[], # Circuit is not trainable
input_parameters=input_param_names, # Input encoding parameters
input_state=qorc_input_state, # Initial photon state
output_mapping_strategy=ML.OutputMappingStrategy.NONE, # Output: Get all Fock states probas
measurement_strategy=measurement_strategy, # MerLin v2
# See: https://merlinquantum.ai/user_guide/output_mappings.html
no_bunching=False,
device=torch.device(device_name),
Expand Down Expand Up @@ -206,15 +207,14 @@ def create_qorc_quantum_layer(
qorc_output_size = math.comb(n_photons + n_modes - 1, n_photons)

logger.info("MerLin QuantumLayer creation:")
measurement_strategy = ML.MeasurementStrategy.PROBABILITIES
qorc_quantum_layer = ML.QuantumLayer(
input_size=n_modes, # Nb input features = nb modes
output_size=qorc_output_size, # Nb output classes = nb modes
circuit=qorc_circuit, # QORC quantum circuit
trainable_parameters=[], # Circuit is not trainable
trainable_parameters=[],
input_parameters=params_prefix, # Input encoding parameters
input_state=qorc_input_state, # Initial photon state
output_mapping_strategy=ML.OutputMappingStrategy.NONE, # Output: Get all Fock states probas
# See: https://merlinquantum.ai/user_guide/output_mappings.html
measurement_strategy=measurement_strategy,
no_bunching=b_no_bunching,
device=torch.device(device_name),
)
Expand Down Expand Up @@ -265,14 +265,20 @@ def qorc_encoding_and_linear_training(
if "ascella" in qpu_device_name:
n_modes = 12
n_components = 11 # Ascella first mode does not contain any phaseShifter -> 11 inputs instead of 12
assert n_photons <= 6, (
"Error: The number of photons should not exceed 6 for ascella qpu."
)
logger.info(
"Warning: ascella architecture detected in qpu_device_name. Forcing n_modes=12 and n_components=11."
)
if "belenos" in qpu_device_name:
n_modes = 24
n_components = 24
assert n_photons <= 12, (
"Error: The number of photons should not exceed 12 for belenos qpu."
)
logger.info(
"Warning: ascella architecture detectd in qpu_device_name. Forcing n_modes=24 and n_components=24."
"Warning: belenos architecture detected in qpu_device_name. Forcing n_modes=24 and n_components=24."
)

run_seed = seed
Expand Down Expand Up @@ -375,13 +381,16 @@ def normalize_global_min_max(data, global_min, global_max):
logger.info("Computation of the quantum features...")
time_t2 = time.time()
train_tensor = torch.tensor(
train_data_pca_norm, dtype=torch.float32, device=compute_device
# MerLin v0.2 requires Pi factor (as opposed to MerLin v0.1, which performs the product implicitly)
np.pi * train_data_pca_norm,
dtype=torch.float32,
device=compute_device,
)
val_tensor = torch.tensor(
val_data_pca_norm, dtype=torch.float32, device=compute_device
np.pi * val_data_pca_norm, dtype=torch.float32, device=compute_device
)
test_tensor = torch.tensor(
test_data_pca_norm, dtype=torch.float32, device=compute_device
np.pi * test_data_pca_norm, dtype=torch.float32, device=compute_device
)

if qpu_device_name == "none" or qpu_device_name == "":
Expand Down
76 changes: 17 additions & 59 deletions papers/QORC/lib/lib_remote_qorc.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,8 @@ def forward_remote_qorc_quantum_layer(
# max_batch_size = 64
# max_batch_size = 128
# max_batch_size = 1024
# max_batch_size = 10240 # 10k images à la fois => 7/8 batchs par run => En pratique plus long
max_batch_size = 102400 # 100k images à la fois => un seul batch
# max_batch_size = 10240 # 10k images in a row => Takes more time
max_batch_size = 102400 # 100k images in a row => only one batch

logger.info("Call to remote_qorc_quantum_layer ")
logger.info(
Expand All @@ -64,7 +64,7 @@ def forward_remote_qorc_quantum_layer(
)
qpu_device_name = qpu_device_name.replace(LOCAL_STR, "")
logger.info(
"'{}' détecté: Traitement local du remote processor".format(LOCAL_STR)
"'{}' detected: local treatment of remote processor".format(LOCAL_STR)
)

valid_qpu_device_name_list = [
Expand Down Expand Up @@ -92,7 +92,7 @@ def forward_remote_qorc_quantum_layer(
proc = MerlinProcessor(
remote_processor,
chunk_concurrency=chunk_concurrency,
max_batch_size=max_batch_size,
microbatch_size=max_batch_size,
)

train_size = train_tensor.shape[0]
Expand All @@ -101,61 +101,19 @@ def forward_remote_qorc_quantum_layer(
data_tensor = torch.cat([train_tensor, val_tensor, test_tensor], dim=0)
logger.info("data_tensor.shape:{}".format(str(data_tensor.shape)))

match qpu_device_name:
case "sim:slos":
logger.info("qpu_device_name=sim:slos - Calcule le train/val/test")
time_cour = time.time()

fut = proc.forward_async(
qorc_quantum_layer, data_tensor, nsample=qpu_device_nsample
)
_spin_until_with_ctrlc(
lambda: len(fut.job_ids) > 0 or fut.done(), timeout_s=qpu_device_timeout
)
processed_data_tensor = fut.wait()

duration = time.time() - time_cour
logger.info("Durée (s): {}".format(duration))

case "sim:ascella" | "qpu:ascella":
# Parralléliser les 3 jobs
logger.info(
"qpu_device_name={} - Calcule le train/val/test".format(
qpu_device_name
)
)
time_cour = time.time()

fut = proc.forward_async(
qorc_quantum_layer, data_tensor, nsample=qpu_device_nsample
)
_spin_until_with_ctrlc(
lambda: len(fut.job_ids) > 0 or fut.done(), timeout_s=qpu_device_timeout
)
processed_data_tensor = fut.wait()

duration = time.time() - time_cour
logger.info("Durée (s): {}".format(duration))

case _:
# Cas général: On lance les calculs par défaut
logger.info(
"Qorc: Traitement général (case else) du remote processor: {} - Calcule le train/val/test".format(
qpu_device_name
)
)
time_cour = time.time()

fut = proc.forward_async(
qorc_quantum_layer, data_tensor, nsample=qpu_device_nsample
)
_spin_until_with_ctrlc(
lambda: len(fut.job_ids) > 0 or fut.done(), timeout_s=qpu_device_timeout
)
processed_data_tensor = fut.wait()

duration = time.time() - time_cour
logger.info("Durée (s): {}".format(duration))
logger.info(
f"Qorc: Call to forward async for remote processor: {qpu_device_name} - Compute train/val/test"
)
time_cour = time.time()
fut = proc.forward_async(
qorc_quantum_layer, data_tensor, nsample=qpu_device_nsample
)
_spin_until_with_ctrlc(
lambda: len(fut.job_ids) > 0 or fut.done(), timeout_s=qpu_device_timeout
)
processed_data_tensor = fut.wait()
duration = time.time() - time_cour
logger.info(f"Duration (s): {duration}")

train_data_qorc = processed_data_tensor[:train_size]
val_data_qorc = processed_data_tensor[train_size : (train_size + val_size)]
Expand Down
2 changes: 2 additions & 0 deletions papers/QORC/lib/lib_rff_encoding_and_linear_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,8 @@ def rff_encoding_and_linear_training(

train_acc = int(10000.0 * accuracy_score(train_label, train_model_pred)) / 10000.0
test_acc = int(10000.0 * accuracy_score(test_label, test_model_pred)) / 10000.0
logger.info("train accuracy: {}".format(train_acc))
logger.info("test accuracy: {}".format(test_acc))

duration_calcul_rff_features = int(100.0 * (time_t3 - time_t2)) / 100.0
logger.info(
Expand Down
Loading