Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions ansible/roles/configure-local-storage/defaults/main.yml
Original file line number Diff line number Diff line change
@@ -1,19 +1,18 @@
---
# configure-local-storage default vars

# controlplane_* configures local storage on SNO, 3-Node Compact MNO, and
# Control-plane nodes on a "standard" multi-node deployment with workers
# controlplane_* configures disk wiping/partitioning on SNO, 3-Node Compact MNO,
# and Control-plane nodes on a "standard" multi-node deployment with workers.
# Populating device lists triggers Ignition-based disk preparation at boot.
controlplane_etcd_on_nvme: false
controlplane_nvme_device: /dev/nvme0n1
controlplane_localstorage_configuration: false
controlplane_localstorage_lvm_devices: []
controlplane_localstorage_disk_devices: []
controlplane_localstorage_lv_count: 10
controlplane_localstorage_lv_size: 100G

# worker_* configures local storage on worker nodes on a "standard"
# multi-node deployment with workers
worker_localstorage_configuration: false
# worker_* configures disk wiping/partitioning on worker nodes on a
# "standard" multi-node deployment with workers.
worker_localstorage_lvm_devices: []
worker_localstorage_disk_devices: []
worker_localstorage_lv_count: 10
Expand Down
4 changes: 2 additions & 2 deletions ansible/roles/configure-local-storage/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
return_content: true
body: { "config": "{{ lookup('template', 'controlplane.ign.j2') | to_json }}" }
with_items: "{{ get_hosts.json|selectattr('role', 'eq', 'master') | list }}"
when: (controlplane_etcd_on_nvme or controlplane_localstorage_configuration)
when: controlplane_etcd_on_nvme or controlplane_localstorage_lvm_devices | length > 0 or controlplane_localstorage_disk_devices | length > 0

- name: Apply worker host ignition config overrides
uri:
Expand All @@ -32,5 +32,5 @@
body: { "config": "{{ lookup('template', 'worker.ign.j2') | to_json }}" }
with_items: "{{ get_hosts.json|selectattr('role', 'eq', 'worker') | list }}"
when:
- worker_localstorage_configuration
- worker_localstorage_lvm_devices | length > 0 or worker_localstorage_disk_devices | length > 0
- cluster_type != "sno"
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@
"sizeMiB": 0
}
]
{% if controlplane_localstorage_configuration %}
{% if controlplane_localstorage_lvm_devices | length > 0 or controlplane_localstorage_disk_devices | length > 0 %}
},
{% else %}
}
{% endif %}
{% endif %}
{% if controlplane_localstorage_configuration %}
{% if controlplane_localstorage_lvm_devices | length > 0 or controlplane_localstorage_disk_devices | length > 0 %}
{% for device in controlplane_localstorage_lvm_devices %}
{
"device": "{{ device }}",
Expand Down Expand Up @@ -77,7 +77,7 @@
{% endif %}
],
"files": [
{% if controlplane_localstorage_configuration and controlplane_localstorage_lvm_devices | length > 0 %}
{% if controlplane_localstorage_lvm_devices | length > 0 %}
{
"contents": {
"source": "data:text/plain;base64,{{ lookup('template', './controlplane-setup-lvm.sh.j2') | b64encode }}"
Expand All @@ -101,13 +101,13 @@
"name": "var-lib-containers.mount",
"enabled": true,
"contents": "[Mount]\nWhat=/dev/disk/by-partlabel/CTNR\nType=xfs\nOptions=defaults\n[Install]\nWantedBy=local-fs.target"
{% if controlplane_localstorage_configuration and controlplane_localstorage_lvm_devices | length > 0 %}
{% if controlplane_localstorage_lvm_devices | length > 0 %}
},
{% else %}
}
{% endif %}
{% endif %}
{% if controlplane_localstorage_configuration and controlplane_localstorage_lvm_devices | length > 0 %}
{% if controlplane_localstorage_lvm_devices | length > 0 %}
{
"name": "setup-localstorage.service",
"enabled": true,
Expand Down
7 changes: 5 additions & 2 deletions ansible/roles/mno-post-cluster-install/defaults/main/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -37,13 +37,16 @@ aap_override_max_concurrent_awt: 10
setup_openshift_gitops: false
gitops_channel: stable

# Install Local Storage Operator (LSO) and create LocalVolume resources
setup_lso: false

# Migrate the ingresscontrollers and/or monitoring components to the master nodes
# Used during nodedensity testing to ensure these things are more available
migrate_ingresscontrollers: false
migrate_monitoring: false

# Apply the cluster-monitoring-config that uses the localstorage class name created either
# by controlplane_localstorage_configuration or worker_localstorage_configuration
# Apply the cluster-monitoring-config that uses the localstorage class name
# created by LSO (requires setup_lso: true)
apply_cluster_monitoring_config: false

# Prometheus storage settings
Expand Down
8 changes: 4 additions & 4 deletions ansible/roles/mno-post-cluster-install/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -146,20 +146,20 @@
when: setup_openshift_gitops

- name: Setup Local Storage Operator (LSO)
when: controlplane_localstorage_configuration or worker_localstorage_configuration
when: setup_lso | bool
block:
- name: Install Local Storage Operator (LSO)
shell:
KUBECONFIG={{ bastion_cluster_config_dir }}/kubeconfig oc apply -f {{ bastion_cluster_config_dir }}/localstorage/localstorage.yml

- name: Label control-plane nodes for localstorage
when: controlplane_localstorage_configuration
when: controlplane_localstorage_lvm_devices | length > 0 or controlplane_localstorage_disk_devices | length > 0
shell: |
KUBECONFIG={{ bastion_cluster_config_dir }}/kubeconfig oc label no --overwrite {{ item }} localstorage=true
loop: "{{ groups['controlplane'] }}"

- name: Label worker nodes for localstorage
when: worker_localstorage_configuration
when: worker_localstorage_lvm_devices | length > 0 or worker_localstorage_disk_devices | length > 0
shell: |
KUBECONFIG={{ bastion_cluster_config_dir }}/kubeconfig oc label no --overwrite {{ item }} localstorage=true
loop: "{{ groups['worker'] }}"
Expand Down Expand Up @@ -215,7 +215,7 @@
KUBECONFIG={{ bastion_cluster_config_dir }}/kubeconfig oc patch ingresscontrollers/default --type merge -n openshift-ingress-operator -p '{"spec": {"nodePlacement": {"nodeSelector": {"matchLabels": {"node-role.kubernetes.io/master": "" }}, "tolerations": [{"effect": "NoSchedule", "key": "node-role.kubernetes.io/master"}]} }}'

- name: Apply OpenShift-Monitoring configuration options
when: migrate_monitoring or (apply_cluster_monitoring_config and (controlplane_localstorage_configuration or worker_localstorage_configuration))
when: (migrate_monitoring | bool) or ((apply_cluster_monitoring_config | bool) and (setup_lso | bool))
shell:
KUBECONFIG={{ bastion_cluster_config_dir }}/kubeconfig oc apply -f {{ bastion_cluster_config_dir }}/openshift-monitoring/cluster-monitoring-config.yml

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,12 @@ spec:
fsType: xfs
{% endif %}
devicePaths:
{% if controlplane_localstorage_configuration and controlplane_localstorage_lvm_devices | length > 0 %}
{% if controlplane_localstorage_lvm_devices | length > 0 %}
{% for i in range(controlplane_localstorage_lv_count) %}
- "/dev/vg_ls/lv_cp_tv{{ '%02d' % i }}"
{% endfor %}
{% endif %}
{% if worker_localstorage_configuration and worker_localstorage_lvm_devices | length > 0 %}
{% if worker_localstorage_lvm_devices | length > 0 %}
{% for i in range(worker_localstorage_lv_count) %}
- "/dev/vg_ls/lv_w_tv{{ '%02d' % i }}"
{% endfor %}
Expand Down
7 changes: 5 additions & 2 deletions ansible/roles/sno-post-cluster-install/defaults/main/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,14 @@ setup_minio: false
minio_image_tag: RELEASE.2025-09-07T16-13-09Z
minio_pv_storageclass: localstorage2-sc

# Install Local Storage Operator (LSO) and create LocalVolume resources
setup_lso: false

# setup metal3
sno_metal3: false

# Apply the cluster-monitoring-config that uses the localstorage class name created
# by controlplane_localstorage_configuration
# Apply the cluster-monitoring-config that uses the localstorage class name
# created by LSO (requires setup_lso: true)
apply_cluster_monitoring_config: false

# Prometheus storage settings
Expand Down
6 changes: 3 additions & 3 deletions ansible/roles/sno-post-cluster-install/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,8 @@
KUBECONFIG={{ bastion_cluster_config_dir }}/{{ groups['sno'][0] }}/kubeconfig oc apply -f {{ bastion_cluster_config_dir }}/{{ groups['sno'][0] }}/gitops/openshift-gitops-operator.yml
when: setup_openshift_gitops

- name: Setup nodes localstorage
when: controlplane_localstorage_configuration
- name: Setup Local Storage Operator (LSO)
when: setup_lso | bool
block:
- name: Install local-storage operator
shell:
Expand Down Expand Up @@ -162,7 +162,7 @@
KUBECONFIG={{ bastion_cluster_config_dir }}/{{ groups['sno'][0] }}/kubeconfig oc apply -f {{ bastion_cluster_config_dir }}/{{ groups['sno'][0] }}/minio/minio.yml

- name: Apply OpenShift-Monitoring configuration options
when: apply_cluster_monitoring_config and controlplane_localstorage_configuration
when: (apply_cluster_monitoring_config | bool) and (setup_lso | bool)
shell:
KUBECONFIG={{ bastion_cluster_config_dir }}/{{ groups['sno'][0] }}/kubeconfig oc apply -f {{ bastion_cluster_config_dir }}/{{ groups['sno'][0] }}/openshift-monitoring/cluster-monitoring-config.yml

Expand Down
48 changes: 33 additions & 15 deletions docs/local-storage.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Local Storage

Jetlag can configure the [OpenShift Local Storage Operator (LSO)](https://docs.openshift.com/container-platform/latest/storage/persistent_storage/persistent_storage_local/persistent-storage-local.html) on control-plane and worker nodes during cluster deployment. Disk preparation (wiping, partitioning, LVM setup) is handled via Ignition at node boot time. LSO is then installed and `LocalVolume` resources are created post-install.
Jetlag can prepare disks and optionally install the [OpenShift Local Storage Operator (LSO)](https://docs.openshift.com/container-platform/latest/storage/persistent_storage/persistent_storage_local/persistent-storage-local.html) on control-plane and worker nodes during cluster deployment. Disk preparation (wiping, partitioning, LVM setup) is triggered by populating device lists and handled via Ignition at node boot time. LSO installation is controlled independently by `setup_lso`, which installs the operator and creates `LocalVolume` resources post-install.

_**Table of Contents**_

Expand All @@ -20,28 +20,34 @@ _**Table of Contents**_

## Variables

All variables are defined in `ansible/roles/configure-local-storage/defaults/main.yml`. Override them in the `Extra vars` section of `ansible/vars/all.yml`.
Disk preparation variables are defined in `ansible/roles/configure-local-storage/defaults/main.yml`. LSO and LocalVolume variables are in the post-install role defaults. Override them in the `Extra vars` section of `ansible/vars/all.yml`.

**Control-plane / SNO**
**Control-plane / SNO disk preparation**

Populating any device list below triggers Ignition-based disk wiping/partitioning at node boot.

| Variable | Default | Description |
| -------- | ------- | ----------- |
| `controlplane_localstorage_configuration` | `false` | Master enable for control-plane local storage setup |
| `controlplane_localstorage_lvm_devices` | `[]` | List of disks to partition and include in the LVM volume group. Empty list disables LVM setup |
| `controlplane_localstorage_disk_devices` | `[]` | List of disks to wipe and expose as raw block devices to LSO |
| `controlplane_localstorage_disk_devices` | `[]` | List of disks to wipe and expose as raw block devices |
| `controlplane_localstorage_lv_count` | `10` | Number of thin logical volumes to create in the LVM volume group |
| `controlplane_localstorage_lv_size` | `100G` | Size of each thin logical volume |

**Worker nodes**
**Worker node disk preparation**

| Variable | Default | Description |
| -------- | ------- | ----------- |
| `worker_localstorage_configuration` | `false` | Master enable for worker node local storage setup |
| `worker_localstorage_lvm_devices` | `[]` | List of disks to partition and include in the LVM volume group on worker nodes. Empty list disables LVM setup |
| `worker_localstorage_disk_devices` | `[]` | List of disks to wipe and expose as raw block devices to LSO on worker nodes |
| `worker_localstorage_disk_devices` | `[]` | List of disks to wipe and expose as raw block devices on worker nodes |
| `worker_localstorage_lv_count` | `10` | Number of thin logical volumes to create in the LVM volume group |
| `worker_localstorage_lv_size` | `100G` | Size of each thin logical volume |

**LSO installation (post-install)**

| Variable | Default | Description |
| -------- | ------- | ----------- |
| `setup_lso` | `false` | Install the Local Storage Operator and create `LocalVolume` resources for any populated device lists |

**LocalVolume volume modes**

| Variable | Default | Description |
Expand Down Expand Up @@ -89,48 +95,60 @@ When `localstorage_lvm_volume_mode` or `localstorage_disk_volume_mode` is set to

### Control-plane with LVM only

Three LVM logical volumes on a single disk, exposed as a filesystem storage class:
Three LVM logical volumes on a single disk, wiped at boot and exposed via LSO as a filesystem storage class:

```yaml
controlplane_localstorage_configuration: true
controlplane_localstorage_lvm_devices:
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:2:0
controlplane_localstorage_lv_count: 3
controlplane_localstorage_lv_size: 500G
setup_lso: true
```

### Control-plane with Block Disks only
### Control-plane with block disks only

```yaml
controlplane_localstorage_configuration: true
controlplane_localstorage_disk_devices:
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:2:0
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:3:0
setup_lso: true
```

### Control-plane with LVM and block disks

LVM volumes on one disk, plus two additional raw disks exposed as block devices:

```yaml
controlplane_localstorage_configuration: true
controlplane_localstorage_lvm_devices:
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:2:0
controlplane_localstorage_lv_count: 10
controlplane_localstorage_lv_size: 100G
controlplane_localstorage_disk_devices:
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:3:0
localstorage_disk_volume_mode: Block
setup_lso: true
```

This creates both `localvolume-lvm` (storage class `localstorage-sc`) and `localvolume-disk` (storage class `localstorage-disk-sc`).

### Workers with LVM

```yaml
worker_localstorage_configuration: true
worker_localstorage_device:
worker_localstorage_lvm_devices:
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:2:0
worker_localstorage_lv_count: 10
worker_localstorage_lv_size: 200G
setup_lso: true
```

### Wipe only, no LSO

Wipe and partition disks at boot time without installing the Local Storage Operator. Useful when preparing disks for other storage backends (e.g., ODF) or to ensure a clean disk state:

```yaml
controlplane_localstorage_disk_devices:
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:2:0
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:3:0
```

Omitting `setup_lso` (or setting it to `false`) skips LSO installation. The disks are wiped at node boot via Ignition but no `LocalVolume` resources are created.
4 changes: 2 additions & 2 deletions docs/odf.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ Minimum requirements:
- **Cluster type**: MNO only (ODF is not supported on SNO deployments in Jetlag)
- **Nodes**: At least 3 nodes (control-plane or worker) with available block devices
- **Block devices**: Each node must have at least one unused disk configured via `controlplane_localstorage_disk_devices` or `worker_localstorage_disk_devices`
- **LSO**: `controlplane_localstorage_configuration: true` (or `worker_localstorage_configuration: true`) with disk devices listed
- **LSO**: `setup_lso: true` with disk devices listed
- **Node labels**: The label `cluster.ocs.openshift.io/openshift-storage=` must be applied to the nodes that will host ODF. Set this via `post_install_node_labels`

## Variables
Expand Down Expand Up @@ -57,7 +57,7 @@ post_install_node_labels:
- cluster.ocs.openshift.io/openshift-storage=

# --- Local Storage (prerequisite for ODF) ---
controlplane_localstorage_configuration: true
setup_lso: true
controlplane_localstorage_disk_devices:
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:2:0
- /dev/disk/by-path/pci-0000:4a:00.0-scsi-0:0:3:0
Expand Down