From cb5925368d9d9e3d5d4b2809c3c4f456b5d1bb26 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Mon, 2 Feb 2026 12:08:31 +0000 Subject: [PATCH 1/7] Fix issue with systemd-networkd-wait-online when no IP This sets the RequiredForOnline[1] property to false when no IP is configured. This prevents the interface from entering the degraded state[2]. [1] https://www.freedesktop.org/software/systemd/man/latest/systemd.network.html#RequiredForOnline= [2] https://github.com/systemd/systemd/issues/575#issuecomment-124286854 Closes-Bug: #2139592 Change-Id: I266b720156c0d9cf5672d5dcc5a44a2a43888df6 Signed-off-by: Will Szumski --- kayobe/plugins/filter/networkd.py | 2 +- .../unit/plugins/filter/test_networkd.py | 52 ++++++++++++++++--- ...-wait-network-online-c7e01e49174ef313.yaml | 6 +++ 3 files changed, 51 insertions(+), 9 deletions(-) create mode 100644 releasenotes/notes/fixes-wait-network-online-c7e01e49174ef313.yaml diff --git a/kayobe/plugins/filter/networkd.py b/kayobe/plugins/filter/networkd.py index aafe4ded9..b0bfd9e35 100644 --- a/kayobe/plugins/filter/networkd.py +++ b/kayobe/plugins/filter/networkd.py @@ -303,7 +303,7 @@ def _network(context, name, inventory_hostname, bridge, bond, vlan_interfaces): { 'Link': [ {'MTUBytes': mtu}, - ] + ] + ([{'RequiredForOnline': "false"}] if not ip else []) }, ] diff --git a/kayobe/tests/unit/plugins/filter/test_networkd.py b/kayobe/tests/unit/plugins/filter/test_networkd.py index ffcedecca..c1d42bd09 100644 --- a/kayobe/tests/unit/plugins/filter/test_networkd.py +++ b/kayobe/tests/unit/plugins/filter/test_networkd.py @@ -508,8 +508,9 @@ def test_vlan(self): { "Match": [ {"Name": "eth0.2"} - ] + ], }, + {'Link': [{'RequiredForOnline': 'false'}]}, ] } self.assertEqual(expected, nets) @@ -541,6 +542,7 @@ def test_vlan_multiple(self): {"Name": "eth0.2"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0.3": [ { @@ -548,6 +550,7 @@ def test_vlan_multiple(self): {"Name": "eth0.3"} ] }, + {'Link': 
[{'RequiredForOnline': 'false'}]}, ] } self.assertEqual(expected, nets) @@ -577,6 +580,7 @@ def test_vlan_with_parent(self): {"Name": "eth0.2"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-vlan.5": [ { @@ -584,6 +588,7 @@ def test_vlan_with_parent(self): {"Name": "vlan.5"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-vlan6": [ { @@ -591,6 +596,7 @@ def test_vlan_with_parent(self): {"Name": "vlan6"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ] } self.assertEqual(expected, nets) @@ -609,6 +615,7 @@ def test_bridge(self): {"Name": "br0"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0": [ { @@ -652,6 +659,7 @@ def test_bridge_with_bridge_port_net(self): {"Name": "br0"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0": [ { @@ -667,6 +675,7 @@ def test_bridge_with_bridge_port_net(self): { "Link": [ {"MTUBytes": 1400}, + {'RequiredForOnline': 'false'}, ] }, ], @@ -698,6 +707,7 @@ def test_bridge_with_bridge_port_vlan(self): {"Name": "br0"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0": [ { @@ -730,6 +740,7 @@ def test_bridge_with_bridge_port_vlan(self): {"Name": "eth1.2"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], } self.assertEqual(expected, nets) @@ -747,8 +758,9 @@ def test_bridge_with_bridge_port_vlan_net(self): { "Match": [ {"Name": "br0"} - ] + ], }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0": [ { @@ -762,19 +774,21 @@ def test_bridge_with_bridge_port_vlan_net(self): {"VLAN": "eth0.2"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0.2": [ { "Match": [ {"Name": "eth0.2"} - ] + ], }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth1": [ { "Match": [ {"Name": "eth1"} - ] + ], }, { "Network": [ @@ -799,6 +813,7 @@ def test_bond(self): {"Name": "bond0"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0": [ { @@ -842,6 +857,9 @@ def 
test_bond_with_bond_member_net(self): {"Name": "bond0"} ] }, + { + "Link": [{'RequiredForOnline': 'false'}] + }, ], "50-kayobe-eth0": [ { @@ -857,6 +875,7 @@ def test_bond_with_bond_member_net(self): { "Link": [ {"MTUBytes": 1400}, + {'RequiredForOnline': 'false'}, ] }, ], @@ -888,6 +907,7 @@ def test_bond_with_bond_member_vlan(self): {"Name": "bond0"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-eth0": [ { @@ -920,6 +940,7 @@ def test_bond_with_bond_member_vlan(self): {"Name": "eth1.2"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], } self.assertEqual(expected, nets) @@ -937,7 +958,10 @@ def test_bond_with_bond_member_vlan_net(self): { "Match": [ {"Name": "bond0"} - ] + ], + }, + { + "Link": [{'RequiredForOnline': 'false'}] }, ], "50-kayobe-eth0": [ @@ -952,6 +976,9 @@ def test_bond_with_bond_member_vlan_net(self): {"VLAN": "eth0.2"}, ] }, + { + "Link": [{'RequiredForOnline': 'false'}] + }, ], "50-kayobe-eth0.2": [ { @@ -959,6 +986,10 @@ def test_bond_with_bond_member_vlan_net(self): {"Name": "eth0.2"} ] }, + { + "Link": [{'RequiredForOnline': 'false'}] + }, + ], "50-kayobe-eth1": [ { @@ -991,6 +1022,7 @@ def test_veth(self): {"Name": "br0"} ] }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-p-br0-phy": [ { @@ -1034,6 +1066,7 @@ def test_veth_with_mtu(self): { "Link": [ {"MTUBytes": 1400}, + {'RequiredForOnline': 'false'}, ] }, ], @@ -1095,14 +1128,16 @@ def test_veth_on_vlan(self): "Network": [ {"VLAN": "br0.42"} ] - } + }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-br0.42": [ { "Match": [ {"Name": "br0.42"} - ] + ], }, + {'Link': [{'RequiredForOnline': 'false'}]}, ], "50-kayobe-p-br0-phy": [ { @@ -1180,8 +1215,9 @@ def test_no_veth_on_vlan_without_bridge(self): { "Match": [ {"Name": "eth0.2"} - ] + ], }, + {'Link': [{'RequiredForOnline': 'false'}]}, ] } self.assertEqual(expected, nets) diff --git a/releasenotes/notes/fixes-wait-network-online-c7e01e49174ef313.yaml 
b/releasenotes/notes/fixes-wait-network-online-c7e01e49174ef313.yaml new file mode 100644 index 000000000..f8fadb87b --- /dev/null +++ b/releasenotes/notes/fixes-wait-network-online-c7e01e49174ef313.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue with systemd-networkd-wait-online.service failing when some + interfaces were not configured with an IP address. This affected systemd + units depending on network-online.target. From c17def5d4272befa6812926eecc40ff13a6e9800 Mon Sep 17 00:00:00 2001 From: Leonie Chamberlin-Medd Date: Wed, 8 Apr 2026 12:58:54 +0000 Subject: [PATCH 2/7] Add infra-vms to container-engine group Change-Id: Ie7b194dec24e402949d01da745ccb4743fa6c567 Signed-off-by: Leonie Chamberlin-Medd --- etc/kayobe/inventory/groups | 1 + ...infra-vms-to-container-engine-group-dd2b417126e06162.yaml | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 releasenotes/notes/add-infra-vms-to-container-engine-group-dd2b417126e06162.yaml diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups index fee8c48e7..1c48ddbbd 100644 --- a/etc/kayobe/inventory/groups +++ b/etc/kayobe/inventory/groups @@ -80,6 +80,7 @@ monitoring storage compute ansible-control +infra-vms [docker-registry:children] # Hosts in this group will have a Docker Registry deployed. This group should diff --git a/releasenotes/notes/add-infra-vms-to-container-engine-group-dd2b417126e06162.yaml b/releasenotes/notes/add-infra-vms-to-container-engine-group-dd2b417126e06162.yaml new file mode 100644 index 000000000..1c60045ca --- /dev/null +++ b/releasenotes/notes/add-infra-vms-to-container-engine-group-dd2b417126e06162.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds ``infra-vms`` to ``container-engine`` group to allow for Docker/Podman + to be installed. 
From f23ced2b59731c7eb89d9212237e933042f74d7b Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Fri, 27 Mar 2026 11:44:11 +0000 Subject: [PATCH 3/7] Block until timedatectl status show synchronized Should reduce incidents of time sync issues when running kolla pre-checks. Change-Id: Iad7fc73f16ffcbc2ab5f261322bd6777036fd5f6 Signed-off-by: Will Szumski --- ansible/roles/ntp/tasks/sync.yml | 9 +++++++++ ...-until-timedatectl-synchronized-4b0243df78e72550.yaml | 9 +++++++++ 2 files changed, 18 insertions(+) create mode 100644 releasenotes/notes/block-until-timedatectl-synchronized-4b0243df78e72550.yaml diff --git a/ansible/roles/ntp/tasks/sync.yml b/ansible/roles/ntp/tasks/sync.yml index 39d7536c8..25db6c7f3 100644 --- a/ansible/roles/ntp/tasks/sync.yml +++ b/ansible/roles/ntp/tasks/sync.yml @@ -23,4 +23,13 @@ - name: Force hardware clock synchronisation command: hwclock --systohc + + - name: Wait for system clock to be synchronized (up to 10 minutes) + become: True + ansible.builtin.command: timedatectl show --property=NTPSynchronized --value + register: sync_check + until: sync_check.stdout | trim == "yes" + retries: 60 + delay: 10 + failed_when: sync_check.rc != 0 when: ntp_force_sync | bool diff --git a/releasenotes/notes/block-until-timedatectl-synchronized-4b0243df78e72550.yaml b/releasenotes/notes/block-until-timedatectl-synchronized-4b0243df78e72550.yaml new file mode 100644 index 000000000..4716c1341 --- /dev/null +++ b/releasenotes/notes/block-until-timedatectl-synchronized-4b0243df78e72550.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Reduces clock synchronisation race conditions before running Kolla by + waiting for ``timedatectl`` to report ``NTPSynchronized=yes`` when + ``ntp_force_sync`` is enabled. + + This improves reliability of deployments where services are sensitive to + clock skew immediately after NTP configuration. 
From cde3eba8d0138a0b6c90b3af071aa61fb27949ae Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Fri, 22 Nov 2024 14:06:51 +0000 Subject: [PATCH 4/7] Support provisioning hosts without kolla services Use case is for deploying hosts that are not passed through to kolla such as lustre storage nodes. This allows us to reuse the usual set of overcloud commands instead of creating yet another set of similar ones. Change-Id: Ic6fd254977697ce7be6916fc89952b16c283ff17 Signed-off-by: Will Szumski --- ansible/inventory/group_vars/all/kolla | 10 ++ ansible/kolla-ansible.yml | 18 ++- .../templates/overcloud-top-level.j2 | 2 + doc/source/administration/overcloud.rst | 112 ++++++++++++++++++ .../configuration/reference/kolla-ansible.rst | 44 +++++++ etc/kayobe/kolla.yml | 5 + ...pass-through-enabled-7bae0db263794540.yaml | 9 ++ 7 files changed, 197 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/adds-kolla-overcloud-inventory-pass-through-enabled-7bae0db263794540.yaml diff --git a/ansible/inventory/group_vars/all/kolla b/ansible/inventory/group_vars/all/kolla index 411b116e6..20c820fba 100644 --- a/ansible/inventory/group_vars/all/kolla +++ b/ansible/inventory/group_vars/all/kolla @@ -337,6 +337,16 @@ kolla_seed_inventory_pass_through_host_vars_map: >- # hosts. kolla_overcloud_inventory_custom_top_level: +# Flag indicating whether host will be included in the generated kolla +# inventory. True if inventory_hostname is in any of the groups defined in +# kolla_overcloud_inventory_top_level_group_map, false otherwise. +kolla_overcloud_inventory_pass_through_enabled: >- + {{ kolla_overcloud_inventory_top_level_group_map.values() | + map(attribute='groups') | + flatten | + intersect(group_names) is truthy + }} + # Custom overcloud inventory containing a mapping from components to top level # groups. 
kolla_overcloud_inventory_custom_components: diff --git a/ansible/kolla-ansible.yml b/ansible/kolla-ansible.yml index ec6c4e173..cc8fb16f9 100644 --- a/ansible/kolla-ansible.yml +++ b/ansible/kolla-ansible.yml @@ -1,4 +1,15 @@ --- +- name: Collect hosts to map through to kolla-ansible + hosts: overcloud + gather_facts: false + tags: + - always + tasks: + - name: Group by kolla_overcloud_inventory_pass_through_enabled + ansible.builtin.group_by: + key: "kolla_mapped_overcloud_host_{{ kolla_overcloud_inventory_pass_through_enabled }}" + changed_when: false + - name: Gather facts for localhost hosts: localhost gather_facts: true @@ -66,6 +77,7 @@ when: - groups[controller_loadbalancer_group] | length > 0 - item.required | bool + - groups['kolla_mapped_overcloud_host_True'] | length > 0 with_items: - var_name: "kolla_internal_vip_address" description: "Internal API VIP address" @@ -137,7 +149,7 @@ kolla_ansible_inventory_path: "{{ kolla_config_path }}/inventory/seed" - name: Generate Kolla Ansible host vars for overcloud hosts - hosts: overcloud + hosts: kolla_mapped_overcloud_host_True tags: - config - config-validation @@ -171,11 +183,11 @@ - var_name: "kolla_network_interface" description: "Default network" network: "{{ internal_net_name }}" - required: True + required: "{{ kolla_overcloud_inventory_pass_through_enabled | bool }}" - var_name: "kolla_api_interface" description: "API network" network: "{{ internal_net_name }}" - required: True + required: "{{ kolla_overcloud_inventory_pass_through_enabled | bool }}" - var_name: "kolla_storage_interface" description: "Storage network" network: "{{ storage_net_name }}" diff --git a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 index c0efff9e8..ee11d9417 100644 --- a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 +++ b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 @@ -9,7 +9,9 @@ [{{ group }}] # These hostnames must 
be resolvable from your deployment host {% for host in groups.get(group, []) %} +{%- if hostvars[host].kolla_overcloud_inventory_pass_through_enabled | default(true) | bool -%} {{ host }} +{%- endif -%} {% endfor %} {% endfor %} diff --git a/doc/source/administration/overcloud.rst b/doc/source/administration/overcloud.rst index 13080a33f..11023a028 100644 --- a/doc/source/administration/overcloud.rst +++ b/doc/source/administration/overcloud.rst @@ -63,6 +63,118 @@ For example:: To execute the command with root privileges, add the ``--become`` argument. Adding the ``--verbose`` argument allows the output of the command to be seen. +Provisioning Hosts Without Kolla Services +========================================== + +Sometimes it may be necessary to provision hosts that are not included in the +Kolla Ansible inventory - for example, external storage nodes such as Lustre +fileservers, or other infrastructure nodes that require network configuration +and system setup but should not run OpenStack services. + +By default, hosts in groups mapped by ``kolla_overcloud_inventory_top_level_group_map`` +are automatically included in the Kolla Ansible inventory. To exclude a group of +hosts from the Kolla Ansible inventory, do not map that group in +``kolla_overcloud_inventory_top_level_group_map``. + +When a group is excluded from the Kolla Ansible inventory, the following +variables are not required for hosts in that group: + +- ``kolla_internal_vip_address`` +- ``kolla_internal_fqdn`` +- ``kolla_network_interface`` +- ``kolla_api_interface`` + +The hosts can still be provisioned and configured via Kayobe playbooks, allowing +you to use Kayobe for complete infrastructure provisioning while selectively +excluding certain hosts from Kolla Ansible deployments. + +Example: Provision an external storage group +---------------------------------------------- + +If you are using bifrost, follow the regular process to enroll the servers. 
+Configure ``overcloud_group_hosts_map`` to map the hosts into an appropriate +group: + +.. code-block:: yaml + :caption: ``$KAYOBE_CONFIG_PATH/overcloud.yml`` + + overcloud_group_hosts_map: + lustre-servers: + - lustre-server-01 + - lustre-server-02 + +These hosts must also be configured to be members of the ``overcloud`` group: + +.. code-block:: ini + :caption: ``$KAYOBE_CONFIG_PATH/inventory/groups`` + + # Empty group declaration. kayobe overcloud inventory discover will + # populate this group from bifrost inventory. + [lustre-servers] + + # The hosts must be members of the overcloud group + [overcloud:children] + lustre-servers + +You can then run ``kayobe overcloud inventory discover`` to automatically +populate the ``lustre-servers`` group. + +Alternatively, you can provision these hosts by some other means: either +manually or by using an alternative provisioning tool. You would then manually +add these to your inventory. + +.. code-block:: ini + :caption: ``$KAYOBE_CONFIG_PATH/inventory/groups`` + + # This example demonstrates how you can add hosts that are not provisioned + # by bifrost e.g deployed by some external provisioning tool. + + [lustre-servers] + lustre-server-01 + lustre-server-02 + + # The hosts must be members of the overcloud group + [overcloud:children] + lustre-servers + +Ensuring that the IPs used to access these servers are correctly set in +``network-allocations.yml``. + +Ensure the hosts are not mapped through to the kolla inventory: + + .. 
code-block:: yaml + :caption: ``$KAYOBE_CONFIG_PATH/kolla.yml`` + + # Define the Kolla group mapping without including lustre-servers + kolla_overcloud_inventory_top_level_group_map: + control: + groups: + - controllers + network: + groups: + - network + compute: + groups: + - compute + compute-vgpu: + groups: + - compute-vgpu + monitoring: + groups: + - monitoring + storage: + groups: + - storage + # lustre-servers group is intentionally not mapped + +The hosts can then be configured using:: + + (kayobe) $ kayobe overcloud host configure --limit lustre-servers + +This will apply host configuration and network setup. ``kayobe overcloud service +deploy`` will be a no-op for these hosts as they will not be mapped to the +Kolla Ansible inventory. + .. _overcloud-administration-reconfigure: Reconfiguring Containerised Services diff --git a/doc/source/configuration/reference/kolla-ansible.rst b/doc/source/configuration/reference/kolla-ansible.rst index 5fc6e7e45..eca5e7755 100644 --- a/doc/source/configuration/reference/kolla-ansible.rst +++ b/doc/source/configuration/reference/kolla-ansible.rst @@ -412,6 +412,50 @@ to enable debug logging for Nova services: --- nova_logging_debug: true +Inventory Passthrough +--------------------- + +By default, only hosts that are in the groups listed in +``kolla_overcloud_inventory_top_level_group_map`` will be included in the +generated Kolla Ansible inventory. The variable +``kolla_overcloud_inventory_pass_through_enabled`` is dynamically determined +based on whether ``inventory_hostname`` is in any of the groups mapped in +``kolla_overcloud_inventory_top_level_group_map``. + +To exclude hosts from the Kolla Ansible inventory, simply do not map their +groups in ``kolla_overcloud_inventory_top_level_group_map``. 
When a group is +excluded from the mapping, the following variables are not required for hosts +in that group: + +- ``kolla_internal_vip_address`` +- ``kolla_internal_fqdn`` +- ``kolla_network_interface`` +- ``kolla_api_interface`` + +These variables are only required when hosts are included in the Kolla Ansible +inventory. + +Example: exclude a group from Kolla Ansible inventory +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To exclude a dedicated storage group from the Kolla Ansible inventory, do not +include it in ``kolla_overcloud_inventory_top_level_group_map``: + +.. code-block:: yaml + :caption: ``$KAYOBE_CONFIG_PATH/kolla.yml`` + + kolla_overcloud_inventory_top_level_group_map: + control: + groups: + - controllers + network: + groups: + - network + compute: + groups: + - compute + # external-storage group is not mapped - hosts will not be in Kolla inventory + Host variables -------------- diff --git a/etc/kayobe/kolla.yml b/etc/kayobe/kolla.yml index 6eb02a5ec..a89f2949d 100644 --- a/etc/kayobe/kolla.yml +++ b/etc/kayobe/kolla.yml @@ -184,6 +184,11 @@ # concatenation of the top level, component, and service inventories. #kolla_overcloud_inventory_custom: +# Flag indicating whether host will be included in the generated kolla +# inventory. True if inventory_hostname is in any of the groups defined in +# kolla_overcloud_inventory_top_level_group_map, false otherwise. +#kolla_overcloud_inventory_pass_through_enabled: + # Dict mapping from kolla-ansible groups to kayobe groups and variables. Each # item is a dict with the following items: # * groups: A list of kayobe ansible groups to map to this kolla-ansible group. 
diff --git a/releasenotes/notes/adds-kolla-overcloud-inventory-pass-through-enabled-7bae0db263794540.yaml b/releasenotes/notes/adds-kolla-overcloud-inventory-pass-through-enabled-7bae0db263794540.yaml new file mode 100644 index 000000000..42f4671f0 --- /dev/null +++ b/releasenotes/notes/adds-kolla-overcloud-inventory-pass-through-enabled-7bae0db263794540.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The internal network is no longer required on hosts that are not mapped + to the Kolla Ansible inventory. Hosts can be excluded from the Kolla + Ansible inventory by not mapping their groups in + ``kolla_overcloud_inventory_top_level_group_map``. This allows provisioning + of infrastructure hosts such as external storage nodes without requiring + OpenStack-specific network configuration. From 12d50b7c9121040cf4e22d39e1bac5327c012556 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Mon, 23 Mar 2026 17:31:03 +0000 Subject: [PATCH 5/7] Add mechanism to add custom CAs to trust store Change-Id: I38567293492b52e8e2fef80fe1793e3e39477e1c Signed-off-by: Will Szumski --- ansible/control-host-configure.yml | 1 + ansible/infra-vm-host-configure.yml | 1 + ansible/inventory/group_vars/all/trust-store | 26 ++++++++++++++ ansible/overcloud-host-configure.yml | 1 + ansible/roles/trust-store/defaults/main.yml | 8 +++++ ansible/roles/trust-store/tasks/main.yml | 16 +++++++++ ansible/seed-host-configure.yml | 1 + ansible/seed-hypervisor-host-configure.yml | 1 + ansible/trust-store.yml | 21 ++++++++++++ doc/source/configuration/reference/hosts.rst | 34 +++++++++++++++++++ etc/kayobe/trust-store.yml | 21 ++++++++++++ etc/kayobe/trust-store/.gitkeep | 0 ...e-group-ca-selection-f7086efe0a8b3191.yaml | 10 ++++++ 13 files changed, 141 insertions(+) create mode 100644 ansible/inventory/group_vars/all/trust-store create mode 100644 ansible/roles/trust-store/defaults/main.yml create mode 100644 ansible/roles/trust-store/tasks/main.yml create mode 100644 ansible/trust-store.yml create mode 100644 
etc/kayobe/trust-store.yml create mode 100644 etc/kayobe/trust-store/.gitkeep create mode 100644 releasenotes/notes/trust-store-group-ca-selection-f7086efe0a8b3191.yaml diff --git a/ansible/control-host-configure.yml b/ansible/control-host-configure.yml index fb58add50..6954d078e 100644 --- a/ansible/control-host-configure.yml +++ b/ansible/control-host-configure.yml @@ -2,6 +2,7 @@ - import_playbook: "ssh-known-host.yml" - import_playbook: "kayobe-ansible-user.yml" - import_playbook: "logging.yml" +- import_playbook: "trust-store.yml" - import_playbook: "proxy.yml" - import_playbook: "apt.yml" - import_playbook: "dnf.yml" diff --git a/ansible/infra-vm-host-configure.yml b/ansible/infra-vm-host-configure.yml index 2d1595074..cba2d997b 100644 --- a/ansible/infra-vm-host-configure.yml +++ b/ansible/infra-vm-host-configure.yml @@ -2,6 +2,7 @@ - import_playbook: "ssh-known-host.yml" - import_playbook: "kayobe-ansible-user.yml" - import_playbook: "logging.yml" +- import_playbook: "trust-store.yml" - import_playbook: "proxy.yml" - import_playbook: "apt.yml" - import_playbook: "dnf.yml" diff --git a/ansible/inventory/group_vars/all/trust-store b/ansible/inventory/group_vars/all/trust-store new file mode 100644 index 000000000..72028f41b --- /dev/null +++ b/ansible/inventory/group_vars/all/trust-store @@ -0,0 +1,26 @@ +--- +############################################################################### +# Trust store configuration. + +# List of CA certificate file paths discovered in +# $KAYOBE_CONFIG_PATH/trust-store/ and in trust-store/ subdirectories of +# kayobe_env_search_paths. +trust_store_ca_certificates_default: >- + {{ query( + 'ansible.builtin.fileglob', + *((([kayobe_config_path] + + (kayobe_env_search_paths | default([]) | list)) + | unique + | map('regex_replace', '$', '/trust-store/*') + | list))) + | unique | list }} + +# List of additional CA certificate file paths to install on the current host. 
+# This can be set in inventory group_vars to add host class-specific CAs. +trust_store_ca_certificates_extra: [] + +# List of CA certificate file paths to install on the current host. +trust_store_ca_certificates: >- + {{ (trust_store_ca_certificates_default + + (trust_store_ca_certificates_extra | default([]) | list)) + | unique | list }} \ No newline at end of file diff --git a/ansible/overcloud-host-configure.yml b/ansible/overcloud-host-configure.yml index a6cdf4d8b..7faf3ccbb 100644 --- a/ansible/overcloud-host-configure.yml +++ b/ansible/overcloud-host-configure.yml @@ -2,6 +2,7 @@ - import_playbook: "ssh-known-host.yml" - import_playbook: "kayobe-ansible-user.yml" - import_playbook: "logging.yml" +- import_playbook: "trust-store.yml" - import_playbook: "proxy.yml" - import_playbook: "apt.yml" - import_playbook: "dnf.yml" diff --git a/ansible/roles/trust-store/defaults/main.yml b/ansible/roles/trust-store/defaults/main.yml new file mode 100644 index 000000000..37bd1755a --- /dev/null +++ b/ansible/roles/trust-store/defaults/main.yml @@ -0,0 +1,8 @@ +--- +trust_store_ca_certificates: [] +trust_store_ca_path_debian: /usr/local/share/ca-certificates +trust_store_ca_path_redhat: /etc/pki/ca-trust/source/anchors +trust_store_ca_path: "{{ lookup('vars', 'trust_store_ca_path_' ~ ansible_facts.os_family | lower) }}" +trust_store_update_cmd_debian: update-ca-certificates +trust_store_update_cmd_redhat: update-ca-trust +trust_store_update_cmd: "{{ lookup('vars', 'trust_store_update_cmd_' ~ ansible_facts.os_family | lower) }}" diff --git a/ansible/roles/trust-store/tasks/main.yml b/ansible/roles/trust-store/tasks/main.yml new file mode 100644 index 000000000..2bd2c4878 --- /dev/null +++ b/ansible/roles/trust-store/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Copy custom CA to system trust store ({{ trust_store_ca_path }}) + copy: + src: "{{ item }}" + dest: "{{ trust_store_ca_path }}/{{ item | basename }}" + owner: root + group: root + mode: "0660" + loop: "{{ 
trust_store_ca_certificates }}" + register: trust_store_copy_result + become: true + +- name: Update system trust store + command: "{{ trust_store_update_cmd }}" + become: true + when: trust_store_copy_result is changed diff --git a/ansible/seed-host-configure.yml b/ansible/seed-host-configure.yml index 25b0dcc16..bc899f361 100644 --- a/ansible/seed-host-configure.yml +++ b/ansible/seed-host-configure.yml @@ -2,6 +2,7 @@ - import_playbook: "ssh-known-host.yml" - import_playbook: "kayobe-ansible-user.yml" - import_playbook: "logging.yml" +- import_playbook: "trust-store.yml" - import_playbook: "proxy.yml" - import_playbook: "apt.yml" - import_playbook: "dnf.yml" diff --git a/ansible/seed-hypervisor-host-configure.yml b/ansible/seed-hypervisor-host-configure.yml index 56f240848..79fe3a6a0 100644 --- a/ansible/seed-hypervisor-host-configure.yml +++ b/ansible/seed-hypervisor-host-configure.yml @@ -2,6 +2,7 @@ - import_playbook: "ssh-known-host.yml" - import_playbook: "kayobe-ansible-user.yml" - import_playbook: "logging.yml" +- import_playbook: "trust-store.yml" - import_playbook: "proxy.yml" - import_playbook: "apt.yml" - import_playbook: "dnf.yml" diff --git a/ansible/trust-store.yml b/ansible/trust-store.yml new file mode 100644 index 000000000..69596a809 --- /dev/null +++ b/ansible/trust-store.yml @@ -0,0 +1,21 @@ +--- +- name: Add custom CAs to system trust store + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control + gather_facts: true + max_fail_percentage: >- + {{ trust_store_max_fail_percentage | + default(host_configure_max_fail_percentage) | + default(kayobe_max_fail_percentage) | + default(100) }} + vars: + ansible_user: "{{ bootstrap_user }}" + # We can't assume that a virtualenv exists at this point, so use the + # system python interpreter. 
+ ansible_python_interpreter: /usr/bin/python3 + tags: + - config + - trust-store + tasks: + - name: Include trust-store role + include_role: + name: trust-store diff --git a/doc/source/configuration/reference/hosts.rst b/doc/source/configuration/reference/hosts.rst index da2a1467f..7b03784ba 100644 --- a/doc/source/configuration/reference/hosts.rst +++ b/doc/source/configuration/reference/hosts.rst @@ -1430,6 +1430,40 @@ applying the change to the seed hypervisor. For example, to install the libvirt_host_extra_daemon_packages: - trousers +Custom CA certificates +====================== +*tags:* + | ``trust-store`` + +Kayobe can install custom CA certificates on configured hosts using the +``trust-store`` role. + +By default, all files in ``$KAYOBE_CONFIG_PATH/trust-store/`` and in +``trust-store/`` subdirectories of ``kayobe_env_search_paths`` are installed +on hosts targeted by the host configure playbooks. In the common case, users +can simply drop CA certificate files into ``etc/kayobe/trust-store/`` or an +environment-specific ``trust-store/`` directory and allow Kayobe to distribute +them. + +It is also possible to modify the certificate list for specific host classes. +To add host class-specific certificates, set +``trust_store_ca_certificates_extra`` in group variables under +``inventory/group_vars//trust-store``. + +Example: adding a custom CA certificate for compute hosts +---------------------------------------------------------- + +To install an additional CA certificate on compute hosts: + +.. code-block:: yaml + :caption: ``inventory/group_vars/compute/trust-store`` + + trust_store_ca_certificates_extra: + - "{{ kayobe_env_config_path }}/kolla/certificates/ca/vault.crt" + +In this example, all other files discovered via the default trust-store search +paths continue to be installed on compute hosts. 
+ Swap ==== diff --git a/etc/kayobe/trust-store.yml b/etc/kayobe/trust-store.yml new file mode 100644 index 000000000..af7b30bd0 --- /dev/null +++ b/etc/kayobe/trust-store.yml @@ -0,0 +1,21 @@ +--- +# Kayobe trust store configuration. + +############################################################################### +# Trust store configuration. + +# List of CA certificate file paths discovered in +# $KAYOBE_CONFIG_PATH/trust-store/ and in trust-store/ subdirectories of +# kayobe_env_search_paths. +#trust_store_ca_certificates_default: + +# List of additional CA certificate file paths to install on the current host. +# This can be set in inventory group_vars to add host class-specific CAs. +#trust_store_ca_certificates_extra: + +# List of CA certificate file paths to install on the current host. +#trust_store_ca_certificates: + +############################################################################### +# Dummy variable to allow Ansible to accept this file. +workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/trust-store/.gitkeep b/etc/kayobe/trust-store/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/releasenotes/notes/trust-store-group-ca-selection-f7086efe0a8b3191.yaml b/releasenotes/notes/trust-store-group-ca-selection-f7086efe0a8b3191.yaml new file mode 100644 index 000000000..f06ec01ec --- /dev/null +++ b/releasenotes/notes/trust-store-group-ca-selection-f7086efe0a8b3191.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Added a feature that allows operators to install custom CA certificates + into the system trust store. Operators can drop CA files into + ``$KAYOBE_CONFIG_PATH/trust-store/`` or into ``trust-store/`` + subdirectories of environment search paths. See :kayobe-doc:`Kayobe + documentation on custom CA certificates + ` for more + details. 
From 0fdc9c2edc3f865f2aa9cd331f8d19bc1d0fd060 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Mon, 15 Dec 2025 11:20:22 +0000 Subject: [PATCH 6/7] Support configuring DNS on Ironic Neutron networks This can be useful if you are customisting the fully qualified domain for the internal API so that Ironic Python Agent can resolve the domain to post back data to the conductors. Change-Id: I45a578804a8b2c71ad4a81b3eae6b6a4f874a1e2 Signed-off-by: Will Szumski --- ansible/provision-net.yml | 3 + .../configuration/reference/network.rst | 60 ++++++++++++++++++- kayobe/plugins/filter/networks.py | 7 +++ ...rs-network-attribute-3610bd501b4963e5.yaml | 8 +++ 4 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/adds-neutron-dns-servers-network-attribute-3610bd501b4963e5.yaml diff --git a/ansible/provision-net.yml b/ansible/provision-net.yml index c64a70867..94a0d1f60 100644 --- a/ansible/provision-net.yml +++ b/ansible/provision-net.yml @@ -28,6 +28,7 @@ subnets: - name: "{{ kolla_ironic_inspection_network }}" cidr: "{{ inspection_net_name | net_cidr }}" + dns_nameservers: "{{ inspection_net_name | net_neutron_dns_servers | default(omit, True) }}" gateway_ip: "{{ inspection_net_name | net_neutron_gateway or provision_wl_net_name | net_gateway | default(omit, True) }}" allocation_pool_start: "{{ inspection_net_name | net_neutron_allocation_pool_start }}" allocation_pool_end: "{{ inspection_net_name | net_neutron_allocation_pool_end }}" @@ -42,6 +43,7 @@ subnets: - name: "{{ kolla_ironic_provisioning_network }}" cidr: "{{ provision_wl_net_name | net_cidr }}" + dns_nameservers: "{{ provision_wl_net_name | net_neutron_dns_servers | default(omit, True) }}" gateway_ip: "{{ provision_wl_net_name | net_neutron_gateway or provision_wl_net_name | net_gateway | default(omit, True) }}" allocation_pool_start: "{{ provision_wl_net_name | net_neutron_allocation_pool_start }}" allocation_pool_end: "{{ provision_wl_net_name | net_neutron_allocation_pool_end 
}}" @@ -56,6 +58,7 @@ subnets: - name: "{{ kolla_ironic_cleaning_network }}" cidr: "{{ cleaning_net_name | net_cidr }}" + dns_nameservers: "{{ cleaning_net_name | net_neutron_dns_servers | default(omit, True) }}" gateway_ip: "{{ cleaning_net_name | net_neutron_gateway or cleaning_net_name | net_gateway | default(omit, True) }}" allocation_pool_start: "{{ cleaning_net_name | net_neutron_allocation_pool_start }}" allocation_pool_end: "{{ cleaning_net_name | net_neutron_allocation_pool_end }}" diff --git a/doc/source/configuration/reference/network.rst b/doc/source/configuration/reference/network.rst index d104f9897..42c802688 100644 --- a/doc/source/configuration/reference/network.rst +++ b/doc/source/configuration/reference/network.rst @@ -39,8 +39,13 @@ supported: IP address of the gateway for the hardware introspection network. ``neutron_gateway`` IP address of the gateway for a neutron subnet based on this network. +``neutron_dns_servers`` + List of DNS servers to be configured when registering the network in Neutron. ``inspection_dns_servers`` - List of DNS servers used during hardware introspection. + List of DNS servers used for :ironic-doc:`unmanaged inspection `. + These are configured as options on the standalone dnsmasq instance that hands + out IP addresses during auto-discovery (as opposed to the neutron controlled DHCP server + used for managed introspection). ``vlan`` VLAN ID. ``mtu`` @@ -825,6 +830,24 @@ allocation pool: Workload Inspection Network --------------------------- +The configuration of the inspection network depends on whether or not you are +using :ironic-doc:`unmanaged or managed inspection +`. + +Unmanaged inspection is the default when auto-discovery is enabled with +``kolla_inspector_enable_discovery``. In this mode of operation, a standalone +dnsmasq instance is configured to hand out DHCP leases to any unknown devices +that show up on the network. 
The network properties starting with the +``inspection_`` prefix, e.g. ``inspection_allocation_pool_start``, control configuration of this service. + +Managed inspection uses Neutron to hand out the DHCP leases. The properties +used to configure the network registered in Neutron use the prefix: +``neutron_``, e.g. ``neutron_allocation_pool_start``. The variables prefixed with +``inspection_`` have no effect in this mode of operation. + +Unmanaged Inspection +^^^^^^^^^^^^^^^^^^^^ + If using the overcloud to inspect bare metal workload (compute) hosts, it is necessary to define a DHCP allocation pool for the overcloud's ironic inspector DHCP server using the ``inspection_allocation_pool_start`` and @@ -851,6 +874,41 @@ and inspection DNS servers: This pool should not overlap with a kayobe or neutron allocation pool on the same network. +Managed Inspection +^^^^^^^^^^^^^^^^^^ + +.. note:: + + Using managed inspection requires that the Ironic ports + are created either via out-of-band inspection or manually. + +The allocation pool used for the neutron network is configured via the +``neutron_allocation_pool_start`` and ``neutron_allocation_pool_end`` +variables. + +When using managed inspection, ``neutron_dns_servers`` can be used to control +the DNS servers handed out by DHCP. This can be helpful to resolve the ironic +callback URL, if, for example, ``kolla_internal_fqdn`` has been set. + +.. note:: + + This example assumes that the ``example`` network is mapped to + ``provision_wl_net_name``. + +To configure a network called ``example`` with a neutron allocation pool +and neutron DNS servers: + +.. code-block:: yaml + + example_neutron_allocation_pool_start: 10.0.1.196 + example_neutron_allocation_pool_end: 10.0.1.254 + example_neutron_dns_servers: [10.0.1.10, 10.0.1.11] + +.. note:: + + This pool should not overlap with a kayobe or neutron allocation pool on the + same network. 
+ Neutron Networking ================== diff --git a/kayobe/plugins/filter/networks.py b/kayobe/plugins/filter/networks.py index 5a48dce70..50fe5caf8 100644 --- a/kayobe/plugins/filter/networks.py +++ b/kayobe/plugins/filter/networks.py @@ -252,6 +252,12 @@ def net_inspection_dns_servers(context, name, inventory_hostname=None): inventory_hostname) +@jinja2.pass_context +def net_neutron_dns_servers(context, name, inventory_hostname=None): + return net_attr(context, name, 'neutron_dns_servers', + inventory_hostname) + + @jinja2.pass_context def net_neutron_allocation_pool_start(context, name, inventory_hostname=None): return net_attr(context, name, 'neutron_allocation_pool_start', @@ -800,6 +806,7 @@ def get_filters(): 'net_inspection_dns_servers': net_inspection_dns_servers, 'net_neutron_allocation_pool_start': net_neutron_allocation_pool_start, 'net_neutron_allocation_pool_end': net_neutron_allocation_pool_end, + 'net_neutron_dns_servers': net_neutron_dns_servers, 'net_neutron_gateway': net_neutron_gateway, 'net_vlan': net_vlan, 'net_mtu': net_mtu, diff --git a/releasenotes/notes/adds-neutron-dns-servers-network-attribute-3610bd501b4963e5.yaml b/releasenotes/notes/adds-neutron-dns-servers-network-attribute-3610bd501b4963e5.yaml new file mode 100644 index 000000000..0ebe88b64 --- /dev/null +++ b/releasenotes/notes/adds-neutron-dns-servers-network-attribute-3610bd501b4963e5.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Adds ``neutron_dns_servers`` network attribute to configure DNS on the + networks registered in OpenStack. See :kayobe-doc:`Kayobe + documentation on network configuration + ` for more + details. 
From c863900248e95eb6b2c004940e536f3162ec8968 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Mon, 20 Apr 2026 09:24:12 +0100 Subject: [PATCH 7/7] kolla-ansible inventory sync See: - https://review.opendev.org/c/openstack/kolla-ansible/+/924575 - https://review.opendev.org/c/openstack/kolla-ansible/+/959853 Change-Id: Ib7c8e5451ca8bdcea8c4b4dcb1093680acce0778 Signed-off-by: Will Szumski --- .../roles/kolla-ansible/templates/overcloud-services.j2 | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ansible/roles/kolla-ansible/templates/overcloud-services.j2 b/ansible/roles/kolla-ansible/templates/overcloud-services.j2 index e80e0bca4..b4d3bf049 100644 --- a/ansible/roles/kolla-ansible/templates/overcloud-services.j2 +++ b/ansible/roles/kolla-ansible/templates/overcloud-services.j2 @@ -129,6 +129,9 @@ control compute network +[neutron-ovn-vpn-agent:children] +neutron + # Cinder [cinder-api:children] cinder @@ -410,6 +413,10 @@ monitoring [prometheus-libvirt-exporter:children] compute +[prometheus-openstack-network-exporter:children] +compute +network + [prometheus-valkey-exporter:children] valkey