File _service:obs_scm:examples.obscpio of Package ansible-container (Project SUSE:ALP:Workloads)
File: examples/ansible/create_alp_vm.yml

---
#
# An example playbook showing how to create an ALP VM.
#
# Ensure the system is ready to act as a libvirt host.
# NOTE: A reboot may be required if packages need to be installed.
- name: Setup ALP system as a libvirt host
  import_playbook: setup_libvirt_host.yml
  tags: libvirt

- name: Create an ALP VM
  hosts: alphost
  vars:
    appliance:
      name: alptest
      mirror: https://download.opensuse.org/repositories/SUSE:/ALP/images/
      image: ALP-VM.x86_64-0.0.1-kvm-Build24.3
      format: qcow2
      checksum: sha256
      vcpus: 2
      memory_mb: 1536
      disk_size_gb: 30
    libvirt:
      images: /var/lib/libvirt/images
      network: default_network
  tasks:
    - name: Check if we already have the ALP VM image
      ansible.builtin.stat:
        path: "{{ libvirt.images }}/{{ appliance.image }}.{{ appliance.format }}"
      register: stat_vm_image

    - name: Download ALP VM image
      become: true
      ansible.builtin.get_url:
        dest: "{{ libvirt.images }}"
        url: "{{ item.url }}"
        checksum: "{{ appliance.checksum }}:{{ item.url }}.{{ appliance.checksum }}"
        mode: '0644'
      loop:
        - name: "{{ appliance.image }}"
          url: "{{ appliance.mirror }}/{{ appliance.image }}.{{ appliance.format }}"
      loop_control:
        label: "{{ item.name }}"
      when:
        - not stat_vm_image.stat.exists

    - name: Query list of configured libvirt networks
      become: true
      community.libvirt.virt_net:
        command: list_nets
      register: virt_net_list_nets

    - name: Fail if required network is not available
      ansible.builtin.fail:
        msg: "ERROR: required '{{ libvirt.network }}' missing!"
      when:
        - libvirt.network not in virt_net_list_nets.list_nets

    - name: Determine list of SSH public keys
      ansible.builtin.set_fact:
        ssh_pub_keys: "{{ lookup('ansible.builtin.fileglob', '~/.ssh/*.pub').split(',') }}"

    - name: Generate ignition config file
      become: true
      ansible.builtin.template:
        src: "config.ign.j2"
        dest: "{{ libvirt.images }}/{{ appliance.name }}.ign"
        mode: '0644'

    - name: Query list of libvirt VMs
      become: true
      community.libvirt.virt:
        command: list_vms
      register: virt_list_vms

    - name: Create the ALP VM if not running
      become: true
      ansible.builtin.command: >-
        /usr/local/bin/virt-install
        --connect qemu:///system
        --import
        --name {{ appliance.name }}
        --osinfo opensusetumbleweed
        --virt-type kvm --hvm
        --machine q35
        --boot hd
        --cpu host-passthrough
        --video vga
        --console pty,target_type=virtio
        --noautoconsole
        --network network={{ libvirt.network }}
        --rng /dev/urandom
        --vcpu {{ appliance.vcpus }}
        --memory {{ appliance.memory_mb }}
        --disk size={{ appliance.disk_size_gb }},backing_store={{ libvirt.images }}/{{ appliance.image }}.{{ appliance.format }},backing_format={{ appliance.format }},bus=virtio,cache=none
        --graphics vnc,listen=0.0.0.0
        --sysinfo type=fwcfg,entry0.name="opt/com.coreos/config",entry0.file="{{ libvirt.images }}/{{ appliance.name }}.ign"
        --tpm backend.type=emulator,backend.version=2.0,model=tpm-tis
      register: virt_install_vm
      changed_when:
        - "virt_install_vm.rc == 0"
      when:
        - appliance.name not in virt_list_vms.list_vms

    - name: Query list of libvirt VMs
      become: true
      community.libvirt.virt:
        command: list_vms
      register: virt_list_vms

    - name: Show that the ALP VM has been created
      ansible.builtin.debug:
        msg: "Running VMs: {{ virt_list_vms.list_vms | join(', ') }}"
      when:
        - appliance.name in virt_list_vms.list_vms
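The playbooks in this archive are written against the alphost inventory entry defined in examples/ansible/inventory/inventory.yaml (shown further down). A minimal invocation might look like the following sketch, assuming the commands are run from the examples/ansible directory; the collection names are taken from the module FQCNs used throughout the playbooks:

# Install the collections the playbooks reference
ansible-galaxy collection install community.libvirt community.general containers.podman ansible.posix ansible.utils

# Create the ALP VM using the bundled inventory
ansible-playbook -i inventory/inventory.yaml create_alp_vm.yml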
File: examples/ansible/create_tumbleweed_vm.yml

---
#
# An example playbook showing how to create an openSUSE Tumbleweed VM.
#
# Ensure the system is ready to act as a libvirt host.
# NOTE: A reboot may be required if packages need to be installed.
- name: Setup ALP system as a libvirt host
  import_playbook: setup_libvirt_host.yml
  tags: libvirt

- name: Create an openSUSE Tumbleweed appliance
  hosts: alphost
  vars:
    appliance:
      name: tumbleweed
      mirror: https://download.opensuse.org/tumbleweed/appliances
      image: openSUSE-Tumbleweed-Minimal-VM.x86_64-kvm-and-xen
      format: qcow2
      checksum: sha256
      vcpus: 2
      memory_mb: 2048
      disk_size_gb: 30
    libvirt:
      images: /var/lib/libvirt/images
      network: default_network
  tasks:
    - name: Check if we already have the openSUSE Tumbleweed image
      ansible.builtin.stat:
        path: "{{ libvirt.images }}/{{ appliance.image }}.{{ appliance.format }}"
      register: stat_vm_image

    - name: Download openSUSE Tumbleweed appliance image
      become: true
      ansible.builtin.get_url:
        dest: "{{ libvirt.images }}"
        url: "{{ item.url }}"
        checksum: "{{ appliance.checksum }}:{{ item.url }}.{{ appliance.checksum }}"
        mode: '0644'
      loop:
        - name: "{{ appliance.image }}"
          url: "{{ appliance.mirror }}/{{ appliance.image }}.{{ appliance.format }}"
      loop_control:
        label: "{{ item.name }}"
      when:
        - not stat_vm_image.stat.exists

    - name: Query list of configured libvirt networks
      become: true
      community.libvirt.virt_net:
        command: list_nets
      register: virt_net_list_nets

    - name: Fail if required network is not available
      ansible.builtin.fail:
        msg: "ERROR: required '{{ libvirt.network }}' missing!"
      when:
        - libvirt.network not in virt_net_list_nets.list_nets

    - name: Query list of libvirt VMs
      become: true
      community.libvirt.virt:
        command: list_vms
      register: virt_list_vms

    - name: Create the openSUSE Tumbleweed appliance if not running
      become: true
      ansible.builtin.command: >-
        /usr/local/bin/virt-install
        --connect qemu:///system
        --import
        --name {{ appliance.name }}
        --osinfo opensusetumbleweed
        --virt-type kvm --hvm
        --machine q35
        --boot hd
        --cpu host-passthrough
        --video vga
        --console pty,target_type=virtio
        --noautoconsole
        --network network={{ libvirt.network }}
        --rng /dev/urandom
        --vcpu {{ appliance.vcpus }}
        --memory {{ appliance.memory_mb }}
        --cloud-init
        --disk size={{ appliance.disk_size_gb }},backing_store={{ libvirt.images }}/{{ appliance.image }}.{{ appliance.format }},backing_format={{ appliance.format }},bus=virtio,cache=none
        --graphics vnc,listen=0.0.0.0
      register: virt_install_vm
      changed_when:
        - "virt_install_vm.rc == 0"
      when:
        - ('tumbleweed' not in virt_list_vms.list_vms)

    - name: Query list of libvirt VMs
      become: true
      community.libvirt.virt:
        command: list_vms
      register: virt_list_vms

    - name: Show that Tumbleweed appliance has been created
      ansible.builtin.debug:
        msg: "Running VMs: {{ virt_list_vms.list_vms | join(', ') }}"
      when:
        - ('tumbleweed' in virt_list_vms.list_vms)
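After either VM playbook completes, the guest can be inspected with the standard libvirt tools on the host, using the same qemu:///system connection the plays use; for example:

virsh --connect qemu:///system list --all          # the 'alptest' or 'tumbleweed' guest should be listed
virsh --connect qemu:///system console tumbleweed  # attach to the serial console (leave with Ctrl+])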
File: examples/ansible/inventory/group_vars/all/python_interpreter.yaml

---
ansible_python_interpreter: /usr/bin/python3

File: examples/ansible/inventory/inventory.yaml

alphost_group:
  hosts:
    alphost:
      ansible_host: host.containers.internal
      ansible_python_interpreter: /usr/bin/python3

File: examples/ansible/network.yml

---
- name: Configure Networking
  hosts: alphost
  vars:
    static_nics:
      - name: enp2s0
        ifname: enp2s0
        ip4: 192.168.181.3/24
        gw4: 192.168.181.2
        dns4:
          - 8.8.8.8
      - name: enp3s0
        ifname: enp3s0
        ip4: 192.168.181.4/24
        gw4: 192.168.181.2
        dns4:
          - 8.8.8.8
    bonds:
      - name: bondcon0
        ifname: bond0
        ip4: 192.168.181.10/24
        gw4: 192.168.181.2
        mode: active-backup
      - name: bondcon1
        ifname: bond1
        ip4: 192.168.181.11/24
        gw4: 192.168.181.2
        mode: balance-alb
    bonded_nics:
      - name: bond0-if1
        ifname: enp4s0
        master: bond0
      - name: bond0-if2
        ifname: enp5s0
        master: bond0
      - name: bond1-if1
        ifname: enp6s0
        master: bond1
      - name: bond1-if2
        ifname: enp7s0
        master: bond1
  tasks:
    - name: Gather the package facts
      ansible.builtin.package_facts:
        manager: auto

    - name: Ensure NetworkManager is installed
      ansible.builtin.package:
        name: "{{ item }}"
        state: present
      become: true
      loop:
        - NetworkManager

    - name: Configure NIC
      community.general.nmcli:
        conn_name: '{{ item.name }}'
        ifname: '{{ item.ifname }}'
        ip4: '{{ item.ip4 }}'
        gw4: '{{ item.gw4 }}'
        dns4: '{{ item.dns4 }}'
        state: present
        autoconnect: true
        type: ethernet
      become: true
      loop: '{{ static_nics }}'

    - name: Create bonds
      community.general.nmcli:
        type: bond
        conn_name: '{{ item.name }}'
        ifname: '{{ item.ifname }}'
        ip4: '{{ item.ip4 }}'
        gw4: '{{ item.gw4 }}'
        mode: '{{ item.mode }}'
        state: present
      become: true
      loop: "{{ bonds }}"

    - name: Add NICs to bonds
      community.general.nmcli:
        type: bond-slave
        conn_name: '{{ item.name }}'
        ifname: '{{ item.ifname }}'
        state: present
        master: '{{ item.master }}'
      become: true
      loop: "{{ bonded_nics }}"

    - name: Ping test Bond IPs
      ansible.builtin.command: >-
        ping -c 1 -W 0.1 {{ item.ip4 | ansible.utils.ipaddr('address') }}
      loop: "{{ bonds }}"
      changed_when: false

    - name: Ping test static nics IPs
      ansible.builtin.command: >-
        ping -c 1 -W 0.1 {{ item.ip4 | ansible.utils.ipaddr('address') }}
      loop: "{{ static_nics }}"
      changed_when: false

File: examples/ansible/playbook.yml

---
- name: Ensure Alpha Host Setup
  hosts: alphost
  tasks:
    - name: Site | hello world
      ansible.builtin.command: echo "Hi! Ansible is working"
      changed_when: false

    - name: Gather the package facts
      ansible.builtin.package_facts:
        manager: auto

    - name: Print the package facts
      ansible.builtin.debug:
        var: ansible_facts.packages

    - name: Ensure NetworkManager is installed
      ansible.builtin.package:
        name: "{{ item }}"
        state: present
      become: true
      with_items:
        - NetworkManager

    - name: Deactivate Wireless Network Interfaces
      ansible.builtin.command: nmcli radio wifi off
      become: true
      when: "'NetworkManager' in ansible_facts.packages"
      changed_when: false

    - name: Test ssh
      ansible.builtin.wait_for:
        host: "{{ ansible_host }}"
        port: 22
      delegate_to: localhost

    - name: Test webpage access
      ansible.builtin.uri:
        url: https://www.example.com
        return_content: true
      register: webpage

File: examples/ansible/setup_cockpit.yml

---
# Ansible Playbook: Setup Cockpit Web server on ALP Dolomite
# Description: This Ansible playbook automates the deployment of the Cockpit Web server on an ALP Dolomite host.
# The steps are based on: [https://documentation.suse.com/alp/dolomite/html/alp-dolomite/available-alp-workloads.html#task-run-cockpit-with-podman]
# Administering SUSE ALP Dolomite using Cockpit Documentation: [https://documentation.suse.com/alp/dolomite/single-html/cockpit-alp-dolomite/]
- name: Setup Cockpit Web server
  hosts: alphost
  become: true
  vars:
    workload:
      name: cockpit-ws
      image: registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/cockpit-ws:latest
  tasks:
    - name: Install required packages, if any, for workload {{ workload.name }}
      ansible.builtin.package:
        name: "{{ item }}"
        state: present
      notify: Reboot
      loop:
        - cockpit-bridge
        - cockpit-tukit

    - name: Reboot right now if necessary
      ansible.builtin.meta: flush_handlers

    - name: Retrieve image for workload {{ workload.name }}
      containers.podman.podman_image:
        name: "{{ workload.image }}"
        state: present

    - name: Install Cockpit Web server container
      ansible.builtin.command: >-
        podman container runlabel install {{ workload.image }}
      register: workload_runlabel_install
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Ensure service can be started for workload {{ workload.name }}
      ansible.builtin.systemd_service:
        name: "cockpit.service"
        state: "started"
        enabled: true

    - name: Inform user to access the Cockpit Web user interface
      ansible.builtin.debug:
        msg: >-
          Cockpit Web UI is running on https://{{ ansible_default_ipv4.address }}:9090
          Please accept the warning about the self-signed SSL certificate to access it.

  handlers:
    - name: Reboot
      ansible.builtin.reboot:
        reboot_timeout: 600
        post_reboot_delay: 60
File: examples/ansible/setup_firewalld.yml

---
# Ansible Playbook: Setup firewalld using Podman on SUSE ALP Dolomite
# Description: This Ansible playbook automates the deployment of firewalld using Podman on SUSE ALP Dolomite.
# The deployment adds firewall capability to ALP Dolomite to define the trust level of network connections or interfaces.
# Key Considerations:
# - The container image utilizes the system's dbus instance. Thus, dbus and polkit configuration files must be installed initially.
# - The systemd service and its configuration file allow the container to start and stop via systemd with Podman as the container manager.
# - The `/usr/local/bin/firewall-cmd` serves as a wrapper to invoke firewall-cmd inside the container, with both Docker and Podman being supported.
# Based on: "Running firewalld using Podman on SUSE ALP Dolomite". Documentation available at:
# [https://documentation.suse.com/alp/dolomite/single-html/firewalld-podman-alp-dolomite/]
- name: Setup firewalld using Podman on SUSE ALP Dolomite
  hosts: alphost
  become: true
  vars:
    workload:
      name: firewalld
      image: registry.opensuse.org/suse/alp/workloads/tumbleweed_images/suse/alp/workloads/firewalld
  tasks:
    - name: Gather package facts
      ansible.builtin.package_facts:
        manager: "rpm"

    - name: Fail if firewalld is installed locally
      ansible.builtin.fail:
        msg: "Firewalld is installed locally. Please remove it before installing this container."
      when: "'firewalld' in ansible_facts.packages"

    - name: Retrieve image for workload
      containers.podman.podman_image:
        name: "{{ workload.image }}"
        state: present

    - name: Initialize the environment
      ansible.builtin.command: >-
        podman container runlabel install "{{ workload.image }}"
      register: workload_runlabel_install
      changed_when:
        - "('already exist' not in workload_runlabel_install.stdout)"

    - name: Ensure polkit daemon is restarted (if necessary)
      ansible.builtin.service:
        name: polkit
        state: restarted
      when:
        - "'etc/polkit-1/actions/org.fedoraproject.FirewallD1.policy' in workload_runlabel_install.stdout"

    - name: Start and enable firewalld using systemd
      ansible.builtin.service:
        name: "{{ workload.name }}"
        state: started
        enabled: true

    - name: Display completion message
      ansible.builtin.debug:
        msg: >-
          "Firewalld workload setup complete."
          "Use the /usr/local/bin/firewall-cmd wrapper to manage the firewalld instance."
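Once the firewalld workload is active, the /usr/local/bin/firewall-cmd wrapper mentioned in the comments accepts the usual firewall-cmd options; for example:

/usr/local/bin/firewall-cmd --state      # should report: running
/usr/local/bin/firewall-cmd --list-all   # show the configuration of the default zone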
File: examples/ansible/setup_gnome_display_manager.yml

---
# Ansible Playbook: Deploy and run GNOME Display Manager on ALP Dolomite
# Description: This Ansible playbook automates the deployment and operation of the GNOME Display Manager (GDM) on SUSE ALP Dolomite using Podman.
# This deployment allows users to run GDM within a container environment, providing a basic GNOME desktop experience.
# Based on: "Running the GNOME Display Manager workload using Podman on SUSE ALP Dolomite".
# Documentation reference: [https://documentation.suse.com/alp/dolomite/html/alp-dolomite/available-alp-workloads.html#task-run-gdm-with-podman]
- name: Deploy and run GNOME Display Manager on ALP Dolomite
  hosts: alphost
  become: true
  vars:
    workload:
      name: gdm
      image: registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/gdm:latest
  tasks:
    - name: Install required packages, if any, for workload {{ workload.name }}
      ansible.builtin.package:
        name: ['accountsservice', 'systemd-experimental', 'python3-selinux']
        state: present
      notify: Reboot

    - name: Reboot right now if necessary
      ansible.builtin.meta: flush_handlers

    - name: Set SELinux to permissive mode
      ansible.posix.selinux:
        policy: targeted
        state: permissive

    - name: Retrieve image for workload {{ workload.name }}
      containers.podman.podman_image:
        name: "{{ workload.image }}"
        state: present

    - name: Apply container runlabel install for workload {{ workload.name }}
      ansible.builtin.command: >-
        podman container runlabel install {{ workload.image }}
      register: workload_runlabel_install
      notify: Reload systemd daemon
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Reload systemd daemon now
      ansible.builtin.meta: flush_handlers

    - name: Reload dbus service
      ansible.builtin.systemd:
        name: dbus
        state: reloaded

    - name: Restart accounts-daemon service
      ansible.builtin.systemd:
        name: accounts-daemon
        state: started
        enabled: true

    - name: Start service for workload {{ workload.name }}
      ansible.builtin.systemd:
        name: gdm.service
        state: started
        enabled: true

    - name: Display completion message
      ansible.builtin.debug:
        msg: >-
          GNOME Display Manager (GDM) has been successfully deployed and started on ALP Dolomite.
          After you log in, a basic GNOME environment opens.

  handlers:
    - name: Reboot
      ansible.builtin.reboot:
        reboot_timeout: 600
        post_reboot_delay: 60

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true

File: examples/ansible/setup_grafana.yml

---
# Ansible Playbook: Setup Grafana on SUSE ALP Dolomite
# Description: This Ansible playbook automates the deployment of Grafana on a SUSE ALP Dolomite host.
# The steps include fetching the Grafana image, setting up the Grafana container, and providing access information.
# The steps are based on https://documentation.suse.com/alp/dolomite/html/alp-dolomite/available-alp-workloads.html#task-run-grafana-with-podman
- name: Setup ALP system for Grafana
  hosts: alphost
  become: true
  vars:
    workload:
      name: grafana
      image: registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/grafana:latest
  tasks:
    - name: Retrieve image for Grafana
      containers.podman.podman_image:
        name: "{{ workload.image }}"
        state: present

    - name: Initialize the environment
      ansible.builtin.command: >-
        podman container runlabel install "{{ workload.image }}"
      register: workload_runlabel_install
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Start and enable Grafana using systemd
      ansible.builtin.service:
        name: "{{ workload.name }}"
        state: started
        enabled: true

    - name: Display Grafana access information
      ansible.builtin.debug:
        msg:
          - "Please open the Grafana UI at http://{{ ansible_default_ipv4.address }}:3000."
          - "Log in to Grafana. The default user name and password are both set to 'admin'. After logging in, enter a new password."
          - "Follow the on-screen prompts to complete the configuration."
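The workload playbooks above all follow the same pattern: pull the image, run 'podman container runlabel install', then enable a systemd unit. A quick way to verify any of them on the host is therefore, for example:

systemctl status grafana    # or cockpit.service, gdm.service, firewalld, ...
podman ps                   # the workload container should be listed as running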
File: examples/ansible/setup_kea_dhcp_server.yml

---
# Ansible Playbook: Manage Kea DHCPv4 Server Workload on ALP Host
# Description: This Ansible playbook automates the setup of the Kea DHCPv4 server workload
# on an ALP host. It follows the steps documented in the URL provided below.
# Kea Workload Documentation: https://build.opensuse.org/package/view_file/SUSE:ALP:Workloads/kea-container/README.md?expand=1
- name: Deploying and Managing the Kea DHCP server workload
  hosts: alphost
  become: true
  vars:
    workload:
      name: kea
      image: registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/kea:latest
  tasks:
    - name: Pull the Kea DHCP server container image
      containers.podman.podman_image:
        name: "{{ workload.image }}"
        state: present

    - name: Install all required parts of the Kea workload
      ansible.builtin.command: >-
        podman container runlabel install {{ workload.image }}
      register: workload_runlabel_install
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Add firewall exception rule for DHCP
      ansible.posix.firewalld:
        service: dhcp
        permanent: true
        state: enabled
        immediate: true

    - name: Configure Kea DHCPv4 using template
      ansible.builtin.template:
        src: "kea-dhcp4.conf.j2"
        dest: "/etc/kea/kea-dhcp4.conf"
        mode: '0644'
      notify: Reload Kea configuration

    - name: Start Kea DHCPv4 server using systemd
      ansible.builtin.systemd:
        name: kea-dhcp4.service
        state: started
        enabled: true

  handlers:
    - name: Reload Kea configuration
      ansible.builtin.command: /usr/local/bin/keactrl reload
      register: kea_reload_result
      changed_when:
        - '"INFO/keactrl: Reloading kea-dhcp4..." in kea_reload_result.stdout'

File: examples/ansible/setup_kea_dhcpv6_server.yml

---
# Ansible Playbook: Manage Kea DHCPv6 Server Workload on ALP Host
# Description: This Ansible playbook automates the setup of the Kea DHCPv6 server workload
# on an ALP host. It follows the steps documented in the URL provided below.
# Kea Workload Documentation: https://build.opensuse.org/package/view_file/SUSE:ALP:Workloads/kea-container/README.md?expand=1
- name: Deploying and Managing the Kea DHCP server workload
  hosts: alphost
  become: true
  vars:
    workload:
      name: kea
      image: registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/kea:latest
  tasks:
    - name: Pull the Kea DHCP server container image
      containers.podman.podman_image:
        name: "{{ workload.image }}"
        state: present

    - name: Install all required parts of the Kea workload
      ansible.builtin.command: >-
        podman container runlabel install {{ workload.image }}
      register: workload_runlabel_install
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Add firewall exception rule for DHCP
      ansible.posix.firewalld:
        service: dhcpv6
        permanent: true
        state: enabled
        immediate: true

    - name: Configure Kea DHCPv6 using template
      ansible.builtin.template:
        src: "kea-dhcp6.conf.j2"
        dest: "/etc/kea/kea-dhcp6.conf"
        mode: '0644'
      notify: Reload Kea configuration

    - name: Start Kea DHCPv6 server using systemd
      ansible.builtin.systemd:
        name: kea-dhcp6.service
        state: started
        enabled: true

  handlers:
    - name: Reload Kea configuration
      ansible.builtin.command: /usr/local/bin/keactrl reload
      register: kea_reload_result
      changed_when:
        - '"INFO/keactrl: Reloading kea-dhcp6..." in kea_reload_result.stdout'
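Both Kea playbooks reload the servers through the /usr/local/bin/keactrl wrapper that the workload installs; after a run, the same wrapper can be used to check which servers are active, e.g.:

/usr/local/bin/keactrl status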
File: examples/ansible/setup_libvirt_host.yml

---
# Ansible Playbook: Setup SUSE ALP Dolomite as a libvirt Host
# Description: This Ansible playbook automates the setup of a SUSE ALP Dolomite host as a libvirt host.
# The steps encompass installing necessary packages for the workload, ensuring system readiness through reboots if necessary,
# fetching the required images for kvm-server and kvm-client from the specified registry, and installing tools for both kvm-server
# and kvm-client. Subsequent tasks ensure that needed services are stopped, started, or enabled as per the requirements.
# Documentation reference: [https://documentation.suse.com/alp/dolomite/html/alp-dolomite/available-alp-workloads.html#task-run-kvm-with-podman]
# Creating customized VMs using virt-scenario: [https://documentation.suse.com/alp/dolomite/html/alp-dolomite/concept-virt-scenario.html]
- name: Setup ALP system as a libvirt host
  hosts: alphost
  become: true
  vars:
    workload:
      name: kvm
      service: kvm-server-container
      images:
        kvmserver: registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/kvm-server:latest
        kvmclient: registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/kvm-client:latest
      required_pkgs:
        - kernel-default
        - "-kernel-default-base"
        - netcat-openbsd
        - python3-libvirt-python
        - python3-lxml
        - swtpm
      libvirtd_services:
        - libvirtd.service
        - libvirtd-ro.socket
        - libvirtd-admin.socket
        - libvirtd-tcp.socket
        - libvirtd-tls.socket
      log_and_lock_drivers:
        - container-virtlogd.service
        - virtlogd.socket
        - virtlogd-admin.socket
        - container-virtlockd.service
        - virtlockd.socket
        - virtlockd-admin.socket
      other_drivers:
        - qemu
        - network
        - nodedev
        - nwfilter
        - proxy
        - secret
        - storage
  tasks:
    - name: Install required packages, if any, for workload
      ansible.builtin.package:
        name: "{{ item }}"
        state: present
      loop: "{{ workload.required_pkgs }}"
      notify: Reboot

    - name: Reboot right now if necessary
      ansible.builtin.meta: flush_handlers

    - name: Retrieve images for kvm-server and kvm-client
      containers.podman.podman_image:
        name: "{{ item.value }}"
        state: present
      loop: "{{ workload.images | dict2items }}"

    - name: Install tools for kvmserver
      ansible.builtin.command: >-
        podman container runlabel install "{{ workload.images.kvmserver }}"
      register: workload_runlabel_install
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Install tools for kvmclient
      ansible.builtin.command: >-
        podman container runlabel install "{{ workload.images.kvmclient }}"
      register: workload_runlabel_install
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Ensure libvirtd is stopped and disabled
      ansible.builtin.systemd_service:
        name: "{{ item }}"
        state: stopped
        enabled: false
      loop: "{{ workload.libvirtd_services }}"
      register: service_result
      failed_when: >
        service_result is failed and
        ("Could not find the requested service" not in service_result.msg)

    - name: Ensure kvm-server-container.service is started and enabled
      ansible.builtin.systemd_service:
        name: "{{ workload.service }}"
        state: started
        enabled: true
      notify: Reload systemd

    - name: Reload systemd right now if necessary
      ansible.builtin.meta: flush_handlers

    - name: Enable and start log and lock drivers
      ansible.builtin.systemd_service:
        name: "{{ item }}"
        state: started
        enabled: true
      loop: "{{ workload.log_and_lock_drivers }}"

    - name: Enable and start other drivers
      ansible.builtin.systemd_service:
        name: "container-virt{{ item }}d.service"
        state: started
        enabled: true
      loop: "{{ workload.other_drivers }}"

    - name: Display completion message
      ansible.builtin.debug:
        msg: >-
          ALP system setup as a libvirt host on alptestvm completed successfully.
          All necessary components are installed and configured for managing virtual machines.

  handlers:
    - name: Reboot
      ansible.builtin.reboot:
        reboot_timeout: 600
        post_reboot_delay: 60

    - name: Reload systemd
      ansible.builtin.systemd:
        daemon_reload: true

File: examples/ansible/setup_neuvector.yml

---
# This Ansible playbook is used to manage the NeuVector workload on an ALP host.
# The steps are based on: https://build.opensuse.org/package/view_file/SUSE:ALP:Workloads/neuvector-demo/README.md?expand=1
# and https://documentation.suse.com/alp/micro/html/alp-micro/available-alp-workloads.html#task-run-neuvector-with-podman
# The playbook supports setup of NeuVector.
- name: Running the NeuVector workload
  hosts: alphost
  become: true
  vars:
    workload:
      name: neuvector
      image: registry.opensuse.org/suse/alp/workloads/bci_containerfiles/suse/alp/workloads/neuvector-demo:latest
  tasks:
    - name: Install required packages, if any, for workload {{ workload.name }}
      ansible.builtin.package:
        name: python3-selinux
        state: present
      notify: Reboot

    - name: Reboot right now if necessary
      ansible.builtin.meta: flush_handlers

    - name: Set SELinux into permissive mode
      ansible.posix.selinux:
        policy: targeted
        state: permissive

    - name: Retrieve image for workload {{ workload.name }}
      containers.podman.podman_image:
        name: "{{ workload.image }}"
        state: present

    - name: Execute NeuVector runlabel install
      ansible.builtin.command: >-
        podman container runlabel install {{ workload.image }}
      register: workload_runlabel_install
      changed_when:
        - ('already exist' not in workload_runlabel_install.stdout)

    - name: Enable and start NeuVector service
      ansible.builtin.systemd:
        name: neuvector.service
        state: started
        enabled: true

    - name: Print message on how to connect to NeuVector
      ansible.builtin.debug:
        msg: >-
          NeuVector is running on https://{{ ansible_default_ipv4.address }}:8443
          You need to accept the warning about the self-signed SSL certificate
          and log in with the following default credentials: admin / admin.

  handlers:
    - name: Reboot
      ansible.builtin.reboot:
        reboot_timeout: 600
        post_reboot_delay: 60
File: examples/ansible/templates/config.ign.j2

{#- Based upon ignition config generated by https://opensuse.github.io/fuel-ignition/edit -#}
{%- set _keys = [] -%}
{%- for _kf in ssh_pub_keys | default([]) -%}
{%- set _ = _keys.append(lookup('ansible.builtin.file', _kf)) -%}
{%- endfor -%}
{%- set unique_keys = _keys | sort | unique -%}
{
  "ignition": { "version": "3.2.0" },
  "passwd": {
    "users": [
      {
        "name": "root",
{% if (unique_keys | length) > 0 %}
        "sshAuthorizedKeys": {{ unique_keys | to_json }},
{% endif %}
{# Password is the user's name #}
        "passwordHash": "$2a$10$FbGb5ARQnuaHiskxcYIOgO9PADKyymvmioHMCoHdfO.eyYePLqBZ2"
      },
      {
        "name": "test",
{% if (unique_keys | length) > 0 %}
        "sshAuthorizedKeys": {{ unique_keys | to_json }},
{% endif %}
{# Password is the user's name #}
        "passwordHash": "$2a$10$WK21CVEDrqW4QB5FmmeCjuvFlJl7NMCYGRqBCg/WR1932ua8igzIa"
      }
    ]
  },
  "storage": {
    "filesystems": [
      {
        "device": "/dev/disk/by-label/ROOT",
        "format": "btrfs",
        "mountOptions": [ "subvol=/@/home" ],
        "path": "/home",
        "wipeFilesystem": false
      }
    ],
    "files": [
      {
        "path": "/etc/hostname",
        "mode": 420,
        "overwrite": true,
        "contents": {
          "source": "data:text/plain;charset=utf-8;base64,{{ appliance.name | b64encode }}"
        }
      },
      {
        "path": "/etc/sudoers.d/test",
        "mode": 420,
        "overwrite": true,
        "contents": {
          "source": "data:text/plain;charset=utf-8;base64,{{ 'test ALL=(ALL:ALL) NOPASSWD:ALL' | b64encode }}"
        }
      }
    ]
  }
}
{ "Dhcp4": { "valid-lifetime": 4000, "renew-timer": 1000, "rebind-timer": 2000, "subnet4": [ { "subnet": "192.0.2.0/24", "pools": [ { "pool": "192.0.2.1 - 192.0.2.100" } ], "reservations": [ { "hw-address": "1a:1b:1c:1d:1e:1f", "ip-address": "192.0.2.201" }, { "client-id": "01:11:22:33:44:55:66", "ip-address": "192.0.2.202", "hostname": "special-snowflake" }, { "duid": "01:02:03:04:05", "ip-address": "192.0.2.203", "option-data": [ { "name": "domain-name-servers", "data": "10.1.1.202, 10.1.1.203" } ] }, { "client-id": "01:12:23:34:45:56:67", "ip-address": "192.0.2.204", "option-data": [ { "name": "vivso-suboptions", "data": "4491" }, { "name": "tftp-servers", "space": "vendor-4491", "data": "10.1.1.202, 10.1.1.203" } ] }, { "client-id": "01:0a:0b:0c:0d:0e:0f", "ip-address": "192.0.2.205", "next-server": "192.0.2.1", "server-hostname": "hal9000", "boot-file-name": "/dev/null" }, { "flex-id": "'s0mEVaLue'", "ip-address": "192.0.2.206" } ] } ], "loggers": [ { "name": "kea-dhcp4", "output_options": [ { "output": "stdout" } ], "severity": "INFO", "debuglevel": 0 } ] } } 07070100000015000081A4000000000000000000000001652E3F1A00001194000000000000000000000000000000000000002D00000000examples/ansible/templates/kea-dhcp6.conf.j2// Minimal Kea DHCPv6 Configuration Example // For full configuration details, refer to: // https://gitlab.isc.org/isc-projects/kea/-/blob/master/src/bin/keactrl/kea-dhcp6.conf.pre // // Note: This is a simplified configuration. Consult the linked full configuration for comprehensive details. { "Dhcp6": { "interfaces-config": { "interfaces": [] }, "control-socket": { "socket-type": "unix", "socket-name": "/tmp/kea6-ctrl-socket" }, "lease-database": { "type": "memfile", "lfc-interval": 3600 }, "expired-leases-processing": { "reclaim-timer-wait-time": 10, "flush-reclaimed-timer-wait-time": 25, "hold-reclaimed-time": 3600, "max-reclaim-leases": 100, "max-reclaim-time": 250, "unwarned-reclaim-cycles": 5 }, "renew-timer": 1000, "rebind-timer": 2000, "preferred-lifetime": 3000, "valid-lifetime": 4000, "option-data": [ { "name": "dns-servers", "data": "2001:db8:2::45, 2001:db8:2::100" }, { "code": 12, "data": "2001:db8::1" }, { "name": "new-posix-timezone", "data": "EST5EDT4\\,M3.2.0/02:00\\,M11.1.0/02:00" }, { "name": "preference", "data": "0xf0" }, { "name": "bootfile-param", "data": "root=/dev/sda2, quiet, splash" } ], "subnet6": [ { "id": 1, "subnet": "2001:db8:1::/64", "pools": [ { "pool": "2001:db8:1::/80" } ], "pd-pools": [ { "prefix": "2001:db8:8::", "prefix-len": 56, "delegated-len": 64 } ], "option-data": [ { "name": "dns-servers", "data": "2001:db8:2::dead:beef, 2001:db8:2::cafe:babe" } ], "reservations": [ { "duid": "01:02:03:04:05:0A:0B:0C:0D:0E", "ip-addresses": ["2001:db8:1::100"] }, { "hw-address": "00:01:02:03:04:05", "ip-addresses": ["2001:db8:1::101"], "option-data": [ { "name": "dns-servers", "data": "3000:1::234" }, { "name": "nis-servers", "data": "3000:1::234" } ], "client-classes": ["special_snowflake", "office"] }, { "duid": "01:02:03:04:05:06:07:08:09:0A", "ip-addresses": ["2001:db8:1:0:cafe::1"], "prefixes": ["2001:db8:2:abcd::/64"], "hostname": "foo.example.com", "option-data": [ { "name": "vendor-opts", "data": "4491" }, { "name": "tftp-servers", "space": "vendor-4491", "data": "3000:1::234" } ] }, { "flex-id": "'somevalue'", "ip-addresses": ["2001:db8:1:0:cafe::2"] } ] } ], "loggers": [ { "name": "kea-dhcp6", "output_options": [ { "output": "@localstatedir@/log/kea-dhcp6.log" } ], "severity": "INFO", "debuglevel": 0 } ] } } 