Implement Ansible roles for Rocky Linux Testing Framework
- Added `bootstrap_sparrowdo` role for bootstrapping Sparrowdo on a VM.
- Introduced `cleanup_vm` role for cleaning up VMs and disk images.
- Created `download_image` role to download and cache QCOW2 images.
- Developed `golden_image` role for creating and customizing golden images.
- Implemented `provision_vm` role for provisioning VMs as linked clones.
- Added `run_test` role for executing tests with Sparrowdo.
- Created playbooks for building golden images, running single tests, and running test suites.
- Enhanced documentation with usage examples, configuration details, and troubleshooting tips.
- Added support for multiple cloud providers (AWS, Azure) in the test execution workflow.

Signed-off-by: Stephen Simpson <ssimpson89@users.noreply.github.com>
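The roles are meant to be driven by the playbooks listed above (golden-image build, single test, test suite), which are not part of the excerpt below. For orientation, a minimal wiring sketch follows; the playbook name, host target, image URL, paths, and test values are illustrative assumptions, while the role names and the variables they consume (qcow2_url, base_images_dir, base_image_path, golden_image_path, test_name, test_repo_url) come from the roles in this commit. SSH keys, the root password, the sparrowdo_* flags, work_dir/logs_dir and similar settings are assumed to come from inventory group_vars, as the role defaults note.

---
# site.yml -- illustrative wiring only, not one of the playbooks shipped in this commit
- hosts: localhost
  connection: local
  vars:
    # Assumed values; in this framework they would normally live in group_vars
    qcow2_url: "https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud-Base.latest.x86_64.qcow2"
    base_images_dir: /var/lib/libvirt/images/base
    download_timeout: 600
    force_download: false
    golden_image_path: /var/lib/libvirt/images/golden/rocky9-golden.qcow2
  tasks:
    - name: Download and cache the base QCOW2 image (sets base_image_path)
      include_role:
        name: download_image

    - name: Build the customized golden image from the cached base image
      include_role:
        name: golden_image
      vars:
        golden_image_base_image_path: "{{ base_image_path }}"

    - name: Run one test on a linked clone of the golden image
      include_role:
        name: run_test
      vars:
        test_name: example-test                                 # illustrative
        test_repo_url: "https://example.com/tests/example.git"  # illustrative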
ansible/roles/bootstrap_sparrowdo/defaults/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
# Timeouts (seconds)
vm_boot_timeout: 60
ssh_port_timeout: 300
bootstrap_timeout: 900

# VM resources
vm_memory: 2048
vm_vcpus: 2
ansible/roles/bootstrap_sparrowdo/tasks/main.yml (new file, 100 lines)
@@ -0,0 +1,100 @@
---
- name: Verify golden image exists
  stat:
    path: "{{ golden_image_path }}"
  register: golden_stat
  failed_when: not golden_stat.stat.exists

- name: Set bootstrap VM name
  set_fact:
    bootstrap_vm: "bootstrap-{{ ansible_date_time.epoch }}"

- name: Bootstrap Sparrowdo
  block:
    - name: Create overlay disk
      command: >
        qemu-img create -f qcow2
        -b {{ golden_image_path }} -F qcow2
        /var/lib/libvirt/images/{{ bootstrap_vm }}.qcow2
      become: true

    - name: Start bootstrap VM
      command: >
        virt-install
        --name {{ bootstrap_vm }}
        --memory {{ vm_memory }}
        --vcpus {{ vm_vcpus }}
        --disk path=/var/lib/libvirt/images/{{ bootstrap_vm }}.qcow2,format=qcow2
        --import
        --os-variant rocky9-unknown
        --network network=default
        --noautoconsole
        --wait 0
      become: true

    - name: Wait for VM IP
      shell: >
        virsh -c qemu:///system domifaddr {{ bootstrap_vm }} --source lease 2>/dev/null |
        awk '/ipv4/ {print $4}' | cut -d/ -f1 | head -1
      become: true
      register: vm_ip
      until: vm_ip.stdout != "" and vm_ip.stdout != "0.0.0.0"
      retries: "{{ vm_boot_timeout }}"
      delay: 2
      changed_when: false

    - name: Wait for SSH
      wait_for:
        host: "{{ vm_ip.stdout }}"
        port: 22
        timeout: "{{ ssh_port_timeout }}"

    - name: Run Sparrowdo bootstrap
      command:
        argv:
          - "~/.raku/bin/sparrowdo"
          - "--host={{ vm_ip.stdout }}"
          - "--ssh_user={{ ssh_user }}"
          - "--ssh_private_key={{ ssh_private_key_path }}"
          - "--bootstrap"
      timeout: "{{ bootstrap_timeout }}"
      register: bootstrap_result
      retries: 3
      delay: 5
      until: bootstrap_result.rc == 0

    - name: Shutdown VM
      command: >
        ssh -i {{ ssh_private_key_path }}
        -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
        {{ ssh_user }}@{{ vm_ip.stdout }} 'sudo shutdown -h now'
      ignore_errors: true

    - name: Wait for shutdown
      shell: virsh -c qemu:///system list --name | grep -q "{{ bootstrap_vm }}"
      become: true
      register: vm_running
      until: vm_running.rc != 0
      retries: 30
      delay: 2
      failed_when: false

  always:
    - name: Force stop VM if running
      command: "virsh -c qemu:///system destroy {{ bootstrap_vm }}"
      become: true
      ignore_errors: true
      changed_when: false

    - name: Undefine VM
      command: "virsh -c qemu:///system undefine {{ bootstrap_vm }}"
      become: true
      ignore_errors: true
      changed_when: false

    - name: Remove overlay disk
      file:
        path: "/var/lib/libvirt/images/{{ bootstrap_vm }}.qcow2"
        state: absent
      become: true
      ignore_errors: true
ansible/roles/cleanup_vm/defaults/main.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
---
# Default variables for cleanup_vm role

# VM name to cleanup (required)
vm_name: ""

# Force destroy even if running
force_destroy: true

# Remove disk image
remove_disk: true

# Cleanup multiple VMs matching pattern
cleanup_pattern: ""

# Cleanup all VMs in list
cleanup_vm_list: []
ansible/roles/cleanup_vm/tasks/cleanup_single.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
---
# Cleanup a single VM (used in loop)

- name: Destroy VM {{ vm_to_cleanup }}
  command: virsh -c qemu:///system destroy {{ vm_to_cleanup }}
  become: true
  register: destroy_result
  failed_when: false
  changed_when: destroy_result.rc == 0

- name: Undefine VM {{ vm_to_cleanup }}
  command: virsh -c qemu:///system undefine {{ vm_to_cleanup }}
  become: true
  register: undefine_result
  failed_when: false
  changed_when: undefine_result.rc == 0

- name: Remove disk for VM {{ vm_to_cleanup }}
  file:
    path: "/var/lib/libvirt/images/{{ vm_to_cleanup }}.qcow2"
    state: absent
  become: true
  when: remove_disk
ansible/roles/cleanup_vm/tasks/main.yml (new file, 77 lines)
@@ -0,0 +1,77 @@
---
# Tasks for cleanup_vm role

- name: Cleanup single VM
  when: vm_name != ""
  block:
    - name: Display cleanup info for single VM
      debug:
        msg: "Starting cleanup for VM: {{ vm_name }}"

    - name: Destroy VM
      command: virsh -c qemu:///system destroy {{ vm_name }}
      become: true
      register: destroy_result
      failed_when: false
      changed_when: destroy_result.rc == 0

    - name: Display destroy result
      debug:
        msg: "{{ 'VM was not running' if destroy_result.rc != 0 else 'VM destroyed' }}"

    - name: Undefine VM
      command: virsh -c qemu:///system undefine {{ vm_name }}
      become: true
      register: undefine_result
      failed_when: false
      changed_when: undefine_result.rc == 0

    - name: Display undefine result
      debug:
        msg: "{{ 'VM definition already removed' if undefine_result.rc != 0 else 'VM undefined' }}"

    - name: Remove disk image
      file:
        path: "/var/lib/libvirt/images/{{ vm_name }}.qcow2"
        state: absent
      become: true
      when: remove_disk

    - name: Display cleanup completion
      debug:
        msg: "Cleanup complete for {{ vm_name }}"

- name: Cleanup VMs matching pattern
  when: cleanup_pattern != ""
  block:
    - name: Get list of VMs matching pattern
      shell: virsh -c qemu:///system list --all --name | grep -E "{{ cleanup_pattern }}"
      become: true
      register: matching_vms
      failed_when: false
      changed_when: false

    - name: Display matching VMs
      debug:
        msg: "Found {{ matching_vms.stdout_lines | length }} VMs matching pattern: {{ cleanup_pattern }}"
      when: matching_vms.stdout_lines | length > 0

    - name: Cleanup each matching VM
      include_tasks: cleanup_single.yml
      loop: "{{ matching_vms.stdout_lines }}"
      loop_control:
        loop_var: vm_to_cleanup
      when: matching_vms.stdout_lines | length > 0

- name: Cleanup VMs from list
  when: cleanup_vm_list | length > 0
  block:
    - name: Display VMs to cleanup
      debug:
        msg: "Cleaning up {{ cleanup_vm_list | length }} VMs from list"

    - name: Cleanup each VM in list
      include_tasks: cleanup_single.yml
      loop: "{{ cleanup_vm_list }}"
      loop_control:
        loop_var: vm_to_cleanup
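Taken together, the defaults and tasks above give cleanup_vm three entry points: a single vm_name, a cleanup_pattern regular expression, or an explicit cleanup_vm_list. A minimal invocation sketch, not part of this commit (the play target, VM names, and pattern are illustrative):

---
# cleanup_examples.yml -- illustrative; each task exercises one mode of cleanup_vm
- hosts: localhost
  connection: local
  tasks:
    - name: Remove one leftover test VM and its overlay disk
      include_role:
        name: cleanup_vm
      vars:
        vm_name: example-test-1700000000        # illustrative

    - name: Remove every bootstrap VM by pattern
      include_role:
        name: cleanup_vm
      vars:
        vm_name: ""                             # leave empty so pattern mode applies
        cleanup_pattern: "^bootstrap-"          # illustrative regex

    - name: Remove an explicit list of VMs
      include_role:
        name: cleanup_vm
      vars:
        vm_name: ""
        cleanup_vm_list:
          - example-test-1700000001             # illustrative
          - example-test-1700000002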
ansible/roles/download_image/defaults/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
# Default variables for download_image role
# Most settings come from inventory (group_vars)

# Return variable name for the downloaded image path
image_path_var: "base_image_path"
ansible/roles/download_image/tasks/main.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
---
- name: Ensure base images directory exists
  file:
    path: "{{ base_images_dir }}"
    state: directory
    mode: '0755'
  become: true

- name: Set cached image path
  set_fact:
    cached_image_path: "{{ base_images_dir }}/{{ qcow2_url | basename }}"

- name: Download QCOW2 image
  get_url:
    url: "{{ qcow2_url }}"
    dest: "{{ cached_image_path }}"
    mode: '0644'
    timeout: "{{ download_timeout }}"
    force: "{{ force_download }}"
  become: true

- name: Set image path fact
  set_fact:
    "{{ image_path_var }}": "{{ cached_image_path }}"
ansible/roles/golden_image/defaults/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
# Paths (passed from playbook)
golden_image_base_image_path: ""
golden_image_path: ""
ansible/roles/golden_image/tasks/customize.sh (new file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash
set -eux

# Create user if it doesn't exist
if ! id -u rocky >/dev/null 2>&1; then
    useradd -m rocky
fi

# Set password
echo "rocky:rockypass" | chpasswd

# Sudoers
echo "rocky ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/rocky
chmod 0440 /etc/sudoers.d/rocky

# Enable ssh
systemctl enable sshd
ansible/roles/golden_image/tasks/main.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
---
- name: Verify base image exists
  ansible.builtin.stat:
    path: "{{ golden_image_base_image_path }}"
  register: golden_image_base_image_stat
  failed_when: not golden_image_base_image_stat.stat.exists

- name: Ensure golden image directory exists
  ansible.builtin.file:
    path: "{{ golden_image_path | dirname }}"
    state: directory
    mode: '0755'
  become: true

- name: Copy base image to golden image
  ansible.builtin.copy:
    src: "{{ golden_image_base_image_path }}"
    dest: "{{ golden_image_path }}"
    remote_src: true
    mode: '0644'
  become: true

- name: Customize golden image
  ansible.builtin.command: >
    virt-customize -a {{ golden_image_path }}
    --install perl,git,wget,tar,openssh-server,vim
    --run {{ role_path }}/tasks/customize.sh
    --ssh-inject root:file:{{ ssh_public_key_path }}
    --ssh-inject rocky:file:{{ ssh_public_key_path }}
    --root-password password:{{ root_password }}
    --selinux-relabel
  changed_when: false
  environment:
    LIBGUESTFS_BACKEND: direct
  become: true
ansible/roles/provision_vm/defaults/main.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
---
# Default variables for provision_vm role
# Most settings come from inventory (group_vars/libvirt.yml)

# VM name (required, passed from playbook/tasks)
vm_name: ""

# Maximum wait time for IP (seconds)
max_wait_ip: 30

# OS variant for virt-install
os_variant: "rocky9-unknown"

# Use transient VM (doesn't survive reboot)
vm_transient: true

# Return variable name for VM IP
vm_ip_var: "provisioned_vm_ip"
ansible/roles/provision_vm/tasks/main.yml (new file, 82 lines)
@@ -0,0 +1,82 @@
---
# Tasks for provision_vm role

- name: Validate VM name
  fail:
    msg: "vm_name is required"
  when: vm_name == ""

- name: Validate golden image path
  stat:
    path: "{{ golden_image_path }}"
  register: golden_image_stat
  failed_when: not golden_image_stat.stat.exists

- name: Set VM disk path
  set_fact:
    vm_disk_path: "/var/lib/libvirt/images/{{ vm_name }}.qcow2"

- name: Display provisioning info
  debug:
    msg: "Creating VM: {{ vm_name }}"

- name: Create linked clone overlay disk
  command: >
    qemu-img create -f qcow2
    -b {{ golden_image_path }}
    -F qcow2
    {{ vm_disk_path }}
  become: true
  register: disk_created
  changed_when: true

- name: Build virt-install command
  set_fact:
    virt_install_cmd: >
      virt-install
      --name {{ vm_name }}
      --memory {{ vm_memory }}
      --vcpus {{ vm_vcpus }}
      --disk path={{ vm_disk_path }},format=qcow2
      --import
      --os-variant {{ os_variant }}
      --network network={{ vm_network }}
      --noautoconsole
      --wait 0
      {% if vm_transient %}--transient{% endif %}

- name: Start VM with virt-install
  command: "{{ virt_install_cmd }}"
  become: true
  register: vm_started
  changed_when: true
  failed_when: vm_started.rc != 0

- name: Wait for VM to obtain IP address
  shell: >
    virsh -c qemu:///system domifaddr {{ vm_name }} --source lease 2>/dev/null |
    awk '/ipv4/ {print $4}' | cut -d/ -f1 | head -1
  become: true
  register: vm_ip_result
  until: vm_ip_result.stdout != "" and vm_ip_result.stdout != "0.0.0.0"
  retries: "{{ max_wait_ip }}"
  delay: 2
  changed_when: false

- name: Set VM IP fact
  set_fact:
    "{{ vm_ip_var }}": "{{ vm_ip_result.stdout }}"

- name: Display VM IP
  debug:
    msg: "IP obtained: {{ vm_ip_result.stdout }}"

- name: Export VM IP and name
  set_stats:
    data:
      "{{ vm_ip_var }}": "{{ vm_ip_result.stdout }}"
      provisioned_vm_name: "{{ vm_name }}"

- name: Register VM for cleanup
  set_fact:
    provisioned_vms: "{{ provisioned_vms | default([]) + [{'name': vm_name, 'ip': vm_ip_result.stdout, 'disk': vm_disk_path}] }}"
ansible/roles/run_test/defaults/main.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
---
# Default variables for run_test role
# Most settings come from inventory (group_vars)

# Test name (required, passed from playbook/tasks)
test_name: ""

# Test repository URL (required, passed from playbook/tasks)
test_repo_url: ""

# Test repository branch (passed from playbook/tasks)
test_repo_branch: "main"

# Test timeout (passed from playbook/tasks)
test_timeout: 900
ansible/roles/run_test/tasks/main.yml (new file, 164 lines)
@@ -0,0 +1,164 @@
---
# Tasks for run_test role

- name: Validate test parameters
  fail:
    msg: "{{ item.msg }}"
  when: item.condition
  loop:
    - { condition: "{{ test_name == '' }}", msg: "test_name is required" }
    - { condition: "{{ test_repo_url == '' }}", msg: "test_repo_url is required" }

- name: Generate unique VM name
  set_fact:
    test_vm_name: "{{ test_name }}-{{ ansible_date_time.epoch }}"

- name: Create working directory
  file:
    path: "{{ work_dir }}/{{ test_vm_name }}"
    state: directory
    mode: '0755'

- name: Display test info
  debug:
    msg:
      - "Running test: {{ test_name }}"
      - "Repository: {{ test_repo_url }}"
      - "Branch: {{ test_repo_branch }}"
      - "VM: {{ test_vm_name }}"

# Provision VM
- name: Provision test VM
  include_role:
    name: provision_vm
  vars:
    vm_name: "{{ test_vm_name }}"
    vm_ip_var: "test_vm_ip"

- name: Set VM IP variable
  set_fact:
    vm_ip: "{{ test_vm_ip }}"

- name: Display VM information
  debug:
    msg: "VM ready at {{ vm_ip }}"

# Wait for SSH
- name: Wait for SSH to be ready
  wait_for:
    host: "{{ vm_ip }}"
    port: 22
    timeout: 60
    state: started
  delegate_to: localhost

- name: Test SSH connection
  command: >
    ssh -i {{ ssh_private_key_path }}
    -o StrictHostKeyChecking=no
    -o ConnectTimeout=5
    -o UserKnownHostsFile=/dev/null
    {{ ssh_user }}@{{ vm_ip }}
    'echo SSH ready'
  register: ssh_test
  until: ssh_test.rc == 0
  retries: 30
  delay: 2
  changed_when: false

# Clone test repository
- name: Clone test repository
  git:
    repo: "{{ test_repo_url }}"
    dest: "{{ work_dir }}/{{ test_vm_name }}/test-repo"
    version: "{{ test_repo_branch }}"
  register: repo_cloned

- name: Find sparrowfile
  find:
    paths: "{{ work_dir }}/{{ test_vm_name }}/test-repo"
    patterns:
      - "main.raku"
      - "sparrowfile"
    recurse: true
  register: sparrowfile_search

- name: Validate sparrowfile exists
  fail:
    msg: "No sparrowfile or main.raku found in test repository"
  when: sparrowfile_search.files | length == 0

- name: Set sparrowfile path
  set_fact:
    sparrowfile_path: "{{ sparrowfile_search.files[0].path }}"

- name: Display sparrowfile path
  debug:
    msg: "Found sparrowfile: {{ sparrowfile_path }}"

# Run Sparrowdo test
- name: Build sparrowdo command
  set_fact:
    sparrowdo_cmd: >
      sparrowdo
      --host={{ vm_ip }}
      --ssh_user={{ ssh_user }}
      --ssh_private_key={{ ssh_private_key_path }}
      --ssh_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
      {% if sparrowdo_no_sudo %}--no_sudo{% endif %}
      --sparrowfile={{ sparrowfile_path }}
      {% if sparrowdo_verbose %}--verbose{% endif %}
      {% if sparrowdo_color %}--color{% endif %}

- name: Create logs directory
  file:
    path: "{{ logs_dir }}/{{ test_vm_name }}"
    state: directory
    mode: '0755'
  when: save_logs

- name: Run Sparrowdo test
  shell: "{{ sparrowdo_cmd }} 2>&1 | tee {{ logs_dir }}/{{ test_vm_name }}/test.log"
  args:
    executable: /bin/bash
  register: sparrowdo_result
  timeout: "{{ sparrowdo_timeout }}"
  when: save_logs

- name: Run Sparrowdo test (without logging)
  command: "{{ sparrowdo_cmd }}"
  register: sparrowdo_result_nolog
  timeout: "{{ sparrowdo_timeout }}"
  when: not save_logs

- name: Display test result
  debug:
    msg: "Test {{ test_name }} completed successfully"

- name: Cleanup test VM
  include_role:
    name: cleanup_vm
  vars:
    vm_name: "{{ test_vm_name }}"
  when: cleanup_after_test

- name: Archive test results
  set_fact:
    test_results: "{{ test_results | default([]) + [{'name': test_name, 'status': 'passed', 'vm': test_vm_name, 'log': logs_dir + '/' + test_vm_name + '/test.log'}] }}"
  when: save_logs

# Error handling
- name: Handle test failure
  block:
    - name: Archive failed test logs
      set_fact:
        test_results: "{{ test_results | default([]) + [{'name': test_name, 'status': 'failed', 'vm': test_vm_name, 'log': logs_dir + '/' + test_vm_name + '/test.log'}] }}"
      when: save_logs

    - name: Cleanup VM on failure
      include_role:
        name: cleanup_vm
      vars:
        vm_name: "{{ test_vm_name }}"
      when: cleanup_after_test
  when: sparrowdo_result is failed or sparrowdo_result_nolog is failed