diff --git a/.ansible-lint b/.ansible-lint
index e83e308..2f9f98d 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -1 +1,2 @@
-skip_list: []
+skip_list:
+  - name[casing]
diff --git a/meta/main.yml b/meta/main.yml
index 55c95ed..622bfaf 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -3,15 +3,11 @@ galaxy_info:
   author: Larry Smith Jr.
   description: Ansible role to manage(create, extend, resize) LVM Groups/Logical Volumes.
   namespace: mrlesmithjr
-  role_name: manage-lvm
+  role_name: manage_lvm
   license: MIT
-  min_ansible_version: 1.2
+  min_ansible_version: "1.2"
   platforms:
-    - name: fedora
-      versions:
-        - 31
-
     - name: EL
       versions:
         - all
diff --git a/molecule/default/converge.yml b/molecule/default/converge.yml
index 119de5d..668cd37 100644
--- a/molecule/default/converge.yml
+++ b/molecule/default/converge.yml
@@ -25,5 +25,5 @@
     manage_lvm: true
   tasks:
     - name: "Include lvm"
-      include_role:
+      ansible.builtin.include_role:
         name: "ansible-manage-lvm"
diff --git a/molecule/kvm/converge.yml b/molecule/kvm/converge.yml
index ab055c7..f09164e 100644
--- a/molecule/kvm/converge.yml
+++ b/molecule/kvm/converge.yml
@@ -25,5 +25,5 @@
     manage_lvm: true
   tasks:
     - name: "Include lvm"
-      include_role:
+      ansible.builtin.include_role:
         name: "ansible-manage-lvm"
diff --git a/molecule/kvmonlyvg/converge.yml b/molecule/kvmonlyvg/converge.yml
index be123c4..6109f0e 100644
--- a/molecule/kvmonlyvg/converge.yml
+++ b/molecule/kvmonlyvg/converge.yml
@@ -10,5 +10,5 @@
     manage_lvm: true
   tasks:
     - name: "Include lvm"
-      include_role:
+      ansible.builtin.include_role:
         name: "ansible-manage-lvm"
diff --git a/molecule/kvmsinglelv/converge.yml b/molecule/kvmsinglelv/converge.yml
index 09d88bd..e464782 100644
--- a/molecule/kvmsinglelv/converge.yml
+++ b/molecule/kvmsinglelv/converge.yml
@@ -18,5 +18,5 @@
     manage_lvm: true
   tasks:
     - name: "Include lvm"
-      include_role:
+      ansible.builtin.include_role:
         name: "ansible-manage-lvm"
diff --git a/playbook.yml b/playbook.yml
index 6b69e5a..69c3548 100644
--- a/playbook.yml
+++ b/playbook.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: test-nodes
+- name: Example playbook
+  hosts: test-nodes
   vars:
     lvm_groups:
       - vgname: test-vg
@@ -23,5 +24,5 @@
     manage_lvm: true
   tasks:
     - name: Include lvm
-      include_role:
+      ansible.builtin.include_role:
         name: ansible-manage-lvm
diff --git a/tasks/amazon.yml b/tasks/amazon.yml
index a53ac69..2fdeeb0 100644
--- a/tasks/amazon.yml
+++ b/tasks/amazon.yml
@@ -1,7 +1,6 @@
 ---
-
 - name: amazon | check for nvme devices
-  shell: |
+  ansible.builtin.shell: |
     set -o pipefail
     cat /proc/partitions | awk '{print $4}' | grep -q nvme. ; echo $?
   args:
@@ -10,7 +9,7 @@
   changed_when: false
 
 - name: amazon | check for amazon ebs devices
-  shell: |
+  ansible.builtin.shell: |
     set -o pipefail
     lsblk -O -J | grep -qi "amazon elastic block store" ; echo $?
   register: blkdev_awsebs
@@ -19,11 +18,11 @@
     executable: /bin/bash
 
 - name: amazon | set flag for nvme subsystem
-  set_fact:
+  ansible.builtin.set_fact:
     device_is_nvme: "{{ true if (blkdev_nvme.stdout == '0') else false }}"
 
 - name: amazon | set flag for aws ebs devices
-  set_fact:
+  ansible.builtin.set_fact:
     device_is_awsebs: "{{ true if (blkdev_awsebs.stdout == '0') else false }}"
 
 - name: Block to map renamed ec2 ebs nvmeXnY devices to their original sdX/xvdX names
@@ -31,13 +30,14 @@
   become: true
   block:
     - name: amazon | download nvme mapping binary helper
-      get_url:
+      ansible.builtin.get_url:
         url: "{{ ebsnvme_binary_helper_url }}"
         dest: "{{ ebsnvme_binary_helper_tmp }}/"
+        mode: u=rw,g=r,o=r
       changed_when: false
 
     - name: amazon | extract binary helper
-      unarchive:
+      ansible.builtin.unarchive:
         src: "{{ ebsnvme_binary_helper_tmp }}/{{ ebsnvme_binary_helper_file }}"
         dest: "{{ ebsnvme_binary_helper_tmp }}/"
         remote_src: yes
@@ -45,26 +45,26 @@
       changed_when: false
 
     - name: amazon | copy binary helper
-      copy:
+      ansible.builtin.copy:
         src: "{{ ebsnvme_binary_helper_tmp }}/go-ebsnvme"
         dest: "{{ ebsnvme_binary_helper_path }}"
         mode: a+x
         remote_src: yes
 
     - name: amazon | template script helper
-      template:
-        src: 'ebsnvme-id.j2'
+      ansible.builtin.template:
+        src: "ebsnvme-id.j2"
         dest: "{{ ebsnvme_scrip_helper_path }}"
         mode: a+x
 
     - name: amazon | template udev rule
-      template:
-        src: '70-ec2-nvme-devices.rules.j2'
-        dest: '/etc/udev/rules.d/70-ec2-nvme-devices.rules'
+      ansible.builtin.template:
+        src: "70-ec2-nvme-devices.rules.j2"
+        dest: "/etc/udev/rules.d/70-ec2-nvme-devices.rules"
         mode: 0755
       register: udev_rule
 
-    - name: amazon | reload and trigger udev rules
-      shell:
+    - name: amazon | reload and trigger udev rules # noqa no-changed-when no-handler
+      ansible.builtin.shell:
         cmd: udevadm control --reload-rules && udevadm trigger
       when: udev_rule.changed
diff --git a/tasks/centos.yml b/tasks/centos.yml
index b2a29e8..34d0d05 100644
--- a/tasks/centos.yml
+++ b/tasks/centos.yml
@@ -1,17 +1,18 @@
 ---
 - name: centos | installing lvm2 and sg3_utils
-  package:
+  ansible.builtin.package:
     name:
       - lvm2
       - sg3_utils
     state: present
   become: true
 
-- include_tasks: amazon.yml
+- name: Specific tasks for Amazon EC2
+  ansible.builtin.include_tasks: amazon.yml
   when: ansible_facts.system_vendor == 'Amazon EC2'
 
 - name: centos | debug lvg
-  debug:
+  ansible.builtin.debug:
     var: lv
     verbosity: 3
   loop: "{{ lookup('subelements', lvm_groups, 'lvnames', {'skip_missing': True}, wantlist=True) }}"
@@ -19,7 +20,7 @@
     loop_var: lv
 
 - name: centos | install xfs tools
-  package:
+  ansible.builtin.package:
     name: "xfsprogs"
     state: "present"
   become: true
@@ -35,35 +36,35 @@
     - lv.1.create|bool
 
 - name: centos | check for scsi adapters
-  find:
+  ansible.builtin.find:
     paths: "/sys/class/scsi_host"
     file_type: any
   become: true
   register: scsi_adapters
 
-- block:
-    - name: centos | installing sg3_utils
-      package:
-        name: sg3_utils
-        state: present
-      become: true
-
-    - name: centos | checking for scsi devices
-      command: sg_scan
-      become: true
-      register: scsi_devices
-      changed_when: false
-
-    - name: centos | rescanning for new disks
-      command: "{{ rescan_scsi_command }}"
-      become: true
-      changed_when: false
-      when: scsi_devices.stdout|length > 0
-
-    - name: centos | rescanning for resized disks
-      command: "{{ rescan_scsi_command }} -s"
-      become: true
-      changed_when: false
-      when: scsi_devices.stdout|length > 0
-
+- name: centos | Check for new disks
   when: scsi_adapters.matched > 0
+  block:
+    - name: centos | installing sg3_utils
+      ansible.builtin.package:
+        name: sg3_utils
+        state: present
+      become: true
+
+    - name: centos | checking for scsi devices
+      ansible.builtin.command: sg_scan
+      become: true
+      register: scsi_devices
+      changed_when: false
+
+    - name: centos | rescanning for new disks
+      ansible.builtin.command: "{{ rescan_scsi_command }}"
+      become: true
+      changed_when: false
+      when: scsi_devices.stdout|length > 0
+
+    - name: centos | rescanning for resized disks
+      ansible.builtin.command: "{{ rescan_scsi_command }} -s"
+      become: true
+      changed_when: false
+      when: scsi_devices.stdout|length > 0
diff --git a/tasks/create_fs.yml b/tasks/create_fs.yml
index 5b3d497..c75529d 100644
--- a/tasks/create_fs.yml
+++ b/tasks/create_fs.yml
@@ -1,11 +1,10 @@
 ---
-
 # unable to resize xfs: looks like we've to reference the mountpoint instead of the device
 - name: create_fs | check already converted
   # at least xfs is executed twice if the partition has changed in the meantime
   # then it tries to recreate the fs on the mounted fs which indeed fails...
-  shell: "xfs_info {{ lv.mntp }} | grep -c 'ftype=1'"
-  become: yes
+  ansible.builtin.shell: "xfs_info {{ lv.mntp }} | grep -c 'ftype=1'"
+  become: true
   register: mountedxfs
   ignore_errors: true
   changed_when: false
@@ -17,7 +16,7 @@
     - lv.create|bool
 
 - name: create_fs | unmounting filesystem(s)
-  mount:
+  ansible.posix.mount:
     path: "{{ lv.mntp }}"
     src: "/dev/{{ vg.vgname }}/{{ lv.lvname }}"
     fstype: "{{ lv.filesystem | default(omit) }}"
@@ -31,7 +30,7 @@
     - lv.filesystem != "swap"
 
 - name: create_fs | creating new filesystem on new LVM logical volume(s)
-  filesystem:
+  community.general.system.filesystem:
     fstype: "{{ lv.filesystem }}"
     dev: "/dev/{{ vg.vgname }}/{{ lv.lvname }}"
     resizefs: yes
@@ -49,7 +48,7 @@
     - lv.filesystem != 'xfs'
 
 - name: create_fs | creating new xfs filesystem on new LVM logical volume(s)
-  filesystem:
+  community.general.system.filesystem:
     fstype: "{{ lv.filesystem }}"
     dev: "/dev/{{ vg.vgname }}/{{ lv.lvname }}"
     opts: "{{ lv.fsopts | default(omit) }}"
@@ -66,7 +65,7 @@
     - lv.filesystem == 'xfs'
 
 - name: create_fs | mounting new filesystem(s)
-  mount:
+  ansible.posix.mount:
     path: "{{ lv.mntp }}"
     src: "/dev/{{ vg.vgname }}/{{ lv.lvname }}"
     fstype: "{{ lv.filesystem }}"
@@ -83,8 +82,8 @@
     - lv.mount is defined
     - lv.mount|bool
 
-- name: create_fs | resizing xfs filesystem on new LVM logical volume(s)
-  command: "xfs_growfs {{ lv.mntp }}"
+- name: create_fs | resizing xfs filesystem on new LVM logical volume(s) # noqa no-changed-when
+  ansible.builtin.command: "xfs_growfs {{ lv.mntp }}"
   become: true
   when:
     - vg.create is defined
diff --git a/tasks/create_lv.yml b/tasks/create_lv.yml
index ee2e7ca..2f9ffef 100644
--- a/tasks/create_lv.yml
+++ b/tasks/create_lv.yml
@@ -1,12 +1,11 @@
 ---
-
 - name: create_lv | Display Volume Group
-  debug:
+  ansible.builtin.debug:
     var: vg
     verbosity: 2
 
 - name: create_lv | creating new LVM logical volume(s)
-  lvol:
+  community.general.system.lvol:
     vg: "{{ vg.vgname }}"
     lv: "{{ lv.lvname }}"
     size: "{{ lv.size }}"
@@ -24,8 +23,8 @@
     - lv.create|bool
 
 - name: create_lv | debug changed attribute
-  debug:
+  ansible.builtin.debug:
     var: lvchanged
 
 - name: create_lv | configuring FS
-  include_tasks: create_fs.yml
+  ansible.builtin.include_tasks: create_fs.yml
diff --git a/tasks/create_vg.yml b/tasks/create_vg.yml
index f779d42..14c6bc5 100644
--- a/tasks/create_vg.yml
+++ b/tasks/create_vg.yml
@@ -1,6 +1,6 @@
 ---
 - name: create_vg | creating new LVM volume group(s)
-  lvg:
+  community.general.system.lvg:
     vg: "{{ vg.vgname }}"
     pvs: "{{ vg.disks | join(',') }}"
     state: present
@@ -12,7 +12,7 @@
 ### workaround: auto pvresize waiting for upgrade to new module supporting integrated pvresize
 ### ref: https://docs.ansible.com/ansible/3/collections/community/general/lvg_module.html
 - name: create_vg | pvresize to max available free space
-  command: "pvresize {{ pv }}"
+  ansible.builtin.command: "pvresize {{ pv }}"
   loop: "{{ vg.disks | default([]) }}"
   loop_control:
     loop_var: pv
@@ -23,7 +23,7 @@
     - pvresize_to_max|bool
 
 - name: manage_lvm | loop over logical volume group(s) to create logical volumes
-  include_tasks: create_lv.yml
+  ansible.builtin.include_tasks: create_lv.yml
   loop: "{{ vg.lvnames | default([]) }}"
   loop_control:
     loop_var: lv
diff --git a/tasks/debian.yml b/tasks/debian.yml
index 1ca87ec..8d9a607 100644
--- a/tasks/debian.yml
+++ b/tasks/debian.yml
@@ -1,12 +1,12 @@
 ---
 - name: debian | Updating Apt Cache
-  apt:
+  ansible.builtin.apt:
     update_cache: true
     cache_valid_time: 3600
   become: true
 
 - name: debian | installing pre-reqs
-  apt:
+  ansible.builtin.apt:
     name:
       - lvm2
       - scsitools
@@ -14,11 +14,11 @@
   become: true
 
 - name: debian | install xfs tools
-  apt:
+  ansible.builtin.apt:
     name: xfsprogs
     state: present
   become: true
-  loop: "{{ lvm_groups|subelements('lvnames') }}"
+  loop: "{{ lvm_groups | subelements('lvnames') }}"
   when:
     - item.1 is defined
     - item.1 != 'None'
@@ -28,13 +28,13 @@
     - item.1.create|bool
 
 - name: debian | checking for scsi devices
-  command: sg_scan
+  ansible.builtin.command: sg_scan
   become: true
   register: scsi_devices
   changed_when: false
 
 - name: debian | rescanning for new disks added
-  command: "{{ rescan_scsi_command }}"
+  ansible.builtin.command: "{{ rescan_scsi_command }}"
   become: true
   changed_when: false
   when: scsi_devices['stdout'] | length
diff --git a/tasks/main.yml b/tasks/main.yml
index f964122..4a6bd25 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -1,20 +1,23 @@
 ---
 # tasks file for ansible-manage-lvm
 - name: Set rescan_scsi_command for old debian version
-  set_fact:
+  ansible.builtin.set_fact:
     rescan_scsi_command: "/sbin/rescan-scsi-bus"
   when:
     - ansible_facts.distribution | replace(' ','') | lower == 'debian'
     - ansible_facts.distribution_release not in ('bookworm', 'sid')
     - ansible_facts.distribution_major_version is version(10, '<=')
 
-- include_tasks: debian.yml
+- name: Debian specific tasks
+  ansible.builtin.include_tasks: debian.yml
   when: ansible_facts.os_family == "Debian"
 
-- include_tasks: centos.yml
+- name: RHEL specific tasks
+  ansible.builtin.include_tasks: centos.yml
   when: ansible_facts.os_family == "RedHat"
 
-- include_tasks: manage_lvm.yml
+- name: Manage LVM
+  ansible.builtin.include_tasks: manage_lvm.yml
   when:
     - lvm_groups is defined
     - manage_lvm|bool
diff --git a/tasks/manage_lvm.yml b/tasks/manage_lvm.yml
index 04b6d26..ce88eae 100644
--- a/tasks/manage_lvm.yml
+++ b/tasks/manage_lvm.yml
@@ -1,18 +1,18 @@
 ---
 - name: manage_lvm | manage physical volume group creation
-  include_tasks: create_vg.yml
+  ansible.builtin.include_tasks: create_vg.yml
   loop: "{{ lvm_groups }}"
   loop_control:
     loop_var: vg
 
 - name: manage_lvm | Removing LVM logical volume(s)
-  lvol:
+  community.general.system.lvol:
     vg: "{{ item.0.vgname }}"
     lv: "{{ item.1.lvname }}"
     state: absent
     force: true
   become: true
-  loop: "{{ lvm_groups|subelements('lvnames', {'skip_missing': true}) }}"
+  loop: "{{ lvm_groups | subelements('lvnames', {'skip_missing': true}) }}"
   when:
     - item.1 is defined
     - item.1 != 'None'
@@ -20,7 +20,7 @@
     - not item.1.create|bool
 
 - name: manage_lvm | Removing LVM volume group(s)
-  lvg:
+  community.general.system.lvg:
     vg: "{{ item.vgname }}"
     pvs: "{{ item.disks | join(',') }}"
     state: absent
diff --git a/tests/.ansible-lint b/tests/.ansible-lint
index 9c1ce1b..4dd9ea2 100644
--- a/tests/.ansible-lint
+++ b/tests/.ansible-lint
@@ -1 +1 @@
-skip_list: ['305','503','602']
+skip_list: ['305', '503', '602']
diff --git a/tests/test.yml b/tests/test.yml
index 2ac7e23..e30b33d 100644
--- a/tests/test.yml
+++ b/tests/test.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
-  connection: local
-  roles:
-    - ansible-manage-lvm
+# - name: Test role
+#   hosts: localhost
+#   connection: local
+#   roles:
+#     - role: ansible-manage-lvm