Compare revisions: hpc/datalab/ansible/collections/datalab.osrancher
Commits on Source (415): 630 additions and 182 deletions
tests
.DS_Store
\ No newline at end of file
......@@ -41,4 +41,4 @@ Use roles inside an Ansible playbook
| node_taints | | Node taints for RKE2 node |
| node_labels | | Node labels for RKE2 node |
| rke2_channel | stable | RKE2 version channel |
| state | present | Flag for setting up (`present`) or removing (`absent`) the RKE2 cluster |
\ No newline at end of file
| state | present | Flag for setting up (`present`) or removing (`absent`) the RKE2 cluster |
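As a quick illustration of how these variables are consumed, a minimal play could look like the sketch below; the fully qualified role name `adls.osrancher.rke2` is an assumption for illustration, since the fragment above does not show it.

- hosts: servers
  become: true
  roles:
    - role: adls.osrancher.rke2   # role name assumed for illustration
      vars:
        rke2_channel: stable
        node_labels: []
        node_taints: []
        state: present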
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: adls
# The name of the collection. Has the same character restrictions as 'namespace'
name: osrancher
# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
- Thomas Weber <thomas.weber@wu.ac.at>
- Elias Wimmer <elias.wimmer@tuwien.ac.at>
### OPTIONAL but strongly recommended
# A short summary description of the collection
description: Set up an RKE2 Kubernetes cluster on OpenStack
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
- GPL-2.0-or-later
# The path to the license file for the collection. This path is relative to the root of the collection. This key is
# mutually exclusive with 'license'
license_file: ''
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags: []
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies: {}
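# Illustrative only, not part of this collection's metadata: a dependency with a
# version range specifier would be declared like
#   dependencies:
#     openstack.cloud: '>=1.8.0,<3.0.0'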
# The URL of the originating SCM repository
repository: https://gitlab.tuwien.ac.at/ADLS/infrastructure/adls.osrancher
# The URL to any online docs
documentation: https://gitlab.tuwien.ac.at/ADLS/infrastructure/adls.osrancher
# The URL to the homepage of the collection/project
homepage: https://gitlab.tuwien.ac.at/ADLS/infrastructure/adls.osrancher
# The URL to the collection issue tracker
issues: http://example.com/issue/tracker
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
# and '.git' are always filtered
build_ignore: []
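# Illustrative only: fnmatch-style patterns such as
#   build_ignore:
#     - '*.tar.gz'
#     - 'tests/output'
# would exclude matching files and directories from the built artifact.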
- name: install epel repos
package:
name: epel-release
- name: create ceph.repo
template:
src: ceph.repo.j2
dest: /etc/yum.repos.d/ceph.repo
mode: 0664
- name: install ceph common
package:
name: ceph-common
- name: create ceph.conf
template:
src: ceph.conf.j2
dest: /etc/ceph/ceph.conf
mode: 0664
- name: create ceph keyrings
template:
src: ceph.client.keyring.j2
dest: "/etc/ceph/ceph.client.{{ client }}.keyring"
mode: 0600
vars:
client: "{{ item.client }}"
key: "{{ item.key }}"
loop: "{{ ceph_keyrings }}"
- name: create mount points
ansible.builtin.file:
path: "{{ item }}"
state: directory
loop:
- /mnt/jaas-home
- /mnt/jaas-jaas
# - name: add to fstab
# ansible.builtin.lineinfile:
# path: /etc/fstab
# line: :/home /mnt/jaas-home ceph _netdev,name=jaas-home 0 0
# state: absent
# - name: add to fstab
# ansible.builtin.lineinfile:
# path: /etc/fstab
# line: :/jaas /mnt/jaas-jaas ceph _netdev,name=jaas-jaas 0 0
# state: absent
- name: mount cephfs
ansible.posix.mount:
src: ":/home"
path: "/mnt/jaas-home"
opts: _netdev,name=jaas-home
state: mounted
fstype: ceph
- name: mount cephfs
ansible.posix.mount:
src: ":/jaas"
path: "/mnt/jaas-jaas"
opts: _netdev,name=jaas-jaas
state: mounted
fstype: ceph
- name: enable container_use_cephfs
ansible.posix.seboolean:
name: container_use_cephfs
state: true
persistent: true
[client.{{ client }}]
key = {{ key }}
# minimal ceph.conf for 356ebb9a-acc2-11ea-a9d6-b8599fef7b50
[global]
fsid = {{ ceph_cluster_id }}
mon_host = {% for mon in ceph_monitors %}[v2:{{ mon }}:3300/0,v1:{{ mon }}:6789/0] {% endfor %}
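# Rendered example (illustrative, using documentation addresses):
# mon_host = [v2:192.0.2.10:3300/0,v1:192.0.2.10:6789/0] [v2:192.0.2.11:3300/0,v1:192.0.2.11:6789/0]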
[Ceph]
name=Ceph $basearch
baseurl=https://download.ceph.com/rpm-quincy/el8/$basearch
enabled=1
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.gpg
[Ceph-noarch]
name=Ceph noarch
baseurl=https://download.ceph.com/rpm-quincy/el8/noarch
enabled=1
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.gpg
[Ceph-source]
name=Ceph SRPMS
baseurl=https://download.ceph.com/rpm-quincy/el8/SRPMS
enabled=1
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.gpg
......@@ -8,13 +8,18 @@ router_name: "rke2-{{ cluster_name }}"
server_volume_size: 50
agent_volume_size: 100
server_flavor: m1a.large
agent_flavor: m1a.xlarge
image: 1fe615f0-9dad-447d-bf54-9071defafb77
server_flavor: a2.standard.2
agent_flavor: a2.highmem.16
image: 3a5f018e-d0cd-45b9-8253-f45117257abc
server_count: 3
agent_count: 3
loadbalancer_name: "rke2_{{ cluster_name }}"
security_group: "rke2_{{ cluster_name }}"
state: present
\ No newline at end of file
state: present
storage_security_group: "rke2_{{ cluster_name }}_storage"
jumphost_security_group: "rke2_{{ cluster_name }}_jumphost"
sriov: false
\ No newline at end of file
- block:
# - pause:
# seconds: 5
- openstack.cloud.server_info:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
server: "rke2-{{ cluster_name }}-server-{{ item }}"
loop: "{{ range(1, agent_count, 1) | list }}"
register: servers_result
- openstack.cloud.server_info:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
server: "rke2-{{ cluster_name }}-agent-{{ item }}"
loop: "{{ range(0, agent_count, 1) | list }}"
register: agents_result
- ansible.builtin.set_fact:
agents: "{{ agents_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
servers: "{{ servers_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
# - block:
# # - pause:
# # seconds: 5
# - openstack.cloud.server_info:
# auth: "{{ openstack_auth }}"
# auth_type: "{{ openstack_auth_type }}"
# server: "rke2-{{ cluster_name }}-server-{{ item }}"
# loop: "{{ range(1, agent_count, 1) | list }}"
# register: servers_result
# - openstack.cloud.server_info:
# auth: "{{ openstack_auth }}"
# auth_type: "{{ openstack_auth_type }}"
# server: "rke2-{{ cluster_name }}-agent-{{ item }}"
# loop: "{{ range(0, agent_count, 1) | list }}"
# register: agents_result
# - set_fact:
# agents: "{{ agents_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
# servers: "{{ servers_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
when: agents | length > 0 and agents[0].private_v4 == '' or servers | length > 1 and servers[0].private_v4 == ''
# when: agents | length > 0 and agents[0].private_v4 == '' or servers | length > 1 and servers[0].private_v4 == ''
- name: update inventory in project
ansible.builtin.template:
template:
src: hosts.j2
dest: hosts
- name: create inventory folders
ansible.builtin.file:
file:
path: "{{ item }}"
state: directory
loop:
- group_vars
- group_vars/all
- group_vars/master
- group_vars/servers
- group_vars/agents
- name: update all inventory
ansible.builtin.template:
src: all.yml.j2
dest: group_vars/all/infrastructure.yml
- name: update master inventory
ansible.builtin.template:
src: master.yml.j2
dest: group_vars/master/infrastructure.yml
- name: update servers inventory
ansible.builtin.template:
src: servers.yml.j2
dest: group_vars/servers/infrastructure.yml
- name: update config
lineinfile:
dest: "config.yml"
regexp: "{{ item.key }}:.*"
line: "{{ item.key }}: {{ item.value }}"
state: present
loop:
- { key: loadbalancer_ip, value: "{{ loadbalancer_ip }}" }
- { key: jumphost_floating_ip, value: "{{ jumphost_floating_ip }}" }
- { key: subnet_id, value: "{{ new_subnet_id }}" }
- { key: ssh_key_file, value: "{{ ssh_key_file }}" }
- name: update agents inventory
ansible.builtin.template:
src: agents.yml.j2
dest: group_vars/agents/infrastructure.yml
......@@ -11,13 +11,19 @@
- name: create inventory folders
ansible.builtin.file:
file:
path: "{{ ssh_keys_dir }}"
state: directory
- name: remove ssh key
file:
path: "{{ ssh_keys_dir }}"
state: absent
when: state == 'absent'
- name: store private key locally
ansible.builtin.copy:
copy:
dest: "{{ ssh_keys_dir }}/{{ ssh_key_name }}.pem"
content: "{{ ssh_key.key.private_key }}"
content: "{{ ssh_key.keypair.private_key }}"
mode: "0600"
when: state == 'present' and ssh_key.key.private_key != none
\ No newline at end of file
when: state == 'present' and ssh_key.keypair.private_key != none
\ No newline at end of file
- name: wait for loadbalancer
ansible.builtin.async_status:
async_status:
jid: "{{ _create_loadbalancer.ansible_job_id }}"
register: loadbalancer_result
until: loadbalancer_result.finished
delay: 1
retries: 600
- debug:
var: loadbalancer_result
- name: store loadbalancer ip
ansible.builtin.set_fact:
new_loadbalancer_ip: "{{ loadbalancer_result.loadbalancer.public_vip_address | default(loadbalancer_ip) }}"
set_fact:
loadbalancer_ip: "{{ loadbalancer_result.loadbalancer.public_vip_address | default(loadbalancer_ip) }}"
- name: add listener
openstack.cloud.lb_listener:
......@@ -58,10 +61,10 @@
- 443
- ansible.builtin.set_fact:
- set_fact:
pool_members:
- { pool: 6443, port: 6443, vm: "{{ servers + [ master ] }}" }
- { pool: 9345, port: 9345, vm: "{{ servers + [ master ] }}" }
- { pool: 6443, port: 6443, vm: "{{ servers }}" }
- { pool: 9345, port: 9345, vm: "{{ servers }}" }
- { pool: 80, port: 80, vm: "{{ agents }}" }
- { pool: 443, port: 443, vm: "{{ agents }}" }
......
......@@ -4,8 +4,9 @@
auth_type: "{{ openstack_auth_type }}"
name: "{{ loadbalancer_name }}"
vip_subnet: "{{ subnet_name }}"
auto_public_ip: yes
public_network: public
public_ip_address : "{{ loadbalancer_ip }}"
public_network: "{{ floating_network_id }}"
assign_floating_ip: true
state: "{{ state }}"
register: _create_loadbalancer
async: 600
......
- block:
- name: network
ansible.builtin.include_tasks: network.yml
args:
apply:
tags:
- network
tags:
- network
include_tasks: network.yml
- name: loadbalancer
ansible.builtin.include_tasks: loadbalancer.yml
args:
apply:
tags:
- loadbalancer
tags:
- loadbalancer
include_tasks: loadbalancer.yml
- name: security groups
ansible.builtin.include_tasks: security_groups.yml
args:
apply:
tags:
- security_groups
tags:
- security_groups
include_tasks: security_groups.yml
- name: keypair
ansible.builtin.include_tasks: keypair.yml
args:
apply:
tags:
- keypair
tags:
- keypair
include_tasks: keypair.yml
- name: vm
ansible.builtin.include_tasks: vm.yml
args:
apply:
tags:
- vm
tags:
- vm
include_tasks: vm.yml
- name: lb elements
ansible.builtin.include_tasks: lb_members.yml
args:
apply:
tags:
- loadbalancer
tags:
- loadbalancer
include_tasks: lb_members.yml
- name: inventory
ansible.builtin.include_tasks: inventory.yml
include_tasks: inventory.yml
when: state == 'present'
- block:
- name: loadbalancer
ansible.builtin.include_tasks: loadbalancer.yml
include_tasks: loadbalancer.yml
- name: vm
ansible.builtin.include_tasks: vm.yml
include_tasks: vm.yml
- name: security groups
ansible.builtin.include_tasks: security_groups.yml
include_tasks: security_groups.yml
- name: network
ansible.builtin.include_tasks: network.yml
include_tasks: network.yml
- name: delete inventory
ansible.builtin.file:
file:
path: "{{ item }}"
state: absent
loop:
- group_vars
- hosts
- name: keypair
include_tasks: keypair.yml
when: state == 'absent'
\ No newline at end of file
......@@ -22,8 +22,9 @@
cidr: "{{ cidr }}"
state: "{{ state }}"
register: subnet_result
when: state == 'present'
- ansible.builtin.set_fact:
- set_fact:
new_subnet_id: "{{ subnet_result.subnet.id }}"
when: state == 'present'
......
- name: security group
- name: security group jumphost
openstack.cloud.security_group:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "{{ jumphost_security_group }}"
state: "{{ state }}"
- name: ingress rules jumphost
openstack.cloud.security_group_rule:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
security_group: "{{ jumphost_security_group }}"
protocol: "{{ item.protocol }}"
port_range_min: "{{ item.min }}"
port_range_max: "{{ item.max }}"
remote_ip_prefix: "{{ item.cidr }}"
direction: "{{ item.direction | default('ingress') }}"
loop:
- { protocol: TCP, min: 22, max: 22, cidr: "0.0.0.0/0" } # SSH
when: state == 'present'
- name: egress rules jumphost
openstack.cloud.security_group_rule:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
security_group: "{{ jumphost_security_group }}"
protocol: any
remote_ip_prefix: "0.0.0.0/0"
direction: "egress"
when: state == 'present'
- name: security group k8s
openstack.cloud.security_group:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "{{ security_group }}"
state: "{{ state }}"
- name: rules
- name: ingress rules k8s
openstack.cloud.security_group_rule:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
......@@ -14,19 +46,45 @@
port_range_min: "{{ item.min }}"
port_range_max: "{{ item.max }}"
remote_ip_prefix: "{{ item.cidr }}"
direction: "{{ item.direction | default('ingress') }}"
loop:
- { protocol: TCP, min: 2379, max: 2380, cidr: "{{ cidr }}" } # etcd
- { protocol: TCP, min: 6443, max: 6443, cidr: "{{ cidr }}" } # Kubernetes API
- { protocol: TCP, min: 80, max: 80, cidr: "{{ cidr }}" } # Ingress HTTP
- { protocol: TCP, min: 443, max: 80, cidr: "{{ cidr }}" } # Ingress HTTPS
- { protocol: TCP, min: 443, max: 443, cidr: "{{ cidr }}" } # Ingress HTTPS
- { protocol: TCP, min: 8443, max: 8443, cidr: "{{ cidr }}" } # Ingress Webhook
- { protocol: TCP, min: 9345, max: 9345, cidr: "{{ cidr }}" } # RKE2 API
- { protocol: TCP, min: 10250, max: 10250, cidr: "{{ cidr }}" } # kubelet metrics
- { protocol: TCP, min: 30000, max: 32767, cidr: "{{ cidr }}" } # NodePort port range
- { protocol: UDP, min: 8472, max: 8472, cidr: "{{ cidr }}" } # flannel
- { protocol: TCP, min: 4240, max: 4240, cidr: "{{ cidr }}" } # cni-health
- { protocol: TCP, min: 22, max: 22, cidr: "0.0.0.0/0" } # SSH
ignore_errors: yes
async: 60
poll: 0
- { protocol: TCP, min: 9100, max: 9100, cidr: "{{ cidr }}" } # nodeexporter
- { protocol: TCP, min: 22, max: 22, cidr: "{{ cidr }}" } # SSH
when: state == 'present'
- name: egress rules k8s
openstack.cloud.security_group_rule:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
security_group: "{{ security_group }}"
protocol: any
remote_ip_prefix: "0.0.0.0/0"
direction: "egress"
when: state == 'present'
- name: security group storage
openstack.cloud.security_group:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "{{ storage_security_group }}"
state: "{{ state }}"
- name: egress rules storage
openstack.cloud.security_group_rule:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
security_group: "{{ storage_security_group }}"
protocol: any
remote_ip_prefix: "0.0.0.0/0"
direction: "egress"
when: state == 'present'
\ No newline at end of file
- name: master VM
- name: jumphost VM
openstack.cloud.server:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "rke2-{{ cluster_name }}-master"
boot_from_volume: yes
terminate_volume: yes
name: "rke2-{{ cluster_name }}-jumphost"
boot_from_volume: true
terminate_volume: true
volume_size: "{{ server_volume_size }}"
network: "{{ network_name }}"
nics:
- net-name: "{{ network_name }}"
- net-id: "{{ storage_network }}"
key_name: "{{ ssh_key_name }}"
flavor: "{{ server_flavor }}"
image: "{{ image }}"
security_groups:
- "{{ security_group }}"
delete_fip: yes
floating_ip_pools:
- public
- "{{ jumphost_security_group }}"
auto_ip: false
state: "{{ state }}"
userdata: |-
#cloud-config
package_update: true
package_upgrade: true
register: _create_master
network:
config: disabled
register: _create_jumphost
async: 600
poll: 0
......@@ -29,82 +31,137 @@
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "rke2-{{ cluster_name }}-server-{{ item }}"
boot_from_volume: yes
terminate_volume: yes
boot_from_volume: true
terminate_volume: true
volume_size: "{{ server_volume_size }}"
network: "{{ network_name }}"
nics:
- net-name: "{{ network_name }}"
- net-id: "{{ storage_network }}"
key_name: "{{ ssh_key_name }}"
flavor: "{{ server_flavor }}"
image: "{{ image }}"
security_groups:
- "{{ security_group }}"
auto_ip: no
auto_ip: false
state: "{{ state }}"
userdata: |-
#cloud-config
package_update: true
package_upgrade: true
loop: "{{ range(1, server_count, 1) | list }}"
network:
config: disabled
loop: "{{ range(0, server_count, 1) | list }}"
register: _create_servers
async: 600
poll: 0
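# Note: the two port tasks below are alternatives. With sriov enabled, the storage
# port is created as an SR-IOV port (vnic_type: direct, switchdev capability);
# otherwise a regular VXLAN port is created on the same storage network.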
- name: create sriov port
openstack.cloud.port:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "rke2-{{ cluster_name }}-storage-{{ item }}"
network: "{{ storage_network }}"
vnic_type: direct
binding_profile:
"capabilities": '["switchdev"]'
security_groups:
- "{{ storage_security_group }}"
state: "{{ state }}"
loop: "{{ range(0, agent_count, 1) | list }}"
when: sriov | bool
- name: create vxlan port
openstack.cloud.port:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "rke2-{{ cluster_name }}-storage-{{ item }}"
network: "{{ storage_network }}"
security_groups:
- "{{ storage_security_group }}"
state: "{{ state }}"
loop: "{{ range(0, agent_count, 1) | list }}"
when: not (sriov | bool)
- name: agent VM
openstack.cloud.server:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
name: "rke2-{{ cluster_name }}-agent-{{ item }}"
boot_from_volume: yes
terminate_volume: yes
boot_from_volume: true
terminate_volume: true
volume_size: "{{ agent_volume_size }}"
network: "{{ network_name }}"
nics:
- net-name: "{{ network_name }}"
- port-name: "rke2-{{ cluster_name}}-storage-{{ item }}"
key_name: "{{ ssh_key_name }}"
flavor: "{{ agent_flavor }}"
image: "{{ image }}"
security_groups:
- "{{ security_group }}"
auto_ip: no
auto_ip: false
state: "{{ state }}"
userdata: |-
#cloud-config
package_update: true
package_upgrade: true
network:
config: disabled
loop: "{{ range(0, agent_count, 1) | list }}"
register: _create_agents
async: 600
poll: 0
- name: wait for master vm
ansible.builtin.async_status:
jid: "{{ _create_master.ansible_job_id }}"
register: master_result
until: master_result.finished
- name: wait for jumphost vm
async_status:
jid: "{{ _create_jumphost.ansible_job_id }}"
register: jumphost_result
until: jumphost_result.finished
delay: 1
retries: 600
- block:
- name: wait for server vms
ansible.builtin.async_status:
jid: "{{ item.ansible_job_id }}"
register: servers_result
until: servers_result.finished
delay: 1
retries: 600
loop: "{{ _create_servers.results }}"
- name: create floating ip for jumphost
openstack.cloud.floating_ip:
auth: "{{ openstack_auth }}"
auth_type: "{{ openstack_auth_type }}"
server: "rke2-{{ cluster_name }}-jumphost"
network: "{{ floating_network_id }}"
nat_destination: "{{ network_name }}"
wait: true
timeout: 180
register: jumphost_floating_ip
when: state == 'present'
- name: wait for server vms
async_status:
jid: "{{ item.ansible_job_id }}"
register: servers_result
until: servers_result.finished
delay: 1
retries: 600
loop: "{{ _create_servers.results }}"
- name: wait for agents vm
async_status:
jid: "{{ item.ansible_job_id }}"
register: agents_result
until: agents_result.finished
delay: 1
retries: 600
loop: "{{ _create_agents.results }}"
- set_fact:
jumphost: "{{ jumphost_result | community.general.json_query('server.{name: hostname, private_v4: addresses.\"rke2-test\"[0].addr}') }}"
servers: "{{ servers_result.results | community.general.json_query('[].server.{name: hostname, private_v4: addresses.\"rke2-test\"[0].addr}') }}"
agents: "{{ agents_result.results | community.general.json_query('[].server.{name: hostname, private_v4: addresses.\"rke2-test\"[0].addr}') }}"
jumphost_floating_ip: "{{ jumphost_floating_ip.floating_ip.floating_ip_address }}"
when: state == 'present'
- block:
- name: wait for agents vm
ansible.builtin.async_status:
jid: "{{ item.ansible_job_id }}"
register: agents_result
until: agents_result.finished
delay: 1
retries: 600
loop: "{{ _create_agents.results }}"
- debug:
var: jumphost
- ansible.builtin.set_fact:
master: "{{ master_result | community.general.json_query('server.{name: name, private_v4: private_v4, public_v4: public_v4}') }}"
servers: "{{ servers_result.results | community.general.json_query('[].server.{name: name, private_v4: private_v4}') }}"
agents: "{{ agents_result.results | community.general.json_query('[].server.{name: name, private_v4: private_v4}') }}"
new_master_floating_ip: "{{ master_result.server.public_v4 }}"
when: state == 'present'
\ No newline at end of file
- debug:
var: servers
- debug:
var: agents
\ No newline at end of file
[rke2:children]
servers
agents
[master]
{{ master.name }} ansible_host="{{ master.public_v4 }}"
{{ servers[0].name }}
[servers]
{% if servers is defined and servers | length > 0 %}
{% for server in servers %}
{{ server.name }} ansible_host="{{ server.private_v4 }}"
{{ server.name }} ansible_hostname={{ server.private_v4 }} node_ip={{ server.private_v4 }} node_type=server
{% endfor %}
{% endif %}
[agents]
{% if agents is defined and agents | length > 0 %}
{% for agent in agents %}
{{ agent.name }} ansible_host="{{ agent.private_v4 }}"
{{ agent.name }} ansible_hostname={{ agent.private_v4 }} node_ip={{ agent.private_v4 }} node_type=agent
{% endfor %}
{% endif %}
\ No newline at end of file
[jumphosts]
{{ jumphost.name }} ansible_hostname="{{ jumphost_floating_ip }}" node_ip={{ jumphost.private_v4 }}
\ No newline at end of file
domain:
server: "https://{{ domain }}:9345"
state: present
upgrade: no
dist_upgrade: no
reboot: no
debug: false
selinux: true
tls_san:
- "{{ domain }}"
- "{{ loadbalancer_ip }}"
node_taints: []
node_labels: []
rke2_channel: stable
rke2_k8s_version: "1.24"
state: present
upgrade: no
dist_upgrade: no
reboot: no
\ No newline at end of file
resolv_conf: "/etc/resolv.conf"
cloud_provider_name: "external"
semodules:
- my-iptables
registry_mirrors: {}
# mirrors:
# docker.io:
# endpoint:
# - "https://docker-mirror.example.com:5000"
# registry.example.com:
# endpoint:
# - "https://registry.example.com"
# configs:
# "docker-mirror.example.com:5000":
# auth:
# username: xxxxxx # this is the registry username
# password: xxxxxx # this is the registry password
# tls:
# cert_file: # path to the cert file used to authenticate to the registry
# key_file: # path to the key file for the certificate used to authenticate to the registry
# ca_file: # path to the ca file used to verify the registry's certificate
# insecure_skip_verify: # may be set to true to skip verifying the registry's certificate
# "registry.example.com":
# auth: --SEE_ABOVE--
# tls: --SEE_ABOVE--
# default_vars:
# cluster_issuer_name: letsencrypt-cluster-issuer
# placeholder_domain: example.org
# manifests: {} # used to override default_manifests
# manifests_config: "{{ default_manifests | combine(manifests, recursive=True) }}"
# default_manifests:
# config-rke2-coredns:
# enabled: false
# config-rke2-calico:
# enabled: false
# config-rke2-canal:
# enabled: false
# config-rke2-cilium:
# enabled: false
# hubble:
# enabled: false
# user: ""
# password: ""
# cilium: #advanced here
# debug:
# enabled: false
# preflight:
# enabled: false
# bpf:
# clockProbe: true
# kubeProxyReplacement: disabled # probe, strict
# hostPort:
# enabled: false
# hostFirewall:
# enabled: false
# bandwidthManager:
# enabled: false
# bbr: false
# socketLB:
# enabled: false
# hostNamespaceOnly: false
# cni:
# chainingMode: none # needs plugins, not needed when kubeProxyReplacement in place
# containerRuntime:
# integration: auto
# prometheus:
# enabled: false
# serviceMonitor:
# enabled: false
# hubble:
# enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
# metrics:
# enabled:
# - dns:query;ignoreAAAA
# - drop
# - tcp
# - flow
# - icmp
# - http
# serviceMonitor:
# enabled: false
# relay:
# enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
# ui:
# enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
# ingress:
# enabled: true
# hosts:
# - "{{ manifests['config-rke2-cilium'].hubble.hostname | default('hubble.' + default_vars.placeholder_domain) }}"
# tls:
# - secretName: hubble-cilium-tls
# hosts:
# - "{{ manifests['config-rke2-cilium'].hubble.hostname | default('hubble.' + default_vars.placeholder_domain) }}"
# annotations:
# cert-manager.io/cluster-issuer: "{{ manifests['deploy-cert-manager'].clusterissuer.name | default(default_vars.cluster_issuer_name) }}"
# nginx.ingress.kubernetes.io/whitelist-source-range: "{{ manifests['config-rke2-cilium'].hubble.allowlist_source_range | default('0.0.0.0/0') }}"
# nginx.ingress.kubernetes.io/auth-type: basic
# nginx.ingress.kubernetes.io/auth-secret: hubble-auth-secret
# nginx.ingress.kubernetes.io/auth-secret-type: auth-file
# nginx.ingress.kubernetes.io/auth-realm: Hubble Authentication Required
# deploy-openstack-ccm:
# enabled: false
# deploy-openstack-cinder:
# enabled: false
# deploy-openstack-manila:
# enabled: false
# deploy-cephfs:
# enabled: false
# deploy-nfs:
# enabled: false
# deploy-grafana:
# enabled: false
# adminPassword: ""
# deploy-rancher-ui:
# enabled: false
# rancher_ui_dns: "{{ 'rancher.' + default_vars.placeholder_domain }}"
# letsEncrypt_admin_mail: "{{ 'adls@' + default_vars.placeholder_domain }}"
# deploy-metallb:
# enabled: false
# pools:
# - name: "default"
# addresses:
# - ""
# auto_assign: false
# deploy-cloud-provider-vsphere:
# enabled: false
# vCenter_IP: ""
# vCenter_Username: ""
# vCenter_Password: ""
# vCenter_Datacenter: ""
# vCenter_ClusterID: ""
# CSIMigrationvSphere: ""
# deploy-spectrum-scale-secret:
# enabled: false
# deploy-spectrum-scale-operator-2.2.0:
# enabled: false
# deploy-spectrum-scale-csi-2.1.0:
# enabled: false
# deploy-nginx-ingress-public:
# enabled: false
# deploy-irods-csi-driver:
# enabled: false
# deploy-smb-csi-driver:
# enabled: false
# depoly-minio-operator:
# enabled: false
# deploy-oidc-issuer-ingress:
# enabled: false
# hostname: oidc.cluster.local
# certissuer: letsencrypt-cluster-issuer
\ No newline at end of file
module my-iptables 1.0;
require {
type iptables_t;
type cgroup_t;
class dir ioctl;
}
#============= iptables_t ==============
allow iptables_t cgroup_t:dir ioctl;
\ No newline at end of file
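For reference, a type enforcement source like my-iptables above is normally compiled and loaded with the standard SELinux policy tools; the task sketch below is illustrative (paths and task names assumed, not taken from this diff).

- name: compile my-iptables policy module
  ansible.builtin.command: checkmodule -M -m -o /tmp/my-iptables.mod /tmp/my-iptables.te
- name: package my-iptables policy module
  ansible.builtin.command: semodule_package -o /tmp/my-iptables.pp -m /tmp/my-iptables.mod
- name: install my-iptables policy module
  ansible.builtin.command: semodule -i /tmp/my-iptables.pp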
(block my-node-exporter
(blockinherit container)
(allow my-node-exporter.process init_t (dir (search)))
(allow my-node-exporter.process init_t (file (read open)))
(allow my-node-exporter.process node_t (tcp_socket (node_bind)))
(allow my-node-exporter.process unreserved_port_t (tcp_socket (name_bind)))
(allow my-node-exporter.process self (tcp_socket (listen)))
)