diff --git a/roles/cephfs/tasks/main.yml b/roles/cephfs/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..60cd731e07f4f8416026307167805f63535430b9
--- /dev/null
+++ b/roles/cephfs/tasks/main.yml
@@ -0,0 +1,72 @@
+- name: install epel repos
+  package:
+    name: epel-release
+
+- name: create ceph.repo
+  template:
+    src: ceph.repo.j2
+    dest: /etc/yum.repos.d/ceph.repo
+    mode: "0664"
+
+- name: install ceph common
+  package:
+    name: ceph-common
+
+- name: create ceph.conf
+  template:
+    src: ceph.conf.j2
+    dest: /etc/ceph/ceph.conf
+    mode: "0664"
+
+- name: create ceph keyrings
+  template:
+    src: ceph.client.keyring.j2
+    dest: "/etc/ceph/ceph.client.{{ client }}.keyring"
+    mode: "0600"
+  vars:
+    client: "{{ item.client }}"
+    key: "{{ item.key }}"
+  loop: "{{ ceph_keyrings }}"
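+# ceph_keyrings is expected to be a list of client/key entries, e.g.
+# (hypothetical values; the real keys belong in a vault):
+#   ceph_keyrings:
+#     - { client: jaas-home, key: "AQ...==" }
+#     - { client: jaas-jaas, key: "AQ...==" }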
+
+- name: create mount points
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+  loop:
+    - /mnt/jaas-home
+    - /mnt/jaas-jaas
+
+# - name: add to fstab
+#   ansible.builtin.lineinfile:
+#     path: /etc/fstab
+#     line: :/home /mnt/jaas-home ceph _netdev,name=jaas-home 0 0 
+#     state: absent
+
+# - name: add to fstab
+#   ansible.builtin.lineinfile:
+#     path: /etc/fstab
+#     line: :/jaas /mnt/jaas-jaas ceph _netdev,name=jaas-jaas 0 0
+#     state: absent
+
+- name: mount cephfs /home
+  ansible.posix.mount:
+    src: ":/home"
+    path: "/mnt/jaas-home"
+    opts: _netdev,name=jaas-home
+    state: mounted
+    fstype: ceph
+
+- name: mount cephfs /jaas
+  ansible.posix.mount:
+    src: ":/jaas"
+    path: "/mnt/jaas-jaas"
+    opts: _netdev,name=jaas-jaas
+    state: mounted
+    fstype: ceph
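+# state: mounted both mounts the share and persists an fstab entry equivalent
+# to the retired lineinfile tasks above, e.g.
+#   :/home /mnt/jaas-home ceph _netdev,name=jaas-home 0 0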
+
+- name: enable container_use_cephfs
+  ansible.posix.seboolean:
+    name: container_use_cephfs
+    state: true
+    persistent: true
+
diff --git a/roles/cephfs/templates/ceph.client.keyring.j2 b/roles/cephfs/templates/ceph.client.keyring.j2
new file mode 100644
index 0000000000000000000000000000000000000000..4d57840cf53a5ff418c4def64034dc6a9699a552
--- /dev/null
+++ b/roles/cephfs/templates/ceph.client.keyring.j2
@@ -0,0 +1,2 @@
+[client.{{ client }}]
+   key = {{ key }}
diff --git a/roles/cephfs/templates/ceph.conf.j2 b/roles/cephfs/templates/ceph.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..1dcfceafad167224dbdfe79c7928c79e1e3d2ce6
--- /dev/null
+++ b/roles/cephfs/templates/ceph.conf.j2
@@ -0,0 +1,4 @@
+# minimal ceph.conf for 356ebb9a-acc2-11ea-a9d6-b8599fef7b50
+[global]
+   fsid = {{ ceph_cluster_id }}
+   mon_host = {% for mon in ceph_monitors %}[v2:{{ mon }}:3300/0,v1:{{ mon }}:6789/0] {% endfor %}
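+# Rendered sketch for ceph_monitors ["10.0.0.1", "10.0.0.2"] (hypothetical):
+#   mon_host = [v2:10.0.0.1:3300/0,v1:10.0.0.1:6789/0] [v2:10.0.0.2:3300/0,v1:10.0.0.2:6789/0]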
diff --git a/roles/cephfs/templates/ceph.repo.j2 b/roles/cephfs/templates/ceph.repo.j2
new file mode 100644
index 0000000000000000000000000000000000000000..9a447b3003ecb9755156b0256f03239329a99593
--- /dev/null
+++ b/roles/cephfs/templates/ceph.repo.j2
@@ -0,0 +1,20 @@
+[Ceph]
+name=Ceph $basearch
+baseurl=https://download.ceph.com/rpm-quincy/el8/$basearch
+enabled=1
+gpgcheck=1
+gpgkey=https://download.ceph.com/keys/release.gpg
+
+[Ceph-noarch]
+name=Ceph noarch
+baseurl=https://download.ceph.com/rpm-quincy/el8/noarch
+enabled=1
+gpgcheck=1
+gpgkey=https://download.ceph.com/keys/release.gpg
+
+[Ceph-source]
+name=Ceph SRPMS
+baseurl=https://download.ceph.com/rpm-quincy/el8/SRPMS
+enabled=1
+gpgcheck=1
+gpgkey=https://download.ceph.com/keys/release.gpg
diff --git a/roles/create_infrastructure/defaults/main.yml b/roles/create_infrastructure/defaults/main.yml
index c27bba1dbdb604a0e8b0005eaf04728de936d328..d797c97c72e2f4cfdd1fd616f48e12153bea7ae1 100644
--- a/roles/create_infrastructure/defaults/main.yml
+++ b/roles/create_infrastructure/defaults/main.yml
@@ -3,7 +3,7 @@ ssh_key_name: "rke2-{{ cluster_name }}-ssh-key"
 ssh_key_file: "{{ ssh_keys_dir }}/{{ ssh_key_name }}.pem"
 network_name: "rke2-{{ cluster_name }}"
 subnet_name: "rke2-{{ cluster_name }}"
-cidr: 10.0.0.0/24
+cidr: 192.168.13.0/24
 router_name: "rke2-{{ cluster_name }}"
 
 server_volume_size: 50
@@ -17,4 +17,9 @@ agent_count: 3
 
 loadbalancer_name: "rke2_{{ cluster_name }}"
 security_group: "rke2_{{ cluster_name }}"
-state: present
\ No newline at end of file
+state: present
+
+storage_security_group: "rke2_{{ cluster_name }}_storage"
+jumphost_security_group: "rke2_{{ cluster_name }}_jumphost"
+
+sriov: false
\ No newline at end of file
diff --git a/roles/create_infrastructure/tasks/inventory.yml b/roles/create_infrastructure/tasks/inventory.yml
index c50e818118f0182ded2c4fb5af064dca1777cc39..f006ad9d5b92223aac8d4bebea92a7c97b5c6561 100644
--- a/roles/create_infrastructure/tasks/inventory.yml
+++ b/roles/create_infrastructure/tasks/inventory.yml
@@ -1,26 +1,26 @@
-- block:  
-  # - pause:
-  #     seconds: 5
-
-  - openstack.cloud.server_info:
-      auth: "{{ openstack_auth }}"
-      auth_type: "{{ openstack_auth_type }}"
-      server: "rke2-{{ cluster_name }}-server-{{ item }}"
-    loop: "{{ range(1, agent_count, 1) | list }}"
-    register: servers_result
-
-  - openstack.cloud.server_info:
-      auth: "{{ openstack_auth }}"
-      auth_type: "{{ openstack_auth_type }}"
-      server: "rke2-{{ cluster_name }}-agent-{{ item }}"
-    loop: "{{ range(0, agent_count, 1) | list }}"
-    register: agents_result
-
-  - set_fact:
-      agents: "{{ agents_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
-      servers: "{{ servers_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
+# - block:  
+#   # - pause:
+#   #     seconds: 5
+
+#   - openstack.cloud.server_info:
+#       auth: "{{ openstack_auth }}"
+#       auth_type: "{{ openstack_auth_type }}"
+#       server: "rke2-{{ cluster_name }}-server-{{ item }}"
+#     loop: "{{ range(1, agent_count, 1) | list }}"
+#     register: servers_result
+
+#   - openstack.cloud.server_info:
+#       auth: "{{ openstack_auth }}"
+#       auth_type: "{{ openstack_auth_type }}"
+#       server: "rke2-{{ cluster_name }}-agent-{{ item }}"
+#     loop: "{{ range(0, agent_count, 1) | list }}"
+#     register: agents_result
+
+#   - set_fact:
+#       agents: "{{ agents_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
+#       servers: "{{ servers_result.results | community.general.json_query('[].openstack_servers[].{name: name, private_v4: private_v4}') }}"
   
-  when: agents | length > 0 and agents[0].private_v4 == '' or servers | length > 1 and servers[0].private_v4 == ''
+#   when: agents | length > 0 and agents[0].private_v4 == '' or servers | length > 1 and servers[0].private_v4 == ''
 
 
 - name: update inventory in project
@@ -35,26 +35,16 @@
   loop:
     - group_vars
     - group_vars/all
-    - group_vars/master
-    - group_vars/servers
-    - group_vars/agents    
-
-- name: update all inventory
-  template:
-    src: all.yml.j2
-    dest: group_vars/all/infrastructure.yml
-
-- name: update master inventory
-  template:
-    src: master.yml.j2
-    dest: group_vars/master/infrastructure.yml
-
-- name: update servers inventory
-  template:
-    src: servers.yml.j2
-    dest: group_vars/servers/infrastructure.yml
+ 
+- name: update config
+  lineinfile:
+    dest: "config.yml"
+    regexp: "{{ item.key }}:.*"
+    line: "{{ item.key }}: {{ item.value }}"
+    state: present
+  loop:
+    - { key: loadbalancer_ip, value: "{{ loadbalancer_ip }}" }
+    - { key: jumphost_floating_ip, value: "{{ jumphost_floating_ip }}" }
+    - { key: subnet_id, value: "{{ new_subnet_id }}" }
+    - { key: ssh_key_file, value: "{{ ssh_key_file }}" }
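+# After this task config.yml carries the infrastructure outputs, e.g.
+# (hypothetical values):
+#   loadbalancer_ip: 203.0.113.10
+#   jumphost_floating_ip: 203.0.113.11
+#   subnet_id: 2f0e...
+#   ssh_key_file: keys/rke2-test-ssh-key.pem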
 
-- name: update agents inventory
-  template:
-    src: agents.yml.j2
-    dest: group_vars/agents/infrastructure.yml
diff --git a/roles/create_infrastructure/tasks/keypair.yml b/roles/create_infrastructure/tasks/keypair.yml
index 933d7b3faac1383e44c15a6ed6b342b16188c17e..b3083c010262541d2161fd62272ca5da0a6660a5 100644
--- a/roles/create_infrastructure/tasks/keypair.yml
+++ b/roles/create_infrastructure/tasks/keypair.yml
@@ -15,9 +15,15 @@
     path: "{{ ssh_keys_dir }}"
     state: directory
 
+- name: remove ssh keys directory
+  file:
+    path: "{{ ssh_keys_dir }}"
+    state: absent
+  when: state == 'absent'
+
 - name: store private key locally
   copy:
     dest: "{{ ssh_keys_dir }}/{{ ssh_key_name }}.pem"
-    content: "{{ ssh_key.key.private_key }}"
+    content: "{{ ssh_key.keypair.private_key }}"
     mode: "0600"
-  when: state == 'present' and ssh_key.key.private_key != none
\ No newline at end of file
+  when: state == 'present' and ssh_key.keypair.private_key != none
\ No newline at end of file
diff --git a/roles/create_infrastructure/tasks/lb_members.yml b/roles/create_infrastructure/tasks/lb_members.yml
index 7880891101aeb77e439c6b57255f3392e691acbf..07fa10c1c3960bf472dbdc195cb1e6dd58023f56 100644
--- a/roles/create_infrastructure/tasks/lb_members.yml
+++ b/roles/create_infrastructure/tasks/lb_members.yml
@@ -1,14 +1,17 @@
-# - name: wait for loadbalancer
-#   async_status:
-#     jid: "{{ _create_loadbalancer.ansible_job_id }}"
-#   register: loadbalancer_result
-#   until: loadbalancer_result.finished
-#   delay: 1
-#   retries: 600
+- name: wait for loadbalancer
+  async_status:
+    jid: "{{ _create_loadbalancer.ansible_job_id }}"
+  register: loadbalancer_result
+  until: loadbalancer_result.finished
+  delay: 1
+  retries: 600
 
-# - name: store loadbalancer ip
-#   set_fact:
-#     new_loadbalancer_ip: "{{ loadbalancer_result.loadbalancer.public_vip_address | default(loadbalancer_ip) }}"
+- debug:
+    var: loadbalancer_result
+
+- name: store loadbalancer ip
+  set_fact:
+    loadbalancer_ip: "{{ loadbalancer_result.loadbalancer.public_vip_address | default(loadbalancer_ip) }}"
 
 - name: add listener
   openstack.cloud.lb_listener:
diff --git a/roles/create_infrastructure/tasks/loadbalancer.yml b/roles/create_infrastructure/tasks/loadbalancer.yml
index f1a17f10a6939349e768bd7ae1f3a139f52f1a8c..9fbb04f419ed2e36ebf69569a1a30230885868ce 100644
--- a/roles/create_infrastructure/tasks/loadbalancer.yml
+++ b/roles/create_infrastructure/tasks/loadbalancer.yml
@@ -4,8 +4,9 @@
     auth_type: "{{ openstack_auth_type }}"
     name: "{{ loadbalancer_name }}"
     vip_subnet: "{{ subnet_name }}"
-    auto_public_ip: yes
-    public_network: public
+    public_ip_address: "{{ loadbalancer_ip }}"
+    public_network: "{{ floating_network_id }}"
+    assign_floating_ip: true
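+    # loadbalancer_ip is read back from config.yml (written by inventory.yml),
+    # so a re-run should re-attach the same floating IP rather than allocate a
+    # new one.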
     state: "{{ state }}"
   register: _create_loadbalancer
   async: 600
diff --git a/roles/create_infrastructure/tasks/main.yml b/roles/create_infrastructure/tasks/main.yml
index 0dddfcd2c0b91238cfe480afe9fa677aa2aaa6c9..268dc35735e018000d138c3c8ae3f8f8d8ceef78 100644
--- a/roles/create_infrastructure/tasks/main.yml
+++ b/roles/create_infrastructure/tasks/main.yml
@@ -1,60 +1,24 @@
 - block:
   - name: network
     include_tasks: network.yml
-    args:
-      apply:
-        tags:
-          - network
-    tags:
-      - network
 
   - name: loadbalancer
     include_tasks: loadbalancer.yml
-    args:
-      apply:
-        tags:
-          - loadbalancer
-    tags:
-      - loadbalancer
 
   - name: security groups
     include_tasks: security_groups.yml
-    args:
-      apply:
-        tags:
-          - security_groups
-    tags:
-      - security_groups
 
   - name: keypair
     include_tasks: keypair.yml
-    args:
-      apply:
-        tags:
-          - keypair
-    tags:
-      - keypair
 
   - name: vm
     include_tasks: vm.yml
-    args:
-      apply:
-        tags:
-          - vm
-    tags:
-      - vm
 
   - name: lb elements 
     include_tasks: lb_members.yml
-    args:
-      apply:
-        tags:
-          - loadbalancer
-    tags:
-      - loadbalancer
 
-  # - name: inventory
-  #   include_tasks: inventory.yml
+  - name: inventory
+    include_tasks: inventory.yml
 
   when: state == 'present'
 
@@ -78,5 +42,8 @@
     loop:
       - group_vars
       - hosts
+  
+  - name: keypair
+    include_tasks: keypair.yml
 
   when: state == 'absent'
\ No newline at end of file
diff --git a/roles/create_infrastructure/tasks/network.yml b/roles/create_infrastructure/tasks/network.yml
index 3ce82b7278e2935484d856162b6a5aae54091246..d341de4e83a3f8ad2718fcea30a2b448828a8b33 100644
--- a/roles/create_infrastructure/tasks/network.yml
+++ b/roles/create_infrastructure/tasks/network.yml
@@ -22,6 +22,7 @@
     cidr: "{{ cidr }}"
     state: "{{ state }}"
   register: subnet_result
+  when: state == 'present'
 
 - set_fact:
     new_subnet_id: "{{ subnet_result.subnet.id }}"
diff --git a/roles/create_infrastructure/tasks/security_groups.yml b/roles/create_infrastructure/tasks/security_groups.yml
index 1524c88eee5eefb7d7fed5547469098e8c489e39..3fdd2092ca9a7c66a6e1fac888e8fe1375a61344 100644
--- a/roles/create_infrastructure/tasks/security_groups.yml
+++ b/roles/create_infrastructure/tasks/security_groups.yml
@@ -1,11 +1,43 @@
-- name: security group
+- name: security group jumphost
+  openstack.cloud.security_group:
+    auth: "{{ openstack_auth }}"
+    auth_type: "{{ openstack_auth_type }}"
+    name: "{{ jumphost_security_group }}"
+    state: "{{ state }}"
+
+- name: ingress rules jumphost
+  openstack.cloud.security_group_rule:
+    auth: "{{ openstack_auth }}"
+    auth_type: "{{ openstack_auth_type }}"
+    security_group: "{{ jumphost_security_group }}"
+    protocol: "{{ item.protocol }}"
+    port_range_min: "{{ item.min }}"
+    port_range_max: "{{ item.max }}"
+    remote_ip_prefix: "{{ item.cidr }}"
+    direction: "{{ item.direction | default('ingress') }}"
+  loop:
+    - { protocol: TCP, min: 22, max: 22, cidr: "0.0.0.0/0" } # SSH
+  when: state == 'present'
+
+- name: egress rules jumphost
+  openstack.cloud.security_group_rule:
+    auth: "{{ openstack_auth }}"
+    auth_type: "{{ openstack_auth_type }}"
+    security_group: "{{ jumphost_security_group }}"
+    protocol: any
+    remote_ip_prefix: "0.0.0.0/0"
+    direction: "egress"
+  when: state == 'present'
+
+
+- name: security group k8s
   openstack.cloud.security_group:
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
     name: "{{ security_group }}"
     state: "{{ state }}"
 
-- name: rules
+- name: ingress rules k8s
   openstack.cloud.security_group_rule:
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
@@ -19,7 +51,7 @@
     - { protocol: TCP, min: 2379, max: 2380, cidr: "{{ cidr }}" } # etcd
     - { protocol: TCP, min: 6443, max: 6443, cidr: "{{ cidr }}" } # Kubernetes API
     - { protocol: TCP, min: 80, max: 80, cidr: "{{ cidr }}" } # Ingress HTTP
-    - { protocol: TCP, min: 443, max: 80, cidr: "{{ cidr }}" } # Ingress HTTPS
+    - { protocol: TCP, min: 443, max: 443, cidr: "{{ cidr }}" } # Ingress HTTPS
     - { protocol: TCP, min: 8443, max: 8443, cidr: "{{ cidr }}" } # Ingress Webhook
     - { protocol: TCP, min: 9345, max: 9345, cidr: "{{ cidr }}" } # RKE2 API
     - { protocol: TCP, min: 10250, max: 10250, cidr: "{{ cidr }}" } # kubelet metrics
@@ -27,33 +59,32 @@
     - { protocol: UDP, min: 8472, max: 8472, cidr: "{{ cidr }}" } # flannel
     - { protocol: TCP, min: 4240, max: 4240, cidr: "{{ cidr }}" } # cni-health
     - { protocol: TCP, min: 9100, max: 9100, cidr: "{{ cidr }}" } # nodeexporter
-    - { protocol: TCP, min: 22, max: 22, cidr: "0.0.0.0/0" } # SSH
-    - { protocol: ANY, cidr: "0.0.0.0/0", direction: egress } # all out
-  ignore_errors: yes
-  async: 60
-  poll: 0
+    - { protocol: TCP, min: 22, max: 22, cidr: "{{ cidr }}" } # SSH
+  when: state == 'present'
+
+- name: egress rules k8s
+  openstack.cloud.security_group_rule:
+    auth: "{{ openstack_auth }}"
+    auth_type: "{{ openstack_auth_type }}"
+    security_group: "{{ security_group }}"
+    protocol: any
+    remote_ip_prefix: "0.0.0.0/0"
+    direction: "egress"
   when: state == 'present'
 
-- name: security group
+- name: security group storage
   openstack.cloud.security_group:
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
     name: "{{ storage_security_group }}"
     state: "{{ state }}"
 
-- name: rules
+- name: egress rules storage
   openstack.cloud.security_group_rule:
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
     security_group: "{{ storage_security_group }}"
-    protocol: "{{ item.protocol }}"
-    port_range_min: "{{ item.min }}"
-    port_range_max: "{{ item.max }}"
-    remote_ip_prefix: "{{ item.cidr }}"
-    direction: "{{ item.direction | default('ingress') }}"
-  loop:
-    - { protocol: ANY, cidr: "0.0.0.0/0", direction: egress } # all out
-  ignore_errors: yes
-  async: 60
-  poll: 0
+    protocol: any
+    remote_ip_prefix: "0.0.0.0/0"
+    direction: "egress"
   when: state == 'present'
\ No newline at end of file
diff --git a/roles/create_infrastructure/tasks/vm.yml b/roles/create_infrastructure/tasks/vm.yml
index ccd8a3a1827af2728d59df8aaf48175f88cf5c28..30efbff1055cce18ed5464bff262e3c8788967ed 100644
--- a/roles/create_infrastructure/tasks/vm.yml
+++ b/roles/create_infrastructure/tasks/vm.yml
@@ -2,24 +2,26 @@
   openstack.cloud.server:
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
-    name: "rke2-{{ cluster_name }}-server-{{ item }}"
-    boot_from_volume: yes
-    terminate_volume: yes
+    name: "rke2-{{ cluster_name }}-jumphost"
+    boot_from_volume: true
+    terminate_volume: true
     volume_size: "{{ server_volume_size }}"
     nics:
       - net-name: "{{ network_name }}"
-      - net-name: "{{ storage_network_name }}"
+      - net-id: "{{ storage_network }}"
     key_name: "{{ ssh_key_name }}"
     flavor: "{{ server_flavor }}"
     image: "{{ image }}"
     security_groups:
-      - "{{ security_group }}"
-    auto_ip: yes
+      - "{{ jumphost_security_group }}"
+    auto_ip: false
     state: "{{ state }}"
     userdata: |-
       #cloud-config
       package_update: true
       package_upgrade: true
+      network:
+        config: disabled
   register: _create_jumphost
   async: 600
   poll: 0
@@ -29,21 +31,25 @@
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
     name: "rke2-{{ cluster_name }}-server-{{ item }}"
-    boot_from_volume: yes
-    terminate_volume: yes
+    boot_from_volume: true
+    terminate_volume: true
     volume_size: "{{ server_volume_size }}"
-    network: "{{ network_name }}"
+    nics:
+      - net-name: "{{ network_name }}"
+      - net-id: "{{ storage_network }}"
     key_name: "{{ ssh_key_name }}"
     flavor: "{{ server_flavor }}"
     image: "{{ image }}"
     security_groups:
       - "{{ security_group }}"
-    auto_ip: no
+    auto_ip: false
     state: "{{ state }}"
     userdata: |-
       #cloud-config
       package_update: true
       package_upgrade: true
+      network:
+        config: disabled
   loop: "{{ range(0, server_count, 1) | list }}"
   register: _create_servers
   async: 600
@@ -53,74 +59,109 @@
   openstack.cloud.port:
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
-    name: "rke2-{{ cluster_name }}-sriov-{{ item }}"
+    name: "rke2-{{ cluster_name }}-storage-{{ item }}"
     network: "{{ storage_network }}"
     vnic_type: direct
     binding_profile:
       "capabilities": '["switchdev"]'
     security_groups:
       - "{{ storage_security_group }}"
+    state: "{{ state }}"
+  loop: "{{ range(0, agent_count, 1) | list }}"
+  when: sriov | bool
+
+- name: create vxlan port
+  openstack.cloud.port:
+    auth: "{{ openstack_auth }}"
+    auth_type: "{{ openstack_auth_type }}"
+    name: "rke2-{{ cluster_name }}-storage-{{ item }}"
+    network: "{{ storage_network }}"
+    security_groups:
+      - "{{ storage_security_group }}"
+    state: "{{ state }}"
   loop: "{{ range(0, agent_count, 1) | list }}"
+  when: not (sriov | bool)
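+# sriov=true requests a hardware-offloaded port (vnic_type: direct plus a
+# switchdev binding profile); sriov=false creates a regular (VXLAN) port.
+# Both share the rke2-<cluster>-storage-<n> name, so the agent VM task below
+# can attach by port-name either way.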
 
 - name: agent VM
   openstack.cloud.server:
     auth: "{{ openstack_auth }}"
     auth_type: "{{ openstack_auth_type }}"
-    name: "rke2-{{ cluster_name }}20-agent-{{ item }}"
-    boot_from_volume: yes
-    terminate_volume: yes
+    name: "rke2-{{ cluster_name }}-agent-{{ item }}"
+    boot_from_volume: true
+    terminate_volume: true
     volume_size: "{{ agent_volume_size }}"
     nics:
       - net-name: "{{ network_name }}"
-      - port-name: "rke2-{{ cluster_name}}-sriov-{{ item }}"
+      - port-name: "rke2-{{ cluster_name }}-storage-{{ item }}"
     key_name: "{{ ssh_key_name }}"
     flavor: "{{ agent_flavor }}"
     image: "{{ image }}"
     security_groups:
       - "{{ security_group }}"
-    auto_ip: no
+    auto_ip: false
     state: "{{ state }}"
     userdata: |-
       #cloud-config
       package_update: true
       package_upgrade: true
+      network:
+        config: disabled
   loop: "{{ range(0, agent_count, 1) | list }}"
   register: _create_agents
   async: 600
   poll: 0
 
-- block:
-  - name: wait for jumphost vm
-    async_status:
-      jid: "{{ item.ansible_job_id }}"
-    register: jumphost_result
-    until: jumphost_result.finished
-    delay: 1
-    retries: 600
-    loop: "{{ _create_jumphost.results }}"
+- name: wait for jumphost vm
+  async_status:
+    jid: "{{ _create_jumphost.ansible_job_id }}"
+  register: jumphost_result
+  until: jumphost_result.finished
+  delay: 1
+  retries: 600
+
+- name: create floating ip for jumphost
+  openstack.cloud.floating_ip:
+    auth: "{{ openstack_auth }}"
+    auth_type: "{{ openstack_auth_type }}"
+    server: "rke2-{{ cluster_name }}-jumphost"
+    network: "{{ floating_network_id }}"
+    nat_destination: "{{ network_name }}"
+    wait: true
+    timeout: 180
+  register: jumphost_floating_ip
+  when: state == 'present'
+
+- name: wait for server vms
+  async_status:
+    jid: "{{ item.ansible_job_id }}"
+  register: servers_result
+  until: servers_result.finished
+  delay: 1
+  retries: 600
+  loop: "{{ _create_servers.results }}"
 
-  - name: wait for server vms
-    async_status:
-      jid: "{{ item.ansible_job_id }}"
-    register: servers_result
-    until: servers_result.finished
-    delay: 1
-    retries: 600
-    loop: "{{ _create_servers.results }}"
+- name: wait for agents vm
+  async_status:
+    jid: "{{ item.ansible_job_id }}"
+  register: agents_result
+  until: agents_result.finished
+  delay: 1
+  retries: 600
+  loop: "{{ _create_agents.results }}"
 
-- block:
-  - name: wait for agents vm
-    async_status:
-      jid: "{{ item.ansible_job_id }}"
-    register: agents_result
-    until: agents_result.finished
-    delay: 1
-    retries: 600
-    loop: "{{ _create_agents.results }}"
 
 - set_fact:
-      jumphost: "{{ jumphost_result | community.general.json_query('server.{name: name, private_v4: private_v4, public_v4: public_v4}') }}"
-      servers: "{{ servers_result.results | community.general.json_query('[].server.{name: name, private_v4: private_v4}') }}"
-      agents: "{{ agents_result.results | community.general.json_query('[].server.{name: name, private_v4: private_v4}') }}"
-      jumphost_floating_ip: "{{ jumphost_result.server.public_v4 }}"
-  when: state == 'present'
\ No newline at end of file
+      jumphost: "{{ jumphost_result | community.general.json_query('server.{name: hostname, private_v4: addresses.\"' ~ network_name ~ '\"[0].addr}') }}"
+      servers: "{{ servers_result.results | community.general.json_query('[].server.{name: hostname, private_v4: addresses.\"' ~ network_name ~ '\"[0].addr}') }}"
+      agents: "{{ agents_result.results | community.general.json_query('[].server.{name: hostname, private_v4: addresses.\"' ~ network_name ~ '\"[0].addr}') }}"
+      jumphost_floating_ip: "{{ jumphost_floating_ip.floating_ip.floating_ip_address }}"
+  when: state == 'present'
+
+- debug:
+    var: jumphost
+
+- debug:
+    var: servers
+    
+- debug:
+    var: agents
\ No newline at end of file
diff --git a/roles/create_infrastructure/templates/agents.yml.j2 b/roles/create_infrastructure/templates/agents.yml.j2
deleted file mode 100644
index ab719f07ce7a59f19d307272bc38bd5ccf3d71c2..0000000000000000000000000000000000000000
--- a/roles/create_infrastructure/templates/agents.yml.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-{% raw %}
-ansible_user: "ubuntu"
-ansible_ssh_private_key_file: "{{ ssh_key_file }}"
-ansible_ssh_common_args: "-o ProxyCommand='ssh -q ubuntu@{{ master_floating_ip }} -o StrictHostKeyChecking=no -i {{ ssh_key_file }} -W %h:%p' -o StrictHostKeyChecking=no"
-node_type: agent
-{% endraw %}
\ No newline at end of file
diff --git a/roles/create_infrastructure/templates/all.yml.j2 b/roles/create_infrastructure/templates/all.yml.j2
deleted file mode 100644
index 7a89c769cba6272baad3f96635cf103d94d686f9..0000000000000000000000000000000000000000
--- a/roles/create_infrastructure/templates/all.yml.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-loadbalancer_ip: {{ new_loadbalancer_ip }}
-
-master_floating_ip: {{ new_master_floating_ip }}
-
-subnet_id: {{ new_subnet_id }}
-
-ssh_key_file: {{ ssh_keys_dir }}/{{ ssh_key_file }}
\ No newline at end of file
diff --git a/roles/create_infrastructure/templates/hosts.j2 b/roles/create_infrastructure/templates/hosts.j2
index a68715697be9a9af5e8a276883997c1f55e963e7..a4a201063674fd98b9c4c149b7f5b53e2af33fa3 100644
--- a/roles/create_infrastructure/templates/hosts.j2
+++ b/roles/create_infrastructure/templates/hosts.j2
@@ -1,16 +1,20 @@
+[rke2:children]
+servers
+agents
+
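+{# Nodes are listed with their private addresses; only the jumphost entry
+   points at the floating IP, so other hosts are expected to be reached
+   through it. #}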
 [master]
-{{ master.name }} ansible_host="{{ master.public_v4 }}"
+{{ servers[0].name }}
 
 [servers]
-{% if servers is defined and servers | length > 0 %}
 {% for server in servers %}
-{{ server.name }} ansible_host="{{ server.private_v4 }}"
+{{ server.name }} ansible_host={{ server.private_v4 }} node_ip={{ server.private_v4 }} node_type=server
 {% endfor %}
-{% endif %}
+
 
 [agents]
-{% if agents is defined and agents | length > 0 %}
 {% for agent in agents %}
-{{ agent.name }} ansible_host="{{ agent.private_v4 }}"
+{{ agent.name }} ansible_host={{ agent.private_v4 }} node_ip={{ agent.private_v4 }} node_type=agent
 {% endfor %}
-{% endif %}
\ No newline at end of file
+
+[jumphosts]
+{{ jumphost.name }} ansible_host="{{ jumphost_floating_ip }}" node_ip={{ jumphost.private_v4 }}
\ No newline at end of file
diff --git a/roles/create_infrastructure/templates/master.yml.j2 b/roles/create_infrastructure/templates/master.yml.j2
deleted file mode 100644
index 39c75b6d782e08911cdff64b46a2f2cf49b8c414..0000000000000000000000000000000000000000
--- a/roles/create_infrastructure/templates/master.yml.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-{% raw %}
-ansible_user: "ubuntu"
-ansible_ssh_private_key_file: "{{ ssh_key_file }}"
-ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
-node_type: server
-{% endraw %}
\ No newline at end of file
diff --git a/roles/create_infrastructure/templates/servers.yml.j2 b/roles/create_infrastructure/templates/servers.yml.j2
deleted file mode 100644
index 562da525422fe35ca273ebb83cc4dff70d6fbbbb..0000000000000000000000000000000000000000
--- a/roles/create_infrastructure/templates/servers.yml.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-{% raw %}
-ansible_user: "ubuntu"
-ansible_ssh_private_key_file: "{{ ssh_key_file }}"
-ansible_ssh_common_args: "-o ProxyCommand='ssh -q ubuntu@{{ master_floating_ip }} -o StrictHostKeyChecking=no -i {{ ssh_key_file }} -W %h:%p' -o StrictHostKeyChecking=no"
-node_type: server
-{% endraw %}
\ No newline at end of file
diff --git a/roles/rke2/defaults/main.yml b/roles/rke2/defaults/main.yml
index cae42bc2e6396d78bf7f7c058a5106aa07cc8b87..275015ae2bc6b774af072616749fd47a610ec94a 100644
--- a/roles/rke2/defaults/main.yml
+++ b/roles/rke2/defaults/main.yml
@@ -2,25 +2,24 @@ state: present
 upgrade: no
 dist_upgrade: no
 reboot: no
-
-domain:
-server: "https://{{ domain }}:9345"
-
+debug: false
+selinux: true
 tls_san:
-  - "{{ domain }}"
+  - "{{ loadbalancer_ip }}"
 
 node_taints: []
 node_labels: []
 
 rke2_channel: stable
-rke2_version: "v1.22.9+rke2r2"
+rke2_k8s_version: "1.24"
 
 resolv_conf: "/etc/resolv.conf"
-resolv_conf_server: "{{ resolv_conf }}"
-resolv_conf_node: "{{ resolv_conf }}"
 
 cloud_provider_name: "external"
 
+semodules:
+  - my-iptables
+
 registry_mirrors: {}
 # mirrors:
 #   docker.io:
@@ -43,149 +42,149 @@ registry_mirrors: {}
 #     auth: --SEE_ABOVE--
 #     tls: --SEE_ABOVE--
 
-default_vars:
-  cluster_issuer_name: letsencrypt-cluster-issuer
-  placeholder_domain: example.org
-
-manifests: {} # used to override default_manifests
-manifests_config: "{{ default_manifests | combine(manifests, recursive=True) }}"
-default_manifests:
-  config-rke2-coredns:
-    enabled: false
-
-  config-rke2-calico:
-    enabled: false
-
-  config-rke2-canal:
-    enabled: false
-
-  config-rke2-cilium:
-    enabled: false
-    hubble:
-      enabled: false
-      user: ""
-      password: ""
-    cilium: #advanced here
-      debug:
-        enabled: false
-      preflight:
-        enabled: false
-      bpf:
-        clockProbe: true
-      kubeProxyReplacement: disabled # probe, strict
-      hostPort:
-        enabled: false
-      hostFirewall:
-        enabled: false
-      bandwidthManager:
-        enabled: false
-        bbr: false
-      socketLB:
-        enabled: false
-        hostNamespaceOnly: false
-      cni:
-        chainingMode: none # needs plugins, not needed when kubeProxyReplacement in place
-      containerRuntime:
-        integration: auto
-      prometheus:
-        enabled: false
-        serviceMonitor:
-          enabled: false
-      hubble:
-        enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
-        metrics:
-          enabled:
-          - dns:query;ignoreAAAA
-          - drop
-          - tcp
-          - flow
-          - icmp
-          - http
-          serviceMonitor:
-            enabled: false
-        relay:
-          enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
-        ui:
-          enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
-          ingress:
-            enabled: true
-            hosts:
-              - "{{ manifests['config-rke2-cilium'].hubble.hostname | default('hubble.' + default_vars.placeholder_domain) }}"
-            tls:
-              - secretName: hubble-cilium-tls
-                hosts:
-                  - "{{ manifests['config-rke2-cilium'].hubble.hostname | default('hubble.' + default_vars.placeholder_domain) }}"
-            annotations:
-              cert-manager.io/cluster-issuer: "{{ manifests['deploy-cert-manager'].clusterissuer.name | default(default_vars.cluster_issuer_name) }}"
-              nginx.ingress.kubernetes.io/whitelist-source-range: "{{ manifests['config-rke2-cilium'].hubble.allowlist_source_range | default('0.0.0.0/0') }}"
-              nginx.ingress.kubernetes.io/auth-type: basic
-              nginx.ingress.kubernetes.io/auth-secret: hubble-auth-secret
-              nginx.ingress.kubernetes.io/auth-secret-type: auth-file
-              nginx.ingress.kubernetes.io/auth-realm: Hubble Authentication Required
-
-  deploy-openstack-ccm:
-    enabled: false
-
-  deploy-openstack-cinder:
-    enabled: false
-
-  deploy-openstack-manila:
-    enabled: false
-
-  deploy-cephfs:
-    enabled: false
-
-  deploy-nfs:
-    enabled: false
-
-  deploy-grafana:
-    enabled: false
-    adminPassword: ""
-
-  deploy-rancher-ui:
-    enabled: false
-    rancher_ui_dns:  "{{ 'rancher.' + default_vars.placeholder_domain }}"
-    letsEncrypt_admin_mail: "{{ 'adls@' + default_vars.placeholder_domain }}"
-
-  deploy-metallb:
-    enabled: false
-    pools:
-      - name: "default"
-        addresses:
-          - ""
-        auto_assign: false
-
-  deploy-cloud-provider-vsphere:
-    enabled: false
-    vCenter_IP: ""
-    vCenter_Username: ""
-    vCenter_Password: ""
-    vCenter_Datacenter: ""
-    vCenter_ClusterID: ""
-    CSIMigrationvSphere: ""
-
-  deploy-spectrum-scale-secret:
-    enabled: false
-
-  deploy-spectrum-scale-operator-2.2.0:
-    enabled: false
-
-  deploy-spectrum-scale-csi-2.1.0:
-    enabled: false
-
-  deploy-nginx-ingress-public:
-    enabled: false
-
-  deploy-irods-csi-driver:
-    enabled: false
-
-  deploy-smb-csi-driver:
-    enabled: false
-
-  depoly-minio-operator:
-    enabled: false
-
-  deploy-oidc-issuer-ingress:
-    enabled: false
-    hostname: oidc.cluster.local
-    certissuer: letsencrypt-cluster-issuer
\ No newline at end of file
+# default_vars:
+#   cluster_issuer_name: letsencrypt-cluster-issuer
+#   placeholder_domain: example.org
+
+# manifests: {} # used to override default_manifests
+# manifests_config: "{{ default_manifests | combine(manifests, recursive=True) }}"
+# default_manifests:
+#   config-rke2-coredns:
+#     enabled: false
+
+#   config-rke2-calico:
+#     enabled: false
+
+#   config-rke2-canal:
+#     enabled: false
+
+#   config-rke2-cilium:
+#     enabled: false
+#     hubble:
+#       enabled: false
+#       user: ""
+#       password: ""
+#     cilium: #advanced here
+#       debug:
+#         enabled: false
+#       preflight:
+#         enabled: false
+#       bpf:
+#         clockProbe: true
+#       kubeProxyReplacement: disabled # probe, strict
+#       hostPort:
+#         enabled: false
+#       hostFirewall:
+#         enabled: false
+#       bandwidthManager:
+#         enabled: false
+#         bbr: false
+#       socketLB:
+#         enabled: false
+#         hostNamespaceOnly: false
+#       cni:
+#         chainingMode: none # needs plugins, not needed when kubeProxyReplacement in place
+#       containerRuntime:
+#         integration: auto
+#       prometheus:
+#         enabled: false
+#         serviceMonitor:
+#           enabled: false
+#       hubble:
+#         enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
+#         metrics:
+#           enabled:
+#           - dns:query;ignoreAAAA
+#           - drop
+#           - tcp
+#           - flow
+#           - icmp
+#           - http
+#           serviceMonitor:
+#             enabled: false
+#         relay:
+#           enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
+#         ui:
+#           enabled: "{{ manifests['config-rke2-cilium'].hubble.enabled | default(false) }}"
+#           ingress:
+#             enabled: true
+#             hosts:
+#               - "{{ manifests['config-rke2-cilium'].hubble.hostname | default('hubble.' + default_vars.placeholder_domain) }}"
+#             tls:
+#               - secretName: hubble-cilium-tls
+#                 hosts:
+#                   - "{{ manifests['config-rke2-cilium'].hubble.hostname | default('hubble.' + default_vars.placeholder_domain) }}"
+#             annotations:
+#               cert-manager.io/cluster-issuer: "{{ manifests['deploy-cert-manager'].clusterissuer.name | default(default_vars.cluster_issuer_name) }}"
+#               nginx.ingress.kubernetes.io/whitelist-source-range: "{{ manifests['config-rke2-cilium'].hubble.allowlist_source_range | default('0.0.0.0/0') }}"
+#               nginx.ingress.kubernetes.io/auth-type: basic
+#               nginx.ingress.kubernetes.io/auth-secret: hubble-auth-secret
+#               nginx.ingress.kubernetes.io/auth-secret-type: auth-file
+#               nginx.ingress.kubernetes.io/auth-realm: Hubble Authentication Required
+
+#   deploy-openstack-ccm:
+#     enabled: false
+
+#   deploy-openstack-cinder:
+#     enabled: false
+
+#   deploy-openstack-manila:
+#     enabled: false
+
+#   deploy-cephfs:
+#     enabled: false
+
+#   deploy-nfs:
+#     enabled: false
+
+#   deploy-grafana:
+#     enabled: false
+#     adminPassword: ""
+
+#   deploy-rancher-ui:
+#     enabled: false
+#     rancher_ui_dns:  "{{ 'rancher.' + default_vars.placeholder_domain }}"
+#     letsEncrypt_admin_mail: "{{ 'adls@' + default_vars.placeholder_domain }}"
+
+#   deploy-metallb:
+#     enabled: false
+#     pools:
+#       - name: "default"
+#         addresses:
+#           - ""
+#         auto_assign: false
+
+#   deploy-cloud-provider-vsphere:
+#     enabled: false
+#     vCenter_IP: ""
+#     vCenter_Username: ""
+#     vCenter_Password: ""
+#     vCenter_Datacenter: ""
+#     vCenter_ClusterID: ""
+#     CSIMigrationvSphere: ""
+
+#   deploy-spectrum-scale-secret:
+#     enabled: false
+
+#   deploy-spectrum-scale-operator-2.2.0:
+#     enabled: false
+
+#   deploy-spectrum-scale-csi-2.1.0:
+#     enabled: false
+
+#   deploy-nginx-ingress-public:
+#     enabled: false
+
+#   deploy-irods-csi-driver:
+#     enabled: false
+
+#   deploy-smb-csi-driver:
+#     enabled: false
+
+#   depoly-minio-operator:
+#     enabled: false
+
+#   deploy-oidc-issuer-ingress:
+#     enabled: false
+#     hostname: oidc.cluster.local
+#     certissuer: letsencrypt-cluster-issuer
\ No newline at end of file
diff --git a/roles/rke2/files/my-iptables.te b/roles/rke2/files/my-iptables.te
new file mode 100644
index 0000000000000000000000000000000000000000..ee715a2261e28f39a88d239e724b4010d6dc5347
--- /dev/null
+++ b/roles/rke2/files/my-iptables.te
@@ -0,0 +1,10 @@
+module my-iptables 1.0;
+
+require {
+	type iptables_t;
+	type cgroup_t;
+	class dir ioctl;
+}
+
+#============= iptables_t ==============
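+# Allows iptables to ioctl cgroup directories; presumably captured from an
+# AVC denial (audit2allow-style) hit by rke2's iptables calls under SELinux.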
+allow iptables_t cgroup_t:dir ioctl;
\ No newline at end of file
diff --git a/roles/rke2/selinux/my-node-exporter.cil b/roles/rke2/files/my-node-exporter.cil
similarity index 100%
rename from roles/rke2/selinux/my-node-exporter.cil
rename to roles/rke2/files/my-node-exporter.cil
diff --git a/roles/rke2/selinux/my-rke2.te b/roles/rke2/files/my-rke2.te
similarity index 100%
rename from roles/rke2/selinux/my-rke2.te
rename to roles/rke2/files/my-rke2.te
diff --git a/roles/rke2/selinux/udica/base-container.cil b/roles/rke2/files/udica/base-container.cil
similarity index 100%
rename from roles/rke2/selinux/udica/base-container.cil
rename to roles/rke2/files/udica/base-container.cil
diff --git a/roles/rke2/tasks/bootstrap_rke2.yml b/roles/rke2/tasks/bootstrap_rke2.yml
new file mode 100644
index 0000000000000000000000000000000000000000..38734b756c8507f39431ae6b08e2afbb067d9b7d
--- /dev/null
+++ b/roles/rke2/tasks/bootstrap_rke2.yml
@@ -0,0 +1,47 @@
+- block:
+  - name: start rke2
+    ansible.builtin.systemd:
+      name: "rke2-{{ node_type }}"
+      enabled: yes
+      masked: no
+      state: started
+      daemon_reload: yes
+
+  - name: wait until rke2 cacerts endpoint returns 200
+    uri:
+      url: "https://{{ loadbalancer_ip }}:9345/cacerts"
+      status_code: [200]
+      validate_certs: no
+    register: result
+    until: result.status == 200
+    retries: 10 # retry X times
+    delay: 10 # pause for X sec b/w each call
+
+  - name: wait for kubeconfig
+    wait_for:
+      path: /etc/rancher/rke2/rke2.yaml
+
+  - name: fetch kubeconfig from master
+    ansible.builtin.fetch:
+      src: /etc/rancher/rke2/rke2.yaml
+      dest: kubeconfig.yaml
+      flat: true
+
+  - name: replace endpoint in kubeconfig
+    delegate_to: localhost
+    become: false
+    ansible.builtin.replace:
+      path: kubeconfig.yaml
+      regexp: '^(\s+server: ).*'
+      replace: '\1https://{{ loadbalancer_ip }}:6443'
+  when: "'master' in group_names"
+
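+# By this point the master block has confirmed the registration endpoint on
+# port 9345 answers; the remaining nodes then join one at a time (throttle: 1).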
+- name: start rke2
+  throttle: 1
+  ansible.builtin.systemd:
+    name: "rke2-{{ node_type }}"
+    enabled: yes
+    masked: no
+    state: started
+    daemon_reload: yes
+  when: "'master' not in group_names"
\ No newline at end of file
diff --git a/roles/rke2/tasks/config_rke2.yml b/roles/rke2/tasks/config_rke2.yml
index c41d6f2d73631d08995d0aa00807a53cf969add1..2818d2b44545ee57e559276066948a7d2c703a89 100644
--- a/roles/rke2/tasks/config_rke2.yml
+++ b/roles/rke2/tasks/config_rke2.yml
@@ -1,3 +1,23 @@
+- name: create token
+  delegate_to: localhost
+  run_once: true
+  set_fact:
+    init_token: "{{ lookup('community.general.random_string', length=129, special=False) }}"
+  when: token is not defined
+
+- name: persist token in config.yml
+  delegate_to: localhost
+  become: false
+  lineinfile:
+    dest: "config.yml"
+    regexp: "token:.*"
+    line: "token: {{ init_token }}"
+    state: present
+  when: token is not defined
+
+- name: read token
+  include_vars: config.yml
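+# The join token is minted once, persisted to config.yml, and read back via
+# include_vars, so repeat runs reuse the same token instead of generating a
+# new one.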
+
 - name: copy rke2 config
   template:
     src: config.yaml.j2
diff --git a/roles/rke2/tasks/config_selinux.yml b/roles/rke2/tasks/config_selinux.yml
new file mode 100644
index 0000000000000000000000000000000000000000..19fd8bb7dd62b975a80fa1834e4cf161191d2664
--- /dev/null
+++ b/roles/rke2/tasks/config_selinux.yml
@@ -0,0 +1,52 @@
+- name: Copy SELinux Policies
+  copy:
+    src: "{{ item }}.te"
+    dest: "/etc/selinux/targeted/policy/{{ item }}.te"
+  loop: "{{ semodules }}"
+
+- name: Build SELinux policy module
+  command: "checkmodule -M -m -o /etc/selinux/targeted/policy/{{ item }}.mod /etc/selinux/targeted/policy/{{ item }}.te"
+  loop: "{{ semodules }}"
+
+- name: Package SELinux policy module 
+  command: "semodule_package -o /etc/selinux/targeted/policy/{{ item }}.pp -m /etc/selinux/targeted/policy/{{ item }}.mod"
+  loop: "{{ semodules }}"
+
+- name: Add SELinux policy module
+  command: "semodule -i /etc/selinux/targeted/policy/{{ item }}.pp"
+  loop: "{{ semodules }}"
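+# Build pipeline per module: .te source -> checkmodule (.mod) ->
+# semodule_package (.pp) -> semodule -i. These command tasks re-run (and
+# report changed) on every play.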
+
+# - name: Copy udica Base Container SELinux Policies 
+#   template:
+#     src: ../selinux/udica/base-container.cil
+#     dest: /etc/selinux/targeted/policy/base-container.cil
+
+# - name: Copy SELinux Policies (prometheus/node_exporter)
+#   template:
+#     src: ../selinux/my-node-exporter.cil
+#     dest: /etc/selinux/targeted/policy/my-node-exporter.cil
+
+# - name: Copy SELinux Policies
+#   template:
+#     src: ../selinux/my-openstack-ccm.cil
+#     dest: /etc/selinux/targeted/policy/my-openstack-ccm.cil
+#   when: ( 'control-plane' in group_names )
+
+# - name: Build SELinux exception modules
+#   shell: |
+#     semodule -i /etc/selinux/targeted/policy/my-node-exporter.cil /etc/selinux/targeted/policy/base-container.cil
+
+# - name: Build SELinux exception modules
+#   shell: |
+#     semodule -i /etc/selinux/targeted/policy/my-openstack-ccm.cil /etc/selinux/targeted/policy/base-container.cil
+#   when: ( 'control-plane' in group_names )
+
diff --git a/roles/rke2/tasks/install_rke2.yml b/roles/rke2/tasks/install_rke2.yml
index 0b61235e754003ec4113cb799eccfff292cd48a1..7c08d821d5e51f0dd0a84c6634ce45f6c51f9254 100644
--- a/roles/rke2/tasks/install_rke2.yml
+++ b/roles/rke2/tasks/install_rke2.yml
@@ -1,45 +1,27 @@
-- name: download RKE2 install script
-  get_url:
-    url: https://get.rke2.io
-    dest: /tmp/rke2.sh
-    mode: '0755'
-  when: not rke2_installed.stat.exists or upgrade
-
-- name: Update crypto-policy to allow SHA1
-  shell: update-crypto-policies --set DEFAULT:SHA1
-  when: not rke2_installed.stat.exists or upgrade
+- name: Ensure /var/lib/rancher/rke2/server/manifests
+  file:
+    path: /var/lib/rancher/rke2/server/manifests
+    state: directory
+    recurse: yes
 
-- name: Install RKE2
-  command: "/tmp/rke2.sh"
-  args:
-    creates: /usr/bin/rke2
-  environment:
-    INSTALL_RKE2_VERSION: "{{ rke2_version }}"
-    INSTALL_RKE2_CHANNEL: "{{ rke2_channel }}"
-    INSTALL_RKE2_TYPE: "{{ node_type }}"
-  notify:
-    - restart rke2
+- name: Ensure /etc/rancher/rke2
+  file:
+    path: /etc/rancher/rke2
+    state: directory
+    recurse: yes
 
-- name: Revert crypto-policy
-  shell: update-crypto-policies --set DEFAULT:NO-SHA1
-  when: not rke2_installed.stat.exists or upgrade
+- name: add rancher-rke2 repo
+  template:
+    src: rancher-rke2.repo.j2
+    dest: /etc/yum.repos.d/rancher-rke2.repo
 
+- name: install rke2 
+  dnf:
+    name:
+      - rke2-common
+      - "rke2-{{ node_type }}"
+      - rke2-selinux
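+# Version selection is delegated to the repo definition; rancher-rke2.repo.j2
+# is assumed to pin the channel/minor from rke2_channel and rke2_k8s_version
+# in defaults/main.yml.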
 
-- name: remove RKE2 install script
-  file:
-    path: /tmp/rke2.sh
-    state: absent
 
-- name: Copy SELinux Policies (rke2)
-  template:
-    src: ../selinux/my-rke2.te
-    dest: /etc/selinux/targeted/policy/my-rke2.te
-  when: ( 'control-plane' in group_names )
 
-- name: Build SELinux exception module (rke2)
-  shell: |
-    checkmodule -M -m -o /etc/selinux/targeted/policy/my-rke2.mod /etc/selinux/targeted/policy/my-rke2.te
-    semodule_package -o /etc/selinux/targeted/policy/my-rke2.pp -m /etc/selinux/targeted/policy/my-rke2.mod
-    semodule -i /etc/selinux/targeted/policy/my-rke2.pp
-  when: ( 'control-plane' in group_names )
 
diff --git a/roles/rke2/tasks/instantiate_token.yml b/roles/rke2/tasks/instantiate_token.yml
deleted file mode 100644
index 8e7e5e4394ed26cd54c8807b4ac310d56b4b408b..0000000000000000000000000000000000000000
--- a/roles/rke2/tasks/instantiate_token.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-- name: Instantiate Token
-  block:
-    - name: Load token
-      slurp:
-        src: "/var/lib/rancher/rke2/server/node-token"
-      register: slurped_token
-
-    - name: Decode token
-      set_fact:
-        decoded_token: "{{ slurped_token.content | b64decode | trim }}"
-
-    - name: set token
-      set_fact:
-        init_token: "{{ decoded_token.split('server:')[1] }}"
-  rescue:
-    - name: create token
-      set_fact:
-        init_token: "{{ lookup('community.general.random_string', length=129, special=False) }}"
-      when: token is not defined
-
-- name: Save Token locally
-  block:
-  - name: ensure inventory folders
-    delegate_to: localhost
-    become: false
-    run_once: true
-    file:
-      path: "{{ item }}"
-      state: directory
-    loop:
-      - group_vars
-      - group_vars/all
-
-  - name: store token
-    delegate_to: localhost
-    become: false
-    run_once: true
-    copy:
-      dest: group_vars/all/token.yml
-      content: |-
-        token: {{ init_token }}
-
-- name: Distribute token to all servers
-  set_fact:
-    token: "{{ init_token }}"
-  delegate_to: "{{ item }}"
-  delegate_facts: true
-  loop: "{{ groups['all'] }}"
\ No newline at end of file
diff --git a/roles/rke2/tasks/main.yml b/roles/rke2/tasks/main.yml
index bf19c38387764573fae6dc292cd160077bccf16c..04a2bf0a02a2630b1c93f23c90767b008a3fb7c9 100644
--- a/roles/rke2/tasks/main.yml
+++ b/roles/rke2/tasks/main.yml
@@ -1,47 +1,8 @@
-- name: stat if rke2 is installed
-  stat:
-    path: /usr/bin/rke2
-  register: rke2_installed
-
-- block:
-  - include_tasks: setup_host.yml
-  - include_tasks: install_rke2.yml
-  when: state != 'absent' and (not rke2_installed.stat.exists or upgrade)
-
-- name: prepare master node
-  block:
-    - include_tasks: templates.yml
-    - include_tasks: instantiate_token.yml
-  when: state != 'absent' and 'master' in group_names
-
-- name: config nodes
-  block:
-    - include_tasks: config_registries.yml
-    - include_tasks: config_rke2.yml
-  when: state != 'absent'
-
-- name: await rke2 on master and save kubeconfig in keyvault
-  block:
-    - include_tasks: start_rke2.yml
-    - include_tasks: save_kubeconfig.yml
-  when: state != 'absent' and 'master' in group_names
-
-- name: rotate etcd secrets encryption key
-  include_tasks: rotate_encryption.yml
-  when: state != 'absent' and 'control-plane' in group_names and rotate 
-
-- name: uninstall rke2
-  command: rke2-uninstall.sh
-  when: rke2_installed.stat.exists and state == 'absent'
-
-- name: Flush handlers
-  meta: flush_handlers
-
-- name: configure Openstack Cloud Controller Manager
-  include_tasks: config_ccm.yml
-  when: state != 'absent' and manifests['deploy-openstack-ccm'].enabled == true and 'master' in group_names
-
-- name: wait for rancher
-  run_once: true
-  include_tasks: wait_for_rancher.yml
-  when: state != 'absent' and manifests['deploy-rancher-ui'].enabled == true
\ No newline at end of file
+- include_tasks: setup_host.yml
+- include_tasks: config_selinux.yml
+- include_tasks: install_rke2.yml
+- include_tasks: config_registries.yml
+- include_tasks: config_rke2.yml
+
+- include_tasks: manifests.yml
+- include_tasks: bootstrap_rke2.yml
diff --git a/roles/rke2/tasks/manifests.yml b/roles/rke2/tasks/manifests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..77012fcfe55ee688f82e8266a355f23bdc32e50f
--- /dev/null
+++ b/roles/rke2/tasks/manifests.yml
@@ -0,0 +1,13 @@
+- name: copy manifest template files
+  template:
+    src: 'manifests/{{ item }}.yml.j2'
+    dest: '/var/lib/rancher/rke2/server/manifests/{{ item }}.yaml'
+  loop:
+    - config-nginx-ingress
+    - config-rke2-canal
+    - config-rke2-coredns
+    - deploy-openstack-ccm
+    - deploy-openstack-cinder
+  notify:
+    - start rke2
+  when: "'master' in group_names"
\ No newline at end of file
diff --git a/roles/rke2/tasks/save_kubeconfig.yml b/roles/rke2/tasks/save_kubeconfig.yml
deleted file mode 100644
index 052b27cc4b81fdd61605c5f204792962c196cab9..0000000000000000000000000000000000000000
--- a/roles/rke2/tasks/save_kubeconfig.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-- name: wait for kubeconfig
-  wait_for:
-    path: /etc/rancher/rke2/rke2.yaml
-
-- name: fetch kubeconfig from master
-  ansible.builtin.fetch:
-    src: /etc/rancher/rke2/rke2.yaml
-    dest: kubeconfig.yaml
-    flat: yes
-
-- name: replace endpoint in kubeconfig NEW
-  delegate_to: localhost
-  become: false
-  ansible.builtin.replace:
-    path: kubeconfig.yaml
-    regexp: '^(\s+server: ).*'
-    replace: '\1https://{{ lb_ip_mgmt }}:6443'
-
-- name: fetch kubeconfig from master and copy it 
-  ansible.builtin.fetch:
-    src: /etc/rancher/rke2/rke2.yaml
-    dest: kubeconfigext.yaml
-    flat: yes
-
-- name: replace endpoint in external kubeconfig
-  delegate_to: localhost
-  become: false
-  ansible.builtin.replace:
-    path: kubeconfigext.yaml
-    regexp: '^(\s+server: ).*'
-    replace: '\1https://rancher.{{ domain }}'
-
-
diff --git a/roles/rke2/tasks/selinux.yml b/roles/rke2/tasks/selinux.yml
deleted file mode 100644
index 8ec757f4c9dd7105347b1578ccec8d479e080efa..0000000000000000000000000000000000000000
--- a/roles/rke2/tasks/selinux.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-- name: Copy udica Base Container SELinux Policies 
-  template:
-    src: ../selinux/udica/base-container.cil
-    dest: /etc/selinux/targeted/policy/base-container.cil
-
-- name: Copy SELinux Policies (prometheus/node_exporter)
-  template:
-    src: ../selinux/my-node-exporter.cil
-    dest: /etc/selinux/targeted/policy/my-node-exporter.cil
-
-- name: Copy SELinux Policies
-  template:
-    src: ../selinux/my-openstack-ccm.cil
-    dest: /etc/selinux/targeted/policy/my-openstack-ccm.cil
-  when: ( 'control-plane' in group_names )
-
-- name: Build SELinux exception modules
-  shell: |
-    semodule -i /etc/selinux/targeted/policy/my-node-exporter.cil /etc/selinux/targeted/policy/base-container.cil
-
-- name: Build SELinux exception modules
-  shell: |
-    semodule -i /etc/selinux/targeted/policy/my-openstack-ccm.cil /etc/selinux/targeted/policy/base-container.cil
-  when: ( 'control-plane' in group_names )
-
diff --git a/roles/rke2/tasks/setup_host.yml b/roles/rke2/tasks/setup_host.yml
index b106efcaf0c32dfc2fd40d1751b64afc26ebd515..9eaf72a21976ee592f8472f8a296f07794f8b3f8 100644
--- a/roles/rke2/tasks/setup_host.yml
+++ b/roles/rke2/tasks/setup_host.yml
@@ -1,96 +1,82 @@
 - name: Update the /etc/hosts file with node name
-  lineinfile:
-    dest: "/etc/hosts"
-    regexp: ".*\t{{ hostvars[item]['inventory_hostname']}}"
-    line: "{{ hostvars[item]['ansible_host'] }}\t{{ hostvars[item]['inventory_hostname']}}"
-    state: present
-    backup: yes
-  loop: "{{ groups['all'] }}"
-
-- name: dnf upgrade all
-  dnf:
-    name: "*"
-    state: latest
-
-- name: install RHEL packages
-  dnf:
-    name:
-      - sssd-ldap
-      - container-selinux
-      - cloud-utils-growpart
-      - fapolicyd
-    state: latest
-
-
-- name: disable firewalld
-  ansible.builtin.systemd:
-    name: "firewalld"
-    enabled: no
-    masked: no
-    state: stopped
-
-- name: enable nftables
-  ansible.builtin.systemd:
-    name: "nftables"
-    enabled: yes
-    state: started
-    masked: no
-
-- name: add kernel params
   template:
-    src: kernel_params.conf.j2
-    dest: /etc/sysctl.d/rke2_kernel_params.conf
-  register: kernel_params
-
-- name: load kernel params
-  shell: sysctl --system
-  when: kernel_params.changed
-
-- name: add group - etcd 
-  group:
-    name: etcd
-    state: present
-
-- name: add user - etcd
-  user:
-    name: etcd
-    group: etcd
-
-- name: remove rocky user at uid 1000
-  user:
-    name: rocky
-    state: absent 
-    remove: yes
-  ignore_errors: True 
-
-- name: Download root CA
-  get_url:
-    url: https://curl.se/ca/cacert.pem
-    dest: /etc/ssl/certs
-
-- name: Update CA trust
-  shell: update-ca-trust
-
-
-- name: Resize the disks if volume was expanded (experimental)
-  shell: |
-    /usr/bin/growpart /dev/vda 2 
-    /usr/sbin/pvresize -y -q /dev/vda2 
-    /usr/sbin/lvresize -y -q -r -l +100%FREE /dev/mapper/*root 
-
-
-
-- name: Ensure /var/lib/rancher/rke2/server/manifests
-  file:
-    path: /var/lib/rancher/rke2/server/manifests
-    state: directory
-    recurse: yes
-
-- name: Ensure /etc/rancher/rke2
-  file:
-    path: /etc/rancher/rke2
-    state: directory
-    recurse: yes
+    src: hosts.j2
+    dest: /etc/hosts
+
+# - name: dnf upgrade all
+#   dnf:
+#     name: "*"
+#     state: latest
+
+# - name: install RHEL packages
+#   dnf:
+#     name:
+#       # - sssd-ldap
+#       #- container-selinux
+#       #- cloud-utils-growpart
+#       #- fapolicyd
+#     state: latest
+
+# - name: disable firewalld
+#   ansible.builtin.systemd:
+#     name: "firewalld"
+#     enabled: no
+#     masked: no
+#     state: stopped
+#   ignore_errors: true
+
+# - name: enable nftables
+#   ansible.builtin.systemd:
+#     name: "nftables"
+#     enabled: yes
+#     state: started
+#     masked: no
+
+# - name: add kernel params
+#   template:
+#     src: kernel_params.conf.j2
+#     dest: /etc/sysctl.d/rke2_kernel_params.conf
+#   register: kernel_params
+
+# - name: load kernel params
+#   shell: sysctl --system
+#   when: kernel_params.changed
+
+# - name: add group - etcd 
+#   group:
+#     name: etcd
+#     state: present
+
+# - name: add user - etcd
+#   user:
+#     name: etcd
+#     group: etcd
+
+# - name: remove rocky user at uid 1000
+#   user:
+#     name: rocky
+#     state: absent 
+#     remove: yes
+#   ignore_errors: True 
+
+# - name: Download root CA
+#   get_url:
+#     url: https://curl.se/ca/cacert.pem
+#     dest: /etc/ssl/certs
+
+# - name: Update CA trust
+#   shell: update-ca-trust
+
+
+# - name: Resize the disks if volume was expanded (experimental)
+#   shell: |
+#     /usr/bin/growpart /dev/vda 2 
+#     /usr/sbin/pvresize -y -q /dev/vda2 
+#     /usr/sbin/lvresize -y -q -r -l +100%FREE /dev/mapper/*root 
+
+
+
+
 
 # Comment In the following statement for debugging the latest SELinux hiccups, never merge it into main  
 #- name: Disable SELinux
diff --git a/roles/rke2/tasks/start_rke2.yml b/roles/rke2/tasks/start_rke2.yml
deleted file mode 100644
index 2650c32eaa0b9c9ec10a21775a56d27746d4bf61..0000000000000000000000000000000000000000
--- a/roles/rke2/tasks/start_rke2.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-- name: start rke2
-  ansible.builtin.systemd:
-    name: "rke2-{{ node_type }}"
-    enabled: yes
-    masked: no
-    state: started
-    daemon_reload: yes
-
-- name: wait for rke2 cacerts to come up till 200
-  delegate_to: localhost
-  uri:
-    url: "https://{{ LB_IP_MGMT }}:9345/cacerts"
-    status_code: [200]
-    validate_certs: no
-  register: result
-  until: result.status == 200
-  retries: 10 # retry X times
-  delay: 10 # pause for X sec b/w each call
diff --git a/roles/rke2/tasks/templates.yml b/roles/rke2/tasks/templates.yml
deleted file mode 100644
index a8ac5bb000095bd31bef2e44d34ea50b4bb97710..0000000000000000000000000000000000000000
--- a/roles/rke2/tasks/templates.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-
-- name: sync k8s manifests
-  block:
-    - name: copy manifest template files
-      template:
-        src: 'manifests/{{ item.key }}.j2'
-        dest: '/var/lib/rancher/rke2/server/manifests/{{ item.key }}.yaml'
-      with_dict: "{{ manifests_config }}"
-      when: item.value.enabled
-      notify:
-        - start rke2
-
-    - name: Remove manifest template files
-      ansible.builtin.file:
-        path: '/var/lib/rancher/rke2/server/manifests/{{ item.key }}.yaml'
-        state: absent
-      with_dict: "{{ manifests_config }}"
-      when: not item.value.enabled
-      ignore_errors: true
\ No newline at end of file
diff --git a/roles/rke2/templates/all.yml b/roles/rke2/templates/all.yml
deleted file mode 100644
index 7288a0061b07a6478a1a9d046782878acdd527d0..0000000000000000000000000000000000000000
--- a/roles/rke2/templates/all.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-token: {{ token }}
-
-server: "https://{{ domain }}:9345"
diff --git a/roles/rke2/templates/config.yaml.j2 b/roles/rke2/templates/config.yaml.j2
index 5ed7b8067a802867f70c24486903ca3288f0d357..ef8350cdc8023576e27f0b0e5af510cbe694e1c2 100644
--- a/roles/rke2/templates/config.yaml.j2
+++ b/roles/rke2/templates/config.yaml.j2
@@ -1,13 +1,11 @@
-debug: true
-node-name: "{{ ansible_hostname }}"
+debug: {{ debug }}
+node-name: "{{ inventory_hostname }}"
+node-ip: "{{ node_ip }}"
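+{# node_ip must be set per host in the inventory (no ansible_default_ipv4 fallback) #}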
+
 token: "{{ token }}"
-selinux: "{{ selinux_bool }}"
+selinux: "{{ selinux }}"
 
-{% if node_ip is defined %}
-node-ip: "{{ node_ip }}"
-{% else %}
-node-ip: "{{ ansible_default_ipv4.address }}"
-{% endif %}
 
 {% if node_taints is defined and node_taints | length > 0 %}
 node-taint:
@@ -24,49 +22,16 @@ node-label:
 {% endif %}
 
 {% if 'master' not in group_names %}
-server: "{{ server }}"
+server: "https://{{ hostvars[groups['servers'][0]]['node_ip'] }}:9345"
 {% endif %}
 
 {% if cis_profile is defined %}
 profile: "{{ cis_profile }}"
 {% endif %}
 
-{% if cloud_provider_name is defined and cloud_provider_name | length > 0 %}
-cloud-provider-name: "{{ cloud_provider_name }}"
-{% endif %}
-
-{% if node_type == 'server' %}
-resolv-conf: "{{ resolv_conf_server }}"
-{# disable-cloud-controller: true #}
-write-kubeconfig-mode: "0600"
-{% if kube_apiserver_args is defined and kube_apiserver_args | length > 0%}
-kube-apiserver-arg:
-{% for kube_apiserver_arg in kube_apiserver_args %}
-    - {{ kube_apiserver_arg }}
-{% endfor %}
-{% endif %}
-
-{% if kubelet_args is defined and kubelet_args | length > 0%}
-kubelet-arg:
-{% for kubelet_arg in kubelet_args %}
-    - {{ kubelet_arg }}
-{% endfor %}
-{% endif %}
 
-{% if disable_kube_proxy is defined and disable_kube_proxy == true %}
-disable-kube-proxy: true
-{% endif %}
+cloud-provider-name: "{{ cloud_provider_name }}"
 
-{% if cni is defined and cni | length > 0 %}
-cni: "{{ cni }}"
-{% elif manifests_config['config-rke2-calico'].enabled and manifests_config['config-rke2-cilium'].enabled%}
-cni: multus,calico,cilium
-{% elif manifests_config['config-rke2-calico'].enabled%}
-cni: calico
-{% elif manifests_config['config-rke2-cilium'].enabled%}
-cni: cilium
-{# disable-kube-proxy: true #TODO also for agents #}
-{% endif %}
 
 {% if tls_san is defined and tls_san | length > 0 %}
 tls-san:
@@ -74,22 +39,3 @@ tls-san:
   - {{ san }}
 {% endfor %}
 {% endif %}
-
-{# Only for Nodes #}
-{% else %} 
-resolv-conf: "{{ resolv_conf_node }}"
-{% endif %}
-
-
-{% if secrets_encryption is defined %}
-secrets-encryption: "{{ secrets_encryption }}"
-{% endif %}
-{# Do not use it:
-
-{% if node_external_ip is defined %}
-node-external-ip: "{{ node_external_ip }}"
-{% else %}
-node-external-ip: "{{ ansible_default_ipv4.address }}"
-{% endif %}
-
-#}
\ No newline at end of file
diff --git a/roles/rke2/templates/hosts.j2 b/roles/rke2/templates/hosts.j2
new file mode 100644
index 0000000000000000000000000000000000000000..174c8eef63ac6e75f771e1bfc94a28af1456db8a
--- /dev/null
+++ b/roles/rke2/templates/hosts.j2
@@ -0,0 +1,7 @@
+127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
+
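+# Managed by Ansible; each inventory host must define node_ip.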
+{% for host in groups['all'] %}
+{{ hostvars[host]['node_ip'] }}    {{ hostvars[host]['inventory_hostname'] }}
+{% endfor %}
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-nginx-ingress.yml.j2 b/roles/rke2/templates/manifests/config-nginx-ingress.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..571f68e234f014e1de8cf9f2b25b138b0c338bf8
--- /dev/null
+++ b/roles/rke2/templates/manifests/config-nginx-ingress.yml.j2
@@ -0,0 +1,15 @@
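+# HelmChartConfig overriding values of RKE2's bundled rke2-ingress-nginx chart.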
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-ingress-nginx
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    controller:
+      config:
+        use-forwarded-headers: "true"
+        proxy-body-size: 64m
+    tolerations:
+      - operator: Exists
+        effect: NoSchedule
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-rke2-calico.j2 b/roles/rke2/templates/manifests/config-rke2-calico.j2
deleted file mode 100644
index 53fd7a4f87f237562c1fb4c296988fe980ab9eaa..0000000000000000000000000000000000000000
--- a/roles/rke2/templates/manifests/config-rke2-calico.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: helm.cattle.io/v1
-kind: HelmChartConfig
-metadata:
-  name: rke2-calico
-  namespace: kube-system
-spec:
-    valuesContent: |-
-      installation:
-        controlPlaneTolerations:
-        - key: "node-role.kubernetes.io/control-plane"
-          operator: "Exists"
-          effect: "NoSchedule"
-        - key: "node-role.kubernetes.io/etcd"
-          operator: "Exists"
-          effect: "NoExecute"
-        - key: "node-role.kubernetes.io/etcd"
-          operator: "Exists"
-          effect: "NoExecute"
-        - key: "CriticalAddonsOnly"
-          operator: "Exists"
-          effect: "NoExecute"
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-rke2-canal.j2 b/roles/rke2/templates/manifests/config-rke2-canal.yml.j2
similarity index 93%
rename from roles/rke2/templates/manifests/config-rke2-canal.j2
rename to roles/rke2/templates/manifests/config-rke2-canal.yml.j2
index 8f3bc18549aeebdc7b9c44615674b5726b0c8efc..75f23b6636cae55536a28e53d9a6ee648032225d 100644
--- a/roles/rke2/templates/manifests/config-rke2-canal.j2
+++ b/roles/rke2/templates/manifests/config-rke2-canal.yml.j2
@@ -7,7 +7,8 @@ spec:
   bootstrap: True
   valuesContent: |-
     calico:
-      vethuMTU: 1400
+      vethuMTU: 1450
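+      # 1450 assumes a 1500-byte uplink MTU minus the VXLAN encapsulation overhead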
       masquerade: false
       networkingBackend: "vxlan"
 {% if flannel_iface is defined %}
diff --git a/roles/rke2/templates/manifests/config-rke2-cilium.j2 b/roles/rke2/templates/manifests/config-rke2-cilium.j2
deleted file mode 100644
index 3f3e89e3b528d02ea9b659e3cfbdcd5b0bd5fce8..0000000000000000000000000000000000000000
--- a/roles/rke2/templates/manifests/config-rke2-cilium.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-
-{% if item.value.cilium.hubble.ui.ingress.enabled %}
----
-
-apiVersion: v1
-kind: Secret
-metadata:
-  name: hubble-auth-secret
-  namespace: kube-system
-type: Opaque
-stringData:
-  auth: "{{ item.value.hubble.user }}:{{ item.value.hubble.password | password_hash('sha512') }}"
-
-{% endif %}
-
----
-
-apiVersion: helm.cattle.io/v1
-kind: HelmChartConfig
-metadata:
-  name: rke2-cilium
-  namespace: kube-system
-spec:
-  valuesContent: |-
-{{ item.value.cilium | to_nice_yaml | indent(4, true)  }}
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-rke2-coredns.j2 b/roles/rke2/templates/manifests/config-rke2-coredns.yml.j2
similarity index 65%
rename from roles/rke2/templates/manifests/config-rke2-coredns.j2
rename to roles/rke2/templates/manifests/config-rke2-coredns.yml.j2
index c6b8a671dc5ed1de854f2e60201536b32ecee12c..68efdc08f6a0795c343e8805cd4f41447076e1db 100644
--- a/roles/rke2/templates/manifests/config-rke2-coredns.j2
+++ b/roles/rke2/templates/manifests/config-rke2-coredns.yml.j2
@@ -4,6 +4,6 @@ metadata:
   name: rke2-coredns
   namespace: kube-system
 spec:
-  valuesContent: |-
-    nodeSelector:
-      role: "agent"
+#  valuesContent: |-
+#    nodeSelector:
+#      role: "agent"
diff --git a/roles/rke2/templates/manifests/deploy-openstack-ccm.yml.j2 b/roles/rke2/templates/manifests/deploy-openstack-ccm.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c7962ee11d541ce270b0ac59922861dbbc8b4406
--- /dev/null
+++ b/roles/rke2/templates/manifests/deploy-openstack-ccm.yml.j2
@@ -0,0 +1,56 @@
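+# Installs the OpenStack cloud-controller-manager via RKE2's HelmChart CRD.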
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: openstack-ccm
+  namespace: kube-system
+spec:
+  repo: https://kubernetes.github.io/cloud-provider-openstack
+  chart: openstack-cloud-controller-manager
+  version: 1.4.0
+  bootstrap: True
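+  # bootstrap marks the chart as needed to bootstrap the cluster, so it installs before the cloud provider initialises the nodes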
+  valuesContent: |-
+    cloudConfig:
+      global:
+        auth-url: {{ openstack_auth.auth_url }}
+        application-credential-id: {{ openstack_auth.application_credential_id }}
+        application-credential-secret: {{ openstack_auth.application_credential_secret }}
+        region: {{ openstack_region_name }}
+      loadBalancer:
+        subnet-id: {{ subnet_id }}
+        floating-network-id: {{ floating_network_id }}
+
+{% if router_id is defined %}
+      route:
+        router-id: {{ router_id }}
+{% endif %}
+
+    tolerations:
+      - key: node.cloudprovider.kubernetes.io/uninitialized
+        value: "true"
+        effect: NoSchedule
+      - key: node-role.kubernetes.io/master
+        value: "true"
+        effect: NoSchedule
+      - key: CriticalAddonsOnly
+        value: "true"
+        effect: NoExecute
+
+    nodeSelector:
+      node-role.kubernetes.io/control-plane: "true"
+
+    # serviceMonitor:
+    #   enabled: "true"
+
+    {# livenessProbe:
+      httpGet:
+        path: /metrics
+        port: 10258 #}
+
+    controllerExtraArgs: |-
+      - --cluster-name=rke2-{{ cluster_name }}
+
+    resources:
+      requests:
+        cpu: 200m
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/deploy-openstack-cinder.yml.j2 b/roles/rke2/templates/manifests/deploy-openstack-cinder.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..4bf62d9e7359b34f04dee1978a49b26a96bb7cef
--- /dev/null
+++ b/roles/rke2/templates/manifests/deploy-openstack-cinder.yml.j2
@@ -0,0 +1,26 @@
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: openstack-cinder-csi
+  namespace: kube-system
+spec:
+  repo: https://kubernetes.github.io/cloud-provider-openstack
+  chart: openstack-cinder-csi
+  version: 2.3.0
+  valuesContent: |-
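+    # Rendered into the cinder-csi-cloud-config Secret consumed by the CSI driver.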
+    secret:
+      enabled: true
+      create: true
+      name: cinder-csi-cloud-config
+      data:
+        cloud.conf: |-
+          [Global]
+          auth-url={{ openstack_auth.auth_url }}
+          application-credential-id={{ openstack_auth.application_credential_id }}
+          application-credential-secret={{ openstack_auth.application_credential_secret }}
+          region={{ openstack_region_name }}
+
+    storageClass:
+      delete:
+        isDefault: true
diff --git a/roles/rke2/templates/rancher-rke2.repo.j2 b/roles/rke2/templates/rancher-rke2.repo.j2
new file mode 100644
index 0000000000000000000000000000000000000000..6b68985e95b04e77786f08bbb11f51f7a4615d2b
--- /dev/null
+++ b/roles/rke2/templates/rancher-rke2.repo.j2
@@ -0,0 +1,16 @@
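+# Rancher RKE2 RPM repositories; rke2_channel and rke2_k8s_version are supplied by the role.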
+[rancher-rke2-common-{{ rke2_channel }}]
+name=Rancher RKE2 Common ({{ rke2_channel }})
+baseurl=https://rpm.rancher.io/rke2/{{ rke2_channel }}/common/centos/8/noarch
+enabled=1
+gpgcheck=1
+repo_gpgcheck=0
+gpgkey=https://rpm.rancher.io/public.key
+
+[rancher-rke2-{{ rke2_k8s_version }}-{{ rke2_channel }}]
+name=Rancher RKE2 {{ rke2_k8s_version }} ({{ rke2_channel }})
+baseurl=https://rpm.rancher.io/rke2/{{ rke2_channel }}/{{ rke2_k8s_version }}/centos/8/x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=0
+gpgkey=https://rpm.rancher.io/public.key