From 1423ba9df619c9bd61e3de1545e1dbd13a5067b2 Mon Sep 17 00:00:00 2001
From: thweber <thomas.weber@wu.ac.at>
Date: Wed, 15 Dec 2021 16:54:07 +0100
Subject: [PATCH] merge feature#configurable-manifests

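Replace the hard-coded helm.yml task list with configurable manifest
templates. All manifests are declared under default_manifests in
roles/rke2/defaults/main.yml and can be enabled or overridden per
inventory via the manifests variable; tasks/templates.yml renders every
enabled entry to /var/lib/rancher/rke2/server/manifests/ and removes
disabled ones. Private registry mirrors can be configured via the new
registry_mirrors variable, rendered to /etc/rancher/rke2/registries.yaml.

A minimal override sketch (manifest keys come from default_manifests;
the address and file name below are illustrative), e.g. in
group_vars/all/manifests.yml:

    manifests:
      deploy-cert-manager:
        enabled: true
      config-nginx-ingress:
        enabled: true
        loadBalancerIP: "192.0.2.10"
        metallbAddressPool: "default"

Note that the merge uses Ansible's combine filter without recursive=True,
so an override replaces the whole entry for that manifest key.
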
---
 .gitignore                                    |   1 +
 README.md                                     |  30 +-
 roles/rke2/defaults/main.yml                  | 120 +++-
 roles/rke2/handlers/main.yml                  |  31 +-
 roles/rke2/tasks/config_rke2.yml              |  20 +-
 roles/rke2/tasks/helm.yml                     |  19 -
 roles/rke2/tasks/install_rke2.yml             |   2 +
 roles/rke2/tasks/kubeconfig.yml               |  11 +
 roles/rke2/tasks/main.yml                     |   7 +-
 roles/rke2/tasks/templates.yml                |  27 +
 roles/rke2/templates/config.yaml.j2           |  60 +-
 .../templates/helm/config-nginx-ingress.j2    |  10 -
 .../rke2/templates/helm/config-rke2-canal.j2  |  11 -
 .../rke2/templates/helm/deploy-rancher-ui.j2  |  14 -
 .../rke2/templates/helm/deploy-rke2-cilium.j2 |  11 -
 .../manifests/config-nginx-ingress.j2         |  40 ++
 .../templates/manifests/config-rke2-calico.j2 |  21 +
 .../templates/manifests/config-rke2-canal.j2  |  16 +
 .../templates/manifests/config-rke2-cilium.j2 |  48 ++
 .../config-rke2-coredns.j2                    |   0
 .../{helm => manifests}/deploy-cephfs.j2      |   0
 .../deploy-cert-manager.j2                    |  14 +-
 .../deploy-cloud-provider-vsphere.j2          | 622 ++++++++++++++++++
 .../{helm => manifests}/deploy-grafana.j2     |   2 +-
 .../templates/manifests/deploy-metallb.j2     |  32 +
 .../manifests/deploy-nginx-ingress-public.j2  |  45 ++
 .../deploy-openstack-ccm.j2                   |   0
 .../deploy-openstack-cinder.j2                |   0
 .../templates/manifests/deploy-rancher-ui.j2  |  23 +
 roles/rke2/templates/registry_mirrors.j2      |   2 +
 30 files changed, 1128 insertions(+), 111 deletions(-)
 create mode 100644 .gitignore
 delete mode 100644 roles/rke2/tasks/helm.yml
 create mode 100644 roles/rke2/tasks/templates.yml
 delete mode 100644 roles/rke2/templates/helm/config-nginx-ingress.j2
 delete mode 100644 roles/rke2/templates/helm/config-rke2-canal.j2
 delete mode 100644 roles/rke2/templates/helm/deploy-rancher-ui.j2
 delete mode 100644 roles/rke2/templates/helm/deploy-rke2-cilium.j2
 create mode 100644 roles/rke2/templates/manifests/config-nginx-ingress.j2
 create mode 100644 roles/rke2/templates/manifests/config-rke2-calico.j2
 create mode 100644 roles/rke2/templates/manifests/config-rke2-canal.j2
 create mode 100644 roles/rke2/templates/manifests/config-rke2-cilium.j2
 rename roles/rke2/templates/{helm => manifests}/config-rke2-coredns.j2 (100%)
 rename roles/rke2/templates/{helm => manifests}/deploy-cephfs.j2 (100%)
 rename roles/rke2/templates/{helm => manifests}/deploy-cert-manager.j2 (58%)
 create mode 100644 roles/rke2/templates/manifests/deploy-cloud-provider-vsphere.j2
 rename roles/rke2/templates/{helm => manifests}/deploy-grafana.j2 (86%)
 create mode 100644 roles/rke2/templates/manifests/deploy-metallb.j2
 create mode 100644 roles/rke2/templates/manifests/deploy-nginx-ingress-public.j2
 rename roles/rke2/templates/{helm => manifests}/deploy-openstack-ccm.j2 (100%)
 rename roles/rke2/templates/{helm => manifests}/deploy-openstack-cinder.j2 (100%)
 create mode 100644 roles/rke2/templates/manifests/deploy-rancher-ui.j2
 create mode 100644 roles/rke2/templates/registry_mirrors.j2

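Usage note: registry mirrors are rendered to /etc/rancher/rke2/registries.yaml
only when registry_mirrors is non-empty. A minimal sketch, assuming a single
docker.io mirror (the hostname is illustrative, taken from the commented
example in defaults/main.yml):

    registry_mirrors:
      mirrors:
        docker.io:
          endpoint:
            - "https://docker-mirror.example.com:5000"
      configs: {}  # registry_mirrors.j2 references both .mirrors and .configs
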
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3598c30
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+tests
\ No newline at end of file
diff --git a/README.md b/README.md
index 99aca55..d157178 100644
--- a/README.md
+++ b/README.md
@@ -27,18 +27,18 @@ Use roles inside a Ansible playbook
 
 ## Variables
 
-| Variable | Default | Description |
-| ----------- | ----------- | ----------- |
-| cluster_name |  | Name of the RKE2 cluster |
-| server_count | 3 | Number of RKE2 worker VMs |
-| agent_count | 3 | Number of RKE2 server VMs |
-| server_flavor | m1a.large | Server VM flavor |
-| agent_flavor | m1a.xlarge | Worker VM flavor |
-| server_volume_size | 50 | Volume size (GB) for server VM |
-| agent_volume_size | 100 | Volume size (GB) for worker VM |
-| image | 1fe615f0-9dad-447d-bf54-9071defafb77 | ID for OpenStack VM image |
-| domain | | DNS-Entry for loadbalancer IP |
-| node_taints | | Node taints for RKE2 node |
-| node_labels | | Node labels for RKE2 node |
-| rke2_channel | stable | RKE3 version channel |
-| state | present | Flag for setup (`present`) or removing (`absent`) RKE3 cluster |
\ No newline at end of file
+| Variable           | Default                              | Description                                                    |
+| ------------------ | ------------------------------------ | -------------------------------------------------------------- |
+| cluster_name       |                                      | Name of the RKE2 cluster                                       |
+| server_count       | 3                                    | Number of RKE2 server VMs                                      |
+| agent_count        | 3                                    | Number of RKE2 worker VMs                                      |
+| server_flavor      | m1a.large                            | Server VM flavor                                               |
+| agent_flavor       | m1a.xlarge                           | Worker VM flavor                                               |
+| server_volume_size | 50                                   | Volume size (GB) for server VM                                 |
+| agent_volume_size  | 100                                  | Volume size (GB) for worker VM                                 |
+| image              | 1fe615f0-9dad-447d-bf54-9071defafb77 | ID for OpenStack VM image                                      |
+| domain             |                                      | DNS entry for load balancer IP                                 |
+| node_taints        |                                      | Node taints for RKE2 node                                      |
+| node_labels        |                                      | Node labels for RKE2 node                                      |
+| rke2_channel       | stable                               | RKE2 version channel                                           |
+| state              | present                              | Flag for setup (`present`) or removing (`absent`) RKE2 cluster |
diff --git a/roles/rke2/defaults/main.yml b/roles/rke2/defaults/main.yml
index 518b274..6cfc629 100644
--- a/roles/rke2/defaults/main.yml
+++ b/roles/rke2/defaults/main.yml
@@ -8,8 +8,126 @@ node_taints: []
 node_labels: []
 
 rke2_channel: stable
+rke2_version: ""
+
+resolv_conf: "/run/systemd/resolve/resolv.conf"
+resolv_conf_server: "{{ resolv_conf }}"
+resolv_conf_node: "{{ resolv_conf }}"
+
+cloud_provider_name: "external"
+
+registry_mirrors: {}
+# mirrors:
+#   docker.io:
+#     endpoint:
+#       - "https://docker-mirror.example.com:5000"
+#   registry.example.com:
+#     endpoint:
+#       - "https://registry.example.com"
+# configs:
+#   "docker-mirror.example.com:5000":
+#     auth:
+#       username: xxxxxx # this is the registry username
+#       password: xxxxxx # this is the registry password
+#     tls:
+#       cert_file:            # path to the cert file used to authenticate to the registry
+#       key_file:             # path to the key file for the certificate used to authenticate to the registry
+#       ca_file:              # path to the ca file used to verify the registry's certificate
+#       insecure_skip_verify: # may be set to true to skip verifying the registry's certificate
+#   "registry.example.com":
+#     auth: --SEE_ABOVE--
+#     tls: --SEE_ABOVE--
+
 
 state: present
 upgrade: no
 dist_upgrade: no
-reboot: no
\ No newline at end of file
+reboot: no
+
+manifests: {} # used to override default_manifests
+default_manifests:
+  config-rke2-coredns:
+    enabled: false
+
+  config-rke2-calico:
+    enabled: false
+
+  config-rke2-canal:
+    enabled: false
+
+  config-rke2-cilium:
+    enabled: false
+    hubble:
+      user: ""
+      password: ""
+    ui:
+      enabled: false
+      ingress:
+        enabled: true
+        annotations:
+          nginx.ingress.kubernetes.io/auth-type: basic
+          nginx.ingress.kubernetes.io/auth-secret: hubble-auth-secret
+          nginx.ingress.kubernetes.io/auth-secret-type: auth-file
+          nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - hubble'
+
+  config-nginx-ingress:
+    enabled: false
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    metallbAddressPool: ""
+
+  deploy-openstack-ccm:
+    enabled: false
+
+  deploy-openstack-cinder:
+    enabled: false
+
+  deploy-openstack-manila:
+    enabled: false
+
+  deploy-cephfs:
+    enabled: false
+
+  deploy-nfs:
+    enabled: false
+
+  deploy-grafana:
+    enabled: false
+    adminPassword: ""
+
+  deploy-cert-manager:
+    enabled: false
+
+  deploy-rancher-ui:
+    enabled: false
+    rancher_ui_dns: ""
+    letsEncrypt_admin_mail: ""
+
+  deploy-metallb:
+    enabled: false
+    pools:
+      - name: "default"
+        addresses:
+          - ""
+        auto_assign: false
+
+  deploy-cloud-provider-vsphere:
+    enabled: false
+    vCenter_IP: ""
+    vCenter_Username: ""
+    vCenter_Password: ""
+    vCenter_Datacenter: ""
+    vCenter_ClusterID: ""
+    CSIMigrationvSphere: ""
+
+  deploy-spectrum-scale-secret:
+    enabled: false
+
+  deploy-spectrum-scale-operator-2.2.0:
+    enabled: false
+
+  deploy-spectrum-scale-csi-2.1.0:
+    enabled: false
+
+  deploy-nginx-ingress-public:
+    enabled: false
\ No newline at end of file
diff --git a/roles/rke2/handlers/main.yml b/roles/rke2/handlers/main.yml
index 0ab4ca9..75b35a8 100644
--- a/roles/rke2/handlers/main.yml
+++ b/roles/rke2/handlers/main.yml
@@ -1,6 +1,33 @@
+- name: start rke2
+  ansible.builtin.systemd:
+    name: "rke2-{{ node_type }}"
+    enabled: yes
+    masked: no
+    state: started
+    daemon_reload: yes
+
 - name: restart rke2
-  service:
+  throttle: 1
+  ansible.builtin.systemd:
     name: "rke2-{{ node_type }}"
     masked: no
+    enabled: yes
     state: restarted
-    daemon_reload: yes
\ No newline at end of file
+    daemon_reload: yes
+
+- name: reload rke2
+  ansible.builtin.systemd:
+    name: "rke2-{{ node_type }}"
+    masked: no
+    enabled: yes
+    state: reloaded
+    daemon_reload: yes
+
+- name: wait for RANCHER to come up
+  uri:
+    url: "http://{{ rancher_ui_dns }}"
+    status_code: [200, 404]
+  register: result
+  until: result.status == 200 or result.status == 404
+  retries: 100 # retry up to 100 times
+  delay: 30 # pause 30 seconds between attempts
\ No newline at end of file
diff --git a/roles/rke2/tasks/config_rke2.yml b/roles/rke2/tasks/config_rke2.yml
index dc936f2..7d46a66 100644
--- a/roles/rke2/tasks/config_rke2.yml
+++ b/roles/rke2/tasks/config_rke2.yml
@@ -3,7 +3,18 @@
   run_once: yes
   set_fact:
     token: "{{ lookup('community.general.random_string', length=129, special=False) }}"
-  when: token is not defined
+  when: (not upgrade) and (token is not defined) 
+
+- name: ensure inventory folders
+  delegate_to: localhost
+  become: no
+  run_once: yes
+  file:
+    path: "{{ item }}"
+    state: directory
+  loop:
+    - group_vars
+    - group_vars/all
 
 - name: store token
   delegate_to: localhost
@@ -13,6 +24,7 @@
     dest: group_vars/all/token.yml
     content: |-
       token: {{ token }}
+  when: not upgrade
 
 - name: read token
   include_vars: group_vars/all/token.yml
@@ -24,10 +36,8 @@
   notify:
     - restart rke2
 
-- name: start RKE2
-  service:
+- name: enable rke2
+  ansible.builtin.systemd:
     name: "rke2-{{ node_type }}"
     enabled: yes
     masked: no
-    state: started
-    daemon_reload: yes
\ No newline at end of file
diff --git a/roles/rke2/tasks/helm.yml b/roles/rke2/tasks/helm.yml
deleted file mode 100644
index 8d047a8..0000000
--- a/roles/rke2/tasks/helm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-- name: copy helm template files
-  template:
-    src: 'helm/{{ item.template }}.j2'
-    dest: '/var/lib/rancher/rke2/server/manifests/{{ item.template }}.yaml'
-  loop:
-      - { template: 'deploy-openstack-ccm'}
-      - { template: 'deploy-openstack-cinder'}
-      # - { template: 'deploy-openstack-manila'}
-      - { template: 'deploy-cephfs'}
-      # - { template: 'deploy-nfs'}
-      # - { template: 'deploy-grafana'}
-      # - { template: 'deploy-cert-manager'}
-      # - { template: 'deploy-rancher-ui'}
-      # - { template: 'deploy-rke2-cilium'}
-      - { template: 'config-rke2-canal'}
-      - { template: 'config-nginx-ingress'}
-      # - { template: 'config-rke2-coredns'}
-  notify:
-    - restart rke2
\ No newline at end of file
diff --git a/roles/rke2/tasks/install_rke2.yml b/roles/rke2/tasks/install_rke2.yml
index a96d4b6..7047bd9 100644
--- a/roles/rke2/tasks/install_rke2.yml
+++ b/roles/rke2/tasks/install_rke2.yml
@@ -10,12 +10,14 @@
   args:
     creates: /usr/local/bin/rke2
   environment:
+    INSTALL_RKE2_VERSION: "{{ rke2_version }}"
     INSTALL_RKE2_CHANNEL: "{{ rke2_channel }}"
     INSTALL_RKE2_TYPE: "{{ node_type }}"
 
 - name: Upgrade RKE2
   command: "/tmp/rke2.sh"
   environment:
+    INSTALL_RKE2_VERSION: "{{ rke2_version }}"
     INSTALL_RKE2_CHANNEL: "{{ rke2_channel }}"
     INSTALL_RKE2_TYPE: "{{ node_type }}"
   when: upgrade
diff --git a/roles/rke2/tasks/kubeconfig.yml b/roles/rke2/tasks/kubeconfig.yml
index 75bed5d..7744bf0 100644
--- a/roles/rke2/tasks/kubeconfig.yml
+++ b/roles/rke2/tasks/kubeconfig.yml
@@ -1,3 +1,14 @@
+
+- name: start rke2 on master
+  ansible.builtin.systemd:
+    name: "rke2-{{ node_type }}"
+    enabled: yes
+    masked: no
+    state: started
+    daemon_reload: yes
+#  notify:
+#  - wait for RANCHER to come up
+
 - name: wait for kubeconfig
   wait_for:
     path: /etc/rancher/rke2/rke2.yaml
diff --git a/roles/rke2/tasks/main.yml b/roles/rke2/tasks/main.yml
index cc6f6b6..8650989 100644
--- a/roles/rke2/tasks/main.yml
+++ b/roles/rke2/tasks/main.yml
@@ -1,4 +1,5 @@
-- stat:
+- name: check if rke2 is installed
+  stat:
     path: /usr/local/bin/rke2
   register: rke2_installed
 
@@ -7,7 +8,7 @@
   - include_tasks: install_rke2.yml
   when: ( not rke2_installed.stat.exists and state != 'absent' ) or (upgrade and state != 'absent' )
 
-- include_tasks: helm.yml
+- include_tasks: templates.yml
   when: "state != 'absent' and 'master' in group_names"
 
 - include_tasks: config_rke2.yml
@@ -18,4 +19,4 @@
 
 - name: uninstall rke2
   command: rke2-uninstall.sh
-  when: rke2_installed.stat.exists and state == 'absent'
\ No newline at end of file
+  when: rke2_installed.stat.exists and state == 'absent'
diff --git a/roles/rke2/tasks/templates.yml b/roles/rke2/tasks/templates.yml
new file mode 100644
index 0000000..0e87b4f
--- /dev/null
+++ b/roles/rke2/tasks/templates.yml
@@ -0,0 +1,27 @@
+
+- name: sync k8s manifests
+  block:
+    - name: copy manifest template files
+      template:
+        src: 'manifests/{{ item.key }}.j2'
+        dest: '/var/lib/rancher/rke2/server/manifests/{{ item.key }}.yaml'
+      with_dict: "{{ default_manifests | combine(manifests) }}"
+      when: item.value.enabled
+      notify:
+        - restart rke2
+
+    - name: Remove manifest template files
+      ansible.builtin.file:
+        path: '/var/lib/rancher/rke2/server/manifests/{{ item.key }}.yaml'
+        state: absent
+      with_dict: "{{ default_manifests | combine(manifests) }}"
+      when: not item.value.enabled
+      ignore_errors: yes
+
+- name: copy registry_mirrors file
+  template:
+    src: 'registry_mirrors.j2'
+    dest: '/etc/rancher/rke2/registries.yaml'
+  notify:
+    - restart rke2
+  when: registry_mirrors is defined and registry_mirrors | length > 0
\ No newline at end of file
diff --git a/roles/rke2/templates/config.yaml.j2 b/roles/rke2/templates/config.yaml.j2
index 415ebf0..592a225 100644
--- a/roles/rke2/templates/config.yaml.j2
+++ b/roles/rke2/templates/config.yaml.j2
@@ -1,7 +1,27 @@
-node-ip: "{{ ansible_default_ipv4.address }}"
+debug: true
 node-name: "{{ ansible_hostname }}"
-
 token: "{{ token }}"
+
+{% if node_ip is defined %}
+node-ip: "{{ node_ip }}"
+{% else %}
+node-ip: "{{ ansible_default_ipv4.address }}"
+{% endif %}
+
+{% if node_taints is defined and node_taints | length > 0 %}
+node-taint:
+{% for item in node_taints %}
+  - {{ item }}
+{% endfor %}
+{% endif %}
+
+{% if node_labels is defined and node_labels | length > 0 %}
+node-label:
+{% for item in node_labels %}
+    - {{ item }}
+{% endfor %}
+{% endif %}
+
 {% if 'master' not in group_names %}
 server: "{{ server }}"
 {% endif %}
@@ -9,31 +29,37 @@ server: "{{ server }}"
 {% if cis_profile is defined %}
 profile: "{{ cis_profile }}"
 {% endif %}
-resolv-conf: "/run/systemd/resolve/resolv.conf" # systemd-resolved
-debug: true
-cloud-provider-name: "external"
+
+{% if cloud_provider_name is defined and cloud_provider_name | length > 0 %}
+cloud-provider-name: "{{ cloud_provider_name }}"
+{% endif %}
 
 {% if node_type == 'server' %}
-{# disable: rke2-canal #}
+resolv-conf: "{{ resolv_conf_server }}"
+{# disable-cloud-controller: true #}
 write-kubeconfig-mode: "0644"
+kube-apiserver-arg: "--enable-admission-plugins=NodeRestriction,PodSecurityPolicy,PodNodeSelector,PodTolerationRestriction,DenyServiceExternalIPs"
+
+{% if cni is defined and cni | length > 0 %}
+cni: "{{ cni }}"
+{% endif %}
+
 {% if tls_san is defined and tls_san | length > 0 %}
 tls-san:
 {% for san in tls_san %}
   - {{ san }}
 {% endfor %}
 {% endif %}
+
+{# Only for Nodes #}
+{% else %} 
+resolv-conf: "{{ resolv_conf_node }}"
 {% endif %}
 
-{% if node_taints is defined and node_taints | length > 0 %}
-node-taint:
-{% for item in node_taints %}
-  - {{ item }}
-{% endfor %}
+
+{% if node_external_ip is defined %}
+node-external-ip: "{{ node_external_ip }}"
+{% else %}
+node-external-ip: "{{ ansible_default_ipv4.address }}"
 {% endif %}
 
-{% if node_labels is defined and node_labels | length > 0 %}
-node-label:
-{% for item in node_labels %}
-    - {{ item }}
-{% endfor %}
-{% endif %}
\ No newline at end of file
diff --git a/roles/rke2/templates/helm/config-nginx-ingress.j2 b/roles/rke2/templates/helm/config-nginx-ingress.j2
deleted file mode 100644
index b3db160..0000000
--- a/roles/rke2/templates/helm/config-nginx-ingress.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: helm.cattle.io/v1
-kind: HelmChartConfig
-metadata:
-  name: rke2-ingress-nginx
-  namespace: kube-system
-spec:
-  valuesContent: |-
-    controller:
-      config: 
-        use-forwarded-headers: "true"
\ No newline at end of file
diff --git a/roles/rke2/templates/helm/config-rke2-canal.j2 b/roles/rke2/templates/helm/config-rke2-canal.j2
deleted file mode 100644
index 2321389..0000000
--- a/roles/rke2/templates/helm/config-rke2-canal.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: helm.cattle.io/v1
-kind: HelmChartConfig
-metadata:
-  name: rke2-canal
-  namespace: kube-system
-spec:
-    valuesContent: |-
-        calico:
-            vethuMTU: 1400
-            networkingBackend: "vxlan"
-            masquerade: false
\ No newline at end of file
diff --git a/roles/rke2/templates/helm/deploy-rancher-ui.j2 b/roles/rke2/templates/helm/deploy-rancher-ui.j2
deleted file mode 100644
index c36482c..0000000
--- a/roles/rke2/templates/helm/deploy-rancher-ui.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: helm.cattle.io/v1
-kind: HelmChart
-metadata:
-  name: rancher
-  namespace: kube-system
-spec:
-  repo: https://releases.rancher.com/server-charts/latest
-  chart: rancher
-  version: 2.5.9-rc2
-  #targetNamespace: cattle-system
-  set:
-    hostname: "{{rancher_ui_dns}}"
-    letsEncrypt.email: "{{letsEncrypt_admin_mail}}"
-    ingress.tls.source: "letsEncrypt"
\ No newline at end of file
diff --git a/roles/rke2/templates/helm/deploy-rke2-cilium.j2 b/roles/rke2/templates/helm/deploy-rke2-cilium.j2
deleted file mode 100644
index aa6dea3..0000000
--- a/roles/rke2/templates/helm/deploy-rke2-cilium.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: helm.cattle.io/v1
-kind: HelmChart
-metadata:
-  name: rke2-cilium
-  namespace: kube-system
-spec:
-  repo: https://rke2-charts.rancher.io
-  chart: rke2-cilium
-  bootstrap: true
-  valuesContent: |-
-    cilium: {}
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-nginx-ingress.j2 b/roles/rke2/templates/manifests/config-nginx-ingress.j2
new file mode 100644
index 0000000..ebd7e29
--- /dev/null
+++ b/roles/rke2/templates/manifests/config-nginx-ingress.j2
@@ -0,0 +1,40 @@
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-ingress-nginx
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    defaultBackend:
+      enabled: true
+    podSecurityPolicy:
+      enabled: true
+    controller:
+      config: 
+        use-forwarded-headers: "true"
+        enable-underscores-in-headers: "true"
+        proxy-add-original-uri-header: "true"
+        allow-snippet-annotations: "true"
+      ingressClassResource:
+        enabled: true
+        default: true
+      service:
+        enabled: true
+{% if item.value.loadBalancerIP is defined and item.value.loadBalancerIP | length > 0 %}
+        externalTrafficPolicy: "Local"
+        type: LoadBalancer
+        loadBalancerIP: {{ item.value.loadBalancerIP }}
+{% if item.value.loadBalancerSourceRanges is defined and item.value.loadBalancerSourceRanges | length > 0 %}
+        loadBalancerSourceRanges:
+{% for sourceRange in item.value.loadBalancerSourceRanges %}
+          - {{ sourceRange }}
+{% endfor %}
+{% endif %}
+{% if item.value.metallbAddressPool is defined and item.value.metallbAddressPool | length > 0 %}
+      hostNetwork: false 
+      hostPort:
+        enabled: false
+      annotations:
+        metallb.universe.tf/address-pool: {{ item.value.metallbAddressPool }}
+{% endif %}
+{% endif %}
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-rke2-calico.j2 b/roles/rke2/templates/manifests/config-rke2-calico.j2
new file mode 100644
index 0000000..53fd7a4
--- /dev/null
+++ b/roles/rke2/templates/manifests/config-rke2-calico.j2
@@ -0,0 +1,21 @@
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-calico
+  namespace: kube-system
+spec:
+    valuesContent: |-
+      installation:
+        controlPlaneTolerations:
+        - key: "node-role.kubernetes.io/control-plane"
+          operator: "Exists"
+          effect: "NoSchedule"
+        - key: "node-role.kubernetes.io/etcd"
+          operator: "Exists"
+          effect: "NoExecute"
+        - key: "node-role.kubernetes.io/etcd"
+          operator: "Exists"
+          effect: "NoExecute"
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+          effect: "NoExecute"
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-rke2-canal.j2 b/roles/rke2/templates/manifests/config-rke2-canal.j2
new file mode 100644
index 0000000..8f3bc18
--- /dev/null
+++ b/roles/rke2/templates/manifests/config-rke2-canal.j2
@@ -0,0 +1,16 @@
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-canal
+  namespace: kube-system
+spec:
+  bootstrap: True
+  valuesContent: |-
+    calico:
+      vethuMTU: 1400
+      masquerade: false
+      networkingBackend: "vxlan"
+{% if flannel_iface is defined %}
+    flannel:
+      iface: "{{ flannel_iface }}"
+{% endif %}
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/config-rke2-cilium.j2 b/roles/rke2/templates/manifests/config-rke2-cilium.j2
new file mode 100644
index 0000000..47efb53
--- /dev/null
+++ b/roles/rke2/templates/manifests/config-rke2-cilium.j2
@@ -0,0 +1,48 @@
+
+{% if item.value.ui.ingress.enabled %}
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+  name: hubble-auth-secret
+  namespace: kube-system
+type: Opaque
+stringData:
+  auth: "{{ item.value.hubble.user }}:{{ item.value.hubble.password | password_hash('sha512') }}"
+{% endif %}
+
+---
+
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-cilium
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    cilium:
+      bandwidthManager: true
+      debug:
+        enabled: true
+      preflight:
+        enabled: false
+      bpf:
+        clockProbe: true
+      containerRuntime:
+        integration: auto
+      hubble:
+        metrics:
+          enabled:
+          - dns:query;ignoreAAAA
+          - drop
+          - tcp
+          - flow
+          - icmp
+          - http
+        relay:
+          enabled: true
+{% if item.value.ui.enabled %}
+{% set ui = {"ui": item.value.ui} %}
+      {{ ui | to_nice_yaml(indent=8) }}
+{% endif %}
\ No newline at end of file
diff --git a/roles/rke2/templates/helm/config-rke2-coredns.j2 b/roles/rke2/templates/manifests/config-rke2-coredns.j2
similarity index 100%
rename from roles/rke2/templates/helm/config-rke2-coredns.j2
rename to roles/rke2/templates/manifests/config-rke2-coredns.j2
diff --git a/roles/rke2/templates/helm/deploy-cephfs.j2 b/roles/rke2/templates/manifests/deploy-cephfs.j2
similarity index 100%
rename from roles/rke2/templates/helm/deploy-cephfs.j2
rename to roles/rke2/templates/manifests/deploy-cephfs.j2
diff --git a/roles/rke2/templates/helm/deploy-cert-manager.j2 b/roles/rke2/templates/manifests/deploy-cert-manager.j2
similarity index 58%
rename from roles/rke2/templates/helm/deploy-cert-manager.j2
rename to roles/rke2/templates/manifests/deploy-cert-manager.j2
index 25b4576..53abce5 100644
--- a/roles/rke2/templates/helm/deploy-cert-manager.j2
+++ b/roles/rke2/templates/manifests/deploy-cert-manager.j2
@@ -1,3 +1,12 @@
+---
+
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: cert-manager
+
+---
+
 apiVersion: helm.cattle.io/v1
 kind: HelmChart
 metadata:
@@ -6,7 +15,8 @@ metadata:
 spec:
   repo: https://charts.jetstack.io
   chart: cert-manager
-  #targetNamespace: cert-manager
-  version: v1.3.1
+  version: v1.6.1
+  bootstrap: True
+  targetNamespace: cert-manager
   set:
     installCRDs: "true"
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/deploy-cloud-provider-vsphere.j2 b/roles/rke2/templates/manifests/deploy-cloud-provider-vsphere.j2
new file mode 100644
index 0000000..13be47d
--- /dev/null
+++ b/roles/rke2/templates/manifests/deploy-cloud-provider-vsphere.j2
@@ -0,0 +1,622 @@
+
+---
+
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: vmware-system
+
+---
+
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: vsphere-cpi
+  namespace: kube-system
+spec:
+  repo: https://kubernetes.github.io/cloud-provider-vsphere
+  chart: vsphere-cpi
+  version: "1.0.0"
+  targetNamespace: vmware-system
+{% if upgrade %}
+  bootstrap: True
+{% endif %}
+  valuesContent: |-
+    config:
+      enabled: true
+      vcenter: "{{ item.value.vCenter_IP }}"
+      username: "{{ item.value.vCenter_Username }}"
+      password: "{{ item.value.vCenter_Password }}"
+      datacenter: "{{ item.value.vCenter_Datacenter }}"
+
+    daemonset:
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane
+        value: "true"
+
+---
+
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: vsphere-config-secret
+  namespace: vmware-system
+stringData:
+  csi-vsphere.conf: |
+    [Global]
+    cluster-id = "{{ item.value.vCenter_ClusterID }}"
+    user = "{{ item.value.vCenter_Username }}"
+    password = "{{ item.value.vCenter_Password }}"
+    port = "443"
+    insecure-flag = "True"
+
+    [VirtualCenter "{{ item.value.vCenter_IP }}"]
+    datacenters = "{{ item.value.vCenter_Datacenter }}"
+    
+    [Labels]
+    region = k8s-region
+    zone = k8s-zone
+
+---
+
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: vsphere-ext4
+provisioner: csi.vsphere.vmware.com
+volumeBindingMode: WaitForFirstConsumer
+
+---
+
+### used for privileged containers
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: psp:vsphere-csi-privileged-role
+  namespace: vmware-system
+rules:
+- apiGroups:
+  - policy
+  resourceNames:
+  - system-unrestricted-psp
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+
+---
+
+### used for privileged containers
+
+apiVersion: v1
+items:
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    name: privileged:psp:vsphere-csi-controller-binding
+    namespace: vmware-system
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: psp:vsphere-csi-privileged-role
+  subjects:
+  - kind: ServiceAccount
+    name: vsphere-csi-controller
+    namespace: vmware-system
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    name: privileged:psp:vsphere-csi-node-binding
+    namespace: vmware-system
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: psp:vsphere-csi-privileged-role
+  subjects:
+  - kind: ServiceAccount
+    name: vsphere-csi-node
+    namespace: vmware-system
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    name: privileged:psp:vsphere-csi-webhook-binding
+    namespace: vmware-system
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: psp:vsphere-csi-privileged-role
+  subjects:
+  - kind: ServiceAccount
+    name: vsphere-csi-webhook
+    namespace: vmware-system
+kind: List
+
+
+---
+
+apiVersion: storage.k8s.io/v1 # For k8s 1.17 use storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  name: csi.vsphere.vmware.com
+spec:
+  attachRequired: true
+  podInfoOnMount: false
+---
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: vsphere-csi-controller
+  namespace: vmware-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: vsphere-csi-controller-role
+rules:
+  - apiGroups: [""]
+    resources: ["nodes", "persistentvolumeclaims", "pods", "configmaps"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["patch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["get", "list", "watch", "create", "update", "patch"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses", "csinodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "patch"]
+  - apiGroups: ["cns.vmware.com"]
+    resources: ["triggercsifullsyncs"]
+    verbs: ["create", "get", "update", "watch", "list"]
+  - apiGroups: ["cns.vmware.com"]
+    resources: ["cnsvspherevolumemigrations", "cnsvolumeoperationrequests"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["get", "create", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments/status"]
+    verbs: ["patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: vsphere-csi-controller-binding
+subjects:
+  - kind: ServiceAccount
+    name: vsphere-csi-controller
+    namespace: vmware-system
+roleRef:
+  kind: ClusterRole
+  name: vsphere-csi-controller-role
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: vsphere-csi-node
+  namespace: vmware-system
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: vsphere-csi-node-role
+  namespace: vmware-system
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: vsphere-csi-node-binding
+  namespace: vmware-system
+subjects:
+  - kind: ServiceAccount
+    name: vsphere-csi-node
+    namespace: vmware-system
+roleRef:
+  kind: Role
+  name: vsphere-csi-node-role
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: v1
+data:
+  "csi-migration": "true"
+  "csi-auth-check": "true"
+  "online-volume-extend": "true"
+  "trigger-csi-fullsync": "false"
+  "async-query-volume": "true"
+  "improved-csi-idempotency": "true"
+  "improved-volume-topology": "true"
+kind: ConfigMap
+metadata:
+  name: internal-feature-states.csi.vsphere.vmware.com
+  namespace: vmware-system
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: vsphere-csi-controller
+  namespace: vmware-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: vsphere-csi-controller
+  template:
+    metadata:
+      labels:
+        app: vsphere-csi-controller
+        role: vsphere-csi
+    spec:
+      serviceAccountName: vsphere-csi-controller
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: "true"
+      tolerations:
+        - key: node-role.kubernetes.io/control-plane
+          operator: Exists
+          effect: NoSchedule
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+          effect: "NoExecute"
+        # uncomment below toleration if you need an aggressive pod eviction in case when
+        # node becomes not-ready or unreachable. Default is 300 seconds if not specified.
+        #- key: node.kubernetes.io/not-ready
+        #  operator: Exists
+        #  effect: NoExecute
+        #  tolerationSeconds: 30
+        #- key: node.kubernetes.io/unreachable
+        #  operator: Exists
+        #  effect: NoExecute
+        #  tolerationSeconds: 30
+      dnsPolicy: "Default"
+      containers:
+        - name: csi-attacher
+          image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.0
+          args:
+            - "--v=4"
+            - "--timeout=300s"
+            - "--csi-address=$(ADDRESS)"
+            - "--leader-election"
+            - "--kube-api-qps=100"
+            - "--kube-api-burst=100"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+        - name: csi-resizer
+          image: quay.io/k8scsi/csi-resizer:v1.1.0
+          securityContext:
+            allowPrivilegeEscalation: true
+            privileged: true
+          args:
+            - "--v=4"
+            - "--timeout=300s"
+            - "--handle-volume-inuse-error=false"
+            - "--csi-address=$(ADDRESS)"
+            - "--kube-api-qps=100"
+            - "--kube-api-burst=100"
+            - "--leader-election"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+        - name: vsphere-csi-controller
+          image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.3.0
+          securityContext:
+            allowPrivilegeEscalation: true
+            privileged: true
+          args:
+            - "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
+            - "--fss-namespace=$(CSI_NAMESPACE)"
+          imagePullPolicy: "Always"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: X_CSI_MODE
+              value: "controller"
+            - name: X_CSI_SPEC_DISABLE_LEN_CHECK
+              value: "true"
+            - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT
+              value: 3m
+            - name: VSPHERE_CSI_CONFIG
+              value: "/etc/cloud/csi-vsphere.conf"
+            - name: LOGGER_LEVEL
+              value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
+            - name: INCLUSTER_CLIENT_QPS
+              value: "100"
+            - name: INCLUSTER_CLIENT_BURST
+              value: "100"
+            - name: CSI_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          volumeMounts:
+            - mountPath: /etc/cloud
+              name: vsphere-config-volume
+              readOnly: true
+            - mountPath: /csi
+              name: socket-dir
+          ports:
+            - name: healthz
+              containerPort: 9808
+              protocol: TCP
+            - name: prometheus
+              containerPort: 2112
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 5
+            failureThreshold: 3
+        - name: liveness-probe
+          image: quay.io/k8scsi/livenessprobe:v2.2.0
+          securityContext:
+            allowPrivilegeEscalation: true
+            privileged: true
+          args:
+            - "--v=4"
+            - "--csi-address=/csi/csi.sock"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+        - name: vsphere-syncer
+          image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.3.0
+          securityContext:
+            allowPrivilegeEscalation: true
+            privileged: true
+          args:
+            - "--leader-election"
+            - "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
+            - "--fss-namespace=$(CSI_NAMESPACE)"
+          imagePullPolicy: "Always"
+          ports:
+            - containerPort: 2113
+              name: prometheus
+              protocol: TCP
+          env:
+            - name: FULL_SYNC_INTERVAL_MINUTES
+              value: "30"
+            - name: VSPHERE_CSI_CONFIG
+              value: "/etc/cloud/csi-vsphere.conf"
+            - name: LOGGER_LEVEL
+              value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
+            - name: INCLUSTER_CLIENT_QPS
+              value: "100"
+            - name: INCLUSTER_CLIENT_BURST
+              value: "100"
+            - name: CSI_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          volumeMounts:
+            - mountPath: /etc/cloud
+              name: vsphere-config-volume
+              readOnly: true
+        - name: csi-provisioner
+          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.0
+          securityContext:
+            allowPrivilegeEscalation: true
+            privileged: true
+          args:
+            - "--v=4"
+            - "--timeout=300s"
+            - "--csi-address=$(ADDRESS)"
+            - "--kube-api-qps=100"
+            - "--kube-api-burst=100"
+            - "--leader-election"
+            - "--default-fstype=ext4"
+            # needed only for topology aware setup
+            - "--feature-gates=Topology=true"
+            - "--strict-topology"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+      volumes:
+        - name: vsphere-config-volume
+          secret:
+            secretName: vsphere-config-secret
+        - name: socket-dir
+          emptyDir: {}
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: vsphere-csi-node
+  namespace: vmware-system
+spec:
+  selector:
+    matchLabels:
+      app: vsphere-csi-node
+  updateStrategy:
+    type: "RollingUpdate"
+    rollingUpdate:
+      maxUnavailable: 1
+  template:
+    metadata:
+      labels:
+        app: vsphere-csi-node
+        role: vsphere-csi
+    spec:
+      serviceAccountName: vsphere-csi-node
+      hostNetwork: true
+      dnsPolicy: "ClusterFirstWithHostNet"
+      nodeSelector:
+        storage.provider: "vsphere"
+      containers:
+        - name: node-driver-registrar
+          image: quay.io/k8scsi/csi-node-driver-registrar:v2.1.0
+          args:
+            - "--v=5"
+            - "--csi-address=$(ADDRESS)"
+            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
+            - "--health-port=9809"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+            - name: DRIVER_REG_SOCK_PATH
+              value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+            - name: registration-dir
+              mountPath: /registration
+          ports:
+            - containerPort: 9809
+              name: healthz
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 5
+            timeoutSeconds: 5
+        - name: vsphere-csi-node
+          image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.3.0
+          args:
+            - "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
+            - "--fss-namespace=$(CSI_NAMESPACE)"
+          imagePullPolicy: "Always"
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: MAX_VOLUMES_PER_NODE
+              value: "0" # Maximum number of volumes that controller can publish to the node. If value is not set or zero Kubernetes decide how many volumes can be published by the controller to the node.
+            - name: X_CSI_MODE
+              value: "node"
+            - name: X_CSI_SPEC_REQ_VALIDATION
+              value: "false"
+            - name: X_CSI_SPEC_DISABLE_LEN_CHECK
+              value: "true"
+            # needed only for topology aware setups
+            #- name: VSPHERE_CSI_CONFIG
+            #  value: "/etc/cloud/csi-vsphere.conf" # here csi-vsphere.conf is the name of the file used for creating secret using "--from-file" flag
+            - name: LOGGER_LEVEL
+              value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
+            - name: CSI_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          volumeMounts:
+            # needed only for topology aware setups
+            - name: vsphere-config-volume
+              mountPath: /etc/cloud
+              readOnly: true
+            - name: plugin-dir
+              mountPath: /csi
+            - name: pods-mount-dir
+              mountPath: /var/lib/kubelet
+              # needed so that any mounts setup inside this container are
+              # propagated back to the host machine.
+              mountPropagation: "Bidirectional"
+            - name: device-dir
+              mountPath: /dev
+            - name: blocks-dir
+              mountPath: /sys/block
+            - name: sys-devices-dir
+              mountPath: /sys/devices
+          ports:
+            - name: healthz
+              containerPort: 9808
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 10
+            timeoutSeconds: 5
+            periodSeconds: 5
+            failureThreshold: 3
+        - name: liveness-probe
+          image: quay.io/k8scsi/livenessprobe:v2.2.0
+          args:
+            - "--v=4"
+            - "--csi-address=/csi/csi.sock"
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+      volumes:
+        # needed only for topology aware setups
+        - name: vsphere-config-volume
+          secret:
+            secretName: vsphere-config-secret
+        - name: registration-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins_registry
+            type: Directory
+        - name: plugin-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com
+            type: DirectoryOrCreate
+        - name: pods-mount-dir
+          hostPath:
+            path: /var/lib/kubelet
+            type: Directory
+        - name: device-dir
+          hostPath:
+            path: /dev
+        - name: blocks-dir
+          hostPath:
+            path: /sys/block
+            type: Directory
+        - name: sys-devices-dir
+          hostPath:
+            path: /sys/devices
+            type: Directory
+      tolerations:
+        - effect: NoExecute
+          operator: Exists
+        - effect: NoSchedule
+          operator: Exists
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: vsphere-csi-controller
+  namespace: vmware-system
+  labels:
+    app: vsphere-csi-controller
+spec:
+  ports:
+    - name: ctlr
+      port: 2112
+      targetPort: 2112
+      protocol: TCP
+    - name: syncer
+      port: 2113
+      targetPort: 2113
+      protocol: TCP
+  selector:
+    app: vsphere-csi-controller
diff --git a/roles/rke2/templates/helm/deploy-grafana.j2 b/roles/rke2/templates/manifests/deploy-grafana.j2
similarity index 86%
rename from roles/rke2/templates/helm/deploy-grafana.j2
rename to roles/rke2/templates/manifests/deploy-grafana.j2
index 97f1d80..682cbda 100644
--- a/roles/rke2/templates/helm/deploy-grafana.j2
+++ b/roles/rke2/templates/manifests/deploy-grafana.j2
@@ -7,7 +7,7 @@ spec:
   chart: stable/grafana
   #targetNamespace: monitoring
   set:
-    adminPassword: "{{ grafana_password }}"
+    adminPassword: "{{ item.value.adminPassword }}"
   valuesContent: |-
     image:
       tag: master
diff --git a/roles/rke2/templates/manifests/deploy-metallb.j2 b/roles/rke2/templates/manifests/deploy-metallb.j2
new file mode 100644
index 0000000..e7d405c
--- /dev/null
+++ b/roles/rke2/templates/manifests/deploy-metallb.j2
@@ -0,0 +1,32 @@
+---
+
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: metallb-system
+
+---
+
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: metallb
+  namespace: kube-system
+spec:
+  repo: https://metallb.github.io/metallb
+  chart: metallb
+  version: "0.11.0"
+  bootstrap: True
+  targetNamespace: metallb-system
+  valuesContent: |-
+    configInline:
+      address-pools:
+{% for pool in item.value.pools %}
+      - name: {{ pool.name }}
+        protocol: layer2
+        addresses:
+{% for address in pool.addresses %}
+         - {{ address }}
+{% endfor %}
+        auto-assign: {{ pool.auto_assign or false }}
+{% endfor %}
\ No newline at end of file
diff --git a/roles/rke2/templates/manifests/deploy-nginx-ingress-public.j2 b/roles/rke2/templates/manifests/deploy-nginx-ingress-public.j2
new file mode 100644
index 0000000..fc3f376
--- /dev/null
+++ b/roles/rke2/templates/manifests/deploy-nginx-ingress-public.j2
@@ -0,0 +1,45 @@
+---
+
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: ingress-nginx-public
+  
+---
+
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: ingress-nginx-public
+  namespace: kube-system
+spec:
+  repo: https://kubernetes.github.io/ingress-nginx
+  chart: ingress-nginx
+  targetNamespace: ingress-nginx-public
+  valuesContent: |-
+    controller:
+      config: 
+        use-forwarded-headers: "true"
+        enable-underscores-in-headers: "true"
+        proxy-add-original-uri-header: "true"
+        allow-snippet-annotations: "true"
+      hostNetwork: false 
+      hostPort:
+        enabled: false
+      annotations:
+        metallb.universe.tf/address-pool: private
+      service:
+        enabled: true
+        externalTrafficPolicy: "Local"
+        type: LoadBalancer
+        loadBalancerIP: 137.208.31.142
+        admissionWebhooks:
+          enabled: false
+      ingressClassResource:
+        name: nginx-public
+        controllerValue: "k8s.io/ingress-nginx-public"
+        enabled: true
+    defaultBackend:
+        enabled: true
+    podSecurityPolicy:
+      enabled: true
\ No newline at end of file
diff --git a/roles/rke2/templates/helm/deploy-openstack-ccm.j2 b/roles/rke2/templates/manifests/deploy-openstack-ccm.j2
similarity index 100%
rename from roles/rke2/templates/helm/deploy-openstack-ccm.j2
rename to roles/rke2/templates/manifests/deploy-openstack-ccm.j2
diff --git a/roles/rke2/templates/helm/deploy-openstack-cinder.j2 b/roles/rke2/templates/manifests/deploy-openstack-cinder.j2
similarity index 100%
rename from roles/rke2/templates/helm/deploy-openstack-cinder.j2
rename to roles/rke2/templates/manifests/deploy-openstack-cinder.j2
diff --git a/roles/rke2/templates/manifests/deploy-rancher-ui.j2 b/roles/rke2/templates/manifests/deploy-rancher-ui.j2
new file mode 100644
index 0000000..8885456
--- /dev/null
+++ b/roles/rke2/templates/manifests/deploy-rancher-ui.j2
@@ -0,0 +1,23 @@
+---
+
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: cattle-system
+
+---
+
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: rancher
+  namespace: kube-system
+spec:
+  repo: https://releases.rancher.com/server-charts/latest
+  chart: rancher
+  version: "2.6.2"
+  targetNamespace: cattle-system
+  set:
+    hostname: "{{ item.value.rancher_ui_dns }}"
+    letsEncrypt.email: "{{ item.value.letsEncrypt_admin_mail }}"
+    ingress.tls.source: "letsEncrypt"
\ No newline at end of file
diff --git a/roles/rke2/templates/registry_mirrors.j2 b/roles/rke2/templates/registry_mirrors.j2
new file mode 100644
index 0000000..d6558ac
--- /dev/null
+++ b/roles/rke2/templates/registry_mirrors.j2
@@ -0,0 +1,2 @@
+{% set registry = {"mirrors": registry_mirrors.mirrors, "configs": registry_mirrors.configs} %}
+{{ registry | to_nice_yaml }}
\ No newline at end of file
-- 
GitLab