Merge branch 'role_kubernetes-k3s'
commit f2c86dc22d
@@ -1,7 +1,13 @@
 - name: Install K3s agent
   command: /root/k3s_install.sh {{ type }}
   register: command
   changed_when: "'No change detected' in command.stdout"
   until: "command is not failed"
   retries: 2
   delay: 10
+
+- name: Restart when config changed, but install already done
+  service:
+    name: k3s
+    state: restarted
+  when: "inventory_hostname != groups['kubernetes'][0] and not command.changed and config.changed"
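The restart guard above consults a `config` register that the diff itself does not show; it is assumed to come from an earlier template task in the same role. A minimal sketch of what that preceding task might look like, with an illustrative source path:

- name: Deploy K3s config                    # hypothetical task, not part of this diff
  ansible.builtin.template:
    src: ./k3s/agent/config.yaml.jinja2      # illustrative path
    dest: /etc/rancher/k3s/config.yaml
  register: config                           # consulted by `config.changed` above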
@@ -0,0 +1,17 @@
+- name: Deploy calico operator
+  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
+  register: command
+  changed_when: "'created' in command.stdout"
+  run_once: true
+
+- name: Deploy calico resource template
+  ansible.builtin.template:
+    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
+    dest: /root/calico-ressource.yml
+  run_once: true
+
+- name: Deploy calico resource
+  command: kubectl apply -f /root/calico-ressource.yml
+  register: command
+  changed_when: "'created' in command.stdout"
+  run_once: true
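The `changed_when` conditions key off the "created" string kubectl prints on first creation, so re-runs report ok instead of changed. As a hedged follow-up sketch (not part of the diff), the TigeraStatus objects created by the operator are one way to inspect whether Calico has come up:

- name: Show calico operator status          # hypothetical check, not in the diff
  command: kubectl get tigerastatus
  register: calico_status
  changed_when: false
  run_once: true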
@@ -0,0 +1,7 @@
+- name: Deploy service-file for routing-table to wireguard-translation
+  ansible.builtin.template:
+    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
+    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
+    mode: u=rw,g=r,o=r
+  run_once: true
+  when: "kubernetes.network.helper.routingtabletowg"
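K3s auto-applies anything placed in /var/lib/rancher/k3s/server/manifests, so no kubectl step is needed here. The `when:` guard assumes an inventory variable shaped roughly like this (value illustrative):

kubernetes:
  network:
    helper:
      routingtabletowg: true   # set false to skip deploying the helper DaemonSet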
@@ -1,7 +1,21 @@
 server: https://{{ kubernetes.control_plane.dns_name }}:6443
-token: {{ kubernetes.token }}
+token: '{{ kubernetes.token }}'
 
-node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
+node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
 
 # FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
 kubelet-arg: "--node-ip=0.0.0.0"
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+{% endif %}
+
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+- topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+- topology.kubernetes.io/zone={{ zone }}
+{% endif %}
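Rendered for a dual-stack agent with region and zone set, the template above would come out roughly as follows (all values are made up for illustration):

server: https://k8s.example.internal:6443
token: 'K10exampletoken::server:notarealsecret'
node-ip: 10.0.10.11,fd00:10::11
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"
node-label:
- topology.kubernetes.io/region=eu-central
- topology.kubernetes.io/zone=dc1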
@@ -1,23 +1,52 @@
 ## Base ##
 {% if inventory_hostname == groups['kubernetes'][0] %}
+# Initialize with internal etcd
 cluster-init: true
 {% else %}
-server: https://{{ groups['kubernetes'][0] }}:6443
+server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
 {% endif %}
 
-token: {{ kubernetes.token }}
+token: '{{ kubernetes.token }}'
 tls-san:
   - {{ kubernetes.control_plane.dns_name }}
 
 # Networking
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
 node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
 cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
 service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
 
+# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
+kubelet-arg: "--node-ip=0.0.0.0"
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
+service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
+{% endif %}
+
 egress-selector-mode: disabled
 
 # Network-plugin
+{% if kubernetes.network_plugin == "flannel" %}
 flannel-backend: vxlan
+{% else %}
+disable-network-policy: true
+flannel-backend: none
+{% endif %}
 
-# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
-kubelet-arg: "--node-ip=0.0.0.0"
+# Ingress-plugin
+{% if kubernetes.ingress_controller != "traefik-ingress" %}
+disable: traefik
+{% endif %}
+
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+- topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+- topology.kubernetes.io/zone={{ zone }}
+{% endif %}
+
+{{ kubernetes.config_extra | to_yaml }}
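The trailing `{{ kubernetes.config_extra | to_yaml }}` expects a mapping of additional K3s server options appended verbatim to the generated config. An illustrative (assumed, not taken from the role) group_vars entry:

kubernetes:
  config_extra:
    write-kubeconfig-mode: "0640"      # both keys are standard K3s server options
    disable-cloud-controller: true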
@@ -0,0 +1,34 @@
+# This section includes base Calico installation configuration.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+    - blockSize: 26
+      cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
+      encapsulation: None
+      natOutgoing: Enabled
+      nodeSelector: all()
+
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
+    - blockSize: 122
+      cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
+      encapsulation: None
+      natOutgoing: Enabled
+      nodeSelector: all()
+{% endif %}
+
+---
+
+# This section configures the Calico API server.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
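The config templates and the manifest above all pull their addressing from `kubernetes.ipPool`. A variable layout consistent with those references might look like this (interface name and CIDRs are illustrative; the IPv4 values are the K3s defaults):

kubernetes:
  ipPool:
    nodeIp_interface: wg0            # interface watched by the helper DaemonSet below (assumed name)
    ipv4:
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
    ipv6:                            # optional; omit for single-stack clusters
      cluster_cidr: fd00:42::/56
      service_cidr: fd00:43::/112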
@@ -0,0 +1,45 @@
+# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: routingtabletowg
+  namespace: calico-system
+  labels:
+    app: routingtabletowg
+spec:
+  selector:
+    matchLabels:
+      app: routingtabletowg
+  template:
+    metadata:
+      labels:
+        app: routingtabletowg
+    spec:
+      tolerations:
+      # this toleration is to have the daemonset runnable on master nodes
+      # remove it if your masters can't run pods
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      hostNetwork: true
+      containers:
+      - name: routingtabletowg
+        image: "ruakij/routingtabletowg:0.2.0"
+        env:
+        - name: INTERFACE
+          value: {{ kubernetes.ipPool.nodeIp_interface }}
+        - name: FILTER_PROTOCOL
+          value: bird
+        - name: PERIODIC_SYNC
+          value: '300'
+        securityContext:
+          capabilities:
+            add:
+            - NET_ADMIN
+        resources:
+          requests:
+            cpu: 10m
+            memory: 10Mi
+          limits:
+            cpu: 20m
+            memory: 20Mi
+---
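Once K3s picks this file up from its manifests directory, the DaemonSet should schedule one pod per node: it tolerates the master taint and runs on the host network with NET_ADMIN so it can manage host routes. A hedged rollout check, not part of the diff:

- name: Wait for routingtabletowg rollout    # hypothetical verification task
  command: kubectl -n calico-system rollout status daemonset/routingtabletowg --timeout=120s
  changed_when: false
  run_once: true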