Merge branch 'role_kubernetes-k3s'

Ruakij 2 years ago
commit f2c86dc22d

@@ -2,16 +2,40 @@
kubernetes:
  ipPool:
    ipv4:
      # Minimum: /24
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
    ipv6:
      # Minimum: /120
      cluster_cidr: fd42::/56
      service_cidr: fd43::/112
    # Dashes (-) are replaced with underscores (_) for Ansible fact lookup
    # Interface to grab node-IPv4/v6 from
    nodeIp_interface: <interface to grab nodeIp from>
  control_plane:
    dns_name: <control-plane dns-reachable-name>
  token: <shared token for nodes to join>
  network:
    # One of [flannel, calico]
    plugin: calico
    # Helper for networking
    helper:
      # https://github.com/Ruakij/RoutingTableToWg
      # Translates received routes from e.g. BGP into wireguard allowed-ips
      # Helpful when nodeIp_interface is a wireguard interface
      routingtabletowg: false
  # One of [traefik-ingress]
  ingress_controller: traefik-ingress
  config_extra:
    # etcd tuning
    # heartbeat: 0.5-1.5x of RTT
    # election: at least 10x of heartbeat
    etcd-arg:
      heartbeat-interval: 500
      election-timeout: 5000
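The etcd-arg defaults follow the sizing rule in the comments: heartbeat-interval around 0.5-1.5x the worst round-trip time between etcd members, election-timeout at least 10x the heartbeat. A worked example of that arithmetic, assuming a hypothetical worst-case RTT of ~400 ms:

# assumed worst-case RTT ~400 ms (hypothetical):
#   heartbeat-interval: 0.5-1.5 x 400 ms -> 200-600 ms, so 500 is in range
#   election-timeout:   >= 10 x 500 ms   -> 5000 ms
config_extra:
  etcd-arg:
    heartbeat-interval: 500
    election-timeout: 5000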

@@ -1,4 +1,3 @@
---
dependencies:
-  - role: docker
  - role: netmaker

@@ -8,6 +8,7 @@
  ansible.builtin.template:
    src: k3s/{{ type }}/config.yaml.jinja2
    dest: /etc/rancher/k3s/config.yaml
+  register: config
- name: Download install-script
  get_url:

@@ -1,7 +1,13 @@
- name: Install K3s agent
  command: /root/k3s_install.sh {{ type }}
  register: command
  changed_when: "'No change detected' in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10
+- name: Restart when config changed, but install already done
+  service:
+    name: k3s
+    state: restarted
+  when: "inventory_hostname != groups['kubernetes'][0] and not command.changed and config.changed"

@@ -1,9 +1,15 @@
- name: Install K3s-server for 1st-node
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname == groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' in command.stdout"
+- name: Restart when config changed, but install already done
+  service:
+    name: k3s
+    state: restarted
+  when: "inventory_hostname == groups['kubernetes'][0] and not command.changed and config.changed"
- name: Waiting for K3s-server to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
@@ -20,6 +26,12 @@
  retries: 2
  delay: 10
+- name: Restart when config changed, but install already done
+  service:
+    name: k3s
+    state: restarted
+  when: "inventory_hostname != groups['kubernetes'][0] and not command.changed and config.changed"
- name: Waiting for K3s-server to accept connections on other nodes
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
@@ -34,3 +46,10 @@
#  block: |
#    export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
#  create: true
+- name: Deploy calico
+  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
+  when: "kubernetes.network.plugin == 'calico'"
+- name: Deploy network-helpers
+  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml
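groups['kubernetes'][0] is used throughout as the cluster-init node, so host order in the kubernetes inventory group matters. A minimal sketch of the expected layout, with hypothetical hostnames:

kubernetes:
  hosts:
    node1:  # groups['kubernetes'][0] -> runs cluster-init; others join via its nodeip_ipv4
    node2:
    node3: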

@@ -0,0 +1,17 @@
- name: Deploy calico operator
  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true
- name: Deploy calico resource template
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
    dest: /root/calico-ressource.yml
  run_once: true
- name: Deploy calico resource
  command: kubectl apply -f /root/calico-ressource.yml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true
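kubectl create is not idempotent: on a second run the operator objects already exist and the command exits non-zero. One possible way to tolerate re-runs, sketched here as an assumption rather than part of this commit:

- name: Deploy calico operator (re-run-tolerant sketch)
  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
  register: command
  changed_when: "'created' in command.stdout"
  # assumption: treat only genuine failures as errors, not AlreadyExists on repeat runs
  failed_when: command.rc != 0 and 'AlreadyExists' not in command.stderr
  run_once: true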

@@ -0,0 +1,7 @@
- name: Deploy service-file for routing-table-to-wireguard translation
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
    mode: u=rw,g=r,o=r
  run_once: true
  when: "kubernetes.network.helper.routingtabletowg"

@@ -17,19 +17,26 @@
#- name: Disable swap
#  command: swapoff -a
-#- name: Install iptables
-#  package:
-#    name:
-#      #- containerd
-#      - iptables
-#    state: latest
+- name: Install required packages
+  package:
+    name:
+      #- containerd
+      #- iptables
+      # For Longhorn:
+      - nfs-common
+      - open-iscsi
+    state: latest
+- import_tasks: ./prerequisites/containerd.yml
+- name: Gather interface-name
+  set_fact:
+    interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
- name: Getting nodeIp-data from interface
  set_fact:
-    nodeip_ipv4: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv4.address }}"
-    nodeip_ipv6: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv6[0].address }}"
+    nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
+    nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"
- name: Run handlers to reload configurations
  meta: flush_handlers
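The replace('-', '_') is needed because Ansible stores interface facts under keys with dashes converted to underscores. For a hypothetical WireGuard interface named wg-cluster, the lookup resolves as:

- name: Example nodeIp lookup (hypothetical interface wg-cluster)
  set_fact:
    # kubernetes.ipPool.nodeIp_interface: wg-cluster -> fact key wg_cluster
    nodeip_ipv4: "{{ ansible_facts['wg_cluster'].ipv4.address }}"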

@@ -1,7 +1,21 @@
server: https://{{ kubernetes.control_plane.dns_name }}:6443
-token: {{ kubernetes.token }}
+token: '{{ kubernetes.token }}'
-node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
+node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
+# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
+kubelet-arg: "--node-ip=0.0.0.0"
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+{% endif %}
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+  - topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+  - topology.kubernetes.io/zone={{ zone }}
+{% endif %}
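For illustration, with hypothetical values (dns_name cp.example.org, token example-token, nodeip_ipv4 192.0.2.10, nodeip_ipv6 fd42::10, region eu, no zone) and the IPv6 pool defined, the agent template renders roughly as:

server: https://cp.example.org:6443
token: 'example-token'
node-ip: 192.0.2.10,fd42::10
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"
## Label
# Region & DC
node-label:
  - topology.kubernetes.io/region=eu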

@@ -1,23 +1,52 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
-server: https://{{ groups['kubernetes'][0] }}:6443
+server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
{% endif %}
-token: {{ kubernetes.token }}
+token: '{{ kubernetes.token }}'
tls-san:
  - {{ kubernetes.control_plane.dns_name }}
# Networking
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
+# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
+kubelet-arg: "--node-ip=0.0.0.0"
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
+service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
+{% endif %}
+egress-selector-mode: disabled
# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}
-# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
-kubelet-arg: "--node-ip=0.0.0.0"
# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+  - topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+  - topology.kubernetes.io/zone={{ zone }}
+{% endif %}
+{{ kubernetes.config_extra | to_yaml }}
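The trailing to_yaml line splices config_extra verbatim into the server config; with the defaults above it expands to the equivalent of:

etcd-arg:
  heartbeat-interval: 500
  election-timeout: 5000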

@@ -0,0 +1,34 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
      - blockSize: 122
        cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
{% endif %}
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
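blockSize sets the per-node IPAM allocation unit; a quick sketch of the address math behind the values above, using the default pools:

# ipv4: /16 pool with blockSize 26 -> 2^(26-16) = 1024 blocks of 2^(32-26) = 64 addresses each
# ipv6: /56 pool with blockSize 122 -> 2^(128-122) = 64 addresses per block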

@@ -0,0 +1,45 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: routingtabletowg
  namespace: calico-system
  labels:
    app: routingtabletowg
spec:
  selector:
    matchLabels:
      app: routingtabletowg
  template:
    metadata:
      labels:
        app: routingtabletowg
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      hostNetwork: true
      containers:
        - name: routingtabletowg
          image: "ruakij/routingtabletowg:0.2.0"
          env:
            - name: INTERFACE
              value: {{ kubernetes.ipPool.nodeIp_interface }}
            - name: FILTER_PROTOCOL
              value: bird
            - name: PERIODIC_SYNC
              value: '300'
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          resources:
            requests:
              cpu: 10m
              memory: 10Mi
            limits:
              cpu: 20m
              memory: 20Mi
---
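Once the manifest is picked up by k3s, the DaemonSet can be checked with an ad-hoc task like the following (an illustrative sketch, not part of this commit):

- name: Check routingtabletowg pods
  command: kubectl -n calico-system get pods -l app=routingtabletowg
  register: rtw_pods
  changed_when: false
  run_once: true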