36 Commits

SHA1 Message Date
b8ae38bab8 Check localhost instead of external in case firewalls block external connections 2023-06-21 17:36:51 +02:00
093612f3a7 Change restart-check to started/restarted check 2023-04-14 10:20:16 +02:00
a92409c56f Add failed_when to deploy calico operator 2023-04-14 10:01:42 +02:00
f50e3ac33c Use first node's IP for joining cluster 2023-04-14 09:50:39 +02:00
668ff23ee6 Fix service task wrong usage 2023-04-14 09:42:46 +02:00
c2c6a2872f Fix conditional for changed after install 2023-04-12 23:12:10 +02:00
c1c7ec9e56 Remove workaround as k3s is now at 1.26 2023-04-12 22:39:43 +02:00
550f6868ff Fix old usage of network_plugin var 2023-04-12 20:49:38 +02:00
c8f90f0f8d Update calico 2023-04-05 20:15:31 +02:00
41570ea40d Create new block for network-stuff 2023-04-05 20:14:36 +02:00
a3c887748a Move network-helper to own file independent from calico 2023-04-05 20:14:12 +02:00
d113625fa8 Fix env-value not being string 2023-04-05 20:08:25 +02:00
dadd077723 Fix service and conditional 2023-04-05 14:13:22 +02:00
0d43d07ad4 Add extra-config option 2023-04-05 14:12:56 +02:00
d6f8f975bb Reload when config changed, but install already done 2023-04-05 13:32:14 +02:00
7c86a5d77d Add register for config 2023-04-05 13:30:22 +02:00
8c4e3c2401 Update routingtabletowg and use new sync feature 2023-03-30 14:54:49 +02:00
b46d35c8a5 Add labels 2023-03-30 14:54:28 +02:00
791ad96849 Add ipv6-check to calico deploy 2023-03-19 15:17:45 +01:00
fc3d9845d6 Fix nondeterministic node-selection by using group 2023-03-19 15:17:02 +01:00
590b75ac23 Add quotes to token-usage for special chars 2023-03-19 14:01:19 +01:00
0c82504299 Separate getting name and ips to fix bug easily 2023-03-19 14:00:46 +01:00
2fee9a1747 Only enable ipv6 when available and activated 2023-03-19 14:00:15 +01:00
fb44c39969 Add install of often-used packages 2023-03-19 13:58:47 +01:00
5452303992 Remove netmaker from dependency 2023-03-19 13:58:05 +01:00
4321d78cf8 Add comments to variables 2023-03-19 13:57:57 +01:00
f9a859e95c Add ingress-option 2023-03-19 13:56:26 +01:00
e5920b3ddf Add network-plugin option 2023-03-17 15:57:48 +01:00
0fc5dbb791 Initial role-data 2022-11-02 16:29:55 +01:00
9cb2e88193 Merge branch 'role_netmaker' 2022-10-27 01:16:57 +02:00
25ceb0f456 Merge branch 'role_netmaker_server' 2022-10-27 01:16:55 +02:00
fcc4f1ed18 Fix task 2022-10-21 15:40:00 +02:00
de0e220004 Add defaults-var-file 2022-10-21 15:32:20 +02:00
f9cc97a8f2 Add CA to trust-store 2022-10-21 15:32:08 +02:00
811fc22eef Delete unnecessary task-file 2022-10-21 15:31:37 +02:00
4cb418e2b6 Add role netmaker (netclient) 2022-10-17 14:51:52 +02:00
31 changed files with 551 additions and 13 deletions

View File

@@ -0,0 +1,41 @@
---
kubernetes:
  ipPool:
    ipv4:
      # Minimum: /24
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
    ipv6:
      # Minimum: /120
      cluster_cidr: fd42::/56
      service_cidr: fd43::/112
    # Interface to grab node-IPv4/v6 from
    nodeIp_interface: <interface to grab nodeIp from>
  control_plane:
    dns_name: <control-plane dns-reachable-name>
  token: <shared token for nodes to join>
  network:
    # One of [flannel, calico]
    plugin: calico
    # Helpers for networking
    helper:
      # https://github.com/Ruakij/RoutingTableToWg
      # Translates received routes (e.g. from BGP) to WireGuard allowed-ips
      # Helpful when nodeIp_interface is a WireGuard interface
      routingtabletowg: false
  # One of [traefik-ingress]
  ingress_controller: traefik-ingress
  config_extra:
    # etcd tuning
    # heartbeat: 0.5-1.5x of RTT
    # election: 10x (or more) of heartbeat
    etcd-arg:
      heartbeat-interval: 500
      election-timeout: 5000
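To make the tuning comments concrete: the shipped values fit a high-latency overlay, since heartbeat-interval 500 ms corresponds to an RTT of roughly 330-1000 ms (0.5-1.5x rule) and election-timeout 5000 ms is exactly 10x the heartbeat. A hedged sketch for a lower-latency network (the RTT figure is assumed for illustration, not from the repo):

config_extra:
  etcd-arg:
    # Assuming nodes see ~40 ms RTT between each other:
    heartbeat-interval: 50    # 1.25x RTT, inside the 0.5-1.5x window
    election-timeout: 500     # 10x the heartbeat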

View File

@@ -0,0 +1,33 @@
@startuml
rectangle "Control-Plane" as control_plane {
    rectangle "Node" as sn1 {
        component "netclient" as sn1_netclient
        component etcd as sn1_etcd
        component "k3s-server" as sn1_k3s_server

        sn1_k3s_server - sn1_etcd
    }
    rectangle "Node" as sn2 {
        component "netclient" as sn2_netclient
        component etcd as sn2_etcd
        component "k3s-server" as sn2_k3s_server

        sn2_k3s_server - sn2_etcd
    }

    sn1_netclient -- sn2_netclient
    sn1_etcd -- sn2_etcd
}

rectangle "Workers" {
    rectangle "Node" as an1 {
        component "netclient" as an1_netclient
        component "k3s-agent" as an1_k3s_agent
    }
}
@enduml

View File

@@ -0,0 +1,35 @@
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
disabled_plugins = []
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true

View File

@@ -0,0 +1,19 @@
- name: reload_sysctl
  command: sysctl --system

- name: restart_containerd
  ansible.builtin.service:
    name: containerd
    state: restarted

- name: reload_networking
  service:
    name: networking
    state: restarted
  async: 5
  poll: 0
  notify: wait_for_connection

- name: wait_for_connection
  wait_for_connection:
    delay: 5
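The reload_networking handler restarts networking asynchronously (async: 5, poll: 0) so the play survives the SSH connection dropping, then chains into wait_for_connection to re-establish the session. A minimal sketch of a task that would trigger it (template name is hypothetical):

- name: Deploy interface configuration
  ansible.builtin.template:
    src: interfaces.jinja2            # hypothetical template
    dest: /etc/network/interfaces.d/k8s
  notify: reload_networking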

kubernetes/meta/main.yml
View File

@@ -0,0 +1,3 @@
---
dependencies:
- role: docker

View File

@@ -0,0 +1,30 @@
- name: Create k3s-folder
  ansible.builtin.file:
    path: /etc/rancher/k3s/
    state: directory
    mode: '0755'

- name: Deploy k3s config
  ansible.builtin.template:
    src: k3s/{{ type }}/config.yaml.jinja2
    dest: /etc/rancher/k3s/config.yaml
  register: config

- name: Download install-script
  get_url:
    url: https://get.k3s.io
    dest: /root/k3s_install.sh
    mode: '744'
  # todo: update when file changed?

- import_tasks: ./install/server/setup_network.yml
  when: "type == 'server'"

- import_tasks: ./install/server/install_helm.yml
  when: "type == 'server'"

- import_tasks: ./install/server/install_k3s.yml
  when: "type == 'server'"

- import_tasks: ./install/agent/install_k3s.yml
  when: "type == 'agent'"

View File

@@ -0,0 +1,12 @@
- name: Install K3s agent
  command: /root/k3s_install.sh {{ type }}
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

- name: Make sure service is started / restarted on config change
  service:
    name: k3s-agent
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"

View File

@@ -0,0 +1,17 @@
- name: Add Balto key
  apt_key:
    url: https://baltocdn.com/helm/signing.asc
    state: present

- name: Add Balto Repository
  apt_repository:
    repo: "deb https://baltocdn.com/helm/stable/debian/ all main"
    state: present
    filename: kubernetes
    update_cache: yes

- name: Install helm
  package:
    name:
      - helm
    state: latest
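Since state: latest only guarantees the package is current at run time, a quick sanity check can catch a broken repo pin early; a minimal sketch (not part of the role):

- name: Verify helm is callable
  command: helm version --short
  register: helm_version
  changed_when: false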

View File

@@ -0,0 +1,55 @@
- name: Install K3s-server for 1st node
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname == groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"

- name: Make sure service is started / restarted on config change
  service:
    name: k3s
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
  when: "inventory_hostname == groups['kubernetes'][0]"

- name: Waiting for K3s-server to accept connections
  ansible.builtin.wait_for:
    host: "127.0.0.1"
    port: 6443
    state: started
  when: "inventory_hostname == groups['kubernetes'][0]"

- name: Install K3s-server for other nodes
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname != groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

- name: Make sure service is started / restarted on config change
  service:
    name: k3s
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
  when: "inventory_hostname != groups['kubernetes'][0]"

- name: Waiting for K3s-server to accept connections on other nodes
  ansible.builtin.wait_for:
    host: "127.0.0.1"
    port: 6443
    state: started
  when: "inventory_hostname != groups['kubernetes'][0]"

#- name: Add Kubernetes environment-vars to /etc/profile.d/
#  blockinfile:
#    path: /etc/profile.d/k3s-bin.sh
#    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
#    block: |
#      export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
#    create: true

- name: Deploy calico
  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
  when: "kubernetes.network.plugin == 'calico'"

- name: Deploy network-helpers
  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml
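Note that wait_for only proves port 6443 answers on each node; it does not prove the nodes joined one cluster. A hedged smoke test that could follow (illustrative, not part of the role):

- name: Verify all nodes registered with the first server
  command: kubectl get nodes --no-headers
  register: nodes_out
  changed_when: false
  failed_when: "nodes_out.stdout_lines | length < groups['kubernetes'] | length"
  when: "inventory_hostname == groups['kubernetes'][0]"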

View File

@@ -0,0 +1,19 @@
- name: Deploy calico operator
  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true
  failed_when:
    - "command.rc == 1 and 'AlreadyExists' not in command.stderr"

- name: Deploy calico resource template
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
    dest: /root/calico-ressource.yml
  run_once: true

- name: Deploy calico resource
  command: kubectl apply -f /root/calico-ressource.yml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true

View File

@@ -0,0 +1,7 @@
- name: Deploy manifest for routing-table-to-wireguard translation
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
    mode: u=rw,g=r,o=r
  run_once: true
  when: "kubernetes.network.helper.routingtabletowg"

View File

@@ -0,0 +1,6 @@
- name: Point control-plane DNS name at the local IP
  blockinfile:
    path: /etc/hosts
    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
    block: |
      {{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}

View File

@@ -0,0 +1,4 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./install.yml

View File

@@ -0,0 +1,42 @@
#- name: Load br_netfilter kernel-module
#  modprobe:
#    name: br_netfilter
#    state: present

- name: Set sysctl settings for iptables bridged traffic
  copy:
    dest: "/etc/sysctl.d/kubernetes.conf"
    content: |
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      net.ipv4.conf.all.forwarding = 1
      net.ipv6.conf.all.forwarding = 1
  notify: reload_sysctl

#- name: Disable swap
#  command: swapoff -a

- name: Install required packages
  package:
    name:
      #- containerd
      #- iptables
      # For Longhorn:
      - nfs-common
      - open-iscsi
    state: latest

- import_tasks: ./prerequisites/containerd.yml

- name: Gather interface-name
  set_fact:
    interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"

- name: Getting nodeIp-data from interface
  set_fact:
    nodeip_ipv4: "{{ ansible_facts[interface].ipv4.address }}"
    nodeip_ipv6: "{{ ansible_facts[interface].ipv6[0].address if ansible_facts[interface].ipv6 is defined }}"

- name: Run handlers to reload configurations
  meta: flush_handlers
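The replace('-', '_') in Gather interface-name matters because Ansible flattens interface names into fact keys: an interface named wg-k8s is reported as ansible_facts['wg_k8s']. A small debug task to confirm the derived addresses (purely illustrative):

- name: Show derived node addresses
  ansible.builtin.debug:
    msg: "nodeip_ipv4={{ nodeip_ipv4 }} nodeip_ipv6={{ nodeip_ipv6 | default('') }}"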

View File

@@ -0,0 +1,24 @@
- name: Check if containerd-service exists & is started
  service:
    name: containerd
    state: started
  ignore_errors: true
  register: containerd_status

- name: Install containerd if not present
  package:
    name:
      - containerd
  when: containerd_status is failed

- name: Create containerd config-folder
  file:
    path: /etc/containerd
    state: directory

- name: Deploy containerd-config
  ansible.builtin.copy:
    src: containerd_config.toml
    dest: /etc/containerd/config.toml
    mode: u=rw,g=r,o=r
  notify: restart_containerd

View File

@@ -0,0 +1,18 @@
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
token: '{{ kubernetes.token }}'
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
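For reference, with assumed example values (first server at 10.0.0.11, a dual-stack agent, region/zone set), the agent template renders to roughly:

server: https://10.0.0.11:6443
token: 'example-token'
node-ip: 10.0.0.12,fd42::12
node-label:
  - topology.kubernetes.io/region=eu-central
  - topology.kubernetes.io/zone=dc1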

View File

@@ -0,0 +1,49 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
{% endif %}
token: '{{ kubernetes.token }}'
tls-san:
- {{ kubernetes.control_plane.dns_name }}
# Networking
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
{% endif %}
egress-selector-mode: disabled
# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}
# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
{{ kubernetes.config_extra | to_yaml }}

View File

@@ -0,0 +1,34 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
      - blockSize: 122
        cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
{% endif %}
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

View File

@@ -0,0 +1,45 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: routingtabletowg
  namespace: calico-system
  labels:
    app: routingtabletowg
spec:
  selector:
    matchLabels:
      app: routingtabletowg
  template:
    metadata:
      labels:
        app: routingtabletowg
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      hostNetwork: true
      containers:
        - name: routingtabletowg
          image: "ruakij/routingtabletowg:0.2.0"
          env:
            - name: INTERFACE
              value: {{ kubernetes.ipPool.nodeIp_interface }}
            - name: FILTER_PROTOCOL
              value: bird
            - name: PERIODIC_SYNC
              value: '300'
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          resources:
            requests:
              cpu: 10m
              memory: 10Mi
            limits:
              cpu: 20m
              memory: 20Mi
---

View File

@@ -0,0 +1,4 @@
netclient:
  # Token to join default-network
  # leave empty to ignore
  join_network_token:
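A hedged sketch of how the token might be supplied per environment, e.g. from group_vars with a vaulted variable (the variable name is assumed):

netclient:
  join_network_token: "{{ vault_netclient_join_token }}"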

netmaker/meta/main.yml
View File

@@ -0,0 +1,3 @@
---
dependencies:
- role: docker

netmaker/tasks/certs.yml
View File

@@ -0,0 +1,4 @@
- name: Deploy CA Certificate
  ansible.builtin.copy:
    src: secret_files/netmaker_server/ca/ca.crt
    dest: /etc/ssl/certs/netmaker-ca.pem

View File

@@ -0,0 +1,25 @@
- name: Install Packages
  # when: docker_file.stat.exists == False
  package:
    name:
      - gpg
      - gpg-agent

- name: Add netmaker-key
  apt_key:
    url: https://apt.netmaker.org/gpg.key
    state: present

- name: Add netmaker-repository
  apt_repository:
    repo: "deb https://apt.netmaker.org stable main"
    state: present
    filename: netmaker
    update_cache: yes

- name: Install wireguard & netclient
  package:
    name:
      - wireguard
      - netclient
    state: latest

View File

@@ -0,0 +1,5 @@
- name: Join netmaker-network
  when: "netclient.join_network_token is defined and netclient.join_network_token"
  command: "netclient join -t {{ netclient.join_network_token }}"
  failed_when: command.rc != 0
  register: command

netmaker/tasks/main.yml
View File

@@ -0,0 +1,5 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml

View File

@@ -30,7 +30,7 @@ component netmaker_server {
component nm_api
nm_api -down- nm_api_http
ng_http --( nm_api_http
- nm_api .up.( ng_TLS : db-connection to rqlite-master
+ nm_api -up-( ng_TLS : db-connection to rqlite-master
nm_api --( mq_plain
}

View File

@@ -1,11 +1,11 @@
per_listener_settings false
listener 8883
protocol websockets
allow_anonymous false
certfile /certs/node.crt
keyfile /certs/node.key
listener 1883
protocol websockets
allow_anonymous false
plugin /usr/lib/mosquitto_dynamic_security.so

View File

@@ -30,7 +30,7 @@
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker_server'][0]"
when: "inventory_hostname == groups['netmaker'][0]"
register: default_mesh
until: "default_mesh is not failed"
retries: 2
@@ -50,7 +50,7 @@
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker_server'][0]"
when: "inventory_hostname == groups['netmaker'][0]"
register: default_mesh_key
until: "default_mesh_key is not failed"
retries: 2

View File

@@ -33,15 +33,15 @@ services:
-auth /config.json
- {% if inventory_hostname != groups['netmaker_server'][0] %}
+ {% if inventory_hostname != groups['netmaker'][0] %}
  -join-as netmaker
- -join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker_server'][0] }}:{{ netmaker_nginx.advertise_port }}
+ -join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
"
# FIXME: /\ \/ Change http -> https
netmaker: # The Primary Server for running Netmaker
- image: gravitl/netmaker:v0.17.1
+ image: gravitl/netmaker:v0.16.1
depends_on:
- rqlite
cap_add:
@@ -104,7 +104,7 @@ services:
- "51821-51830:51821-51830/udp" # wireguard ports
netmaker-ui: # The Netmaker UI Component
- image: gravitl/netmaker-ui:v0.17.1
+ image: gravitl/netmaker-ui:v0.16.1
depends_on:
- netmaker
links:
@@ -120,6 +120,7 @@ services:
- ./mosquitto/config:/mosquitto/config
- ./mosquitto/data:/mosquitto/data
- ./mosquitto/logs:/mosquitto/log
- "./certs:/certs:ro"
depends_on:
- netmaker
command: ["/mosquitto/config/wait.sh"]

View File

@@ -6,7 +6,7 @@ stream{
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
- {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
+ {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883; # todo: tls-terminate?
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;

View File

@@ -3,9 +3,7 @@ map $host $proxy_name {
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;
default 444;