18 Commits

SHA1 Message Date
fd302e4ebc Move fact re-gathering to the join task when changed 2023-03-19 13:47:01 +01:00
b5729caa0e Add wait for interface to come up 2023-03-19 13:46:44 +01:00
dca40ed835 Remove throttle 2023-03-19 13:46:21 +01:00
95ddd04a86 Fix join command 2023-03-19 10:39:06 +01:00
911bc47acb Initial role stuff 2023-03-17 15:58:49 +01:00
f3e381aca3 Move netmaker to deprecated 2023-01-18 13:23:51 +01:00
233eadaf40 Merge branch 'role_netmaker_server' 2023-01-10 09:47:34 +01:00
109a09052d Update to 0.17.1
This also sets up tls-termination for mosquitto
2023-01-10 09:46:55 +01:00
4ea9492ca3 Change host group-name 2023-01-10 09:46:01 +01:00
e5ebc2ad5f Merge branch 'role_common' 2022-11-02 16:41:10 +01:00
98c51c6fc1 Merge branch 'role_kubernetes-k3s' 2022-11-02 16:40:52 +01:00
6b59bf6c75 Merge branch 'role_netmaker' 2022-11-02 16:40:36 +01:00
1b2af7cf6c Merge branch 'role_netmaker_server' 2022-11-02 16:40:25 +01:00
d9cf3d2066 Re-gather facts at the end for other plays 2022-11-02 16:38:13 +01:00
f42bce9b6b Add changed-detection 2022-11-02 16:37:53 +01:00
975746e7d7 Add IPv6-network to common 2022-11-02 10:27:06 +01:00
a27ca2c37a Create dedicated docs-folder and move files there 2022-10-27 01:24:30 +02:00
247fdec7ae fixme: hotfix for multi-master netmaker-server
netmaker doesn't handle concurrent joins to different server-nodes well and will duplicate addresses
2022-10-27 01:23:45 +02:00
43 changed files with 127 additions and 241 deletions
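
Note on the hotfix commit 247fdec7ae: it works around netmaker's concurrent-join problem by serializing the join across hosts. A minimal sketch of that pattern, assembled from the task fields visible in the join-network.yml hunk below (throttle: 1 lets only one host run the task at a time, so the server never has two joins in flight):

- name: Join netmaker network
  when: "netclient.join_network_token is defined"
  command: "netclient join -t {{ netclient.join_network_token }}"
  register: command
  failed_when: command.rc != 0
  changed_when: "'starting wireguard' in command.stdout"
  throttle: 1  # serialize across hosts to avoid duplicated addresses

The March 2023 commits at the top of the list then appear to refine this approach: wait for the wireguard interface to come up and re-gather facts only when the join task actually reported a change.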

View File

@@ -0,0 +1,3 @@
---
dependencies: []
#- role: docker

View File

@@ -2,4 +2,6 @@
when: "netclient.join_network_token is defined"
command: "netclient join -t {{ netclient.join_network_token }}"
failed_when: command.rc != 0
changed_when: "'starting wireguard' in command.stdout"
register: command
throttle: 1

View File

@@ -0,0 +1,8 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml
- name: Gather facts to get changes
ansible.builtin.gather_facts:

View File

@@ -30,7 +30,7 @@ component netmaker_server {
component nm_api
nm_api -down- nm_api_http
ng_http --( nm_api_http
nm_api -up-( ng_TLS : db-connection to rqlite-master
nm_api .up.( ng_TLS : db-connection to rqlite-master
nm_api --( mq_plain
}

View File

@@ -1,11 +1,11 @@
per_listener_settings false
listener 8883
protocol websockets
allow_anonymous false
certfile /certs/node.crt
keyfile /certs/node.key
listener 1883
protocol websockets
allow_anonymous false
plugin /usr/lib/mosquitto_dynamic_security.so

View File

@@ -30,7 +30,7 @@
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker'][0]"
when: "inventory_hostname == groups['netmaker_server'][0]"
register: default_mesh
until: "default_mesh is not failed"
retries: 2
@@ -50,7 +50,7 @@
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker'][0]"
when: "inventory_hostname == groups['netmaker_server'][0]"
register: default_mesh_key
until: "default_mesh_key is not failed"
retries: 2

View File

@@ -33,15 +33,15 @@ services:
-auth /config.json
{% if inventory_hostname != groups['netmaker'][0] %}
{% if inventory_hostname != groups['netmaker_server'][0] %}
-join-as netmaker
-join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker'][0] }}:{{ netmaker_nginx.advertise_port }}
-join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker_server'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
"
# FIXME: /\ \/ Change http -> https
netmaker: # The Primary Server for running Netmaker
image: gravitl/netmaker:v0.16.1
image: gravitl/netmaker:v0.17.1
depends_on:
- rqlite
cap_add:
@@ -104,7 +104,7 @@ services:
- "51821-51830:51821-51830/udp" # wireguard ports
netmaker-ui: # The Netmaker UI Component
image: gravitl/netmaker-ui:v0.16.1
image: gravitl/netmaker-ui:v0.17.1
depends_on:
- netmaker
links:
@@ -120,7 +120,6 @@ services:
- ./mosquitto/config:/mosquitto/config
- ./mosquitto/data:/mosquitto/data
- ./mosquitto/logs:/mosquitto/log
- "./certs:/certs:ro"
depends_on:
- netmaker
command: ["/mosquitto/config/wait.sh"]

View File

@@ -6,7 +6,7 @@ stream{
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883; # todo: tls-terminate?
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;

View File

@@ -4,6 +4,8 @@ map $host $proxy_name {
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;
default 444;

common/defaults/main.yml Normal file (1 addition)
View File

@@ -0,0 +1 @@
ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888
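
The value above is clearly a sample secret: it feeds net.ipv6.conf.default.stable_secret in the networking tasks further down, which together with addr_gen_mode = 2 makes the kernel derive stable-privacy SLAAC addresses (RFC 7217), so each deployment should override it with its own secret.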

View File

@@ -5,3 +5,5 @@
- import_tasks: ./packages.yml
- import_tasks: ./aliases.yml
- import_tasks: ./networking.yml

View File

@@ -0,0 +1,22 @@
- name: Set sysctl settings for ip-forwarding
copy:
dest: "/etc/sysctl.d/ip-forwarding.conf"
content: |
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
notify: reload_sysctl
- name: Set sysctl settings for ipv6-address-generation
copy:
dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
content: |
net.ipv6.conf.default.addr_gen_mode = 2
net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
notify: reload_sysctl
- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
copy:
dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
content: |
net.ipv6.conf.all.accept_ra = 2
notify: reload_sysctl
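
The three tasks above notify a reload_sysctl handler that is not part of this diff. A minimal sketch of what such a handler could look like, assuming it simply re-applies the drop-ins (the handler name is taken from the notify above; the command is an assumption):

- name: reload_sysctl
  command: sysctl --system  # assumed: re-reads every /etc/sysctl.d/*.conf drop-in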

View File

@@ -2,40 +2,16 @@
kubernetes:
ipPool:
ipv4:
# Minimum: /24
cluster_cidr: 10.42.0.0/16
service_cidr: 10.43.0.0/16
ipv6:
# Minimum: /120
cluster_cidr: fd42::/56
service_cidr: fd43::/112
# Interface to grab node-IPv4/v6 from
# Replace - with _
nodeIp_interface: <interface to grab nodeIp from>
control_plane:
dns_name: <control-plane dns-reachable-name>
token: <shared token for nodes to join>
network:
# One of [flannel, calico]
plugin: calico
# Helper for networking
helper:
# https://github.com/Ruakij/RoutingTableToWg
# Translates received-routes from e.g. BGP to wireguard-allowedips
# Helpful, when nodeIp_interface is a wireguard-interface
routingtabletowg: false
# One of [traefik-ingress]
ingress_controller: traefik-ingress
config_extra:
# etcd-tuning
# heartbeat: 0.5-1.5x of rtt
# election: 10x- of heartbeat
etcd-arg:
heartbeat-interval: 500
election-timeout: 5000
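
Working the comment's heuristic through with the values above: assuming an inter-node RTT of roughly 350-500 ms (plausible for the WAN/wireguard links this setup targets, but an assumption here), a heartbeat-interval of 500 ms falls in the 0.5-1.5x RTT band, and an election-timeout of 5000 ms is exactly 10x the heartbeat.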

View File

@@ -1,3 +1,4 @@
---
dependencies:
- role: docker
- role: netmaker

View File

@@ -8,7 +8,6 @@
ansible.builtin.template:
src: k3s/{{ type }}/config.yaml.jinja2
dest: /etc/rancher/k3s/config.yaml
register: config
- name: Download install-script
get_url:

View File

@@ -1,12 +1,7 @@
- name: Install K3s agent
command: /root/k3s_install.sh {{ type }}
register: command
changed_when: "'No change detected' not in command.stdout"
changed_when: "'No change detected' in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
- name: Make sure service is started / restarted on config change
service:
name: k3s-agent
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"

View File

@@ -2,17 +2,11 @@
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname == groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname == groups['kubernetes'][0]"
changed_when: "'No change detected' in command.stdout"
- name: Waiting for K3s-server to accept connections
ansible.builtin.wait_for:
host: "127.0.0.1"
host: "{{ inventory_hostname }}"
port: 6443
state: started
when: "inventory_hostname == groups['kubernetes'][0]"
@@ -21,20 +15,14 @@
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname != groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
changed_when: "'No change detected' in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname != groups['kubernetes'][0]"
- name: Waiting for K3s-server to accept connections on other nodes
ansible.builtin.wait_for:
host: "127.0.0.1"
host: "{{ inventory_hostname }}"
port: 6443
state: started
when: "inventory_hostname != groups['kubernetes'][0]"
@@ -46,10 +34,3 @@
# block: |
# export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
# create: true
- name: Deploy calico
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
when: "kubernetes.network.plugin == 'calico'"
- name: Deploy network-helpers
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml

View File

@@ -1,19 +0,0 @@
- name: Deploy calico operator
command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
register: command
changed_when: "'created' in command.stdout"
run_once: true
failed_when:
- "command.rc == 1 and 'AlreadyExists' not in command.stderr"
- name: Deploy calico ressource template
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
dest: /root/calico-ressource.yml
run_once: true
- name: Deploy calico ressource
command: kubectl apply -f /root/calico-ressource.yml
register: command
changed_when: "'created' in command.stdout"
run_once: true

View File

@@ -1,7 +0,0 @@
- name: Deploy service-file for routing-table to wireguard-translation
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
mode: u=rw,g=r,o=r
run_once: true
when: "kubernetes.network.helper.routingtabletowg"

View File

@@ -17,26 +17,19 @@
#- name: Disable swap
# command: swapoff -a
- name: Install required packages
package:
name:
#- containerd
#- name: Install iptables
# package:
# name:
# #- containerd
# - iptables
# For Longhorn:
- nfs-common
- open-iscsi
state: latest
# state: latest
- import_tasks: ./prerequisites/containerd.yml
- name: Gather interface-name
set_fact:
interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
- name: Getting nodeIp-data from interface
set_fact:
nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"
nodeip_ipv4: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv4.address }}"
nodeip_ipv6: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv6[0].address }}"
- name: Run handlers to reload configurations
meta: flush_handlers

View File

@@ -1,18 +1,7 @@
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
token: '{{ kubernetes.token }}'
server: https://{{ kubernetes.control_plane.dns_name }}:6443
token: {{ kubernetes.token }}
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"

View File

@@ -1,49 +1,23 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
server: https://{{ groups['kubernetes'][0] }}:6443
{% endif %}
token: '{{ kubernetes.token }}'
token: {{ kubernetes.token }}
tls-san:
- {{ kubernetes.control_plane.dns_name }}
# Networking
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
{% endif %}
egress-selector-mode: disabled
# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}
# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
{{ kubernetes.config_extra | to_yaml }}
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"

View File

@@ -1,34 +0,0 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: 26
cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
- blockSize: 122
cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% endif %}
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}

View File

@@ -1,45 +0,0 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: routingtabletowg
namespace: calico-system
labels:
app: routingtabletowg
spec:
selector:
matchLabels:
app: routingtabletowg
template:
metadata:
labels:
app: routingtabletowg
spec:
tolerations:
# this toleration is to have the daemonset runnable on master nodes
# remove it if your masters can't run pods
- key: node-role.kubernetes.io/master
effect: NoSchedule
hostNetwork: true
containers:
- name: routingtabletowg
image: "ruakij/routingtabletowg:0.2.0"
env:
- name: INTERFACE
value: {{ kubernetes.ipPool.nodeIp_interface }}
- name: FILTER_PROTOCOL
value: bird
- name: PERIODIC_SYNC
value: '300'
securityContext:
capabilities:
add:
- NET_ADMIN
resources:
requests:
cpu: 10m
memory: 10Mi
limits:
cpu: 20m
memory: 20Mi
---

View File

@@ -0,0 +1,6 @@
netbird_client:
# Key and url to join a network
# leave empty to ignore
join_network:
setup_key:
management_url:
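
The join task further down passes these values straight to netbird up --management-url ... --setup-key ... . A hypothetical host_vars entry, with placeholder values, could look like:

netbird_client:
  join_network:
    setup_key: "<setup key generated in the netbird management UI>"
    management_url: "https://netbird.example.org"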

View File

@@ -0,0 +1,26 @@
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- ca-certificates
- curl
- gnupg
- name: Add netbird-key
apt_key:
url: https://pkgs.wiretrustee.com/debian/public.key
state: present
- name: Add netbird-repository
apt_repository:
repo: "deb https://pkgs.wiretrustee.com/debian stable main"
state: present
filename: netbird
update_cache: yes
- name: Install wireguard & netbird
package:
name:
- wireguard
- netbird
state: latest

View File

@@ -0,0 +1,16 @@
- name: Join netbird-network
when: "netbird_client.join_network.setup_key is defined"
command: "netbird up --management-url {{ netbird_client.join_network.management_url }} --setup-key {{ netbird_client.join_network.setup_key }}"
failed_when: command.rc != 0
changed_when: "'Connected' in command.stdout"
register: command
- name: Wait for netbird-interface to exist
wait_for:
path: "/sys/class/net/wt0"
state: present
when: command.changed
- name: Gather facts to get changes
ansible.builtin.gather_facts:
when: command.changed

View File

@@ -1,5 +1,4 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml

View File

@@ -1,3 +0,0 @@
---
dependencies:
- role: docker