Compare commits
No commits in common. 'main' and 'role_netmaker_server' have entirely different histories.
main...role_netmaker_server
@@ -1,4 +0,0 @@
netclient:
  # Token to join default-network
  # leave empty to ignore
  join_network_token:
@@ -1,3 +0,0 @@
---
dependencies: []
#- role: docker
@@ -1,4 +0,0 @@
- name: Deploy CA Certificate
  ansible.builtin.copy:
    src: secret_files/netmaker_server/ca/ca.crt
    dest: /etc/ssl/certs/netmaker-ca.pem
@@ -1,25 +0,0 @@
- name: Install Packages
  # when: docker_file.stat.exists == False
  package:
    name:
      - gpg
      - gpg-agent

- name: Add netmaker-key
  apt_key:
    url: https://apt.netmaker.org/gpg.key
    state: present

- name: Add netmaker-repository
  apt_repository:
    repo: "deb https://apt.netmaker.org stable main"
    state: present
    filename: netmaker
    update_cache: yes

- name: Install wireguard & netclient
  package:
    name:
      - wireguard
      - netclient
    state: latest
@@ -1,7 +0,0 @@
- name: Join netmaker-network
  when: "netclient.join_network_token is defined"
  command: "netclient join -t {{ netclient.join_network_token }}"
  failed_when: command.rc != 0
  changed_when: "'starting wireguard' in command.stdout"
  register: command
  throttle: 1
@@ -1,8 +0,0 @@
- import_tasks: ./certs.yml

- import_tasks: ./install.yml

- import_tasks: ./join-network.yml

- name: Gather facts to get changes
  ansible.builtin.gather_facts:
@@ -1 +0,0 @@
ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888
@@ -1,22 +0,0 @@
- name: Set sysctl settings for ip-forwarding
  copy:
    dest: "/etc/sysctl.d/ip-forwarding.conf"
    content: |
      net.ipv4.ip_forward = 1
      net.ipv6.conf.all.forwarding = 1
  notify: reload_sysctl

- name: Set sysctl settings for ipv6-address-generation
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
    content: |
      net.ipv6.conf.default.addr_gen_mode = 2
      net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
  notify: reload_sysctl

- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
    content: |
      net.ipv6.conf.all.accept_ra = 2
  notify: reload_sysctl
@@ -1,41 +0,0 @@
---
kubernetes:
  ipPool:
    ipv4:
      # Minimum: /24
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
    ipv6:
      # Minimum: /120
      cluster_cidr: fd42::/56
      service_cidr: fd43::/112

    # Interface to grab node-IPv4/v6 from
    nodeIp_interface: <interface to grab nodeIp from>

  control_plane:
    dns_name: <control-plane dns-reachable-name>

  token: <shared token for nodes to join>

  network:
    # One of [flannel, calico]
    plugin: calico

    # Helper for networking
    helper:
      # https://github.com/Ruakij/RoutingTableToWg
      # Translates received routes from e.g. BGP to wireguard-allowedips
      # Helpful when nodeIp_interface is a wireguard-interface
      routingtabletowg: false

  # One of [traefik-ingress]
  ingress_controller: traefik-ingress

  config_extra:
    # etcd-tuning
    # heartbeat: 0.5-1.5x of rtt
    # election: 10x+ of heartbeat
    etcd-arg:
      heartbeat-interval: 500
      election-timeout: 5000
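For orientation, a minimal group-vars override of these defaults might look like the sketch below (assuming 'hash_behaviour = merge', as the nftables role later in this diff also expects, so untouched keys keep their defaults); the interface, DNS name and token values are placeholders, not taken from the repository:

kubernetes:
  ipPool:
    # e.g. the wireguard interface created by the netclient/netbird role
    nodeIp_interface: wt0
  control_plane:
    dns_name: k8s.example.org
  token: '<long random shared secret>'
  network:
    helper:
      # translate received BGP routes into wireguard allowed-ips
      routingtabletowg: true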
@@ -1,33 +0,0 @@
@startuml

rectangle "Control-Plane" as control_plane {
    rectangle "Node" as sn1 {
        component "netclient" as sn1_netclient

        component etcd as sn1_etcd
        component "k3s-server" as sn1_k3s_server
        sn1_k3s_server - sn1_etcd
    }

    rectangle "Node" as sn2 {
        component "netclient" as sn2_netclient

        component etcd as sn2_etcd
        component "k3s-server" as sn2_k3s_server
        sn2_k3s_server - sn2_etcd
    }

    sn1_netclient -- sn2_netclient
    sn1_etcd -- sn2_etcd
}

rectangle "Workers" {
    rectangle "Node" as an1 {
        component "netclient" as an1_netclient

        component "k3s-agent" as sn1_k3s_agent
    }
}

@enduml
@@ -1,35 +0,0 @@
#   Copyright 2018-2022 Docker Inc.

#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at

#       http://www.apache.org/licenses/LICENSE-2.0

#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

disabled_plugins = []

#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0

#[grpc]
#  address = "/run/containerd/containerd.sock"
#  uid = 0
#  gid = 0

#[debug]
#  address = "/run/containerd/debug.sock"
#  uid = 0
#  gid = 0
#  level = "info"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true
@@ -1,19 +0,0 @@
- name: reload_sysctl
  command: sysctl --system

- name: restart_containerd
  ansible.builtin.service:
    name: containerd
    state: restarted

- name: reload_networking
  service:
    name: networking
    state: restarted
  async: 5
  poll: 0
  notify: wait_for_connection

- name: wait_for_connection
  wait_for_connection:
    delay: 5
@@ -1,3 +0,0 @@
---
dependencies:
  - role: docker
@@ -1,30 +0,0 @@
- name: Create k3s-folder
  ansible.builtin.file:
    path: /etc/rancher/k3s/
    state: directory
    mode: '0755'

- name: Deploy k3s config
  ansible.builtin.template:
    src: k3s/{{ type }}/config.yaml.jinja2
    dest: /etc/rancher/k3s/config.yaml
  register: config

- name: Download install-script
  get_url:
    url: https://get.k3s.io
    dest: /root/k3s_install.sh
    mode: '744'
  # todo: update when file changed?

- import_tasks: ./install/server/setup_network.yml
  when: "type == 'server'"

- import_tasks: ./install/server/install_helm.yml
  when: "type == 'server'"

- import_tasks: ./install/server/install_k3s.yml
  when: "type == 'server'"

- import_tasks: ./install/agent/install_k3s.yml
  when: "type == 'agent'"
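The task files above are driven by a 'type' variable ('server' or 'agent') and an inventory group named 'kubernetes'. A hypothetical play wiring the role up could look like this sketch (the group layout and where 'type' is set are assumptions, not taken from the repository):

- hosts: kubernetes
  become: true
  vars:
    # typically set per host or group in the inventory instead
    type: server   # or 'agent' on worker nodes
  roles:
    - kubernetes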
@@ -1,12 +0,0 @@
- name: Install K3s agent
  command: /root/k3s_install.sh {{ type }}
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

- name: Make sure service is started / restarted on config change
  service:
    name: k3s-agent
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
@@ -1,17 +0,0 @@
- name: Add Balto key
  apt_key:
    url: https://baltocdn.com/helm/signing.asc
    state: present

- name: Add Balto Repository
  apt_repository:
    repo: "deb https://baltocdn.com/helm/stable/debian/ all main"
    state: present
    filename: kubernetes
    update_cache: yes

- name: Install helm
  package:
    name:
      - helm
    state: latest
@@ -1,55 +0,0 @@
- name: Install K3s-server for 1st-node
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname == groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"

- name: Make sure service is started / restarted on config change
  service:
    name: k3s
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
  when: "inventory_hostname == groups['kubernetes'][0]"

- name: Waiting for K3s-server to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname == groups['kubernetes'][0]"

- name: Install K3s-server for other nodes
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname != groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

- name: Make sure service is started / restarted on config change
  service:
    name: k3s
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
  when: "inventory_hostname != groups['kubernetes'][0]"

- name: Waiting for K3s-server to accept connections on other nodes
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname != groups['kubernetes'][0]"

#- name: Add Kubernetes environment-vars to /etc/profile.d/
#  blockinfile:
#    path: /etc/profile.d/k3s-bin.sh
#    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
#    block: |
#      export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
#    create: true

- name: Deploy calico
  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
  when: "kubernetes.network.plugin == 'calico'"

- name: Deploy network-helpers
  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml
@@ -1,19 +0,0 @@
- name: Deploy calico operator
  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true
  failed_when:
    - "command.rc == 1 and 'AlreadyExists' not in command.stderr"

- name: Deploy calico resource template
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
    dest: /root/calico-ressource.yml
  run_once: true

- name: Deploy calico resource
  command: kubectl apply -f /root/calico-ressource.yml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true
@@ -1,7 +0,0 @@
- name: Deploy manifest for routing-table-to-wireguard translation
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
    mode: u=rw,g=r,o=r
  run_once: true
  when: "kubernetes.network.helper.routingtabletowg"
@@ -1,6 +0,0 @@
- name: Set control-plane-dns-endpoint towards local-ip
  blockinfile:
    path: /etc/hosts
    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
    block: |
      {{ nodeip_ipv4 }}    {{ kubernetes.control_plane.dns_name }}
@@ -1,4 +0,0 @@
- import_tasks: ./prerequisites.yml

- import_tasks: ./install.yml
@@ -1,42 +0,0 @@
#- name: Load br_netfilter kernel-module
#  modprobe:
#    name: br_netfilter
#    state: present

- name: Set sysctl settings for iptables bridged traffic
  copy:
    dest: "/etc/sysctl.d/kubernetes.conf"
    content: |
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1

      net.ipv4.conf.all.forwarding=1
      net.ipv6.conf.all.forwarding=1
  notify: reload_sysctl

#- name: Disable swap
#  command: swapoff -a

- name: Install required packages
  package:
    name:
      #- containerd
      #- iptables
      # For Longhorn:
      - nfs-common
      - open-iscsi
    state: latest

- import_tasks: ./prerequisites/containerd.yml

- name: Gather interface-name
  set_fact:
    interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"

- name: Getting nodeIp-data from interface
  set_fact:
    nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
    nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"

- name: Run handlers to reload configurations
  meta: flush_handlers
@@ -1,24 +0,0 @@
- name: Check if containerd-service exists & is started
  service:
    name: containerd
    state: started
  ignore_errors: true
  register: containerd_status

- name: Install containerd when missing
  package:
    name:
      - containerd
  when: containerd_status is failed

- name: Create containerd config-folder
  file:
    path: /etc/containerd
    state: directory

- name: Deploy containerd-config
  ansible.builtin.copy:
    src: containerd_config.toml
    dest: /etc/containerd/config.toml
    mode: u=rw,g=r,o=r
  notify: restart_containerd
@@ -1,18 +0,0 @@
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
token: '{{ kubernetes.token }}'

{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
{% endif %}

## Label
# Region & DC
node-label:
{% if region is defined %}
  - topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
  - topology.kubernetes.io/zone={{ zone }}
{% endif %}
@@ -1,49 +0,0 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
{% endif %}

token: '{{ kubernetes.token }}'
tls-san:
  - {{ kubernetes.control_plane.dns_name }}

# Networking
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
{% endif %}

egress-selector-mode: disabled

# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}

# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}

## Label
# Region & DC
node-label:
{% if region is defined %}
  - topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
  - topology.kubernetes.io/zone={{ zone }}
{% endif %}

{{ kubernetes.config_extra | to_yaml }}
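Rendered for the first host of the 'kubernetes' group with IPv4 only, calico as network plugin, the default traefik ingress and no region/zone set, the template above would produce roughly the following (the address and DNS name are illustrative placeholders):

## Base ##
# Initialize with internal etcd
cluster-init: true

token: '<long random shared secret>'
tls-san:
  - k8s.example.org

# Networking
node-ip: 198.51.100.10
cluster-cidr: 10.42.0.0/16
service-cidr: 10.43.0.0/16

egress-selector-mode: disabled

# Network-plugin
disable-network-policy: true
flannel-backend: none

# Ingress-plugin

## Label
# Region & DC
node-label:

etcd-arg:
  heartbeat-interval: 500
  election-timeout: 5000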
@@ -1,34 +0,0 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()

{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
      - blockSize: 122
        cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
{% endif %}

---

# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
@@ -1,45 +0,0 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: routingtabletowg
  namespace: calico-system
  labels:
    app: routingtabletowg
spec:
  selector:
    matchLabels:
      app: routingtabletowg
  template:
    metadata:
      labels:
        app: routingtabletowg
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      hostNetwork: true
      containers:
        - name: routingtabletowg
          image: "ruakij/routingtabletowg:0.2.0"
          env:
            - name: INTERFACE
              value: {{ kubernetes.ipPool.nodeIp_interface }}
            - name: FILTER_PROTOCOL
              value: bird
            - name: PERIODIC_SYNC
              value: '300'
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          resources:
            requests:
              cpu: 10m
              memory: 10Mi
            limits:
              cpu: 20m
              memory: 20Mi
---
@@ -1,6 +0,0 @@
netbird_client:
  # Key and url to join a network
  # leave empty to ignore
  join_network:
    setup_key:
    management_url:
@@ -1,26 +0,0 @@
- name: Install Packages
  # when: docker_file.stat.exists == False
  package:
    name:
      - ca-certificates
      - curl
      - gnupg

- name: Add netbird-key
  apt_key:
    url: https://pkgs.wiretrustee.com/debian/public.key
    state: present

- name: Add netbird-repository
  apt_repository:
    repo: "deb https://pkgs.wiretrustee.com/debian stable main"
    state: present
    filename: netbird
    update_cache: yes

- name: Install wireguard & netbird
  package:
    name:
      - wireguard
      - netbird
    state: latest
@@ -1,16 +0,0 @@
- name: Join netbird-network
  when: "netbird_client.join_network.setup_key is defined"
  command: "netbird up --management-url {{ netbird_client.join_network.management_url }} --setup-key {{ netbird_client.join_network.setup_key }}"
  failed_when: command.rc != 0
  changed_when: "'Connected' in command.stdout"
  register: command

- name: Wait for netbird-interface to exist
  wait_for:
    path: "/sys/class/net/wt0"
    state: present
  when: command.changed

- name: Gather facts to get changes
  ansible.builtin.gather_facts:
  when: command.changed
@@ -1,4 +0,0 @@
- import_tasks: ./install.yml

- import_tasks: ./join-network.yml
@@ -1,29 +0,0 @@
nftables:
  # Rules to add
  # Handled as templates
  # Creates separate files for each entry.
  # The identifier is necessary for ansible to be able to merge the keys (when 'hash_behaviour = merge')
  # rule-ids have to be unique across files and raw
  rules:
    # Files with rules to add
    files:
      #'<group_identifier>': '<relative-location>'
      #'<group_identifier>':
      #  main: <relative-location>
      #  '<identifier>': '<relative-location>'

    # Raw rules to add
    raw:
      #'<group_identifier>': '<content>'
      #'<group_identifier>':
      #  main: <content>
      #  '<identifier>': '<content>'

  # Decides whether /etc/nftables.conf is applied as a whole or only the separate files which have changed
  # Separate changes require the files to be self-tidying to not end up with duplicate rules,
  # e.g.
  #   table ip mytable
  #   flush table ip mytable
  #   delete table ip mytable
  #   table ip mytable { ... }
  apply_global: false
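To make the variable structure concrete, a hypothetical configuration using both forms could look like this sketch (paths and rule content are purely illustrative):

nftables:
  rules:
    files:
      # one template, rendered to /etc/nftables/ansible-managed/base.nft
      'base': 'files/nftables/base.nft.jinja2'
    raw:
      # group with a main file plus one included file
      # (the role appends the include line for the group folder to the main file)
      'wireguard':
        main: |
          # shared definitions for the wireguard group
        'forward': |
          # self-tidying, as described above: recreate the table on every load
          table inet wg_forward
          flush table inet wg_forward
          delete table inet wg_forward
          table inet wg_forward {
            chain forward {
              type filter hook forward priority 0;
              iifname "wg0" accept
            }
          }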
@@ -1,8 +0,0 @@
- name: Load group rules
  command: "nft -f /etc/nftables/ansible-managed/{{ item }}.nft"
  loop: "{{ combined_rules | list }}"
  when: not nftables.apply_global

- name: Load global rule file
  command: "nft -f /etc/nftables.conf"
  when: nftables.apply_global
@@ -1,11 +0,0 @@
- name: Deploying group files
  include_tasks: ./per-group-template-file.yml
  with_items:
    - "{{ nftables.rules.files | list }}"

- name: Deploying group raw-files
  include_tasks: ./per-group-template.yml
  with_items:
    - "{{ nftables.rules.raw | list }}"

- include_tasks: ./remove-files.yml
@@ -1,51 +0,0 @@
- set_fact:
    group_identifier: "{{ item }}"
    value: "{{ nftables.rules.files[item] }}"
  when: "item is defined"

#'<group_identifier>': '<relative-location>'
- block:
    - name: Create main rule file
      template:
        src: "{{ value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
  when: value is string

#'<group_identifier>':
#  main: <relative-location>
#  '<identifier>': '<relative-location>'
- block:
    - set_fact:
        items: "{{ nftables.rules.files[item] }}"

    - block:
        - name: Create main rule file
          template:
            src: "{{ items['main'] }}"
            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"

        - name: Include rule files
          lineinfile:
            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
      when: items['main'] is defined

    - name: Create group folder
      file:
        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
        state: directory
      when: items|length > 0

    - set_fact:
        test: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"

    - name: Create included rule files
      template:
        src: "{{ fileItem.value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ fileItem.key }}.nft"
      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
      loop_control:
        loop_var: fileItem

  when: value is mapping
@@ -1,48 +0,0 @@
- set_fact:
    group_identifier: "{{ item }}"
    value: "{{ nftables.rules.raw[item] }}"
  when: "item is defined"

#'<group_identifier>': '<content>'
- block:
    - name: Create main rule file
      copy:
        content: "{{ value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
  when: value is string

#'<group_identifier>':
#  main: <content>
#  '<identifier>': '<content>'
- block:
    - set_fact:
        items: "{{ nftables.rules.raw[item] }}"

    - block:
        - name: Create main rule file
          copy:
            content: "{{ items['main'] }}"
            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"

        - name: Include rule files
          lineinfile:
            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
      when: items['main'] is defined

    - name: Create group folder
      file:
        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
        state: directory
      when: items|length > 0

    - name: Create included rule files
      copy:
        content: "{{ included_item.value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ included_item.key }}.nft"
      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
      loop_control:
        loop_var: included_item

  when: value is mapping
@@ -1,4 +0,0 @@
- name: Install Packages
  package:
    name:
      - nftables
@@ -1,7 +0,0 @@
- import_tasks: ./prerequisites.yml

- import_tasks: ./setup-packages.yml

- import_tasks: ./deploy-rules/main.yml

- import_tasks: ./apply-files.yml
@@ -1,13 +0,0 @@
# Defaults if missing
- name: Set defaults if missing
  set_fact:
    nftables:
      rules:
        files: "{{ nftables.rules.files | default({}) | combine({}) }}"
        raw: "{{ nftables.rules.raw | default({}) | combine({}) }}"
    combined_rules: "{{ nftables.rules.raw | combine(nftables.rules.files, recursive=true) }}"

#- name: Check items for consistency
#  assert:
#    that: "{{ nftables.rules.files.values() | length }} + {{ nftables.rules.raw.values() | length }} == {{ combined_rules.values() | length }}"
#    fail_msg: "files and raw rules share the same identifier"
@@ -1,21 +0,0 @@
- name: Handle removed group files
  block:
    - find:
        paths: /etc/nftables/ansible-managed/
        file_type: 'any'
        excludes: '{% for item in combined_rules %}{{ item }},{{ item }}.nft,{% endfor %}'
        depth: 1
      register: removeFiles

    - file:
        path: "{{ fileItem.path }}"
        state: absent
      loop: "{{ removeFiles.files }}"
      loop_control:
        label: "{{ fileItem.path }}"
        loop_var: fileItem

- name: Handle removed included files per group
  include_tasks: ./remove-per-group.yml
  with_items:
    - "{{ combined_rules | list }}"
@@ -1,20 +0,0 @@
- set_fact:
    group_identifier: "{{ item }}"
    group_items: "{{ combined_rules[item] }}"

- block:
    - find:
        paths: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
        file_type: 'any'
        excludes: '{% for item in group_items %}{{ item }}.nft,{% endfor %}'
      register: removeFiles

    - file:
        path: "{{ fileItem.path }}"
        state: absent
      loop: "{{ removeFiles.files }}"
      loop_control:
        label: "{{ fileItem.path }}"
        loop_var: fileItem

  when: group_items is mapping
@@ -1,15 +0,0 @@
- name: Install nftables
  package:
    name:
      - nftables

- name: Create /etc/nftables/ansible-managed
  file:
    path: /etc/nftables/ansible-managed
    state: directory

- name: Include files in /etc/nftables/ansible-managed/ from /etc/nftables.conf
  blockinfile:
    path: /etc/nftables.conf
    marker: "# {mark} ANSIBLE MANAGED BLOCK - nftables"
    content: 'include "/etc/nftables/ansible-managed/*.nft"'
@@ -1,12 +0,0 @@
wireguard_ipv6_converter:
  version: latest

  # see https://github.com/Ruakij/wg-ipv6-converter#31-environment
  setup:
    interface: wg0
    #ipv6_format: fc12::%02x%02x:%02x%02x/%d
    #filter_prefix: 100.100
    #recheck_interval: 60s

  service:
    #bindTo: netbird.service
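For example, to run the converter on the wt0 interface created by the netbird role and tie its lifetime to the netbird service, an override could look like this sketch (it only uncomments the defaults shown above; the interval is illustrative):

wireguard_ipv6_converter:
  setup:
    interface: wt0
    recheck_interval: 60s
  service:
    # start/stop together with netbird
    bindTo: netbird.service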
@@ -1,11 +0,0 @@
- name: Get architecture
  set_fact:
    arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'arm64' }}"
    versionUri: "{% if wireguard_ipv6_converter.version == 'latest' %}latest/download{% else %}download/{{ wireguard_ipv6_converter.version }}{% endif %}"

- name: Download binary
  get_url:
    url: https://github.com/Ruakij/wg-ipv6-converter/releases/{{ versionUri }}/wg-ipv6-converter_{{ arch }}
    dest: /usr/local/bin/wg-ipv6-converter
    mode: "744"
  register: deployDownload
@@ -1,3 +0,0 @@
- import_tasks: ./deploy.yml

- import_tasks: ./setup-service.yml
@@ -1,27 +0,0 @@
- name: Deploy service
  ansible.builtin.template:
    src: wg-ipv6-conv.service.jinja2
    dest: /etc/systemd/system/wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}.service
  register: serviceFile

- name: Enable service
  ansible.builtin.systemd:
    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
    daemon_reload: true
    enabled: true

- name: Start service if interface exists already
  ansible.builtin.service:
    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
    state: "{{ 'restarted' if deployDownload.changed or serviceFile.changed else 'started' }}"
  register: service
  when: "wireguard_ipv6_converter.setup.interface in ansible_interfaces"

- name: Pause for 5s to give the program time to run
  ansible.builtin.pause:
    seconds: 5
  when: "service.changed"

- name: Gather facts to get changes
  ansible.builtin.gather_facts:
  when: "service.changed"
@@ -1,29 +0,0 @@
[Unit]
Description=WireGuard IPv6 converter for {{ wireguard_ipv6_converter.setup.interface }}
{% if wireguard_ipv6_converter.service.bindTo is defined %}
BindsTo={{ wireguard_ipv6_converter.service.bindTo }}
After={{ wireguard_ipv6_converter.service.bindTo }}
{% endif %}

[Service]
Type=simple
{% if wireguard_ipv6_converter.service.bindTo is defined %}
ExecStartPre=/bin/sleep 10
{% endif %}
ExecStart=/usr/local/bin/wg-ipv6-converter
Restart=always
RestartSec=30

Environment="INTERFACE={{ wireguard_ipv6_converter.setup.interface }}"
{% if wireguard_ipv6_converter.setup.ipv6_format is defined %}
Environment="IPV6_FORMAT={{ wireguard_ipv6_converter.setup.ipv6_format }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.filter_prefix is defined %}
Environment="FILTER_PREFIX={{ wireguard_ipv6_converter.setup.filter_prefix }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.recheck_interval is defined %}
Environment="RECHECK_INTERVAL={{ wireguard_ipv6_converter.setup.recheck_interval }}"
{% endif %}

[Install]
WantedBy=multi-user.target