Compare commits

...

50 Commits

Author SHA1 Message Date
Ruakij 27e9d4a1ab Merge branch 'role_nftables' 2 years ago
Ruakij 755f9b2e1a Initial role 2 years ago
Ruakij 753f456ef3 Merge branch 'role_kubernetes-k3s' 2 years ago
Ruakij 093612f3a7 Change restart-check to started/restarted check 2 years ago
Ruakij a92409c56f Add failed_when to deploy calico operator 2 years ago
Ruakij f50e3ac33c Use first node's IP for joining cluster 2 years ago
Ruakij 668ff23ee6 Fix service task wrong usage 2 years ago
Ruakij c2c6a2872f Fix conditional for changed after install 2 years ago
Ruakij c1c7ec9e56 Remove workaround as k3s is now at 1.26 2 years ago
Ruakij 550f6868ff Fix old usage of network_plugin var 2 years ago
Ruakij 3fe288f6a5 Merge branch 'role_wireguard-ipv6-converter' 2 years ago
Ruakij ab9220d042 Add pause & gather_facts at end when service was started 2 years ago
Ruakij 15ad7920d4 Merge branch 'role_wireguard-ipv6-converter' 2 years ago
Ruakij 7012e1ea2b Separate enable and re-/start service for fine-control 2 years ago
Ruakij 13ebd48c5d Add register for task download 2 years ago
Ruakij 1a76b94a46 Add download-version 2 years ago
Ruakij 1b765689e6 Make sure daemon is reloaded before service-start 2 years ago
Ruakij 5b607df2de Add checks if var is set 2 years ago
Ruakij 521b76453a Add start to naming 2 years ago
Ruakij 7f503d983a Fix download-location 2 years ago
Ruakij b143d9c848 Fix checks 2 years ago
Ruakij f95bcbc38d change defaults 2 years ago
Ruakij 248a3c08b8 Initial role-data 2 years ago
Ruakij f2c86dc22d Merge branch 'role_kubernetes-k3s' 2 years ago
Ruakij a79f2cac8a Merge branch 'role_netbird_client' 2 years ago
Ruakij c8f90f0f8d Update calico 2 years ago
Ruakij 41570ea40d Create new block for network-stuff 2 years ago
Ruakij a3c887748a Move network-helper to own file independend from calico 2 years ago
Ruakij d113625fa8 Fix env-value not being string 2 years ago
Ruakij dadd077723 Fix service and conditional 2 years ago
Ruakij 0d43d07ad4 Add extra-config option 2 years ago
Ruakij d6f8f975bb Reload when config changed, but install already done 2 years ago
Ruakij 7c86a5d77d Add register for config 2 years ago
Ruakij 8c4e3c2401 Update routingtabletowg and use new sync feature 2 years ago
Ruakij b46d35c8a5 Add labels 2 years ago
Ruakij 791ad96849 Add ipv6-check to calico deploy 2 years ago
Ruakij fc3d9845d6 Fix undeterministic node-selection but uses group 2 years ago
Ruakij 590b75ac23 Add quotes to token-usage for special chars 2 years ago
Ruakij 0c82504299 Separate getting name and ips to fix bug easily 2 years ago
Ruakij 2fee9a1747 Only enable ipv6 when available and activated 2 years ago
Ruakij fb44c39969 Add install of often-used packets 2 years ago
Ruakij 5452303992 Remove netmaker from dependency 2 years ago
Ruakij 4321d78cf8 Add comments to variables 2 years ago
Ruakij f9a859e95c Add ingress-option 2 years ago
Ruakij fd302e4ebc Move regather facts to join when changed 2 years ago
Ruakij b5729caa0e Add wait for interface to come up 2 years ago
Ruakij dca40ed835 Remove throttle 2 years ago
Ruakij 95ddd04a86 Fix join command 2 years ago
Ruakij 911bc47acb Initial role stuff 2 years ago
Ruakij e5920b3ddf Add network-plugin option 2 years ago

@@ -2,16 +2,40 @@
 kubernetes:
   ipPool:
     ipv4:
+      # Minimum: /24
       cluster_cidr: 10.42.0.0/16
       service_cidr: 10.43.0.0/16
     ipv6:
+      # Minimum: /120
       cluster_cidr: fd42::/56
       service_cidr: fd43::/112
-    # Replace - with _
+    # Interface to grab node-IPv4/v6 from
     nodeIp_interface: <interface to grab nodeIp from>
   control_plane:
     dns_name: <control-plane dns-reachable-name>
   token: <shared token for nodes to join>
+  network:
+    # One of [flannel, calico]
+    plugin: calico
+    # Helper for networking
+    helper:
+      # https://github.com/Ruakij/RoutingTableToWg
+      # Translates received-routes from e.g. BGP to wireguard-allowedips
+      # Helpful, when nodeIp_interface is a wireguard-interface
+      routingtabletowg: false
+  # One of [traefik-ingress]
+  ingress_controller: traefik-ingress
+  config_extra:
+    # etcd-tuning
+    # heartbeat: 0.5-1.5x of rtt
+    # election: 10x- of heartbeat
+    etcd-arg:
+      heartbeat-interval: 500
+      election-timeout: 5000
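
For reference, a filled-in group_vars entry for this role might look like the following sketch (illustrative values only — the interface, DNS name, and token are assumptions; a partial override like this presumes 'hash_behaviour = merge', as the nftables role below notes):

kubernetes:
  ipPool:
    nodeIp_interface: wg0
  control_plane:
    dns_name: k8s.example.org
  token: 'a-long-random-shared-secret'
  network:
    plugin: calico
    helper:
      routingtabletowg: true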

@@ -1,4 +1,3 @@
 ---
 dependencies:
 - role: docker
-- role: netmaker

@@ -8,6 +8,7 @@
   ansible.builtin.template:
     src: k3s/{{ type }}/config.yaml.jinja2
     dest: /etc/rancher/k3s/config.yaml
+  register: config
 - name: Download install-script
   get_url:

@@ -1,7 +1,12 @@
 - name: Install K3s agent
   command: /root/k3s_install.sh {{ type }}
   register: command
-  changed_when: "'No change detected' in command.stdout"
+  changed_when: "'No change detected' not in command.stdout"
   until: "command is not failed"
   retries: 2
   delay: 10
+- name: Make sure service is started / restarted on config change
+  service:
+    name: k3s-agent
+    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"

@@ -2,7 +2,13 @@
   command: /root/k3s_install.sh {{ type }}
   when: "inventory_hostname == groups['kubernetes'][0]"
   register: command
-  changed_when: "'No change detected' in command.stdout"
+  changed_when: "'No change detected' not in command.stdout"
+- name: Make sure service is started / restarted on config change
+  service:
+    name: k3s
+    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
+  when: "inventory_hostname == groups['kubernetes'][0]"
 - name: Waiting for K3s-server to accept connections
   ansible.builtin.wait_for:
@@ -15,11 +21,17 @@
   command: /root/k3s_install.sh {{ type }}
   when: "inventory_hostname != groups['kubernetes'][0]"
   register: command
-  changed_when: "'No change detected' in command.stdout"
+  changed_when: "'No change detected' not in command.stdout"
   until: "command is not failed"
   retries: 2
   delay: 10
+- name: Make sure service is started / restarted on config change
+  service:
+    name: k3s
+    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
+  when: "inventory_hostname != groups['kubernetes'][0]"
 - name: Waiting for K3s-server to accept connections on other nodes
   ansible.builtin.wait_for:
     host: "{{ inventory_hostname }}"
@@ -34,3 +46,10 @@
 #  block: |
 #    export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
 #  create: true
+- name: Deploy calico
+  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
+  when: "kubernetes.network.plugin == 'calico'"
+- name: Deploy network-helpers
+  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml

@@ -0,0 +1,19 @@
+- name: Deploy calico operator
+  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
+  register: command
+  changed_when: "'created' in command.stdout"
+  run_once: true
+  failed_when:
+    - "command.rc == 1 and 'AlreadyExists' not in command.stderr"
+- name: Deploy calico ressource template
+  ansible.builtin.template:
+    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
+    dest: /root/calico-ressource.yml
+  run_once: true
+- name: Deploy calico ressource
+  command: kubectl apply -f /root/calico-ressource.yml
+  register: command
+  changed_when: "'created' in command.stdout"
+  run_once: true

@@ -0,0 +1,7 @@
+- name: Deploy service-file for routing-table to wireguard-translation
+  ansible.builtin.template:
+    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
+    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
+    mode: u=rw,g=r,o=r
+  run_once: true
+  when: "kubernetes.network.helper.routingtabletowg"

@@ -17,19 +17,26 @@
 #- name: Disable swap
 #  command: swapoff -a
-#- name: Install iptables
-#  package:
-#    name:
-#      #- containerd
-#      - iptables
-#    state: latest
+- name: Install required packages
+  package:
+    name:
+      #- containerd
+      #- iptables
+      # For Longhorn:
+      - nfs-common
+      - open-iscsi
+    state: latest
 - import_tasks: ./prerequisites/containerd.yml
+- name: Gather interface-name
+  set_fact:
+    interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
 - name: Getting nodeIp-data from interface
   set_fact:
-    nodeip_ipv4: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv4.address }}"
-    nodeip_ipv6: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv6[0].address }}"
+    nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
+    nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"
 - name: Run handlers to reload configurations
   meta: flush_handlers

@@ -1,7 +1,18 @@
-server: https://{{ kubernetes.control_plane.dns_name }}:6443
-token: {{ kubernetes.token }}
+server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
+token: '{{ kubernetes.token }}'
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
 node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+{% endif %}
-# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
-kubelet-arg: "--node-ip=0.0.0.0"
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+  - topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+  - topology.kubernetes.io/zone={{ zone }}
+{% endif %}
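
For a dual-stack agent this template would render to roughly the following (assumed values: first server node-IP 10.0.0.1, agent IPs 10.0.0.2/fd00::2, region and zone set):

server: https://10.0.0.1:6443
token: 'a-long-random-shared-secret'
node-ip: 10.0.0.2,fd00::2
## Label
# Region & DC
node-label:
  - topology.kubernetes.io/region=eu-central
  - topology.kubernetes.io/zone=dc1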

@@ -1,23 +1,49 @@
 ## Base ##
 {% if inventory_hostname == groups['kubernetes'][0] %}
+# Initialize with internal etcd
 cluster-init: true
 {% else %}
-server: https://{{ groups['kubernetes'][0] }}:6443
+server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
 {% endif %}
-token: {{ kubernetes.token }}
+token: '{{ kubernetes.token }}'
 tls-san:
   - {{ kubernetes.control_plane.dns_name }}
 # Networking
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
 node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
 cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
 service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
+service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
+{% endif %}
 egress-selector-mode: disabled
 # Network-plugin
+{% if kubernetes.network.plugin == "flannel" %}
 flannel-backend: vxlan
+{% else %}
+disable-network-policy: true
+flannel-backend: none
+{% endif %}
+# Ingress-plugin
+{% if kubernetes.ingress_controller != "traefik-ingress" %}
+disable: traefik
+{% endif %}
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+  - topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+  - topology.kubernetes.io/zone={{ zone }}
+{% endif %}
-# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
-kubelet-arg: "--node-ip=0.0.0.0"
+{{ kubernetes.config_extra | to_yaml }}

@@ -0,0 +1,34 @@
+# This section includes base Calico installation configuration.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+      - blockSize: 26
+        cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
+        encapsulation: None
+        natOutgoing: Enabled
+        nodeSelector: all()
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
+      - blockSize: 122
+        cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
+        encapsulation: None
+        natOutgoing: Enabled
+        nodeSelector: all()
+{% endif %}
+---
+# This section configures the Calico API server.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}

@@ -0,0 +1,45 @@
+# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: routingtabletowg
+  namespace: calico-system
+  labels:
+    app: routingtabletowg
+spec:
+  selector:
+    matchLabels:
+      app: routingtabletowg
+  template:
+    metadata:
+      labels:
+        app: routingtabletowg
+    spec:
+      tolerations:
+        # this toleration is to have the daemonset runnable on master nodes
+        # remove it if your masters can't run pods
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      hostNetwork: true
+      containers:
+        - name: routingtabletowg
+          image: "ruakij/routingtabletowg:0.2.0"
+          env:
+            - name: INTERFACE
+              value: {{ kubernetes.ipPool.nodeIp_interface }}
+            - name: FILTER_PROTOCOL
+              value: bird
+            - name: PERIODIC_SYNC
+              value: '300'
+          securityContext:
+            capabilities:
+              add:
+                - NET_ADMIN
+          resources:
+            requests:
+              cpu: 10m
+              memory: 10Mi
+            limits:
+              cpu: 20m
+              memory: 20Mi
+---

@@ -0,0 +1,6 @@
+netbird_client:
+  # Key and url to join a network
+  # leave empty to ignore
+  join_network:
+    setup_key:
+    management_url:
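
A populated example (hypothetical setup key and self-hosted management URL, for illustration only):

netbird_client:
  join_network:
    setup_key: 00000000-0000-0000-0000-000000000000
    management_url: https://netbird.example.org:33073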

@@ -0,0 +1,26 @@
+- name: Install Packages
+  # when: docker_file.stat.exists == False
+  package:
+    name:
+      - ca-certificates
+      - curl
+      - gnupg
+- name: Add netbird-key
+  apt_key:
+    url: https://pkgs.wiretrustee.com/debian/public.key
+    state: present
+- name: Add netbird-repository
+  apt_repository:
+    repo: "deb https://pkgs.wiretrustee.com/debian stable main"
+    state: present
+    filename: netbird
+    update_cache: yes
+- name: Install wireguard & netbird
+  package:
+    name:
+      - wireguard
+      - netbird
+    state: latest

@@ -0,0 +1,16 @@
+- name: Join netbird-network
+  when: "netbird_client.join_network.setup_key is defined"
+  command: "netbird up --management-url {{ netbird_client.join_network.management_url }} --setup-key {{ netbird_client.join_network.setup_key }}"
+  failed_when: command.rc != 0
+  changed_when: "'Connected' in command.stdout"
+  register: command
+- name: Wait for netbird-interface to exist
+  wait_for:
+    path: "/sys/class/net/wt0"
+    state: present
+  when: command.changed
+- name: Gather facts to get changes
+  ansible.builtin.gather_facts:
+  when: command.changed

@@ -0,0 +1,4 @@
+- import_tasks: ./install.yml
+- import_tasks: ./join-network.yml

@@ -0,0 +1,29 @@
+nftables:
+  # Rules to add
+  # Handled as templates
+  # Creates separate files for each entry.
+  # The identifier is necessary for ansible to be able to merge the keys (when 'hash_behaviour = merge')
+  # rule-ids have to be unique across files and raw
+  rules:
+    # Files with Rules to add
+    files:
+      #'<group_identifier>': '<relative-location>'
+      #'<group_identifier>':
+      #  main: <relative-location>
+      #  '<identifier>': '<relative-location>'
+    # Rules to add
+    raw:
+      #'<group_identifier>': '<content>'
+      #'<group_identifier>':
+      #  main: <content>
+      #  '<identifier>': '<content>'
+  # Decides if /etc/nftables.conf is applied or separate files which have changed
+  # Separate changes require the files to be self-tidying to not end up with duplicate rules
+  # e.g.
+  #   table ip mytable
+  #   flush table ip mytable
+  #   delete table ip mytable
+  #   table ip mytable {} ...
+  apply_global: false
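
A minimal sketch of how these keys could be filled (hypothetical group identifier and rule content, following the self-tidying pattern from the comment above):

nftables:
  rules:
    raw:
      mytable: |
        table ip mytable
        flush table ip mytable
        delete table ip mytable
        table ip mytable {
          chain input { type filter hook input priority 0; policy accept; }
        }
  apply_global: false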

@@ -0,0 +1,8 @@
+- name: Load group rules
+  command: "nft -f /etc/nftables/ansible-managed/{{ item }}.nft"
+  loop: "{{ combined_rules | list }}"
+  when: not nftables.apply_global
+- name: Load global rule file
+  command: "nft -f /etc/nftables.nft"
+  when: nftables.apply_global

@@ -0,0 +1,11 @@
+- name: Deploying group files
+  include_tasks: ./per-group-template-file.yml
+  with_items:
+    - "{{ nftables.rules.files | list }}"
+- name: Deploying group raw-files
+  include_tasks: ./per-group-template.yml
+  with_items:
+    - "{{ nftables.rules.raw | list }}"
+- include_tasks: ./remove-files.yml

@@ -0,0 +1,51 @@
+- set_fact:
+    group_identifier: "{{ item }}"
+    value: "{{ nftables.rules.files[item] }}"
+  when: "item is defined"
+#'<group_identifier>': '<relative-location>'
+- block:
+    - name: Create main rule file
+      template:
+        src: "{{ value }}"
+        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
+  when: value is string
+#'<group_identifier>':
+#  main: <relative-location>
+#  '<identifier>': '<relative-location>'
+- block:
+    - set_fact:
+        items: "{{ nftables.rules.files[item] }}"
+    - block:
+        - name: Create main rule file
+          template:
+            src: "{{ items['main'] }}"
+            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
+        - name: Include rule files
+          lineinfile:
+            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
+            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
+            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
+      when: items['main'] is defined
+    - name: Create group folder
+      file:
+        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
+        state: directory
+      when: items|length > 0
+    - set_fact:
+        test: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
+    - name: Create included rule files
+      template:
+        src: "{{ fileItem.value }}"
+        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ fileItem.key }}.nft"
+      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
+      loop_control:
+        loop_var: fileItem
+  when: value is mapping

@@ -0,0 +1,48 @@
+- set_fact:
+    group_identifier: "{{ item }}"
+    value: "{{ nftables.rules.raw[item] }}"
+  when: "item is defined"
+#'<group_identifier>': '<content>'
+- block:
+    - name: Create main rule file
+      copy:
+        content: "{{ value }}"
+        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
+  when: value is string
+#'<group_identifier>':
+#  main: <content>
+#  '<identifier>': '<content>'
+- block:
+    - set_fact:
+        items: "{{ nftables.rules.raw[item] }}"
+    - block:
+        - name: Create main rule file
+          copy:
+            content: "{{ items['main'] }}"
+            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
+        - name: Include rule files
+          lineinfile:
+            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
+            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
+            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
+      when: items['main'] is defined
+    - name: Create group folder
+      file:
+        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
+        state: directory
+      when: items|length > 0
+    - name: Create included rule files
+      copy:
+        content: "{{ included_item.value }}"
+        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ included_item.key }}.nft"
+      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
+      loop_control:
+        loop_var: included_item
+  when: value is mapping

@@ -0,0 +1,4 @@
+- name: Install Packages
+  package:
+    name:
+      - nftables

@@ -0,0 +1,7 @@
+- import_tasks: ./prerequisites.yml
+- import_tasks: ./setup-packages.yml
+- import_tasks: ./deploy-rules/main.yml
+- import_tasks: ./apply-files.yml

@@ -0,0 +1,13 @@
+# Defaults if missing
+- name: Set defaults if missing
+  set_fact:
+    nftables:
+      rules:
+        files: "{{ nftables.rules.files | default({}) | combine({}) }}"
+        raw: "{{ nftables.rules.raw | default({}) | combine({}) }}"
+    combined_rules: "{{ nftables.rules.raw | combine(nftables.rules.files, recursive=true) }}"
+#- name: Check items for consistency
+#  assert:
+#    that: "{{ nftables.rules.files.values() | length }} + {{ nftables.rules.raw.values() | length }} == {{ combined_rules.values() | length }}"
+#    fail_msg: "files and raw rules share the same identifier"

@@ -0,0 +1,21 @@
+- name: Handle removed group files
  block:
+    - find:
+        paths: /etc/nftables/ansible-managed/
+        file_type: 'any'
+        excludes: '{% for item in combined_rules %}{{ item }},{{ item }}.nft,{% endfor %}'
+        depth: 1
+      register: removeFiles
+    - file:
+        path: "{{ fileItem.path }}"
+        state: absent
+      loop: "{{ removeFiles.files }}"
+      loop_control:
+        label: "{{ fileItem.path }}"
+        loop_var: fileItem
+- name: Handle removed included files per group
+  include_tasks: ./remove-per-group.yml
+  with_items:
+    - "{{ combined_rules | list }}"

@@ -0,0 +1,20 @@
+- set_fact:
+    group_identifier: "{{ item }}"
+    group_items: "{{ combined_rules[item] }}"
+- block:
+    - find:
+        paths: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
+        file_type: 'any'
+        excludes: '{% for item in group_items %}{{ item }}.nft,{% endfor %}'
+      register: removeFiles
+    - file:
+        path: "{{ fileItem.path }}"
+        state: absent
+      loop: "{{ removeFiles.files }}"
+      loop_control:
+        label: "{{ fileItem.path }}"
+        loop_var: fileItem
+  when: group_items is mapping

@@ -0,0 +1,15 @@
+- name: Install nftables
+  package:
+    name:
+      - nftables
+- name: Create /etc/nftables/ansible-managed
+  file:
+    path: /etc/nftables/ansible-managed
+    state: directory
+- name: Include files in /etc/nftables/ansible-managed/ from /etc/nftables.conf
+  blockinfile:
+    path: /etc/nftables.conf
+    marker: "# {mark} ANSIBLE MANAGED BLOCK - nftables"
+    content: 'include "/etc/nftables/ansible-managed/*.nft"'
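
After this task, /etc/nftables.conf contains a block like:

# BEGIN ANSIBLE MANAGED BLOCK - nftables
include "/etc/nftables/ansible-managed/*.nft"
# END ANSIBLE MANAGED BLOCK - nftables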

@@ -0,0 +1,12 @@
+wireguard_ipv6_converter:
+  version: latest
+  # see https://github.com/Ruakij/wg-ipv6-converter#31-environment
+  setup:
+    interface: wg0
+    #ipv6_format: fc12::%02x%02x:%02x%02x/%d
+    #filter_prefix: 100.100
+    #recheck_interval: 60s
+  service:
+    #bindTo: netbird.service
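
An example override binding the converter to the netbird interface and service (the version tag is hypothetical; wt0 matches the interface the netbird role waits for):

wireguard_ipv6_converter:
  version: v0.3.1
  setup:
    interface: wt0
  service:
    bindTo: netbird.service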

@@ -0,0 +1,11 @@
+- name: Get architecture
+  set_fact:
+    arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'arm64' }}"
+    versionUri: "{% if wireguard_ipv6_converter.version == 'latest' %}latest/download{% else %}download/{{ wireguard_ipv6_converter.version }}{% endif %}"
+- name: Download binary
+  get_url:
+    url: https://github.com/Ruakij/wg-ipv6-converter/releases/{{ versionUri }}/wg-ipv6-converter_{{ arch }}
+    dest: /usr/local/bin/wg-ipv6-converter
+    mode: "744"
+  register: deployDownload

@@ -0,0 +1,3 @@
+- import_tasks: ./deploy.yml
+- import_tasks: ./setup-service.yml

@@ -0,0 +1,27 @@
+- name: Deploy service
+  ansible.builtin.template:
+    src: wg-ipv6-conv.service.jinja2
+    dest: /etc/systemd/system/wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}.service
+  register: serviceFile
+- name: Enable service
+  ansible.builtin.systemd:
+    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
+    daemon_reload: true
+    enabled: true
+- name: Start service if interface exists already
+  ansible.builtin.service:
+    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
+    state: "{{ 'restarted' if deployDownload.changed or serviceFile.changed else 'started' }}"
+  register: service
+  when: "wireguard_ipv6_converter.setup.interface in ansible_interfaces"
+- name: Pause for 5s to wait for program to have run
+  ansible.builtin.pause:
+    seconds: 5
+  when: "service.changed"
+- name: Gather facts to get changes
+  ansible.builtin.gather_facts:
+  when: "service.changed"

@@ -0,0 +1,29 @@
+[Unit]
+Description=WireGuard IPv6 converter for {{ wireguard_ipv6_converter.setup.interface }}
+{% if wireguard_ipv6_converter.service.bindTo is defined %}
+BindsTo={{ wireguard_ipv6_converter.service.bindTo }}
+After={{ wireguard_ipv6_converter.service.bindTo }}
+{% endif %}
+
+[Service]
+Type=simple
+{% if wireguard_ipv6_converter.service.bindTo is defined %}
+ExecStartPre=/bin/sleep 10
+{% endif %}
+ExecStart=/usr/local/bin/wg-ipv6-converter
+Restart=always
+RestartSec=30
+Environment="INTERFACE={{ wireguard_ipv6_converter.setup.interface }}"
+{% if wireguard_ipv6_converter.setup.ipv6_format is defined %}
+Environment="IPV6_FORMAT={{ wireguard_ipv6_converter.setup.ipv6_format }}"
+{% endif %}
+{% if wireguard_ipv6_converter.setup.filter_prefix is defined %}
+Environment="FILTER_PREFIX={{ wireguard_ipv6_converter.setup.filter_prefix }}"
+{% endif %}
+{% if wireguard_ipv6_converter.setup.recheck_interval is defined %}
+Environment="RECHECK_INTERVAL={{ wireguard_ipv6_converter.setup.recheck_interval }}"
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
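
Rendered with the role defaults (interface wg0, no bindTo and no optional environment set), the unit would look roughly like:

[Unit]
Description=WireGuard IPv6 converter for wg0

[Service]
Type=simple
ExecStart=/usr/local/bin/wg-ipv6-converter
Restart=always
RestartSec=30
Environment="INTERFACE=wg0"

[Install]
WantedBy=multi-user.target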