Compare commits


86 Commits

Author SHA1 Message Date
Ruakij 27e9d4a1ab Merge branch 'role_nftables' 2 years ago
Ruakij 755f9b2e1a Initial role 2 years ago
Ruakij 753f456ef3 Merge branch 'role_kubernetes-k3s' 2 years ago
Ruakij 093612f3a7 Change restart-check to started/restarted check 2 years ago
Ruakij a92409c56f Add failed_when to deploy calico operator 2 years ago
Ruakij f50e3ac33c Use first node's IP for joining cluster 2 years ago
Ruakij 668ff23ee6 Fix service task wrong usage 2 years ago
Ruakij c2c6a2872f Fix conditional for changed after install 2 years ago
Ruakij c1c7ec9e56 Remove workaround as k3s is now at 1.26 2 years ago
Ruakij 550f6868ff Fix old usage of network_plugin var 2 years ago
Ruakij 3fe288f6a5 Merge branch 'role_wireguard-ipv6-converter' 2 years ago
Ruakij ab9220d042 Add pause & gather_facts at end when service was started 2 years ago
Ruakij 15ad7920d4 Merge branch 'role_wireguard-ipv6-converter' 2 years ago
Ruakij 7012e1ea2b Separate enable and re-/start service for fine-control 2 years ago
Ruakij 13ebd48c5d Add register for task download 2 years ago
Ruakij 1a76b94a46 Add download-version 2 years ago
Ruakij 1b765689e6 Make sure daemon is reloaded before service-start 2 years ago
Ruakij 5b607df2de Add checks if var is set 2 years ago
Ruakij 521b76453a Add start to naming 2 years ago
Ruakij 7f503d983a Fix download-location 2 years ago
Ruakij b143d9c848 Fix checks 2 years ago
Ruakij f95bcbc38d change defaults 2 years ago
Ruakij 248a3c08b8 Initial role-data 2 years ago
Ruakij f2c86dc22d Merge branch 'role_kubernetes-k3s' 2 years ago
Ruakij a79f2cac8a Merge branch 'role_netbird_client' 2 years ago
Ruakij c8f90f0f8d Update calico 2 years ago
Ruakij 41570ea40d Create new block for network-stuff 2 years ago
Ruakij a3c887748a Move network-helper to own file independent from calico 2 years ago
Ruakij d113625fa8 Fix env-value not being string 2 years ago
Ruakij dadd077723 Fix service and conditional 2 years ago
Ruakij 0d43d07ad4 Add extra-config option 2 years ago
Ruakij d6f8f975bb Reload when config changed, but install already done 2 years ago
Ruakij 7c86a5d77d Add register for config 2 years ago
Ruakij 8c4e3c2401 Update routingtabletowg and use new sync feature 2 years ago
Ruakij b46d35c8a5 Add labels 2 years ago
Ruakij 791ad96849 Add ipv6-check to calico deploy 2 years ago
Ruakij fc3d9845d6 Fix nondeterministic node-selection by using group 2 years ago
Ruakij 590b75ac23 Add quotes to token-usage for special chars 2 years ago
Ruakij 0c82504299 Separate getting name and ips to fix bug easily 2 years ago
Ruakij 2fee9a1747 Only enable ipv6 when available and activated 2 years ago
Ruakij fb44c39969 Add install of often-used packages 2 years ago
Ruakij 5452303992 Remove netmaker from dependency 2 years ago
Ruakij 4321d78cf8 Add comments to variables 2 years ago
Ruakij f9a859e95c Add ingress-option 2 years ago
Ruakij fd302e4ebc Move regather facts to join when changed 2 years ago
Ruakij b5729caa0e Add wait for interface to come up 2 years ago
Ruakij dca40ed835 Remove throttle 2 years ago
Ruakij 95ddd04a86 Fix join command 2 years ago
Ruakij 911bc47acb Initial role stuff 2 years ago
Ruakij e5920b3ddf Add network-plugin option 2 years ago
Ruakij f3e381aca3 Move netmaker to deprecated 2 years ago
Ruakij 233eadaf40 Merge branch 'role_netmaker_server' 2 years ago
Ruakij 109a09052d Update to 0.17.1 (also sets up tls-termination for mosquitto) 2 years ago
Ruakij 4ea9492ca3 Change host group-name 2 years ago
Ruakij e5ebc2ad5f Merge branch 'role_common' 2 years ago
Ruakij 98c51c6fc1 Merge branch 'role_kubernetes-k3s' 2 years ago
Ruakij 6b59bf6c75 Merge branch 'role_netmaker' 2 years ago
Ruakij 1b2af7cf6c Merge branch 'role_netmaker_server' 2 years ago
Ruakij 0fc5dbb791 Initial role-data 2 years ago
Ruakij 975746e7d7 Add IPv6-network to common 2 years ago
Ruakij a27ca2c37a Create dedicated docs-folder and move files there 2 years ago
Ruakij 9cb2e88193 Merge branch 'role_netmaker' 2 years ago
Ruakij 25ceb0f456 Merge branch 'role_netmaker_server' 2 years ago
Ruakij cdd4c9babb Remove CA in args; rqlite will then use the system trust-store 2 years ago
Ruakij d553f604a9 Add own certs to mosquitto 2 years ago
Ruakij 806b41b73e Fix proxy-protocol being expected 2 years ago
Ruakij ec98188a24 Fix variable name 2 years ago
Ruakij 06bdae380b Revert proxy-protocol-matching 2 years ago
Ruakij 83b50c10cd Use new variables and fix requests 2 years ago
Ruakij 3890007042 Use more specific hostnames in cert 2 years ago
Ruakij bb3d363094 Created nginx-config-file-templates 2 years ago
Ruakij 7453f1e616 Move variables to defaults-folder 2 years ago
Ruakij e022a6e9f0 Restructure to make better looking 2 years ago
Ruakij 772dc3a620 Move TLS-point outside of netmaker-system 2 years ago
Ruakij 6d5c86927d Make diagram more readable 2 years ago
Ruakij c94168fb30 Comment-in connection-check (todo: change check to http) 2 years ago
Ruakij 6168ba2b0a Add missing dependency 2 years ago
Ruakij e4a2c5dd2f Remove ports and add/change advertised addresses and ports 2 years ago
Ruakij 315f5a1805 Fix private_ip checking 2 years ago
Ruakij d2d8ebd8cc Add missing nginx-file 2 years ago
Ruakij dd87d5e724 Move cert-generation outside 2 years ago
Ruakij 86e6317e28 Fix naming 2 years ago
Ruakij 8fddfc532f Add nginx as service 2 years ago
Ruakij f733543ae1 Fix architecture-diagram 2 years ago
Ruakij 526cf66bd7 Add chart for architecture 2 years ago
Ruakij b593a2874a Add role netmaker_server 2 years ago

@ -0,0 +1,3 @@
---
dependencies: []
#- role: docker

@ -0,0 +1,5 @@
netmaker_creds:
  rqlite_password:
  mq_admin_password:
  master_key:

@ -0,0 +1,28 @@
# Overwrite for specific nodes to force dynamic-ip (disables setting public-ip and forces external lookup of public-ip)
# When false, the node will check itself for a dynamic-ip (based on private-ip)
netmaker_dynamicIp: false

netmaker_nginx:
  # Listen-port
  tls_port: 51820
  # Advertise-port for services
  # (must also be reachable by internal services!)
  advertise_port: 51820

# This is the base-domain used for generating hostnames for services
netmaker_base_domain:

# host + base_domain
netmaker_api:
  host: netmaker-api
netmaker_ui:
  host: netmaker-ui
# MQTT-broker
netmaker_broker:
  tls_host: netmaker-broker
# host + node_hostname
netmaker_rqlite:
  http_host: netmaker-rqlite-http
  cluster_host: netmaker-rqlite-cluster
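
For illustration, a hypothetical group_vars sketch for a netmaker_server host (domain and secrets are assumed values, not part of the role):

netmaker_base_domain: example.org
netmaker_creds:
  rqlite_password: <random secret>
  mq_admin_password: <random secret>
  master_key: <random secret>

# With these values the templates derive hostnames such as
#   netmaker-api.example.org     (API)
#   netmaker-ui.example.org      (UI)
#   netmaker-broker.example.org  (MQTT over TLS)
# while the rqlite hosts are suffixed with the node's hostname instead of the base-domain.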

@ -0,0 +1,37 @@
@startuml
interface ng_TLS

component netmaker_server {
    component nginx {
        component ng_stream
        component ng_http

        ng_stream -up- ng_TLS
        ng_stream -right-> ng_http : tls-termination
    }

    component nm_ui
    nm_ui -up- nm_ui_http
    ng_http -down-( nm_ui_http

    component Mosquitto
    Mosquitto -up- mq_plain
    Mosquitto -up- mq_tls
    ng_stream -down-( mq_tls

    component rqlite
    rqlite -up- rq_http
    rqlite -up- rq_cluster
    ng_stream -down-( rq_cluster
    ng_http -down-( rq_http

    component nm_api
    nm_api -down- nm_api_http
    ng_http --( nm_api_http
    nm_api .up.( ng_TLS : db-connection to rqlite-master
    nm_api --( mq_plain
}
@enduml

@ -0,0 +1,12 @@
per_listener_settings false

listener 8883
protocol websockets
allow_anonymous false

listener 1883
protocol websockets
allow_anonymous false

plugin /usr/lib/mosquitto_dynamic_security.so
plugin_opt_config_file /mosquitto/data/dynamic-security.json

@ -0,0 +1,23 @@
#!/bin/ash

wait_for_netmaker() {
    echo "SERVER: ${NETMAKER_SERVER_HOST}"
    until curl --output /dev/null --silent --fail --head \
        --location "${NETMAKER_SERVER_HOST}/api/server/health"; do
        echo "Waiting for netmaker server to startup"
        sleep 1
    done
}

main() {
    # wait for netmaker to startup
    apk add curl
    wait_for_netmaker

    echo "Starting MQ..."
    # Run the main container command.
    /docker-entrypoint.sh
    /usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
}

main "${@}"

@ -0,0 +1,33 @@
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout 65;

    #gzip on;

    include /etc/nginx/conf.d/*.conf;
}

include /etc/nginx/stream.d/*.conf;

@ -0,0 +1,3 @@
---
# dependencies:
# - role: docker

@ -0,0 +1,40 @@
- name: Generate PrivateKey
  community.crypto.openssl_privatekey:
    path: /opt/netmaker_server/certs/node.key
    owner: 1883    # Set owner to mosquitto-user (all other containers seem to run as root)

- name: Generate Certificate-Signing-Request from privateKey
  community.crypto.openssl_csr:
    path: /opt/netmaker_server/certs/node.csr
    privatekey_path: /opt/netmaker_server/certs/node.key
    common_name: "{{ ansible_facts.nodename }}"
    subject_alt_name:
      "DNS:{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }},\
       DNS:{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }},\
       DNS:{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }},\
       DNS:{{ netmaker_api.host }}.{{ netmaker_base_domain }},\
       DNS:{{ netmaker_ui.host }}.{{ netmaker_base_domain }}"

- name: Fetch CSR
  ansible.builtin.fetch:
    src: /opt/netmaker_server/certs/node.csr
    dest: tmp_files/

- name: Sign CSR locally with CA
  local_action: community.crypto.x509_certificate
  args:
    path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
    csr_path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.csr
    ownca_path: secret_files/netmaker_server/ca/ca.crt
    ownca_privatekey_path: secret_files/netmaker_server/ca/ca.key
    provider: ownca

- name: Copy Signed Certificate
  ansible.builtin.copy:
    src: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
    dest: /opt/netmaker_server/certs/node.crt

- name: Copy CA Certificate
  ansible.builtin.copy:
    src: secret_files/netmaker_server/ca/ca.crt
    dest: /opt/netmaker_server/certs/ca.crt

@ -0,0 +1,20 @@
- import_tasks: ./prerequisites.yml

- name: Copy folder-structure
  ansible.builtin.copy:
    src: opt/netmaker_server
    dest: /opt/
    mode: preserve

- name: Deploy compose file
  ansible.builtin.template:
    src: docker-compose.yml.template
    dest: /opt/netmaker_server/docker-compose.yml

- import_tasks: ./certs.yml
- import_tasks: ./nginx.yml
- import_tasks: ./rqlite.yml
- import_tasks: ./netmaker.yml

@ -0,0 +1,57 @@
- name: Start rest of netmaker-services
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d"
  register: command
  failed_when: command.rc != 0

- name: Wait for netmaker-api to become available
  uri:
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}'
    return_content: yes
    validate_certs: no
    status_code:
      - 404
  until: uri_output.status == 404
  retries: 24    # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5       # Every 5 seconds
  register: uri_output

# todo: check if exists?
- name: Create default mesh-network 'servnet'
  uri:
    validate_certs: no
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks'
    method: POST
    body:
      netid: servnet
      addressrange: 10.92.0.0/24
      addressrange6: fd92::/64
    body_format: json
    headers:
      Authorization: 'Bearer {{ netmaker_creds.master_key }}'
      Content-Type: application/json
  when: "inventory_hostname == groups['netmaker_server'][0]"
  register: default_mesh
  until: "default_mesh is not failed"
  retries: 2
  delay: 10

# todo: check if exists?
- name: Create token for default-network
  uri:
    validate_certs: no
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks/servnet/keys'    # todo: do implementation
    method: POST
    body:
      name: ""
      uses: 0
    body_format: json
    headers:
      Authorization: 'Bearer {{ netmaker_creds.master_key }}'
      Content-Type: application/json
  when: "inventory_hostname == groups['netmaker_server'][0]"
  register: default_mesh_key
  until: "default_mesh_key is not failed"
  retries: 2
  delay: 10

@ -0,0 +1,18 @@
- name: Deploy nginx configs
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dst }}"
  loop:
    - { src: 'nginx/proxy.conf.template', dst: '/opt/netmaker_server/nginx/conf/conf.d/proxy.conf' }
    - { src: 'nginx/passthrough.conf.template', dst: '/opt/netmaker_server/nginx/conf/stream.d/passthrough.conf' }

- name: Start nginx service
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d nginx"
  register: command
  failed_when: command.rc != 0

- name: Waiting for nginx to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 51820
    state: started

@ -0,0 +1,9 @@
- name: Install wireguard
  package:
    name:
      - wireguard
    state: latest

- name: Check if default-ipv4-address is private
  set_fact:
    # Rough prefix-match for private ranges: 10/8, 192.168/16, 172.16/12, and 100/8 (approximation of CGNAT 100.64/10)
    private_ipv4_address: "{{ ansible_facts.default_ipv4.address | regex_search('^((10)|(192\\.168)|(172\\.((1[6-9])|(2[0-9])|(3[0-1])))|(100))\\.') }}"

@ -0,0 +1,42 @@
- name: Deploy rqlite config
  ansible.builtin.template:
    src: rqlite-config.json.template
    dest: /opt/netmaker_server/rqlite/config.json

- name: Start rqlite service for 1st-node
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
  register: command
  failed_when: command.rc != 0
  when: "inventory_hostname == groups['netmaker_server'][0]"

- name: Waiting for rqlite to accept connections on 1st-node
  uri:
    url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
    return_content: yes
    validate_certs: no
    status_code:
      - 401
  until: uri_output.status == 401
  retries: 24    # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5       # Every 5 seconds
  register: uri_output
  when: "inventory_hostname == groups['netmaker_server'][0]"

- name: Start rqlite service for other nodes
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
  register: command
  failed_when: command.rc != 0
  when: "inventory_hostname != groups['netmaker_server'][0]"

- name: Waiting for rqlite to accept connections on other nodes
  uri:
    url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
    return_content: yes
    validate_certs: no
    status_code:
      - 401
  until: uri_output.status == 401
  retries: 24    # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5       # Every 5 seconds
  register: uri_output
  when: "inventory_hostname != groups['netmaker_server'][0]"

@ -0,0 +1,127 @@
version: "3.4"

services:
  nginx:
    image: nginx
    restart: unless-stopped
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro    # Override nginx-config to add stream-import
      - ./nginx/conf/conf.d/:/etc/nginx/conf.d:ro      # conf.d
      - ./nginx/conf/stream.d/:/etc/nginx/stream.d:ro  # stream.d
      - ./certs:/certs:ro                              # SSL-certificates
    ports:
      - {{ netmaker_nginx.tls_port }}:443

  rqlite:    # Distributed sqlite-db
    image: rqlite/rqlite
    restart: unless-stopped
    hostname: "{{ ansible_facts.nodename }}"
    volumes:
      - "./rqlite/data:/rqlite/file"
      - "./rqlite/config.json:/config.json:ro"
      - "./certs:/certs:ro"
      - ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro    # Add CA to system-trust-store
    command: "
      -http-adv-addr {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
      -raft-addr [::]:4002
      -raft-adv-addr {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
      -node-encrypt
      -node-cert /certs/node.crt
      -node-key /certs/node.key
      -node-no-verify
      -auth /config.json
{% if inventory_hostname != groups['netmaker_server'][0] %}
      -join-as netmaker
      -join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker_server'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
      "
    # FIXME: /\ \/ Change http -> https

  netmaker:    # The Primary Server for running Netmaker
    image: gravitl/netmaker:v0.17.1
    depends_on:
      - rqlite
    cap_add:    # Necessary capabilities to set iptables when running in container
      - NET_ADMIN
      - NET_RAW
      - SYS_MODULE
    sysctls:
      - net.ipv4.ip_forward=1
      - net.ipv4.conf.all.src_valid_mark=1
      - net.ipv6.conf.all.disable_ipv6=0
      - net.ipv6.conf.all.forwarding=1
    restart: unless-stopped
    volumes:    # Volume mounts necessary for sql, coredns, and mqtt
      - ./dnsconfig/:/root/config/dnsconfig
      - ./mosquitto/data/:/etc/netmaker/
      - ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro    # Add CA to system-trust-store
    hostname: "{{ ansible_facts.nodename }}"
    environment:
      NODE_ID: "{{ ansible_facts.nodename }}"
      MASTER_KEY: "{{ netmaker_creds.master_key }}"    # The admin master key for accessing the API. Change this in any production installation.
{% if not private_ipv4_address and not netmaker_dynamicIp %}
      SERVER_HOST: "{{ ansible_facts.default_ipv4.address }}"    # Set to public IP of machine.
{% endif %}
      SERVER_NAME: "{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}"    # The domain/host IP indicating the mq broker address
      SERVER_HTTP_HOST: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}"    # Overrides SERVER_HOST if set. Useful for making HTTP available via different interfaces/networks.
      SERVER_API_CONN_STRING: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}"
      DISABLE_REMOTE_IP_CHECK: "off"    # If turned "on", Server will not set Host based on remote IP check. This is already overridden if SERVER_HOST is set. Turned "off" by default.
      DNS_MODE: "off"    # Enables DNS Mode, meaning all nodes will set hosts file for private dns settings.
      API_PORT: "8081"    # The HTTP API port for Netmaker. Used for API calls / communication from front end. If changed, need to change port of BACKEND_URL for netmaker-ui.
      REST_BACKEND: "on"    # Enables the REST backend (API running on API_PORT at SERVER_HTTP_HOST). Change to "off" to turn off.
      RCE: "off"    # Enables setting PostUp and PostDown (arbitrary commands) on nodes from the server. Off by default.
      CORS_ALLOWED_ORIGIN: "*"    # The "allowed origin" for API requests. Change to restrict where API requests can come from.
      DISPLAY_KEYS: "on"    # Show keys permanently in UI (until deleted) as opposed to 1-time display.
      DATABASE: "rqlite"
      SQL_CONN: "https://netmaker:{{ netmaker_creds.rqlite_password }}@{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}/"
      MQ_HOST: "mosquitto"    # the address of the mq server. If running from docker compose it will be "mq". Otherwise, need to input address. If using "host networking", it will find and detect the IP of the mq container.
      MQ_SERVER_PORT: "1883"    # the reachable port of MQ by the server - change if internal MQ port changes (or use external port if MQ is not on the same machine)
      MQ_PORT: "{{ netmaker_nginx.advertise_port }}"    # the reachable port of MQ - change if external MQ port changes (port on proxy, not necessarily the one exposed in docker-compose)
      MQ_ADMIN_PASSWORD: "{{ netmaker_creds.mq_admin_password }}"
      HOST_NETWORK: "off"    # whether or not host networking is turned on. Only turn on if configured for host networking (see docker-compose.hostnetwork.yml). Will set host-level settings like iptables.
      PORT_FORWARD_SERVICES: ""    # decide which services to port forward ("dns","ssh", or "mq")
      # this section is for OAuth
      AUTH_PROVIDER: ""    # "<azure-ad|github|google|oidc>"
      CLIENT_ID: ""        # "<client id of your oauth provider>"
      CLIENT_SECRET: ""    # "<client secret of your oauth provider>"
      FRONTEND_URL: ""     # "https://dashboard.<netmaker base domain>"
      AZURE_TENANT: ""     # "<only for azure, you may optionally specify the tenant for the OAuth>"
      OIDC_ISSUER: ""      # https://oidc.yourprovider.com - URL of oidc provider
      VERBOSITY: "1"       # logging verbosity level - 1, 2, or 3
      TELEMETRY: "off"     # Whether or not to send telemetry data to help improve Netmaker. Switch to "off" to opt out of sending telemetry.
    ports:
      - "51821-51830:51821-51830/udp"    # wireguard ports

  netmaker-ui:    # The Netmaker UI Component
    image: gravitl/netmaker-ui:v0.17.1
    depends_on:
      - netmaker
    links:
      - "netmaker:api"
    restart: unless-stopped
    environment:
      BACKEND_URL: "https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}"    # URL where UI will send API requests. Change based on SERVER_HOST, SERVER_HTTP_HOST, and API_PORT

  mosquitto:    # the MQTT broker for netmaker
    image: eclipse-mosquitto:2.0.11-openssl
    restart: unless-stopped
    volumes:
      - ./mosquitto/config:/mosquitto/config
      - ./mosquitto/data:/mosquitto/data
      - ./mosquitto/logs:/mosquitto/log
    depends_on:
      - netmaker
    command: ["/mosquitto/config/wait.sh"]
    environment:
      NETMAKER_SERVER_HOST: "http://netmaker:8081"

@ -0,0 +1,25 @@
stream {
    # Map target-hosts based on hostname
    map $ssl_preread_server_name $target_host {
        hostnames;    # Enable matching including prefix/suffix-mask

        {{ netmaker_ui.host }}.{{ netmaker_base_domain }}               127.0.0.1:8443;
        {{ netmaker_api.host }}.{{ netmaker_base_domain }}              127.0.0.1:8443;
        {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}       127.0.0.1:8443;

        {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}    127.0.0.1:8443;
        {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;

        default 127.0.0.1:1;
    }

    server {
        resolver 127.0.0.11;    # Explicitly set docker-resolver
        listen 443;

        ssl_preread on;
        proxy_pass $target_host;
    }
}

@ -0,0 +1,29 @@
map $host $proxy_name {
    hostnames;

    {{ netmaker_ui.host }}.{{ netmaker_base_domain }}             netmaker-ui:80;
    {{ netmaker_api.host }}.{{ netmaker_base_domain }}            netmaker:8081;
    {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}     mosquitto:8883;

    {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}  rqlite:4001;

    default 444;
}

server {
    resolver 127.0.0.11;    # Explicitly set docker-resolver
    listen 8443 ssl;

    ssl_certificate /certs/node.crt;
    ssl_certificate_key /certs/node.key;

    if ($proxy_name = 444) {
        return 444;
    }

    location / {
        proxy_pass http://$proxy_name;
    }
}

@ -0,0 +1,5 @@
[{
    "username": "netmaker",
    "password": "{{ netmaker_creds.rqlite_password }}",
    "perms": ["all"]
}]

@ -0,0 +1 @@
ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888

@ -5,3 +5,5 @@
- import_tasks: ./packages.yml
- import_tasks: ./aliases.yml
- import_tasks: ./networking.yml

@ -0,0 +1,22 @@
- name: Set sysctl settings for ip-forwarding
  copy:
    dest: "/etc/sysctl.d/ip-forwarding.conf"
    content: |
      net.ipv4.ip_forward = 1
      net.ipv6.conf.all.forwarding = 1
  notify: reload_sysctl

- name: Set sysctl settings for ipv6-address-generation
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
    content: |
      net.ipv6.conf.default.addr_gen_mode = 2
      net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
  notify: reload_sysctl

- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
    content: |
      net.ipv6.conf.all.accept_ra = 2
  notify: reload_sysctl

@ -0,0 +1,41 @@
---
kubernetes:
  ipPool:
    ipv4:
      # Minimum: /24
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
    ipv6:
      # Minimum: /120
      cluster_cidr: fd42::/56
      service_cidr: fd43::/112
    # Interface to grab node-IPv4/v6 from
    nodeIp_interface: <interface to grab nodeIp from>

  control_plane:
    dns_name: <control-plane dns-reachable-name>

  token: <shared token for nodes to join>

  network:
    # One of [flannel, calico]
    plugin: calico
    # Helper for networking
    helper:
      # https://github.com/Ruakij/RoutingTableToWg
      # Translates received-routes from e.g. BGP to wireguard-allowedips
      # Helpful, when nodeIp_interface is a wireguard-interface
      routingtabletowg: false

  # One of [traefik-ingress]
  ingress_controller: traefik-ingress

  config_extra:
    # etcd-tuning
    # heartbeat: 0.5-1.5x of rtt
    # election: 10x- of heartbeat
    etcd-arg:
      heartbeat-interval: 500
      election-timeout: 5000
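
As a sketch, a host might override these defaults like this (interface, DNS name, and token are placeholders, not values from the repo):

kubernetes:
  ipPool:
    nodeIp_interface: wt0       # assumption: the netbird/wireguard interface
  control_plane:
    dns_name: k8s.example.org   # placeholder
  token: <shared secret>        # placeholder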

@ -0,0 +1,33 @@
@startuml
rectangle "Control-Plane" as control_plane {
    rectangle "Node" as sn1 {
        component "netclient" as sn1_netclient
        component etcd as sn1_etcd
        component "k3s-server" as sn1_k3s_server

        sn1_k3s_server - sn1_etcd
    }
    rectangle "Node" as sn2 {
        component "netclient" as sn2_netclient
        component etcd as sn2_etcd
        component "k3s-server" as sn2_k3s_server

        sn2_k3s_server - sn2_etcd
    }

    sn1_netclient -- sn2_netclient
    sn1_etcd -- sn2_etcd
}

rectangle "Workers" {
    rectangle "Node" as an1 {
        component "netclient" as an1_netclient
        component "k3s-agent" as an1_k3s_agent
    }
}
@enduml

@ -0,0 +1,35 @@
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
disabled_plugins = []
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true

@ -0,0 +1,19 @@
- name: reload_sysctl
  command: sysctl --system

- name: restart_containerd
  ansible.builtin.service:
    name: containerd
    state: restarted

- name: reload_networking
  service:
    name: networking
    state: restarted
  async: 5
  poll: 0
  notify: wait_for_connection

- name: wait_for_connection
  wait_for_connection:
    delay: 5

@ -0,0 +1,30 @@
- name: Create k3s-folder
  ansible.builtin.file:
    path: /etc/rancher/k3s/
    state: directory
    mode: '0755'

- name: Deploy k3s config
  ansible.builtin.template:
    src: k3s/{{ type }}/config.yaml.jinja2
    dest: /etc/rancher/k3s/config.yaml
  register: config

- name: Download install-script
  get_url:
    url: https://get.k3s.io
    dest: /root/k3s_install.sh
    mode: '744'
  # todo: update when file changed?

- import_tasks: ./install/server/setup_network.yml
  when: "type == 'server'"
- import_tasks: ./install/server/install_helm.yml
  when: "type == 'server'"
- import_tasks: ./install/server/install_k3s.yml
  when: "type == 'server'"

- import_tasks: ./install/agent/install_k3s.yml
  when: "type == 'agent'"

@ -0,0 +1,12 @@
- name: Install K3s agent
  command: /root/k3s_install.sh {{ type }}
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

- name: Make sure service is started / restarted on config change
  service:
    name: k3s-agent
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"

@ -0,0 +1,17 @@
- name: Add Balto key
  apt_key:
    url: https://baltocdn.com/helm/signing.asc
    state: present

- name: Add Balto Repository
  apt_repository:
    repo: "deb https://baltocdn.com/helm/stable/debian/ all main"
    state: present
    filename: kubernetes
    update_cache: yes

- name: Install helm
  package:
    name:
      - helm
    state: latest

@ -0,0 +1,55 @@
- name: Install K3s-server for 1st-node
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname == groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"

- name: Make sure service is started / restarted on config change
  service:
    name: k3s
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
  when: "inventory_hostname == groups['kubernetes'][0]"

- name: Waiting for K3s-server to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname == groups['kubernetes'][0]"

- name: Install K3s-server for other nodes
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname != groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

- name: Make sure service is started / restarted on config change
  service:
    name: k3s
    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
  when: "inventory_hostname != groups['kubernetes'][0]"

- name: Waiting for K3s-server to accept connections on other nodes
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname != groups['kubernetes'][0]"

#- name: Add Kubernetes environment-vars to /etc/profile.d/
#  blockinfile:
#    path: /etc/profile.d/k3s-bin.sh
#    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
#    block: |
#      export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
#    create: true

- name: Deploy calico
  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
  when: "kubernetes.network.plugin == 'calico'"

- name: Deploy network-helpers
  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml

@ -0,0 +1,19 @@
- name: Deploy calico operator
  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true
  failed_when:
    - "command.rc == 1 and 'AlreadyExists' not in command.stderr"

- name: Deploy calico resource template
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
    dest: /root/calico-ressource.yml
  run_once: true

- name: Deploy calico resource
  command: kubectl apply -f /root/calico-ressource.yml
  register: command
  changed_when: "'created' in command.stdout"
  run_once: true

@ -0,0 +1,7 @@
- name: Deploy service-file for routing-table to wireguard-translation
  ansible.builtin.template:
    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
    mode: u=rw,g=r,o=r
  run_once: true
  when: "kubernetes.network.helper.routingtabletowg"

@ -0,0 +1,6 @@
- name: Set control-plane-dns-endpoint towards local-ip
  blockinfile:
    path: /etc/hosts
    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
    block: |
      {{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}

@ -0,0 +1,4 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./install.yml

@ -0,0 +1,42 @@
#- name: Load br_netfilter kernel-module
#  modprobe:
#    name: br_netfilter
#    state: present

- name: Set sysctl settings for iptables bridged traffic
  copy:
    dest: "/etc/sysctl.d/kubernetes.conf"
    content: |
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      net.ipv4.conf.all.forwarding = 1
      net.ipv6.conf.all.forwarding = 1
  notify: reload_sysctl

#- name: Disable swap
#  command: swapoff -a

- name: Install required packages
  package:
    name:
      #- containerd
      #- iptables
      # For Longhorn:
      - nfs-common
      - open-iscsi
    state: latest

- import_tasks: ./prerequisites/containerd.yml

- name: Gather interface-name
  set_fact:
    interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"

- name: Getting nodeIp-data from interface
  set_fact:
    nodeip_ipv4: "{{ ansible_facts[interface].ipv4.address }}"
    nodeip_ipv6: "{{ ansible_facts[interface].ipv6[0].address if ansible_facts[interface].ipv6 is defined }}"

- name: Run handlers to reload configurations
  meta: flush_handlers

@ -0,0 +1,24 @@
- name: Check if containerd-service exists & is started
  service:
    name: containerd
    state: started
  ignore_errors: true
  register: containerd_status

- name: Install containerd when not exists
  package:
    name:
      - containerd
  when: containerd_status is failed

- name: Create containerd config-folder
  file:
    path: /etc/containerd
    state: directory

- name: Deploy containerd-config
  ansible.builtin.copy:
    src: containerd_config.toml
    dest: /etc/containerd/config.toml
    mode: u=rw,g=r,o=r
  notify: restart_containerd

@ -0,0 +1,18 @@
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
token: '{{ kubernetes.token }}'

{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
{% endif %}

## Label
# Region & DC
node-label:
{% if region is defined %}
  - topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
  - topology.kubernetes.io/zone={{ zone }}
{% endif %}

@ -0,0 +1,49 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
{% endif %}
token: '{{ kubernetes.token }}'

tls-san:
  - {{ kubernetes.control_plane.dns_name }}

# Networking
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
{% endif %}
egress-selector-mode: disabled

# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}

# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}

## Label
# Region & DC
node-label:
{% if region is defined %}
  - topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
  - topology.kubernetes.io/zone={{ zone }}
{% endif %}

{{ kubernetes.config_extra | to_yaml }}
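
Rendered for the first control-plane node under assumed values (nodeip_ipv4 10.0.0.1, no IPv6, calico as plugin, traefik-ingress kept, region/zone unset), the template above would produce roughly:

cluster-init: true
token: '<shared token>'
tls-san:
  - k8s.example.org
node-ip: 10.0.0.1
cluster-cidr: 10.42.0.0/16
service-cidr: 10.43.0.0/16
egress-selector-mode: disabled
disable-network-policy: true
flannel-backend: none
node-label:    # empty when region/zone are unset
etcd-arg:
  heartbeat-interval: 500
  election-timeout: 5000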

@ -0,0 +1,34 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
      - blockSize: 122
        cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
{% endif %}

---

# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

@ -0,0 +1,45 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: routingtabletowg
  namespace: calico-system
  labels:
    app: routingtabletowg
spec:
  selector:
    matchLabels:
      app: routingtabletowg
  template:
    metadata:
      labels:
        app: routingtabletowg
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      hostNetwork: true
      containers:
        - name: routingtabletowg
          image: "ruakij/routingtabletowg:0.2.0"
          env:
            - name: INTERFACE
              value: {{ kubernetes.ipPool.nodeIp_interface }}
            - name: FILTER_PROTOCOL
              value: bird
            - name: PERIODIC_SYNC
              value: '300'
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          resources:
            requests:
              cpu: 10m
              memory: 10Mi
            limits:
              cpu: 20m
              memory: 20Mi
---

@ -0,0 +1,6 @@
netbird_client:
  # Key and url to join a network
  # leave empty to ignore
  join_network:
    setup_key:
    management_url:

@ -0,0 +1,26 @@
- name: Install Packages
  #when: docker_file.stat.exists == False
  package:
    name:
      - ca-certificates
      - curl
      - gnupg

- name: Add netbird-key
  apt_key:
    url: https://pkgs.wiretrustee.com/debian/public.key
    state: present

- name: Add netbird-repository
  apt_repository:
    repo: "deb https://pkgs.wiretrustee.com/debian stable main"
    state: present
    filename: netbird
    update_cache: yes

- name: Install wireguard & netbird
  package:
    name:
      - wireguard
      - netbird
    state: latest

@ -0,0 +1,16 @@
- name: Join netbird-network
  command: "netbird up --management-url {{ netbird_client.join_network.management_url }} --setup-key {{ netbird_client.join_network.setup_key }}"
  when: "netbird_client.join_network.setup_key is defined"
  failed_when: command.rc != 0
  changed_when: "'Connected' in command.stdout"
  register: command

- name: Wait for netbird-interface to exist
  wait_for:
    path: "/sys/class/net/wt0"
    state: present
  when: command.changed

- name: Gather facts to get changes
  ansible.builtin.gather_facts:
  when: command.changed

@ -0,0 +1,4 @@
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml

@ -0,0 +1,29 @@
nftables:
  # Rules to add
  # Handled as templates
  # Creates separate files for each entry.
  # The identifier is necessary for ansible to be able to merge the keys (when 'hash_behaviour = merge')
  # rule-ids have to be unique across files and raw
  rules:
    # Files with rules to add
    files:
      #'<group_identifier>': '<relative-location>'
      #'<group_identifier>':
      #  main: <relative-location>
      #  '<identifier>': '<relative-location>'
    # Rules to add
    raw:
      #'<group_identifier>': '<content>'
      #'<group_identifier>':
      #  main: <content>
      #  '<identifier>': '<content>'

  # Decides if /etc/nftables.conf is applied, or only the separate files which have changed
  # Separate changes require the files to be self-tidying to not end up with duplicate rules,
  # e.g.
  #   table ip mytable
  #   flush table ip mytable
  #   delete table ip mytable
  #   table ip mytable { ... }
  apply_global: false
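
A hypothetical host configuration for this structure (group-identifiers, file names, and rule bodies are made up for illustration):

nftables:
  rules:
    files:
      forward: 'templates/forward.nft'    # -> /etc/nftables/ansible-managed/forward.nft
    raw:
      nat:
        main: |                           # -> /etc/nftables/ansible-managed/nat.nft (plus include-line)
          table ip nat
          flush table ip nat
          delete table ip nat
          table ip nat { ... }
        masquerade: |                     # -> /etc/nftables/ansible-managed/nat/masquerade.nft
          # further self-tidying rules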

@ -0,0 +1,8 @@
- name: Load group rules
  command: "nft -f /etc/nftables/ansible-managed/{{ item }}.nft"
  loop: "{{ combined_rules | list }}"
  when: not nftables.apply_global

- name: Load global rule file
  command: "nft -f /etc/nftables.conf"
  when: nftables.apply_global

@ -0,0 +1,11 @@
- name: Deploying group files
  include_tasks: ./per-group-template-file.yml
  with_items:
    - "{{ nftables.rules.files | list }}"

- name: Deploying group raw-files
  include_tasks: ./per-group-template.yml
  with_items:
    - "{{ nftables.rules.raw | list }}"

- include_tasks: ./remove-files.yml

@ -0,0 +1,51 @@
- set_fact:
    group_identifier: "{{ item }}"
    value: "{{ nftables.rules.files[item] }}"
  when: "item is defined"

#'<group_identifier>': '<relative-location>'
- block:
    - name: Create main rule file
      template:
        src: "{{ value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
  when: value is string

#'<group_identifier>':
#  main: <relative-location>
#  '<identifier>': '<relative-location>'
- block:
    - set_fact:
        items: "{{ nftables.rules.files[item] }}"

    - block:
        - name: Create main rule file
          template:
            src: "{{ items['main'] }}"
            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"

        - name: Include rule files
          lineinfile:
            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
      when: items['main'] is defined

    - name: Create group folder
      file:
        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
        state: directory
      when: items|length > 0

    - name: Create included rule files
      template:
        src: "{{ fileItem.value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ fileItem.key }}.nft"
      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
      loop_control:
        loop_var: fileItem
  when: value is mapping

@ -0,0 +1,48 @@
- set_fact:
    group_identifier: "{{ item }}"
    value: "{{ nftables.rules.raw[item] }}"
  when: "item is defined"

#'<group_identifier>': '<content>'
- block:
    - name: Create main rule file
      copy:
        content: "{{ value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
  when: value is string

#'<group_identifier>':
#  main: <content>
#  '<identifier>': '<content>'
- block:
    - set_fact:
        items: "{{ nftables.rules.raw[item] }}"

    - block:
        - name: Create main rule file
          copy:
            content: "{{ items['main'] }}"
            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"

        - name: Include rule files
          lineinfile:
            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
      when: items['main'] is defined

    - name: Create group folder
      file:
        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
        state: directory
      when: items|length > 0

    - name: Create included rule files
      copy:
        content: "{{ included_item.value }}"
        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ included_item.key }}.nft"
      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
      loop_control:
        loop_var: included_item
  when: value is mapping

@ -0,0 +1,4 @@
- name: Install Packages
  package:
    name:
      - nftables

@ -0,0 +1,7 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./setup-packages.yml
- import_tasks: ./deploy-rules/main.yml
- import_tasks: ./apply-files.yml

@ -0,0 +1,13 @@
# Defaults if missing
- name: Set defaults if missing
  set_fact:
    nftables:
      rules:
        files: "{{ nftables.rules.files | default({}) | combine({}) }}"
        raw: "{{ nftables.rules.raw | default({}) | combine({}) }}"
    combined_rules: "{{ nftables.rules.raw | combine(nftables.rules.files, recursive=true) }}"

#- name: Check items for consistency
#  assert:
#    that: "{{ nftables.rules.files.values() | length }} + {{ nftables.rules.raw.values() | length }} == {{ combined_rules.values() | length }}"
#    fail_msg: "files and raw rules share the same identifier"
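
Given the illustrative 'forward' (files) and 'nat' (raw) groups shown in the defaults above, combined_rules would merge into one dict keyed by group-identifier, roughly:

# combined_rules (sketch, values abbreviated):
#   forward: 'templates/forward.nft'
#   nat:
#     main: '<content>'
#     masquerade: '<content>'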

@ -0,0 +1,21 @@
- name: Handle removed group files
  block:
    - find:
        paths: /etc/nftables/ansible-managed/
        file_type: 'any'
        excludes: '{% for item in combined_rules %}{{ item }},{{ item }}.nft,{% endfor %}'
        depth: 1
      register: removeFiles

    - file:
        path: "{{ fileItem.path }}"
        state: absent
      loop: "{{ removeFiles.files }}"
      loop_control:
        label: "{{ fileItem.path }}"
        loop_var: fileItem

- name: Handle removed included files per group
  include_tasks: ./remove-per-group.yml
  with_items:
    - "{{ combined_rules | list }}"

@ -0,0 +1,20 @@
- set_fact:
    group_identifier: "{{ item }}"
    group_items: "{{ combined_rules[item] }}"

- block:
    - find:
        paths: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
        file_type: 'any'
        excludes: '{% for item in group_items %}{{ item }}.nft,{% endfor %}'
      register: removeFiles

    - file:
        path: "{{ fileItem.path }}"
        state: absent
      loop: "{{ removeFiles.files }}"
      loop_control:
        label: "{{ fileItem.path }}"
        loop_var: fileItem
  when: group_items is mapping

@ -0,0 +1,15 @@
- name: Install nftables
  package:
    name:
      - nftables

- name: Create /etc/nftables/ansible-managed
  file:
    path: /etc/nftables/ansible-managed
    state: directory

- name: Include files in /etc/nftables/ansible-managed/ from /etc/nftables.conf
  blockinfile:
    path: /etc/nftables.conf
    marker: "# {mark} ANSIBLE MANAGED BLOCK - nftables"
    content: 'include "/etc/nftables/ansible-managed/*.nft"'

@ -0,0 +1,12 @@
wireguard_ipv6_converter:
  version: latest

  # see https://github.com/Ruakij/wg-ipv6-converter#31-environment
  setup:
    interface: wg0
    #ipv6_format: fc12::%02x%02x:%02x%02x/%d
    #filter_prefix: 100.100
    #recheck_interval: 60s
  service:
    #bindTo: netbird.service

@ -0,0 +1,11 @@
- name: Get architecture
  set_fact:
    arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'arm64' }}"
    versionUri: "{% if wireguard_ipv6_converter.version == 'latest' %}latest/download{% else %}download/{{ wireguard_ipv6_converter.version }}{% endif %}"

- name: Download binary
  get_url:
    url: https://github.com/Ruakij/wg-ipv6-converter/releases/{{ versionUri }}/wg-ipv6-converter_{{ arch }}
    dest: /usr/local/bin/wg-ipv6-converter
    mode: "744"
  register: deployDownload

@ -0,0 +1,3 @@
- import_tasks: ./deploy.yml
- import_tasks: ./setup-service.yml

@ -0,0 +1,27 @@
- name: Deploy service
  ansible.builtin.template:
    src: wg-ipv6-conv.service.jinja2
    dest: /etc/systemd/system/wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}.service
  register: serviceFile

- name: Enable service
  ansible.builtin.systemd:
    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
    daemon_reload: true
    enabled: true

- name: Start service if interface exists already
  ansible.builtin.service:
    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
    state: "{{ 'restarted' if deployDownload.changed or serviceFile.changed else 'started' }}"
  register: service
  when: "wireguard_ipv6_converter.setup.interface in ansible_interfaces"

- name: Pause for 5s to wait for program to have run
  ansible.builtin.pause:
    seconds: 5
  when: "service.changed"

- name: Gather facts to get changes
  ansible.builtin.gather_facts:
  when: "service.changed"

@ -0,0 +1,29 @@
[Unit]
Description=WireGuard IPv6 converter for {{ wireguard_ipv6_converter.setup.interface }}
{% if wireguard_ipv6_converter.service.bindTo is defined %}
BindsTo={{ wireguard_ipv6_converter.service.bindTo }}
After={{ wireguard_ipv6_converter.service.bindTo }}
{% endif %}

[Service]
Type=simple
{% if wireguard_ipv6_converter.service.bindTo is defined %}
ExecStartPre=/bin/sleep 10
{% endif %}
ExecStart=/usr/local/bin/wg-ipv6-converter
Restart=always
RestartSec=30

Environment="INTERFACE={{ wireguard_ipv6_converter.setup.interface }}"
{% if wireguard_ipv6_converter.setup.ipv6_format is defined %}
Environment="IPV6_FORMAT={{ wireguard_ipv6_converter.setup.ipv6_format }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.filter_prefix is defined %}
Environment="FILTER_PREFIX={{ wireguard_ipv6_converter.setup.filter_prefix }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.recheck_interval is defined %}
Environment="RECHECK_INTERVAL={{ wireguard_ipv6_converter.setup.recheck_interval }}"
{% endif %}

[Install]
WantedBy=multi-user.target