Compare commits


99 Commits

SHA1 Message Date
27e9d4a1ab Merge branch 'role_nftables' 2023-06-21 17:35:09 +02:00
755f9b2e1a Initial role 2023-05-21 20:57:43 +02:00
753f456ef3 Merge branch 'role_kubernetes-k3s' 2023-04-14 10:21:06 +02:00
093612f3a7 Change restart-check to started/restarted check 2023-04-14 10:20:16 +02:00
a92409c56f Add failed_when to deploy calico operator 2023-04-14 10:01:42 +02:00
f50e3ac33c Use first node's IP for joining cluster 2023-04-14 09:50:39 +02:00
668ff23ee6 Fix service task wrong usage 2023-04-14 09:42:46 +02:00
c2c6a2872f Fix conditional for changed after install 2023-04-12 23:12:10 +02:00
c1c7ec9e56 Remove workaround as k3s is now at 1.26 2023-04-12 22:39:43 +02:00
550f6868ff Fix old usage of network_plugin var 2023-04-12 20:49:38 +02:00
3fe288f6a5 Merge branch 'role_wireguard-ipv6-converter' 2023-04-12 20:40:01 +02:00
ab9220d042 Add pause & gather_facts at end when service was started 2023-04-12 20:39:49 +02:00
15ad7920d4 Merge branch 'role_wireguard-ipv6-converter' 2023-04-12 20:24:27 +02:00
7012e1ea2b Separate enable and re-/start service for fine-control 2023-04-12 20:23:08 +02:00
13ebd48c5d Add register for task download 2023-04-12 20:22:34 +02:00
1a76b94a46 Add download-version 2023-04-12 14:58:43 +02:00
1b765689e6 Make sure daemon is reloaded before service-start 2023-04-12 14:50:14 +02:00
5b607df2de Add checks if var is set 2023-04-12 14:47:02 +02:00
521b76453a Add start to naming 2023-04-12 14:45:55 +02:00
7f503d983a Fix download-location 2023-04-12 14:45:43 +02:00
b143d9c848 Fix checks 2023-04-12 14:45:31 +02:00
f95bcbc38d change defaults 2023-04-12 14:44:10 +02:00
248a3c08b8 Initial role-data 2023-04-12 13:36:14 +02:00
f2c86dc22d Merge branch 'role_kubernetes-k3s' 2023-04-05 20:20:03 +02:00
a79f2cac8a Merge branch 'role_netbird_client' 2023-04-05 20:19:44 +02:00
c8f90f0f8d Update calico 2023-04-05 20:15:31 +02:00
41570ea40d Create new block for network-stuff 2023-04-05 20:14:36 +02:00
a3c887748a Move network-helper to own file independent from calico 2023-04-05 20:14:12 +02:00
d113625fa8 Fix env-value not being string 2023-04-05 20:08:25 +02:00
dadd077723 Fix service and conditional 2023-04-05 14:13:22 +02:00
0d43d07ad4 Add extra-config option 2023-04-05 14:12:56 +02:00
d6f8f975bb Reload when config changed, but install already done 2023-04-05 13:32:14 +02:00
7c86a5d77d Add register for config 2023-04-05 13:30:22 +02:00
8c4e3c2401 Update routingtabletowg and use new sync feature 2023-03-30 14:54:49 +02:00
b46d35c8a5 Add labels 2023-03-30 14:54:28 +02:00
791ad96849 Add ipv6-check to calico deploy 2023-03-19 15:17:45 +01:00
fc3d9845d6 Fix non-deterministic node-selection by using group 2023-03-19 15:17:02 +01:00
590b75ac23 Add quotes to token-usage for special chars 2023-03-19 14:01:19 +01:00
0c82504299 Separate getting name and ips to fix bug easily 2023-03-19 14:00:46 +01:00
2fee9a1747 Only enable ipv6 when available and activated 2023-03-19 14:00:15 +01:00
fb44c39969 Add install of often-used packages 2023-03-19 13:58:47 +01:00
5452303992 Remove netmaker from dependency 2023-03-19 13:58:05 +01:00
4321d78cf8 Add comments to variables 2023-03-19 13:57:57 +01:00
f9a859e95c Add ingress-option 2023-03-19 13:56:26 +01:00
fd302e4ebc Move regather facts to join when changed 2023-03-19 13:47:01 +01:00
b5729caa0e Add wait for interface to come up 2023-03-19 13:46:44 +01:00
dca40ed835 Remove throttle 2023-03-19 13:46:21 +01:00
95ddd04a86 Fix join command 2023-03-19 10:39:06 +01:00
911bc47acb Initial role stuff 2023-03-17 15:58:49 +01:00
e5920b3ddf Add network-plugin option 2023-03-17 15:57:48 +01:00
f3e381aca3 Move netmaker to deprecated 2023-01-18 13:23:51 +01:00
233eadaf40 Merge branch 'role_netmaker_server' 2023-01-10 09:47:34 +01:00
109a09052d Update to 0.17.1
This also sets up tls-termination for mosquitto
2023-01-10 09:46:55 +01:00
4ea9492ca3 Change host group-name 2023-01-10 09:46:01 +01:00
e5ebc2ad5f Merge branch 'role_common' 2022-11-02 16:41:10 +01:00
98c51c6fc1 Merge branch 'role_kubernetes-k3s' 2022-11-02 16:40:52 +01:00
6b59bf6c75 Merge branch 'role_netmaker' 2022-11-02 16:40:36 +01:00
1b2af7cf6c Merge branch 'role_netmaker_server' 2022-11-02 16:40:25 +01:00
d9cf3d2066 Re-gatherfacts at the end for other plays 2022-11-02 16:38:13 +01:00
f42bce9b6b Add changed-detection 2022-11-02 16:37:53 +01:00
0fc5dbb791 Initial role-data 2022-11-02 16:29:55 +01:00
975746e7d7 Add IPv6-network to common 2022-11-02 10:27:06 +01:00
a27ca2c37a Create dedicated docs-folder and move files there 2022-10-27 01:24:30 +02:00
247fdec7ae fixme: hotfix for multi-master netmaker-server
netmaker doesn't handle concurrent joins to different server-nodes well and will duplicate addresses
2022-10-27 01:23:45 +02:00
9cb2e88193 Merge branch 'role_netmaker' 2022-10-27 01:16:57 +02:00
25ceb0f456 Merge branch 'role_netmaker_server' 2022-10-27 01:16:55 +02:00
0d8d5e8528 Merge branch 'role_docker' 2022-10-27 00:29:06 +02:00
06eb8456ab Merge branch 'role_zsh' 2022-10-27 00:27:42 +02:00
657f861696 Merge branch 'role_common' 2022-10-27 00:27:32 +02:00
fcc4f1ed18 Fix task 2022-10-21 15:40:00 +02:00
de0e220004 Add defaults-var-file 2022-10-21 15:32:20 +02:00
f9cc97a8f2 Add CA to trust-store 2022-10-21 15:32:08 +02:00
811fc22eef Delete unnecessary task-file 2022-10-21 15:31:37 +02:00
cdd4c9babb Remove CA in args; rqlite will use the system trust-store then 2022-10-21 14:49:35 +02:00
d553f604a9 Add own certs to mosquitto 2022-10-21 14:48:29 +02:00
806b41b73e Fix proxy-protocol being expected 2022-10-21 14:22:38 +02:00
ec98188a24 Fix variable name 2022-10-21 14:20:26 +02:00
06bdae380b Revert proxy-protocol-matching 2022-10-21 11:45:12 +02:00
83b50c10cd Use new variables and fix requests 2022-10-21 08:42:37 +02:00
3890007042 Use more specific hostnames in cert 2022-10-21 08:42:11 +02:00
bb3d363094 Created nginx-config-file-templates 2022-10-21 08:33:08 +02:00
7453f1e616 Move variables to defaults-folder 2022-10-21 08:31:05 +02:00
e022a6e9f0 Restructure to make better looking 2022-10-20 08:34:35 +02:00
772dc3a620 Move TLS-point outside of netmaker-system 2022-10-20 08:32:49 +02:00
6d5c86927d Make diagram more readable 2022-10-18 12:33:25 +02:00
c94168fb30 Comment-in connection-check; todo: change check to http 2022-10-17 22:49:20 +02:00
6168ba2b0a Add missing dependency 2022-10-17 22:48:58 +02:00
e4a2c5dd2f Remove ports and add/change advertised addresses and ports 2022-10-17 22:48:49 +02:00
315f5a1805 Fix private_ip checking 2022-10-17 22:47:10 +02:00
d2d8ebd8cc Add missing nginx-file 2022-10-17 22:46:41 +02:00
dd87d5e724 Move cert-generation outside 2022-10-17 22:46:20 +02:00
86e6317e28 Fix naming 2022-10-17 22:45:24 +02:00
8fddfc532f Add nginx as service 2022-10-17 22:43:57 +02:00
f733543ae1 Fix architecture-diagram 2022-10-17 21:30:50 +02:00
526cf66bd7 Add chart for architecture 2022-10-17 21:19:10 +02:00
4cb418e2b6 Add role netmaker (netclient) 2022-10-17 14:51:52 +02:00
b593a2874a Add role netmaker_server 2022-10-17 14:48:02 +02:00
6adb029849 Add role docker 2022-10-17 14:10:13 +02:00
e7b0549468 Add role common 2022-10-17 12:59:33 +02:00
72 changed files with 1550 additions and 0 deletions


@ -0,0 +1,4 @@
netclient:
# Token to join default-network
# leave empty to ignore
join_network_token:


@ -0,0 +1,3 @@
---
dependencies: []
#- role: docker


@ -0,0 +1,4 @@
- name: Deploy CA Certificate
ansible.builtin.copy:
src: secret_files/netmaker_server/ca/ca.crt
dest: /etc/ssl/certs/netmaker-ca.pem


@ -0,0 +1,25 @@
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- gpg
- gpg-agent
- name: Add netmaker-key
apt_key:
url: https://apt.netmaker.org/gpg.key
state: present
- name: Add netmaker-repository
apt_repository:
repo: "deb https:apt.netmaker.org stable main"
state: present
filename: netmaker
update_cache: yes
- name: Install wireguard & netclient
package:
name:
- wireguard
- netclient
state: latest


@ -0,0 +1,7 @@
- name: Join netmaker-network
when: "netclient.join_network_token is defined"
command: "netclient join -t {{ netclient.join_network_token }}"
failed_when: command.rc != 0
changed_when: "'starting wireguard' in command.stdout"
register: command
throttle: 1


@ -0,0 +1,8 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml
- name: Gather facts to get changes
ansible.builtin.gather_facts:


@ -0,0 +1,5 @@
netmaker_creds:
rqlite_password:
mq_admin_password:
master_key:


@ -0,0 +1,28 @@
# Overwrite for specific nodes to force dynamic-ip (disable setting public-ip and forces external lookup for public-ip)
# When false, will check itself for dynamic-ip (based on private-ip)
netmaker_dynamicIp: false
netmaker_nginx:
# Listen-port
tls_port: 51820
# Advertise-Port for services
# (must also be reachable by internal services!)
advertise_port: 51820
# This is the base-domain used for generating hostnames for services
netmaker_base_domain:
# host + base_domain
netmaker_api:
host: netmaker-api
netmaker_ui:
host: netmaker-ui
# MQTT-broker
netmaker_broker:
tls_host: netmaker-broker
# host + node_hostname
netmaker_rqlite:
http_host: netmaker-rqlite-http
cluster_host: netmaker-rqlite-cluster
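For orientation: with a hypothetical netmaker_base_domain of example.org, the host entries above compose to service-FQDNs like netmaker-api.example.org, netmaker-ui.example.org and netmaker-broker.example.org, while the rqlite hosts are prefixed to the node's hostname instead (e.g. netmaker-rqlite-http.node1). A minimal group_vars sketch with placeholder values:

netmaker_base_domain: example.org
netmaker_creds:
  rqlite_password: <generated secret>
  mq_admin_password: <generated secret>
  master_key: <generated secret>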


@ -0,0 +1,37 @@
@startuml
interface ng_TLS
component netmaker_server {
component nginx {
component ng_stream
component ng_http
ng_stream -up- ng_TLS
ng_stream -right-> ng_http : tls-termination
}
component nm_ui
nm_ui -up- nm_ui_http
ng_http -down-( nm_ui_http
component Mosquitto
Mosquitto -up- mq_plain
Mosquitto -up- mq_tls
ng_stream -down-( mq_tls
component rqlite
rqlite -up- rq_http
rqlite -up- rq_cluster
ng_stream -down-( rq_cluster
ng_http -down-( rq_http
component nm_api
nm_api -down- nm_api_http
ng_http --( nm_api_http
nm_api .up.( ng_TLS : db-connection to rqlite-master
nm_api --( mq_plain
}
@enduml


@ -0,0 +1,12 @@
per_listener_settings false
listener 8883
protocol websockets
allow_anonymous false
listener 1883
protocol websockets
allow_anonymous false
plugin /usr/lib/mosquitto_dynamic_security.so
plugin_opt_config_file /mosquitto/data/dynamic-security.json


@ -0,0 +1,23 @@
#!/bin/ash
wait_for_netmaker() {
echo "SERVER: ${NETMAKER_SERVER_HOST}"
until curl --output /dev/null --silent --fail --head \
--location "${NETMAKER_SERVER_HOST}/api/server/health"; do
echo "Waiting for netmaker server to startup"
sleep 1
done
}
main(){
# wait for netmaker to startup
apk add curl
wait_for_netmaker
echo "Starting MQ..."
# Run the main container command.
/docker-entrypoint.sh
/usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
}
main "${@}"


@ -0,0 +1,33 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
include /etc/nginx/stream.d/*.conf;


@ -0,0 +1,3 @@
---
# dependencies:
# - role: docker


@ -0,0 +1,40 @@
- name: Generate PrivateKey
community.crypto.openssl_privatekey:
path: /opt/netmaker_server/certs/node.key
owner: 1883 # Set owner to mosquitto-user (all other containers seem to run as root)
- name: Generate Certificate-Signing-Request from privateKey
community.crypto.openssl_csr:
path: /opt/netmaker_server/certs/node.csr
privatekey_path: /opt/netmaker_server/certs/node.key
common_name: "{{ ansible_facts.nodename }}"
subject_alt_name:
"DNS:{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }},\
DNS:{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }},\
DNS:{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }},\
DNS:{{ netmaker_api.host }}.{{ netmaker_base_domain }},\
DNS:{{ netmaker_ui.host }}.{{ netmaker_base_domain }}"
- name: Fetch CSR
ansible.builtin.fetch:
src: /opt/netmaker_server/certs/node.csr
dest: tmp_files/
- name: Sign CSR locally with CA
local_action: community.crypto.x509_certificate
args:
path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
csr_path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.csr
ownca_path: secret_files/netmaker_server/ca/ca.crt
ownca_privatekey_path: secret_files/netmaker_server/ca/ca.key
provider: ownca
- name: Copy Signed Certificate
ansible.builtin.copy:
src: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
dest: /opt/netmaker_server/certs/node.crt
- name: Copy CA Certificate
ansible.builtin.copy:
src: secret_files/netmaker_server/ca/ca.crt
dest: /opt/netmaker_server/certs/ca.crt


@ -0,0 +1,20 @@
- import_tasks: ./prerequisites.yml
- name: Copy folder-structure
ansible.builtin.copy:
src: opt/netmaker_server
dest: /opt/
mode: preserve
- name: Deploy compose file
ansible.builtin.template:
src: docker-compose.yml.template
dest: /opt/netmaker_server/docker-compose.yml
- import_tasks: ./certs.yml
- import_tasks: ./nginx.yml
- import_tasks: ./rqlite.yml
- import_tasks: ./netmaker.yml


@ -0,0 +1,57 @@
- name: Start rest of netmaker-services
command: "docker-compose --project-directory /opt/netmaker_server/ up -d"
register: command
failed_when: command.rc != 0
- name: Wait for netmaker-api to become available
uri:
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}'
return_content: yes
validate_certs: no
status_code:
- 404
until: uri_output.status == 404
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
# todo: check if exists?
- name: Create default mesh-network 'server'
uri:
validate_certs: no
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks'
method: POST
body:
netid: servnet
addressrange: 10.92.0.0/24
addressrange6: fd92::/64
body_format: json
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker_server'][0]"
register: default_mesh
until: "default_mesh is not failed"
retries: 2
delay: 10
# todo: check if exists?
- name: Create token for default-network
uri:
validate_certs: no
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks/servnet/keys' # todo: do implementation
method: POST
body:
name: ""
uses: 0
body_format: json
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker_server'][0]"
register: default_mesh_key
until: "default_mesh_key is not failed"
retries: 2
delay: 10


@ -0,0 +1,18 @@
- name: Deploy nginx configs
template:
src: "{{item.src}}"
dest: "{{item.dst}}"
loop:
- { src: 'nginx/proxy.conf.template', dst: '/opt/netmaker_server/nginx/conf/conf.d/proxy.conf' }
- { src: 'nginx/passthrough.conf.template', dst: '/opt/netmaker_server/nginx/conf/stream.d/passthrough.conf' }
- name: Start nginx service
command: "docker-compose --project-directory /opt/netmaker_server/ up -d nginx"
register: command
failed_when: command.rc != 0
- name: Waiting for nginx to accept connections
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 51820
state: started


@ -0,0 +1,9 @@
- name: Install wireguard
package:
name:
- wireguard
state: latest
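# The regex below matches the RFC1918 prefixes (10/8, 172.16/12, 192.168/16)
# plus 100/8 (which covers CGNAT 100.64/10); an empty match leaves the fact
# as an empty string (falsy), marking the default address as public.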
- name: Check if default-ipv4-address is private
set_fact:
private_ipv4_address: "{{ ansible_facts.default_ipv4.address | regex_search('^((10)|(192\\.168)|(172\\.((1[6-9])|(2[0-9])|(3[0-1])))|(100))\\.') }}"


@ -0,0 +1,42 @@
- name: Deploy rqlite config
ansible.builtin.template:
src: rqlite-config.json.template
dest: /opt/netmaker_server/rqlite/config.json
- name: Start rqlite service for 1st-node
command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
register: command
failed_when: command.rc != 0
when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on 1st-node
uri:
url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
return_content: yes
validate_certs: no
status_code:
- 401
until: uri_output.status == 401
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Start rqlite service for other nodes
command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
register: command
failed_when: command.rc != 0
when: "inventory_hostname != groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on other nodes
uri:
url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
return_content: yes
validate_certs: no
status_code:
- 401
until: uri_output.status == 401
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
when: "inventory_hostname != groups['netmaker_server'][0]"


@ -0,0 +1,127 @@
version: "3.4"
services:
nginx:
image: nginx
restart: unless-stopped
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro # Override nginx-config to add stream-import
- ./nginx/conf/conf.d/:/etc/nginx/conf.d:ro # conf.d
- ./nginx/conf/stream.d/:/etc/nginx/stream.d:ro # conf.d
- ./certs:/certs:ro # SSL-certificates
ports:
- {{ netmaker_nginx.tls_port }}:443
rqlite: # Distributed sqlite-db
image: rqlite/rqlite
restart: unless-stopped
hostname: "{{ ansible_facts.nodename }}"
volumes:
- "./rqlite/data:/rqlite/file"
- "./rqlite/config.json:/config.json:ro"
- "./certs:/certs:ro"
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
command: "
-http-adv-addr {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-raft-addr [::]:4002
-raft-adv-addr {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-node-encrypt
-node-cert /certs/node.crt
-node-key /certs/node.key
-node-no-verify
-auth /config.json
{% if inventory_hostname != groups['netmaker_server'][0] %}
-join-as netmaker
-join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker_server'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
"
# FIXME: /\ \/ Change http -> https
netmaker: # The Primary Server for running Netmaker
image: gravitl/netmaker:v0.17.1
depends_on:
- rqlite
cap_add:
- NET_ADMIN
- NET_RAW
- SYS_MODULE
sysctls:
- net.ipv4.ip_forward=1
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.all.forwarding=1
restart: unless-stopped
volumes: # Volume mounts necessary for sql, coredns, and mqtt
- ./dnsconfig/:/root/config/dnsconfig
- ./mosquitto/data/:/etc/netmaker/
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
hostname: "{{ ansible_facts.nodename }}"
environment: # Necessary capabilities to set iptables when running in container
NODE_ID: "{{ ansible_facts.nodename }}"
MASTER_KEY: "{{ netmaker_creds.master_key }}" # The admin master key for accessing the API. Change this in any production installation.
{% if not private_ipv4_address and not netmaker_dynamicIp %}
SERVER_HOST: "{{ ansible_facts.default_ipv4.address }}" # Set to public IP of machine.
{% endif %}
SERVER_NAME: "{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}" # The domain/host IP indicating the mq broker address
SERVER_HTTP_HOST: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}" # Overrides SERVER_HOST if set. Useful for making HTTP available via different interfaces/networks.
SERVER_API_CONN_STRING: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}"
DISABLE_REMOTE_IP_CHECK: "off" # If turned "on", Server will not set Host based on remote IP check. This is already overridden if SERVER_HOST is set. Turned "off" by default.
DNS_MODE: "off" # Enables DNS Mode, meaning all nodes will set hosts file for private dns settings.
API_PORT: "8081" # The HTTP API port for Netmaker. Used for API calls / communication from front end. If changed, need to change port of BACKEND_URL for netmaker-ui.
REST_BACKEND: "on" # Enables the REST backend (API running on API_PORT at SERVER_HTTP_HOST). Change to "off" to turn off.
RCE: "off" # Enables setting PostUp and PostDown (arbitrary commands) on nodes from the server. Off by default.
CORS_ALLOWED_ORIGIN: "*" # The "allowed origin" for API requests. Change to restrict where API requests can come from.
DISPLAY_KEYS: "on" # Show keys permanently in UI (until deleted) as opposed to 1-time display.
DATABASE: "rqlite"
SQL_CONN: "https://netmaker:{{ netmaker_creds.rqlite_password }}@{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}/"
MQ_HOST: "mosquitto" # the address of the mq server. If running from docker compose it will be "mq". Otherwise, need to input address. If using "host networking", it will find and detect the IP of the mq container.
MQ_SERVER_PORT: "1883" # the reachable port of MQ by the server - change if internal MQ port changes (or use external port if MQ is not on the same machine)
MQ_PORT: "{{ netmaker_nginx.advertise_port }}" # the reachable port of MQ - change if external MQ port changes (port on proxy, not necessarily the one exposed in docker-compose)
MQ_ADMIN_PASSWORD: "{{ netmaker_creds.mq_admin_password }}"
HOST_NETWORK: "off" # whether or not host networking is turned on. Only turn on if configured for host networking (see docker-compose.hostnetwork.yml). Will set host-level settings like iptables.
PORT_FORWARD_SERVICES: "" # decide which services to port forward ("dns","ssh", or "mq")
# this section is for OAuth
AUTH_PROVIDER: "" # "<azure-ad|github|google|oidc>"
CLIENT_ID: "" # "<client id of your oauth provider>"
CLIENT_SECRET: "" # "<client secret of your oauth provider>"
FRONTEND_URL: "" # "https://dashboard.<netmaker base domain>"
AZURE_TENANT: "" # "<only for azure, you may optionally specify the tenant for the OAuth>"
OIDC_ISSUER: "" # https://oidc.yourprovider.com - URL of oidc provider
VERBOSITY: "1" # logging verbosity level - 1, 2, or 3
TELEMETRY: "off" # Whether or not to send telemetry data to help improve Netmaker. Switch to "off" to opt out of sending telemetry.
ports:
- "51821-51830:51821-51830/udp" # wireguard ports
netmaker-ui: # The Netmaker UI Component
image: gravitl/netmaker-ui:v0.17.1
depends_on:
- netmaker
links:
- "netmaker:api"
restart: unless-stopped
environment:
BACKEND_URL: "https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}" # URL where UI will send API requests. Change based on SERVER_HOST, SERVER_HTTP_HOST, and API_PORT
mosquitto: # the MQTT broker for netmaker
image: eclipse-mosquitto:2.0.11-openssl
restart: unless-stopped
volumes:
- ./mosquitto/config:/mosquitto/config
- ./mosquitto/data:/mosquitto/data
- ./mosquitto/logs:/mosquitto/log
depends_on:
- netmaker
command: ["/mosquitto/config/wait.sh"]
environment:
NETMAKER_SERVER_HOST: "http://netmaker:8081"


@ -0,0 +1,25 @@
stream{
# Map target-hosts based on hostname
map $ssl_preread_server_name $target_host {
hostnames; # Enable matching including prefix/suffix-mask
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;
default 127.0.0.1:1;
}
server {
resolver 127.0.0.11; # Explicitly set docker-resolver
listen 443;
ssl_preread on;
proxy_pass $target_host;
}
}


@ -0,0 +1,29 @@
map $host $proxy_name {
hostnames;
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;
default 444;
}
server {
resolver 127.0.0.11; # Explicitly set docker-resolver
listen 8443 ssl;
ssl_certificate /certs/node.crt;
ssl_certificate_key /certs/node.key;
if ($proxy_name = 444){
return 444;
}
location / {
proxy_pass http://$proxy_name;
}
}


@ -0,0 +1,5 @@
[{
"username": "netmaker",
"password": "{{ netmaker_creds.rqlite_password }}",
"perms": ["all"]
}]

common/defaults/main.yml Normal file

@ -0,0 +1 @@
ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888

common/handlers/main.yml Normal file

@ -0,0 +1,2 @@
- name: reload_sysctl
command: sysctl --system

common/tasks/aliases.yml Normal file

@ -0,0 +1,20 @@
- name: General aliases
blockinfile:
path: "{{ ansible_facts.env.HOME }}/.bashrc"
marker: "# {mark} ANSIBLE MANAGED BLOCK | General aliases"
block: |
alias clr="clear"
alias hgrep="history | grep"
alias syslog="tail -f --lines=100 /var/log/syslog"
alias cp="rsync -hlAXEptgoDS --numeric-ids --info=progress2"
- name: ls aliases and colors
blockinfile:
path: "{{ ansible_facts.env.HOME }}/.bashrc"
marker: "# {mark} ANSIBLE MANAGED BLOCK | ls aliases and colors"
block: |
export LS_OPTIONS='--color=auto'
eval "`dircolors`"
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -l'
alias l='ls $LS_OPTIONS -la'

common/tasks/main.yml Normal file

@ -0,0 +1,9 @@
- import_tasks: ./packages.yml
- import_tasks: ./ssh.yml
- import_tasks: ./aliases.yml
- import_tasks: ./networking.yml


@ -0,0 +1,22 @@
- name: Set sysctl settings for ip-forwarding
copy:
dest: "/etc/sysctl.d/ip-forwarding.conf"
content: |
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
notify: reload_sysctl
- name: Set sysctl settings for ipv6-address-generation
copy:
dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
content: |
net.ipv6.conf.default.addr_gen_mode = 2
net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
notify: reload_sysctl
- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
copy:
dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
content: |
net.ipv6.conf.all.accept_ra = 2
notify: reload_sysctl

common/tasks/packages.yml Normal file

@ -0,0 +1,20 @@
- name: Update Packages
apt:
update_cache: yes
upgrade: yes
when: ansible_facts.distribution == "Debian"
- name: Install Packages
package:
name:
- gpg
- htop
- iotop
- slurm
- sudo
- screen
- curl
- rsync
- zstd
state: latest
when: ansible_facts.distribution == "Debian"

common/tasks/ssh.yml Normal file

@ -0,0 +1,12 @@
- name: Disable SSH password auth
lineinfile:
dest: /etc/ssh/sshd_config
regexp: '^PasswordAuthentication\s*yes'
line: "PasswordAuthentication no"
register: sshd_config
- name: Restart SSH daemon
service:
name: sshd
state: restarted
when: sshd_config.changed

docker/files/docker-compose Executable file

@ -0,0 +1,2 @@
#!/bin/sh
docker compose "$@"

docker/handlers/main.yml Normal file

@ -0,0 +1,4 @@
- name: restart_docker
service:
name: "docker"
state: restarted

docker/tasks/main.yml Normal file

@ -0,0 +1,40 @@
#- name: Check if docker is already installed
# stat:
# path: /usr/bin/docker
# register: docker_file
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- gpg
- gpg-agent
- name: Add docker-key
apt_key:
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Add docker-repository
apt_repository:
repo: "deb https://download.docker.com/linux/{{ ansible_facts.distribution | lower }} {{ ansible_facts.distribution_release }} stable"
state: present
filename: docker
update_cache: yes
- name: Install docker
package:
name:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-compose-plugin
- pass
state: latest
- name: Deploy docker-compose command to new docker compose plugin
ansible.builtin.copy:
src: docker-compose
dest: "/usr/local/bin/docker-compose"
mode: preserve


@ -0,0 +1,41 @@
---
kubernetes:
ipPool:
ipv4:
# Minimum: /24
cluster_cidr: 10.42.0.0/16
service_cidr: 10.43.0.0/16
ipv6:
# Minimum: /120
cluster_cidr: fd42::/56
service_cidr: fd43::/112
# Interface to grab node-IPv4/v6 from
nodeIp_interface: <interface to grab nodeIp from>
control_plane:
dns_name: <control-plane dns-reachable-name>
token: <shared token for nodes to join>
network:
# One of [flannel, calico]
plugin: calico
# Helper for networking
helper:
# https://github.com/Ruakij/RoutingTableToWg
# Translates received-routes from e.g. BGP to wireguard-allowedips
# Helpful, when nodeIp_interface is a wireguard-interface
routingtabletowg: false
# One of [traefik-ingress]
ingress_controller: traefik-ingress
config_extra:
# etcd-tuning
# heartbeat: 0.5-1.5x of rtt
# election: 10x- of heartbeat
etcd-arg:
heartbeat-interval: 500
election-timeout: 5000
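As a worked example of the etcd-tuning comment: with a node-to-node RTT of roughly 400 ms across the mesh, heartbeat-interval 500 falls in the 0.5-1.5x-RTT window and election-timeout 5000 is exactly 10x the heartbeat. A minimal inventory override, assuming merged variable dicts (hash_behaviour = merge) and a hypothetical mesh interface:

kubernetes:
  ipPool:
    nodeIp_interface: wg0
  control_plane:
    dns_name: k3s.example.org
  token: <shared secret>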


@ -0,0 +1,33 @@
@startuml
rectangle "Control-Plane" as control_plane {
rectangle "Node" as sn1 {
component "netclient" as sn1_netclient
component etcd as sn1_etcd
component "k3s-server" as sn1_k3s_server
sn1_k3s_server - sn1_etcd
}
rectangle "Node" as sn2 {
component "netclient" as sn2_netclient
component etcd as sn2_etcd
component "k3s-server" as sn2_k3s_server
sn2_k3s_server - sn2_etcd
}
sn1_netclient -- sn2_netclient
sn1_etcd -- sn2_etcd
}
rectangle "Workers" {
rectangle "Node" as an1 {
component "netclient" as an1_netclient
component "k3s-agent" as sn1_k3s_agent
}
}
@enduml


@ -0,0 +1,35 @@
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
disabled_plugins = []
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true


@ -0,0 +1,19 @@
- name: reload_sysctl
command: sysctl --system
- name: restart_containerd
ansible.builtin.service:
name: containerd
state: restarted
- name: reload_networking
service:
name: networking
state: restarted
async: 5
poll: 0
notify: wait_for_connection
- name: wait_for_connection
wait_for_connection:
delay: 5

kubernetes/meta/main.yml Normal file

@ -0,0 +1,3 @@
---
dependencies:
- role: docker


@ -0,0 +1,30 @@
- name: Create k3s-folder
ansible.builtin.file:
path: /etc/rancher/k3s/
state: directory
mode: '0755'
- name: Deploy k3s config
ansible.builtin.template:
src: k3s/{{ type }}/config.yaml.jinja2
dest: /etc/rancher/k3s/config.yaml
register: config
- name: Download install-script
get_url:
url: https://get.k3s.io
dest: /root/k3s_install.sh
mode: '744'
# todo: update when file changed?
- import_tasks: ./install/server/setup_network.yml
when: "type == 'server'"
- import_tasks: ./install/server/install_helm.yml
when: "type == 'server'"
- import_tasks: ./install/server/install_k3s.yml
when: "type == 'server'"
- import_tasks: ./install/agent/install_k3s.yml
when: "type == 'agent'"


@ -0,0 +1,12 @@
- name: Install K3s agent
command: /root/k3s_install.sh {{ type }}
register: command
changed_when: "'No change detected' not in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
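# The install-script (re)starts the service itself whenever it applies changes,
# so the task below only forces a restart when the config changed without the
# install reporting a change; otherwise it just ensures the service is running.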
- name: Make sure service is started / restarted on config change
service:
name: k3s-agent
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"


@ -0,0 +1,17 @@
- name: Add Balto key
apt_key:
url: https://baltocdn.com/helm/signing.asc
state: present
- name: Add Balto Repository
apt_repository:
repo: "deb https://baltocdn.com/helm/stable/debian/ all main"
state: present
filename: kubernetes
update_cache: yes
- name: Install helm
package:
name:
- helm
state: latest


@ -0,0 +1,55 @@
- name: Install K3s-server for 1st-node
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname == groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Waiting for K3s-server to accept connections
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 6443
state: started
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Install K3s-server for other nodes
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname != groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname != groups['kubernetes'][0]"
- name: Waiting for K3s-server to accept connections on other nodes
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 6443
state: started
when: "inventory_hostname != groups['kubernetes'][0]"
#- name: Add Kubernetes environment-vars to /etc/profile.d/
# blockinfile:
# path: /etc/profile.d/k3s-bin.sh
# marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
# block: |
# export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
# create: true
- name: Deploy calico
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
when: "kubernetes.network.plugin == 'calico'"
- name: Deploy network-helpers
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml


@ -0,0 +1,19 @@
- name: Deploy calico operator
command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
register: command
changed_when: "'created' in command.stdout"
run_once: true
failed_when:
- "command.rc == 1 and 'AlreadyExists' not in command.stderr"
- name: Deploy calico resource template
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
dest: /root/calico-ressource.yml
run_once: true
- name: Deploy calico resource
command: kubectl apply -f /root/calico-ressource.yml
register: command
changed_when: "'created' in command.stdout"
run_once: true


@ -0,0 +1,7 @@
- name: Deploy service-file for routing-table to wireguard-translation
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
mode: u=rw,g=r,o=r
run_once: true
when: "kubernetes.network.helper.routingtabletowg"


@ -0,0 +1,6 @@
- name: Set control-plane-dns-endpoint towards local-ip
blockinfile:
path: /etc/hosts
marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
block: |
{{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}


@ -0,0 +1,4 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./install.yml


@ -0,0 +1,42 @@
#- name: Load br_netfilter kernel-module
# modprobe:
# name: br_netfilter
# state: present
- name: Set sysctl settings for iptables bridged traffic
copy:
dest: "/etc/sysctl.d/kubernetes.conf"
content: |
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.forwarding=1
net.ipv6.conf.all.forwarding=1
notify: reload_sysctl
#- name: Disable swap
# command: swapoff -a
- name: Install required packages
package:
name:
#- containerd
#- iptables
# For Longhorn:
- nfs-common
- open-iscsi
state: latest
- import_tasks: ./prerequisites/containerd.yml
- name: Gather interface-name
set_fact:
interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
- name: Getting nodeIp-data from interface
set_fact:
nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"
- name: Run handlers to reload configurations
meta: flush_handlers


@ -0,0 +1,24 @@
- name: Check if containerd-service exists & is started
service:
name: containerd
state: started
ignore_errors: true
register: containerd_status
- name: Install containerd when not exists
package:
name:
- containerd
when: containerd_status is failed
- name: Create containerd config-folder
file:
path: /etc/containerd
state: directory
- name: Deploy containerd-config
ansible.builtin.copy:
src: containerd_config.toml
dest: /etc/containerd/config.toml
mode: u=rw,g=r,o=r
notify: restart_containerd


@ -0,0 +1,18 @@
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
token: '{{ kubernetes.token }}'
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}


@ -0,0 +1,49 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
{% endif %}
token: '{{ kubernetes.token }}'
tls-san:
- {{ kubernetes.control_plane.dns_name }}
# Networking
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
{% endif %}
egress-selector-mode: disabled
# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}
# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
{{ kubernetes.config_extra | to_yaml }}
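For orientation, a sketch of what this template renders to on the first control-plane node, assuming the sample defaults (calico, dual-stack, traefik-ingress kept) plus hypothetical node addresses, DNS name and region:

cluster-init: true
token: '<shared token>'
tls-san:
  - k3s.example.org
node-ip: 10.0.0.1,fd00::1
cluster-cidr: 10.42.0.0/16,fd42::/56
service-cidr: 10.43.0.0/16,fd43::/112
egress-selector-mode: disabled
disable-network-policy: true
flannel-backend: none
node-label:
- topology.kubernetes.io/region=eu-central
etcd-arg:
  heartbeat-interval: 500
  election-timeout: 5000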


@ -0,0 +1,34 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: 26
cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
- blockSize: 122
cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% endif %}
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}


@ -0,0 +1,45 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: routingtabletowg
namespace: calico-system
labels:
app: routingtabletowg
spec:
selector:
matchLabels:
app: routingtabletowg
template:
metadata:
labels:
app: routingtabletowg
spec:
tolerations:
# this toleration is to have the daemonset runnable on master nodes
# remove it if your masters can't run pods
- key: node-role.kubernetes.io/master
effect: NoSchedule
hostNetwork: true
containers:
- name: routingtabletowg
image: "ruakij/routingtabletowg:0.2.0"
env:
- name: INTERFACE
value: {{ kubernetes.ipPool.nodeIp_interface }}
- name: FILTER_PROTOCOL
value: bird
- name: PERIODIC_SYNC
value: '300'
securityContext:
capabilities:
add:
- NET_ADMIN
resources:
requests:
cpu: 10m
memory: 10Mi
limits:
cpu: 20m
memory: 20Mi
---


@ -0,0 +1,6 @@
netbird_client:
# Key and url to join a network
# leave empty to ignore
join_network:
setup_key:
management_url:


@ -0,0 +1,26 @@
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- ca-certificates
- curl
- gnupg
- name: Add netbird-key
apt_key:
url: https://pkgs.wiretrustee.com/debian/public.key
state: present
- name: Add netbird-repository
apt_repository:
repo: "deb https://pkgs.wiretrustee.com/debian stable main"
state: present
filename: netbird
update_cache: yes
- name: Install wireguard & netbird
package:
name:
- wireguard
- netbird
state: latest


@ -0,0 +1,16 @@
- name: Join netbird-network
when: "netbird_client.join_network.setup_key is defined"
command: "netbird up --management-url {{ netbird_client.join_network.management_url }} --setup-key {{ netbird_client.join_network.setup_key }}"
failed_when: command.rc != 0
changed_when: "'Connected' in command.stdout"
register: command
- name: Wait for netbird-interface to exist
wait_for:
path: "/sys/class/net/wt0"
state: present
when: command.changed
- name: Gather facts to get changes
ansible.builtin.gather_facts:
when: command.changed


@ -0,0 +1,4 @@
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml


@ -0,0 +1,29 @@
nftables:
# Rules to add
# Handled as templates
# Creates separate files for each entry.
# The identifier is necessary for ansible to be able to merge the keys (when 'hash_behaviour = merge')
# rule-ids have to be unique across files and raw
rules:
# Files with Rules to add
files:
#'<group_identifier>': '<relative-location>'
#'<group_identifier>':
# main: <relative-location>
# '<identifier>': '<relative-location>'
# Rules to add
raw:
#'<group_identifier>': '<content>'
#'<group_identifier>':
# main: <content>
# '<identifier>': '<content>'
# Decides if /etc/nftables.conf is applied or separate files which have changed
  # Separate changes require the files to be self-tidying to not end up with duplicate rules
# e.g.
# table ip mytable
# flush table ip mytable
# delete table ip mytable
# table ip mytable {} ...
apply_global: false
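A concrete host_vars sketch (identifiers, path and rule content hypothetical), mixing one templated file with one raw inline rule that follows the self-tidying pattern from the comment above:

nftables:
  rules:
    files:
      forwarding: 'templates/nftables/forwarding.nft'
    raw:
      filter: |
        table inet filter
        flush table inet filter
        table inet filter {
          chain input {
            type filter hook input priority 0; policy accept;
          }
        }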


@ -0,0 +1,8 @@
- name: Load group rules
command: "nft -f /etc/nftables/ansible-managed/{{ item }}.nft"
loop: "{{ combined_rules | list }}"
when: not nftables.apply_global
- name: Load global rule file
command: "nft -f /etc/nftables.nft"
when: nftables.apply_global


@ -0,0 +1,11 @@
- name: Deploying group files
include_tasks: ./per-group-template-file.yml
with_items:
- "{{ nftables.rules.files | list }}"
- name: Deploying group raw-files
include_tasks: ./per-group-template.yml
with_items:
- "{{ nftables.rules.raw | list }}"
- include_tasks: ./remove-files.yml


@ -0,0 +1,51 @@
- set_fact:
group_identifier: "{{ item }}"
value: "{{ nftables.rules.files[item] }}"
when: "item is defined"
#'<group_identifier>': '<relative-location>'
- block:
- name: Create main rule file
template:
src: "{{ value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
when: value is string
#'<group_identifier>':
# main: <relative-location>
# '<identifier>': '<relative-location>'
- block:
- set_fact:
items: "{{ nftables.rules.files[item] }}"
- block:
- name: Create main rule file
template:
src: "{{ items['main'] }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
- name: Include rule files
lineinfile:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
when: items['main'] is defined
- name: Create group folder
file:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
state: directory
when: items|length > 0
- name: Create included rule files
template:
src: "{{ fileItem.value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ fileItem.key }}.nft"
loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
loop_control:
loop_var: fileItem
when: value is mapping


@ -0,0 +1,48 @@
- set_fact:
group_identifier: "{{ item }}"
value: "{{ nftables.rules.raw[item] }}"
when: "item is defined"
#'<group_identifier>': '<content>'
- block:
- name: Create main rule file
copy:
content: "{{ value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
when: value is string
#'<group_identifier>':
# main: <content>
# '<identifier>': '<content>'
- block:
- set_fact:
items: "{{ nftables.rules.raw[item] }}"
- block:
- name: Create main rule file
copy:
content: "{{ items['main'] }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
- name: Include rule files
lineinfile:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
when: items['main'] is defined
- name: Create group folder
file:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
state: directory
when: items|length > 0
- name: Create included rule files
copy:
content: "{{ included_item.value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ included_item.key }}.nft"
loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
loop_control:
loop_var: included_item
when: value is mapping


@ -0,0 +1,4 @@
- name: Install Packages
package:
name:
- nftables

nftables/tasks/main.yml Normal file

@ -0,0 +1,7 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./setup-packages.yml
- import_tasks: ./deploy-rules/main.yml
- import_tasks: ./apply-files.yml


@ -0,0 +1,13 @@
# Defaults if missing
- name: Set defaults if missing
set_fact:
nftables:
rules:
files: "{{ nftables.rules.files | default({}) | combine({}) }}"
raw: "{{ nftables.rules.raw | default({}) | combine({}) }}"
combined_rules: "{{ nftables.rules.raw | combine(nftables.rules.files, recursive=true) }}"
#- name: Check items for consistency
# assert:
# that: "{{ nftables.rules.files.values() | length }} + {{ nftables.rules.raw.values() | length }} == {{ combined_rules.values() | length }}"
# fail_msg: "files and raw rules share the same identifier"


@ -0,0 +1,21 @@
- name: Handle removed group files
block:
- find:
paths: /etc/nftables/ansible-managed/
file_type: 'any'
excludes: '{% for item in combined_rules %}{{ item }},{{ item }}.nft,{% endfor %}'
depth: 1
register: removeFiles
- file:
path: "{{ fileItem.path }}"
state: absent
loop: "{{ removeFiles.files }}"
loop_control:
label: "{{ fileItem.path }}"
loop_var: fileItem
- name: Handle removed included files per group
include_tasks: ./remove-per-group.yml
with_items:
- "{{ combined_rules | list }}"


@ -0,0 +1,20 @@
- set_fact:
group_identifier: "{{ item }}"
group_items: "{{ combined_rules[item] }}"
- block:
- find:
paths: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
file_type: 'any'
excludes: '{% for item in group_items %}{{ item }}.nft,{% endfor %}'
register: removeFiles
- file:
path: "{{ fileItem.path }}"
state: absent
loop: "{{ removeFiles.files }}"
loop_control:
label: "{{ fileItem.path }}"
loop_var: fileItem
when: group_items is mapping


@ -0,0 +1,15 @@
- name: Install nftables
package:
name:
- nftables
- name: Create /etc/nftables/ansible-managed
file:
path: /etc/nftables/ansible-managed
state: directory
- name: Include files in /etc/nftables/ansible-managed/ from /etc/nftables.conf
blockinfile:
path: /etc/nftables.conf
marker: "# {mark} ANSIBLE MANAGED BLOCK - nftables"
content: 'include "/etc/nftables/ansible-managed/*.nft"'


@ -0,0 +1,12 @@
wireguard_ipv6_converter:
version: latest
# see https://github.com/Ruakij/wg-ipv6-converter#31-environment
setup:
interface: wg0
#ipv6_format: fc12::%02x%02x:%02x%02x/%d
#filter_prefix: 100.100
#recheck_interval: 60s
service:
#bindTo: netbird.service
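A typical override, assuming the interface is the netbird-managed wt0 from the role above, so the converter is bound to that service:

wireguard_ipv6_converter:
  setup:
    interface: wt0
  service:
    bindTo: netbird.service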


@ -0,0 +1,11 @@
- name: Get architecture
set_fact:
arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'arm64' }}"
versionUri: "{% if wireguard_ipv6_converter.version == 'latest' %}latest/download{% else %}download/{{ wireguard_ipv6_converter.version }}{% endif %}"
- name: Download binary
get_url:
url: https://github.com/Ruakij/wg-ipv6-converter/releases/{{ versionUri }}/wg-ipv6-converter_{{ arch }}
dest: /usr/local/bin/wg-ipv6-converter
mode: "744"
register: deployDownload


@ -0,0 +1,3 @@
- import_tasks: ./deploy.yml
- import_tasks: ./setup-service.yml


@ -0,0 +1,27 @@
- name: Deploy service
ansible.builtin.template:
src: wg-ipv6-conv.service.jinja2
dest: /etc/systemd/system/wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}.service
register: serviceFile
- name: Enable service
  ansible.builtin.systemd:
    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
    daemon_reload: true
    enabled: true
- name: Start service if interface exists already
ansible.builtin.service:
name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
state: "{{ 'restarted' if deployDownload.changed or serviceFile.changed else 'started' }}"
register: service
when: "wireguard_ipv6_converter.setup.interface in ansible_interfaces"
- name: Pause for 5s to wait for program to have run
ansible.builtin.pause:
seconds: 5
when: "service.changed"
- name: Gather facts to get changes
ansible.builtin.gather_facts:
when: "service.changed"


@ -0,0 +1,29 @@
[Unit]
Description=WireGuard IPv6 converter for {{ wireguard_ipv6_converter.setup.interface }}
{% if wireguard_ipv6_converter.service.bindTo is defined %}
BindsTo={{ wireguard_ipv6_converter.service.bindTo }}
After={{ wireguard_ipv6_converter.service.bindTo }}
{% endif %}
[Service]
Type=simple
{% if wireguard_ipv6_converter.service.bindTo is defined %}
ExecStartPre=/bin/sleep 10
{% endif %}
ExecStart=/usr/local/bin/wg-ipv6-converter
Restart=always
RestartSec=30
Environment="INTERFACE={{ wireguard_ipv6_converter.setup.interface }}"
{% if wireguard_ipv6_converter.setup.ipv6_format is defined %}
Environment="IPV6_FORMAT={{ wireguard_ipv6_converter.setup.ipv6_format }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.filter_prefix is defined %}
Environment="FILTER_PREFIX={{ wireguard_ipv6_converter.setup.filter_prefix }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.recheck_interval is defined %}
Environment="RECHECK_INTERVAL={{ wireguard_ipv6_converter.setup.recheck_interval }}"
{% endif %}
[Install]
WantedBy=multi-user.target