Compare commits


1 commit

Author SHA1 Message Date
Ruakij 8537ea2b9e Add role ansible 2 years ago

@ -1,4 +0,0 @@
netclient:
# Token to join default-network
# leave empty to ignore
join_network_token:

@ -1,3 +0,0 @@
---
dependencies: []
#- role: docker

@ -1,4 +0,0 @@
- name: Deploy CA Certificate
ansible.builtin.copy:
src: secret_files/netmaker_server/ca/ca.crt
dest: /etc/ssl/certs/netmaker-ca.pem

@ -1,25 +0,0 @@
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- gpg
- gpg-agent
- name: Add netmaker-key
apt_key:
url: https://apt.netmaker.org/gpg.key
state: present
- name: Add netmaker-repository
apt_repository:
repo: "deb https:apt.netmaker.org stable main"
state: present
filename: netmaker
update_cache: yes
- name: Install wireguard & netclient
package:
name:
- wireguard
- netclient
state: latest

@ -1,7 +0,0 @@
- name: Join netmaker-network
when: "netclient.join_network_token is defined"
command: "netclient join -t {{ netclient.join_network_token }}"
failed_when: command.rc != 0
changed_when: "'starting wireguard' in command.stdout"
register: command
throttle: 1

@ -1,8 +0,0 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml
- name: Gather facts to get changes
ansible.builtin.gather_facts:

@ -1,5 +0,0 @@
netmaker_creds:
rqlite_password:
mq_admin_password:
master_key:

@ -1,28 +0,0 @@
# Override for specific nodes to force a dynamic IP (disables setting the public IP and forces an external lookup for it)
# When false, the node checks for a dynamic IP itself (based on whether its default address is private)
netmaker_dynamicIp: false
netmaker_nginx:
# Listen-port
tls_port: 51820
# Advertise-Port for services
# (must also be reachable by internal services!)
advertise_port: 51820
# This is the base-domain used for generating hostnames for services
netmaker_base_domain:
# host + base_domain
netmaker_api:
host: netmaker-api
netmaker_ui:
host: netmaker-ui
# MQTT-broker
netmaker_broker:
tls_host: netmaker-broker
# host + node_hostname
netmaker_rqlite:
http_host: netmaker-rqlite-http
cluster_host: netmaker-rqlite-cluster
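To make the naming scheme concrete, a hedged sketch of the names these fragments produce, assuming a hypothetical base domain example.org and a node hostname node1 (neither value is part of the role):

# netmaker_api.host        + netmaker_base_domain -> netmaker-api.example.org
# netmaker_ui.host         + netmaker_base_domain -> netmaker-ui.example.org
# netmaker_broker.tls_host + netmaker_base_domain -> netmaker-broker.example.org
# netmaker_rqlite.*_host   + node hostname        -> netmaker-rqlite-http.node1, netmaker-rqlite-cluster.node1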

@ -1,37 +0,0 @@
@startuml
interface ng_TLS
component netmaker_server {
component nginx {
component ng_stream
component ng_http
ng_stream -up- ng_TLS
ng_stream -right-> ng_http : tls-termination
}
component nm_ui
nm_ui -up- nm_ui_http
ng_http -down-( nm_ui_http
component Mosquitto
Mosquitto -up- mq_plain
Mosquitto -up- mq_tls
ng_stream -down-( mq_tls
component rqlite
rqlite -up- rq_http
rqlite -up- rq_cluster
ng_stream -down-( rq_cluster
ng_http -down-( rq_http
component nm_api
nm_api -down- nm_api_http
ng_http --( nm_api_http
nm_api .up.( ng_TLS : db-connection to rqlite-master
nm_api --( mq_plain
}
@enduml

@ -1,12 +0,0 @@
per_listener_settings false
listener 8883
protocol websockets
allow_anonymous false
listener 1883
protocol websockets
allow_anonymous false
plugin /usr/lib/mosquitto_dynamic_security.so
plugin_opt_config_file /mosquitto/data/dynamic-security.json

@ -1,23 +0,0 @@
#!/bin/ash
wait_for_netmaker() {
echo "SERVER: ${NETMAKER_SERVER_HOST}"
until curl --output /dev/null --silent --fail --head \
--location "${NETMAKER_SERVER_HOST}/api/server/health"; do
echo "Waiting for netmaker server to startup"
sleep 1
done
}
main(){
# wait for netmaker to startup
apk add curl
wait_for_netmaker
echo "Starting MQ..."
# Run the main container command.
/docker-entrypoint.sh
/usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
}
main "${@}"

@ -1,33 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
include /etc/nginx/stream.d/*.conf;

@ -1,3 +0,0 @@
---
# dependencies:
# - role: docker

@ -1,40 +0,0 @@
- name: Generate PrivateKey
community.crypto.openssl_privatekey:
path: /opt/netmaker_server/certs/node.key
owner: 1883 # Set owner to mosquitto-user (all other containers seem to run as root)
- name: Generate Certificate-Signing-Request from privateKey
community.crypto.openssl_csr:
path: /opt/netmaker_server/certs/node.csr
privatekey_path: /opt/netmaker_server/certs/node.key
common_name: "{{ ansible_facts.nodename }}"
subject_alt_name:
"DNS:{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }},\
DNS:{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }},\
DNS:{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }},\
DNS:{{ netmaker_api.host }}.{{ netmaker_base_domain }},\
DNS:{{ netmaker_ui.host }}.{{ netmaker_base_domain }}"
- name: Fetch CSR
ansible.builtin.fetch:
src: /opt/netmaker_server/certs/node.csr
dest: tmp_files/
- name: Sign CSR locally with CA
local_action: community.crypto.x509_certificate
args:
path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
csr_path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.csr
ownca_path: secret_files/netmaker_server/ca/ca.crt
ownca_privatekey_path: secret_files/netmaker_server/ca/ca.key
provider: ownca
- name: Copy Signed Certificate
ansible.builtin.copy:
src: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
dest: /opt/netmaker_server/certs/node.crt
- name: Copy CA Certificate
ansible.builtin.copy:
src: secret_files/netmaker_server/ca/ca.crt
dest: /opt/netmaker_server/certs/ca.crt

@ -1,20 +0,0 @@
- import_tasks: ./prerequisites.yml
- name: Copy folder-structure
ansible.builtin.copy:
src: opt/netmaker_server
dest: /opt/
mode: preserve
- name: Deploy compose file
ansible.builtin.template:
src: docker-compose.yml.template
dest: /opt/netmaker_server/docker-compose.yml
- import_tasks: ./certs.yml
- import_tasks: ./nginx.yml
- import_tasks: ./rqlite.yml
- import_tasks: ./netmaker.yml

@ -1,57 +0,0 @@
- name: Start rest of netmaker-services
command: "docker-compose --project-directory /opt/netmaker_server/ up -d"
register: command
failed_when: command.rc != 0
- name: Wait for netmaker-api to become available
uri:
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}'
return_content: yes
validate_certs: no
status_code:
- 404
until: uri_output.status == 404
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
# todo: check if exists?
- name: Create default mesh-network 'server'
uri:
validate_certs: no
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks'
method: POST
body:
netid: servnet
addressrange: 10.92.0.0/24
addressrange6: fd92::/64
body_format: json
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker_server'][0]"
register: default_mesh
until: "default_mesh is not failed"
retries: 2
delay: 10
# todo: check if exists?
- name: Create token for default-network
uri:
validate_certs: no
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks/servnet/keys' # todo: do implementation
method: POST
body:
name: ""
uses: 0
body_format: json
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker_server'][0]"
register: default_mesh_key
until: "default_mesh_key is not failed"
retries: 2
delay: 10

@ -1,18 +0,0 @@
- name: Deploy nginx configs
template:
src: "{{item.src}}"
dest: "{{item.dst}}"
loop:
- { src: 'nginx/proxy.conf.template', dst: '/opt/netmaker_server/nginx/conf/conf.d/proxy.conf' }
- { src: 'nginx/passthrough.conf.template', dst: '/opt/netmaker_server/nginx/conf/stream.d/passthrough.conf' }
- name: Start nginx service
command: "docker-compose --project-directory /opt/netmaker_server/ up -d nginx"
register: command
failed_when: command.rc != 0
- name: Waiting for nginx to accept connections
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 51820
state: started

@ -1,9 +0,0 @@
- name: Install wireguard
package:
name:
- wireguard
state: latest
- name: Check if default-ipv4-address is private
set_fact:
private_ipv4_address: "{{ ansible_facts.default_ipv4.address | regex_search('^((10)|(192\\.168)|(172\\.((1[6-9])|(2[0-9])|(3[0-1])))|(100))\\.') }}"
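For clarity, a sketch of what this regex_search produces (addresses are hypothetical; with Ansible's default non-native Jinja2 templating an unmatched search ends up as an empty string):

# default_ipv4.address = 192.168.1.5 -> private_ipv4_address = "192.168."  (truthy: node treated as having a private/dynamic IP)
# default_ipv4.address = 203.0.113.7 -> private_ipv4_address = ""          (falsy: the compose template sets SERVER_HOST to this public IP)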

@ -1,42 +0,0 @@
- name: Deploy rqlite config
ansible.builtin.template:
src: rqlite-config.json.template
dest: /opt/netmaker_server/rqlite/config.json
- name: Start rqlite service for 1st-node
command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
register: command
failed_when: command.rc != 0
when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on 1st-node
uri:
url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
return_content: yes
validate_certs: no
status_code:
- 401
until: uri_output.status == 401
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Start rqlite service for other nodes
command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
register: command
failed_when: command.rc != 0
when: "inventory_hostname != groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on other nodes
uri:
url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
return_content: yes
validate_certs: no
status_code:
- 401
until: uri_output.status == 401
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
when: "inventory_hostname != groups['netmaker_server'][0]"

@ -1,127 +0,0 @@
version: "3.4"
services:
nginx:
image: nginx
restart: unless-stopped
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro # Override nginx-config to add stream-import
- ./nginx/conf/conf.d/:/etc/nginx/conf.d:ro # conf.d
- ./nginx/conf/stream.d/:/etc/nginx/stream.d:ro # stream.d
- ./certs:/certs:ro # SSL-certificates
ports:
- {{ netmaker_nginx.tls_port }}:443
rqlite: # Distributed sqlite-db
image: rqlite/rqlite
restart: unless-stopped
hostname: "{{ ansible_facts.nodename }}"
volumes:
- "./rqlite/data:/rqlite/file"
- "./rqlite/config.json:/config.json:ro"
- "./certs:/certs:ro"
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
command: "
-http-adv-addr {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-raft-addr [::]:4002
-raft-adv-addr {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-node-encrypt
-node-cert /certs/node.crt
-node-key /certs/node.key
-node-no-verify
-auth /config.json
{% if inventory_hostname != groups['netmaker_server'][0] %}
-join-as netmaker
-join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker_server'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
"
# FIXME: /\ \/ Change http -> https
netmaker: # The Primary Server for running Netmaker
image: gravitl/netmaker:v0.17.1
depends_on:
- rqlite
cap_add:
- NET_ADMIN
- NET_RAW
- SYS_MODULE
sysctls:
- net.ipv4.ip_forward=1
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.all.forwarding=1
restart: unless-stopped
volumes: # Volume mounts necessary for sql, coredns, and mqtt
- ./dnsconfig/:/root/config/dnsconfig
- ./mosquitto/data/:/etc/netmaker/
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
hostname: "{{ ansible_facts.nodename }}"
environment: # Necessary capabilities to set iptables when running in container
NODE_ID: "{{ ansible_facts.nodename }}"
MASTER_KEY: "{{ netmaker_creds.master_key }}" # The admin master key for accessing the API. Change this in any production installation.
{% if not private_ipv4_address and not netmaker_dynamicIp %}
SERVER_HOST: "{{ ansible_facts.default_ipv4.address }}" # Set to public IP of machine.
{% endif %}
SERVER_NAME: "{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}" # The domain/host IP indicating the mq broker address
SERVER_HTTP_HOST: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}" # Overrides SERVER_HOST if set. Useful for making HTTP available via different interfaces/networks.
SERVER_API_CONN_STRING: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}"
DISABLE_REMOTE_IP_CHECK: "off" # If turned "on", Server will not set Host based on remote IP check. This is already overridden if SERVER_HOST is set. Turned "off" by default.
DNS_MODE: "off" # Enables DNS Mode, meaning all nodes will set hosts file for private dns settings.
API_PORT: "8081" # The HTTP API port for Netmaker. Used for API calls / communication from front end. If changed, need to change port of BACKEND_URL for netmaker-ui.
REST_BACKEND: "on" # Enables the REST backend (API running on API_PORT at SERVER_HTTP_HOST). Change to "off" to turn off.
RCE: "off" # Enables setting PostUp and PostDown (arbitrary commands) on nodes from the server. Off by default.
CORS_ALLOWED_ORIGIN: "*" # The "allowed origin" for API requests. Change to restrict where API requests can come from.
DISPLAY_KEYS: "on" # Show keys permanently in UI (until deleted) as opposed to 1-time display.
DATABASE: "rqlite"
SQL_CONN: "https://netmaker:{{ netmaker_creds.rqlite_password }}@{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}/"
MQ_HOST: "mosquitto" # the address of the mq server. If running from docker compose it will be "mq". Otherwise, need to input address. If using "host networking", it will find and detect the IP of the mq container.
MQ_SERVER_PORT: "1883" # the reachable port of MQ by the server - change if internal MQ port changes (or use external port if MQ is not on the same machine)
MQ_PORT: "{{ netmaker_nginx.advertise_port }}" # the reachable port of MQ - change if external MQ port changes (port on proxy, not necessarily the one exposed in docker-compose)
MQ_ADMIN_PASSWORD: "{{ netmaker_creds.mq_admin_password }}"
HOST_NETWORK: "off" # whether or not host networking is turned on. Only turn on if configured for host networking (see docker-compose.hostnetwork.yml). Will set host-level settings like iptables.
PORT_FORWARD_SERVICES: "" # decide which services to port forward ("dns","ssh", or "mq")
# this section is for OAuth
AUTH_PROVIDER: "" # "<azure-ad|github|google|oidc>"
CLIENT_ID: "" # "<client id of your oauth provider>"
CLIENT_SECRET: "" # "<client secret of your oauth provider>"
FRONTEND_URL: "" # "https://dashboard.<netmaker base domain>"
AZURE_TENANT: "" # "<only for azure, you may optionally specify the tenant for the OAuth>"
OIDC_ISSUER: "" # https://oidc.yourprovider.com - URL of oidc provider
VERBOSITY: "1" # logging verbosity level - 1, 2, or 3
TELEMETRY: "off" # Whether or not to send telemetry data to help improve Netmaker. Switch to "off" to opt out of sending telemetry.
ports:
- "51821-51830:51821-51830/udp" # wireguard ports
netmaker-ui: # The Netmaker UI Component
image: gravitl/netmaker-ui:v0.17.1
depends_on:
- netmaker
links:
- "netmaker:api"
restart: unless-stopped
environment:
BACKEND_URL: "https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}" # URL where UI will send API requests. Change based on SERVER_HOST, SERVER_HTTP_HOST, and API_PORT
mosquitto: # the MQTT broker for netmaker
image: eclipse-mosquitto:2.0.11-openssl
restart: unless-stopped
volumes:
- ./mosquitto/config:/mosquitto/config
- ./mosquitto/data:/mosquitto/data
- ./mosquitto/logs:/mosquitto/log
depends_on:
- netmaker
command: ["/mosquitto/config/wait.sh"]
environment:
NETMAKER_SERVER_HOST: "http://netmaker:8081"

@ -1,25 +0,0 @@
stream {
# Map target-hosts based on hostname
map $ssl_preread_server_name $target_host {
hostnames; # Enable matching including prefix/suffix-mask
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;
default 127.0.0.1:1;
}
server {
resolver 127.0.0.11; # Explicitly set docker-resolver
listen 443;
ssl_preread on;
proxy_pass $target_host;
}
}

@ -1,29 +0,0 @@
map $host $proxy_name {
hostnames;
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;
default 444;
}
server {
resolver 127.0.0.11; # Explicitly set docker-resolver
listen 8443 ssl;
ssl_certificate /certs/node.crt;
ssl_certificate_key /certs/node.key;
if ($proxy_name = 444){
return 444;
}
location / {
proxy_pass http://$proxy_name;
}
}

@ -1,5 +0,0 @@
[{
"username": "netmaker",
"password": "{{ netmaker_creds.rqlite_password }}",
"perms": ["all"]
}]

@ -0,0 +1,5 @@
- name: Install ansible packages
package:
name:
- python3
state: latest

@ -1 +0,0 @@
ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888

@ -1,2 +0,0 @@
- name: reload_sysctl
command: sysctl --system

@ -1,20 +0,0 @@
- name: General aliases
blockinfile:
path: "{{ ansible_facts.env.HOME }}/.bashrc"
marker: "# {mark} ANSIBLE MANAGED BLOCK | General aliases"
block: |
alias clr="clear"
alias hgrep="history | grep"
alias syslog="tail -f --lines=100 /var/log/syslog"
alias cp="rsync -hlAXEptgoDS --numeric-ids --info=progress2"
- name: ls aliases and colors
blockinfile:
path: "{{ ansible_facts.env.HOME }}/.bashrc"
marker: "# {mark} ANSIBLE MANAGED BLOCK | ls aliases and colors"
block: |
export LS_OPTIONS='--color=auto'
eval "`dircolors`"
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -l'
alias l='ls $LS_OPTIONS -la'

@ -1,9 +0,0 @@
- import_tasks: ./packages.yml
- import_tasks: ./ssh.yml
- import_tasks: ./packages.yml
- import_tasks: ./aliases.yml
- import_tasks: ./networking.yml

@ -1,22 +0,0 @@
- name: Set sysctl settings for ip-forwarding
copy:
dest: "/etc/sysctl.d/ip-forwarding.conf"
content: |
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
notify: reload_sysctl
- name: Set sysctl settings for ipv6-address-generation
copy:
dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
content: |
net.ipv6.conf.default.addr_gen_mode = 2
net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
notify: reload_sysctl
- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
copy:
dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
content: |
net.ipv6.conf.all.accept_ra = 2
notify: reload_sysctl

@ -1,20 +0,0 @@
- name: Update Packages
apt:
update_cache: yes
upgrade: yes
when: ansible_facts.distribution == "Debian"
- name: Install Packages
package:
name:
- gpg
- htop
- iotop
- slurm
- sudo
- screen
- curl
- rsync
- zstd
state: latest
when: ansible_facts.distribution == "Debian"

@ -1,12 +0,0 @@
- name: Disable SSH password auth
lineinfile:
dest: /etc/ssh/sshd_config
regexp: '^PasswordAuthentication\s*yes'
line: "PasswordAuthentication no"
register: sshd_config
- name: Restart SSH daemon
service:
name: sshd
state: restarted
when: sshd_config.changed

@ -1,2 +0,0 @@
#!/bin/sh
# Shim so that legacy docker-compose invocations call the compose v2 plugin
docker compose "$@"

@ -1,4 +0,0 @@
- name: restart_docker
service:
name: "docker"
state: restarted

@ -1,40 +0,0 @@
#- name: Check if docker is already installed
# stat:
# path: /usr/bin/docker
# register: docker_file
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- gpg
- gpg-agent
- name: Add docker-key
apt_key:
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Add docker-repository
apt_repository:
repo: "deb https://download.docker.com/linux/{{ ansible_facts.distribution | lower }} {{ ansible_facts.distribution_release }} stable"
state: present
filename: docker
update_cache: yes
- name: Install docker
package:
name:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-compose-plugin
- pass
state: latest
- name: Deploy docker-compose command to new docker compose plugin
ansible.builtin.copy:
src: docker-compose
dest: "/usr/local/bin/docker-compose"
mode: preserve

@ -1,41 +0,0 @@
---
kubernetes:
ipPool:
ipv4:
# Minimum: /24
cluster_cidr: 10.42.0.0/16
service_cidr: 10.43.0.0/16
ipv6:
# Minimum: /120
cluster_cidr: fd42::/56
service_cidr: fd43::/112
# Interface to grab node-IPv4/v6 from
nodeIp_interface: <interface to grab nodeIp from>
control_plane:
dns_name: <control-plane dns-reachable-name>
token: <shared token for nodes to join>
network:
# One of [flannel, calico]
plugin: calico
# Helper for networking
helper:
# https://github.com/Ruakij/RoutingTableToWg
# Translates received-routes from e.g. BGP to wireguard-allowedips
# Helpful, when nodeIp_interface is a wireguard-interface
routingtabletowg: false
# One of [traefik-ingress]
ingress_controller: traefik-ingress
config_extra:
# etcd-tuning
# heartbeat: 0.5-1.5x of RTT
# election: at least 10x the heartbeat
etcd-arg:
heartbeat-interval: 500
election-timeout: 5000
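As a worked example of the tuning rule above (illustrative numbers, not measurements): for nodes whose RTT is in the few-hundred-millisecond range (e.g. geographically distributed over a VPN), a heartbeat-interval of 500 ms falls in the 0.5-1.5x RTT window and an election-timeout of 5000 ms is 10x the heartbeat, which is what the defaults above encode. A lower-latency cluster could override this via config_extra, e.g.:

kubernetes:
  config_extra:
    etcd-arg:
      heartbeat-interval: 100   # etcd's stock default, suited to low-latency LANs
      election-timeout: 1000    # 10x the heartbeat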

@ -1,33 +0,0 @@
@startuml
rectangle "Control-Plane" as control_plane {
rectangle "Node" as sn1 {
component "netclient" as sn1_netclient
component etcd as sn1_etcd
component "k3s-server" as sn1_k3s_server
sn1_k3s_server - sn1_etcd
}
rectangle "Node" as sn2 {
component "netclient" as sn2_netclient
component etcd as sn2_etcd
component "k3s-server" as sn2_k3s_server
sn2_k3s_server - sn2_etcd
}
sn1_netclient -- sn2_netclient
sn1_etcd -- sn2_etcd
}
rectangle "Workers" {
rectangle "Node" as an1 {
component "netclient" as an1_netclient
component "k3s-agent" as sn1_k3s_agent
}
}
@enduml

@ -1,35 +0,0 @@
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
disabled_plugins = []
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true

@ -1,19 +0,0 @@
- name: reload_sysctl
command: sysctl --system
- name: restart_containerd
ansible.builtin.service:
name: containerd
state: restarted
- name: reload_networking
service:
name: networking
state: restarted
async: 5
poll: 0
notify: wait_for_connection
- name: wait_for_connection
wait_for_connection:
delay: 5

@ -1,3 +0,0 @@
---
dependencies:
- role: docker

@ -1,30 +0,0 @@
- name: Create k3s-folder
ansible.builtin.file:
path: /etc/rancher/k3s/
state: directory
mode: '0755'
- name: Deploy k3s config
ansible.builtin.template:
src: k3s/{{ type }}/config.yaml.jinja2
dest: /etc/rancher/k3s/config.yaml
register: config
- name: Download install-script
get_url:
url: https://get.k3s.io
dest: /root/k3s_install.sh
mode: '744'
# todo: update when file changed?
- import_tasks: ./install/server/setup_network.yml
when: "type == 'server'"
- import_tasks: ./install/server/install_helm.yml
when: "type == 'server'"
- import_tasks: ./install/server/install_k3s.yml
when: "type == 'server'"
- import_tasks: ./install/agent/install_k3s.yml
when: "type == 'agent'"

@ -1,12 +0,0 @@
- name: Install K3s agent
command: /root/k3s_install.sh {{ type }}
register: command
changed_when: "'No change detected' not in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
- name: Make sure service is started / restarted on config change
service:
name: k3s-agent
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"

@ -1,17 +0,0 @@
- name: Add Balto key
apt_key:
url: https://baltocdn.com/helm/signing.asc
state: present
- name: Add Balto Repository
apt_repository:
repo: "deb https://baltocdn.com/helm/stable/debian/ all main"
state: present
filename: kubernetes
update_cache: yes
- name: Install helm
package:
name:
- helm
state: latest

@ -1,55 +0,0 @@
- name: Install K3s-server for 1st-node
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname == groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Waiting for K3s-server to accept connections
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 6443
state: started
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Install K3s-server for other nodes
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname != groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname != groups['kubernetes'][0]"
- name: Waiting for K3s-server to accept connections on other nodes
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 6443
state: started
when: "inventory_hostname != groups['kubernetes'][0]"
#- name: Add Kubernetes environment-vars to /etc/profile.d/
# blockinfile:
# path: /etc/profile.d/k3s-bin.sh
# marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
# block: |
# export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
# create: true
- name: Deploy calico
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
when: "kubernetes.network.plugin == 'calico'"
- name: Deploy network-helpers
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml

@ -1,19 +0,0 @@
- name: Deploy calico operator
command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
register: command
changed_when: "'created' in command.stdout"
run_once: true
failed_when:
- "command.rc == 1 and 'AlreadyExists' not in command.stderr"
- name: Deploy calico resource template
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
dest: /root/calico-ressource.yml
run_once: true
- name: Deploy calico resource
command: kubectl apply -f /root/calico-ressource.yml
register: command
changed_when: "'created' in command.stdout"
run_once: true

@ -1,7 +0,0 @@
- name: Deploy service-file for routing-table to wireguard-translation
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
mode: u=rw,g=r,o=r
run_once: true
when: "kubernetes.network.helper.routingtabletowg"

@ -1,6 +0,0 @@
- name: Set control-plane-dns-endpoint towards local-ip
blockinfile:
path: /etc/hosts
marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
block: |
{{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}

@ -1,4 +0,0 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./install.yml

@ -1,42 +0,0 @@
#- name: Load br_netfilter kernel-module
# modprobe:
# name: br_netfilter
# state: present
- name: Set sysctl settings for iptables bridged traffic
copy:
dest: "/etc/sysctl.d/kubernetes.conf"
content: |
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.forwarding=1
net.ipv6.conf.all.forwarding=1
notify: reload_sysctl
#- name: Disable swap
# command: swapoff -a
- name: Install required packages
package:
name:
#- containerd
#- iptables
# For Longhorn:
- nfs-common
- open-iscsi
state: latest
- import_tasks: ./prerequisites/containerd.yml
- name: Gather interface-name
set_fact:
interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
- name: Getting nodeIp-data from interface
set_fact:
nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"
- name: Run handlers to reload configurations
meta: flush_handlers

@ -1,24 +0,0 @@
- name: Check if containerd-service exists & is started
service:
name: containerd
state: started
ignore_errors: true
register: containerd_status
- name: Install containerd when not exists
package:
name:
- containerd
when: containerd_status is failed
- name: Create containerd config-folder
file:
path: /etc/containerd
state: directory
- name: Deploy containerd-config
ansible.builtin.copy:
src: containerd_config.toml
dest: /etc/containerd/config.toml
mode: u=rw,g=r,o=r
notify: restart_containerd

@ -1,18 +0,0 @@
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
token: '{{ kubernetes.token }}'
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}

@ -1,49 +0,0 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
{% endif %}
token: '{{ kubernetes.token }}'
tls-san:
- {{ kubernetes.control_plane.dns_name }}
# Networking
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
{% endif %}
egress-selector-mode: disabled
# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}
# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
{{ kubernetes.config_extra | to_yaml }}

@ -1,34 +0,0 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: 26
cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
- blockSize: 122
cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% endif %}
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}

@ -1,45 +0,0 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: routingtabletowg
namespace: calico-system
labels:
app: routingtabletowg
spec:
selector:
matchLabels:
app: routingtabletowg
template:
metadata:
labels:
app: routingtabletowg
spec:
tolerations:
# this toleration is to have the daemonset runnable on master nodes
# remove it if your masters can't run pods
- key: node-role.kubernetes.io/master
effect: NoSchedule
hostNetwork: true
containers:
- name: routingtabletowg
image: "ruakij/routingtabletowg:0.2.0"
env:
- name: INTERFACE
value: {{ kubernetes.ipPool.nodeIp_interface }}
- name: FILTER_PROTOCOL
value: bird
- name: PERIODIC_SYNC
value: '300'
securityContext:
capabilities:
add:
- NET_ADMIN
resources:
requests:
cpu: 10m
memory: 10Mi
limits:
cpu: 20m
memory: 20Mi
---

@ -1,6 +0,0 @@
netbird_client:
# Key and url to join a network
# leave empty to ignore
join_network:
setup_key:
management_url:

@ -1,26 +0,0 @@
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- ca-certificates
- curl
- gnupg
- name: Add netbird-key
apt_key:
url: https://pkgs.wiretrustee.com/debian/public.key
state: present
- name: Add netbird-repository
apt_repository:
repo: "deb https://pkgs.wiretrustee.com/debian stable main"
state: present
filename: netbird
update_cache: yes
- name: Install wireguard & netbird
package:
name:
- wireguard
- netbird
state: latest

@ -1,16 +0,0 @@
- name: Join netbird-network
when: "netbird_client.join_network.setup_key is defined"
command: "netbird up --management-url {{ netbird_client.join_network.management_url }} --setup-key {{ netbird_client.join_network.setup_key }}"
failed_when: command.rc != 0
changed_when: "'Connected' in command.stdout"
register: command
- name: Wait for netbird-interface to exist
wait_for:
path: "/sys/class/net/wt0"
state: present
when: command.changed
- name: Gather facts to get changes
ansible.builtin.gather_facts:
when: command.changed

@ -1,4 +0,0 @@
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml

@ -1,29 +0,0 @@
nftables:
# Rules to add
# Handled as templates
# Creates separate files for each entry.
# The identifier is necessary for ansible to be able to merge the keys (when 'hash_behaviour = merge')
# rule-ids have to be unique across files and raw
rules:
# Files with Rules to add
files:
#'<group_identifier>': '<relative-location>'
#'<group_identifier>':
# main: <relative-location>
# '<identifier>': '<relative-location>'
# Rules to add
raw:
#'<group_identifier>': '<content>'
#'<group_identifier>':
# main: <content>
# '<identifier>': '<content>'
# Decides if /etc/nftables.conf is applied or separate files which have changed
# Separate changes require the files to be self-tidying so they do not end up with duplicate rules
# e.g.
# table ip mytable
# flush table ip mytable
# delete table ip mytable
# table ip mytable {} ...
apply_global: false
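A minimal sketch of a concrete rules definition in host_vars following the structure documented above (group names, file names and rule contents are hypothetical):

nftables:
  rules:
    files:
      # template rendered to /etc/nftables/ansible-managed/base.nft
      'base': 'templates/nftables/base.nft.j2'
    raw:
      # written to /etc/nftables/ansible-managed/wireguard.nft; self-tidying so it can be re-applied on its own
      'wireguard': |
        table inet wireguard
        flush table inet wireguard
        table inet wireguard {
          chain input {
            type filter hook input priority 0; policy accept;
            udp dport 51820 accept
          }
        }
  apply_global: false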

@ -1,8 +0,0 @@
- name: Load group rules
command: "nft -f /etc/nftables/ansible-managed/{{ item }}.nft"
loop: "{{ combined_rules | list }}"
when: not nftables.apply_global
- name: Load global rule file
command: "nft -f /etc/nftables.nft"
when: nftables.apply_global

@ -1,11 +0,0 @@
- name: Deploying group files
include_tasks: ./per-group-template-file.yml
with_items:
- "{{ nftables.rules.files | list }}"
- name: Deploying group raw-files
include_tasks: ./per-group-template.yml
with_items:
- "{{ nftables.rules.raw | list }}"
- include_tasks: ./remove-files.yml

@ -1,51 +0,0 @@
- set_fact:
group_identifier: "{{ item }}"
value: "{{ nftables.rules.files[item] }}"
when: "item is defined"
#'<group_identifier>': '<relative-location>'
- block:
- name: Create main rule file
template:
src: "{{ value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
when: value is string
#'<group_identifier>':
# main: <relative-location>
# '<identifier>': '<relative-location>'
- block:
- set_fact:
items: "{{ nftables.rules.files[item] }}"
- block:
- name: Create main rule file
template:
src: "{{ items['main'] }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
- name: Include rule files
lineinfile:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
when: items['main'] is defined
- name: Create group folder
file:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
state: directory
when: items|length > 0
- set_fact:
test: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
- name: Create included rule files
template:
src: "{{ fileItem.value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ fileItem.key }}.nft"
loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
loop_control:
loop_var: fileItem
when: value is mapping

@ -1,48 +0,0 @@
- set_fact:
group_identifier: "{{ item }}"
value: "{{ nftables.rules.raw[item] }}"
when: "item is defined"
#'<group_identifier>': '<content>'
- block:
- name: Create main rule file
copy:
content: "{{ value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
when: value is string
#'<group_identifier>':
# main: <content>
# '<identifier>': '<content>'
- block:
- set_fact:
items: "{{ nftables.rules.raw[item] }}"
- block:
- name: Create main rule file
copy:
content: "{{ items['main'] }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
- name: Include rule files
lineinfile:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
when: items['main'] is defined
- name: Create group folder
file:
path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
state: directory
when: items|length > 0
- name: Create included rule files
copy:
content: "{{ included_item.value }}"
dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ included_item.key }}.nft"
loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
loop_control:
loop_var: included_item
when: value is mapping

@ -1,4 +0,0 @@
- name: Install Packages
package:
name:
- nftables

@ -1,7 +0,0 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./setup-packages.yml
- import_tasks: ./deploy-rules/main.yml
- import_tasks: ./apply-files.yml

@ -1,13 +0,0 @@
# Defaults if missing
- name: Set defaults if missing
set_fact:
nftables:
rules:
files: "{{ nftables.rules.files | default({}) | combine({}) }}"
raw: "{{ nftables.rules.raw | default({}) | combine({}) }}"
combined_rules: "{{ nftables.rules.raw | combine(nftables.rules.files, recursive=true) }}"
#- name: Check items for consistency
# assert:
# that: "{{ nftables.rules.files.values() | length }} + {{ nftables.rules.raw.values() | length }} == {{ combined_rules.values() | length }}"
# fail_msg: "files and raw rules share the same identifier"

@ -1,21 +0,0 @@
- name: Handle removed group files
block:
- find:
paths: /etc/nftables/ansible-managed/
file_type: 'any'
excludes: '{% for item in combined_rules %}{{ item }},{{ item }}.nft,{% endfor %}'
depth: 1
register: removeFiles
- file:
path: "{{ fileItem.path }}"
state: absent
loop: "{{ removeFiles.files }}"
loop_control:
label: "{{ fileItem.path }}"
loop_var: fileItem
- name: Handle removed included files per group
include_tasks: ./remove-per-group.yml
with_items:
- "{{ combined_rules | list }}"

@ -1,20 +0,0 @@
- set_fact:
group_identifier: "{{ item }}"
group_items: "{{ combined_rules[item] }}"
- block:
- find:
paths: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
file_type: 'any'
excludes: '{% for item in group_items %}{{ item }}.nft,{% endfor %}'
register: removeFiles
- file:
path: "{{ fileItem.path }}"
state: absent
loop: "{{ removeFiles.files }}"
loop_control:
label: "{{ fileItem.path }}"
loop_var: fileItem
when: group_items is mapping

@ -1,15 +0,0 @@
- name: Install nftables
package:
name:
- nftables
- name: Create /etc/nftables/ansible-managed
file:
path: /etc/nftables/ansible-managed
state: directory
- name: Include files in /etc/nftables/ansible-managed/ from /etc/nftables.conf
blockinfile:
path: /etc/nftables.conf
marker: "# {mark} ANSIBLE MANAGED BLOCK - nftables"
content: 'include "/etc/nftables/ansible-managed/*.nft"'

@ -1,12 +0,0 @@
wireguard_ipv6_converter:
version: latest
# see https://github.com/Ruakij/wg-ipv6-converter#31-environment
setup:
interface: wg0
#ipv6_format: fc12::%02x%02x:%02x%02x/%d
#filter_prefix: 100.100
#recheck_interval: 60s
service:
#bindTo: netbird.service
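A hedged example of a per-host override for running the converter against the netbird-managed interface (wt0 is the interface the netbird role waits for; the values are illustrative):

wireguard_ipv6_converter:
  setup:
    interface: wt0
  service:
    bindTo: netbird.service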

@ -1,11 +0,0 @@
- name: Get architecture
set_fact:
arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'arm64' }}"
versionUri: "{% if wireguard_ipv6_converter.version == 'latest' %}latest/download{% else %}download/{{ wireguard_ipv6_converter.version }}{% endif %}"
- name: Download binary
get_url:
url: https://github.com/Ruakij/wg-ipv6-converter/releases/{{ versionUri }}/wg-ipv6-converter_{{ arch }}
dest: /usr/local/bin/wg-ipv6-converter
mode: "744"
register: deployDownload

@ -1,3 +0,0 @@
- import_tasks: ./deploy.yml
- import_tasks: ./setup-service.yml

@ -1,27 +0,0 @@
- name: Deploy service
ansible.builtin.template:
src: wg-ipv6-conv.service.jinja2
dest: /etc/systemd/system/wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}.service
register: serviceFile
- name: Enable service
ansible.builtin.systemd:
name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
daemon_reload: true
enabled: true
- name: Start service if interface exists already
ansible.builtin.service:
name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
state: "{{ 'restarted' if deployDownload.changed or serviceFile.changed else 'started' }}"
register: service
when: "wireguard_ipv6_converter.setup.interface in ansible_interfaces"
- name: Pause for 5s to wait for program to have run
ansible.builtin.pause:
seconds: 5
when: "service.changed"
- name: Gather facts to get changes
ansible.builtin.gather_facts:
when: "service.changed"

@ -1,29 +0,0 @@
[Unit]
Description=WireGuard IPv6 converter for {{ wireguard_ipv6_converter.setup.interface }}
{% if wireguard_ipv6_converter.service.bindTo is defined %}
BindsTo={{ wireguard_ipv6_converter.service.bindTo }}
After={{ wireguard_ipv6_converter.service.bindTo }}
{% endif %}
[Service]
Type=simple
{% if wireguard_ipv6_converter.service.bindTo is defined %}
ExecStartPre=/bin/sleep 10
{% endif %}
ExecStart=/usr/local/bin/wg-ipv6-converter
Restart=always
RestartSec=30
Environment="INTERFACE={{ wireguard_ipv6_converter.setup.interface }}"
{% if wireguard_ipv6_converter.setup.ipv6_format is defined %}
Environment="IPV6_FORMAT={{ wireguard_ipv6_converter.setup.ipv6_format }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.filter_prefix is defined %}
Environment="FILTER_PREFIX={{ wireguard_ipv6_converter.setup.filter_prefix }}"
{% endif %}
{% if wireguard_ipv6_converter.setup.recheck_interval is defined %}
Environment="RECHECK_INTERVAL={{ wireguard_ipv6_converter.setup.recheck_interval }}"
{% endif %}
[Install]
WantedBy=multi-user.target

@ -1,105 +0,0 @@
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="$HOME/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="agnoster"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in $ZSH/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment one of the following lines to change the auto-update behavior
# zstyle ':omz:update' mode disabled # disable automatic updates
# zstyle ':omz:update' mode auto # update automatically without asking
# zstyle ':omz:update' mode reminder # just remind me to update when it's time
# Uncomment the following line to change how often to auto-update (in days).
# zstyle ':omz:update' frequency 13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS="true"
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# You can also set it to another string to have that shown instead of the default red dots.
# e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f"
# Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765)
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in $ZSH/plugins/
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
git
zsh-autosuggestions
fast-syntax-highlighting
)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"

@ -1,44 +0,0 @@
- name: End play when running as root
meta: end_play
when: ansible_facts.user_id == "root"
- name: Install zsh
package:
name:
- zsh
state: latest
become: yes
- name: Setup Oh-my-zsh using yay/pacman
community.general.pacman:
name:
- oh-my-zsh
- oh-my-zsh-plugin-autosuggestions
- fast-syntax-highlighting
state: latest
executable: yay
when: ansible_facts.distribution == "Archlinux"
become: yes
- name: Setup Oh-my-zsh using sh
command: 'sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"'
when: ansible_facts.distribution != "Archlinux"
become: yes
- name: Symlink custom-plugins
file:
src: "/usr/share/zsh/plugins/fast-syntax-highlighting"
dest: "/usr/share/oh-my-zsh/custom/plugins/"
state: link
- name: Deploy user-config-file
ansible.builtin.copy:
src: .zshrc
dest: "{{ ansible_facts.env.HOME }}/"
- name: Symlink oh-my-zsh user-settings folder from shared
file:
src: "/usr/share/oh-my-zsh/"
dest: "{{ ansible_facts.env.HOME }}/.oh-my-zsh"
state: link