59 Commits

Author SHA1 Message Date
b8ae38bab8 Check localhost instead of external in case firewalls block external connections 2023-06-21 17:36:51 +02:00
093612f3a7 Change restart-check to started/restarted check 2023-04-14 10:20:16 +02:00
a92409c56f Add failed_when to deploy calico operator 2023-04-14 10:01:42 +02:00
f50e3ac33c Use first node's IP for joining cluster 2023-04-14 09:50:39 +02:00
668ff23ee6 Fix wrong usage of service task 2023-04-14 09:42:46 +02:00
c2c6a2872f Fix conditional for changed after install 2023-04-12 23:12:10 +02:00
c1c7ec9e56 Remove workaround as k3s is now at 1.26 2023-04-12 22:39:43 +02:00
550f6868ff Fix old usage of network_plugin var 2023-04-12 20:49:38 +02:00
c8f90f0f8d Update calico 2023-04-05 20:15:31 +02:00
41570ea40d Create new block for network-stuff 2023-04-05 20:14:36 +02:00
a3c887748a Move network-helper to own file independent from calico 2023-04-05 20:14:12 +02:00
d113625fa8 Fix env-value not being string 2023-04-05 20:08:25 +02:00
dadd077723 Fix service and conditional 2023-04-05 14:13:22 +02:00
0d43d07ad4 Add extra-config option 2023-04-05 14:12:56 +02:00
d6f8f975bb Reload when config changed, but install already done 2023-04-05 13:32:14 +02:00
7c86a5d77d Add register for config 2023-04-05 13:30:22 +02:00
8c4e3c2401 Update routingtabletowg and use new sync feature 2023-03-30 14:54:49 +02:00
b46d35c8a5 Add labels 2023-03-30 14:54:28 +02:00
791ad96849 Add ipv6-check to calico deploy 2023-03-19 15:17:45 +01:00
fc3d9845d6 Fix nondeterministic node-selection by using group 2023-03-19 15:17:02 +01:00
590b75ac23 Add quotes to token-usage for special chars 2023-03-19 14:01:19 +01:00
0c82504299 Separate getting name and ips to fix bug easily 2023-03-19 14:00:46 +01:00
2fee9a1747 Only enable ipv6 when available and activated 2023-03-19 14:00:15 +01:00
fb44c39969 Add install of often-used packages 2023-03-19 13:58:47 +01:00
5452303992 Remove netmaker from dependency 2023-03-19 13:58:05 +01:00
4321d78cf8 Add comments to variables 2023-03-19 13:57:57 +01:00
f9a859e95c Add ingress-option 2023-03-19 13:56:26 +01:00
e5920b3ddf Add network-plugin option 2023-03-17 15:57:48 +01:00
0fc5dbb791 Initial role-data 2022-11-02 16:29:55 +01:00
9cb2e88193 Merge branch 'role_netmaker' 2022-10-27 01:16:57 +02:00
25ceb0f456 Merge branch 'role_netmaker_server' 2022-10-27 01:16:55 +02:00
fcc4f1ed18 Fix task 2022-10-21 15:40:00 +02:00
de0e220004 Add defaults-var-file 2022-10-21 15:32:20 +02:00
f9cc97a8f2 Add CA to trust-store 2022-10-21 15:32:08 +02:00
811fc22eef Delete unnecessary task-file 2022-10-21 15:31:37 +02:00
cdd4c9babb Remove CA from args; rqlite will then use the system trust-store 2022-10-21 14:49:35 +02:00
d553f604a9 Add own certs to mosquitto 2022-10-21 14:48:29 +02:00
806b41b73e Fix proxy-protocol being expected 2022-10-21 14:22:38 +02:00
ec98188a24 Fix variable name 2022-10-21 14:20:26 +02:00
06bdae380b Revert proxy-protocol-matching 2022-10-21 11:45:12 +02:00
83b50c10cd Use new variables and fix requests 2022-10-21 08:42:37 +02:00
3890007042 Use more specific hostnames in cert 2022-10-21 08:42:11 +02:00
bb3d363094 Created nginx-config-file-templates 2022-10-21 08:33:08 +02:00
7453f1e616 Move variables to defaults-folder 2022-10-21 08:31:05 +02:00
e022a6e9f0 Restructure for better readability 2022-10-20 08:34:35 +02:00
772dc3a620 Move TLS-point outside of netmaker-system 2022-10-20 08:32:49 +02:00
6d5c86927d Make diagram more readable 2022-10-18 12:33:25 +02:00
c94168fb30 Re-enable connection-check (todo: change check to http) 2022-10-17 22:49:20 +02:00
6168ba2b0a Add missing dependency 2022-10-17 22:48:58 +02:00
e4a2c5dd2f Remove ports and add/change advertised addresses and ports 2022-10-17 22:48:49 +02:00
315f5a1805 Fix private_ip checking 2022-10-17 22:47:10 +02:00
d2d8ebd8cc Add missing nginx-file 2022-10-17 22:46:41 +02:00
dd87d5e724 Move cert-generation outside 2022-10-17 22:46:20 +02:00
86e6317e28 Fix naming 2022-10-17 22:45:24 +02:00
8fddfc532f Add nginx as service 2022-10-17 22:43:57 +02:00
f733543ae1 Fix architecture-diagram 2022-10-17 21:30:50 +02:00
526cf66bd7 Add chart for architecture 2022-10-17 21:19:10 +02:00
4cb418e2b6 Add role netmaker (netclient) 2022-10-17 14:51:52 +02:00
b593a2874a Add role netmaker_server 2022-10-17 14:48:02 +02:00
43 changed files with 917 additions and 153 deletions

View File

@@ -2,16 +2,40 @@
kubernetes:
ipPool:
ipv4:
# Minimum: /24
cluster_cidr: 10.42.0.0/16
service_cidr: 10.43.0.0/16
nodeip_cidr: 10.41.0.0/24
ipv6:
# Minimum: /120
cluster_cidr: fd42::/56
service_cidr: fd43::/112
# Interface to grab node-IPv4/v6 from
nodeIp_interface: <interface to grab nodeIp from>
control_plane:
dns_name: <control-plane dns-reachable-name>
shared_token: <shared token for nodes to join>
token: <shared token for nodes to join>
network:
# One of [flannel, calico]
plugin: calico
# Helper for networking
helper:
# https://github.com/Ruakij/RoutingTableToWg
# Translates routes received via e.g. BGP into wireguard allowed-ips
# Helpful when nodeIp_interface is a wireguard-interface
routingtabletowg: false
# One of [traefik-ingress]
ingress_controller: traefik-ingress
config_extra:
# etcd-tuning
# heartbeat: 0.5-1.5x of RTT
# election: at least 10x heartbeat
etcd-arg:
heartbeat-interval: 500
election-timeout: 5000
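
Since the server config-template further down ends with {{ kubernetes.config_extra | to_yaml }}, the block above is appended verbatim to /etc/rancher/k3s/config.yaml and would render roughly as (key-order may differ):

etcd-arg:
  election-timeout: 5000
  heartbeat-interval: 500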

View File

@@ -1,30 +1,33 @@
@startuml
component netmaker as nm1
component netmaker as nm2
component ... as nm3
rectangle "Control-Plane" as control_plane {
rectangle "Node" as sn1 {
component "netclient" as sn1_netclient
interface interface as if1
interface interface as if2
interface ... as if3
component etcd as sn1_etcd
component "k3s-server" as sn1_k3s_server
sn1_k3s_server - sn1_etcd
}
component kubernetes as kn1
component kubernetes as kn2
component ... as kn3
rectangle "Node" as sn2 {
component "netclient" as sn2_netclient
nm1 -up- if1
kn1 -down-( if1
component etcd as sn2_etcd
component "k3s-server" as sn2_k3s_server
sn2_k3s_server - sn2_etcd
}
nm2 -up- if2
kn2 -down-( if2
sn1_netclient -- sn2_netclient
sn1_etcd -- sn2_etcd
}
nm3 -up- if3
kn3 -down-( if3
rectangle "Workers" {
rectangle "Node" as an1 {
component "netclient" as an1_netclient
nm1 -right- nm2
nm2 -right- nm3
component "k3s-agent" as sn1_k3s_agent
}
}
kn1 .right. kn2
kn2 .right. kn3
@enduml

View File

@@ -1,9 +0,0 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-canal
namespace: kube-system
spec:
valuesContent: |-
flannel:
backend: "wireguard"

View File

@@ -1,4 +1,3 @@
---
dependencies:
- role: docker
- role: netmaker

View File

@@ -0,0 +1,30 @@
- name: Create k3s-folder
ansible.builtin.file:
path: /etc/rancher/k3s/
state: directory
mode: '0755'
- name: Deploy k3s config
ansible.builtin.template:
src: k3s/{{ type }}/config.yaml.jinja2
dest: /etc/rancher/k3s/config.yaml
register: config
- name: Download install-script
get_url:
url: https://get.k3s.io
dest: /root/k3s_install.sh
mode: '0744'
# todo: update when file changed?
- import_tasks: ./install/server/setup_network.yml
when: "type == 'server'"
- import_tasks: ./install/server/install_helm.yml
when: "type == 'server'"
- import_tasks: ./install/server/install_k3s.yml
when: "type == 'server'"
- import_tasks: ./install/agent/install_k3s.yml
when: "type == 'agent'"
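
These tasks branch entirely on the type-variable; a minimal playbook wiring them up could look like this (a sketch — group- and role-names assumed from the paths in this diff, not part of it):

- hosts: kubernetes
  vars:
    type: server   # use 'agent' on worker-nodes
  roles:
    - kubernetes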

View File

@@ -0,0 +1,12 @@
- name: Install K3s agent
command: /root/k3s_install.sh {{ type }}
register: command
changed_when: "'No change detected' not in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
- name: Make sure service is started / restarted on config change
service:
name: k3s-agent
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"

View File

@@ -0,0 +1,55 @@
- name: Install K3s-server for 1st-node
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname == groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Waiting for K3s-server to accept connections
ansible.builtin.wait_for:
host: "127.0.0.1"
port: 6443
state: started
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Install K3s-server for other nodes
command: /root/k3s_install.sh {{ type }}
when: "inventory_hostname != groups['kubernetes'][0]"
register: command
changed_when: "'No change detected' not in command.stdout"
until: "command is not failed"
retries: 2
delay: 10
- name: Make sure service is started / restarted on config change
service:
name: k3s
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
when: "inventory_hostname != groups['kubernetes'][0]"
- name: Waiting for K3s-server to accept connections on other nodes
ansible.builtin.wait_for:
host: "127.0.0.1"
port: 6443
state: started
when: "inventory_hostname != groups['kubernetes'][0]"
#- name: Add Kubernetes environment-vars to /etc/profile.d/
# blockinfile:
# path: /etc/profile.d/k3s-bin.sh
# marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
# block: |
# export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
# create: true
- name: Deploy calico
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
when: "kubernetes.network.plugin == 'calico'"
- name: Deploy network-helpers
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml

View File

@@ -0,0 +1,19 @@
- name: Deploy calico operator
command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
register: command
changed_when: "'created' in command.stdout"
run_once: true
failed_when:
- "command.rc == 1 and 'AlreadyExists' not in command.stderr"
- name: Deploy calico resource template
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
dest: /root/calico-ressource.yml
run_once: true
- name: Deploy calico resource
command: kubectl apply -f /root/calico-ressource.yml
register: command
changed_when: "'created' in command.stdout"
run_once: true
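
Neither task waits for calico to actually come up; if that is needed, a follow-up check could be sketched like this (kubectl wait assumed available on the node, namespace taken from the operator-manifest):

- name: Wait for calico pods to become ready (illustrative sketch)
  command: kubectl wait --namespace calico-system --for=condition=Ready pods --all --timeout=300s
  changed_when: false
  run_once: true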

View File

@@ -0,0 +1,7 @@
- name: Deploy manifest for routing-table-to-wireguard translation
ansible.builtin.template:
src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
mode: u=rw,g=r,o=r
run_once: true
when: "kubernetes.network.helper.routingtabletowg"

View File

@@ -0,0 +1,6 @@
- name: Set control-plane-dns-endpoint towards local-ip
blockinfile:
path: /etc/hosts
marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
block: |
{{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}
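
With illustrative values (node-ip 10.41.0.1, dns_name k8s.example.org), blockinfile renders the following into /etc/hosts and keeps it replaceable on later runs via the marker-lines:

# BEGIN ANSIBLE MANAGED BLOCK | k3s
10.41.0.1 k8s.example.org
# END ANSIBLE MANAGED BLOCK | k3s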

View File

@@ -1,65 +0,0 @@
- name: Create rke-helm-manifests-folder
ansible.builtin.file:
path: '/var/lib/rancher/rke2/server/manifests/'
state: directory
mode: '0755'
- name: Deploy helm-manifests
ansible.builtin.copy:
src: 'helm-manifests/'
dest: '/var/lib/rancher/rke2/server/manifests/'
- name: Create rke-folder
ansible.builtin.file:
path: /etc/rancher/rke2/
state: directory
mode: '0755'
- name: Deploy rke2 config
ansible.builtin.template:
src: rke2/config.yaml.template
dest: /etc/rancher/rke2/config.yaml
- name: Install RKE2
command: bash -c "curl -sfL https://get.rke2.io | sh -"
- name: Add RKE2 environment-vars to /etc/profile.d/
blockinfile:
path: /etc/profile.d/rke2-bin.sh
marker: "# {mark} ANSIBLE MANAGED BLOCK | rke2"
block: |
export PATH="/var/lib/rancher/rke2/bin/:$PATH"
export KUBECONFIG="/etc/rancher/rke2/rke2.yaml"
create: true
- name: Enable and start rke2-server service for 1st-node
ansible.builtin.service:
name: rke2-server
enabled: yes
state: started
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Waiting for kubelet to accept connections
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 10250
state: started
when: "inventory_hostname == groups['kubernetes'][0]"
- name: Enable and start rke2-server service for other nodes
ansible.builtin.service:
name: rke2-server
enabled: yes
state: started
when: "inventory_hostname != groups['kubernetes'][0]"
register: rke2_start
until: "rke2_start is not failed"
retries: 2
delay: 10
- name: Waiting for kubelet to accept connections on other nodes
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 10250
state: started
when: "inventory_hostname != groups['kubernetes'][0]"

View File

@@ -1,6 +1,4 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./install_helm.yml
- import_tasks: ./install_rke2.yml
- import_tasks: ./install.yml

View File

@@ -17,53 +17,26 @@
#- name: Disable swap
# command: swapoff -a
- name: Install iptables
- name: Install required packages
package:
name:
#- containerd
- iptables
#- iptables
# For Longhorn:
- nfs-common
- open-iscsi
state: latest
- name: Check if containerd-service exists & is started
service:
name: containerd
state: started
ignore_errors: true
register: containerd_status
- name: Install containerd when not exists
package:
name:
- containerd
when: containerd_status is failed
- import_tasks: ./prerequisites/containerd.yml
- name: Create containerd config-folder
file:
path: /etc/containerd
state: directory
- name: Deploy containerd-config
ansible.builtin.copy:
src: containerd_config.toml
dest: /etc/containerd/config.toml
mode: u=rw,g=r,o=r
notify: restart_containerd
# todo: Move to netmaker-role as handler?
- name: Re-gather facts to pick up changed interfaces
ansible.builtin.gather_facts:
- name: Normalize interface-name (Ansible fact-keys use underscores)
set_fact:
interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
- name: Getting nodeIp-data from interface
set_fact:
nodeip_ipv4: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv4.address }}"
nodeip_ipv6: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv6[0].address }}"
- name: Set control-plane-dns-endpoint towards local-ip
blockinfile:
path: /etc/hosts
marker: "# {mark} ANSIBLE MANAGED BLOCK | k8s"
block: |
{{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}
nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined else '' }}"
- name: Run handlers to reload configurations
meta: flush_handlers
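
The replace('-', '_') above is needed because Ansible flattens interface-names into fact-keys with underscores; for an assumed interface wg-k8s the relevant facts look roughly like:

ansible_facts:
  wg_k8s:
    ipv4:
      address: 10.41.0.1
    ipv6:
      - address: fd41::1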

View File

@@ -0,0 +1,24 @@
- name: Check if containerd-service exists & is started
service:
name: containerd
state: started
ignore_errors: true
register: containerd_status
- name: Install containerd when not exists
package:
name:
- containerd
when: containerd_status is failed
- name: Create containerd config-folder
file:
path: /etc/containerd
state: directory
- name: Deploy containerd-config
ansible.builtin.copy:
src: containerd_config.toml
dest: /etc/containerd/config.toml
mode: u=rw,g=r,o=r
notify: restart_containerd

View File

@@ -0,0 +1,18 @@
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
token: '{{ kubernetes.token }}'
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
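
For a dual-stack agent with illustrative addresses and labels, this template renders roughly to:

server: https://10.41.0.1:6443
token: '<shared token>'
node-ip: 10.41.0.5,fd41::5
node-label:
  - topology.kubernetes.io/region=eu
  - topology.kubernetes.io/zone=dc1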

View File

@@ -0,0 +1,49 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
# Initialize with internal etcd
cluster-init: true
{% else %}
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
{% endif %}
token: '{{ kubernetes.token }}'
tls-san:
- {{ kubernetes.control_plane.dns_name }}
# Networking
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
{% else %}
node-ip: {{ nodeip_ipv4 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
{% endif %}
egress-selector-mode: disabled
# Network-plugin
{% if kubernetes.network.plugin == "flannel" %}
flannel-backend: vxlan
{% else %}
disable-network-policy: true
flannel-backend: none
{% endif %}
# Ingress-plugin
{% if kubernetes.ingress_controller != "traefik-ingress" %}
disable: traefik
{% endif %}
## Label
# Region & DC
node-label:
{% if region is defined %}
- topology.kubernetes.io/region={{ region }}
{% endif %}
{% if zone is defined %}
- topology.kubernetes.io/zone={{ zone }}
{% endif %}
{{ kubernetes.config_extra | to_yaml }}
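
On the first node (cluster-init branch), with the default pools, calico and an illustrative dns_name, the networking-part of /etc/rancher/k3s/config.yaml renders roughly to:

cluster-init: true
token: '<shared token>'
tls-san:
  - k8s.example.org
node-ip: 10.41.0.1,fd41::1
cluster-cidr: 10.42.0.0/16,fd42::/56
service-cidr: 10.43.0.0/16,fd43::/112
egress-selector-mode: disabled
disable-network-policy: true
flannel-backend: none

followed by the node-labels and the rendered config_extra block shown earlier.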

View File

@@ -0,0 +1,34 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: 26
cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
- blockSize: 122
cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
encapsulation: None
natOutgoing: Enabled
nodeSelector: all()
{% endif %}
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}

View File

@@ -0,0 +1,45 @@
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: routingtabletowg
namespace: calico-system
labels:
app: routingtabletowg
spec:
selector:
matchLabels:
app: routingtabletowg
template:
metadata:
labels:
app: routingtabletowg
spec:
tolerations:
# this toleration is to have the daemonset runnable on master nodes
# remove it if your masters can't run pods
- key: node-role.kubernetes.io/master
effect: NoSchedule
hostNetwork: true
containers:
- name: routingtabletowg
image: "ruakij/routingtabletowg:0.2.0"
env:
- name: INTERFACE
value: {{ kubernetes.ipPool.nodeIp_interface }}
- name: FILTER_PROTOCOL
value: bird
- name: PERIODIC_SYNC
value: '300'
securityContext:
capabilities:
add:
- NET_ADMIN
resources:
requests:
cpu: 10m
memory: 10Mi
limits:
cpu: 20m
memory: 20Mi
---
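
Whether the helper actually syncs anything can be checked on a node by comparing the bird-routes with wireguard's allowed-ips (interface-name is a placeholder):

ip route show proto bird
wg show <nodeIp_interface> allowed-ips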

View File

@@ -1,16 +0,0 @@
## Base ##
container-runtime-endpoint: unix:///run/containerd/containerd.sock
{% if inventory_hostname != groups['kubernetes'][0] %}
server: https://{{ kubernetes.control_plane.dns_name }}:9345
{% endif %}
token: {{ kubernetes.shared_token }}
tls-san:
- {{ kubernetes.control_plane.dns_name }}
## Networking ##
#cni: cilium
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}

View File

@@ -0,0 +1,4 @@
netclient:
# Token to join default-network
# leave empty to ignore
join_network_token:

netmaker/meta/main.yml Normal file
View File

@@ -0,0 +1,3 @@
---
dependencies:
- role: docker

netmaker/tasks/certs.yml Normal file
View File

@@ -0,0 +1,4 @@
- name: Deploy CA Certificate
ansible.builtin.copy:
src: secret_files/netmaker_server/ca/ca.crt
dest: /etc/ssl/certs/netmaker-ca.pem

View File

@@ -0,0 +1,25 @@
- name: Install Packages
# when: docker_file.stat.exists == False
package:
name:
- gpg
- gpg-agent
- name: Add netmaker-key
apt_key:
url: https://apt.netmaker.org/gpg.key
state: present
- name: Add netmaker-repository
apt_repository:
repo: "deb https:apt.netmaker.org stable main"
state: present
filename: netmaker
update_cache: yes
- name: Install wireguard & netclient
package:
name:
- wireguard
- netclient
state: latest

View File

@@ -0,0 +1,5 @@
- name: Join netmaker-network
when: "netclient.join_network_token is defined"
command: "netclient join -t {{ netclient.join_network_token }}"
failed_when: command.rc != 0
register: command

netmaker/tasks/main.yml Normal file
View File

@@ -0,0 +1,5 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml

View File

@@ -0,0 +1,5 @@
netmaker_creds:
rqlite_password:
mq_admin_password:
master_key:

View File

@@ -0,0 +1,28 @@
# Overwrite for specific nodes to force dynamic-ip (disables setting the public-ip and forces an external lookup for it)
# When false, the node checks itself for a dynamic-ip (based on its private-ip)
netmaker_dynamicIp: false
netmaker_nginx:
# Listen-port
tls_port: 51820
# Advertise-Port for services
# (must also be reachable by internal services!)
advertise_port: 51820
# This is the base-domain used for generating hostnames for services
netmaker_base_domain:
# host + base_domain
netmaker_api:
host: netmaker-api
netmaker_ui:
host: netmaker-ui
# MQTT-broker
netmaker_broker:
tls_host: netmaker-broker
# host + node_hostname
netmaker_rqlite:
http_host: netmaker-rqlite-http
cluster_host: netmaker-rqlite-cluster

View File

@@ -0,0 +1,37 @@
@startuml
interface ng_TLS
component netmaker_server {
component nginx {
component ng_stream
component ng_http
ng_stream -up- ng_TLS
ng_stream -right-> ng_http : tls-termination
}
component nm_ui
nm_ui -up- nm_ui_http
ng_http -down-( nm_ui_http
component Mosquitto
Mosquitto -up- mq_plain
Mosquitto -up- mq_tls
ng_stream -down-( mq_tls
component rqlite
rqlite -up- rq_http
rqlite -up- rq_cluster
ng_stream -down-( rq_cluster
ng_http -down-( rq_http
component nm_api
nm_api -down- nm_api_http
ng_http --( nm_api_http
nm_api -up-( ng_TLS : db-connection to rqlite-master
nm_api --( mq_plain
}
@enduml

View File

@@ -0,0 +1,12 @@
per_listener_settings false
listener 8883
allow_anonymous false
certfile /certs/node.crt
keyfile /certs/node.key
listener 1883
allow_anonymous false
plugin /usr/lib/mosquitto_dynamic_security.so
plugin_opt_config_file /mosquitto/data/dynamic-security.json

View File

@@ -0,0 +1,23 @@
#!/bin/ash
wait_for_netmaker() {
echo "SERVER: ${NETMAKER_SERVER_HOST}"
until curl --output /dev/null --silent --fail --head \
--location "${NETMAKER_SERVER_HOST}/api/server/health"; do
echo "Waiting for netmaker server to startup"
sleep 1
done
}
main(){
# wait for netmaker to startup
apk add curl
wait_for_netmaker
echo "Starting MQ..."
# Run the main container command.
/docker-entrypoint.sh
/usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
}
main "${@}"

View File

@@ -0,0 +1,33 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
include /etc/nginx/stream.d/*.conf;

View File

@@ -0,0 +1,3 @@
---
# dependencies:
# - role: docker

View File

@@ -0,0 +1,40 @@
- name: Generate PrivateKey
community.crypto.openssl_privatekey:
path: /opt/netmaker_server/certs/node.key
owner: 1883 # Set owner to mosquitto-user (all other containers seem to run as root)
- name: Generate Certificate-Signing-Request from privateKey
community.crypto.openssl_csr:
path: /opt/netmaker_server/certs/node.csr
privatekey_path: /opt/netmaker_server/certs/node.key
common_name: "{{ ansible_facts.nodename }}"
subject_alt_name:
"DNS:{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }},\
DNS:{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }},\
DNS:{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }},\
DNS:{{ netmaker_api.host }}.{{ netmaker_base_domain }},\
DNS:{{ netmaker_ui.host }}.{{ netmaker_base_domain }}"
- name: Fetch CSR
ansible.builtin.fetch:
src: /opt/netmaker_server/certs/node.csr
dest: tmp_files/
- name: Sign CSR locally with CA
local_action: community.crypto.x509_certificate
args:
path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
csr_path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.csr
ownca_path: secret_files/netmaker_server/ca/ca.crt
ownca_privatekey_path: secret_files/netmaker_server/ca/ca.key
provider: ownca
- name: Copy Signed Certificate
ansible.builtin.copy:
src: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
dest: /opt/netmaker_server/certs/node.crt
- name: Copy CA Certificate
ansible.builtin.copy:
src: secret_files/netmaker_server/ca/ca.crt
dest: /opt/netmaker_server/certs/ca.crt
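
The role expects the CA to already exist under secret_files/netmaker_server/ca/; one possible way to create it on the control-machine (a sketch using the same community.crypto modules, paths assumed from this role):

- name: Generate CA private-key
  community.crypto.openssl_privatekey:
    path: secret_files/netmaker_server/ca/ca.key
- name: Generate CA CSR
  community.crypto.openssl_csr:
    path: secret_files/netmaker_server/ca/ca.csr
    privatekey_path: secret_files/netmaker_server/ca/ca.key
    common_name: netmaker-ca
    basic_constraints:
      - 'CA:TRUE'
    basic_constraints_critical: true
- name: Self-sign CA certificate
  community.crypto.x509_certificate:
    path: secret_files/netmaker_server/ca/ca.crt
    csr_path: secret_files/netmaker_server/ca/ca.csr
    privatekey_path: secret_files/netmaker_server/ca/ca.key
    provider: selfsigned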

View File

@@ -0,0 +1,20 @@
- import_tasks: ./prerequisites.yml
- name: Copy folder-structure
ansible.builtin.copy:
src: opt/netmaker_server
dest: /opt/
mode: preserve
- name: Deploy compose file
ansible.builtin.template:
src: docker-compose.yml.template
dest: /opt/netmaker_server/docker-compose.yml
- import_tasks: ./certs.yml
- import_tasks: ./nginx.yml
- import_tasks: ./rqlite.yml
- import_tasks: ./netmaker.yml

View File

@@ -0,0 +1,57 @@
- name: Start rest of netmaker-services
command: "docker-compose --project-directory /opt/netmaker_server/ up -d"
register: command
failed_when: command.rc != 0
- name: Wait for netmaker-api to become available
uri:
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}'
return_content: yes
validate_certs: no
status_code:
- 404
until: uri_output.status == 404
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
# todo: check if exists?
- name: Create default mesh-network 'servnet'
uri:
validate_certs: no
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks'
method: POST
body:
netid: servnet
addressrange: 10.92.0.0/24
addressrange6: fd92::/64
body_format: json
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker'][0]"
register: default_mesh
until: "default_mesh is not failed"
retries: 2
delay: 10
# todo: check if exists?
- name: Create token for default-network
uri:
validate_certs: no
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks/servnet/keys' # todo: proper implementation
method: POST
body:
name: ""
uses: 0
body_format: json
headers:
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
Content-Type: application/json
when: "inventory_hostname == groups['netmaker'][0]"
register: default_mesh_key
until: "default_mesh_key is not failed"
retries: 2
delay: 10
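
The generated key only lives in default_mesh_key so far; handing it to the netclient-role would need an extra step along these lines (response field-name assumed from the Netmaker v0.16 API, verify before use):

- name: Expose join-token for netclient-nodes (illustrative sketch)
  set_fact:
    netclient_join_token: "{{ default_mesh_key.json.accessstring }}"
  when: "inventory_hostname == groups['netmaker'][0]"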

View File

@@ -0,0 +1,18 @@
- name: Deploy nginx configs
template:
src: "{{item.src}}"
dest: "{{item.dst}}"
loop:
- { src: 'nginx/proxy.conf.template', dst: '/opt/netmaker_server/nginx/conf/conf.d/proxy.conf' }
- { src: 'nginx/passthrough.conf.template', dst: '/opt/netmaker_server/nginx/conf/stream.d/passthrough.conf' }
- name: Start nginx service
command: "docker-compose --project-directory /opt/netmaker_server/ up -d nginx"
register: command
failed_when: command.rc != 0
- name: Waiting for nginx to accept connections
ansible.builtin.wait_for:
host: "{{ inventory_hostname }}"
port: 51820
state: started

View File

@@ -0,0 +1,9 @@
- name: Install wireguard
package:
name:
- wireguard
state: latest
- name: Check if default-ipv4-address is private
set_fact:
# RFC1918-ranges plus the CGNAT shared-range 100.64.0.0/10
private_ipv4_address: "{{ ansible_facts.default_ipv4.address | regex_search('^((10)|(192\\.168)|(172\\.((1[6-9])|(2[0-9])|(3[0-1])))|(100\\.(6[4-9]|[7-9][0-9]|1[01][0-9]|12[0-7])))\\.') }}"

View File

@@ -0,0 +1,42 @@
- name: Deploy rqlite config
ansible.builtin.template:
src: rqlite-config.json.template
dest: /opt/netmaker_server/rqlite/config.json
- name: Start rqlite service for 1st-node
command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
register: command
failed_when: command.rc != 0
when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on 1st-node
uri:
url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
return_content: yes
validate_certs: no
status_code:
- 401
until: uri_output.status == 401
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Start rqlite service for other nodes
command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
register: command
failed_when: command.rc != 0
when: "inventory_hostname != groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on other nodes
uri:
url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
return_content: yes
validate_certs: no
status_code:
- 401
until: uri_output.status == 401
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
when: "inventory_hostname != groups['netmaker_server'][0]"

View File

@@ -0,0 +1,128 @@
version: "3.4"
services:
nginx:
image: nginx
restart: unless-stopped
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro # Override nginx-config to add stream-import
- ./nginx/conf/conf.d/:/etc/nginx/conf.d:ro # conf.d
- ./nginx/conf/stream.d/:/etc/nginx/stream.d:ro # stream.d
- ./certs:/certs:ro # SSL-certificates
ports:
- {{ netmaker_nginx.tls_port }}:443
rqlite: # Distributed sqlite-db
image: rqlite/rqlite
restart: unless-stopped
hostname: "{{ ansible_facts.nodename }}"
volumes:
- "./rqlite/data:/rqlite/file"
- "./rqlite/config.json:/config.json:ro"
- "./certs:/certs:ro"
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
command: "
-http-adv-addr {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-raft-addr [::]:4002
-raft-adv-addr {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-node-encrypt
-node-cert /certs/node.crt
-node-key /certs/node.key
-node-no-verify
-auth /config.json
{% if inventory_hostname != groups['netmaker'][0] %}
-join-as netmaker
-join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
"
# FIXME: /\ \/ Change http -> https
netmaker: # The Primary Server for running Netmaker
image: gravitl/netmaker:v0.16.1
depends_on:
- rqlite
cap_add:
- NET_ADMIN
- NET_RAW
- SYS_MODULE
sysctls:
- net.ipv4.ip_forward=1
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.all.forwarding=1
restart: unless-stopped
volumes: # Volume mounts necessary for sql, coredns, and mqtt
- ./dnsconfig/:/root/config/dnsconfig
- ./mosquitto/data/:/etc/netmaker/
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
hostname: "{{ ansible_facts.nodename }}"
environment: # Necessary capabilities to set iptables when running in container
NODE_ID: "{{ ansible_facts.nodename }}"
MASTER_KEY: "{{ netmaker_creds.master_key }}" # The admin master key for accessing the API. Change this in any production installation.
{% if not private_ipv4_address and not netmaker_dynamicIp %}
SERVER_HOST: "{{ ansible_facts.default_ipv4.address }}" # Set to public IP of machine.
{% endif %}
SERVER_NAME: "{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}" # The domain/host IP indicating the mq broker address
SERVER_HTTP_HOST: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}" # Overrides SERVER_HOST if set. Useful for making HTTP available via different interfaces/networks.
SERVER_API_CONN_STRING: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}"
DISABLE_REMOTE_IP_CHECK: "off" # If turned "on", Server will not set Host based on remote IP check. This is already overridden if SERVER_HOST is set. Turned "off" by default.
DNS_MODE: "off" # Enables DNS Mode, meaning all nodes will set hosts file for private dns settings.
API_PORT: "8081" # The HTTP API port for Netmaker. Used for API calls / communication from front end. If changed, need to change port of BACKEND_URL for netmaker-ui.
REST_BACKEND: "on" # Enables the REST backend (API running on API_PORT at SERVER_HTTP_HOST). Change to "off" to turn off.
RCE: "off" # Enables setting PostUp and PostDown (arbitrary commands) on nodes from the server. Off by default.
CORS_ALLOWED_ORIGIN: "*" # The "allowed origin" for API requests. Change to restrict where API requests can come from.
DISPLAY_KEYS: "on" # Show keys permanently in UI (until deleted) as opposed to 1-time display.
DATABASE: "rqlite"
SQL_CONN: "https://netmaker:{{ netmaker_creds.rqlite_password }}@{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}/"
MQ_HOST: "mosquitto" # the address of the mq server. If running from docker compose it will be "mq". Otherwise, need to input address. If using "host networking", it will find and detect the IP of the mq container.
MQ_SERVER_PORT: "1883" # the reachable port of MQ by the server - change if internal MQ port changes (or use external port if MQ is not on the same machine)
MQ_PORT: "{{ netmaker_nginx.advertise_port }}" # the reachable port of MQ - change if external MQ port changes (port on proxy, not necessarily the one exposed in docker-compose)
MQ_ADMIN_PASSWORD: "{{ netmaker_creds.mq_admin_password }}"
HOST_NETWORK: "off" # whether or not host networking is turned on. Only turn on if configured for host networking (see docker-compose.hostnetwork.yml). Will set host-level settings like iptables.
PORT_FORWARD_SERVICES: "" # decide which services to port forward ("dns","ssh", or "mq")
# this section is for OAuth
AUTH_PROVIDER: "" # "<azure-ad|github|google|oidc>"
CLIENT_ID: "" # "<client id of your oauth provider>"
CLIENT_SECRET: "" # "<client secret of your oauth provider>"
FRONTEND_URL: "" # "https://dashboard.<netmaker base domain>"
AZURE_TENANT: "" # "<only for azure, you may optionally specify the tenant for the OAuth>"
OIDC_ISSUER: "" # https://oidc.yourprovider.com - URL of oidc provider
VERBOSITY: "1" # logging verbosity level - 1, 2, or 3
TELEMETRY: "off" # Whether or not to send telemetry data to help improve Netmaker. Switch to "off" to opt out of sending telemetry.
ports:
- "51821-51830:51821-51830/udp" # wireguard ports
netmaker-ui: # The Netmaker UI Component
image: gravitl/netmaker-ui:v0.16.1
depends_on:
- netmaker
links:
- "netmaker:api"
restart: unless-stopped
environment:
BACKEND_URL: "https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}" # URL where UI will send API requests. Change based on SERVER_HOST, SERVER_HTTP_HOST, and API_PORT
mosquitto: # the MQTT broker for netmaker
image: eclipse-mosquitto:2.0.11-openssl
restart: unless-stopped
volumes:
- ./mosquitto/config:/mosquitto/config
- ./mosquitto/data:/mosquitto/data
- ./mosquitto/logs:/mosquitto/log
- "./certs:/certs:ro"
depends_on:
- netmaker
command: ["/mosquitto/config/wait.sh"]
environment:
NETMAKER_SERVER_HOST: "http://netmaker:8081"

View File

@@ -0,0 +1,25 @@
stream {
# Map target-hosts based on hostname
map $ssl_preread_server_name $target_host {
hostnames; # Enable matching including prefix/suffix-mask
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883; # todo: tls-terminate?
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;
default 127.0.0.1:1;
}
server {
resolver 127.0.0.11; # Explicitly set docker-resolver
listen 443;
ssl_preread on;
proxy_pass $target_host;
}
}

View File

@@ -0,0 +1,27 @@
map $host $proxy_name {
hostnames;
{{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
{{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;
{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;
default 444;
}
server {
resolver 127.0.0.11; # Explicitly set docker-resolver
listen 8443 ssl;
ssl_certificate /certs/node.crt;
ssl_certificate_key /certs/node.key;
if ($proxy_name = 444){
return 444;
}
location / {
proxy_pass http://$proxy_name;
}
}

View File

@@ -0,0 +1,5 @@
[{
"username": "netmaker",
"password": "{{ netmaker_creds.rqlite_password }}",
"perms": ["all"]
}]