Compare commits
59 Commits
role_kuber
...
role_kuber
| Author | SHA1 | Date | |
|---|---|---|---|
| b8ae38bab8 | |||
| 093612f3a7 | |||
| a92409c56f | |||
| f50e3ac33c | |||
| 668ff23ee6 | |||
| c2c6a2872f | |||
| c1c7ec9e56 | |||
| 550f6868ff | |||
| c8f90f0f8d | |||
| 41570ea40d | |||
| a3c887748a | |||
| d113625fa8 | |||
| dadd077723 | |||
| 0d43d07ad4 | |||
| d6f8f975bb | |||
| 7c86a5d77d | |||
| 8c4e3c2401 | |||
| b46d35c8a5 | |||
| 791ad96849 | |||
| fc3d9845d6 | |||
| 590b75ac23 | |||
| 0c82504299 | |||
| 2fee9a1747 | |||
| fb44c39969 | |||
| 5452303992 | |||
| 4321d78cf8 | |||
| f9a859e95c | |||
| e5920b3ddf | |||
| 0fc5dbb791 | |||
| 9cb2e88193 | |||
| 25ceb0f456 | |||
| fcc4f1ed18 | |||
| de0e220004 | |||
| f9cc97a8f2 | |||
| 811fc22eef | |||
| cdd4c9babb | |||
| d553f604a9 | |||
| 806b41b73e | |||
| ec98188a24 | |||
| 06bdae380b | |||
| 83b50c10cd | |||
| 3890007042 | |||
| bb3d363094 | |||
| 7453f1e616 | |||
| e022a6e9f0 | |||
| 772dc3a620 | |||
| 6d5c86927d | |||
| c94168fb30 | |||
| 6168ba2b0a | |||
| e4a2c5dd2f | |||
| 315f5a1805 | |||
| d2d8ebd8cc | |||
| dd87d5e724 | |||
| 86e6317e28 | |||
| 8fddfc532f | |||
| f733543ae1 | |||
| 526cf66bd7 | |||
| 4cb418e2b6 | |||
| b593a2874a |
41
kubernetes/defaults/main.yml
Normal file
41
kubernetes/defaults/main.yml
Normal file
@@ -0,0 +1,41 @@
|
||||
---
|
||||
kubernetes:
|
||||
ipPool:
|
||||
ipv4:
|
||||
# Minimum: /24
|
||||
cluster_cidr: 10.42.0.0/16
|
||||
service_cidr: 10.43.0.0/16
|
||||
ipv6:
|
||||
# Minimum: /120
|
||||
cluster_cidr: fd42::/56
|
||||
service_cidr: fd43::/112
|
||||
|
||||
# Interface to grab node-IPv4/v6 from
|
||||
nodeIp_interface: <interface to grab nodeIp from>
|
||||
|
||||
control_plane:
|
||||
dns_name: <control-plane dns-reachable-name>
|
||||
|
||||
token: <shared token for nodes to join>
|
||||
|
||||
network:
|
||||
# One of [flannel, calico]
|
||||
plugin: calico
|
||||
|
||||
# Helper for networking
|
||||
helper:
|
||||
# https://github.com/Ruakij/RoutingTableToWg
|
||||
# Translates received-routes from e.g. BGP to wireguard-allowedips
|
||||
# Helpful, when nodeIp_interface is a wireguard-interface
|
||||
routingtabletowg: false
|
||||
|
||||
# One of [traefik-ingress]
|
||||
ingress_controller: traefik-ingress
|
||||
|
||||
config_extra:
|
||||
# etcd-tuning
|
||||
# heartbeat: 0.5-1.5x of rtt
|
||||
# election: 10x- of heartbeat
|
||||
etcd-arg:
|
||||
heartbeat-interval: 500
|
||||
election-timeout: 5000
|
||||
33
kubernetes/docs/architecture.puml
Normal file
33
kubernetes/docs/architecture.puml
Normal file
@@ -0,0 +1,33 @@
|
||||
@startuml
|
||||
|
||||
rectangle "Control-Plane" as control_plane {
|
||||
rectangle "Node" as sn1 {
|
||||
component "netclient" as sn1_netclient
|
||||
|
||||
component etcd as sn1_etcd
|
||||
component "k3s-server" as sn1_k3s_server
|
||||
sn1_k3s_server - sn1_etcd
|
||||
}
|
||||
|
||||
rectangle "Node" as sn2 {
|
||||
component "netclient" as sn2_netclient
|
||||
|
||||
component etcd as sn2_etcd
|
||||
component "k3s-server" as sn2_k3s_server
|
||||
sn2_k3s_server - sn2_etcd
|
||||
}
|
||||
|
||||
sn1_netclient -- sn2_netclient
|
||||
sn1_etcd -- sn2_etcd
|
||||
}
|
||||
|
||||
rectangle "Workers" {
|
||||
rectangle "Node" as an1 {
|
||||
component "netclient" as an1_netclient
|
||||
|
||||
component "k3s-agent" as sn1_k3s_agent
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@enduml
|
||||
@@ -31,6 +31,5 @@ disabled_plugins = []
|
||||
# level = "info"
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
FROM golang:1-buster
|
||||
|
||||
# Add PPA
|
||||
RUN echo "deb http://ppa.launchpad.net/dqlite/dev/ubuntu bionic main" > /etc/apt/sources.list.d/ppa_dqlite_dev_bionic.list
|
||||
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 50FB3D04
|
||||
# Install dependencies
|
||||
RUN apt update -y && apt install -y build-essential git libraft-dev libsqlite3-dev libdqlite-dev
|
||||
|
||||
# Clone
|
||||
RUN git clone https://github.com/canonical/k8s-dqlite --branch v1.0.4 /k8s-dqlite
|
||||
WORKDIR /k8s-dqlite
|
||||
|
||||
# Compile
|
||||
ENV CGO_LDFLAGS_ALLOW="-Wl,-z,now"
|
||||
RUN go build -o k8s-dqlite -tags libsqlite3,dqlite k8s-dqlite.go
|
||||
Binary file not shown.
@@ -1,3 +1,19 @@
|
||||
- name: reload_sysctl
|
||||
command: sysctl --system
|
||||
|
||||
- name: restart_containerd
|
||||
ansible.builtin.service:
|
||||
name: containerd
|
||||
state: restarted
|
||||
|
||||
- name: reload_networking
|
||||
service:
|
||||
name: networking
|
||||
state: restarted
|
||||
async: 5
|
||||
poll: 0
|
||||
notify: wait_for_connection
|
||||
|
||||
- name: wait_for_connection
|
||||
wait_for_connection:
|
||||
delay: 5
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: docker }
|
||||
dependencies:
|
||||
- role: docker
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
- name: Deploy Cilium-CLI
|
||||
ansible.builtin.unarchive:
|
||||
src: https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz
|
||||
dest: /usr/local/bin
|
||||
remote_src: yes
|
||||
mode: u=rwx,g=rx,o=rx
|
||||
|
||||
- name: Install Cilium
|
||||
when: "inventory_hostname == groups['kubernetes'][0]"
|
||||
command: -cilium install
|
||||
environment:
|
||||
KUBECONFIG: /etc/kubernetes/admin.conf
|
||||
30
kubernetes/tasks/install.yml
Normal file
30
kubernetes/tasks/install.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
- name: Create k3s-folder
|
||||
ansible.builtin.file:
|
||||
path: /etc/rancher/k3s/
|
||||
state: directory
|
||||
mode: '0755'
|
||||
|
||||
- name: Deploy k3s config
|
||||
ansible.builtin.template:
|
||||
src: k3s/{{ type }}/config.yaml.jinja2
|
||||
dest: /etc/rancher/k3s/config.yaml
|
||||
register: config
|
||||
|
||||
- name: Download install-script
|
||||
get_url:
|
||||
url: https://get.k3s.io
|
||||
dest: /root/k3s_install.sh
|
||||
mode: '744'
|
||||
# todo: update when file changed?
|
||||
|
||||
- import_tasks: ./install/server/setup_network.yml
|
||||
when: "type == 'server'"
|
||||
|
||||
- import_tasks: ./install/server/install_helm.yml
|
||||
when: "type == 'server'"
|
||||
|
||||
- import_tasks: ./install/server/install_k3s.yml
|
||||
when: "type == 'server'"
|
||||
|
||||
- import_tasks: ./install/agent/install_k3s.yml
|
||||
when: "type == 'agent'"
|
||||
12
kubernetes/tasks/install/agent/install_k3s.yml
Normal file
12
kubernetes/tasks/install/agent/install_k3s.yml
Normal file
@@ -0,0 +1,12 @@
|
||||
- name: Install K3s agent
|
||||
command: /root/k3s_install.sh {{ type }}
|
||||
register: command
|
||||
changed_when: "'No change detected' not in command.stdout"
|
||||
until: "command is not failed"
|
||||
retries: 2
|
||||
delay: 10
|
||||
|
||||
- name: Make sure service is started / restarted on config change
|
||||
service:
|
||||
name: k3s-agent
|
||||
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
|
||||
17
kubernetes/tasks/install/server/install_helm.yml
Normal file
17
kubernetes/tasks/install/server/install_helm.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
- name: Add Balto key
|
||||
apt_key:
|
||||
url: https://baltocdn.com/helm/signing.asc
|
||||
state: present
|
||||
|
||||
- name: Add Balto Repository
|
||||
apt_repository:
|
||||
repo: "deb https://baltocdn.com/helm/stable/debian/ all main"
|
||||
state: present
|
||||
filename: kubernetes
|
||||
update_cache: yes
|
||||
|
||||
- name: Install helm
|
||||
package:
|
||||
name:
|
||||
- helm
|
||||
state: latest
|
||||
55
kubernetes/tasks/install/server/install_k3s.yml
Normal file
55
kubernetes/tasks/install/server/install_k3s.yml
Normal file
@@ -0,0 +1,55 @@
|
||||
- name: Install K3s-server for 1st-node
|
||||
command: /root/k3s_install.sh {{ type }}
|
||||
when: "inventory_hostname == groups['kubernetes'][0]"
|
||||
register: command
|
||||
changed_when: "'No change detected' not in command.stdout"
|
||||
|
||||
- name: Make sure service is started / restarted on config change
|
||||
service:
|
||||
name: k3s
|
||||
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
|
||||
when: "inventory_hostname == groups['kubernetes'][0]"
|
||||
|
||||
- name: Waiting for K3s-server to accept connections
|
||||
ansible.builtin.wait_for:
|
||||
host: "127.0.0.1"
|
||||
port: 6443
|
||||
state: started
|
||||
when: "inventory_hostname == groups['kubernetes'][0]"
|
||||
|
||||
- name: Install K3s-server for other nodes
|
||||
command: /root/k3s_install.sh {{ type }}
|
||||
when: "inventory_hostname != groups['kubernetes'][0]"
|
||||
register: command
|
||||
changed_when: "'No change detected' not in command.stdout"
|
||||
until: "command is not failed"
|
||||
retries: 2
|
||||
delay: 10
|
||||
|
||||
- name: Make sure service is started / restarted on config change
|
||||
service:
|
||||
name: k3s
|
||||
state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
|
||||
when: "inventory_hostname != groups['kubernetes'][0]"
|
||||
|
||||
- name: Waiting for K3s-server to accept connections on other nodes
|
||||
ansible.builtin.wait_for:
|
||||
host: "127.0.0.1"
|
||||
port: 6443
|
||||
state: started
|
||||
when: "inventory_hostname != groups['kubernetes'][0]"
|
||||
|
||||
#- name: Add Kubernetes environment-vars to /etc/profile.d/
|
||||
# blockinfile:
|
||||
# path: /etc/profile.d/k3s-bin.sh
|
||||
# marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
|
||||
# block: |
|
||||
# export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
|
||||
# create: true
|
||||
|
||||
- name: Deploy calico
|
||||
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
|
||||
when: "kubernetes.network.plugin == 'calico'"
|
||||
|
||||
- name: Deploy network-helpers
|
||||
import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml
|
||||
@@ -0,0 +1,19 @@
|
||||
- name: Deploy calico operator
|
||||
command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
|
||||
register: command
|
||||
changed_when: "'created' in command.stdout"
|
||||
run_once: true
|
||||
failed_when:
|
||||
- "command.rc == 1 and 'AlreadyExists' not in command.stderr"
|
||||
|
||||
- name: Deploy calico ressource template
|
||||
ansible.builtin.template:
|
||||
src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
|
||||
dest: /root/calico-ressource.yml
|
||||
run_once: true
|
||||
|
||||
- name: Deploy calico ressource
|
||||
command: kubectl apply -f /root/calico-ressource.yml
|
||||
register: command
|
||||
changed_when: "'created' in command.stdout"
|
||||
run_once: true
|
||||
@@ -0,0 +1,7 @@
|
||||
- name: Deploy service-file for routing-table to wireguard-translation
|
||||
ansible.builtin.template:
|
||||
src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
|
||||
dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
|
||||
mode: u=rw,g=r,o=r
|
||||
run_once: true
|
||||
when: "kubernetes.network.helper.routingtabletowg"
|
||||
6
kubernetes/tasks/install/server/setup_network.yml
Normal file
6
kubernetes/tasks/install/server/setup_network.yml
Normal file
@@ -0,0 +1,6 @@
|
||||
- name: Set control-plane-dns-endpoint towards local-ip
|
||||
blockinfile:
|
||||
path: /etc/hosts
|
||||
marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
|
||||
block: |
|
||||
{{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}
|
||||
@@ -1,28 +0,0 @@
|
||||
- name: Add Google-Cloud key
|
||||
apt_key:
|
||||
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
|
||||
state: present
|
||||
|
||||
- name: Add Kubernetes Repository
|
||||
apt_repository:
|
||||
repo: "deb https://apt.kubernetes.io/ kubernetes-xenial main"
|
||||
state: present
|
||||
filename: kubernetes
|
||||
update_cache: yes
|
||||
|
||||
- name: Install kubernetes-tools
|
||||
package:
|
||||
name:
|
||||
- kubeadm
|
||||
- kubelet
|
||||
- kubectl
|
||||
state: latest
|
||||
|
||||
- name: Hold upgrades for kubernetes-tools
|
||||
dpkg_selections:
|
||||
name: "{{ item }}"
|
||||
selection: hold
|
||||
loop:
|
||||
- kubeadm
|
||||
- kubelet
|
||||
- kubectl
|
||||
@@ -1,4 +0,0 @@
|
||||
- name: Join other nodes to cluster
|
||||
when: "inventory_hostname != groups['kubernetes'][0]"
|
||||
command:
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
- name: Initialize Kubernetes Cluster
|
||||
when: "inventory_hostname == groups['kubernetes'][0]"
|
||||
command: kubeadm init --control-plane-endpoint={{ control_plane.dns_name }}
|
||||
#--upload-certs
|
||||
|
||||
- name: Set environment-var for config
|
||||
lineinfile:
|
||||
dest: ~/.bashrc
|
||||
line: "export KUBECONFIG=/etc/kubernetes/admin.conf"
|
||||
@@ -1,9 +1,4 @@
|
||||
- import_tasks: ./prerequisites.yml
|
||||
|
||||
- import_tasks: ./k8s_deploy.yml
|
||||
- import_tasks: ./install.yml
|
||||
|
||||
- import_tasks: ./k8s_setup.yml
|
||||
|
||||
- import_tasks: ./deploy_cilium.yml
|
||||
|
||||
#- import_tasks: ./k8s_setup-cluster.yml
|
||||
|
||||
@@ -1,21 +1,42 @@
|
||||
- name: Load br_netfilter kernel-module
|
||||
modprobe:
|
||||
name: br_netfilter
|
||||
state: present
|
||||
#- name: Load br_netfilter kernel-module
|
||||
# modprobe:
|
||||
# name: br_netfilter
|
||||
# state: present
|
||||
|
||||
- name: Set sysctl settings for iptables bridged traffic
|
||||
copy:
|
||||
dest: "/etc/sysctl.d/kubernetes.conf"
|
||||
content: |
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
dest: "/etc/sysctl.d/kubernetes.conf"
|
||||
content: |
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
|
||||
net.ipv4.conf.all.forwarding=1
|
||||
net.ipv6.conf.all.forwarding=1
|
||||
notify: reload_sysctl
|
||||
|
||||
- import_tasks: ./prerequisites/swap.yml
|
||||
#- name: Disable swap
|
||||
# command: swapoff -a
|
||||
|
||||
- name: Install required packages
|
||||
package:
|
||||
name:
|
||||
#- containerd
|
||||
#- iptables
|
||||
# For Longhorn:
|
||||
- nfs-common
|
||||
- open-iscsi
|
||||
state: latest
|
||||
|
||||
- import_tasks: ./prerequisites/containerd.yml
|
||||
|
||||
- name: Set control-plane-dns-endpoint towards local-ip
|
||||
lineinfile:
|
||||
dest: /etc/hosts
|
||||
line: "{{ ansible_facts.default_ipv6.address }} k8s-control-plane.system.ruekov.eu"
|
||||
- name: Gather interface-name
|
||||
set_fact:
|
||||
interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
|
||||
|
||||
- name: Getting nodeIp-data from interface
|
||||
set_fact:
|
||||
nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
|
||||
nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"
|
||||
|
||||
- name: Run handlers to reload configurations
|
||||
meta: flush_handlers
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
- name: Disable swap-mounts
|
||||
replace:
|
||||
path: /etc/fstab
|
||||
regexp: '^([ \t]*(?!#)\S+[ \t]+swap[ \t]+.*)'
|
||||
replace: '# \1'
|
||||
|
||||
- name: Disable active swap immediately
|
||||
command: swapoff -va
|
||||
changed_when: "command.stdout != ''"
|
||||
register: command
|
||||
18
kubernetes/templates/k3s/agent/config.yaml.jinja2
Normal file
18
kubernetes/templates/k3s/agent/config.yaml.jinja2
Normal file
@@ -0,0 +1,18 @@
|
||||
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
|
||||
token: '{{ kubernetes.token }}'
|
||||
|
||||
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
|
||||
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
|
||||
{% else %}
|
||||
node-ip: {{ nodeip_ipv4 }}
|
||||
{% endif %}
|
||||
|
||||
## Label
|
||||
# Region & DC
|
||||
node-label:
|
||||
{% if region is defined %}
|
||||
- topology.kubernetes.io/region={{ region }}
|
||||
{% endif %}
|
||||
{% if zone is defined %}
|
||||
- topology.kubernetes.io/zone={{ zone }}
|
||||
{% endif %}
|
||||
49
kubernetes/templates/k3s/server/config.yaml.jinja2
Normal file
49
kubernetes/templates/k3s/server/config.yaml.jinja2
Normal file
@@ -0,0 +1,49 @@
|
||||
## Base ##
|
||||
{% if inventory_hostname == groups['kubernetes'][0] %}
|
||||
# Initialize with internal etcd
|
||||
cluster-init: true
|
||||
{% else %}
|
||||
server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
|
||||
{% endif %}
|
||||
|
||||
token: '{{ kubernetes.token }}'
|
||||
tls-san:
|
||||
- {{ kubernetes.control_plane.dns_name }}
|
||||
|
||||
# Networking
|
||||
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
|
||||
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
|
||||
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
|
||||
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
|
||||
{% else %}
|
||||
node-ip: {{ nodeip_ipv4 }}
|
||||
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
|
||||
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
|
||||
{% endif %}
|
||||
|
||||
egress-selector-mode: disabled
|
||||
|
||||
# Network-plugin
|
||||
{% if kubernetes.network.plugin == "flannel" %}
|
||||
flannel-backend: vxlan
|
||||
{% else %}
|
||||
disable-network-policy: true
|
||||
flannel-backend: none
|
||||
{% endif %}
|
||||
|
||||
# Ingress-plugin
|
||||
{% if kubernetes.ingress_controller != "traefik-ingress" %}
|
||||
disable: traefik
|
||||
{% endif %}
|
||||
|
||||
## Label
|
||||
# Region & DC
|
||||
node-label:
|
||||
{% if region is defined %}
|
||||
- topology.kubernetes.io/region={{ region }}
|
||||
{% endif %}
|
||||
{% if zone is defined %}
|
||||
- topology.kubernetes.io/zone={{ zone }}
|
||||
{% endif %}
|
||||
|
||||
{{ kubernetes.config_extra | to_yaml }}
|
||||
@@ -0,0 +1,34 @@
|
||||
# This section includes base Calico installation configuration.
|
||||
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: Installation
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
# Configures Calico networking.
|
||||
calicoNetwork:
|
||||
# Note: The ipPools section cannot be modified post-install.
|
||||
ipPools:
|
||||
- blockSize: 26
|
||||
cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
|
||||
encapsulation: None
|
||||
natOutgoing: Enabled
|
||||
nodeSelector: all()
|
||||
|
||||
{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
|
||||
- blockSize: 122
|
||||
cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
|
||||
encapsulation: None
|
||||
natOutgoing: Enabled
|
||||
nodeSelector: all()
|
||||
{% endif %}
|
||||
|
||||
---
|
||||
|
||||
# This section configures the Calico API server.
|
||||
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: default
|
||||
spec: {}
|
||||
@@ -0,0 +1,45 @@
|
||||
# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: routingtabletowg
|
||||
namespace: calico-system
|
||||
labels:
|
||||
app: routingtabletowg
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: routingtabletowg
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: routingtabletowg
|
||||
spec:
|
||||
tolerations:
|
||||
# this toleration is to have the daemonset runnable on master nodes
|
||||
# remove it if your masters can't run pods
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: routingtabletowg
|
||||
image: "ruakij/routingtabletowg:0.2.0"
|
||||
env:
|
||||
- name: INTERFACE
|
||||
value: {{ kubernetes.ipPool.nodeIp_interface }}
|
||||
- name: FILTER_PROTOCOL
|
||||
value: bird
|
||||
- name: PERIODIC_SYNC
|
||||
value: '300'
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 10Mi
|
||||
limits:
|
||||
cpu: 20m
|
||||
memory: 20Mi
|
||||
---
|
||||
@@ -1,7 +0,0 @@
|
||||
{% if inventory_hostname != groups['kubernetes'][0] %}
|
||||
Cluster:
|
||||
{% for node in groups['kubernetes'] if node != inventory_hostname %}
|
||||
- {{ node }}:29001
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
Address: 0.0.0.0:29001
|
||||
4
netmaker/defauls/netmaker.yml
Normal file
4
netmaker/defauls/netmaker.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
netclient:
|
||||
# Token to join default-network
|
||||
# leave empty to ignore
|
||||
join_network_token:
|
||||
3
netmaker/meta/main.yml
Normal file
3
netmaker/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: docker
|
||||
4
netmaker/tasks/certs.yml
Normal file
4
netmaker/tasks/certs.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
- name: Deploy CA Certificate
|
||||
ansible.builtin.copy:
|
||||
src: secret_files/netmaker_server/ca/ca.crt
|
||||
dest: /etc/ssl/certs/netmaker-ca.pem
|
||||
25
netmaker/tasks/install.yml
Normal file
25
netmaker/tasks/install.yml
Normal file
@@ -0,0 +1,25 @@
|
||||
- name: Install Packages
|
||||
# when: docker_file.stat.exists == False
|
||||
package:
|
||||
name:
|
||||
- gpg
|
||||
- gpg-agent
|
||||
|
||||
- name: Add netmaker-key
|
||||
apt_key:
|
||||
url: https://apt.netmaker.org/gpg.key
|
||||
state: present
|
||||
|
||||
- name: Add netmaker-repository
|
||||
apt_repository:
|
||||
repo: "deb https:apt.netmaker.org stable main"
|
||||
state: present
|
||||
filename: netmaker
|
||||
update_cache: yes
|
||||
|
||||
- name: Install wireguard & netclient
|
||||
package:
|
||||
name:
|
||||
- wireguard
|
||||
- netclient
|
||||
state: latest
|
||||
5
netmaker/tasks/join-network.yml
Normal file
5
netmaker/tasks/join-network.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
- name: Join netmaker-network
|
||||
when: "netclient.join_network_token is defined"
|
||||
command: "netclient join -t {{ netclient.join_network_token }}"
|
||||
failed_when: command.rc != 0
|
||||
register: command
|
||||
5
netmaker/tasks/main.yml
Normal file
5
netmaker/tasks/main.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
- import_tasks: ./certs.yml
|
||||
|
||||
- import_tasks: ./install.yml
|
||||
|
||||
- import_tasks: ./join-network.yml
|
||||
5
netmaker_server/defaults/credentials.yml
Normal file
5
netmaker_server/defaults/credentials.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
netmaker_creds:
|
||||
rqlite_password:
|
||||
mq_admin_password:
|
||||
|
||||
master_key:
|
||||
28
netmaker_server/defaults/main.yml
Normal file
28
netmaker_server/defaults/main.yml
Normal file
@@ -0,0 +1,28 @@
|
||||
# Overwrite for specific nodes to force dynamic-ip (disable setting public-ip and forces external lookup for public-ip)
|
||||
# When false, will check itself for dynamic-ip (based on private-ip)
|
||||
netmaker_dynamicIp: false
|
||||
|
||||
netmaker_nginx:
|
||||
# Listen-port
|
||||
tls_port: 51820
|
||||
# Advertise-Port for services
|
||||
# (must also be reachable by internal services!)
|
||||
advertise_port: 51820
|
||||
|
||||
# This is the base-domain used for generating hostnames for services
|
||||
netmaker_base_domain:
|
||||
|
||||
# host + base_domain
|
||||
netmaker_api:
|
||||
host: netmaker-api
|
||||
netmaker_ui:
|
||||
host: netmaker-ui
|
||||
# MQTT-broker
|
||||
netmaker_broker:
|
||||
tls_host: netmaker-broker
|
||||
|
||||
# host + node_hostname
|
||||
netmaker_rqlite:
|
||||
http_host: netmaker-rqlite-http
|
||||
cluster_host: netmaker-rqlite-cluster
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
@startuml
|
||||
|
||||
interface ng_TLS
|
||||
|
||||
component netmaker_server {
|
||||
component nginx {
|
||||
component ng_stream
|
||||
component ng_http
|
||||
|
||||
ng_stream -up- ng_TLS
|
||||
|
||||
ng_stream -right-> ng_http : tls-termination
|
||||
}
|
||||
|
||||
component nm_ui
|
||||
nm_ui -up- nm_ui_http
|
||||
ng_http -down-( nm_ui_http
|
||||
|
||||
component Mosquitto
|
||||
Mosquitto -up- mq_plain
|
||||
Mosquitto -up- mq_tls
|
||||
ng_stream -down-( mq_tls
|
||||
|
||||
component rqlite
|
||||
rqlite -up- rq_http
|
||||
rqlite -up- rq_cluster
|
||||
ng_stream -down-( rq_cluster
|
||||
ng_http -down-( rq_http
|
||||
|
||||
component nm_api
|
||||
nm_api -down- nm_api_http
|
||||
ng_http --( nm_api_http
|
||||
nm_api -up-( ng_TLS : db-connection to rqlite-master
|
||||
nm_api --( mq_plain
|
||||
}
|
||||
|
||||
@enduml
|
||||
@@ -0,0 +1,12 @@
|
||||
per_listener_settings false
|
||||
|
||||
listener 8883
|
||||
allow_anonymous false
|
||||
certfile /certs/node.crt
|
||||
keyfile /certs/node.key
|
||||
|
||||
listener 1883
|
||||
allow_anonymous false
|
||||
|
||||
plugin /usr/lib/mosquitto_dynamic_security.so
|
||||
plugin_opt_config_file /mosquitto/data/dynamic-security.json
|
||||
23
netmaker_server/files/opt/netmaker_server/mosquitto/config/wait.sh
Executable file
23
netmaker_server/files/opt/netmaker_server/mosquitto/config/wait.sh
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/bin/ash
|
||||
|
||||
wait_for_netmaker() {
|
||||
echo "SERVER: ${NETMAKER_SERVER_HOST}"
|
||||
until curl --output /dev/null --silent --fail --head \
|
||||
--location "${NETMAKER_SERVER_HOST}/api/server/health"; do
|
||||
echo "Waiting for netmaker server to startup"
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
main(){
|
||||
# wait for netmaker to startup
|
||||
apk add curl
|
||||
wait_for_netmaker
|
||||
echo "Starting MQ..."
|
||||
# Run the main container command.
|
||||
/docker-entrypoint.sh
|
||||
/usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
|
||||
|
||||
}
|
||||
|
||||
main "${@}"
|
||||
33
netmaker_server/files/opt/netmaker_server/nginx/nginx.conf
Normal file
33
netmaker_server/files/opt/netmaker_server/nginx/nginx.conf
Normal file
@@ -0,0 +1,33 @@
|
||||
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
|
||||
error_log /var/log/nginx/error.log notice;
|
||||
pid /var/run/nginx.pid;
|
||||
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
access_log /var/log/nginx/access.log main;
|
||||
|
||||
sendfile on;
|
||||
#tcp_nopush on;
|
||||
|
||||
keepalive_timeout 65;
|
||||
|
||||
#gzip on;
|
||||
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
}
|
||||
include /etc/nginx/stream.d/*.conf;
|
||||
3
netmaker_server/meta/main.yml
Normal file
3
netmaker_server/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
# dependencies:
|
||||
# - role: docker
|
||||
40
netmaker_server/tasks/certs.yml
Normal file
40
netmaker_server/tasks/certs.yml
Normal file
@@ -0,0 +1,40 @@
|
||||
- name: Generate PrivateKey
|
||||
community.crypto.openssl_privatekey:
|
||||
path: /opt/netmaker_server/certs/node.key
|
||||
owner: 1883 # Set owner to mosquitto-user (all other containers seem to run as root)
|
||||
|
||||
- name: Generate Certificate-Signing-Request from privateKey
|
||||
community.crypto.openssl_csr:
|
||||
path: /opt/netmaker_server/certs/node.csr
|
||||
privatekey_path: /opt/netmaker_server/certs/node.key
|
||||
common_name: "{{ ansible_facts.nodename }}"
|
||||
subject_alt_name:
|
||||
"DNS:{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }},\
|
||||
DNS:{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }},\
|
||||
DNS:{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }},\
|
||||
DNS:{{ netmaker_api.host }}.{{ netmaker_base_domain }},\
|
||||
DNS:{{ netmaker_ui.host }}.{{ netmaker_base_domain }}"
|
||||
|
||||
- name: Fetch CSR
|
||||
ansible.builtin.fetch:
|
||||
src: /opt/netmaker_server/certs/node.csr
|
||||
dest: tmp_files/
|
||||
|
||||
- name: Sign CSR locally with CA
|
||||
local_action: community.crypto.x509_certificate
|
||||
args:
|
||||
path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
|
||||
csr_path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.csr
|
||||
ownca_path: secret_files/netmaker_server/ca/ca.crt
|
||||
ownca_privatekey_path: secret_files/netmaker_server/ca/ca.key
|
||||
provider: ownca
|
||||
|
||||
- name: Copy Signed Certificate
|
||||
ansible.builtin.copy:
|
||||
src: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
|
||||
dest: /opt/netmaker_server/certs/node.crt
|
||||
|
||||
- name: Copy CA Certificate
|
||||
ansible.builtin.copy:
|
||||
src: secret_files/netmaker_server/ca/ca.crt
|
||||
dest: /opt/netmaker_server/certs/ca.crt
|
||||
20
netmaker_server/tasks/main.yml
Normal file
20
netmaker_server/tasks/main.yml
Normal file
@@ -0,0 +1,20 @@
|
||||
- import_tasks: ./prerequisites.yml
|
||||
|
||||
- name: Copy folder-structure
|
||||
ansible.builtin.copy:
|
||||
src: opt/netmaker_server
|
||||
dest: /opt/
|
||||
mode: preserve
|
||||
|
||||
- name: Deploy compose file
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.template
|
||||
dest: /opt/netmaker_server/docker-compose.yml
|
||||
|
||||
- import_tasks: ./certs.yml
|
||||
|
||||
- import_tasks: ./nginx.yml
|
||||
|
||||
- import_tasks: ./rqlite.yml
|
||||
|
||||
- import_tasks: ./netmaker.yml
|
||||
57
netmaker_server/tasks/netmaker.yml
Normal file
57
netmaker_server/tasks/netmaker.yml
Normal file
@@ -0,0 +1,57 @@
|
||||
- name: Start rest of netmaker-services
|
||||
command: "docker-compose --project-directory /opt/netmaker_server/ up -d"
|
||||
register: command
|
||||
failed_when: command.rc != 0
|
||||
|
||||
- name: Wait for netmaker-api to become available
|
||||
uri:
|
||||
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}'
|
||||
return_content: yes
|
||||
validate_certs: no
|
||||
status_code:
|
||||
- 404
|
||||
until: uri_output.status == 404
|
||||
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
|
||||
delay: 5 # Every 5 seconds
|
||||
register: uri_output
|
||||
|
||||
# todo: check if exists?
|
||||
|
||||
- name: Create default mesh-network 'server'
|
||||
uri:
|
||||
validate_certs: no
|
||||
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks'
|
||||
method: POST
|
||||
body:
|
||||
netid: servnet
|
||||
addressrange: 10.92.0.0/24
|
||||
addressrange6: fd92::/64
|
||||
body_format: json
|
||||
headers:
|
||||
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
|
||||
Content-Type: application/json
|
||||
when: "inventory_hostname == groups['netmaker'][0]"
|
||||
register: default_mesh
|
||||
until: "default_mesh is not failed"
|
||||
retries: 2
|
||||
delay: 10
|
||||
|
||||
# todo: check if exists?
|
||||
|
||||
- name: Create token for default-network
|
||||
uri:
|
||||
validate_certs: no
|
||||
url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks/servnet/keys' # todo: do implementation
|
||||
method: POST
|
||||
body:
|
||||
name: ""
|
||||
uses: 0
|
||||
body_format: json
|
||||
headers:
|
||||
Authorization: 'Bearer {{ netmaker_creds.master_key }}'
|
||||
Content-Type: application/json
|
||||
when: "inventory_hostname == groups['netmaker'][0]"
|
||||
register: default_mesh_key
|
||||
until: "default_mesh_key is not failed"
|
||||
retries: 2
|
||||
delay: 10
|
||||
18
netmaker_server/tasks/nginx.yml
Normal file
18
netmaker_server/tasks/nginx.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
# Render the HTTP-proxy and TLS-passthrough nginx configs, start the nginx
# container, then wait until it accepts connections.
- name: Deploy nginx configs
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dst }}"
  loop:
    - { src: 'nginx/proxy.conf.template', dst: '/opt/netmaker_server/nginx/conf/conf.d/proxy.conf' }
    - { src: 'nginx/passthrough.conf.template', dst: '/opt/netmaker_server/nginx/conf/stream.d/passthrough.conf' }

- name: Start nginx service
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d nginx"
  register: command
  failed_when: command.rc != 0

- name: Waiting for nginx to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    # NOTE(review): 51820 is hard-coded here while docker-compose publishes
    # {{ netmaker_nginx.tls_port }} — confirm these are the same port.
    port: 51820
    state: started
|
||||
9
netmaker_server/tasks/prerequisites.yml
Normal file
9
netmaker_server/tasks/prerequisites.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
- name: Install wireguard
  package:
    name:
      - wireguard
    # NOTE(review): ansible-lint 'package-latest' — prefer 'state: present'
    # unless unattended upgrades are actually intended here.
    state: latest

# Sets private_ipv4_address to a non-empty (truthy) string when the default
# IPv4 address is in a private range: 10.0.0.0/8, 192.168.0.0/16,
# 172.16.0.0/12, or CGNAT 100.64.0.0/10 (RFC 6598).
# Fix: the previous pattern matched all of 100.0.0.0/8 as private; only
# 100.64.0.0/10 (second octet 64-127) is reserved.
- name: Check if default-ipv4-address is private
  set_fact:
    private_ipv4_address: "{{ ansible_facts.default_ipv4.address | regex_search('^((10)|(192\\.168)|(172\\.((1[6-9])|(2[0-9])|(3[0-1])))|(100\\.((6[4-9])|([7-9][0-9])|(1[0-1][0-9])|(12[0-7]))))\\.') }}"
|
||||
42
netmaker_server/tasks/rqlite.yml
Normal file
42
netmaker_server/tasks/rqlite.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
# Deploy the rqlite auth config and bring up the distributed sqlite cluster:
# the first inventory host bootstraps alone, the remaining nodes start
# afterwards and join it (join logic lives in the docker-compose template).
- name: Deploy rqlite config
  ansible.builtin.template:
    src: rqlite-config.json.template
    dest: /opt/netmaker_server/rqlite/config.json

- name: Start rqlite service for 1st-node
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
  register: command
  failed_when: command.rc != 0
  # NOTE(review): this file keys off groups['netmaker_server'] while other
  # files in this role use groups['netmaker'] — confirm the intended group.
  when: "inventory_hostname == groups['netmaker_server'][0]"

- name: Waiting for rqlite to accept connections on 1st-node
  uri:
    url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
    return_content: true
    validate_certs: false  # internal/self-signed certificates
    # /status is auth-protected: a 401 proves rqlite is up and answering
    status_code:
      - 401
  until: uri_output.status == 401
  retries: 24  # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5  # Every 5 seconds
  register: uri_output
  when: "inventory_hostname == groups['netmaker_server'][0]"

- name: Start rqlite service for other nodes
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
  register: command
  failed_when: command.rc != 0
  when: "inventory_hostname != groups['netmaker_server'][0]"

- name: Waiting for rqlite to accept connections on other nodes
  uri:
    url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
    return_content: true
    validate_certs: false  # internal/self-signed certificates
    status_code:
      - 401
  until: uri_output.status == 401
  retries: 24  # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5  # Every 5 seconds
  register: uri_output
  when: "inventory_hostname != groups['netmaker_server'][0]"
|
||||
128
netmaker_server/templates/docker-compose.yml.template
Normal file
128
netmaker_server/templates/docker-compose.yml.template
Normal file
@@ -0,0 +1,128 @@
|
||||
version: "3.4"

services:
  nginx:
    image: nginx
    restart: unless-stopped
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro # Override nginx-config to add stream-import
      - ./nginx/conf/conf.d/:/etc/nginx/conf.d:ro # conf.d
      - ./nginx/conf/stream.d/:/etc/nginx/stream.d:ro # stream.d
      - ./certs:/certs:ro # SSL-certificates
    ports:
      # Quoted: unquoted host:container port mappings are prone to YAML's
      # implicit-typing traps once templated.
      - "{{ netmaker_nginx.tls_port }}:443"

  rqlite: # Distributed sqlite-db
    image: rqlite/rqlite
    restart: unless-stopped
    hostname: "{{ ansible_facts.nodename }}"
    volumes:
      - "./rqlite/data:/rqlite/file"
      - "./rqlite/config.json:/config.json:ro"
      - "./certs:/certs:ro"
      - ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
    command: "
      -http-adv-addr {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
      -raft-addr [::]:4002
      -raft-adv-addr {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}

      -node-encrypt
      -node-cert /certs/node.crt
      -node-key /certs/node.key
      -node-no-verify

      -auth /config.json

{% if inventory_hostname != groups['netmaker'][0] %}
      -join-as netmaker
      -join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
      "
    # NOTE(review): groups['netmaker'] here vs groups['netmaker_server'] in
    # rqlite.yml — confirm which inventory group is intended.
    # FIXME: /\ \/ Change http -> https

  netmaker: # The Primary Server for running Netmaker
    image: gravitl/netmaker:v0.16.1
    depends_on:
      - rqlite
    cap_add:
      - NET_ADMIN
      - NET_RAW
      - SYS_MODULE
    sysctls:
      - net.ipv4.ip_forward=1
      - net.ipv4.conf.all.src_valid_mark=1
      - net.ipv6.conf.all.disable_ipv6=0
      - net.ipv6.conf.all.forwarding=1
    restart: unless-stopped
    volumes: # Volume mounts necessary for sql, coredns, and mqtt
      - ./dnsconfig/:/root/config/dnsconfig
      - ./mosquitto/data/:/etc/netmaker/
      - ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
    hostname: "{{ ansible_facts.nodename }}"
    environment: # Necessary capabilities to set iptables when running in container
      NODE_ID: "{{ ansible_facts.nodename }}"
      MASTER_KEY: "{{ netmaker_creds.master_key }}" # The admin master key for accessing the API. Change this in any production installation.

{% if not private_ipv4_address and not netmaker_dynamicIp %}
      SERVER_HOST: "{{ ansible_facts.default_ipv4.address }}" # Set to public IP of machine.
{% endif %}
      SERVER_NAME: "{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}" # The domain/host IP indicating the mq broker address
      SERVER_HTTP_HOST: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}" # Overrides SERVER_HOST if set. Useful for making HTTP available via different interfaces/networks.
      SERVER_API_CONN_STRING: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}"

      DISABLE_REMOTE_IP_CHECK: "off" # If turned "on", Server will not set Host based on remote IP check. This is already overridden if SERVER_HOST is set. Turned "off" by default.
      DNS_MODE: "off" # Enables DNS Mode, meaning all nodes will set hosts file for private dns settings.

      API_PORT: "8081" # The HTTP API port for Netmaker. Used for API calls / communication from front end. If changed, need to change port of BACKEND_URL for netmaker-ui.
      REST_BACKEND: "on" # Enables the REST backend (API running on API_PORT at SERVER_HTTP_HOST). Change to "off" to turn off.
      RCE: "off" # Enables setting PostUp and PostDown (arbitrary commands) on nodes from the server. Off by default.
      CORS_ALLOWED_ORIGIN: "*" # The "allowed origin" for API requests. Change to restrict where API requests can come from.
      DISPLAY_KEYS: "on" # Show keys permanently in UI (until deleted) as opposed to 1-time display.

      DATABASE: "rqlite"
      SQL_CONN: "https://netmaker:{{ netmaker_creds.rqlite_password }}@{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}/"

      MQ_HOST: "mosquitto" # the address of the mq server. If running from docker compose it will be "mq". Otherwise, need to input address. If using "host networking", it will find and detect the IP of the mq container.
      MQ_SERVER_PORT: "1883" # the reachable port of MQ by the server - change if internal MQ port changes (or use external port if MQ is not on the same machine)
      MQ_PORT: "{{ netmaker_nginx.advertise_port }}" # the reachable port of MQ - change if external MQ port changes (port on proxy, not necessarily the one exposed in docker-compose)
      MQ_ADMIN_PASSWORD: "{{ netmaker_creds.mq_admin_password }}"

      HOST_NETWORK: "off" # whether or not host networking is turned on. Only turn on if configured for host networking (see docker-compose.hostnetwork.yml). Will set host-level settings like iptables.
      PORT_FORWARD_SERVICES: "" # decide which services to port forward ("dns","ssh", or "mq")

      # this section is for OAuth
      AUTH_PROVIDER: "" # "<azure-ad|github|google|oidc>"
      CLIENT_ID: "" # "<client id of your oauth provider>"
      CLIENT_SECRET: "" # "<client secret of your oauth provider>"
      FRONTEND_URL: "" # "https://dashboard.<netmaker base domain>"
      AZURE_TENANT: "" # "<only for azure, you may optionally specify the tenant for the OAuth>"
      OIDC_ISSUER: "" # https://oidc.yourprovider.com - URL of oidc provider

      VERBOSITY: "1" # logging verbosity level - 1, 2, or 3
      TELEMETRY: "off" # Whether or not to send telemetry data to help improve Netmaker. Switch to "off" to opt out of sending telemetry.
    ports:
      - "51821-51830:51821-51830/udp" # wireguard ports

  netmaker-ui: # The Netmaker UI Component
    image: gravitl/netmaker-ui:v0.16.1
    depends_on:
      - netmaker
    links:
      - "netmaker:api"
    restart: unless-stopped
    environment:
      BACKEND_URL: "https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}" # URL where UI will send API requests. Change based on SERVER_HOST, SERVER_HTTP_HOST, and API_PORT

  mosquitto: # the MQTT broker for netmaker
    image: eclipse-mosquitto:2.0.11-openssl
    restart: unless-stopped
    volumes:
      - ./mosquitto/config:/mosquitto/config
      - ./mosquitto/data:/mosquitto/data
      - ./mosquitto/logs:/mosquitto/log
      - "./certs:/certs:ro"
    depends_on:
      - netmaker
    command: ["/mosquitto/config/wait.sh"]
    environment:
      NETMAKER_SERVER_HOST: "http://netmaker:8081"
|
||||
25
netmaker_server/templates/nginx/passthrough.conf.template
Normal file
25
netmaker_server/templates/nginx/passthrough.conf.template
Normal file
@@ -0,0 +1,25 @@
|
||||
stream {
    # Route raw TLS connections by SNI, without terminating TLS here.
    map $ssl_preread_server_name $target_host {
        hostnames; # Enable matching including prefix/suffix-mask

        {{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
        {{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;

        {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883; # todo: tls-terminate?

        {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
        {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;

        # Unknown SNI: forward to a closed local port so the connection drops
        default 127.0.0.1:1;
    }

    server {
        resolver 127.0.0.11; # Explicitly set docker-resolver

        listen 443;
        ssl_preread on; # read SNI without terminating TLS

        proxy_pass $target_host;
    }
}
|
||||
27
netmaker_server/templates/nginx/proxy.conf.template
Normal file
27
netmaker_server/templates/nginx/proxy.conf.template
Normal file
@@ -0,0 +1,27 @@
|
||||
# TLS-terminating HTTP proxy: route by Host header to the backend services.
map $host $proxy_name {
    hostnames;

    {{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
    {{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;

    {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;

    # Sentinel for unknown hosts, checked below
    default 444;
}

server {
    resolver 127.0.0.11; # Explicitly set docker-resolver

    listen 8443 ssl;

    ssl_certificate /certs/node.crt;
    ssl_certificate_key /certs/node.key;

    # Unknown host: close the connection without a response
    if ($proxy_name = 444) {
        return 444;
    }

    location / {
        proxy_pass http://$proxy_name;
    }
}
|
||||
5
netmaker_server/templates/rqlite-config.json.template
Normal file
5
netmaker_server/templates/rqlite-config.json.template
Normal file
@@ -0,0 +1,5 @@
|
||||
[{
|
||||
"username": "netmaker",
|
||||
"password": "{{ netmaker_creds.rqlite_password }}",
|
||||
"perms": ["all"]
|
||||
}]
|
||||
Reference in New Issue
Block a user