Compare commits: role_nomad ... role_kuber
28 commits
| SHA1 |
|---|
| b8ae38bab8 |
| 093612f3a7 |
| a92409c56f |
| f50e3ac33c |
| 668ff23ee6 |
| c2c6a2872f |
| c1c7ec9e56 |
| 550f6868ff |
| c8f90f0f8d |
| 41570ea40d |
| a3c887748a |
| d113625fa8 |
| dadd077723 |
| 0d43d07ad4 |
| d6f8f975bb |
| 7c86a5d77d |
| 8c4e3c2401 |
| b46d35c8a5 |
| 791ad96849 |
| fc3d9845d6 |
| 590b75ac23 |
| 0c82504299 |
| 2fee9a1747 |
| fb44c39969 |
| 5452303992 |
| 4321d78cf8 |
| f9a859e95c |
| e5920b3ddf |
@@ -1,3 +0,0 @@
----
-dependencies: []
-#- role: docker
@@ -1 +0,0 @@
-ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888
@@ -5,5 +5,3 @@
 - import_tasks: ./packages.yml
 
 - import_tasks: ./aliases.yml
-
-- import_tasks: ./networking.yml
@@ -1,22 +0,0 @@
-- name: Set sysctl settings for ip-forwarding
-  copy:
-    dest: "/etc/sysctl.d/ip-forwarding.conf"
-    content: |
-      net.ipv4.ip_forward = 1
-      net.ipv6.conf.all.forwarding = 1
-  notify: reload_sysctl
-
-- name: Set sysctl settings for ipv6-address-generation
-  copy:
-    dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
-    content: |
-      net.ipv6.conf.default.addr_gen_mode = 2
-      net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
-  notify: reload_sysctl
-
-- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
-  copy:
-    dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
-    content: |
-      net.ipv6.conf.all.accept_ra = 2
-  notify: reload_sysctl
@@ -2,16 +2,40 @@
 kubernetes:
   ipPool:
     ipv4:
+      # Minimum: /24
       cluster_cidr: 10.42.0.0/16
       service_cidr: 10.43.0.0/16
     ipv6:
+      # Minimum: /120
       cluster_cidr: fd42::/56
       service_cidr: fd43::/112
 
-    # Replace - with _
+    # Interface to grab node-IPv4/v6 from
     nodeIp_interface: <interface to grab nodeIp from>
 
   control_plane:
     dns_name: <control-plane dns-reachable-name>
 
   token: <shared token for nodes to join>
 
+  network:
+    # One of [flannel, calico]
+    plugin: calico
+
+    # Helper for networking
+    helper:
+      # https://github.com/Ruakij/RoutingTableToWg
+      # Translates received-routes from e.g. BGP to wireguard-allowedips
+      # Helpful, when nodeIp_interface is a wireguard-interface
+      routingtabletowg: false
+
+  # One of [traefik-ingress]
+  ingress_controller: traefik-ingress
+
+  config_extra:
+    # etcd-tuning
+    # heartbeat: 0.5-1.5x of rtt
+    # election: 10x- of heartbeat
+    etcd-arg:
+      heartbeat-interval: 500
+      election-timeout: 5000
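Filled in, these group_vars might look like the sketch below; every concrete value is a hypothetical example, not taken from the commits:

```yaml
kubernetes:
  ipPool:
    ipv4:
      cluster_cidr: 10.42.0.0/16   # Minimum: /24
      service_cidr: 10.43.0.0/16
    ipv6:
      cluster_cidr: fd42::/56      # Minimum: /120
      service_cidr: fd43::/112
    nodeIp_interface: wg0          # hypothetical interface name

  control_plane:
    dns_name: kube.example.org     # hypothetical
  token: some-shared-secret        # hypothetical

  network:
    plugin: calico
    helper:
      routingtabletowg: true       # worthwhile when wg0 learns its routes via BGP
  ingress_controller: traefik-ingress

  config_extra:
    etcd-arg:
      heartbeat-interval: 500      # 0.5-1.5x of RTT
      election-timeout: 5000       # 10x+ of heartbeat
```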
@@ -1,4 +1,3 @@
 ---
 dependencies:
 - role: docker
-- role: netmaker
@@ -8,6 +8,7 @@
   ansible.builtin.template:
     src: k3s/{{ type }}/config.yaml.jinja2
     dest: /etc/rancher/k3s/config.yaml
+  register: config
 
 - name: Download install-script
   get_url:
@@ -1,7 +1,12 @@
 - name: Install K3s agent
   command: /root/k3s_install.sh {{ type }}
   register: command
-  changed_when: "'No change detected' in command.stdout"
+  changed_when: "'No change detected' not in command.stdout"
   until: "command is not failed"
   retries: 2
   delay: 10
+
+- name: Make sure service is started / restarted on config change
+  service:
+    name: k3s-agent
+    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
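The `changed_when` inversion is the substantive fix here: the install script prints "No change detected" when it does nothing, so the old expression reported changed on no-op runs and unchanged on real installs. The new task then restarts the service only when the templated config changed but the installer run itself did not. For comparison, a handler-based sketch of the same restart behaviour, with a hypothetical handler name (not what this PR does):

```yaml
# Hypothetical alternative using a handler:
- name: Write k3s agent config
  ansible.builtin.template:
    src: k3s/agent/config.yaml.jinja2
    dest: /etc/rancher/k3s/config.yaml
  notify: restart k3s-agent  # fires only when the file content changed

# handlers/main.yml (hypothetical):
# - name: restart k3s-agent
#   ansible.builtin.service:
#     name: k3s-agent
#     state: restarted
```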
@@ -2,11 +2,17 @@
   command: /root/k3s_install.sh {{ type }}
   when: "inventory_hostname == groups['kubernetes'][0]"
   register: command
-  changed_when: "'No change detected' in command.stdout"
+  changed_when: "'No change detected' not in command.stdout"
 
+- name: Make sure service is started / restarted on config change
+  service:
+    name: k3s
+    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
+  when: "inventory_hostname == groups['kubernetes'][0]"
+
 - name: Waiting for K3s-server to accept connections
   ansible.builtin.wait_for:
-    host: "{{ inventory_hostname }}"
+    host: "127.0.0.1"
     port: 6443
     state: started
   when: "inventory_hostname == groups['kubernetes'][0]"
@@ -15,17 +21,24 @@
   command: /root/k3s_install.sh {{ type }}
   when: "inventory_hostname != groups['kubernetes'][0]"
   register: command
-  changed_when: "'No change detected' in command.stdout"
+  changed_when: "'No change detected' not in command.stdout"
   until: "command is not failed"
   retries: 2
   delay: 10
 
+- name: Make sure service is started / restarted on config change
+  service:
+    name: k3s
+    state: "{{ 'restarted' if not command.changed and config.changed else 'started' }}"
+  when: "inventory_hostname != groups['kubernetes'][0]"
+
 - name: Waiting for K3s-server to accept connections on other nodes
   ansible.builtin.wait_for:
-    host: "{{ inventory_hostname }}"
+    host: "127.0.0.1"
     port: 6443
     state: started
   when: "inventory_hostname != groups['kubernetes'][0]"
 
 #- name: Add Kubernetes environment-vars to /etc/profile.d/
 #  blockinfile:
 #    path: /etc/profile.d/k3s-bin.sh
@@ -33,3 +46,10 @@
 #    block: |
 #      export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
 #    create: true
+
+- name: Deploy calico
+  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_calico.yml
+  when: "kubernetes.network.plugin == 'calico'"
+
+- name: Deploy network-helpers
+  import_tasks: ./roles/kubernetes/tasks/install/server/network-plugin/deploy_network_helper.yml
@@ -0,0 +1,19 @@
+- name: Deploy calico operator
+  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
+  register: command
+  changed_when: "'created' in command.stdout"
+  run_once: true
+  failed_when:
+    - "command.rc == 1 and 'AlreadyExists' not in command.stderr"
+
+- name: Deploy calico ressource template
+  ansible.builtin.template:
+    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
+    dest: /root/calico-ressource.yml
+  run_once: true
+
+- name: Deploy calico ressource
+  command: kubectl apply -f /root/calico-ressource.yml
+  register: command
+  changed_when: "'created' in command.stdout"
+  run_once: true
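`kubectl create` is not idempotent, which is why the operator task above tolerates rc 1 when stderr contains 'AlreadyExists' (plain `kubectl apply` tends to choke on the large tigera-operator manifest's last-applied annotation, so `create` is a common workaround). A module-based sketch that avoids the rc special-casing, assuming the kubernetes.core collection and its Python dependencies are available on the target (this PR does not install them):

```yaml
# Sketch only; assumes kubernetes.core is usable on the target.
- name: Download calico operator manifest
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
    dest: /root/tigera-operator.yaml

- name: Apply calico operator manifest
  kubernetes.core.k8s:
    state: present
    src: /root/tigera-operator.yaml
  run_once: true
```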
@@ -0,0 +1,7 @@
+- name: Deploy service-file for routing-table to wireguard-translation
+  ansible.builtin.template:
+    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
+    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
+    mode: u=rw,g=r,o=r
+  run_once: true
+  when: "kubernetes.network.helper.routingtabletowg"
@@ -17,19 +17,26 @@
 #- name: Disable swap
 #  command: swapoff -a
 
-#- name: Install iptables
-#  package:
-#    name:
-#      #- containerd
+- name: Install required packages
+  package:
+    name:
+      #- containerd
       #- iptables
-#    state: latest
+      # For Longhorn:
+      - nfs-common
+      - open-iscsi
+    state: latest
 
 - import_tasks: ./prerequisites/containerd.yml
 
+- name: Gather interface-name
+  set_fact:
+    interface: "{{ kubernetes.ipPool.nodeIp_interface | replace('-', '_') }}"
+
 - name: Getting nodeIp-data from interface
   set_fact:
-    nodeip_ipv4: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv4.address }}"
-    nodeip_ipv6: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv6[0].address }}"
+    nodeip_ipv4: "{{ ansible_facts[ interface ].ipv4.address }}"
+    nodeip_ipv6: "{{ ansible_facts[ interface ].ipv6[0].address if ansible_facts[ interface ].ipv6 is defined }}"
 
 - name: Run handlers to reload configurations
   meta: flush_handlers
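The new Gather interface-name step exists because Ansible stores per-interface facts under keys with dashes replaced by underscores; the old group_vars comment ("Replace - with _") pushed that burden onto the user. The guarded ipv6 lookup likewise keeps v4-only interfaces from failing. A small sketch with an assumed interface name:

```yaml
# Assumed interface name "wg-k8s" (hypothetical): its facts live under
# ansible_facts['wg_k8s'], so a raw lookup with the dashed name would fail.
- name: Gather interface-name
  set_fact:
    interface: "{{ 'wg-k8s' | replace('-', '_') }}"  # -> "wg_k8s"

- name: Getting nodeIp-data from interface
  set_fact:
    nodeip_ipv4: "{{ ansible_facts[interface].ipv4.address }}"
```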
@@ -1,7 +1,18 @@
-server: https://{{ kubernetes.control_plane.dns_name }}:6443
-token: {{ kubernetes.token }}
+server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
+token: '{{ kubernetes.token }}'
 
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
 node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+{% endif %}
 
-# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
-kubelet-arg: "--node-ip=0.0.0.0"
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+- topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+- topology.kubernetes.io/zone={{ zone }}
+{% endif %}
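Net effect of this template change: agents reach the API server via the first server node's IPv4 (from hostvars) instead of the control-plane DNS name, the token is quoted against YAML mangling, and the 0.0.0.0 kubelet workaround gives way to an explicit dual-/single-stack node-ip plus topology labels. A hypothetical render, with made-up addresses and labels:

```yaml
# Hypothetical /etc/rancher/k3s/config.yaml on an agent, assuming the first
# server's nodeip_ipv4 is 192.0.2.10, this node has 192.0.2.11 / fd42::11,
# and region=eu-central is set while zone is not:
server: https://192.0.2.10:6443
token: 'example-join-token'
node-ip: 192.0.2.11,fd42::11

node-label:
- topology.kubernetes.io/region=eu-central
```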
@@ -1,23 +1,49 @@
 ## Base ##
 {% if inventory_hostname == groups['kubernetes'][0] %}
+# Initialize with internal etcd
 cluster-init: true
 {% else %}
-server: https://{{ groups['kubernetes'][0] }}:6443
+server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
 {% endif %}
 
-token: {{ kubernetes.token }}
+token: '{{ kubernetes.token }}'
 tls-san:
 - {{ kubernetes.control_plane.dns_name }}
 
 # Networking
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
 node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
 cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
 service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
+{% else %}
+node-ip: {{ nodeip_ipv4 }}
+cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
+service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
+{% endif %}
 
 egress-selector-mode: disabled
 
 # Network-plugin
+{% if kubernetes.network.plugin == "flannel" %}
 flannel-backend: vxlan
+{% else %}
+disable-network-policy: true
+flannel-backend: none
+{% endif %}
 
-# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
-kubelet-arg: "--node-ip=0.0.0.0"
+# Ingress-plugin
+{% if kubernetes.ingress_controller != "traefik-ingress" %}
+disable: traefik
+{% endif %}
+
+## Label
+# Region & DC
+node-label:
+{% if region is defined %}
+- topology.kubernetes.io/region={{ region }}
+{% endif %}
+{% if zone is defined %}
+- topology.kubernetes.io/zone={{ zone }}
+{% endif %}
+
+{{ kubernetes.config_extra | to_yaml }}
@@ -0,0 +1,34 @@
+# This section includes base Calico installation configuration.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+    - blockSize: 26
+      cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
+      encapsulation: None
+      natOutgoing: Enabled
+      nodeSelector: all()
+
+{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
+    - blockSize: 122
+      cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
+      encapsulation: None
+      natOutgoing: Enabled
+      nodeSelector: all()
+{% endif %}
+
+---
+
+# This section configures the Calico API server.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
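The blockSize fields relate arithmetically to the pool CIDRs; the sketch below works this out from the values in this file (an inference, not a statement from the commits):

```yaml
# Block-size arithmetic for the pools above:
#   IPv4: pool /16, blockSize 26  -> 2^(26-16)  = 1024 per-node blocks, 2^(32-26)  = 64 addresses each
#   IPv6: pool /56, blockSize 122 -> 2^(122-56) = 2^66 per-node blocks, 2^(128-122) = 64 addresses each
# This presumably also explains the "Minimum: /24" and "Minimum: /120" notes in
# group_vars: 2^(26-24) = 2^(122-120) = 4 blocks, the smallest pools that still
# give several nodes a block at these block sizes.
```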
@@ -0,0 +1,45 @@
+# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: routingtabletowg
+  namespace: calico-system
+  labels:
+    app: routingtabletowg
+spec:
+  selector:
+    matchLabels:
+      app: routingtabletowg
+  template:
+    metadata:
+      labels:
+        app: routingtabletowg
+    spec:
+      tolerations:
+      # this toleration is to have the daemonset runnable on master nodes
+      # remove it if your masters can't run pods
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      hostNetwork: true
+      containers:
+      - name: routingtabletowg
+        image: "ruakij/routingtabletowg:0.2.0"
+        env:
+        - name: INTERFACE
+          value: {{ kubernetes.ipPool.nodeIp_interface }}
+        - name: FILTER_PROTOCOL
+          value: bird
+        - name: PERIODIC_SYNC
+          value: '300'
+        securityContext:
+          capabilities:
+            add:
+            - NET_ADMIN
+        resources:
+          requests:
+            cpu: 10m
+            memory: 10Mi
+          limits:
+            cpu: 20m
+            memory: 20Mi
+---
netmaker/meta/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+dependencies:
+- role: docker
@@ -2,6 +2,4 @@
   when: "netclient.join_network_token is defined"
   command: "netclient join -t {{ netclient.join_network_token }}"
   failed_when: command.rc != 0
-  changed_when: "'starting wireguard' in command.stdout"
   register: command
-  throttle: 1
@@ -3,6 +3,3 @@
 - import_tasks: ./install.yml
 
 - import_tasks: ./join-network.yml
-
-- name: Gather facts to get changes
-  ansible.builtin.gather_facts:
@@ -30,7 +30,7 @@ component netmaker_server {
    component nm_api
    nm_api -down- nm_api_http
    ng_http --( nm_api_http
-   nm_api .up.( ng_TLS : db-connection to rqlite-master
+   nm_api -up-( ng_TLS : db-connection to rqlite-master
    nm_api --( mq_plain
 }
 
@@ -1,11 +1,11 @@
 per_listener_settings false
 
 listener 8883
-protocol websockets
 allow_anonymous false
+certfile /certs/node.crt
+keyfile /certs/node.key
 
 listener 1883
-protocol websockets
 allow_anonymous false
 
 plugin /usr/lib/mosquitto_dynamic_security.so
@@ -30,7 +30,7 @@
     headers:
       Authorization: 'Bearer {{ netmaker_creds.master_key }}'
       Content-Type: application/json
-  when: "inventory_hostname == groups['netmaker_server'][0]"
+  when: "inventory_hostname == groups['netmaker'][0]"
   register: default_mesh
   until: "default_mesh is not failed"
   retries: 2
@@ -50,7 +50,7 @@
     headers:
       Authorization: 'Bearer {{ netmaker_creds.master_key }}'
      Content-Type: application/json
-  when: "inventory_hostname == groups['netmaker_server'][0]"
+  when: "inventory_hostname == groups['netmaker'][0]"
   register: default_mesh_key
   until: "default_mesh_key is not failed"
   retries: 2
@@ -33,15 +33,15 @@ services:
 
         -auth /config.json
 
-        {% if inventory_hostname != groups['netmaker_server'][0] %}
+        {% if inventory_hostname != groups['netmaker'][0] %}
         -join-as netmaker
-        -join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker_server'][0] }}:{{ netmaker_nginx.advertise_port }}
+        -join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker'][0] }}:{{ netmaker_nginx.advertise_port }}
         {% endif %}
         "
     # FIXME: /\ \/ Change http -> https
 
   netmaker: # The Primary Server for running Netmaker
-    image: gravitl/netmaker:v0.17.1
+    image: gravitl/netmaker:v0.16.1
     depends_on:
       - rqlite
     cap_add:
@@ -104,7 +104,7 @@ services:
       - "51821-51830:51821-51830/udp" # wireguard ports
 
   netmaker-ui: # The Netmaker UI Component
-    image: gravitl/netmaker-ui:v0.17.1
+    image: gravitl/netmaker-ui:v0.16.1
     depends_on:
       - netmaker
     links:
@@ -120,6 +120,7 @@ services:
       - ./mosquitto/config:/mosquitto/config
       - ./mosquitto/data:/mosquitto/data
      - ./mosquitto/logs:/mosquitto/log
+      - "./certs:/certs:ro"
    depends_on:
       - netmaker
     command: ["/mosquitto/config/wait.sh"]
@@ -6,7 +6,7 @@ stream{
     {{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
     {{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
 
-    {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
+    {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883; # todo: tls-terminate?
 
     {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
     {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;
@@ -4,8 +4,6 @@ map $host $proxy_name {
     {{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
     {{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;
-
-    {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883;
 
     {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;
 
     default 444;
@@ -1,3 +0,0 @@
----
-nomad:
-  version: 1.4.4
@@ -1,48 +0,0 @@
-[Unit]
-Description=Nomad
-Documentation=https://www.nomadproject.io/docs/
-Wants=network-online.target
-After=network-online.target
-
-# When using Nomad with Consul it is not necessary to start Consul first. These
-# lines start Consul before Nomad as an optimization to avoid Nomad logging
-# that Consul is unavailable at startup.
-#Wants=consul.service
-#After=consul.service
-
-[Service]
-
-# Nomad server should be run as the nomad user. Nomad clients
-# should be run as root
-User=root
-Group=root
-
-ExecReload=/bin/kill -HUP $MAINPID
-ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
-KillMode=process
-KillSignal=SIGINT
-LimitNOFILE=65536
-LimitNPROC=infinity
-Restart=on-failure
-RestartSec=2
-
-## Configure unit start rate limiting. Units which are started more than
-## *burst* times within an *interval* time span are not permitted to start any
-## more. Use `StartLimitIntervalSec` or `StartLimitInterval` (depending on
-## systemd version) to configure the checking interval and `StartLimitBurst`
-## to configure how many starts per interval are allowed. The values in the
-## commented lines are defaults.
-
-# StartLimitBurst = 5
-
-## StartLimitIntervalSec is used for systemd versions >= 230
-# StartLimitIntervalSec = 10s
-
-## StartLimitInterval is used for systemd versions < 230
-# StartLimitInterval = 10s
-
-TasksMax=infinity
-OOMScoreAdjust=-1000
-
-[Install]
-WantedBy=multi-user.target
@@ -1,3 +0,0 @@
----
-dependencies:
-#- role: docker
@@ -1,43 +0,0 @@
-- name: Download binary
-  ansible.builtin.unarchive:
-    remote_src: true
-    src: https://releases.hashicorp.com/nomad/{{ nomad.version }}/nomad_{{ nomad.version }}_{{ ansible_system | lower }}_{{ 'amd64' if ansible_architecture == 'x86_64' else ansible_architecture }}.zip
-    dest: /usr/local/bin/
-    mode: "755"
-
-- name: Deploy systemd-service file
-  ansible.builtin.copy:
-    src: systemd-service
-    dest: /etc/systemd/system/nomad.service
-    mode: u=rw,g=r,o=r
-
-- name: Create nomad user
-  ansible.builtin.user:
-    name: nomad
-    groups:
-      - docker
-    append: true
-
-- name: Create directory for configs
-  ansible.builtin.file:
-    path: /etc/nomad.d
-    state: directory
-    mode: "0755"
-    owner: "nomad"
-    group: "nomad"
-
-- name: Create nomad.hcl configuration file
-  ansible.builtin.template:
-    src: nomad.hcl.j2
-    dest: /etc/nomad.d/nomad.hcl
-    mode: "0644"
-    owner: "nomad"
-    group: "nomad"
-
-- name: Create directory for data
-  ansible.builtin.file:
-    path: /opt/nomad
-    state: directory
-    mode: "0755"
-    owner: "nomad"
-    group: "nomad"
@@ -1,8 +0,0 @@
-- name: Start service
-  ansible.builtin.service:
-    name: nomad
-    state: restarted
-
-- name: Waiting for service to accept connections
-  ansible.builtin.wait_for:
-    port: 4646
@@ -1,3 +0,0 @@
-- import_tasks: ./install.yml
-
-- import_tasks: ./launch.yml
@@ -1,71 +0,0 @@
-data_dir = "/opt/nomad"
-datacenter = "{{ datacenter }}"
-
-
-bind_addr = "0.0.0.0"
-
-advertise {
-  # Defaults to the first private IP address.
-  #http = "1.2.3.4"
-  #rpc = "1.2.3.4"
-  #serf = "1.2.3.4:5648" # non-default ports may be specified
-}
-
-{# TODO: Get interface-ip from hosts marked with type=server #}
-{% set server_hosts = ansible_play_batch | difference([inventory_hostname]) %}
-{% if type is defined and type == "server" %}
-server {
-  enabled = true
-  bootstrap_expect = {{ server_hosts | length }}
-
-  server_join {
-    retry_join = [ "{{ server_hosts | join('", "') }}" ]
-    retry_max = 6
-    retry_interval = "15s"
-  }
-
-  default_scheduler_config {
-    scheduler_algorithm = "binpack"
-    memory_oversubscription_enabled = true
-    reject_job_registration = false
-    pause_eval_broker = false # New in Nomad 1.3.2
-
-    preemption_config {
-      batch_scheduler_enabled = true
-      system_scheduler_enabled = true
-      service_scheduler_enabled = true
-      sysbatch_scheduler_enabled = true # New in Nomad 1.2
-    }
-  }
-}
-{% endif %}
-
-client {
-  enabled = true
-
-{% if type != "server" %}
-  servers = [ "{{ server_hosts | join('", "') }}" ]
-{% endif %}
-
-  meta {
-    node_type = "{{ type }}"
-{% if storage is defined and storage %}
-    seaweedfs_volume = "true"
-{% endif %}
-  }
-}
-
-
-plugin "raw_exec" {
-  config {
-    enabled = true
-  }
-}
-
-plugin "docker" {
-  config {
-{% if type is defined and type == "server" %}
-    allow_privileged = true
-{% endif %}
-  }
-}