Compare commits
1 commit
main...role_nomad

Author | SHA1 | Date
---|---|---
Ruakij | 940c169209 | 2 years ago
@@ -1,3 +1,4 @@
 ---
 dependencies:
   - role: docker
+  - role: netmaker
@@ -1,19 +0,0 @@
-- name: Deploy calico operator
-  command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
-  register: command
-  changed_when: "'created' in command.stdout"
-  run_once: true
-  failed_when:
-    - "command.rc == 1 and 'AlreadyExists' not in command.stderr"
-
-- name: Deploy calico ressource template
-  ansible.builtin.template:
-    src: ./k3s/server/network-plugin/calico/custom-ressource.yml.jinja2
-    dest: /root/calico-ressource.yml
-  run_once: true
-
-- name: Deploy calico ressource
-  command: kubectl apply -f /root/calico-ressource.yml
-  register: command
-  changed_when: "'created' in command.stdout"
-  run_once: true
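The removed tasks above wrap `kubectl` in a re-run-safe pattern: `changed_when` keys on "created" in stdout, and `failed_when` tolerates an `AlreadyExists` error so replays do not fail. A minimal sketch of the same pattern, with a hypothetical manifest URL:

```yaml
- name: Apply a manifest idempotently (hypothetical URL)
  command: kubectl create -f https://example.com/manifest.yaml
  register: command
  run_once: true
  changed_when: "'created' in command.stdout"      # first run creates the objects
  failed_when:
    - "command.rc == 1 and 'AlreadyExists' not in command.stderr"  # re-runs are fine
```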
@@ -1,7 +0,0 @@
-- name: Deploy service-file for routing-table to wireguard-translation
-  ansible.builtin.template:
-    src: ./k3s/server/network-plugin/calico/routingtabletowg.yml.jinja2
-    dest: /var/lib/rancher/k3s/server/manifests/routingtabletowg.yml
-    mode: u=rw,g=r,o=r
-  run_once: true
-  when: "kubernetes.network.helper.routingtabletowg"
@@ -1,18 +1,7 @@
-server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
-token: '{{ kubernetes.token }}'
+server: https://{{ kubernetes.control_plane.dns_name }}:6443
+token: {{ kubernetes.token }}
 
-{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
 node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
-{% else %}
-node-ip: {{ nodeip_ipv4 }}
-{% endif %}
 
-## Label
-# Region & DC
-node-label:
-{% if region is defined %}
-  - topology.kubernetes.io/region={{ region }}
-{% endif %}
-{% if zone is defined %}
-  - topology.kubernetes.io/zone={{ zone }}
-{% endif %}
+# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
+kubelet-arg: "--node-ip=0.0.0.0"
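For illustration, the new agent template renders to roughly the following, assuming hypothetical inventory values (`kubernetes.control_plane.dns_name: k8s.example.org`, a `K10...` token, node IPs `10.0.0.11` / `fc12::11`):

```yaml
server: https://k8s.example.org:6443
token: K10abcdef0123...

node-ip: 10.0.0.11,fc12::11

# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"
```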
@@ -1,49 +1,23 @@
 ## Base ##
 {% if inventory_hostname == groups['kubernetes'][0] %}
-# Initialize with internal etcd
 cluster-init: true
 {% else %}
-server: https://{{ hostvars[groups['kubernetes'][0]]['nodeip_ipv4'] }}:6443
+server: https://{{ groups['kubernetes'][0] }}:6443
 {% endif %}
 
-token: '{{ kubernetes.token }}'
+token: {{ kubernetes.token }}
 tls-san:
   - {{ kubernetes.control_plane.dns_name }}
 
 # Networking
-{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
 node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
 cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
 service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
-{% else %}
-node-ip: {{ nodeip_ipv4 }}
-cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
-service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }}
-{% endif %}
 
 egress-selector-mode: disabled
 
 # Network-plugin
-{% if kubernetes.network.plugin == "flannel" %}
 flannel-backend: vxlan
-{% else %}
-disable-network-policy: true
-flannel-backend: none
-{% endif %}
-
-# Ingress-plugin
-{% if kubernetes.ingress_controller != "traefik-ingress" %}
-disable: traefik
-{% endif %}
-
-## Label
-# Region & DC
-node-label:
-{% if region is defined %}
-  - topology.kubernetes.io/region={{ region }}
-{% endif %}
-{% if zone is defined %}
-  - topology.kubernetes.io/zone={{ zone }}
-{% endif %}
 
-{{ kubernetes.config_extra | to_yaml }}
+# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
+kubelet-arg: "--node-ip=0.0.0.0"
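Rendered for the first control-plane host, the new server template comes out roughly like this (CIDRs and names are assumed sample values, not from the repo):

```yaml
## Base ##
cluster-init: true

token: K10abcdef0123...
tls-san:
  - k8s.example.org

# Networking
node-ip: 10.0.0.10,fc12::10
cluster-cidr: 10.42.0.0/16,fd42::/56
service-cidr: 10.43.0.0/16,fd43::/112

egress-selector-mode: disabled

# Network-plugin
flannel-backend: vxlan

# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"
```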
@@ -1,34 +0,0 @@
-# This section includes base Calico installation configuration.
-# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
-  name: default
-spec:
-  # Configures Calico networking.
-  calicoNetwork:
-    # Note: The ipPools section cannot be modified post-install.
-    ipPools:
-      - blockSize: 26
-        cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }}
-        encapsulation: None
-        natOutgoing: Enabled
-        nodeSelector: all()
-
-{% if nodeip_ipv6 != "" and kubernetes.ipPool.ipv6 is defined %}
-      - blockSize: 122
-        cidr: {{ kubernetes.ipPool.ipv6.cluster_cidr }}
-        encapsulation: None
-        natOutgoing: Enabled
-        nodeSelector: all()
-{% endif %}
-
----
-
-# This section configures the Calico API server.
-# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
-apiVersion: operator.tigera.io/v1
-kind: APIServer
-metadata:
-  name: default
-spec: {}
@@ -1,45 +0,0 @@
-# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: routingtabletowg
-  namespace: calico-system
-  labels:
-    app: routingtabletowg
-spec:
-  selector:
-    matchLabels:
-      app: routingtabletowg
-  template:
-    metadata:
-      labels:
-        app: routingtabletowg
-    spec:
-      tolerations:
-        # this toleration is to have the daemonset runnable on master nodes
-        # remove it if your masters can't run pods
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
-      hostNetwork: true
-      containers:
-        - name: routingtabletowg
-          image: "ruakij/routingtabletowg:0.2.0"
-          env:
-            - name: INTERFACE
-              value: {{ kubernetes.ipPool.nodeIp_interface }}
-            - name: FILTER_PROTOCOL
-              value: bird
-            - name: PERIODIC_SYNC
-              value: '300'
-          securityContext:
-            capabilities:
-              add:
-                - NET_ADMIN
-          resources:
-            requests:
-              cpu: 10m
-              memory: 10Mi
-            limits:
-              cpu: 20m
-              memory: 20Mi
----
@@ -1,6 +0,0 @@
-netbird_client:
-  # Key and url to join a network
-  # leave empty to ignore
-  join_network:
-    setup_key:
-    management_url:
@@ -1,26 +0,0 @@
-- name: Install Packages
-  # when: docker_file.stat.exists == False
-  package:
-    name:
-      - ca-certificates
-      - curl
-      - gnupg
-
-- name: Add netbird-key
-  apt_key:
-    url: https://pkgs.wiretrustee.com/debian/public.key
-    state: present
-
-- name: Add netbird-repository
-  apt_repository:
-    repo: "deb https://pkgs.wiretrustee.com/debian stable main"
-    state: present
-    filename: netbird
-    update_cache: yes
-
-- name: Install wireguard & netbird
-  package:
-    name:
-      - wireguard
-      - netbird
-    state: latest
@@ -1,16 +0,0 @@
-- name: Join netbird-network
-  when: "netbird_client.join_network.setup_key is defined"
-  command: "netbird up --management-url {{ netbird_client.join_network.management_url }} --setup-key {{ netbird_client.join_network.setup_key }}"
-  failed_when: command.rc != 0
-  changed_when: "'Connected' in command.stdout"
-  register: command
-
-- name: Wait for netbird-interface to exist
-  wait_for:
-    path: "/sys/class/net/wt0"
-    state: present
-  when: command.changed
-
-- name: Gather facts to get changes
-  ansible.builtin.gather_facts:
-  when: command.changed
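If a verification step were wanted after the join, `netbird status` could be polled in the same style; this is a sketch only, and the exact output format of `netbird status` is an assumption here:

```yaml
- name: Verify netbird is connected (sketch; output strings assumed)
  command: netbird status
  register: netbird_status
  changed_when: false   # read-only check, never reports a change
```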
@@ -1,4 +0,0 @@
-- import_tasks: ./install.yml
-
-- import_tasks: ./join-network.yml
-
@@ -1,29 +0,0 @@
-nftables:
-  # Rules to add
-  # Handled as templates
-  # Creates separate files for each entry.
-  # The identifier is necessary for ansible to be able to merge the keys (when 'hash_behaviour = merge')
-  # rule-ids have to be unique across files and raw
-  rules:
-    # Files with Rules to add
-    files:
-      #'<group_identifier>': '<relative-location>'
-      #'<group_identifier>':
-      #  main: <relative-location>
-      #  '<identifier>': '<relative-location>'
-
-    # Rules to add
-    raw:
-      #'<group_identifier>': '<content>'
-      #'<group_identifier>':
-      #  main: <content>
-      #  '<identifier>': '<content>'
-
-  # Decides if /etc/nftables.conf is applied or separate files which have changed
-  # Separate changes require the files to be self-tidying to not end up with duplicate rules
-  # e.g.
-  #  table ip mytable
-  #  flush table ip mytable
-  #  delete table ip mytable
-  #  table ip mytable {} ...
-  apply_global: false
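With `apply_global: false`, individual rule files are re-applied on change, which is why the comment asks for self-tidying files. A hypothetical `raw` entry spelling out that pattern (identifier and table name invented for illustration):

```yaml
nftables:
  rules:
    raw:
      'example-filter': |
        # self-tidying: declare, flush, delete, then re-create,
        # so re-applying this file never duplicates rules
        table ip mytable
        flush table ip mytable
        delete table ip mytable
        table ip mytable {
          chain input {
            type filter hook input priority 0; policy accept;
          }
        }
```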
@@ -1,8 +0,0 @@
-- name: Load group rules
-  command: "nft -f /etc/nftables/ansible-managed/{{ item }}.nft"
-  loop: "{{ combined_rules | list }}"
-  when: not nftables.apply_global
-
-- name: Load global rule file
-  command: "nft -f /etc/nftables.nft"
-  when: nftables.apply_global
@@ -1,11 +0,0 @@
-- name: Deploying group files
-  include_tasks: ./per-group-template-file.yml
-  with_items:
-    - "{{ nftables.rules.files | list }}"
-
-- name: Deploying group raw-files
-  include_tasks: ./per-group-template.yml
-  with_items:
-    - "{{ nftables.rules.raw | list }}"
-
-- include_tasks: ./remove-files.yml
@@ -1,51 +0,0 @@
-- set_fact:
-    group_identifier: "{{ item }}"
-    value: "{{ nftables.rules.files[item] }}"
-  when: "item is defined"
-
-#'<group_identifier>': '<relative-location>'
-- block:
-    - name: Create main rule file
-      template:
-        src: "{{ value }}"
-        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
-  when: value is string
-
-#'<group_identifier>':
-#  main: <relative-location>
-#  '<identifier>': '<relative-location>'
-- block:
-    - set_fact:
-        items: "{{ nftables.rules.files[item] }}"
-
-    - block:
-        - name: Create main rule file
-          template:
-            src: "{{ items['main'] }}"
-            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
-
-        - name: Include rule files
-          lineinfile:
-            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
-            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
-            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
-      when: items['main'] is defined
-
-    - name: Create group folder
-      file:
-        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
-        state: directory
-      when: items|length > 0
-
-    - set_fact:
-        test: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
-
-    - name: Create included rule files
-      template:
-        src: "{{ fileItem.value }}"
-        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ fileItem.key }}.nft"
-      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
-      loop_control:
-        loop_var: fileItem
-
-  when: value is mapping
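For reference, a hypothetical inventory value covering both shapes this task handled (string vs. mapping; the paths are invented):

```yaml
nftables:
  rules:
    files:
      # plain string: rendered to /etc/nftables/ansible-managed/base.nft
      'base': 'rules/base.nft.jinja2'
      # mapping: 'main' file plus included files under .../web/
      'web':
        main: 'rules/web-main.nft.jinja2'
        'http': 'rules/web-http.nft.jinja2'
```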
@@ -1,48 +0,0 @@
-- set_fact:
-    group_identifier: "{{ item }}"
-    value: "{{ nftables.rules.raw[item] }}"
-  when: "item is defined"
-
-#'<group_identifier>': '<content>'
-- block:
-    - name: Create main rule file
-      copy:
-        content: "{{ value }}"
-        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
-  when: value is string
-
-#'<group_identifier>':
-#  main: <content>
-#  '<identifier>': '<content>'
-- block:
-    - set_fact:
-        items: "{{ nftables.rules.raw[item] }}"
-
-    - block:
-        - name: Create main rule file
-          copy:
-            content: "{{ items['main'] }}"
-            dest: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
-
-        - name: Include rule files
-          lineinfile:
-            path: "/etc/nftables/ansible-managed/{{ group_identifier }}.nft"
-            regexp: "include\\s+(\"|')\\/etc\\/nftables\\/ansible-managed\\/{{ group_identifier }}\\/.*$"
-            line: 'include "/etc/nftables/ansible-managed/{{ group_identifier }}/*.nft"'
-      when: items['main'] is defined
-
-    - name: Create group folder
-      file:
-        path: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
-        state: directory
-      when: items|length > 0
-
-    - name: Create included rule files
-      copy:
-        content: "{{ included_item.value }}"
-        dest: "/etc/nftables/ansible-managed/{{ group_identifier }}/{{ included_item.key }}.nft"
-      loop: "{{ items | dict2items | selectattr('key', 'ne', 'main') }}"
-      loop_control:
-        loop_var: included_item
-
-  when: value is mapping
@@ -1,4 +0,0 @@
-- name: Install Packages
-  package:
-    name:
-      - nftables
@@ -1,7 +0,0 @@
-- import_tasks: ./prerequisites.yml
-
-- import_tasks: ./setup-packages.yml
-
-- import_tasks: ./deploy-rules/main.yml
-
-- import_tasks: ./apply-files.yml
@@ -1,13 +0,0 @@
-# Defaults if missing
-- name: Set defaults if missing
-  set_fact:
-    nftables:
-      rules:
-        files: "{{ nftables.rules.files | default({}) | combine({}) }}"
-        raw: "{{ nftables.rules.raw | default({}) | combine({}) }}"
-    combined_rules: "{{ nftables.rules.raw | combine(nftables.rules.files, recursive=true) }}"
-
-#- name: Check items for consistency
-#  assert:
-#    that: "{{ nftables.rules.files.values() | length }} + {{ nftables.rules.raw.values() | length }} == {{ combined_rules.values() | length }}"
-#    fail_msg: "files and raw rules share the same identifier"
@@ -1,21 +0,0 @@
-- name: Handle removed group files
-  block:
-    - find:
-        paths: /etc/nftables/ansible-managed/
-        file_type: 'any'
-        excludes: '{% for item in combined_rules %}{{ item }},{{ item }}.nft,{% endfor %}'
-        depth: 1
-      register: removeFiles
-
-    - file:
-        path: "{{ fileItem.path }}"
-        state: absent
-      loop: "{{ removeFiles.files }}"
-      loop_control:
-        label: "{{ fileItem.path }}"
-        loop_var: fileItem
-
-- name: Handle removed included files per group
-  include_tasks: ./remove-per-group.yml
-  with_items:
-    - "{{ combined_rules | list }}"
@@ -1,20 +0,0 @@
-- set_fact:
-    group_identifier: "{{ item }}"
-    group_items: "{{ combined_rules[item] }}"
-
-- block:
-    - find:
-        paths: "/etc/nftables/ansible-managed/{{ group_identifier }}/"
-        file_type: 'any'
-        excludes: '{% for item in group_items %}{{ item }}.nft,{% endfor %}'
-      register: removeFiles
-
-    - file:
-        path: "{{ fileItem.path }}"
-        state: absent
-      loop: "{{ removeFiles.files }}"
-      loop_control:
-        label: "{{ fileItem.path }}"
-        loop_var: fileItem
-
-  when: group_items is mapping
@@ -1,15 +0,0 @@
-- name: Install nftables
-  package:
-    name:
-      - nftables
-
-- name: Create /etc/nftables/ansible-managed
-  file:
-    path: /etc/nftables/ansible-managed
-    state: directory
-
-- name: Include files in /etc/nftables/ansible-managed/ from /etc/nftables.conf
-  blockinfile:
-    path: /etc/nftables.conf
-    marker: "# {mark} ANSIBLE MANAGED BLOCK - nftables"
-    content: 'include "/etc/nftables/ansible-managed/*.nft"'
@@ -0,0 +1,3 @@
+---
+nomad:
+  version: 1.4.4
@@ -0,0 +1,48 @@
+[Unit]
+Description=Nomad
+Documentation=https://www.nomadproject.io/docs/
+Wants=network-online.target
+After=network-online.target
+
+# When using Nomad with Consul it is not necessary to start Consul first. These
+# lines start Consul before Nomad as an optimization to avoid Nomad logging
+# that Consul is unavailable at startup.
+#Wants=consul.service
+#After=consul.service
+
+[Service]
+
+# Nomad server should be run as the nomad user. Nomad clients
+# should be run as root
+User=root
+Group=root
+
+ExecReload=/bin/kill -HUP $MAINPID
+ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
+KillMode=process
+KillSignal=SIGINT
+LimitNOFILE=65536
+LimitNPROC=infinity
+Restart=on-failure
+RestartSec=2
+
+## Configure unit start rate limiting. Units which are started more than
+## *burst* times within an *interval* time span are not permitted to start any
+## more. Use `StartLimitIntervalSec` or `StartLimitInterval` (depending on
+## systemd version) to configure the checking interval and `StartLimitBurst`
+## to configure how many starts per interval are allowed. The values in the
+## commented lines are defaults.
+
+# StartLimitBurst = 5
+
+## StartLimitIntervalSec is used for systemd versions >= 230
+# StartLimitIntervalSec = 10s
+
+## StartLimitInterval is used for systemd versions < 230
+# StartLimitInterval = 10s
+
+TasksMax=infinity
+OOMScoreAdjust=-1000
+
+[Install]
+WantedBy=multi-user.target
@@ -0,0 +1,3 @@
+---
+dependencies:
+  #- role: docker
@@ -0,0 +1,43 @@
+- name: Download binary
+  ansible.builtin.unarchive:
+    remote_src: true
+    src: https://releases.hashicorp.com/nomad/{{ nomad.version }}/nomad_{{ nomad.version }}_{{ ansible_system | lower }}_{{ 'amd64' if ansible_architecture == 'x86_64' else ansible_architecture }}.zip
+    dest: /usr/local/bin/
+    mode: "755"
+
+- name: Deploy systemd-service file
+  ansible.builtin.copy:
+    src: systemd-service
+    dest: /etc/systemd/system/nomad.service
+    mode: u=rw,g=r,o=r
+
+- name: Create nomad user
+  ansible.builtin.user:
+    name: nomad
+    groups:
+      - docker
+    append: true
+
+- name: Create directory for configs
+  ansible.builtin.file:
+    path: /etc/nomad.d
+    state: directory
+    mode: "0755"
+    owner: "nomad"
+    group: "nomad"
+
+- name: Create nomad.hcl configuration file
+  ansible.builtin.template:
+    src: nomad.hcl.j2
+    dest: /etc/nomad.d/nomad.hcl
+    mode: "0644"
+    owner: "nomad"
+    group: "nomad"
+
+- name: Create directory for data
+  ansible.builtin.file:
+    path: /opt/nomad
+    state: directory
+    mode: "0755"
+    owner: "nomad"
+    group: "nomad"
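On a typical x86_64 Linux host with the default `nomad.version` above, the templated `src` URL resolves to:

```
https://releases.hashicorp.com/nomad/1.4.4/nomad_1.4.4_linux_amd64.zip
```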
@@ -0,0 +1,8 @@
+- name: Start service
+  ansible.builtin.service:
+    name: nomad
+    state: restarted
+
+- name: Waiting for service to accept connections
+  ansible.builtin.wait_for:
+    port: 4646
@@ -0,0 +1,3 @@
+- import_tasks: ./install.yml
+
+- import_tasks: ./launch.yml
|
|||||||
|
data_dir = "/opt/nomad"
|
||||||
|
datacenter = "{{ datacenter }}"
|
||||||
|
|
||||||
|
|
||||||
|
bind_addr = "0.0.0.0"
|
||||||
|
|
||||||
|
advertise {
|
||||||
|
# Defaults to the first private IP address.
|
||||||
|
#http = "1.2.3.4"
|
||||||
|
#rpc = "1.2.3.4"
|
||||||
|
#serf = "1.2.3.4:5648" # non-default ports may be specified
|
||||||
|
}
|
||||||
|
|
||||||
|
{# TODO: Get interface-ip from hosts marked with type=server #}
|
||||||
|
{% set server_hosts = ansible_play_batch | difference([inventory_hostname]) %}
|
||||||
|
{% if type is defined and type == "server" %}
|
||||||
|
server {
|
||||||
|
enabled = true
|
||||||
|
bootstrap_expect = {{ server_hosts | length }}
|
||||||
|
|
||||||
|
server_join {
|
||||||
|
retry_join = [ "{{ server_hosts | join('", "') }}" ]
|
||||||
|
retry_max = 6
|
||||||
|
retry_interval = "15s"
|
||||||
|
}
|
||||||
|
|
||||||
|
default_scheduler_config {
|
||||||
|
scheduler_algorithm = "binpack"
|
||||||
|
memory_oversubscription_enabled = true
|
||||||
|
reject_job_registration = false
|
||||||
|
pause_eval_broker = false # New in Nomad 1.3.2
|
||||||
|
|
||||||
|
preemption_config {
|
||||||
|
batch_scheduler_enabled = true
|
||||||
|
system_scheduler_enabled = true
|
||||||
|
service_scheduler_enabled = true
|
||||||
|
sysbatch_scheduler_enabled = true # New in Nomad 1.2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
client {
|
||||||
|
enabled = true
|
||||||
|
|
||||||
|
{% if type != "server" %}
|
||||||
|
servers = [ "{{ server_hosts | join('", "') }}" ]
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
meta {
|
||||||
|
node_type = "{{ type }}"
|
||||||
|
{% if storage is defined and storage %}
|
||||||
|
seaweedfs_volume = "true"
|
||||||
|
{% endif %}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
plugin "raw_exec" {
|
||||||
|
config {
|
||||||
|
enabled = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
plugin "docker" {
|
||||||
|
config {
|
||||||
|
{% if type is defined and type == "server" %}
|
||||||
|
allow_privileged = true
|
||||||
|
{% endif %}
|
||||||
|
}
|
||||||
|
}
|
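Rendered for a hypothetical three-host play where the current host has `type=server`, `datacenter=dc1`, and peers `node2`/`node3`, the template produces roughly the following. Note that `server_hosts` excludes the current host, so with three play hosts `bootstrap_expect` renders as 2:

```hcl
data_dir = "/opt/nomad"
datacenter = "dc1"

bind_addr = "0.0.0.0"

server {
  enabled = true
  bootstrap_expect = 2

  server_join {
    retry_join = [ "node2", "node3" ]
    retry_max = 6
    retry_interval = "15s"
  }

  # scheduler config blocks as in the template
}

client {
  enabled = true

  meta {
    node_type = "server"
  }
}

# plugin "raw_exec" and plugin "docker" blocks as in the template
```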
@@ -1,12 +0,0 @@
-wireguard_ipv6_converter:
-  version: latest
-
-  # see https://github.com/Ruakij/wg-ipv6-converter#31-environment
-  setup:
-    interface: wg0
-    #ipv6_format: fc12::%02x%02x:%02x%02x/%d
-    #filter_prefix: 100.100
-    #recheck_interval: 60s
-
-  service:
-    #bindTo: netbird.service
@@ -1,11 +0,0 @@
-- name: Get architecture
-  set_fact:
-    arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'arm64' }}"
-    versionUri: "{% if wireguard_ipv6_converter.version == 'latest' %}latest/download{% else %}download/{{ wireguard_ipv6_converter.version }}{% endif %}"
-
-- name: Download binary
-  get_url:
-    url: https://github.com/Ruakij/wg-ipv6-converter/releases/{{ versionUri }}/wg-ipv6-converter_{{ arch }}
-    dest: /usr/local/bin/wg-ipv6-converter
-    mode: "744"
-  register: deployDownload
@@ -1,3 +0,0 @@
-- import_tasks: ./deploy.yml
-
-- import_tasks: ./setup-service.yml
@@ -1,27 +0,0 @@
-- name: Deploy service
-  ansible.builtin.template:
-    src: wg-ipv6-conv.service.jinja2
-    dest: /etc/systemd/system/wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}.service
-  register: serviceFile
-
-- name: Enable service
-  ansible.builtin.service:
-    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
-    daemon-reload: true
-    enabled: true
-
-- name: Start service if interface exists already
-  ansible.builtin.service:
-    name: wg-ipv6-converter_{{ wireguard_ipv6_converter.setup.interface }}
-    state: "{{ 'restarted' if deployDownload.changed or serviceFile.changed else 'started' }}"
-  register: service
-  when: "wireguard_ipv6_converter.setup.interface in ansible_interfaces"
-
-- name: Pause for 5s to wait for program to have run
-  ansible.builtin.pause:
-    seconds: 5
-  when: "service.changed"
-
-- name: Gather facts to get changes
-  ansible.builtin.gather_facts:
-  when: "service.changed"
@@ -1,29 +0,0 @@
-[Unit]
-Description=WireGuard IPv6 converter for {{ wireguard_ipv6_converter.setup.interface }}
-{% if wireguard_ipv6_converter.service.bindTo is defined %}
-BindsTo={{ wireguard_ipv6_converter.service.bindTo }}
-After={{ wireguard_ipv6_converter.service.bindTo }}
-{% endif %}
-
-[Service]
-Type=simple
-{% if wireguard_ipv6_converter.service.bindTo is defined %}
-ExecStartPre=/bin/sleep 10
-{% endif %}
-ExecStart=/usr/local/bin/wg-ipv6-converter
-Restart=always
-RestartSec=30
-
-Environment="INTERFACE={{ wireguard_ipv6_converter.setup.interface }}"
-{% if wireguard_ipv6_converter.setup.ipv6_format is defined %}
-Environment="IPV6_FORMAT={{ wireguard_ipv6_converter.setup.ipv6_format }}"
-{% endif %}
-{% if wireguard_ipv6_converter.setup.filter_prefix is defined %}
-Environment="FILTER_PREFIX={{ wireguard_ipv6_converter.setup.filter_prefix }}"
-{% endif %}
-{% if wireguard_ipv6_converter.setup.recheck_interval is defined %}
-Environment="RECHECK_INTERVAL={{ wireguard_ipv6_converter.setup.recheck_interval }}"
-{% endif %}
-
-[Install]
-WantedBy=multi-user.target
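With the role defaults removed above (`interface: wg0`, no `bindTo`, no optional environment overrides), this template rendered to a minimal unit:

```ini
[Unit]
Description=WireGuard IPv6 converter for wg0

[Service]
Type=simple
ExecStart=/usr/local/bin/wg-ipv6-converter
Restart=always
RestartSec=30

Environment="INTERFACE=wg0"

[Install]
WantedBy=multi-user.target
```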