19 Commits

Author SHA1 Message Date
940c169209 Initial role stuff 2023-03-16 14:06:28 +01:00
f3e381aca3 Move netmaker to deprecated 2023-01-18 13:23:51 +01:00
233eadaf40 Merge branch 'role_netmaker_server' 2023-01-10 09:47:34 +01:00
e5ebc2ad5f Merge branch 'role_common' 2022-11-02 16:41:10 +01:00
98c51c6fc1 Merge branch 'role_kubernetes-k3s' 2022-11-02 16:40:52 +01:00
6b59bf6c75 Merge branch 'role_netmaker' 2022-11-02 16:40:36 +01:00
1b2af7cf6c Merge branch 'role_netmaker_server' 2022-11-02 16:40:25 +01:00
d9cf3d2066 Re-gatherfacts at the end for other plays 2022-11-02 16:38:13 +01:00
f42bce9b6b Add changed-detection 2022-11-02 16:37:53 +01:00
0fc5dbb791 Initial role-data 2022-11-02 16:29:55 +01:00
975746e7d7 Add IPv6-network to common 2022-11-02 10:27:06 +01:00
247fdec7ae fixme: hotfix for multi-master netmaker-server
netmaker doesn't handle concurrent joins to different server-nodes well and will duplicate addresses
2022-10-27 01:23:45 +02:00
9cb2e88193 Merge branch 'role_netmaker' 2022-10-27 01:16:57 +02:00
25ceb0f456 Merge branch 'role_netmaker_server' 2022-10-27 01:16:55 +02:00
fcc4f1ed18 Fix task 2022-10-21 15:40:00 +02:00
de0e220004 Add defaults-var-file 2022-10-21 15:32:20 +02:00
f9cc97a8f2 Add CA to trust-store 2022-10-21 15:32:08 +02:00
811fc22eef Delete unnecessary task-file 2022-10-21 15:31:37 +02:00
4cb418e2b6 Add role netmaker (netclient) 2022-10-17 14:51:52 +02:00
48 changed files with 550 additions and 0 deletions

@@ -0,0 +1,4 @@
netclient:
  # Token to join default-network
  # leave empty to ignore
  join_network_token:
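The join task further below only runs when this token is non-empty, so a minimal sketch of enabling it for one host could look like this (the file name and token value are placeholders, not taken from this repo):

# host_vars/<hostname>.yml (hypothetical)
netclient:
  join_network_token: "<access token generated on the netmaker server>"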

@@ -0,0 +1,3 @@
---
dependencies: []
#- role: docker

@@ -0,0 +1,4 @@
- name: Deploy CA Certificate
  ansible.builtin.copy:
    src: secret_files/netmaker_server/ca/ca.crt
    dest: /etc/ssl/certs/netmaker-ca.pem

@@ -0,0 +1,25 @@
- name: Install Packages
  # when: docker_file.stat.exists == False
  package:
    name:
      - gpg
      - gpg-agent
- name: Add netmaker-key
  apt_key:
    url: https://apt.netmaker.org/gpg.key
    state: present
- name: Add netmaker-repository
  apt_repository:
    repo: "deb https://apt.netmaker.org stable main"
    state: present
    filename: netmaker
    update_cache: yes
- name: Install wireguard & netclient
  package:
    name:
      - wireguard
      - netclient
    state: latest

@@ -0,0 +1,7 @@
- name: Join netmaker-network
  # only join when a token is actually set ("leave empty to ignore" in defaults)
  when: "netclient.join_network_token is defined and netclient.join_network_token"
  command: "netclient join -t {{ netclient.join_network_token }}"
  failed_when: command.rc != 0
  changed_when: "'starting wireguard' in command.stdout"
  register: command
  # serialize joins: netmaker duplicates addresses when nodes join different server-nodes concurrently
  throttle: 1

@@ -0,0 +1,8 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml
- name: Gather facts to get changes
  ansible.builtin.gather_facts:

common/defaults/main.yml Normal file

@@ -0,0 +1 @@
ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888

@@ -5,3 +5,5 @@
- import_tasks: ./packages.yml
- import_tasks: ./aliases.yml
- import_tasks: ./networking.yml

@@ -0,0 +1,22 @@
- name: Set sysctl settings for ip-forwarding
  copy:
    dest: "/etc/sysctl.d/ip-forwarding.conf"
    content: |
      net.ipv4.ip_forward = 1
      net.ipv6.conf.all.forwarding = 1
  notify: reload_sysctl
- name: Set sysctl settings for ipv6-address-generation
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
    content: |
      net.ipv6.conf.default.addr_gen_mode = 2
      net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
  notify: reload_sysctl
- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
    content: |
      net.ipv6.conf.all.accept_ra = 2
  notify: reload_sysctl
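addr_gen_mode = 2 selects stable-privacy SLAAC addresses derived from stable_secret, so the shared example secret in common/defaults would normally be overridden per host; a small sketch, with a made-up value:

# host_vars/<hostname>.yml (hypothetical)
ipv6_stable_secret: "aaaa:bbbb:cccc:dddd:eeee:ffff:0000:1111"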

@@ -0,0 +1,17 @@
---
kubernetes:
  ipPool:
    ipv4:
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
    ipv6:
      cluster_cidr: fd42::/56
      service_cidr: fd43::/112
    # Replace - with _
    nodeIp_interface: <interface to grab nodeIp from>
  control_plane:
    dns_name: <control-plane dns-reachable-name>
  token: <shared token for nodes to join>
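As a hedged illustration (all values below are made up, not from this repo), a filled-in override could look like the following; the interface key uses underscores because Ansible's per-interface fact names replace dashes with underscores, which appears to be what the "Replace - with _" comment refers to:

kubernetes:
  ipPool:
    ipv4:
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
    ipv6:
      cluster_cidr: fd42::/56
      service_cidr: fd43::/112
    nodeIp_interface: eth0
  control_plane:
    dns_name: k8s.example.org
  token: "<shared token for nodes to join>"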

@@ -0,0 +1,33 @@
@startuml
rectangle "Control-Plane" as control_plane {
  rectangle "Node" as sn1 {
    component "netclient" as sn1_netclient
    component etcd as sn1_etcd
    component "k3s-server" as sn1_k3s_server
    sn1_k3s_server - sn1_etcd
  }
  rectangle "Node" as sn2 {
    component "netclient" as sn2_netclient
    component etcd as sn2_etcd
    component "k3s-server" as sn2_k3s_server
    sn2_k3s_server - sn2_etcd
  }
  sn1_netclient -- sn2_netclient
  sn1_etcd -- sn2_etcd
}
rectangle "Workers" {
  rectangle "Node" as an1 {
    component "netclient" as an1_netclient
    component "k3s-agent" as an1_k3s_agent
  }
}
@enduml

@@ -0,0 +1,35 @@
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
disabled_plugins = []
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true

@@ -0,0 +1,19 @@
- name: reload_sysctl
  command: sysctl --system
- name: restart_containerd
  ansible.builtin.service:
    name: containerd
    state: restarted
- name: reload_networking
  service:
    name: networking
    state: restarted
  async: 5
  poll: 0
  notify: wait_for_connection
- name: wait_for_connection
  wait_for_connection:
    delay: 5

kubernetes/meta/main.yml Normal file

@@ -0,0 +1,4 @@
---
dependencies:
- role: docker
- role: netmaker

@@ -0,0 +1,29 @@
- name: Create k3s-folder
  ansible.builtin.file:
    path: /etc/rancher/k3s/
    state: directory
    mode: '0755'
- name: Deploy k3s config
  ansible.builtin.template:
    src: k3s/{{ type }}/config.yaml.jinja2
    dest: /etc/rancher/k3s/config.yaml
- name: Download install-script
  get_url:
    url: https://get.k3s.io
    dest: /root/k3s_install.sh
    mode: '744'
# todo: update when file changed?
- import_tasks: ./install/server/setup_network.yml
  when: "type == 'server'"
- import_tasks: ./install/server/install_helm.yml
  when: "type == 'server'"
- import_tasks: ./install/server/install_k3s.yml
  when: "type == 'server'"
- import_tasks: ./install/agent/install_k3s.yml
  when: "type == 'agent'"

@@ -0,0 +1,7 @@
- name: Install K3s agent
  command: /root/k3s_install.sh {{ type }}
  register: command
  # report "changed" unless the install script detected no change
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

@@ -0,0 +1,17 @@
- name: Add Balto key
  apt_key:
    url: https://baltocdn.com/helm/signing.asc
    state: present
- name: Add Balto Repository
  apt_repository:
    repo: "deb https://baltocdn.com/helm/stable/debian/ all main"
    state: present
    filename: kubernetes
    update_cache: yes
- name: Install helm
  package:
    name:
      - helm
    state: latest

@@ -0,0 +1,35 @@
- name: Install K3s-server for 1st-node
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname == groups['kubernetes'][0]"
  register: command
  # report "changed" unless the install script detected no change
  changed_when: "'No change detected' not in command.stdout"
- name: Waiting for K3s-server to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname == groups['kubernetes'][0]"
- name: Install K3s-server for other nodes
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname != groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10
- name: Waiting for K3s-server to accept connections on other nodes
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname != groups['kubernetes'][0]"
#- name: Add Kubernetes environment-vars to /etc/profile.d/
#  blockinfile:
#    path: /etc/profile.d/k3s-bin.sh
#    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
#    block: |
#      export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
#    create: true

@@ -0,0 +1,6 @@
- name: Set control-plane-dns-endpoint towards local-ip
  blockinfile:
    path: /etc/hosts
    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
    block: |
      {{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}

@@ -0,0 +1,4 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./install.yml

@@ -0,0 +1,35 @@
#- name: Load br_netfilter kernel-module
#  modprobe:
#    name: br_netfilter
#    state: present
- name: Set sysctl settings for iptables bridged traffic
  copy:
    dest: "/etc/sysctl.d/kubernetes.conf"
    content: |
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      net.ipv4.conf.all.forwarding = 1
      net.ipv6.conf.all.forwarding = 1
  notify: reload_sysctl
#- name: Disable swap
#  command: swapoff -a
#- name: Install iptables
#  package:
#    name:
#      #- containerd
#      - iptables
#    state: latest
- import_tasks: ./prerequisites/containerd.yml
- name: Getting nodeIp-data from interface
  set_fact:
    nodeip_ipv4: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv4.address }}"
    nodeip_ipv6: "{{ ansible_facts[ kubernetes.ipPool.nodeIp_interface ].ipv6[0].address }}"
- name: Run handlers to reload configurations
  meta: flush_handlers

@@ -0,0 +1,24 @@
- name: Check if containerd-service exists & is started
  service:
    name: containerd
    state: started
  ignore_errors: true
  register: containerd_status
- name: Install containerd when not exists
  package:
    name:
      - containerd
  when: containerd_status is failed
- name: Create containerd config-folder
  file:
    path: /etc/containerd
    state: directory
- name: Deploy containerd-config
  ansible.builtin.copy:
    src: containerd_config.toml
    dest: /etc/containerd/config.toml
    mode: u=rw,g=r,o=r
  notify: restart_containerd

@@ -0,0 +1,7 @@
server: https://{{ kubernetes.control_plane.dns_name }}:6443
token: {{ kubernetes.token }}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"

@@ -0,0 +1,23 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
cluster-init: true
{% else %}
server: https://{{ groups['kubernetes'][0] }}:6443
{% endif %}
token: {{ kubernetes.token }}
tls-san:
- {{ kubernetes.control_plane.dns_name }}
# Networking
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
egress-selector-mode: disabled
# Network-plugin
flannel-backend: vxlan
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"
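For orientation, on the first host of the kubernetes group this template renders roughly as follows (the token, DNS name, and node addresses are hypothetical examples, not values from this repo):

cluster-init: true
token: <shared token for nodes to join>
tls-san:
  - k8s.example.org
node-ip: 203.0.113.10,2001:db8::10
cluster-cidr: 10.42.0.0/16,fd42::/56
service-cidr: 10.43.0.0/16,fd43::/112
egress-selector-mode: disabled
flannel-backend: vxlan
kubelet-arg: "--node-ip=0.0.0.0"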

nomad/defaults/main.yml Normal file

@@ -0,0 +1,3 @@
---
nomad:
  version: 1.4.4

@@ -0,0 +1,48 @@
[Unit]
Description=Nomad
Documentation=https://www.nomadproject.io/docs/
Wants=network-online.target
After=network-online.target
# When using Nomad with Consul it is not necessary to start Consul first. These
# lines start Consul before Nomad as an optimization to avoid Nomad logging
# that Consul is unavailable at startup.
#Wants=consul.service
#After=consul.service
[Service]
# Nomad server should be run as the nomad user. Nomad clients
# should be run as root
User=root
Group=root
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
KillMode=process
KillSignal=SIGINT
LimitNOFILE=65536
LimitNPROC=infinity
Restart=on-failure
RestartSec=2
## Configure unit start rate limiting. Units which are started more than
## *burst* times within an *interval* time span are not permitted to start any
## more. Use `StartLimitIntervalSec` or `StartLimitInterval` (depending on
## systemd version) to configure the checking interval and `StartLimitBurst`
## to configure how many starts per interval are allowed. The values in the
## commented lines are defaults.
# StartLimitBurst = 5
## StartLimitIntervalSec is used for systemd versions >= 230
# StartLimitIntervalSec = 10s
## StartLimitInterval is used for systemd versions < 230
# StartLimitInterval = 10s
TasksMax=infinity
OOMScoreAdjust=-1000
[Install]
WantedBy=multi-user.target

nomad/meta/main.yml Normal file

@@ -0,0 +1,3 @@
---
dependencies: []
#- role: docker

nomad/tasks/install.yml Normal file

@@ -0,0 +1,43 @@
- name: Download binary
  ansible.builtin.unarchive:
    remote_src: true
    src: https://releases.hashicorp.com/nomad/{{ nomad.version }}/nomad_{{ nomad.version }}_{{ ansible_system | lower }}_{{ 'amd64' if ansible_architecture == 'x86_64' else ansible_architecture }}.zip
    dest: /usr/local/bin/
    mode: "755"
- name: Deploy systemd-service file
  ansible.builtin.copy:
    src: systemd-service
    dest: /etc/systemd/system/nomad.service
    mode: u=rw,g=r,o=r
- name: Create nomad user
  ansible.builtin.user:
    name: nomad
    groups:
      - docker
    append: true
- name: Create directory for configs
  ansible.builtin.file:
    path: /etc/nomad.d
    state: directory
    mode: "0755"
    owner: "nomad"
    group: "nomad"
- name: Create nomad.hcl configuration file
  ansible.builtin.template:
    src: nomad.hcl.j2
    dest: /etc/nomad.d/nomad.hcl
    mode: "0644"
    owner: "nomad"
    group: "nomad"
- name: Create directory for data
  ansible.builtin.file:
    path: /opt/nomad
    state: directory
    mode: "0755"
    owner: "nomad"
    group: "nomad"

nomad/tasks/launch.yml Normal file

@@ -0,0 +1,8 @@
- name: Start service
  ansible.builtin.service:
    name: nomad
    state: restarted
- name: Waiting for service to accept connections
  ansible.builtin.wait_for:
    port: 4646

nomad/tasks/main.yml Normal file

@@ -0,0 +1,3 @@
- import_tasks: ./install.yml
- import_tasks: ./launch.yml

@@ -0,0 +1,71 @@
data_dir = "/opt/nomad"
datacenter = "{{ datacenter }}"
bind_addr = "0.0.0.0"

advertise {
  # Defaults to the first private IP address.
  #http = "1.2.3.4"
  #rpc = "1.2.3.4"
  #serf = "1.2.3.4:5648" # non-default ports may be specified
}

{# TODO: Get interface-ip from hosts marked with type=server #}
{% set server_hosts = ansible_play_batch | difference([inventory_hostname]) %}

{% if type is defined and type == "server" %}
server {
  enabled = true
  bootstrap_expect = {{ server_hosts | length }}

  server_join {
    retry_join = [ "{{ server_hosts | join('", "') }}" ]
    retry_max = 6
    retry_interval = "15s"
  }

  default_scheduler_config {
    scheduler_algorithm = "binpack"
    memory_oversubscription_enabled = true
    reject_job_registration = false
    pause_eval_broker = false # New in Nomad 1.3.2

    preemption_config {
      batch_scheduler_enabled = true
      system_scheduler_enabled = true
      service_scheduler_enabled = true
      sysbatch_scheduler_enabled = true # New in Nomad 1.2
    }
  }
}
{% endif %}

client {
  enabled = true
{% if type != "server" %}
  servers = [ "{{ server_hosts | join('", "') }}" ]
{% endif %}

  meta {
    node_type = "{{ type }}"
{% if storage is defined and storage %}
    seaweedfs_volume = "true"
{% endif %}
  }
}

plugin "raw_exec" {
  config {
    enabled = true
  }
}

plugin "docker" {
  config {
{% if type is defined and type == "server" %}
    allow_privileged = true
{% endif %}
  }
}
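The TODO in the template flags that server_hosts is currently just "every other host in the play batch", regardless of role. A hedged sketch of narrowing it to hosts whose type variable is "server" (this assumes type is visible through hostvars, e.g. set in inventory or group_vars, and is not how the template currently behaves):

{% set server_hosts = ansible_play_hosts
     | map('extract', hostvars)
     | selectattr('type', 'defined')
     | selectattr('type', 'equalto', 'server')
     | map(attribute='inventory_hostname')
     | list
     | difference([inventory_hostname]) %}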