4 Commits

Author SHA1 Message Date
83507bf027 Grab local-IP from specified interface instead 2022-11-02 08:54:14 +01:00
c2dcd88420 Add architecture 2022-11-02 08:53:04 +01:00
fbe64d43b5 Move vars to defaults 2022-10-21 16:05:35 +02:00
225c38df61 Add role kubernetes 2022-10-17 14:43:04 +02:00
48 changed files with 155 additions and 931 deletions

View File

@@ -1,4 +0,0 @@
netclient:
  # Token to join default-network
  # leave empty to ignore
  join_network_token:
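
Usage sketch: the token can be supplied per host or group, e.g. in group_vars (path and value are placeholders, not part of the role):

# group_vars/netclient_nodes.yml (hypothetical)
netclient:
  join_network_token: "<token created via the netmaker API>"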

View File

@@ -1,3 +0,0 @@
---
dependencies: []
#- role: docker

View File

@@ -1,4 +0,0 @@
- name: Deploy CA Certificate
  ansible.builtin.copy:
    src: secret_files/netmaker_server/ca/ca.crt
    dest: /etc/ssl/certs/netmaker-ca.pem

View File

@@ -1,25 +0,0 @@
- name: Install Packages
  # when: docker_file.stat.exists == False
  package:
    name:
      - gpg
      - gpg-agent
- name: Add netmaker-key
  apt_key:
    url: https://apt.netmaker.org/gpg.key
    state: present
- name: Add netmaker-repository
  apt_repository:
    repo: "deb https://apt.netmaker.org stable main"
    state: present
    filename: netmaker
    update_cache: yes
- name: Install wireguard & netclient
  package:
    name:
      - wireguard
      - netclient
    state: latest

View File

@@ -1,7 +0,0 @@
- name: Join netmaker-network
  when: "netclient.join_network_token is defined"
  command: "netclient join -t {{ netclient.join_network_token }}"
  failed_when: command.rc != 0
  changed_when: "'starting wireguard' in command.stdout"
  register: command
  throttle: 1 # Join one node at a time to avoid races on the server

View File

@@ -1,8 +0,0 @@
- import_tasks: ./certs.yml
- import_tasks: ./install.yml
- import_tasks: ./join-network.yml
- name: Gather facts to get changes
  ansible.builtin.gather_facts:

View File

@@ -1,5 +0,0 @@
netmaker_creds:
  rqlite_password:
  mq_admin_password:
  master_key:

View File

@@ -1,28 +0,0 @@
# Override for specific nodes to force dynamic-ip
# (disables setting the public-ip and forces an external lookup for it)
# When false, each node checks itself for a dynamic-ip (based on its private-ip)
netmaker_dynamicIp: false

netmaker_nginx:
  # Listen-port
  tls_port: 51820
  # Advertise-port for services
  # (must also be reachable by internal services!)
  advertise_port: 51820

# This is the base-domain used for generating hostnames for services
netmaker_base_domain:

# host + base_domain
netmaker_api:
  host: netmaker-api
netmaker_ui:
  host: netmaker-ui
# MQTT-broker
netmaker_broker:
  tls_host: netmaker-broker
# host + node_hostname
netmaker_rqlite:
  http_host: netmaker-rqlite-http
  cluster_host: netmaker-rqlite-cluster
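
Usage sketch for overriding these defaults per host, e.g. in host_vars (hostname and domain are placeholders):

# host_vars/node1.example.org.yml (hypothetical)
netmaker_dynamicIp: true # force external lookup of the public-ip for this node
netmaker_base_domain: example.org # services become netmaker-api.example.org etc.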

View File

@@ -1,37 +0,0 @@
@startuml
interface ng_TLS

component netmaker_server {
  component nginx {
    component ng_stream
    component ng_http
    ng_stream -up- ng_TLS
    ng_stream -right-> ng_http : tls-termination
  }

  component nm_ui
  nm_ui -up- nm_ui_http
  ng_http -down-( nm_ui_http

  component Mosquitto
  Mosquitto -up- mq_plain
  Mosquitto -up- mq_tls
  ng_stream -down-( mq_tls

  component rqlite
  rqlite -up- rq_http
  rqlite -up- rq_cluster
  ng_stream -down-( rq_cluster
  ng_http -down-( rq_http

  component nm_api
  nm_api -down- nm_api_http
  ng_http --( nm_api_http
  nm_api .up.( ng_TLS : db-connection to rqlite-master
  nm_api --( mq_plain
}
@enduml

View File

@@ -1,12 +0,0 @@
per_listener_settings false

listener 8883
protocol websockets
allow_anonymous false

listener 1883
protocol websockets
allow_anonymous false

plugin /usr/lib/mosquitto_dynamic_security.so
plugin_opt_config_file /mosquitto/data/dynamic-security.json

View File

@@ -1,23 +0,0 @@
#!/bin/ash

wait_for_netmaker() {
  echo "SERVER: ${NETMAKER_SERVER_HOST}"
  until curl --output /dev/null --silent --fail --head \
    --location "${NETMAKER_SERVER_HOST}/api/server/health"; do
    echo "Waiting for netmaker server to startup"
    sleep 1
  done
}

main() {
  # wait for netmaker to startup
  apk add curl
  wait_for_netmaker
  echo "Starting MQ..."
  # Run the main container command.
  /docker-entrypoint.sh
  /usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
}

main "${@}"

View File

@@ -1,33 +0,0 @@
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;
    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;
    #gzip on;
    include /etc/nginx/conf.d/*.conf;
}

include /etc/nginx/stream.d/*.conf;

View File

@@ -1,3 +0,0 @@
---
# dependencies:
# - role: docker

View File

@@ -1,40 +0,0 @@
- name: Generate PrivateKey
  community.crypto.openssl_privatekey:
    path: /opt/netmaker_server/certs/node.key
    owner: 1883 # Set owner to mosquitto-user (all other containers seem to run as root)
- name: Generate Certificate-Signing-Request from privateKey
  community.crypto.openssl_csr:
    path: /opt/netmaker_server/certs/node.csr
    privatekey_path: /opt/netmaker_server/certs/node.key
    common_name: "{{ ansible_facts.nodename }}"
    subject_alt_name:
      "DNS:{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }},\
      DNS:{{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }},\
      DNS:{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }},\
      DNS:{{ netmaker_api.host }}.{{ netmaker_base_domain }},\
      DNS:{{ netmaker_ui.host }}.{{ netmaker_base_domain }}"
- name: Fetch CSR
  ansible.builtin.fetch:
    src: /opt/netmaker_server/certs/node.csr
    dest: tmp_files/
- name: Sign CSR locally with CA
  local_action: community.crypto.x509_certificate
  args:
    path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
    csr_path: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.csr
    ownca_path: secret_files/netmaker_server/ca/ca.crt
    ownca_privatekey_path: secret_files/netmaker_server/ca/ca.key
    provider: ownca
- name: Copy Signed Certificate
  ansible.builtin.copy:
    src: tmp_files/{{ inventory_hostname }}/opt/netmaker_server/certs/node.crt
    dest: /opt/netmaker_server/certs/node.crt
- name: Copy CA Certificate
  ansible.builtin.copy:
    src: secret_files/netmaker_server/ca/ca.crt
    dest: /opt/netmaker_server/certs/ca.crt
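
These tasks assume the CA-pair already exists under secret_files/netmaker_server/ca/ on the controller. A sketch for generating it once with the same collection (run locally, e.g. via delegate_to: localhost; paths as assumed by this role):

- name: Generate CA private key
  community.crypto.openssl_privatekey:
    path: secret_files/netmaker_server/ca/ca.key
- name: Generate CA-CSR with CA-constraints
  community.crypto.openssl_csr:
    path: secret_files/netmaker_server/ca/ca.csr
    privatekey_path: secret_files/netmaker_server/ca/ca.key
    common_name: netmaker-ca
    basic_constraints:
      - 'CA:TRUE'
    basic_constraints_critical: true
- name: Self-sign the CA certificate
  community.crypto.x509_certificate:
    path: secret_files/netmaker_server/ca/ca.crt
    csr_path: secret_files/netmaker_server/ca/ca.csr
    privatekey_path: secret_files/netmaker_server/ca/ca.key
    provider: selfsigned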

View File

@@ -1,20 +0,0 @@
- import_tasks: ./prerequisites.yml
- name: Copy folder-structure
  ansible.builtin.copy:
    src: opt/netmaker_server
    dest: /opt/
    mode: preserve
- name: Deploy compose file
  ansible.builtin.template:
    src: docker-compose.yml.template
    dest: /opt/netmaker_server/docker-compose.yml
- import_tasks: ./certs.yml
- import_tasks: ./nginx.yml
- import_tasks: ./rqlite.yml
- import_tasks: ./netmaker.yml

View File

@@ -1,57 +0,0 @@
- name: Start rest of netmaker-services
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d"
  register: command
  failed_when: command.rc != 0
- name: Wait for netmaker-api to become available
  uri:
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}'
    return_content: yes
    validate_certs: no
    status_code:
      - 404
  until: uri_output.status == 404
  retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5 # Every 5 seconds
  register: uri_output
# todo: check if exists?
- name: Create default mesh-network 'server'
  uri:
    validate_certs: no
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks'
    method: POST
    body:
      netid: servnet
      addressrange: 10.92.0.0/24
      addressrange6: fd92::/64
    body_format: json
    headers:
      Authorization: 'Bearer {{ netmaker_creds.master_key }}'
      Content-Type: application/json
  when: "inventory_hostname == groups['netmaker_server'][0]"
  register: default_mesh
  until: "default_mesh is not failed"
  retries: 2
  delay: 10
# todo: check if exists?
- name: Create token for default-network
  uri:
    validate_certs: no
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks/servnet/keys' # todo: do implementation
    method: POST
    body:
      name: ""
      uses: 0
    body_format: json
    headers:
      Authorization: 'Bearer {{ netmaker_creds.master_key }}'
      Content-Type: application/json
  when: "inventory_hostname == groups['netmaker_server'][0]"
  register: default_mesh_key
  until: "default_mesh_key is not failed"
  retries: 2
  delay: 10
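
Both POST-calls are not idempotent, hence the todos. One way to implement the existence-check is a GET against /api/networks first and a conditional create; a sketch (it assumes the API returns a JSON-list of networks with a netid field):

- name: Fetch existing networks
  uri:
    validate_certs: no
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/networks'
    headers:
      Authorization: 'Bearer {{ netmaker_creds.master_key }}'
  register: existing_networks
  when: "inventory_hostname == groups['netmaker_server'][0]"
# Then guard the create-task with:
#   when: "existing_networks.json | selectattr('netid', 'equalto', 'servnet') | list | length == 0"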

View File

@@ -1,18 +0,0 @@
- name: Deploy nginx configs
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dst }}"
  loop:
    - { src: 'nginx/proxy.conf.template', dst: '/opt/netmaker_server/nginx/conf/conf.d/proxy.conf' }
    - { src: 'nginx/passthrough.conf.template', dst: '/opt/netmaker_server/nginx/conf/stream.d/passthrough.conf' }
- name: Start nginx service
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d nginx"
  register: command
  failed_when: command.rc != 0
- name: Waiting for nginx to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 51820
    state: started

View File

@@ -1,9 +0,0 @@
- name: Install wireguard
  package:
    name:
      - wireguard
    state: latest
- name: Check if default-ipv4-address is private
  set_fact:
    # Loose match: RFC1918-ranges plus 100.x (CGNAT)
    private_ipv4_address: "{{ ansible_facts.default_ipv4.address | regex_search('^((10)|(192\\.168)|(172\\.((1[6-9])|(2[0-9])|(3[0-1])))|(100))\\.') }}"
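
The regex is an approximation: the RFC1918-ranges plus every 100.x-address (standing in for CGNAT 100.64.0.0/10). With the ansible.utils collection and netaddr installed, the same check can be written without a hand-rolled regex; a sketch:

- name: Check if default-ipv4-address is private (alternative, needs ansible.utils + netaddr)
  set_fact:
    private_ipv4_address: "{{ ansible_facts.default_ipv4.address | ansible.utils.ipaddr('private') }}"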

View File

@@ -1,42 +0,0 @@
- name: Deploy rqlite config
  ansible.builtin.template:
    src: rqlite-config.json.template
    dest: /opt/netmaker_server/rqlite/config.json
- name: Start rqlite service for 1st-node
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
  register: command
  failed_when: command.rc != 0
  when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on 1st-node
  uri:
    url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
    return_content: yes
    validate_certs: no
    status_code:
      - 401
  until: uri_output.status == 401
  retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5 # Every 5 seconds
  register: uri_output
  when: "inventory_hostname == groups['netmaker_server'][0]"
- name: Start rqlite service for other nodes
  command: "docker-compose --project-directory /opt/netmaker_server/ up -d rqlite"
  register: command
  failed_when: command.rc != 0
  when: "inventory_hostname != groups['netmaker_server'][0]"
- name: Waiting for rqlite to accept connections on other nodes
  uri:
    url: 'https://{{ netmaker_rqlite.http_host }}.{{ inventory_hostname }}:{{ netmaker_nginx.advertise_port }}/status'
    return_content: yes
    validate_certs: no
    status_code:
      - 401
  until: uri_output.status == 401
  retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
  delay: 5 # Every 5 seconds
  register: uri_output
  when: "inventory_hostname != groups['netmaker_server'][0]"

View File

@@ -1,127 +0,0 @@
version: "3.4"
services:
nginx:
image: nginx
restart: unless-stopped
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro # Override nginx-config to add stream-import
- ./nginx/conf/conf.d/:/etc/nginx/conf.d:ro # conf.d
- ./nginx/conf/stream.d/:/etc/nginx/stream.d:ro # conf.d
- ./certs:/certs:ro # SSL-certificates
ports:
- {{ netmaker_nginx.tls_port }}:443
rqlite: # Distributed sqlite-db
image: rqlite/rqlite
restart: unless-stopped
hostname: "{{ ansible_facts.nodename }}"
volumes:
- "./rqlite/data:/rqlite/file"
- "./rqlite/config.json:/config.json:ro"
- "./certs:/certs:ro"
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
command: "
-http-adv-addr {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-raft-addr [::]:4002
-raft-adv-addr {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}
-node-encrypt
-node-cert /certs/node.crt
-node-key /certs/node.key
-node-no-verify
-auth /config.json
{% if inventory_hostname != groups['netmaker_server'][0] %}
-join-as netmaker
-join https://{{ netmaker_rqlite.http_host }}.{{ groups['netmaker_server'][0] }}:{{ netmaker_nginx.advertise_port }}
{% endif %}
"
# FIXME: /\ \/ Change http -> https
netmaker: # The Primary Server for running Netmaker
image: gravitl/netmaker:v0.17.1
depends_on:
- rqlite
cap_add:
- NET_ADMIN
- NET_RAW
- SYS_MODULE
sysctls:
- net.ipv4.ip_forward=1
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.all.forwarding=1
restart: unless-stopped
volumes: # Volume mounts necessary for sql, coredns, and mqtt
- ./dnsconfig/:/root/config/dnsconfig
- ./mosquitto/data/:/etc/netmaker/
- ./certs/ca.crt:/etc/ssl/certs/netmaker-ca.pem:ro # Add CA to system-trust-store
hostname: "{{ ansible_facts.nodename }}"
environment: # Necessary capabilities to set iptables when running in container
NODE_ID: "{{ ansible_facts.nodename }}"
MASTER_KEY: "{{ netmaker_creds.master_key }}" # The admin master key for accessing the API. Change this in any production installation.
{% if not private_ipv4_address and not netmaker_dynamicIp %}
SERVER_HOST: "{{ ansible_facts.default_ipv4.address }}" # Set to public IP of machine.
{% endif %}
SERVER_NAME: "{{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }}" # The domain/host IP indicating the mq broker address
SERVER_HTTP_HOST: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}" # Overrides SERVER_HOST if set. Useful for making HTTP available via different interfaces/networks.
SERVER_API_CONN_STRING: "{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}"
DISABLE_REMOTE_IP_CHECK: "off" # If turned "on", Server will not set Host based on remote IP check. This is already overridden if SERVER_HOST is set. Turned "off" by default.
DNS_MODE: "off" # Enables DNS Mode, meaning all nodes will set hosts file for private dns settings.
API_PORT: "8081" # The HTTP API port for Netmaker. Used for API calls / communication from front end. If changed, need to change port of BACKEND_URL for netmaker-ui.
REST_BACKEND: "on" # Enables the REST backend (API running on API_PORT at SERVER_HTTP_HOST). Change to "off" to turn off.
RCE: "off" # Enables setting PostUp and PostDown (arbitrary commands) on nodes from the server. Off by default.
CORS_ALLOWED_ORIGIN: "*" # The "allowed origin" for API requests. Change to restrict where API requests can come from.
DISPLAY_KEYS: "on" # Show keys permanently in UI (until deleted) as opposed to 1-time display.
DATABASE: "rqlite"
SQL_CONN: "https://netmaker:{{ netmaker_creds.rqlite_password }}@{{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }}:{{ netmaker_nginx.advertise_port }}/"
MQ_HOST: "mosquitto" # the address of the mq server. If running from docker compose it will be "mq". Otherwise, need to input address. If using "host networking", it will find and detect the IP of the mq container.
MQ_SERVER_PORT: "1883" # the reachable port of MQ by the server - change if internal MQ port changes (or use external port if MQ is not on the same machine)
MQ_PORT: "{{ netmaker_nginx.advertise_port }}" # the reachable port of MQ - change if external MQ port changes (port on proxy, not necessarily the one exposed in docker-compose)
MQ_ADMIN_PASSWORD: "{{ netmaker_creds.mq_admin_password }}"
HOST_NETWORK: "off" # whether or not host networking is turned on. Only turn on if configured for host networking (see docker-compose.hostnetwork.yml). Will set host-level settings like iptables.
PORT_FORWARD_SERVICES: "" # decide which services to port forward ("dns","ssh", or "mq")
# this section is for OAuth
AUTH_PROVIDER: "" # "<azure-ad|github|google|oidc>"
CLIENT_ID: "" # "<client id of your oauth provider>"
CLIENT_SECRET: "" # "<client secret of your oauth provider>"
FRONTEND_URL: "" # "https://dashboard.<netmaker base domain>"
AZURE_TENANT: "" # "<only for azure, you may optionally specify the tenant for the OAuth>"
OIDC_ISSUER: "" # https://oidc.yourprovider.com - URL of oidc provider
VERBOSITY: "1" # logging verbosity level - 1, 2, or 3
TELEMETRY: "off" # Whether or not to send telemetry data to help improve Netmaker. Switch to "off" to opt out of sending telemetry.
ports:
- "51821-51830:51821-51830/udp" # wireguard ports
netmaker-ui: # The Netmaker UI Component
image: gravitl/netmaker-ui:v0.17.1
depends_on:
- netmaker
links:
- "netmaker:api"
restart: unless-stopped
environment:
BACKEND_URL: "https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}" # URL where UI will send API requests. Change based on SERVER_HOST, SERVER_HTTP_HOST, and API_PORT
mosquitto: # the MQTT broker for netmaker
image: eclipse-mosquitto:2.0.11-openssl
restart: unless-stopped
volumes:
- ./mosquitto/config:/mosquitto/config
- ./mosquitto/data:/mosquitto/data
- ./mosquitto/logs:/mosquitto/log
depends_on:
- netmaker
command: ["/mosquitto/config/wait.sh"]
environment:
NETMAKER_SERVER_HOST: "http://netmaker:8081"

View File

@@ -1,25 +0,0 @@
stream {
    # Map target-hosts based on hostname
    map $ssl_preread_server_name $target_host {
        hostnames; # Enable matching including prefix/suffix-mask
        {{ netmaker_ui.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
        {{ netmaker_api.host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
        {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} 127.0.0.1:8443;
        {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} 127.0.0.1:8443;
        {{ netmaker_rqlite.cluster_host }}.{{ ansible_facts.nodename }} rqlite:4002;
        default 127.0.0.1:1;
    }

    server {
        resolver 127.0.0.11; # Explicitly set docker-resolver
        listen 443;
        ssl_preread on;
        proxy_pass $target_host;
    }
}
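
nginx routes these connections purely on the TLS SNI-value (ssl_preread) without terminating TLS itself: the rqlite-cluster traffic passes straight through, everything else loops back into the ssl-terminating server on 8443. A minimal smoke-test in the style of the wait-tasks above (a sketch; it assumes the API-hostname resolves to this node):

- name: Verify SNI-routing towards the netmaker-api
  uri:
    url: 'https://{{ netmaker_api.host }}.{{ netmaker_base_domain }}:{{ netmaker_nginx.advertise_port }}/api/server/health'
    validate_certs: no # node-cert is signed by the internal CA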

View File

@@ -1,29 +0,0 @@
map $host $proxy_name {
    hostnames;
    {{ netmaker_ui.host }}.{{ netmaker_base_domain }} netmaker-ui:80;
    {{ netmaker_api.host }}.{{ netmaker_base_domain }} netmaker:8081;
    {{ netmaker_broker.tls_host }}.{{ netmaker_base_domain }} mosquitto:8883;
    {{ netmaker_rqlite.http_host }}.{{ ansible_facts.nodename }} rqlite:4001;
    default 444;
}

server {
    resolver 127.0.0.11; # Explicitly set docker-resolver
    listen 8443 ssl;
    ssl_certificate /certs/node.crt;
    ssl_certificate_key /certs/node.key;

    if ($proxy_name = 444) {
        return 444;
    }

    location / {
        proxy_pass http://$proxy_name;
    }
}

View File

@@ -1,5 +0,0 @@
[{
    "username": "netmaker",
    "password": "{{ netmaker_creds.rqlite_password }}",
    "perms": ["all"]
}]

View File

@@ -1 +0,0 @@
ipv6_stable_secret: 1111:2222:3333:4444:5555:6666:7777:8888

View File

@@ -5,5 +5,3 @@
- import_tasks: ./packages.yml
- import_tasks: ./aliases.yml
- import_tasks: ./networking.yml

View File

@@ -1,22 +0,0 @@
- name: Set sysctl settings for ip-forwarding
  copy:
    dest: "/etc/sysctl.d/ip-forwarding.conf"
    content: |
      net.ipv4.ip_forward = 1
      net.ipv6.conf.all.forwarding = 1
  notify: reload_sysctl
- name: Set sysctl settings for ipv6-address-generation
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-address-generation.conf"
    content: |
      net.ipv6.conf.default.addr_gen_mode = 2
      net.ipv6.conf.default.stable_secret = {{ ipv6_stable_secret }}
  notify: reload_sysctl
- name: Set sysctl settings to override ipv6-slaac with enabled forwarding
  copy:
    dest: "/etc/sysctl.d/ipv6-slaac-override.conf"
    content: |
      net.ipv6.conf.all.accept_ra = 2
  notify: reload_sysctl

View File

@@ -4,14 +4,14 @@ kubernetes:
  ipv4:
    cluster_cidr: 10.42.0.0/16
    service_cidr: 10.43.0.0/16
    nodeip_cidr: 10.41.0.0/24
  ipv6:
    cluster_cidr: fd42::/56
    service_cidr: fd43::/112
  # Replace - with _ (Ansible fact-names use underscores)
  nodeIp_interface: <interface to grab nodeIp from>
  control_plane:
    dns_name: <control-plane dns-reachable-name>
  token: <shared token for nodes to join>
  shared_token: <shared token for nodes to join>
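
Filled in, the defaults could look like this (illustrative values only; note the interface-name uses underscores, so a netmaker-interface "nm-server" becomes nm_server):

kubernetes:
  ipPool:
    ipv4:
      cluster_cidr: 10.42.0.0/16
      service_cidr: 10.43.0.0/16
      nodeip_cidr: 10.41.0.0/24
    ipv6:
      cluster_cidr: fd42::/56
      service_cidr: fd43::/112
    nodeIp_interface: nm_server # hypothetical interface
  control_plane:
    dns_name: control-plane.k8s.example.org # hypothetical
  shared_token: "<random shared secret>"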

View File

@@ -1,33 +1,30 @@
@startuml
rectangle "Control-Plane" as control_plane {
  rectangle "Node" as sn1 {
    component "netclient" as sn1_netclient
    component netmaker as nm1
    component netmaker as nm2
    component ... as nm3
    component etcd as sn1_etcd
    component "k3s-server" as sn1_k3s_server
    sn1_k3s_server - sn1_etcd
  }
  interface interface as if1
  interface interface as if2
  interface ... as if3
  rectangle "Node" as sn2 {
    component "netclient" as sn2_netclient
    component kubernetes as kn1
    component kubernetes as kn2
    component ... as kn3
    component etcd as sn2_etcd
    component "k3s-server" as sn2_k3s_server
    sn2_k3s_server - sn2_etcd
  }
  nm1 -up- if1
  kn1 -down-( if1
  sn1_netclient -- sn2_netclient
  sn1_etcd -- sn2_etcd
}
nm2 -up- if2
kn2 -down-( if2
rectangle "Workers" {
  rectangle "Node" as an1 {
    component "netclient" as an1_netclient
    nm3 -up- if3
    kn3 -down-( if3
    component "k3s-agent" as sn1_k3s_agent
  }
}
nm1 -right- nm2
nm2 -right- nm3
kn1 .right. kn2
kn2 .right. kn3
@enduml

View File

@@ -0,0 +1,9 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rke2-canal
  namespace: kube-system
spec:
  valuesContent: |-
    flannel:
      backend: "wireguard"

View File

@@ -1,29 +0,0 @@
- name: Create k3s-folder
  ansible.builtin.file:
    path: /etc/rancher/k3s/
    state: directory
    mode: '0755'
- name: Deploy k3s config
  ansible.builtin.template:
    src: k3s/{{ type }}/config.yaml.jinja2
    dest: /etc/rancher/k3s/config.yaml
- name: Download install-script
  get_url:
    url: https://get.k3s.io
    dest: /root/k3s_install.sh
    mode: '744'
  # todo: update when file changed?
- import_tasks: ./install/server/setup_network.yml
  when: "type == 'server'"
- import_tasks: ./install/server/install_helm.yml
  when: "type == 'server'"
- import_tasks: ./install/server/install_k3s.yml
  when: "type == 'server'"
- import_tasks: ./install/agent/install_k3s.yml
  when: "type == 'agent'"

View File

@@ -1,7 +0,0 @@
- name: Install K3s agent
  command: /root/k3s_install.sh {{ type }}
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10

View File

@@ -1,35 +0,0 @@
- name: Install K3s-server for 1st-node
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname == groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"
- name: Waiting for K3s-server to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname == groups['kubernetes'][0]"
- name: Install K3s-server for other nodes
  command: /root/k3s_install.sh {{ type }}
  when: "inventory_hostname != groups['kubernetes'][0]"
  register: command
  changed_when: "'No change detected' not in command.stdout"
  until: "command is not failed"
  retries: 2
  delay: 10
- name: Waiting for K3s-server to accept connections on other nodes
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 6443
    state: started
  when: "inventory_hostname != groups['kubernetes'][0]"
#- name: Add Kubernetes environment-vars to /etc/profile.d/
#  blockinfile:
#    path: /etc/profile.d/k3s-bin.sh
#    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
#    block: |
#      export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
#    create: true

View File

@@ -1,6 +0,0 @@
- name: Set control-plane-dns-endpoint towards local-ip
  blockinfile:
    path: /etc/hosts
    marker: "# {mark} ANSIBLE MANAGED BLOCK | k3s"
    block: |
      {{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}

View File

@@ -0,0 +1,65 @@
- name: Create rke-helm-manifests-folder
  ansible.builtin.file:
    path: '/var/lib/rancher/rke2/server/manifests/'
    state: directory
    mode: '0755'
- name: Deploy helm-manifests
  ansible.builtin.copy:
    src: 'helm-manifests/'
    dest: '/var/lib/rancher/rke2/server/manifests/'
- name: Create rke-folder
  ansible.builtin.file:
    path: /etc/rancher/rke2/
    state: directory
    mode: '0755'
- name: Deploy rke2 config
  ansible.builtin.template:
    src: rke2/config.yaml.template
    dest: /etc/rancher/rke2/config.yaml
- name: Install RKE2
  command: bash -c "curl -sfL https://get.rke2.io | sh -"
- name: Add RKE2 environment-vars to /etc/profile.d/
  blockinfile:
    path: /etc/profile.d/rke2-bin.sh
    marker: "# {mark} ANSIBLE MANAGED BLOCK | rke2"
    block: |
      export PATH="/var/lib/rancher/rke2/bin/:$PATH"
      export KUBECONFIG="/etc/rancher/rke2/rke2.yaml"
    create: true
- name: Enable and start rke2-server service for 1st-node
  ansible.builtin.service:
    name: rke2-server
    enabled: yes
    state: started
  when: "inventory_hostname == groups['kubernetes'][0]"
- name: Waiting for kubelet to accept connections
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 10250
    state: started
  when: "inventory_hostname == groups['kubernetes'][0]"
- name: Enable and start rke2-server service for other nodes
  ansible.builtin.service:
    name: rke2-server
    enabled: yes
    state: started
  when: "inventory_hostname != groups['kubernetes'][0]"
  register: rke2_start
  until: "rke2_start is not failed"
  retries: 2
  delay: 10
- name: Waiting for kubelet to accept connections on other nodes
  ansible.builtin.wait_for:
    host: "{{ inventory_hostname }}"
    port: 10250
    state: started
  when: "inventory_hostname != groups['kubernetes'][0]"

View File

@@ -1,4 +1,6 @@
- import_tasks: ./prerequisites.yml
- import_tasks: ./install.yml
- import_tasks: ./install_helm.yml
- import_tasks: ./install_rke2.yml

View File

@@ -17,19 +17,53 @@
#- name: Disable swap
#  command: swapoff -a
#- name: Install iptables
#  package:
#    name:
#      #- containerd
#      - iptables
#    state: latest
- name: Install iptables
  package:
    name:
      #- containerd
      - iptables
    state: latest
- import_tasks: ./prerequisites/containerd.yml
- name: Check if containerd-service exists & is started
  service:
    name: containerd
    state: started
  ignore_errors: true
  register: containerd_status
- name: Install containerd when not exists
  package:
    name:
      - containerd
  when: containerd_status is failed
- name: Create containerd config-folder
  file:
    path: /etc/containerd
    state: directory
- name: Deploy containerd-config
  ansible.builtin.copy:
    src: containerd_config.toml
    dest: /etc/containerd/config.toml
    mode: u=rw,g=r,o=r
  notify: restart_containerd
# todo: Move to netmaker-role as handler?
- name: Gather facts to get changes
  ansible.builtin.gather_facts:
- name: Getting nodeIp-data from interface
  set_fact:
    nodeip_ipv4: "{{ ansible_facts[kubernetes.ipPool.nodeIp_interface].ipv4.address }}"
    nodeip_ipv6: "{{ ansible_facts[kubernetes.ipPool.nodeIp_interface].ipv6[0].address }}"
- name: Set control-plane-dns-endpoint towards local-ip
  blockinfile:
    path: /etc/hosts
    marker: "# {mark} ANSIBLE MANAGED BLOCK | k8s"
    block: |
      {{ nodeip_ipv4 }} {{ kubernetes.control_plane.dns_name }}
- name: Run handlers to reload configurations
  meta: flush_handlers

View File

@@ -1,24 +0,0 @@
- name: Check if containerd-service exists & is started
  service:
    name: containerd
    state: started
  ignore_errors: true
  register: containerd_status
- name: Install containerd when not exists
  package:
    name:
      - containerd
  when: containerd_status is failed
- name: Create containerd config-folder
  file:
    path: /etc/containerd
    state: directory
- name: Deploy containerd-config
  ansible.builtin.copy:
    src: containerd_config.toml
    dest: /etc/containerd/config.toml
    mode: u=rw,g=r,o=r
  notify: restart_containerd

View File

@@ -1,7 +0,0 @@
server: https://{{ kubernetes.control_plane.dns_name }}:6443
token: {{ kubernetes.token }}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"

View File

@@ -1,23 +0,0 @@
## Base ##
{% if inventory_hostname == groups['kubernetes'][0] %}
cluster-init: true
{% else %}
server: https://{{ groups['kubernetes'][0] }}:6443
{% endif %}
token: {{ kubernetes.token }}
tls-san:
- {{ kubernetes.control_plane.dns_name }}
# Networking
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
egress-selector-mode: disabled
# Network-plugin
flannel-backend: vxlan
# FIXME: Workaround for bug in Kubernetes 1.24/1.25 ignoring node IPv6 addresses
kubelet-arg: "--node-ip=0.0.0.0"

View File

@@ -0,0 +1,16 @@
## Base ##
container-runtime-endpoint: unix:///run/containerd/containerd.sock
{% if inventory_hostname != groups['kubernetes'][0] %}
server: https://{{ kubernetes.control_plane.dns_name }}:9345
{% endif %}
token: {{ kubernetes.shared_token }}
tls-san:
- {{ kubernetes.control_plane.dns_name }}
## Networking ##
#cni: cilium
cluster-cidr: {{ kubernetes.ipPool.ipv4.cluster_cidr }},{{ kubernetes.ipPool.ipv6.cluster_cidr }}
service-cidr: {{ kubernetes.ipPool.ipv4.service_cidr }},{{ kubernetes.ipPool.ipv6.service_cidr }}
node-ip: {{ nodeip_ipv4 }},{{ nodeip_ipv6 }}
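
The shared_token only has to be identical across all nodes; one way to generate and persist it on the controller is Ansible's password-lookup. A sketch (the secret_files-path is an assumption):

kubernetes:
  shared_token: "{{ lookup('ansible.builtin.password', 'secret_files/rke2_token chars=ascii_letters,digits length=32') }}"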

View File

@@ -1,3 +0,0 @@
---
nomad:
  version: 1.4.4

View File

@@ -1,48 +0,0 @@
[Unit]
Description=Nomad
Documentation=https://www.nomadproject.io/docs/
Wants=network-online.target
After=network-online.target

# When using Nomad with Consul it is not necessary to start Consul first. These
# lines start Consul before Nomad as an optimization to avoid Nomad logging
# that Consul is unavailable at startup.
#Wants=consul.service
#After=consul.service

[Service]
# Nomad server should be run as the nomad user. Nomad clients
# should be run as root
User=root
Group=root
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
KillMode=process
KillSignal=SIGINT
LimitNOFILE=65536
LimitNPROC=infinity
Restart=on-failure
RestartSec=2

## Configure unit start rate limiting. Units which are started more than
## *burst* times within an *interval* time span are not permitted to start any
## more. Use `StartLimitIntervalSec` or `StartLimitInterval` (depending on
## systemd version) to configure the checking interval and `StartLimitBurst`
## to configure how many starts per interval are allowed. The values in the
## commented lines are defaults.
# StartLimitBurst = 5
## StartLimitIntervalSec is used for systemd versions >= 230
# StartLimitIntervalSec = 10s
## StartLimitInterval is used for systemd versions < 230
# StartLimitInterval = 10s

TasksMax=infinity
OOMScoreAdjust=-1000

[Install]
WantedBy=multi-user.target

View File

@@ -1,3 +0,0 @@
---
dependencies:
#- role: docker

View File

@@ -1,43 +0,0 @@
- name: Download binary
  ansible.builtin.unarchive:
    remote_src: true
    src: https://releases.hashicorp.com/nomad/{{ nomad.version }}/nomad_{{ nomad.version }}_{{ ansible_system | lower }}_{{ 'amd64' if ansible_architecture == 'x86_64' else ansible_architecture }}.zip
    dest: /usr/local/bin/
    mode: "755"
- name: Deploy systemd-service file
  ansible.builtin.copy:
    src: systemd-service
    dest: /etc/systemd/system/nomad.service
    mode: u=rw,g=r,o=r
- name: Create nomad user
  ansible.builtin.user:
    name: nomad
    groups:
      - docker
    append: true
- name: Create directory for configs
  ansible.builtin.file:
    path: /etc/nomad.d
    state: directory
    mode: "0755"
    owner: "nomad"
    group: "nomad"
- name: Create nomad.hcl configuration file
  ansible.builtin.template:
    src: nomad.hcl.j2
    dest: /etc/nomad.d/nomad.hcl
    mode: "0644"
    owner: "nomad"
    group: "nomad"
- name: Create directory for data
  ansible.builtin.file:
    path: /opt/nomad
    state: directory
    mode: "0755"
    owner: "nomad"
    group: "nomad"

View File

@@ -1,8 +0,0 @@
- name: Start service
  ansible.builtin.service:
    name: nomad
    state: restarted
- name: Waiting for service to accept connections
  ansible.builtin.wait_for:
    port: 4646

View File

@@ -1,3 +0,0 @@
- import_tasks: ./install.yml
- import_tasks: ./launch.yml

View File

@@ -1,71 +0,0 @@
data_dir = "/opt/nomad"
datacenter = "{{ datacenter }}"
bind_addr = "0.0.0.0"
advertise {
# Defaults to the first private IP address.
#http = "1.2.3.4"
#rpc = "1.2.3.4"
#serf = "1.2.3.4:5648" # non-default ports may be specified
}
{# TODO: Get interface-ip from hosts marked with type=server #}
{% set server_hosts = ansible_play_batch | difference([inventory_hostname]) %}
{% if type is defined and type == "server" %}
server {
enabled = true
bootstrap_expect = {{ server_hosts | length }}
server_join {
retry_join = [ "{{ server_hosts | join('", "') }}" ]
retry_max = 6
retry_interval = "15s"
}
default_scheduler_config {
scheduler_algorithm = "binpack"
memory_oversubscription_enabled = true
reject_job_registration = false
pause_eval_broker = false # New in Nomad 1.3.2
preemption_config {
batch_scheduler_enabled = true
system_scheduler_enabled = true
service_scheduler_enabled = true
sysbatch_scheduler_enabled = true # New in Nomad 1.2
}
}
}
{% endif %}
client {
enabled = true
{% if type != "server" %}
servers = [ "{{ server_hosts | join('", "') }}" ]
{% endif %}
meta {
node_type = "{{ type }}"
{% if storage is defined and storage %}
seaweedfs_volume = "true"
{% endif %}
}
}
plugin "raw_exec" {
config {
enabled = true
}
}
plugin "docker" {
config {
{% if type is defined and type == "server" %}
allow_privileged = true
{% endif %}
}
}
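
On the TODO above: server_hosts is currently every other host in the play, so bootstrap_expect undercounts the server set by one, and clients could pick up non-server peers. A possible refinement (it assumes each host sets type in the inventory) would derive the list from hostvars instead:

{# Sketch: select only play-hosts whose type is "server" (inventory assumption) #}
{% set nomad_servers = ansible_play_hosts
   | map('extract', hostvars)
   | selectattr('type', 'defined')
   | selectattr('type', 'equalto', 'server')
   | map(attribute='inventory_hostname') | list %}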