Compare commits

..

1 Commits

Author SHA1 Message Date
Felix Stupp 5b312bfbad
WIP acme dehydrated 3 years ago

3
.gitignore vendored

@ -1,7 +1,5 @@
/ansible_collections
credentials/**
facts/**
/venv/**
public_keys/**
__pycache__/
!README.md
@ -12,4 +10,3 @@ __pycache__/
/*.yml
!/site.yml
!/hosts.yml
!/collection_requirements.yml

3
.gitmodules vendored

@ -1,3 +0,0 @@
[submodule "misc/mitogen"]
path = misc/mitogen
url = https://git.banananet.work/archive/mitogen.git

@ -11,6 +11,5 @@
},
"files.exclude": {
"playbooks/{credentials,filter_plugins,group_vars,helpers,host_vars,public_keys,roles}/": true
},
"python.pythonPath": "/home/zocker/Repositories/ansible2/venv/bin/python",
}
}

@ -1,37 +1,8 @@
[defaults]
# always ask for vault pass instead of expecting user to make it available unasked
ask_vault_pass = True
# force handlers to be executed always, especially after a normal task failed to execute
# without this option, it might happen that a handler might be missed to be executed after the role was completed in multiple tries (e.g. reload certain services)
force_handlers = True
# select custom inventory parser for setting up inventory
inventory = ./hosts.py
# install & use ansible collections locally (similar to venv) instead of globally
# helps to prevent differences on developer machines to be disturbing
# collections will be automatically setup from the dependency list "collection-requirements.yml" using "make ansible_collections"
# requires devs to document each external dependency inside the repository
collections_path = ./ # ansible then searches for the subdirectory "ansible_collections" for itself
# disable usage of cowsay for ansible-playbook's logging (increases readability drastically, only matters if cowsay is installed)
nocows = True
# disable storing retry files after fail because of no usage
retry_files_enabled = False
# automatically select python interpreter, should be sufficient
interpreter_python = auto
# add mitogen strategies and select mitogen as default strategy
# mitogen, see https://mitogen.networkgenomics.com/ansible_detailed.html
strategy_plugins = ./misc/mitogen/ansible_mitogen/plugins/strategy
strategy = mitogen_linear
[diff]
# always enable --diff option
always = True

12
enter

@ -1,12 +0,0 @@
#!/bin/echo You need to source this script! Use something like: source
# (re-)create env if required (e.g. requirements.txt changed)
make setup
# enable coloring on these tools
export ANSIBLE_FORCE_COLORS=1
export PY_COLORS=1
# enter venv
. ./venv/bin/activate

@ -1,92 +0,0 @@
---
# === Constants defined by OS packages / applications
# separated into arbitrary system/kernel and applications/packages
# each group is sorted alphabetically
# general system/kernel constants
global_fstab_file: "/etc/fstab"
global_resolv_conf: "/etc/resolv.conf"
global_pamd: "/etc/pam.d"
global_proc_hidepid_service_whitelist:
- "{{ global_systemd_login_service_name }}"
- "{{ global_systemd_user_service_name }}"
global_users_directory: "/home"
# application constants
global_ansible_facts_directory: "/etc/ansible/facts.d"
global_apparmor_profiles_directory: "/etc/apparmor.d"
global_apparmor_profiles_local_directory: "{{ global_apparmor_profiles_directory }}/local"
global_apt_sources_directory: "/etc/apt/sources.list.d"
global_bind_service_name: "named.service"
global_bind_configuration_directory: "/etc/bind"
global_bind_data_directory: "/var/lib/bind"
global_certbot_configuration_directory: "/etc/letsencrypt"
global_certbot_configuration_file: "{{ global_certbot_configuration_directory }}/cli.ini"
global_certbot_certificates_directory: "/etc/letsencrypt/live"
global_chromium_configuration_directory: "/etc/chromium"
global_chromium_managed_policies_file: "{{ global_chromium_configuration_directory }}/policies/managed/managed_policies.json"
global_dnsmasq_configuration_file: "/etc/dnsmasq.conf"
global_dnsmasq_configuration_directory: "/etc/dnsmasq.d"
global_docker_service_name: "docker.service"
global_docker_configuration_directory: "/etc/docker"
global_docker_daemon_configuration_file: "{{ global_docker_configuration_directory }}/daemon.json"
global_fail2ban_service_name: "fail2ban.service"
global_fail2ban_system_directory: "/etc/fail2ban"
global_fail2ban_configuration_directory: "{{ global_fail2ban_system_directory }}/fail2ban.d"
global_fail2ban_actions_directory: "{{ global_fail2ban_system_directory }}/action.d"
global_fail2ban_filters_directory: "{{ global_fail2ban_system_directory }}/filter.d"
global_fail2ban_jails_directory: "{{ global_fail2ban_system_directory }}/jail.d"
global_interfaces_directory: "/etc/network/interfaces.d"
global_lightdm_configuration_directory: "/etc/lightdm"
global_log_directory: "/var/log"
global_mysql_socket_path: "/var/run/mysqld/mysqld.sock"
global_nfs_port: "2049" # for version 4
global_nfs_directory: "{{ global_webservers_directory }}/nfs"
global_nginx_system_user: www-data
global_nginx_service_name: "nginx.service"
global_nginx_installation_directory: "/etc/nginx"
global_plymouth_themes_directory: "/usr/share/plymouth/themes"
global_redis_configuration_directory: "/etc/redis"
global_redis_service_name: "redis-server.service"
global_ssh_service_name: "sshd.service"
global_ssh_configuration_directory: "/etc/ssh/"
global_ssh_configuration_environment_directory: "{{ global_configuration_environment_directory }}/ssh"
global_ssh_configuration_link_name: "config"
global_ssh_configuration_link: "{{ global_ssh_configuration_environment_directory }}/{{ global_ssh_configuration_link_name }}"
global_sudoers_directory: "/etc/sudoers.d"
global_wireguard_configuration_directory: "/etc/wireguard"
global_systemd_preset_directory: "/lib/systemd/system"
global_systemd_configuration_directory: "/etc/systemd/system"
global_systemd_journal_configuration_directory: "/etc/systemd/journald.conf.d"
global_systemd_login_service_name: "systemd-logind.service"
global_systemd_network_directory: "/etc/systemd/network"
global_systemd_network_service_name: "systemd-networkd.service"
global_systemd_network_system_user: "systemd-network"
global_systemd_user_service_name: "user@.service"
global_zsh_antigen_source: "/usr/share/zsh-antigen/antigen.zsh"

@ -5,23 +5,17 @@ TIMEZONE: "Europe/Berlin"
local_user: "{{ lookup('env','USER') }}"
global_username: zocker
global_admin_mail: felix.stupp@outlook.com # TODO change to felix.stupp@banananet.work, verify if all usages will apply change (e.g. lets encrypt)
global_admin_mail: felix.stupp@outlook.com
ansible_user: "{{ global_username }}"
ansible_become: yes
ansible_become_pass: "{{ zocker_password }}"
default_gpg_keyserver_hostname: "keys.openpgp.org"
default_tg_monitor_recipient_id: "{{ zocker_telegram_id }}"
zocker_authorized_keys_url: "https://git.banananet.work/zocker.keys"
update_scripts_directory: "/root/update"
tailscale_vpn_subnet: "100.64.0.0/10"
backup_gpg_fingerprint: "73D09948B2392D688A45DC8393E1BD26F6B02FB7"
backups_to_keep: 1
backups_directory: "/backups"
@ -60,13 +54,12 @@ global_dns_session_key_algorithm: "hmac-sha512"
global_dns_update_key_algorithm: "ED25519"
global_dns_ttl: "{{ 60 * 60 }}" # default if omitted in all cases
global_dns_debug_ttl: "{{ 60 }}" # mostly used if has_debug_instance to allow short transfer times
global_dns_key_ttl: "{{ 15 * 60 }}" # Lower TTL for security relevant records / more often updated records
global_ssh_key_directory: "{{ global_public_key_directory }}/ssh"
global_ssh_host_key_directory: "{{ global_ssh_key_directory }}/hosts"
global_validate_python_script: "/usr/bin/python3 -m pylint --disable=C0114 %s"
global_validate_shell_script: "/usr/bin/shellcheck %s" # TODO add "--format="
global_validate_sshd_config: "/usr/sbin/sshd -t -f %s"
global_validate_sudoers_file: "/usr/sbin/visudo -c -f %s"
global_wireguard_private_directory: "{{ global_credentials_directory }}/wireguard"
@ -101,25 +94,114 @@ raspbian_repository_mirror: "http://raspbian.raspberrypi.org/raspbian/"
raspbian_archive_repository_mirror: "http://archive.raspberrypi.org/debian/"
raspbian_repository_use_sources: yes
# System configuration
global_users_directory: "/home"
# Application configurations
global_ansible_facts_directory: "/etc/ansible/facts.d"
global_apparmor_profiles_directory: "/etc/apparmor.d"
global_apparmor_profiles_local_directory: "{{ global_apparmor_profiles_directory }}/local"
global_apt_sources_directory: "/etc/apt/sources.list.d"
global_bind_service_name: "named.service"
global_bind_configuration_directory: "/etc/bind"
global_bind_data_directory: "/var/lib/bind"
global_certbot_configuration_directory: "/etc/letsencrypt"
global_certbot_configuration_file: "{{ global_certbot_configuration_directory }}/cli.ini"
global_certbot_certificates_directory: "/etc/letsencrypt/live"
global_chromium_configuration_directory: "/etc/chromium"
global_chromium_managed_policies_file: "{{ global_chromium_configuration_directory }}/policies/managed/managed_policies.json"
global_dehydrated_system_user: dehydrated
# configs
global_dehydrated_configuration_directory: "/etc/dehydrated"
global_dehydrated_configuration_file: "{{ global_dehydrated_configuration_directory }}/config"
global_dehydrated_domains_directory: "{{ global_dehydrated_configuration_directory }}/domains"
global_dehydrated_hook_script_path: "{{ global_dehydrated_configuration_directory }}/hook.py"
global_dehydrated_hook_configuration_file: "{{ global_dehydrated_configuration_directory }}/hook.json"
# data dirs
global_dehydrated_data_directory: "/var/lib/dehydrated"
global_dehydrated_domains_main_file: "{{ global_dehydrated_data_directory }}/domains.generated.txt"
global_dehydrated_certificates_directory: "{{ global_dehydrated_data_directory }}/certificates"
global_dns_upstream_servers:
# Quad9 DNS with DNSSEC support, without EDNS
- "9.9.9.9"
- "149.112.112.112"
- "2620:fe::fe"
- "2620:fe::9"
- "9.9.9.11"
- "149.112.112.11"
- "2620:fe::11"
- "2620:fe::fe:11"
global_dnsmasq_configuration_file: "/etc/dnsmasq.conf"
global_dnsmasq_configuration_directory: "/etc/dnsmasq.d"
global_fail2ban_service_name: "fail2ban.service"
global_fail2ban_system_directory: "/etc/fail2ban"
global_fail2ban_configuration_directory: "{{ global_fail2ban_system_directory }}/fail2ban.d"
global_fail2ban_actions_directory: "{{ global_fail2ban_system_directory }}/action.d"
global_fail2ban_filters_directory: "{{ global_fail2ban_system_directory }}/filter.d"
global_fail2ban_jails_directory: "{{ global_fail2ban_system_directory }}/jail.d"
global_ip_discover_url: "https://keys.banananet.work/ping"
global_ip_discover_register_pass: "{{ lookup('password', 'credentials/ip_discover/register_pass chars=digits,ascii_letters length=256') }}"
global_interfaces_directory: "/etc/network/interfaces.d"
global_lightdm_configuration_directory: "/etc/lightdm"
global_log_directory: "/var/log"
global_mysql_socket_path: "/var/run/mysqld/mysqld.sock"
global_nfs_port: "2049" # for version 4
global_nfs_directory: "{{ global_webservers_directory }}/nfs"
global_nginx_system_user: www-data
global_nginx_service_name: "nginx.service"
global_nginx_installation_directory: "/etc/nginx"
global_pamd: "/etc/pam.d"
global_plymouth_themes_directory: "/usr/share/plymouth/themes"
global_redis_configuration_directory: "/etc/redis"
global_redis_service_name: "redis-server.service"
global_resolv_conf: "/etc/resolv.conf"
global_ssh_service_name: "sshd.service"
global_ssh_configuration_directory: "/etc/ssh/"
global_ssh_configuration_environment_directory: "{{ global_configuration_environment_directory }}/ssh"
global_ssh_configuration_link_name: "config"
global_ssh_configuration_link: "{{ global_ssh_configuration_environment_directory }}/{{ global_ssh_configuration_link_name }}"
global_sudoers_directory: "/etc/sudoers.d"
global_wireguard_configuration_directory: "/etc/wireguard"
global_wireguard_port: 51820
global_wireguard_ipv4_subnet: 22
global_wireguard_ipv4_netmask: "{{ ('0.0.0.0/' + (global_wireguard_ipv4_subnet | string)) | ipaddr('netmask') }}"
global_wireguard_ipv4_range: "10.162.4.0/{{ global_wireguard_ipv4_subnet }}"
# TODO Wireguard IPv6 Support
global_systemd_preset_directory: "/lib/systemd/system"
global_systemd_configuration_directory: "/etc/systemd/system"
global_systemd_journal_configuration_directory: "/etc/systemd/journald.conf.d"
global_systemd_journal_max_storage: 1G
global_systemd_network_directory: "/etc/systemd/network"
global_systemd_network_service_name: "systemd-networkd.service"
global_systemd_network_system_user: "systemd-network"
global_zsh_antigen_source: "/usr/share/zsh-antigen/antigen.zsh"
# Projects
# WG Minecraft
project_wg_minecraft_port: 25566
# Miscellaneous

@ -1,38 +1,24 @@
$ANSIBLE_VAULT;1.1;AES256
66386430666466343732636663313264663933613563643231323066383261616361353234366534
3337323862636537663538343062333064383838653138340a343662326139396634343261396230
65666533626263386465616466663431333339613162373766363937333564323233353930303836
6332366434333437370a666636656534653031303237633863356630393836386137353837303039
33323433343065313135323462316163343364656562303962373634656666353235363537366361
35383031343138376439316365306337636264346434363863623765356161663133653363633533
30613430613333666561303935663833396265363931653133373934363263323362333839366662
62373533643535323430353032386431346462363566323637613736313336373665666631326633
34343830653535623262333730356164636131623735333839663336623735353138313962656564
35643231303461653236373665613339313332386535376665623130646637626531306366316266
36613961653162633639333536333434383332363061653062396163623664316363303561636634
63353263313730313133613537386536616338323533303666653131656262323763616432343664
65626130383432326663303238383233633265393936633934623634366663333862643562383736
38313265306138303431363634656334656530393539636232613962386238613963643161306234
35646136613764353138666431363337393765343233303332663530336261316331383665643536
30663831656566663239656565613535316438666632663236666636383762333432303964333833
33353661623965633630383536613633313437666430623565636635633634646338633666356234
66323966396638316236626234326364633366666266643832333066383735306330366234383533
63386563626264303234303832356662363732356438306234656561373637376137346565653966
65373465303032393939383833386333353461633732623232393761353236306331626164386238
35353464373732346537626464663532653434386564636532623838383937363463633332366534
35613137613933636434336432653964353536303366353832356161653535353165613964333339
30646139316661656363383832313765326234316134393732636262373730386562626233633439
39643862393336653533373731333938343164363233323638353265656139333465363831333431
62323332396537656432343235633735636631646334306265376566343364646566396563386537
32366335313335666436613531356535623364336135636665623233363763663537393538666233
35643431396430336533396137303763333332626439316265383138663639343061656631626463
39386461303866373862626361373836643437346365343531323264386631313834613166393833
34656537326531643962636436393236393537373935346135663335656666343430313335373633
65393066636233653262623031383564393038353730393363356561363936356366636330386264
37383064636433646265396365373330613833623338666638653532363061316261343639323937
33623665316161353035366438663337346532653262366434366138306364343966653235383636
38666263623633356463373963636135656637613164353265613635353733316138626637623364
34386338633363653231643334323161653933613864636338626638323035323233643137353964
35666332346264613136343039336261303964343237373136393139376234363833376164643839
33316566353033363333633966643366303537653766623935643933373062313830316166303961
37393638653064623935356564303236343766393939323561356461656636626534
63343063643236623632373437303138303636643862323961633739653032376333386666626162
3738366330393339303030373430653162616138383261370a393738326638663064323963366338
31303332353439666363653839353932333338313830366566653534343739613036306465656137
6366353730656230320a633334306135653163313435303037343138326137383765363666376262
63353237396637386663663535646363366639313961343037656162336664343832656331393535
33353534653738346331313034666237656630613439656164343234333161353939356435656634
63396134356138323064313365366537336137646432636131353734343130653066383862346461
66383364656233393839666462336661643730646633633135626331643366666135353437346633
37633838373339363332633134386637303561366238353538643837386332636439383034333434
31363866373161636431383862326137306466613361356337646133643630373332666434666133
66366564383161376234343135616531613238613131363834313764363366326163333562303061
31333734333336663037313333383632373130313631626533623139666265646530386464616135
30363462623136393730616337306163663763616430303530306361393834303661613864313830
33616161323535323865626639323132333131626662626161623234613136663961393063303739
61353632373265363761636235313430383237363938396534666663353336383234663561373833
63666364313539393831353833393763326432303035343830386663633534356362316130353866
64383564666431343333626332356666633231653239363130386265363164356664326633623065
61393636613162376334646661663232626534326562613235633434656466303435393233613233
36666463316331366365643861633362386466663863316564656439633364616566373062306633
66326464326138306130666631313830643236663134363166383264366139643861393565623537
33376165396531323863626635323237363665363539613963376537373635323365616234313762
66313934623631386432633861383136386464353932316534363836613038313934356331363737
333931356137336563653162316563306636

@ -1,14 +0,0 @@
---
bootstrap_user: "root"
global_dns_upstream_servers:
- 213.133.100.100
- 213.133.99.99
- 213.133.98.98
- "2a01:4f8:0:1::add:1010"
- "2a01:4f8:0:1::add:9898"
- "2a01:4f8:0:1::add:9999"
debian_repository_mirror: "http://mirror.hetzner.de/debian/packages"
debian_repository_use_sources: no # Not supported by Hetzner mirrors, but also not required

@ -1,212 +1,27 @@
#!/usr/bin/env python3
import json
import re
import sys
import yaml
class LoopPrevention:
    """Context manager guarding an object against re-entrant traversal.

    Entering the same instance a second time before leaving signals a cycle
    (e.g. a group that is its own ancestor) and raises immediately instead
    of recursing forever.
    """

    def __init__(self, obj):
        self.__obj = obj        # object being traversed (kept for context)
        self.__active = False   # True while inside the `with` body

    def __enter__(self):
        if self.__active:
            raise Exception("detected and prevented infinite loop")
        self.__active = True
        return self

    def __exit__(self, *exc_info):
        # Always release the guard; returning False propagates any exception.
        self.__active = False
        return False
class Group:
    """One inventory group: a set of member hosts plus child-group names."""

    def __init__(self, inv):
        self.__inventory = inv    # owning Inventory, used to resolve children
        self.__members = set()    # hosts assigned directly to this group
        self.__subgroups = set()  # names of child groups

    def add_host(self, host):
        # Membership test mirrors the original; set.add is idempotent anyway.
        if host not in self.__members:
            self.__members.add(host)

    def add_hosts(self, hosts):
        self.__members |= hosts

    @property
    def direct_hosts(self):
        # Return a copy so callers cannot mutate internal state.
        return set(self.__members)

    @property
    def all_hosts(self):
        # Direct members plus every host of every child group, transitively.
        # LoopPrevention raises if the group nesting contains a cycle.
        with LoopPrevention(self):
            collected = self.direct_hosts
            for name in self.children:
                collected |= self.__inventory._group(name).all_hosts
            return collected

    def add_child(self, group_name):
        if group_name not in self.__subgroups:
            self.__subgroups.add(group_name)

    @property
    def children(self):
        # Copy, for the same reason as direct_hosts.
        return set(self.__subgroups)

    def export(self):
        # Shape expected by Ansible's dynamic-inventory JSON output.
        return { "hosts": list(self.__members), "vars": dict(), "children": list(self.__subgroups) }
class Inventory:
    """Collects groups and hosts and exports the Ansible dynamic-inventory dict."""

    def __init__(self):
        # Mapping of group name -> Group; the implicit "all" group always exists.
        self.__groups = dict()
        self.add_group("all")

    def __group(self, group_name):
        # Get-or-create accessor (private): unknown names create a new Group.
        if group_name not in self.__groups:
            self.__groups[group_name] = Group(self)
        return self.__groups[group_name]

    def _group(self, group_name):
        # Strict accessor (internal API, also called from Group.all_hosts):
        # unknown names are an error instead of being auto-created.
        if group_name not in self.__groups:
            raise Exception(f'Unknown group "{group_name}"')
        return self.__groups[group_name]

    def add_host(self, host):
        # Every known host is always a member of "all".
        self.__group("all").add_host(host)

    def add_hosts(self, hosts):
        self.__group("all").add_hosts(hosts)

    def add_group(self, group_name):
        self.__group(group_name)

    def add_host_to_group(self, host, group_name):
        self.add_host(host)
        self.__group(group_name).add_host(host)

    def add_hosts_to_group(self, hosts, group_name):
        self.add_hosts(hosts)
        self.__group(group_name).add_hosts(hosts)

    def add_child_to_group(self, child_name, parent_name):
        # Ensure the child exists, then link it under the parent.
        self.__group(child_name)
        self.__group(parent_name).add_child(child_name)

    def all_hosts_of_group(self, group_name):
        # Transitive host set; raises for unknown groups (strict accessor).
        return self._group(group_name).all_hosts

    def export(self):
        # "_meta.hostvars" is required by Ansible's dynamic-inventory
        # protocol even when no host variables are provided.
        meta_dict = {
            "_meta": {
                "hostvars": {},
            },
        }
        group_dict = { group_name: group.export() for group_name, group in self.__groups.items() }
        return { **meta_dict , **group_dict }
def _read_yaml(path):
    """Parse the YAML file at `path` and return the loaded data.

    Raises:
        yaml.YAMLError: if the file is not valid YAML.
    """
    with open(path, 'r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError:
            # BUG FIX: the original did `return AnsibleError(e)`. AnsibleError
            # is never imported in this script, so any parse error crashed with
            # NameError — and even if it were defined, RETURNING (not raising)
            # an exception object would hand callers bogus "data". Re-raise.
            raise
# Set operations selectable by a one-character prefix in group alias syntax:
#   ""  -> union (add the group's hosts)
#   "&" -> intersection (keep only hosts also in that group)
#   "!" -> difference (remove that group's hosts)
GROUPS_PATTERN_OPS = {
    "": lambda old, add: old | add,
    "&": lambda old, add: old & add,
    "!": lambda old, add: old - add,
}
# Concatenation of the operator prefix characters (the "" key adds nothing).
GROUPS_PATTERN_OPS_NAMES = "".join(GROUPS_PATTERN_OPS.keys())
# Splits a group reference like "&name" into optional operator and group name.
GROUPS_PATTERN = re.compile(r'^(?P<operation>[' + GROUPS_PATTERN_OPS_NAMES + r']?)(?P<group_name>[^' + GROUPS_PATTERN_OPS_NAMES + r'].*)$')
def _parse_group_aliasses(inv, data):
    """Resolve alias groups: each alias is a set expression over existing groups.

    `data` maps alias name -> either a ":"-separated string or a list of
    group references, each optionally prefixed with "&" (intersect) or
    "!" (subtract); no prefix means union. The resulting host set is added
    to the alias group.
    """
    for group, syntax in data.items():
        if isinstance(syntax, str):
            group_list = syntax.split(':')
        elif isinstance(syntax, list):
            group_list = syntax
        else:
            raise Exception(f'Unknown syntax for alias "{group}": {syntax}')
        if len(syntax) <= 0 or len(group_list) <= 0:
            raise Exception(f'Empty syntax for alias "{group}": {syntax}')
        if group_list[0][0] == '!': # if first entry is an inversion
            group_list.insert(0, 'all') # remove group from all for inversion
        hosts = set()
        for group_name in group_list:
            # NOTE(review): a reference consisting only of operator characters
            # makes match() return None and fails below with AttributeError —
            # confirm whether such input should be rejected explicitly.
            group_matched = GROUPS_PATTERN.match(group_name)
            add = inv.all_hosts_of_group(group_matched.group('group_name'))
            op = GROUPS_PATTERN_OPS[group_matched.group('operation')]
            hosts = op(hosts, add)
        inv.add_hosts_to_group(hosts, group)
def _parse_groups(inv, data):
for group, children in data.items():
inv.add_group(group)
if children is None:
continue # as if no children are given
for child in children:
inv.add_child_to_group(child, group)
if isinstance(children, dict):
_parse_groups(inv, children)
def _parse_host_groups(inv, data):
    """Parse "host_groups": a mapping of group -> hosts, where hosts may be
    None, a list of host names, or a dict of host -> [extra groups].

    The special key "_all" inside a hosts dict lists parent groups that the
    whole host group becomes a child of.
    """
    GROUPS_KEY = "_all"
    for host_group, hosts in data.items():
        inv.add_group(host_group)
        if hosts is None:
            continue
        for host in hosts:
            # Skip the meta key; everything else is a real host name.
            if host != GROUPS_KEY:
                inv.add_host_to_group(host, host_group)
        if isinstance(hosts, dict):
            hosts = dict(hosts) # copy dict for further edits
            parents = hosts.pop(GROUPS_KEY, None)
            if parents is not None:
                for parent in parents:
                    inv.add_child_to_group(host_group, parent)
            # Remaining entries are host -> [extra groups] assignments.
            _parse_single_hosts(inv, hosts)
def _parse_single_hosts(inv, data):
for host, groups in data.items():
inv.add_host(host)
if groups is not None:
for group in groups:
inv.add_host_to_group(host, group)
def _parse_version_0(inv, data):
    # Legacy format: the whole file is a flat host -> [groups] mapping.
    return _parse_single_hosts(inv, data)

# Top-level keys recognized by format version 1 and their section parsers.
parser_mapping_v1 = { "groups": _parse_groups, "host_groups": _parse_host_groups, "single_hosts": _parse_single_hosts }

def _parse_version_1(inv, data):
    # Each known section is optional; unknown keys are simply ignored.
    for key_name, parser in parser_mapping_v1.items():
        if key_name in data:
            parser(inv, data[key_name])

def _parse_version_2(inv, data):
    # Version 2 is version 1 plus alias resolution.
    # NOTE(review): "group_aliasses" is accessed without a default, so a
    # version-2 file missing that key raises KeyError — confirm intended.
    _parse_version_1(inv, data)
    _parse_group_aliasses(inv, data["group_aliasses"])

# Dispatch from the file's "version" field to the matching parser.
parser_version_mapping = {
    None: _parse_version_0, # legacy version without version number, only hosts list with tags
    1: _parse_version_1, # adds support for default, inversed group dependencies and host_groups aside single_hosts (ignores aliases supported with version 2)
    2: _parse_version_2, # adds support for aliases (thus destroying the common graph structures where aliasses were used)
}
def parse(path):
    """Build the dynamic inventory from the YAML file at `path` and return
    the dict structure Ansible expects as JSON on stdout.

    Raises:
        Exception: when the file declares an unsupported format version.
    """
    data = _read_yaml(path)
    inv = Inventory()
    version = data.get("version", None)
    # detect that version was used as hostname
    if not isinstance(version, (int, float, complex)):
        version = None
    if version not in parser_version_mapping:
        # BUG FIX: the original raised AnsibleError(...), a name never
        # imported in this script, so this path crashed with NameError
        # instead of reporting the real problem.
        raise Exception(f"Inventory version {version!r} not supported")
    parser_version_mapping[version](inv, data)
    return inv.export()
    # NOTE(review): an older flat-format implementation followed the return
    # statement above as unreachable dead code (ending in a second
    # `return ret`); it has been removed.

print(json.dumps(parse("hosts.yml")))

@ -1,85 +1,41 @@
version: 2
groups: # a:b meaning b is a, can be nested
# hardware structure
dev_known:
barebones:
- rented_barebones # sub group
# list of all known barebone device groups
- dev_surface3 # Microsoft Surface 3
virtual:
- rented_vserver # sub group
dev_unknown: # for unknown device kinds
# structure of rented servers
rented:
rented_barebones:
- hetzner_server # https://robot.your-server.de/server
rented_vserver:
- bwcloud_vserver # https://portal.bw-cloud.org/
- contabo_vserver # https://my.contabo.com/vps
# OS structure
os_known: # list of all known OS derivates
- os_debian
- os_raspbian
# applications
bootstrapable: # which OSes/hosts can be bootstraped
- os_debian
- os_raspbian
group_aliasses: # a:b meaning a equals b, should only depend on groups not defined here
# unknown groups
dev_unknown: "!dev_known"
os_unknown: "!os_known"
# applications
bootstrap: "bootstrapable:!no_bootstrap" # which hosts should be bootstraped
common_roles: "!no_common_roles"
wireguard_backbones: "public_available:!no_wireguard_automatic"
wireguard_clients: "!public_available:!no_wireguard_automatic"
host_groups: # group: host: [*groups]
no_defaults: # do not include in all default playbooks / roles
_all:
- no_bootstrap # do not setup sudo bootstrap
- no_common_roles # do not include in common roles
- no_wireguard_automatic # do not assign wireguard role automatic, hosts may be excluded from wireguard or assigned to their wireguard role manually
rented:
_all:
- public_available # rented are public available
# to group similar devices together
common_server: # public common servers
_all:
- os_debian
hatoria.banananet.work:
- hetzner_server
nvak.banananet.work:
- contabo_vserver
morska.banananet.work:
- bwcloud_vserver
rurapenthe.banananet.work:
- bwcloud_vserver
single_hosts: # a:b meaning a is b, cannot be nested
# Local Servers
hardie.eridon.banananet.work:
- os_debian
# Embedded Devices
wgpanel.eridon.banananet.work:
- dev_surface3
- os_debian
- no_wireguard_automatic # no wireguard
# Public Servers
hatoria.banananet.work:
- hetzner_server
- os_debian
- bootstrap
- public_available
- wireguard_backbones
nvak.banananet.work:
- contabo_vserver
- os_debian
- bootstrap
- public_available
- wireguard_backbones
morska.banananet.work:
- bwcloud_vserver
- os_debian
- bootstrap
- public_available
- wireguard_backbones
rurapenthe.banananet.work:
- bwcloud_vserver
- os_debian
- bootstrap
- public_available
- wireguard_backbones
# Location Eridon
## Local Servers
hardie.eridon.banananet.work:
- bootstrap
## Embedded Devices
wgpanel.eridon.banananet.work:
- surface3
- os_debian
- bootstrap

@ -2,52 +2,25 @@ vault:=group_vars/all/vault.yml
playbooks_dir:=playbooks
playbooks:=$(wildcard ${playbooks_dir}/*.yml)
credentials_dir:=credentials
credentials_file:=misc/credentials.tar.gpg
venv_dir:=venv
# Default Target (must be first target)
.PHONY: main list vault ${playbooks} store-credentials load-credentials
.PHONY: main
main:
ansible-playbook site.yml
# Virtual Environment's Setup
.PHONY: setup
setup: ansible_collections ${venv_dir}
ansible_collections: collection-requirements.yml ${venv_dir}
mkdir --parent $@
. ./${venv_dir}/bin/activate && ansible-galaxy install -r $<
${venv_dir}: pip-requirements.txt
python3 -m venv $@
. ./$@/bin/activate && python3 -m pip install -r $<
# Playbook Execution
.PHONY: list
list:
@echo ${playbooks}
.PHONY: ${playbooks}
${playbooks}:
ansible-playbook ${playbooks_dir}/$@.yml
# Vault Handling
.PHONY: vault
vault:
ansible-vault edit ${vault}
# Credential Handling
${playbooks}:
ansible-playbook ${playbooks_dir}/$@.yml
.PHONY: store-credentials
store-credentials: ${credentials_file}
store-credentials: credentials.tar.gpg
${credentials_file}: $(shell find "${credentials_dir}")
credentials.tar.gpg: $(shell find "${credentials_dir}")
tar -cf - "${credentials_dir}" | gpg --encrypt --recipient 73D09948B2392D688A45DC8393E1BD26F6B02FB7 > "$@"
.PHONY: load-credentials
load-credentials:
< "${credentials_file}" gpg --decrypt | tar -xf -
< credentials.tar.gpg gpg --decrypt | tar -xf -

@ -1 +0,0 @@
Subproject commit 36f3e3b28c82611c72a867cdc1f5ddc8bd9325e9

@ -1,27 +0,0 @@
#### Python / PiP Requirements ####
# each group either sorted by alphabet or, if applicable, sorted by hierarchy
### Main Runtime Dependencies ###
# Ansible itself
ansible ~= 2.10.0 # pinned to 2.10 because upgrade may bring issues
### Test Frameworks ###
ansible-lint # simple linter
yamllint # linter for YAML files in general
## molecule ##
# role based test framework for Ansible
molecule
# enable docker for test environments, requires Docker to be installed on host and usable without additional permissions
molecule-docker
# allows using Vagrant (VMs) for creating test environments, requires Vagrant and any hypervisor (e.g. VirtualBox) to be installed
molecule-vagrant
python-vagrant # extra module required as not always installed with vagrant

@ -1,5 +1,5 @@
- name: Configure hatoria as dns server
hosts: hatoria.banananet.work
- name: Configure nvak as dns server
hosts: nvak.banananet.work
vars:
# Source: https://docs.hetzner.com/dns-console/dns/general/authoritative-name-servers
hetzner_authoritatives:
@ -46,13 +46,9 @@
roles:
- role: dns/master
domain: banananet.work
main_nameserver_domain: "ns1.banananet.work" # required glue entry already configured
responsible_mail_name: hostmaster.banananet.work
slaves_ip: "{{ hetzner_authoritatives_ip }}"
entries:
# main NS entry
- type: NS
data: ns1.banananet.work.
# Hetzner NS entries
- type: NS
data: "{{ hetzner_authoritatives }}"
@ -97,11 +93,10 @@
data: "10 10 10110 mc.wg.{{ domain }}."
- role: dns/master
domain: forumderschan.de
main_nameserver_domain: "ns1.banananet.work"
responsible_mail_name: hostmaster.banananet.work
slaves_ip: "{{ hetzner_authoritatives_ip }}"
entries:
# main NS entry
# Glue record
- type: NS
data: ns1.banananet.work.
# Hetzner NS entries
@ -112,10 +107,9 @@
data: 0 issue "letsencrypt.org"
- role: dns/master
domain: stadtpiraten-karlsruhe.de
main_nameserver_domain: "ns1.banananet.work"
responsible_mail_name: hostmaster.banananet.work
entries:
# main NS entry
# Glue record
- type: NS
data: ns1.banananet.work.
# limit CA
@ -127,17 +121,3 @@
roles:
- role: dns/server_entries
domain: "{{ inventory_hostname }}"
- name: Arbitary entries
# all tasks/roles here must be local only
hosts: all # select any host as not important
run_once: yes # run only once "for first host"
gather_facts: no # do not gather facts from host as these may not be used
roles:
- role: ext_mail/mailjet
tags:
- mailjet
- wg.banananet.work
domain: wg.banananet.work
verification_name: 5803f0f5
verification_data: 5803f0f5f4278d66327350f7a8141b70

@ -1 +0,0 @@
*.yml

@ -16,7 +16,3 @@
owner: root
group: root
mode: u=rw,g=r,o=r
# If something goes wrong with mouting or /etc/hosts, add this back to cloud.cfg using directory:
#mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
#manage_etc_hosts: true

@ -1,7 +1,7 @@
---
- name: Configure Surface 3 device
hosts: dev_surface3
hosts: surface3
tasks:
- name: Install packages for hardware
apt:

@ -1,27 +1,7 @@
- name: Configure hatoria.banananet.work
hosts: hatoria.banananet.work
vars:
bnet_cloud_domain: "cloud.banananet.work"
bnet_cloud_username: "{{ bnet_cloud_domain | domain_to_username }}"
roles:
- role: nginx/default_server # Would not be configurable otherwise
tags:
- default_server
# Git Server
- role: server/gitea
tags:
- git.banananet.work
domain: git.banananet.work
gitea_system_user: git
database_user: gitea
- role: server/drone.io/server
domain: ci.git.banananet.work
bind_port: 12824
gitea_server_url: https://git.banananet.work
gitea_client_id: "{{ drone_ci_gitea_main_oauth2_client_id }}"
gitea_client_secret: "{{ drone_ci_gitea_main_oauth2_client_secret }}"
- role: server/drone.io/runner
drone_server_host: ci.git.banananet.work
# Banananet.work
- role: server/static
tags:
@ -33,23 +13,6 @@
- banananet.work
domain: www.banananet.work
dest: banananet.work
# SpotMe Server
- role: server/spotme
tags:
- spotme.banananet.work
domain: spotme.banananet.work
bind_port: 12820
# Firefox Sync Server
- role: server/firefox-sync
tags:
- firefox.banananet.work
domain: firefox.banananet.work
# RSS Server
# TODO Manual initialization of database required
- role: server/tt-rss
tags:
- rss.banananet.work
domain: rss.banananet.work
# Linx Server
- role: server/linx
tags:
@ -72,8 +35,7 @@
- role: server/nextcloud
tags:
- cloud.banananet.work
domain: "{{ bnet_cloud_domain }}"
system_user: "{{ bnet_cloud_username }}"
domain: cloud.banananet.work
nextcloud_admin_user: "{{ global_username }}"
enabled_apps_list:
- accessibility
@ -153,6 +115,7 @@
tags:
- forumderschan.de
domain: forumderschan.de
is_debug_instance: yes
repo: git@git.banananet.work:strichliste/strichliste-php.git
root: html
installation_includes:
@ -162,225 +125,15 @@
- forumderschan.de
domain: www.forumderschan.de
dest: forumderschan.de
# Monitors
- role: misc/tg_monitor_cmd
tags: tg-monitor-cmd
monitor_name: forumderschan.de-NS
description: "NS entries of forumderschan.de"
command_str: >-
/usr/bin/dig
@a.nic.de.
forumderschan.de. NS
| grep --only-matching --perl-regexp '(?<=\s)(\S+\.)+(?=$)'
| sort
use_shell: yes
# WG Nextcloud
- role: server/nextcloud
tags:
- wg.banananet.work
domain: wg.banananet.work
nextcloud_admin_user: felix
enabled_apps_list:
- accessibility
- activity
- apporder
- bruteforcesettings
- calendar
- checksum
- cloud_federation_api
- comments
- contacts
- cookbook
- cospend
- dav
- deck
- encryption
- external
- federatedfilesharing
- federation
- files
- files_automatedtagging
- files_external
- files_pdfviewer
- files_rightclick
- files_sharing
- files_trashbin
- files_versions
- files_videoplayer
- firstrunwizard
- logreader
- lookup_server_connector
- metadata
- nextcloud_announcements
- notes
- notifications
- oauth2
- ocdownloader
- password_policy
- photos
- polls
- privacy
- provisioning_api
- quota_warning
- ransomware_protection
- serverinfo
- settings
- sharebymail
- side_menu
- sociallogin
- socialsharing_email
- support
- suspicious_login
- systemtags
- tasks
- text
- theming
- twofactor_admin
- twofactor_backupcodes
- twofactor_gateway
- twofactor_nextcloud_notification
- twofactor_totp
- twofactor_u2f
- updatenotification
- viewer
- workflowengine
disabled_apps_list:
- admin_audit
- recommendations
- spreed
- survey_client
- user_ldap
# WG Minecraft
- role: server/minecraft
tags:
- mc.wg.banananet.work
domain: mc.wg.banananet.work
minecraft_version: "1.16.4"
minecraft_version: "1.16.1"
minecraft_ram: "16G"
minecraft_port: 25566
config:
difficulty: normal
motd: ChaosCraft
view-distance: 16
# # Stadtpiraten
# - role: server/typo3
# domain: piraten.dev.banananet.work
# - role: server/php
# domain: forum.piraten.dev.banananet.work
# repo: PHPBB # TODO
# version: master
# # Stadtpiraten (prod)
# - role: nginx/forward
# domain: www.stadtpiraten-karlsruhe.de
# dest: stadtpiraten-karlsruhe.de
# SMD/SFC HST 2020
- role: nginx/forward
tags:
- proj-hst
- hst21.banananet.work
domain: hst20.banananet.work
dest: hst21.banananet.work
- role: server/nextcloud
tags:
- proj-hst
- hst21.banananet.work
domain: hst21.banananet.work
system_user: nc-hst21
nextcloud_admin_user: felix
enabled_apps_list:
- accessibility
- activity
- apporder
- bruteforcesettings
- calendar
- checksum
- cloud_federation_api
- comments
- contacts
- contactsinteraction
- cospend
- dav
- deck
- encryption
- external
- federatedfilesharing
- federation
- files
- files_automatedtagging
- files_linkeditor
- files_mindmap
- files_pdfviewer
- files_rightclick
- files_sharing
- files_trashbin
- files_versions
- files_videoplayer
- firstrunwizard
- forms
- logreader
- lookup_server_connector
- mail
- maps
- metadata
- nextcloud_announcements
- notes
- notifications
- oauth2
- password_policy
- photos
- polls
- privacy
- provisioning_api
- quota_warning
- ransomware_protection
- serverinfo
- settings
- sharebymail
- socialsharing_email
- spreed
- support
- suspicious_login
- systemtags
- tasks
- text
- theming
- twofactor_admin
- twofactor_backupcodes
- twofactor_gateway
- twofactor_totp
- twofactor_u2f
- updatenotification
- viewer
- whiteboard
- workflowengine
disabled_apps_list:
- admin_audit
- dashboard
- files_external
- recommendations
- sociallogin
- survey_client
- user_ldap
- user_status
- weather_status
tasks:
- name: Configure custom archive Nextcloud directory on hdd for personal usages
tags:
- cloud.banananet.work
- custom_archive_directory
vars:
archive_directory: "{{ global_hdd_directory }}/{{ bnet_cloud_domain }}~personal-archive"
block:
- name: Create archive directory
file:
state: directory
path: "{{ archive_directory }}"
owner: "{{ bnet_cloud_username }}"
group: "{{ bnet_cloud_username }}"
mode: "u=rwx,g=rx,o="
register: archive_directory_task
- name: Show message to user about path on changes
debug:
msg: >-
Changed custom archive directory: Please ensure you (re-)configure this directory properly on your Nextcloud instance: {{ archive_directory | quote }}
when: archive_directory_task.changed

@ -2,9 +2,229 @@
hosts: nvak.banananet.work
roles:
- role: nginx/default_server # Would not be configurable otherwise
# Git Server
- role: server/gitea
tags:
- git.banananet.work
domain: git.banananet.work
gitea_system_user: git
database_user: gitea
# SpotMe Server
- role: server/spotme
tags:
- spotme.banananet.work
domain: spotme.banananet.work
bind_port: 12820
spotme_system_user: spotme
# # Admin Panel
# - role: server/php
# domain: nvak.banananet.work
# repo: PHPMYADMIN # TODO
# BananaNetwork Keys
# - role: server/node
# domain: keys.banananet.work
# repo: https://git.banananet.work/banananetwork/keys.git
# bind_port: 12822
# system_user: keys-banananet-work
# Firefox Sync Server
- role: server/firefox-sync
tags:
- firefox.banananet.work
domain: firefox.banananet.work
system_user: firefox-banananet-work
# RSS Server
# TODO Manual initialization of database required
- role: server/tt-rss
tags:
- rss.banananet.work
domain: rss.banananet.work
system_user: rss-banananet-work
# DSA Seite
# - role: server/node
# domain: dsa.banananet.work
# repo: git@git.banananet.work:dsaGroup/dsaPage.git
# bind_port: 12821
# system_user: dsaPage
# Forum der Schande
- role: server/php
tags:
- forumderschan.de
domain: forumderschan.de
has_debug_instance: yes
system_user: forumderschan-de
repo: git@git.banananet.work:strichliste/strichliste-php.git
root: html
installation_includes:
- includes
# WG Nextcloud
- role: server/nextcloud
tags:
- wg.banananet.work
domain: wg.banananet.work
system_user: wg-banananet-work
nextcloud_admin_user: felix
enabled_apps_list:
- accessibility
- activity
- apporder
- bruteforcesettings
- calendar
- checksum
- cloud_federation_api
- comments
- contacts
- cookbook
- cospend
- dav
- deck
- encryption
- external
- federatedfilesharing
- federation
- files
- files_automatedtagging
- files_external
- files_pdfviewer
- files_rightclick
- files_sharing
- files_trashbin
- files_versions
- files_videoplayer
- firstrunwizard
- logreader
- lookup_server_connector
- metadata
- nextcloud_announcements
- notes
- notifications
- oauth2
- ocdownloader
- password_policy
- photos
- polls
- privacy
- provisioning_api
- quota_warning
- ransomware_detection
- ransomware_protection
- serverinfo
- settings
- sharebymail
- side_menu
- sociallogin
- socialsharing_email
- support
- suspicious_login
- systemtags
- tasks
- text
- theming
- twofactor_admin
- twofactor_backupcodes
- twofactor_gateway
- twofactor_nextcloud_notification
- twofactor_totp
- twofactor_u2f
- updatenotification
- viewer
- workflowengine
disabled_apps_list:
- admin_audit
- recommendations
- spreed
- survey_client
- user_ldap
- role: server/static
domain: turnips.banananet.work
repo: https://git.banananet.work/banananetwork/ac-nh-turnip-prices.git
# SMD/SFC HST 2020
- role: server/nextcloud
tags:
- hst20.banananet.work
domain: hst20.banananet.work
system_user: nc-hst20
nextcloud_admin_user: felix
enabled_apps_list:
- accessibility
- activity
- apporder
- bruteforcesettings
- calendar
#- checksum # currently not supported
- cloud_federation_api
- comments
- contacts
- cospend
- dav
- deck
- encryption
- external
- federatedfilesharing
- federation
- files
- files_automatedtagging
- files_mindmap
- files_pdfviewer
- files_rightclick
- files_sharing
- files_trashbin
- files_versions
- files_videoplayer
- firstrunwizard
- forms
- logreader
- lookup_server_connector
- metadata
- nextcloud_announcements
- notes
- notifications
- oauth2
- password_policy
- photos
- polls
- privacy
- provisioning_api
- quota_warning
#- ransomware_detection # currently not supported
- ransomware_protection
- serverinfo
- settings
- sharebymail
- socialsharing_email
- spreed
- support
- suspicious_login
- systemtags
- tasks
- text
- theming
- twofactor_admin
- twofactor_backupcodes
- twofactor_gateway
- twofactor_totp
- twofactor_u2f
- updatenotification
- viewer
- whiteboard
- workflowengine
disabled_apps_list:
- admin_audit
- dashboard
- files_external
- recommendations
- sociallogin
- survey_client
- user_ldap
- user_status
- weather_status
# # Stadtpiraten
# - role: server/typo3
# domain: piraten.dev.banananet.work
# - role: server/php
# domain: forum.piraten.dev.banananet.work
# repo: PHPBB # TODO
# version: master
# # Stadtpiraten (prod)
# - role: nginx/forward
# domain: www.stadtpiraten-karlsruhe.de
# dest: stadtpiraten-karlsruhe.de

@ -1 +0,0 @@
../library

@ -2,6 +2,7 @@
- name: Configure wireguard backbones
hosts: wireguard_backbones
strategy: linear
tags:
- wireguard
- wireguard_backbones
@ -10,6 +11,7 @@
- name: Configure wireguard clients
hosts: wireguard_clients
strategy: linear
tags:
- wireguard
- wireguard_clients

@ -52,11 +52,23 @@
group: "{{ username }}"
mode: "u=rwx,g=rx,o="
- name: Configure ssh configration directory
file:
path: "{{ user_directory }}/.ssh"
state: directory
owner: "{{ username }}"
group: "{{ username }}"
mode: "u=rwx,g=rx,o="
- name: Configure authorized_keys
authorized_key:
state: present
user: "{{ username }}"
key: "{{ authorized_keys }}"
get_url:
url: "{{ authorized_keys }}"
dest: "{{ user_directory }}/.ssh/authorized_keys"
force: yes
owner: "{{ username }}"
group: "{{ username }}"
mode: "u=rwx,g=rx,o="
ignore_errors: yes
- name: Configure zsh
become_user: "{{ username }}"

@ -2,4 +2,21 @@
acme_account_mail: "{{ global_admin_mail }}"
acme_key_algorithm: rsa
acme_key_size: 4096
acme_user_umask: "0477" # resulting in u=rw,g=,o= being allowed
domain_data_name: "data.txt"
keyfullchain_name: "keyfullchain.pem"
hook_script_path: "{{ global_dehydrated_configuration_directory }}/hook.py"
hook_config_path: "{{ global_dehydrated_configuration_directory }}/hook.json"
hook_config:
challenge_record_ttl: 20
domain_data_name: "{{ domain_data_name }}"
domains_directory: "{{ global_dehydrated_domains_directory }}"
domains_main_file: "{{ global_dehydrated_domains_main_file }}"
key_file_mode: 0o0310 # resulting in u=rw,g=r,o=
keyfullchain_name: "{{ keyfullchain_name }}"

@ -3,4 +3,7 @@
allow_duplicates: no
dependencies:
- role: misc/system_user
system_user: "{{ global_dehydrated_system_user }}"
user_directory: "{{ global_dehydrated_data_directory }}"
- role: nginx/application

@ -4,12 +4,39 @@
apt:
state: present
name:
- certbot # main package
- dehydrated # main package
- name: Configure certbot
- name: Create configuration directory
file:
state: directory
path: "{{ global_dehydrated_configuration_directory }}"
owner: root
group: "{{ global_dehydrated_system_user }}"
mode: u=rwx,g=rx,o=
- name: Configure dehydrated
template:
src: config
dest: "{{ configuration_file }}"
owner: root
group: "{{ global_dehydrated_system_user }}"
mode: u=rw,g=r,o=
validate: "{{ global_validate_shell_script }}"
- name: Deploy global hook config
copy:
content: |
{{ hook_config | to_nice_json }}
dest: "{{ global_dehydrated_hook_configuration_file }}"
owner: root
group: "{{ global_dehydrated_system_user }}"
mode: u=rw,g=r,o=
- name: Deploy global hook script
template:
src: cli.ini
dest: "{{ global_certbot_configuration_file }}"
src: hook.py
dest: "{{ hook_script_path }}"
owner: root
group: root
mode: u=rw,g=r,o=r
group: "{{ global_dehydrated_system_user }}"
mode: u=rwx,g=rx,o=
validate: "{{ global_validate_shell_script }}"

@ -1,12 +0,0 @@
# Accept service terms
agree-tos
# Default RSA key size
rsa-key-size = {{ acme_key_size }}
# E-Mail Address for registration
email = {{ acme_account_mail }}
# Use webroot per default
authenticator = webroot
webroot-path = {{ acme_validation_root_directory }}

@ -0,0 +1,133 @@
########################################################
# This is the main config file for dehydrated #
# #
# This file is looked for in the following locations: #
# $SCRIPTDIR/config (next to this script) #
# /usr/local/etc/dehydrated/config #
# /etc/dehydrated/config #
# ${PWD}/config (in current working-directory) #
# #
# Default values of this config are in comments #
########################################################
# Which user should dehydrated run as? This will be implicitly enforced when running as root
#DEHYDRATED_USER=
# Which group should dehydrated run as? This will be implicitly enforced when running as root
#DEHYDRATED_GROUP=
# Resolve names to addresses of IP version only. (curl)
# supported values: 4, 6
# default: <unset>
#IP_VERSION=
# URL to certificate authority or internal preset
# Presets: letsencrypt, letsencrypt-test, zerossl, buypass, buypass-test
# default: letsencrypt
CA="letsencrypt"
# Path to old certificate authority
# Set this value to your old CA value when upgrading from ACMEv1 to ACMEv2 under a different endpoint.
# If dehydrated detects an account-key for the old CA it will automatically reuse that key
# instead of registering a new one.
# default: https://acme-v01.api.letsencrypt.org/directory
#OLDCA="https://acme-v01.api.letsencrypt.org/directory"
# Which challenge should be used? Currently http-01, dns-01 and tls-alpn-01 are supported
#CHALLENGETYPE="http-01"
# Path to a directory containing additional config files, allowing to override
# the defaults found in the main configuration file. Additional config files
# in this directory needs to be named with a '.sh' ending.
# default: <unset>
#CONFIG_D=
# Directory for per-domain configuration files.
# If not set, per-domain configurations are sourced from each certificates output directory.
# default: <unset>
#DOMAINS_D=
# Base directory for account key, generated certificates and list of domains (default: $SCRIPTDIR -- uses config directory if undefined)
BASEDIR={{ global_dehydrated_configuration_directory | quote }}
# File containing the list of domains to request certificates for (default: $BASEDIR/domains.txt)
DOMAINS_TXT={{ global_dehydrated_domains_file | quote }}
# Output directory for generated certificates
CERTDIR={{ global_dehydrated_certificates_directory }}
# Output directory for alpn verification certificates
#ALPNCERTDIR="${BASEDIR}/alpn-certs"
# Directory for account keys and registration information
#ACCOUNTDIR="${BASEDIR}/accounts"
# Output directory for challenge-tokens to be served by webserver or deployed in HOOK (default: /var/www/dehydrated)
WELLKNOWN={{ acme_validation_root_directory | quote }}
# Default keysize for private keys (default: 4096)
KEYSIZE={{ acme_key_size | quote }}
# Path to openssl config file (default: <unset> - tries to figure out system default)
#OPENSSL_CNF=
# Path to OpenSSL binary (default: "openssl")
#OPENSSL="openssl"
# Extra options passed to the curl binary (default: <unset>)
#CURL_OPTS=
# Program or function called in certain situations
#
# After generating the challenge-response, or after failed challenge (in this case altname is empty)
# Given arguments: clean_challenge|deploy_challenge altname token-filename token-content
#
# After successfully signing certificate
# Given arguments: deploy_cert domain path/to/privkey.pem path/to/cert.pem path/to/fullchain.pem
#
# BASEDIR and WELLKNOWN variables are exported and can be used in an external program
# default: <unset>
HOOK={{ hook_script_path | quote }}
# Chain clean_challenge|deploy_challenge arguments together into one hook call per certificate (default: no)
#HOOK_CHAIN="no"
# Minimum days before expiration to automatically renew certificate (default: 30)
#RENEW_DAYS="30"
# Regenerate private keys instead of just signing new certificates on renewal (default: yes)
PRIVATE_KEY_RENEW="yes"
# Create an extra private key for rollover (default: no)
#PRIVATE_KEY_ROLLOVER="no"
# Which public key algorithm should be used? Supported: rsa, prime256v1 and secp384r1
KEY_ALGO={{ acme_key_algorithm | quote }}
# E-mail to use during the registration (default: <unset>)
CONTACT_EMAIL={{ acme_account_mail | quote }}
# Lockfile location, to prevent concurrent access (default: $BASEDIR/lock)
#LOCKFILE="${BASEDIR}/lock"
# Option to add CSR-flag indicating OCSP stapling to be mandatory (default: no)
#OCSP_MUST_STAPLE="no"
# Fetch OCSP responses (default: no)
OCSP_FETCH="yes"
# OCSP refresh interval (default: 5 days)
#OCSP_DAYS=5
# Issuer chain cache directory (default: $BASEDIR/chains)
#CHAINCACHE="${BASEDIR}/chains"
# Automatic cleanup (default: no)
#AUTO_CLEANUP="no"
# ACME API version (default: auto)
#API=auto
# Preferred issuer chain (default: <unset> -> uses default chain)
#PREFERRED_CHAIN=

@ -0,0 +1,329 @@
#!/usr/bin/env python3
import argparse
import functools
import json
import os
from pathlib import Path
import subprocess
import sys
# constants
# DNS label prefixed to a domain for dns-01 ACME challenges
ACME_DOMAIN_PREFIX = "_acme-challenge"
# baseline kwargs for every subprocess call; shell=False avoids shell injection
SUBPROCESS_DEFAULT_KWARGS = {"shell": False}
# resolve external binaries through PATH via env(1)
SUBPROCESS_ENV = ["/usr/bin/env"]


# subprocess helpers

def safe_run(args, **kwargs):
    """Run args as a subprocess, raising CalledProcessError on failure.

    Defaults (shell=False, check=True) are merged with kwargs so callers
    may override them without triggering a duplicate-keyword TypeError.
    """
    run_kwargs = {**SUBPROCESS_DEFAULT_KWARGS, "check": True, **kwargs}
    return subprocess.run(args, **run_kwargs)


def safe_call(args, **kwargs):
    """Run args through /usr/bin/env so the binary is looked up on PATH."""
    return safe_run(SUBPROCESS_ENV + args, **kwargs)


def safe_popen(args, subprocess_input, **kwargs):
    """Run args through env(1), feeding subprocess_input on stdin.

    Returns (proc, stdout, stderr); raises CalledProcessError on a
    non-zero exit status.
    """
    if isinstance(subprocess_input, str):
        # BUGFIX: Popen pipes default to binary mode, so communicate()
        # rejects str input with a TypeError — encode it first.
        subprocess_input = subprocess_input.encode()
    popen_kwargs = {**SUBPROCESS_DEFAULT_KWARGS, "stdin": subprocess.PIPE, **kwargs}
    proc = subprocess.Popen(SUBPROCESS_ENV + args, **popen_kwargs)
    out, err = proc.communicate(input=subprocess_input)
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=proc.args)
    return (proc, out, err)
# shell replacements
def chmod(mode, *paths):
    """Apply the symbolic/octal mode to every path via the chmod(1) binary."""
    safe_call(["chmod", mode, *map(str, paths)])
def concat(out_path, *in_paths, mode=0o333):
    """Concatenate in_paths (Path objects) into out_path, in order.

    mode is passed to os.open when the output file is created.
    NOTE(review): the default 0o333 has no read bit set — confirm that
    write-only output files are really intended here.
    """
    opener = functools.partial(os.open, mode=mode)
    with open(out_path, mode="wb", opener=opener) as sink:
        for source in in_paths:
            sink.write(source.read_bytes())
def iterdir_recursive(dir_path, suffix=""):
    """Return all files below dir_path whose names end with suffix.

    Directories are descended into recursively; only file entries are
    collected. With the default empty suffix every file matches.
    """
    matches = []
    for child in dir_path.iterdir():
        if child.is_dir():
            matches += iterdir_recursive(child, suffix=suffix)
        elif child.name.endswith(suffix):
            matches.append(child)
    return matches
def nsupdate(key_path, record_name, record_ttl=60, record_type=None, record_data=None, delete_record=False):
    """Add or delete a DNS record via nsupdate(1) using the TSIG key at key_path.

    For an add, record_type and record_data are mandatory; for a delete
    they are optional (empty values delete all matching records).

    Raises ValueError when fields required for an add are missing.
    NOTE(review): callers currently pass key_path=None, which becomes the
    literal path "None" — the key path still needs to be wired up (WIP).
    """
    if delete_record:
        record_type = record_type or ""
        record_data = record_data or ""
        action = f"update delete {record_name} {record_ttl} IN {record_type} {record_data}"
    else:
        if not (record_name and record_ttl and record_type and record_data):
            raise ValueError("record_name, record_ttl, record_type and record_data are required to add a record")
        action = f"update add {record_name} {record_ttl} IN {record_type} {record_data}"
    safe_popen(["nsupdate", "-k", str(key_path)], f"{action}\nsend\n")
# hooks code
class DomainData:
    """Accessor for the per-domain state kept below config.domains_directory."""

    def __init__(self, domain):
        self.__domain = domain

    @property
    def domain(self):
        """Domain name this instance wraps."""
        return self.__domain

    @property
    def domain_dir(self):
        """Per-domain directory under the configured domains root."""
        return config.domains_directory / self.__domain

    @property
    def domain_data_file(self):
        """Path of the domain data file inside domain_dir."""
        return self.domain_dir / config.domain_data_name

    def run_hooks(self, hook_name, **kwargs):
        """Execute every executable file found below domain_dir/hook_name.

        Silently does nothing when the hook directory does not exist;
        non-executable entries are skipped.
        """
        scripts_dir = self.domain_dir / hook_name
        if not scripts_dir.exists():
            return
        for script in iterdir_recursive(scripts_dir):
            if os.access(script, os.X_OK):
                safe_run([str(script)])  # TODO forward kwargs as arguments
def deploy_challenge(domain, token_filename, token_value):
    """Publish the dns-01 challenge TXT record for domain.

    token_filename is only meaningful for http-01 challenges and is ignored.
    """
    record = f"{ACME_DOMAIN_PREFIX}.{domain}."
    nsupdate(
        key_path=None,  # FIXME: no TSIG key is wired up yet (WIP)
        record_name=record,
        record_ttl=config.challenge_record_ttl,
        record_type="TXT",
        record_data=token_value,
    )
def clean_challenge(domain, token_filename, token_value):
    """Remove the dns-01 challenge TXT record for domain after validation.

    token_filename is only meaningful for http-01 challenges and is ignored.
    """
    record = f"{ACME_DOMAIN_PREFIX}.{domain}."
    nsupdate(key_path=None, record_name=record, delete_record=True)  # FIXME: key path is WIP
def sync_cert(domain, key_path, cert_path, fullchain_path, chain_path, request_path):
    """Flush the freshly written certificate files to disk via sync(1)."""
    files = [key_path, cert_path, fullchain_path, chain_path, request_path]
    safe_call(["sync", *files])
def deploy_cert(domain, key_path, cert_path, fullchain_path, chain_path, timestamp: int):
    """Post-signing hook: publish certificate files for consuming services.

    Makes the public certificate files world-readable and writes a legacy
    key+fullchain bundle next to the private key.
    """
    # make public key files readable for all
    chmod("+r", cert_path, fullchain_path, chain_path)
    # create legacy key+cert+chain file
    # BUGFIX: key_path is a file, so `key_path / ".."` builds a path that
    # traverses through a non-directory and fails with ENOTDIR on open.
    # Use the containing directory instead.
    keyfullchain_path = key_path.parent / config.keyfullchain_name
    concat(keyfullchain_path, key_path, fullchain_path, mode=config.key_file_mode)
def deploy_ocsp(domain, ocsp_path, timestamp):
    """Make the refreshed OCSP staple file readable for all services."""
    chmod("+r", ocsp_path)
def unchanged_cert(domain, key_path, cert_path, fullchain_path, chain_path):
    """Hook for certificates that are still valid; intentionally a no-op."""


def invalid_challenge(domain, response):
    """Hook for a failed challenge validation; intentionally a no-op."""


def request_failure(status_code, reason, req_type, headers):
    """Hook for a failed HTTP request against the CA; intentionally a no-op."""


def generate_csr(domain, cert_dir, alt_names):
    """Hook for custom CSR generation; no-op, dehydrated builds the CSR itself."""
def startup_hook():
    """Rebuild the main domains list from all per-domain fragment files."""
    fragments = iterdir_recursive(config.domains_directory)
    concat(config.domains_main_file, *fragments)
def exit_hook(error):
    """Called by dehydrated on exit; error is None on success.

    Currently a no-op for both the success and the failure case.
    """
# hooks metadata
# Shared argparse keyword arguments for every hook parameter, keyed by the
# positional argument name used in the `hooks` table below.
arg_infos = {
    "alt_names": {
        "help": "All domain names for the current certificate as specified in domains.txt.",
    },
    "cert_path": {
        "help": "The path of the file containing the signed certificate.",
        "type": Path,
    },
    "chain_path": {
        "help": "The path of the file containing the intermediate certificate(s).",
        "type": Path,
    },
    "cert_dir": {
        "help": "Certificate output directory for this particular certificate.",
        "type": Path,
    },
    "domain": {
        "help": "The domain name (CN or subject alternative name) being validated.",
    },
    "error": {
        "help": "Contains error message if dehydrated exits with error.",
        "nargs": "?",
    },
    "fullchain_path": {
        "help": "The path of the file containing the signed full certificate chain.",
        "type": Path,
    },
    "headers": {
        "help": "HTTP headers returned by the CA",
    },
    "key_path": {
        "help": "The path of the file containing the private key.",
        "type": Path,
    },
    "ocsp_path": {
        "help": "The path of the ocsp stapling file.",
        "type": Path,
    },
    "req_type": {
        "help": "The kind of request that was made (GET, POST, …).",
    },
    "reason": {
        "help": "The specific reason for the error.",
    },
    "request_path": {
        "help": "The path of the file containing the certificate signing request.",
        "type": Path,
    },
    "response": {
        "help": "The response that the verification server returned."
    },
    "status_code": {
        "help": "The HTML status code that originated the error.",
        "type": int,
    },
    "timestamp": {
        "help": "Timestamp when the specified certificate was created.",
        "type": int,  # TODO to date
    },
    "token_filename": {
        "help": "The name of the file containing the token to be served for HTTP validation.",
    },
    "token_value": {
        # BUGFIX: help text was a copy/paste duplicate of token_filename's
        "help": "The token value to be published for challenge validation.",
    },
}
# Maps each dehydrated hook name to a pair of (handler function, ordered list
# of positional argument names). The names index into arg_infos above when
# parse_args() builds one CLI subparser per hook.
hooks = {
    "deploy_challenge": (deploy_challenge, [
        "domain",
        "token_filename",
        "token_value",
    ]),
    "clean_challenge": (clean_challenge, [
        "domain",
        "token_filename",
        "token_value",
    ]),
    "sync_cert": (sync_cert, [
        "domain",
        "key_path",
        "cert_path",
        "fullchain_path",
        "chain_path",
        "request_path",
    ]),
    "deploy_cert": (deploy_cert, [
        "domain",
        "key_path",
        "cert_path",
        "fullchain_path",
        "chain_path",
        "timestamp",
    ]),
    "deploy_ocsp": (deploy_ocsp, [
        "domain",
        "ocsp_path",
        "timestamp",
    ]),
    "unchanged_cert": (unchanged_cert, [
        "domain",
        "key_path",
        "cert_path",
        "fullchain_path",
        "chain_path",
    ]),
    "invalid_challenge": (invalid_challenge, [
        "domain",
        "response",
    ]),
    "request_failure": (request_failure, [
        "status_code",
        "reason",
        "req_type",
        "headers",
    ]),
    "generate_csr": (generate_csr, [
        "domain",
        "cert_dir",
        "alt_names",
    ]),
    # startup_hook and exit_hook take no per-domain arguments
    "startup_hook": (startup_hook, [
    ]),
    "exit_hook": (exit_hook, [
        "error",
    ]),
}
# general
def read_config(config_path):
    """Parse and return the JSON hook configuration stored at config_path."""
    with config_path.open() as config_file:
        return json.load(config_file)
def parse_args():
    """Build the CLI: one subcommand per dehydrated hook, plus -c/--config.

    Returns a tuple of (parsed namespace, dict of the hook's public
    arguments). Internal bookkeeping entries are stored under names with a
    leading underscore (_config, _hook, _func, _parser) so they can be
    filtered out of the dict handed to the hook function.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", "--config-file", dest='_config', type=Path, help="Path to config file")
    subparsers = parser.add_subparsers(dest='_hook', required=True)
    for hook_name, hook_data in hooks.items():
        hook_fun, hook_args = hook_data
        # prefix_chars='+' — presumably so positional hook values starting
        # with '-' are not mistaken for options; TODO confirm
        hook_parser = subparsers.add_parser(
            hook_name,
            prefix_chars='+',
        )
        # NOTE(review): the handler is stored as `_func`; main()'s commented
        # dispatch refers to `args._fun` — mismatched names, fix when enabling.
        hook_parser.set_defaults(
            _func=hook_fun,
            _parser=hook_parser,
        )
        for arg_name in hook_args:
            hook_parser.add_argument(
                arg_name,
                **arg_infos[arg_name],
            )
    args = parser.parse_args()
    # second element: only the hook's own (non-underscore) arguments
    return (args, {key: val for key, val in args.__dict__.items() if not key.startswith("_")})
def external_hook(hook_name, domain, **kwargs):
    # WIP: presumably meant to run per-domain external hook scripts via
    # DomainData.run_hooks — TODO confirm intended behavior.
    # NOTE(review): the module-global `config` is never assigned in main()
    # (the read_config call is commented out), so this raises NameError when
    # invoked; also hooks without a `domain` argument (startup_hook,
    # exit_hook, request_failure) cannot be dispatched through this signature.
    domain_dir = config.domains_directory / domain  # currently unused
    print(domain)
def main(argv):
    """Entry point: parse the hook CLI and dispatch to the matching handler.

    NOTE(review): work in progress — the config is never loaded (the
    read_config call is commented out), so any hook touching the global
    `config` fails with NameError. The commented dispatch also uses
    `args._fun` while parse_args() stores the handler as `_func`.
    """
    global config
    args, fun_args = parse_args()
    #config = read_config(args._config)
    #args._fun(**fun_args)
    # NOTE(review): external_hook requires a `domain` kwarg, which hooks such
    # as startup_hook do not provide — this call breaks for those hooks.
    external_hook(args._hook, **fun_args)


if __name__ == "__main__":
    main(sys.argv[1:])

@ -8,6 +8,21 @@ domains:
acme_must_staple: yes
dane_configure: yes
dane_protocol: tcp
dane_port: 443 # default for https
dane_domain: "_{{ dane_port }}._{{ dane_protocol }}.{{ domain }}"
# TODO Requires gnutls-bin to be installed
dane_command: >-
danetool --tlsa-rr
--load-pubkey=cert.pem
--hash=sha512
--host={{ domain | quote }}
--proto={{ dane_protocol | quote }}
--port={{ dane_port | quote }}
--no-domain
certificate_name: "{{ effective_domain }}"
# acme_validation_root_directory from nginx/application

@ -1,5 +1,8 @@
---
# TODO DANE TLSA
# Test with https://check.sidnlabs.nl/dane/
- name: Issue certificate for {{ certificate_name }}
command:
cmd: >-

@ -24,17 +24,6 @@
tags:
- backups
- name: Upload python helper scripts
copy:
src: "{{ item }}"
dest: "{{ global_helper_directory }}/{{ item }}"
owner: root
group: root
mode: "u=rwx,g=rx,o=rx"
validate: "{{ global_validate_python_script }}"
loop:
- check_subnet.py
- name: Build and upload template helper scripts
template:
src: "{{ item }}"

@ -1,39 +0,0 @@
---
# protecting process list of users different than root
# Source: https://wiki.archlinux.org/index.php/Security#hidepid
- name: Configure group for reading other processes
group:
state: present
name: proc
system: yes
- name: Configure proc mounting in fstab
lineinfile:
path: "{{ global_fstab_file }}"
regexp: '^\S+\s+/proc\s+proc\s+'
line: >-
proc /proc proc
nosuid,nodev,noexec,hidepid=2,gid=proc
0 0
- name: Ensure configuration directory for whitelisted services exist
file:
state: directory
path: "{{ global_systemd_configuration_directory }}/{{ item }}.d"
owner: root
group: root
mode: u=rwx,g=rx,o=rx
loop: "{{ global_proc_hidepid_service_whitelist }}"
- name: Configure whitelisted services to adapt to hidepid setting
copy:
content: |
[Service]
SupplementaryGroups=proc
dest: "{{ global_systemd_configuration_directory }}/{{ item }}.d/proc_hidepid_whitelist.conf"
owner: root
group: root
mode: u=rw,g=r,o=r
loop: "{{ global_proc_hidepid_service_whitelist }}"

@ -9,18 +9,11 @@
- name: Configure ufw
import_tasks: ufw.yml
- name: Enforce kernel security
import_tasks: kernel_hidepid.yml
tags:
- kernel_hidepid
- name: Configure locales
import_tasks: locales.yml
- name: Configure journald
import_tasks: journald.yml
tags:
- journald
- name: Configure custom facts
import_tasks: custom_facts.yml

@ -24,9 +24,7 @@
- pv # Required for scripting
- python3
- python3-apt # required for Ansible
- python3-ipy # required for helper check_subnet.py
- python3-pip
- python3-yaml # required for scripting
- sed # required for scripting
- shellcheck
- software-properties-common

@ -35,7 +35,6 @@
owner: root
group: root
mode: "u=rw,g=r,o=r"
validate: "{{ global_validate_sshd_config }}"
notify: reassemble sshd config
- name: Upload main ssh_config

@ -18,7 +18,6 @@
state: present
name:
- bind9
- python3-dnspython
- name: Create directories for zone databases
file:

@ -44,11 +44,5 @@
loop_control:
label: "{{ item.domain | default('@') | domain_relative_to(effective_domain) }}. {{ item.type }}"
delegate_to: "{{ dns_system_domain }}"
register: dns_entries_task
tags:
- dns_entries
- name: Wait for entries to become announced
wait_for:
timeout: 8
when: dns_entries_task.changed

@ -1,5 +0,0 @@
---
docker_configuration:
dns: "{{ ansible_dns.nameservers | ipv4 }}" # use only ipv4 dns servers TODO: check if docker supports ipv6
log-driver: journald # send container logs also to journald

@ -1,6 +0,0 @@
---
- name: restart docker
systemd:
name: "{{ global_docker_service_name }}"
state: restarted

@ -1,18 +0,0 @@
---
- name: Install Docker-CE
apt:
state: present
name:
- docker.io
- docker-compose
- python3-docker
- name: Configure docker daemon
copy:
content: "{{ docker_configuration | to_nice_json }}\n"
dest: "{{ global_docker_daemon_configuration_file }}"
owner: root
group: root
mode: u=rw,g=r,o=r
notify: restart docker

@ -1,18 +0,0 @@
---
# Required arguments
#domain: example.com # to derive instance_name
instance_name: "{{ domain }}" # required if domain is not set
#repo_url: https://git.example.com/app/docker.git
# Optional arguments
repo_version: master # branch/tag of git repository to use
compose_overrides: "" # syntax of common docker-compose file
main_compose_name: docker-compose.yml # to derive main_compose_path
#user_directory # to derive docker_directory
docker_directory: "{{ user_directory }}/docker:{{ instance_name }}"
repository_directory: "{{ docker_directory }}/repository"
main_compose_path: "{{ repository_directory }}/{{{ main_compose_name }}"
compose_override_path: "{{ docker_directory }}/compose.override.yml"

@ -1,6 +0,0 @@
---
allow_duplicates: yes
dependencies:
- role: docker/application

@ -1,32 +0,0 @@
---
- name: Ensure directory for repository exists
file:
state: directory
path: "{{ docker_directory }}"
owner: root
group: root
mode: u=rwx,g=rx,o=rx
- name: Clone git repository
git:
repo: "{{ repo_url }}"
dest: "{{ repository_directory }}"
version: "{{ repo_version }}"
- name: Configure docker-compose overrides
copy:
content: "{{ compose_overrides }}"
dest: "{{ compose_override_path }}"
validate: "/usr/bin/docker-compose -f {{ main_compose_path | quote }} -f %s config" # requires original compose file because override is not (always) valid only
- name: Build and start docker containers
docker_compose:
state: present
project_name: "{{ instance_name }}"
project_src: "{{ repository_directory }}"
files:
- "{{ main_compose_name }}"
- "{{ compose_override_path }}"
build: yes
recreate: smart

@ -1,33 +0,0 @@
---
# domain: example.com
# MailJet will assign a unique verification record which name and data must be given in these variables
# e.g. the record 'mailjet._12345678.example.com TXT "abcdefghijklmnopqrstuvwxyz123456"'
# resolves to name="12345678" and data="abcdefghijklmnopqrstuvwxyz123456"
# verification_name: 12345678
# verification_data: abcdefghijklmnopqrstuvwxyz123456
spf_redirect_domain: "spf.mailjet.com"
dkim_key_name: mailjet
dkim_key_data: >-
"v=DKIM1; k=rsa; "
"p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDrA+r6R11vE4/FMWpgOzNWn5"
"JIzn7/y88DTla9DZvfpbcBFGEKDqhArsa+t9V34TdFzpIss4T80F1C7BrGWnwo46"
"I7rk2y5ee9Ga3iwG5EyilrXF10hw+qk2EsTKdAHld0x24vnzW/tFWfF47eu4ricY"
"/KuIrjXQ4Xs23eCNw6vQIDAQAB"
dmarc_policy: "v=DMARC1;p=none"
# derived DNS record data
# names are relative to domain
verification_record_name: "mailjet._{{ verification_name }}"
verification_record_data: "{{ verification_data }}"
spf_record_name: "@"
spf_record_data: "v=spf1 include:{{ spf_redirect_domain }} -all"
dkim_record_name: "{{ dkim_key_name }}._domainkey"
dkim_record_data: "{{ dkim_key_data }}"
dmarc_record_name: "_dmarc"
dmarc_record_data: "{{ dmarc_policy }}"

@ -1,20 +0,0 @@
---
allow_duplicates: yes
dependencies:
- role: dns/entries
# domain
entries:
- domain: "{{ verification_record_name }}"
type: TXT
data: "{{ verification_record_data }}"
- domain: "{{ spf_record_name }}"
type: TXT
data: "{{ spf_record_data }}"
- domain: "{{ dkim_record_name }}"
type: TXT
data: "{{ dkim_record_data }}"
- domain: "{{ dmarc_record_name }}"
type: TXT
data: "{{ dmarc_record_data }}"

@ -8,8 +8,6 @@
group: root
mode: "u=rwx,g=rx,o=r"
validate: "{{ global_validate_shell_script }}"
tags:
- deploy-auto-update-script
- name: Create repository directory for {{ repo_name }}
file:

@ -21,7 +21,7 @@ fi
git remote set-url origin "$REPO";
[ -z "$GPG_FINGERPRINT" ] ||
gpg --quiet --keyserver {{ default_gpg_keyserver_hostname | quote }} --recv "$GPG_FINGERPRINT";
gpg --quiet --keyserver eu.pool.sks-keyservers.net --recv "$GPG_FINGERPRINT";
git fetch --recurse-submodules --tags > /dev/null;
TAG=$(git tag --list | grep "^$PREFIX" | sort -r | head -n 1);

@ -1,5 +0,0 @@
---
# packages: []
configuration_name: "{{ packages[0] }}"
priority: 990

@ -1,6 +0,0 @@
---
allow_duplicates: yes
dependencies:
- role: misc/deb_backports

@ -1,16 +0,0 @@
---
- name: Restrict backports for apt
copy:
dest: "/etc/apt/preferences.d/backports-{{ configuration_name }}"
owner: root
group: root
mode: "u=rw,g=r,o=r"
content: |
Package: {{ packages | join(" ") }}
Pin: release a={{ debian_backports_name }}
Pin-Priority: {{ priority }}
notify: update apt cache
- name: Flush handlers for backports priority configuration
meta: flush_handlers

@ -0,0 +1,3 @@
---
docker_version: "stable"

@ -0,0 +1,29 @@
---
- name: Add key for source for docker
apt_key:
state: present
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
url: https://download.docker.com/linux/debian/gpg
- name: Add source for docker
apt_repository:
state: present
repo: "deb [arch={{ ansible_local.dpkg.architecture }}] https://download.docker.com/linux/debian {{ ansible_distribution_release }} {{ docker_version }}"
filename: docker
update_cache: yes
- name: Install Docker-CE
apt:
state: present
name:
- docker-ce
- docker-ce-cli
- containerd.io
- python3-docker
install_recommends: no # To fix https://github.com/raspberrypi/linux/issues/3021
- name: Docker Module Python3 Dependencies
pip:
name:
- docker-compose

@ -1,12 +1,7 @@
---
notify_directory: "{{ global_deployment_directory }}/ssh_notify"
notify_script: "{{ notify_directory }}/telegram.sh"
notify_cache_directory: "{{ notify_directory }}/cache"
notify_users_directory: "{{ notify_directory }}/users"
trusted_vpn_subnet: "{{ tailscale_vpn_subnet }}"
notify_script: "{{ global_deployment_directory }}/ssh_notify/telegram.sh"
# recipient_id
bot_key: "{{ global_telegram_server_bot_key }}"
bot_key: "{{ global_ssh_notify_telegram_bot_key }}"
timeout: 10

@ -7,25 +7,13 @@
- curl
- gawk
- name: Create directories for notify script
- name: Create directory for notify script
file:
state: directory
path: "{{ item }}"
path: "{{ notify_script | dirname }}"
owner: root
group: root
mode: u=rwx,g=rx,o=
loop:
- "{{ notify_directory }}"
- "{{ notify_cache_directory }}"
- "{{ notify_users_directory }}"
- name: Configure recipient id for root user
ansible.builtin.template:
src: root_id.j2
dest: "{{ notify_users_directory }}/root"
owner: root
group: root
mode: u=rw,g=r,o=
- name: Install notify script
template:

@ -1,41 +1,14 @@
#!/bin/bash
# Modified version, original source: https://gitlab.com/snippets/1871482#note_188602535
USER_ID_DIR={{ notify_users_directory | quote }}
CACHE_DIR={{ notify_cache_directory | quote }}
USERID={{ recipient_id | quote }}
KEY={{ bot_key | quote }}
VPN_SUBNET={{ trusted_vpn_subnet | quote }}
TIMEOUT={{ timeout | quote }}
getUserId() {
USER_CONF="${USER_ID_DIR}/$1"
[[ -r "$USER_CONF" ]] && head -n 1 "$USER_CONF"
}
URL="https://api.telegram.org/bot$KEY/sendMessage"
sendMessage() {
curl -s --max-time "$TIMEOUT" -H "Content-Type: application/x-www-form-urlencoded" -d "chat_id=$1" -d "disable_web_page_preview=1" -d "parse_mode=Markdown" -d "text=$2" "$URL" >/dev/null
}
if [[ "$PAM_SERVICE" == "sshd" && "$PAM_TYPE" == "open_session" && "$PAM_USER" != "git" && -z "$TMUX" && -n "$PAM_RHOST" ]] && ! /ansible/helpers/check_subnet.py "$PAM_RHOST" "$VPN_SUBNET"; then
if [[ "$PAM_SERVICE" == "sshd" && "$PAM_TYPE" == "open_session" && "$PAM_USER" != "git" && -z "$TMUX" ]]; then
IP="$PAM_RHOST"
cache_file="${CACHE_DIR}/${IP}-${PAM_USER}"
cache_mtime=$(stat --format="%Y" "$cache_file" 2>/dev/null)
current_time=$(date +%s)
touch "$cache_file"
if (( cache_mtime > (current_time - 4*60*60) )); then
exit 0
fi
# define message text
HOSTNAME=$(hostname --fqdn)
TEXT="Successful login from [$IP](https://stat.ripe.net/app/$IP) for ${PAM_USER} @ ${HOSTNAME} ($(date "+%Y-%m-%d %H:%M"))"
# send to root
ROOT_USER_ID="$(getUserId root)"
sendMessage "$ROOT_USER_ID" "$TEXT (This message was sent to you because you are the admin.)"
# send to user if id is known
USER_ID="$(getUserId "$PAM_USER")"
if [[ -n "$USER_ID" ]]; then
sendMessage "$USER_ID" "$TEXT"
fi
TEXT="Successful login from [$IP](https://ipinfo.io/$IP) for ${PAM_USER} @ ${HOSTNAME} ($(date "+%Y-%m-%d %H:%M"))"
curl -s --max-time $TIMEOUT -d "chat_id=$USERID" -d "disable_web_page_preview=1" -d "parse_mode=Markdown" -d "text=$TEXT" "$URL" > /dev/null
fi

@ -1,26 +0,0 @@
---
# monitor_name: "echo-output-check"
instance_name: "tg-monitor-cmd-{{ monitor_name }}"
description: "{{ monitor_name }}" # should be human fancy
# command: "/bin/echo Hello" # or command_str
command_str: "{{ command | map('quote') | join(' ') }}"
use_shell: no # read https://docs.python.org/3/library/subprocess.html#security-considerations before using use_shell=yes
system_user: tg-monitor-cmd
recipient_id: "{{ default_tg_monitor_recipient_id }}"
bot_key: "{{ global_telegram_server_bot_key }}"
telegram_timeout: 10
calendar_spec: "*:0:0" # once every hour
service_description: |
Telegram Monitor Command of {{ description }}
# paths
monitoring_directory: "{{ global_deployment_directory }}/tg-monitor-cmd"
instance_directory: "{{ monitoring_directory }}/{{ monitor_name }}"
script_path: "{{ instance_directory }}/script.py"
data_path: "{{ instance_directory }}/data"

@ -1,8 +0,0 @@
---
allow_duplicates: yes
dependencies:
- role: misc/system_user
# system_user
user_directory: "{{ monitoring_directory }}"

@ -1,69 +0,0 @@
---
- name: Create directory for monitoring scripts and data
file:
state: directory
path: "{{ instance_directory }}"
owner: root
group: "{{ system_user }}"
mode: u=rwx,g=rx,o=
- name: Deploy script
template:
src: monitor.py
dest: "{{ script_path }}"
owner: root
group: "{{ system_user }}"
mode: u=rwx,g=rx,o=
register: script_task
- name: Create empty data file
copy:
content: ""
dest: "{{ data_path }}"
force: no # do not overwrite
- name: Ensure permissions on data file
file:
state: file
path: "{{ data_path }}"
owner: root
group: "{{ system_user }}"
mode: u=rw,g=rw,o=
- name: Register service for monitor
template:
src: monitor.service
dest: "{{ global_systemd_configuration_directory }}/{{ instance_name }}.service"
owner: root
group: root
mode: u=rw,g=r,o=
register: service_task
- name: Run service for initial test
systemd:
state: started
daemon_reload: yes
name: "{{ instance_name }}.service"
when: script_task.changed or service_task.changed
- name: Register timer for monitor service
template:
src: monitor.timer
dest: "{{ global_systemd_configuration_directory }}/{{ instance_name }}.timer"
owner: root
group: root
mode: u=rw,g=r,o=
register: timer_task
- name: Restart timer for monitor
systemd:
state: restarted
daemon_reload: yes
name: "{{ instance_name }}.timer"
when: timer_task.changed
- name: Enable timer for monitor
systemd:
name: "{{ instance_name }}.timer"
enabled: yes

@ -1,64 +0,0 @@
#!/usr/bin/env python3
# Jinja2-templated monitor script (rendered by Ansible's template module):
# runs a configured command, hashes its output, and sends a Telegram message
# whenever the output changed since the previous run.
# imports
from pathlib import Path
from hashlib import sha256
import shlex
import subprocess
import sys
import requests
# config — the {{ ... }} placeholders are substituted at deploy time
MONITOR_DESC = """{{ description }}"""
MONITOR_COMMAND = """{{ command_str }}"""
USE_SHELL = {{ use_shell | ternary('True', 'False') }}
DATA_PATH = Path("""{{ data_path }}""")
TG_ENDPOINT = "https://api.telegram.org"
TG_KEY = """{{ bot_key }}"""
TG_RECIPIENT = """{{ recipient_id }}"""
# helpers
def print_error(*args, **kwargs):
    # print() to stderr; otherwise same signature as print()
    print(*args, file=sys.stderr, **kwargs)
def tg_msg(msg: str) -> None:
    # Send msg to the configured Telegram recipient via the bot API.
    # Raises Exception on any HTTP status >= 400.
    print(f"Sending message using telegram:\n{msg}")
    ret = requests.post(f"{TG_ENDPOINT}/bot{TG_KEY}/sendMessage", data={
        "chat_id": TG_RECIPIENT,
        "disable_web_page_preview": 1,
        "parse_mode": "Markdown",
        "text": msg,
    })
    if 400 <= ret.status_code:
        raise Exception(f"Sending telegram message failed: {ret.status_code} {ret.text}")
def run_cmd(cmd: list, **kwargs) -> str:
    # Run cmd and return its stdout; raises CalledProcessError on non-zero exit.
    # NOTE(review): when invoked with shell=True (see check_cmd), cmd is
    # actually a str, not a list — subprocess.run accepts both.
    return subprocess.run(cmd, capture_output=True, check=True, text=True, **kwargs).stdout
def hash_data(data: str) -> str:
    # sha256 hex digest of data (annotation fixed: was -> bool, but
    # hexdigest() returns a str)
    return sha256(data.encode("utf-8")).hexdigest()
def check_cmd(cmd_str: str, use_shell: bool, data_file: Path) -> "str | None":
    # Run the command and compare the hash of its output with the stored one.
    # Returns the new output when it changed (persisting the new hash to
    # data_file), or None when the output is unchanged.
    cmd = shlex.split(cmd_str) if not use_shell else cmd_str
    old_hash = data_file.read_text() if data_file.exists() else None
    new_data = run_cmd(cmd, shell=use_shell)
    new_hash = hash_data(new_data)
    if old_hash == new_hash:
        return None
    data_file.write_text(new_hash)
    return new_data
if __name__ == "__main__":
    try:
        data = check_cmd(MONITOR_COMMAND, USE_SHELL, DATA_PATH)
        if data:
            tg_msg(f"{MONITOR_DESC} changed to:\n```\n{data}\n```")
    except Exception as e:
        # report the failure via Telegram as well, then re-raise so the
        # systemd service records the failure
        tg_msg(f"Got exception while running command of {MONITOR_DESC}: {str(e)}")
        raise e

@ -1,23 +0,0 @@
[Unit]
Description={{ service_description }}
[Service]
Type=simple
ExecStart={{ script_path | quote }}
User={{ system_user }}
Group={{ system_user }}
UMask=007
PrivateTmp=yes
PrivateDevices=yes
ProtectHome=yes
ReadOnlyPaths=/
ReadWritePaths=-{{ data_path }}
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectControlGroups=true
RestrictRealtime=true
RestrictNamespaces=true
ProtectSystem=full

@ -1,8 +0,0 @@
[Unit]
Description=Timer of {{ service_description }}
[Timer]
OnCalendar={{ calendar_spec }}
[Install]
WantedBy=multi-user.target

@ -1,8 +1,6 @@
---
- meta: flush_handlers
tags:
- mysql_database
- name: Create SQL user {{ database_user }}
mysql_user:
@ -15,8 +13,6 @@
login_unix_socket: "{{ global_mysql_socket_path }}"
login_user: root
login_password: "{{ mysql_root_password }}"
tags:
- mysql_database
- name: Create SQL database {{ database_name }}
mysql_db:
@ -25,18 +21,7 @@
login_user: root
login_password: "{{ mysql_root_password }}"
register: create_database
tags:
- mysql_database
- name: Import SQL database template on creation
include_tasks:
file: import.yml
apply:
tags:
- mysql_database
when:
- create_database is defined
- create_database.changed
- database_template is defined
tags:
- always
include_tasks: import.yml
when: create_database.changed and database_template is defined

@ -36,14 +36,12 @@
with_items: "{{ nginx_snippets }}"
notify: reload nginx
- name: Configure dns resolver addresses for nginx
copy:
content: |
resolver {{ ansible_dns.nameservers | ipwrap | join(' ') }};
dest: "{{ nginx_snippets_directory }}/resolver.conf"
owner: root
group: root
mode: u=rwx,g=rx,o=rx
- name: Retrieve dns resolver addresses
shell: >-
echo resolver $(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf) ';'
> {{ nginx_snippets_directory | quote }}/resolver.conf
args:
creates: "{{ nginx_snippets_directory }}/resolver.conf"
notify: reload nginx
- name: Configure validation directory

@ -1,4 +1,4 @@
---
# domain: old.example.com
# dest: new.example.com
domain: example.com
dest: example.com

@ -3,7 +3,5 @@
allow_duplicates: yes
dependencies:
- role: nginx/server
# domain
directives: |
return 301 https://{{ dest }}$request_uri;
- role: acme/certificate
- role: nginx/application

@ -0,0 +1,12 @@
---
- name: Enable forwarding {{ domain }} to {{ dest }}
template:
src: forward.conf
dest: "{{ nginx_sites_directory }}/{{ domain }}"
owner: root
group: root
mode: "u=rw,g=r,o=r"
notify: reload nginx
tags:
- certificate

@ -0,0 +1,14 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name {{ effective_domain }};
ssl on;
ssl_certificate {{ acme_fullchain_location }};
ssl_certificate_key {{ acme_key_location }};
include {{ nginx_snippets_directory }}/https;
include {{ nginx_snippets_directory }}/global;
return 301 https://{{ dest }}$request_uri;
}

@ -9,13 +9,6 @@ socket: "{{ socket_directory }}/socket"
allow_overwrite_includes: no
includes: []
env_vars: {}
admin_values: {}
memory_limit: 0 # unlimited
# status_page_path: "/status" # Disabled by default
default_admin_values:
memory_limit: None # unlimited
enforced_admin_values: {}
effective_admin_values: "{{ default_admin_values | combine(admin_values) | combine(enforced_admin_values) }}"

@ -24,5 +24,3 @@
group: root
mode: u=rw,g=r,o=
notify: "reload php-fpm"
tags:
- nginx-php-pool-config

@ -37,6 +37,6 @@ env[{{ key }}] = {{ val | quote }}
{% if not allow_overwrite_includes %}
php_admin_value[include_path] = ".:{{ includes | join(':') }}:/usr/share/php"
{% endif %}
{% for key, value in admin_values.items() %}{% if value != None %}
php_admin_value[{{ key }}] = {{ value | quote }}
{% endif %}{% endfor %}
{% if memory_limit is defined and memory_limit %}
php_admin_value[memory_limit] = {{ memory_limit }}
{% endif %}

@ -3,6 +3,7 @@ server {
listen [::]:443 ssl http2;
server_name {{ effective_domain }};
ssl on;
ssl_certificate {{ acme_fullchain_location }};
ssl_certificate_key {{ acme_key_location }};

@ -10,4 +10,3 @@
notify: reload nginx
tags:
- certificate
- nginx-server-config

@ -3,6 +3,7 @@ server {
listen [::]:443 ssl http2;
server_name {{ effective_domain }};
ssl on;
ssl_certificate {{ acme_fullchain_location }};
ssl_certificate_key {{ acme_key_location }};

@ -1,9 +0,0 @@
---
instance_name: "drone-runner" # must be unique if multiple runners deployed to machine
docker_image: "drone/drone-runner-docker:1"
# drone_server_host: ci.example.com
drone_rpc_secret: "{{ lookup('file', 'credentials/' + drone_server_host + '/rpc_secret') }}" # sync with server/drone.io/server, because must be known to all runners
drone_runner_capacity: 4
drone_runner_name: "{{ inventory_hostname }}"

@ -1,6 +0,0 @@
---
allow_duplicates: yes
dependencies:
- role: docker/application

@ -1,21 +0,0 @@
---
# Deploys a Drone CI runner as a docker-compose project.
# Expects from defaults/main.yml: instance_name, docker_image,
# drone_server_host, drone_rpc_secret, drone_runner_capacity, drone_runner_name.
- name: Start drone runner using docker-compose
  docker_compose:
    state: present
    project_name: "{{ instance_name }}"
    definition:
      version: '2'
      services:
        drone-runner:
          image: "{{ docker_image }}"
          restart: always
          environment:
            DRONE_RPC_PROTO: https
            DRONE_RPC_HOST: "{{ drone_server_host }}"
            DRONE_RPC_SECRET: "{{ drone_rpc_secret }}"
            # fixed typo: was DRONE_RUNER_CAPACITY (single N), which the
            # runner does not recognize, silently falling back to its default
            DRONE_RUNNER_CAPACITY: "{{ drone_runner_capacity }}"
            DRONE_RUNNER_NAME: "{{ drone_runner_name }}"
            # pin the docker API version the runner talks to the local daemon with
            DOCKER_API_VERSION: "1.39"
          volumes:
            - "/var/run/docker.sock:/var/run/docker.sock"

@ -1,21 +0,0 @@
---
# domain: ci.example.com
docker_image: "drone/drone:1"
# TODO Bind to socket path
# bind_port
#!socket_directory: "{{ user_directory }}/socket"
#!socket_path: "{{ socket_directory }}/socket"
# gitea_server_url: https://git.example.com/gitea
# gitea_client_id generated by gitea
# gitea_client_secret generated by gitea
instance_directory: "{{ global_webservers_directory }}/{{ domain }}"
data_directory: "{{ instance_directory }}/data"
drone_data_directory: "{{ data_directory }}/drone_volume"
drone_admin_user: "{{ global_username }}"
drone_rpc_secret: "{{ lookup('password', 'credentials/' + domain + '/rpc_secret chars=digits,ascii_letters length=80') }}" # sync with server/drone.io/runner, because must be known to all runners
drone_database_secret: "{{ lookup('password', 'credentials/' + domain + '/database_secret length=32 chars=0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f') }}"

@ -1,15 +0,0 @@
---
allow_duplicates: yes
dependencies:
- role: docker/application
- role: misc/backup_files
# domain
backup_directory: "{{ data_directory }}"
- role: misc/hdd_dir
# domain
hdd_source_dir: "{{ data_directory }}"
- role: nginx/proxy
# domain
backend_port: "{{ bind_port }}"

@ -1,47 +0,0 @@
---
- name: Create instance directory
file:
state: directory
path: "{{ instance_directory }}"
owner: root
group: root
mode: u=rwx,g=rx,o=
- name: Create general data directory
file:
state: directory
path: "{{ data_directory }}"
owner: root
group: root
mode: u=rwx,g=rx,o=
- name: Create data directory for drone volume
file:
state: directory
path: "{{ drone_data_directory }}"
# let docker/drone.io manage control permissions
- name: Start drone server using docker-compose
docker_compose:
state: present
project_name: "{{ domain }}"
definition:
version: '2'
services:
drone-server:
image: "{{ docker_image }}"
restart: always
environment:
DRONE_DATABASE_SECRET: "{{ drone_database_secret }}"
DRONE_GITEA_SERVER: "{{ gitea_server_url }}"
DRONE_GITEA_CLIENT_ID: "{{ gitea_client_id }}"
DRONE_GITEA_CLIENT_SECRET: "{{ gitea_client_secret }}"
DRONE_RPC_SECRET: "{{ drone_rpc_secret }}"
DRONE_SERVER_HOST: "{{ domain }}"
DRONE_SERVER_PROTO: https
DRONE_USER_CREATE: "username:{{ drone_admin_user }},admin:true"
ports:
- "127.0.0.1:{{ bind_port }}:80" # for nginx reverse proxy
volumes:
- "{{ data_directory }}:/data"

@ -6,7 +6,6 @@
name:
- git-core
- g++
- libmariadb-dev
- python-dev
- python-virtualenv

@ -64,8 +64,6 @@
owner: root
group: root
mode: "u=rwx,g=rx,o=r"
tags:
- deploy-auto-update-script
- name: Download gitea
command: "{{ gitea_update_script_path }}"

@ -10,7 +10,7 @@ readonly SERVICE_NAME={{ gitea_service_name | quote }};
set -euxo pipefail;
gpg --quiet --keyserver {{ default_gpg_keyserver_hostname | quote }} --recv "$GPG_FINGERPRINT";
gpg --quiet --keyserver eu.pool.sks-keyservers.net --recv "$GPG_FINGERPRINT";
function error() {
echo "$@" >&2;

@ -31,8 +31,6 @@
owner: root
group: root
mode: "u=rwx,g=rx,o=r"
tags:
- deploy-auto-update-script
- name: Download linx
command: "{{ update_script_path }}"

@ -13,7 +13,6 @@ nextcloud_release_remote_signature: "{{ nextcloud_release_remote }}.asc"
user_directory: "{{ global_webservers_directory }}/{{ domain }}"
nextcloud_installation_directory: "{{ user_directory }}/nextcloud" # directory name of inside downloaded tar zip
nextcloud_data_directory: "{{ user_directory }}/data"
scripts_directory: "{{ user_directory }}/scripts"
nextcloud_keyring: "{{ user_directory }}/nextcloud.gpg"
nextcloud_release_file: "{{ user_directory }}/nextcloud.tar.bz2"
@ -33,14 +32,8 @@ database_user: "{{ system_user }}"
nextcloud_admin_user: "admin"
nextcloud_admin_pass: "{{ lookup('password', 'credentials/' + inventory_hostname + '/' + domain + '/' + nextcloud_admin_user + ' length=80') }}"
default_phone_region: "DE"
files_chunk_size: "{{ 10 * 1024 * 1024 }}" # bytes
reset_password_link: "disabled" # "" means internal enabled, "disabled" means disabled, <link> means using this link
nginx_max_size: "{{ php_post_max_size|int + 4 * 1024 }}" # appending encryption overhead
php_post_max_size: "{{ php_upload_max_size|int + 4 * 1024 }}" # appending HTTP overhead
php_upload_max_size: "{{ files_chunk_size|int + 16 * 1024 }}" # appending internal overhead
import_config:
system:
# domain
@ -48,7 +41,7 @@ import_config:
trusted_domains:
- "{{ effective_domain }}"
# NextCloud
lost_password_link: "{{ reset_password_link }}"
lost_password_link: "{{ reset_password_link }}" # disallow custom password reset
# database
dbtype: mysql
dbhost: localhost
@ -63,11 +56,6 @@ import_config:
redis:
host: "{{ redis_socket_path }}"
port: 0
# user experience
default_phone_region: "{{ default_phone_region }}"
apps:
files:
max_chunk_size: "{{ files_chunk_size }}"
enabled_apps_list:
- accessibility

@ -20,10 +20,7 @@ dependencies:
src: "{{ nextcloud_installation_directory }}"
includes:
- "{{ nextcloud_installation_directory }}/apps"
admin_values:
memory_limit: 1G
post_max_size: "{{ php_post_max_size }}"
upload_max_size: "{{ php_upload_max_size }}"
memory_limit: 1G
- role: redis/instance
# domain
# system_user
@ -44,7 +41,7 @@ dependencies:
rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json last;
rewrite ^/.well-known/webfinger /public.php?service=webfinger last;
client_max_body_size {{ nginx_max_size }};
client_max_body_size 10240M;
#fastcgi_buffers 64 4K;
location / {
rewrite ^ /index.php;

@ -56,10 +56,7 @@
- name: Install Nextcloud
become_user: "{{ system_user }}"
command: >-
/usr/bin/php
--define apc.enable_cli=1
occ
maintenance:install
/usr/bin/php occ maintenance:install
--database mysql
--database-name {{ database_name | quote }}
--database-user {{ database_user | quote }}
@ -85,41 +82,20 @@
- name: Import additional Nextcloud configuration
become_user: "{{ system_user }}"
command: >-
/usr/bin/php
--define apc.enable_cli=1
occ
/usr/bin/php occ
config:import
{{ import_config_file | quote }}
args:
chdir: "{{ nextcloud_installation_directory }}"
creates: "{{ nextcloud_config }}"
when: import_config_file_task.changed
tags:
- nextcloud_config
- name: Create scripts directories
file:
state: directory
path: "{{ scripts_directory }}"
owner: "{{ system_user }}"
group: "{{ system_user }}"
mode: "u=rwx,g=rx,o="
- name: Install helper scripts
template:
src: "scripts/{{ item }}"
dest: "{{ scripts_directory }}/{{ item }}"
owner: "{{ system_user }}"
group: "{{ system_user }}"
mode: "u=rwx,g=rx,o="
loop:
- extract_app_list.py
- name: Install Nextcloud apps
become_user: "{{ system_user }}"
command: >-
/usr/bin/php
--define apc.enable_cli=1
occ
/usr/bin/php occ
app:install
{{ item | quote }}
args:
@ -137,9 +113,7 @@
- name: Disable some Nextcloud apps
become_user: "{{ system_user }}"
command: >-
/usr/bin/php
--define apc.enable_cli=1
occ
/usr/bin/php occ
app:disable
{{ item | quote }}
args:

@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Jinja2-templated Nextcloud update helper (rendered by Ansible):
# determines the latest release tag from the GitHub releases Atom feed,
# verifies its GPG signature, and only then updates composer dependencies.
readonly CHECKSUM_TYPE="sha256";
readonly CHECKSUM_APP="${CHECKSUM_TYPE}sum";
readonly GPG_FINGERPRINT="{{ nextcloud_gpg_fingerprint }}";
readonly NEXTCLOUD_USER="{{ system_user }}";
readonly NEXTCLOUD_DIR="{{ nextcloud_installation_directory }}";
readonly NEXTCLOUD_GIT_REPO="{{ nextcloud_source_repo }}";
readonly GIT_REPO="https://github.com/nextcloud/server";
readonly VERSION_REGEX='v\d+(\.\d+)*';
set -e;
gpg --quiet --keyserver eu.pool.sks-keyservers.net --recv "$GPG_FINGERPRINT";
function error() {
    echo "$@" >&2;
}
function as() {
    # run a command as the nextcloud system user
    sudo -u "$NEXTCLOUD_USER" "$@";
}
cd "$NEXTCLOUD_DIR";
# Latest release tag from the Atom feed.
# Fixed: added the missing line continuations inside $( ) (a bare leading
# "|" on the next line is a syntax error), replaced the never-closed
# lookbehind group with \K, and sort version tags with --version-sort
# (--numeric-sort mis-orders strings like v21.0.10 vs v21.0.9).
TAG="$(curl --silent "$GIT_REPO/releases.atom" \
    | grep --only-matching --perl-regexp '(?<=\s)<link.*/>' \
    | grep --only-matching --perl-regexp 'href="'"$GIT_REPO"'/releases/tag/\K'"$VERSION_REGEX"'(?="/>)' \
    | sort --reverse --version-sort \
    | head --lines=1)";
# Fixed: "g verify-tag" -> "git verify-tag", and the result above is now
# stored in TAG (it was assigned to "version" but read as "$TAG").
if git verify-tag --raw "$TAG" 2>&1 | grep --fixed-strings "[GNUPG:] VALIDSIG $GPG_FINGERPRINT " > /dev/null; then
    as composer update;
else
    error "Invalid or missing signature for $TAG";
    exit 1;
fi

@ -1,33 +0,0 @@
#!/usr/bin/env python3
"""Print Nextcloud app lists as Ansible-variable YAML blocks.

Runs ``occ app:list --output=json`` (prefixed with sudo when not already
running as the Nextcloud system user) and emits one ``<state>_apps_list:``
block per state, one ``- <app>`` entry per app.
"""
import getpass
import subprocess
import json
import sys
NC_USER = """{{ system_user }}"""
NC_INSTALL_DIR = """{{ nextcloud_installation_directory }}"""
if __name__ == "__main__":
    command = ["/usr/bin/env", "php", "occ", "app:list", "--output=json"]
    # escalate to the nextcloud system user unless we already are that user
    if getpass.getuser() != NC_USER:
        command = ["sudo", "-u", NC_USER] + command
    try:
        proc = subprocess.run(command, capture_output=True, check=True, cwd=NC_INSTALL_DIR, text=True)
    except subprocess.CalledProcessError as err:
        # surface occ's stderr before failing loudly
        print(err.stderr, file=sys.stderr)
        raise err
    app_states = json.loads(proc.stdout)
    # descending state order (e.g. "enabled" before "disabled")
    for state, app_names in sorted(app_states.items(), reverse=True):
        print(f"{state}_apps_list:")
        for app in app_names:
            print(f"  - {app}")

@ -8,9 +8,9 @@ spotme_service_name: "{{ domain }}.service"
spotme_user_directory: "{{ global_webservers_directory }}/{{ domain }}"
spotme_installation_directory: "{{ spotme_user_directory }}/server"
service_environment_file: "{{ user_directory }}/{{ spotme_service_name }}.env"
service_environment_file: "{{ user_directory }}/{{ service_name }}.env"
database_user: "{{ spotme_system_user }}"
database_user: "spotme"
# database_pass from mysql/database
# database_name from mysql/database

@ -4,7 +4,7 @@
apt:
state: present
name:
- openjdk-11-jdk-headless
- openjdk-8-jdk-headless
update_cache: yes
# TODO Role for Git Username / Password Configuration
@ -22,7 +22,6 @@
args:
stdin: "{{ spotme_remote_user | urlencode }}"
register: remote_user_encoded
check_mode: no # only converts some data, does not change anything
changed_when: False
- name: Encode password for SpotMe remote source
@ -31,7 +30,6 @@
args:
stdin: "{{ spotme_remote_pass | urlencode }}"
register: remote_pass_encoded
check_mode: no # only converts some data, does not change anything
changed_when: False
- name: Store credentials for SpotMe remote source

@ -122,7 +122,7 @@
// *** Cookies and login sessions ***
// **********************************
define('SESSION_COOKIE_LIFETIME', 2592000);
define('SESSION_COOKIE_LIFETIME', 86400);
// Default lifetime of a session (e.g. login) cookie. In seconds,
// 0 means cookie will be deleted when browser closes.

@ -11,14 +11,11 @@
roles:
- role: bootstrap
- name: Configure common roles expected by others
hosts: common_roles
- hosts: all
roles:
- role: hostname
fqdn: "{{ inventory_hostname }}"
- role: common
tags:
- common
- role: fail2ban/application
- role: account
username: "{{ global_username }}"
@ -27,18 +24,16 @@
sudo: yes
# Enroll certain features not on ansible test/debug servers
- hosts: common_roles:!ansible_debug
- hosts: all:!ansible_debug
roles:
- role: misc/ssh_tg_notify
recipient_id: "{{ zocker_telegram_id }}"
tags:
- ssh_tg_notify
# Group specific configurations
- name: Include configuration for group bwcloud
import_playbook: playbooks/group_bwcloud.yml
- name: Include configuration for group dev_surface3
import_playbook: playbooks/group_dev_surface3.yml
- name: Include configuration for group surface3
import_playbook: playbooks/group_surface3.yml
- name: Include configuration for group os_raspbian
import_playbook: playbooks/group_os_raspbian.yml

Loading…
Cancel
Save