Compare commits
1 commit
master...dehydrated
| Author | SHA1 | Date |
|---|---|---|
| Felix Stupp | 5b312bfbad | 4 years ago |
@@ -1,3 +0,0 @@
[submodule "misc/mitogen"]
	path = misc/mitogen
	url = https://git.banananet.work/archive/mitogen.git
@@ -1,12 +0,0 @@
#!/bin/echo You need to source this script! Use something like: source

# (re-)create env if required (e.g. requirements.txt changed)
make setup

# enable coloring on these tools
export ANSIBLE_FORCE_COLORS=1
export PY_COLORS=1

# enter venv
. ./venv/bin/activate
@@ -1,92 +0,0 @@
---
# === Constants defined by OS packages / applications
# separated into arbitrary system/kernel and applications/packages
# each group is sorted alphabetically

# general system/kernel constants

global_fstab_file: "/etc/fstab"

global_resolv_conf: "/etc/resolv.conf"
global_pamd: "/etc/pam.d"

global_proc_hidepid_service_whitelist:
  - "{{ global_systemd_login_service_name }}"
  - "{{ global_systemd_user_service_name }}"

global_users_directory: "/home"

# application constants

global_ansible_facts_directory: "/etc/ansible/facts.d"

global_apparmor_profiles_directory: "/etc/apparmor.d"
global_apparmor_profiles_local_directory: "{{ global_apparmor_profiles_directory }}/local"

global_apt_sources_directory: "/etc/apt/sources.list.d"

global_bind_service_name: "named.service"
global_bind_configuration_directory: "/etc/bind"
global_bind_data_directory: "/var/lib/bind"

global_certbot_configuration_directory: "/etc/letsencrypt"
global_certbot_configuration_file: "{{ global_certbot_configuration_directory }}/cli.ini"
global_certbot_certificates_directory: "/etc/letsencrypt/live"

global_chromium_configuration_directory: "/etc/chromium"
global_chromium_managed_policies_file: "{{ global_chromium_configuration_directory }}/policies/managed/managed_policies.json"

global_dnsmasq_configuration_file: "/etc/dnsmasq.conf"
global_dnsmasq_configuration_directory: "/etc/dnsmasq.d"

global_docker_service_name: "docker.service"
global_docker_configuration_directory: "/etc/docker"
global_docker_daemon_configuration_file: "{{ global_docker_configuration_directory }}/daemon.json"

global_fail2ban_service_name: "fail2ban.service"
global_fail2ban_system_directory: "/etc/fail2ban"
global_fail2ban_configuration_directory: "{{ global_fail2ban_system_directory }}/fail2ban.d"
global_fail2ban_actions_directory: "{{ global_fail2ban_system_directory }}/action.d"
global_fail2ban_filters_directory: "{{ global_fail2ban_system_directory }}/filter.d"
global_fail2ban_jails_directory: "{{ global_fail2ban_system_directory }}/jail.d"

global_interfaces_directory: "/etc/network/interfaces.d"

global_lightdm_configuration_directory: "/etc/lightdm"

global_log_directory: "/var/log"

global_mysql_socket_path: "/var/run/mysqld/mysqld.sock"

global_nfs_port: "2049" # for version 4
global_nfs_directory: "{{ global_webservers_directory }}/nfs"

global_nginx_system_user: www-data
global_nginx_service_name: "nginx.service"
global_nginx_installation_directory: "/etc/nginx"

global_plymouth_themes_directory: "/usr/share/plymouth/themes"

global_redis_configuration_directory: "/etc/redis"
global_redis_service_name: "redis-server.service"

global_ssh_service_name: "sshd.service"
global_ssh_configuration_directory: "/etc/ssh/"
global_ssh_configuration_environment_directory: "{{ global_configuration_environment_directory }}/ssh"
global_ssh_configuration_link_name: "config"
global_ssh_configuration_link: "{{ global_ssh_configuration_environment_directory }}/{{ global_ssh_configuration_link_name }}"

global_sudoers_directory: "/etc/sudoers.d"

global_wireguard_configuration_directory: "/etc/wireguard"

global_systemd_preset_directory: "/lib/systemd/system"
global_systemd_configuration_directory: "/etc/systemd/system"
global_systemd_journal_configuration_directory: "/etc/systemd/journald.conf.d"
global_systemd_login_service_name: "systemd-logind.service"
global_systemd_network_directory: "/etc/systemd/network"
global_systemd_network_service_name: "systemd-networkd.service"
global_systemd_network_system_user: "systemd-network"
global_systemd_user_service_name: "user@.service"

global_zsh_antigen_source: "/usr/share/zsh-antigen/antigen.zsh"
@@ -1,38 +1,24 @@
$ANSIBLE_VAULT;1.1;AES256
66386430666466343732636663313264663933613563643231323066383261616361353234366534
3337323862636537663538343062333064383838653138340a343662326139396634343261396230
65666533626263386465616466663431333339613162373766363937333564323233353930303836
6332366434333437370a666636656534653031303237633863356630393836386137353837303039
33323433343065313135323462316163343364656562303962373634656666353235363537366361
35383031343138376439316365306337636264346434363863623765356161663133653363633533
30613430613333666561303935663833396265363931653133373934363263323362333839366662
62373533643535323430353032386431346462363566323637613736313336373665666631326633
34343830653535623262333730356164636131623735333839663336623735353138313962656564
35643231303461653236373665613339313332386535376665623130646637626531306366316266
36613961653162633639333536333434383332363061653062396163623664316363303561636634
63353263313730313133613537386536616338323533303666653131656262323763616432343664
65626130383432326663303238383233633265393936633934623634366663333862643562383736
38313265306138303431363634656334656530393539636232613962386238613963643161306234
35646136613764353138666431363337393765343233303332663530336261316331383665643536
30663831656566663239656565613535316438666632663236666636383762333432303964333833
33353661623965633630383536613633313437666430623565636635633634646338633666356234
66323966396638316236626234326364633366666266643832333066383735306330366234383533
63386563626264303234303832356662363732356438306234656561373637376137346565653966
65373465303032393939383833386333353461633732623232393761353236306331626164386238
35353464373732346537626464663532653434386564636532623838383937363463633332366534
35613137613933636434336432653964353536303366353832356161653535353165613964333339
30646139316661656363383832313765326234316134393732636262373730386562626233633439
39643862393336653533373731333938343164363233323638353265656139333465363831333431
62323332396537656432343235633735636631646334306265376566343364646566396563386537
32366335313335666436613531356535623364336135636665623233363763663537393538666233
35643431396430336533396137303763333332626439316265383138663639343061656631626463
39386461303866373862626361373836643437346365343531323264386631313834613166393833
34656537326531643962636436393236393537373935346135663335656666343430313335373633
65393066636233653262623031383564393038353730393363356561363936356366636330386264
37383064636433646265396365373330613833623338666638653532363061316261343639323937
33623665316161353035366438663337346532653262366434366138306364343966653235383636
38666263623633356463373963636135656637613164353265613635353733316138626637623364
34386338633363653231643334323161653933613864636338626638323035323233643137353964
35666332346264613136343039336261303964343237373136393139376234363833376164643839
33316566353033363333633966643366303537653766623935643933373062313830316166303961
37393638653064623935356564303236343766393939323561356461656636626534
63343063643236623632373437303138303636643862323961633739653032376333386666626162
3738366330393339303030373430653162616138383261370a393738326638663064323963366338
31303332353439666363653839353932333338313830366566653534343739613036306465656137
6366353730656230320a633334306135653163313435303037343138326137383765363666376262
63353237396637386663663535646363366639313961343037656162336664343832656331393535
33353534653738346331313034666237656630613439656164343234333161353939356435656634
63396134356138323064313365366537336137646432636131353734343130653066383862346461
66383364656233393839666462336661643730646633633135626331643366666135353437346633
37633838373339363332633134386637303561366238353538643837386332636439383034333434
31363866373161636431383862326137306466613361356337646133643630373332666434666133
66366564383161376234343135616531613238613131363834313764363366326163333562303061
31333734333336663037313333383632373130313631626533623139666265646530386464616135
30363462623136393730616337306163663763616430303530306361393834303661613864313830
33616161323535323865626639323132333131626662626161623234613136663961393063303739
61353632373265363761636235313430383237363938396534666663353336383234663561373833
63666364313539393831353833393763326432303035343830386663633534356362316130353866
64383564666431343333626332356666633231653239363130386265363164356664326633623065
61393636613162376334646661663232626534326562613235633434656466303435393233613233
36666463316331366365643861633362386466663863316564656439633364616566373062306633
66326464326138306130666631313830643236663134363166383264366139643861393565623537
33376165396531323863626635323237363665363539613963376537373635323365616234313762
66313934623631386432633861383136386464353932316534363836613038313934356331363737
333931356137336563653162316563306636
@@ -1,14 +0,0 @@
---

bootstrap_user: "root"

global_dns_upstream_servers:
  - 213.133.100.100
  - 213.133.99.99
  - 213.133.98.98
  - "2a01:4f8:0:1::add:1010"
  - "2a01:4f8:0:1::add:9898"
  - "2a01:4f8:0:1::add:9999"

debian_repository_mirror: "http://mirror.hetzner.de/debian/packages"
debian_repository_use_sources: no # Not supported by Hetzner mirrors, but also not required
@@ -1,212 +1,27 @@
#!/usr/bin/env python3

import json
import re
import sys
import yaml


class LoopPrevention:

    def __init__(self, obj):
        self.__obj = obj
        self.__entered = False

    def __enter__(self):
        if self.__entered:
            raise Exception("detected and prevented infinite loop")
        self.__entered = True
        return self

    def __exit__(self, *args):
        self.__entered = False
        return False # forward exception


class Group:

    def __init__(self, inv):
        self.__inv = inv
        self.__hosts = set()
        self.__children = set()

    def add_host(self, host):
        if not host in self.__hosts:
            self.__hosts.add(host)

    def add_hosts(self, hosts):
        self.__hosts |= hosts

    @property
    def direct_hosts(self):
        return set(self.__hosts)

    @property
    def all_hosts(self):
        with LoopPrevention(self):
            hosts = self.direct_hosts
            for child in self.children:
                hosts |= self.__inv._group(child).all_hosts
            return hosts

    def add_child(self, group_name):
        if not group_name in self.__children:
            self.__children.add(group_name)

    @property
    def children(self):
        return set(self.__children)

    def export(self):
        return { "hosts": list(self.__hosts), "vars": dict(), "children": list(self.__children) }


class Inventory:

    def __init__(self):
        self.__groups = dict()
        self.add_group("all")

    def __group(self, group_name):
        if group_name not in self.__groups:
            self.__groups[group_name] = Group(self)
        return self.__groups[group_name]

    def _group(self, group_name):
        if group_name not in self.__groups:
            raise Exception(f'Unknown group "{group_name}"')
        return self.__groups[group_name]

    def add_host(self, host):
        self.__group("all").add_host(host)

    def add_hosts(self, hosts):
        self.__group("all").add_hosts(hosts)

    def add_group(self, group_name):
        self.__group(group_name)

    def add_host_to_group(self, host, group_name):
        self.add_host(host)
        self.__group(group_name).add_host(host)

    def add_hosts_to_group(self, hosts, group_name):
        self.add_hosts(hosts)
        self.__group(group_name).add_hosts(hosts)

    def add_child_to_group(self, child_name, parent_name):
        self.__group(child_name)
        self.__group(parent_name).add_child(child_name)

    def all_hosts_of_group(self, group_name):
        return self._group(group_name).all_hosts

    def export(self):
        meta_dict = {
            "_meta": {
                "hostvars": {},
            },
        }
        group_dict = { group_name: group.export() for group_name, group in self.__groups.items() }
        return { **meta_dict, **group_dict }


def _read_yaml(path):
    with open(path, 'r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as e:
            return AnsibleError(e)

GROUPS_PATTERN_OPS = {
    "": lambda old, add: old | add,
    "&": lambda old, add: old & add,
    "!": lambda old, add: old - add,
}
GROUPS_PATTERN_OPS_NAMES = "".join(GROUPS_PATTERN_OPS.keys())
GROUPS_PATTERN = re.compile(r'^(?P<operation>[' + GROUPS_PATTERN_OPS_NAMES + r']?)(?P<group_name>[^' + GROUPS_PATTERN_OPS_NAMES + r'].*)$')
def _parse_group_aliasses(inv, data):
    for group, syntax in data.items():
        if isinstance(syntax, str):
            group_list = syntax.split(':')
        elif isinstance(syntax, list):
            group_list = syntax
        else:
            raise Exception(f'Unknown syntax for alias "{group}": {syntax}')
        if len(syntax) <= 0 or len(group_list) <= 0:
            raise Exception(f'Empty syntax for alias "{group}": {syntax}')
        if group_list[0][0] == '!': # if first entry is an inversion
            group_list.insert(0, 'all') # remove group from all for inversion
        hosts = set()
        for group_name in group_list:
            group_matched = GROUPS_PATTERN.match(group_name)
            add = inv.all_hosts_of_group(group_matched.group('group_name'))
            op = GROUPS_PATTERN_OPS[group_matched.group('operation')]
            hosts = op(hosts, add)
        inv.add_hosts_to_group(hosts, group)

def _parse_groups(inv, data):
    for group, children in data.items():
        inv.add_group(group)
        if children is None:
            continue # as if no children are given
        for child in children:
            inv.add_child_to_group(child, group)
        if isinstance(children, dict):
            _parse_groups(inv, children)

def _parse_host_groups(inv, data):
    GROUPS_KEY = "_all"
    for host_group, hosts in data.items():
        inv.add_group(host_group)
        if hosts is None:
            continue
        for host in hosts:
            if host != GROUPS_KEY:
                inv.add_host_to_group(host, host_group)
        if isinstance(hosts, dict):
            hosts = dict(hosts) # copy dict for further edits
            parents = hosts.pop(GROUPS_KEY, None)
            if parents is not None:
                for parent in parents:
                    inv.add_child_to_group(host_group, parent)
            _parse_single_hosts(inv, hosts)

def _parse_single_hosts(inv, data):
    for host, groups in data.items():
        inv.add_host(host)
        if groups is not None:
            for group in groups:
                inv.add_host_to_group(host, group)

def _parse_version_0(inv, data):
    return _parse_single_hosts(inv, data)

parser_mapping_v1 = { "groups": _parse_groups, "host_groups": _parse_host_groups, "single_hosts": _parse_single_hosts }
def _parse_version_1(inv, data):
    for key_name, parser in parser_mapping_v1.items():
        if key_name in data:
            parser(inv, data[key_name])

def _parse_version_2(inv, data):
    _parse_version_1(inv, data)
    _parse_group_aliasses(inv, data["group_aliasses"])

parser_version_mapping = {
    None: _parse_version_0, # legacy version without version number, only hosts list with tags
    1: _parse_version_1, # adds support for default, inversed group dependencies and host_groups aside single_hosts (ignores aliases supported with version 2)
    2: _parse_version_2, # adds support for aliases (thus destroying the common graph structures where aliasses were used)
}
def parse(path):
    data = _read_yaml(path)
    inv = Inventory()
    version = data.get("version", None)
    # detect that version was used as hostname
    if not isinstance(version, (int, float, complex)):
        version = None
    if version not in parser_version_mapping:
        raise AnsibleError(Exception("Version not supported"))
    parser_version_mapping[version](inv, data)
    return inv.export()
    with open(path, 'r') as stream:
        try:
            data = yaml.safe_load(stream)
        except yaml.YAMLError as e:
            return AnsibleError(e)
    ret = { "all": { "hosts": list(), "vars": dict(), "children": list() }, "_meta": { "hostvars": {} } }
    for host, groups in data.items():
        ret["all"]["hosts"].append(host)
        if groups is not None:
            for group in groups:
                if not group in ret:
                    ret[group] = dict()
                    ret[group]["hosts"] = list()
                    ret[group]["vars"] = dict()
                    ret[group]["children"] = list()
                if not host in ret[group]["hosts"]:
                    ret[group]["hosts"].append(host)
    return ret

print(json.dumps(parse("hosts.yml")))
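A minimal sketch (not part of the commit) of how the alias syntax consumed by _parse_group_aliasses resolves: each ":"-separated token is applied left to right with set operations, "&" intersecting, "!" subtracting, and a leading "!" implying a start from "all". The group contents here are made up for illustration.

groups = {
    "all": {"a", "b", "c"},
    "public_available": {"a", "b"},
    "no_wireguard_automatic": {"b"},
}

def resolve(expr):
    tokens = expr.split(":")
    if tokens[0].startswith("!"):
        tokens.insert(0, "all")  # subtraction needs a base set
    hosts = set()
    for token in tokens:
        op = token[0] if token[0] in "&!" else ""
        members = groups[token.lstrip("&!")]
        if op == "&":
            hosts = hosts & members
        elif op == "!":
            hosts = hosts - members
        else:
            hosts = hosts | members
    return hosts

print(resolve("public_available:!no_wireguard_automatic"))  # {'a'}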
@@ -1,85 +1,41 @@
version: 2

groups: # a:b meaning b is a, can be nested

  # hardware structure
  dev_known:
    barebones:
      - rented_barebones # sub group
      # list of all known barebone device groups
      - dev_surface3 # Microsoft Surface 3
    virtual:
      - rented_vserver # sub group
  dev_unknown: # for unknown device kinds

  # structure of rented servers
  rented:
    rented_barebones:
      - hetzner_server # https://robot.your-server.de/server
    rented_vserver:
      - bwcloud_vserver # https://portal.bw-cloud.org/
      - contabo_vserver # https://my.contabo.com/vps

  # OS structure
  os_known: # list of all known OS derivatives
    - os_debian
    - os_raspbian

  # applications

  bootstrapable: # which OSes/hosts can be bootstrapped
    - os_debian
    - os_raspbian


group_aliasses: # a:b meaning a equals b, should only depend on groups not defined here

  # unknown groups
  dev_unknown: "!dev_known"
  os_unknown: "!os_known"

  # applications
  bootstrap: "bootstrapable:!no_bootstrap" # which hosts should be bootstrapped
  common_roles: "!no_common_roles"
  wireguard_backbones: "public_available:!no_wireguard_automatic"
  wireguard_clients: "!public_available:!no_wireguard_automatic"


host_groups: # group: host: [*groups]

  no_defaults: # do not include in all default playbooks / roles
    _all:
      - no_bootstrap # do not set up sudo bootstrap
      - no_common_roles # do not include in common roles
      - no_wireguard_automatic # do not assign the wireguard role automatically; hosts may be excluded from wireguard or assigned to their wireguard role manually

  rented:
    _all:
      - public_available # rented servers are publicly available

  # to group similar devices together

  common_server: # public common servers
    _all:
      - os_debian
    hatoria.banananet.work:
      - hetzner_server
    nvak.banananet.work:
      - contabo_vserver
    morska.banananet.work:
      - bwcloud_vserver
    rurapenthe.banananet.work:
      - bwcloud_vserver


single_hosts: # a:b meaning a is b, cannot be nested

  # Local Servers
  hardie.eridon.banananet.work:
    - os_debian

  # Embedded Devices
  wgpanel.eridon.banananet.work:
    - dev_surface3
    - os_debian
    - no_wireguard_automatic # no wireguard
  # Public Servers

  hatoria.banananet.work:
    - hetzner_server
    - os_debian
    - bootstrap
    - public_available
    - wireguard_backbones

  nvak.banananet.work:
    - contabo_vserver
    - os_debian
    - bootstrap
    - public_available
    - wireguard_backbones

  morska.banananet.work:
    - bwcloud_vserver
    - os_debian
    - bootstrap
    - public_available
    - wireguard_backbones

  rurapenthe.banananet.work:
    - bwcloud_vserver
    - os_debian
    - bootstrap
    - public_available
    - wireguard_backbones

  # Location Eridon

  ## Local Servers
  hardie.eridon.banananet.work:
    - bootstrap

  ## Embedded Devices
  wgpanel.eridon.banananet.work:
    - surface3
    - os_debian
    - bootstrap
@@ -1 +0,0 @@
Subproject commit 36f3e3b28c82611c72a867cdc1f5ddc8bd9325e9
@@ -1,27 +0,0 @@
#### Python / PiP Requirements ####

# each group either sorted by alphabet or, if applicable, sorted by hierarchy


### Main Runtime Dependencies ###

# Ansible itself
ansible ~= 2.10.0 # pinned to 2.10 because upgrade may bring issues


### Test Frameworks ###

ansible-lint # simple linter
yamllint # linter for YAML files in general

## molecule ##
# role based test framework for Ansible

molecule

# enable docker for test environments, requires Docker to be installed on host and usable without additional permissions
molecule-docker

# allows using Vagrant (VMs) for creating test environments, requires Vagrant and any hypervisor (e.g. VirtualBox) to be installed
molecule-vagrant
python-vagrant # extra module required as not always installed with vagrant
@@ -1 +0,0 @@
*.yml
@@ -1,7 +1,7 @@
---

- name: Configure Surface 3 device
  hosts: dev_surface3
  hosts: surface3
  tasks:
    - name: Install packages for hardware
      apt:
@@ -1 +0,0 @@
../library
@@ -1,12 +0,0 @@
# Accept service terms
agree-tos

# Default RSA key size
rsa-key-size = {{ acme_key_size }}

# E-Mail Address for registration
email = {{ acme_account_mail }}

# Use webroot per default
authenticator = webroot
webroot-path = {{ acme_validation_root_directory }}
@@ -0,0 +1,329 @@
#!/usr/bin/env python3

import argparse
import functools
import json
import os
from pathlib import Path
import subprocess
import sys


# constants

ACME_DOMAIN_PREFIX = "_acme-challenge"
SUBPROCESS_DEFAULT_KWARGS = {"shell": False}
SUBPROCESS_ENV = ["/usr/bin/env"]


# subprocess helpers

def safe_run(args, **kwargs):
    return subprocess.run(args, **SUBPROCESS_DEFAULT_KWARGS, check=True, **kwargs)

def safe_call(args, **kwargs):
    return safe_run(SUBPROCESS_ENV + args, **kwargs)

def safe_popen(args, subprocess_input, **kwargs):
    proc = subprocess.Popen(SUBPROCESS_ENV + args, **SUBPROCESS_DEFAULT_KWARGS, stdin=subprocess.PIPE, **kwargs)
    ret = proc.communicate(input=subprocess_input)
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=proc.args)
    return (proc, *ret)


# shell replacements

def chmod(mode, *paths):
    safe_call(["chmod", mode] + [str(path) for path in paths])

def concat(out_path, *in_paths, mode=0o333):
    with open(out_path, mode="wb", opener=functools.partial(os.open, mode=mode)) as out_file:
        for in_path in in_paths:
            with in_path.open(mode="rb") as in_file:
                out_file.write(in_file.read())

def iterdir_recursive(dir_path, suffix=""):
    ret = []
    for entry in dir_path.iterdir():
        if entry.is_dir():
            ret.extend(iterdir_recursive(entry, suffix=suffix))
        elif entry.name.endswith(suffix):
            ret.append(entry)
    return ret

def nsupdate(key_path, record_name, record_ttl=60, record_type=None, record_data=None, delete_record=False):
    if delete_record:
        record_type = record_type or ""
        record_data = record_data or ""
        action = f"update delete {record_name} {record_ttl} IN {record_type} {record_data}"
    else:
        if not (record_name and record_ttl and record_type and record_data):
            raise Exception("args missing")
        action = f"update add {record_name} {record_ttl} IN {record_type} {record_data}"
    safe_popen(["nsupdate", "-k", str(key_path)], f"{action}\nsend\n")


# hooks code


class DomainData:

    def __init__(self, domain):
        self.__domain = domain

    @property
    def domain(self):
        return self.__domain

    @property
    def domain_dir(self):
        return config.domains_directory / self.__domain

    @property
    def domain_data_file(self):
        return self.domain_dir / config.domain_data_name

    def run_hooks(self, hook_name, **kwargs):
        hook_dir = self.domain_dir / hook_name
        if not hook_dir.exists():
            return
        for hook in iterdir_recursive(hook_dir):
            if os.access(hook, os.X_OK):
                safe_run([str(hook)]) # TODO args


def deploy_challenge(domain, token_filename, token_value):
    # token_filename only for http-01 challenges
    challenge_name = f"{ACME_DOMAIN_PREFIX}.{domain}."
    nsupdate(key_path=None, record_name=challenge_name, record_ttl=config.challenge_record_ttl, record_type="TXT", record_data=token_value)


def clean_challenge(domain, token_filename, token_value):
    # token_filename only for http-01 challenges
    challenge_name = f"{ACME_DOMAIN_PREFIX}.{domain}."
    nsupdate(key_path=None, record_name=challenge_name, delete_record=True)


def sync_cert(domain, key_path, cert_path, fullchain_path, chain_path, request_path):
    safe_call(["sync", key_path, cert_path, fullchain_path, chain_path, request_path])


def deploy_cert(domain, key_path, cert_path, fullchain_path, chain_path, timestamp: int):
    # make public key files readable for all
    chmod("+r", cert_path, fullchain_path, chain_path)
    # create legacy key+cert+chain file
    keyfullchain_path = key_path / ".." / config.keyfullchain_name
    concat(keyfullchain_path, key_path, fullchain_path, mode=config.key_file_mode)


def deploy_ocsp(domain, ocsp_path, timestamp):
    # make OCSP readable for all
    chmod("+r", ocsp_path)
    pass


def unchanged_cert(domain, key_path, cert_path, fullchain_path, chain_path):
    pass


def invalid_challenge(domain, response):
    pass


def request_failure(status_code, reason, req_type, headers):
    pass


def generate_csr(domain, cert_dir, alt_names):
    pass


def startup_hook():
    # prepare domains list
    concat(config.domains_main_file, *iterdir_recursive(config.domains_directory))


def exit_hook(error):
    if error is None:
        pass
    else:
        pass


# hooks metadata

arg_infos = {
    "alt_names": {
        "help": "All domain names for the current certificate as specified in domains.txt.",
    },
    "cert_path": {
        "help": "The path of the file containing the signed certificate.",
        "type": Path,
    },
    "chain_path": {
        "help": "The path of the file containing the intermediate certificate(s).",
        "type": Path,
    },
    "cert_dir": {
        "help": "Certificate output directory for this particular certificate.",
        "type": Path,
    },
    "domain": {
        "help": "The domain name (CN or subject alternative name) being validated.",
    },
    "error": {
        "help": "Contains error message if dehydrated exits with error.",
        "nargs": "?",
    },
    "fullchain_path": {
        "help": "The path of the file containing the signed full certificate chain.",
        "type": Path,
    },
    "headers": {
        "help": "HTTP headers returned by the CA",
    },
    "key_path": {
        "help": "The path of the file containing the private key.",
        "type": Path,
    },
    "ocsp_path": {
        "help": "The path of the ocsp stapling file.",
        "type": Path,
    },
    "req_type": {
        "help": "The kind of request that was made (GET, POST, …).",
    },
    "reason": {
        "help": "The specific reason for the error.",
    },
    "request_path": {
        "help": "The path of the file containing the certificate signing request.",
        "type": Path,
    },
    "response": {
        "help": "The response that the verification server returned."
    },
    "status_code": {
        "help": "The HTTP status code that originated the error.",
        "type": int,
    },
    "timestamp": {
        "help": "Timestamp when the specified certificate was created.",
        "type": int, # TODO to date
    },
    "token_filename": {
        "help": "The name of the file containing the token to be served for HTTP validation.",
    },
    "token_value": {
        "help": "The token value to be served for validation.",
    },
}

hooks = {
    "deploy_challenge": (deploy_challenge, [
        "domain",
        "token_filename",
        "token_value",
    ]),
    "clean_challenge": (clean_challenge, [
        "domain",
        "token_filename",
        "token_value",
    ]),
    "sync_cert": (sync_cert, [
        "domain",
        "key_path",
        "cert_path",
        "fullchain_path",
        "chain_path",
        "request_path",
    ]),
    "deploy_cert": (deploy_cert, [
        "domain",
        "key_path",
        "cert_path",
        "fullchain_path",
        "chain_path",
        "timestamp",
    ]),
    "deploy_ocsp": (deploy_ocsp, [
        "domain",
        "ocsp_path",
        "timestamp",
    ]),
    "unchanged_cert": (unchanged_cert, [
        "domain",
        "key_path",
        "cert_path",
        "fullchain_path",
        "chain_path",
    ]),
    "invalid_challenge": (invalid_challenge, [
        "domain",
        "response",
    ]),
    "request_failure": (request_failure, [
        "status_code",
        "reason",
        "req_type",
        "headers",
    ]),
    "generate_csr": (generate_csr, [
        "domain",
        "cert_dir",
        "alt_names",
    ]),
    "startup_hook": (startup_hook, [
    ]),
    "exit_hook": (exit_hook, [
        "error",
    ]),
}


# general


def read_config(config_path):
    return json.loads(config_path.read_text())


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", "--config-file", dest='_config', type=Path, help="Path to config file")
    subparsers = parser.add_subparsers(dest='_hook', required=True)
    for hook_name, hook_data in hooks.items():
        hook_fun, hook_args = hook_data
        hook_parser = subparsers.add_parser(
            hook_name,
            prefix_chars='+',
        )
        hook_parser.set_defaults(
            _func=hook_fun,
            _parser=hook_parser,
        )
        for arg_name in hook_args:
            hook_parser.add_argument(
                arg_name,
                **arg_infos[arg_name],
            )
    args = parser.parse_args()
    return (args, {key: val for key, val in args.__dict__.items() if not key.startswith("_")})


def external_hook(hook_name, domain, **kwargs):
    domain_dir = config.domains_directory / domain
    print(domain)


def main(argv):
    global config
    args, fun_args = parse_args()
    #config = read_config(args._config)
    #args._fun(**fun_args)
    external_hook(args._hook, **fun_args)


if __name__ == "__main__":
    main(sys.argv[1:])
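For reference, dehydrated invokes its hook script with the hook name as the first argument, followed by that hook's positional parameters. A rough sketch of such an invocation (the script name hook.py and the concrete values are assumptions for illustration):

import subprocess

subprocess.run([
    "./hook.py", "deploy_challenge",  # hook name is argv[1]
    "example.com",                    # domain being validated
    "unused-for-dns01",               # token_filename (only meaningful for http-01)
    "dns-txt-record-value",           # token_value written into the _acme-challenge record
], check=True)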
@@ -1,39 +0,0 @@
---

# protecting the process lists of users other than root
# Source: https://wiki.archlinux.org/index.php/Security#hidepid

- name: Configure group for reading other processes
  group:
    state: present
    name: proc
    system: yes

- name: Configure proc mounting in fstab
  lineinfile:
    path: "{{ global_fstab_file }}"
    regexp: '^\S+\s+/proc\s+proc\s+'
    line: >-
      proc /proc proc
      nosuid,nodev,noexec,hidepid=2,gid=proc
      0 0

- name: Ensure configuration directories for whitelisted services exist
  file:
    state: directory
    path: "{{ global_systemd_configuration_directory }}/{{ item }}.d"
    owner: root
    group: root
    mode: u=rwx,g=rx,o=rx
  loop: "{{ global_proc_hidepid_service_whitelist }}"

- name: Configure whitelisted services to adapt to hidepid setting
  copy:
    content: |
      [Service]
      SupplementaryGroups=proc
    dest: "{{ global_systemd_configuration_directory }}/{{ item }}.d/proc_hidepid_whitelist.conf"
    owner: root
    group: root
    mode: u=rw,g=r,o=r
  loop: "{{ global_proc_hidepid_service_whitelist }}"
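A small sketch (an assumption, not part of the commit) of what hidepid=2 changes: an unprivileged process whose user is not in the proc group should only see its own PIDs under /proc.

import os

own_uid = os.getuid()
pids = [d for d in os.listdir("/proc") if d.isdigit()]
foreign = [p for p in pids if os.stat(f"/proc/{p}").st_uid != own_uid]
print(f"{len(pids)} PIDs visible, {len(foreign)} owned by other users")
# with hidepid=2 and no proc group membership, foreign should be empty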
@@ -1,5 +0,0 @@
---

docker_configuration:
  dns: "{{ ansible_dns.nameservers | ipv4 }}" # use only ipv4 dns servers TODO: check if docker supports ipv6
  log-driver: journald # send container logs also to journald
@@ -1,6 +0,0 @@
---

- name: restart docker
  systemd:
    name: "{{ global_docker_service_name }}"
    state: restarted
@@ -1,18 +0,0 @@
---

- name: Install Docker-CE
  apt:
    state: present
    name:
      - docker.io
      - docker-compose
      - python3-docker

- name: Configure docker daemon
  copy:
    content: "{{ docker_configuration | to_nice_json }}\n"
    dest: "{{ global_docker_daemon_configuration_file }}"
    owner: root
    group: root
    mode: u=rw,g=r,o=r
  notify: restart docker
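The copy task above renders docker_configuration through to_nice_json into daemon.json; a sketch (not from the commit) of roughly what that produces, with an illustrative resolver address:

import json

docker_configuration = {
    "dns": ["213.133.100.100"],  # only the IPv4 resolvers survive the ipv4 filter
    "log-driver": "journald",
}
print(json.dumps(docker_configuration, indent=4, sort_keys=True))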
@@ -1,18 +0,0 @@
---

# Required arguments
#domain: example.com # to derive instance_name
instance_name: "{{ domain }}" # required if domain is not set
#repo_url: https://git.example.com/app/docker.git

# Optional arguments
repo_version: master # branch/tag of git repository to use
compose_overrides: "" # syntax of common docker-compose file
main_compose_name: docker-compose.yml # to derive main_compose_path

#user_directory # to derive docker_directory
docker_directory: "{{ user_directory }}/docker:{{ instance_name }}"
repository_directory: "{{ docker_directory }}/repository"

main_compose_path: "{{ repository_directory }}/{{ main_compose_name }}"
compose_override_path: "{{ docker_directory }}/compose.override.yml"
@@ -1,6 +0,0 @@
---

allow_duplicates: yes

dependencies:
  - role: docker/application
@@ -1,32 +0,0 @@
---

- name: Ensure directory for repository exists
  file:
    state: directory
    path: "{{ docker_directory }}"
    owner: root
    group: root
    mode: u=rwx,g=rx,o=rx

- name: Clone git repository
  git:
    repo: "{{ repo_url }}"
    dest: "{{ repository_directory }}"
    version: "{{ repo_version }}"

- name: Configure docker-compose overrides
  copy:
    content: "{{ compose_overrides }}"
    dest: "{{ compose_override_path }}"
    validate: "/usr/bin/docker-compose -f {{ main_compose_path | quote }} -f %s config" # requires the original compose file because the override alone is not (always) valid

- name: Build and start docker containers
  docker_compose:
    state: present
    project_name: "{{ instance_name }}"
    project_src: "{{ repository_directory }}"
    files:
      - "{{ main_compose_name }}"
      - "{{ compose_override_path }}"
    build: yes
    recreate: smart
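The validate argument above only succeeds when docker-compose can merge the main file with the override, which is why both files must be passed. A sketch of the equivalent manual check (the paths are assumptions):

import subprocess

subprocess.run(
    ["docker-compose",
     "-f", "repository/docker-compose.yml",  # main compose file
     "-f", "compose.override.yml",           # candidate override
     "config"],
    check=True,  # a non-zero exit means the merged configuration is invalid
)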
@@ -1,33 +0,0 @@
---

# domain: example.com

# MailJet will assign a unique verification record whose name and data must be given in these variables
# e.g. the record 'mailjet._12345678.example.com TXT "abcdefghijklmnopqrstuvwxyz123456"'
# resolves to name="12345678" and data="abcdefghijklmnopqrstuvwxyz123456"
# verification_name: 12345678
# verification_data: abcdefghijklmnopqrstuvwxyz123456

spf_redirect_domain: "spf.mailjet.com"

dkim_key_name: mailjet
dkim_key_data: >-
  "v=DKIM1; k=rsa; "
  "p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDrA+r6R11vE4/FMWpgOzNWn5"
  "JIzn7/y88DTla9DZvfpbcBFGEKDqhArsa+t9V34TdFzpIss4T80F1C7BrGWnwo46"
  "I7rk2y5ee9Ga3iwG5EyilrXF10hw+qk2EsTKdAHld0x24vnzW/tFWfF47eu4ricY"
  "/KuIrjXQ4Xs23eCNw6vQIDAQAB"

dmarc_policy: "v=DMARC1;p=none"

# derived DNS record data
# names are relative to domain

verification_record_name: "mailjet._{{ verification_name }}"
verification_record_data: "{{ verification_data }}"
spf_record_name: "@"
spf_record_data: "v=spf1 include:{{ spf_redirect_domain }} -all"
dkim_record_name: "{{ dkim_key_name }}._domainkey"
dkim_record_data: "{{ dkim_key_data }}"
dmarc_record_name: "_dmarc"
dmarc_record_data: "{{ dmarc_policy }}"
@@ -1,20 +0,0 @@
---

allow_duplicates: yes

dependencies:
  - role: dns/entries
    # domain
    entries:
      - domain: "{{ verification_record_name }}"
        type: TXT
        data: "{{ verification_record_data }}"
      - domain: "{{ spf_record_name }}"
        type: TXT
        data: "{{ spf_record_data }}"
      - domain: "{{ dkim_record_name }}"
        type: TXT
        data: "{{ dkim_record_data }}"
      - domain: "{{ dmarc_record_name }}"
        type: TXT
        data: "{{ dmarc_record_data }}"
@@ -1,5 +0,0 @@
---

# packages: []
configuration_name: "{{ packages[0] }}"
priority: 990
@@ -1,6 +0,0 @@
---

allow_duplicates: yes

dependencies:
  - role: misc/deb_backports
@@ -1,16 +0,0 @@
---

- name: Restrict backports for apt
  copy:
    dest: "/etc/apt/preferences.d/backports-{{ configuration_name }}"
    owner: root
    group: root
    mode: "u=rw,g=r,o=r"
    content: |
      Package: {{ packages | join(" ") }}
      Pin: release a={{ debian_backports_name }}
      Pin-Priority: {{ priority }}
  notify: update apt cache

- name: Flush handlers for backports priority configuration
  meta: flush_handlers
@@ -0,0 +1,3 @@
---

docker_version: "stable"
@@ -0,0 +1,29 @@
---

- name: Add key for source for docker
  apt_key:
    state: present
    id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
    url: https://download.docker.com/linux/debian/gpg

- name: Add source for docker
  apt_repository:
    state: present
    repo: "deb [arch={{ ansible_local.dpkg.architecture }}] https://download.docker.com/linux/debian {{ ansible_distribution_release }} {{ docker_version }}"
    filename: docker
    update_cache: yes

- name: Install Docker-CE
  apt:
    state: present
    name:
      - docker-ce
      - docker-ce-cli
      - containerd.io
      - python3-docker
    install_recommends: no # To fix https://github.com/raspberrypi/linux/issues/3021

- name: Docker Module Python3 Dependencies
  pip:
    name:
      - docker-compose
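A minimal post-install smoke test (not part of the commit) using the python3-docker package installed above:

import docker

client = docker.from_env()          # talks to /var/run/docker.sock
print(client.version()["Version"])  # prints the daemon version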
@@ -1,12 +1,7 @@
---

notify_directory: "{{ global_deployment_directory }}/ssh_notify"
notify_script: "{{ notify_directory }}/telegram.sh"
notify_cache_directory: "{{ notify_directory }}/cache"
notify_users_directory: "{{ notify_directory }}/users"

trusted_vpn_subnet: "{{ tailscale_vpn_subnet }}"
notify_script: "{{ global_deployment_directory }}/ssh_notify/telegram.sh"

# recipient_id
bot_key: "{{ global_telegram_server_bot_key }}"
bot_key: "{{ global_ssh_notify_telegram_bot_key }}"
timeout: 10
@@ -1,41 +1,14 @@
#!/bin/bash
# Modified version, original source: https://gitlab.com/snippets/1871482#note_188602535

USER_ID_DIR={{ notify_users_directory | quote }}
CACHE_DIR={{ notify_cache_directory | quote }}
USERID={{ recipient_id | quote }}
KEY={{ bot_key | quote }}
VPN_SUBNET={{ trusted_vpn_subnet | quote }}

TIMEOUT={{ timeout | quote }}

getUserId() {
    USER_CONF="${USER_ID_DIR}/$1"
    [[ -r "$USER_CONF" ]] && head -n 1 "$USER_CONF"
}

URL="https://api.telegram.org/bot$KEY/sendMessage"
sendMessage() {
    curl -s --max-time "$TIMEOUT" -H "Content-Type: application/x-www-form-urlencoded" -d "chat_id=$1" -d "disable_web_page_preview=1" -d "parse_mode=Markdown" -d "text=$2" "$URL" >/dev/null
}

if [[ "$PAM_SERVICE" == "sshd" && "$PAM_TYPE" == "open_session" && "$PAM_USER" != "git" && -z "$TMUX" && -n "$PAM_RHOST" ]] && ! /ansible/helpers/check_subnet.py "$PAM_RHOST" "$VPN_SUBNET"; then
if [[ "$PAM_SERVICE" == "sshd" && "$PAM_TYPE" == "open_session" && "$PAM_USER" != "git" && -z "$TMUX" ]]; then
    IP="$PAM_RHOST"
    cache_file="${CACHE_DIR}/${IP}-${PAM_USER}"
    cache_mtime=$(stat --format="%Y" "$cache_file" 2>/dev/null)
    current_time=$(date +%s)
    touch "$cache_file"
    if (( cache_mtime > (current_time - 4*60*60) )); then
        exit 0
    fi
    # define message text
    HOSTNAME=$(hostname --fqdn)
    TEXT="Successful login from [$IP](https://stat.ripe.net/app/$IP) for ${PAM_USER} @ ${HOSTNAME} ($(date "+%Y-%m-%d %H:%M"))"
    # send to root
    ROOT_USER_ID="$(getUserId root)"
    sendMessage "$ROOT_USER_ID" "$TEXT (This message was sent to you because you are the admin.)"
    # send to user if id is known
    USER_ID="$(getUserId "$PAM_USER")"
    if [[ -n "$USER_ID" ]]; then
        sendMessage "$USER_ID" "$TEXT"
    fi
    TEXT="Successful login from [$IP](https://ipinfo.io/$IP) for ${PAM_USER} @ ${HOSTNAME} ($(date "+%Y-%m-%d %H:%M"))"
    curl -s --max-time $TIMEOUT -d "chat_id=$USERID" -d "disable_web_page_preview=1" -d "parse_mode=Markdown" -d "text=$TEXT" "$URL" > /dev/null
fi
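A sketch (an assumption, not part of the commit) of the cache logic above expressed in Python: a notification for the same IP/user pair is suppressed if one was already sent within the last four hours, tracked via the cache file's mtime.

import time
from pathlib import Path

def should_notify(cache_dir: Path, ip: str, user: str, window: int = 4 * 60 * 60) -> bool:
    cache_file = cache_dir / f"{ip}-{user}"
    last = cache_file.stat().st_mtime if cache_file.exists() else 0  # read before touching
    cache_file.touch()  # refresh the window, exactly as the script does
    return last <= time.time() - window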
@@ -1,26 +0,0 @@
---

# monitor_name: "echo-output-check"
instance_name: "tg-monitor-cmd-{{ monitor_name }}"
description: "{{ monitor_name }}" # should be a human-friendly name

# command: "/bin/echo Hello" # or command_str
command_str: "{{ command | map('quote') | join(' ') }}"
use_shell: no # read https://docs.python.org/3/library/subprocess.html#security-considerations before using use_shell=yes

system_user: tg-monitor-cmd

recipient_id: "{{ default_tg_monitor_recipient_id }}"
bot_key: "{{ global_telegram_server_bot_key }}"
telegram_timeout: 10

calendar_spec: "*:0:0" # once every hour
service_description: |
  Telegram Monitor Command of {{ description }}

# paths

monitoring_directory: "{{ global_deployment_directory }}/tg-monitor-cmd"
instance_directory: "{{ monitoring_directory }}/{{ monitor_name }}"
script_path: "{{ instance_directory }}/script.py"
data_path: "{{ instance_directory }}/data"
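A short sketch (not part of the commit) of the quoting round trip these defaults rely on: the command list is rendered into command_str via the quote filter, and the monitor script splits it back with shlex when use_shell is off.

import shlex

command = ["/bin/echo", "Hello World"]
command_str = " ".join(shlex.quote(part) for part in command)
print(command_str)                          # /bin/echo 'Hello World'
assert shlex.split(command_str) == command  # round-trips losslessly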
@@ -1,8 +0,0 @@
---

allow_duplicates: yes

dependencies:
  - role: misc/system_user
    # system_user
    user_directory: "{{ monitoring_directory }}"
@@ -1,69 +0,0 @@
---

- name: Create directory for monitoring scripts and data
  file:
    state: directory
    path: "{{ instance_directory }}"
    owner: root
    group: "{{ system_user }}"
    mode: u=rwx,g=rx,o=

- name: Deploy script
  template:
    src: monitor.py
    dest: "{{ script_path }}"
    owner: root
    group: "{{ system_user }}"
    mode: u=rwx,g=rx,o=
  register: script_task

- name: Create empty data file
  copy:
    content: ""
    dest: "{{ data_path }}"
    force: no # do not overwrite

- name: Ensure permissions on data file
  file:
    state: file
    path: "{{ data_path }}"
    owner: root
    group: "{{ system_user }}"
    mode: u=rw,g=rw,o=

- name: Register service for monitor
  template:
    src: monitor.service
    dest: "{{ global_systemd_configuration_directory }}/{{ instance_name }}.service"
    owner: root
    group: root
    mode: u=rw,g=r,o=
  register: service_task

- name: Run service for initial test
  systemd:
    state: started
    daemon_reload: yes
    name: "{{ instance_name }}.service"
  when: script_task.changed or service_task.changed

- name: Register timer for monitor service
  template:
    src: monitor.timer
    dest: "{{ global_systemd_configuration_directory }}/{{ instance_name }}.timer"
    owner: root
    group: root
    mode: u=rw,g=r,o=
  register: timer_task

- name: Restart timer for monitor
  systemd:
    state: restarted
    daemon_reload: yes
    name: "{{ instance_name }}.timer"
  when: timer_task.changed

- name: Enable timer for monitor
  systemd:
    name: "{{ instance_name }}.timer"
    enabled: yes
@@ -1,64 +0,0 @@
#!/usr/bin/env python3

# imports

from pathlib import Path
from hashlib import sha256
import shlex
import subprocess
import sys

import requests

# config

MONITOR_DESC = """{{ description }}"""
MONITOR_COMMAND = """{{ command_str }}"""
USE_SHELL = {{ use_shell | ternary('True', 'False') }}

DATA_PATH = Path("""{{ data_path }}""")

TG_ENDPOINT = "https://api.telegram.org"
TG_KEY = """{{ bot_key }}"""
TG_RECIPIENT = """{{ recipient_id }}"""

# helpers

def print_error(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)

def tg_msg(msg: str) -> None:
    print(f"Sending message using telegram:\n{msg}")
    ret = requests.post(f"{TG_ENDPOINT}/bot{TG_KEY}/sendMessage", data={
        "chat_id": TG_RECIPIENT,
        "disable_web_page_preview": 1,
        "parse_mode": "Markdown",
        "text": msg,
    })
    if 400 <= ret.status_code:
        raise Exception(f"Sending telegram message failed: {ret.status_code} {ret.text}")

def run_cmd(cmd: list, **kwargs) -> str:
    return subprocess.run(cmd, capture_output=True, check=True, text=True, **kwargs).stdout

def hash_data(data: str) -> str:
    return sha256(data.encode("utf-8")).hexdigest()

def check_cmd(cmd_str: str, use_shell: bool, data_file: Path) -> str:
    cmd = shlex.split(cmd_str) if not use_shell else cmd_str
    old_hash = data_file.read_text() if data_file.exists() else None
    new_data = run_cmd(cmd, shell=use_shell)
    new_hash = hash_data(new_data)
    if old_hash == new_hash:
        return None
    data_file.write_text(new_hash)
    return new_data

if __name__ == "__main__":
    try:
        data = check_cmd(MONITOR_COMMAND, USE_SHELL, DATA_PATH)
        if data:
            tg_msg(f"{MONITOR_DESC} changed to:\n```\n{data}\n```")
    except Exception as e:
        tg_msg(f"Got exception while running command of {MONITOR_DESC}: {str(e)}")
        raise e
@@ -1,23 +0,0 @@
[Unit]
Description={{ service_description }}

[Service]
Type=simple
ExecStart={{ script_path | quote }}
User={{ system_user }}
Group={{ system_user }}

UMask=007
PrivateTmp=yes
PrivateDevices=yes
ProtectHome=yes
ReadOnlyPaths=/
ReadWritePaths=-{{ data_path }}

ProtectKernelModules=true
ProtectKernelTunables=true
ProtectControlGroups=true
RestrictRealtime=true
RestrictNamespaces=true

ProtectSystem=full
@@ -1,8 +0,0 @@
[Unit]
Description=Timer of {{ service_description }}

[Timer]
OnCalendar={{ calendar_spec }}

[Install]
WantedBy=multi-user.target
@@ -1,4 +1,4 @@
---

# domain: old.example.com
# dest: new.example.com
domain: example.com
dest: example.com
@@ -0,0 +1,12 @@
---

- name: Enable forwarding {{ domain }} to {{ dest }}
  template:
    src: forward.conf
    dest: "{{ nginx_sites_directory }}/{{ domain }}"
    owner: root
    group: root
    mode: "u=rw,g=r,o=r"
  notify: reload nginx
  tags:
    - certificate
@@ -0,0 +1,14 @@
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name {{ effective_domain }};

    ssl on;
    ssl_certificate {{ acme_fullchain_location }};
    ssl_certificate_key {{ acme_key_location }};

    include {{ nginx_snippets_directory }}/https;
    include {{ nginx_snippets_directory }}/global;

    return 301 https://{{ dest }}$request_uri;
}
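A quick way (an assumption, not part of the commit) to verify the redirect this template produces; the domain names are placeholders:

import requests

r = requests.get("https://old.example.com/some/path?q=1", allow_redirects=False)
assert r.status_code == 301  # permanent redirect, path and query preserved
assert r.headers["Location"] == "https://new.example.com/some/path?q=1"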
@@ -1,9 +0,0 @@
---

instance_name: "drone-runner" # must be unique if multiple runners are deployed to a machine
docker_image: "drone/drone-runner-docker:1"

# drone_server_host: ci.example.com
drone_rpc_secret: "{{ lookup('file', 'credentials/' + drone_server_host + '/rpc_secret') }}" # sync with server/drone.io/server, because it must be known to all runners
drone_runner_capacity: 4
drone_runner_name: "{{ inventory_hostname }}"
@@ -1,6 +0,0 @@
---

allow_duplicates: yes

dependencies:
  - role: docker/application
@@ -1,21 +0,0 @@
---

- name: Start drone runner using docker-compose
  docker_compose:
    state: present
    project_name: "{{ instance_name }}"
    definition:
      version: '2'
      services:
        drone-runner:
          image: "{{ docker_image }}"
          restart: always
          environment:
            DRONE_RPC_PROTO: https
            DRONE_RPC_HOST: "{{ drone_server_host }}"
            DRONE_RPC_SECRET: "{{ drone_rpc_secret }}"
            DRONE_RUNNER_CAPACITY: "{{ drone_runner_capacity }}"
            DRONE_RUNNER_NAME: "{{ drone_runner_name }}"
            DOCKER_API_VERSION: "1.39"
          volumes:
            - "/var/run/docker.sock:/var/run/docker.sock"
@@ -1,21 +0,0 @@
---

# domain: ci.example.com
docker_image: "drone/drone:1"

# TODO Bind to socket path
# bind_port
#!socket_directory: "{{ user_directory }}/socket"
#!socket_path: "{{ socket_directory }}/socket"

# gitea_server_url: https://git.example.com/gitea
# gitea_client_id generated by gitea
# gitea_client_secret generated by gitea

instance_directory: "{{ global_webservers_directory }}/{{ domain }}"
data_directory: "{{ instance_directory }}/data"
drone_data_directory: "{{ data_directory }}/drone_volume"

drone_admin_user: "{{ global_username }}"
drone_rpc_secret: "{{ lookup('password', 'credentials/' + domain + '/rpc_secret chars=digits,ascii_letters length=80') }}" # sync with server/drone.io/runner, because it must be known to all runners
drone_database_secret: "{{ lookup('password', 'credentials/' + domain + '/database_secret length=32 chars=0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f') }}"
@@ -1,15 +0,0 @@
---

allow_duplicates: yes

dependencies:
  - role: docker/application
  - role: misc/backup_files
    # domain
    backup_directory: "{{ data_directory }}"
  - role: misc/hdd_dir
    # domain
    hdd_source_dir: "{{ data_directory }}"
  - role: nginx/proxy
    # domain
    backend_port: "{{ bind_port }}"
@@ -1,47 +0,0 @@
---

- name: Create instance directory
  file:
    state: directory
    path: "{{ instance_directory }}"
    owner: root
    group: root
    mode: u=rwx,g=rx,o=

- name: Create general data directory
  file:
    state: directory
    path: "{{ data_directory }}"
    owner: root
    group: root
    mode: u=rwx,g=rx,o=

- name: Create data directory for drone volume
  file:
    state: directory
    path: "{{ drone_data_directory }}"
    # let docker/drone.io manage permissions

- name: Start drone server using docker-compose
  docker_compose:
    state: present
    project_name: "{{ domain }}"
    definition:
      version: '2'
      services:
        drone-server:
          image: "{{ docker_image }}"
          restart: always
          environment:
            DRONE_DATABASE_SECRET: "{{ drone_database_secret }}"
            DRONE_GITEA_SERVER: "{{ gitea_server_url }}"
            DRONE_GITEA_CLIENT_ID: "{{ gitea_client_id }}"
            DRONE_GITEA_CLIENT_SECRET: "{{ gitea_client_secret }}"
            DRONE_RPC_SECRET: "{{ drone_rpc_secret }}"
            DRONE_SERVER_HOST: "{{ domain }}"
            DRONE_SERVER_PROTO: https
            DRONE_USER_CREATE: "username:{{ drone_admin_user }},admin:true"
          ports:
            - "127.0.0.1:{{ bind_port }}:80" # for nginx reverse proxy
          volumes:
            - "{{ data_directory }}:/data"
@@ -0,0 +1,39 @@
#!/usr/bin/env bash

readonly CHECKSUM_TYPE="sha256";
readonly CHECKSUM_APP="${CHECKSUM_TYPE}sum";
readonly GPG_FINGERPRINT="{{ nextcloud_gpg_fingerprint }}";
readonly NEXTCLOUD_USER="{{ system_user }}";
readonly NEXTCLOUD_DIR="{{ nextcloud_installation_directory }}";
readonly NEXTCLOUD_GIT_REPO="{{ nextcloud_source_repo }}";

readonly GIT_REPO="https://github.com/nextcloud/server";
readonly VERSION_REGEX='v\d+(\.\d+)*';

set -e;

gpg --quiet --keyserver eu.pool.sks-keyservers.net --recv "$GPG_FINGERPRINT";

function error() {
    echo "$@" >&2;
}

function as() {
    sudo -u "$NEXTCLOUD_USER" "$@";
}

cd "$NEXTCLOUD_DIR";

version="$(curl --silent "$GIT_REPO/releases.atom" \
    | grep --only-matching --perl-regexp '(?<=\s)<link.*/>' \
    | grep --only-matching --perl-regexp '(?<=href="'"$GIT_REPO"'/releases/tag/)'"$VERSION_REGEX"'(?="/>)' \
    | sort --reverse --numeric-sort \
    | head --lines=1)";
if git verify-tag --raw "$version" 2>&1 | grep --fixed-strings "[GNUPG:] VALIDSIG $GPG_FINGERPRINT " > /dev/null; then
    as composer update;
else
    error "Invalid or missing signature for $version";
    exit 1;
fi
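A sketch (not from the commit) of the version discovery the pipeline above attempts, done in Python; note that sort --numeric-sort on strings like "v19.0.1" is unreliable, so versions are compared component-wise here.

import re
import urllib.request

atom = urllib.request.urlopen(
    "https://github.com/nextcloud/server/releases.atom").read().decode()
tags = re.findall(
    r'href="https://github\.com/nextcloud/server/releases/tag/(v\d+(?:\.\d+)*)"',
    atom)
latest = max(tags, key=lambda t: [int(x) for x in t[1:].split(".")])
print(latest)  # e.g. the newest vX.Y.Z tag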
@@ -1,33 +0,0 @@
#!/usr/bin/env python3

import getpass
import subprocess
import json
import sys

NC_USER = """{{ system_user }}"""
NC_INSTALL_DIR = """{{ nextcloud_installation_directory }}"""

if __name__ == "__main__":
    args_list = [
        "/usr/bin/env",
        "php",
        "occ",
        "app:list",
        "--output=json"
    ]
    if getpass.getuser() != NC_USER:
        args_list = [
            "sudo",
            "-u", NC_USER,
        ] + args_list
    try:
        proc = subprocess.run(args_list, capture_output=True, check=True, cwd=NC_INSTALL_DIR, text=True)
    except subprocess.CalledProcessError as e:
        print(e.stderr, file=sys.stderr)
        raise e
    apps_nc = json.loads(proc.stdout)
    for name, apps in reversed(sorted(apps_nc.items())):
        print(f"{name}_apps_list:")
        for app_name in apps:
            print(f" - {app_name}")