Initial reverse-proxy repository, based on Latina.Red

This is a stripped-down version that only includes the main roles:
- althost, which orchestrates everything
- proxy, a dockerized Nginx reverse proxy
- certbot, which manages SSL for the proxy
- dns, a BIND9 DNS server
- rap, the VPN
- users, which manages Linux users
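
For orientation, a full deploy (a sketch based on the comment at the top of tasks/deploy.yml; host and alt here assume the abyayala inventory and matrix files in this commit) looks like:

ansible-playbook --vault-id @prompt tasks/deploy.yml -e "host=abyayala alt=abyayala"
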
Bet@ 2024-08-22 14:15:39 -04:00
commit 8c1a6e13e4
48 changed files with 986 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,2 @@
*.retry
*keys.yml

abyayala.yml Normal file

@@ -0,0 +1,10 @@
althost: abyayala
matrix:
  - service_name: chem
    roles:
      - web
    domains:
      - marmite.abyaya.la
      - wordpress.marmite.abyaya.la
    nodo: marmite.comun.abyaya.la
    ssl: yes
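
An entry flagged as authoritative is handled differently by the dns role below: it gets its own SOA zone instead of a CNAME in the main zone. A hypothetical second entry, service name and domains invented for illustration:

  - service_name: ejemplo
    roles:
      - web
    domains:
      - ejemplo.org
    nodo: ejemplo.comun.abyaya.la
    authoritative: yes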

ansible.cfg Normal file

@@ -0,0 +1,2 @@
[defaults]
inventory = ./hosts.production

group_vars/abyayala/vars Normal file

@@ -0,0 +1,5 @@
primary_dns_server: abyaya.la
host_ip: 5.161.236.18
compose_path: /opt/{{ althost }}
oculta: "~/.oculta"
proxy_scale: 2

group_vars/all/main.yml Normal file

@@ -0,0 +1,7 @@
compose_path: /opt/{{ althost }}
local_compose_path: /tmp/{{ althost }}
admin_email: beta@numerica.cl
keys_file: ~/keys.yml
# docker swarm
manager_node_ip: 5.161.236.18
# worker_token:

@@ -0,0 +1,5 @@
primary_dns_server: abyaya.la
host_ip: 127.0.0.1
compose_path: /tmp/{{ althost }}
oculta: "~/.oculta"
proxy_scale: 1

hosts.production Normal file

@@ -0,0 +1,8 @@
[localhost]
127.0.0.1
[abyayala]
5.161.236.18
[abyayala:vars]
ansible_ssh_user=root
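
The althost role can also join worker nodes to the swarm when a host defines a worker flag and group_vars/all/main.yml provides a worker_token; a hypothetical extension of this inventory, with an invented IP:

[abyayala_workers]
10.0.0.2 worker=yes ansible_ssh_user=root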

@@ -0,0 +1,7 @@
---
- name: deploy docker
  command: "docker stack deploy -c {{ compose_path }}/docker-compose.yml {{ althost }}"
- name: deploy service update
  command: docker stack deploy -c {{ compose_path }}/docker-compose-mini.yml {{ althost }}

@@ -0,0 +1,35 @@
- set_fact:
    services_content: "{{ lookup('template', 'templates/services.yml') }}"
    volumes_content: "{{ lookup('template', 'templates/volumes.yml') }}"
    networks_content: "{{ lookup('template', 'templates/networks.yml') }}"
- name: define services in local composition
  local_action:
    module: blockinfile
    path: "{{ local_compose_path }}/docker-compose.yml"
    insertafter: "services:"
    marker: "# {mark} {{ service_name|upper }}"
    block: "{{ services_content }}"
  changed_when: false
- name: define volumes in local composition
  local_action:
    module: lineinfile
    path: "{{ local_compose_path }}/docker-compose.yml"
    insertafter: "volumes: #"
    line: "{{ volumes_content }}"
    state: present
    regexp: "{{ volumes_content }}"
  when: volumes_content is defined
  changed_when: false
- name: define networks in local composition
  local_action:
    module: lineinfile
    path: "{{ local_compose_path }}/docker-compose.yml"
    insertafter: "networks: #"
    line: "{{ networks_content }}"
    regexp: "{{ networks_content }}"
    state: present
  when: networks_content is defined
  changed_when: false
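
The net effect, sketched here for a composition holding only the proxy service (the BEGIN/END markers come from blockinfile; the service body is the proxy services.yml template shown further down):

version: '3.3'
# docker services
services:
# BEGIN PROXY
  proxy:
    image: nginx
    # ...remaining keys from roles/proxy/templates/services.yml
# END PROXY
# shared volumes
volumes: #
  certs_data:
  certs_www:
# shared networks
networks: #
  proxy: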

@@ -0,0 +1,113 @@
# DOCKER CE this is specific for Debian
# https://docs.docker.com/install/linux/docker-ce/debian/
- block:
    - name: required packages
      apt:
        name: ['apt-transport-https', 'ca-certificates', 'curl', 'gnupg2', 'software-properties-common', 'python3-pip']
        state: present
    - name: docker signing key
      apt_key:
        url: https://download.docker.com/linux/debian/gpg
        state: present
    - name: docker apt repository
      apt_repository:
        repo: deb [arch=amd64] https://download.docker.com/linux/debian bullseye stable
    - name: install docker community edition
      apt:
        name: docker-ce
        update_cache: yes
    - name: is node already in swarm mode
      shell: docker info
      register: swarm_status
      changed_when: False
    - name: declare this node as swarm manager
      shell: docker swarm init --advertise-addr {{ ansible_default_ipv4.address }}
      when: "worker is undefined and swarm_status.stdout.find('Swarm: active') == -1"
    - name: join this node to the swarm manager
      shell: docker swarm join --token {{ worker_token }} {{ manager_node_ip }}:2377
      when: worker is defined
    # TODO APPARMOR ?
    # ANSIBLE requirements
    - name: ensure python is present
      apt: name=python3 state=present
    - name: ensure pip is present
      apt:
        name: python3-pip
        state: present
    # ansible-docker requirements
    - name: python package docker-py is deprecated
      pip:
        name: docker-py
        state: absent
        break_system_packages: true
    - name: ensure python package docker is present
      pip:
        name: docker
        state: present
        break_system_packages: true
    # https://stackoverflow.com/questions/77490435/attributeerror-cython-sources
    - name: fix python package Cython version
      pip:
        name: Cython
        state: present
        version: "<3.0.0"
        break_system_packages: true
    - name: fix python package PyYAML version
      shell: pip install "pyyaml==5.4.1" --no-build-isolation --break-system-packages
    - name: ensure python package docker-compose is present
      pip:
        name: docker-compose
        state: present
        break_system_packages: true
  tags: installation
# DOCKER COMPOSITION IN MASTER
- block:
    - name: make sure compose path exists
      file: path={{ compose_path }} state=directory
    - name: make sure local compose path exists
      local_action:
        module: file
        path: "{{ local_compose_path }}"
        state: directory
    - name: clean docker-compose.yml
      local_action:
        module: template
        dest: "{{ local_compose_path }}/docker-compose.yml"
        src: roles/althost/templates/docker-compose.yml
      changed_when: false
    - name: make base keys dictionary
      local_action:
        module: lineinfile
        path: "{{ keys_file }}"
        line: "KEYS:"
        regexp: "KEYS:"
      changed_when: false
    - name: execute roles per domain mapping
      include_tasks: roles.yml
      with_items: "{{ matrix }}"
# TODO
# service undefined: run all the roles
# service defined: if this is the manager, run its role | if it is a worker, run it too but on the other host

@@ -0,0 +1,11 @@
- set_fact:
    current_service: "{{ item }}"
    service_name: "{{ item.service_name }}"
    service_roles: "{{ item.roles }}"
- include_role:
    name: "{{ current_role_name }}"
  with_items: "{{ service_roles }}"
  loop_control:
    loop_var: current_role_name
  when: (service is undefined) or (service is defined and service_name == service)

@@ -0,0 +1,11 @@
version: '3.3'
# docker services
services:
# shared volumes
volumes: #
# shared networks
networks: #

@@ -0,0 +1,8 @@
    deploy:
      placement:
        constraints:
{% if item.worker is defined %}
          - node.role == worker
{% else %}
          - node.role == manager
{% endif %}

@@ -0,0 +1,48 @@
- name: check if the certificate exists
  stat:
    path: "{{ certs_mountpoint }}/live/{{ loop.domains[0] }}"
  register: ssl_cert
  become: yes
- name: check if vhost already exists
  stat:
    path: "{{ vhosts_path }}/{{ loop.domains[0] }}.conf"
  register: vhost_stat
- set_fact:
    needs_cert: "{{ (loop.ssl | default(domains_default_ssl)) or (loop.force_https | default(domains_default_force_https)) }}"
- set_fact:
    needs_vhost: "{{ needs_cert and not vhost_stat.stat.exists }}"
    obtain_cert: "{{ needs_cert and not ssl_cert.stat.exists }}"
- name: obtain certificate
  block:
    - set_fact:
        vhost: "{{ loop }}"
    - name: add plain vhost to proxy for certbot test
      template:
        src: vhost.conf
        dest: "{{ vhosts_path }}/{{ loop.domains[0] }}.conf"
      when: needs_vhost
    - name: fetch certificate with certbot container
      docker_container:
        name: chencriptemos
        image: certbot/certbot
        state: started
        volumes:
          - "{{ althost }}_certs_data:/etc/letsencrypt"
          - "{{ althost }}_certs_www:{{ certs_www_path }}"
        command: "certonly --webroot --agree-tos --expand --email {{ webmaster_email }} -w {{ certs_www_path }} -d {{ loop.domains | join(' -d ') }}"
        detach: yes
        cleanup: yes
      notify:
        - reload proxy
      register: cert_result
      when: obtain_cert
# RESET
- set_fact:
    needs_vhost: no

@@ -0,0 +1,56 @@
- name: ensure certs volume exists
  docker_volume:
    name: "{{ althost }}_certs_data"
    state: present
  register: vol_certs
# TODO the vol_certs dict has no ansible_facts: The error was: 'dict object' has no attribute 'docker_volume'
- name: get certificates mountpoint
  set_fact:
    # certs_mountpoint: "{{ vol_certs.docker_volume.Mountpoint }}"
    certs_mountpoint: "/var/lib/docker/volumes/{{ alt }}_certs_data/_data"
# docker_compose could use proxy.state.running if we used docker compose instead of swarm
- name: check if proxy is running
  shell: netstat -tunl | grep ":80 " | cat
  register: http_port
  changed_when: false
- set_fact:
    proxy_running: "{{ http_port.stdout != \"\" }}"
- name: ensure docker is in crontab's PATH
  cron:
    name: PATH
    env: yes
    value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
- name: ensure cron uses bash
  cron:
    name: SHELL
    env: yes
    value: /bin/bash
- name: automatic letsencrypt certs renewal
  cron:
    name: certificate renewal
    day: 4,18
    hour: 0
    minute: 0
    job: "docker run --rm -v {{ althost }}_certs_data:/etc/letsencrypt -v {{ althost }}_certs_www:/var/www/letsencrypt certbot/certbot renew >> /var/log/renewal.log 2>&1"
- name: proxy update, after certs renewal
  cron:
    name: proxy update
    day: 4,18
    hour: 6
    minute: 10
    job: "docker service update --force {{ althost }}_proxy"
- name: mail proxy update, after certs renewal
  cron:
    name: mail proxy update
    day: 4,18
    hour: 6
    minute: 20
    job: "docker service update {{ althost }}_correspondencia_front"
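
On the host, the renewal entry above lands in root's crontab roughly as follows (the #Ansible: marker is how the cron module tracks its entries; the volume prefix assumes althost=abyayala):

#Ansible: certificate renewal
0 0 4,18 * * docker run --rm -v abyayala_certs_data:/etc/letsencrypt -v abyayala_certs_www:/var/www/letsencrypt certbot/certbot renew >> /var/log/renewal.log 2>&1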

@@ -0,0 +1,8 @@
---
- include_tasks: ../roles/backup/tasks/back/common.yml
- set_fact:
    db_vol: "{{ alt }}_dns_server_bind"
- name: backup binary files
  include_tasks: ../roles/backup/tasks/back/dbvol.yml

@@ -0,0 +1,4 @@
---
- name: reload dns
  command: "docker service update --force {{ althost }}_nombres"

@@ -0,0 +1,39 @@
- set_fact:
    main_domain: "{{ zone.domains[0] }}"
- name: generate authoritative zones
  local_action:
    module: template
    src: "soa.hosts"
    dest: "{{ local_zone_path }}/{{ main_domain }}.hosts"
  changed_when: false
- name: add the zone's other domains as CNAMEs
  local_action:
    module: blockinfile
    path: "{{ local_zone_path }}/{{ main_domain }}.hosts"
    insertafter: ";{{ main_domain }}. IN HINFO comment"
    marker: "; {mark} {{ subdomain | upper }}"
    block: "{{ subdomain }}. IN CNAME {{ main_domain }}."
  when: subdomain != main_domain
  with_items: "{{ zone.domains }}"
  loop_control:
    loop_var: subdomain
  changed_when: false
- name: include authoritative zones in config file
  local_action:
    module: blockinfile
    path: "{{ local_conf_file }}"
    insertafter: "// {{ althost }}"
    marker: "// {mark} {{ main_domain | upper }}"
    block: "{{ lookup('template', 'zone.conf') }}"
  changed_when: false
# we publish here so we don't have to iterate over the authoritative domains again
- name: publish zone file
  copy:
    src: "{{ local_zone_path }}/{{ main_domain }}.hosts"
    dest: "{{ zone_path }}/{{ main_domain }}.hosts"
  notify:
    - reload dns

roles/dns/tasks/main.yml Normal file

@@ -0,0 +1,95 @@
- include_tasks: ../../althost/tasks/compose.yml
- name: pull BIND dns server's image
  docker_image:
    name: "{{ DNS_image }}"
    state: present
  tags: installation
- set_fact:
    local_zone_file: "{{ local_zone_path }}/{{ primary_dns_server }}.hosts"
    local_conf_file: "{{ local_dns_path }}/named.conf.local"
- local_action:
    module: file
    dest: "{{ local_zone_path }}"
    state: directory
- file: dest="{{ zone_path }}" state=directory
- name: generate main zone in local
  local_action:
    module: template
    src: "althost.hosts"
    dest: "{{ local_zone_file }}"
  changed_when: false
- name: generate main configuration file in local
  local_action:
    module: template
    src: named.conf.local
    dest: "{{ local_conf_file }}"
  changed_when: false
# a non-authoritative matrix is a libertarian one
- name: slice matrix to the entries having domains defined
  set_fact:
    matrix_domains: "{{ matrix_domains | default([]) | union(neo.domains) }}"
  with_items: "{{ matrix }}"
  when: (neo.domains is defined) and not (neo.authoritative is defined and neo.authoritative)
  loop_control:
    loop_var: neo
# DNS server pointed to althost with an A record
# TODO and dominio regexp.match primary_dns_server
# TEST point the domain's A record and do not declare it authoritative, see if these CNAMEs work
- name: for each domain add a CNAME to the main zone
  local_action:
    module: blockinfile
    path: "{{ local_zone_file }}"
    insertafter: "{{ primary_dns_server }}. IN HINFO Libre Abierta"
    marker: "; {mark} {{ dominio | upper }}"
    block: "{{ dominio }}. IN CNAME {{ primary_dns_server }}."
  with_items: "{{ matrix_domains }}"
  when: dominio != primary_dns_server and dominio != 'ns1.' ~ primary_dns_server and dominio != mail_server
  loop_control:
    loop_var: dominio
  changed_when: false
# DNS server pointed to althost with an NS record
- name: slice matrix to the authoritative domains
  set_fact:
    authoritative_domains: "{{ authoritative_domains | default([]) | union([zona]) }}"
  with_items: "{{ matrix }}"
  when: (zona.domains is defined) and (zona.authoritative is defined and zona.authoritative)
  loop_control:
    loop_var: zona
# TODO the primary_dns_server domain cannot be declared as authoritative, it would be a duplicate entry
- name: handle authoritative zones
  include_tasks: cnames.yml
  with_items: "{{ authoritative_domains }}"
  when: authoritative_domains is defined
  loop_control:
    loop_var: zone
- name: publish DNS changes
  block:
    - name: create directories
      file:
        dest: "{{ zone_path }}"
        state: directory
    - name: publish main zone file
      copy:
        src: "{{ local_zone_file }}"
        dest: "{{ zone_path }}/{{ primary_dns_server }}.hosts"
      notify:
        - reload dns
    - name: publish config file
      copy:
        src: "{{ local_conf_file }}"
        dest: "{{ dns_path }}/named.conf.local"
      notify:
        - reload dns

@@ -0,0 +1,13 @@
$ttl 38400
{{ primary_dns_server }}. IN SOA {{ primary_dns_server }}. {{ admin_email | regex_replace('@','.') }}. (
        {{ ansible_date_time.epoch }} ; serial
        43200 ; refresh
        1800 ; retry
        3600000 ; expire
        86400 ) ; ttl
{{ primary_dns_server }}. IN NS {{ primary_dns_server }}.
{{ primary_dns_server }}. IN A {{ host_ip }}
ns1.{{ primary_dns_server }}. IN A {{ host_ip }}
{{ primary_dns_server }}. IN HINFO Libre Abierta
{% include "files/custom_dns_records/" ~ primary_dns_server ignore missing %}
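
Rendered against the abyayala group_vars above (the serial is an arbitrary epoch), this template yields approximately:

$ttl 38400
abyaya.la. IN SOA abyaya.la. beta.numerica.cl. (
        1724350539 ; serial
        43200 ; refresh
        1800 ; retry
        3600000 ; expire
        86400 ) ; ttl
abyaya.la. IN NS abyaya.la.
abyaya.la. IN A 5.161.236.18
ns1.abyaya.la. IN A 5.161.236.18
abyaya.la. IN HINFO Libre Abierta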

@@ -0,0 +1,17 @@
//
// Do any local configuration here
//
// Consider adding the 1918 zones here, if they are not used in your
// organization
//include "/etc/bind/zones.rfc1918";
// master server zone
zone "{{ primary_dns_server }}" {
        type master;
        file "/var/lib/bind/{{ primary_dns_server }}.hosts";
};
// {{ althost }}

@@ -0,0 +1 @@
  proxy:

@@ -0,0 +1,9 @@
# BEGIN PROXY
location / {
    proxy_pass https://{{ vhost.service_name }}:10000;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}
# END PROXY

@@ -0,0 +1,11 @@
  {{ service_name }}:
    image: "{{ DNS_image }}"
    networks:
      - proxy
    ports:
      - "53:53/udp"
      - "53:53/tcp"
    volumes:
      - "{{ zone_path }}/:/var/lib/bind/"
      - "{{ dns_path }}/named.conf.local:/etc/bind/named.conf.local"
{% include "service_placement.yml" %}

@@ -0,0 +1,15 @@
$ttl 38400
{{ zone.domains[0] }}. IN SOA {{ primary_dns_server }}. {{ admin_email | regex_replace('@','.') }}. (
        {{ ansible_date_time.epoch }} ; serial
        43200 ; refresh
        1800 ; retry
        3600000 ; expire
        86400 ) ; ttl
{{ zone.domains[0] }}. IN NS {{ primary_dns_server }}.
{{ zone.domains[0] }}. IN A {{ host_ip }}
{% include "files/custom_dns_records/" ~ zone.domains[0] ignore missing %}
;{{ zone.domains[0] }}. IN HINFO comment

@@ -0,0 +1,5 @@
zone "{{ main_domain }}" {
        type master;
        file "/var/lib/bind/{{ main_domain }}.hosts";
};

roles/dns/vars/main.yml Normal file

@@ -0,0 +1,5 @@
DNS_image: numericalatina/bind9
dns_path: "{{ compose_path }}/dns"
zone_path: "{{ dns_path }}/zones"
local_dns_path: "{{ local_compose_path }}/dns"
local_zone_path: "{{ local_dns_path }}/zones"

@@ -0,0 +1,8 @@
---
- include_tasks: ../roles/backup/tasks/back/common.yml
- set_fact:
    db_vol: "{{ alt }}_certs_data"
- name: backup cert files
  include_tasks: ../roles/backup/tasks/back/dbvol.yml

@@ -0,0 +1,5 @@
---
- name: reload proxy
  command: docker service update --force {{ althost }}_proxy

@@ -0,0 +1,14 @@
---
# TODO does not work with 4 replicas
#- name: get service container name
#  command: "docker ps -f name={{ althost }}_proxy --quiet"
#  register: service_container_ID_
#  changed_when: false
#
#- set_fact:
#    service_container_ID: "{{ service_container_ID_.stdout }}"
#    reload_command: "bash -l -c \"nginx -s reload\""
#- name: reload nginx conf
#  shell: "docker exec {{ service_container_ID }} {{ reload_command }}"

@@ -0,0 +1,56 @@
- name: certbot role
  include_role: name=certbot
  tags: certbot
- include_tasks: ../../althost/tasks/compose.yml
  vars: # forcing since this role is included statically
    service_name: proxy
- name: configuration path
  file: path={{ conf_path }} state=directory
# TODO leftover unused vhosts break the proxy
- name: clean vhosts_path
  file: path={{ vhosts_path }} state=absent
  when: clean_vhosts is defined
- name: virtual hosts path
  file: path={{ vhosts_path }} state=directory
- name: generate dhparams
  command: openssl dhparam -outform pem -out {{ conf_path }}/dhparam2048.pem 2048
  args:
    creates: "{{ conf_path }}/dhparam2048.pem"
- name: copy nginx common files
  template: dest={{ conf_path }}/{{ common }} src={{ common }} backup=yes
  with_items:
    - common.conf
    - common_ssl.conf
  loop_control:
    loop_var: common
- name: domains' stuff
  block:
    - name: slice matrix to the entries having domains defined
      set_fact:
        matrix_loop: "{{ matrix_loop | default([]) | union([domino]) }}"
      with_items: "{{ matrix }}"
      when: domino.domains is defined and domino.roles[0] != 'dns'
      loop_control:
        loop_var: domino
    - name: certificates loop
      include_tasks: ../../certbot/tasks/certbot.yml
      with_items: "{{ matrix_loop | default([]) }}"
      tags: certbot
      loop_control:
        loop_var: loop
      when: (service is undefined) or (service is defined and service == loop.service_name)
    - name: vhosts loop
      include_tasks: vhosts.yml
      with_items: "{{ matrix_loop }}"
      loop_control:
        loop_var: vhost
      when: (service is undefined) or (service is defined and service == vhost.service_name)

@@ -0,0 +1,25 @@
- set_fact:
    custom_vhost: "roles/{{ vhost.roles[0] }}/templates/vhost.conf"
    vhost_dest: "{{ vhosts_path }}/{{ vhost.domains[0] }}.conf"
    proxy_conf: "roles/{{ vhost.roles[0] }}/templates/proxy.conf"
- set_fact:
    proxy_conf_look: "{{ lookup('template', proxy_conf) }}"
  when: proxy_conf is file
- name: generate nginx vhosts
  template:
    src: "{{ default_vhost }}"
    dest: "{{ vhost_dest }}"
  when: custom_vhost is not file
  notify:
    - reload proxy
- name: generate nginx custom vhosts
  template:
    src: "{{ custom_vhost }}"
    dest: "{{ vhost_dest }}"
  when: custom_vhost is file
  notify:
    - reload proxy

@@ -0,0 +1,3 @@
location /.well-known/acme-challenge {
    root {{ certs_www_path }};
}

@@ -0,0 +1,11 @@
# Now let's really get fancy, and pre-generate a 2048-bit random parameter
# for the Diffie-Hellman key exchange. If not created and specified, the default is only 1024 bits.
#
# Generated by OpenSSL with the following command:
# openssl dhparam -outform pem -out dhparam2048.pem 2048
ssl_dhparam /etc/nginx/conf/dhparam2048.pem;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
ssl_prefer_server_ciphers on;

@@ -0,0 +1,16 @@
# BEGIN PROXY
location / {
    proxy_ssl_verify off;
    proxy_ssl_name $comun;
    proxy_ssl_server_name on;
    proxy_pass https://$comun;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
{% include "files/custom_proxy_includes/" ~ vhost.domains[0] ignore missing %}
}
# END PROXY

@@ -0,0 +1 @@
  proxy:

@@ -0,0 +1,19 @@
  proxy:
    image: nginx
    deploy:
      replicas: {{ proxy_scale }}
      placement:
        constraints:
          - node.role == manager
    ports:
      - "80:80"
      - "443:443"
    networks:
      - proxy
    volumes:
      - "{{ vhosts_path }}:/etc/nginx/conf.d/"
      - "{{ conf_path }}:/etc/nginx/conf/"
      - "certs_www:{{ certs_www_path }}"
      - "certs_data:{{ nginx_certs_path }}:ro"

@@ -0,0 +1,52 @@
map $http_host $comun {
    hostnames;
    .{{ vhost.domains[0] }} {{ vhost.nodo }};
}
server {
    server_name {{ vhost.domains | join(' ') }};
    listen 80;
    resolver 8.8.8.8 8.8.4.4 valid=300s;
    resolver_timeout 5s;
{% if not needs_vhost and ((vhost.ssl | default(domains_default_ssl)) or (vhost.force_https | default(domains_default_force_https))) %}
    listen 443 ssl;
    # letsencrypt
    ssl_certificate {{ nginx_certs_path }}/live/{{ vhost.domains[0] }}/fullchain.pem;
    ssl_certificate_key {{ nginx_certs_path }}/live/{{ vhost.domains[0] }}/privkey.pem;
    include conf/common_ssl.conf;
{% if vhost.force_https | default(domains_default_force_https) %}
    if ($scheme != 'https') {
        rewrite ^/(.*)$ https://$host/$1 redirect;
    }
{% else %}
    if ($scheme != 'https') {
        rewrite ^/(.*)admin/(.*)$ https://$host/$1admin/$2 redirect;
        rewrite ^/(.*)login/(.*)$ https://$host/$1login/$2 redirect;
    }
{% endif %}
{% endif %}
    include conf/common.conf;
{% if not needs_vhost %}
{% if proxy_conf is file %}
{{ proxy_conf_look }}
{% else %}
{% include "default_proxy.conf" %}
{% endif %}
{% endif %}
{% include "files/custom_server_includes/" ~ vhost.domains[0] ignore missing %}
}
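
For the chem entry in abyayala.yml, the first pass (while needs_vhost is true, i.e. before the certificate exists) renders approximately this plain-HTTP vhost for certbot's webroot challenge:

map $http_host $comun {
    hostnames;
    .marmite.abyaya.la marmite.comun.abyaya.la;
}
server {
    server_name marmite.abyaya.la wordpress.marmite.abyaya.la;
    listen 80;
    resolver 8.8.8.8 8.8.4.4 valid=300s;
    resolver_timeout 5s;
    include conf/common.conf;
}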

@@ -0,0 +1,2 @@
  certs_data:
  certs_www:

roles/proxy/vars/main.yml Normal file

@@ -0,0 +1,15 @@
domains_default_ssl: no
domains_default_force_https: no
# nginx
vhosts_path: "{{ compose_path }}/proxy/vhosts"
conf_path: "{{ compose_path }}/proxy/conf"
certs_www_path: /var/www/letsencrypt
nginx_certs_path: /etc/nginx/certs
# defaults
needs_vhost: no
default_vhost: roles/proxy/templates/vhost.conf
# certbot
webmaster_email: webmaster@numerica.cl

roles/rap/hosts/latinared Normal file

@@ -0,0 +1,16 @@
Address = latina.red
Port = 65000
-----BEGIN RSA PUBLIC KEY-----
MIICCgKCAgEAv0oPDlf8mbr8fbiLLfUEucm5QxFLxarqhuph/ui3PmDXoiJGQK0d
zHl92dYDdgRMw+4b1ogThGUZ8ryAt3UtpD8B6oLCRmhGn2tjNIFQN3kUIJQ3aa+M
5nEBf6ys0IHEieUYVHXr5lxSc09Y/wsNbtATVZ25VT0ercR0c3gbpcypu8JpTdyn
iul43IXiO8zKjeQhn9VQAnwIMzAa+6Y5Aj4y2HtFsUqzjJYY3KZBRTTNR1277QJ8
NDMuQT0opE79E8ipOZaOdcFRZIUOG8cBhhavozSVCzjI8A1iA8QrJpEjo/zjV+D4
h5EQpztvvzYdlOsE/9Cclc+iAGbnxOUt6K2bopMu/TI+gNh7DoA2wgFZ7DlQhCuO
MiYV+9dzI4gKZvAO0IxiGnmq8s2UjWRnD9zVZztU5IOE1xBo7OrmcHnnOvsMMsmx
Bc8F8jR6u8XgfSODS22sNK62N9joC2E3Gg/ulW75IUqIlyyHumwjm0XMuIXI6qDZ
AUv0lOnYOWATveQlgCqLO9OyJmocbejz1ZtkcLgI/EDzwDYEpaKLf+39a7eLuNRa
sqf+BIoJkGxi6LVG8gW5YhOLaaPclmsOztiID7vPHs0jH4bY7N0JR2TI8aaIlY9O
P6ZJArG60IPtMUvurTrSmUGdHLoSg6+PSPZLfYMXoqKatrAhdEcc9ssCAwEAAQ==
-----END RSA PUBLIC KEY-----

roles/rap/hosts/tierra Normal file

@@ -0,0 +1,16 @@
Address = tierra
Port = 65000
-----BEGIN RSA PUBLIC KEY-----
MIICCgKCAgEAtr75G3mVKBr8k7Sz5LQLuOBMdN2cwIZVQt0BVPq/gWeUowDPM8he
QcjmSDZrwRE0NUQRiB5/Gicb9UXoQaEfq7xQfgcMl4Nt9MfHbhY6sORviAyHOsjM
xYw/vy5SfWq3O7E/sH01BEbUt5r/oVoG2dy2rDQ5xBW30zeMJisvTMH5kMwHAWIG
kdq0xI1WgFEa8eXo004Li8QD9k064wajN3mU0rUXEY0FCl9GbjP1S6a6ge3Sz+MP
Je5RZzbNxvtY/YVeQBzjwUDawsjTZ9Lx968U9RaO2Z7LBrltLSSVU4aw6tH78zN8
qVZ5PR0LmUQ38FWYNGcbUtfWRQACZOYkgBKbR0UOyLIbEODMUMmqZD7yOwAWf9BI
Tw8NemFmkCs+Jb2LrayIOYaqvV3iY01oL2NgPhjV01XLs51GnHmiWUTfsEvu1lmM
2uJoX75AzlL8We2RgWjknux3m4ce1sVTzMM36uatoz2lLDwBw9F6RKd2h6Gwb0kI
k5ltlnjEttyQg/X1NowQClOfaUza+75mGaj1owK1zoYWZEUk4iRwoCT0KM1jnSKz
z4w0uDz9YHhHDEG/evD6VLgPLcXdU2a+dKMU5GLGOcElKxi0p9bfzLJ9Efb2+PEY
IJYfFlMT3YEacWsxLFuxUGbTbVCNccCIwv6QVQ8gnio+rowr8q64wwkCAwEAAQ==
-----END RSA PUBLIC KEY-----

@@ -0,0 +1,60 @@
- block:
    - name: ensure sudo is present
      tags: apt
      apt: name=sudo state=present
    - name: Make sure we have a 'sudo' group
      group: name=sudo state=present
    - name: Allow 'sudo' group to have passwordless sudo
      lineinfile:
        dest: /etc/sudoers
        state: present
        regexp: '^%sudo'
        line: '%sudo ALL=(ALL) NOPASSWD: ALL'
    - name: Ensure user is present
      user:
        name: "{{ item.0.name }}"
        comment: "{{ item.0.comment }}"
        state: present
        shell: "{{ item.0.shell | default('/bin/bash') }}"
      with_subelements:
        - "{{ ssh_users }}"
        - servers_allow
        - skip_missing: true
      when: (item.1 == inventory_hostname or item.1 == "all") and (item.0.root is undefined)
    - name: Ensure user is in sudo group
      user:
        name: "{{ item.0.name }}"
        state: present
        groups: sudo
        append: yes
      with_subelements:
        - "{{ ssh_users }}"
        - servers_allow
        - skip_missing: true
      when: (item.1 == inventory_hostname or item.1 == "all") and (item.0.sudo is defined and item.0.sudo)
    - name: Populate user authorized_keys
      authorized_key: user="{{ item.0.name }}"
                      key="{{ lookup('file', 'ssh/'+item.0.name+'.pub') }}"
                      state=present
      with_subelements:
        - "{{ ssh_users }}"
        - servers_allow
        - skip_missing: true
      when: (item.1 == inventory_hostname or item.1 == "all") and (item.0.root is undefined)
    - name: Populate root's authorized_keys
      authorized_key: user="root"
                      key="{{ lookup('file', 'ssh/'+item.0.name+'.pub') }}"
                      state=present
      with_subelements:
        - "{{ ssh_users }}"
        - servers_allow
        - skip_missing: true
      when: (item.1 == inventory_hostname or item.1 == "all") and (item.0.root is defined and item.0.root)
  tags: users
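
These tasks read ssh_users from the vaulted, gitignored keys file, with one public key per user expected at ssh/<name>.pub. A hypothetical shape (every value invented for illustration; the empty KEYS: dictionary is the one seeded by the althost role):

KEYS:
ssh_users:
  - name: beta
    comment: "admin account"
    sudo: yes
    servers_allow:
      - all
  - name: respaldo
    comment: "backup operator, authorized as root"
    root: yes
    servers_allow:
      - 5.161.236.18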

tasks/deploy.yml Normal file

@@ -0,0 +1,39 @@
# ansible-playbook --vault-id @prompt deploy.yml -e "host=digitalocean alt=numerica"
# optional: service restricts the deploy to one specific service
---
- hosts: "{{ host }}"
  vars_files:
    - "{{ keys_file }}"
  tasks:
    - name: import matrix
      local_action: "include_vars dir=./ files_matching={{ alt }}.yml"
    - include_role: name=althost
      # tags: installation
    - include_role: name=users
      tags: users, installation
    - include_role: name=proxy
      tags: proxy
    - name: publish docker composition
      copy:
        src: "{{ local_compose_path }}/docker-compose.yml"
        dest: "{{ compose_path }}/docker-compose.yml"
      notify:
        - deploy docker
      when: service is undefined
    - name: publish a mini composition for single services
      copy:
        src: "{{ local_compose_path }}/docker-compose.yml"
        dest: "{{ compose_path }}/docker-compose-mini.yml"
      notify:
        - deploy service update
      when: service is defined
    - name: self-destruct from crontab
      cron:
        name: nightly deploy
        state: absent
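
A single-service rollout (a sketch; service must match a service_name from the matrix, here the chem entry of abyayala.yml) then looks like:

ansible-playbook --vault-id @prompt tasks/deploy.yml -e "host=abyayala alt=abyayala service=chem"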

tasks/rap.yml Normal file

@@ -0,0 +1,72 @@
# ansible-playbook rap.yml -e "host=hetzner"
---
- hosts: "{{ host }}"
  vars:
    rap:
      port: 65000
    rap_dir: "{{ directory | default('/root') }}"
  environment:
    PATH: "/usr/sbin:{{ ansible_env.PATH }}"
  tasks:
    - name: install dependencies
      package:
        name: "{{ item }}"
        state: present
      loop:
        - rsync
        - git
        - tinc
      become: yes
    - name: iptables
      iptables:
        action: append
        chain: INPUT
        table: filter
        destination_port: "{{ rap.port }}"
        protocol: "{{ item }}"
        jump: ACCEPT
      loop:
        - tcp
        - udp
      become: yes
    - name: install rap
      git:
        repo: "https://0xacab.org/pip/rap.git"
        dest: "{{ rap_dir }}/rap"
    - set_fact:
        hostname: "{% if host != 'localhost' %}{{ ansible_hostname }}{% else %}{{ inventory_hostname }}{% endif %}"
      # inventory_hostname => localhost/IP | ansible_hostname => infra
    - name: node name
      shell: 'echo {{ hostname }} | tr -cd "[:alnum:]" | tr "[:upper:]" "[:lower:]"'
      register: node_name
      changed_when: false
    - stat:
        path: "{{ rap_dir }}/rap/hosts/{{ node_name.stdout }}"
      register: node_exists
    - name: init node
      when: not node_exists.stat.exists
      shell: "cd {{ rap_dir }}/rap && ./rap init -f -a {{ hostname }} -p {{ rap.port }}"
    - name: fetch hosts files
      fetch:
        flat: true
        src: "{{ rap_dir }}/rap/hosts/{{ node_name.stdout }}"
        dest: "./rap/hosts/{{ node_name.stdout }}"
    - name: copy hosts files
      copy:
        src: "./rap/hosts/"
        dest: "{{ rap_dir }}/rap/hosts/"
    - name: connectto
      # for every other known host, declare a ConnectTo from this node
      shell: 'cd {{ rap_dir }}/rap && for host in ./hosts/*; do test "./hosts/{{ node_name.stdout }}" = "${host}" && continue ; basename "${host}" | xargs -r ./rap connectto "{{ node_name.stdout }}"; done'
    - name: install node
      shell: "cd {{ rap_dir }}/rap && ./rap install {{ node_name.stdout }}"

tasks/users.yml Normal file

@@ -0,0 +1,6 @@
# ansible-playbook --vault-id @prompt users.yml -e "host=hetzner"
---
- hosts: "{{ host }}"
  tasks:
    - include_role:
        name: users