initial commit

Paul Warren 2023-10-21 19:53:32 +11:00
commit 2b63ed37be
11 changed files with 356 additions and 0 deletions

68
README.md Normal file

@ -0,0 +1,68 @@
# Tailscale
This Ansible role installs and configures the [Tailscale client](https://tailscale.com/download)
for Linux (Ubuntu) devices.
This role was written based on [artis3n/ansible-role-tailscale](https://github.com/artis3n/ansible-role-tailscale).
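
A minimal playbook sketch of how the role might be applied (the host group and the vaulted variable name are placeholders; the role refuses to run unless `tailscale_auth_key` is defined, ideally ansible-vault encrypted):

```yaml
---
- hosts: vpn_nodes        # placeholder group name
  become: yes
  roles:
    - role: tailscale
      vars:
        tailscale_auth_key: "{{ vault_tailscale_auth_key }}"  # keep the real key in ansible-vault
```
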
## Use Tailscale as exit node and DNS server for devices
This is useful, for example, when abroad: the point is to route *all traffic* via
our Tailscale exit node, *including* DNS queries.
Designate a Tailscale node as an **exit node** via the web UI.
To route the traffic from your device to that exit node,
run `tailscale up --exit-node=<ip-exit-node>` (on Linux) or select the corresponding
menu option on Android.
When you use the exit node feature, DNS traffic is automatically forwarded
(so [no DNS leakage](https://github.com/tailscale/tailscale/issues/1713)).
Awesome!
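
On the exit node itself, `tailscale up` has to be run with `--advertise-exit-node` (and the node approved in the admin console). With this role, that flag would go into the per-host `args` described in `vars/main.yml`; a sketch with placeholder names:

```yaml
tailnets:
  personal:                # placeholder tailnet/account name
    - host: pihole-lxc     # placeholder inventory hostname
      args: "--advertise-exit-node"
```
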
Tailscale exit nodes can then be shared with other users in our GitHub org,
or with external users. Very cool!
Note that you need to add the Tailscale IP address of the exit node to
the **Nameservers** setting in the Tailscale web UI. Also, it might be a good
idea to enable the **Override local DNS** option.
Finally, internet connectivity from your Tailscale nodes will not work at all
unless you set Pi-Hole's listening behaviour to **Listen on all interfaces, permit all origins**
(default was **Listen only on eth0**).
## Use Tailscale as DNS server for Android devices?
The idea is to *not* route all traffic via the exit node, only the DNS traffic.
This might be useful in certain situations (where you don't mind the ISP seeing
your traffic, but you still want to benefit from our ad/tracker blocking).
I have not tested this properly yet.
+ https://shotor.com/blog/run-your-own-mesh-vpn-and-dns-with-tailscale-and-pihole/
+ https://forum.tailscale.com/t/need-some-help-with-default-dns-when-using-tailscale/341
+ https://github.com/tailscale/tailscale/issues/915
+ https://github.com/tailscale/tailscale/issues/74
## Notes on running Tailscale client inside LXC container
My DNS server (PiHole + unbound) runs as an LXC container.
In the same container we also run Tailscale.
This works fine. For details on how the LXC profile was set up,
see the [lxd-server role](https://codeberg.org/ansible/lxd-server).
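
For reference, a typical LXD profile fragment that exposes `/dev/net/tun` to the container looks roughly like the sketch below (an assumption based on general LXD/Tailscale usage, not necessarily what the lxd-server role does):

```yaml
config:
  linux.kernel_modules: tun
devices:
  tun:
    path: /dev/net/tun
    type: unix-char
```
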
## Refs
+ https://github.com/artis3n/ansible-role-tailscale
+ https://github.com/dockpack/base_tailscale
+ https://tailscale.com/kb/1103/exit-nodes/
+ https://tailscale.com/kb/1114/pi-hole/
+ https://tailscale.com/kb/1130/lxc-unprivileged/
+ https://tailscale.com/kb/1112/userspace-networking/
+ https://tailscale.com/kb/1084/sharing/#sharing--exit-nodes

25
defaults/main.yml Normal file

@ -0,0 +1,25 @@
---
tailscale_package: tailscale
tailscale_service: tailscaled
release_stability: stable
apt_dependencies:
  - gnupg2
  - gnupg-agent
  - apt-transport-https
  - python3-apt
legacy_apt_dependencies:
  # Only install on legacy Debian systems
  - python-apt
distro:
  ubuntu: ubuntu
  debian: debian
apt_deb: deb https://pkgs.tailscale.com/{{ release_stability | lower }}/{{ distro[ansible_distribution | lower] }} {{ ansible_distribution_release | lower }} main
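# e.g. with the defaults above on Ubuntu 22.04 ("jammy"), apt_deb renders to
# "deb https://pkgs.tailscale.com/stable/ubuntu jammy main" (illustrative only)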
apt_signkey: https://pkgs.tailscale.com/{{ release_stability | lower }}/{{ distro[ansible_distribution | lower] }}/{{ ansible_distribution_release | lower }}.gpg
original_distribution_major_version: '{{ ansible_distribution_major_version }}'

19
handlers/main.yml Normal file

@ -0,0 +1,19 @@
---
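# These handlers are chained via `listen:`; a task would trigger all three with
# `notify: Confirm Tailscale is connected` (illustrative usage, not wired up in
# the tasks in this commit).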
- name: Tailscale status
  listen: Confirm Tailscale is connected
  ansible.builtin.command: tailscale status
  register: handlers_tailscale_status

- name: Debug Tailscale status
  listen: Confirm Tailscale is connected
  ansible.builtin.debug:
    var: handlers_tailscale_status
  when: verbose | bool

- name: Assert Tailscale is connected
  listen: Confirm Tailscale is connected
  ansible.builtin.assert:
    that:
      - handlers_tailscale_status.stdout | length != 0
      - handlers_tailscale_status.stdout is not match('\[L\+V9o\]')

33
tasks/debian.yml Normal file

@ -0,0 +1,33 @@
---
- name: Install Tailscale apt dependencies
  ansible.builtin.apt:
    name: "{{ apt_dependencies }}"
    state: present
    update_cache: yes

# there should be a better way to select only old Debian systems
- name: Install legacy apt dependencies (only for old Debian systems)
  ansible.builtin.apt:
    name: "{{ legacy_apt_dependencies }}"
    state: present
    update_cache: yes
  when:
    - ansible_distribution == "Debian"
    - ansible_distribution_major_version | int < 10
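
# NOTE: apt-key is deprecated on newer Debian/Ubuntu releases; a keyring file
# referenced via `signed-by=` in the sources entry is the usual alternative to
# the apt_key module used below.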
- name: Add Tailscale signing key
  ansible.builtin.apt_key:
    url: "{{ apt_signkey }}"
    state: present

- name: Add Tailscale repo to apt sources
  ansible.builtin.apt_repository:
    repo: "{{ apt_deb }}"
    state: present
    filename: "tailscale-{{ ansible_lsb.id | lower }}"

- name: Install Tailscale
  ansible.builtin.apt:
    name: "{{ tailscale_package }}"
    state: present

43
tasks/ip-forwarding.yml Normal file

@ -0,0 +1,43 @@
---
# We must enable IP forwarding for hosts that advertise routes or are exit nodes
# https://tailscale.com/kb/1104/enable-ip-forwarding
# https://docs.ansible.com/ansible/latest/collections/ansible/posix/sysctl_module.html
# https://tailscale.com/kb/1019/subnets
# rename the ugly "item.value" of the outer loop to something more workable
- name: Rename value var from outer loop
  ansible.builtin.set_fact:
    tsnet: "{{ item.value }}"
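
# tsnet is expected to be a list of entries shaped roughly like
#   - host: <inventory hostname>
#     args: "--advertise-exit-node ..."   # optional extra flags for `tailscale up`
# (shape inferred from the conditions below; see also the tailnets notes in vars/main.yml)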

- name: Enable IPv4 forwarding for hosts that advertise routes or exit nodes
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    state: present
    reload: yes
    sysctl_set: yes
  loop: "{{ tsnet }}"
  loop_control:
    loop_var: tshost
  when:
    - inventory_hostname == tshost.host
    - >
      tshost.args.find("advertise-routes") != -1 or
      tshost.args.find("advertise-exit-node") != -1

- name: Enable IPv6 forwarding for hosts that advertise routes or exit nodes
  ansible.posix.sysctl:
    name: net.ipv6.conf.all.forwarding
    value: '1'
    state: present
    reload: yes
    sysctl_set: yes
  loop: "{{ tsnet }}"
  loop_control:
    loop_var: tshost
  when:
    - inventory_hostname == tshost.host
    - >
      tshost.args.find("advertise-routes") != -1 or
      tshost.args.find("advertise-exit-node") != -1

37
tasks/main.yml Normal file

@ -0,0 +1,37 @@
---
- name: Tailscale pre-auth key required (skipped if auth key exists)
  ansible.builtin.fail:
    msg: >
      You must include a Node Pre-Authorization key.
      Set a `tailscale_auth_key` ansible-vault encrypted variable.
      You can create this key from: https://login.tailscale.com/admin/settings/authkeys
  when: tailscale_auth_key is not defined

# ansible_distribution == Debian matches Raspbian too
- name: PreReqs
  when: ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian'
  ansible.builtin.include_tasks: debian.yml

- name: Enable Tailscale service
  ansible.builtin.systemd:
    name: "{{ tailscale_service }}"
    state: started
    enabled: yes

# On a fresh install, this task returns a non-zero return code with
# "stdout": "Logged out." (the task fails and the playbook stops here).
# Why am I bothering with these checks, if all they do is cause the playbook to fail?
- name: Check if Tailscale is connected
  ansible.builtin.command: tailscale status
  changed_when: false
  register: tailscale_status
  failed_when: tailscale_status.rc != 0

# - name: Print Tailscale status
#   ansible.builtin.debug:
#     var: tailscale_status
#   when: verbose | bool

- name: Bring up Tailscale.com clients
  ansible.builtin.include_tasks: tailscale.com.yml

78
tasks/main.yml~ Normal file

@ -0,0 +1,78 @@
---
- name: Tailscale pre-auth key required (skipped if auth key exists)
  ansible.builtin.fail:
    msg: >
      You must include a Node Pre-Authorization key.
      Set a `tailscale_auth_key` ansible-vault encrypted variable.
      You can create this key from: https://login.tailscale.com/admin/settings/authkeys
  when: tailscale_auth_key is not defined

# Print an error message to the console but proceed anyway
- name: Unstable warning
  ansible.builtin.fail:
    msg: Installing Tailscale from the unstable branch. This is bleeding edge and may have issues. Be warned.
  when: release_stability | lower == 'unstable'
  ignore_errors: yes

# this is for debugging purposes
# - name: Detecting operating system
#   debug:
#     msg: "{{ ansible_distribution }} {{ ansible_distribution_major_version }} ({{ ansible_distribution_release }})"

# ansible_distribution == Debian matches Raspbian too
- name: Debian and Ubuntu
  when: ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian'
  ansible.builtin.include_tasks: debian.yml

- name: Zorin OS
  when: ansible_distribution == 'Zorin OS'
  ansible.builtin.include_tasks: zorinos.yml

- name: Enable Tailscale service
  ansible.builtin.systemd:
    name: "{{ tailscale_service }}"
    state: started
    enabled: yes
# On a fresh install, this task returns a non-zero return code with
# "stdout": "Logged out." (the task fails and the playbook stops here).
# Why am I bothering with these checks, if all they do is cause the playbook to fail?
# - name: Check if Tailscale is connected
#   ansible.builtin.command: tailscale status
#   changed_when: false
#   register: tailscale_status
#   failed_when: tailscale_status.rc != 0

# - name: Print Tailscale status
#   debug:
#     var: tailscale_status
#   when: verbose | bool
# Note the use of include_tasks to nest two loops inside each other
# https://docs.ansible.com/ansible/latest/user_guide/playbooks_loops.html#defining-inner-and-outer-variable-names-with-loop-var
#- name: Set IPv4/6 forwarding for clients that advertise routes or exit nodes
# block:
# Note about the quirky behaviour of lookup() in this context:
# Without wantlist=true, this code fails if the tailnets variable contains
# just a single tailnet (but not if it contains more than one).
# With wantlist=true, this code seems to work for both n==1 and n>1. Weird.
# - name: Enable IP forwarding for Tailscale clients
# ansible.builtin.include_tasks: ip-forwarding.yml
# NOTE, the lookup() returns key/value subkeys (what we want is in "value")
# ALSO, watch out: since the lookup() returns our list inside a subkey,
# using loop_control.loop_var: only leads to weird errors. We handle this
# by assignment inside the ip-forwarding.yml file instead.
# https://docs.ansible.com/ansible/latest/collections/ansible/builtin/dict_lookup.html
# loop: "{{ lookup('ansible.builtin.dict', tailnets, wantlist=true) }}"
# END OF BLOCK
# Bring up Tailscale service on clients connected to tailscale.com
# I don't think this is useful for Headscale due to the way keys are set up
# initially, at least not until I understand it better
- name: Bring up Tailscale.com clients
  ansible.builtin.include_tasks: tailscale.com.yml
  # loop: "{{ lookup('ansible.builtin.dict', tailnets, wantlist=true) }}"

5
tasks/tailscale.com.yml Normal file

@ -0,0 +1,5 @@
---
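# --login-server points the client at a custom coordination server (presumably a
# self-hosted Headscale instance at ts.longley.ws); tshost.args carries optional
# per-host flags such as --advertise-exit-node (see the tailnets notes in vars/main.yml).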
- name: Tailscale up
  ansible.builtin.command: >
    tailscale up --authkey={{ tailscale_auth_key }} {{ tshost.args | default() }} --login-server=https://ts.longley.ws/

24
tasks/zorinos.yml Normal file

@ -0,0 +1,24 @@
---
- name: Install Tailscale apt dependencies
  ansible.builtin.apt:
    name: "{{ apt_dependencies }}"
    state: present
    update_cache: yes

# if Zorin, then replace ansible_lsb.id with "ubuntu" (instead of zorin)
- name: Add Tailscale signing key
  ansible.builtin.apt_key:
    url: "https://pkgs.tailscale.com/{{ release_stability | lower }}/ubuntu/{{ ansible_distribution_release | lower }}.gpg"
    state: present

- name: Add Tailscale deb to apt sources
  ansible.builtin.apt_repository:
    repo: "deb https://pkgs.tailscale.com/{{ release_stability | lower }}/ubuntu {{ ansible_distribution_release | lower }} main"
    state: present
    filename: "tailscale-ubuntu"

- name: Install Tailscale
  ansible.builtin.apt:
    name: "{{ tailscale_package }}"
    state: present

10
ts.yaml~ Normal file

@ -0,0 +1,10 @@
---
- hosts: cluster
  become: yes
  roles:
    - role: tailscale
      vars:
        - tailscale_auth_key: 'd4d8be0f07b30d4bc7ee43b99431035dc089e79a0c42dc30'

14
vars/main.yml Normal file

@ -0,0 +1,14 @@
---
# whether to output debug information during role execution
verbose: false
# new auth keys can no longer be set to never expire; they are only valid for up to 90 days
# https://github.com/tailscale/tailscale/issues/1151
# NOTE: nodes stay authorised even after the key expires, so expired keys only affect restarted clients
tailscale_auth_key: '4bca68571ced2873b48de87a8a0c95497cc6b1599705108e'
# Tailscale tailnets
# tailnets contains one or multiple dicts (each dict represents a tailnet, i.e., an account)
# and each dict contains a list of hosts (hostname and optional arguments for `tailscale up ...`)
#tailnets: "{{ lookup('community.general.passwordstore', 'ansible/roles/tailscale/hosts subkey=tailnets') }}"
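# Hypothetical example of the expected shape (real values live in the passwordstore entry above):
# tailnets:
#   personal:
#     - host: pihole-lxc
#       args: "--advertise-exit-node --advertise-routes=192.168.1.0/24"
#     - host: laptop
#       args: ""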