Merge branch 'master' into patch-1

Jack Ivanov 2019-05-17 15:21:28 +02:00, committed by GitHub
commit 9433b1881e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
95 changed files with 2560 additions and 1342 deletions

.ansible-lint Normal file

@ -0,0 +1,3 @@
skip_list:
- '204'
verbosity: 1

.travis.yml

@ -1,26 +1,21 @@
--- ---
language: python language: python
python: "2.7" python: "2.7"
sudo: required dist: xenial
dist: trusty
services: services:
- docker - docker
matrix:
fast_finish: true
addons: addons:
apt: apt:
sources: sources: &default_sources
- sourceline: 'ppa:ubuntu-lxc/stable' - sourceline: 'ppa:ubuntu-lxc/stable'
- sourceline: 'ppa:wireguard/wireguard' - sourceline: 'ppa:wireguard/wireguard'
packages: packages: &default_packages
- python-pip - python-pip
- lxd - lxd
- expect-dev - expect-dev
- debootstrap - debootstrap
- shellcheck
- tree - tree
- bridge-utils - bridge-utils
- dnsutils - dnsutils
@ -29,7 +24,12 @@ addons:
- libffi-dev - libffi-dev
- python-dev - python-dev
- linux-headers-$(uname -r) - linux-headers-$(uname -r)
- wireguard-dkms - wireguard
- libxml2-utils
- crudini
- fping
- strongswan
- libstrongswan-standard-plugins
cache: cache:
directories: directories:
@ -41,35 +41,66 @@ before_cache:
- sudo tar cf $HOME/lxc/cache.tar /var/lib/lxd/images/ - sudo tar cf $HOME/lxc/cache.tar /var/lib/lxd/images/
- sudo chown $USER. $HOME/lxc/cache.tar - sudo chown $USER. $HOME/lxc/cache.tar
env: custom_scripts:
- LXC_NAME=docker LXC_DISTRO=ubuntu LXC_RELEASE=18.04 provisioning: &provisioning
before_install:
- test "${LXC_NAME}" != "docker" && sudo modprobe wireguard || docker build -t travis/algo .
install:
- sudo tar xf $HOME/lxc/cache.tar -C / || echo "Didn't extract cache."
- ssh-keygen -f ~/.ssh/id_rsa -t rsa -N '' - ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
- chmod 0644 ~/.ssh/config - sudo ./tests/pre-deploy.sh
- echo -e "#cloud-config\nssh_authorized_keys:\n - $(cat ~/.ssh/id_rsa.pub)" | sudo lxc profile set default user.user-data - - 'sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 20/" config.cfg'
- sudo cp -f tests/lxd-bridge /etc/default/lxd-bridge tests: &tests
- sudo service lxd restart - sudo ./tests/wireguard-client.sh
- sudo lxc launch ${LXC_DISTRO}:${LXC_RELEASE} ${LXC_NAME} - sudo env "PATH=$PATH" ./tests/ipsec-client.sh
- until host ${LXC_NAME}.lxd 10.0.8.1 -t A; do sleep 3; done - sudo ./tests/ssh-tunnel.sh
- export LXC_IP="$(dig ${LXC_NAME}.lxd @10.0.8.1 +short)"
- pip install -r requirements.txt
- pip install ansible-lint
- gem install awesome_bot
- ansible-playbook --version
- tree . -L 2
matrix:
fast_finish: true
include:
- stage: Tests
name: code checks and linters
addons:
apt:
packages:
- shellcheck
script: script:
# - awesome_bot --allow-dupe --skip-save-results *.md docs/*.md --white-list paypal.com,do.co,microsoft.com,https://github.com/trailofbits/algo/archive/master.zip,https://github.com/trailofbits/algo/issues/new - pip install ansible-lint
# - shellcheck algo - shellcheck algo install.sh
# - ansible-lint main.yml users.yml deploy_client.yml
- ansible-playbook main.yml --syntax-check - ansible-playbook main.yml --syntax-check
- ansible-lint -v roles/*/*/*.yml playbooks/*.yml *.yml
- stage: Deploy
name: local deployment from docker
addons:
apt:
sources: *default_sources
packages: *default_packages
env: DEPLOY=docker
before_install: *provisioning
before_script:
- docker build -t travis/algo .
- ./tests/local-deploy.sh - ./tests/local-deploy.sh
- ./tests/update-users.sh - ./tests/update-users.sh
script: *tests
- stage: Deploy
name: cloud-init deployment
addons:
apt:
sources: *default_sources
packages: *default_packages
env: DEPLOY=cloud-init
before_install: *provisioning
before_script:
- until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done
- ( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & )
- |
until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do
echo 'Cloud init is not finished. Sleep for 30 seconds';
sleep 30;
done
- sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml
- sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ .
- sudo lxc file pull algo/root/algo-configs.tar ./
- sudo tar -C ./configs -zxf algo-configs.tar
script: *tests
notifications: notifications:
email: false email: false

README.md

@ -72,15 +72,15 @@ That's it! You will get the message below when the server deployment process com
You can now set up clients to connect to it, e.g. your iPhone or laptop. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below. You can now set up clients to connect to it, e.g. your iPhone or laptop. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below.
``` ```
"\"#----------------------------------------------------------------------#\"", "# Congratulations! #"
"\"# Congratulations! #\"", "# Your Algo server is running. #"
"\"# Your Algo server is running. #\"", "# Config files and certificates are in the ./configs/ directory. #"
"\"# Config files and certificates are in the ./configs/ directory. #\"", "# Go to https://whoer.net/ after connecting #"
"\"# Go to https://whoer.net/ after connecting #\"", "# and ensure that all your traffic passes through the VPN. #"
"\"# and ensure that all your traffic passes through the VPN. #\"", "# Local DNS resolver 172.16.0.1 #"
"\"# Local DNS resolver 172.16.0.1 #\"", "# The p12 and SSH keys password for new users is XXXXXXXX #"
"\"# The p12 and SSH keys password is XXXXXXXX #\"", "# The CA key password is XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #"
"\"#----------------------------------------------------------------------#\"", "# Shell access: ssh -i configs/algo.pem root@xxx.xxx.xx.xx #"
``` ```
## Configure the VPN Clients ## Configure the VPN Clients
@ -93,11 +93,13 @@ WireGuard is used to provide VPN services on Apple devices. Algo generates a Wir
On iOS, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1441195209?mt=8) app from the iOS App Store. Then, use the WireGuard app to scan the QR code or AirDrop the configuration file to the device. On iOS, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1441195209?mt=8) app from the iOS App Store. Then, use the WireGuard app to scan the QR code or AirDrop the configuration file to the device.
On macOS Mojave or later, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1451685025?mt=12) app from the Mac App Store. WireGuard will appear in the menu bar once you run the app. Click on the WireGuard icon, choose **Import tunnel(s) from file...**, then select the appropriate WireGuard configuration file. Enable "Connect on Demand" by editing the tunnel configuration in the WireGuard app. On macOS Mojave or later, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1451685025?mt=12) app from the Mac App Store. WireGuard will appear in the menu bar once you run the app. Click on the WireGuard icon, choose **Import tunnel(s) from file...**, then select the appropriate WireGuard configuration file.
On either iOS or macOS, you can enable "Connect on Demand" and/or exclude certain trusted Wi-Fi networks (such as your home or work) by editing the tunnel configuration in the WireGuard app. (Algo can't do this automatically for you.)
Installing WireGuard is a little more complicated on older versions of macOS. See [Using macOS as a Client with WireGuard](docs/client-macos-wireguard.md). Installing WireGuard is a little more complicated on older versions of macOS. See [Using macOS as a Client with WireGuard](docs/client-macos-wireguard.md).
If you prefer to use the built-in IPSEC VPN on Apple devices, then see [Using Apple Devices as a Client with IPSEC](docs/client-apple-ipsec.md). If you prefer to use the built-in IPSEC VPN on Apple devices, or need "Connect on Demand" or excluded Wi-Fi networks automatically configured, then see [Using Apple Devices as a Client with IPSEC](docs/client-apple-ipsec.md).
### Android Devices ### Android Devices
@ -164,16 +166,14 @@ Use the example command below to start an SSH tunnel by replacing `user` and `ip
## SSH into Algo Server ## SSH into Algo Server
To SSH into the Algo server for administrative purposes you can use the example command below by replacing `ip` with your own: Your Algo server is configured for key-only SSH access for administrative purposes. Open the Terminal app, `cd` into the `algo-master` directory where you originally downloaded Algo, and then use the command listed on the success message:
`ssh root@ip -i ~/.ssh/algo.pem` `ssh -i configs/algo.pem user@ip`
If you find yourself regularly logging into Algo then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently. where `user` is either `root` or `ubuntu` as listed on the success message, and `ip` is the IP address of your Algo server. If you find yourself regularly logging into the server then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently.
`ssh-add ~/.ssh/algo > /dev/null 2>&1` `ssh-add ~/.ssh/algo > /dev/null 2>&1`
Note the admin username is `ubuntu` instead of `root` on providers other than Digital Ocean.
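If you log in often, an entry in `~/.ssh/config` keeps these details in one place so that a plain `ssh algo` works. The host alias, address, user, and key path below are placeholders; use the user and key location shown in your own success message:

```
Host algo
    HostName xxx.xxx.xx.xx
    User ubuntu
    IdentityFile ~/.ssh/algo.pem
```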
## Adding or Removing Users ## Adding or Removing Users
If you chose to save the CA certificate during the deploy process, then Algo's own scripts can easily add and remove users from the VPN server. If you chose to save the CA certificate during the deploy process, then Algo's own scripts can easily add and remove users from the VPN server.
@ -185,27 +185,7 @@ If you chose to save the CA certificate during the deploy process, then Algo's o
After this process completes, the Algo VPN server will contain only the users listed in the `config.cfg` file. After this process completes, the Algo VPN server will contain only the users listed in the `config.cfg` file.
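In practice, updating the users comes down to editing the `users` list in `config.cfg` and re-running the user playbook with the wrapper script referenced elsewhere in these docs:

```shell
# Re-run the user management playbook against the existing server
./algo update-users
```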
## Additional Documentation ## Additional Documentation
* [Deployment instructions, cloud provider setup instructions, and further client setup instructions available here.](docs/index.md)
* Setup instructions
- Documentation for available [Ansible roles](docs/setup-roles.md)
- Deploy from [Fedora Workstation (26)](docs/deploy-from-fedora-workstation.md)
- Deploy from [RedHat/CentOS 6.x](docs/deploy-from-redhat-centos6.md)
- Deploy from [Windows](docs/deploy-from-windows.md)
- Deploy from [Ansible](docs/deploy-from-ansible.md) directly
* Client setup
- Setup [Android](docs/client-android.md) clients
- Setup [Generic/Linux](docs/client-linux.md) clients with Ansible
- Setup Ubuntu clients to use [WireGuard](docs/client-linux-wireguard.md)
- Setup Apple devices to use [IPSEC](docs/client-apple-ipsec.md)
* Cloud setup
- Configure [Amazon EC2](docs/cloud-amazon-ec2.md)
- Configure [Azure](docs/cloud-azure.md)
- Configure [DigitalOcean](docs/cloud-do.md)
- Configure [Google Cloud Platform](docs/cloud-gce.md)
* Advanced Deployment
- Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server
- Deploy to your own [Ubuntu 18.04](docs/deploy-to-ubuntu.md) server
- Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md)
* [FAQ](docs/faq.md) * [FAQ](docs/faq.md)
* [Troubleshooting](docs/troubleshooting.md) * [Troubleshooting](docs/troubleshooting.md)

algo

@ -7,7 +7,8 @@ then
ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env/bin/activate" ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env/bin/activate"
if [ -f "$ACTIVATE_SCRIPT" ] if [ -f "$ACTIVATE_SCRIPT" ]
then then
source $ACTIVATE_SCRIPT # shellcheck source=/dev/null
source "$ACTIVATE_SCRIPT"
else else
echo "$ACTIVATE_SCRIPT not found. Did you follow documentation to install dependencies?" echo "$ACTIVATE_SCRIPT not found. Did you follow documentation to install dependencies?"
exit 1 exit 1
@ -15,8 +16,8 @@ then
fi fi
case "$1" in case "$1" in
update-users) PLAYBOOK=users.yml; ARGS="${@:2} -t update-users";; update-users) PLAYBOOK=users.yml; ARGS=( "${@:2}" -t update-users ) ;;
*) PLAYBOOK=main.yml; ARGS=${@} ;; *) PLAYBOOK=main.yml; ARGS=( "${@}" ) ;;
esac esac
ansible-playbook ${PLAYBOOK} ${ARGS} ansible-playbook ${PLAYBOOK} "${ARGS[@]}"

ansible.cfg

@ -4,7 +4,8 @@ pipelining = True
retry_files_enabled = False retry_files_enabled = False
host_key_checking = False host_key_checking = False
timeout = 60 timeout = 60
stdout_callback = full_skip stdout_callback = default
display_skipped_hosts = no
[paramiko_connection] [paramiko_connection]
record_host_keys = False record_host_keys = False

main.yml

@ -2,48 +2,20 @@
- name: Provision the server - name: Provision the server
hosts: localhost hosts: localhost
tags: always tags: always
become: false
vars_files: vars_files:
- config.cfg - config.cfg
pre_tasks: tasks:
- block: - block:
- name: Local pre-tasks - name: Local pre-tasks
import_tasks: playbooks/cloud-pre.yml import_tasks: playbooks/cloud-pre.yml
tags: always
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always
roles: - name: Include a provisioning role
- role: cloud-digitalocean include_role:
when: algo_provider == "digitalocean" name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + algo_provider }}"
- role: cloud-ec2
when: algo_provider == "ec2"
- role: cloud-vultr
when: algo_provider == "vultr"
- role: cloud-gce
when: algo_provider == "gce"
- role: cloud-azure
when: algo_provider == "azure"
- role: cloud-lightsail
when: algo_provider == "lightsail"
- role: cloud-scaleway
when: algo_provider == "scaleway"
- role: cloud-openstack
when: algo_provider == "openstack"
- role: local
when: algo_provider == "local"
post_tasks:
- block:
- name: Local post-tasks - name: Local post-tasks
import_tasks: playbooks/cloud-post.yml import_tasks: playbooks/cloud-post.yml
become: false
tags: cloud
rescue: rescue:
- debug: var=fail_hint - include_tasks: playbooks/rescue.yml
tags: always
- fail:
tags: always

config.cfg

@ -1,15 +1,14 @@
--- ---
# This is the list of user to generate. # This is the list of users to generate.
# Every device must have a unique username. # Every device must have a unique username.
# You can generate up to 250 users at one time. # You can generate up to 250 users at one time.
# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123".
users: users:
- phone - phone
- laptop - laptop
- desktop - desktop
# NOTE: You must "escape" any usernames with leading 0's, like "000dan"
### Advanced users only below this line ### ### Advanced users only below this line ###
# If True re-init all existing certificates. Boolean # If True re-init all existing certificates. Boolean
@ -25,6 +24,12 @@ ipsec_enabled: true
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration # https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
strongswan_log_level: 2 strongswan_log_level: 2
# rightsourceip for ipsec
# ipv4
strongswan_network: 10.19.48.0/24
# ipv6
strongswan_network_ipv6: 'fd9d:bc11:4020::/48'
# Deploy WireGuard # Deploy WireGuard
wireguard_enabled: true wireguard_enabled: true
wireguard_port: 51820 wireguard_port: 51820
@ -33,6 +38,10 @@ wireguard_port: 51820
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence # See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence
wireguard_PersistentKeepalive: 0 wireguard_PersistentKeepalive: 0
# WireGuard network configuration
wireguard_network_ipv4: 10.19.49.0/24
wireguard_network_ipv6: fd9d:bc11:4021::/48
# Reduce the MTU of the VPN tunnel # Reduce the MTU of the VPN tunnel
# Some cloud and internet providers use a smaller MTU (Maximum Transmission # Some cloud and internet providers use a smaller MTU (Maximum Transmission
# Unit) than the normal value of 1500 and if you don't reduce the MTU of your # Unit) than the normal value of 1500 and if you don't reduce the MTU of your
@ -48,9 +57,7 @@ reduce_mtu: 0
# If you load very large blocklists, you may also have to modify resource limits: # If you load very large blocklists, you may also have to modify resource limits:
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf # /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf
adblock_lists: adblock_lists:
- "http://winhelp2002.mvps.org/hosts.txt" - "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
- "https://adaway.org/hosts.txt"
- "https://www.malwaredomainlist.com/hostslist/hosts.txt"
- "https://hosts-file.net/ad_servers.txt" - "https://hosts-file.net/ad_servers.txt"
# Enable DNS encryption. # Enable DNS encryption.
@ -80,8 +87,9 @@ dns_servers:
- 2606:4700:4700::1111 - 2606:4700:4700::1111
- 2606:4700:4700::1001 - 2606:4700:4700::1001
# IP address for the local dns resolver # Randomly generated IP address for the local dns resolver
local_service_ip: 172.16.0.1 local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
# Your Algo server will automatically install security updates. Some updates # Your Algo server will automatically install security updates. Some updates
# require a reboot to take effect but your Algo server will not reboot itself # require a reboot to take effect but your Algo server will not reboot itself
@ -122,13 +130,9 @@ cloud_providers:
digitalocean: digitalocean:
size: s-1vcpu-1gb size: s-1vcpu-1gb
image: "ubuntu-18-04-x64" image: "ubuntu-18-04-x64"
ec2:
# Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest. # Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest.
# Warning: the Algo script will take approximately 6 minutes longer to complete. # Warning: the Algo script will take approximately 6 minutes longer to complete.
# Also note that the documented AWS minimum permissions aren't sufficient.
# You will have to edit the AWS user policy documented at
# https://github.com/trailofbits/algo/blob/master/docs/cloud-amazon-ec2.md to also allow "ec2:CopyImage".
# See https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-edit.html
ec2:
encrypted: false encrypted: false
size: t2.micro size: t2.micro
image: image:

deploy_client.yml

@ -1,5 +1,7 @@
---
- name: Configure the client - name: Configure the client
hosts: localhost hosts: localhost
become: false
vars_files: vars_files:
- config.cfg - config.cfg
@ -8,9 +10,10 @@
add_host: add_host:
name: "{{ client_ip }}" name: "{{ client_ip }}"
groups: client-host groups: client-host
ansible_ssh_user: "{{ ssh_user }}" ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}"
vpn_user: "{{ vpn_user }}" vpn_user: "{{ vpn_user }}"
server_ip: "{{ server_ip }}" IP_subject_alt_name: "{{ server_ip }}"
ansible_python_interpreter: "/usr/bin/python3"
- name: Configure the client and install required software - name: Configure the client and install required software
hosts: client-host hosts: client-host
@ -18,33 +21,6 @@
become: true become: true
vars_files: vars_files:
- config.cfg - config.cfg
- roles/vpn/defaults/main.yml - roles/strongswan/defaults/main.yml
pre_tasks:
- name: Get the OS
raw: uname -a
register: distribution
- name: Modify the server name fact
set_fact:
IP_subject_alt_name: "{{ server_ip }}"
- name: Ubuntu Xenial | Install prerequisites
raw: >
test -x /usr/bin/python2.7 ||
sudo apt-get update -qq && sudo apt-get install -qq -y python2.7 &&
sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1
changed_when: false
when: "'ubuntu' in distribution.stdout|lower"
- name: Fedora 25 | Install prerequisites
raw: >
test -x /usr/bin/python2.7 ||
sudo dnf install python2 -y &&
sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1 &&
rpm -ql python2-dnf || dnf install python2-dnf -y
changed_when: false
when: "'fedora' in distribution.stdout|lower"
roles: roles:
- { role: client, tags: ['client'] } - role: client

docs/client-linux-wireguard.md

@ -1,31 +1,25 @@
# Using Ubuntu Server as a Client with WireGuard # Using Ubuntu as a Client with WireGuard
## Install WireGuard ## Install WireGuard
To connect to your Algo VPN using [WireGuard](https://www.wireguard.com) from an Ubuntu Server 16.04 (Xenial) or 18.04 (Bionic) client, first install WireGuard on the client: To connect to your AlgoVPN using [WireGuard](https://www.wireguard.com) from Ubuntu, first install WireGuard:
```shell ```shell
# Add the WireGuard repository: # Add the WireGuard repository:
sudo add-apt-repository ppa:wireguard/wireguard sudo add-apt-repository ppa:wireguard/wireguard
# Update the list of available packages (not necessary on Bionic): # Update the list of available packages (not necessary on 18.04 or later):
sudo apt update sudo apt update
# Install the tools and kernel module: # Install the tools and kernel module:
sudo apt install wireguard sudo apt install wireguard openresolv
``` ```
(For installation on other Linux distributions, see the [Installation](https://www.wireguard.com/install/) page on the WireGuard site.) For installation on other Linux distributions, see the [Installation](https://www.wireguard.com/install/) page on the WireGuard site.
## Locate the Config File ## Locate the Config File
The Algo-generated config files for WireGuard are named `configs/<ip_address>/wireguard/<username>.conf` on the system where you ran `./algo`. One file was generated for each of the users you added to `config.cfg` before you ran `./algo`. Each Linux and Android client you connect to your Algo VPN must use a different WireGuard config file. Choose one of these files and copy it to your Linux client. The Algo-generated config files for WireGuard are named `configs/<ip_address>/wireguard/<username>.conf` on the system where you ran `./algo`. One file was generated for each of the users you added to `config.cfg`. Each WireGuard client you connect to your AlgoVPN must use a different config file. Choose one of these files and copy it to your Linux client.
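For example, one way to copy a generated profile to the client is over SSH; the server IP, username (`phone` is one of the default users in `config.cfg`), and client hostname below are placeholders:

```shell
# Run this on the machine where you ran ./algo
scp configs/203.0.113.10/wireguard/phone.conf me@my-linux-client:~/
```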
If your client is running Bionic (or another Linux that uses `systemd-resolved` for DNS) you should first edit the config file. Comment out the line that begins with `DNS =` and replace it with:
```
PostUp = systemd-resolve -i %i --set-dns=172.16.0.1 --set-domain=~.
```
Use the IP address shown on the `DNS =` line (for most, this will be `172.16.0.1`). If the `DNS =` line contains multiple IP addresses, use multiple `--set-dns=` options.
## Configure WireGuard ## Configure WireGuard
@ -33,7 +27,7 @@ Finally, install the config file on your client as `/etc/wireguard/wg0.conf` and
```shell ```shell
# Install the config file to the WireGuard configuration directory on your # Install the config file to the WireGuard configuration directory on your
# Bionic or Xenial client: # Linux client:
sudo install -o root -g root -m 600 <username>.conf /etc/wireguard/wg0.conf sudo install -o root -g root -m 600 <username>.conf /etc/wireguard/wg0.conf
# Start the WireGuard VPN: # Start the WireGuard VPN:
@ -52,4 +46,4 @@ curl ipv4.icanhazip.com
sudo systemctl enable wg-quick@wg0 sudo systemctl enable wg-quick@wg0
``` ```
(If your Linux distribution does not use `systemd`, you can bring up WireGuard with `sudo wg-quick up wg0`). If your Linux distribution does not use `systemd` you can bring up WireGuard with `sudo wg-quick up wg0`.

docs/cloud-do.md

@ -96,9 +96,7 @@ For more, see [Scripted Deployment](deploy-from-ansible.md).
## Using the DigitalOcean firewall with Algo ## Using the DigitalOcean firewall with Algo
Many cloud providers include the option to configure an external firewall between the Internet and your cloud server. For some providers this is mandatory and Algo will configure it for you, but for DigitalOcean the external firewall is optional. Many cloud providers include the option to configure an external firewall between the Internet and your cloud server. For some providers this is mandatory and Algo will configure it for you, but for DigitalOcean the external firewall is optional. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
An Algo VPN runs its own firewall and doesn't require an external firewall, but you might wish to use the DigitalOcean firewall for example to limit the addresses which can connect to your Algo VPN over SSH, or perhaps to block SSH altogether.
To configure the DigitalOcean firewall, go to **Networking**, **Firewalls**, and choose **Create Firewall**. To configure the DigitalOcean firewall, go to **Networking**, **Firewalls**, and choose **Create Firewall**.

docs/cloud-scaleway.md Normal file

@ -0,0 +1,9 @@
### Configuration file
Algo requires an API key from your Scaleway account to create a server.
The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/account/credentials](https://console.scaleway.com/account/credentials), and then selecting "Generate new token" on the right side of the box labeled "API Tokens".
Enter this token when Algo prompts you for the `auth token`.
This information will be passed as the `algo_scaleway_token` variable when asked for in the Algo prompt.
Your organization ID is also on this page: https://console.scaleway.com/account/credentials
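If you prefer a non-interactive run, the same token and region can be passed as extra variables (see [deploy-from-ansible.md](deploy-from-ansible.md)); the values below are only placeholders:

```
ansible-playbook main.yml -e "provider=scaleway
                              server_name=algo
                              scaleway_token=<your token>
                              region=par1"
```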

docs/cloud-vultr.md

@ -1,8 +1,15 @@
### Configuration file ### Configuration file
You need to create a configuration file in INI format with your api key (https://my.vultr.com/settings/#settingsapi) Algo requires an API key from your Vultr account in order to create a server. The API key is generated by going to your Vultr settings at https://my.vultr.com/settings/#settingsapi, and then selecting "generate new API key" on the right side of the box labeled "API Key".
Algo can read the API key in several different ways. Algo will first look for the file containing the API key in the environment variable $VULTR_API_CONFIG if present. You can set this with the command: `export VULTR_API_CONFIG=/path/to/vultr.ini`. Probably the simplest way to give Algo the API key is to create a file titled `.vultr.ini` in your home directory by typing `nano ~/.vultr.ini`, then entering the following text:
``` ```
[default] [default]
key = <your api key> key = <your api key>
``` ```
where you've cut-and-pasted the API key from above into the `<your api key>` field (no brackets).
When Algo asks `Enter the local path to your configuration INI file (https://trailofbits.github.io/algo/cloud-vultr.html):`, if you hit enter without typing anything Algo will look for the file in `~/.vultr.ini` by default.

docs/deploy-from-ansible.md

@ -1,10 +1,10 @@
# Scripted Deployment # Deployment from Ansible
Before you begin, make sure you have installed all the dependencies necessary for your operating system as described in the [README](../README.md). Before you begin, make sure you have installed all the dependencies necessary for your operating system as described in the [README](../README.md).
You can deploy Algo non-interactively by running the Ansible playbooks directly with `ansible-playbook`. You can deploy Algo non-interactively by running the Ansible playbooks directly with `ansible-playbook`.
`ansible-playbook` accepts "tags" via the `-t` or `TAGS` options. You can pass tags as a list of comma separated values. Ansible will only run plays (install roles) with the specified tags. `ansible-playbook` accepts "tags" via the `-t` or `TAGS` options. You can pass tags as a list of comma separated values. Ansible will only run plays (install roles) with the specified tags. You can also use the `--skip-tags` option to skip certain parts of the install, such as `iptables` (overwrite iptables rules), `ipsec` (install strongSwan), `wireguard` (install Wireguard).
`ansible-playbook` accepts variables via the `-e` or `--extra-vars` option. You can pass variables as space separated key=value pairs. Algo requires certain variables that are listed below. `ansible-playbook` accepts variables via the `-e` or `--extra-vars` option. You can pass variables as space separated key=value pairs. Algo requires certain variables that are listed below.
@ -23,25 +23,25 @@ ansible-playbook main.yml -e "provider=digitalocean
do_token=token" do_token=token"
``` ```
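For instance, a run that leaves existing firewall rules untouched by skipping the `iptables` tag might look like this (the token and other values are placeholders):

```
ansible-playbook main.yml --skip-tags iptables -e "provider=digitalocean
                                                   server_name=algo
                                                   do_token=token"
```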
See below for more information about providers and extra variables See below for more information about variables and roles.
### Variables ### Variables
- `provider` - (Required) The provider to use. See possible values below - `provider` - (Required) The provider to use. See possible values below
- `server_name` - (Required) Server name. Default: algo - `server_name` - (Required) Server name. Default: algo
- `ondemand_cellular` (Optional) VPN On Demand when connected to cellular networks. Default: false - `ondemand_cellular` (Optional) VPN On Demand when connected to cellular networks with IPsec. Default: false
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks. Default: false - `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks with IPsec. Default: false
- `ondemand_wifi_exclude` (Required if `ondemand_wifi` set) - WiFi networks to exclude from using the VPN. Comma-separated values - `ondemand_wifi_exclude` (Required if `ondemand_wifi` set) - WiFi networks to exclude from using the VPN. Comma-separated values
- `local_dns` - (Optional) Enable a DNS resolver. Default: false - `local_dns` - (Optional) Enable a DNS resolver. Default: false
- `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false - `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false
- `windows` - (Optional) Enables compatible ciphers and key exchange to support Windows clients, less secure. Default: false - `windows` - (Optional) Enables compatible ciphers and key exchange to support Windows clients, less secure. Default: false
- `store_cakey` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false - `store_cakey` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false
If any of those unspecified ansible will ask the user to input If any of the above variables are unspecified, ansible will ask the user to input them.
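Putting the variables above together, a fully non-interactive deployment could be invoked roughly as follows (all values are illustrative):

```
ansible-playbook main.yml -e "provider=digitalocean
                              server_name=algo
                              ondemand_cellular=false
                              ondemand_wifi=true
                              ondemand_wifi_exclude=HomeNet,OfficeWifi
                              local_dns=true
                              ssh_tunneling=false
                              windows=false
                              store_cakey=false
                              do_token=token"
```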
### Ansible roles ### Ansible roles
Roles can be activated by specifying an extra variable `provider` Cloud roles can be activated by specifying an extra variable `provider`.
Cloud roles: Cloud roles:
@ -55,13 +55,25 @@ Cloud roles:
Server roles: Server roles:
- role: vpn - role: strongswan
* Installs [strongSwan](https://www.strongswan.org/)
* Enables AppArmor, limits CPU and memory access, and drops user privileges
* Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
* Bundles the appropriate certificates into Apple mobileconfig profiles and Powershell scripts for each user
- role: dns_adblocking - role: dns_adblocking
* Installs the [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) local resolver with a blacklist for advertising domains
* Constrains dnsmasq with AppArmor and cgroups CPU and memory limitations
- role: dns_encryption - role: dns_encryption
* Installs [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy)
* Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations
- role: ssh_tunneling - role: ssh_tunneling
* Adds a restricted `algo` group with no shell access and limited SSH forwarding options
* Creates one limited, local account and an SSH public key for each user
- role: wireguard - role: wireguard
* Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades
* Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients
Note: The `vpn` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables: Note: The `strongswan` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:
- ondemand_wifi: true - ondemand_wifi: true
- ondemand_wifi_exclude: HomeNet,OfficeWifi - ondemand_wifi_exclude: HomeNet,OfficeWifi
@ -91,9 +103,9 @@ Possible options can be gathered calling to https://api.digitalocean.com/v2/regi
Required variables: Required variables:
- aws_access_key - aws_access_key: `AKIA...`
- aws_secret_key - aws_secret_key
- region - region: e.g. `us-east-1`
Possible options can be gathered via cli `aws ec2 describe-regions` Possible options can be gathered via cli `aws ec2 describe-regions`
@ -114,7 +126,8 @@ Additional variables:
"ec2:DescribeImages", "ec2:DescribeImages",
"ec2:DescribeKeyPairs", "ec2:DescribeKeyPairs",
"ec2:DescribeRegions", "ec2:DescribeRegions",
"ec2:ImportKeyPair" "ec2:ImportKeyPair",
"ec2:CopyImage"
], ],
"Resource": [ "Resource": [
"*" "*"
@ -179,8 +192,8 @@ Required variables:
Required variables: Required variables:
- [vultr_config](https://trailofbits.github.io/algo/cloud-vultr.html) - [vultr_config](https://trailofbits.github.io/algo/cloud-vultr.html): /path/to/.vultr.ini
- [region](https://api.vultr.com/v1/regions/list) - [region](https://api.vultr.com/v1/regions/list): e.g. `Chicago`, `'New Jersey'`
### Azure ### Azure
@ -196,9 +209,9 @@ Required variables:
Required variables: Required variables:
- aws_access_key - aws_access_key: `AKIA...`
- aws_secret_key - aws_secret_key
- region - region: e.g. `us-east-1`
Possible options can be gathered via cli `aws lightsail get-regions` Possible options can be gathered via cli `aws lightsail get-regions`
@ -230,13 +243,7 @@ Possible options can be gathered via cli `aws lightsail get-regions`
Required variables: Required variables:
- [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/) - [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/)
- [scaleway_org](https://cloud.scaleway.com/#/billing) - region: e.g. ams1, par1
- region
Possible regions:
- ams1
- par1
### OpenStack ### OpenStack

docs/deploy-from-script-or-cloud-init-to-localhost.md

@ -0,0 +1,59 @@
# Deploy from script or cloud-init
You can use `install.sh` to prepare the environment and deploy AlgoVPN on a local Ubuntu server in one shot using cloud-init, or run the script directly on the server after it has been created. The script doesn't configure any parameters in your cloud, so it's up to you to configure the related [firewall rules](/docs/firewalls.md), a floating IP address, and any other resources you may need. The output of the install script (including the p12 and CA passwords) and the user config files will be installed into the `/opt/algo` directory.
## Cloud init deployment
You can copy-paste the snippet below into the user data (cloud-init or startup script) field when creating a new server. For now this is only possible on [DigitalOcean](https://www.digitalocean.com/docs/droplets/resources/metadata/), Amazon [EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and [Lightsail](https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-how-to-configure-server-additional-data-shell-script), [Google Cloud](https://cloud.google.com/compute/docs/startupscript), [Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init) and [Vultr](https://my.vultr.com/startup/), although Vultr doesn't [officially support cloud-init](https://www.vultr.com/docs/getting-started-with-cloud-init).
```
#!/bin/bash
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x
```
The command will prepare the environment and install AlgoVPN with the default parameters below. If you want to modify the behavior you may define additional variables.
## Variables
`METHOD` - which deployment method to use. Possible values are local and cloud. Default: cloud. The cloud method is intended for use in cloud-init deployments only. If you are not using cloud-init to deploy the server, you have to use the local method.
`ONDEMAND_CELLULAR` - "Connect On Demand" when connected to cellular networks. Boolean. Default: false.
`ONDEMAND_WIFI` - "Connect On Demand" when connected to Wi-Fi. Default: false.
`ONDEMAND_WIFI_EXCLUDE` - List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand". Comma-separated list.
`WINDOWS` - To support Windows 10 or Linux Desktop clients. Default: false.
`STORE_CAKEY` - To retain the CA key (required to add users in the future, but less secure). Default: false.
`LOCAL_DNS` - To install an ad blocking DNS resolver. Default: false.
`SSH_TUNNELING` - Enable SSH tunneling for each user. Default: false.
`ENDPOINT` - The public IP address or domain name of your server (IMPORTANT: this is used to verify the certificate). It will be gathered automatically for DigitalOcean, AWS, GCE, Azure or Vultr if `METHOD` is cloud. Otherwise you need to set this variable to your public IP address.
`USERS` - list of VPN users. Comma-separated list. Default: user1.
`REPO_SLUG` - Owner and repository to get the installation scripts from. Default: trailofbits/algo.
`REPO_BRANCH` - Branch for `REPO_SLUG`. Default: master.
`EXTRA_VARS` - Additional extra variables.
`ANSIBLE_EXTRA_ARGS` - Any additional Ansible parameters, e.g. `--skip-tags apparmor`.
## Examples
##### How to customise a cloud-init deployment with variables
```
#!/bin/bash
export ONDEMAND_CELLULAR=true
export WINDOWS=true
export SSH_TUNNELING=true
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x
```
##### How to deploy locally without using cloud-init
```
export METHOD=local
export ONDEMAND_CELLULAR=true
export ENDPOINT=[your server's IP here]
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x
```
##### How to deploy a server using arguments
The arguments are positional and follow the order of the [variables](#variables) above
```
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x -s local true false _null true true true true myvpnserver.com phone,laptop,desktop
```

docs/deploy-to-ubuntu.md

@ -8,4 +8,6 @@ Install to existing Ubuntu 18.04 server (Advanced)
``` ```
Make sure your server is running the operating system specified. Make sure your server is running the operating system specified.
**PLEASE NOTE**: Algo is intended for use as a _dedicated_ VPN server. If you install Algo on an existing server, then any existing services might break. In particular, the firewall rules will be overwritten. If you don't want to overwrite the rules you must deploy via `ansible-playbook` and skip the `iptables` tag as described in [deploy-from-ansible.md](deploy-from-ansible.md), after which you'll need to implement the necessary rules yourself. **PLEASE NOTE**: Algo is intended for use as a _dedicated_ VPN server. If you install Algo on an existing server, then any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
If you don't want to overwrite the rules you must deploy via `ansible-playbook` and skip the `iptables` tag as described in [deploy-from-ansible.md](deploy-from-ansible.md), after which you'll need to implement the necessary rules yourself.

docs/faq.md

@ -50,7 +50,7 @@ Algo is short for "Al Gore", the **V**ice **P**resident of **N**etworks everywhe
## Can DNS filtering be disabled? ## Can DNS filtering be disabled?
There is no official way to disable DNS filtering, but there is a workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=172.16.0.1` to `rightdns=8.8.8.8`. Then run `ipsec restart`. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled. You can temporarily disable DNS filtering for all IPsec clients at once with the following workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=<random_ip>` to `rightdns=8.8.8.8`. Then run `sudo systemctl restart strongswan`. DNS filtering for Wireguard clients has to be disabled on each client device separately by modifying the settings in the app, or by directly modifying the `DNS` setting on the `clientname.conf` file. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled.
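A minimal sketch of that IPsec workaround, assuming the default file locations described above (substitute whatever resolver you prefer for 8.8.8.8):

```shell
# Point strongSwan clients at a public resolver instead of the local one
sudo sed -i 's/rightdns=.*/rightdns=8.8.8.8/' /etc/ipsec.conf
sudo systemctl restart strongswan
```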
## Wasn't IPSEC backdoored by the US government? ## Wasn't IPSEC backdoored by the US government?
@ -74,4 +74,4 @@ No.
## What inbound ports are used? ## What inbound ports are used?
You should only need 22/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server. You should only need 22/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.

docs/firewalls.md Normal file

@ -0,0 +1,34 @@
# AlgoVPN and Firewalls
Your AlgoVPN requires properly configured firewalls. The key points to know are:
* If you deploy to a **cloud** provider, all firewall configuration will be done automatically.
* If you perform a **local** installation on an existing server, you are responsible for configuring any external firewalls. You must also take care not to interfere with the server firewall configuration of the AlgoVPN.
## The Two Types of Firewall
![Firewall Illustration](/docs/images/firewalls.png)
### Server Firewall
During installation Algo configures the Linux [Netfilter](https://en.wikipedia.org/wiki/Netfilter) firewall on the server. The rules added are required for AlgoVPN to work properly. The package `netfilter-persistent` is used to load the IPv4 and IPv6 rules files that Algo generates and stores in `/etc/iptables`. The rules for IPv6 are only generated if the server appears to be properly configured for IPv6. The use of conflicting firewall packages on the server such as `ufw` will likely break AlgoVPN.
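To see what Algo installed without changing anything, the active rules and the persisted files that `netfilter-persistent` loads can be inspected directly on the server, for example:

```shell
# List the active IPv4/IPv6 rules and the files loaded at boot
sudo iptables -S
sudo ip6tables -S
ls /etc/iptables/
```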
### External Firewall
Most cloud service providers offer a firewall that sits between the Internet and your AlgoVPN. With some providers (such as EC2, Lightsail, and GCE) this firewall is required and is configured by Algo during a **cloud** deployment. If the firewall is not required by the provider then Algo does not configure it.
External firewalls are not configured when performing a **local** installation, even when using a server from a cloud service provider.
Any external firewall must be configured to pass the following incoming ports over IPv4:
Port | Protocol | Description | Related variables in `config.cfg`
---- | -------- | ----------- | ---------------------------------
22 | TCP | Secure Shell (SSH) | None
500 | UDP | IPsec IKEv2 | `ipsec_enabled`
4500 | UDP | IPsec NAT-T | `ipsec_enabled`
51820 | UDP | WireGuard | `wireguard_enabled`, `wireguard_port`
If you have chosen to disable either IPsec or WireGuard in `config.cfg` before running `./algo` then the corresponding ports don't need to pass through the firewall. SSH is used when performing a **cloud** deployment and when subsequently modifying the list of VPN users by running `./algo update-users`.
Even when not required by the cloud service provider, you still might wish to use an external firewall to limit SSH access to your AlgoVPN to connections from certain IP addresses, or perhaps to block SSH access altogether if you don't need it. Every service provider firewall is different so refer to the provider's documentation for more information.
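Purely as an illustration of what passing these ports looks like, the equivalent rules on an AWS security group could be added with the CLI as below (on EC2 Algo normally creates these rules itself during a **cloud** deployment; the group ID and source ranges are placeholders):

```shell
# Allow SSH only from a trusted range, and the VPN ports from anywhere
aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 --protocol tcp --port 22 --cidr 203.0.113.0/24
aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 --protocol udp --port 500 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 --protocol udp --port 4500 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 --protocol udp --port 51820 --cidr 0.0.0.0/0
```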

docs/images/firewalls.png Normal file (binary image, 113 KiB; not shown)

docs/index.md

@ -1,21 +1,29 @@
# Algo VPN documentation # Algo VPN documentation
* Setup instructions * Deployment instructions
- Documentation for available [Ansible roles](setup-roles.md)
- Deploy from [Fedora Workstation (26)](deploy-from-fedora-workstation.md) - Deploy from [Fedora Workstation (26)](deploy-from-fedora-workstation.md)
- Deploy from [RedHat/CentOS 6.x](deploy-from-redhat-centos6.md) - Deploy from [RedHat/CentOS 6.x](deploy-from-redhat-centos6.md)
- Deploy from [Windows](deploy-from-windows.md) - Deploy from [Windows](deploy-from-windows.md)
- Deploy from [Ansible](deploy-from-ansible.md) directly - Deploy from a [Docker container](deploy-from-docker.md)
- Deploy from [Ansible](deploy-from-ansible.md) non-interactively
- Deploy onto a [cloud server at time of creation](deploy-from-script-or-cloud-init-to-localhost.md)
* Client setup * Client setup
- Setup [Android](client-android.md) clients - Setup [Android](client-android.md) clients
- Setup [Generic/Linux](client-linux.md) clients with Ansible - Setup [Generic/Linux](client-linux.md) clients with Ansible
* Cloud setup - Setup Ubuntu clients to use [WireGuard](client-linux-wireguard.md)
- Setup Apple devices to use [IPSEC](client-apple-ipsec.md)
- Setup Macs running macOS 10.13 or older to use [Wireguard](client-macos-wireguard.md)
- Manual Windows 10 client setup for [IPSEC](client-windows.md)
* Cloud provider setup
- Configure [Amazon EC2](cloud-amazon-ec2.md)
- Configure [Azure](cloud-azure.md) - Configure [Azure](cloud-azure.md)
- Configure [DigitalOcean](cloud-do.md) - Configure [DigitalOcean](cloud-do.md)
- Configure [Google Cloud Platform](cloud-gce.md)
- Configure [Vultr](cloud-vultr.md) - Configure [Vultr](cloud-vultr.md)
* Advanced Deployment * Advanced Deployment
- Deploy to your own [FreeBSD](deploy-to-freebsd.md) server - Deploy to your own [FreeBSD](deploy-to-freebsd.md) server
- Deploy to your own [Ubuntu 18.04](deploy-to-ubuntu.md) server - Deploy to your own [Ubuntu 18.04](deploy-to-ubuntu.md) server
- Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md) - Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md)
* [FAQ](faq.md) * [FAQ](faq.md)
* [Firewalls](firewalls.md)
* [Troubleshooting](troubleshooting.md) * [Troubleshooting](troubleshooting.md)

docs/setup-roles.md

@ -1,28 +0,0 @@
# Ansible Roles
## Required roles
* **Common**
* Installs several required packages and software updates, then reboots if necessary
* Configures network interfaces, and enables packet forwarding on them
* **VPN**
* Installs [strongSwan](https://www.strongswan.org/), enables AppArmor, limits CPU and memory access, and drops user privileges
* Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
* Bundles the appropriate certificates into Apple mobileconfig profiles for each user
* Configures IPtables to block traffic that might pose a risk to VPN users, such as [SMB/CIFS](https://medium.com/@ValdikSS/deanonymizing-windows-users-and-capturing-microsoft-and-vpn-accounts-f7e53fe73834)
## Optional roles
* **Security Enhancements**
* Enables [unattended-upgrades](https://help.ubuntu.com/community/AutomaticSecurityUpdates) to ensure available patches are always applied
* Modify features like core dumps, kernel parameters, and SUID binaries to limit possible attacks
* Enhances SSH with modern ciphers and seccomp, and restricts access to old or unwanted features like X11 forwarding and SFTP
* **DNS-based Adblocking**
* Install the [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) local resolver with a blacklist for advertising domains
* Constrains dnsmasq with AppArmor and cgroups CPU and memory limitations
* **DNS encryption**
* Install [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy)
* Constrains dingo with AppArmor and cgroups CPU and memory limitations
* **SSH Tunneling**
* Adds a restricted `algo` group with no shell access and limited SSH forwarding options
* Creates one limited, local account per user and an SSH public key for each

docs/troubleshooting.md

@ -18,7 +18,7 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
* [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid) * [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid)
* [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh) * [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh)
* [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths) * [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths)
* [Ubuntu Error: "unable to write 'random state" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password") * [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password)
* [Connection Problems](#connection-problems) * [Connection Problems](#connection-problems)
* [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites) * [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites)
* [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device) * [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device)
@ -153,7 +153,9 @@ You need to reset the permissions on your `.ssh` directory. Run `chmod 700 /home
### The region you want is not available ### The region you want is not available
You want to install Algo to a specific region in a cloud provider, but that region is not available in the list given by the installer. In that case, you should [file an issue](https://github.com/trailofbits/algo/issues/new). Cloud providers add new regions on a regular basis and we don't always keep up. File an issue and give us information about what region is missing and we'll add it. Algo downloads the regions from the supported cloud providers (other than Microsoft Azure) listed in the first menu using APIs. If the region you want isn't available, the cloud provider has probably taken it offline for some reason. You should investigate further with your cloud provider.
If there's a specific region you want to install to in Microsoft Azure that isn't available, you should [file an issue](https://github.com/trailofbits/algo/issues/new), give us information about what region is missing, and we'll add it.
### AWS: SSH permission denied with an ECDSA key ### AWS: SSH permission denied with an ECDSA key
@ -269,7 +271,7 @@ sudo rm -rf /etc/wireguard/*.lock
``` ```
Then immediately re-run `./algo`. Then immediately re-run `./algo`.
### Ubuntu Error: "unable to write 'random state" when generating CA password ### Ubuntu Error: "unable to write 'random state'" when generating CA password
When running Algo, you received an error like this: When running Algo, you received an error like this:

View file

@ -25,7 +25,9 @@
- config.cfg - config.cfg
tasks: tasks:
- pause: - block:
- name: Cloud prompt
pause:
prompt: | prompt: |
What provider would you like to use? What provider would you like to use?
{% for p in providers_map %} {% for p in providers_map %}
@ -40,7 +42,8 @@
set_fact: set_fact:
algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}" algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"
- pause: - name: VPN server name prompt
pause:
prompt: | prompt: |
Name the vpn server Name the vpn server
[algo] [algo]
@ -49,21 +52,24 @@
- server_name is undefined - server_name is undefined
- algo_provider != "local" - algo_provider != "local"
- block: - block:
- pause: - name: Cellular On Demand prompt
pause:
prompt: | prompt: |
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to cellular networks? Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to cellular networks?
[y/N] [y/N]
register: _ondemand_cellular register: _ondemand_cellular
when: ondemand_cellular is undefined when: ondemand_cellular is undefined
- pause: - name: Wi-Fi On Demand prompt
pause:
prompt: | prompt: |
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to Wi-Fi? Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to Wi-Fi?
[y/N] [y/N]
register: _ondemand_wifi register: _ondemand_wifi
when: ondemand_wifi is undefined when: ondemand_wifi is undefined
- pause: - name: Trusted Wi-Fi networks prompt
pause:
prompt: | prompt: |
List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand" List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand"
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi) (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
@ -73,14 +79,16 @@
- (ondemand_wifi|default(false)|bool) or - (ondemand_wifi|default(false)|bool) or
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false)) (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
- pause: - name: Compatible ciphers prompt
pause:
prompt: | prompt: |
Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure) Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
[y/N] [y/N]
register: _windows register: _windows
when: windows is undefined when: windows is undefined
- pause: - name: Retain the CA key prompt
pause:
prompt: | prompt: |
Do you want to retain the CA key? (required to add users in the future, but less secure) Do you want to retain the CA key? (required to add users in the future, but less secure)
[y/N] [y/N]
@ -88,14 +96,16 @@
when: store_cakey is undefined when: store_cakey is undefined
when: ipsec_enabled when: ipsec_enabled
- pause: - name: DNS adblocking prompt
pause:
prompt: | prompt: |
Do you want to install an ad blocking DNS resolver on this VPN server? Do you want to install an ad blocking DNS resolver on this VPN server?
[y/N] [y/N]
register: _local_dns register: _local_dns
when: local_dns is undefined when: local_dns is undefined
- pause: - name: SSH tunneling prompt
pause:
prompt: | prompt: |
Do you want each user to have their own account for SSH tunneling? Do you want each user to have their own account for SSH tunneling?
[y/N] [y/N]
@ -106,34 +116,38 @@
set_fact: set_fact:
algo_server_name: >- algo_server_name: >-
{% if server_name is defined %}{% set _server = server_name %} {% if server_name is defined %}{% set _server = server_name %}
{%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input != "" %}{% set _server = _algo_server_name.user_input %} {%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%}
{%- set _server = _algo_server_name.user_input -%}
{%- else %}{% set _server = defaults['server_name'] %}{% endif -%} {%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
{{ _server | regex_replace('(?!\.)(\W|_)', '-') }} {{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
algo_ondemand_cellular: >- algo_ondemand_cellular: >-
{% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }} {% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
{%- elif _ondemand_cellular.user_input is defined and _ondemand_cellular.user_input != "" %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }} {%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ondemand_wifi: >- algo_ondemand_wifi: >-
{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }} {% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
{%- elif _ondemand_wifi.user_input is defined and _ondemand_wifi.user_input != "" %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }} {%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ondemand_wifi_exclude: >- algo_ondemand_wifi_exclude: >-
{% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }} {% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }}
{%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input != "" %}{{ _ondemand_wifi_exclude.user_input | b64encode }} {%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%}
{{ _ondemand_wifi_exclude.user_input | b64encode }}
{%- else %}{{ '_null' | b64encode }}{% endif %} {%- else %}{{ '_null' | b64encode }}{% endif %}
algo_local_dns: >- algo_local_dns: >-
{% if local_dns is defined %}{{ local_dns | bool }} {% if local_dns is defined %}{{ local_dns | bool }}
{%- elif _local_dns.user_input is defined and _local_dns.user_input != "" %}{{ booleans_map[_local_dns.user_input] | default(defaults['local_dns']) }} {%- elif _local_dns.user_input is defined %}{{ booleans_map[_local_dns.user_input] | default(defaults['local_dns']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ssh_tunneling: >- algo_ssh_tunneling: >-
{% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }} {% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
{%- elif _ssh_tunneling.user_input is defined and _ssh_tunneling.user_input != "" %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }} {%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_windows: >- algo_windows: >-
{% if windows is defined %}{{ windows | bool }} {% if windows is defined %}{{ windows | bool }}
{%- elif _windows.user_input is defined and _windows.user_input != "" %}{{ booleans_map[_windows.user_input] | default(defaults['windows']) }} {%- elif _windows.user_input is defined %}{{ booleans_map[_windows.user_input] | default(defaults['windows']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_store_cakey: >- algo_store_cakey: >-
{% if store_cakey is defined %}{{ store_cakey | bool }} {% if ipsec_enabled %}{%- if store_cakey is defined %}{{ store_cakey | bool }}
{%- elif _store_cakey.user_input is defined and _store_cakey.user_input != "" %}{{ booleans_map[_store_cakey.user_input] | default(defaults['store_cakey']) }} {%- elif _store_cakey.user_input is defined %}{{ booleans_map[_store_cakey.user_input] | default(defaults['store_cakey']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}{% endif %}
rescue:
- include_tasks: playbooks/rescue.yml

112
install.sh Normal file
View file

@ -0,0 +1,112 @@
#!/usr/bin/env sh
set -ex
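# Every option below can be supplied either as a positional argument (1-14) or as an
# environment variable of the same name; a positional argument, when given, takes precedence.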
METHOD="${1:-${METHOD:-cloud}}"
ONDEMAND_CELLULAR="${2:-${ONDEMAND_CELLULAR:-false}}"
ONDEMAND_WIFI="${3:-${ONDEMAND_WIFI:-false}}"
ONDEMAND_WIFI_EXCLUDE="${4:-${ONDEMAND_WIFI_EXCLUDE:-_null}}"
WINDOWS="${5:-${WINDOWS:-false}}"
STORE_CAKEY="${6:-${STORE_CAKEY:-false}}"
LOCAL_DNS="${7:-${LOCAL_DNS:-false}}"
SSH_TUNNELING="${8:-${SSH_TUNNELING:-false}}"
ENDPOINT="${9:-${ENDPOINT:-localhost}}"
USERS="${10:-${USERS:-user1}}"
REPO_SLUG="${11:-${REPO_SLUG:-trailofbits/algo}}"
REPO_BRANCH="${12:-${REPO_BRANCH:-master}}"
EXTRA_VARS="${13:-${EXTRA_VARS:-placeholder=null}}"
ANSIBLE_EXTRA_ARGS="${14:-${ANSIBLE_EXTRA_ARGS}}"
cd /opt/
installRequirements() {
apt-get update
apt-get install \
software-properties-common \
git \
build-essential \
libssl-dev \
libffi-dev \
python-dev \
python-pip \
python-setuptools \
python-virtualenv \
bind9-host \
jq -y
}
getAlgo() {
[ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo
cd algo
python -m virtualenv --python="$(command -v python2)" .venv
# shellcheck source=/dev/null
. .venv/bin/activate
python -m pip install -U pip virtualenv
python -m pip install -r requirements.txt
}
publicIpFromInterface() {
echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint."
DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')"
ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b')
export ENDPOINT=$ENDPOINT
echo "Using ${ENDPOINT} as the endpoint"
}
publicIpFromMetadata() {
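# Probe the cloud metadata services in turn (DigitalOcean vendor-data, EC2 services/domain,
# GCE metadata.google.internal, Azure instance metadata); if none of them yields a valid
# public IPv4 address, fall back to the first address found on the default interface.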
if curl -s http://169.254.169.254/metadata/v1/vendor-data | grep DigitalOcean >/dev/null; then
ENDPOINT="$(curl -s http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)"
elif test "$(curl -s http://169.254.169.254/latest/meta-data/services/domain)" = "amazonaws.com"; then
ENDPOINT="$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)"
elif host -t A -W 10 metadata.google.internal 127.0.0.53 >/dev/null; then
ENDPOINT="$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")"
elif test "$(curl -s -H Metadata:true 'http://169.254.169.254/metadata/instance/compute/publisher/?api-version=2017-04-02&format=text')" = "Canonical"; then
ENDPOINT="$(curl -H Metadata:true 'http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-04-02&format=text')"
fi
if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then
export ENDPOINT=$ENDPOINT
echo "Using ${ENDPOINT} as the endpoint"
else
publicIpFromInterface
fi
}
deployAlgo() {
getAlgo
cd /opt/algo
# shellcheck source=/dev/null
. .venv/bin/activate
export HOME=/root
export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp
export ANSIBLE_REMOTE_TEMP=/root/.ansible/tmp
# shellcheck disable=SC2086
ansible-playbook main.yml \
-e provider=local \
-e "ondemand_cellular=${ONDEMAND_CELLULAR}" \
-e "ondemand_wifi=${ONDEMAND_WIFI}" \
-e "ondemand_wifi_exclude=${ONDEMAND_WIFI_EXCLUDE}" \
-e "windows=${WINDOWS}" \
-e "store_cakey=${STORE_CAKEY}" \
-e "local_dns=${LOCAL_DNS}" \
-e "ssh_tunneling=${SSH_TUNNELING}" \
-e "endpoint=$ENDPOINT" \
-e "users=$(echo "$USERS" | jq -Rc 'split(",")')" \
-e server=localhost \
-e ssh_user=root \
-e "${EXTRA_VARS}" \
--skip-tags debug ${ANSIBLE_EXTRA_ARGS} |
tee /var/log/algo.log
}
if test "$METHOD" = "cloud"; then
publicIpFromMetadata
fi
installRequirements
deployAlgo
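The whole flow above is driven by the variables declared at the top of the script, so a deployment can be scripted without prompts. A minimal sketch, run as root on a fresh Ubuntu server (the user names and toggle values here are illustrative; only the variable names come from the script):

export METHOD=cloud             # look up the public IP from the provider metadata service
export ONDEMAND_CELLULAR=true   # any other toggle can be preset the same way
export USERS="phone,laptop"     # comma-separated; converted to a JSON list with jq
sh install.sh                   # positional arguments 1-14 would override these values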

619
library/scaleway_compute.py Normal file
View file

@ -0,0 +1,619 @@
#!/usr/bin/python
#
# Scaleway Compute management module
#
# Copyright (C) 2018 Online SAS.
# https://www.scaleway.com
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: scaleway_compute
short_description: Scaleway compute management module
version_added: "2.6"
author: Remy Leone (@sieben)
description:
- "This module manages compute instances on Scaleway."
extends_documentation_fragment: scaleway
options:
enable_ipv6:
description:
- Enable public IPv6 connectivity on the instance
default: false
type: bool
boot_type:
description:
- Boot method
default: bootscript
choices:
- bootscript
- local
image:
description:
- Image identifier used to start the instance with
required: true
name:
description:
- Name of the instance
organization:
description:
- Organization identifier
required: true
state:
description:
- Indicate desired state of the instance.
default: present
choices:
- present
- absent
- running
- restarted
- stopped
tags:
description:
- List of tags to apply to the instance (5 max)
required: false
default: []
region:
description:
- Scaleway compute zone
required: true
choices:
- ams1
- EMEA-NL-EVS
- par1
- EMEA-FR-PAR1
commercial_type:
description:
- Commercial name of the compute node
required: true
choices:
- ARM64-2GB
- ARM64-4GB
- ARM64-8GB
- ARM64-16GB
- ARM64-32GB
- ARM64-64GB
- ARM64-128GB
- C1
- C2S
- C2M
- C2L
- START1-XS
- START1-S
- START1-M
- START1-L
- X64-15GB
- X64-30GB
- X64-60GB
- X64-120GB
wait:
description:
- Wait for the instance to reach its desired state before returning.
type: bool
default: 'no'
wait_timeout:
description:
- Time to wait for the server to reach the expected state
required: false
default: 300
wait_sleep_time:
description:
- Time to wait before every attempt to check the state of the server
required: false
default: 3
'''
EXAMPLES = '''
- name: Create a server
scaleway_compute:
name: foobar
state: present
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
region: ams1
commercial_type: START1-S
tags:
- test
- www
- name: Destroy it right after
scaleway_compute:
name: foobar
state: absent
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
region: ams1
commercial_type: START1-S
'''
RETURN = '''
'''
import datetime
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
from ansible.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
SCALEWAY_COMMERCIAL_TYPES = [
# Virtual ARM64 compute instance
'ARM64-2GB',
'ARM64-4GB',
'ARM64-8GB',
'ARM64-16GB',
'ARM64-32GB',
'ARM64-64GB',
'ARM64-128GB',
# Baremetal
'C1', # ARM64 (4 cores) - 2GB
'C2S', # X86-64 (4 cores) - 8GB
'C2M', # X86-64 (8 cores) - 16GB
'C2L', # x86-64 (8 cores) - 32 GB
# Virtual X86-64 compute instance
'START1-XS', # Starter X86-64 (1 core) - 1GB - 25 GB NVMe
'START1-S', # Starter X86-64 (2 cores) - 2GB - 50 GB NVMe
'START1-M', # Starter X86-64 (4 cores) - 4GB - 100 GB NVMe
'START1-L', # Starter X86-64 (8 cores) - 8GB - 200 GB NVMe
'X64-15GB',
'X64-30GB',
'X64-60GB',
'X64-120GB',
]
SCALEWAY_SERVER_STATES = (
'stopped',
'stopping',
'starting',
'running',
'locked'
)
SCALEWAY_TRANSITIONS_STATES = (
"stopping",
"starting",
"pending"
)
def fetch_state(compute_api, server):
compute_api.module.debug("fetch_state of server: %s" % server["id"])
response = compute_api.get(path="servers/%s" % server["id"])
if response.status_code == 404:
return "absent"
if not response.ok:
msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
try:
compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
return response.json["server"]["state"]
except KeyError:
compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
def wait_to_complete_state_transition(compute_api, server):
wait = compute_api.module.params["wait"]
if not wait:
return
wait_timeout = compute_api.module.params["wait_timeout"]
wait_sleep_time = compute_api.module.params["wait_sleep_time"]
start = datetime.datetime.utcnow()
end = start + datetime.timedelta(seconds=wait_timeout)
while datetime.datetime.utcnow() < end:
compute_api.module.debug("We are going to wait for the server to finish its transition")
if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
compute_api.module.debug("It seems that the server is not in transition anymore.")
compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
break
time.sleep(wait_sleep_time)
else:
compute_api.module.fail_json(msg="Server takes too long to finish its transition")
def create_server(compute_api, server):
compute_api.module.debug("Starting a create_server")
target_server = None
response = compute_api.post(path="servers",
data={"enable_ipv6": server["enable_ipv6"],
"boot_type": server["boot_type"],
"tags": server["tags"],
"commercial_type": server["commercial_type"],
"image": server["image"],
"name": server["name"],
"organization": server["organization"]})
if not response.ok:
msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
try:
target_server = response.json["server"]
except KeyError:
compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
return target_server
def restart_server(compute_api, server):
return perform_action(compute_api=compute_api, server=server, action="reboot")
def stop_server(compute_api, server):
return perform_action(compute_api=compute_api, server=server, action="poweroff")
def start_server(compute_api, server):
return perform_action(compute_api=compute_api, server=server, action="poweron")
def perform_action(compute_api, server, action):
response = compute_api.post(path="servers/%s/action" % server["id"],
data={"action": action})
if not response.ok:
msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
wait_to_complete_state_transition(compute_api=compute_api, server=server)
return response
def remove_server(compute_api, server):
compute_api.module.debug("Starting remove server strategy")
response = compute_api.delete(path="servers/%s" % server["id"])
if not response.ok:
msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
wait_to_complete_state_transition(compute_api=compute_api, server=server)
return response
def present_strategy(compute_api, wished_server):
compute_api.module.debug("Starting present strategy")
changed = False
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
if not query_results:
changed = True
if compute_api.module.check_mode:
return changed, {"status": "A server would be created."}
target_server = create_server(compute_api=compute_api, server=wished_server)
else:
target_server = query_results[0]
if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
wished_server=wished_server):
changed = True
if compute_api.module.check_mode:
return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
return changed, target_server
def absent_strategy(compute_api, wished_server):
compute_api.module.debug("Starting absent strategy")
changed = False
target_server = None
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
if not query_results:
return changed, {"status": "Server already absent."}
else:
target_server = query_results[0]
changed = True
if compute_api.module.check_mode:
return changed, {"status": "Server %s would be made absent." % target_server["id"]}
# A server MUST be stopped to be deleted.
while not fetch_state(compute_api=compute_api, server=target_server) == "stopped":
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
response = stop_server(compute_api=compute_api, server=target_server)
if not response.ok:
err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
response.json)
compute_api.module.fail_json(msg=err_msg)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
response = remove_server(compute_api=compute_api, server=target_server)
if not response.ok:
err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
compute_api.module.fail_json(msg=err_msg)
return changed, {"status": "Server %s deleted" % target_server["id"]}
def running_strategy(compute_api, wished_server):
compute_api.module.debug("Starting running strategy")
changed = False
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
if not query_results:
changed = True
if compute_api.module.check_mode:
return changed, {"status": "A server would be created before being run."}
target_server = create_server(compute_api=compute_api, server=wished_server)
else:
target_server = query_results[0]
if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
wished_server=wished_server):
changed = True
if compute_api.module.check_mode:
return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
current_state = fetch_state(compute_api=compute_api, server=target_server)
if current_state not in ("running", "starting"):
compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
changed = True
if compute_api.module.check_mode:
return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
response = start_server(compute_api=compute_api, server=target_server)
if not response.ok:
msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
return changed, target_server
def stop_strategy(compute_api, wished_server):
compute_api.module.debug("Starting stop strategy")
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
changed = False
if not query_results:
if compute_api.module.check_mode:
return changed, {"status": "A server would be created before being stopped."}
target_server = create_server(compute_api=compute_api, server=wished_server)
changed = True
else:
target_server = query_results[0]
compute_api.module.debug("stop_strategy: Servers are found.")
if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
wished_server=wished_server):
changed = True
if compute_api.module.check_mode:
return changed, {
"status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
current_state = fetch_state(compute_api=compute_api, server=target_server)
if current_state not in ("stopped",):
compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
changed = True
if compute_api.module.check_mode:
return changed, {"status": "Server %s would be stopped." % target_server["id"]}
response = stop_server(compute_api=compute_api, server=target_server)
compute_api.module.debug(response.json)
compute_api.module.debug(response.ok)
if not response.ok:
msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
return changed, target_server
def restart_strategy(compute_api, wished_server):
compute_api.module.debug("Starting restart strategy")
changed = False
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
if not query_results:
changed = True
if compute_api.module.check_mode:
return changed, {"status": "A server would be created before being rebooted."}
target_server = create_server(compute_api=compute_api, server=wished_server)
else:
target_server = query_results[0]
if server_attributes_should_be_changed(compute_api=compute_api,
target_server=target_server,
wished_server=wished_server):
changed = True
if compute_api.module.check_mode:
return changed, {
"status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
changed = True
if compute_api.module.check_mode:
return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
response = restart_server(compute_api=compute_api, server=target_server)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
if not response.ok:
msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
response.json)
compute_api.module.fail_json(msg=msg)
if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
response = restart_server(compute_api=compute_api, server=target_server)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
if not response.ok:
msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
response.json)
compute_api.module.fail_json(msg=msg)
return changed, target_server
state_strategy = {
"present": present_strategy,
"restarted": restart_strategy,
"stopped": stop_strategy,
"running": running_strategy,
"absent": absent_strategy
}
def find(compute_api, wished_server, per_page=1):
compute_api.module.debug("Getting inside find")
# Only the name attribute is accepted in the Compute query API
url = 'servers?name=%s&per_page=%d' % (urlquote(wished_server["name"]), per_page)
response = compute_api.get(url)
if not response.ok:
msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
search_results = response.json["servers"]
return search_results
PATCH_MUTABLE_SERVER_ATTRIBUTES = (
"ipv6",
"tags",
"name",
"dynamic_ip_required",
)
def server_attributes_should_be_changed(compute_api, target_server, wished_server):
compute_api.module.debug("Checking if server attributes should be changed")
compute_api.module.debug("Current Server: %s" % target_server)
compute_api.module.debug("Wished Server: %s" % wished_server)
debug_dict = dict((x, (target_server[x], wished_server[x]))
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
if x in target_server and x in wished_server)
compute_api.module.debug("Debug dict %s" % debug_dict)
try:
return any([target_server[x] != wished_server[x]
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
if x in target_server and x in wished_server])
except AttributeError:
compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
def server_change_attributes(compute_api, target_server, wished_server):
compute_api.module.debug("Starting patching server attributes")
patch_payload = dict((x, wished_server[x])
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
if x in wished_server and x in target_server)
response = compute_api.patch(path="servers/%s" % target_server["id"],
data=patch_payload)
if not response.ok:
msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
return response
def core(module):
region = module.params["region"]
wished_server = {
"state": module.params["state"],
"image": module.params["image"],
"name": module.params["name"],
"commercial_type": module.params["commercial_type"],
"enable_ipv6": module.params["enable_ipv6"],
"boot_type": module.params["boot_type"],
"tags": module.params["tags"],
"organization": module.params["organization"]
}
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
compute_api = Scaleway(module=module)
changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
module.exit_json(changed=changed, msg=summary)
def main():
argument_spec = scaleway_argument_spec()
argument_spec.update(dict(
image=dict(required=True),
name=dict(),
region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
commercial_type=dict(required=True, choices=SCALEWAY_COMMERCIAL_TYPES),
enable_ipv6=dict(default=False, type="bool"),
boot_type=dict(default="bootscript"),
state=dict(choices=state_strategy.keys(), default='present'),
tags=dict(type="list", default=[]),
organization=dict(required=True),
wait=dict(type="bool", default=False),
wait_timeout=dict(type="int", default=300),
wait_sleep_time=dict(type="int", default=3),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
core(module)
if __name__ == '__main__':
main()

View file

@ -1,4 +1,23 @@
--- ---
- hosts: localhost
become: false
tasks:
- name: Ensure the requirements are installed
debug:
msg: "{{ '' | ipaddr }}"
ignore_errors: true
no_log: true
register: ipaddr
- name: Verify Ansible meets Algo VPN requirements.
assert:
that:
- ansible_version.full is version('2.7.10', '==')
- not ipaddr.failed
msg: >
You must update the requirements to use this version of Algo.
Try to run python -m pip install -U -r requirements.txt
- name: Include prompts playbook - name: Include prompts playbook
import_playbook: input.yml import_playbook: input.yml
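The new assert pins the toolchain: Ansible must be exactly 2.7.10 and the ipaddr filter has to resolve, which in practice means the netaddr package is installed (both are pinned in requirements.txt elsewhere in this commit). A quick local check, sketched with the repository's own requirements file:

python -m pip install -U -r requirements.txt   # installs the pinned ansible==2.7.10 and netaddr
ansible --version                              # should now report 2.7.10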

View file

@ -1,29 +1,34 @@
--- ---
- block:
- name: Display the invocation environment - name: Display the invocation environment
local_action: shell: >
module: shell
./algo-showenv.sh \ ./algo-showenv.sh \
'algo_provider "{{ algo_provider }}"' \ 'algo_provider "{{ algo_provider }}"' \
{% if ipsec_enabled %}
'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \ 'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \
'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \ 'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \
'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \ 'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \
'algo_windows "{{ algo_windows }}"' \
{% endif %}
'algo_local_dns "{{ algo_local_dns }}"' \ 'algo_local_dns "{{ algo_local_dns }}"' \
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \ 'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
'algo_windows "{{ algo_windows }}"' \
'wireguard_enabled "{{ wireguard_enabled }}"' \ 'wireguard_enabled "{{ wireguard_enabled }}"' \
'dns_encryption "{{ dns_encryption }}"' \ 'dns_encryption "{{ dns_encryption }}"' \
> /dev/tty > /dev/tty
tags: debug tags: debug
- name: Install the requirements - name: Install the requirements
local_action: pip:
module: pip
state: latest state: latest
name: name:
- pyOpenSSL - pyOpenSSL
- jinja2==2.8 - jinja2==2.8
- segno - segno
tags: always tags:
- always
- skip_ansible_lint
delegate_to: localhost
become: false
- name: Generate the SSH private key - name: Generate the SSH private key
openssl_privatekey: openssl_privatekey:

6
playbooks/rescue.yml Normal file
View file

@ -0,0 +1,6 @@
---
- debug:
var: fail_hint
- name: Fail the installation
fail:

View file

@ -1 +1,2 @@
ansible==2.5.2 ansible==2.7.10
netaddr

View file

@ -1,4 +1,3 @@
--- ---
- name: restart strongswan - name: restart strongswan
service: name=strongswan state=restarted service: name=strongswan state=restarted

View file

@ -8,13 +8,21 @@
package: name="{{ item }}" state=present package: name="{{ item }}" state=present
with_items: with_items:
- "{{ prerequisites }}" - "{{ prerequisites }}"
register: result
until: result is succeeded
retries: 10
delay: 3
- name: Install strongSwan - name: Install strongSwan
package: name=strongswan state=present package: name=strongswan state=present
register: result
until: result is succeeded
retries: 10
delay: 3
- name: Setup the ipsec config - name: Setup the ipsec config
template: template:
src: "roles/vpn/templates/client_ipsec.conf.j2" src: "roles/strongswan/templates/client_ipsec.conf.j2"
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf" dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf"
mode: '0644' mode: '0644'
with_items: with_items:
@ -24,7 +32,7 @@
- name: Setup the ipsec secrets - name: Setup the ipsec secrets
template: template:
src: "roles/vpn/templates/client_ipsec.secrets.j2" src: "roles/strongswan/templates/client_ipsec.secrets.j2"
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets" dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets"
mode: '0600' mode: '0600'
with_items: with_items:

View file

@ -1,6 +1,6 @@
--- ---
- name: Set OS specific facts
- set_fact: set_fact:
prerequisites: prerequisites:
- epel-release - epel-release
configs_prefix: /etc/strongswan configs_prefix: /etc/strongswan

View file

@ -1,5 +1,6 @@
--- ---
- name: Set OS specific facts
- set_fact: set_fact:
prerequisites: [] prerequisites:
- libstrongswan-standard-plugins
configs_prefix: /etc configs_prefix: /etc

View file

@ -1,6 +1,6 @@
--- ---
- name: Set OS specific facts
- set_fact: set_fact:
prerequisites: prerequisites:
- libselinux-python - libselinux-python
configs_prefix: /etc/strongswan configs_prefix: /etc/strongswan

View file

@ -1,5 +1,6 @@
--- ---
- name: Set OS specific facts
- set_fact: set_fact:
prerequisites: [] prerequisites:
- libstrongswan-standard-plugins
configs_prefix: /etc configs_prefix: /etc

View file

@ -1,5 +1,4 @@
--- ---
- block:
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
@ -10,14 +9,14 @@
- set_fact: - set_fact:
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ azure_regions[_algo_region.user_input | int -1 ]['name'] }} {%- elif _algo_region.user_input %}{{ azure_regions[_algo_region.user_input | int -1 ]['name'] }}
{%- else %}{{ azure_regions[default_region | int - 1]['name'] }}{% endif %} {%- else %}{{ azure_regions[default_region | int - 1]['name'] }}{% endif %}
- name: Create AlgoVPN Server - name: Create AlgoVPN Server
azure_rm_deployment: azure_rm_deployment:
state: present state: present
deployment_name: "{{ algo_server_name }}" deployment_name: "{{ algo_server_name }}"
template: "{{ lookup('file', 'deployment.json') }}" template: "{{ lookup('file', role_path + '/files/deployment.json') }}"
secret: "{{ secret }}" secret: "{{ secret }}"
tenant: "{{ tenant }}" tenant: "{{ tenant }}"
client_id: "{{ client_id }}" client_id: "{{ client_id }}"
@ -40,8 +39,3 @@
ansible_ssh_user: ubuntu ansible_ssh_user: ubuntu
environment: environment:
PYTHONPATH: "{{ azure_venv }}/lib/python2.7/site-packages/" PYTHONPATH: "{{ azure_venv }}/lib/python2.7/site-packages/"
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -10,23 +10,32 @@
name: name:
- packaging - packaging
- requests[security] - requests[security]
- azure-mgmt-compute>=2.0.0,<3 - azure-cli-core==2.0.35
- azure-mgmt-network>=1.3.0,<2 - azure-cli-nspkg==3.0.2
- azure-mgmt-storage>=1.5.0,<2 - azure-common==1.1.11
- azure-mgmt-resource>=1.1.0,<2 - azure-mgmt-batch==4.1.0
- azure-storage>=0.35.1,<0.36 - azure-mgmt-compute==2.1.0
- azure-cli-core>=2.0.12,<3 - azure-mgmt-containerinstance==0.4.0
- azure-mgmt-containerregistry==2.0.0
- azure-mgmt-containerservice==3.0.1
- azure-mgmt-dns==1.2.0
- azure-mgmt-keyvault==0.40.0
- azure-mgmt-marketplaceordering==0.1.0
- azure-mgmt-monitor==0.5.2
- azure-mgmt-network==1.7.1
- azure-mgmt-nspkg==2.0.0
- azure-mgmt-rdbms==1.2.0
- azure-mgmt-resource==1.2.2
- azure-mgmt-sql==0.7.1
- azure-mgmt-storage==1.5.0
- azure-mgmt-trafficmanager==0.50.0
- azure-mgmt-web==0.32.0
- azure-nspkg==2.0.0
- azure-storage==0.35.1
- msrest==0.4.29 - msrest==0.4.29
- msrestazure==0.4.31 - msrestazure==0.4.31
- azure-mgmt-dns>=1.0.1,<2 - azure-keyvault==1.0.0a1
- azure-mgmt-keyvault>=0.40.0,<0.41 - azure-graphrbac==0.40.0
- azure-mgmt-batch>=4.1.0,<5
- azure-mgmt-sql>=0.7.1,<0.8
- azure-mgmt-web>=0.32.0,<0.33
- azure-mgmt-containerservice>=2.0.0,<3.0.0
- azure-mgmt-containerregistry>=1.0.1
- azure-mgmt-rdbms==1.2.0
- azure-mgmt-containerinstance==0.4.0
state: latest state: latest
virtualenv: "{{ azure_venv }}" virtualenv: "{{ azure_venv }}"
virtualenv_python: python2.7 virtualenv_python: python2.7

View file

@ -1,4 +1,4 @@
- block: ---
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
@ -10,7 +10,7 @@
set_fact: set_fact:
algo_do_region: >- algo_do_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }} {%- elif _algo_region.user_input %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %} {%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}" public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
@ -22,7 +22,7 @@
api_token: "{{ algo_do_token }}" api_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}" name: "{{ SSH_keys.comment }}"
register: ssh_keys register: ssh_keys
until: ssh_keys.changed != true until: not ssh_keys.changed
retries: 10 retries: 10
delay: 1 delay: 1
@ -83,7 +83,7 @@
api_token: "{{ algo_do_token }}" api_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}" name: "{{ SSH_keys.comment }}"
register: ssh_keys register: ssh_keys
until: ssh_keys.changed != true until: not ssh_keys.changed
retries: 10 retries: 10
delay: 1 delay: 1
@ -103,8 +103,3 @@
msg: "Please, ensure that your API token is not read-only." msg: "Please, ensure that your API token is not read-only."
environment: environment:
PYTHONPATH: "{{ digitalocean_venv }}/lib/python2.7/site-packages/" PYTHONPATH: "{{ digitalocean_venv }}/lib/python2.7/site-packages/"
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -6,7 +6,7 @@
stack_name: "{{ stack_name }}" stack_name: "{{ stack_name }}"
state: "present" state: "present"
region: "{{ algo_region }}" region: "{{ algo_region }}"
template: roles/cloud-ec2/files/stack.yml template: roles/cloud-ec2/files/stack.yaml
template_parameters: template_parameters:
InstanceTypeParameter: "{{ cloud_providers.ec2.size }}" InstanceTypeParameter: "{{ cloud_providers.ec2.size }}"
PublicSSHKeyParameter: "{{ lookup('file', SSH_keys.public) }}" PublicSSHKeyParameter: "{{ lookup('file', SSH_keys.public) }}"

View file

@ -1,4 +1,4 @@
- block: ---
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
@ -9,7 +9,7 @@
- set_fact: - set_fact:
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ aws_regions[_algo_region.user_input | int -1 ]['region_name'] }} {%- elif _algo_region.user_input %}{{ aws_regions[_algo_region.user_input | int -1 ]['region_name'] }}
{%- else %}{{ aws_regions[default_region | int - 1]['region_name'] }}{% endif %} {%- else %}{{ aws_regions[default_region | int - 1]['region_name'] }}{% endif %}
stack_name: "{{ algo_server_name | replace('.', '-') }}" stack_name: "{{ algo_server_name | replace('.', '-') }}"
@ -41,8 +41,3 @@
ansible_ssh_user: ubuntu ansible_ssh_user: ubuntu
environment: environment:
PYTHONPATH: "{{ ec2_venv }}/lib/python2.7/site-packages/" PYTHONPATH: "{{ ec2_venv }}/lib/python2.7/site-packages/"
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -1,4 +1,4 @@
- block: ---
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
@ -55,8 +55,3 @@
ansible_ssh_user: ubuntu ansible_ssh_user: ubuntu
environment: environment:
PYTHONPATH: "{{ gce_venv }}/lib/python2.7/site-packages/" PYTHONPATH: "{{ gce_venv }}/lib/python2.7/site-packages/"
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -63,5 +63,5 @@
- set_fact: - set_fact:
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _gce_region.user_input is defined and _gce_region.user_input != "" %}{{ gce_regions[_gce_region.user_input | int -1 ] }} {%- elif _gce_region.user_input %}{{ gce_regions[_gce_region.user_input | int -1 ] }}
{%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %} {%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %}

View file

@ -1,4 +1,4 @@
- block: ---
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
@ -42,9 +42,3 @@
ansible_ssh_user: ubuntu ansible_ssh_user: ubuntu
environment: environment:
PYTHONPATH: "{{ lightsail_venv }}/lib/python2.7/site-packages/" PYTHONPATH: "{{ lightsail_venv }}/lib/python2.7/site-packages/"
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -57,5 +57,5 @@
- set_fact: - set_fact:
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }} {%- elif _algo_region.user_input %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }}
{%- else %}{{ lightsail_regions[default_region | int - 1]['name'] }}{% endif %} {%- else %}{{ lightsail_regions[default_region | int - 1]['name'] }}{% endif %}

View file

@ -1,9 +1,8 @@
--- ---
- fail: - fail:
msg: "OpenStack credentials are not set. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh)" msg: "OpenStack credentials are not set. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh)"
when: lookup('env', 'OS_AUTH_URL') == "" when: lookup('env', 'OS_AUTH_URL')|length <= 0
- block:
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
@ -81,9 +80,3 @@
ansible_ssh_user: ubuntu ansible_ssh_user: ubuntu
environment: environment:
PYTHONPATH: "{{ openstack_venv }}/lib/python2.7/site-packages/" PYTHONPATH: "{{ openstack_venv }}/lib/python2.7/site-packages/"
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -1,10 +0,0 @@
---
- name: Set image id as a fact
set_fact:
image_id: "{{ item.id }}"
no_log: true
when:
- cloud_providers.scaleway.image == item.name
- cloud_providers.scaleway.arch == item.arch
- server_disk_size == item.root_volume.size
with_items: "{{ outer_item['json']['images'] }}"

View file

@ -1,140 +1,46 @@
- block:
- name: Include prompts - name: Include prompts
import_tasks: prompts.yml import_tasks: prompts.yml
- name: Set disk size - block:
set_fact: - name: Gather Scaleway organizations facts
server_disk_size: 50000000000 scaleway_organization_facts:
- name: Check server size
set_fact:
server_disk_size: 25000000000
when: cloud_providers.scaleway.size == "START1-XS"
- name: Check if server exists
uri:
url: "https://cp-{{ algo_region }}.scaleway.com/servers"
method: GET
headers:
Content-Type: 'application/json'
X-Auth-Token: "{{ algo_scaleway_token }}"
status_code: 200
register: scaleway_servers
- name: Set server id as a fact
set_fact:
server_id: "{{ item.id }}"
no_log: true
when: algo_server_name == item.name
with_items: "{{ scaleway_servers.json.servers }}"
- name: Create a server if it doesn't exist
block:
- name: Get the organization id
uri:
url: https://account.cloud.online.net/organizations
method: GET
headers:
Content-Type: 'application/json'
X-Auth-Token: "{{ algo_scaleway_token }}"
status_code: 200
register: scaleway_organizations
- name: Set organization id as a fact
set_fact:
organization_id: "{{ item.id }}"
no_log: true
when: algo_scaleway_org == item.name
with_items: "{{ scaleway_organizations.json.organizations }}"
- name: Get total count of images
uri:
url: "https://cp-{{ algo_region }}.scaleway.com/images"
method: GET
headers:
Content-Type: 'application/json'
X-Auth-Token: "{{ algo_scaleway_token }}"
status_code: 200
register: scaleway_pages
- name: Get images - name: Get images
uri: scaleway_image_facts:
url: "https://cp-{{ algo_region }}.scaleway.com/images?per_page=100&page={{ item }}" region: "{{ algo_region }}"
method: GET
headers:
Content-Type: 'application/json'
X-Auth-Token: "{{ algo_scaleway_token }}"
status_code: 200
register: scaleway_images
with_sequence: start=1 end={{ ((scaleway_pages.x_total_count|int / 100)| round )|int }}
- name: Set image id as a fact - name: Set cloud specific facts
include_tasks: image_facts.yml set_fact:
with_items: "{{ scaleway_images['results'] }}" organization_id: "{{ scaleway_organization_facts[0]['id'] }}"
loop_control: images: >-
loop_var: outer_item [{% for i in scaleway_image_facts -%}
{% if i.name == cloud_providers.scaleway.image and
i.arch == cloud_providers.scaleway.arch -%}
'{{ i.id }}'{% if not loop.last %},{% endif %}
{%- endif -%}
{%- endfor -%}]
- name: Create a server - name: Create a server
uri: scaleway_compute:
url: "https://cp-{{ algo_region }}.scaleway.com/servers/"
method: POST
headers:
Content-Type: 'application/json'
X-Auth-Token: "{{ algo_scaleway_token }}"
body:
organization: "{{ organization_id }}"
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
image: "{{ image_id }}"
commercial_type: "{{cloud_providers.scaleway.size }}"
enable_ipv6: true enable_ipv6: true
boot_type: local boot_type: local
state: running
image: "{{ images[0] }}"
organization: "{{ organization_id }}"
region: "{{ algo_region }}"
commercial_type: "{{ cloud_providers.scaleway.size }}"
wait: true
tags: tags:
- Environment:Algo - Environment:Algo
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
status_code: 201
body_format: json
register: algo_instance
- name: Set server id as a fact
set_fact:
server_id: "{{ algo_instance.json.server.id }}"
when: server_id is not defined
- name: Power on the server
uri:
url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}/action
method: POST
headers:
Content-Type: application/json
X-Auth-Token: "{{ algo_scaleway_token }}"
body:
action: poweron
status_code: 202
body_format: json
ignore_errors: true
no_log: true
- name: Wait for the server to become running
uri:
url: "https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}"
method: GET
headers:
Content-Type: 'application/json'
X-Auth-Token: "{{ algo_scaleway_token }}"
status_code: 200
until:
- algo_instance.json.server.state is defined
- algo_instance.json.server.state == "running"
retries: 20
delay: 30
register: algo_instance register: algo_instance
until: algo_instance.msg.public_ip
retries: 3
delay: 3
environment:
SCW_TOKEN: "{{ algo_scaleway_token }}"
- set_fact: - set_fact:
cloud_instance_ip: "{{ algo_instance['json']['server']['public_ip']['address'] }}" cloud_instance_ip: "{{ algo_instance.msg.public_ip.address }}"
ansible_ssh_user: root ansible_ssh_user: root
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -1,16 +1,12 @@
--- ---
- pause: - pause:
prompt: | prompt: |
Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/) Enter your auth token (https://trailofbits.github.io/algo/cloud-scaleway.html)
echo: false echo: false
register: _scaleway_token register: _scaleway_token
when: scaleway_token is undefined when:
- scaleway_token is undefined
- pause: - lookup('env','SCW_TOKEN')|length <= 0
prompt: |
Enter your organization name (https://cloud.scaleway.com/#/billing)
register: _scaleway_org
when: scaleway_org is undefined
- pause: - pause:
prompt: | prompt: |
@ -26,9 +22,8 @@
- name: Set scaleway facts - name: Set scaleway facts
set_fact: set_fact:
algo_scaleway_token: "{{ scaleway_token | default(_scaleway_token.user_input) }}" algo_scaleway_token: "{{ scaleway_token | default(_scaleway_token.user_input) | default(lookup('env','SCW_TOKEN'), true) }}"
algo_scaleway_org: "{{ scaleway_org | default(_scaleway_org.user_input|default(omit)) }}"
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ scaleway_regions[_algo_region.user_input | int -1 ]['alias'] }} {%- elif _algo_region.user_input %}{{ scaleway_regions[_algo_region.user_input | int -1 ]['alias'] }}
{%- else %}{{ scaleway_regions.0.alias }}{% endif %} {%- else %}{{ scaleway_regions.0.alias }}{% endif %}
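Because the token prompt is now skipped whenever SCW_TOKEN is set, the Scaleway credential can be provided up front; a rough sketch (the token value is a placeholder, and the remaining prompts such as region and server name still appear unless passed as variables):

export SCW_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx   # placeholder, not a real token
./algo                                                  # the Scaleway auth token prompt is skipped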

View file

@ -1,20 +1,43 @@
- block: ---
- name: Include prompts - name: Include prompts
import_tasks: prompts.yml import_tasks: prompts.yml
- block:
- name: Upload the SSH key - name: Upload the SSH key
vr_ssh_key: vultr_ssh_key:
name: "{{ SSH_keys.comment }}" name: "{{ SSH_keys.comment }}"
ssh_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}" ssh_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
register: ssh_key register: ssh_key
- name: Creating a firewall group
vultr_firewall_group:
name: "{{ algo_server_name }}"
- name: Creating firewall rules
vultr_firewall_rule:
group: "{{ algo_server_name }}"
protocol: "{{ item.protocol }}"
port: "{{ item.port }}"
ip_version: "{{ item.ip }}"
cidr: "{{ item.cidr }}"
with_items:
- { protocol: tcp, port: 22, ip: v4, cidr: "0.0.0.0/0" }
- { protocol: tcp, port: 22, ip: v6, cidr: "::/0" }
- { protocol: udp, port: 500, ip: v4, cidr: "0.0.0.0/0" }
- { protocol: udp, port: 500, ip: v6, cidr: "::/0" }
- { protocol: udp, port: 4500, ip: v4, cidr: "0.0.0.0/0" }
- { protocol: udp, port: 4500, ip: v6, cidr: "::/0" }
- { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: "0.0.0.0/0" }
- { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" }
- name: Creating a server - name: Creating a server
vr_server: vultr_server:
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
hostname: "{{ algo_server_name }}" hostname: "{{ algo_server_name }}"
os: "{{ cloud_providers.vultr.os }}" os: "{{ cloud_providers.vultr.os }}"
plan: "{{ cloud_providers.vultr.size }}" plan: "{{ cloud_providers.vultr.size }}"
region: "{{ algo_vultr_region }}" region: "{{ algo_vultr_region }}"
firewall_group: "{{ algo_server_name }}"
state: started state: started
tag: Environment:Algo tag: Environment:Algo
ssh_key: "{{ ssh_key.vultr_ssh_key.name }}" ssh_key: "{{ ssh_key.vultr_ssh_key.name }}"
@ -29,8 +52,3 @@
environment: environment:
VULTR_API_CONFIG: "{{ algo_vultr_config }}" VULTR_API_CONFIG: "{{ algo_vultr_config }}"
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -4,7 +4,9 @@
Enter the local path to your configuration INI file Enter the local path to your configuration INI file
(https://trailofbits.github.io/algo/cloud-vultr.html): (https://trailofbits.github.io/algo/cloud-vultr.html):
register: _vultr_config register: _vultr_config
when: vultr_config is undefined when:
- vultr_config is undefined
- lookup('env','VULTR_API_CONFIG')|length <= 0
- name: Set the token as a fact - name: Set the token as a fact
set_fact: set_fact:
@ -52,5 +54,5 @@
set_fact: set_fact:
algo_vultr_region: >- algo_vultr_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }} {%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }}
{%- else %}{{ vultr_regions[default_region | int - 1]['name'] }}{% endif %} {%- else %}{{ vultr_regions[default_region | int - 1]['name'] }}{% endif %}

View file

@ -18,10 +18,7 @@
ifconfig lo100 destroy || true && ifconfig lo100 destroy || true &&
ifconfig lo100 create && ifconfig lo100 create &&
ifconfig lo100 inet {{ local_service_ip }} netmask 255.255.255.255 && ifconfig lo100 inet {{ local_service_ip }} netmask 255.255.255.255 &&
ifconfig lo100 inet6 FCAA::1/64; echo $? ifconfig lo100 inet6 {{ local_service_ipv6 }}/128; echo $?
- name: save iptables
shell: service netfilter-persistent save
- name: restart iptables - name: restart iptables
service: name=netfilter-persistent state=restarted service: name=netfilter-persistent state=restarted

View file

@ -1,26 +1,26 @@
--- ---
- block: - block:
- name: Generate password for the CA key - name: Generate password for the CA key
local_action: command: openssl rand -hex 16
module: shell
openssl rand -hex 16
register: CA_password register: CA_password
- name: Generate p12 export password - name: Generate p12 export password
local_action: shell: >
module: shell openssl rand 8 |
openssl rand 8 | python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print("".join([chars[ord(c) % 64] for c in list(sys.stdin.read())]))' python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print("".join([chars[ord(c) % 64] for c in list(sys.stdin.read())]))'
register: p12_password_generated register: p12_password_generated
when: p12_password is not defined when: p12_password is not defined
tags: update-users tags: update-users
become: false become: false
delegate_to: localhost
- name: Define facts - name: Define facts
set_fact: set_fact:
p12_export_password: "{{ p12_password|default(p12_password_generated.stdout) }}" p12_export_password: "{{ p12_password|default(p12_password_generated.stdout) }}"
tags: update-users tags: update-users
- set_fact: - name: Set facts
set_fact:
CA_password: "{{ CA_password.stdout }}" CA_password: "{{ CA_password.stdout }}"
IP_subject_alt_name: "{{ IP_subject_alt_name }}" IP_subject_alt_name: "{{ IP_subject_alt_name }}"
@ -31,5 +31,5 @@
- name: Check size of MTU - name: Check size of MTU
set_fact: set_fact:
reduce_mtu: "{% if reduce_mtu|int == 0 and ansible_default_ipv4['mtu']|int < 1500 %}{{ 1500 - ansible_default_ipv4['mtu']|int }}{% else %}{{ reduce_mtu|int }}{% endif %}" reduce_mtu: "{{ 1500 - ansible_default_ipv4['mtu']|int if reduce_mtu|int == 0 and ansible_default_ipv4['mtu']|int < 1500 else reduce_mtu|int }}"
tags: always tags: always

View file

@ -17,7 +17,8 @@
- name: Gather additional facts - name: Gather additional facts
import_tasks: facts.yml import_tasks: facts.yml
- set_fact: - name: Set OS specific facts
set_fact:
config_prefix: "/usr/local/" config_prefix: "/usr/local/"
strongswan_shell: /usr/sbin/nologin strongswan_shell: /usr/sbin/nologin
strongswan_home: /var/empty strongswan_home: /var/empty
@ -53,7 +54,7 @@
block: | block: |
cloned_interfaces="lo100" cloned_interfaces="lo100"
ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255" ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255"
ifconfig_lo100_ipv6="inet6 FCAA::1/64" ifconfig_lo100_ipv6="inet6 {{ local_service_ipv6 }}/128"
notify: notify:
- restart loopback bsd - restart loopback bsd
@ -73,5 +74,6 @@
shell: > shell: >
kldstat -n ipfw.ko || kldload ipfw ; sysctl net.inet.ip.fw.enable=0 && kldstat -n ipfw.ko || kldload ipfw ; sysctl net.inet.ip.fw.enable=0 &&
bash /etc/rc.firewall && sysctl net.inet.ip.fw.enable=1 bash /etc/rc.firewall && sysctl net.inet.ip.fw.enable=1
changed_when: false
- meta: flush_handlers - meta: flush_handlers

View file

@ -1,8 +1,8 @@
--- ---
- block:
- name: Check the system - name: Check the system
raw: uname -a raw: uname -a
register: OS register: OS
changed_when: false
tags: tags:
- update-users - update-users
@ -18,15 +18,10 @@
- name: Sysctl tuning - name: Sysctl tuning
sysctl: name="{{ item.item }}" value="{{ item.value }}" sysctl: name="{{ item.item }}" value="{{ item.value }}"
when: item.item != "" when: item.item
with_items: with_items:
- "{{ sysctl|default([]) }}" - "{{ sysctl|default([]) }}"
tags: tags:
- always - always
- meta: flush_handlers - meta: flush_handlers
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -24,7 +24,7 @@
when: reboot_required is defined and reboot_required.stdout == 'required' when: reboot_required is defined and reboot_required.stdout == 'required'
ignore_errors: true ignore_errors: true
- name: Wait until SSH becomes ready... - name: Wait until the server becomes ready...
wait_for_connection: wait_for_connection:
delay: 20 delay: 20
timeout: 320 timeout: 320
@ -61,18 +61,21 @@
- meta: flush_handlers - meta: flush_handlers
- name: Check apparmor support - name: Check apparmor support
shell: apparmor_status command: apparmor_status
ignore_errors: yes ignore_errors: yes
changed_when: false
register: apparmor_status register: apparmor_status
- set_fact: - name: Set fact if apparmor enabled
set_fact:
apparmor_enabled: true apparmor_enabled: true
when: '"profiles are in enforce mode" in apparmor_status.stdout' when: '"profiles are in enforce mode" in apparmor_status.stdout'
- name: Gather additional facts - name: Gather additional facts
import_tasks: facts.yml import_tasks: facts.yml
- set_fact: - name: Set OS specific facts
set_fact:
tools: tools:
- git - git
- screen - screen
@ -82,6 +85,7 @@
- iptables-persistent - iptables-persistent
- cgroup-tools - cgroup-tools
- openssl - openssl
- gnupg2
sysctl: sysctl:
- item: net.ipv4.ip_forward - item: net.ipv4.ip_forward
value: 1 value: 1
@ -92,11 +96,9 @@
- name: Install tools - name: Install tools
apt: apt:
name: "{{ item }}" name: "{{ tools|default([]) }}"
state: present state: present
update_cache: true update_cache: true
with_items:
- "{{ tools|default([]) }}"
- name: Install headers - name: Install headers
apt: apt:

View file

@ -2,7 +2,7 @@
- name: Install unattended-upgrades - name: Install unattended-upgrades
apt: apt:
name: unattended-upgrades name: unattended-upgrades
state: latest state: present
- name: Configure unattended-upgrades - name: Configure unattended-upgrades
template: template:

View file

@ -4,4 +4,4 @@ Name=lo
[Network] [Network]
Description=lo:100 Description=lo:100
Address={{ local_service_ip }}/32 Address={{ local_service_ip }}/32
Address=FCAA::1/64 Address={{ local_service_ipv6 }}/128

View file

@ -83,7 +83,7 @@ COMMIT
# particular virtual (tun,tap,...) or physical (ethernet) interface. # particular virtual (tun,tap,...) or physical (ethernet) interface.
# Accept DNS traffic to the local DNS resolver # Accept DNS traffic to the local DNS resolver
-A INPUT -d fcaa::1 -p udp --dport 53 -j ACCEPT -A INPUT -d {{ local_service_ipv6 }}/128 -p udp --dport 53 -j ACCEPT
# Drop traffic between VPN clients # Drop traffic between VPN clients
-A FORWARD -s {{ subnets|join(',') }} -d {{ subnets|join(',') }} -j {{ "DROP" if BetweenClients_DROP else "ACCEPT" }} -A FORWARD -s {{ subnets|join(',') }} -d {{ subnets|join(',') }} -j {{ "DROP" if BetweenClients_DROP else "ACCEPT" }}

View file

@ -3,3 +3,7 @@
- name: restart apparmor - name: restart apparmor
service: name=apparmor state=restarted service: name=apparmor state=restarted
- name: daemon-reload
systemd:
daemon_reload: true

View file

@ -1,5 +1,4 @@
--- ---
- block:
- name: Dnsmasq installed - name: Dnsmasq installed
package: name=dnsmasq package: name=dnsmasq
@ -37,6 +36,7 @@
- name: Update adblock hosts - name: Update adblock hosts
command: /usr/local/sbin/adblock.sh command: /usr/local/sbin/adblock.sh
changed_when: false
- meta: flush_handlers - meta: flush_handlers
@ -45,8 +45,3 @@
name: dnsmasq name: dnsmasq
state: started state: started
enabled: yes enabled: yes
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -7,13 +7,13 @@
owner: root owner: root
group: root group: root
mode: 0600 mode: 0600
when: apparmor_enabled|default(false)|bool == true when: apparmor_enabled|default(false)|bool
notify: notify:
- restart dnsmasq - restart dnsmasq
- name: Ubuntu | Enforce the dnsmasq AppArmor policy - name: Ubuntu | Enforce the dnsmasq AppArmor policy
shell: aa-enforce usr.sbin.dnsmasq command: aa-enforce usr.sbin.dnsmasq
when: apparmor_enabled|default(false)|bool == true when: apparmor_enabled|default(false)|bool
tags: ['apparmor'] tags: ['apparmor']
- name: Ubuntu | Ensure that the dnsmasq service directory exists

View file

@ -116,7 +116,7 @@ group=nogroup
#except-interface= #except-interface=
# Or which to listen on by address (remember to include 127.0.0.1 if # Or which to listen on by address (remember to include 127.0.0.1 if
# you use this.) # you use this.)
listen-address=127.0.0.1,FCAA::1,{{ local_service_ip }} listen-address=127.0.0.1,{{ local_service_ipv6 }},{{ local_service_ip }}
# If you want dnsmasq to provide only DNS service on an interface, # If you want dnsmasq to provide only DNS service on an interface,
# configure it as shown above, and then use the following line to # configure it as shown above, and then use the following line to
# disable DHCP and TFTP on it. # disable DHCP and TFTP on it.

View file

@ -12,7 +12,7 @@
- name: Install dnscrypt-proxy - name: Install dnscrypt-proxy
apt: apt:
name: dnscrypt-proxy name: dnscrypt-proxy
state: latest state: present
update_cache: true update_cache: true
- name: Configure unattended-upgrades - name: Configure unattended-upgrades
@ -37,7 +37,7 @@
command: aa-enforce usr.bin.dnscrypt-proxy command: aa-enforce usr.bin.dnscrypt-proxy
changed_when: false changed_when: false
tags: apparmor tags: apparmor
when: apparmor_enabled|default(false)|bool == true when: apparmor_enabled|default(false)|bool
- name: Ubuntu | Ensure that the dnscrypt-proxy service directory exists
file: file:

View file

@ -1,10 +1,3 @@
--- ---
- block:
- name: Include prompts - name: Include prompts
import_tasks: prompts.yml import_tasks: prompts.yml
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -10,24 +10,24 @@
set_fact: set_fact:
cloud_instance_ip: >- cloud_instance_ip: >-
{% if server is defined %}{{ server }} {% if server is defined %}{{ server }}
{%- elif _algo_server.user_input is defined and _algo_server.user_input != "" %}{{ _algo_server.user_input }} {%- elif _algo_server.user_input %}{{ _algo_server.user_input }}
{%- else %}localhost{% endif %} {%- else %}localhost{% endif %}
- block:
- pause: - pause:
prompt: | prompt: |
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost) What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
[root] [root]
register: _algo_ssh_user register: _algo_ssh_user
when: when: ssh_user is undefined
- ssh_user is undefined
- cloud_instance_ip != "localhost"
- name: Set the facts - name: Set the facts
set_fact: set_fact:
ansible_ssh_user: >- ansible_ssh_user: >-
{% if ssh_user is defined %}{{ ssh_user }} {% if ssh_user is defined %}{{ ssh_user }}
{%- elif _algo_ssh_user.user_input is defined and _algo_ssh_user.user_input != "" %}{{ _algo_ssh_user.user_input }} {%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }}
{%- else %}root{% endif %} {%- else %}root{% endif %}
when: cloud_instance_ip != "localhost"
- pause: - pause:
prompt: | prompt: |
@ -40,5 +40,5 @@
set_fact: set_fact:
IP_subject_alt_name: >- IP_subject_alt_name: >-
{% if endpoint is defined %}{{ endpoint }} {% if endpoint is defined %}{{ endpoint }}
{%- elif _endpoint.user_input is defined and _endpoint.user_input != "" %}{{ _endpoint.user_input }} {%- elif _endpoint.user_input %}{{ _endpoint.user_input }}
{%- else %}{{ cloud_instance_ip }}{% endif %} {%- else %}{{ cloud_instance_ip }}{% endif %}

View file

@ -1,5 +1,4 @@
--- ---
- block:
- name: Ensure that the sshd_config file has desired options - name: Ensure that the sshd_config file has desired options
blockinfile: blockinfile:
dest: /etc/ssh/sshd_config dest: /etc/ssh/sshd_config
@ -43,7 +42,7 @@
file: file:
dest: "{{ ssh_tunnels_config_path }}" dest: "{{ ssh_tunnels_config_path }}"
state: absent state: absent
when: keys_clean_all|bool == True when: keys_clean_all|bool
- name: Ensure the config directories exist - name: Ensure the config directories exist
file: file:
@ -113,8 +112,3 @@
when: item not in users when: item not in users
with_items: "{{ getent_group['algo'][2].split(',') }}" with_items: "{{ getent_group['algo'][2].split(',') }}"
tags: update-users tags: update-users
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -1,8 +1,6 @@
--- ---
ipsec_config_path: "configs/{{ IP_subject_alt_name }}/ipsec/" ipsec_config_path: "configs/{{ IP_subject_alt_name }}/ipsec/"
ipsec_pki_path: "{{ ipsec_config_path }}/.pki/" ipsec_pki_path: "{{ ipsec_config_path }}/.pki/"
strongswan_network: 10.19.48.0/24
strongswan_network_ipv6: 'fd9d:bc11:4020::/48'
strongswan_shell: /usr/sbin/nologin strongswan_shell: /usr/sbin/nologin
strongswan_home: /var/lib/strongswan strongswan_home: /var/lib/strongswan
BetweenClients_DROP: true BetweenClients_DROP: true

View file

@ -2,7 +2,7 @@
service: name=strongswan state=restarted service: name=strongswan state=restarted
- name: daemon-reload - name: daemon-reload
shell: systemctl daemon-reload systemd: daemon_reload=true
- name: restart apparmor - name: restart apparmor
service: name=apparmor state=restarted service: name=apparmor state=restarted

View file

@ -1,8 +1,13 @@
--- ---
- name: Register p12 PayloadContent - name: Register p12 PayloadContent
shell: cat private/{{ item }}.p12 | base64 shell: |
set -o pipefail
cat private/{{ item }}.p12 |
base64
register: PayloadContent register: PayloadContent
changed_when: false
args: args:
executable: bash
chdir: "{{ ipsec_pki_path }}" chdir: "{{ ipsec_pki_path }}"
with_items: "{{ users }}" with_items: "{{ users }}"
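Editor's note: set -o pipefail (with bash as the executable) makes the task fail if the cat side of the pipe fails, instead of silently registering empty PayloadContent. A hedged round-trip check that could be added locally; the results indexing assumes the same loop order as the task above:

- name: Verify the registered payload decodes back to the p12 (illustrative only)
  shell: |
    set -o pipefail
    echo "{{ PayloadContent.results[0].stdout }}" | base64 -d | cmp - private/{{ users[0] }}.p12
  args:
    executable: bash
    chdir: "{{ ipsec_pki_path }}"
  changed_when: false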

View file

@ -23,12 +23,22 @@
owner: strongswan owner: strongswan
group: "{{ root_group|default('root') }}" group: "{{ root_group|default('root') }}"
mode: "0600" mode: "0600"
- src: charon.conf.j2
dest: "strongswan.d/charon.conf"
owner: root
group: "{{ root_group|default('root') }}"
mode: "0644"
notify: notify:
- restart strongswan - restart strongswan
- name: Get loaded plugins - name: Get loaded plugins
shell: > shell: |
find {{ config_prefix|default('/') }}etc/strongswan.d/charon/ -type f -name '*.conf' -exec basename {} \; | cut -f1 -d. set -o pipefail
find {{ config_prefix|default('/') }}etc/strongswan.d/charon/ -type f -name '*.conf' -exec basename {} \; |
cut -f1 -d.
changed_when: false
args:
executable: bash
register: strongswan_plugins register: strongswan_plugins
- name: Disable unneeded plugins - name: Disable unneeded plugins

View file

@ -1,5 +1,4 @@
--- ---
- block:
- include_tasks: ubuntu.yml - include_tasks: ubuntu.yml
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
@ -30,8 +29,3 @@
enabled: true enabled: true
- meta: flush_handlers - meta: flush_handlers
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always

View file

@ -2,14 +2,19 @@
- block: - block:
- name: Set subjectAltName as a fact - name: Set subjectAltName as a fact
set_fact: set_fact:
subjectAltName: "{{ subjectAltName_IP }}{% if ipv6_support %},IP:{{ ansible_default_ipv6['address'] }}{% endif %}{% if domain and subjectAltName_DNS %},DNS:{{ subjectAltName_DNS }}{% endif %}" subjectAltName: >-
{{ subjectAltName_IP }}
{%- if ipv6_support -%},IP:{{ ansible_default_ipv6['address'] }}{%- endif -%}
{%- if domain and subjectAltName_DNS -%},DNS:{{ subjectAltName_DNS }}{%- endif -%}
tags: always tags: always
- debug: var=subjectAltName
- name: Ensure the pki directory does not exist - name: Ensure the pki directory does not exist
file: file:
dest: "{{ ipsec_pki_path }}" dest: "{{ ipsec_pki_path }}"
state: absent state: absent
when: keys_clean_all|bool == True when: keys_clean_all|bool
- name: Ensure the pki directories exist - name: Ensure the pki directories exist
file: file:
@ -151,6 +156,23 @@
with_items: "{{ users }}" with_items: "{{ users }}"
register: p12 register: p12
- name: Build the client's p12 with the CA cert included
shell: >
umask 077;
{{ openssl_bin }} pkcs12
-in certs/{{ item }}.crt
-inkey private/{{ item }}.key
-export
-name {{ item }}
-out private/{{ item }}_ca.p12
-certfile cacert.pem
-passout pass:"{{ p12_export_password }}"
args:
chdir: "{{ ipsec_pki_path }}"
executable: bash
with_items: "{{ users }}"
register: p12
- name: Copy the p12 certificates - name: Copy the p12 certificates
copy: copy:
src: "{{ ipsec_pki_path }}/private/{{ item }}.p12" src: "{{ ipsec_pki_path }}/private/{{ item }}.p12"
@ -209,3 +231,13 @@
- gencrl.changed - gencrl.changed
notify: notify:
- rereadcrls - rereadcrls
- name: Delete the CA key
file:
path: "{{ ipsec_pki_path }}/private/cakey.pem"
state: absent
become: false
delegate_to: localhost
when:
- ipsec_enabled
- not algo_store_cakey
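Editor's note: the new <user>_ca.p12 bundles the CA certificate with the client certificate and key, and the CA key is now removed from the local PKI unless algo_store_cakey is set. A hypothetical local task to inspect one of the generated bundles:

- name: List the contents of the CA-bundled p12 (illustrative only)
  command: >
    openssl pkcs12 -info -nokeys
    -in {{ ipsec_pki_path }}/private/{{ users[0] }}_ca.p12
    -passin pass:{{ p12_export_password }}
  delegate_to: localhost
  become: false
  changed_when: false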

View file

@ -1,18 +1,19 @@
--- ---
- name: Set OS specific facts
- set_fact: set_fact:
strongswan_additional_plugins: [] strongswan_additional_plugins: []
- name: Ubuntu | Install strongSwan - name: Ubuntu | Install strongSwan
apt: apt:
name: strongswan name: strongswan
state: latest state: present
update_cache: yes update_cache: yes
install_recommends: yes install_recommends: yes
- name: Ubuntu | Enforcing ipsec with apparmor - name: Ubuntu | Enforcing ipsec with apparmor
shell: aa-enforce "{{ item }}" command: aa-enforce "{{ item }}"
when: apparmor_enabled|default(false)|bool == true when: apparmor_enabled|default(false)|bool
changed_when: false
with_items: with_items:
- /usr/lib/ipsec/charon - /usr/lib/ipsec/charon
- /usr/lib/ipsec/lookip - /usr/lib/ipsec/lookip

View file

@ -0,0 +1,365 @@
# Options for the charon IKE daemon.
charon {
# Accept unencrypted ID and HASH payloads in IKEv1 Main Mode.
# accept_unencrypted_mainmode_messages = no
# Maximum number of half-open IKE_SAs for a single peer IP.
# block_threshold = 5
# Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP
# should be saved under a unique file name derived from the public key of
# the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or
# /etc/swanctl/x509crl (vici), respectively.
# cache_crls = no
# Whether relations in validated certificate chains should be cached in
# memory.
# cert_cache = yes
# Send Cisco Unity vendor ID payload (IKEv1 only).
# cisco_unity = no
# Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed.
close_ike_on_child_failure = yes
# Number of half-open IKE_SAs that activate the cookie mechanism.
# cookie_threshold = 10
# Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only).
# delete_rekeyed = no
# Delay in seconds until inbound IPsec SAs are deleted after rekeyings
# (IKEv2 only).
# delete_rekeyed_delay = 5
# Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic
# strength.
# dh_exponent_ansi_x9_42 = yes
# Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal
# missing symbols immediately.
# dlopen_use_rtld_now = no
# DNS server assigned to peer via configuration payload (CP).
# dns1 =
# DNS server assigned to peer via configuration payload (CP).
# dns2 =
# Enable Denial of Service protection using cookies and aggressiveness
# checks.
# dos_protection = yes
# Compliance with the errata for RFC 4753.
# ecp_x_coordinate_only = yes
# Free objects during authentication (might conflict with plugins).
# flush_auth_cfg = no
# Whether to follow IKEv2 redirects (RFC 5685).
# follow_redirects = yes
# Maximum size (complete IP datagram size in bytes) of a sent IKE fragment
# when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults
# to 1280 (use 0 for address family specific default values, which uses a
# lower value for IPv4). If specified this limit is used for both IPv4 and
# IPv6.
# fragment_size = 1280
# Name of the group the daemon changes to after startup.
# group =
# Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING).
half_open_timeout = 5
# Enable hash and URL support.
# hash_and_url = no
# Allow IKEv1 Aggressive Mode with pre-shared keys as responder.
# i_dont_care_about_security_and_use_aggressive_mode_psk = no
# Whether to ignore the traffic selectors from the kernel's acquire events
# for IKEv2 connections (they are not used for IKEv1).
# ignore_acquire_ts = no
# A space-separated list of routing tables to be excluded from route
# lookups.
# ignore_routing_tables =
# Maximum number of IKE_SAs that can be established at the same time before
# new connection attempts are blocked.
# ikesa_limit = 0
# Number of exclusively locked segments in the hash table.
# ikesa_table_segments = 1
# Size of the IKE_SA hash table.
# ikesa_table_size = 1
# Whether to close IKE_SA if the only CHILD_SA closed due to inactivity.
inactivity_close_ike = yes
# Limit new connections based on the current number of half open IKE_SAs,
# see IKE_SA_INIT DROPPING in strongswan.conf(5).
# init_limit_half_open = 0
# Limit new connections based on the number of queued jobs.
# init_limit_job_load = 0
# Causes charon daemon to ignore IKE initiation requests.
# initiator_only = no
# Install routes into a separate routing table for established IPsec
# tunnels.
# install_routes = yes
# Install virtual IP addresses.
# install_virtual_ip = yes
# The name of the interface on which virtual IP addresses should be
# installed.
# install_virtual_ip_on =
# Check daemon, libstrongswan and plugin integrity at startup.
# integrity_test = no
# A comma-separated list of network interfaces that should be ignored, if
# interfaces_use is specified this option has no effect.
# interfaces_ignore =
# A comma-separated list of network interfaces that should be used by
# charon. All other interfaces are ignored.
# interfaces_use =
# NAT keep alive interval.
keep_alive = 25s
# Plugins to load in the IKE daemon charon.
# load =
# Determine plugins to load via each plugin's load option.
# load_modular = no
# Initiate IKEv2 reauthentication with a make-before-break scheme.
# make_before_break = no
# Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about
# and track concurrently.
# max_ikev1_exchanges = 3
# Maximum packet size accepted by charon.
# max_packet = 10000
# Enable multiple authentication exchanges (RFC 4739).
# multiple_authentication = yes
# WINS servers assigned to peer via configuration payload (CP).
# nbns1 =
# WINS servers assigned to peer via configuration payload (CP).
# nbns2 =
# UDP port used locally. If set to 0 a random port will be allocated.
# port = 500
# UDP port used locally in case of NAT-T. If set to 0 a random port will be
# allocated. Has to be different from charon.port, otherwise a random port
# will be allocated.
# port_nat_t = 4500
# Whether to prefer updating SAs to the path with the best route.
# prefer_best_path = no
# Prefer locally configured proposals for IKE/IPsec over supplied ones as
# responder (disabling this can avoid keying retries due to
# INVALID_KE_PAYLOAD notifies).
# prefer_configured_proposals = yes
# By default public IPv6 addresses are preferred over temporary ones (RFC
# 4941), to make connections more stable. Enable this option to reverse
# this.
# prefer_temporary_addrs = no
# Process RTM_NEWROUTE and RTM_DELROUTE events.
# process_route = yes
# Delay in ms for receiving packets, to simulate larger RTT.
# receive_delay = 0
# Delay request messages.
# receive_delay_request = yes
# Delay response messages.
# receive_delay_response = yes
# Specific IKEv2 message type to delay, 0 for any.
# receive_delay_type = 0
# Size of the AH/ESP replay window, in packets.
# replay_window = 32
# Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION
# in strongswan.conf(5).
# retransmit_base = 1.8
# Maximum jitter in percent to apply randomly to calculated retransmission
# timeout (0 to disable).
# retransmit_jitter = 0
# Upper limit in seconds for calculated retransmission timeout (0 to
# disable).
# retransmit_limit = 0
# Timeout in seconds before sending first retransmit.
# retransmit_timeout = 4.0
# Number of times to retransmit a packet before giving up.
# retransmit_tries = 5
# Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if
# DNS resolution failed), 0 to disable retries.
# retry_initiate_interval = 0
# Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1).
reuse_ikesa = yes
# Numerical routing table to install routes to.
# routing_table =
# Priority of the routing table.
# routing_table_prio =
# Whether to use RSA with PSS padding instead of PKCS#1 padding by default.
# rsa_pss = no
# Delay in ms for sending packets, to simulate larger RTT.
# send_delay = 0
# Delay request messages.
# send_delay_request = yes
# Delay response messages.
# send_delay_response = yes
# Specific IKEv2 message type to delay, 0 for any.
# send_delay_type = 0
# Send strongSwan vendor ID payload
# send_vendor_id = no
# Whether to enable Signature Authentication as per RFC 7427.
# signature_authentication = yes
# Whether to enable constraints against IKEv2 signature schemes.
# signature_authentication_constraints = yes
# The upper limit for SPIs requested from the kernel for IPsec SAs.
# spi_max = 0xcfffffff
# The lower limit for SPIs requested from the kernel for IPsec SAs.
# spi_min = 0xc0000000
# Number of worker threads in charon.
# threads = 16
# Name of the user the daemon changes to after startup.
# user =
crypto_test {
# Benchmark crypto algorithms and order them by efficiency.
# bench = no
# Buffer size used for crypto benchmark.
# bench_size = 1024
# Number of iterations to test each algorithm.
# bench_time = 50
# Test crypto algorithms during registration (requires test vectors
# provided by the test-vectors plugin).
# on_add = no
# Test crypto algorithms on each crypto primitive instantiation.
# on_create = no
# Strictly require at least one test vector to enable an algorithm.
# required = no
# Whether to test RNG with TRUE quality; requires a lot of entropy.
# rng_true = no
}
host_resolver {
# Maximum number of concurrent resolver threads (they are terminated if
# unused).
# max_threads = 3
# Minimum number of resolver threads to keep around.
# min_threads = 0
}
leak_detective {
# Includes source file names and line numbers in leak detective output.
# detailed = yes
# Threshold in bytes for leaks to be reported (0 to report all).
# usage_threshold = 10240
# Threshold in number of allocations for leaks to be reported (0 to
# report all).
# usage_threshold_count = 0
}
processor {
# Section to configure the number of reserved threads per priority class
# see JOB PRIORITY MANAGEMENT in strongswan.conf(5).
priority_threads {
}
}
# Section containing a list of scripts (name = path) that are executed when
# the daemon is started.
start-scripts {
}
# Section containing a list of scripts (name = path) that are executed when
# the daemon is terminated.
stop-scripts {
}
tls {
# List of TLS encryption ciphers.
# cipher =
# List of TLS key exchange methods.
# key_exchange =
# List of TLS MAC algorithms.
# mac =
# List of TLS cipher suites.
# suites =
}
x509 {
# Discard certificates with unsupported or unknown critical extensions.
# enforce_critical = yes
}
}
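Editor's note: most of this file keeps the upstream defaults commented out; the options actually set are close_ike_on_child_failure, half_open_timeout, inactivity_close_ike, keep_alive and reuse_ikesa. A hedged sanity check that the rendered file enables them (the path assumes the default config_prefix):

- name: Confirm the tuned charon options are uncommented (illustrative only)
  command: >
    grep -E '^\s*(close_ike_on_child_failure|half_open_timeout|inactivity_close_ike|keep_alive|reuse_ikesa)\s*='
    {{ config_prefix|default('/') }}etc/strongswan.d/charon.conf
  changed_when: false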

View file

@ -1,4 +1,4 @@
conn ikev2-{{ IP_subject_alt_name }} conn algovpn-{{ IP_subject_alt_name }}
fragmentation=yes fragmentation=yes
rekey=no rekey=no
dpdaction=clear dpdaction=clear
@ -16,7 +16,7 @@ conn ikev2-{{ IP_subject_alt_name }}
right={{ IP_subject_alt_name }} right={{ IP_subject_alt_name }}
rightid={{ IP_subject_alt_name }} rightid={{ IP_subject_alt_name }}
rightsubnet=0.0.0.0/0 rightsubnet={{ rightsubnet | default('0.0.0.0/0') }}
rightauth=pubkey rightauth=pubkey
leftsourceip=%config leftsourceip=%config
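Editor's note: rightsubnet is now overridable, so a client deployment can tunnel only selected destinations instead of the 0.0.0.0/0 default; tests/ipsec-client.sh later in this change exercises it with rightsubnet=172.16.0.1/32. An illustrative extra-vars snippet:

# Illustrative: route only the Algo DNS resolver through the IPsec tunnel
rightsubnet: 172.16.0.1/32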

View file

@ -9,6 +9,8 @@ conn %default
keyexchange=ikev2 keyexchange=ikev2
compress=yes compress=yes
dpddelay=35s dpddelay=35s
lifetime=3h
ikelifetime=12h
{% if algo_windows %} {% if algo_windows %}
ike={{ ciphers.compat.ike }} ike={{ ciphers.compat.ike }}

View file

@ -69,7 +69,7 @@
<key>IntegrityAlgorithm</key> <key>IntegrityAlgorithm</key>
<string>SHA2-512</string> <string>SHA2-512</string>
<key>LifeTimeInMinutes</key> <key>LifeTimeInMinutes</key>
<integer>20</integer> <integer>1440</integer>
</dict> </dict>
<key>DeadPeerDetectionRate</key> <key>DeadPeerDetectionRate</key>
<string>Medium</string> <string>Medium</string>
@ -90,7 +90,7 @@
<key>IntegrityAlgorithm</key> <key>IntegrityAlgorithm</key>
<string>SHA2-512</string> <string>SHA2-512</string>
<key>LifeTimeInMinutes</key> <key>LifeTimeInMinutes</key>
<integer>20</integer> <integer>1440</integer>
</dict> </dict>
<key>LocalIdentifier</key> <key>LocalIdentifier</key>
<string>{{ item.0 }}</string> <string>{{ item.0 }}</string>

View file

@ -3,26 +3,16 @@ wireguard_PersistentKeepalive: 0
wireguard_config_path: "configs/{{ IP_subject_alt_name }}/wireguard/" wireguard_config_path: "configs/{{ IP_subject_alt_name }}/wireguard/"
wireguard_pki_path: "{{ wireguard_config_path }}/.pki/" wireguard_pki_path: "{{ wireguard_config_path }}/.pki/"
wireguard_interface: wg0 wireguard_interface: wg0
_wireguard_network_ipv4:
subnet: 10.19.49.0
prefix: 24
gateway: 10.19.49.1
clients_range: 10.19.49
clients_start: 2
_wireguard_network_ipv6:
subnet: 'fd9d:bc11:4021::'
prefix: 48
gateway: 'fd9d:bc11:4021::1'
clients_range: 'fd9d:bc11:4021::'
clients_start: 2
wireguard_network_ipv4: "{{ _wireguard_network_ipv4['subnet'] }}/{{ _wireguard_network_ipv4['prefix'] }}"
wireguard_network_ipv6: "{{ _wireguard_network_ipv6['subnet'] }}/{{ _wireguard_network_ipv6['prefix'] }}"
keys_clean_all: false keys_clean_all: false
wireguard_dns_servers: >- wireguard_dns_servers: >-
{% if local_dns|default(false)|bool or dns_encryption|default(false)|bool == true %} {% if local_dns|default(false)|bool or dns_encryption|default(false)|bool %}
{{ local_service_ip }} {{ local_service_ip }}
{% else %} {% else %}
{% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} {% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
{% endif %} {% endif %}
wireguard_client_ip: "{{ _wireguard_network_ipv4['clients_range'] }}.{{ _wireguard_network_ipv4['clients_start'] + index|int + 1 }}/{{ _wireguard_network_ipv4['prefix'] }}{% if ipv6_support %},{{ _wireguard_network_ipv6['clients_range'] }}{{ _wireguard_network_ipv6['clients_start'] + index|int + 1 }}/{{ _wireguard_network_ipv6['prefix'] }}{% endif %}" wireguard_client_ip: >-
wireguard_server_ip: "{{ _wireguard_network_ipv4['gateway'] }}/{{ _wireguard_network_ipv4['prefix'] }}{% if ipv6_support %},{{ _wireguard_network_ipv6['gateway'] }}/{{ _wireguard_network_ipv6['prefix'] }}{% endif %}" {{ wireguard_network_ipv4 | ipaddr(index|int+2) }}
{{ ',' + wireguard_network_ipv6 | ipaddr(index|int+2) if ipv6_support else '' }}
wireguard_server_ip: >-
{{ wireguard_network_ipv4 | ipaddr('1') }}
{{ ',' + wireguard_network_ipv6 | ipaddr('1') if ipv6_support else '' }}
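Editor's note: client and server addresses are now derived from wireguard_network_ipv4/ipv6 with the ipaddr filter rather than hand-assembled octets: the server takes host 1 and the first client (index 0) takes host 2. A minimal illustration, assuming the network keeps the old 10.19.49.0/24 default and that the netaddr library backing ipaddr is installed:

- name: Show how ipaddr indexes into the network (illustrative only)
  debug:
    msg:
      - "{{ '10.19.49.0/24' | ipaddr('1') }}"   # server  -> 10.19.49.1/24
      - "{{ '10.19.49.0/24' | ipaddr(2) }}"     # client 0 -> 10.19.49.2/24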

View file

@ -4,7 +4,8 @@
name: wireguard name: wireguard
state: present state: present
- set_fact: - name: Set OS specific facts
set_fact:
service_name: wireguard service_name: wireguard
tags: always tags: always

View file

@ -3,7 +3,7 @@
file: file:
dest: "{{ config_prefix|default('/') }}etc/wireguard/private_{{ item }}.lock" dest: "{{ config_prefix|default('/') }}etc/wireguard/private_{{ item }}.lock"
state: absent state: absent
when: keys_clean_all|bool == True when: keys_clean_all|bool
with_items: with_items:
- "{{ users }}" - "{{ users }}"
- "{{ IP_subject_alt_name }}" - "{{ IP_subject_alt_name }}"
@ -39,7 +39,10 @@
when: wg_genkey.changed when: wg_genkey.changed
- name: Generate public keys - name: Generate public keys
shell: echo "{{ lookup('file', wireguard_pki_path + '/private/' + item) }}" | wg pubkey shell: |
set -o pipefail
echo "{{ lookup('file', wireguard_pki_path + '/private/' + item) }}" |
wg pubkey
register: wg_pubkey register: wg_pubkey
changed_when: false changed_when: false
args: args:

View file

@ -75,7 +75,6 @@
notify: restart wireguard notify: restart wireguard
tags: update-users tags: update-users
- name: WireGuard enabled and started - name: WireGuard enabled and started
service: service:
name: "{{ service_name }}" name: "{{ service_name }}"

View file

@ -27,6 +27,7 @@
group: root group: root
mode: 0644 mode: 0644
- set_fact: - name: Set OS specific facts
set_fact:
service_name: "wg-quick@{{ wireguard_interface }}" service_name: "wg-quick@{{ wireguard_interface }}"
tags: always tags: always

View file

@ -7,6 +7,6 @@ DNS = {{ wireguard_dns_servers }}
[Peer] [Peer]
PublicKey = {{ lookup('file', wireguard_pki_path + '/public/' + IP_subject_alt_name) }} PublicKey = {{ lookup('file', wireguard_pki_path + '/public/' + IP_subject_alt_name) }}
AllowedIPs = 0.0.0.0/0, ::/0 AllowedIPs = 0.0.0.0/0{{ ', ::/0' if ipv6_support else '' }}
Endpoint = {{ IP_subject_alt_name }}:{{ wireguard_port }} Endpoint = {{ IP_subject_alt_name }}:{{ wireguard_port }}
{{ 'PersistentKeepalive = ' + wireguard_PersistentKeepalive|string if wireguard_PersistentKeepalive > 0 else '' }} {{ 'PersistentKeepalive = ' + wireguard_PersistentKeepalive|string if wireguard_PersistentKeepalive > 0 else '' }}
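Editor's note: AllowedIPs now includes ::/0 only when ipv6_support is on. A hedged local check that a rendered client config still parses, assuming the default config path and a user named user1:

- name: Parse a rendered WireGuard client config (illustrative only)
  command: wg-quick strip configs/{{ IP_subject_alt_name }}/wireguard/user1.conf
  delegate_to: localhost
  become: false
  changed_when: false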

View file

@ -11,7 +11,6 @@ SaveConfig = false
[Peer] [Peer]
# {{ u }} # {{ u }}
PublicKey = {{ lookup('file', wireguard_pki_path + '/public/' + u) }} PublicKey = {{ lookup('file', wireguard_pki_path + '/public/' + u) }}
AllowedIPs = {{ _wireguard_network_ipv4['clients_range'] }}.{{ _wireguard_network_ipv4['clients_start'] + index }}/32{% if ipv6_support %},{{ _wireguard_network_ipv6['clients_range'] }}{{ _wireguard_network_ipv6['clients_start'] + index }}/128{% endif %} AllowedIPs = {{ wireguard_network_ipv4 | ipaddr(index|int+1) | ipv4('address') }}/32{{ ',' + wireguard_network_ipv6 | ipaddr(index|int+1) | ipv6('address') + '/128' if ipv6_support else '' }}
{% endif %} {% endif %}
{% endfor %} {% endfor %}

View file

@ -2,45 +2,43 @@
- name: Configure the server and install required software - name: Configure the server and install required software
hosts: vpn-host hosts: vpn-host
gather_facts: false gather_facts: false
tags: algo
become: true become: true
vars_files: vars_files:
- config.cfg - config.cfg
tasks:
roles: - block:
- role: common - import_role:
name: common
tags: common tags: common
- role: dns_encryption
- import_role:
name: dns_encryption
when: dns_encryption when: dns_encryption
tags: dns_encryption tags: dns_encryption
- role: dns_adblocking
- import_role:
name: dns_adblocking
when: algo_local_dns when: algo_local_dns
tags: dns_adblocking tags: dns_adblocking
- role: wireguard
- import_role:
name: wireguard
when: wireguard_enabled when: wireguard_enabled
tags: wireguard tags: wireguard
- role: strongswan
- import_role:
name: strongswan
when: ipsec_enabled when: ipsec_enabled
tags: ipsec tags: ipsec
- role: ssh_tunneling
- import_role:
name: ssh_tunneling
when: algo_ssh_tunneling when: algo_ssh_tunneling
tags: ssh_tunneling tags: ssh_tunneling
post_tasks:
- block: - block:
- name: Delete the CA key
local_action:
module: file
path: "{{ ipsec_pki_path }}/private/cakey.pem"
state: absent
become: false
when:
- ipsec_enabled
- not algo_store_cakey
- name: Dump the configuration - name: Dump the configuration
local_action: copy:
module: copy
dest: "configs/{{ IP_subject_alt_name }}/.config.yml" dest: "configs/{{ IP_subject_alt_name }}/.config.yml"
content: | content: |
server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }} server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }}
@ -60,8 +58,12 @@
IP_subject_alt_name: {{ IP_subject_alt_name }} IP_subject_alt_name: {{ IP_subject_alt_name }}
ipsec_enabled: {{ ipsec_enabled }} ipsec_enabled: {{ ipsec_enabled }}
wireguard_enabled: {{ wireguard_enabled }} wireguard_enabled: {{ wireguard_enabled }}
{% if tests|default(false)|bool %}ca_password: {{ CA_password }}{% endif %} {% if tests|default(false)|bool %}
ca_password: {{ CA_password }}
p12_password: {{ p12_export_password }}
{% endif %}
become: false become: false
delegate_to: localhost
- name: Create a symlink if deploying to localhost - name: Create a symlink if deploying to localhost
file: file:
@ -79,7 +81,4 @@
- " {{ congrats.ssh_access if algo_provider != 'local' else ''}}" - " {{ congrats.ssh_access if algo_provider != 'local' else ''}}"
tags: always tags: always
rescue: rescue:
- debug: var=fail_hint - include_tasks: playbooks/rescue.yml
tags: always
- fail:
tags: always
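Editor's note: the repeated inline rescue blocks are consolidated into playbooks/rescue.yml, which is not itself part of this diff; based on the blocks it replaces, it presumably contains something like:

---
- debug: var=fail_hint
  tags: always
- fail:
  tags: always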

1
tests/algo.conf Normal file
View file

@ -0,0 +1 @@
dhcp-host=algo,10.0.8.100

18
tests/cloud-init.sh Executable file
View file

@ -0,0 +1,18 @@
#!/bin/bash
echo "#!/bin/bash
export METHOD=local
export ONDEMAND_CELLULAR=true
export ONDEMAND_WIFI=true
export ONDEMAND_WIFI_EXCLUDE=test1,test2
export WINDOWS=true
export STORE_CAKEY=true
export LOCAL_DNS=true
export SSH_TUNNELING=true
export ENDPOINT=10.0.8.100
export USERS=desktop,user1,user2
export EXTRA_VARS='install_headers=false tests=true apparmor_enabled=false local_service_ip=172.16.0.1'
export ANSIBLE_EXTRA_ARGS='--skip-tags apparmor'
export REPO_SLUG=${TRAVIS_PULL_REQUEST_SLUG:-${TRAVIS_REPO_SLUG:-trailofbits/algo}}
export REPO_BRANCH=${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH:-master}}
curl -s https://raw.githubusercontent.com/${TRAVIS_PULL_REQUEST_SLUG:-${TRAVIS_REPO_SLUG}}/${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}/install.sh | sudo -E bash -x"

25
tests/ipsec-client.sh Executable file
View file

@ -0,0 +1,25 @@
#!/usr/bin/env bash
set -euxo pipefail
xmllint --noout ./configs/10.0.8.100/ipsec/apple/user1.mobileconfig
ansible-playbook deploy_client.yml \
-e client_ip=localhost \
-e vpn_user=desktop \
-e server_ip=10.0.8.100 \
-e rightsubnet='172.16.0.1/32'
ipsec up algovpn-10.0.8.100
ipsec statusall
ipsec statusall | grep -w ^algovpn-10.0.8.100 | grep -w ESTABLISHED
fping -t 900 -c3 -r3 -Dse 10.0.8.100 172.16.0.1
host google.com 172.16.0.1
echo "IPsec tests passed"
ipsec down algovpn-10.0.8.100

View file

@ -2,11 +2,11 @@
set -ex set -ex
DEPLOY_ARGS="provider=local server=$LXC_IP ssh_user=ubuntu endpoint=$LXC_IP apparmor_enabled=false ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test local_dns=true ssh_tunneling=true windows=true store_cakey=true install_headers=false tests=true" DEPLOY_ARGS="provider=local server=10.0.8.100 ssh_user=ubuntu endpoint=10.0.8.100 apparmor_enabled=false ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test local_dns=true ssh_tunneling=true windows=true store_cakey=true install_headers=false tests=true local_service_ip=172.16.0.1"
if [ "${LXC_NAME}" == "docker" ] if [ "${DEPLOY}" == "docker" ]
then then
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "DEPLOY_ARGS=${DEPLOY_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook main.yml -e \"${DEPLOY_ARGS}\" --skip-tags apparmor" docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "DEPLOY_ARGS=${DEPLOY_ARGS}" travis/algo /bin/sh -c "chown -R root: /root/.ssh && chmod -R 600 /root/.ssh && source env/bin/activate && ansible-playbook main.yml -e \"${DEPLOY_ARGS}\" --skip-tags apparmor"
else else
ansible-playbook main.yml -e "${DEPLOY_ARGS}" --skip-tags apparmor ansible-playbook main.yml -e "${DEPLOY_ARGS}" --skip-tags apparmor
fi fi

View file

@ -1,7 +1,7 @@
USE_LXD_BRIDGE="true" USE_LXD_BRIDGE="true"
LXD_BRIDGE="lxdbr0" LXD_BRIDGE="lxdbr0"
UPDATE_PROFILE="true" UPDATE_PROFILE="true"
LXD_CONFILE="" LXD_CONFILE="/etc/default/algo.conf"
LXD_DOMAIN="lxd" LXD_DOMAIN="lxd"
LXD_IPV4_ADDR="10.0.8.1" LXD_IPV4_ADDR="10.0.8.1"
LXD_IPV4_NETMASK="255.255.255.0" LXD_IPV4_NETMASK="255.255.255.0"
@ -13,4 +13,4 @@ LXD_IPV6_ADDR=""
LXD_IPV6_MASK="" LXD_IPV6_MASK=""
LXD_IPV6_NETWORK="" LXD_IPV6_NETWORK=""
LXD_IPV6_NAT="false" LXD_IPV6_NAT="false"
LXD_IPV6_PROXY="true" LXD_IPV6_PROXY="false"

30
tests/pre-deploy.sh Executable file
View file

@ -0,0 +1,30 @@
#!/usr/bin/env bash
set -euxo pipefail
sysctl net.ipv6.conf.all.disable_ipv6=0
tar xf $HOME/lxc/cache.tar -C / || echo "Didn't extract cache."
cp -f tests/lxd-bridge /etc/default/lxd-bridge
cp -f tests/algo.conf /etc/default/algo.conf
if [[ "$DEPLOY" == "cloud-init" ]]; then
bash tests/cloud-init.sh | lxc profile set default user.user-data -
else
echo -e "#cloud-config\nssh_authorized_keys:\n - $(cat ~/.ssh/id_rsa.pub)" | lxc profile set default user.user-data -
fi
systemctl restart lxd-bridge.service lxd-containers.service lxd.service
lxc profile set default raw.lxc lxc.aa_profile=unconfined
lxc profile set default security.privileged true
lxc profile show default
lxc launch ubuntu:18.04 algo
ip addr
until dig A +short algo.lxd @10.0.8.1 | grep -vE '^$' > /dev/null; do
sleep 3
done
lxc list

15
tests/ssh-tunnel.sh Executable file
View file

@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -euxo pipefail
PASS=$(grep ^p12_password: configs/10.0.8.100/.config.yml | awk '{print $2}')
ssh-keygen -p -P ${PASS} -N '' -f configs/10.0.8.100/ssh-tunnel/desktop.pem
ssh -o StrictHostKeyChecking=no -D 127.0.0.1:1080 -f -q -C -N desktop@10.0.8.100 -i configs/10.0.8.100/ssh-tunnel/desktop.pem
git config --global http.proxy 'socks5://127.0.0.1:1080'
git clone -vv https://github.com/trailofbits/algo /tmp/ssh-tunnel-check
echo "SSH tunneling tests passed"

View file

@ -2,11 +2,11 @@
set -ex set -ex
USER_ARGS="{ 'server': '$LXC_IP', 'users': ['user1', 'user2'] }" USER_ARGS="{ 'server': '10.0.8.100', 'users': ['desktop', 'user1', 'user2'], 'local_service_ip': '172.16.0.1' }"
if [ "${LXC_NAME}" == "docker" ] if [ "${DEPLOY}" == "docker" ]
then then
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "USER_ARGS=${USER_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook users.yml -e \"${USER_ARGS}\" -t update-users" docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "USER_ARGS=${USER_ARGS}" travis/algo /bin/sh -c "chown -R root: /root/.ssh && chmod -R 600 /root/.ssh && source env/bin/activate && ansible-playbook users.yml -e \"${USER_ARGS}\" -t update-users"
else else
ansible-playbook users.yml -e "${USER_ARGS}" -t update-users ansible-playbook users.yml -e "${USER_ARGS}" -t update-users
fi fi
@ -15,7 +15,7 @@ fi
# IPsec # IPsec
# #
if sudo openssl crl -inform pem -noout -text -in configs/$LXC_IP/ipsec/.pki/crl/phone.crt | grep CRL if sudo openssl crl -inform pem -noout -text -in configs/10.0.8.100/ipsec/.pki/crl/phone.crt | grep CRL
then then
echo "The CRL check passed" echo "The CRL check passed"
else else
@ -23,7 +23,7 @@ if sudo openssl crl -inform pem -noout -text -in configs/$LXC_IP/ipsec/.pki/crl/
exit 1 exit 1
fi fi
if sudo openssl x509 -inform pem -noout -text -in configs/$LXC_IP/ipsec/.pki/certs/user1.crt | grep CN=user1 if sudo openssl x509 -inform pem -noout -text -in configs/10.0.8.100/ipsec/.pki/certs/user1.crt | grep CN=user1
then then
echo "The new user exists" echo "The new user exists"
else else
@ -35,7 +35,7 @@ fi
# WireGuard # WireGuard
# #
if sudo test -f configs/$LXC_IP/wireguard/user1.conf if sudo test -f configs/10.0.8.100/wireguard/user1.conf
then then
echo "WireGuard: The new user exists" echo "WireGuard: The new user exists"
else else
@ -47,7 +47,7 @@ fi
# SSH tunneling # SSH tunneling
# #
if sudo test -f configs/$LXC_IP/ssh-tunnel/user1.ssh_config if sudo test -f configs/10.0.8.100/ssh-tunnel/user1.ssh_config
then then
echo "SSH Tunneling: The new user exists" echo "SSH Tunneling: The new user exists"
else else

23
tests/wireguard-client.sh Executable file
View file

@ -0,0 +1,23 @@
#!/usr/bin/env bash
set -euxo pipefail
crudini --set configs/10.0.8.100/wireguard/user1.conf Interface Table off
wg-quick up configs/10.0.8.100/wireguard/user1.conf
wg
ifconfig user1
ip route add 172.16.0.1/32 dev user1
fping -t 900 -c3 -r3 -Dse 10.0.8.100 172.16.0.1
wg | grep "latest handshake"
host google.com 172.16.0.1
echo "WireGuard tests passed"
wg-quick down configs/10.0.8.100/wireguard/user1.conf

View file

@ -7,7 +7,8 @@
tasks: tasks:
- block: - block:
- pause: - name: Server address prompt
pause:
prompt: "Enter the IP address of your server: (or use localhost for local installation)" prompt: "Enter the IP address of your server: (or use localhost for local installation)"
register: _server register: _server
when: server is undefined when: server is undefined
@ -16,14 +17,15 @@
set_fact: set_fact:
algo_server: >- algo_server: >-
{% if server is defined %}{{ server }} {% if server is defined %}{{ server }}
{%- elif _server.user_input is defined and _server.user_input != "" %}{{ _server.user_input }} {%- elif _server.user_input %}{{ _server.user_input }}
{%- else %}omit{% endif %} {%- else %}omit{% endif %}
- name: Import host specific variables - name: Import host specific variables
include_vars: include_vars:
file: "configs/{{ algo_server }}/.config.yml" file: "configs/{{ algo_server }}/.config.yml"
- pause: - name: CA password prompt
pause:
prompt: Enter the password for the private CA key prompt: Enter the password for the private CA key
echo: false echo: false
register: _ca_password register: _ca_password
@ -35,9 +37,13 @@
set_fact: set_fact:
CA_password: >- CA_password: >-
{% if ca_password is defined %}{{ ca_password }} {% if ca_password is defined %}{{ ca_password }}
{%- elif _ca_password.user_input is defined and _ca_password.user_input != "" %}{{ _ca_password.user_input }} {%- elif _ca_password.user_input %}{{ _ca_password.user_input }}
{%- else %}omit{% endif %} {%- else %}omit{% endif %}
- name: Local pre-tasks
import_tasks: playbooks/cloud-pre.yml
become: false
- name: Add the server to the vpn-host group - name: Add the server to the vpn-host group
add_host: add_host:
name: "{{ algo_server }}" name: "{{ algo_server }}"
@ -47,10 +53,7 @@
ansible_python_interpreter: "/usr/bin/python3" ansible_python_interpreter: "/usr/bin/python3"
CA_password: "{{ CA_password }}" CA_password: "{{ CA_password }}"
rescue: rescue:
- debug: var=fail_hint - include_tasks: playbooks/rescue.yml
tags: always
- fail:
tags: always
- name: User management - name: User management
hosts: vpn-host hosts: vpn-host
@ -60,37 +63,28 @@
- config.cfg - config.cfg
- "configs/{{ inventory_hostname }}/.config.yml" - "configs/{{ inventory_hostname }}/.config.yml"
pre_tasks: tasks:
- block: - block:
- name: Local pre-tasks - import_role:
import_tasks: playbooks/cloud-pre.yml name: common
become: false
rescue:
- debug: var=fail_hint
tags: always
- fail:
tags: always
roles: - import_role:
- role: common name: wireguard
- role: wireguard
tags: [ 'vpn', 'wireguard' ]
when: wireguard_enabled when: wireguard_enabled
- role: strongswan
- import_role:
name: strongswan
when: ipsec_enabled when: ipsec_enabled
tags: ipsec tags: ipsec
- role: ssh_tunneling
- import_role:
name: ssh_tunneling
when: algo_ssh_tunneling when: algo_ssh_tunneling
post_tasks:
- block:
- debug: - debug:
msg: msg:
- "{{ congrats.common.split('\n') }}" - "{{ congrats.common.split('\n') }}"
- " {% if p12.changed %}{{ congrats.p12_pass }}{% endif %}" - " {% if p12.changed %}{{ congrats.p12_pass }}{% endif %}"
tags: always tags: always
rescue: rescue:
- debug: var=fail_hint - include_tasks: playbooks/rescue.yml
tags: always
- fail:
tags: always