mirror of
https://github.com/trailofbits/algo.git
synced 2025-08-10 23:03:03 +02:00
Merge branch 'master' into patch-1
This commit is contained in:
commit
9433b1881e
95 changed files with 2560 additions and 1342 deletions
3
.ansible-lint
Normal file
3
.ansible-lint
Normal file
|
@ -0,0 +1,3 @@
|
|||
skip_list:
|
||||
- '204'
|
||||
verbosity: 1
|
101
.travis.yml
101
.travis.yml
|
@ -1,26 +1,21 @@
|
|||
---
|
||||
language: python
|
||||
python: "2.7"
|
||||
sudo: required
|
||||
dist: trusty
|
||||
dist: xenial
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
|
||||
addons:
|
||||
apt:
|
||||
sources:
|
||||
sources: &default_sources
|
||||
- sourceline: 'ppa:ubuntu-lxc/stable'
|
||||
- sourceline: 'ppa:wireguard/wireguard'
|
||||
packages:
|
||||
packages: &default_packages
|
||||
- python-pip
|
||||
- lxd
|
||||
- expect-dev
|
||||
- debootstrap
|
||||
- shellcheck
|
||||
- tree
|
||||
- bridge-utils
|
||||
- dnsutils
|
||||
|
@ -29,7 +24,12 @@ addons:
|
|||
- libffi-dev
|
||||
- python-dev
|
||||
- linux-headers-$(uname -r)
|
||||
- wireguard-dkms
|
||||
- wireguard
|
||||
- libxml2-utils
|
||||
- crudini
|
||||
- fping
|
||||
- strongswan
|
||||
- libstrongswan-standard-plugins
|
||||
|
||||
cache:
|
||||
directories:
|
||||
|
@ -41,35 +41,66 @@ before_cache:
|
|||
- sudo tar cf $HOME/lxc/cache.tar /var/lib/lxd/images/
|
||||
- sudo chown $USER. $HOME/lxc/cache.tar
|
||||
|
||||
env:
|
||||
- LXC_NAME=docker LXC_DISTRO=ubuntu LXC_RELEASE=18.04
|
||||
custom_scripts:
|
||||
provisioning: &provisioning
|
||||
- ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
|
||||
- sudo ./tests/pre-deploy.sh
|
||||
- 'sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 20/" config.cfg'
|
||||
tests: &tests
|
||||
- sudo ./tests/wireguard-client.sh
|
||||
- sudo env "PATH=$PATH" ./tests/ipsec-client.sh
|
||||
- sudo ./tests/ssh-tunnel.sh
|
||||
|
||||
before_install:
|
||||
- test "${LXC_NAME}" != "docker" && sudo modprobe wireguard || docker build -t travis/algo .
|
||||
matrix:
|
||||
fast_finish: true
|
||||
include:
|
||||
- stage: Tests
|
||||
name: code checks and linters
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- shellcheck
|
||||
script:
|
||||
- pip install ansible-lint
|
||||
- shellcheck algo install.sh
|
||||
- ansible-playbook main.yml --syntax-check
|
||||
- ansible-lint -v roles/*/*/*.yml playbooks/*.yml *.yml
|
||||
|
||||
install:
|
||||
- sudo tar xf $HOME/lxc/cache.tar -C / || echo "Didn't extract cache."
|
||||
- ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
|
||||
- chmod 0644 ~/.ssh/config
|
||||
- echo -e "#cloud-config\nssh_authorized_keys:\n - $(cat ~/.ssh/id_rsa.pub)" | sudo lxc profile set default user.user-data -
|
||||
- sudo cp -f tests/lxd-bridge /etc/default/lxd-bridge
|
||||
- sudo service lxd restart
|
||||
- sudo lxc launch ${LXC_DISTRO}:${LXC_RELEASE} ${LXC_NAME}
|
||||
- until host ${LXC_NAME}.lxd 10.0.8.1 -t A; do sleep 3; done
|
||||
- export LXC_IP="$(dig ${LXC_NAME}.lxd @10.0.8.1 +short)"
|
||||
- pip install -r requirements.txt
|
||||
- pip install ansible-lint
|
||||
- gem install awesome_bot
|
||||
- ansible-playbook --version
|
||||
- tree . -L 2
|
||||
- stage: Deploy
|
||||
name: local deployment from docker
|
||||
addons:
|
||||
apt:
|
||||
sources: *default_sources
|
||||
packages: *default_packages
|
||||
env: DEPLOY=docker
|
||||
before_install: *provisioning
|
||||
before_script:
|
||||
- docker build -t travis/algo .
|
||||
- ./tests/local-deploy.sh
|
||||
- ./tests/update-users.sh
|
||||
script: *tests
|
||||
|
||||
script:
|
||||
# - awesome_bot --allow-dupe --skip-save-results *.md docs/*.md --white-list paypal.com,do.co,microsoft.com,https://github.com/trailofbits/algo/archive/master.zip,https://github.com/trailofbits/algo/issues/new
|
||||
# - shellcheck algo
|
||||
# - ansible-lint main.yml users.yml deploy_client.yml
|
||||
- ansible-playbook main.yml --syntax-check
|
||||
- ./tests/local-deploy.sh
|
||||
- ./tests/update-users.sh
|
||||
- stage: Deploy
|
||||
name: cloud-init deployment
|
||||
addons:
|
||||
apt:
|
||||
sources: *default_sources
|
||||
packages: *default_packages
|
||||
env: DEPLOY=cloud-init
|
||||
before_install: *provisioning
|
||||
before_script:
|
||||
- until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done
|
||||
- ( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & )
|
||||
- |
|
||||
until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do
|
||||
echo 'Cloud init is not finished. Sleep for 30 seconds';
|
||||
sleep 30;
|
||||
done
|
||||
- sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml
|
||||
- sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ .
|
||||
- sudo lxc file pull algo/root/algo-configs.tar ./
|
||||
- sudo tar -C ./configs -zxf algo-configs.tar
|
||||
script: *tests
|
||||
|
||||
notifications:
|
||||
email: false
|
||||
|
|
56
README.md
56
README.md
|
@ -72,15 +72,15 @@ That's it! You will get the message below when the server deployment process com
|
|||
You can now setup clients to connect it, e.g. your iPhone or laptop. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below.
|
||||
|
||||
```
|
||||
"\"#----------------------------------------------------------------------#\"",
|
||||
"\"# Congratulations! #\"",
|
||||
"\"# Your Algo server is running. #\"",
|
||||
"\"# Config files and certificates are in the ./configs/ directory. #\"",
|
||||
"\"# Go to https://whoer.net/ after connecting #\"",
|
||||
"\"# and ensure that all your traffic passes through the VPN. #\"",
|
||||
"\"# Local DNS resolver 172.16.0.1 #\"",
|
||||
"\"# The p12 and SSH keys password is XXXXXXXX #\"",
|
||||
"\"#----------------------------------------------------------------------#\"",
|
||||
"# Congratulations! #"
|
||||
"# Your Algo server is running. #"
|
||||
"# Config files and certificates are in the ./configs/ directory. #"
|
||||
"# Go to https://whoer.net/ after connecting #"
|
||||
"# and ensure that all your traffic passes through the VPN. #"
|
||||
"# Local DNS resolver 172.16.0.1 #"
|
||||
"# The p12 and SSH keys password for new users is XXXXXXXX #"
|
||||
"# The CA key password is XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #"
|
||||
"# Shell access: ssh -i configs/algo.pem root@xxx.xxx.xx.xx #"
|
||||
```
|
||||
|
||||
## Configure the VPN Clients
|
||||
|
@ -93,11 +93,13 @@ WireGuard is used to provide VPN services on Apple devices. Algo generates a Wir
|
|||
|
||||
On iOS, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1441195209?mt=8) app from the iOS App Store. Then, use the WireGuard app to scan the QR code or AirDrop the configuration file to the device.
|
||||
|
||||
On macOS Mojave or later, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1451685025?mt=12) app from the Mac App Store. WireGuard will appear in the menu bar once you run the app. Click on the WireGuard icon, choose **Import tunnel(s) from file...**, then select the appropriate WireGuard configuration file. Enable "Connect on Demand" by editing the tunnel configuration in the WireGuard app.
|
||||
On macOS Mojave or later, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1451685025?mt=12) app from the Mac App Store. WireGuard will appear in the menu bar once you run the app. Click on the WireGuard icon, choose **Import tunnel(s) from file...**, then select the appropriate WireGuard configuration file.
|
||||
|
||||
On either iOS or macOS, you can enable "Connect on Demand" and/or exclude certain trusted Wi-Fi networks (such as your home or work) by editing the tunnel configuration in the WireGuard app. (Algo can't do this automatically for you.)
|
||||
|
||||
Installing WireGuard is a little more complicated on older version of macOS. See [Using macOS as a Client with WireGuard](docs/client-macos-wireguard.md).
|
||||
|
||||
If you prefer to use the built-in IPSEC VPN on Apple devices, then see [Using Apple Devices as a Client with IPSEC](docs/client-apple-ipsec.md).
|
||||
If you prefer to use the built-in IPSEC VPN on Apple devices, or need "Connect on Demand" or excluded Wi-Fi networks automatically configured, then see [Using Apple Devices as a Client with IPSEC](docs/client-apple-ipsec.md).
|
||||
|
||||
### Android Devices
|
||||
|
||||
|
@ -164,16 +166,14 @@ Use the example command below to start an SSH tunnel by replacing `user` and `ip
|
|||
|
||||
## SSH into Algo Server
|
||||
|
||||
To SSH into the Algo server for administrative purposes you can use the example command below by replacing `ip` with your own:
|
||||
Your Algo server is configured for key-only SSH access for administrative purposes. Open the Terminal app, `cd` into the `algo-master` directory where you originally downloaded Algo, and then use the command listed on the success message:
|
||||
|
||||
`ssh root@ip -i ~/.ssh/algo.pem`
|
||||
`ssh -i configs/algo.pem user@ip`
|
||||
|
||||
If you find yourself regularly logging into Algo then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently.
|
||||
where `user` is either `root` or `ubuntu` as listed on the success message, and `ip` is the IP address of your Algo server. If you find yourself regularly logging into the server then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently.
|
||||
|
||||
`ssh-add ~/.ssh/algo > /dev/null 2>&1`
|
||||
|
||||
Note the admin username is `ubuntu` instead of `root` on providers other than Digital Ocean.
|
||||
|
||||
## Adding or Removing Users
|
||||
|
||||
If you chose to save the CA certificate during the deploy process, then Algo's own scripts can easily add and remove users from the VPN server.
|
||||
|
@ -185,27 +185,7 @@ If you chose to save the CA certificate during the deploy process, then Algo's o
|
|||
After this process completes, the Algo VPN server will contain only the users listed in the `config.cfg` file.
|
||||
|
||||
## Additional Documentation
|
||||
|
||||
* Setup instructions
|
||||
- Documentation for available [Ansible roles](docs/setup-roles.md)
|
||||
- Deploy from [Fedora Workstation (26)](docs/deploy-from-fedora-workstation.md)
|
||||
- Deploy from [RedHat/CentOS 6.x](docs/deploy-from-redhat-centos6.md)
|
||||
- Deploy from [Windows](docs/deploy-from-windows.md)
|
||||
- Deploy from [Ansible](docs/deploy-from-ansible.md) directly
|
||||
* Client setup
|
||||
- Setup [Android](docs/client-android.md) clients
|
||||
- Setup [Generic/Linux](docs/client-linux.md) clients with Ansible
|
||||
- Setup Ubuntu clients to use [WireGuard](docs/client-linux-wireguard.md)
|
||||
- Setup Apple devices to use [IPSEC](docs/client-apple-ipsec.md)
|
||||
* Cloud setup
|
||||
- Configure [Amazon EC2](docs/cloud-amazon-ec2.md)
|
||||
- Configure [Azure](docs/cloud-azure.md)
|
||||
- Configure [DigitalOcean](docs/cloud-do.md)
|
||||
- Configure [Google Cloud Platform](docs/cloud-gce.md)
|
||||
* Advanced Deployment
|
||||
- Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server
|
||||
- Deploy to your own [Ubuntu 18.04](docs/deploy-to-ubuntu.md) server
|
||||
- Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md)
|
||||
* [Deployment instructions, cloud provider setup instructions, and further client setup instructions available here.](docs/index.md)
|
||||
* [FAQ](docs/faq.md)
|
||||
* [Troubleshooting](docs/troubleshooting.md)
|
||||
|
||||
|
@ -247,4 +227,4 @@ All donations support continued development. Thanks!
|
|||
* Use our [referral code](https://m.do.co/c/4d7f4ff9cfe4) when you sign up to Digital Ocean for a $10 credit.
|
||||
* We also accept and appreciate contributions of new code and bugfixes via Github Pull Requests.
|
||||
|
||||
Algo is licensed and distributed under the AGPLv3. If you want to distribute a closed-source modification or service based on Algo, then please consider <a href="mailto:opensource@trailofbits.com">purchasing an exception</a> . As with the methods above, this will help support continued development.
|
||||
Algo is licensed and distributed under the AGPLv3. If you want to distribute a closed-source modification or service based on Algo, then please consider <a href="mailto:opensource@trailofbits.com">purchasing an exception</a> . As with the methods above, this will help support continued development.
|
||||
|
|
23
algo
23
algo
|
@ -4,19 +4,20 @@ set -e
|
|||
|
||||
if [ -z ${VIRTUAL_ENV+x} ]
|
||||
then
|
||||
ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env/bin/activate"
|
||||
if [ -f "$ACTIVATE_SCRIPT" ]
|
||||
then
|
||||
source $ACTIVATE_SCRIPT
|
||||
else
|
||||
echo "$ACTIVATE_SCRIPT not found. Did you follow documentation to install dependencies?"
|
||||
exit 1
|
||||
fi
|
||||
ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env/bin/activate"
|
||||
if [ -f "$ACTIVATE_SCRIPT" ]
|
||||
then
|
||||
# shellcheck source=/dev/null
|
||||
source "$ACTIVATE_SCRIPT"
|
||||
else
|
||||
echo "$ACTIVATE_SCRIPT not found. Did you follow documentation to install dependencies?"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
case "$1" in
|
||||
update-users) PLAYBOOK=users.yml; ARGS="${@:2} -t update-users";;
|
||||
*) PLAYBOOK=main.yml; ARGS=${@} ;;
|
||||
update-users) PLAYBOOK=users.yml; ARGS=( "${@:2}" -t update-users ) ;;
|
||||
*) PLAYBOOK=main.yml; ARGS=( "${@}" ) ;;
|
||||
esac
|
||||
|
||||
ansible-playbook ${PLAYBOOK} ${ARGS}
|
||||
ansible-playbook ${PLAYBOOK} "${ARGS[@]}"
|
||||
|
|
|
@ -4,7 +4,8 @@ pipelining = True
|
|||
retry_files_enabled = False
|
||||
host_key_checking = False
|
||||
timeout = 60
|
||||
stdout_callback = full_skip
|
||||
stdout_callback = default
|
||||
display_skipped_hosts = no
|
||||
|
||||
[paramiko_connection]
|
||||
record_host_keys = False
|
||||
|
|
40
cloud.yml
40
cloud.yml
|
@ -2,48 +2,20 @@
|
|||
- name: Provision the server
|
||||
hosts: localhost
|
||||
tags: always
|
||||
become: false
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
pre_tasks:
|
||||
tasks:
|
||||
- block:
|
||||
- name: Local pre-tasks
|
||||
import_tasks: playbooks/cloud-pre.yml
|
||||
tags: always
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
|
||||
roles:
|
||||
- role: cloud-digitalocean
|
||||
when: algo_provider == "digitalocean"
|
||||
- role: cloud-ec2
|
||||
when: algo_provider == "ec2"
|
||||
- role: cloud-vultr
|
||||
when: algo_provider == "vultr"
|
||||
- role: cloud-gce
|
||||
when: algo_provider == "gce"
|
||||
- role: cloud-azure
|
||||
when: algo_provider == "azure"
|
||||
- role: cloud-lightsail
|
||||
when: algo_provider == "lightsail"
|
||||
- role: cloud-scaleway
|
||||
when: algo_provider == "scaleway"
|
||||
- role: cloud-openstack
|
||||
when: algo_provider == "openstack"
|
||||
- role: local
|
||||
when: algo_provider == "local"
|
||||
- name: Include a provisioning role
|
||||
include_role:
|
||||
name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + algo_provider }}"
|
||||
|
||||
post_tasks:
|
||||
- block:
|
||||
- name: Local post-tasks
|
||||
import_tasks: playbooks/cloud-post.yml
|
||||
become: false
|
||||
tags: cloud
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- include_tasks: playbooks/rescue.yml
|
||||
|
|
30
config.cfg
30
config.cfg
|
@ -1,15 +1,14 @@
|
|||
---
|
||||
|
||||
# This is the list of user to generate.
|
||||
# This is the list of users to generate.
|
||||
# Every device must have a unique username.
|
||||
# You can generate up to 250 users at one time.
|
||||
# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123".
|
||||
users:
|
||||
- phone
|
||||
- laptop
|
||||
- desktop
|
||||
|
||||
# NOTE: You must "escape" any usernames with leading 0's, like "000dan"
|
||||
|
||||
### Advanced users only below this line ###
|
||||
|
||||
# If True re-init all existing certificates. Boolean
|
||||
|
@ -25,6 +24,12 @@ ipsec_enabled: true
|
|||
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
|
||||
strongswan_log_level: 2
|
||||
|
||||
# rightsourceip for ipsec
|
||||
# ipv4
|
||||
strongswan_network: 10.19.48.0/24
|
||||
# ipv6
|
||||
strongswan_network_ipv6: 'fd9d:bc11:4020::/48'
|
||||
|
||||
# Deploy WireGuard
|
||||
wireguard_enabled: true
|
||||
wireguard_port: 51820
|
||||
|
@ -33,6 +38,10 @@ wireguard_port: 51820
|
|||
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence
|
||||
wireguard_PersistentKeepalive: 0
|
||||
|
||||
# WireGuard network configuration
|
||||
wireguard_network_ipv4: 10.19.49.0/24
|
||||
wireguard_network_ipv6: fd9d:bc11:4021::/48
|
||||
|
||||
# Reduce the MTU of the VPN tunnel
|
||||
# Some cloud and internet providers use a smaller MTU (Maximum Transmission
|
||||
# Unit) than the normal value of 1500 and if you don't reduce the MTU of your
|
||||
|
@ -48,9 +57,7 @@ reduce_mtu: 0
|
|||
# If you load very large blocklists, you may also have to modify resource limits:
|
||||
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf
|
||||
adblock_lists:
|
||||
- "http://winhelp2002.mvps.org/hosts.txt"
|
||||
- "https://adaway.org/hosts.txt"
|
||||
- "https://www.malwaredomainlist.com/hostslist/hosts.txt"
|
||||
- "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
|
||||
- "https://hosts-file.net/ad_servers.txt"
|
||||
|
||||
# Enable DNS encryption.
|
||||
|
@ -80,8 +87,9 @@ dns_servers:
|
|||
- 2606:4700:4700::1111
|
||||
- 2606:4700:4700::1001
|
||||
|
||||
# IP address for the local dns resolver
|
||||
local_service_ip: 172.16.0.1
|
||||
# Randomly generated IP address for the local dns resolver
|
||||
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
|
||||
# Your Algo server will automatically install security updates. Some updates
|
||||
# require a reboot to take effect but your Algo server will not reboot itself
|
||||
|
@ -122,13 +130,9 @@ cloud_providers:
|
|||
digitalocean:
|
||||
size: s-1vcpu-1gb
|
||||
image: "ubuntu-18-04-x64"
|
||||
ec2:
|
||||
# Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest.
|
||||
# Warning: the Algo script will take approximately 6 minutes longer to complete.
|
||||
# Also note that the documented AWS minimum permissions aren't sufficient.
|
||||
# You will have to edit the AWS user policy documented at
|
||||
# https://github.com/trailofbits/algo/blob/master/docs/cloud-amazon-ec2.md to also allow "ec2:CopyImage".
|
||||
# See https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-edit.html
|
||||
ec2:
|
||||
encrypted: false
|
||||
size: t2.micro
|
||||
image:
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
---
|
||||
- name: Configure the client
|
||||
hosts: localhost
|
||||
become: false
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
|
@ -8,9 +10,10 @@
|
|||
add_host:
|
||||
name: "{{ client_ip }}"
|
||||
groups: client-host
|
||||
ansible_ssh_user: "{{ ssh_user }}"
|
||||
ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}"
|
||||
vpn_user: "{{ vpn_user }}"
|
||||
server_ip: "{{ server_ip }}"
|
||||
IP_subject_alt_name: "{{ server_ip }}"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
|
||||
- name: Configure the client and install required software
|
||||
hosts: client-host
|
||||
|
@ -18,33 +21,6 @@
|
|||
become: true
|
||||
vars_files:
|
||||
- config.cfg
|
||||
- roles/vpn/defaults/main.yml
|
||||
|
||||
pre_tasks:
|
||||
- name: Get the OS
|
||||
raw: uname -a
|
||||
register: distribution
|
||||
|
||||
- name: Modify the server name fact
|
||||
set_fact:
|
||||
IP_subject_alt_name: "{{ server_ip }}"
|
||||
|
||||
- name: Ubuntu Xenial | Install prerequisites
|
||||
raw: >
|
||||
test -x /usr/bin/python2.7 ||
|
||||
sudo apt-get update -qq && sudo apt-get install -qq -y python2.7 &&
|
||||
sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1
|
||||
changed_when: false
|
||||
when: "'ubuntu' in distribution.stdout|lower"
|
||||
|
||||
- name: Fedora 25 | Install prerequisites
|
||||
raw: >
|
||||
test -x /usr/bin/python2.7 ||
|
||||
sudo dnf install python2 -y &&
|
||||
sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1 &&
|
||||
rpm -ql python2-dnf || dnf install python2-dnf -y
|
||||
changed_when: false
|
||||
when: "'fedora' in distribution.stdout|lower"
|
||||
|
||||
- roles/strongswan/defaults/main.yml
|
||||
roles:
|
||||
- { role: client, tags: ['client'] }
|
||||
- role: client
|
||||
|
|
|
@ -1,31 +1,25 @@
|
|||
# Using Ubuntu Server as a Client with WireGuard
|
||||
# Using Ubuntu as a Client with WireGuard
|
||||
|
||||
## Install WireGuard
|
||||
|
||||
To connect to your Algo VPN using [WireGuard](https://www.wireguard.com) from an Ubuntu Server 16.04 (Xenial) or 18.04 (Bionic) client, first install WireGuard on the client:
|
||||
To connect to your AlgoVPN using [WireGuard](https://www.wireguard.com) from Ubuntu, first install WireGuard:
|
||||
|
||||
```shell
|
||||
# Add the WireGuard repository:
|
||||
sudo add-apt-repository ppa:wireguard/wireguard
|
||||
|
||||
# Update the list of available packages (not necessary on Bionic):
|
||||
sudo apt update
|
||||
# Update the list of available packages (not necessary on 18.04 or later):
|
||||
sudo apt update
|
||||
|
||||
# Install the tools and kernel module:
|
||||
sudo apt install wireguard
|
||||
sudo apt install wireguard openresolv
|
||||
```
|
||||
|
||||
(For installation on other Linux distributions, see the [Installation](https://www.wireguard.com/install/) page on the WireGuard site.)
|
||||
For installation on other Linux distributions, see the [Installation](https://www.wireguard.com/install/) page on the WireGuard site.
|
||||
|
||||
## Locate the Config File
|
||||
|
||||
The Algo-generated config files for WireGuard are named `configs/<ip_address>/wireguard/<username>.conf` on the system where you ran `./algo`. One file was generated for each of the users you added to `config.cfg` before you ran `./algo`. Each Linux and Android client you connect to your Algo VPN must use a different WireGuard config file. Choose one of these files and copy it to your Linux client.
|
||||
|
||||
If your client is running Bionic (or another Linux that uses `systemd-resolved` for DNS) you should first edit the config file. Comment out the line that begins with `DNS =` and replace it with:
|
||||
```
|
||||
PostUp = systemd-resolve -i %i --set-dns=172.16.0.1 --set-domain=~.
|
||||
```
|
||||
Use the IP address shown on the `DNS =` line (for most, this will be `172.16.0.1`). If the `DNS =` line contains multiple IP addresses, use multiple `--set-dns=` options.
|
||||
The Algo-generated config files for WireGuard are named `configs/<ip_address>/wireguard/<username>.conf` on the system where you ran `./algo`. One file was generated for each of the users you added to `config.cfg`. Each WireGuard client you connect to your AlgoVPN must use a different config file. Choose one of these files and copy it to your Linux client.
|
||||
|
||||
## Configure WireGuard
|
||||
|
||||
|
@ -33,7 +27,7 @@ Finally, install the config file on your client as `/etc/wireguard/wg0.conf` and
|
|||
|
||||
```shell
|
||||
# Install the config file to the WireGuard configuration directory on your
|
||||
# Bionic or Xenial client:
|
||||
# Linux client:
|
||||
sudo install -o root -g root -m 600 <username>.conf /etc/wireguard/wg0.conf
|
||||
|
||||
# Start the WireGuard VPN:
|
||||
|
@ -42,14 +36,14 @@ sudo systemctl start wg-quick@wg0
|
|||
# Check that it started properly:
|
||||
sudo systemctl status wg-quick@wg0
|
||||
|
||||
# Verify the connection to the Algo VPN:
|
||||
# Verify the connection to the AlgoVPN:
|
||||
sudo wg
|
||||
|
||||
# See that your client is using the IP address of your Algo VPN:
|
||||
# See that your client is using the IP address of your AlgoVPN:
|
||||
curl ipv4.icanhazip.com
|
||||
|
||||
# Optionally configure the connection to come up at boot time:
|
||||
sudo systemctl enable wg-quick@wg0
|
||||
```
|
||||
|
||||
(If your Linux distribution does not use `systemd`, you can bring up WireGuard with `sudo wg-quick up wg0`).
|
||||
If your Linux distribution does not use `systemd` you can bring up WireGuard with `sudo wg-quick up wg0`.
|
||||
|
|
|
@ -96,9 +96,7 @@ For more, see [Scripted Deployment](deploy-from-ansible.md).
|
|||
|
||||
## Using the DigitalOcean firewall with Algo
|
||||
|
||||
Many cloud providers include the option to configure an external firewall between the Internet and your cloud server. For some providers this is mandatory and Algo will configure it for you, but for DigitalOcean the external firewall is optional.
|
||||
|
||||
An Algo VPN runs its own firewall and doesn't require an external firewall, but you might wish to use the DigitalOcean firewall for example to limit the addresses which can connect to your Algo VPN over SSH, or perhaps to block SSH altogether.
|
||||
Many cloud providers include the option to configure an external firewall between the Internet and your cloud server. For some providers this is mandatory and Algo will configure it for you, but for DigitalOcean the external firewall is optional. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
|
||||
To configure the DigitalOcean firewall, go to **Networking**, **Firewalls**, and choose **Create Firewall**.
|
||||
|
||||
|
|
9
docs/cloud-scaleway.md
Normal file
9
docs/cloud-scaleway.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
### Configuration file
|
||||
|
||||
Algo requires an API key from your Scaleway account to create a server.
|
||||
The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/account/credentials](https://console.scaleway.com/account/credentials), and then selecting "Generate new token" on the right side of the box labeled "API Tokens".
|
||||
|
||||
Enter this token when Algo prompts you for the `auth token`.
|
||||
This information will be pass as the `algo_scaleway_token` variable when asked for in the Algo prompt.
|
||||
|
||||
Your organization ID is also on this page: https://console.scaleway.com/account/credentials
|
|
@ -1,8 +1,15 @@
|
|||
### Configuration file
|
||||
|
||||
You need to create a configuration file in INI format with your api key (https://my.vultr.com/settings/#settingsapi)
|
||||
Algo requires an API key from your Vultr account in order to create a server. The API key is generated by going to your Vultr settings at https://my.vultr.com/settings/#settingsapi, and then selecting "generate new API key" on the right side of the box labeled "API Key".
|
||||
|
||||
Algo can read the API key in several different ways. Algo will first look for the file containing the API key in the environment variable $VULTR_API_CONFIG if present. You can set this with the command: `export VULTR_API_CONFIG=/path/to/vultr.ini`. Probably the simplest way to give Algo the API key is to create a file titled `.vultr.ini` in your home directory by typing `nano ~/.vultr.ini`, then entering the following text:
|
||||
|
||||
```
|
||||
[default]
|
||||
key = <your api key>
|
||||
```
|
||||
where you've cut-and-pasted the API key from above into the `<your api key>` field (no brackets).
|
||||
|
||||
When Algo asks `Enter the local path to your configuration INI file
|
||||
(https://trailofbits.github.io/algo/cloud-vultr.html):` if you hit enter without typing anything, Algo will look for the file in `~/.vultr.ini` by default.
|
||||
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
# Scripted Deployment
|
||||
# Deployment from Ansible
|
||||
|
||||
Before you begin, make sure you have installed all the dependencies necessary for your operating system as described in the [README](../README.md).
|
||||
|
||||
You can deploy Algo non-interactively by running the Ansible playbooks directly with `ansible-playbook`.
|
||||
|
||||
`ansible-playbook` accepts "tags" via the `-t` or `TAGS` options. You can pass tags as a list of comma separated values. Ansible will only run plays (install roles) with the specified tags.
|
||||
`ansible-playbook` accepts "tags" via the `-t` or `TAGS` options. You can pass tags as a list of comma separated values. Ansible will only run plays (install roles) with the specified tags. You can also use the `--skip-tags` option to skip certain parts of the install, such as `iptables` (overwrite iptables rules), `ipsec` (install strongSwan), `wireguard` (install Wireguard).
|
||||
|
||||
`ansible-playbook` accepts variables via the `-e` or `--extra-vars` option. You can pass variables as space separated key=value pairs. Algo requires certain variables that are listed below.
|
||||
|
||||
|
@ -23,25 +23,25 @@ ansible-playbook main.yml -e "provider=digitalocean
|
|||
do_token=token"
|
||||
```
|
||||
|
||||
See below for more information about providers and extra variables
|
||||
See below for more information about variables and roles.
|
||||
|
||||
### Variables
|
||||
|
||||
- `provider` - (Required) The provider to use. See possible values below
|
||||
- `server_name` - (Required) Server name. Default: algo
|
||||
- `ondemand_cellular` (Optional) VPN On Demand when connected to cellular networks. Default: false
|
||||
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks. Default: false
|
||||
- `ondemand_cellular` (Optional) VPN On Demand when connected to cellular networks with IPsec. Default: false
|
||||
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks with IPsec. Default: false
|
||||
- `ondemand_wifi_exclude` (Required if `ondemand_wifi` set) - WiFi networks to exclude from using the VPN. Comma-separated values
|
||||
- `local_dns` - (Optional) Enable a DNS resolver. Default: false
|
||||
- `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false
|
||||
- `windows` - (Optional) Enables compatible ciphers and key exchange to support Windows clients, less secure. Default: false
|
||||
- `store_cakey` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false
|
||||
|
||||
If any of those unspecified ansible will ask the user to input
|
||||
If any of the above variables are unspecified, ansible will ask the user to input them.
|
||||
|
||||
### Ansible roles
|
||||
|
||||
Roles can be activated by specifying an extra variable `provider`
|
||||
Cloud roles can be activated by specifying an extra variable `provider`.
|
||||
|
||||
Cloud roles:
|
||||
|
||||
|
@ -55,13 +55,25 @@ Cloud roles:
|
|||
|
||||
Server roles:
|
||||
|
||||
- role: vpn
|
||||
- role: strongswan
|
||||
* Installs [strongSwan](https://www.strongswan.org/)
|
||||
* Enables AppArmor, limits CPU and memory access, and drops user privileges
|
||||
* Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
|
||||
* Bundles the appropriate certificates into Apple mobileconfig profiles and Powershell scripts for each user
|
||||
- role: dns_adblocking
|
||||
* Installs the [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) local resolver with a blacklist for advertising domains
|
||||
* Constrains dnsmasq with AppArmor and cgroups CPU and memory limitations
|
||||
- role: dns_encryption
|
||||
* Installs [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy)
|
||||
* Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations
|
||||
- role: ssh_tunneling
|
||||
* Adds a restricted `algo` group with no shell access and limited SSH forwarding options
|
||||
* Creates one limited, local account and an SSH public key for each user
|
||||
- role: wireguard
|
||||
* Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades
|
||||
* Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients
|
||||
|
||||
Note: The `vpn` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:
|
||||
Note: The `strongswan` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:
|
||||
|
||||
- ondemand_wifi: true
|
||||
- ondemand_wifi_exclude: HomeNet,OfficeWifi
|
||||
|
@ -91,9 +103,9 @@ Possible options can be gathered calling to https://api.digitalocean.com/v2/regi
|
|||
|
||||
Required variables:
|
||||
|
||||
- aws_access_key
|
||||
- aws_access_key: `AKIA...`
|
||||
- aws_secret_key
|
||||
- region
|
||||
- region: e.g. `us-east-1`
|
||||
|
||||
Possible options can be gathered via cli `aws ec2 describe-regions`
|
||||
|
||||
|
@ -114,7 +126,8 @@ Additional variables:
|
|||
"ec2:DescribeImages",
|
||||
"ec2:DescribeKeyPairs",
|
||||
"ec2:DescribeRegions",
|
||||
"ec2:ImportKeyPair"
|
||||
"ec2:ImportKeyPair",
|
||||
"ec2:CopyImage"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
|
@ -179,8 +192,8 @@ Required variables:
|
|||
|
||||
Required variables:
|
||||
|
||||
- [vultr_config](https://trailofbits.github.io/algo/cloud-vultr.html)
|
||||
- [region](https://api.vultr.com/v1/regions/list)
|
||||
- [vultr_config](https://trailofbits.github.io/algo/cloud-vultr.html): /path/to/.vultr.ini
|
||||
- [region](https://api.vultr.com/v1/regions/list): e.g. `Chicago`, `'New Jersey'`
|
||||
|
||||
### Azure
|
||||
|
||||
|
@ -196,9 +209,9 @@ Required variables:
|
|||
|
||||
Required variables:
|
||||
|
||||
- aws_access_key
|
||||
- aws_access_key: `AKIA...`
|
||||
- aws_secret_key
|
||||
- region
|
||||
- region: e.g. `us-east-1`
|
||||
|
||||
Possible options can be gathered via cli `aws lightsail get-regions`
|
||||
|
||||
|
@ -230,13 +243,7 @@ Possible options can be gathered via cli `aws lightsail get-regions`
|
|||
Required variables:
|
||||
|
||||
- [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/)
|
||||
- [scaleway_org](https://cloud.scaleway.com/#/billing)
|
||||
- region
|
||||
|
||||
Possible regions:
|
||||
|
||||
- ams1
|
||||
- par1
|
||||
- region: e.g. ams1, par1
|
||||
|
||||
### OpenStack
|
||||
|
||||
|
|
59
docs/deploy-from-script-or-cloud-init-to-localhost.md
Normal file
59
docs/deploy-from-script-or-cloud-init-to-localhost.md
Normal file
|
@ -0,0 +1,59 @@
|
|||
# Deploy from script or cloud-init
|
||||
|
||||
You can use `install.sh` to prepare the environment and deploy AlgoVPN on the local Ubuntu server in one shot using cloud-init, or run the script directly on the server after it's been created. The script doesn't configure any parameters in your cloud, so it's up to you to configure related [firewall rules](/docs/firewalls.md), a floating IP address and other resources you may need. The output of the install script (including the p12 and CA passwords) and user config files will be installed into the `/opt/algo` directory.
|
||||
|
||||
## Cloud init deployment
|
||||
|
||||
You can copy-paste the snippet below to the user data (cloud-init or startup script) field when creating a new server. For now it is only possible for [DigitalOcean](https://www.digitalocean.com/docs/droplets/resources/metadata/), Amazon [EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and [Lightsail](https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-how-to-configure-server-additional-data-shell-script), [Google Cloud](https://cloud.google.com/compute/docs/startupscript), [Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init) and [Vultr](https://my.vultr.com/startup/), although Vultr doesn't [officially support cloud-init](https://www.vultr.com/docs/getting-started-with-cloud-init).
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x
|
||||
```
|
||||
The command will prepare the environment and install AlgoVPN with the default parameters below. If you want to modify the behavior you may define additional variables.
|
||||
|
||||
## Variables
|
||||
|
||||
`METHOD` - which method of the deployment to use. Possible values are local and cloud. Default: cloud. The cloud method is intended for use in cloud-init deployments only. If you are not using cloud-init to deploy the server you have to use the local method.
|
||||
`ONDEMAND_CELLULAR` - "Connect On Demand" when connected to cellular networks. Boolean. Default: false.
|
||||
`ONDEMAND_WIFI` - "Connect On Demand" when connected to Wi-Fi. Default: false.
|
||||
`ONDEMAND_WIFI_EXCLUDE` - List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand". Comma-separated list.
|
||||
`WINDOWS` - To support Windows 10 or Linux Desktop clients. Default: false.
|
||||
`STORE_CAKEY` - To retain the CA key. (required to add users in the future, but less secure). Default: false.
|
||||
`LOCAL_DNS` - To install an ad blocking DNS resolver. Default: false.
|
||||
`SSH_TUNNELING` - Enable SSH tunneling for each user. Default: false.
|
||||
`ENDPOINT` - The public IP address or domain name of your server: (IMPORTANT! This is used to verify the certificate). It will be gathered automatically for DigitalOcean, AWS, GCE, Azure or Vultr if the `METHOD` is cloud. Otherwise you need to define this variable according to your public IP address.
|
||||
`USERS` - list of VPN users. Comma-separated list. Default: user1.
|
||||
`REPO_SLUG` - Owner and repository used to get the installation scripts from. Default: trailofbits/algo.
|
||||
`REPO_BRANCH` - Branch for `REPO_SLUG`. Default: master.
|
||||
`EXTRA_VARS` - Additional extra variables.
|
||||
`ANSIBLE_EXTRA_ARGS` - Any available ansible parameters. ie: `--skip-tags apparmor`.
|
||||
|
||||
## Examples
|
||||
|
||||
##### How to customise a cloud-init deployment by variables
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
export ONDEMAND_CELLULAR=true
|
||||
export WINDOWS=true
|
||||
export SSH_TUNNELING=true
|
||||
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x
|
||||
```
|
||||
|
||||
##### How to deploy locally without using cloud-init
|
||||
|
||||
```
|
||||
export METHOD=local
|
||||
export ONDEMAND_CELLULAR=true
|
||||
export ENDPOINT=[your server's IP here]
|
||||
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x
|
||||
```
|
||||
|
||||
##### How to deploy a server using arguments
|
||||
|
||||
The arguments order as per [variables](#variables) above
|
||||
|
||||
```
|
||||
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x -s local true false _null true true true true myvpnserver.com phone,laptop,desktop
|
||||
```
|
|
@ -1,6 +1,6 @@
|
|||
# Local deployment
|
||||
|
||||
You can use Algo to configure a local server as an Algo VPN rather than create and configure a new server on a cloud provider.
|
||||
You can use Algo to configure a local server as an AlgoVPN rather than create and configure a new server on a cloud provider.
|
||||
|
||||
Install the Algo scripts on your server and follow the normal installation instructions, then choose:
|
||||
```
|
||||
|
@ -8,4 +8,6 @@ Install to existing Ubuntu 18.04 server (Advanced)
|
|||
```
|
||||
Make sure your server is running the operating system specified.
|
||||
|
||||
**PLEASE NOTE**: Algo is intended for use as a _dedicated_ VPN server. If you install Algo on an existing server, then any existing services might break. In particular, the firewall rules will be overwritten. If you don't want to overwrite the rules you must deploy via `ansible-playbook` and skip the `iptables` tag as described in [deploy-from-ansible.md](deploy-from-ansible.md), after which you'll need to implement the necessary rules yourself.
|
||||
**PLEASE NOTE**: Algo is intended for use as a _dedicated_ VPN server. If you install Algo on an existing server, then any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
|
||||
If you don't want to overwrite the rules you must deploy via `ansible-playbook` and skip the `iptables` tag as described in [deploy-from-ansible.md](deploy-from-ansible.md), after which you'll need to implement the necessary rules yourself.
|
||||
|
|
|
@ -50,7 +50,7 @@ Algo is short for "Al Gore", the **V**ice **P**resident of **N**etworks everywhe
|
|||
|
||||
## Can DNS filtering be disabled?
|
||||
|
||||
There is no official way to disable DNS filtering, but there is a workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=172.16.0.1` to `rightdns=8.8.8.8`. Then run `ipsec restart`. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled.
|
||||
You can temporarily disable DNS filtering for all IPsec clients at once with the following workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=<random_ip>` to `rightdns=8.8.8.8`. Then run `sudo systemctl restart strongswan`. DNS filtering for Wireguard clients has to be disabled on each client device separately by modifying the settings in the app, or by directly modifying the `DNS` setting on the `clientname.conf` file. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled.
|
||||
|
||||
## Wasn't IPSEC backdoored by the US government?
|
||||
|
||||
|
@ -74,4 +74,4 @@ No.
|
|||
|
||||
## What inbound ports are used?
|
||||
|
||||
You should only need 22/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server.
|
||||
You should only need 22/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
|
|
34
docs/firewalls.md
Normal file
34
docs/firewalls.md
Normal file
|
@ -0,0 +1,34 @@
|
|||
# AlgoVPN and Firewalls
|
||||
|
||||
Your AlgoVPN requires properly configured firewalls. The key points to know are:
|
||||
|
||||
* If you deploy to a **cloud** provider, all firewall configuration will be done automatically.
|
||||
|
||||
* If you perform a **local** installation on an existing server you are responsible for configuring any external firewalls. You must also take care not to interfere with the server firewall configuration of the AlgoVPN.
|
||||
|
||||
## The Two Types of Firewall
|
||||
|
||||

|
||||
|
||||
### Server Firewall
|
||||
|
||||
During installation Algo configures the Linux [Netfilter](https://en.wikipedia.org/wiki/Netfilter) firewall on the server. The rules added are required for AlgoVPN to work properly. The package `netfilter-persistent` is used to load the IPv4 and IPv6 rules files that Algo generates and stores in `/etc/iptables`. The rules for IPv6 are only generated if the server appears to be properly configured for IPv6. The use of conflicting firewall packages on the server such as `ufw` will likely break AlgoVPN.
|
||||
|
||||
### External Firewall
|
||||
|
||||
Most cloud service providers offer a firewall that sits between the Internet and your AlgoVPN. With some providers (such as EC2, Lightsail, and GCE) this firewall is required and is configured by Algo during a **cloud** deployment. If the firewall is not required by the provider then Algo does not configure it.
|
||||
|
||||
External firewalls are not configured when performing a **local** installation, even when using a server from a cloud service provider.
|
||||
|
||||
Any external firewall must be configured to pass the following incoming ports over IPv4:
|
||||
|
||||
Port | Protocol | Description | Related variables in `config.cfg`
|
||||
---- | -------- | ----------- | ---------------------------------
|
||||
22 | TCP | Secure Shell (SSH) | None
|
||||
500 | UDP | IPsec IKEv2 | `ipsec_enabled`
|
||||
4500 | UDP | IPsec NAT-T | `ipsec_enabled`
|
||||
51820 | UDP | WireGuard | `wireguard_enabled`, `wireguard_port`
|
||||
|
||||
If you have chosen to disable either IPsec or WireGuard in `config.cfg` before running `./algo` then the corresponding ports don't need to pass through the firewall. SSH is used when performing a **cloud** deployment and when subsequently modifying the list of VPN users by running `./algo update-users`.
|
||||
|
||||
Even when not required by the cloud service provider, you still might wish to use an external firewall to limit SSH access to your AlgoVPN to connections from certain IP addresses, or perhaps to block SSH access altogether if you don't need it. Every service provider firewall is different so refer to the provider's documentation for more information.
|
BIN
docs/images/firewalls.png
Normal file
BIN
docs/images/firewalls.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 113 KiB |
|
@ -1,21 +1,29 @@
|
|||
# Algo VPN documentation
|
||||
|
||||
* Setup instructions
|
||||
- Documentation for available [Ansible roles](setup-roles.md)
|
||||
* Deployment instructions
|
||||
- Deploy from [Fedora Workstation (26)](deploy-from-fedora-workstation.md)
|
||||
- Deploy from [RedHat/CentOS 6.x](deploy-from-redhat-centos6.md)
|
||||
- Deploy from [Windows](deploy-from-windows.md)
|
||||
- Deploy from [Ansible](deploy-from-ansible.md) directly
|
||||
- Deploy from a [Docker container](deploy-from-docker.md)
|
||||
- Deploy from [Ansible](deploy-from-ansible.md) non-interactively
|
||||
- Deploy onto a [cloud server at time of creation](deploy-from-script-or-cloud-init-to-localhost.md)
|
||||
* Client setup
|
||||
- Setup [Android](client-android.md) clients
|
||||
- Setup [Generic/Linux](client-linux.md) clients with Ansible
|
||||
* Cloud setup
|
||||
- Setup Ubuntu clients to use [WireGuard](client-linux-wireguard.md)
|
||||
- Setup Apple devices to use [IPSEC](client-apple-ipsec.md)
|
||||
- Setup Macs running macOS 10.13 or older to use [Wireguard](client-macos-wireguard.md)
|
||||
- Manual Windows 10 client setup for [IPSEC](client-windows.md)
|
||||
* Cloud provider setup
|
||||
- Configure [Amazon EC2](cloud-amazon-ec2.md)
|
||||
- Configure [Azure](cloud-azure.md)
|
||||
- Configure [DigitalOcean](cloud-do.md)
|
||||
- Configure [Google Cloud Platform](cloud-gce.md)
|
||||
- Configure [Vultr](cloud-vultr.md)
|
||||
* Advanced Deployment
|
||||
- Deploy to your own [FreeBSD](deploy-to-freebsd.md) server
|
||||
- Deploy to your own [Ubuntu 18.04](deploy-to-ubuntu.md) server
|
||||
- Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md)
|
||||
* [FAQ](faq.md)
|
||||
* [Firewalls](firewalls.md)
|
||||
* [Troubleshooting](troubleshooting.md)
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
# Ansible Roles
|
||||
|
||||
## Required roles
|
||||
|
||||
* **Common**
|
||||
* Installs several required packages and software updates, then reboots if necessary
|
||||
* Configures network interfaces, and enables packet forwarding on them
|
||||
* **VPN**
|
||||
* Installs [strongSwan](https://www.strongswan.org/), enables AppArmor, limits CPU and memory access, and drops user privileges
|
||||
* Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
|
||||
* Bundles the appropriate certificates into Apple mobileconfig profiles for each user
|
||||
* Configures IPtables to block traffic that might pose a risk to VPN users, such as [SMB/CIFS](https://medium.com/@ValdikSS/deanonymizing-windows-users-and-capturing-microsoft-and-vpn-accounts-f7e53fe73834)
|
||||
|
||||
## Optional roles
|
||||
|
||||
* **Security Enhancements**
|
||||
* Enables [unattended-upgrades](https://help.ubuntu.com/community/AutomaticSecurityUpdates) to ensure available patches are always applied
|
||||
* Modify features like core dumps, kernel parameters, and SUID binaries to limit possible attacks
|
||||
* Enhances SSH with modern ciphers and seccomp, and restricts access to old or unwanted features like X11 forwarding and SFTP
|
||||
* **DNS-based Adblocking**
|
||||
* Install the [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) local resolver with a blacklist for advertising domains
|
||||
* Constrains dnsmasq with AppArmor and cgroups CPU and memory limitations
|
||||
* **DNS encryption**
|
||||
* Install [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy)
|
||||
* Constrains dingo with AppArmor and cgroups CPU and memory limitations
|
||||
* **SSH Tunneling**
|
||||
* Adds a restricted `algo` group with no shell access and limited SSH forwarding options
|
||||
* Creates one limited, local account per user and an SSH public key for each
|
|
@ -18,7 +18,7 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
|
|||
* [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid)
|
||||
* [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh)
|
||||
* [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths)
|
||||
* [Ubuntu Error: "unable to write 'random state" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password")
|
||||
* [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password)
|
||||
* [Connection Problems](#connection-problems)
|
||||
* [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites)
|
||||
* [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device)
|
||||
|
@ -153,7 +153,9 @@ You need to reset the permissions on your `.ssh` directory. Run `chmod 700 /home
|
|||
|
||||
### The region you want is not available
|
||||
|
||||
You want to install Algo to a specific region in a cloud provider, but that region is not available in the list given by the installer. In that case, you should [file an issue](https://github.com/trailofbits/algo/issues/new). Cloud providers add new regions on a regular basis and we don't always keep up. File an issue and give us information about what region is missing and we'll add it.
|
||||
Algo downloads the regions from the supported cloud providers (other than Microsoft Azure) listed in the first menu using APIs. If the region you want isn't available, the cloud provider has probably taken it offline for some reason. You should investigate further with your cloud provider.
|
||||
|
||||
If there's a specific region you want to install to in Microsoft Azure that isn't available, you should [file an issue](https://github.com/trailofbits/algo/issues/new), give us information about what region is missing, and we'll add it.
|
||||
|
||||
### AWS: SSH permission denied with an ECDSA key
|
||||
|
||||
|
@ -269,7 +271,7 @@ sudo rm -rf /etc/wireguard/*.lock
|
|||
```
|
||||
Then immediately re-run `./algo`.
|
||||
|
||||
### Ubuntu Error: "unable to write 'random state" when generating CA password
|
||||
### Ubuntu Error: "unable to write 'random state'" when generating CA password
|
||||
|
||||
When running Algo, you received an error like this:
|
||||
|
||||
|
|
212
input.yml
212
input.yml
|
@ -25,115 +25,129 @@
|
|||
- config.cfg
|
||||
|
||||
tasks:
|
||||
- pause:
|
||||
prompt: |
|
||||
What provider would you like to use?
|
||||
{% for p in providers_map %}
|
||||
{{ loop.index }}. {{ p['name']}}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired provider
|
||||
register: _algo_provider
|
||||
when: provider is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Name the vpn server
|
||||
[algo]
|
||||
register: _algo_server_name
|
||||
when:
|
||||
- server_name is undefined
|
||||
- algo_provider != "local"
|
||||
- block:
|
||||
- pause:
|
||||
- name: Cloud prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to cellular networks?
|
||||
[y/N]
|
||||
register: _ondemand_cellular
|
||||
when: ondemand_cellular is undefined
|
||||
What provider would you like to use?
|
||||
{% for p in providers_map %}
|
||||
{{ loop.index }}. {{ p['name'] }}
|
||||
{% endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to Wi-Fi?
|
||||
[y/N]
|
||||
register: _ondemand_wifi
|
||||
when: ondemand_wifi is undefined
|
||||
Enter the number of your desired provider
|
||||
register: _algo_provider
|
||||
when: provider is undefined
|
||||
|
||||
- pause:
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"
|
||||
|
||||
- name: VPN server name prompt
|
||||
pause:
|
||||
prompt: |
|
||||
List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand"
|
||||
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
|
||||
register: _ondemand_wifi_exclude
|
||||
Name the vpn server
|
||||
[algo]
|
||||
register: _algo_server_name
|
||||
when:
|
||||
- ondemand_wifi_exclude is undefined
|
||||
- (ondemand_wifi|default(false)|bool) or
|
||||
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
|
||||
- server_name is undefined
|
||||
- algo_provider != "local"
|
||||
- block:
|
||||
- name: Cellular On Demand prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to cellular networks?
|
||||
[y/N]
|
||||
register: _ondemand_cellular
|
||||
when: ondemand_cellular is undefined
|
||||
|
||||
- pause:
|
||||
- name: Wi-Fi On Demand prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to Wi-Fi?
|
||||
[y/N]
|
||||
register: _ondemand_wifi
|
||||
when: ondemand_wifi is undefined
|
||||
|
||||
- name: Trusted Wi-Fi networks prompt
|
||||
pause:
|
||||
prompt: |
|
||||
List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand"
|
||||
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
|
||||
register: _ondemand_wifi_exclude
|
||||
when:
|
||||
- ondemand_wifi_exclude is undefined
|
||||
- (ondemand_wifi|default(false)|bool) or
|
||||
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
|
||||
|
||||
- name: Compatible ciphers prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
|
||||
[y/N]
|
||||
register: _windows
|
||||
when: windows is undefined
|
||||
|
||||
- name: Retain the CA key prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want to retain the CA key? (required to add users in the future, but less secure)
|
||||
[y/N]
|
||||
register: _store_cakey
|
||||
when: store_cakey is undefined
|
||||
when: ipsec_enabled
|
||||
|
||||
- name: DNS adblocking prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
|
||||
Do you want to install an ad blocking DNS resolver on this VPN server?
|
||||
[y/N]
|
||||
register: _windows
|
||||
when: windows is undefined
|
||||
register: _local_dns
|
||||
when: local_dns is undefined
|
||||
|
||||
- pause:
|
||||
- name: SSH tunneling prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want to retain the CA key? (required to add users in the future, but less secure)
|
||||
Do you want each user to have their own account for SSH tunneling?
|
||||
[y/N]
|
||||
register: _store_cakey
|
||||
when: store_cakey is undefined
|
||||
when: ipsec_enabled
|
||||
register: _ssh_tunneling
|
||||
when: ssh_tunneling is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want to install an ad blocking DNS resolver on this VPN server?
|
||||
[y/N]
|
||||
register: _local_dns
|
||||
when: local_dns is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want each user to have their own account for SSH tunneling?
|
||||
[y/N]
|
||||
register: _ssh_tunneling
|
||||
when: ssh_tunneling is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_server_name: >-
|
||||
{% if server_name is defined %}{% set _server = server_name %}
|
||||
{%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input != "" %}{% set _server = _algo_server_name.user_input %}
|
||||
{%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
|
||||
{{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
|
||||
algo_ondemand_cellular: >-
|
||||
{% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
|
||||
{%- elif _ondemand_cellular.user_input is defined and _ondemand_cellular.user_input != "" %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi: >-
|
||||
{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
|
||||
{%- elif _ondemand_wifi.user_input is defined and _ondemand_wifi.user_input != "" %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi_exclude: >-
|
||||
{% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }}
|
||||
{%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input != "" %}{{ _ondemand_wifi_exclude.user_input | b64encode }}
|
||||
{%- else %}{{ '_null' | b64encode }}{% endif %}
|
||||
algo_local_dns: >-
|
||||
{% if local_dns is defined %}{{ local_dns | bool }}
|
||||
{%- elif _local_dns.user_input is defined and _local_dns.user_input != "" %}{{ booleans_map[_local_dns.user_input] | default(defaults['local_dns']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ssh_tunneling: >-
|
||||
{% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
|
||||
{%- elif _ssh_tunneling.user_input is defined and _ssh_tunneling.user_input != "" %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_windows: >-
|
||||
{% if windows is defined %}{{ windows | bool }}
|
||||
{%- elif _windows.user_input is defined and _windows.user_input != "" %}{{ booleans_map[_windows.user_input] | default(defaults['windows']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_store_cakey: >-
|
||||
{% if store_cakey is defined %}{{ store_cakey | bool }}
|
||||
{%- elif _store_cakey.user_input is defined and _store_cakey.user_input != "" %}{{ booleans_map[_store_cakey.user_input] | default(defaults['store_cakey']) }}
|
||||
{%- else %}false{% endif %}
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_server_name: >-
|
||||
{% if server_name is defined %}{% set _server = server_name %}
|
||||
{%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%}
|
||||
{%- set _server = _algo_server_name.user_input -%}
|
||||
{%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
|
||||
{{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
|
||||
algo_ondemand_cellular: >-
|
||||
{% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
|
||||
{%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi: >-
|
||||
{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
|
||||
{%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi_exclude: >-
|
||||
{% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }}
|
||||
{%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%}
|
||||
{{ _ondemand_wifi_exclude.user_input | b64encode }}
|
||||
{%- else %}{{ '_null' | b64encode }}{% endif %}
|
||||
algo_local_dns: >-
|
||||
{% if local_dns is defined %}{{ local_dns | bool }}
|
||||
{%- elif _local_dns.user_input is defined %}{{ booleans_map[_local_dns.user_input] | default(defaults['local_dns']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ssh_tunneling: >-
|
||||
{% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
|
||||
{%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_windows: >-
|
||||
{% if windows is defined %}{{ windows | bool }}
|
||||
{%- elif _windows.user_input is defined %}{{ booleans_map[_windows.user_input] | default(defaults['windows']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_store_cakey: >-
|
||||
{% if ipsec_enabled %}{%- if store_cakey is defined %}{{ store_cakey | bool }}
|
||||
{%- elif _store_cakey.user_input is defined %}{{ booleans_map[_store_cakey.user_input] | default(defaults['store_cakey']) }}
|
||||
{%- else %}false{% endif %}{% endif %}
|
||||
rescue:
|
||||
- include_tasks: playbooks/rescue.yml
|
||||
|
|
112
install.sh
Normal file
112
install.sh
Normal file
|
@ -0,0 +1,112 @@
|
|||
#!/usr/bin/env sh
|
||||
|
||||
set -ex
|
||||
|
||||
METHOD="${1:-${METHOD:-cloud}}"
|
||||
ONDEMAND_CELLULAR="${2:-${ONDEMAND_CELLULAR:-false}}"
|
||||
ONDEMAND_WIFI="${3:-${ONDEMAND_WIFI:-false}}"
|
||||
ONDEMAND_WIFI_EXCLUDE="${4:-${ONDEMAND_WIFI_EXCLUDE:-_null}}"
|
||||
WINDOWS="${5:-${WINDOWS:-false}}"
|
||||
STORE_CAKEY="${6:-${STORE_CAKEY:-false}}"
|
||||
LOCAL_DNS="${7:-${LOCAL_DNS:-false}}"
|
||||
SSH_TUNNELING="${8:-${SSH_TUNNELING:-false}}"
|
||||
ENDPOINT="${9:-${ENDPOINT:-localhost}}"
|
||||
USERS="${10:-${USERS:-user1}}"
|
||||
REPO_SLUG="${11:-${REPO_SLUG:-trailofbits/algo}}"
|
||||
REPO_BRANCH="${12:-${REPO_BRANCH:-master}}"
|
||||
EXTRA_VARS="${13:-${EXTRA_VARS:-placeholder=null}}"
|
||||
ANSIBLE_EXTRA_ARGS="${14:-${ANSIBLE_EXTRA_ARGS}}"
|
||||
|
||||
cd /opt/
|
||||
|
||||
installRequirements() {
|
||||
apt-get update
|
||||
apt-get install \
|
||||
software-properties-common \
|
||||
git \
|
||||
build-essential \
|
||||
libssl-dev \
|
||||
libffi-dev \
|
||||
python-dev \
|
||||
python-pip \
|
||||
python-setuptools \
|
||||
python-virtualenv \
|
||||
bind9-host \
|
||||
jq -y
|
||||
}
|
||||
|
||||
getAlgo() {
|
||||
[ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo
|
||||
cd algo
|
||||
|
||||
python -m virtualenv --python="$(command -v python2)" .venv
|
||||
# shellcheck source=/dev/null
|
||||
. .venv/bin/activate
|
||||
python -m pip install -U pip virtualenv
|
||||
python -m pip install -r requirements.txt
|
||||
}
|
||||
|
||||
publicIpFromInterface() {
|
||||
echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint."
|
||||
DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')"
|
||||
ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b')
|
||||
export ENDPOINT=$ENDPOINT
|
||||
echo "Using ${ENDPOINT} as the endpoint"
|
||||
}
|
||||
|
||||
publicIpFromMetadata() {
|
||||
if curl -s http://169.254.169.254/metadata/v1/vendor-data | grep DigitalOcean >/dev/null; then
|
||||
ENDPOINT="$(curl -s http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)"
|
||||
elif test "$(curl -s http://169.254.169.254/latest/meta-data/services/domain)" = "amazonaws.com"; then
|
||||
ENDPOINT="$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)"
|
||||
elif host -t A -W 10 metadata.google.internal 127.0.0.53 >/dev/null; then
|
||||
ENDPOINT="$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")"
|
||||
elif test "$(curl -s -H Metadata:true 'http://169.254.169.254/metadata/instance/compute/publisher/?api-version=2017-04-02&format=text')" = "Canonical"; then
|
||||
ENDPOINT="$(curl -H Metadata:true 'http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-04-02&format=text')"
|
||||
fi
|
||||
|
||||
if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then
|
||||
export ENDPOINT=$ENDPOINT
|
||||
echo "Using ${ENDPOINT} as the endpoint"
|
||||
else
|
||||
publicIpFromInterface
|
||||
fi
|
||||
}
|
||||
|
||||
deployAlgo() {
|
||||
getAlgo
|
||||
|
||||
cd /opt/algo
|
||||
# shellcheck source=/dev/null
|
||||
. .venv/bin/activate
|
||||
|
||||
export HOME=/root
|
||||
export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp
|
||||
export ANSIBLE_REMOTE_TEMP=/root/.ansible/tmp
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
ansible-playbook main.yml \
|
||||
-e provider=local \
|
||||
-e "ondemand_cellular=${ONDEMAND_CELLULAR}" \
|
||||
-e "ondemand_wifi=${ONDEMAND_WIFI}" \
|
||||
-e "ondemand_wifi_exclude=${ONDEMAND_WIFI_EXCLUDE}" \
|
||||
-e "windows=${WINDOWS}" \
|
||||
-e "store_cakey=${STORE_CAKEY}" \
|
||||
-e "local_dns=${LOCAL_DNS}" \
|
||||
-e "ssh_tunneling=${SSH_TUNNELING}" \
|
||||
-e "endpoint=$ENDPOINT" \
|
||||
-e "users=$(echo "$USERS" | jq -Rc 'split(",")')" \
|
||||
-e server=localhost \
|
||||
-e ssh_user=root \
|
||||
-e "${EXTRA_VARS}" \
|
||||
--skip-tags debug ${ANSIBLE_EXTRA_ARGS} |
|
||||
tee /var/log/algo.log
|
||||
}
|
||||
|
||||
if test "$METHOD" = "cloud"; then
|
||||
publicIpFromMetadata
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
deployAlgo
|
619
library/scaleway_compute.py
Normal file
619
library/scaleway_compute.py
Normal file
|
@ -0,0 +1,619 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Scaleway Compute management module
|
||||
#
|
||||
# Copyright (C) 2018 Online SAS.
|
||||
# https://www.scaleway.com
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: scaleway_compute
|
||||
short_description: Scaleway compute management module
|
||||
version_added: "2.6"
|
||||
author: Remy Leone (@sieben)
|
||||
description:
|
||||
- "This module manages compute instances on Scaleway."
|
||||
extends_documentation_fragment: scaleway
|
||||
|
||||
options:
|
||||
|
||||
enable_ipv6:
|
||||
description:
|
||||
- Enable public IPv6 connectivity on the instance
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
boot_type:
|
||||
description:
|
||||
- Boot method
|
||||
default: bootscript
|
||||
choices:
|
||||
- bootscript
|
||||
- local
|
||||
|
||||
image:
|
||||
description:
|
||||
- Image identifier used to start the instance with
|
||||
required: true
|
||||
|
||||
name:
|
||||
description:
|
||||
- Name of the instance
|
||||
|
||||
organization:
|
||||
description:
|
||||
- Organization identifier
|
||||
required: true
|
||||
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the instance.
|
||||
default: present
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
- running
|
||||
- restarted
|
||||
- stopped
|
||||
|
||||
tags:
|
||||
description:
|
||||
- List of tags to apply to the instance (5 max)
|
||||
required: false
|
||||
default: []
|
||||
|
||||
region:
|
||||
description:
|
||||
- Scaleway compute zone
|
||||
required: true
|
||||
choices:
|
||||
- ams1
|
||||
- EMEA-NL-EVS
|
||||
- par1
|
||||
- EMEA-FR-PAR1
|
||||
|
||||
commercial_type:
|
||||
description:
|
||||
- Commercial name of the compute node
|
||||
required: true
|
||||
choices:
|
||||
- ARM64-2GB
|
||||
- ARM64-4GB
|
||||
- ARM64-8GB
|
||||
- ARM64-16GB
|
||||
- ARM64-32GB
|
||||
- ARM64-64GB
|
||||
- ARM64-128GB
|
||||
- C1
|
||||
- C2S
|
||||
- C2M
|
||||
- C2L
|
||||
- START1-XS
|
||||
- START1-S
|
||||
- START1-M
|
||||
- START1-L
|
||||
- X64-15GB
|
||||
- X64-30GB
|
||||
- X64-60GB
|
||||
- X64-120GB
|
||||
|
||||
wait:
|
||||
description:
|
||||
- Wait for the instance to reach its desired state before returning.
|
||||
type: bool
|
||||
default: 'no'
|
||||
|
||||
wait_timeout:
|
||||
description:
|
||||
- Time to wait for the server to reach the expected state
|
||||
required: false
|
||||
default: 300
|
||||
|
||||
wait_sleep_time:
|
||||
description:
|
||||
- Time to wait before every attempt to check the state of the server
|
||||
required: false
|
||||
default: 3
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a server
|
||||
scaleway_compute:
|
||||
name: foobar
|
||||
state: present
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
tags:
|
||||
- test
|
||||
- www
|
||||
|
||||
- name: Destroy it right after
|
||||
scaleway_compute:
|
||||
name: foobar
|
||||
state: absent
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
'''
|
||||
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
|
||||
from ansible.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
|
||||
|
||||
SCALEWAY_COMMERCIAL_TYPES = [
|
||||
|
||||
# Virtual ARM64 compute instance
|
||||
'ARM64-2GB',
|
||||
'ARM64-4GB',
|
||||
'ARM64-8GB',
|
||||
'ARM64-16GB',
|
||||
'ARM64-32GB',
|
||||
'ARM64-64GB',
|
||||
'ARM64-128GB',
|
||||
|
||||
# Baremetal
|
||||
'C1', # ARM64 (4 cores) - 2GB
|
||||
'C2S', # X86-64 (4 cores) - 8GB
|
||||
'C2M', # X86-64 (8 cores) - 16GB
|
||||
'C2L', # x86-64 (8 cores) - 32 GB
|
||||
|
||||
# Virtual X86-64 compute instance
|
||||
'START1-XS', # Starter X86-64 (1 core) - 1GB - 25 GB NVMe
|
||||
'START1-S', # Starter X86-64 (2 cores) - 2GB - 50 GB NVMe
|
||||
'START1-M', # Starter X86-64 (4 cores) - 4GB - 100 GB NVMe
|
||||
'START1-L', # Starter X86-64 (8 cores) - 8GB - 200 GB NVMe
|
||||
'X64-15GB',
|
||||
'X64-30GB',
|
||||
'X64-60GB',
|
||||
'X64-120GB',
|
||||
]
|
||||
|
||||
SCALEWAY_SERVER_STATES = (
|
||||
'stopped',
|
||||
'stopping',
|
||||
'starting',
|
||||
'running',
|
||||
'locked'
|
||||
)
|
||||
|
||||
SCALEWAY_TRANSITIONS_STATES = (
|
||||
"stopping",
|
||||
"starting",
|
||||
"pending"
|
||||
)
|
||||
|
||||
|
||||
def fetch_state(compute_api, server):
|
||||
compute_api.module.debug("fetch_state of server: %s" % server["id"])
|
||||
response = compute_api.get(path="servers/%s" % server["id"])
|
||||
|
||||
if response.status_code == 404:
|
||||
return "absent"
|
||||
|
||||
if not response.ok:
|
||||
msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
try:
|
||||
compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
|
||||
return response.json["server"]["state"]
|
||||
except KeyError:
|
||||
compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
|
||||
|
||||
|
||||
def wait_to_complete_state_transition(compute_api, server):
|
||||
wait = compute_api.module.params["wait"]
|
||||
if not wait:
|
||||
return
|
||||
wait_timeout = compute_api.module.params["wait_timeout"]
|
||||
wait_sleep_time = compute_api.module.params["wait_sleep_time"]
|
||||
|
||||
start = datetime.datetime.utcnow()
|
||||
end = start + datetime.timedelta(seconds=wait_timeout)
|
||||
while datetime.datetime.utcnow() < end:
|
||||
compute_api.module.debug("We are going to wait for the server to finish its transition")
|
||||
if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
|
||||
compute_api.module.debug("It seems that the server is not in transition anymore.")
|
||||
compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
|
||||
break
|
||||
time.sleep(wait_sleep_time)
|
||||
else:
|
||||
compute_api.module.fail_json(msg="Server takes too long to finish its transition")
|
||||
|
||||
|
||||
def create_server(compute_api, server):
|
||||
compute_api.module.debug("Starting a create_server")
|
||||
target_server = None
|
||||
response = compute_api.post(path="servers",
|
||||
data={"enable_ipv6": server["enable_ipv6"],
|
||||
"boot_type": server["boot_type"],
|
||||
"tags": server["tags"],
|
||||
"commercial_type": server["commercial_type"],
|
||||
"image": server["image"],
|
||||
"name": server["name"],
|
||||
"organization": server["organization"]})
|
||||
|
||||
if not response.ok:
|
||||
msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
try:
|
||||
target_server = response.json["server"]
|
||||
except KeyError:
|
||||
compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
|
||||
return target_server
|
||||
|
||||
|
||||
def restart_server(compute_api, server):
|
||||
return perform_action(compute_api=compute_api, server=server, action="reboot")
|
||||
|
||||
|
||||
def stop_server(compute_api, server):
|
||||
return perform_action(compute_api=compute_api, server=server, action="poweroff")
|
||||
|
||||
|
||||
def start_server(compute_api, server):
|
||||
return perform_action(compute_api=compute_api, server=server, action="poweron")
|
||||
|
||||
|
||||
def perform_action(compute_api, server, action):
|
||||
response = compute_api.post(path="servers/%s/action" % server["id"],
|
||||
data={"action": action})
|
||||
if not response.ok:
|
||||
msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=server)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def remove_server(compute_api, server):
|
||||
compute_api.module.debug("Starting remove server strategy")
|
||||
response = compute_api.delete(path="servers/%s" % server["id"])
|
||||
if not response.ok:
|
||||
msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=server)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def present_strategy(compute_api, wished_server):
|
||||
compute_api.module.debug("Starting present strategy")
|
||||
changed = False
|
||||
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
|
||||
|
||||
if not query_results:
|
||||
changed = True
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "A server would be created."}
|
||||
|
||||
target_server = create_server(compute_api=compute_api, server=wished_server)
|
||||
else:
|
||||
target_server = query_results[0]
|
||||
|
||||
if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
|
||||
wished_server=wished_server):
|
||||
changed = True
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
return changed, target_server
|
||||
|
||||
|
||||
def absent_strategy(compute_api, wished_server):
|
||||
compute_api.module.debug("Starting absent strategy")
|
||||
changed = False
|
||||
target_server = None
|
||||
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
|
||||
|
||||
if not query_results:
|
||||
return changed, {"status": "Server already absent."}
|
||||
else:
|
||||
target_server = query_results[0]
|
||||
|
||||
changed = True
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s would be made absent." % target_server["id"]}
|
||||
|
||||
# A server MUST be stopped to be deleted.
|
||||
while not fetch_state(compute_api=compute_api, server=target_server) == "stopped":
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
response = stop_server(compute_api=compute_api, server=target_server)
|
||||
|
||||
if not response.ok:
|
||||
err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
|
||||
response.json)
|
||||
compute_api.module.fail_json(msg=err_msg)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
|
||||
response = remove_server(compute_api=compute_api, server=target_server)
|
||||
|
||||
if not response.ok:
|
||||
err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=err_msg)
|
||||
|
||||
return changed, {"status": "Server %s deleted" % target_server["id"]}
|
||||
|
||||
|
||||
def running_strategy(compute_api, wished_server):
|
||||
compute_api.module.debug("Starting running strategy")
|
||||
changed = False
|
||||
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
|
||||
|
||||
if not query_results:
|
||||
changed = True
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "A server would be created before being run."}
|
||||
|
||||
target_server = create_server(compute_api=compute_api, server=wished_server)
|
||||
else:
|
||||
target_server = query_results[0]
|
||||
|
||||
if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
|
||||
wished_server=wished_server):
|
||||
changed = True
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
current_state = fetch_state(compute_api=compute_api, server=target_server)
|
||||
if current_state not in ("running", "starting"):
|
||||
compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
|
||||
changed = True
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
|
||||
|
||||
response = start_server(compute_api=compute_api, server=target_server)
|
||||
if not response.ok:
|
||||
msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
return changed, target_server
|
||||
|
||||
|
||||
def stop_strategy(compute_api, wished_server):
|
||||
compute_api.module.debug("Starting stop strategy")
|
||||
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
|
||||
|
||||
changed = False
|
||||
|
||||
if not query_results:
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "A server would be created before being stopped."}
|
||||
|
||||
target_server = create_server(compute_api=compute_api, server=wished_server)
|
||||
changed = True
|
||||
else:
|
||||
target_server = query_results[0]
|
||||
|
||||
compute_api.module.debug("stop_strategy: Servers are found.")
|
||||
|
||||
if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
|
||||
wished_server=wished_server):
|
||||
changed = True
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {
|
||||
"status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
|
||||
current_state = fetch_state(compute_api=compute_api, server=target_server)
|
||||
if current_state not in ("stopped",):
|
||||
compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
|
||||
|
||||
changed = True
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s would be stopped." % target_server["id"]}
|
||||
|
||||
response = stop_server(compute_api=compute_api, server=target_server)
|
||||
compute_api.module.debug(response.json)
|
||||
compute_api.module.debug(response.ok)
|
||||
|
||||
if not response.ok:
|
||||
msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
return changed, target_server
|
||||
|
||||
|
||||
def restart_strategy(compute_api, wished_server):
|
||||
compute_api.module.debug("Starting restart strategy")
|
||||
changed = False
|
||||
query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
|
||||
|
||||
if not query_results:
|
||||
changed = True
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "A server would be created before being rebooted."}
|
||||
|
||||
target_server = create_server(compute_api=compute_api, server=wished_server)
|
||||
else:
|
||||
target_server = query_results[0]
|
||||
|
||||
if server_attributes_should_be_changed(compute_api=compute_api,
|
||||
target_server=target_server,
|
||||
wished_server=wished_server):
|
||||
changed = True
|
||||
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {
|
||||
"status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
changed = True
|
||||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
|
||||
if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
|
||||
response = restart_server(compute_api=compute_api, server=target_server)
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
if not response.ok:
|
||||
msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
|
||||
response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
|
||||
response = restart_server(compute_api=compute_api, server=target_server)
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
if not response.ok:
|
||||
msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
|
||||
response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
return changed, target_server
|
||||
|
||||
|
||||
state_strategy = {
|
||||
"present": present_strategy,
|
||||
"restarted": restart_strategy,
|
||||
"stopped": stop_strategy,
|
||||
"running": running_strategy,
|
||||
"absent": absent_strategy
|
||||
}
|
||||
|
||||
|
||||
def find(compute_api, wished_server, per_page=1):
|
||||
compute_api.module.debug("Getting inside find")
|
||||
# Only the name attribute is accepted in the Compute query API
|
||||
url = 'servers?name=%s&per_page=%d' % (urlquote(wished_server["name"]), per_page)
|
||||
response = compute_api.get(url)
|
||||
|
||||
if not response.ok:
|
||||
msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
search_results = response.json["servers"]
|
||||
|
||||
return search_results
|
||||
|
||||
|
||||
PATCH_MUTABLE_SERVER_ATTRIBUTES = (
|
||||
"ipv6",
|
||||
"tags",
|
||||
"name",
|
||||
"dynamic_ip_required",
|
||||
)
|
||||
|
||||
|
||||
def server_attributes_should_be_changed(compute_api, target_server, wished_server):
|
||||
compute_api.module.debug("Checking if server attributes should be changed")
|
||||
compute_api.module.debug("Current Server: %s" % target_server)
|
||||
compute_api.module.debug("Wished Server: %s" % wished_server)
|
||||
debug_dict = dict((x, (target_server[x], wished_server[x]))
|
||||
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
|
||||
if x in target_server and x in wished_server)
|
||||
compute_api.module.debug("Debug dict %s" % debug_dict)
|
||||
|
||||
try:
|
||||
return any([target_server[x] != wished_server[x]
|
||||
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
|
||||
if x in target_server and x in wished_server])
|
||||
except AttributeError:
|
||||
compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
|
||||
|
||||
|
||||
def server_change_attributes(compute_api, target_server, wished_server):
|
||||
compute_api.module.debug("Starting patching server attributes")
|
||||
patch_payload = dict((x, wished_server[x])
|
||||
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
|
||||
if x in wished_server and x in target_server)
|
||||
response = compute_api.patch(path="servers/%s" % target_server["id"],
|
||||
data=patch_payload)
|
||||
if not response.ok:
|
||||
msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def core(module):
|
||||
region = module.params["region"]
|
||||
wished_server = {
|
||||
"state": module.params["state"],
|
||||
"image": module.params["image"],
|
||||
"name": module.params["name"],
|
||||
"commercial_type": module.params["commercial_type"],
|
||||
"enable_ipv6": module.params["enable_ipv6"],
|
||||
"boot_type": module.params["boot_type"],
|
||||
"tags": module.params["tags"],
|
||||
"organization": module.params["organization"]
|
||||
}
|
||||
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||
|
||||
compute_api = Scaleway(module=module)
|
||||
|
||||
changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
|
||||
module.exit_json(changed=changed, msg=summary)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = scaleway_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
image=dict(required=True),
|
||||
name=dict(),
|
||||
region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
|
||||
commercial_type=dict(required=True, choices=SCALEWAY_COMMERCIAL_TYPES),
|
||||
enable_ipv6=dict(default=False, type="bool"),
|
||||
boot_type=dict(default="bootscript"),
|
||||
state=dict(choices=state_strategy.keys(), default='present'),
|
||||
tags=dict(type="list", default=[]),
|
||||
organization=dict(required=True),
|
||||
wait=dict(type="bool", default=False),
|
||||
wait_timeout=dict(type="int", default=300),
|
||||
wait_sleep_time=dict(type="int", default=3),
|
||||
))
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
core(module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
19
main.yml
19
main.yml
|
@ -1,4 +1,23 @@
|
|||
---
|
||||
- hosts: localhost
|
||||
become: false
|
||||
tasks:
|
||||
- name: Ensure the requirements installed
|
||||
debug:
|
||||
msg: "{{ '' | ipaddr }}"
|
||||
ignore_errors: true
|
||||
no_log: true
|
||||
register: ipaddr
|
||||
|
||||
- name: Verify Ansible meets Algo VPN requirements.
|
||||
assert:
|
||||
that:
|
||||
- ansible_version.full is version('2.7.10', '==')
|
||||
- not ipaddr.failed
|
||||
msg: >
|
||||
You must update the requirements to use this version of Algo.
|
||||
Try to run python -m pip install -U -r requirements.txt
|
||||
|
||||
- name: Include prompts playbook
|
||||
import_playbook: input.yml
|
||||
|
||||
|
|
|
@ -1,29 +1,34 @@
|
|||
---
|
||||
- name: Display the invocation environment
|
||||
local_action:
|
||||
module: shell
|
||||
- block:
|
||||
- name: Display the invocation environment
|
||||
shell: >
|
||||
./algo-showenv.sh \
|
||||
'algo_provider "{{ algo_provider }}"' \
|
||||
{% if ipsec_enabled %}
|
||||
'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \
|
||||
'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \
|
||||
'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \
|
||||
'algo_windows "{{ algo_windows }}"' \
|
||||
{% endif %}
|
||||
'algo_local_dns "{{ algo_local_dns }}"' \
|
||||
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
|
||||
'algo_windows "{{ algo_windows }}"' \
|
||||
'wireguard_enabled "{{ wireguard_enabled }}"' \
|
||||
'dns_encryption "{{ dns_encryption }}"' \
|
||||
> /dev/tty
|
||||
tags: debug
|
||||
tags: debug
|
||||
|
||||
- name: Install the requirements
|
||||
local_action:
|
||||
module: pip
|
||||
state: latest
|
||||
name:
|
||||
- pyOpenSSL
|
||||
- jinja2==2.8
|
||||
- segno
|
||||
tags: always
|
||||
- name: Install the requirements
|
||||
pip:
|
||||
state: latest
|
||||
name:
|
||||
- pyOpenSSL
|
||||
- jinja2==2.8
|
||||
- segno
|
||||
tags:
|
||||
- always
|
||||
- skip_ansible_lint
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
|
||||
- name: Generate the SSH private key
|
||||
openssl_privatekey:
|
||||
|
|
6
playbooks/rescue.yml
Normal file
6
playbooks/rescue.yml
Normal file
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
- debug:
|
||||
var: fail_hint
|
||||
|
||||
- name: Fail the installation
|
||||
fail:
|
|
@ -1 +1,2 @@
|
|||
ansible==2.5.2
|
||||
ansible==2.7.10
|
||||
netaddr
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
---
|
||||
|
||||
- name: restart strongswan
|
||||
service: name=strongswan state=restarted
|
||||
|
|
|
@ -8,13 +8,21 @@
|
|||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ prerequisites }}"
|
||||
register: result
|
||||
until: result is succeeded
|
||||
retries: 10
|
||||
delay: 3
|
||||
|
||||
- name: Install strongSwan
|
||||
package: name=strongswan state=present
|
||||
register: result
|
||||
until: result is succeeded
|
||||
retries: 10
|
||||
delay: 3
|
||||
|
||||
- name: Setup the ipsec config
|
||||
template:
|
||||
src: "roles/vpn/templates/client_ipsec.conf.j2"
|
||||
src: "roles/strongswan/templates/client_ipsec.conf.j2"
|
||||
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf"
|
||||
mode: '0644'
|
||||
with_items:
|
||||
|
@ -24,7 +32,7 @@
|
|||
|
||||
- name: Setup the ipsec secrets
|
||||
template:
|
||||
src: "roles/vpn/templates/client_ipsec.secrets.j2"
|
||||
src: "roles/strongswan/templates/client_ipsec.secrets.j2"
|
||||
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets"
|
||||
mode: '0600'
|
||||
with_items:
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
prerequisites:
|
||||
- epel-release
|
||||
configs_prefix: /etc/strongswan
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
prerequisites: []
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
prerequisites:
|
||||
- libstrongswan-standard-plugins
|
||||
configs_prefix: /etc
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
prerequisites:
|
||||
- libselinux-python
|
||||
configs_prefix: /etc/strongswan
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
prerequisites: []
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
prerequisites:
|
||||
- libstrongswan-standard-plugins
|
||||
configs_prefix: /etc
|
||||
|
|
|
@ -1,47 +1,41 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ azure_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ azure_regions[default_region | int - 1]['name'] }}{% endif %}
|
||||
|
||||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ azure_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ azure_regions[default_region | int - 1]['name'] }}{% endif %}
|
||||
- name: Create AlgoVPN Server
|
||||
azure_rm_deployment:
|
||||
state: present
|
||||
deployment_name: "{{ algo_server_name }}"
|
||||
template: "{{ lookup('file', role_path + '/files/deployment.json') }}"
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
resource_group_name: "{{ algo_server_name }}"
|
||||
location: "{{ algo_region }}"
|
||||
parameters:
|
||||
sshKeyData:
|
||||
value: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
WireGuardPort:
|
||||
value: "{{ wireguard_port }}"
|
||||
vmSize:
|
||||
value: "{{ cloud_providers.azure.size }}"
|
||||
imageReferenceSku:
|
||||
value: "{{ cloud_providers.azure.image }}"
|
||||
register: azure_rm_deployment
|
||||
|
||||
- name: Create AlgoVPN Server
|
||||
azure_rm_deployment:
|
||||
state: present
|
||||
deployment_name: "{{ algo_server_name }}"
|
||||
template: "{{ lookup('file', 'deployment.json') }}"
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
resource_group_name: "{{ algo_server_name }}"
|
||||
location: "{{ algo_region }}"
|
||||
parameters:
|
||||
sshKeyData:
|
||||
value: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
WireGuardPort:
|
||||
value: "{{ wireguard_port }}"
|
||||
vmSize:
|
||||
value: "{{ cloud_providers.azure.size }}"
|
||||
imageReferenceSku:
|
||||
value: "{{ cloud_providers.azure.image }}"
|
||||
register: azure_rm_deployment
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ azure_rm_deployment.deployment.outputs.publicIPAddresses.value }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ azure_venv }}/lib/python2.7/site-packages/"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ azure_rm_deployment.deployment.outputs.publicIPAddresses.value }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ azure_venv }}/lib/python2.7/site-packages/"
|
||||
|
|
|
@ -10,23 +10,32 @@
|
|||
name:
|
||||
- packaging
|
||||
- requests[security]
|
||||
- azure-mgmt-compute>=2.0.0,<3
|
||||
- azure-mgmt-network>=1.3.0,<2
|
||||
- azure-mgmt-storage>=1.5.0,<2
|
||||
- azure-mgmt-resource>=1.1.0,<2
|
||||
- azure-storage>=0.35.1,<0.36
|
||||
- azure-cli-core>=2.0.12,<3
|
||||
- azure-cli-core==2.0.35
|
||||
- azure-cli-nspkg==3.0.2
|
||||
- azure-common==1.1.11
|
||||
- azure-mgmt-batch==4.1.0
|
||||
- azure-mgmt-compute==2.1.0
|
||||
- azure-mgmt-containerinstance==0.4.0
|
||||
- azure-mgmt-containerregistry==2.0.0
|
||||
- azure-mgmt-containerservice==3.0.1
|
||||
- azure-mgmt-dns==1.2.0
|
||||
- azure-mgmt-keyvault==0.40.0
|
||||
- azure-mgmt-marketplaceordering==0.1.0
|
||||
- azure-mgmt-monitor==0.5.2
|
||||
- azure-mgmt-network==1.7.1
|
||||
- azure-mgmt-nspkg==2.0.0
|
||||
- azure-mgmt-rdbms==1.2.0
|
||||
- azure-mgmt-resource==1.2.2
|
||||
- azure-mgmt-sql==0.7.1
|
||||
- azure-mgmt-storage==1.5.0
|
||||
- azure-mgmt-trafficmanager==0.50.0
|
||||
- azure-mgmt-web==0.32.0
|
||||
- azure-nspkg==2.0.0
|
||||
- azure-storage==0.35.1
|
||||
- msrest==0.4.29
|
||||
- msrestazure==0.4.31
|
||||
- azure-mgmt-dns>=1.0.1,<2
|
||||
- azure-mgmt-keyvault>=0.40.0,<0.41
|
||||
- azure-mgmt-batch>=4.1.0,<5
|
||||
- azure-mgmt-sql>=0.7.1,<0.8
|
||||
- azure-mgmt-web>=0.32.0,<0.33
|
||||
- azure-mgmt-containerservice>=2.0.0,<3.0.0
|
||||
- azure-mgmt-containerregistry>=1.0.1
|
||||
- azure-mgmt-rdbms==1.2.0
|
||||
- azure-mgmt-containerinstance==0.4.0
|
||||
- azure-keyvault==1.0.0a1
|
||||
- azure-graphrbac==0.40.0
|
||||
state: latest
|
||||
virtualenv: "{{ azure_venv }}"
|
||||
virtualenv_python: python2.7
|
||||
|
|
|
@ -1,110 +1,105 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
- name: Set additional facts
|
||||
set_fact:
|
||||
algo_do_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
|
||||
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
|
||||
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- name: Set additional facts
|
||||
set_fact:
|
||||
algo_do_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
|
||||
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
|
||||
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- block:
|
||||
- name: "Delete the existing Algo SSH keys"
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: ssh_keys.changed != true
|
||||
retries: 10
|
||||
delay: 1
|
||||
|
||||
rescue:
|
||||
- name: Collect the fail error
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
||||
- debug: var=ssh_keys
|
||||
|
||||
- fail:
|
||||
msg: "Please, ensure that your API token is not read-only."
|
||||
|
||||
- name: "Upload the SSH key"
|
||||
- block:
|
||||
- name: "Delete the existing Algo SSH keys"
|
||||
digital_ocean:
|
||||
state: present
|
||||
state: absent
|
||||
command: ssh
|
||||
ssh_pub_key: "{{ public_key }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: do_ssh_key
|
||||
register: ssh_keys
|
||||
until: not ssh_keys.changed
|
||||
retries: 10
|
||||
delay: 1
|
||||
|
||||
- name: "Creating a droplet..."
|
||||
rescue:
|
||||
- name: Collect the fail error
|
||||
digital_ocean:
|
||||
state: present
|
||||
command: droplet
|
||||
name: "{{ algo_server_name }}"
|
||||
region_id: "{{ algo_do_region }}"
|
||||
size_id: "{{ cloud_providers.digitalocean.size }}"
|
||||
image_id: "{{ cloud_providers.digitalocean.image }}"
|
||||
ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
|
||||
unique_name: yes
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
ipv6: yes
|
||||
register: do
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ do.droplet.ip_address }}"
|
||||
ansible_ssh_user: root
|
||||
- debug: var=ssh_keys
|
||||
|
||||
- name: Tag the droplet
|
||||
digital_ocean_tag:
|
||||
name: "Environment:Algo"
|
||||
resource_id: "{{ do.droplet.id }}"
|
||||
- fail:
|
||||
msg: "Please, ensure that your API token is not read-only."
|
||||
|
||||
- name: "Upload the SSH key"
|
||||
digital_ocean:
|
||||
state: present
|
||||
command: ssh
|
||||
ssh_pub_key: "{{ public_key }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: do_ssh_key
|
||||
|
||||
- name: "Creating a droplet..."
|
||||
digital_ocean:
|
||||
state: present
|
||||
command: droplet
|
||||
name: "{{ algo_server_name }}"
|
||||
region_id: "{{ algo_do_region }}"
|
||||
size_id: "{{ cloud_providers.digitalocean.size }}"
|
||||
image_id: "{{ cloud_providers.digitalocean.image }}"
|
||||
ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
|
||||
unique_name: yes
|
||||
api_token: "{{ algo_do_token }}"
|
||||
ipv6: yes
|
||||
register: do
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ do.droplet.ip_address }}"
|
||||
ansible_ssh_user: root
|
||||
|
||||
- name: Tag the droplet
|
||||
digital_ocean_tag:
|
||||
name: "Environment:Algo"
|
||||
resource_id: "{{ do.droplet.id }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
state: present
|
||||
|
||||
- block:
|
||||
- name: "Delete the new Algo SSH key"
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
state: present
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: not ssh_keys.changed
|
||||
retries: 10
|
||||
delay: 1
|
||||
|
||||
- block:
|
||||
- name: "Delete the new Algo SSH key"
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: ssh_keys.changed != true
|
||||
retries: 10
|
||||
delay: 1
|
||||
rescue:
|
||||
- name: Collect the fail error
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
||||
rescue:
|
||||
- name: Collect the fail error
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
- debug: var=ssh_keys
|
||||
|
||||
- debug: var=ssh_keys
|
||||
|
||||
- fail:
|
||||
msg: "Please, ensure that your API token is not read-only."
|
||||
environment:
|
||||
PYTHONPATH: "{{ digitalocean_venv }}/lib/python2.7/site-packages/"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- fail:
|
||||
msg: "Please, ensure that your API token is not read-only."
|
||||
environment:
|
||||
PYTHONPATH: "{{ digitalocean_venv }}/lib/python2.7/site-packages/"
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
stack_name: "{{ stack_name }}"
|
||||
state: "present"
|
||||
region: "{{ algo_region }}"
|
||||
template: roles/cloud-ec2/files/stack.yml
|
||||
template: roles/cloud-ec2/files/stack.yaml
|
||||
template_parameters:
|
||||
InstanceTypeParameter: "{{ cloud_providers.ec2.size }}"
|
||||
PublicSSHKeyParameter: "{{ lookup('file', SSH_keys.public) }}"
|
||||
|
|
|
@ -1,48 +1,43 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ aws_regions[_algo_region.user_input | int -1 ]['region_name'] }}
|
||||
{%- else %}{{ aws_regions[default_region | int - 1]['region_name'] }}{% endif %}
|
||||
stack_name: "{{ algo_server_name | replace('.', '-') }}"
|
||||
|
||||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ aws_regions[_algo_region.user_input | int -1 ]['region_name'] }}
|
||||
{%- else %}{{ aws_regions[default_region | int - 1]['region_name'] }}{% endif %}
|
||||
stack_name: "{{ algo_server_name | replace('.', '-') }}"
|
||||
- name: Locate official AMI for region
|
||||
ec2_ami_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
owners: "{{ cloud_providers.ec2.image.owner }}"
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
register: ami_search
|
||||
|
||||
- name: Locate official AMI for region
|
||||
ec2_ami_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
owners: "{{ cloud_providers.ec2.image.owner }}"
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
register: ami_search
|
||||
- import_tasks: encrypt_image.yml
|
||||
when: encrypted
|
||||
|
||||
- import_tasks: encrypt_image.yml
|
||||
when: encrypted
|
||||
- name: Set the ami id as a fact
|
||||
set_fact:
|
||||
ami_image: >-
|
||||
{% if ami_search_encrypted.image_id is defined %}{{ ami_search_encrypted.image_id }}
|
||||
{%- elif search_crypt.images is defined and search_crypt.images|length >= 1 %}{{ (search_crypt.images | sort(attribute='creation_date') | last)['image_id'] }}
|
||||
{%- else %}{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}{% endif %}
|
||||
|
||||
- name: Set the ami id as a fact
|
||||
set_fact:
|
||||
ami_image: >-
|
||||
{% if ami_search_encrypted.image_id is defined %}{{ ami_search_encrypted.image_id }}
|
||||
{%- elif search_crypt.images is defined and search_crypt.images|length >= 1 %}{{ (search_crypt.images | sort(attribute='creation_date') | last)['image_id'] }}
|
||||
{%- else %}{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}{% endif %}
|
||||
- name: Deploy the stack
|
||||
import_tasks: cloudformation.yml
|
||||
|
||||
- name: Deploy the stack
|
||||
import_tasks: cloudformation.yml
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ ec2_venv }}/lib/python2.7/site-packages/"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ ec2_venv }}/lib/python2.7/site-packages/"
|
||||
|
|
|
@ -1,62 +1,57 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
- name: Network configured
|
||||
gce_net:
|
||||
name: "{{ algo_server_name }}"
|
||||
fwname: "{{ algo_server_name }}-fw"
|
||||
allowed: "udp:500,4500,{{ wireguard_port }};tcp:22"
|
||||
state: "present"
|
||||
mode: auto
|
||||
src_range: 0.0.0.0/0
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
|
||||
- name: Network configured
|
||||
gce_net:
|
||||
name: "{{ algo_server_name }}"
|
||||
fwname: "{{ algo_server_name }}-fw"
|
||||
allowed: "udp:500,4500,{{ wireguard_port }};tcp:22"
|
||||
state: "present"
|
||||
mode: auto
|
||||
src_range: 0.0.0.0/0
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
- block:
|
||||
- name: External IP allocated
|
||||
gce_eip:
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region: "{{ algo_region.split('-')[0:2] | join('-') }}"
|
||||
state: present
|
||||
register: gce_eip
|
||||
|
||||
- block:
|
||||
- name: External IP allocated
|
||||
gce_eip:
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region: "{{ algo_region.split('-')[0:2] | join('-') }}"
|
||||
state: present
|
||||
register: gce_eip
|
||||
- name: Set External IP as a fact
|
||||
set_fact:
|
||||
external_ip: "{{ gce_eip.address }}"
|
||||
when: cloud_providers.gce.external_static_ip
|
||||
|
||||
- name: Set External IP as a fact
|
||||
set_fact:
|
||||
external_ip: "{{ gce_eip.address }}"
|
||||
when: cloud_providers.gce.external_static_ip
|
||||
- name: "Creating a new instance..."
|
||||
gce:
|
||||
instance_names: "{{ algo_server_name }}"
|
||||
zone: "{{ algo_region }}"
|
||||
external_ip: "{{ external_ip | default('ephemeral') }}"
|
||||
machine_type: "{{ cloud_providers.gce.size }}"
|
||||
image: "{{ cloud_providers.gce.image }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
|
||||
network: "{{ algo_server_name }}"
|
||||
tags:
|
||||
- "environment-algo"
|
||||
register: google_vm
|
||||
|
||||
- name: "Creating a new instance..."
|
||||
gce:
|
||||
instance_names: "{{ algo_server_name }}"
|
||||
zone: "{{ algo_region }}"
|
||||
external_ip: "{{ external_ip | default('ephemeral') }}"
|
||||
machine_type: "{{ cloud_providers.gce.size }}"
|
||||
image: "{{ cloud_providers.gce.image }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
|
||||
network: "{{ algo_server_name }}"
|
||||
tags:
|
||||
- "environment-algo"
|
||||
register: google_vm
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ gce_venv }}/lib/python2.7/site-packages/"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ gce_venv }}/lib/python2.7/site-packages/"
|
||||
|
|
|
@ -63,5 +63,5 @@
|
|||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _gce_region.user_input is defined and _gce_region.user_input != "" %}{{ gce_regions[_gce_region.user_input | int -1 ] }}
|
||||
{%- elif _gce_region.user_input %}{{ gce_regions[_gce_region.user_input | int -1 ] }}
|
||||
{%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %}
|
||||
|
|
|
@ -1,50 +1,44 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
- name: Create an instance
|
||||
lightsail:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
state: present
|
||||
region: "{{ algo_region }}"
|
||||
zone: "{{ algo_region }}a"
|
||||
blueprint_id: "{{ cloud_providers.lightsail.image }}"
|
||||
bundle_id: "{{ cloud_providers.lightsail.size }}"
|
||||
wait_timeout: 300
|
||||
open_ports:
|
||||
- from_port: 4500
|
||||
to_port: 4500
|
||||
protocol: udp
|
||||
- from_port: 500
|
||||
to_port: 500
|
||||
protocol: udp
|
||||
- from_port: "{{ wireguard_port }}"
|
||||
to_port: "{{ wireguard_port }}"
|
||||
protocol: udp
|
||||
user_data: |
|
||||
#!/bin/bash
|
||||
mkdir -p /home/ubuntu/.ssh/
|
||||
echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" >> /home/ubuntu/.ssh/authorized_keys
|
||||
chown -R ubuntu: /home/ubuntu/.ssh/
|
||||
chmod 0700 /home/ubuntu/.ssh/
|
||||
chmod 0600 /home/ubuntu/.ssh/*
|
||||
test
|
||||
register: algo_instance
|
||||
|
||||
- name: Create an instance
|
||||
lightsail:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
state: present
|
||||
region: "{{ algo_region }}"
|
||||
zone: "{{ algo_region }}a"
|
||||
blueprint_id: "{{ cloud_providers.lightsail.image }}"
|
||||
bundle_id: "{{ cloud_providers.lightsail.size }}"
|
||||
wait_timeout: 300
|
||||
open_ports:
|
||||
- from_port: 4500
|
||||
to_port: 4500
|
||||
protocol: udp
|
||||
- from_port: 500
|
||||
to_port: 500
|
||||
protocol: udp
|
||||
- from_port: "{{ wireguard_port }}"
|
||||
to_port: "{{ wireguard_port }}"
|
||||
protocol: udp
|
||||
user_data: |
|
||||
#!/bin/bash
|
||||
mkdir -p /home/ubuntu/.ssh/
|
||||
echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" >> /home/ubuntu/.ssh/authorized_keys
|
||||
chown -R ubuntu: /home/ubuntu/.ssh/
|
||||
chmod 0700 /home/ubuntu/.ssh/
|
||||
chmod 0600 /home/ubuntu/.ssh/*
|
||||
test
|
||||
register: algo_instance
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ lightsail_venv }}/lib/python2.7/site-packages/"
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ lightsail_venv }}/lib/python2.7/site-packages/"
|
||||
|
|
|
@ -57,5 +57,5 @@
|
|||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- elif _algo_region.user_input %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ lightsail_regions[default_region | int - 1]['name'] }}{% endif %}
|
||||
|
|
|
@ -1,89 +1,82 @@
|
|||
---
|
||||
- fail:
|
||||
msg: "OpenStack credentials are not set. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh)"
|
||||
when: lookup('env', 'OS_AUTH_URL') == ""
|
||||
when: lookup('env', 'OS_AUTH_URL')|length <= 0
|
||||
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
- name: Security group created
|
||||
os_security_group:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ algo_server_name }}-security_group"
|
||||
description: AlgoVPN security group
|
||||
register: os_security_group
|
||||
|
||||
- block:
|
||||
- name: Security group created
|
||||
os_security_group:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ algo_server_name }}-security_group"
|
||||
description: AlgoVPN security group
|
||||
register: os_security_group
|
||||
- name: Security rules created
|
||||
os_security_group_rule:
|
||||
state: "{{ state|default('present') }}"
|
||||
security_group: "{{ os_security_group.id }}"
|
||||
protocol: "{{ item.proto }}"
|
||||
port_range_min: "{{ item.port_min }}"
|
||||
port_range_max: "{{ item.port_max }}"
|
||||
remote_ip_prefix: "{{ item.range }}"
|
||||
with_items:
|
||||
- { proto: tcp, port_min: 22, port_max: 22, range: 0.0.0.0/0 }
|
||||
- { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: "{{ wireguard_port }}", port_max: "{{ wireguard_port }}", range: 0.0.0.0/0 }
|
||||
|
||||
- name: Security rules created
|
||||
os_security_group_rule:
|
||||
state: "{{ state|default('present') }}"
|
||||
security_group: "{{ os_security_group.id }}"
|
||||
protocol: "{{ item.proto }}"
|
||||
port_range_min: "{{ item.port_min }}"
|
||||
port_range_max: "{{ item.port_max }}"
|
||||
remote_ip_prefix: "{{ item.range }}"
|
||||
with_items:
|
||||
- { proto: tcp, port_min: 22, port_max: 22, range: 0.0.0.0/0 }
|
||||
- { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: "{{ wireguard_port }}", port_max: "{{ wireguard_port }}", range: 0.0.0.0/0 }
|
||||
- name: Keypair created
|
||||
os_keypair:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ SSH_keys.comment|regex_replace('@', '_') }}"
|
||||
public_key_file: "{{ SSH_keys.public }}"
|
||||
register: os_keypair
|
||||
|
||||
- name: Keypair created
|
||||
os_keypair:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ SSH_keys.comment|regex_replace('@', '_') }}"
|
||||
public_key_file: "{{ SSH_keys.public }}"
|
||||
register: os_keypair
|
||||
- name: Gather facts about flavors
|
||||
os_flavor_facts:
|
||||
ram: "{{ cloud_providers.openstack.flavor_ram }}"
|
||||
|
||||
- name: Gather facts about flavors
|
||||
os_flavor_facts:
|
||||
ram: "{{ cloud_providers.openstack.flavor_ram }}"
|
||||
- name: Gather facts about images
|
||||
os_image_facts:
|
||||
image: "{{ cloud_providers.openstack.image }}"
|
||||
|
||||
- name: Gather facts about images
|
||||
os_image_facts:
|
||||
image: "{{ cloud_providers.openstack.image }}"
|
||||
- name: Gather facts about public networks
|
||||
os_networks_facts:
|
||||
|
||||
- name: Gather facts about public networks
|
||||
os_networks_facts:
|
||||
- name: Set the network as a fact
|
||||
set_fact:
|
||||
public_network_id: "{{ item.id }}"
|
||||
when:
|
||||
- item['router:external']|default(omit)
|
||||
- item['admin_state_up']|default(omit)
|
||||
- item['status'] == 'ACTIVE'
|
||||
with_items: "{{ openstack_networks }}"
|
||||
|
||||
- name: Set the network as a fact
|
||||
set_fact:
|
||||
public_network_id: "{{ item.id }}"
|
||||
when:
|
||||
- item['router:external']|default(omit)
|
||||
- item['admin_state_up']|default(omit)
|
||||
- item['status'] == 'ACTIVE'
|
||||
with_items: "{{ openstack_networks }}"
|
||||
- name: Set facts
|
||||
set_fact:
|
||||
flavor_id: "{{ (openstack_flavors | sort(attribute='ram'))[0]['id'] }}"
|
||||
image_id: "{{ openstack_image['id'] }}"
|
||||
keypair_name: "{{ os_keypair.key.name }}"
|
||||
security_group_name: "{{ os_security_group['secgroup']['name'] }}"
|
||||
|
||||
- name: Set facts
|
||||
set_fact:
|
||||
flavor_id: "{{ (openstack_flavors | sort(attribute='ram'))[0]['id'] }}"
|
||||
image_id: "{{ openstack_image['id'] }}"
|
||||
keypair_name: "{{ os_keypair.key.name }}"
|
||||
security_group_name: "{{ os_security_group['secgroup']['name'] }}"
|
||||
- name: Server created
|
||||
os_server:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
image: "{{ image_id }}"
|
||||
flavor: "{{ flavor_id }}"
|
||||
key_name: "{{ keypair_name }}"
|
||||
security_groups: "{{ security_group_name }}"
|
||||
nics:
|
||||
- net-id: "{{ public_network_id }}"
|
||||
register: os_server
|
||||
|
||||
- name: Server created
|
||||
os_server:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
image: "{{ image_id }}"
|
||||
flavor: "{{ flavor_id }}"
|
||||
key_name: "{{ keypair_name }}"
|
||||
security_groups: "{{ security_group_name }}"
|
||||
nics:
|
||||
- net-id: "{{ public_network_id }}"
|
||||
register: os_server
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ os_server['openstack']['public_v4'] }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ openstack_venv }}/lib/python2.7/site-packages/"
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ os_server['openstack']['public_v4'] }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ openstack_venv }}/lib/python2.7/site-packages/"
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
- name: Set image id as a fact
|
||||
set_fact:
|
||||
image_id: "{{ item.id }}"
|
||||
no_log: true
|
||||
when:
|
||||
- cloud_providers.scaleway.image == item.name
|
||||
- cloud_providers.scaleway.arch == item.arch
|
||||
- server_disk_size == item.root_volume.size
|
||||
with_items: "{{ outer_item['json']['images'] }}"
|
|
@ -1,140 +1,46 @@
|
|||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
- name: Gather Scaleway organizations facts
|
||||
scaleway_organization_facts:
|
||||
|
||||
- name: Set disk size
|
||||
- name: Get images
|
||||
scaleway_image_facts:
|
||||
region: "{{ algo_region }}"
|
||||
|
||||
- name: Set cloud specific facts
|
||||
set_fact:
|
||||
server_disk_size: 50000000000
|
||||
organization_id: "{{ scaleway_organization_facts[0]['id'] }}"
|
||||
images: >-
|
||||
[{% for i in scaleway_image_facts -%}
|
||||
{% if i.name == cloud_providers.scaleway.image and
|
||||
i.arch == cloud_providers.scaleway.arch -%}
|
||||
'{{ i.id }}'{% if not loop.last %},{% endif %}
|
||||
{%- endif -%}
|
||||
{%- endfor -%}]
|
||||
|
||||
- name: Check server size
|
||||
set_fact:
|
||||
server_disk_size: 25000000000
|
||||
when: cloud_providers.scaleway.size == "START1-XS"
|
||||
|
||||
- name: Check if server exists
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/servers"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_servers
|
||||
|
||||
- name: Set server id as a fact
|
||||
set_fact:
|
||||
server_id: "{{ item.id }}"
|
||||
no_log: true
|
||||
when: algo_server_name == item.name
|
||||
with_items: "{{ scaleway_servers.json.servers }}"
|
||||
|
||||
- name: Create a server if it doesn't exist
|
||||
block:
|
||||
- name: Get the organization id
|
||||
uri:
|
||||
url: https://account.cloud.online.net/organizations
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_organizations
|
||||
|
||||
- name: Set organization id as a fact
|
||||
set_fact:
|
||||
organization_id: "{{ item.id }}"
|
||||
no_log: true
|
||||
when: algo_scaleway_org == item.name
|
||||
with_items: "{{ scaleway_organizations.json.organizations }}"
|
||||
|
||||
- name: Get total count of images
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/images"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_pages
|
||||
|
||||
- name: Get images
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/images?per_page=100&page={{ item }}"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_images
|
||||
with_sequence: start=1 end={{ ((scaleway_pages.x_total_count|int / 100)| round )|int }}
|
||||
|
||||
- name: Set image id as a fact
|
||||
include_tasks: image_facts.yml
|
||||
with_items: "{{ scaleway_images['results'] }}"
|
||||
loop_control:
|
||||
loop_var: outer_item
|
||||
|
||||
- name: Create a server
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/servers/"
|
||||
method: POST
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
body:
|
||||
organization: "{{ organization_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
image: "{{ image_id }}"
|
||||
commercial_type: "{{cloud_providers.scaleway.size }}"
|
||||
enable_ipv6: true
|
||||
boot_type: local
|
||||
tags:
|
||||
- Environment:Algo
|
||||
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||
status_code: 201
|
||||
body_format: json
|
||||
register: algo_instance
|
||||
|
||||
- name: Set server id as a fact
|
||||
set_fact:
|
||||
server_id: "{{ algo_instance.json.server.id }}"
|
||||
when: server_id is not defined
|
||||
|
||||
- name: Power on the server
|
||||
uri:
|
||||
url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}/action
|
||||
method: POST
|
||||
headers:
|
||||
Content-Type: application/json
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
body:
|
||||
action: poweron
|
||||
status_code: 202
|
||||
body_format: json
|
||||
ignore_errors: true
|
||||
no_log: true
|
||||
|
||||
- name: Wait for the server to become running
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
until:
|
||||
- algo_instance.json.server.state is defined
|
||||
- algo_instance.json.server.state == "running"
|
||||
retries: 20
|
||||
delay: 30
|
||||
- name: Create a server
|
||||
scaleway_compute:
|
||||
name: "{{ algo_server_name }}"
|
||||
enable_ipv6: true
|
||||
boot_type: local
|
||||
state: running
|
||||
image: "{{ images[0] }}"
|
||||
organization: "{{ organization_id }}"
|
||||
region: "{{ algo_region }}"
|
||||
commercial_type: "{{ cloud_providers.scaleway.size }}"
|
||||
wait: true
|
||||
tags:
|
||||
- Environment:Algo
|
||||
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||
register: algo_instance
|
||||
until: algo_instance.msg.public_ip
|
||||
retries: 3
|
||||
delay: 3
|
||||
environment:
|
||||
SCW_TOKEN: "{{ algo_scaleway_token }}"
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['json']['server']['public_ip']['address'] }}"
|
||||
ansible_ssh_user: root
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance.msg.public_ip.address }}"
|
||||
ansible_ssh_user: root
|
||||
|
|
|
@ -1,16 +1,12 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/)
|
||||
Enter your auth token (https://trailofbits.github.io/algo/cloud-scaleway.html)
|
||||
echo: false
|
||||
register: _scaleway_token
|
||||
when: scaleway_token is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your organization name (https://cloud.scaleway.com/#/billing)
|
||||
register: _scaleway_org
|
||||
when: scaleway_org is undefined
|
||||
when:
|
||||
- scaleway_token is undefined
|
||||
- lookup('env','SCW_TOKEN')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
|
@ -26,9 +22,8 @@
|
|||
|
||||
- name: Set scaleway facts
|
||||
set_fact:
|
||||
algo_scaleway_token: "{{ scaleway_token | default(_scaleway_token.user_input) }}"
|
||||
algo_scaleway_org: "{{ scaleway_org | default(_scaleway_org.user_input|default(omit)) }}"
|
||||
algo_scaleway_token: "{{ scaleway_token | default(_scaleway_token.user_input) | default(lookup('env','SCW_TOKEN'), true) }}"
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ scaleway_regions[_algo_region.user_input | int -1 ]['alias'] }}
|
||||
{%- elif _algo_region.user_input %}{{ scaleway_regions[_algo_region.user_input | int -1 ]['alias'] }}
|
||||
{%- else %}{{ scaleway_regions.0.alias }}{% endif %}
|
||||
|
|
|
@ -1,20 +1,43 @@
|
|||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
---
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Upload the SSH key
|
||||
vr_ssh_key:
|
||||
vultr_ssh_key:
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
ssh_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
register: ssh_key
|
||||
|
||||
- name: Creating a firewall group
|
||||
vultr_firewall_group:
|
||||
name: "{{ algo_server_name }}"
|
||||
|
||||
- name: Creating firewall rules
|
||||
vultr_firewall_rule:
|
||||
group: "{{ algo_server_name }}"
|
||||
protocol: "{{ item.protocol }}"
|
||||
port: "{{ item.port }}"
|
||||
ip_version: "{{ item.ip }}"
|
||||
cidr: "{{ item.cidr }}"
|
||||
with_items:
|
||||
- { protocol: tcp, port: 22, ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: tcp, port: 22, ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: 500, ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: udp, port: 500, ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: 4500, ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: udp, port: 4500, ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" }
|
||||
|
||||
- name: Creating a server
|
||||
vr_server:
|
||||
vultr_server:
|
||||
name: "{{ algo_server_name }}"
|
||||
hostname: "{{ algo_server_name }}"
|
||||
os: "{{ cloud_providers.vultr.os }}"
|
||||
plan: "{{ cloud_providers.vultr.size }}"
|
||||
region: "{{ algo_vultr_region }}"
|
||||
firewall_group: "{{ algo_server_name }}"
|
||||
state: started
|
||||
tag: Environment:Algo
|
||||
ssh_key: "{{ ssh_key.vultr_ssh_key.name }}"
|
||||
|
@ -29,8 +52,3 @@
|
|||
|
||||
environment:
|
||||
VULTR_API_CONFIG: "{{ algo_vultr_config }}"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
|
|
|
@ -4,7 +4,9 @@
|
|||
Enter the local path to your configuration INI file
|
||||
(https://trailofbits.github.io/algo/cloud-vultr.html):
|
||||
register: _vultr_config
|
||||
when: vultr_config is undefined
|
||||
when:
|
||||
- vultr_config is undefined
|
||||
- lookup('env','VULTR_API_CONFIG')|length <= 0
|
||||
|
||||
- name: Set the token as a fact
|
||||
set_fact:
|
||||
|
@ -52,5 +54,5 @@
|
|||
set_fact:
|
||||
algo_vultr_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ vultr_regions[default_region | int - 1]['name'] }}{% endif %}
|
||||
|
|
|
@ -18,10 +18,7 @@
|
|||
ifconfig lo100 destroy || true &&
|
||||
ifconfig lo100 create &&
|
||||
ifconfig lo100 inet {{ local_service_ip }} netmask 255.255.255.255 &&
|
||||
ifconfig lo100 inet6 FCAA::1/64; echo $?
|
||||
|
||||
- name: save iptables
|
||||
shell: service netfilter-persistent save
|
||||
ifconfig lo100 inet6 {{ local_service_ipv6 }}/128; echo $?
|
||||
|
||||
- name: restart iptables
|
||||
service: name=netfilter-persistent state=restarted
|
||||
|
|
|
@ -1,26 +1,26 @@
|
|||
---
|
||||
- block:
|
||||
- name: Generate password for the CA key
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand -hex 16
|
||||
command: openssl rand -hex 16
|
||||
register: CA_password
|
||||
|
||||
- name: Generate p12 export password
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand 8 | python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print("".join([chars[ord(c) % 64] for c in list(sys.stdin.read())]))'
|
||||
shell: >
|
||||
openssl rand 8 |
|
||||
python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print("".join([chars[ord(c) % 64] for c in list(sys.stdin.read())]))'
|
||||
register: p12_password_generated
|
||||
when: p12_password is not defined
|
||||
tags: update-users
|
||||
become: false
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Define facts
|
||||
set_fact:
|
||||
p12_export_password: "{{ p12_password|default(p12_password_generated.stdout) }}"
|
||||
tags: update-users
|
||||
|
||||
- set_fact:
|
||||
- name: Set facts
|
||||
set_fact:
|
||||
CA_password: "{{ CA_password.stdout }}"
|
||||
IP_subject_alt_name: "{{ IP_subject_alt_name }}"
|
||||
|
||||
|
@ -31,5 +31,5 @@
|
|||
|
||||
- name: Check size of MTU
|
||||
set_fact:
|
||||
reduce_mtu: "{% if reduce_mtu|int == 0 and ansible_default_ipv4['mtu']|int < 1500 %}{{ 1500 - ansible_default_ipv4['mtu']|int }}{% else %}{{ reduce_mtu|int }}{% endif %}"
|
||||
reduce_mtu: "{{ 1500 - ansible_default_ipv4['mtu']|int if reduce_mtu|int == 0 and ansible_default_ipv4['mtu']|int < 1500 else reduce_mtu|int }}"
|
||||
tags: always
|
||||
|
|
|
@ -17,7 +17,8 @@
|
|||
- name: Gather additional facts
|
||||
import_tasks: facts.yml
|
||||
|
||||
- set_fact:
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
config_prefix: "/usr/local/"
|
||||
strongswan_shell: /usr/sbin/nologin
|
||||
strongswan_home: /var/empty
|
||||
|
@ -53,7 +54,7 @@
|
|||
block: |
|
||||
cloned_interfaces="lo100"
|
||||
ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255"
|
||||
ifconfig_lo100_ipv6="inet6 FCAA::1/64"
|
||||
ifconfig_lo100_ipv6="inet6 {{ local_service_ipv6 }}/128"
|
||||
notify:
|
||||
- restart loopback bsd
|
||||
|
||||
|
@ -73,5 +74,6 @@
|
|||
shell: >
|
||||
kldstat -n ipfw.ko || kldload ipfw ; sysctl net.inet.ip.fw.enable=0 &&
|
||||
bash /etc/rc.firewall && sysctl net.inet.ip.fw.enable=1
|
||||
changed_when: false
|
||||
|
||||
- meta: flush_handlers
|
||||
|
|
|
@ -1,32 +1,27 @@
|
|||
---
|
||||
- block:
|
||||
- name: Check the system
|
||||
raw: uname -a
|
||||
register: OS
|
||||
tags:
|
||||
- update-users
|
||||
- name: Check the system
|
||||
raw: uname -a
|
||||
register: OS
|
||||
changed_when: false
|
||||
tags:
|
||||
- update-users
|
||||
|
||||
- include_tasks: ubuntu.yml
|
||||
when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout'
|
||||
tags:
|
||||
- update-users
|
||||
- include_tasks: ubuntu.yml
|
||||
when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout'
|
||||
tags:
|
||||
- update-users
|
||||
|
||||
- include_tasks: freebsd.yml
|
||||
when: '"FreeBSD" in OS.stdout'
|
||||
tags:
|
||||
- update-users
|
||||
- include_tasks: freebsd.yml
|
||||
when: '"FreeBSD" in OS.stdout'
|
||||
tags:
|
||||
- update-users
|
||||
|
||||
- name: Sysctl tuning
|
||||
sysctl: name="{{ item.item }}" value="{{ item.value }}"
|
||||
when: item.item != ""
|
||||
with_items:
|
||||
- "{{ sysctl|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
- name: Sysctl tuning
|
||||
sysctl: name="{{ item.item }}" value="{{ item.value }}"
|
||||
when: item.item
|
||||
with_items:
|
||||
- "{{ sysctl|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
- meta: flush_handlers
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- meta: flush_handlers
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
ignore_errors: true
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
- name: Wait until the server becomes ready...
|
||||
wait_for_connection:
|
||||
delay: 20
|
||||
timeout: 320
|
||||
|
@ -61,18 +61,21 @@
|
|||
- meta: flush_handlers
|
||||
|
||||
- name: Check apparmor support
|
||||
shell: apparmor_status
|
||||
command: apparmor_status
|
||||
ignore_errors: yes
|
||||
changed_when: false
|
||||
register: apparmor_status
|
||||
|
||||
- set_fact:
|
||||
- name: Set fact if apparmor enabled
|
||||
set_fact:
|
||||
apparmor_enabled: true
|
||||
when: '"profiles are in enforce mode" in apparmor_status.stdout'
|
||||
|
||||
- name: Gather additional facts
|
||||
import_tasks: facts.yml
|
||||
|
||||
- set_fact:
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
tools:
|
||||
- git
|
||||
- screen
|
||||
|
@ -82,6 +85,7 @@
|
|||
- iptables-persistent
|
||||
- cgroup-tools
|
||||
- openssl
|
||||
- gnupg2
|
||||
sysctl:
|
||||
- item: net.ipv4.ip_forward
|
||||
value: 1
|
||||
|
@ -92,11 +96,9 @@
|
|||
|
||||
- name: Install tools
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
name: "{{ tools|default([]) }}"
|
||||
state: present
|
||||
update_cache: true
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
|
||||
- name: Install headers
|
||||
apt:
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
- name: Install unattended-upgrades
|
||||
apt:
|
||||
name: unattended-upgrades
|
||||
state: latest
|
||||
state: present
|
||||
|
||||
- name: Configure unattended-upgrades
|
||||
template:
|
||||
|
|
|
@ -4,4 +4,4 @@ Name=lo
|
|||
[Network]
|
||||
Description=lo:100
|
||||
Address={{ local_service_ip }}/32
|
||||
Address=FCAA::1/64
|
||||
Address={{ local_service_ipv6 }}/128
|
||||
|
|
|
@ -83,7 +83,7 @@ COMMIT
|
|||
# particular virtual (tun,tap,...) or physical (ethernet) interface.
|
||||
|
||||
# Accept DNS traffic to the local DNS resolver
|
||||
-A INPUT -d fcaa::1 -p udp --dport 53 -j ACCEPT
|
||||
-A INPUT -d {{ local_service_ipv6 }}/128 -p udp --dport 53 -j ACCEPT
|
||||
|
||||
# Drop traffic between VPN clients
|
||||
-A FORWARD -s {{ subnets|join(',') }} -d {{ subnets|join(',') }} -j {{ "DROP" if BetweenClients_DROP else "ACCEPT" }}
|
||||
|
|
|
@ -3,3 +3,7 @@
|
|||
|
||||
- name: restart apparmor
|
||||
service: name=apparmor state=restarted
|
||||
|
||||
- name: daemon-reload
|
||||
systemd:
|
||||
daemon_reload: true
|
||||
|
|
|
@ -1,52 +1,47 @@
|
|||
---
|
||||
- block:
|
||||
- name: Dnsmasq installed
|
||||
package: name=dnsmasq
|
||||
- name: Dnsmasq installed
|
||||
package: name=dnsmasq
|
||||
|
||||
- name: The dnsmasq directory created
|
||||
file: dest=/var/lib/dnsmasq state=directory mode=0755 owner=dnsmasq group=nogroup
|
||||
- name: The dnsmasq directory created
|
||||
file: dest=/var/lib/dnsmasq state=directory mode=0755 owner=dnsmasq group=nogroup
|
||||
|
||||
- include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
- include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
|
||||
- include_tasks: freebsd.yml
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
- include_tasks: freebsd.yml
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
|
||||
- name: Dnsmasq configured
|
||||
template:
|
||||
src: dnsmasq.conf.j2
|
||||
dest: "{{ config_prefix|default('/') }}etc/dnsmasq.conf"
|
||||
notify:
|
||||
- restart dnsmasq
|
||||
- name: Dnsmasq configured
|
||||
template:
|
||||
src: dnsmasq.conf.j2
|
||||
dest: "{{ config_prefix|default('/') }}etc/dnsmasq.conf"
|
||||
notify:
|
||||
- restart dnsmasq
|
||||
|
||||
- name: Adblock script created
|
||||
template:
|
||||
src: adblock.sh.j2
|
||||
dest: /usr/local/sbin/adblock.sh
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: 0755
|
||||
- name: Adblock script created
|
||||
template:
|
||||
src: adblock.sh.j2
|
||||
dest: /usr/local/sbin/adblock.sh
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: 0755
|
||||
|
||||
- name: Adblock script added to cron
|
||||
cron:
|
||||
name: Adblock hosts update
|
||||
minute: "{{ range(0, 60) | random }}"
|
||||
hour: "{{ range(0, 24) | random }}"
|
||||
job: /usr/local/sbin/adblock.sh
|
||||
user: root
|
||||
- name: Adblock script added to cron
|
||||
cron:
|
||||
name: Adblock hosts update
|
||||
minute: "{{ range(0, 60) | random }}"
|
||||
hour: "{{ range(0, 24) | random }}"
|
||||
job: /usr/local/sbin/adblock.sh
|
||||
user: root
|
||||
|
||||
- name: Update adblock hosts
|
||||
command: /usr/local/sbin/adblock.sh
|
||||
- name: Update adblock hosts
|
||||
command: /usr/local/sbin/adblock.sh
|
||||
changed_when: false
|
||||
|
||||
- meta: flush_handlers
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: Dnsmasq enabled and started
|
||||
service:
|
||||
name: dnsmasq
|
||||
state: started
|
||||
enabled: yes
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- name: Dnsmasq enabled and started
|
||||
service:
|
||||
name: dnsmasq
|
||||
state: started
|
||||
enabled: yes
|
||||
|
|
|
@ -7,13 +7,13 @@
|
|||
owner: root
|
||||
group: root
|
||||
mode: 0600
|
||||
when: apparmor_enabled|default(false)|bool == true
|
||||
when: apparmor_enabled|default(false)|bool
|
||||
notify:
|
||||
- restart dnsmasq
|
||||
|
||||
- name: Ubuntu | Enforce the dnsmasq AppArmor policy
|
||||
shell: aa-enforce usr.sbin.dnsmasq
|
||||
when: apparmor_enabled|default(false)|bool == true
|
||||
command: aa-enforce usr.sbin.dnsmasq
|
||||
when: apparmor_enabled|default(false)|bool
|
||||
tags: ['apparmor']
|
||||
|
||||
- name: Ubuntu | Ensure that the dnsmasq service directory exist
|
||||
|
|
|
@ -116,7 +116,7 @@ group=nogroup
|
|||
#except-interface=
|
||||
# Or which to listen on by address (remember to include 127.0.0.1 if
|
||||
# you use this.)
|
||||
listen-address=127.0.0.1,FCAA::1,{{ local_service_ip }}
|
||||
listen-address=127.0.0.1,{{ local_service_ipv6 }},{{ local_service_ip }}
|
||||
# If you want dnsmasq to provide only DNS service on an interface,
|
||||
# configure it as shown above, and then use the following line to
|
||||
# disable DHCP and TFTP on it.
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
- name: Install dnscrypt-proxy
|
||||
apt:
|
||||
name: dnscrypt-proxy
|
||||
state: latest
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Configure unattended-upgrades
|
||||
|
@ -37,7 +37,7 @@
|
|||
command: aa-enforce usr.bin.dnscrypt-proxy
|
||||
changed_when: false
|
||||
tags: apparmor
|
||||
when: apparmor_enabled|default(false)|bool == true
|
||||
when: apparmor_enabled|default(false)|bool
|
||||
|
||||
- name: Ubuntu | Ensure that the dnscrypt-proxy service directory exist
|
||||
file:
|
||||
|
|
|
@ -1,10 +1,3 @@
|
|||
---
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
|
|
@ -10,24 +10,24 @@
|
|||
set_fact:
|
||||
cloud_instance_ip: >-
|
||||
{% if server is defined %}{{ server }}
|
||||
{%- elif _algo_server.user_input is defined and _algo_server.user_input != "" %}{{ _algo_server.user_input }}
|
||||
{%- elif _algo_server.user_input %}{{ _algo_server.user_input }}
|
||||
{%- else %}localhost{% endif %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
|
||||
[root]
|
||||
register: _algo_ssh_user
|
||||
when:
|
||||
- ssh_user is undefined
|
||||
- cloud_instance_ip != "localhost"
|
||||
- block:
|
||||
- pause:
|
||||
prompt: |
|
||||
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
|
||||
[root]
|
||||
register: _algo_ssh_user
|
||||
when: ssh_user is undefined
|
||||
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
ansible_ssh_user: >-
|
||||
{% if ssh_user is defined %}{{ ssh_user }}
|
||||
{%- elif _algo_ssh_user.user_input is defined and _algo_ssh_user.user_input != "" %}{{ _algo_ssh_user.user_input }}
|
||||
{%- else %}root{% endif %}
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
ansible_ssh_user: >-
|
||||
{% if ssh_user is defined %}{{ ssh_user }}
|
||||
{%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }}
|
||||
{%- else %}root{% endif %}
|
||||
when: cloud_instance_ip != "localhost"
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
|
@ -40,5 +40,5 @@
|
|||
set_fact:
|
||||
IP_subject_alt_name: >-
|
||||
{% if endpoint is defined %}{{ endpoint }}
|
||||
{%- elif _endpoint.user_input is defined and _endpoint.user_input != "" %}{{ _endpoint.user_input }}
|
||||
{%- elif _endpoint.user_input %}{{ _endpoint.user_input }}
|
||||
{%- else %}{{ cloud_instance_ip }}{% endif %}
|
||||
|
|
|
@ -1,120 +1,114 @@
|
|||
---
|
||||
- name: Ensure that the sshd_config file has desired options
|
||||
blockinfile:
|
||||
dest: /etc/ssh/sshd_config
|
||||
marker: '# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role'
|
||||
block: |
|
||||
Match Group algo
|
||||
AllowTcpForwarding local
|
||||
AllowAgentForwarding no
|
||||
AllowStreamLocalForwarding no
|
||||
PermitTunnel no
|
||||
X11Forwarding no
|
||||
notify:
|
||||
- restart ssh
|
||||
|
||||
- name: Ensure that the algo group exist
|
||||
group: name=algo state=present
|
||||
|
||||
- name: Ensure that the jail directory exist
|
||||
file:
|
||||
path: /var/jail/
|
||||
state: directory
|
||||
mode: 0755
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
|
||||
- block:
|
||||
- name: Ensure that the sshd_config file has desired options
|
||||
blockinfile:
|
||||
dest: /etc/ssh/sshd_config
|
||||
marker: '# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role'
|
||||
block: |
|
||||
Match Group algo
|
||||
AllowTcpForwarding local
|
||||
AllowAgentForwarding no
|
||||
AllowStreamLocalForwarding no
|
||||
PermitTunnel no
|
||||
X11Forwarding no
|
||||
notify:
|
||||
- restart ssh
|
||||
- name: Ensure that the SSH users exist
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
groups: algo
|
||||
home: '/var/jail/{{ item }}'
|
||||
createhome: yes
|
||||
generate_ssh_key: false
|
||||
shell: /bin/false
|
||||
state: present
|
||||
append: yes
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Ensure that the algo group exist
|
||||
group: name=algo state=present
|
||||
|
||||
- name: Ensure that the jail directory exist
|
||||
- block:
|
||||
- name: Clean up the ssh-tunnel directory
|
||||
file:
|
||||
path: /var/jail/
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
state: absent
|
||||
when: keys_clean_all|bool
|
||||
|
||||
- name: Ensure the config directories exist
|
||||
file:
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
recurse: yes
|
||||
mode: '0700'
|
||||
|
||||
- block:
|
||||
- name: Ensure that the SSH users exist
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
groups: algo
|
||||
home: '/var/jail/{{ item }}'
|
||||
createhome: yes
|
||||
generate_ssh_key: false
|
||||
shell: /bin/false
|
||||
state: present
|
||||
append: yes
|
||||
with_items: "{{ users }}"
|
||||
- name: Check if the private keys exist
|
||||
stat:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem"
|
||||
register: privatekey
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- block:
|
||||
- name: Clean up the ssh-tunnel directory
|
||||
file:
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
state: absent
|
||||
when: keys_clean_all|bool == True
|
||||
- name: Build ssh private keys
|
||||
openssl_privatekey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem"
|
||||
passphrase: "{{ p12_export_password }}"
|
||||
cipher: aes256
|
||||
force: false
|
||||
no_log: true
|
||||
when: not item.stat.exists
|
||||
with_items: "{{ privatekey.results }}"
|
||||
register: openssl_privatekey
|
||||
|
||||
- name: Ensure the config directories exist
|
||||
file:
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
state: directory
|
||||
recurse: yes
|
||||
mode: '0700'
|
||||
- name: Build ssh public keys
|
||||
openssl_publickey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub"
|
||||
privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem"
|
||||
privatekey_passphrase: "{{ p12_export_password }}"
|
||||
format: OpenSSH
|
||||
force: true
|
||||
no_log: true
|
||||
when: item.changed
|
||||
with_items: "{{ openssl_privatekey.results }}"
|
||||
|
||||
- name: Check if the private keys exist
|
||||
stat:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem"
|
||||
register: privatekey
|
||||
with_items: "{{ users }}"
|
||||
- name: Build the client ssh config
|
||||
template:
|
||||
src: ssh_config.j2
|
||||
dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config"
|
||||
mode: 0700
|
||||
with_items: "{{ users }}"
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
|
||||
- name: Build ssh private keys
|
||||
openssl_privatekey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem"
|
||||
passphrase: "{{ p12_export_password }}"
|
||||
cipher: aes256
|
||||
force: false
|
||||
no_log: true
|
||||
when: not item.stat.exists
|
||||
with_items: "{{ privatekey.results }}"
|
||||
register: openssl_privatekey
|
||||
- name: The authorized keys file created
|
||||
authorized_key:
|
||||
user: "{{ item }}"
|
||||
key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}"
|
||||
state: present
|
||||
manage_dir: true
|
||||
exclusive: true
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Build ssh public keys
|
||||
openssl_publickey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub"
|
||||
privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem"
|
||||
privatekey_passphrase: "{{ p12_export_password }}"
|
||||
format: OpenSSH
|
||||
force: true
|
||||
no_log: true
|
||||
when: item.changed
|
||||
with_items: "{{ openssl_privatekey.results }}"
|
||||
- name: Get active users
|
||||
getent:
|
||||
database: group
|
||||
key: algo
|
||||
split: ':'
|
||||
|
||||
- name: Build the client ssh config
|
||||
template:
|
||||
src: ssh_config.j2
|
||||
dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config"
|
||||
mode: 0700
|
||||
with_items: "{{ users }}"
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
|
||||
- name: The authorized keys file created
|
||||
authorized_key:
|
||||
user: "{{ item }}"
|
||||
key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}"
|
||||
state: present
|
||||
manage_dir: true
|
||||
exclusive: true
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Get active users
|
||||
getent:
|
||||
database: group
|
||||
key: algo
|
||||
split: ':'
|
||||
|
||||
- name: Delete non-existing users
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
remove: yes
|
||||
force: yes
|
||||
when: item not in users
|
||||
with_items: "{{ getent_group['algo'][2].split(',') }}"
|
||||
tags: update-users
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- name: Delete non-existing users
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
remove: yes
|
||||
force: yes
|
||||
when: item not in users
|
||||
with_items: "{{ getent_group['algo'][2].split(',') }}"
|
||||
tags: update-users
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
---
|
||||
ipsec_config_path: "configs/{{ IP_subject_alt_name }}/ipsec/"
|
||||
ipsec_pki_path: "{{ ipsec_config_path }}/.pki/"
|
||||
strongswan_network: 10.19.48.0/24
|
||||
strongswan_network_ipv6: 'fd9d:bc11:4020::/48'
|
||||
strongswan_shell: /usr/sbin/nologin
|
||||
strongswan_home: /var/lib/strongswan
|
||||
BetweenClients_DROP: true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
service: name=strongswan state=restarted
|
||||
|
||||
- name: daemon-reload
|
||||
shell: systemctl daemon-reload
|
||||
systemd: daemon_reload=true
|
||||
|
||||
- name: restart apparmor
|
||||
service: name=apparmor state=restarted
|
||||
|
|
|
@ -1,8 +1,13 @@
|
|||
---
|
||||
- name: Register p12 PayloadContent
|
||||
shell: cat private/{{ item }}.p12 | base64
|
||||
shell: |
|
||||
set -o pipefail
|
||||
cat private/{{ item }}.p12 |
|
||||
base64
|
||||
register: PayloadContent
|
||||
changed_when: false
|
||||
args:
|
||||
executable: bash
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
with_items: "{{ users }}"
|
||||
|
||||
|
|
|
@ -23,12 +23,22 @@
|
|||
owner: strongswan
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0600"
|
||||
- src: charon.conf.j2
|
||||
dest: "strongswan.d/charon.conf"
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0644"
|
||||
notify:
|
||||
- restart strongswan
|
||||
|
||||
- name: Get loaded plugins
|
||||
shell: >
|
||||
find {{ config_prefix|default('/') }}etc/strongswan.d/charon/ -type f -name '*.conf' -exec basename {} \; | cut -f1 -d.
|
||||
shell: |
|
||||
set -o pipefail
|
||||
find {{ config_prefix|default('/') }}etc/strongswan.d/charon/ -type f -name '*.conf' -exec basename {} \; |
|
||||
cut -f1 -d.
|
||||
changed_when: false
|
||||
args:
|
||||
executable: bash
|
||||
register: strongswan_plugins
|
||||
|
||||
- name: Disable unneeded plugins
|
||||
|
|
|
@ -1,37 +1,31 @@
|
|||
---
|
||||
- block:
|
||||
- include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
- include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
|
||||
- name: Ensure that the strongswan user exist
|
||||
user:
|
||||
name: strongswan
|
||||
group: nogroup
|
||||
shell: "{{ strongswan_shell }}"
|
||||
home: "{{ strongswan_home }}"
|
||||
state: present
|
||||
- name: Ensure that the strongswan user exist
|
||||
user:
|
||||
name: strongswan
|
||||
group: nogroup
|
||||
shell: "{{ strongswan_shell }}"
|
||||
home: "{{ strongswan_home }}"
|
||||
state: present
|
||||
|
||||
- name: Install strongSwan
|
||||
package: name=strongswan state=present
|
||||
- name: Install strongSwan
|
||||
package: name=strongswan state=present
|
||||
|
||||
- import_tasks: ipsec_configuration.yml
|
||||
- import_tasks: openssl.yml
|
||||
tags: update-users
|
||||
- import_tasks: distribute_keys.yml
|
||||
- import_tasks: client_configs.yml
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
tags: update-users
|
||||
- import_tasks: ipsec_configuration.yml
|
||||
- import_tasks: openssl.yml
|
||||
tags: update-users
|
||||
- import_tasks: distribute_keys.yml
|
||||
- import_tasks: client_configs.yml
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
tags: update-users
|
||||
|
||||
- name: strongSwan started
|
||||
service:
|
||||
name: strongswan
|
||||
state: started
|
||||
enabled: true
|
||||
- name: strongSwan started
|
||||
service:
|
||||
name: strongswan
|
||||
state: started
|
||||
enabled: true
|
||||
|
||||
- meta: flush_handlers
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- meta: flush_handlers
|
||||
|
|
|
@ -2,14 +2,19 @@
|
|||
- block:
|
||||
- name: Set subjectAltName as a fact
|
||||
set_fact:
|
||||
subjectAltName: "{{ subjectAltName_IP }}{% if ipv6_support %},IP:{{ ansible_default_ipv6['address'] }}{% endif %}{% if domain and subjectAltName_DNS %},DNS:{{ subjectAltName_DNS }}{% endif %}"
|
||||
subjectAltName: >-
|
||||
{{ subjectAltName_IP }}
|
||||
{%- if ipv6_support -%},IP:{{ ansible_default_ipv6['address'] }}{%- endif -%}
|
||||
{%- if domain and subjectAltName_DNS -%},DNS:{{ subjectAltName_DNS }}{%- endif -%}
|
||||
tags: always
|
||||
|
||||
- debug: var=subjectAltName
|
||||
|
||||
- name: Ensure the pki directory does not exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}"
|
||||
state: absent
|
||||
when: keys_clean_all|bool == True
|
||||
when: keys_clean_all|bool
|
||||
|
||||
- name: Ensure the pki directories exist
|
||||
file:
|
||||
|
@ -151,6 +156,23 @@
|
|||
with_items: "{{ users }}"
|
||||
register: p12
|
||||
|
||||
- name: Build the client's p12 with the CA cert included
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} pkcs12
|
||||
-in certs/{{ item }}.crt
|
||||
-inkey private/{{ item }}.key
|
||||
-export
|
||||
-name {{ item }}
|
||||
-out private/{{ item }}_ca.p12
|
||||
-certfile cacert.pem
|
||||
-passout pass:"{{ p12_export_password }}"
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
executable: bash
|
||||
with_items: "{{ users }}"
|
||||
register: p12
|
||||
|
||||
- name: Copy the p12 certificates
|
||||
copy:
|
||||
src: "{{ ipsec_pki_path }}/private/{{ item }}.p12"
|
||||
|
@ -165,7 +187,7 @@
|
|||
awk '{print $5}' |
|
||||
sed 's/\/CN=//g'
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path}}"
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
register: valid_certs
|
||||
|
||||
- name: Revoke non-existing users
|
||||
|
@ -209,3 +231,13 @@
|
|||
- gencrl.changed
|
||||
notify:
|
||||
- rereadcrls
|
||||
|
||||
- name: Delete the CA key
|
||||
file:
|
||||
path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
state: absent
|
||||
become: false
|
||||
delegate_to: localhost
|
||||
when:
|
||||
- ipsec_enabled
|
||||
- not algo_store_cakey
|
||||
|
|
|
@ -1,18 +1,19 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
strongswan_additional_plugins: []
|
||||
|
||||
- name: Ubuntu | Install strongSwan
|
||||
apt:
|
||||
name: strongswan
|
||||
state: latest
|
||||
state: present
|
||||
update_cache: yes
|
||||
install_recommends: yes
|
||||
|
||||
- name: Ubuntu | Enforcing ipsec with apparmor
|
||||
shell: aa-enforce "{{ item }}"
|
||||
when: apparmor_enabled|default(false)|bool == true
|
||||
command: aa-enforce "{{ item }}"
|
||||
when: apparmor_enabled|default(false)|bool
|
||||
changed_when: false
|
||||
with_items:
|
||||
- /usr/lib/ipsec/charon
|
||||
- /usr/lib/ipsec/lookip
|
||||
|
|
365
roles/strongswan/templates/charon.conf.j2
Normal file
365
roles/strongswan/templates/charon.conf.j2
Normal file
|
@ -0,0 +1,365 @@
|
|||
# Options for the charon IKE daemon.
|
||||
charon {
|
||||
|
||||
# Accept unencrypted ID and HASH payloads in IKEv1 Main Mode.
|
||||
# accept_unencrypted_mainmode_messages = no
|
||||
|
||||
# Maximum number of half-open IKE_SAs for a single peer IP.
|
||||
# block_threshold = 5
|
||||
|
||||
# Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP
|
||||
# should be saved under a unique file name derived from the public key of
|
||||
# the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or
|
||||
# /etc/swanctl/x509crl (vici), respectively.
|
||||
# cache_crls = no
|
||||
|
||||
# Whether relations in validated certificate chains should be cached in
|
||||
# memory.
|
||||
# cert_cache = yes
|
||||
|
||||
# Send Cisco Unity vendor ID payload (IKEv1 only).
|
||||
# cisco_unity = no
|
||||
|
||||
# Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed.
|
||||
close_ike_on_child_failure = yes
|
||||
|
||||
# Number of half-open IKE_SAs that activate the cookie mechanism.
|
||||
# cookie_threshold = 10
|
||||
|
||||
# Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only).
|
||||
# delete_rekeyed = no
|
||||
|
||||
# Delay in seconds until inbound IPsec SAs are deleted after rekeyings
|
||||
# (IKEv2 only).
|
||||
# delete_rekeyed_delay = 5
|
||||
|
||||
# Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic
|
||||
# strength.
|
||||
# dh_exponent_ansi_x9_42 = yes
|
||||
|
||||
# Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal
|
||||
# missing symbols immediately.
|
||||
# dlopen_use_rtld_now = no
|
||||
|
||||
# DNS server assigned to peer via configuration payload (CP).
|
||||
# dns1 =
|
||||
|
||||
# DNS server assigned to peer via configuration payload (CP).
|
||||
# dns2 =
|
||||
|
||||
# Enable Denial of Service protection using cookies and aggressiveness
|
||||
# checks.
|
||||
# dos_protection = yes
|
||||
|
||||
# Compliance with the errata for RFC 4753.
|
||||
# ecp_x_coordinate_only = yes
|
||||
|
||||
# Free objects during authentication (might conflict with plugins).
|
||||
# flush_auth_cfg = no
|
||||
|
||||
# Whether to follow IKEv2 redirects (RFC 5685).
|
||||
# follow_redirects = yes
|
||||
|
||||
# Maximum size (complete IP datagram size in bytes) of a sent IKE fragment
|
||||
# when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults
|
||||
# to 1280 (use 0 for address family specific default values, which uses a
|
||||
# lower value for IPv4). If specified this limit is used for both IPv4 and
|
||||
# IPv6.
|
||||
# fragment_size = 1280
|
||||
|
||||
# Name of the group the daemon changes to after startup.
|
||||
# group =
|
||||
|
||||
# Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING).
|
||||
half_open_timeout = 5
|
||||
|
||||
# Enable hash and URL support.
|
||||
# hash_and_url = no
|
||||
|
||||
# Allow IKEv1 Aggressive Mode with pre-shared keys as responder.
|
||||
# i_dont_care_about_security_and_use_aggressive_mode_psk = no
|
||||
|
||||
# Whether to ignore the traffic selectors from the kernel's acquire events
|
||||
# for IKEv2 connections (they are not used for IKEv1).
|
||||
# ignore_acquire_ts = no
|
||||
|
||||
# A space-separated list of routing tables to be excluded from route
|
||||
# lookups.
|
||||
# ignore_routing_tables =
|
||||
|
||||
# Maximum number of IKE_SAs that can be established at the same time before
|
||||
# new connection attempts are blocked.
|
||||
# ikesa_limit = 0
|
||||
|
||||
# Number of exclusively locked segments in the hash table.
|
||||
# ikesa_table_segments = 1
|
||||
|
||||
# Size of the IKE_SA hash table.
|
||||
# ikesa_table_size = 1
|
||||
|
||||
# Whether to close IKE_SA if the only CHILD_SA closed due to inactivity.
|
||||
inactivity_close_ike = yes
|
||||
|
||||
# Limit new connections based on the current number of half open IKE_SAs,
|
||||
# see IKE_SA_INIT DROPPING in strongswan.conf(5).
|
||||
# init_limit_half_open = 0
|
||||
|
||||
# Limit new connections based on the number of queued jobs.
|
||||
# init_limit_job_load = 0
|
||||
|
||||
# Causes charon daemon to ignore IKE initiation requests.
|
||||
# initiator_only = no
|
||||
|
||||
# Install routes into a separate routing table for established IPsec
|
||||
# tunnels.
|
||||
# install_routes = yes
|
||||
|
||||
# Install virtual IP addresses.
|
||||
# install_virtual_ip = yes
|
||||
|
||||
# The name of the interface on which virtual IP addresses should be
|
||||
# installed.
|
||||
# install_virtual_ip_on =
|
||||
|
||||
# Check daemon, libstrongswan and plugin integrity at startup.
|
||||
# integrity_test = no
|
||||
|
||||
# A comma-separated list of network interfaces that should be ignored, if
|
||||
# interfaces_use is specified this option has no effect.
|
||||
# interfaces_ignore =
|
||||
|
||||
# A comma-separated list of network interfaces that should be used by
|
||||
# charon. All other interfaces are ignored.
|
||||
# interfaces_use =
|
||||
|
||||
# NAT keep alive interval.
|
||||
keep_alive = 25s
|
||||
|
||||
# Plugins to load in the IKE daemon charon.
|
||||
# load =
|
||||
|
||||
# Determine plugins to load via each plugin's load option.
|
||||
# load_modular = no
|
||||
|
||||
# Initiate IKEv2 reauthentication with a make-before-break scheme.
|
||||
# make_before_break = no
|
||||
|
||||
# Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about
|
||||
# and track concurrently.
|
||||
# max_ikev1_exchanges = 3
|
||||
|
||||
# Maximum packet size accepted by charon.
|
||||
# max_packet = 10000
|
||||
|
||||
# Enable multiple authentication exchanges (RFC 4739).
|
||||
# multiple_authentication = yes
|
||||
|
||||
# WINS servers assigned to peer via configuration payload (CP).
|
||||
# nbns1 =
|
||||
|
||||
# WINS servers assigned to peer via configuration payload (CP).
|
||||
# nbns2 =
|
||||
|
||||
# UDP port used locally. If set to 0 a random port will be allocated.
|
||||
# port = 500
|
||||
|
||||
# UDP port used locally in case of NAT-T. If set to 0 a random port will be
|
||||
# allocated. Has to be different from charon.port, otherwise a random port
|
||||
# will be allocated.
|
||||
# port_nat_t = 4500
|
||||
|
||||
# Whether to prefer updating SAs to the path with the best route.
|
||||
# prefer_best_path = no
|
||||
|
||||
# Prefer locally configured proposals for IKE/IPsec over supplied ones as
|
||||
# responder (disabling this can avoid keying retries due to
|
||||
# INVALID_KE_PAYLOAD notifies).
|
||||
# prefer_configured_proposals = yes
|
||||
|
||||
# By default public IPv6 addresses are preferred over temporary ones (RFC
|
||||
# 4941), to make connections more stable. Enable this option to reverse
|
||||
# this.
|
||||
# prefer_temporary_addrs = no
|
||||
|
||||
# Process RTM_NEWROUTE and RTM_DELROUTE events.
|
||||
# process_route = yes
|
||||
|
||||
# Delay in ms for receiving packets, to simulate larger RTT.
|
||||
# receive_delay = 0
|
||||
|
||||
# Delay request messages.
|
||||
# receive_delay_request = yes
|
||||
|
||||
# Delay response messages.
|
||||
# receive_delay_response = yes
|
||||
|
||||
# Specific IKEv2 message type to delay, 0 for any.
|
||||
# receive_delay_type = 0
|
||||
|
||||
# Size of the AH/ESP replay window, in packets.
|
||||
# replay_window = 32
|
||||
|
||||
# Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION
|
||||
# in strongswan.conf(5).
|
||||
# retransmit_base = 1.8
|
||||
|
||||
# Maximum jitter in percent to apply randomly to calculated retransmission
|
||||
# timeout (0 to disable).
|
||||
# retransmit_jitter = 0
|
||||
|
||||
# Upper limit in seconds for calculated retransmission timeout (0 to
|
||||
# disable).
|
||||
# retransmit_limit = 0
|
||||
|
||||
# Timeout in seconds before sending first retransmit.
|
||||
# retransmit_timeout = 4.0
|
||||
|
||||
# Number of times to retransmit a packet before giving up.
|
||||
# retransmit_tries = 5
|
||||
|
||||
# Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if
|
||||
# DNS resolution failed), 0 to disable retries.
|
||||
# retry_initiate_interval = 0
|
||||
|
||||
# Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1).
|
||||
reuse_ikesa = yes
|
||||
|
||||
# Numerical routing table to install routes to.
|
||||
# routing_table =
|
||||
|
||||
# Priority of the routing table.
|
||||
# routing_table_prio =
|
||||
|
||||
# Whether to use RSA with PSS padding instead of PKCS#1 padding by default.
|
||||
# rsa_pss = no
|
||||
|
||||
# Delay in ms for sending packets, to simulate larger RTT.
|
||||
# send_delay = 0
|
||||
|
||||
# Delay request messages.
|
||||
# send_delay_request = yes
|
||||
|
||||
# Delay response messages.
|
||||
# send_delay_response = yes
|
||||
|
||||
# Specific IKEv2 message type to delay, 0 for any.
|
||||
# send_delay_type = 0
|
||||
|
||||
# Send strongSwan vendor ID payload
|
||||
# send_vendor_id = no
|
||||
|
||||
# Whether to enable Signature Authentication as per RFC 7427.
|
||||
# signature_authentication = yes
|
||||
|
||||
# Whether to enable constraints against IKEv2 signature schemes.
|
||||
# signature_authentication_constraints = yes
|
||||
|
||||
# The upper limit for SPIs requested from the kernel for IPsec SAs.
|
||||
# spi_max = 0xcfffffff
|
||||
|
||||
# The lower limit for SPIs requested from the kernel for IPsec SAs.
|
||||
# spi_min = 0xc0000000
|
||||
|
||||
# Number of worker threads in charon.
|
||||
# threads = 16
|
||||
|
||||
# Name of the user the daemon changes to after startup.
|
||||
# user =
|
||||
|
||||
crypto_test {
|
||||
|
||||
# Benchmark crypto algorithms and order them by efficiency.
|
||||
# bench = no
|
||||
|
||||
# Buffer size used for crypto benchmark.
|
||||
# bench_size = 1024
|
||||
|
||||
# Number of iterations to test each algorithm.
|
||||
# bench_time = 50
|
||||
|
||||
# Test crypto algorithms during registration (requires test vectors
|
||||
# provided by the test-vectors plugin).
|
||||
# on_add = no
|
||||
|
||||
# Test crypto algorithms on each crypto primitive instantiation.
|
||||
# on_create = no
|
||||
|
||||
# Strictly require at least one test vector to enable an algorithm.
|
||||
# required = no
|
||||
|
||||
# Whether to test RNG with TRUE quality; requires a lot of entropy.
|
||||
# rng_true = no
|
||||
|
||||
}
|
||||
|
||||
host_resolver {
|
||||
|
||||
# Maximum number of concurrent resolver threads (they are terminated if
|
||||
# unused).
|
||||
# max_threads = 3
|
||||
|
||||
# Minimum number of resolver threads to keep around.
|
||||
# min_threads = 0
|
||||
|
||||
}
|
||||
|
||||
leak_detective {
|
||||
|
||||
# Includes source file names and line numbers in leak detective output.
|
||||
# detailed = yes
|
||||
|
||||
# Threshold in bytes for leaks to be reported (0 to report all).
|
||||
# usage_threshold = 10240
|
||||
|
||||
# Threshold in number of allocations for leaks to be reported (0 to
|
||||
# report all).
|
||||
# usage_threshold_count = 0
|
||||
|
||||
}
|
||||
|
||||
processor {
|
||||
|
||||
# Section to configure the number of reserved threads per priority class
|
||||
# see JOB PRIORITY MANAGEMENT in strongswan.conf(5).
|
||||
priority_threads {
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
# Section containing a list of scripts (name = path) that are executed when
|
||||
# the daemon is started.
|
||||
start-scripts {
|
||||
|
||||
}
|
||||
|
||||
# Section containing a list of scripts (name = path) that are executed when
|
||||
# the daemon is terminated.
|
||||
stop-scripts {
|
||||
|
||||
}
|
||||
|
||||
tls {
|
||||
|
||||
# List of TLS encryption ciphers.
|
||||
# cipher =
|
||||
|
||||
# List of TLS key exchange methods.
|
||||
# key_exchange =
|
||||
|
||||
# List of TLS MAC algorithms.
|
||||
# mac =
|
||||
|
||||
# List of TLS cipher suites.
|
||||
# suites =
|
||||
|
||||
}
|
||||
|
||||
x509 {
|
||||
|
||||
# Discard certificates with unsupported or unknown critical extensions.
|
||||
# enforce_critical = yes
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
conn ikev2-{{ IP_subject_alt_name }}
|
||||
conn algovpn-{{ IP_subject_alt_name }}
|
||||
fragmentation=yes
|
||||
rekey=no
|
||||
dpdaction=clear
|
||||
|
@ -16,7 +16,7 @@ conn ikev2-{{ IP_subject_alt_name }}
|
|||
|
||||
right={{ IP_subject_alt_name }}
|
||||
rightid={{ IP_subject_alt_name }}
|
||||
rightsubnet=0.0.0.0/0
|
||||
rightsubnet={{ rightsubnet | default('0.0.0.0/0') }}
|
||||
rightauth=pubkey
|
||||
|
||||
leftsourceip=%config
|
||||
|
|
|
@ -9,6 +9,8 @@ conn %default
|
|||
keyexchange=ikev2
|
||||
compress=yes
|
||||
dpddelay=35s
|
||||
lifetime=3h
|
||||
ikelifetime=12h
|
||||
|
||||
{% if algo_windows %}
|
||||
ike={{ ciphers.compat.ike }}
|
||||
|
|
|
@ -69,7 +69,7 @@
|
|||
<key>IntegrityAlgorithm</key>
|
||||
<string>SHA2-512</string>
|
||||
<key>LifeTimeInMinutes</key>
|
||||
<integer>20</integer>
|
||||
<integer>1440</integer>
|
||||
</dict>
|
||||
<key>DeadPeerDetectionRate</key>
|
||||
<string>Medium</string>
|
||||
|
@ -90,7 +90,7 @@
|
|||
<key>IntegrityAlgorithm</key>
|
||||
<string>SHA2-512</string>
|
||||
<key>LifeTimeInMinutes</key>
|
||||
<integer>20</integer>
|
||||
<integer>1440</integer>
|
||||
</dict>
|
||||
<key>LocalIdentifier</key>
|
||||
<string>{{ item.0 }}</string>
|
||||
|
|
|
@ -3,26 +3,16 @@ wireguard_PersistentKeepalive: 0
|
|||
wireguard_config_path: "configs/{{ IP_subject_alt_name }}/wireguard/"
|
||||
wireguard_pki_path: "{{ wireguard_config_path }}/.pki/"
|
||||
wireguard_interface: wg0
|
||||
_wireguard_network_ipv4:
|
||||
subnet: 10.19.49.0
|
||||
prefix: 24
|
||||
gateway: 10.19.49.1
|
||||
clients_range: 10.19.49
|
||||
clients_start: 2
|
||||
_wireguard_network_ipv6:
|
||||
subnet: 'fd9d:bc11:4021::'
|
||||
prefix: 48
|
||||
gateway: 'fd9d:bc11:4021::1'
|
||||
clients_range: 'fd9d:bc11:4021::'
|
||||
clients_start: 2
|
||||
wireguard_network_ipv4: "{{ _wireguard_network_ipv4['subnet'] }}/{{ _wireguard_network_ipv4['prefix'] }}"
|
||||
wireguard_network_ipv6: "{{ _wireguard_network_ipv6['subnet'] }}/{{ _wireguard_network_ipv6['prefix'] }}"
|
||||
keys_clean_all: false
|
||||
wireguard_dns_servers: >-
|
||||
{% if local_dns|default(false)|bool or dns_encryption|default(false)|bool == true %}
|
||||
{% if local_dns|default(false)|bool or dns_encryption|default(false)|bool %}
|
||||
{{ local_service_ip }}
|
||||
{% else %}
|
||||
{% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
|
||||
{% endif %}
|
||||
wireguard_client_ip: "{{ _wireguard_network_ipv4['clients_range'] }}.{{ _wireguard_network_ipv4['clients_start'] + index|int + 1 }}/{{ _wireguard_network_ipv4['prefix'] }}{% if ipv6_support %},{{ _wireguard_network_ipv6['clients_range'] }}{{ _wireguard_network_ipv6['clients_start'] + index|int + 1 }}/{{ _wireguard_network_ipv6['prefix'] }}{% endif %}"
|
||||
wireguard_server_ip: "{{ _wireguard_network_ipv4['gateway'] }}/{{ _wireguard_network_ipv4['prefix'] }}{% if ipv6_support %},{{ _wireguard_network_ipv6['gateway'] }}/{{ _wireguard_network_ipv6['prefix'] }}{% endif %}"
|
||||
wireguard_client_ip: >-
|
||||
{{ wireguard_network_ipv4 | ipaddr(index|int+2) }}
|
||||
{{ ',' + wireguard_network_ipv6 | ipaddr(index|int+2) if ipv6_support else '' }}
|
||||
wireguard_server_ip: >-
|
||||
{{ wireguard_network_ipv4 | ipaddr('1') }}
|
||||
{{ ',' + wireguard_network_ipv6 | ipaddr('1') if ipv6_support else '' }}
|
||||
|
|
|
@ -4,7 +4,8 @@
|
|||
name: wireguard
|
||||
state: present
|
||||
|
||||
- set_fact:
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
service_name: wireguard
|
||||
tags: always
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
file:
|
||||
dest: "{{ config_prefix|default('/') }}etc/wireguard/private_{{ item }}.lock"
|
||||
state: absent
|
||||
when: keys_clean_all|bool == True
|
||||
when: keys_clean_all|bool
|
||||
with_items:
|
||||
- "{{ users }}"
|
||||
- "{{ IP_subject_alt_name }}"
|
||||
|
@ -39,7 +39,10 @@
|
|||
when: wg_genkey.changed
|
||||
|
||||
- name: Generate public keys
|
||||
shell: echo "{{ lookup('file', wireguard_pki_path + '/private/' + item) }}" | wg pubkey
|
||||
shell: |
|
||||
set -o pipefail
|
||||
echo "{{ lookup('file', wireguard_pki_path + '/private/' + item) }}" |
|
||||
wg pubkey
|
||||
register: wg_pubkey
|
||||
changed_when: false
|
||||
args:
|
||||
|
|
|
@ -75,7 +75,6 @@
|
|||
notify: restart wireguard
|
||||
tags: update-users
|
||||
|
||||
|
||||
- name: WireGuard enabled and started
|
||||
service:
|
||||
name: "{{ service_name }}"
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
group: root
|
||||
mode: 0644
|
||||
|
||||
- set_fact:
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
service_name: "wg-quick@{{ wireguard_interface }}"
|
||||
tags: always
|
||||
|
|
|
@ -7,6 +7,6 @@ DNS = {{ wireguard_dns_servers }}
|
|||
|
||||
[Peer]
|
||||
PublicKey = {{ lookup('file', wireguard_pki_path + '/public/' + IP_subject_alt_name) }}
|
||||
AllowedIPs = 0.0.0.0/0, ::/0
|
||||
AllowedIPs = 0.0.0.0/0{{ ', ::/0' if ipv6_support else '' }}
|
||||
Endpoint = {{ IP_subject_alt_name }}:{{ wireguard_port }}
|
||||
{{ 'PersistentKeepalive = ' + wireguard_PersistentKeepalive|string if wireguard_PersistentKeepalive > 0 else '' }}
|
||||
|
|
|
@ -11,7 +11,6 @@ SaveConfig = false
|
|||
[Peer]
|
||||
# {{ u }}
|
||||
PublicKey = {{ lookup('file', wireguard_pki_path + '/public/' + u) }}
|
||||
AllowedIPs = {{ _wireguard_network_ipv4['clients_range'] }}.{{ _wireguard_network_ipv4['clients_start'] + index }}/32{% if ipv6_support %},{{ _wireguard_network_ipv6['clients_range'] }}{{ _wireguard_network_ipv6['clients_start'] + index }}/128{% endif %}
|
||||
|
||||
AllowedIPs = {{ wireguard_network_ipv4 | ipaddr(index|int+1) | ipv4('address') }}/32{{ ',' + wireguard_network_ipv6 | ipaddr(index|int+1) | ipv6('address') + '/128' if ipv6_support else '' }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
|
145
server.yml
145
server.yml
|
@ -2,84 +2,83 @@
|
|||
- name: Configure the server and install required software
|
||||
hosts: vpn-host
|
||||
gather_facts: false
|
||||
tags: algo
|
||||
become: true
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
roles:
|
||||
- role: common
|
||||
tags: common
|
||||
- role: dns_encryption
|
||||
when: dns_encryption
|
||||
tags: dns_encryption
|
||||
- role: dns_adblocking
|
||||
when: algo_local_dns
|
||||
tags: dns_adblocking
|
||||
- role: wireguard
|
||||
when: wireguard_enabled
|
||||
tags: wireguard
|
||||
- role: strongswan
|
||||
when: ipsec_enabled
|
||||
tags: ipsec
|
||||
- role: ssh_tunneling
|
||||
when: algo_ssh_tunneling
|
||||
tags: ssh_tunneling
|
||||
|
||||
post_tasks:
|
||||
tasks:
|
||||
- block:
|
||||
- name: Delete the CA key
|
||||
local_action:
|
||||
module: file
|
||||
path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
state: absent
|
||||
become: false
|
||||
when:
|
||||
- ipsec_enabled
|
||||
- not algo_store_cakey
|
||||
- import_role:
|
||||
name: common
|
||||
tags: common
|
||||
|
||||
- name: Dump the configuration
|
||||
local_action:
|
||||
module: copy
|
||||
dest: "configs/{{ IP_subject_alt_name }}/.config.yml"
|
||||
content: |
|
||||
server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }}
|
||||
server_user: {{ ansible_ssh_user }}
|
||||
{% if algo_provider != "local" %}
|
||||
ansible_ssh_private_key_file: {{ ansible_ssh_private_key_file|default(SSH_keys.private) }}
|
||||
{% endif %}
|
||||
algo_provider: {{ algo_provider }}
|
||||
algo_server_name: {{ algo_server_name }}
|
||||
algo_ondemand_cellular: {{ algo_ondemand_cellular }}
|
||||
algo_ondemand_wifi: {{ algo_ondemand_wifi }}
|
||||
algo_ondemand_wifi_exclude: {{ algo_ondemand_wifi_exclude }}
|
||||
algo_local_dns: {{ algo_local_dns }}
|
||||
algo_ssh_tunneling: {{ algo_ssh_tunneling }}
|
||||
algo_windows: {{ algo_windows }}
|
||||
algo_store_cakey: {{ algo_store_cakey }}
|
||||
IP_subject_alt_name: {{ IP_subject_alt_name }}
|
||||
ipsec_enabled: {{ ipsec_enabled }}
|
||||
wireguard_enabled: {{ wireguard_enabled }}
|
||||
{% if tests|default(false)|bool %}ca_password: {{ CA_password }}{% endif %}
|
||||
become: false
|
||||
- import_role:
|
||||
name: dns_encryption
|
||||
when: dns_encryption
|
||||
tags: dns_encryption
|
||||
|
||||
- name: Create a symlink if deploying to localhost
|
||||
file:
|
||||
src: "{{ IP_subject_alt_name }}"
|
||||
dest: configs/localhost
|
||||
state: link
|
||||
force: true
|
||||
when: inventory_hostname == 'localhost'
|
||||
- import_role:
|
||||
name: dns_adblocking
|
||||
when: algo_local_dns
|
||||
tags: dns_adblocking
|
||||
|
||||
- debug:
|
||||
msg:
|
||||
- "{{ congrats.common.split('\n') }}"
|
||||
- " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}"
|
||||
- " {{ congrats.ca_key_pass if algo_store_cakey and ipsec_enabled else '' }}"
|
||||
- " {{ congrats.ssh_access if algo_provider != 'local' else ''}}"
|
||||
tags: always
|
||||
- import_role:
|
||||
name: wireguard
|
||||
when: wireguard_enabled
|
||||
tags: wireguard
|
||||
|
||||
- import_role:
|
||||
name: strongswan
|
||||
when: ipsec_enabled
|
||||
tags: ipsec
|
||||
|
||||
- import_role:
|
||||
name: ssh_tunneling
|
||||
when: algo_ssh_tunneling
|
||||
tags: ssh_tunneling
|
||||
|
||||
- block:
|
||||
- name: Dump the configuration
|
||||
copy:
|
||||
dest: "configs/{{ IP_subject_alt_name }}/.config.yml"
|
||||
content: |
|
||||
server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }}
|
||||
server_user: {{ ansible_ssh_user }}
|
||||
{% if algo_provider != "local" %}
|
||||
ansible_ssh_private_key_file: {{ ansible_ssh_private_key_file|default(SSH_keys.private) }}
|
||||
{% endif %}
|
||||
algo_provider: {{ algo_provider }}
|
||||
algo_server_name: {{ algo_server_name }}
|
||||
algo_ondemand_cellular: {{ algo_ondemand_cellular }}
|
||||
algo_ondemand_wifi: {{ algo_ondemand_wifi }}
|
||||
algo_ondemand_wifi_exclude: {{ algo_ondemand_wifi_exclude }}
|
||||
algo_local_dns: {{ algo_local_dns }}
|
||||
algo_ssh_tunneling: {{ algo_ssh_tunneling }}
|
||||
algo_windows: {{ algo_windows }}
|
||||
algo_store_cakey: {{ algo_store_cakey }}
|
||||
IP_subject_alt_name: {{ IP_subject_alt_name }}
|
||||
ipsec_enabled: {{ ipsec_enabled }}
|
||||
wireguard_enabled: {{ wireguard_enabled }}
|
||||
{% if tests|default(false)|bool %}
|
||||
ca_password: {{ CA_password }}
|
||||
p12_password: {{ p12_export_password }}
|
||||
{% endif %}
|
||||
become: false
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create a symlink if deploying to localhost
|
||||
file:
|
||||
src: "{{ IP_subject_alt_name }}"
|
||||
dest: configs/localhost
|
||||
state: link
|
||||
force: true
|
||||
when: inventory_hostname == 'localhost'
|
||||
|
||||
- debug:
|
||||
msg:
|
||||
- "{{ congrats.common.split('\n') }}"
|
||||
- " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}"
|
||||
- " {{ congrats.ca_key_pass if algo_store_cakey and ipsec_enabled else '' }}"
|
||||
- " {{ congrats.ssh_access if algo_provider != 'local' else ''}}"
|
||||
tags: always
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- include_tasks: playbooks/rescue.yml
|
||||
|
|
1
tests/algo.conf
Normal file
1
tests/algo.conf
Normal file
|
@ -0,0 +1 @@
|
|||
dhcp-host=algo,10.0.8.100
|
18
tests/cloud-init.sh
Executable file
18
tests/cloud-init.sh
Executable file
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
|
||||
echo "#!/bin/bash
|
||||
export METHOD=local
|
||||
export ONDEMAND_CELLULAR=true
|
||||
export ONDEMAND_WIFI=true
|
||||
export ONDEMAND_WIFI_EXCLUDE=test1,test2
|
||||
export WINDOWS=true
|
||||
export STORE_CAKEY=true
|
||||
export LOCAL_DNS=true
|
||||
export SSH_TUNNELING=true
|
||||
export ENDPOINT=10.0.8.100
|
||||
export USERS=desktop,user1,user2
|
||||
export EXTRA_VARS='install_headers=false tests=true apparmor_enabled=false local_service_ip=172.16.0.1'
|
||||
export ANSIBLE_EXTRA_ARGS='--skip-tags apparmor'
|
||||
export REPO_SLUG=${TRAVIS_PULL_REQUEST_SLUG:-${TRAVIS_REPO_SLUG:-trailofbits/algo}}
|
||||
export REPO_BRANCH=${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH:-master}}
|
||||
|
||||
curl -s https://raw.githubusercontent.com/${TRAVIS_PULL_REQUEST_SLUG:-${TRAVIS_REPO_SLUG}}/${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}/install.sh | sudo -E bash -x"
|
25
tests/ipsec-client.sh
Executable file
25
tests/ipsec-client.sh
Executable file
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
xmllint --noout ./configs/10.0.8.100/ipsec/apple/user1.mobileconfig
|
||||
|
||||
ansible-playbook deploy_client.yml \
|
||||
-e client_ip=localhost \
|
||||
-e vpn_user=desktop \
|
||||
-e server_ip=10.0.8.100 \
|
||||
-e rightsubnet='172.16.0.1/32'
|
||||
|
||||
ipsec up algovpn-10.0.8.100
|
||||
|
||||
ipsec statusall
|
||||
|
||||
ipsec statusall | grep -w ^algovpn-10.0.8.100 | grep -w ESTABLISHED
|
||||
|
||||
fping -t 900 -c3 -r3 -Dse 10.0.8.100 172.16.0.1
|
||||
|
||||
host google.com 172.16.0.1
|
||||
|
||||
echo "IPsec tests passed"
|
||||
|
||||
ipsec down algovpn-10.0.8.100
|
|
@ -2,11 +2,11 @@
|
|||
|
||||
set -ex
|
||||
|
||||
DEPLOY_ARGS="provider=local server=$LXC_IP ssh_user=ubuntu endpoint=$LXC_IP apparmor_enabled=false ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test local_dns=true ssh_tunneling=true windows=true store_cakey=true install_headers=false tests=true"
|
||||
DEPLOY_ARGS="provider=local server=10.0.8.100 ssh_user=ubuntu endpoint=10.0.8.100 apparmor_enabled=false ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test local_dns=true ssh_tunneling=true windows=true store_cakey=true install_headers=false tests=true local_service_ip=172.16.0.1"
|
||||
|
||||
if [ "${LXC_NAME}" == "docker" ]
|
||||
if [ "${DEPLOY}" == "docker" ]
|
||||
then
|
||||
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "DEPLOY_ARGS=${DEPLOY_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook main.yml -e \"${DEPLOY_ARGS}\" --skip-tags apparmor"
|
||||
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "DEPLOY_ARGS=${DEPLOY_ARGS}" travis/algo /bin/sh -c "chown -R root: /root/.ssh && chmod -R 600 /root/.ssh && source env/bin/activate && ansible-playbook main.yml -e \"${DEPLOY_ARGS}\" --skip-tags apparmor"
|
||||
else
|
||||
ansible-playbook main.yml -e "${DEPLOY_ARGS}" --skip-tags apparmor
|
||||
fi
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
USE_LXD_BRIDGE="true"
|
||||
LXD_BRIDGE="lxdbr0"
|
||||
UPDATE_PROFILE="true"
|
||||
LXD_CONFILE=""
|
||||
LXD_CONFILE="/etc/default/algo.conf"
|
||||
LXD_DOMAIN="lxd"
|
||||
LXD_IPV4_ADDR="10.0.8.1"
|
||||
LXD_IPV4_NETMASK="255.255.255.0"
|
||||
|
@ -13,4 +13,4 @@ LXD_IPV6_ADDR=""
|
|||
LXD_IPV6_MASK=""
|
||||
LXD_IPV6_NETWORK=""
|
||||
LXD_IPV6_NAT="false"
|
||||
LXD_IPV6_PROXY="true"
|
||||
LXD_IPV6_PROXY="false"
|
||||
|
|
30
tests/pre-deploy.sh
Executable file
30
tests/pre-deploy.sh
Executable file
|
@ -0,0 +1,30 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
sysctl net.ipv6.conf.all.disable_ipv6=0
|
||||
|
||||
tar xf $HOME/lxc/cache.tar -C / || echo "Didn't extract cache."
|
||||
cp -f tests/lxd-bridge /etc/default/lxd-bridge
|
||||
cp -f tests/algo.conf /etc/default/algo.conf
|
||||
|
||||
if [[ "$DEPLOY" == "cloud-init" ]]; then
|
||||
bash tests/cloud-init.sh | lxc profile set default user.user-data -
|
||||
else
|
||||
echo -e "#cloud-config\nssh_authorized_keys:\n - $(cat ~/.ssh/id_rsa.pub)" | lxc profile set default user.user-data -
|
||||
fi
|
||||
|
||||
systemctl restart lxd-bridge.service lxd-containers.service lxd.service
|
||||
|
||||
lxc profile set default raw.lxc lxc.aa_profile=unconfined
|
||||
lxc profile set default security.privileged true
|
||||
lxc profile show default
|
||||
lxc launch ubuntu:18.04 algo
|
||||
|
||||
ip addr
|
||||
|
||||
until dig A +short algo.lxd @10.0.8.1 | grep -vE '^$' > /dev/null; do
|
||||
sleep 3
|
||||
done
|
||||
|
||||
lxc list
|
15
tests/ssh-tunnel.sh
Executable file
15
tests/ssh-tunnel.sh
Executable file
|
@ -0,0 +1,15 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
PASS=$(grep ^p12_password: configs/10.0.8.100/.config.yml | awk '{print $2}')
|
||||
|
||||
ssh-keygen -p -P ${PASS} -N '' -f configs/10.0.8.100/ssh-tunnel/desktop.pem
|
||||
|
||||
ssh -o StrictHostKeyChecking=no -D 127.0.0.1:1080 -f -q -C -N desktop@10.0.8.100 -i configs/10.0.8.100/ssh-tunnel/desktop.pem
|
||||
|
||||
git config --global http.proxy 'socks5://127.0.0.1:1080'
|
||||
|
||||
git clone -vv https://github.com/trailofbits/algo /tmp/ssh-tunnel-check
|
||||
|
||||
echo "SSH tunneling tests passed"
|
|
@ -2,11 +2,11 @@
|
|||
|
||||
set -ex
|
||||
|
||||
USER_ARGS="{ 'server': '$LXC_IP', 'users': ['user1', 'user2'] }"
|
||||
USER_ARGS="{ 'server': '10.0.8.100', 'users': ['desktop', 'user1', 'user2'], 'local_service_ip': '172.16.0.1' }"
|
||||
|
||||
if [ "${LXC_NAME}" == "docker" ]
|
||||
if [ "${DEPLOY}" == "docker" ]
|
||||
then
|
||||
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "USER_ARGS=${USER_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook users.yml -e \"${USER_ARGS}\" -t update-users"
|
||||
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "USER_ARGS=${USER_ARGS}" travis/algo /bin/sh -c "chown -R root: /root/.ssh && chmod -R 600 /root/.ssh && source env/bin/activate && ansible-playbook users.yml -e \"${USER_ARGS}\" -t update-users"
|
||||
else
|
||||
ansible-playbook users.yml -e "${USER_ARGS}" -t update-users
|
||||
fi
|
||||
|
@ -15,7 +15,7 @@ fi
|
|||
# IPsec
|
||||
#
|
||||
|
||||
if sudo openssl crl -inform pem -noout -text -in configs/$LXC_IP/ipsec/.pki/crl/phone.crt | grep CRL
|
||||
if sudo openssl crl -inform pem -noout -text -in configs/10.0.8.100/ipsec/.pki/crl/phone.crt | grep CRL
|
||||
then
|
||||
echo "The CRL check passed"
|
||||
else
|
||||
|
@ -23,7 +23,7 @@ if sudo openssl crl -inform pem -noout -text -in configs/$LXC_IP/ipsec/.pki/crl/
|
|||
exit 1
|
||||
fi
|
||||
|
||||
if sudo openssl x509 -inform pem -noout -text -in configs/$LXC_IP/ipsec/.pki/certs/user1.crt | grep CN=user1
|
||||
if sudo openssl x509 -inform pem -noout -text -in configs/10.0.8.100/ipsec/.pki/certs/user1.crt | grep CN=user1
|
||||
then
|
||||
echo "The new user exists"
|
||||
else
|
||||
|
@ -35,7 +35,7 @@ fi
|
|||
# WireGuard
|
||||
#
|
||||
|
||||
if sudo test -f configs/$LXC_IP/wireguard/user1.conf
|
||||
if sudo test -f configs/10.0.8.100/wireguard/user1.conf
|
||||
then
|
||||
echo "WireGuard: The new user exists"
|
||||
else
|
||||
|
@ -47,7 +47,7 @@ fi
|
|||
# SSH tunneling
|
||||
#
|
||||
|
||||
if sudo test -f configs/$LXC_IP/ssh-tunnel/user1.ssh_config
|
||||
if sudo test -f configs/10.0.8.100/ssh-tunnel/user1.ssh_config
|
||||
then
|
||||
echo "SSH Tunneling: The new user exists"
|
||||
else
|
||||
|
|
23
tests/wireguard-client.sh
Executable file
23
tests/wireguard-client.sh
Executable file
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
crudini --set configs/10.0.8.100/wireguard/user1.conf Interface Table off
|
||||
|
||||
wg-quick up configs/10.0.8.100/wireguard/user1.conf
|
||||
|
||||
wg
|
||||
|
||||
ifconfig user1
|
||||
|
||||
ip route add 172.16.0.1/32 dev user1
|
||||
|
||||
fping -t 900 -c3 -r3 -Dse 10.0.8.100 172.16.0.1
|
||||
|
||||
wg | grep "latest handshake"
|
||||
|
||||
host google.com 172.16.0.1
|
||||
|
||||
echo "WireGuard tests passed"
|
||||
|
||||
wg-quick down configs/10.0.8.100/wireguard/user1.conf
|
70
users.yml
70
users.yml
|
@ -7,7 +7,8 @@
|
|||
|
||||
tasks:
|
||||
- block:
|
||||
- pause:
|
||||
- name: Server address prompt
|
||||
pause:
|
||||
prompt: "Enter the IP address of your server: (or use localhost for local installation)"
|
||||
register: _server
|
||||
when: server is undefined
|
||||
|
@ -16,14 +17,15 @@
|
|||
set_fact:
|
||||
algo_server: >-
|
||||
{% if server is defined %}{{ server }}
|
||||
{%- elif _server.user_input is defined and _server.user_input != "" %}{{ _server.user_input }}
|
||||
{%- elif _server.user_input %}{{ _server.user_input }}
|
||||
{%- else %}omit{% endif %}
|
||||
|
||||
- name: Import host specific variables
|
||||
include_vars:
|
||||
file: "configs/{{ algo_server }}/.config.yml"
|
||||
|
||||
- pause:
|
||||
- name: CA password prompt
|
||||
pause:
|
||||
prompt: Enter the password for the private CA key
|
||||
echo: false
|
||||
register: _ca_password
|
||||
|
@ -35,9 +37,13 @@
|
|||
set_fact:
|
||||
CA_password: >-
|
||||
{% if ca_password is defined %}{{ ca_password }}
|
||||
{%- elif _ca_password.user_input is defined and _ca_password.user_input != "" %}{{ _ca_password.user_input }}
|
||||
{%- elif _ca_password.user_input %}{{ _ca_password.user_input }}
|
||||
{%- else %}omit{% endif %}
|
||||
|
||||
- name: Local pre-tasks
|
||||
import_tasks: playbooks/cloud-pre.yml
|
||||
become: false
|
||||
|
||||
- name: Add the server to the vpn-host group
|
||||
add_host:
|
||||
name: "{{ algo_server }}"
|
||||
|
@ -47,10 +53,7 @@
|
|||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
CA_password: "{{ CA_password }}"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- include_tasks: playbooks/rescue.yml
|
||||
|
||||
- name: User management
|
||||
hosts: vpn-host
|
||||
|
@ -60,37 +63,28 @@
|
|||
- config.cfg
|
||||
- "configs/{{ inventory_hostname }}/.config.yml"
|
||||
|
||||
pre_tasks:
|
||||
tasks:
|
||||
- block:
|
||||
- name: Local pre-tasks
|
||||
import_tasks: playbooks/cloud-pre.yml
|
||||
become: false
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- import_role:
|
||||
name: common
|
||||
|
||||
roles:
|
||||
- role: common
|
||||
- role: wireguard
|
||||
tags: [ 'vpn', 'wireguard' ]
|
||||
when: wireguard_enabled
|
||||
- role: strongswan
|
||||
when: ipsec_enabled
|
||||
tags: ipsec
|
||||
- role: ssh_tunneling
|
||||
when: algo_ssh_tunneling
|
||||
- import_role:
|
||||
name: wireguard
|
||||
when: wireguard_enabled
|
||||
|
||||
post_tasks:
|
||||
- block:
|
||||
- debug:
|
||||
msg:
|
||||
- "{{ congrats.common.split('\n') }}"
|
||||
- " {% if p12.changed %}{{ congrats.p12_pass }}{% endif %}"
|
||||
tags: always
|
||||
- import_role:
|
||||
name: strongswan
|
||||
when: ipsec_enabled
|
||||
tags: ipsec
|
||||
|
||||
- import_role:
|
||||
name: ssh_tunneling
|
||||
when: algo_ssh_tunneling
|
||||
|
||||
- debug:
|
||||
msg:
|
||||
- "{{ congrats.common.split('\n') }}"
|
||||
- " {% if p12.changed %}{{ congrats.p12_pass }}{% endif %}"
|
||||
tags: always
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- include_tasks: playbooks/rescue.yml
|
||||
|
|
Loading…
Add table
Reference in a new issue