Compare commits

..

No commits in common. "master" and "v1.1" have entirely different histories.
master ... v1.1

174 changed files with 3220 additions and 4640 deletions

View file

@ -1,10 +1,3 @@
skip_list: skip_list:
- yaml
- '204' - '204'
verbosity: 1 verbosity: 1
warn_list:
- no-changed-when
- no-handler
- fqcn-builtins
- var-spacing

View file

@ -9,10 +9,5 @@ README.md
config.cfg config.cfg
configs configs
docs docs
.env
logo.png logo.png
tests tests
CHANGELOG.md
PULL_REQUEST_TEMPLATE.md
Vagrantfile
Makefile

View file

@ -1,13 +0,0 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
# Maintain dependencies for Python
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "daily"

View file

@ -1,44 +0,0 @@
name: Create and publish a Docker image
on:
push:
branches: ['master']
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
build-and-push-image:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
# set latest tag for master branch
type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

View file

@ -1,153 +0,0 @@
name: Main
on: [push, pull_request]
jobs:
lint:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v2.3.2
with:
python-version: '3.11'
cache: 'pip'
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
sudo apt update -y
python -m pip install --upgrade pip
pip install -r requirements.txt
sudo snap install shellcheck
pip install ansible-lint
- name: Checks and linters
run: |
/snap/bin/shellcheck algo install.sh
ansible-playbook main.yml --syntax-check
ansible-lint -x experimental,package-latest,unnamed-task -v *.yml roles/{local,cloud-*}/*/*.yml || true
scripted-deploy:
runs-on: ubuntu-20.04
strategy:
matrix:
UBUNTU_VERSION: ["22.04"]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v2.3.2
with:
python-version: '3.11'
cache: 'pip'
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
sudo apt update -y
sudo apt install -y \
wireguard \
libxml2-utils \
crudini \
fping \
strongswan \
libstrongswan-standard-plugins \
openresolv
python3 -m pip install --upgrade pip
python3 -m pip install -r requirements.txt
sudo snap refresh lxd
sudo lxd init --auto
- name: Provision
env:
DEPLOY: cloud-init
UBUNTU_VERSION: ${{ matrix.UBUNTU_VERSION }}
REPOSITORY: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name || github.repository }}
BRANCH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref || github.ref }}
run: |
ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
# sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 80/" config.cfg
sudo -E ./tests/pre-deploy.sh
- name: Deployment
run: |
set -x
until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done
( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & )
until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do
echo 'Cloud init is not finished. Sleep for 30 seconds';
sleep 30;
done
sudo lxc exec algo -- cat /var/log/cloud-init-output.log
sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml
sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ .
sudo lxc file pull algo/root/algo-configs.tar ./
sudo tar -C ./configs -zxf algo-configs.tar
- name: Tests
run: |
set -x
sudo -E bash -x ./tests/wireguard-client.sh
sudo env "PATH=$PATH" ./tests/ipsec-client.sh
docker-deploy:
runs-on: ubuntu-20.04
strategy:
matrix:
UBUNTU_VERSION: ["22.04"]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v2.3.2
with:
python-version: '3.11'
cache: 'pip'
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
set -x
sudo apt update -y
sudo apt install -y \
wireguard \
libxml2-utils \
crudini \
fping \
strongswan \
libstrongswan-standard-plugins \
openresolv
python3 -m pip install --upgrade pip
python3 -m pip install -r requirements.txt
sudo snap refresh lxd
sudo lxd init --auto
- name: Provision
env:
DEPLOY: docker
UBUNTU_VERSION: ${{ matrix.UBUNTU_VERSION }}
REPOSITORY: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name || github.repository }}
BRANCH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref || github.ref }}
run: |
ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 80/" config.cfg
sudo -E ./tests/pre-deploy.sh
- name: Deployment
env:
DEPLOY: docker
UBUNTU_VERSION: ${{ matrix.UBUNTU_VERSION }}
run: |
docker build -t local/algo .
./tests/local-deploy.sh
./tests/update-users.sh
- name: Tests
run: |
set -x
sudo bash -x ./tests/wireguard-client.sh
sudo env "PATH=$PATH" bash -x ./tests/ipsec-client.sh
sudo bash -x ./tests/ssh-tunnel.sh

3
.gitignore vendored
View file

@ -3,8 +3,7 @@
configs/* configs/*
inventory_users inventory_users
*.kate-swp *.kate-swp
*env env
.DS_Store .DS_Store
venvs/* venvs/*
!venvs/.gitinit !venvs/.gitinit
.vagrant

123
.travis.yml Normal file
View file

@ -0,0 +1,123 @@
---
language: python
python: "2.7"
dist: xenial
services:
- docker
addons:
apt:
sources: &default_sources
- sourceline: 'ppa:ubuntu-lxc/stable'
- sourceline: 'ppa:wireguard/wireguard'
packages: &default_packages
- python-pip
- lxd
- expect-dev
- debootstrap
- tree
- bridge-utils
- dnsutils
- build-essential
- libssl-dev
- libffi-dev
- python-dev
- linux-headers-$(uname -r)
- wireguard
- libxml2-utils
- crudini
- fping
- strongswan
- libstrongswan-standard-plugins
cache:
directories:
- $HOME/lxc/
pip: true
before_cache:
- mkdir $HOME/lxc
- sudo tar cf $HOME/lxc/cache.tar /var/lib/lxd/images/
- sudo chown $USER. $HOME/lxc/cache.tar
custom_scripts:
provisioning: &provisioning
- ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
- sudo ./tests/pre-deploy.sh
- 'sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 20/" config.cfg'
tests: &tests
- sudo ./tests/wireguard-client.sh
- sudo env "PATH=$PATH" ./tests/ipsec-client.sh
- sudo ./tests/ssh-tunnel.sh
stages:
- &tests-and-linters
stage: Tests
name: code checks and linters
addons:
apt:
packages:
- shellcheck
script:
- pip install ansible-lint
- shellcheck algo install.sh
- ansible-playbook main.yml --syntax-check
- ansible-lint -v *.yml
- &deploy-local
stage: Deploy
name: local deployment from docker
addons:
apt:
sources: *default_sources
packages: *default_packages
before_install: *provisioning
before_script:
- docker build -t travis/algo .
- ./tests/local-deploy.sh
- ./tests/update-users.sh
script: *tests
- &deploy-cloudinit
stage: Deploy
name: cloud-init deployment
addons:
apt:
sources: *default_sources
packages: *default_packages
env: DEPLOY=cloud-init
before_install: *provisioning
before_script:
- until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done
- ( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & )
- |
until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do
echo 'Cloud init is not finished. Sleep for 30 seconds';
sleep 30;
done
- sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml
- sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ .
- sudo lxc file pull algo/root/algo-configs.tar ./
- sudo tar -C ./configs -zxf algo-configs.tar
script: *tests
matrix:
fast_finish: true
include:
- <<: *tests-and-linters
- <<: *deploy-local
name: 'Ubuntu 18.04: local deployment from docker'
env: DEPLOY=docker UBUNTU_VERSION=18.04
- <<: *deploy-local
name: 'Ubuntu 19.04: local deployment from docker'
env: DEPLOY=docker UBUNTU_VERSION=19.04
- <<: *deploy-cloudinit
name: 'Ubuntu 18.04: cloud-init deployment'
env: DEPLOY=cloud-init UBUNTU_VERSION=18.04
- <<: *deploy-cloudinit
name: 'Ubuntu 19.04: cloud-init deployment'
env: DEPLOY=cloud-init UBUNTU_VERSION=19.04
notifications:
email: false

View file

@ -1,34 +1,7 @@
## 1.2 [(Unreleased)](https://github.com/trailofbits/algo/tree/HEAD) ## 1.2 [(Unreleased)](https://github.com/trailofbits/algo/tree/HEAD)
### Added
- New provider CloudStack added [\#1420](https://github.com/trailofbits/algo/pull/1420)
- Support for Ubuntu 20.04 [\#1782](https://github.com/trailofbits/algo/pull/1782)
- Allow WireGuard to listen on port 53 [\#1594](https://github.com/trailofbits/algo/pull/1594)
- Introducing Makefile [\#1553](https://github.com/trailofbits/algo/pull/1553)
- Option to unblock SMB and Netbios [\#1558](https://github.com/trailofbits/algo/pull/1558)
- Allow OnDemand to be toggled later [\#1557](https://github.com/trailofbits/algo/pull/1557)
- New provider Hetzner added [\#1549](https://github.com/trailofbits/algo/pull/1549)
- Alternative Ingress IP [\#1605](https://github.com/trailofbits/algo/pull/1605)
### Fixes ## 1.1 [(Jul 31, 2019)](https://github.com/trailofbits/algo/tree/v1.1)
- WSL private SSH key permissions [\#1584](https://github.com/trailofbits/algo/pull/1584)
- Scaleway instance creating issue [\#1549](https://github.com/trailofbits/algo/pull/1549)
### Changed
- Discontinue use of the WireGuard PPA [\#1855](https://github.com/trailofbits/algo/pull/1855)
- SSH changes [\#1636](https://github.com/trailofbits/algo/pull/1636)
- Default port is set to `4160` and can be changed in the config
- SSH user for every cloud provider is `algo`
- EC2: enable EBS encryption by default [\#1556](https://github.com/trailofbits/algo/pull/1556)
- Upgrades [\#1549](https://github.com/trailofbits/algo/pull/1549)
- Python 3
- Ansible 2.9 [\#1777](https://github.com/trailofbits/algo/pull/1777)
### Breaking changes
- Python virtual environment moved to .env [\#1549](https://github.com/trailofbits/algo/pull/1549)
## 1.1 [(Jul 31, 2019)](https://github.com/trailofbits/algo/releases/tag/v1.1)
### Removed ### Removed
- IKEv2 for Windows is now deleted, use Wireguard [\#1493](https://github.com/trailofbits/algo/issues/1493) - IKEv2 for Windows is now deleted, use Wireguard [\#1493](https://github.com/trailofbits/algo/issues/1493)
@ -63,7 +36,7 @@
- Simplify Apple Profile Configuration Template [\#1033](https://github.com/trailofbits/algo/pull/1033) ([faf0](https://github.com/faf0)) - Simplify Apple Profile Configuration Template [\#1033](https://github.com/trailofbits/algo/pull/1033) ([faf0](https://github.com/faf0))
- Include roles as separate tasks [\#1365](https://github.com/trailofbits/algo/pull/1365) ([jackivanov](https://github.com/jackivanov)) - Include roles as separate tasks [\#1365](https://github.com/trailofbits/algo/pull/1365) ([jackivanov](https://github.com/jackivanov))
## 1.0 [(Mar 19, 2019)](https://github.com/trailofbits/algo/releases/tag/v1.0) ## 1.0 [(Mar 19, 2019)](https://github.com/trailofbits/algo/tree/v1.0)
### Added ### Added
- Tagged releases and changelog [\#724](https://github.com/trailofbits/algo/issues/724) - Tagged releases and changelog [\#724](https://github.com/trailofbits/algo/issues/724)

View file

@ -1 +0,0 @@
* @jackivanov

View file

@ -1,7 +1,8 @@
FROM python:3.11-alpine FROM python:2-alpine
ARG VERSION="git" ARG VERSION="git"
ARG PACKAGES="bash libffi openssh-client openssl rsync tini gcc libffi-dev linux-headers make musl-dev openssl-dev rust cargo" ARG PACKAGES="bash libffi openssh-client openssl rsync tini"
ARG BUILD_PACKAGES="gcc libffi-dev linux-headers make musl-dev openssl-dev"
LABEL name="algo" \ LABEL name="algo" \
version="${VERSION}" \ version="${VERSION}" \
@ -14,11 +15,13 @@ RUN mkdir -p /algo && mkdir -p /algo/configs
WORKDIR /algo WORKDIR /algo
COPY requirements.txt . COPY requirements.txt .
RUN python3 -m pip --no-cache-dir install -U pip && \ RUN apk --no-cache add ${BUILD_PACKAGES} && \
python3 -m pip --no-cache-dir install virtualenv && \ python -m pip --no-cache-dir install -U pip && \
python3 -m virtualenv .env && \ python -m pip --no-cache-dir install virtualenv && \
source .env/bin/activate && \ python -m virtualenv env && \
python3 -m pip --no-cache-dir install -r requirements.txt source env/bin/activate && \
python -m pip --no-cache-dir install -r requirements.txt && \
apk del ${BUILD_PACKAGES}
COPY . . COPY . .
RUN chmod 0755 /algo/algo-docker.sh RUN chmod 0755 /algo/algo-docker.sh

View file

@ -1,39 +0,0 @@
## docker-build: Build and tag a docker image
.PHONY: docker-build
IMAGE := trailofbits/algo
TAG := latest
DOCKERFILE := Dockerfile
CONFIGURATIONS := $(shell pwd)
docker-build:
docker build \
-t $(IMAGE):$(TAG) \
-f $(DOCKERFILE) \
.
## docker-deploy: Mount config directory and deploy Algo
.PHONY: docker-deploy
# '--rm' flag removes the container when finished.
docker-deploy:
docker run \
--cap-drop=all \
--rm \
-it \
-v $(CONFIGURATIONS):/data \
$(IMAGE):$(TAG)
## docker-clean: Remove images and containers.
.PHONY: docker-prune
docker-prune:
docker images \
$(IMAGE) |\
awk '{if (NR>1) print $$3}' |\
xargs docker rmi
## docker-all: Build, Deploy, Prune
.PHONY: docker-all
docker-all: docker-build docker-deploy docker-prune

View file

@ -14,9 +14,9 @@
## Types of changes ## Types of changes
<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: --> <!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
- Bug fix (non-breaking change which fixes an issue) - [] Bug fix (non-breaking change which fixes an issue)
- New feature (non-breaking change which adds functionality) - [] New feature (non-breaking change which adds functionality)
- Breaking change (fix or feature that would cause existing functionality to not work as expected) - [] Breaking change (fix or feature that would cause existing functionality to not work as expected)
## Checklist: ## Checklist:
<!--- Go over all the following points, and put an `x` in all the boxes that apply. --> <!--- Go over all the following points, and put an `x` in all the boxes that apply. -->

184
README.md
View file

@ -1,88 +1,75 @@
# Algo VPN # Algo VPN
[![Join the chat at https://gitter.im/trailofbits/algo](https://badges.gitter.im/trailofbits/algo.svg)](https://gitter.im/trailofbits/algo?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/fold_left.svg?style=social&label=Follow%20%40AlgoVPN)](https://twitter.com/AlgoVPN) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/fold_left.svg?style=social&label=Follow%20%40AlgoVPN)](https://twitter.com/AlgoVPN)
[![](https://github.com/trailofbits/algo/workflows/Main/badge.svg?branch=master)](https://github.com/trailofbits/algo/actions) [![TravisCI Status](https://api.travis-ci.org/trailofbits/algo.svg?branch=master)](https://travis-ci.org/trailofbits/algo)
Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireGuard and IPsec VPN. It uses the most secure defaults available and works with common cloud providers. See our [release announcement](https://blog.trailofbits.com/2016/12/12/meet-algo-the-vpn-that-works/) for more information. Algo VPN is a set of Ansible scripts that simplify the setup of a personal IPSEC and Wireguard VPN. It uses the most secure defaults available, works with common cloud providers, and does not require client software on most devices. See our [release announcement](https://blog.trailofbits.com/2016/12/12/meet-algo-the-vpn-that-works/) for more information.
## Features ## Features
* Supports only IKEv2 with strong crypto (AES-GCM, SHA2, and P-256) for iOS, macOS, and Linux * Supports only IKEv2 with strong crypto (AES-GCM, SHA2, and P-256) and [WireGuard](https://www.wireguard.com/)
* Supports [WireGuard](https://www.wireguard.com/) for all of the above, in addition to Android and Windows 10 * Generates Apple profiles to auto-configure iOS and macOS devices
* Generates .conf files and QR codes for iOS, macOS, Android, and Windows WireGuard clients
* Generates Apple profiles to auto-configure iOS and macOS devices for IPsec - no client software required
* Includes a helper script to add and remove users * Includes a helper script to add and remove users
* Blocks ads with a local DNS resolver (optional) * Blocks ads with a local DNS resolver (optional)
* Sets up limited SSH users for tunneling traffic (optional) * Sets up limited SSH users for tunneling traffic (optional)
* Based on current versions of Ubuntu and strongSwan * Based on current versions of Ubuntu and strongSwan
* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, Linode, or [your own Ubuntu server (for more advanced users)](docs/deploy-to-ubuntu.md) * Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, or [your own Ubuntu server](docs/deploy-to-ubuntu.md)
## Anti-features ## Anti-features
* Does not support legacy cipher suites or protocols like L2TP, IKEv1, or RSA * Does not support legacy cipher suites or protocols like L2TP, IKEv1, or RSA
* Does not install Tor, OpenVPN, or other risky servers * Does not install Tor, OpenVPN, or other risky servers
* Does not depend on the security of [TLS](https://tools.ietf.org/html/rfc7457) * Does not depend on the security of [TLS](https://tools.ietf.org/html/rfc7457)
* Does not require client software on most platforms
* Does not claim to provide anonymity or censorship avoidance * Does not claim to provide anonymity or censorship avoidance
* Does not claim to protect you from the [FSB](https://en.wikipedia.org/wiki/Federal_Security_Service), [MSS](https://en.wikipedia.org/wiki/Ministry_of_State_Security_(China)), [DGSE](https://en.wikipedia.org/wiki/Directorate-General_for_External_Security), or [FSM](https://en.wikipedia.org/wiki/Flying_Spaghetti_Monster) * Does not claim to protect you from the [FSB](https://en.wikipedia.org/wiki/Federal_Security_Service), [MSS](https://en.wikipedia.org/wiki/Ministry_of_State_Security_(China)), [DGSE](https://en.wikipedia.org/wiki/Directorate-General_for_External_Security), or [FSM](https://en.wikipedia.org/wiki/Flying_Spaghetti_Monster)
## Deploy the Algo Server ## Deploy the Algo Server
The easiest way to get an Algo server running is to run it on your local system or from [Google Cloud Shell](docs/deploy-from-cloudshell.md) and let it set up a _new_ virtual machine in the cloud for you. The easiest way to get an Algo server running is to let it set up a _new_ virtual machine in the cloud for you.
1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/), [Linode](https://www.linode.com), or other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/). 1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), and [DreamCompute](https://www.dreamhost.com/cloud/computing/) or other OpenStack-based cloud hosting.
2. **Get a copy of Algo.** The Algo scripts will be installed on your local system. There are two ways to get a copy: 2. **[Download Algo](https://github.com/trailofbits/algo/archive/master.zip).** Unzip it in a convenient location on your local machine.
- Download the [ZIP file](https://github.com/trailofbits/algo/archive/master.zip). Unzip the file to create a directory named `algo-master` containing the Algo scripts. 3. **Install Algo's core dependencies.** Open the Terminal. The `python` interpreter you use to deploy Algo must be python2. If you don't know what this means, you're probably fine. `cd` into the `algo-master` directory where you unzipped Algo, then run:
- Use `git clone` to create a directory named `algo` containing the Algo scripts: - macOS:
```bash ```bash
git clone https://github.com/trailofbits/algo.git $ python -m ensurepip --user
``` $ python -m pip install --user --upgrade virtualenv
```
- Linux (deb-based):
```bash
$ sudo apt-get update && sudo apt-get install \
build-essential \
libssl-dev \
libffi-dev \
python-dev \
python-pip \
python-setuptools \
python-virtualenv -y
```
- Linux (rpm-based): See the pre-installation documentation for [RedHat/CentOS 6.x](docs/deploy-from-redhat-centos6.md) or [Fedora](docs/deploy-from-fedora-workstation.md)
- Windows: See the [Windows documentation](docs/deploy-from-windows.md)
3. **Install Algo's core dependencies.** Algo requires that **Python 3.10 or later** and at least one supporting package are installed on your system. 4. **Install Algo's remaining dependencies.** Use the same Terminal window as the previous step and run:
- **macOS:** Catalina (10.15) and higher includes Python 3 as part of the optional Command Line Developer Tools package. From Terminal run:
```bash
python3 -m pip install --user --upgrade virtualenv
```
If prompted, install the Command Line Developer Tools and re-run the above command.
For macOS versions prior to Catalina, see [Deploy from macOS](docs/deploy-from-macos.md) for information on installing Python 3 .
- **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. If your Python version is not 3.10, then you will need to use pyenv to install Python 3.10. Make sure your system is up-to-date and install the supporting package(s):
* Ubuntu and Debian:
```bash
sudo apt install -y --no-install-recommends python3-virtualenv file lookup
```
On a Raspberry Pi running Ubuntu also install `libffi-dev` and `libssl-dev`.
* Fedora:
```bash
sudo dnf install -y python3-virtualenv
```
- **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md) for more information.
4. **Install Algo's remaining dependencies.** You'll need to run these commands from the Algo directory each time you download a new copy of Algo. In a Terminal window `cd` into the `algo-master` (ZIP file) or `algo` (`git clone`) directory and run:
```bash ```bash
python3 -m virtualenv --python="$(command -v python3)" .env && $ python -m virtualenv --python=`which python2` env &&
source .env/bin/activate && source env/bin/activate &&
python3 -m pip install -U pip virtualenv && python -m pip install -U pip virtualenv &&
python3 -m pip install -r requirements.txt python -m pip install -r requirements.txt
``` ```
On Fedora first run `export TMPDIR=/var/tmp`, then add the option `--system-site-packages` to the first command above (after `python3 -m virtualenv`). On macOS install the C compiler if prompted. On macOS, you may be prompted to install `cc`. You should press accept if so.
5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN. 5. **List the users to create.** Open `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. If you want to be able to add or delete users later, you **must** select `yes` for the `Do you want to retain the CA key?` prompt during the deployment. Make a unique user for each device you plan to setup.
> Note: [IKEv2 Only] If you want to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the server deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).
6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available, none of which are required for a fully functional VPN server. These optional features are described in greater detail in [here](docs/deploy-from-ansible.md). 6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available. None are required for a fully functional VPN server. These optional features are described in greater detail in [deploy-from-ansible.md](docs/deploy-from-ansible.md).
That's it! You will get the message below when the server deployment process completes. Take note of the p12 (user certificate) password and the CA key in case you need them later, **they will only be displayed this time**. That's it! You will get the message below when the server deployment process completes. You now have an Algo server on the internet. Take note of the p12 (user certificate) password and the CA key in case you need them later, **they will only be displayed this time**.
You can now set up clients to connect to your VPN. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below. You can now setup clients to connect it, e.g. your iPhone or laptop. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below.
``` ```
"# Congratulations! #" "# Congratulations! #"
@ -93,7 +80,7 @@ You can now set up clients to connect to your VPN. Proceed to [Configure the VPN
"# Local DNS resolver 172.16.0.1 #" "# Local DNS resolver 172.16.0.1 #"
"# The p12 and SSH keys password for new users is XXXXXXXX #" "# The p12 and SSH keys password for new users is XXXXXXXX #"
"# The CA key password is XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #" "# The CA key password is XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #"
"# Shell access: ssh -F configs/<server_ip>/ssh_config <hostname> #" "# Shell access: ssh -i configs/algo.pem root@xxx.xxx.xx.xx #"
``` ```
## Configure the VPN Clients ## Configure the VPN Clients
@ -124,17 +111,36 @@ WireGuard is used to provide VPN services on Windows. Algo generates a WireGuard
Install the [WireGuard VPN Client](https://www.wireguard.com/install/#windows-7-8-81-10-2012-2016-2019). Import the generated `wireguard/<username>.conf` file to your device, then setup a new connection with it. Install the [WireGuard VPN Client](https://www.wireguard.com/install/#windows-7-8-81-10-2012-2016-2019). Import the generated `wireguard/<username>.conf` file to your device, then setup a new connection with it.
### Linux WireGuard Clients ### Linux Network Manager Clients (e.g., Ubuntu, Debian, or Fedora Desktop)
WireGuard works great with Linux clients. See [this page](docs/client-linux-wireguard.md) for an example of how to configure WireGuard on Ubuntu. Network Manager does not support AES-GCM. In order to support Linux Desktop clients, choose the "compatible" cryptography during the deploy process and use at least Network Manager 1.4.1. See [Issue #263](https://github.com/trailofbits/algo/issues/263) for more information.
### Linux strongSwan IPsec Clients (e.g., OpenWRT, Ubuntu Server, etc.) ### Linux strongSwan Clients (e.g., OpenWRT, Ubuntu Server, etc.)
Please see [this page](docs/client-linux-ipsec.md). Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets, user.crt (user certificate), and user.key (private key) files to your client device. These will require customization based on your exact use case. These files were originally generated with a point-to-point OpenWRT-based VPN in mind.
### OpenWrt Wireguard Clients #### Ubuntu Server example
Please see [this page](docs/client-openwrt-router-wireguard.md). 1. `sudo apt-get install strongswan libstrongswan-standard-plugins`: install strongSwan
2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/ipsec/manual/<name>.crt`
3. `/etc/ipsec.d/private`: copy `<name>.key` from `algo-master/configs/<server_ip>/ipsec/manual/<name>.key`
4. `/etc/ipsec.d/cacerts`: copy `cacert.pem` from `algo-master/configs/<server_ip>/ipsec/manual/cacert.pem`
5. `/etc/ipsec.secrets`: add your `user.key` to the list, e.g. `<server_ip> : ECDSA <name>.key`
6. `/etc/ipsec.conf`: add the connection from `ipsec_user.conf` and ensure `leftcert` matches the `<name>.crt` filename
7. `sudo ipsec restart`: pick up config changes
8. `sudo ipsec up <conn-name>`: start the ipsec tunnel
9. `sudo ipsec down <conn-name>`: shutdown the ipsec tunnel
One common use case is to let your server access your local LAN without going through the VPN. Set up a passthrough connection by adding the following to `/etc/ipsec.conf`:
conn lan-passthrough
leftsubnet=192.168.1.1/24 # Replace with your LAN subnet
rightsubnet=192.168.1.1/24 # Replace with your LAN subnet
authby=never # No authentication necessary
type=pass # passthrough
auto=route # no need to ipsec up lan-passthrough
To configure the connection to come up at boot time replace `auto=add` with `auto=start`.
### Other Devices ### Other Devices
@ -152,80 +158,36 @@ Depending on the platform, you may need one or multiple of the following files.
If you turned on the optional SSH tunneling role, then local user accounts will be created for each user in `config.cfg` and SSH authorized_key files for them will be in the `configs` directory (user.ssh.pem). SSH user accounts do not have shell access, cannot authenticate with a password, and only have limited tunneling options (e.g., `ssh -N` is required). This ensures that SSH users have the least access required to setup a tunnel and can perform no other actions on the Algo server. If you turned on the optional SSH tunneling role, then local user accounts will be created for each user in `config.cfg` and SSH authorized_key files for them will be in the `configs` directory (user.ssh.pem). SSH user accounts do not have shell access, cannot authenticate with a password, and only have limited tunneling options (e.g., `ssh -N` is required). This ensures that SSH users have the least access required to setup a tunnel and can perform no other actions on the Algo server.
Use the example command below to start an SSH tunnel by replacing `<user>` and `<ip>` with your own. Once the tunnel is setup, you can configure a browser or other application to use 127.0.0.1:1080 as a SOCKS proxy to route traffic through the Algo server: Use the example command below to start an SSH tunnel by replacing `user` and `ip` with your own. Once the tunnel is setup, you can configure a browser or other application to use 127.0.0.1:1080 as a SOCKS proxy to route traffic through the Algo server.
```bash `ssh -D 127.0.0.1:1080 -f -q -C -N user@ip -i configs/<server_ip>/ssh-tunnel/<user>.pem`
ssh -D 127.0.0.1:1080 -f -q -C -N <user>@algo -i configs/<ip>/ssh-tunnel/<user>.pem -F configs/<ip>/ssh_config
```
## SSH into Algo Server ## SSH into Algo Server
Your Algo server is configured for key-only SSH access for administrative purposes. Open the Terminal app, `cd` into the `algo-master` directory where you originally downloaded Algo, and then use the command listed on the success message: Your Algo server is configured for key-only SSH access for administrative purposes. Open the Terminal app, `cd` into the `algo-master` directory where you originally downloaded Algo, and then use the command listed on the success message:
``` `ssh -i configs/algo.pem user@ip`
ssh -F configs/<ip>/ssh_config <hostname>
```
where `<ip>` is the IP address of your Algo server. If you find yourself regularly logging into the server then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently: where `user` is either `root` or `ubuntu` as listed on the success message, and `ip` is the IP address of your Algo server. If you find yourself regularly logging into the server then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently.
``` `ssh-add ~/.ssh/algo > /dev/null 2>&1`
ssh-add ~/.ssh/algo > /dev/null 2>&1
```
Alternatively, you can choose to include the generated configuration for any Algo servers created into your SSH config. Edit the file `~/.ssh/config` to include this directive at the top:
```
Include <algodirectory>/configs/*/ssh_config
```
where `<algodirectory>` is the directory where you cloned Algo.
## Adding or Removing Users ## Adding or Removing Users
_If you chose to save the CA key during the deploy process,_ then Algo's own scripts can easily add and remove users from the VPN server. _If you chose to save the CA key during the deploy process,_ then Algo's own scripts can easily add and remove users from the VPN server.
1. Update the `users` list in your `config.cfg` 1. Update the `users` list in your `config.cfg`
2. Open a terminal, `cd` to the algo directory, and activate the virtual environment with `source .env/bin/activate` 2. Open a terminal, `cd` to the algo directory, and activate the virtual environment with `source env/bin/activate`
3. Run the command: `./algo update-users` 3. Run the command: `./algo update-users`
After this process completes, the Algo VPN server will contain only the users listed in the `config.cfg` file. After this process completes, the Algo VPN server will contain only the users listed in the `config.cfg` file.
## Additional Documentation ## Additional Documentation
* [Deployment instructions, cloud provider setup instructions, and further client setup instructions available here.](docs/index.md)
* [FAQ](docs/faq.md) * [FAQ](docs/faq.md)
* [Troubleshooting](docs/troubleshooting.md) * [Troubleshooting](docs/troubleshooting.md)
* How Algo uses [Firewalls](docs/firewalls.md)
### Setup Instructions for Specific Cloud Providers If you read all the documentation and have further questions, [join the chat on Gitter](https://gitter.im/trailofbits/algo).
* Configure [Amazon EC2](docs/cloud-amazon-ec2.md)
* Configure [Azure](docs/cloud-azure.md)
* Configure [DigitalOcean](docs/cloud-do.md)
* Configure [Google Cloud Platform](docs/cloud-gce.md)
* Configure [Vultr](docs/cloud-vultr.md)
* Configure [CloudStack](docs/cloud-cloudstack.md)
* Configure [Hetzner Cloud](docs/cloud-hetzner.md)
### Install and Deploy from Common Platforms
* Deploy from [macOS](docs/deploy-from-macos.md)
* Deploy from [Windows](docs/deploy-from-windows.md)
* Deploy from [Google Cloud Shell](docs/deploy-from-cloudshell.md)
* Deploy from a [Docker container](docs/deploy-from-docker.md)
### Setup VPN Clients to Connect to the Server
* Setup [Android](docs/client-android.md) clients
* Setup [Linux](docs/client-linux.md) clients with Ansible
* Setup Ubuntu clients to use [WireGuard](docs/client-linux-wireguard.md)
* Setup Linux clients to use [IPsec](docs/client-linux-ipsec.md)
* Setup Apple devices to use [IPsec](docs/client-apple-ipsec.md)
* Setup Macs running macOS 10.13 or older to use [WireGuard](docs/client-macos-wireguard.md)
### Advanced Deployment
* Deploy to your own [Ubuntu](docs/deploy-to-ubuntu.md) server, and road warrior setup
* Deploy from [Ansible](docs/deploy-from-ansible.md) non-interactively
* Deploy onto a [cloud server at time of creation with shell script or cloud-init](docs/deploy-from-script-or-cloud-init-to-localhost.md)
* Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md)
* Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server
If you've read all the documentation and have further questions, [create a new discussion](https://github.com/trailofbits/algo/discussions).
## Endorsements ## Endorsements

View file

@ -1,9 +0,0 @@
# Reporting Security Issues
The Algo team and community take security bugs in Algo seriously. We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/trailofbits/algo/security/) tab.
The Algo team will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
Report security bugs in third-party modules to the person or team maintaining the module.

36
Vagrantfile vendored
View file

@ -1,36 +0,0 @@
Vagrant.configure("2") do |config|
config.vm.box = "bento/ubuntu-20.04"
config.vm.provider "virtualbox" do |v|
v.name = "algo-20.04"
v.memory = "512"
v.cpus = "1"
end
config.vm.synced_folder "./", "/opt/algo", create: true
config.vm.provision "ansible_local" do |ansible|
ansible.playbook = "/opt/algo/main.yml"
# https://github.com/hashicorp/vagrant/issues/12204
ansible.pip_install_cmd = "sudo apt-get install -y python3-pip python-is-python3 && sudo ln -s -f /usr/bin/pip3 /usr/bin/pip"
ansible.install_mode = "pip_args_only"
ansible.pip_args = "-r /opt/algo/requirements.txt"
ansible.inventory_path = "/opt/algo/inventory"
ansible.limit = "local"
ansible.verbose = "-vvvv"
ansible.extra_vars = {
provider: "local",
server: "localhost",
ssh_user: "",
endpoint: "127.0.0.1",
ondemand_cellular: true,
ondemand_wifi: false,
dns_adblocking: true,
ssh_tunneling: true,
store_pki: true,
tests: true,
no_log: false
}
end
end

2
algo
View file

@ -4,7 +4,7 @@ set -e
if [ -z ${VIRTUAL_ENV+x} ] if [ -z ${VIRTUAL_ENV+x} ]
then then
ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.env/bin/activate" ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env/bin/activate"
if [ -f "$ACTIVATE_SCRIPT" ] if [ -f "$ACTIVATE_SCRIPT" ]
then then
# shellcheck source=/dev/null # shellcheck source=/dev/null

View file

@ -11,7 +11,7 @@ usage() {
retcode="${1:-0}" retcode="${1:-0}"
echo "To run algo from Docker:" echo "To run algo from Docker:"
echo "" echo ""
echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" ghcr.io/trailofbits/algo:latest" echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" trailofbits/algo:latest"
echo "" echo ""
exit ${retcode} exit ${retcode}
} }
@ -37,7 +37,7 @@ fi
tr -d '\r' < "${DATA_DIR}"/config.cfg > "${ALGO_DIR}"/config.cfg tr -d '\r' < "${DATA_DIR}"/config.cfg > "${ALGO_DIR}"/config.cfg
test -d "${DATA_DIR}"/configs && rsync -qLktr --delete "${DATA_DIR}"/configs "${ALGO_DIR}"/ test -d "${DATA_DIR}"/configs && rsync -qLktr --delete "${DATA_DIR}"/configs "${ALGO_DIR}"/
"${ALGO_DIR}"/algo "${ALGO_ARGS[@]}" "${ALGO_DIR}"/algo ${ALGO_ARGS}
retcode=${?} retcode=${?}
rsync -qLktr --delete "${ALGO_DIR}"/configs "${DATA_DIR}"/ rsync -qLktr --delete "${ALGO_DIR}"/configs "${DATA_DIR}"/

View file

@ -68,10 +68,10 @@ elif [[ -f LICENSE && ${STAT} ]]; then
fi fi
# The Python version might be useful to know. # The Python version might be useful to know.
if [[ -x ./.env/bin/python3 ]]; then if [[ -x ./env/bin/python ]]; then
./.env/bin/python3 --version 2>&1 ./env/bin/python --version 2>&1
elif [[ -f ./algo ]]; then elif [[ -f ./algo ]]; then
echo ".env/bin/python3 not found: has 'python3 -m virtualenv ...' been run?" echo "env/bin/python not found: has 'python -m virtualenv ...' been run?"
fi fi
# Just print out all command line arguments, which are expected # Just print out all command line arguments, which are expected

View file

@ -6,7 +6,6 @@ host_key_checking = False
timeout = 60 timeout = 60
stdout_callback = default stdout_callback = default
display_skipped_hosts = no display_skipped_hosts = no
force_valid_group_names = ignore
[paramiko_connection] [paramiko_connection]
record_host_keys = False record_host_keys = False

View file

@ -8,14 +8,14 @@
tasks: tasks:
- block: - block:
- name: Local pre-tasks - name: Local pre-tasks
import_tasks: playbooks/cloud-pre.yml import_tasks: playbooks/cloud-pre.yml
- name: Include a provisioning role - name: Include a provisioning role
include_role: include_role:
name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + algo_provider }}" name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + algo_provider }}"
- name: Local post-tasks - name: Local post-tasks
import_tasks: playbooks/cloud-post.yml import_tasks: playbooks/cloud-post.yml
rescue: rescue:
- include_tasks: playbooks/rescue.yml - include_tasks: playbooks/rescue.yml

View file

@ -1,37 +1,50 @@
--- ---
# This is the list of users to generate. # This is the list of users to generate.
# Every device must have a unique user. # Every device must have a unique username.
# You can add up to 65,534 new users over the lifetime of an AlgoVPN. # You can generate up to 250 users at one time.
# User names with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123". # Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123".
# Email addresses are not allowed.
users: users:
- phone - phone
- laptop - laptop
- desktop - desktop
### Review these options BEFORE you run Algo, as they are very difficult/impossible to change after the server is deployed. ### Advanced users only below this line ###
# Change default SSH port for the cloud roles only # Store the PKI in a ram disk. Enabled only if store_pki (retain the PKI) is set to false
# It doesn't apply if you deploy to your existing Ubuntu Server # Supports on MacOS and Linux only (including Windows Subsystem for Linux)
ssh_port: 4160 pki_in_tmpfs: true
# If True re-init all existing certificates. Boolean
keys_clean_all: False
# Clean up cloud python environments
clean_environment: false
# Deploy StrongSwan to enable IPsec support # Deploy StrongSwan to enable IPsec support
ipsec_enabled: true ipsec_enabled: true
# StrongSwan log level
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
strongswan_log_level: 2
# rightsourceip for ipsec
# ipv4
strongswan_network: 10.19.48.0/24
# ipv6
strongswan_network_ipv6: 'fd9d:bc11:4020::/48'
# Deploy WireGuard # Deploy WireGuard
# WireGuard will listen on 51820/UDP. You might need to change to another port
# if your network blocks this one. Be aware that 53/UDP (DNS) is blocked on some
# mobile data networks.
wireguard_enabled: true wireguard_enabled: true
wireguard_port: 51820 wireguard_port: 51820
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent.
# This option will keep the "connection" open in the eyes of NAT.
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence
wireguard_PersistentKeepalive: 0
# This feature allows you to configure the Algo server to send outbound traffic # WireGuard network configuration
# through a different external IP address than the one you are establishing the VPN connection with. wireguard_network_ipv4: 10.19.49.0/24
# More info https://trailofbits.github.io/algo/cloud-alternative-ingress-ip.html wireguard_network_ipv6: fd9d:bc11:4021::/48
# Available for the following cloud providers:
# - DigitalOcean
alternative_ingress_ip: false
# Reduce the MTU of the VPN tunnel # Reduce the MTU of the VPN tunnel
# Some cloud and internet providers use a smaller MTU (Maximum Transmission # Some cloud and internet providers use a smaller MTU (Maximum Transmission
@ -49,35 +62,13 @@ reduce_mtu: 0
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf # /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf
adblock_lists: adblock_lists:
- "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" - "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
- "https://hosts-file.net/ad_servers.txt"
# Enable DNS encryption. # Enable DNS encryption.
# If 'false', 'dns_servers' should be specified below. # If 'false', 'dns_servers' should be specified below.
# DNS encryption can not be disabled if DNS adblocking is enabled # DNS encryption can not be disabled if DNS adblocking is enabled
dns_encryption: true dns_encryption: true
# Block traffic between connected clients. Change this to false to enable
# connected clients to reach each other, as well as other computers on the
# same LAN as your Algo server (i.e. the "road warrior" setup). In this
# case, you may also want to enable SMB/CIFS and NETBIOS traffic below.
BetweenClients_DROP: true
# Block SMB/CIFS traffic
block_smb: true
# Block NETBIOS traffic
block_netbios: true
# Your Algo server will automatically install security updates. Some updates
# require a reboot to take effect but your Algo server will not reboot itself
# automatically unless you change 'enabled' below from 'false' to 'true', in
# which case a reboot will take place if necessary at the time specified (as
# HH:MM) in the time zone of your Algo server. The default time zone is UTC.
unattended_reboot:
enabled: false
time: 06:00
### Advanced users only below this line ###
# DNS servers which will be used if 'dns_encryption' is 'true'. Multiple # DNS servers which will be used if 'dns_encryption' is 'true'. Multiple
# providers may be specified, but avoid mixing providers that filter results # providers may be specified, but avoid mixing providers that filter results
# (like Cisco) with those that don't (like Cloudflare) or you could get # (like Cisco) with those that don't (like Cloudflare) or you could get
@ -88,17 +79,10 @@ dnscrypt_servers:
ipv4: ipv4:
- cloudflare - cloudflare
# - google # - google
# - <YourCustomServer> # E.g., if using NextDNS, this will be something like NextDNS-abc123.
# You must also fill in custom_server_stamps below. You may specify
# multiple custom servers.
ipv6: ipv6:
- cloudflare-ipv6 - cloudflare-ipv6
custom_server_stamps:
# YourCustomServer: 'sdns://...'
# DNS servers which will be used if 'dns_encryption' is 'false'. # DNS servers which will be used if 'dns_encryption' is 'false'.
# Fallback resolvers for systemd-resolved
# The default is to use Cloudflare. # The default is to use Cloudflare.
dns_servers: dns_servers:
ipv4: ipv4:
@ -108,38 +92,21 @@ dns_servers:
- 2606:4700:4700::1111 - 2606:4700:4700::1111
- 2606:4700:4700::1001 - 2606:4700:4700::1001
# Store the PKI in a ram disk. Enabled only if store_pki (retain the PKI) is set to false
# Supports on MacOS and Linux only (including Windows Subsystem for Linux)
pki_in_tmpfs: true
# Set this to 'true' when running './algo update-users' if you want ALL users to get new certs, not just new users.
keys_clean_all: false
# StrongSwan log level
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
strongswan_log_level: 2
# rightsourceip for ipsec
# ipv4
strongswan_network: 10.48.0.0/16
# ipv6
strongswan_network_ipv6: '2001:db8:4160::/48'
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent.
# This option will keep the "connection" open in the eyes of NAT.
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence
wireguard_PersistentKeepalive: 0
# WireGuard network configuration
wireguard_network_ipv4: 10.49.0.0/16
wireguard_network_ipv6: 2001:db8:a160::/48
# Randomly generated IP address for the local dns resolver # Randomly generated IP address for the local dns resolver
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
# Hide sensitive data # Your Algo server will automatically install security updates. Some updates
no_log: true # require a reboot to take effect but your Algo server will not reboot itself
# automatically unless you change 'enabled' below from 'false' to 'true', in
# which case a reboot will take place if necessary at the time specified (as
# HH:MM) in the time zone of your Algo server. The default time zone is UTC.
unattended_reboot:
enabled: false
time: 06:00
# Block traffic between connected clients
BetweenClients_DROP: true
congrats: congrats:
common: | common: |
@ -154,73 +121,48 @@ congrats:
ca_key_pass: | ca_key_pass: |
"# The CA key password is {{ CA_password|default(omit) }} #" "# The CA key password is {{ CA_password|default(omit) }} #"
ssh_access: | ssh_access: |
"# Shell access: ssh -F configs/{{ ansible_ssh_host|default(omit) }}/ssh_config {{ algo_server_name }} #" "# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #"
SSH_keys: SSH_keys:
comment: algo@ssh comment: algo@ssh
private: configs/algo.pem private: configs/algo.pem
private_tmp: /tmp/algo-ssh.pem
public: configs/algo.pem.pub public: configs/algo.pem.pub
cloud_providers: cloud_providers:
azure: azure:
size: Standard_B1S size: Standard_B1S
osDisk: image: 19.04
# The storage account type to use for the OS disk. Possible values:
# 'Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS',
# 'Premium_ZRS', 'StandardSSD_ZRS', 'PremiumV2_LRS'.
type: Standard_LRS
image:
publisher: Canonical
offer: 0001-com-ubuntu-minimal-jammy-daily
sku: minimal-22_04-daily-lts
version: latest
digitalocean: digitalocean:
# See docs for extended droplet options, pricing, and availability.
# Possible values: 's-1vcpu-512mb-10gb', 's-1vcpu-1gb', ...
size: s-1vcpu-1gb size: s-1vcpu-1gb
image: "ubuntu-22-04-x64" image: "ubuntu-19-04-x64"
ec2: ec2:
# Change the encrypted flag to "false" to disable AWS volume encryption. # Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest.
encrypted: true # Warning: the Algo script will take approximately 6 minutes longer to complete.
encrypted: false
# Set use_existing_eip to "true" if you want to use a pre-allocated Elastic IP # Set use_existing_eip to "true" if you want to use a pre-allocated Elastic IP
# Additional prompt will be raised to determine which IP to use # Additional prompt will be raised to determine which IP to use
use_existing_eip: false use_existing_eip: false
size: t2.micro size: t2.micro
image: image:
name: "ubuntu-jammy-22.04" name: "ubuntu-disco-19.04"
arch: x86_64
owner: "099720109477" owner: "099720109477"
# Change instance_market_type from "on-demand" to "spot" to launch a spot
# instance. See deploy-from-ansible.md for spot's additional IAM permission
instance_market_type: on-demand
gce: gce:
size: e2-micro size: f1-micro
image: ubuntu-2204-lts image: ubuntu-1904
external_static_ip: false external_static_ip: false
lightsail: lightsail:
size: nano_2_0 size: nano_1_0
image: ubuntu_22_04 image: ubuntu_18_04
scaleway: scaleway:
size: DEV1-S size: START1-S
image: Ubuntu 22.04 Jammy Jellyfish image: Ubuntu Bionic Beaver
arch: x86_64 arch: x86_64
hetzner:
server_type: cx22
image: ubuntu-22.04
openstack: openstack:
flavor_ram: ">=512" flavor_ram: ">=512"
image: Ubuntu-22.04 image: Ubuntu-18.04
cloudstack:
size: Micro
image: Linux Ubuntu 22.04 LTS 64-bit
disk: 10
vultr: vultr:
os: Ubuntu 22.04 LTS x64 os: Ubuntu 19.04 x64
size: vc2-1c-1gb size: 1024 MB RAM,25 GB SSD,1.00 TB BW
linode:
type: g6-nanode-1
image: linode/ubuntu22.04
local: local:
fail_hint: fail_hint:

View file

@ -13,7 +13,7 @@
ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}" ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}"
vpn_user: "{{ vpn_user }}" vpn_user: "{{ vpn_user }}"
IP_subject_alt_name: "{{ server_ip }}" IP_subject_alt_name: "{{ server_ip }}"
ansible_python_interpreter: /usr/bin/python3 ansible_python_interpreter: "/usr/bin/python3"
- name: Configure the client and install required software - name: Configure the client and install required software
hosts: client-host hosts: client-host

View file

@ -1,37 +0,0 @@
# Linux strongSwan IPsec Clients (e.g., OpenWRT, Ubuntu Server, etc.)
Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets, user.crt (user certificate), and user.key (private key) files to your client device. These will require customization based on your exact use case. These files were originally generated with a point-to-point OpenWRT-based VPN in mind.
## Ubuntu Server example
1. `sudo apt-get install strongswan libstrongswan-standard-plugins`: install strongSwan
2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/ipsec/.pki/certs/<name>.crt`
3. `/etc/ipsec.d/private`: copy `<name>.key` from `algo-master/configs/<server_ip>/ipsec/.pki/private/<name>.key`
4. `/etc/ipsec.d/cacerts`: copy `cacert.pem` from `algo-master/configs/<server_ip>/ipsec/manual/cacert.pem`
5. `/etc/ipsec.secrets`: add your `user.key` to the list, e.g. `<server_ip> : ECDSA <name>.key`
6. `/etc/ipsec.conf`: add the connection from `ipsec_user.conf` and ensure `leftcert` matches the `<name>.crt` filename
7. `sudo ipsec restart`: pick up config changes
8. `sudo ipsec up <conn-name>`: start the ipsec tunnel
9. `sudo ipsec down <conn-name>`: shutdown the ipsec tunnel
One common use case is to let your server access your local LAN without going through the VPN. Set up a passthrough connection by adding the following to `/etc/ipsec.conf`:
conn lan-passthrough
leftsubnet=192.168.1.1/24 # Replace with your LAN subnet
rightsubnet=192.168.1.1/24 # Replace with your LAN subnet
authby=never # No authentication necessary
type=pass # passthrough
auto=route # no need to ipsec up lan-passthrough
To configure the connection to come up at boot time replace `auto=add` with `auto=start`.
## Notes on SELinux
If you use a system with SELinux enabled you might need to set appropriate file contexts:
````
semanage fcontext -a -t ipsec_key_file_t "$(pwd)(/.*)?"
restorecon -R -v $(pwd)
````
See [this comment](https://github.com/trailofbits/algo/issues/263#issuecomment-328053950).

View file

@ -2,16 +2,16 @@
## Install WireGuard ## Install WireGuard
To connect to your AlgoVPN using [WireGuard](https://www.wireguard.com) from Ubuntu, make sure your system is up-to-date then install WireGuard: To connect to your AlgoVPN using [WireGuard](https://www.wireguard.com) from Ubuntu, first install WireGuard:
```shell ```shell
# Update your system: # Add the WireGuard repository:
sudo apt update && sudo apt upgrade sudo add-apt-repository ppa:wireguard/wireguard
# If the file /var/run/reboot-required exists then reboot: # Update the list of available packages (not necessary on 18.04 or later):
[ -e /var/run/reboot-required ] && sudo reboot sudo apt update
# Install WireGuard: # Install the tools and kernel module:
sudo apt install wireguard openresolv sudo apt install wireguard openresolv
``` ```
@ -47,16 +47,3 @@ sudo systemctl enable wg-quick@wg0
``` ```
If your Linux distribution does not use `systemd` you can bring up WireGuard with `sudo wg-quick up wg0`. If your Linux distribution does not use `systemd` you can bring up WireGuard with `sudo wg-quick up wg0`.
## Using a DNS Search Domain
As of the `v1.0.20200510` release of `wireguard-tools` WireGuard supports setting a DNS search domain. In your `wg0.conf` file a non-numeric entry on the `DNS` line will be used as a search domain. For example this:
```
DNS = 172.27.153.31, fd00::b:991f, mydomain.com
```
will cause your `/etc/resolv.conf` to contain:
```
search mydomain.com
nameserver 172.27.153.31
nameserver fd00::b:991f
```

View file

@ -1,88 +0,0 @@
# Using Router with OpenWRT as a Client with WireGuard
This scenario is useful in case you want to use vpn with devices which has no vpn capability like smart tv, or make vpn connection available via router for multiple devices.
This is a tested, working scenario with following environment:
- algo installed ubuntu at digitalocean
- client side router "TP-Link TL-WR1043ND" with openwrt ver. 21.02.1. [Openwrt Install instructions](https://openwrt.org/toh/tp-link/tl-wr1043nd)
- or client side router "TP-Link Archer C20i AC750" with openwrt ver. 21.02.1. [Openwrt install instructions](https://openwrt.org/toh/tp-link/archer_c20i)
see compatible device list at https://openwrt.org/toh/start . Theoretically any of the device on list should work
## Router setup
Make sure that you have
- router with openwrt installed,
- router is connected to internet,
- router and device in front of router does not have same ip . By default openwrt have 192.168.1.1 if so change it to something like 192.168.2.1
### Install required packages(WebUI)
- Open router web UI (mostly http://192.168.1.1 )
- Login. (by default username: root, password:<empty>
- System -> Software, click "Update lists"
- Install following packages wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
- restart router
### Alternative Install required packages(ssh)
- Open router web UI (mostly http://192.168.1.1 )
- ssh root@192.168.1.1
- opkg update
- opkg install wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
- reboot
### Create an Interface(WebUI)
- Open router web UI
- Navigate Network -> Interface
- Click "Add new interface"
- Give a Name. e.g. `AlgoVpn`
- Select Protocol. `Wireguard VPN`
- click `Create Interface`
- In *General Settings* tab
- `Bring up on boot` *checked*
- Private key: `Interface -> Private Key` from algo config file
- Ip Address: `Interface -> Address` from algo config file
- In *Peers* tab
- Click add
- Name `algo`
- Public key: `[Peer]->PublicKey` from algo config file
- Preshared key: `[Peer]->PresharedKey` from algo config file
- Allowed IPs: 0.0.0.0/0
- Route Allowed IPs: checked
- Endpoint Host: `[Peer]->Endpoint` ip from algo config file
- Endpoint Port: `[Peer]->Endpoint` port from algo config file
- Persistent Keep Alive: `25`
- Click Save & Save Apply
### Configure Firewall(WebUI)
- Open router web UI
- Navigate to Network -> Firewall
- Click `Add configuration`:
- Name: e.g. ivpn_fw
- Input: Reject
- Output: Accept
- Forward: Reject
- Masquerading: Checked
- MSS clamping: Checked
- Covered networks: Select created VPN interface
- Allow forward to destination zones - Unspecified
- Allow forward from source zones - lan
- Click Save & Save Apply
- Reboot router
There may be additional configuration required depending on environment like dns configuration.
You can also verify the configuration using ssh. /etc/config/network. It should look like
```
config interface 'algo'
option proto 'wireguard'
list addresses '10.0.0.2/32'
option private_key '......' # The private key generated by itself just now
config wireguard_wg0
option public_key '......' # Server's public key
option route_allowed_ips '1'
list allowed_ips '0.0.0.0/0'
option endpoint_host '......' # Server's public ip address
option endpoint_port '51820'
option persistent_keepalive '25'
```

6
docs/client-windows.md Normal file
View file

@ -0,0 +1,6 @@
# Windows client setup
## Installation via profiles
1. Install the [WireGuard VPN Client](https://www.wireguard.com/install/#windows-7-8-81-10-2012-2016-2019) and start it.
2. Import the corresponding `wireguard/<name>.conf` file to your device, then setup a new connection with it.

View file

@ -1,22 +0,0 @@
# Alternative Ingress IP
This feature allows you to configure the Algo server to send outbound traffic through a different external IP address than the one you are establishing the VPN connection with.
![cloud-alternative-ingress-ip](/docs/images/cloud-alternative-ingress-ip.png)
Additional info might be found in [this issue](https://github.com/trailofbits/algo/issues/1047)
#### Caveats
##### Extra charges
- DigitalOcean: Floating IPs are free when assigned to a Droplet, but after manually deleting a Droplet you need to also delete the Floating IP or you'll get charged for it.
##### IPv6
Some cloud providers provision a VM with an `/128` address block size. This is the only IPv6 address provided and for outbound and incoming traffic.
If the provided address block size is bigger, e.g., `/64`, Algo takes a separate address than the one is assigned to the server to send outbound IPv6 traffic.

View file

@ -6,28 +6,18 @@ Creating an Amazon AWS account requires giving Amazon a phone number that can re
### Select an EC2 plan ### Select an EC2 plan
The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the ["AWS Free Tier"](https://aws.amazon.com/free/). It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices. The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the "AWS Free Tier." It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices.
*Note*: Your Algo instance will not stop working when you hit the bandwidth limit, you will just start accumulating service charges on your AWS account. *Note*: Your Algo instance will not stop working when you hit the bandwidth limit, you will just start accumulating service charges on your AWS account.
As of the time of this writing (July 2018), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro instance usage" per month, 15 GB of bandwidth (outbound) per month, and 30 GB of cloud storage. Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits. As of the time of this writing (July 2018), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro instance usage" per month, 15 GB of bandwidth (outbound) per month, and 30 GB of cloud storage. Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits.
If you are not eligible for the free tier plan or have passed the 12 months of the introductory period, you can switch to [AWS Graviton](https://aws.amazon.com/ec2/graviton/) instances that are generally cheaper. To use the graviton instances, make the following changes in the ec2 section of your `config.cfg` file:
* Set the `size` to `t4g.nano`
* Set the `arch` to `arm64`
> Currently, among all the instance sizes available on AWS, the t4g.nano instance is the least expensive option that does not require any promotional offers. However, AWS is currently running a promotion that provides a free trial of the `t4g.small` instance until December 31, 2023, which is available to all customers. For more information about this promotion, please refer to the [documentation](https://aws.amazon.com/ec2/faqs/#t4g-instances).
Additional configurations are documented in the [EC2 section of the deploy from ansible guide](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#amazon-ec2)
### Create an AWS permissions policy ### Create an AWS permissions policy
In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy. In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy.
Here, you have the policy editor. Switch to the JSON tab and copy-paste over the existing empty policy with [the minimum required AWS policy needed for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment). Here, you have the policy editor. Switch to the JSON tab and copy-paste over the existing empty policy with [the minimum required AWS policy needed for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment).
When prompted to name the policy, name it `AlgoVPN_Provisioning`.
![Creating a new permissions policy in the AWS console.](/docs/images/aws-ec2-new-policy.png) ![Creating a new permissions policy in the AWS console.](/docs/images/aws-ec2-new-policy.png)
### Set up an AWS user ### Set up an AWS user
@ -58,27 +48,22 @@ On the final screen, click the Download CSV button. This file includes the AWS a
After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account. After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account.
First you will be asked which server type to setup. You would want to enter "3" to use Amazon EC2. First you will be asked which server type to setup. You would want to enter "2" to use Amazon EC2.
``` ```
$ ./algo $ ./algo
What provider would you like to use? What provider would you like to use?
1. DigitalOcean 1. DigitalOcean
2. Amazon Lightsail 2. Amazon EC2
3. Amazon EC2 3. Microsoft Azure
4. Microsoft Azure 4. Google Compute Engine
5. Google Compute Engine 5. Scaleway
6. Hetzner Cloud 6. OpenStack (DreamCompute optimised)
7. Vultr 7. Install to existing Ubuntu 16.04 server (Advanced)
8. Scaleway
9. OpenStack (DreamCompute optimised)
10. CloudStack (Exoscale optimised)
11. Linode
12. Install to existing Ubuntu server (for more advanced users)
Enter the number of your desired provider Enter the number of your desired provider
: 3 : 2
``` ```
Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you setup the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo). Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you setup the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo).
@ -131,5 +116,4 @@ Enter the number of your desired region
You will then be asked the remainder of the standard Algo setup questions. You will then be asked the remainder of the standard Algo setup questions.
## Cleanup ## Cleanup
If you've installed Algo onto EC2 multiple times, your AWS account may become cluttered with unused or deleted resources e.g. instances, VPCs, subnets, etc. This may cause future installs to fail. The easiest way to clean up after you're done with a server is to go to "CloudFormation" from the console and delete the CloudFormation stack associated with that server. Please note that unless you've enabled termination protection on your instance, deleting the stack this way will delete your instance without warning, so be sure you are deleting the correct stack. If you've installed Algo onto EC2 multiple times, your AWS account may become cluttered with unused or deleted resources e.g. instances, VPCs, subnets, etc. This may cause future installs to fail. The easiest way to clean up after you're done with a server is to go to "CloudFormation" from the console and delete the CloudFormation stack associated with that server. Please note that unless you've enabled termination protection on your instance, deleting the stack this way will delete your instance without warning, so be sure you are deleting the correct stack.

View file

@ -1,11 +0,0 @@
### Configuration file
Algo scripts will ask you for the API detail. You need to fetch the API credentials and the endpoint from the provider control panel.
Example for Exoscale (European cloud provider exposing CloudStack API), visit https://portal.exoscale.com/u/<your@account>/account/profile/api to gather the required information: CloudStack api key and secret.
```bash
export CLOUDSTACK_KEY="<your api key>"
export CLOUDSTACK_SECRET="<your secret>"
export CLOUDSTACK_ENDPOINT="https://api.exoscale.com/compute"
```

View file

@ -18,18 +18,6 @@ You will be returned to the **Tokens/Keys** tab, and your new key will be shown
Copy or note down the hash that shows below the name you entered, as this will be necessary for the steps below. This value will disappear if you leave this page, and you'll need to regenerate it if you forget it. Copy or note down the hash that shows below the name you entered, as this will be necessary for the steps below. This value will disappear if you leave this page, and you'll need to regenerate it if you forget it.
## Select a Droplet (optional)
The default option is the `s-1vcpu-1gb` because it is available in all regions. However, you may want to switch to a cheaper droplet such as `s-1vcpu-512mb-10gb` even though it is not available in all regions. This can be edited in the [Configuration File](config.cfg) under `cloud_providers > digitalocean > size`. See this brief comparison between the two droplets below:
| Droplet Type | Monthly Cost | Bandwidth | Availability |
|:--|:-:|:-:|:--|
| `s-1vcpu-512mb-10gb` | $4/month | 0.5 TB | Limited |
| `s-1vcpu-1gb` | $6/month | 1.0 TB | All regions |
| ... | ... | ... | ... |
*Note: Exceeding bandwidth limits costs $0.01/GiB at time of writing ([docs](https://docs.digitalocean.com/products/billing/bandwidth/#droplets)). See the live list of droplets [here](https://slugs.do-api.dev/).*
## Using DigitalOcean with Algo (interactive) ## Using DigitalOcean with Algo (interactive)
These steps are for those who run Algo using Docker or using the `./algo` command. These steps are for those who run Algo using Docker or using the `./algo` command.

View file

@ -38,4 +38,4 @@ gcloud services enable compute.googleapis.com
**Attention:** take care of the `configs/gce.json` file, which contains the credentials to manage your Google Cloud account, including create and delete servers on this project. **Attention:** take care of the `configs/gce.json` file, which contains the credentials to manage your Google Cloud account, including create and delete servers on this project.
There are more advanced arguments available for deployment [using ansible](deploy-from-ansible.md). There are more advanced arguments available for deployment [using ansible](deploy-from-ansible.md).

View file

@ -1,3 +0,0 @@
## API Token
Sign in to the [Hetzner Cloud Console](https://console.hetzner.cloud/), choose a project, go to `Security` → `API Tokens`, and `Generate API Token` with `Read & Write` access. Make sure to copy the token because it won't be shown to you again. A token is bound to a project. To interact with the API of another project you have to create a new token inside the project.

View file

@ -1,9 +0,0 @@
## API Token
Sign into the Linode Manager and go to the
[tokens management page](https://cloud.linode.com/profile/tokens).
Click `Add a Personal Access Token`. Label your new token and select *at least* the
`Linodes` read/write permission and `StackScripts` read/write permission.
Press `Submit` and make sure to copy the displayed token
as it won't be shown again.

View file

@ -1,10 +1,9 @@
### Configuration file ### Configuration file
Algo requires an API key from your Scaleway account to create a server. Algo requires an API key from your Scaleway account to create a server.
The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/project/credentials](https://console.scaleway.com/project/credentials), and then selecting "Generate new API key" on the right side of the box labeled "API Keys". The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/account/credentials](https://console.scaleway.com/account/credentials), and then selecting "Generate new token" on the right side of the box labeled "API Tokens".
You'll be asked to specify a purpose for your API key before it is created. You will then be presented with an "Access key" and a "Secret key".
Enter the "Secret key" when Algo prompts you for the `auth token`. You won't need the "Access key". Enter this token when Algo prompts you for the `auth token`.
This information will be passed as the `algo_scaleway_token` variable when asked for in the Algo prompt. This information will be passed as the `algo_scaleway_token` variable when asked for in the Algo prompt.
Your organization ID is also on this page: https://console.scaleway.com/account/credentials Your organization ID is also on this page: https://console.scaleway.com/account/credentials

View file

@ -26,12 +26,12 @@ See below for more information about variables and roles.
- `provider` - (Required) The provider to use. See possible values below - `provider` - (Required) The provider to use. See possible values below
- `server_name` - (Required) Server name. Default: algo - `server_name` - (Required) Server name. Default: algo
- `ondemand_cellular` (Optional) Enables VPN On Demand when connected to cellular networks for iOS/macOS clients using IPsec. Default: false - `ondemand_cellular` (Optional) VPN On Demand when connected to cellular networks with IPsec. Default: false
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) Enables VPN On Demand when connected to WiFi networks for iOS/macOS clients using IPsec. Default: false - `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks with IPsec. Default: false
- `ondemand_wifi_exclude` (Required if `ondemand_wifi` set) - WiFi networks to exclude from using the VPN. Comma-separated values - `ondemand_wifi_exclude` (Required if `ondemand_wifi` set) - WiFi networks to exclude from using the VPN. Comma-separated values
- `dns_adblocking` - (Optional) Enables dnscrypt-proxy adblocking. Default: false - `dns_adblocking` - (Optional) Enables dnscrypt-proxy adblocking. Default: false
- `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false - `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false
- `store_pki` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false - `store_cakey` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false
If any of the above variables are unspecified, ansible will ask the user to input them. If any of the above variables are unspecified, ansible will ask the user to input them.
@ -41,34 +41,30 @@ Cloud roles can be activated by specifying an extra variable `provider`.
Cloud roles: Cloud roles:
- role: cloud-digitalocean, [provider: digitalocean](#digital-ocean) - role: cloud-digitalocean, provider: digitalocean
- role: cloud-ec2, [provider: ec2](#amazon-ec2) - role: cloud-ec2, provider: ec2
- role: cloud-gce, [provider: gce](#google-compute-engine) - role: cloud-vultr, provider: vultr
- role: cloud-vultr, [provider: vultr](#vultr) - role: cloud-gce, provider: gce
- role: cloud-azure, [provider: azure](#azure) - role: cloud-azure, provider: azure
- role: cloud-lightsail, [provider: lightsail](#lightsail) - role: cloud-scaleway, provider: scaleway
- role: cloud-scaleway, [provider: scaleway](#scaleway) - role: cloud-openstack, provider: openstack
- role: cloud-openstack, [provider: openstack](#openstack)
- role: cloud-cloudstack, [provider: cloudstack](#cloudstack)
- role: cloud-hetzner, [provider: hetzner](#hetzner)
- role: cloud-linode, [provider: linode](#linode)
Server roles: Server roles:
- role: strongswan - role: strongswan
- Installs [strongSwan](https://www.strongswan.org/) * Installs [strongSwan](https://www.strongswan.org/)
- Enables AppArmor, limits CPU and memory access, and drops user privileges * Enables AppArmor, limits CPU and memory access, and drops user privileges
- Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user * Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
- Bundles the appropriate certificates into Apple mobileconfig profiles for each user * Bundles the appropriate certificates into Apple mobileconfig profiles for each user
- role: dns_adblocking - role: dns_adblocking
- Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false` * Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false`
- Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations * Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations
- role: ssh_tunneling - role: ssh_tunneling
- Adds a restricted `algo` group with no shell access and limited SSH forwarding options * Adds a restricted `algo` group with no shell access and limited SSH forwarding options
- Creates one limited, local account and an SSH public key for each user * Creates one limited, local account and an SSH public key for each user
- role: wireguard - role: wireguard
- Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades * Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades
- Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients * Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients
Note: The `strongswan` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables: Note: The `strongswan` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:
@ -96,7 +92,7 @@ Required variables:
- do_token - do_token
- region - region
Possible options can be gathered calling to <https://api.digitalocean.com/v2/regions> Possible options can be gathered calling to https://api.digitalocean.com/v2/regions
### Amazon EC2 ### Amazon EC2
@ -110,26 +106,9 @@ Possible options can be gathered via cli `aws ec2 describe-regions`
Additional variables: Additional variables:
- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: true) - [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: false)
- [size](https://aws.amazon.com/ec2/instance-types/) - EC2 instance type. String (Default: t2.micro)
- [image](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html) - AMI `describe-images` search parameters to find the OS for the hosted image. Each OS and architecture has a unique AMI-ID. The OS owner, for example [Ubuntu](https://cloud-images.ubuntu.com/locator/ec2/), updates these images often. If parameters below result in multiple results, the most recent AMI-ID is chosen
``` #### Minimum required IAM permissions for deployment:
# Example of equivalent cli command
aws ec2 describe-images --owners "099720109477" --filters "Name=architecture,Values=arm64" "Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-jammy-22.04*"
```
- [owners] - The operating system owner id. Default is [Canonical](https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29) (Default: 099720109477)
- [arch] - The architecture (Default: x86_64, Optional: arm64)
- [name] - The wildcard string to filter available ami names. Algo appends this name with the string "-\*64-server-\*", and prepends with "ubuntu/images/hvm-ssd/" (Default: Ubuntu latest LTS)
- [instance_market_type](https://aws.amazon.com/ec2/pricing/) - Two pricing models are supported: on-demand and spot. String (Default: on-demand)
- If using spot instance types, one additional IAM permission along with the below minimum is required for deployment:
```
"ec2:CreateLaunchTemplate"
```
#### Minimum required IAM permissions for deployment
``` ```
{ {
@ -167,18 +146,14 @@ Additional variables:
"Sid": "CloudFormationEC2Access", "Sid": "CloudFormationEC2Access",
"Effect": "Allow", "Effect": "Allow",
"Action": [ "Action": [
"ec2:DescribeRegions",
"ec2:CreateInternetGateway", "ec2:CreateInternetGateway",
"ec2:DescribeVpcs", "ec2:DescribeVpcs",
"ec2:CreateVpc", "ec2:CreateVpc",
"ec2:DescribeInternetGateways", "ec2:DescribeInternetGateways",
"ec2:ModifyVpcAttribute", "ec2:ModifyVpcAttribute",
"ec2:CreateTags", "ec2:createTags",
"ec2:CreateSubnet", "ec2:CreateSubnet",
"ec2:AssociateVpcCidrBlock", "ec2:Associate*",
"ec2:AssociateSubnetCidrBlock",
"ec2:AssociateRouteTable",
"ec2:AssociateAddress",
"ec2:CreateRouteTable", "ec2:CreateRouteTable",
"ec2:AttachInternetGateway", "ec2:AttachInternetGateway",
"ec2:DescribeRouteTables", "ec2:DescribeRouteTables",
@ -205,8 +180,8 @@ Additional variables:
Required variables: Required variables:
- gce_credentials_file: e.g. /configs/gce.json if you use the [GCE docs](https://trailofbits.github.io/algo/cloud-gce.html) - can also be defined in environment as GCE_CREDENTIALS_FILE_PATH - gce_credentials_file
- [region](https://cloud.google.com/compute/docs/regions-zones/): e.g. `useast-1` - [region](https://cloud.google.com/compute/docs/regions-zones/)
### Vultr ### Vultr
@ -235,7 +210,7 @@ Required variables:
Possible options can be gathered via cli `aws lightsail get-regions` Possible options can be gathered via cli `aws lightsail get-regions`
#### Minimum required IAM permissions for deployment #### Minimum required IAM permissions for deployment:
``` ```
{ {
@ -248,27 +223,7 @@ Possible options can be gathered via cli `aws lightsail get-regions`
"lightsail:GetRegions", "lightsail:GetRegions",
"lightsail:GetInstance", "lightsail:GetInstance",
"lightsail:CreateInstances", "lightsail:CreateInstances",
"lightsail:DisableAddOn", "lightsail:OpenInstancePublicPorts"
"lightsail:PutInstancePublicPorts",
"lightsail:StartInstance",
"lightsail:TagResource",
"lightsail:GetStaticIp",
"lightsail:AllocateStaticIp",
"lightsail:AttachStaticIp"
],
"Resource": [
"*"
]
},
{
"Sid": "DeployCloudFormationStack",
"Effect": "Allow",
"Action": [
"cloudformation:CreateStack",
"cloudformation:UpdateStack",
"cloudformation:DescribeStacks",
"cloudformation:DescribeStackEvents",
"cloudformation:ListStackResources"
], ],
"Resource": [ "Resource": [
"*" "*"
@ -283,36 +238,12 @@ Possible options can be gathered via cli `aws lightsail get-regions`
Required variables: Required variables:
- [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/) - [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/)
- region: e.g. `ams1`, `par1` - region: e.g. ams1, par1
### OpenStack ### OpenStack
You need to source the rc file prior to running Algo. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh) You need to source the rc file prior to running Algo. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh)
### CloudStack
Required variables:
- [cs_config](https://trailofbits.github.io/algo/cloud-cloudstack.html): /path/to/.cloudstack.ini
- cs_region: e.g. `exoscale`
- cs_zones: e.g. `ch-gva2`
The first two can also be defined in your environment, using the variables `CLOUDSTACK_CONFIG` and `CLOUDSTACK_REGION`.
### Hetzner
Required variables:
- hcloud_token: Your [API token](https://trailofbits.github.io/algo/cloud-hetzner.html#api-token) - can also be defined in the environment as HCLOUD_TOKEN
- region: e.g. `nbg1`
### Linode
Required variables:
- linode_token: Your [API token](https://trailofbits.github.io/algo/cloud-linode.html#api-token) - can also be defined in the environment as LINODE_TOKEN
- region: e.g. `us-east`
### Update users ### Update users
Playbook: Playbook:

View file

@ -1,15 +0,0 @@
# Deploy from Google Cloud Shell
If you want to try Algo but don't wish to install the software on your own system you can use the **free** [Google Cloud Shell](https://cloud.google.com/shell/) to deploy a VPN to any supported cloud provider. Note that you cannot choose `Install to existing Ubuntu server` to turn Google Cloud Shell into your VPN server.
1. See the [Cloud Shell documentation](https://cloud.google.com/shell/docs/) to start an instance of Cloud Shell in your browser.
2. Follow the [Algo installation instructions](https://github.com/trailofbits/algo#deploy-the-algo-server) as shown but skip step **3. Install Algo's core dependencies** as they are already installed. Run Algo to deploy to a supported cloud provider.
3. Once Algo has completed, retrieve a copy of the configuration files that were created to your local system. While still in the Algo directory, run:
```
zip -r configs configs
dl configs.zip
```
4. Unzip `configs.zip` on your local system and use the files to configure your VPN clients.

View file

@ -4,8 +4,9 @@ While it is not possible to run your Algo server from within a Docker container,
## Limitations ## Limitations
1. This has not yet been tested with user namespacing enabled. 1. [Advanced](deploy-from-ansible.md) installations are not currently supported; you must use the interactive `algo` script.
2. If you're running this on Windows, take care when editing files under `configs/` to ensure that line endings are set appropriately for Unix systems. 2. This has not yet been tested with user namespacing enabled.
3. If you're running this on Windows, take care when editing files under `configs/` to ensure that line endings are set appropriately for Unix systems.
## Deploying an Algo Server with Docker ## Deploying an Algo Server with Docker
@ -13,74 +14,38 @@ While it is not possible to run your Algo server from within a Docker container,
2. Create a local directory to hold your VPN configs (e.g. `C:\Users\trailofbits\Documents\VPNs\`) 2. Create a local directory to hold your VPN configs (e.g. `C:\Users\trailofbits\Documents\VPNs\`)
3. Create a local copy of [config.cfg](https://github.com/trailofbits/algo/blob/master/config.cfg), with required modifications (e.g. `C:\Users\trailofbits\Documents\VPNs\config.cfg`) 3. Create a local copy of [config.cfg](https://github.com/trailofbits/algo/blob/master/config.cfg), with required modifications (e.g. `C:\Users\trailofbits\Documents\VPNs\config.cfg`)
4. Run the Docker container, mounting your configurations appropriately (assuming the container is named `trailofbits/algo` with a tag `latest`): 4. Run the Docker container, mounting your configurations appropriately (assuming the container is named `trailofbits/algo` with a tag `latest`):
- From Windows:
- From Windows:
```powershell ```powershell
C:\Users\trailofbits> docker run --cap-drop=all -it \ C:\Users\trailofbits> docker run --cap-drop=all -it \
-v C:\Users\trailofbits\Documents\VPNs:/data \ -v C:\Users\trailofbits\Documents\VPNs:/data \
ghcr.io/trailofbits/algo:latest trailofbits/algo:latest
``` ```
- From Linux:
- From Linux:
```bash ```bash
$ docker run --cap-drop=all -it \ $ docker run --cap-drop=all -it \
-v /home/trailofbits/Documents/VPNs:/data \ -v /home/trailofbits/Documents/VPNs:/data \
ghcr.io/trailofbits/algo:latest trailofbits/algo:latest
``` ```
5. When it exits, you'll be left with a fully populated `configs` directory, containing all appropriate configuration data for your clients, and for future server management 5. When it exits, you'll be left with a fully populated `configs` directory, containing all appropriate configuration data for your clients, and for future server management
### Providing Additional Files ### Providing Additional Files
If you need to provide additional files -- like authorization files for Google Cloud Project -- you can simply specify an additional `-v` parameter, and provide the appropriate path when prompted by `algo`. If you need to provide additional files -- like authorization files for Google Cloud Project -- you can simply specify an additional `-v` parameter, and provide the appropriate path when prompted by `algo`.
For example, you can specify `-v C:\Users\trailofbits\Documents\VPNs\gce_auth.json:/algo/gce_auth.json`, making the local path to your credentials JSON file `/algo/gce_auth.json`. For example, you can specify `-v C:\Users\trailofbits\Documents\VPNs\gce_auth.json:/algo/gce_auth.json`, making the local path to your credentials JSON file `/algo/gce_auth.json`.
### Scripted deployment
Ansible variables (see [Deployment from Ansible](deploy-from-ansible.md)) can be passed via `ALGO_ARGS` environment variable.
_The leading `-e` (or `--extra-vars`) is required_, e.g.
```bash
$ ALGO_ARGS="-e
provider=digitalocean
server_name=algo
ondemand_cellular=false
ondemand_wifi=false
dns_adblocking=true
ssh_tunneling=true
store_pki=true
region=ams3
do_token=token"
$ docker run --cap-drop=all -it \
-e "ALGO_ARGS=$ALGO_ARGS" \
-v /home/trailofbits/Documents/VPNs:/data \
ghcr.io/trailofbits/algo:latest
```
## Managing an Algo Server with Docker ## Managing an Algo Server with Docker
Even though the container itself is transient, because you've persisted the configuration data, you can use the same Docker image to manage your Algo server. This is done by setting the environment variable `ALGO_ARGS`. Even though the container itself is transient, because you've persisted the configuration data, you can use the same Docker image to manage your Algo server. This is done by setting the environment variable `ALGO_ARGS`.
If you want to use Algo to update the users on an existing server, specify `-e "ALGO_ARGS=update-users"` in your `docker run` command: If you want to use Algo to update the users on an existing server, specify `-e "ALGO_ARGS=update-users"` in your `docker run` command:
```powershell ```powershell
$ docker run --cap-drop=all -it \ $ docker run --cap-drop=all -it \
-e "ALGO_ARGS=update-users" \ -e "ALGO_ARGS=update-users" \
-v C:\Users\trailofbits\Documents\VPNs:/data \ -v C:\Users\trailofbits\Documents\VPNs:/data \
ghcr.io/trailofbits/algo:latest trailofbits/algo:latest
``` ```
## GNU Makefile for Docker
You can also build and deploy with a Makefile. This simplifies some of the command strings and opens the door for further user configuration.
The `Makefile` consists of three targets: `docker-build`, `docker-deploy`, and `docker-prune`.
`docker-all` will run thru all of them.
## Building Your Own Docker Image ## Building Your Own Docker Image
You can use the Dockerfile provided in this repository as-is, or modify it to suit your needs. Further instructions on building an image can be found in the [Docker engine](https://docs.docker.com/engine/) documents. You can use the Dockerfile provided in this repository as-is, or modify it to suit your needs. Further instructions on building an image can be found in the [Docker engine](https://docs.docker.com/engine/) documents.

View file

@ -0,0 +1,115 @@
# Deploy from Fedora Workstation
These docs were written based on experience on Fedora Workstation 30.
## Prerequisites
### DNF counterparts of apt packages
The following table lists `apt` packages with their `dnf` counterpart. This is purely informative.
Using `python2-*` in favour of `python3-*` as per [declared dependency](https://github.com/trailofbits/algo#deploy-the-algo-server).
| `apt` | `dnf` |
| ----- | ----- |
| `build-essential` | `make automake gcc gcc-c++ kernel-devel` |
| `libssl-dev` | `openssl-devel` |
| `libffi-dev` | `libffi-devel` |
| `python-dev` | `python2-devel` |
| `python-pip` | `python2-pip` |
| `python-setuptools` | `python2-setuptools` |
| `python-virtualenv` | `python2-virtualenv` |
### Install requirements
First, let's make sure our system is up-to-date:
````
dnf upgrade
````
Next, install the required packages:
````
dnf install -y \
ansible \
automake \
gcc \
gcc-c++ \
kernel-devel \
openssl-devel \
libffi-devel \
libselinux-python \
python2-devel \
python2-pip \
python2-setuptools \
python2-virtualenv \
python2-crypto \
python2-pyyaml \
python2-pyOpenSSL \
python2-libselinux \
make
````
## Get Algo
[Download](https://github.com/trailofbits/algo/archive/master.zip) or clone:
````
git clone git@github.com:trailofbits/algo.git
cd algo
````
If you downloaded Algo, unzip it to your preferred location and `cd` into it.
We'll assume from this point forward that our working directory is the `algo` root directory.
## Prepare algo
Some steps are needed before we can deploy our Algo VPN server.
### Check `pip`
Run `pip -v` and check the python version it is using:
````
$ pip -V
pip 19.0.3 from /usr/lib/python2.7/site-packages (python 2.7)
````
`python 2.7` is what we're looking for.
### Setup virtualenv and install requirements
````
python2 -m virtualenv --system-site-packages env
source env/bin/activate
pip -q install --user -r requirements.txt
````
## Configure
Edit the userlist and any other settings you desire in `config.cfg` using your preferred editor.
## Deploy
We can now deploy our server by running:
````
./algo
````
Note the IP and password of the newly created Algo VPN server and store it safely.
If you want to set up the client config on your Fedora Workstation, refer to [the Linux Client docs](client-linux.md).
## Notes on SELinux
If you have SELinux enabled, you'll need to set appropriate file contexts:
````
semanage fcontext -a -t ipsec_key_file_t "$(pwd)(/.*)?"
restorecon -R -v $(pwd)
````
See [this comment](https://github.com/trailofbits/algo/issues/263#issuecomment-328053950).

View file

@ -1,66 +0,0 @@
# Deploy from macOS
While you can't turn a macOS system in an AlgoVPN, you can install the Algo scripts on a macOS system and use them to deploy your AlgoVPN to a cloud provider.
Algo uses [Ansible](https://www.ansible.com) which requires Python 3. macOS includes an obsolete version of Python 2 installed as `/usr/bin/python` which you should ignore.
## macOS 10.15 Catalina
Catalina comes with Python 3 installed as `/usr/bin/python3`. This file, and certain others like `/usr/bin/git`, start out as stub files that prompt you to install the Command Line Developer Tools package the first time you run them. This is the easiest way to install Python 3 on Catalina.
Note that Python 3 from Command Line Developer Tools prior to the release for Xcode 11.5 on 2020-05-20 might not work with Algo. If Software Update does not offer to update an older version of the tools you can download a newer version from [here](https://developer.apple.com/download/more/) (Apple ID login required).
## macOS prior to 10.15 Catalina
You'll need to install Python 3 before you can run Algo. Python 3 is available from different packagers, two of which are listed below.
### Ansible and SSL Validation
Ansible validates SSL network connections using OpenSSL but macOS includes LibreSSL which behaves differently. Therefore each version of Python below includes or depends on its own copy of OpenSSL.
OpenSSL needs access to a list of trusted CA certificates in order to validate SSL connections. Each packager handles initializing this certificate store differently. If you see the error `CERTIFICATE_VERIFY_FAILED` when running Algo make sure you've followed the packager-specific instructions correctly.
### Choose a packager and install Python 3
Choose one of the packagers below as your source for Python 3. Avoid installing versions from multiple packagers on the same Mac as you may encounter conflicts. In particular they might fight over creating symbolic links in `/usr/local/bin`.
#### Option 1: Install using the Homebrew package manager
If you're comfortable using the command line in Terminal the [Homebrew](https://brew.sh) project is a great source of software for macOS.
First install Homebrew using the instructions on the [Homebrew](https://brew.sh) page.
The install command below takes care of initializing the CA certificate store.
##### Installation
```
brew install python3
```
After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/usr/local/bin/python3`.
##### Removal
```
brew uninstall python3
```
#### Option 2: Install the package from Python.org
If you don't want to install a package manager you can download the Python package for macOS from [python.org](https://www.python.org/downloads/mac-osx/).
##### Installation
Download the most recent version of Python and install it like any other macOS package. Then initialize the CA certificate store from Finder by double-clicking on the file `Install Certificates.command` found in the `/Applications/Python 3.8` folder.
When you double-click on `Install Certificates.command` a new Terminal window will open. If the window remains blank then the command has not run correctly. This can happen if you've changed the default shell in Terminal Preferences. Try changing it back to the default and run `Install Certificates.command` again.
After installation open a new tab or window in Terminal and verify that the command `which python3` returns either `/usr/local/bin/python3` or `/Library/Frameworks/Python.framework/Versions/3.8/bin/python3`.
##### Removal
Unfortunately the python.org package does not include an uninstaller and removing it requires several steps:
1. In Finder, delete the package folder found in `/Applications`.
2. In Finder, delete the *rest* of the package found under ` /Library/Frameworks/Python.framework/Versions`.
3. In Terminal, undo the changes to your `PATH` by running:
```mv ~/.bash_profile.pysave ~/.bash_profile```
4. In Terminal, remove the dozen or so symbolic links the package created in `/usr/local/bin`. Or just leave them because installing another version of Python will overwrite most of them.

View file

@ -0,0 +1,86 @@
# RedHat/CentOS 6.x pre-installation requirements
Many people prefer RedHat or CentOS 6 (or similar variants like Amazon Linux) for their stability and lack of systemd. Unfortunately, there are a number of dated libraries, notably Python 2.6, that prevent Algo from running without errors. This script will prepare a RedHat, CentOS, or similar VM to deploy to Algo cloud instances.
## Step 1: Prep for RH/CentOS 6.8/Amazon
```shell
yum -y -q update
yum -y -q install epel-release
```
Enable any kernel updates:
```shell
reboot
```
## Step 2: Install Ansible and launch Algo
Fix GPG key warnings during Ansible rpm install:
```shell
rpm --import https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6
```
Fix GPG key warning during official Software Collections (SCL) package install:
```shell
rpm --import https://raw.githubusercontent.com/sclorg/centos-release-scl/master/centos-release-scl/RPM-GPG-KEY-CentOS-SIG-SCLo
```
RedHat/CentOS 6.x uses Python 2.6 by default, which is explicitly deprecated and produces many warnings and errors, so we must install a safe, non-invasive 2.7 tool set which has to be expressly enabled (and will not survive login sessions and reboots):
```shell
# Install the Software Collections Library (to enable Python 2.7)
yum -y -q install centos-release-SCL
# 2.7 will not be used until explicitly enabled, per login session
yum -y -q install python27-python-devel python27-python-setuptools python27-python-pip
yum -y -q install openssl-devel libffi-devel automake gcc gcc-c++ kernel-devel wget unzip ansible nano
# Enable 2.7 default for this session (needs re-run between logins & reboots)
# shellcheck disable=SC1091
source /opt/rh/python27/enable
# We're now defaulted to 2.7
# Upgrade pip itself
pip -q install --upgrade pip
# python-devel needed to prevent setup.py crash
pip -q install pycrypto
# pycrypto 2.7.1 needed for latest security patch
pip -q install setuptools --upgrade
# virtualenv to make installing dependencies easier
pip -q install virtualenv
wget -q https://github.com/trailofbits/algo/archive/master.zip
unzip master.zip
cd algo-master || echo "No Algo directory found"
# Set up a virtualenv and install the local Algo dependencies (must be run from algo-master)
virtualenv env && source env/bin/activate
pip -q install -r requirements.txt
# Edit the userlist and any other settings you desire
nano config.cfg
# Now you can run the Algo installer!
./algo
```
## Post-install macOS
1. Copy `./configs/*mobileconfig` to your local Mac
2. Install the VPN profile on your Mac (10.10+ required)
```shell
/usr/bin/profiles -I -F ./x.x.x.x_NAME.mobileconfig
```
3. To remove:
```shell
/usr/bin/profiles -D -F ./x.x.x.x_NAME.mobileconfig
```
The VPN connection will now appear under Networks (which can be pinned to the top menu bar if preferred)

View file

@ -1,13 +1,10 @@
# Deploy from script or cloud-init # Deploy from script or cloud-init
You can use `install.sh` to prepare the environment and deploy AlgoVPN on the local Ubuntu server in one shot using cloud-init, or run the script directly on the server after it's been created. You can use `install.sh` to prepare the environment and deploy AlgoVPN on the local Ubuntu server in one shot using cloud-init, or run the script directly on the server after it's been created. The script doesn't configure any parameters in your cloud, so it's on your own to configure related [firewall rules](/docs/firewalls.md), a floating ip address and other resources you may need. The output of the install script (including the p12 and CA passwords) and user config files will be installed into the `/opt/algo` directory.
The script doesn't configure any parameters in your cloud, so you're on your own to configure related [firewall rules](/docs/firewalls.md), a floating IP address and other resources you may need. The output of the install script (including the p12 and CA passwords) can be found at `/var/log/algo.log`, and user config files will be installed into the `/opt/algo/configs/localhost` directory. If you need to update users later, `cd /opt/algo`, change the user list in `config.cfg`, install additional dependencies as in step 4 of the [main README](https://github.com/trailofbits/algo/blob/master/README.md), and run `./algo update-users` from that directory.
## Cloud init deployment ## Cloud init deployment
You can copy-paste the snippet below to the user data (cloud-init or startup script) field when creating a new server. You can copy-paste the snippet below to the user data (cloud-init or startup script) field when creating a new server. For now it is only possible for [DigitalOcean](https://www.digitalocean.com/docs/droplets/resources/metadata/), Amazon [EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and [Lightsail](https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-how-to-configure-server-additional-data-shell-script), [Google Cloud](https://cloud.google.com/compute/docs/startupscript), [Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init) and [Vultr](https://my.vultr.com/startup/), although Vultr doesn't [officially support cloud-init](https://www.vultr.com/docs/getting-started-with-cloud-init).
For now this has only been successfully tested on [DigitalOcean](https://www.digitalocean.com/docs/droplets/resources/metadata/), Amazon [EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and [Lightsail](https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-how-to-configure-server-additional-data-shell-script), [Google Cloud](https://cloud.google.com/compute/docs/startupscript), [Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init) and [Vultr](https://my.vultr.com/startup/), although Vultr doesn't [officially support cloud-init](https://www.vultr.com/docs/getting-started-with-cloud-init).
``` ```
#!/bin/bash #!/bin/bash
@ -17,31 +14,19 @@ The command will prepare the environment and install AlgoVPN with the default pa
## Variables ## Variables
- `METHOD`: which method of the deployment to use. Possible values are local and cloud. Default: cloud. The cloud method is intended to use in cloud-init deployments only. If you are not using cloud-init to deploy the server you have to use the local method. `METHOD` - which method of the deployment to use. Possible values are local and cloud. Default: cloud. The cloud method is intended to use in cloud-init deployments only. If you are not using cloud-init to deploy the server you have to use the local method.
`ONDEMAND_CELLULAR` - "Connect On Demand" when connected to cellular networks. Boolean. Default: false.
- `ONDEMAND_CELLULAR`: "Connect On Demand" when connected to cellular networks. Boolean. Default: false. `ONDEMAND_WIFI` - "Connect On Demand" when connected to Wi-Fi. Default: false.
`ONDEMAND_WIFI_EXCLUDE` - List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand". Comma-separated list.
- `ONDEMAND_WIFI`: "Connect On Demand" when connected to Wi-Fi. Default: false. `STORE_PKI` - To retain the PKI. (required to add users in the future, but less secure). Default: false.
`DNS_ADBLOCKING` - To install an ad blocking DNS resolver. Default: false.
- `ONDEMAND_WIFI_EXCLUDE`: List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand". Comma-separated list. `SSH_TUNNELING` - Enable SSH tunneling for each user. Default: false.
`ENDPOINT` - The public IP address or domain name of your server: (IMPORTANT! This is used to verify the certificate). It will be gathered automatically for DigitalOcean, AWS, GCE, Azure or Vultr if the `METHOD` is cloud. Otherwise you need to define this variable according to your public IP address.
- `STORE_PKI`: To retain the PKI. (required to add users in the future, but less secure). Default: false. `USERS` - list of VPN users. Comma-separated list. Default: user1.
`REPO_SLUG` - Owner and repository that used to get the installation scripts from. Default: trailofbits/algo.
- `DNS_ADBLOCKING`: To install an ad blocking DNS resolver. Default: false. `REPO_BRANCH` - Branch for `REPO_SLUG`. Default: master.
`EXTRA_VARS` - Additional extra variables.
- `SSH_TUNNELING`: Enable SSH tunneling for each user. Default: false. `ANSIBLE_EXTRA_ARGS` - Any available ansible parameters. ie: `--skip-tags apparmor`.
- `ENDPOINT`: The public IP address or domain name of your server: (IMPORTANT! This is used to verify the certificate). It will be gathered automatically for DigitalOcean, AWS, GCE, Azure or Vultr if the `METHOD` is cloud. Otherwise you need to define this variable according to your public IP address.
- `USERS`: list of VPN users. Comma-separated list. Default: user1.
- `REPO_SLUG`: Owner and repository used to get the installation scripts from. Default: trailofbits/algo.
- `REPO_BRANCH`: Branch for `REPO_SLUG`. Default: master.
- `EXTRA_VARS`: Additional extra variables.
- `ANSIBLE_EXTRA_ARGS`: Any available ansible parameters. ie: `--skip-tags apparmor`.
## Examples ## Examples

View file

@ -1,15 +1,8 @@
# Deploy from Windows # Windows client prerequisite
The Algo scripts can't be run directly on Windows, but you can use the Windows Subsystem for Linux (WSL) to run a copy of Ubuntu Linux right on your Windows system. You can then run Algo to deploy a VPN server to a supported cloud provider, though you can't turn the instance of Ubuntu running under WSL into a VPN server.
To run WSL you will need:
* A 64-bit system
* 64-bit Windows 10 (Anniversary update or later version) * 64-bit Windows 10 (Anniversary update or later version)
## Install WSL Once you verify your system is 64-bit (32-bit is not supported) and up to date, you have to do a few manual steps to enable the 'Windows Subsystem for Linux':
Enable the 'Windows Subsystem for Linux':
1. Open 'Settings' 1. Open 'Settings'
2. Click 'Update & Security', then click the 'For developers' option on the left. 2. Click 'Update & Security', then click the 'For developers' option on the left.
@ -21,54 +14,20 @@ Wait a minute for Windows to install a few things in the background (it will eve
2. Click on 'Turn Windows features on or off' 2. Click on 'Turn Windows features on or off'
3. Scroll down and check 'Windows Subsystem for Linux', and then click OK. 3. Scroll down and check 'Windows Subsystem for Linux', and then click OK.
4. The subsystem will be installed, then Windows will require a restart. 4. The subsystem will be installed, then Windows will require a restart.
5. Restart Windows and then install [Ubuntu 20.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-2004-lts/9n6svws3rx71). 5. Restart Windows and then [install Ubuntu from the Windows Store](https://www.microsoft.com/p/ubuntu/9nblggh4msv6).
6. Run Ubuntu from the Start menu. It will take a few minutes to install. It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows. 6. Run Ubuntu from the Start menu. It will take a few minutes to install. It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows.
## Install Algo
Run these commands in the Ubuntu Terminal to install a prerequisite package and download the Algo scripts to your home directory. Note that when using WSL you should **not** install Algo in the `/mnt/c` directory due to problems with file permissions. Install additional packages:
You may need to follow [these directions](https://devblogs.microsoft.com/commandline/copy-and-paste-arrives-for-linuxwsl-consoles/) in order to paste commands into the Ubuntu Terminal.
```shell ```shell
cd sudo apt-get update && sudo apt-get install git build-essential libssl-dev libffi-dev python-dev python-pip python-setuptools python-virtualenv -y
umask 0002
sudo apt update
sudo apt install -y python3-virtualenv
git clone https://github.com/trailofbits/algo
cd algo
``` ```
## Post installation steps Clone the Algo repository:
These steps are only needed if you clone the Algo repository to a host machine disk (C:, D:, etc.). WSL mounts host system disks under the `/mnt` directory.
### Allow git to change files metadata
By default git cannot change file metadata (using chmod, for example) for files stored on host machine disks (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#set-wsl-launch-settings). Allow it:
1. Start Ubuntu Terminal.
2. Edit /etc/wsl.conf (create it if it doesn't exist). Add the following:
```
[automount]
options = "metadata"
```
3. Close all Ubuntu Terminals.
4. Run powershell.
5. Run `wsl --shutdown` in powershell.
### Allow running Ansible in a world writable directory
Ansible treats host machine directories as world writable and does not load .cfg files from them by default (https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir). To fix this, run the following inside the `algo` directory:
```shell ```shell
chmod 744 . cd ~ && git clone https://github.com/trailofbits/algo && cd algo
``` ```
Now you can continue by following the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) from the 4th step to deploy your Algo server! Now, you can go through the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) (start from the 4th step) and deploy your Algo server!
You'll be instructed to edit the file `config.cfg` in order to specify the Algo user accounts to be created. If you're new to Linux the simplest editor to use is `nano`. To edit the file while in the `algo` directory, run:
```shell
nano config.cfg
```
Once `./algo` has finished you can use the `cp` command to copy the configuration files from the `configs` directory into your Windows directory under `/mnt/c/Users` for easier access.

View file

@ -1,25 +1,11 @@
# Local Installation # Local Installation
**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information. You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment.
------
## Outbound VPN Server
You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment. If you're new to Algo or unfamiliar with Linux you'll find a cloud deployment to be easier.
To perform a local installation, install the Algo scripts following the normal installation instructions, then choose:
Install the Algo scripts following the normal installation instructions, then choose:
``` ```
Install to existing Ubuntu latest LTS server (for more advanced users) Install to existing Ubuntu 18.04 or 19.04 server (Advanced)
``` ```
Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`). Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`).
## Inbound VPN Server (also called "Road Warrior" setup) **PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
Some may find it useful to set up an Algo server on an Ubuntu box on your home LAN, with the intention of being able to securely access your LAN and any resources on it when you're traveling elsewhere (the ["road warrior" setup](https://en.wikipedia.org/wiki/Road_warrior_(computing))). A few tips if you're doing so:
- Make sure you forward any [relevant incoming ports](/docs/firewalls.md#external-firewall) to the Algo server from your router;
- Change `BetweenClients_DROP` in `config.cfg` to `false`, and also consider changing `block_smb` and `block_netbios` to `false`;
- If you want to use a DNS server on your LAN to resolve local domain names properly (e.g. a Pi-hole), set the `dns_encryption` flag in `config.cfg` to `false`, and change `dns_servers` to the local DNS server IP (i.e. `192.168.1.2`).

View file

@ -2,7 +2,7 @@
Algo officially supports the [cloud providers listed here](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another virtual hosting provider, that provider must support: Algo officially supports the [cloud providers listed here](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another virtual hosting provider, that provider must support:
1. the base operating system image that Algo uses (Ubuntu latest LTS release), and 1. the base operating system image that Algo uses (Ubuntu 18.04, 19.04), and
2. a minimum of certain kernel modules required for the strongSwan IPsec server. 2. a minimum of certain kernel modules required for the strongSwan IPsec server.
Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider. Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider.

View file

@ -1,7 +1,6 @@
# FAQ # FAQ
* [Has Algo been audited?](#has-algo-been-audited) * [Has Algo been audited?](#has-algo-been-audited)
* [What's the current status of WireGuard?](#whats-the-current-status-of-wireguard)
* [Why aren't you using Tor?](#why-arent-you-using-tor) * [Why aren't you using Tor?](#why-arent-you-using-tor)
* [Why aren't you using Racoon, LibreSwan, or OpenSwan?](#why-arent-you-using-racoon-libreswan-or-openswan) * [Why aren't you using Racoon, LibreSwan, or OpenSwan?](#why-arent-you-using-racoon-libreswan-or-openswan)
* [Why aren't you using a memory-safe or verified IKE daemon?](#why-arent-you-using-a-memory-safe-or-verified-ike-daemon) * [Why aren't you using a memory-safe or verified IKE daemon?](#why-arent-you-using-a-memory-safe-or-verified-ike-daemon)
@ -12,16 +11,14 @@
* [Can DNS filtering be disabled?](#can-dns-filtering-be-disabled) * [Can DNS filtering be disabled?](#can-dns-filtering-be-disabled)
* [Wasn't IPSEC backdoored by the US government?](#wasnt-ipsec-backdoored-by-the-us-government) * [Wasn't IPSEC backdoored by the US government?](#wasnt-ipsec-backdoored-by-the-us-government)
* [What inbound ports are used?](#what-inbound-ports-are-used) * [What inbound ports are used?](#what-inbound-ports-are-used)
* [How do I monitor user activity?](#how-do-i-monitor-user-activity)
* [How do I reach another connected client?](#how-do-i-reach-another-connected-client)
## Has Algo been audited? ## Has Algo been audited?
No. This project is under active development. We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://slack.empirehacking.nyc). No. This project is under active development. We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://empireslacking.herokuapp.com).
## What's the current status of WireGuard? ## What's the current status of WireGuard?
[WireGuard reached "stable" 1.0.0 release](https://lists.zx2c4.com/pipermail/wireguard/2020-March/005206.html) in Spring 2020. It has undergone [substantial](https://www.wireguard.com/formal-verification/) security review. [WireGuard is a work in progress](https://www.wireguard.com/#work-in-progress). It has undergone [substantial](https://www.wireguard.com/formal-verification/) security review, however, its authors are appropriately cautious about its safety and the protocol is subject to change. As a result, WireGuard does not yet have a "stable" 1.0 release. Releases are tagged with their build date -- "0.0.YYYYMMDD" -- and users should be advised to apply new updates when they are available.
## Why aren't you using Tor? ## Why aren't you using Tor?
@ -45,11 +42,11 @@ Alpine Linux is not supported out-of-the-box by any major cloud provider. We are
## I deployed an Algo server. Can you update it with new features? ## I deployed an Algo server. Can you update it with new features?
No. By design, the Algo development team has no access to any Algo server that our users have deployed. We cannot modify the configuration, update the software, or sniff the traffic that goes through your personal Algo VPN server. This prevents scenarios where we are legally compelled or hacked to push down backdoored updates that surveil our users. No. By design, the Algo development team has no access to any Algo server that our users have deployed. We cannot modify the configuration, update the software, or sniff the traffic that goes through your personal Algo VPN server. This prevents scenarios where we are legally compelled or hacked to push down backdoored updates that surveil our users.
As a result, once your Algo server has been deployed, it is yours to maintain. It will use unattended-upgrades by default to apply security and feature updates to Ubuntu, as well as to the core VPN software of strongSwan, dnscrypt-proxy and WireGuard. However, if you want to take advantage of new features available in the current release of Algo, then you have two options. You can use the [SSH administrative interface](/README.md#ssh-into-algo-server) to make the changes you want on your own or you can shut down the server and deploy a new one (recommended). As a result, once your Algo server has been deployed, it is yours to maintain. If you want to take advantage of new features available in the current release of Algo, then you have two options. You can use the [SSH administrative interface](/README.md#ssh-into-algo-server) to make the changes you want on your own or you can shut down the server and deploy a new one (recommended).
As an extension of this rationale, most configuration options (other than users) available in `config.cfg` can only be set at the time of initial deployment. In the future, we will make it easier for users who want to update their own servers by providing official releases of Algo. Each official release will summarize the changes from the last release to make it easier to follow along with them.
## Where did the name "Algo" come from? ## Where did the name "Algo" come from?
@ -57,7 +54,7 @@ Algo is short for "Al Gore", the **V**ice **P**resident of **N**etworks everywhe
## Can DNS filtering be disabled? ## Can DNS filtering be disabled?
You can temporarily disable DNS filtering for all IPsec clients at once with the following workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=<random_ip>` to `rightdns=8.8.8.8`. Then run `sudo systemctl restart strongswan`. DNS filtering for WireGuard clients has to be disabled on each client device separately by modifying the settings in the app, or by directly modifying the `DNS` setting on the `clientname.conf` file. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled. You can temporarily disable DNS filtering for all IPsec clients at once with the following workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=<random_ip>` to `rightdns=8.8.8.8`. Then run `sudo systemctl restart strongswan`. DNS filtering for Wireguard clients has to be disabled on each client device separately by modifying the settings in the app, or by directly modifying the `DNS` setting on the `clientname.conf` file. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled.
## Wasn't IPSEC backdoored by the US government? ## Wasn't IPSEC backdoored by the US government?
@ -81,12 +78,4 @@ No.
## What inbound ports are used? ## What inbound ports are used?
You should only need 4160/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information. You should only need 22/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
## How do I monitor user activity?
Your Algo server will track IPsec client logins by default in `/var/log/syslog`. This will give you client names, date/time of connection and reconnection, and what IP addresses they're connecting from. This can be disabled entirely by setting `strongswan_log_level` to `-1` in `config.cfg`. WireGuard doesn't save any logs, but entering `sudo wg` on the server will give you the last endpoint and contact time of each client. Disabling this is [paradoxically difficult](https://git.zx2c4.com/blind-operator-mode/about/). There isn't any out-of-the-box way to monitor actual user _activity_ (e.g. websites browsed, etc.)
## How do I reach another connected client?
By default, your Algo server doesn't allow connections between connected clients. This can be changed at the time of deployment by enabling the `BetweenClients_DROP` flag in `config.cfg`. See the ["Road Warrior" instructions](/docs/deploy-to-ubuntu.md#road-warrior-setup) for more details.

View file

@ -24,7 +24,7 @@ Any external firewall must be configured to pass the following incoming ports ov
Port | Protocol | Description | Related variables in `config.cfg` Port | Protocol | Description | Related variables in `config.cfg`
---- | -------- | ----------- | --------------------------------- ---- | -------- | ----------- | ---------------------------------
4160 | TCP | Secure Shell (SSH) | `ssh_port` (**cloud** only; for **local** port remains 22) 22 | TCP | Secure Shell (SSH) | None
500 | UDP | IPsec IKEv2 | `ipsec_enabled` 500 | UDP | IPsec IKEv2 | `ipsec_enabled`
4500 | UDP | IPsec NAT-T | `ipsec_enabled` 4500 | UDP | IPsec NAT-T | `ipsec_enabled`
51820 | UDP | WireGuard | `wireguard_enabled`, `wireguard_port` 51820 | UDP | WireGuard | `wireguard_enabled`, `wireguard_port`

Binary file not shown.

Before

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 70 KiB

After

Width:  |  Height:  |  Size: 121 KiB

View file

@ -1,31 +1,28 @@
# Algo VPN documentation # Algo VPN documentation
* Deployment instructions * Deployment instructions
- Deploy from [Fedora Workstation (26)](deploy-from-fedora-workstation.md)
- Deploy from [RedHat/CentOS 6.x](deploy-from-redhat-centos6.md) - Deploy from [RedHat/CentOS 6.x](deploy-from-redhat-centos6.md)
- Deploy from [Windows](deploy-from-windows.md) - Deploy from [Windows](deploy-from-windows.md)
- Deploy from a [Docker container](deploy-from-docker.md) - Deploy from a [Docker container](deploy-from-docker.md)
- Deploy from [Ansible](deploy-from-ansible.md) non-interactively - Deploy from [Ansible](deploy-from-ansible.md) non-interactively
- Deploy onto a [cloud server at time of creation with shell script or cloud-init](deploy-from-script-or-cloud-init-to-localhost.md) - Deploy onto a [cloud server at time of creation](deploy-from-script-or-cloud-init-to-localhost.md)
- Deploy from [macOS](deploy-from-macos.md)
- Deploy from [Google Cloud Shell](deploy-from-cloudshell.md)
* Client setup * Client setup
- Setup [Android](client-android.md) clients - Setup [Android](client-android.md) clients
- Setup [Generic/Linux](client-linux.md) clients with Ansible - Setup [Generic/Linux](client-linux.md) clients with Ansible
- Setup Ubuntu clients to use [WireGuard](client-linux-wireguard.md) - Setup Ubuntu clients to use [WireGuard](client-linux-wireguard.md)
- Setup Linux clients to use [IPsec](client-linux-ipsec.md) - Setup Apple devices to use [IPSEC](client-apple-ipsec.md)
- Setup Apple devices to use [IPsec](client-apple-ipsec.md) - Setup Macs running macOS 10.13 or older to use [Wireguard](client-macos-wireguard.md)
- Setup Macs running macOS 10.13 or older to use [WireGuard](client-macos-wireguard.md) - Manual Windows 10 client setup for [IPSEC](client-windows.md)
* Cloud provider setup * Cloud provider setup
- Configure [Amazon EC2](cloud-amazon-ec2.md) - Configure [Amazon EC2](cloud-amazon-ec2.md)
- Configure [Azure](cloud-azure.md) - Configure [Azure](cloud-azure.md)
- Configure [DigitalOcean](cloud-do.md) - Configure [DigitalOcean](cloud-do.md)
- Configure [Google Cloud Platform](cloud-gce.md) - Configure [Google Cloud Platform](cloud-gce.md)
- Configure [Vultr](cloud-vultr.md) - Configure [Vultr](cloud-vultr.md)
- Configure [CloudStack](cloud-cloudstack.md)
- Configure [Hetzner Cloud](cloud-hetzner.md)
* Advanced Deployment * Advanced Deployment
- Deploy to your own [FreeBSD](deploy-to-freebsd.md) server - Deploy to your own [FreeBSD](deploy-to-freebsd.md) server
- Deploy to your own [Ubuntu](deploy-to-ubuntu.md) server, and road warrior setup - Deploy to your own [Ubuntu](deploy-to-ubuntu.md) server
- Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md) - Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md)
* [FAQ](faq.md) * [FAQ](faq.md)
* [Firewalls](firewalls.md) * [Firewalls](firewalls.md)

View file

@ -9,21 +9,16 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
* [Error: "TypeError: must be str, not bytes"](#error-typeerror-must-be-str-not-bytes) * [Error: "TypeError: must be str, not bytes"](#error-typeerror-must-be-str-not-bytes)
* [Error: "ansible-playbook: command not found"](#error-ansible-playbook-command-not-found) * [Error: "ansible-playbook: command not found"](#error-ansible-playbook-command-not-found)
* [Error: "Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION](#could-not-fetch-url--tlsv1_alert_protocol_version) * [Error: "Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION](#could-not-fetch-url--tlsv1_alert_protocol_version)
* [Fatal: "Failed to validate the SSL certificate for ..."](#fatal-failed-to-validate-the-SSL-certificate)
* [Bad owner or permissions on .ssh](#bad-owner-or-permissions-on-ssh) * [Bad owner or permissions on .ssh](#bad-owner-or-permissions-on-ssh)
* [The region you want is not available](#the-region-you-want-is-not-available) * [The region you want is not available](#the-region-you-want-is-not-available)
* [AWS: SSH permission denied with an ECDSA key](#aws-ssh-permission-denied-with-an-ecdsa-key) * [AWS: SSH permission denied with an ECDSA key](#aws-ssh-permission-denied-with-an-ecdsa-key)
* [AWS: "Deploy the template" fails with CREATE_FAILED](#aws-deploy-the-template-fails-with-create_failed) * [AWS: "Deploy the template" fails with CREATE_FAILED](#aws-deploy-the-template-fails-with-create_failed)
* [AWS: not authorized to perform: cloudformation:UpdateStack](#aws-not-authorized-to-perform-cloudformationupdatestack) * [AWS: not authorized to perform: cloudformation:UpdateStack](#aws-not-authorized-to-perform-cloudformationupdatestack)
* [DigitalOcean: error tagging resource 'xxxxxxxx': param is missing or the value is empty: resources](#digitalocean-error-tagging-resource) * [DigitalOcean: error tagging resource 'xxxxxxxx': param is missing or the value is empty: resources](#digitalocean-error-tagging-resource)
* [Azure: The client xxx with object id xxx does not have authorization to perform action Microsoft.Resources/subscriptions/resourcegroups/write' over scope](#azure-deployment-permissions-error)
* [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid) * [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid)
* [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh) * [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh)
* [Error: Failed to create symlinks for deploying to localhost](#error-failed-to-create-symlinks-for-deploying-to-localhost)
* [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths) * [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths)
* [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password) * [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password)
* [Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160](#old-networking-firewall-in-place)
* [Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; "](#linode-error-uable-to-query-the-linode-api-saw-400-the-requested-distribution-is-not-supported-by-this-stackscript)
* [Connection Problems](#connection-problems) * [Connection Problems](#connection-problems)
* [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites) * [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites)
* [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device) * [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device)
@ -41,10 +36,6 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
Look here if you have a problem running the installer to set up a new Algo server. Look here if you have a problem running the installer to set up a new Algo server.
### Python version is not supported
The minimum Python version required to run Algo is 3.8. Most modern operation systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manual download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md)
### Error: "You have not agreed to the Xcode license agreements" ### Error: "You have not agreed to the Xcode license agreements"
On macOS, you tried to install the dependencies with pip and encountered the following error: On macOS, you tried to install the dependencies with pip and encountered the following error:
@ -114,22 +105,25 @@ Command /usr/bin/python -c "import setuptools, tokenize;__file__='/private/tmp/p
Storing debug log for failure in /Users/algore/Library/Logs/pip.log Storing debug log for failure in /Users/algore/Library/Logs/pip.log
``` ```
You are running an old version of `pip` that cannot download the binary `cryptography` dependency. Upgrade to a new version of `pip` by running `sudo python3 -m pip install -U pip`. You are running an old version of `pip` that cannot download the binary `cryptography` dependency. Upgrade to a new version of `pip` by running `sudo pip install -U pip`.
### Error: "TypeError: must be str, not bytes"
You tried to install Algo and you see many repeated errors referencing `TypeError`, such as `TypeError: '>=' not supported between instances of 'TypeError' and 'int'` and `TypeError: must be str, not bytes`. For example:
```
TASK [Wait until SSH becomes ready...] *****************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: must be str, not bytes
fatal: [localhost -> localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/var/folders/x_/nvr61v455qq98vp22k5r5vm40000gn/T/ansible_6sdjysth/ansible_module_wait_for.py\", line 538, in <module>\n main()\n File \"/var/folders/x_/nvr61v455qq98vp22k5r5vm40000gn/T/ansible_6sdjysth/ansible_module_wait_for.py\", line 483, in main\n data += response\nTypeError: must be str, not bytes\n", "module_stdout": "", "msg": "MODULE FAILURE"}
```
You may be trying to run Algo with Python3. Algo uses [Ansible](https://github.com/ansible/ansible) which has issues with Python3, although this situation is improving over time. Try running Algo with Python2 to fix this issue. Open your terminal and `cd` to the directory with Algo, then run: ``virtualenv -p `which python2.7` env && source env/bin/activate && pip install -r requirements.txt``
### Error: "ansible-playbook: command not found" ### Error: "ansible-playbook: command not found"
You tried to install Algo and you see an error that reads "ansible-playbook: command not found." You tried to install Algo and you see an error that reads "ansible-playbook: command not found."
You did not finish step 4 in the installation instructions, "[Install Algo's remaining dependencies](https://github.com/trailofbits/algo#deploy-the-algo-server)." Algo depends on [Ansible](https://github.com/ansible/ansible), an automation framework, and this error indicates that you do not have Ansible installed. Ansible is installed by `pip` when you run `python3 -m pip install -r requirements.txt`. You must complete the installation instructions to run the Algo server deployment process. You did not finish step 4 in the installation instructions, "[Install Algo's remaining dependencies](https://github.com/trailofbits/algo#deploy-the-algo-server)." Algo depends on [Ansible](https://github.com/ansible/ansible), an automation framework, and this error indicates that you do not have Ansible installed. Ansible is installed by `pip` when you run `python -m pip install -r requirements.txt`. You must complete the installation instructions to run the Algo server deployment process.
### Fatal: "Failed to validate the SSL certificate"
You received a message like this:
```
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Failed to validate the SSL certificate for api.digitalocean.com:443. Make sure your managed systems have a valid CA certificate installed. You can use validate_certs=False if you do not need to confirm the servers identity but this is unsafe and not recommended. Paths checked for this platform: /etc/ssl/certs, /etc/ansible, /usr/local/etc/openssl. The exception msg was: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1076).", "status": -1, "url": "https://api.digitalocean.com/v2/regions"}
```
Your local system does not have a CA certificate that can validate the cloud provider's API. Are you using MacPorts instead of Homebrew? The MacPorts openssl installation does not include a CA certificate, but you can fix this by installing the [curl-ca-bundle](https://andatche.com/articles/2012/02/fixing-ssl-ca-certificates-with-openssl-from-macports/) port with `port install curl-ca-bundle`. That should do the trick.
### Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION ### Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION
@ -143,9 +137,9 @@ No matching distribution found for SecretStorage<3 (from -r requirements.txt (li
It's time to upgrade your python. It's time to upgrade your python.
`brew upgrade python3` `brew upgrade python2`
You can also download python 3.7.x from python.org. You can also download python 2.7.x from python.org.
### Bad owner or permissions on .ssh ### Bad owner or permissions on .ssh
@ -227,40 +221,6 @@ The error is caused because Digital Ocean changed its API to treat the tag argum
5. Finally run `doctl compute tag list` to make sure that the tag has been deleted 5. Finally run `doctl compute tag list` to make sure that the tag has been deleted
6. Run algo as directed 6. Run algo as directed
### Azure: No such file or directory: '/home/username/.azure/azureProfile.json'
```
TASK [cloud-azure : Create AlgoVPN Server] *****************************************************************************************************************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv.
The error was: FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/.azure/azureProfile.json'
fatal: [localhost]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):
File \"/usr/local/lib/python3.6/dist-packages/azure/cli/core/_session.py\", line 39, in load
with codecs_open(self.filename, 'r', encoding=self._encoding) as f:
File \"/usr/lib/python3.6/codecs.py\", line 897, in open\n file = builtins.open(filename, mode, buffering)
FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/.azure/azureProfile.json'
", "module_stdout": "", "msg": "MODULE FAILURE
See stdout/stderr for the exact error", "rc": 1}
```
It happens when your machine is not authenticated in the azure cloud, follow this [guide](https://trailofbits.github.io/algo/cloud-azure.html) to configure your environment
### Azure: Deployment Permissions Error
The AAD Application Registration (aka, the 'Service Principal', where you got the ClientId) needs permission to create the resources for the subscription. Otherwise, you will get the following error when you run the Ansible deploy script:
```
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Resource group create_or_update failed with status code: 403 and message: The client 'xxxxx' with object id 'THE_OBJECT_ID' does not have authorization to perform action 'Microsoft.Resources/subscriptions/resourcegroups/write' over scope '/subscriptions/THE_SUBSCRIPTION_ID/resourcegroups/algo' or the scope is invalid. If access was recently granted, please refresh your credentials."}
```
The solution for this is to open the Azure CLI and run the following command to grant contributor role to the Service Principal:
```
az role assignment create --assignee-object-id THE_OBJECT_ID --scope subscriptions/THE_SUBSCRIPTION_ID --role contributor
```
After this is applied, the Service Principal has permissions to create the resources and you can re-run `ansible-playbook main.yml` to complete the deployment.
### Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid ### Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid
You tried to deploy Algo from Windows and you received an error like this one: You tried to deploy Algo from Windows and you received an error like this one:
@ -294,41 +254,6 @@ You need to add the following to the ansible.cfg in repo root:
control_path_dir=/dev/shm/ansible_control_path control_path_dir=/dev/shm/ansible_control_path
``` ```
### Error: Failed to create symlinks for deploying to localhost
You tried to run Algo and you received an error like this one:
```
TASK [Create a symlink if deploying to localhost] ********************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "gid": 1000, "group": "ubuntu", "mode": "0775", "msg": "the directory configs/localhost is not empty, refusing to convert it", "owner": "ubuntu", "path": "configs/localhost", "size": 4096, "state": "directory", "uid": 1000}
included: /home/ubuntu/algo-master/playbooks/rescue.yml for localhost
TASK [debug] *********************************************************************************************************
ok: [localhost] => {
"fail_hint": [
"Sorry, but something went wrong!",
"Please check the troubleshooting guide.",
"https://trailofbits.github.io/algo/troubleshooting.html"
]
}
TASK [Fail the installation] *****************************************************************************************
```
This error is usually encountered when using the local install option and `localhost` is provided in answer to this question, which is expecting an IP address or domain name of your server:
```
Enter the public IP address or domain name of your server: (IMPORTANT! This is used to verify the certificate)
[localhost]
:
```
You should remove the files in /etc/wireguard/ and configs/ as follows:
```ssh
sudo rm -rf /etc/wireguard/*
rm -rf configs/*
```
And then immediately re-run `./algo` and provide a domain name or IP address in response to the question referenced above.
### Wireguard: Unable to find 'configs/...' in expected paths ### Wireguard: Unable to find 'configs/...' in expected paths
You tried to run Algo and you received an error like this one: You tried to run Algo and you received an error like this one:
@ -339,11 +264,10 @@ TASK [wireguard : Generate public keys] ****************************************
fatal: [localhost]: FAILED! => {"msg": "An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: configs/xxx.xxx.xxx.xxx/wireguard//private/dan"} fatal: [localhost]: FAILED! => {"msg": "An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: configs/xxx.xxx.xxx.xxx/wireguard//private/dan"}
``` ```
This error is usually hit when using the local install option on a server that isn't Ubuntu 18.04 or later. You should upgrade your server to Ubuntu 18.04 or later. If this doesn't work, try removing files in /etc/wireguard/ and the configs directories as follows: This error is usually hit when using the local install option on a server that isn't Ubuntu 18.04. You should upgrade your server to Ubuntu 18.04. If this doesn't work, try removing `*.lock` files at /etc/wireguard/ as follows:
```ssh ```ssh
sudo rm -rf /etc/wireguard/* sudo rm -rf /etc/wireguard/*.lock
rm -rf configs/*
``` ```
Then immediately re-run `./algo`. Then immediately re-run `./algo`.
@ -364,32 +288,6 @@ sudo chown $USER:$USER $HOME/.rnd
Now, run Algo again. Now, run Algo again.
### Old Networking Firewall In Place
You may see the following output when attemptint to run ./algo from your localhost:
```
TASK [Wait until SSH becomes ready...] **********************************************************************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "elapsed": 321, "msg": "Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160"}
included: /home/<username>/algo/algo/playbooks/rescue.yml for localhost
TASK [debug] ************************************************************************************************************************************************
ok: [localhost] => {
"fail_hint": [
"Sorry, but something went wrong!",
"Please check the troubleshooting guide.",
"https://trailofbits.github.io/algo/troubleshooting.html"
]
}
```
If you see this error then one possible explanation is that you have a previous firewall configured in your cloud hosting provider which needs to be either updated or ideally removed. Removing this can often fix this issue.
### Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; "
StackScript is a custom deployment script that defines a set of configurations for a Linode instance (e.g. which distribution, specs, etc.). if you used algo with default values in the past deployments, a stackscript that would've been created is 're-used' in the deployment process (in fact, go see 'create Linodes' and under 'StackScripts' tab). Thus, there's a little chance that your deployment process will generate this 'unsupported stackscript' error due to a pre-existing StackScript that doesn't support a particular configuration setting or value due to an 'old' stackscript. The quickest solution is just to change the name of your deployment from the default value of 'algo' (or any other name that you've used before, again see the dashboard) and re-run the deployment.
## Connection Problems ## Connection Problems
Look here if you deployed an Algo server but now have a problem connecting to it with a client. Look here if you deployed an Algo server but now have a problem connecting to it with a client.
@ -516,6 +414,32 @@ Certain cloud providers (like AWS Lightsail) don't assign an IPv6 address to you
Manually disconnecting and then reconnecting should restore your connection. To solve this, you need to either "force IPv4 connection" if available on your phone, or install an IPv4 APN, which might be available from your carrier tech support. T-mobile's is available [for iOS here under "iOS IPv4/IPv6 fix"](https://www.reddit.com/r/tmobile/wiki/index), and [here is a walkthrough for Android phones](https://www.myopenrouter.com/article/vpn-connections-not-working-t-mobile-heres-how-fix). Manually disconnecting and then reconnecting should restore your connection. To solve this, you need to either "force IPv4 connection" if available on your phone, or install an IPv4 APN, which might be available from your carrier tech support. T-mobile's is available [for iOS here under "iOS IPv4/IPv6 fix"](https://www.reddit.com/r/tmobile/wiki/index), and [here is a walkthrough for Android phones](https://www.myopenrouter.com/article/vpn-connections-not-working-t-mobile-heres-how-fix).
### Error: name 'basestring' is not defined
```
TASK [cloud-digitalocean : Creating a droplet...] *******************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: NameError: name 'basestring' is not defined
fatal: [localhost]: FAILED! => {"changed": false, "msg": "name 'basestring' is not defined"}
```
If you get something like the above it's likely you're not using a python2 virtualenv.
Ensure running `python2.7` drops you into a python 2 shell (it looks something like this)
```
user@homebook ~ $ python2.7
Python 2.7.10 (default, Feb 7 2017, 00:08:15)
[GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>>
```
Then rerun the dependency installation explicitly using python 2.7
```
python2.7 -m virtualenv --python=`which python2.7` env && source env/bin/activate && python2.7 -m pip install -U pip && python2.7 -m pip install -r requirements.txt
```
### IPsec: Difficulty connecting through router ### IPsec: Difficulty connecting through router
Some routers treat IPsec connections specially because older versions of IPsec did not work properly through [NAT](https://en.wikipedia.org/wiki/Network_address_translation). If you're having problems connecting to your AlgoVPN through a specific router using IPsec you might need to change some settings on the router. Some routers treat IPsec connections specially because older versions of IPsec did not work properly through [NAT](https://en.wikipedia.org/wiki/Network_address_translation). If you're having problems connecting to your AlgoVPN through a specific router using IPsec you might need to change some settings on the router.
@ -530,4 +454,4 @@ If your router runs [pfSense](https://www.pfsense.org) and a single IPsec client
## I have a problem not covered here ## I have a problem not covered here
If you have an issue that you cannot solve with the guidance here, [create a new discussion](https://github.com/trailofbits/algo/discussions) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new). If you have an issue that you cannot solve with the guidance here, [join our Gitter](https://gitter.im/trailofbits/algo) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new).

View file

@ -1,29 +0,0 @@
#!/bin/sh
set -eux
# shellcheck disable=SC2230
which sudo || until \
apt-get update -y && \
apt-get install sudo -yf --install-suggests; do
sleep 3
done
getent passwd algo || useradd -m -d /home/algo -s /bin/bash -G adm -p '!' algo
(umask 337 && echo "algo ALL=(ALL) NOPASSWD:ALL" >/etc/sudoers.d/10-algo-user)
cat <<EOF >/etc/ssh/sshd_config
{{ lookup('template', 'files/cloud-init/sshd_config') }}
EOF
test -d /home/algo/.ssh || sudo -u algo mkdir -m 0700 /home/algo/.ssh
echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" | (sudo -u algo tee /home/algo/.ssh/authorized_keys && chmod 0600 /home/algo/.ssh/authorized_keys)
ufw --force reset
# shellcheck disable=SC2015
dpkg -l sshguard && until apt-get remove -y --purge sshguard; do
sleep 3
done || true
systemctl restart sshd.service

View file

@ -1,30 +0,0 @@
#cloud-config
output: {all: '| tee -a /var/log/cloud-init-output.log'}
package_update: true
package_upgrade: true
packages:
- sudo
users:
- default
- name: algo
homedir: /home/algo
sudo: ALL=(ALL) NOPASSWD:ALL
groups: adm,netdev
shell: /bin/bash
lock_passwd: true
ssh_authorized_keys:
- "{{ lookup('file', '{{ SSH_keys.public }}') }}"
write_files:
- path: /etc/ssh/sshd_config
content: |
{{ lookup('template', 'files/cloud-init/sshd_config') | indent(width=6) }}
runcmd:
- set -x
- ufw --force reset
- sudo apt-get remove -y --purge sshguard || true
- systemctl restart sshd.service

View file

@ -1,10 +0,0 @@
Port {{ ssh_port }}
AllowGroups algo
PermitRootLogin no
PasswordAuthentication no
ChallengeResponseAuthentication no
UsePAM yes
X11Forwarding yes
PrintMotd no
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server

163
input.yml
View file

@ -14,50 +14,47 @@
- { name: DigitalOcean, alias: digitalocean } - { name: DigitalOcean, alias: digitalocean }
- { name: Amazon Lightsail, alias: lightsail } - { name: Amazon Lightsail, alias: lightsail }
- { name: Amazon EC2, alias: ec2 } - { name: Amazon EC2, alias: ec2 }
- { name: Vultr, alias: vultr }
- { name: Microsoft Azure, alias: azure } - { name: Microsoft Azure, alias: azure }
- { name: Google Compute Engine, alias: gce } - { name: Google Compute Engine, alias: gce }
- { name: Hetzner Cloud, alias: hetzner } - { name: Scaleway, alias: scaleway}
- { name: Vultr, alias: vultr }
- { name: Scaleway, alias: scaleway }
- { name: OpenStack (DreamCompute optimised), alias: openstack } - { name: OpenStack (DreamCompute optimised), alias: openstack }
- { name: CloudStack (Exoscale optimised), alias: cloudstack } - { name: Install to existing Ubuntu 18.04 or 19.04 server (Advanced), alias: local }
- { name: Linode, alias: linode }
- { name: Install to existing Ubuntu latest LTS server (for more advanced users), alias: local }
vars_files: vars_files:
- config.cfg - config.cfg
tasks: tasks:
- block: - block:
- name: Cloud prompt - name: Cloud prompt
pause: pause:
prompt: | prompt: |
What provider would you like to use? What provider would you like to use?
{% for p in providers_map %} {% for p in providers_map %}
{{ loop.index }}. {{ p['name'] }} {{ loop.index }}. {{ p['name'] }}
{% endfor %} {% endfor %}
Enter the number of your desired provider Enter the number of your desired provider
register: _algo_provider register: _algo_provider
when: provider is undefined when: provider is undefined
- name: Set facts based on the input - name: Set facts based on the input
set_fact: set_fact:
algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}" algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"
- name: VPN server name prompt
pause:
prompt: |
Name the vpn server
[algo]
register: _algo_server_name
when:
- server_name is undefined
- algo_provider != "local"
- name: VPN server name prompt
pause:
prompt: |
Name the vpn server
[algo]
register: _algo_server_name
when:
- server_name is undefined
- algo_provider != "local"
- block:
- name: Cellular On Demand prompt - name: Cellular On Demand prompt
pause: pause:
prompt: | prompt: |
Do you want macOS/iOS clients to enable "Connect On Demand" when connected to cellular networks? Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to cellular networks?
[y/N] [y/N]
register: _ondemand_cellular register: _ondemand_cellular
when: ondemand_cellular is undefined when: ondemand_cellular is undefined
@ -65,7 +62,7 @@
- name: Wi-Fi On Demand prompt - name: Wi-Fi On Demand prompt
pause: pause:
prompt: | prompt: |
Do you want macOS/iOS clients to enable "Connect On Demand" when connected to Wi-Fi? Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to Wi-Fi?
[y/N] [y/N]
register: _ondemand_wifi register: _ondemand_wifi
when: ondemand_wifi is undefined when: ondemand_wifi is undefined
@ -73,12 +70,13 @@
- name: Trusted Wi-Fi networks prompt - name: Trusted Wi-Fi networks prompt
pause: pause:
prompt: | prompt: |
List the names of any trusted Wi-Fi networks where macOS/iOS clients should not use "Connect On Demand" List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand"
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi) (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
register: _ondemand_wifi_exclude register: _ondemand_wifi_exclude
when: when:
- ondemand_wifi_exclude is undefined - ondemand_wifi_exclude is undefined
- (ondemand_wifi|default(false)|bool) or (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false)) - (ondemand_wifi|default(false)|bool) or
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
- name: Retain the PKI prompt - name: Retain the PKI prompt
pause: pause:
@ -86,58 +84,57 @@
Do you want to retain the keys (PKI)? (required to add users in the future, but less secure) Do you want to retain the keys (PKI)? (required to add users in the future, but less secure)
[y/N] [y/N]
register: _store_pki register: _store_pki
when: when: store_pki is undefined
- store_pki is undefined when: ipsec_enabled
- ipsec_enabled
- name: DNS adblocking prompt - name: DNS adblocking prompt
pause: pause:
prompt: | prompt: |
Do you want to enable DNS ad blocking on this VPN server? Do you want to enable DNS ad blocking on this VPN server?
[y/N] [y/N]
register: _dns_adblocking register: _dns_adblocking
when: dns_adblocking is undefined when: dns_adblocking is undefined
- name: SSH tunneling prompt - name: SSH tunneling prompt
pause: pause:
prompt: | prompt: |
Do you want each user to have their own account for SSH tunneling? Do you want each user to have their own account for SSH tunneling?
[y/N] [y/N]
register: _ssh_tunneling register: _ssh_tunneling
when: ssh_tunneling is undefined when: ssh_tunneling is undefined
- name: Set facts based on the input - name: Set facts based on the input
set_fact: set_fact:
algo_server_name: >- algo_server_name: >-
{% if server_name is defined %}{% set _server = server_name %} {% if server_name is defined %}{% set _server = server_name %}
{%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%} {%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%}
{%- set _server = _algo_server_name.user_input -%} {%- set _server = _algo_server_name.user_input -%}
{%- else %}{% set _server = defaults['server_name'] %}{% endif -%} {%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
{{ _server | regex_replace('(?!\.)(\W|_)', '-') }} {{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
algo_ondemand_cellular: >- algo_ondemand_cellular: >-
{% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }} {% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
{%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }} {%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ondemand_wifi: >- algo_ondemand_wifi: >-
{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }} {% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
{%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }} {%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ondemand_wifi_exclude: >- algo_ondemand_wifi_exclude: >-
{% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }} {% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }}
{%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%} {%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%}
{{ _ondemand_wifi_exclude.user_input | b64encode }} {{ _ondemand_wifi_exclude.user_input | b64encode }}
{%- else %}{{ '_null' | b64encode }}{% endif %} {%- else %}{{ '_null' | b64encode }}{% endif %}
algo_dns_adblocking: >- algo_dns_adblocking: >-
{% if dns_adblocking is defined %}{{ dns_adblocking | bool }} {% if dns_adblocking is defined %}{{ dns_adblocking | bool }}
{%- elif _dns_adblocking.user_input is defined %}{{ booleans_map[_dns_adblocking.user_input] | default(defaults['dns_adblocking']) }} {%- elif _dns_adblocking.user_input is defined %}{{ booleans_map[_dns_adblocking.user_input] | default(defaults['dns_adblocking']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ssh_tunneling: >- algo_ssh_tunneling: >-
{% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }} {% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
{%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }} {%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_store_pki: >- algo_store_pki: >-
{% if ipsec_enabled %}{%- if store_pki is defined %}{{ store_pki | bool }} {% if ipsec_enabled %}{%- if store_pki is defined %}{{ store_pki | bool }}
{%- elif _store_pki.user_input is defined %}{{ booleans_map[_store_pki.user_input] | default(defaults['store_pki']) }} {%- elif _store_pki.user_input is defined %}{{ booleans_map[_store_pki.user_input] | default(defaults['store_pki']) }}
{%- else %}false{% endif %}{% endif %} {%- else %}false{% endif %}{% endif %}
rescue: rescue:
- include_tasks: playbooks/rescue.yml - include_tasks: playbooks/rescue.yml

View file

@ -22,7 +22,16 @@ installRequirements() {
export DEBIAN_FRONTEND=noninteractive export DEBIAN_FRONTEND=noninteractive
apt-get update apt-get update
apt-get install \ apt-get install \
python3-virtualenv \ software-properties-common \
git \
build-essential \
libssl-dev \
libffi-dev \
python-dev \
python-pip \
python-setuptools \
python-virtualenv \
bind9-host \
jq -y jq -y
} }
@ -30,18 +39,18 @@ getAlgo() {
[ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo [ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo
cd algo cd algo
python3 -m virtualenv --python="$(command -v python3)" .env python -m virtualenv --python="$(command -v python2)" .venv
# shellcheck source=/dev/null # shellcheck source=/dev/null
. .env/bin/activate . .venv/bin/activate
python3 -m pip install -U pip virtualenv python -m pip install -U pip virtualenv
python3 -m pip install -r requirements.txt python -m pip install -r requirements.txt
} }
publicIpFromInterface() { publicIpFromInterface() {
echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint." echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint."
DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')" DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')"
ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b') ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b')
export ENDPOINT="${ENDPOINT}" export ENDPOINT=$ENDPOINT
echo "Using ${ENDPOINT} as the endpoint" echo "Using ${ENDPOINT} as the endpoint"
} }
@ -57,7 +66,7 @@ publicIpFromMetadata() {
fi fi
if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then
export ENDPOINT="${ENDPOINT}" export ENDPOINT=$ENDPOINT
echo "Using ${ENDPOINT} as the endpoint" echo "Using ${ENDPOINT} as the endpoint"
else else
publicIpFromInterface publicIpFromInterface
@ -69,7 +78,7 @@ deployAlgo() {
cd /opt/algo cd /opt/algo
# shellcheck source=/dev/null # shellcheck source=/dev/null
. .env/bin/activate . .venv/bin/activate
export HOME=/root export HOME=/root
export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp

View file

@ -1,2 +1,2 @@
[local] [local]
localhost ansible_connection=local ansible_python_interpreter=python3 localhost ansible_connection=local ansible_python_interpreter=python

View file

@ -1,288 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Patrick F. Marques <patrickfmarques@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_floating_ip
short_description: Manage DigitalOcean Floating IPs
description:
- Create/delete/assign a floating IP.
version_added: "2.4"
author: "Patrick Marques (@pmarques)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
ip:
description:
- Public IP address of the Floating IP. Used to remove an IP
region:
description:
- The region that the Floating IP is reserved to.
droplet_id:
description:
- The Droplet that the Floating IP has been assigned to.
oauth_token:
description:
- DigitalOcean OAuth token.
required: true
notes:
- Version 2 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: "Create a Floating IP in region lon1"
digital_ocean_floating_ip:
state: present
region: lon1
- name: "Create a Floating IP assigned to Droplet ID 123456"
digital_ocean_floating_ip:
state: present
droplet_id: 123456
- name: "Delete a Floating IP with ip 1.2.3.4"
digital_ocean_floating_ip:
state: absent
ip: "1.2.3.4"
'''
RETURN = '''
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#floating-ips
data:
description: a DigitalOcean Floating IP resource
returned: success and no resource constraint
type: dict
sample: {
"action": {
"id": 68212728,
"status": "in-progress",
"type": "assign_ip",
"started_at": "2015-10-15T17:45:44Z",
"completed_at": null,
"resource_id": 758603823,
"resource_type": "floating_ip",
"region": {
"name": "New York 3",
"slug": "nyc3",
"sizes": [
"512mb",
"1gb",
"2gb",
"4gb",
"8gb",
"16gb",
"32gb",
"48gb",
"64gb"
],
"features": [
"private_networking",
"backups",
"ipv6",
"metadata"
],
"available": true
},
"region_slug": "nyc3"
}
}
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.digital_ocean import DigitalOceanHelper
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
def wait_action(module, rest, ip, action_id, timeout=10):
end_time = time.time() + 10
while time.time() < end_time:
response = rest.get('floating_ips/{0}/actions/{1}'.format(ip, action_id))
status_code = response.status_code
status = response.json['action']['status']
# TODO: check status_code == 200?
if status == 'completed':
return True
elif status == 'errored':
module.fail_json(msg='Floating ip action error [ip: {0}: action: {1}]'.format(
ip, action_id), data=json)
module.fail_json(msg='Floating ip action timeout [ip: {0}: action: {1}]'.format(
ip, action_id), data=json)
def core(module):
api_token = module.params['oauth_token']
state = module.params['state']
ip = module.params['ip']
droplet_id = module.params['droplet_id']
rest = DigitalOceanHelper(module)
if state in ('present'):
if droplet_id is not None and module.params['ip'] is not None:
# Lets try to associate the ip to the specified droplet
associate_floating_ips(module, rest)
else:
create_floating_ips(module, rest)
elif state in ('absent'):
response = rest.delete("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 204:
module.exit_json(changed=True)
elif status_code == 404:
module.exit_json(changed=False)
else:
module.exit_json(changed=False, data=json_data)
def get_floating_ip_details(module, rest):
ip = module.params['ip']
response = rest.get("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 200:
return json_data['floating_ip']
else:
module.fail_json(msg="Error assigning floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def assign_floating_id_to_droplet(module, rest):
ip = module.params['ip']
payload = {
"type": "assign",
"droplet_id": module.params['droplet_id'],
}
response = rest.post("floating_ips/{0}/actions".format(ip), data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 201:
wait_action(module, rest, ip, json_data['action']['id'])
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def associate_floating_ips(module, rest):
floating_ip = get_floating_ip_details(module, rest)
droplet = floating_ip['droplet']
# TODO: If already assigned to a droplet verify if is one of the specified as valid
if droplet is not None and str(droplet['id']) in [module.params['droplet_id']]:
module.exit_json(changed=False)
else:
assign_floating_id_to_droplet(module, rest)
def create_floating_ips(module, rest):
payload = {
}
floating_ip_data = None
if module.params['region'] is not None:
payload["region"] = module.params['region']
if module.params['droplet_id'] is not None:
payload["droplet_id"] = module.params['droplet_id']
floating_ips = rest.get_paginated_data(base_url='floating_ips?', data_key_name='floating_ips')
for floating_ip in floating_ips:
if floating_ip['droplet'] and floating_ip['droplet']['id'] == module.params['droplet_id']:
floating_ip_data = {'floating_ip': floating_ip}
if floating_ip_data:
module.exit_json(changed=False, data=floating_ip_data)
else:
response = rest.post("floating_ips", data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 202:
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
ip=dict(aliases=['id'], required=False),
region=dict(required=False),
droplet_id=dict(required=False, type='int'),
oauth_token=dict(
no_log=True,
# Support environment variable for DigitalOcean OAuth Token
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
required=True,
),
validate_certs=dict(type='bool', default=True),
timeout=dict(type='int', default=30),
),
required_if=[
('state', 'delete', ['ip'])
],
mutually_exclusive=[
['region', 'droplet_id']
],
)
core(module)
if __name__ == '__main__':
main()

139
library/gce_region_facts.py Normal file
View file

@ -0,0 +1,139 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_region_facts
version_added: "5.3"
short_description: Gather facts about GCE regions.
description:
- Gather facts about GCE regions.
options:
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Jack Ivanov (@jackivanov)"
'''
EXAMPLES = '''
# Gather facts about all regions
- gce_region_facts:
'''
RETURN = '''
regions:
returned: on success
description: >
Each element consists of a dict with all the information related
to that region.
type: list
sample: "[{
"name": "asia-east1",
"status": "UP",
"zones": [
{
"name": "asia-east1-a",
"status": "UP"
},
{
"name": "asia-east1-b",
"status": "UP"
},
{
"name": "asia-east1-c",
"status": "UP"
}
]
}]"
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
def main():
module = AnsibleModule(
argument_spec=dict(
service_account_email=dict(),
pem_file=dict(type='path'),
credentials_file=dict(type='path'),
project_id=dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
gce = gce_connect(module)
changed = False
gce_regions = []
try:
regions = gce.ex_list_regions()
for r in regions:
gce_region = {}
gce_region['name'] = r.name
gce_region['status'] = r.status
gce_region['zones'] = []
for z in r.zones:
gce_zone = {}
gce_zone['name'] = z.name
gce_zone['status'] = z.status
gce_region['zones'].append(gce_zone)
gce_regions.append(gce_region)
json_output = { 'regions': gce_regions }
module.exit_json(changed=False, results=json_output)
except ResourceNotFoundError:
pass
if __name__ == '__main__':
main()

View file

@ -1,93 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), scope=dict(required=True, type='str')))
if module._name == 'gcp_compute_image_facts':
module.deprecate("The 'gcp_compute_image_facts' module has been renamed to 'gcp_compute_regions_info'", version='2.13')
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {'resources': items}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/{scope}".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()

551
library/lightsail.py Normal file
View file

@ -0,0 +1,551 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
- Creates or instances in AWS Lightsail and optionally wait for it to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
default : null
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
required: false
default: null
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
required: false
default: null
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
required: false
default: null
user_data:
description:
- Launch script that can configure the instance with additional data
required: false
default: null
key_pair_name:
description:
- Name of the key pair to use with the instance
required: false
default: null
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
open_ports:
description:
- Adds public ports to an Amazon Lightsail instance.
default: null
suboptions:
from_port:
description: Begin of the range
required: true
default: null
to_port:
description: End of the range
required: true
default: null
protocol:
description: Accepted traffic protocol.
required: true
choices:
- udp
- tcp
- all
default: null
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
open_ports:
- from_port: 4500
to_port: 4500
protocol: udp
- from_port: 500
to_port: 500
protocol: udp
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
description: if a snapshot has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
HAS_BOTO3, camel_dict_to_snake_dict)
def create_instance(module, client, instance_name):
"""
Create an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the new instance.
"""
changed = False
# Check if instance already exists
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
zone = module.params.get('zone')
blueprint_id = module.params.get('blueprint_id')
bundle_id = module.params.get('bundle_id')
user_data = module.params.get('user_data')
user_data = '' if user_data is None else user_data
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
if module.params.get('key_pair_name'):
key_pair_name = module.params.get('key_pair_name')
else:
key_pair_name = ''
if module.params.get('open_ports'):
open_ports = module.params.get('open_ports')
else:
open_ports = '[]'
resp = None
if inst is None:
try:
resp = client.create_instances(
instanceNames=[
instance_name
],
availabilityZone=zone,
blueprintId=blueprint_id,
bundleId=bundle_id,
userData=user_data,
keyPairName=key_pair_name,
)
resp = resp['operations'][0]
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
inst = _find_instance_info(client, instance_name)
# Wait for instance to become running
if wait:
while (wait_max > time.time()) and (inst is not None and inst['state']['name'] != "running"):
try:
time.sleep(2)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="Wait for instance start timeout at %s" % time.asctime())
# Attempt to open ports
if open_ports:
if inst is not None:
try:
for o in open_ports:
resp = client.open_instance_public_ports(
instanceName=instance_name,
portInfo={
'fromPort': o['from_port'],
'toPort': o['to_port'],
'protocol': o['protocol']
}
)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error opening ports for instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def delete_instance(module, client, instance_name):
"""
Terminates an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the instance deleted (pre-deletion).
If the instance to be deleted is running
"changed" will be set to False.
"""
# It looks like deleting removes the instance immediately, nothing to wait for
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before deleting
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
# sleep and retry
time.sleep(10)
# Attempt to delete
if inst is not None:
while not changed and ((wait and wait_max > time.time()) or (not wait)):
try:
client.delete_instance(instanceName=instance_name)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
return (changed, inst)
def restart_instance(module, client, instance_name):
"""
Reboot an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to reboot
Returns a dictionary of instance information
about the restarted instance
If the instance was not able to reboot,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(3)
# send reboot
if inst is not None:
try:
client.reboot_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def startstop_instance(module, client, instance_name, state):
    """
    Start or stop an existing instance.

    module: Ansible module object
    client: authenticated lightsail connection object
    instance_name: name of instance to start/stop
    state: target state ("running" or "stopped")

    Returns a tuple (changed, instance_dict) describing the instance
    after the state change.  If no state change was needed, "changed"
    is False.
    """
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    wait_max = time.time() + wait_timeout

    changed = False

    inst = None
    try:
        inst = _find_instance_info(client, instance_name)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'NotFoundException':
            module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))

    # Wait for instance to exit transition state before state change
    if wait:
        while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
            try:
                time.sleep(5)
                inst = _find_instance_info(client, instance_name)
            except botocore.exceptions.ClientError as e:
                # botocore reports HTTPStatusCode as an int; the previous
                # comparison against the string "403" could never match.
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
                    module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
                                     exception=traceback.format_exc())
                elif e.response['Error']['Code'] == "RequestExpired":
                    module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
            time.sleep(1)

    # Try state change
    if inst is not None and inst['state']['name'] != state:
        try:
            if state == 'running':
                client.start_instance(instanceName=instance_name)
            else:
                client.stop_instance(instanceName=instance_name)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
        changed = True
        # Grab current instance info
        inst = _find_instance_info(client, instance_name)

    return (changed, inst)
def core(module):
    """
    Connect to the Lightsail service and dispatch the requested state
    to the matching instance operation, then exit the module with the
    (snake_cased) instance details.
    """
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    client = None
    try:
        client = boto3_conn(module, conn_type='client', resource='lightsail',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())

    changed = False

    target_state = module.params['state']
    instance_name = module.params['name']

    if target_state == 'absent':
        changed, instance_dict = delete_instance(module, client, instance_name)
    elif target_state in ('running', 'stopped'):
        changed, instance_dict = startstop_instance(module, client, instance_name, target_state)
    elif target_state == 'restarted':
        changed, instance_dict = restart_instance(module, client, instance_name)
    elif target_state == 'present':
        changed, instance_dict = create_instance(module, client, instance_name)

    module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
''' handle exceptions where this function is called '''
inst = None
try:
inst = client.get_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
raise
return inst['instance']
def main():
    """Module entrypoint: build the argument spec, validate deps, run core()."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
        zone=dict(type='str'),
        blueprint_id=dict(type='str'),
        bundle_id=dict(type='str'),
        key_pair_name=dict(type='str'),
        user_data=dict(type='str'),
        wait=dict(type='bool', default=True),
        # Declare the type so Ansible validates/converts the value itself
        # instead of relying on int() casts scattered through the module.
        wait_timeout=dict(type='int', default=300),
        open_ports=dict(type='list')
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')

    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    try:
        core(module)
    # A bare Exception already covers botocore.exceptions.ClientError, so
    # the previous two-member tuple was redundant.
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View file

@ -93,7 +93,7 @@ def main():
response = client.get_regions( response = client.get_regions(
includeAvailabilityZones=False includeAvailabilityZones=False
) )
module.exit_json(changed=False, data=response) module.exit_json(changed=False, results=response)
except (botocore.exceptions.ClientError, Exception) as e: except (botocore.exceptions.ClientError, Exception) as e:
module.fail_json(msg=str(e), exception=traceback.format_exc()) module.fail_json(msg=str(e), exception=traceback.format_exc())

View file

@ -1,113 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.linode import get_user_agent
LINODE_IMP_ERR = None
try:
from linode_api4 import StackScript, LinodeClient
HAS_LINODE_DEPENDENCY = True
except ImportError:
LINODE_IMP_ERR = traceback.format_exc()
HAS_LINODE_DEPENDENCY = False
def create_stackscript(module, client, **kwargs):
    """Create a StackScript via the Linode API and return its raw JSON."""
    try:
        return client.linode.stackscript_create(**kwargs)._raw_json
    except Exception as exception:
        module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
def stackscript_available(module, client):
    """Return the user's StackScript matching label + description, or None."""
    try:
        matches = client.linode.stackscripts(
            StackScript.label == module.params['label'],
            StackScript.description == module.params['description'],
            mine_only=True
        )
        return matches[0]
    except IndexError:
        # No StackScript with this label/description exists yet.
        return None
    except Exception as exception:
        module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
def initialise_module():
    """Build the AnsibleModule carrying this module's parameter specification."""
    spec = dict(
        label=dict(type='str', required=True),
        state=dict(
            type='str',
            required=True,
            choices=['present', 'absent']
        ),
        access_token=dict(
            type='str',
            required=True,
            no_log=True,
            fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
        ),
        script=dict(type='str', required=True),
        images=dict(type='list', required=True),
        description=dict(type='str', required=False),
        public=dict(type='bool', required=False, default=False),
    )
    return AnsibleModule(argument_spec=spec, supports_check_mode=False)
def build_client(module):
    """Construct a LinodeClient authenticated with the module's access token."""
    token = module.params['access_token']
    return LinodeClient(token, user_agent=get_user_agent('linode_v4_module'))
def main():
    """Module entrypoint: converge the StackScript to the requested state."""
    module = initialise_module()

    if not HAS_LINODE_DEPENDENCY:
        module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)

    client = build_client(module)
    stackscript = stackscript_available(module, client)
    state = module.params['state']

    if state == 'present':
        if stackscript is not None:
            # Already present: report it unchanged.
            module.exit_json(changed=False, stackscript=stackscript._raw_json)
        stackscript_json = create_stackscript(
            module, client,
            label=module.params['label'],
            script=module.params['script'],
            images=module.params['images'],
            desc=module.params['description'],
            public=module.params['public'],
        )
        module.exit_json(changed=True, stackscript=stackscript_json)
    elif state == 'absent':
        if stackscript is not None:
            stackscript.delete()
            module.exit_json(changed=True, stackscript=stackscript._raw_json)
        module.exit_json(changed=False, stackscript={})


if __name__ == "__main__":
    main()

View file

@ -1,142 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.linode import get_user_agent
LINODE_IMP_ERR = None
try:
from linode_api4 import Instance, LinodeClient
HAS_LINODE_DEPENDENCY = True
except ImportError:
LINODE_IMP_ERR = traceback.format_exc()
HAS_LINODE_DEPENDENCY = False
def create_linode(module, client, **kwargs):
    """
    Create a Linode instance and normalise the API return format.

    module: Ansible module object (used for failure reporting)
    client: authenticated LinodeClient
    kwargs: passed through to linode.instance_create(); a None (or
        absent) root_pass is dropped so the API generates one.

    Returns the instance's raw JSON; when the API returned a generated
    root password (tuple response), it is merged in under 'root_pass'.
    """
    # Use .get() so a caller that omits root_pass entirely does not raise
    # KeyError (the original indexed kwargs['root_pass'] directly).
    if kwargs.get('root_pass') is None:
        kwargs.pop('root_pass', None)

    try:
        response = client.linode.instance_create(**kwargs)
    except Exception as exception:
        module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)

    try:
        if isinstance(response, tuple):
            # instance_create() returns (instance, generated_root_pass)
            # when the API chose the password.
            instance, root_pass = response
            instance_json = instance._raw_json
            instance_json.update({'root_pass': root_pass})
            return instance_json
        else:
            return response._raw_json
    except TypeError:
        module.fail_json(msg='Unable to parse Linode instance creation'
                             ' response. Please raise a bug against this'
                             ' module on https://github.com/ansible/ansible/issues'
                         )
def maybe_instance_from_label(module, client):
    """Return the Linode instance whose label matches the module params, or None."""
    try:
        candidates = client.linode.instances(Instance.label == module.params['label'])
        return candidates[0]
    except IndexError:
        # No instance with this label exists.
        return None
    except Exception as exception:
        module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
def initialise_module():
    """Build the AnsibleModule carrying this module's parameter specification."""
    spec = dict(
        label=dict(type='str', required=True),
        state=dict(
            type='str',
            required=True,
            choices=['present', 'absent']
        ),
        access_token=dict(
            type='str',
            required=True,
            no_log=True,
            fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
        ),
        authorized_keys=dict(type='list', required=False),
        group=dict(type='str', required=False),
        image=dict(type='str', required=False),
        region=dict(type='str', required=False),
        root_pass=dict(type='str', required=False, no_log=True),
        tags=dict(type='list', required=False),
        type=dict(type='str', required=False),
        stackscript_id=dict(type='int', required=False),
    )
    return AnsibleModule(
        argument_spec=spec,
        supports_check_mode=False,
        required_one_of=(
            ['state', 'label'],
        ),
        # Creation needs the full (region, image, type) triple.
        required_together=(
            ['region', 'image', 'type'],
        )
    )
def build_client(module):
    """Return a LinodeClient authenticated with the module's access token."""
    agent = get_user_agent('linode_v4_module')
    return LinodeClient(module.params['access_token'], user_agent=agent)
def main():
    """Module entrypoint: converge the Linode instance to the requested state."""
    module = initialise_module()

    if not HAS_LINODE_DEPENDENCY:
        module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)

    client = build_client(module)
    instance = maybe_instance_from_label(module, client)
    state = module.params['state']

    if state == 'present':
        if instance is not None:
            # Already present: report it unchanged.
            module.exit_json(changed=False, instance=instance._raw_json)
        instance_json = create_linode(
            module, client,
            authorized_keys=module.params['authorized_keys'],
            group=module.params['group'],
            image=module.params['image'],
            label=module.params['label'],
            region=module.params['region'],
            root_pass=module.params['root_pass'],
            tags=module.params['tags'],
            ltype=module.params['type'],
            stackscript_id=module.params['stackscript_id'],
        )
        module.exit_json(changed=True, instance=instance_json)
    elif state == 'absent':
        if instance is not None:
            instance.delete()
            module.exit_json(changed=True, instance=instance._raw_json)
        module.exit_json(changed=False, instance={})


if __name__ == "__main__":
    main()

View file

@ -29,15 +29,6 @@ extends_documentation_fragment: scaleway
options: options:
public_ip:
description:
- Manage public IP on a Scaleway server
- Could be Scaleway IP address UUID
- C(dynamic) Means that IP is destroyed at the same time the host is destroyed
- C(absent) Means no public IP at all
version_added: '2.8'
default: absent
enable_ipv6: enable_ipv6:
description: description:
- Enable public IPv6 connectivity on the instance - Enable public IPv6 connectivity on the instance
@ -97,6 +88,26 @@ options:
description: description:
- Commercial name of the compute node - Commercial name of the compute node
required: true required: true
choices:
- ARM64-2GB
- ARM64-4GB
- ARM64-8GB
- ARM64-16GB
- ARM64-32GB
- ARM64-64GB
- ARM64-128GB
- C1
- C2S
- C2M
- C2L
- START1-XS
- START1-S
- START1-M
- START1-L
- X64-15GB
- X64-30GB
- X64-60GB
- X64-120GB
wait: wait:
description: description:
@ -115,13 +126,6 @@ options:
- Time to wait before every attempt to check the state of the server - Time to wait before every attempt to check the state of the server
required: false required: false
default: 3 default: 3
security_group:
description:
- Security group unique identifier
- If no value provided, the default security group or current security group will be used
required: false
version_added: "2.8"
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -137,19 +141,6 @@ EXAMPLES = '''
- test - test
- www - www
- name: Create a server attached to a security group
scaleway_compute:
name: foobar
state: present
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
region: ams1
commercial_type: VC1S
security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
tags:
- test
- www
- name: Destroy it right after - name: Destroy it right after
scaleway_compute: scaleway_compute:
name: foobar name: foobar
@ -170,6 +161,34 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
from ansible.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
SCALEWAY_COMMERCIAL_TYPES = [
# Virtual ARM64 compute instance
'ARM64-2GB',
'ARM64-4GB',
'ARM64-8GB',
'ARM64-16GB',
'ARM64-32GB',
'ARM64-64GB',
'ARM64-128GB',
# Baremetal
'C1', # ARM64 (4 cores) - 2GB
'C2S', # X86-64 (4 cores) - 8GB
'C2M', # X86-64 (8 cores) - 16GB
'C2L', # x86-64 (8 cores) - 32 GB
# Virtual X86-64 compute instance
'START1-XS', # Starter X86-64 (1 core) - 1GB - 25 GB NVMe
'START1-S', # Starter X86-64 (2 cores) - 2GB - 50 GB NVMe
'START1-M', # Starter X86-64 (4 cores) - 4GB - 100 GB NVMe
'START1-L', # Starter X86-64 (8 cores) - 8GB - 200 GB NVMe
'X64-15GB',
'X64-30GB',
'X64-60GB',
'X64-120GB',
]
SCALEWAY_SERVER_STATES = ( SCALEWAY_SERVER_STATES = (
'stopped', 'stopped',
'stopping', 'stopping',
@ -185,17 +204,6 @@ SCALEWAY_TRANSITIONS_STATES = (
) )
def check_image_id(compute_api, image_id):
response = compute_api.get(path="images")
if response.ok and response.json:
image_ids = [image["id"] for image in response.json["images"]]
if image_id not in image_ids:
compute_api.module.fail_json(msg='Error in getting image %s on %s' % (image_id, compute_api.module.params.get('api_url')))
else:
compute_api.module.fail_json(msg="Error in getting images from: %s" % compute_api.module.params.get('api_url'))
def fetch_state(compute_api, server): def fetch_state(compute_api, server):
compute_api.module.debug("fetch_state of server: %s" % server["id"]) compute_api.module.debug("fetch_state of server: %s" % server["id"])
response = compute_api.get(path="servers/%s" % server["id"]) response = compute_api.get(path="servers/%s" % server["id"])
@ -234,51 +242,17 @@ def wait_to_complete_state_transition(compute_api, server):
compute_api.module.fail_json(msg="Server takes too long to finish its transition") compute_api.module.fail_json(msg="Server takes too long to finish its transition")
def public_ip_payload(compute_api, public_ip):
# We don't want a public ip
if public_ip in ("absent",):
return {"dynamic_ip_required": False}
# IP is only attached to the instance and is released as soon as the instance terminates
if public_ip in ("dynamic", "allocated"):
return {"dynamic_ip_required": True}
# We check that the IP we want to attach exists, if so its ID is returned
response = compute_api.get("ips")
if not response.ok:
msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
compute_api.module.fail_json(msg=msg)
ip_list = []
try:
ip_list = response.json["ips"]
except KeyError:
compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
lookup = [ip["id"] for ip in ip_list]
if public_ip in lookup:
return {"public_ip": public_ip}
def create_server(compute_api, server): def create_server(compute_api, server):
compute_api.module.debug("Starting a create_server") compute_api.module.debug("Starting a create_server")
target_server = None target_server = None
data = {"enable_ipv6": server["enable_ipv6"], response = compute_api.post(path="servers",
"tags": server["tags"], data={"enable_ipv6": server["enable_ipv6"],
"commercial_type": server["commercial_type"], "boot_type": server["boot_type"],
"image": server["image"], "tags": server["tags"],
"dynamic_ip_required": server["dynamic_ip_required"], "commercial_type": server["commercial_type"],
"name": server["name"], "image": server["image"],
"organization": server["organization"] "name": server["name"],
} "organization": server["organization"]})
if server["boot_type"]:
data["boot_type"] = server["boot_type"]
if server["security_group"]:
data["security_group"] = server["security_group"]
response = compute_api.post(path="servers", data=data)
if not response.ok: if not response.ok:
msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json) msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
@ -351,7 +325,7 @@ def present_strategy(compute_api, wished_server):
if compute_api.module.check_mode: if compute_api.module.check_mode:
return changed, {"status": "Server %s attributes would be changed." % target_server["id"]} return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
return changed, target_server return changed, target_server
@ -373,7 +347,7 @@ def absent_strategy(compute_api, wished_server):
return changed, {"status": "Server %s would be made absent." % target_server["id"]} return changed, {"status": "Server %s would be made absent." % target_server["id"]}
# A server MUST be stopped to be deleted. # A server MUST be stopped to be deleted.
while fetch_state(compute_api=compute_api, server=target_server) != "stopped": while not fetch_state(compute_api=compute_api, server=target_server) == "stopped":
wait_to_complete_state_transition(compute_api=compute_api, server=target_server) wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
response = stop_server(compute_api=compute_api, server=target_server) response = stop_server(compute_api=compute_api, server=target_server)
@ -414,7 +388,7 @@ def running_strategy(compute_api, wished_server):
if compute_api.module.check_mode: if compute_api.module.check_mode:
return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]} return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
current_state = fetch_state(compute_api=compute_api, server=target_server) current_state = fetch_state(compute_api=compute_api, server=target_server)
if current_state not in ("running", "starting"): if current_state not in ("running", "starting"):
@ -458,7 +432,7 @@ def stop_strategy(compute_api, wished_server):
return changed, { return changed, {
"status": "Server %s attributes would be changed before stopping it." % target_server["id"]} "status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server) wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
@ -505,7 +479,7 @@ def restart_strategy(compute_api, wished_server):
return changed, { return changed, {
"status": "Server %s attributes would be changed before rebooting it." % target_server["id"]} "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
changed = True changed = True
if compute_api.module.check_mode: if compute_api.module.check_mode:
@ -544,8 +518,8 @@ state_strategy = {
def find(compute_api, wished_server, per_page=1): def find(compute_api, wished_server, per_page=1):
compute_api.module.debug("Getting inside find") compute_api.module.debug("Getting inside find")
# Only the name attribute is accepted in the Compute query API # Only the name attribute is accepted in the Compute query API
response = compute_api.get("servers", params={"name": wished_server["name"], url = 'servers?name=%s&per_page=%d' % (urlquote(wished_server["name"]), per_page)
"per_page": per_page}) response = compute_api.get(url)
if not response.ok: if not response.ok:
msg = 'Error during server search: (%s) %s' % (response.status_code, response.json) msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
@ -561,7 +535,6 @@ PATCH_MUTABLE_SERVER_ATTRIBUTES = (
"tags", "tags",
"name", "name",
"dynamic_ip_required", "dynamic_ip_required",
"security_group",
) )
@ -573,51 +546,29 @@ def server_attributes_should_be_changed(compute_api, target_server, wished_serve
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
if x in target_server and x in wished_server) if x in target_server and x in wished_server)
compute_api.module.debug("Debug dict %s" % debug_dict) compute_api.module.debug("Debug dict %s" % debug_dict)
try: try:
for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: return any([target_server[x] != wished_server[x]
if key in target_server and key in wished_server: for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
# When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook if x in target_server and x in wished_server])
if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys(
) and target_server[key]["id"] != wished_server[key]:
return True
# Handling other structure compare simply the two objects content
elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
return True
return False
except AttributeError: except AttributeError:
compute_api.module.fail_json(msg="Error while checking if attributes should be changed") compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
def server_change_attributes(compute_api, target_server, wished_server): def server_change_attributes(compute_api, target_server, wished_server):
compute_api.module.debug("Starting patching server attributes") compute_api.module.debug("Starting patching server attributes")
patch_payload = dict() patch_payload = dict((x, wished_server[x])
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: if x in wished_server and x in target_server)
if key in target_server and key in wished_server:
# When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook
if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
# Setting all key to current value except ID
key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
# Setting ID to the user specified ID
key_dict["id"] = wished_server[key]
patch_payload[key] = key_dict
elif not isinstance(target_server[key], dict):
patch_payload[key] = wished_server[key]
response = compute_api.patch(path="servers/%s" % target_server["id"], response = compute_api.patch(path="servers/%s" % target_server["id"],
data=patch_payload) data=patch_payload)
if not response.ok: if not response.ok:
msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json) msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
compute_api.module.fail_json(msg=msg) compute_api.module.fail_json(msg=msg)
try:
target_server = response.json["server"]
except KeyError:
compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
wait_to_complete_state_transition(compute_api=compute_api, server=target_server) wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
return target_server return response
def core(module): def core(module):
@ -630,19 +581,12 @@ def core(module):
"enable_ipv6": module.params["enable_ipv6"], "enable_ipv6": module.params["enable_ipv6"],
"boot_type": module.params["boot_type"], "boot_type": module.params["boot_type"],
"tags": module.params["tags"], "tags": module.params["tags"],
"organization": module.params["organization"], "organization": module.params["organization"]
"security_group": module.params["security_group"]
} }
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
compute_api = Scaleway(module=module) compute_api = Scaleway(module=module)
check_image_id(compute_api, wished_server["image"])
# IP parameters of the wished server depends on the configuration
ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
wished_server.update(ip_payload)
changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server) changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
module.exit_json(changed=changed, msg=summary) module.exit_json(changed=changed, msg=summary)
@ -653,17 +597,15 @@ def main():
image=dict(required=True), image=dict(required=True),
name=dict(), name=dict(),
region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
commercial_type=dict(required=True), commercial_type=dict(required=True, choices=SCALEWAY_COMMERCIAL_TYPES),
enable_ipv6=dict(default=False, type="bool"), enable_ipv6=dict(default=False, type="bool"),
boot_type=dict(choices=['bootscript', 'local']), boot_type=dict(default="bootscript"),
public_ip=dict(default="absent"),
state=dict(choices=state_strategy.keys(), default='present'), state=dict(choices=state_strategy.keys(), default='present'),
tags=dict(type="list", default=[]), tags=dict(type="list", default=[]),
organization=dict(required=True), organization=dict(required=True),
wait=dict(type="bool", default=False), wait=dict(type="bool", default=False),
wait_timeout=dict(type="int", default=300), wait_timeout=dict(type="int", default=300),
wait_sleep_time=dict(type="int", default=3), wait_sleep_time=dict(type="int", default=3),
security_group=dict(),
)) ))
module = AnsibleModule( module = AnsibleModule(
argument_spec=argument_spec, argument_spec=argument_spec,

View file

@ -2,18 +2,6 @@
- hosts: localhost - hosts: localhost
become: false become: false
tasks: tasks:
- name: Playbook dir stat
stat:
path: "{{ playbook_dir }}"
register: _playbook_dir
- name: Ensure Ansible is not being run in a world writable directory
assert:
that: _playbook_dir.stat.mode|int <= 775
msg: >
Ansible is being run in a world writable directory ({{ playbook_dir }}), ignoring it as an ansible.cfg source.
For more information see https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
- name: Ensure the requirements installed - name: Ensure the requirements installed
debug: debug:
msg: "{{ '' | ipaddr }}" msg: "{{ '' | ipaddr }}"
@ -23,32 +11,21 @@
- name: Set required ansible version as a fact - name: Set required ansible version as a fact
set_fact: set_fact:
required_ansible_version: "{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d+(.\\d+)?)$', '{\"op\": \"\\g<op>\",\"ver\"\ required_ansible_version:
: \"\\g<ver>\" }') }}" "{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d(.\\d+)?)$',
'{\"op\": \"\\g<op>\",\"ver\": \"\\g<ver>\" }') }}"
when: '"ansible" in item' when: '"ansible" in item'
with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}" with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}"
- name: Just get the list from default pip - name: Verify Ansible meets Algo VPN requirements.
community.general.pip_package_info:
register: pip_package_info
- name: Verify Python meets Algo VPN requirements
assert:
that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string) is version('3.8', '>=')
msg: >
Python version is not supported.
You must upgrade to at least Python 3.8 to use this version of Algo.
See for more details - https://trailofbits.github.io/algo/troubleshooting.html#python-version-is-not-supported
- name: Verify Ansible meets Algo VPN requirements
assert: assert:
that: that:
- pip_package_info.packages.pip.ansible.0.version is version(required_ansible_version.ver, required_ansible_version.op) - ansible_version.full is version(required_ansible_version.ver, required_ansible_version.op)
- not ipaddr.failed - not ipaddr.failed
msg: > msg: >
Ansible version is {{ pip_package_info.packages.pip.ansible.0.version }}. Ansible version is {{ ansible_version.full }}.
You must update the requirements to use this version of Algo. You must update the requirements to use this version of Algo.
Try to run python3 -m pip install -U -r requirements.txt Try to run python -m pip install -U -r requirements.txt
- name: Include prompts playbook - name: Include prompts playbook
import_playbook: input.yml import_playbook: input.yml

View file

@ -1,5 +1,5 @@
--- ---
- name: Set subjectAltName as a fact - name: Set subjectAltName as afact
set_fact: set_fact:
IP_subject_alt_name: "{{ (IP_subject_alt_name if algo_provider == 'local' else cloud_instance_ip) | lower }}" IP_subject_alt_name: "{{ (IP_subject_alt_name if algo_provider == 'local' else cloud_instance_ip) | lower }}"
@ -8,9 +8,8 @@
name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}" name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
groups: vpn-host groups: vpn-host
ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}" ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}"
ansible_ssh_user: "{{ ansible_ssh_user|default('root') }}" ansible_ssh_user: "{{ ansible_ssh_user }}"
ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}" ansible_python_interpreter: "/usr/bin/python3"
ansible_python_interpreter: /usr/bin/python3
algo_provider: "{{ algo_provider }}" algo_provider: "{{ algo_provider }}"
algo_server_name: "{{ algo_server_name }}" algo_server_name: "{{ algo_server_name }}"
algo_ondemand_cellular: "{{ algo_ondemand_cellular }}" algo_ondemand_cellular: "{{ algo_ondemand_cellular }}"
@ -20,20 +19,18 @@
algo_ssh_tunneling: "{{ algo_ssh_tunneling }}" algo_ssh_tunneling: "{{ algo_ssh_tunneling }}"
algo_store_pki: "{{ algo_store_pki }}" algo_store_pki: "{{ algo_store_pki }}"
IP_subject_alt_name: "{{ IP_subject_alt_name }}" IP_subject_alt_name: "{{ IP_subject_alt_name }}"
alternative_ingress_ip: "{{ alternative_ingress_ip | default(omit) }}"
cloudinit: "{{ cloudinit|default(false) }}"
- name: Additional variables for the server - name: Additional variables for the server
add_host: add_host:
name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}" name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
ansible_ssh_private_key_file: "{{ SSH_keys.private_tmp }}" ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
when: algo_provider != 'local' when: algo_provider != 'local'
- name: Wait until SSH becomes ready... - name: Wait until SSH becomes ready...
wait_for: wait_for:
port: "{{ ansible_ssh_port|default(22) }}" port: 22
host: "{{ cloud_instance_ip }}" host: "{{ cloud_instance_ip }}"
search_regex: OpenSSH search_regex: "OpenSSH"
delay: 10 delay: 10
timeout: 320 timeout: 320
state: present state: present
@ -44,12 +41,12 @@
when: when:
- pki_in_tmpfs - pki_in_tmpfs
- not algo_store_pki - not algo_store_pki
- ansible_system == "Darwin" or ansible_system == "Linux" - ansible_system == "Darwin" or
ansible_system == "Linux"
- debug: - debug:
var: IP_subject_alt_name var: IP_subject_alt_name
- name: Wait 600 seconds for target connection to become reachable/usable - name: A short pause, in order to be sure the instance is ready
wait_for_connection: pause:
delegate_to: "{{ item }}" seconds: 20
loop: "{{ groups['vpn-host'] }}"

View file

@ -1,53 +1,45 @@
--- ---
- block: - block:
- name: Display the invocation environment - name: Display the invocation environment
shell: > shell: >
./algo-showenv.sh \ ./algo-showenv.sh \
'algo_provider "{{ algo_provider }}"' \ 'algo_provider "{{ algo_provider }}"' \
{% if ipsec_enabled %} {% if ipsec_enabled %}
'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \ 'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \
'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \ 'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \
'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \ 'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \
{% endif %} {% endif %}
'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \ 'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \ 'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
'wireguard_enabled "{{ wireguard_enabled }}"' \ 'wireguard_enabled "{{ wireguard_enabled }}"' \
'dns_encryption "{{ dns_encryption }}"' \ 'dns_encryption "{{ dns_encryption }}"' \
> /dev/tty || true > /dev/tty
tags: debug tags: debug
- name: Install the requirements - name: Install the requirements
pip: pip:
state: present state: latest
name: name:
- pyOpenSSL>=0.15 - pyOpenSSL
- segno - jinja2==2.8
tags: - segno
- always tags:
- skip_ansible_lint - always
- skip_ansible_lint
delegate_to: localhost delegate_to: localhost
become: false become: false
- block: - name: Generate the SSH private key
- name: Generate the SSH private key openssl_privatekey:
openssl_privatekey: path: "{{ SSH_keys.private }}"
path: "{{ SSH_keys.private }}" size: 2048
size: 4096 mode: "0600"
mode: "0600" type: RSA
type: RSA when: algo_provider != "local"
- name: Generate the SSH public key - name: Generate the SSH public key
openssl_publickey: openssl_publickey:
path: "{{ SSH_keys.public }}" path: "{{ SSH_keys.public }}"
privatekey_path: "{{ SSH_keys.private }}" privatekey_path: "{{ SSH_keys.private }}"
format: OpenSSH format: OpenSSH
- name: Copy the private SSH key to /tmp
copy:
src: "{{ SSH_keys.private }}"
dest: "{{ SSH_keys.private_tmp }}"
force: true
mode: "0600"
delegate_to: localhost
become: false
when: algo_provider != "local" when: algo_provider != "local"

View file

@ -1,5 +1,5 @@
--- ---
- name: Linux | set OS specific facts - name: Linux | set OS specific facts
set_fact: set_fact:
tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }} tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}"
tmpfs_volume_path: /dev/shm tmpfs_volume_path: /dev/shm

View file

@ -1,7 +1,7 @@
--- ---
- name: MacOS | set OS specific facts - name: MacOS | set OS specific facts
set_fact: set_fact:
tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }} tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}"
tmpfs_volume_path: /Volumes tmpfs_volume_path: /Volumes
- name: MacOS | mount a ram disk - name: MacOS | mount a ram disk
@ -9,4 +9,4 @@
/usr/sbin/diskutil info "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/" || /usr/sbin/diskutil info "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/" ||
/usr/sbin/diskutil erasevolume HFS+ "{{ tmpfs_volume_name }}" $(hdiutil attach -nomount ram://64000) /usr/sbin/diskutil erasevolume HFS+ "{{ tmpfs_volume_name }}" $(hdiutil attach -nomount ram://64000)
args: args:
creates: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }} creates: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}"

View file

@ -9,9 +9,11 @@
- name: Set config paths as facts - name: Set config paths as facts
set_fact: set_fact:
ipsec_pki_path: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/ wireguard_pki_path: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/WireGuard/"
ipsec_pki_path: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/"
- name: Update config paths - name: Update config paths
add_host: add_host:
name: "{{ 'localhost' if cloud_instance_ip == 'localhost' else cloud_instance_ip }}" name: "{{ 'localhost' if cloud_instance_ip == 'localhost' else cloud_instance_ip }}"
wireguard_pki_path: "{{ wireguard_pki_path }}"
ipsec_pki_path: "{{ ipsec_pki_path }}" ipsec_pki_path: "{{ ipsec_pki_path }}"

View file

@ -1,26 +1,26 @@
--- ---
- name: Linux | Delete the PKI directory - name: Linux | Delete the PKI directory
file: file:
path: /{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/ path: "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/"
state: absent state: absent
when: facts.ansible_system == "Linux" when: facts.ansible_system == "Linux"
- block: - block:
- name: MacOS | check fs the ramdisk exists - name: MacOS | check fs the ramdisk exists
command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}" command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}"
ignore_errors: true ignore_errors: true
changed_when: false changed_when: false
register: diskutil_info register: diskutil_info
- name: MacOS | unmount and eject the ram disk - name: MacOS | unmount and eject the ram disk
shell: > shell: >
/usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" && /usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" &&
/usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}" /usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}"
changed_when: false changed_when: false
when: diskutil_info.rc == 0 when: diskutil_info.rc == 0
register: result register: result
until: result.rc == 0 until: result.rc == 0
retries: 5 retries: 5
delay: 3 delay: 3
when: when:
- facts.ansible_system == "Darwin" - facts.ansible_system == "Darwin"

View file

@ -1,3 +1,2 @@
ansible==9.1.0 ansible==2.7.12
jinja2~=3.0.3
netaddr netaddr

View file

@ -1,5 +0,0 @@
libstrongswan {
x509 {
enforce_critical = no
}
}

View file

@ -1,3 +1,3 @@
--- ---
- name: restart strongswan - name: restart strongswan
service: name={{ strongswan_service }} state=restarted service: name=strongswan state=restarted

View file

@ -1,6 +1,6 @@
---
- name: Gather Facts - name: Gather Facts
setup: setup:
- name: Include system based facts and tasks - name: Include system based facts and tasks
import_tasks: systems/main.yml import_tasks: systems/main.yml
@ -22,9 +22,9 @@
- name: Setup the ipsec config - name: Setup the ipsec config
template: template:
src: roles/strongswan/templates/client_ipsec.conf.j2 src: "roles/strongswan/templates/client_ipsec.conf.j2"
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf" dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf"
mode: "0644" mode: '0644'
with_items: with_items:
- "{{ vpn_user }}" - "{{ vpn_user }}"
notify: notify:
@ -32,9 +32,9 @@
- name: Setup the ipsec secrets - name: Setup the ipsec secrets
template: template:
src: roles/strongswan/templates/client_ipsec.secrets.j2 src: "roles/strongswan/templates/client_ipsec.secrets.j2"
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets" dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets"
mode: "0600" mode: '0600'
with_items: with_items:
- "{{ vpn_user }}" - "{{ vpn_user }}"
notify: notify:
@ -44,33 +44,25 @@
lineinfile: lineinfile:
dest: "{{ item.dest }}" dest: "{{ item.dest }}"
line: "{{ item.line }}" line: "{{ item.line }}"
create: true create: yes
with_items: with_items:
- dest: "{{ configs_prefix }}/ipsec.conf" - dest: "{{ configs_prefix }}/ipsec.conf"
line: include ipsec.{{ IP_subject_alt_name }}.conf line: "include ipsec.{{ IP_subject_alt_name }}.conf"
- dest: "{{ configs_prefix }}/ipsec.secrets" - dest: "{{ configs_prefix }}/ipsec.secrets"
line: include ipsec.{{ IP_subject_alt_name }}.secrets line: "include ipsec.{{ IP_subject_alt_name }}.secrets"
notify: notify:
- restart strongswan - restart strongswan
- name: Configure libstrongswan to relax CA constraints
copy:
src: libstrongswan-relax-constraints.conf
dest: "{{ configs_prefix }}/strongswan.d/relax-ca-constraints.conf"
owner: root
group: root
mode: 0644
- name: Setup the certificates and keys - name: Setup the certificates and keys
template: template:
src: "{{ item.src }}" src: "{{ item.src }}"
dest: "{{ item.dest }}" dest: "{{ item.dest }}"
with_items: with_items:
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt - src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt"
dest: "{{ configs_prefix }}/ipsec.d/certs/{{ vpn_user }}.crt" dest: "{{ configs_prefix }}/ipsec.d/certs/{{ vpn_user }}.crt"
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem - src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem"
dest: "{{ configs_prefix }}/ipsec.d/cacerts/{{ IP_subject_alt_name }}.pem" dest: "{{ configs_prefix }}/ipsec.d/cacerts/{{ IP_subject_alt_name }}.pem"
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key - src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key"
dest: "{{ configs_prefix }}/ipsec.d/private/{{ vpn_user }}.key" dest: "{{ configs_prefix }}/ipsec.d/private/{{ vpn_user }}.key"
notify: notify:
- restart strongswan - restart strongswan

View file

@ -1,4 +1,5 @@
--- ---
- include_tasks: Debian.yml - include_tasks: Debian.yml
when: ansible_distribution == 'Debian' when: ansible_distribution == 'Debian'

View file

@ -1,210 +1,243 @@
--- ---
# az account list-locations --query 'sort_by([].{name:name,displayName:displayName,regionalDisplayName:regionalDisplayName}, &name)' -o yaml azure_venv: "{{ playbook_dir }}/configs/.venvs/azure"
azure_regions: _azure_regions: >
- displayName: Asia [
name: asia {
regionalDisplayName: Asia "displayName": "East Asia",
- displayName: Asia Pacific "latitude": "22.267",
name: asiapacific "longitude": "114.188",
regionalDisplayName: Asia Pacific "name": "eastasia",
- displayName: Australia "subscriptionId": null
name: australia },
regionalDisplayName: Australia {
- displayName: Australia Central "displayName": "Southeast Asia",
name: australiacentral "latitude": "1.283",
regionalDisplayName: (Asia Pacific) Australia Central "longitude": "103.833",
- displayName: Australia Central 2 "name": "southeastasia",
name: australiacentral2 "subscriptionId": null
regionalDisplayName: (Asia Pacific) Australia Central 2 },
- displayName: Australia East {
name: australiaeast "displayName": "Central US",
regionalDisplayName: (Asia Pacific) Australia East "latitude": "41.5908",
- displayName: Australia Southeast "longitude": "-93.6208",
name: australiasoutheast "name": "centralus",
regionalDisplayName: (Asia Pacific) Australia Southeast "subscriptionId": null
- displayName: Brazil },
name: brazil {
regionalDisplayName: Brazil "displayName": "East US",
- displayName: Brazil South "latitude": "37.3719",
name: brazilsouth "longitude": "-79.8164",
regionalDisplayName: (South America) Brazil South "name": "eastus",
- displayName: Brazil Southeast "subscriptionId": null
name: brazilsoutheast },
regionalDisplayName: (South America) Brazil Southeast {
- displayName: Canada "displayName": "East US 2",
name: canada "latitude": "36.6681",
regionalDisplayName: Canada "longitude": "-78.3889",
- displayName: Canada Central "name": "eastus2",
name: canadacentral "subscriptionId": null
regionalDisplayName: (Canada) Canada Central },
- displayName: Canada East {
name: canadaeast "displayName": "West US",
regionalDisplayName: (Canada) Canada East "latitude": "37.783",
- displayName: Central India "longitude": "-122.417",
name: centralindia "name": "westus",
regionalDisplayName: (Asia Pacific) Central India "subscriptionId": null
- displayName: Central US },
name: centralus {
regionalDisplayName: (US) Central US "displayName": "North Central US",
- displayName: Central US EUAP "latitude": "41.8819",
name: centraluseuap "longitude": "-87.6278",
regionalDisplayName: (US) Central US EUAP "name": "northcentralus",
- displayName: Central US (Stage) "subscriptionId": null
name: centralusstage },
regionalDisplayName: (US) Central US (Stage) {
- displayName: East Asia "displayName": "South Central US",
name: eastasia "latitude": "29.4167",
regionalDisplayName: (Asia Pacific) East Asia "longitude": "-98.5",
- displayName: East Asia (Stage) "name": "southcentralus",
name: eastasiastage "subscriptionId": null
regionalDisplayName: (Asia Pacific) East Asia (Stage) },
- displayName: East US {
name: eastus "displayName": "North Europe",
regionalDisplayName: (US) East US "latitude": "53.3478",
- displayName: East US 2 "longitude": "-6.2597",
name: eastus2 "name": "northeurope",
regionalDisplayName: (US) East US 2 "subscriptionId": null
- displayName: East US 2 EUAP },
name: eastus2euap {
regionalDisplayName: (US) East US 2 EUAP "displayName": "West Europe",
- displayName: East US 2 (Stage) "latitude": "52.3667",
name: eastus2stage "longitude": "4.9",
regionalDisplayName: (US) East US 2 (Stage) "name": "westeurope",
- displayName: East US (Stage) "subscriptionId": null
name: eastusstage },
regionalDisplayName: (US) East US (Stage) {
- displayName: Europe "displayName": "Japan West",
name: europe "latitude": "34.6939",
regionalDisplayName: Europe "longitude": "135.5022",
- displayName: France Central "name": "japanwest",
name: francecentral "subscriptionId": null
regionalDisplayName: (Europe) France Central },
- displayName: France South {
name: francesouth "displayName": "Japan East",
regionalDisplayName: (Europe) France South "latitude": "35.68",
- displayName: Germany North "longitude": "139.77",
name: germanynorth "name": "japaneast",
regionalDisplayName: (Europe) Germany North "subscriptionId": null
- displayName: Germany West Central },
name: germanywestcentral {
regionalDisplayName: (Europe) Germany West Central "displayName": "Brazil South",
- displayName: Global "latitude": "-23.55",
name: global "longitude": "-46.633",
regionalDisplayName: Global "name": "brazilsouth",
- displayName: India "subscriptionId": null
name: india },
regionalDisplayName: India {
- displayName: Japan "displayName": "Australia East",
name: japan "latitude": "-33.86",
regionalDisplayName: Japan "longitude": "151.2094",
- displayName: Japan East "name": "australiaeast",
name: japaneast "subscriptionId": null
regionalDisplayName: (Asia Pacific) Japan East },
- displayName: Japan West {
name: japanwest "displayName": "Australia Southeast",
regionalDisplayName: (Asia Pacific) Japan West "latitude": "-37.8136",
- displayName: Jio India Central "longitude": "144.9631",
name: jioindiacentral "name": "australiasoutheast",
regionalDisplayName: (Asia Pacific) Jio India Central "subscriptionId": null
- displayName: Jio India West },
name: jioindiawest {
regionalDisplayName: (Asia Pacific) Jio India West "displayName": "South India",
- displayName: Korea Central "latitude": "12.9822",
name: koreacentral "longitude": "80.1636",
regionalDisplayName: (Asia Pacific) Korea Central "name": "southindia",
- displayName: Korea South "subscriptionId": null
name: koreasouth },
regionalDisplayName: (Asia Pacific) Korea South {
- displayName: North Central US "displayName": "Central India",
name: northcentralus "latitude": "18.5822",
regionalDisplayName: (US) North Central US "longitude": "73.9197",
- displayName: North Central US (Stage) "name": "centralindia",
name: northcentralusstage "subscriptionId": null
regionalDisplayName: (US) North Central US (Stage) },
- displayName: North Europe {
name: northeurope "displayName": "West India",
regionalDisplayName: (Europe) North Europe "latitude": "19.088",
- displayName: Norway East "longitude": "72.868",
name: norwayeast "name": "westindia",
regionalDisplayName: (Europe) Norway East "subscriptionId": null
- displayName: Norway West },
name: norwaywest {
regionalDisplayName: (Europe) Norway West "displayName": "Canada Central",
- displayName: Qatar Central "latitude": "43.653",
name: qatarcentral "longitude": "-79.383",
regionalDisplayName: (Europe) Qatar Central "name": "canadacentral",
- displayName: South Africa North "subscriptionId": null
name: southafricanorth },
regionalDisplayName: (Africa) South Africa North {
- displayName: South Africa West "displayName": "Canada East",
name: southafricawest "latitude": "46.817",
regionalDisplayName: (Africa) South Africa West "longitude": "-71.217",
- displayName: South Central US "name": "canadaeast",
name: southcentralus "subscriptionId": null
regionalDisplayName: (US) South Central US },
- displayName: South Central US (Stage) {
name: southcentralusstage "displayName": "UK South",
regionalDisplayName: (US) South Central US (Stage) "latitude": "50.941",
- displayName: Southeast Asia "longitude": "-0.799",
name: southeastasia "name": "uksouth",
regionalDisplayName: (Asia Pacific) Southeast Asia "subscriptionId": null
- displayName: Southeast Asia (Stage) },
name: southeastasiastage {
regionalDisplayName: (Asia Pacific) Southeast Asia (Stage) "displayName": "UK West",
- displayName: South India "latitude": "53.427",
name: southindia "longitude": "-3.084",
regionalDisplayName: (Asia Pacific) South India "name": "ukwest",
- displayName: Sweden Central "subscriptionId": null
name: swedencentral },
regionalDisplayName: (Europe) Sweden Central {
- displayName: Sweden South "displayName": "West Central US",
name: swedensouth "latitude": "40.890",
regionalDisplayName: (Europe) Sweden South "longitude": "-110.234",
- displayName: Switzerland North "name": "westcentralus",
name: switzerlandnorth "subscriptionId": null
regionalDisplayName: (Europe) Switzerland North },
- displayName: Switzerland West {
name: switzerlandwest "displayName": "West US 2",
regionalDisplayName: (Europe) Switzerland West "latitude": "47.233",
- displayName: UAE Central "longitude": "-119.852",
name: uaecentral "name": "westus2",
regionalDisplayName: (Middle East) UAE Central "subscriptionId": null
- displayName: UAE North },
name: uaenorth {
regionalDisplayName: (Middle East) UAE North "displayName": "Korea Central",
- displayName: United Kingdom "latitude": "37.5665",
name: uk "longitude": "126.9780",
regionalDisplayName: United Kingdom "name": "koreacentral",
- displayName: UK South "subscriptionId": null
name: uksouth },
regionalDisplayName: (Europe) UK South {
- displayName: UK West "displayName": "Korea South",
name: ukwest "latitude": "35.1796",
regionalDisplayName: (Europe) UK West "longitude": "129.0756",
- displayName: United States "name": "koreasouth",
name: unitedstates "subscriptionId": null
regionalDisplayName: United States },
- displayName: West Central US {
name: westcentralus "displayName": "France Central",
regionalDisplayName: (US) West Central US "latitude": "46.3772",
- displayName: West Europe "longitude": "2.3730",
name: westeurope "name": "francecentral",
regionalDisplayName: (Europe) West Europe "subscriptionId": null
- displayName: West India },
name: westindia {
regionalDisplayName: (Asia Pacific) West India "displayName": "France South",
- displayName: West US "latitude": "43.8345",
name: westus "longitude": "2.1972",
regionalDisplayName: (US) West US "name": "francesouth",
- displayName: West US 2 "subscriptionId": null
name: westus2 },
regionalDisplayName: (US) West US 2 {
- displayName: West US 2 (Stage) "displayName": "Australia Central",
name: westus2stage "latitude": "-35.3075",
regionalDisplayName: (US) West US 2 (Stage) "longitude": "149.1244",
- displayName: West US 3 "name": "australiacentral",
name: westus3 "subscriptionId": null
regionalDisplayName: (US) West US 3 },
- displayName: West US (Stage) {
name: westusstage "displayName": "Australia Central 2",
regionalDisplayName: (US) West US (Stage) "latitude": "-35.3075",
"longitude": "149.1244",
"name": "australiacentral2",
"subscriptionId": null
},
{
"displayName": "UAE Central",
"latitude": "24.466667",
"longitude": "54.366669",
"name": "uaecentral",
"subscriptionId": null
},
{
"displayName": "UAE North",
"latitude": "25.266666",
"longitude": "55.316666",
"name": "uaenorth",
"subscriptionId": null
},
{
"displayName": "South Africa North",
"latitude": "-25.731340",
"longitude": "28.218370",
"name": "southafricanorth",
"subscriptionId": null
},
{
"displayName": "South Africa West",
"latitude": "-34.075691",
"longitude": "18.843266",
"name": "southafricawest",
"subscriptionId": null
}
]

View file

@ -11,26 +11,8 @@
"vmSize": { "vmSize": {
"type": "string" "type": "string"
}, },
"imageReferencePublisher": {
"type": "string"
},
"imageReferenceOffer": {
"type": "string"
},
"imageReferenceSku": { "imageReferenceSku": {
"type": "string" "type": "string"
},
"imageReferenceVersion": {
"type": "string"
},
"osDiskType": {
"type": "string"
},
"SshPort": {
"type": "int"
},
"UserData": {
"type": "string"
} }
}, },
"variables": { "variables": {
@ -48,10 +30,10 @@
{ {
"name": "AllowSSH", "name": "AllowSSH",
"properties": { "properties": {
"description": "Allow SSH", "description": "Locks inbound down to ssh default port 22.",
"protocol": "Tcp", "protocol": "Tcp",
"sourcePortRange": "*", "sourcePortRange": "*",
"destinationPortRange": "[parameters('SshPort')]", "destinationPortRange": "22",
"sourceAddressPrefix": "*", "sourceAddressPrefix": "*",
"destinationAddressPrefix": "*", "destinationAddressPrefix": "*",
"access": "Allow", "access": "Allow",
@ -178,14 +160,13 @@
}, },
"osProfile": { "osProfile": {
"computerName": "[resourceGroup().name]", "computerName": "[resourceGroup().name]",
"customData": "[parameters('UserData')]", "adminUsername": "ubuntu",
"adminUsername": "algo",
"linuxConfiguration": { "linuxConfiguration": {
"disablePasswordAuthentication": true, "disablePasswordAuthentication": true,
"ssh": { "ssh": {
"publicKeys": [ "publicKeys": [
{ {
"path": "/home/algo/.ssh/authorized_keys", "path": "/home/ubuntu/.ssh/authorized_keys",
"keyData": "[parameters('sshKeyData')]" "keyData": "[parameters('sshKeyData')]"
} }
] ]
@ -194,16 +175,13 @@
}, },
"storageProfile": { "storageProfile": {
"imageReference": { "imageReference": {
"publisher": "[parameters('imageReferencePublisher')]", "publisher": "Canonical",
"offer": "[parameters('imageReferenceOffer')]", "offer": "UbuntuServer",
"sku": "[parameters('imageReferenceSku')]", "sku": "[parameters('imageReferenceSku')]",
"version": "[parameters('imageReferenceVersion')]" "version": "latest"
}, },
"osDisk": { "osDisk": {
"createOption": "FromImage", "createOption": "FromImage"
"managedDisk": {
"storageAccountType": "[parameters('osDiskType')]"
}
} }
}, },
"networkProfile": { "networkProfile": {

View file

@ -2,51 +2,40 @@
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
- name: Include prompts - block:
import_tasks: prompts.yml - name: Include prompts
import_tasks: prompts.yml
- set_fact: - set_fact:
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input %}{{ azure_regions[_algo_region.user_input | int -1 ]['name'] }} {%- elif _algo_region.user_input %}{{ azure_regions[_algo_region.user_input | int -1 ]['name'] }}
{%- else %}{{ azure_regions[default_region | int - 1]['name'] }}{% endif %} {%- else %}{{ azure_regions[default_region | int - 1]['name'] }}{% endif %}
- name: Create AlgoVPN Server - name: Create AlgoVPN Server
azure_rm_deployment: azure_rm_deployment:
state: present state: present
deployment_name: "{{ algo_server_name }}" deployment_name: "{{ algo_server_name }}"
template: "{{ lookup('file', role_path + '/files/deployment.json') }}" template: "{{ lookup('file', role_path + '/files/deployment.json') }}"
secret: "{{ secret }}" secret: "{{ secret }}"
tenant: "{{ tenant }}" tenant: "{{ tenant }}"
client_id: "{{ client_id }}" client_id: "{{ client_id }}"
subscription_id: "{{ subscription_id }}" subscription_id: "{{ subscription_id }}"
resource_group_name: "{{ algo_server_name }}" resource_group_name: "{{ algo_server_name }}"
location: "{{ algo_region }}" location: "{{ algo_region }}"
parameters: parameters:
sshKeyData: sshKeyData:
value: "{{ lookup('file', '{{ SSH_keys.public }}') }}" value: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
WireGuardPort: WireGuardPort:
value: "{{ wireguard_port }}" value: "{{ wireguard_port }}"
vmSize: vmSize:
value: "{{ cloud_providers.azure.size }}" value: "{{ cloud_providers.azure.size }}"
imageReferencePublisher: imageReferenceSku:
value: "{{ cloud_providers.azure.image.publisher }}" value: "{{ cloud_providers.azure.image }}"
imageReferenceOffer: register: azure_rm_deployment
value: "{{ cloud_providers.azure.image.offer }}"
imageReferenceSku:
value: "{{ cloud_providers.azure.image.sku }}"
imageReferenceVersion:
value: "{{ cloud_providers.azure.image.version }}"
osDiskType:
value: "{{ cloud_providers.azure.osDisk.type }}"
SshPort:
value: "{{ ssh_port }}"
UserData:
value: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}"
register: azure_rm_deployment
- set_fact: - set_fact:
cloud_instance_ip: "{{ azure_rm_deployment.deployment.outputs.publicIPAddresses.value }}" cloud_instance_ip: "{{ azure_rm_deployment.deployment.outputs.publicIPAddresses.value }}"
ansible_ssh_user: algo ansible_ssh_user: ubuntu
ansible_ssh_port: "{{ ssh_port }}" environment:
cloudinit: true PYTHONPATH: "{{ azure_venv }}/lib/python2.7/site-packages/"

View file

@ -6,21 +6,25 @@
subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}" subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"
- block: - block:
- name: Set the default region - name: Set facts about the regions
set_fact: set_fact:
default_region: >- azure_regions: "{{ _azure_regions|from_json | sort(attribute='name') }}"
- name: Set the default region
set_fact:
default_region: >-
{% for r in azure_regions %}
{%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
{% for r in azure_regions %} {% for r in azure_regions %}
{%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %} {{ loop.index }}. {{ r['displayName'] }}
{%- endfor %} {% endfor %}
- pause: Enter the number of your desired region
prompt: | [{{ default_region }}]
What region should the server be located in? register: _algo_region
{% for r in azure_regions %}
{{ loop.index }}. {{ r['regionalDisplayName'] }}
{% endfor %}
Enter the number of your desired region
[{{ default_region }}]
register: _algo_region
when: region is undefined when: region is undefined

View file

@ -1,6 +1,41 @@
--- ---
- name: Clean up the environment
file:
dest: "{{ azure_venv }}"
state: absent
when: clean_environment
- name: Install requirements - name: Install requirements
pip: pip:
requirements: https://raw.githubusercontent.com/ansible-collections/azure/v1.13.0/requirements-azure.txt name:
- packaging
- requests[security]
- azure-cli-core==2.0.35
- azure-cli-nspkg==3.0.2
- azure-common==1.1.11
- azure-mgmt-batch==4.1.0
- azure-mgmt-compute==2.1.0
- azure-mgmt-containerinstance==0.4.0
- azure-mgmt-containerregistry==2.0.0
- azure-mgmt-containerservice==3.0.1
- azure-mgmt-dns==1.2.0
- azure-mgmt-keyvault==0.40.0
- azure-mgmt-marketplaceordering==0.1.0
- azure-mgmt-monitor==0.5.2
- azure-mgmt-network==1.7.1
- azure-mgmt-nspkg==2.0.0
- azure-mgmt-rdbms==1.2.0
- azure-mgmt-resource==1.2.2
- azure-mgmt-sql==0.7.1
- azure-mgmt-storage==1.5.0
- azure-mgmt-trafficmanager==0.50.0
- azure-mgmt-web==0.32.0
- azure-nspkg==2.0.0
- azure-storage==0.35.1
- msrest==0.4.29
- msrestazure==0.4.31
- azure-keyvault==1.0.0a1
- azure-graphrbac==0.40.0
state: latest state: latest
virtualenv_python: python3 virtualenv: "{{ azure_venv }}"
virtualenv_python: python2.7

View file

@ -1,59 +0,0 @@
---
- name: Build python virtual environment
import_tasks: venv.yml
- name: Include prompts
import_tasks: prompts.yml
- block:
- set_fact:
algo_region: >-
{% if region is defined %}{{ region }}
{%- elif _algo_region.user_input is defined and _algo_region.user_input | length > 0 %}{{ cs_zones[_algo_region.user_input | int -1 ]['name'] }}
{%- else %}{{ cs_zones[default_zone | int - 1]['name'] }}{% endif %}
- name: Security group created
cs_securitygroup:
name: "{{ algo_server_name }}-security_group"
description: AlgoVPN security group
register: cs_security_group
- name: Security rules created
cs_securitygroup_rule:
security_group: "{{ cs_security_group.name }}"
protocol: "{{ item.proto }}"
start_port: "{{ item.start_port }}"
end_port: "{{ item.end_port }}"
cidr: "{{ item.range }}"
with_items:
- { proto: tcp, start_port: "{{ ssh_port }}", end_port: "{{ ssh_port }}", range: 0.0.0.0/0 }
- { proto: udp, start_port: 4500, end_port: 4500, range: 0.0.0.0/0 }
- { proto: udp, start_port: 500, end_port: 500, range: 0.0.0.0/0 }
- { proto: udp, start_port: "{{ wireguard_port }}", end_port: "{{ wireguard_port }}", range: 0.0.0.0/0 }
- name: Set facts
set_fact:
image_id: "{{ cloud_providers.cloudstack.image }}"
size: "{{ cloud_providers.cloudstack.size }}"
disk: "{{ cloud_providers.cloudstack.disk }}"
- name: Server created
cs_instance:
name: "{{ algo_server_name }}"
root_disk_size: "{{ disk }}"
template: "{{ image_id }}"
security_groups: "{{ cs_security_group.name }}"
zone: "{{ algo_region }}"
service_offering: "{{ size }}"
user_data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
register: cs_server
- set_fact:
cloud_instance_ip: "{{ cs_server.default_ip }}"
ansible_ssh_user: algo
ansible_ssh_port: "{{ ssh_port }}"
cloudinit: true
environment:
CLOUDSTACK_KEY: "{{ algo_cs_key }}"
CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"

View file

@ -1,65 +0,0 @@
---
# Interactive prompts for CloudStack API credentials and zone selection.
# Each value may also be supplied as an Ansible variable or environment
# variable, in which case the corresponding prompt is skipped.
- block:
    - pause:
        prompt: |
          Enter the API key (https://trailofbits.github.io/algo/cloud-cloudstack.html):
        echo: false
      register: _cs_key
      when:
        - cs_key is undefined
        - lookup('env','CLOUDSTACK_KEY')|length <= 0

    - pause:
        prompt: |
          Enter the API secret (https://trailofbits.github.io/algo/cloud-cloudstack.html):
        echo: false
      register: _cs_secret
      when:
        - cs_secret is undefined
        - lookup('env','CLOUDSTACK_SECRET')|length <= 0

    - pause:
        prompt: |
          Enter the API endpoint (https://trailofbits.github.io/algo/cloud-cloudstack.html)
          [https://api.exoscale.com/compute]
      register: _cs_url
      when:
        - cs_url is undefined
        - lookup('env', 'CLOUDSTACK_ENDPOINT') | length <= 0

    # Resolve final credentials: explicit var wins, then the prompt
    # answer, then the environment variable; the endpoint additionally
    # falls back to the Exoscale default.
    - set_fact:
        algo_cs_key: "{{ cs_key | default(_cs_key.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_KEY'), true) }}"
        algo_cs_token: "{{ cs_secret | default(_cs_secret.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_SECRET'), true) }}"
        algo_cs_url: "{{ cs_url | default(_cs_url.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_ENDPOINT'), true) | default('https://api.exoscale.com/compute', true) }}"

- name: Get zones on cloud
  cs_zone_info:
  register: _cs_zones
  environment:
    CLOUDSTACK_KEY: "{{ algo_cs_key }}"
    CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
    CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"

- name: Extract zones from output
  set_fact:
    cs_zones: "{{ _cs_zones['zones'] | sort(attribute='name') }}"

- name: Set the default zone
  set_fact:
    # Pre-select "ch-gva-2" (Exoscale, Geneva) when it is available.
    default_zone: >-
      {% for z in cs_zones %}
      {%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %}
      {%- endfor %}

- pause:
    prompt: |
      What zone should the server be located in?
      {% for z in cs_zones %}
      {{ loop.index }}. {{ z['name'] }}
      {% endfor %}
      Enter the number of your desired zone
      [{{ default_zone }}]
  register: _algo_region
  when: region is undefined

View file

@ -1,8 +0,0 @@
---
# Install the Python packages required by the CloudStack modules
# (`cs` SDK and sshpubkeys) into the active Python environment.
- name: Install requirements
  pip:
    name:
      - cs
      - sshpubkeys
    state: latest
    virtualenv_python: python3

View file

@ -0,0 +1,2 @@
---
# Path of the dedicated Python virtualenv used by the DigitalOcean tasks.
digitalocean_venv: "{{ playbook_dir }}/configs/.venvs/digitalocean"

View file

@ -1,50 +1,105 @@
--- ---
- name: Include prompts - name: Build python virtual environment
import_tasks: prompts.yml import_tasks: venv.yml
- name: Upload the SSH key
digital_ocean_sshkey:
oauth_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}"
ssh_pub_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
register: do_ssh_key
- name: Creating a droplet...
digital_ocean_droplet:
state: present
name: "{{ algo_server_name }}"
oauth_token: "{{ algo_do_token }}"
size: "{{ cloud_providers.digitalocean.size }}"
region: "{{ algo_do_region }}"
image: "{{ cloud_providers.digitalocean.image }}"
wait_timeout: 300
unique_name: true
ipv6: true
ssh_keys: "{{ do_ssh_key.data.ssh_key.id }}"
user_data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
tags:
- Environment:Algo
register: digital_ocean_droplet
# Return data is not idempotent
- set_fact:
droplet: "{{ digital_ocean_droplet.data.droplet | default(digital_ocean_droplet.data) }}"
- block: - block:
- name: Create a Floating IP - name: Include prompts
digital_ocean_floating_ip: import_tasks: prompts.yml
state: present
oauth_token: "{{ algo_do_token }}"
droplet_id: "{{ droplet.id }}"
register: digital_ocean_floating_ip
- name: Set the static ip as a fact - name: Set additional facts
set_fact: set_fact:
cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}" algo_do_region: >-
when: alternative_ingress_ip {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
- set_fact: - block:
cloud_instance_ip: "{{ (droplet.networks.v4 | selectattr('type', '==', 'public')).0.ip_address }}" - name: "Delete the existing Algo SSH keys"
ansible_ssh_user: algo digital_ocean:
ansible_ssh_port: "{{ ssh_port }}" state: absent
cloudinit: true command: ssh
api_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}"
register: ssh_keys
until: not ssh_keys.changed
retries: 10
delay: 1
rescue:
- name: Collect the fail error
digital_ocean:
state: absent
command: ssh
api_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}"
register: ssh_keys
ignore_errors: yes
- debug: var=ssh_keys
- fail:
msg: "Please, ensure that your API token is not read-only."
- name: "Upload the SSH key"
digital_ocean:
state: present
command: ssh
ssh_pub_key: "{{ public_key }}"
api_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}"
register: do_ssh_key
- name: "Creating a droplet..."
digital_ocean:
state: present
command: droplet
name: "{{ algo_server_name }}"
region_id: "{{ algo_do_region }}"
size_id: "{{ cloud_providers.digitalocean.size }}"
image_id: "{{ cloud_providers.digitalocean.image }}"
ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
unique_name: yes
api_token: "{{ algo_do_token }}"
ipv6: yes
register: do
- set_fact:
cloud_instance_ip: "{{ do.droplet.ip_address }}"
ansible_ssh_user: root
- name: Tag the droplet
digital_ocean_tag:
name: "Environment:Algo"
resource_id: "{{ do.droplet.id }}"
api_token: "{{ algo_do_token }}"
state: present
- block:
- name: "Delete the new Algo SSH key"
digital_ocean:
state: absent
command: ssh
api_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}"
register: ssh_keys
until: not ssh_keys.changed
retries: 10
delay: 1
rescue:
- name: Collect the fail error
digital_ocean:
state: absent
command: ssh
api_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}"
register: ssh_keys
ignore_errors: yes
- debug: var=ssh_keys
- fail:
msg: "Please, ensure that your API token is not read-only."
environment:
PYTHONPATH: "{{ digitalocean_venv }}/lib/python2.7/site-packages/"

View file

@ -18,13 +18,13 @@
method: GET method: GET
status_code: 200 status_code: 200
headers: headers:
Content-Type: application/json Content-Type: "application/json"
Authorization: Bearer {{ algo_do_token }} Authorization: "Bearer {{ algo_do_token }}"
register: _do_regions register: _do_regions
- name: Set facts about the regions - name: Set facts about thre regions
set_fact: set_fact:
do_regions: "{{ _do_regions.json.regions | selectattr('available', 'true') | sort(attribute='slug') }}" do_regions: "{{ _do_regions.json.regions | sort(attribute='slug') }}"
- name: Set default region - name: Set default region
set_fact: set_fact:
@ -44,10 +44,3 @@
[{{ default_region }}] [{{ default_region }}]
register: _algo_region register: _algo_region
when: region is undefined when: region is undefined
- name: Set additional facts
set_fact:
algo_do_region: >-
{% if region is defined %}{{ region }}
{%- elif _algo_region.user_input %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}

View file

@ -0,0 +1,13 @@
---
# (Re)build the DigitalOcean virtualenv: optionally wipe it first,
# then install the pinned `dopy` client into it.
- name: Clean up the environment
  file:
    dest: "{{ digitalocean_venv }}"
    state: absent
  when: clean_environment

- name: Install requirements
  pip:
    name: dopy
    # dopy is pinned; newer releases changed the API surface.
    version: 0.3.5
    virtualenv: "{{ digitalocean_venv }}"
    virtualenv_python: python2.7

View file

@ -1,6 +1,8 @@
--- ---
ami_search_encrypted: omit
encrypted: "{{ cloud_providers.ec2.encrypted }}" encrypted: "{{ cloud_providers.ec2.encrypted }}"
ec2_vpc_nets: ec2_vpc_nets:
cidr_block: 172.16.0.0/16 cidr_block: 172.16.0.0/16
subnet_cidr: 172.16.254.0/23 subnet_cidr: 172.16.254.0/23
ec2_venv: "{{ playbook_dir }}/configs/.venvs/aws"
existing_eip: "" existing_eip: ""

View file

@ -14,23 +14,9 @@ Parameters:
UseThisElasticIP: UseThisElasticIP:
Type: String Type: String
Default: '' Default: ''
EbsEncrypted:
Type: String
UserData:
Type: String
SshPort:
Type: String
InstanceMarketTypeParameter:
Description: Launch a Spot instance or standard on-demand instance
Type: String
Default: on-demand
AllowedValues:
- spot
- on-demand
Conditions: Conditions:
AllocateNewEIP: !Equals [!Ref UseThisElasticIP, ''] AllocateNewEIP: !Equals [!Ref UseThisElasticIP, '']
AssociateExistingEIP: !Not [!Equals [!Ref UseThisElasticIP, '']] AssociateExistingEIP: !Not [!Equals [!Ref UseThisElasticIP, '']]
InstanceIsSpot: !Equals [spot, !Ref InstanceMarketTypeParameter]
Resources: Resources:
VPC: VPC:
Type: AWS::EC2::VPC Type: AWS::EC2::VPC
@ -135,8 +121,8 @@ Resources:
GroupDescription: Enable SSH and IPsec GroupDescription: Enable SSH and IPsec
SecurityGroupIngress: SecurityGroupIngress:
- IpProtocol: tcp - IpProtocol: tcp
FromPort: !Ref SshPort FromPort: '22'
ToPort: !Ref SshPort ToPort: '22'
CidrIp: 0.0.0.0/0 CidrIp: 0.0.0.0/0
- IpProtocol: udp - IpProtocol: udp
FromPort: '500' FromPort: '500'
@ -154,30 +140,25 @@ Resources:
- Key: Name - Key: Name
Value: !Ref AWS::StackName Value: !Ref AWS::StackName
EC2LaunchTemplate:
Type: AWS::EC2::LaunchTemplate
Condition: InstanceIsSpot # Only create this template if requested
Properties: # a spot instance_market_type in config.cfg
LaunchTemplateName: !Ref AWS::StackName
LaunchTemplateData:
InstanceMarketOptions:
MarketType: spot
EC2Instance: EC2Instance:
Type: AWS::EC2::Instance Type: AWS::EC2::Instance
DependsOn: DependsOn:
- SubnetIPv6 - SubnetIPv6
- Subnet - Subnet
- InstanceSecurityGroup - InstanceSecurityGroup
Metadata:
AWS::CloudFormation::Init:
config:
files:
/home/ubuntu/.ssh/authorized_keys:
content:
Ref: PublicSSHKeyParameter
mode: "000644"
owner: "ubuntu"
group: "ubuntu"
Properties: Properties:
InstanceType: InstanceType:
Ref: InstanceTypeParameter Ref: InstanceTypeParameter
BlockDeviceMappings:
- DeviceName: /dev/sda1
Ebs:
DeleteOnTermination: true
VolumeSize: 8
Encrypted: !Ref EbsEncrypted
InstanceInitiatedShutdownBehavior: terminate InstanceInitiatedShutdownBehavior: terminate
SecurityGroupIds: SecurityGroupIds:
- Ref: InstanceSecurityGroup - Ref: InstanceSecurityGroup
@ -185,15 +166,15 @@ Resources:
Ref: ImageIdParameter Ref: ImageIdParameter
SubnetId: !Ref Subnet SubnetId: !Ref Subnet
Ipv6AddressCount: 1 Ipv6AddressCount: 1
UserData: !Ref UserData UserData:
LaunchTemplate: "Fn::Base64":
!If # Only if Conditions created "EC2LaunchTemplate" !Sub |
- InstanceIsSpot #!/bin/bash -xe
- apt-get update
LaunchTemplateId: apt-get -y install python-pip
!Ref EC2LaunchTemplate pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
Version: 1 cfn-init -v --stack ${AWS::StackName} --resource EC2Instance --region ${AWS::Region}
- !Ref AWS::NoValue # Else this LaunchTemplate not set cfn-signal -e $? --stack ${AWS::StackName} --resource EC2Instance --region ${AWS::Region}
Tags: Tags:
- Key: Name - Key: Name
Value: !Ref AWS::StackName Value: !Ref AWS::StackName

View file

@ -4,7 +4,7 @@
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
stack_name: "{{ stack_name }}" stack_name: "{{ stack_name }}"
state: present state: "present"
region: "{{ algo_region }}" region: "{{ algo_region }}"
template: roles/cloud-ec2/files/stack.yaml template: roles/cloud-ec2/files/stack.yaml
template_parameters: template_parameters:
@ -13,10 +13,6 @@
ImageIdParameter: "{{ ami_image }}" ImageIdParameter: "{{ ami_image }}"
WireGuardPort: "{{ wireguard_port }}" WireGuardPort: "{{ wireguard_port }}"
UseThisElasticIP: "{{ existing_eip }}" UseThisElasticIP: "{{ existing_eip }}"
EbsEncrypted: "{{ encrypted }}"
UserData: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}"
SshPort: "{{ ssh_port }}"
InstanceMarketTypeParameter: "{{ cloud_providers.ec2.instance_market_type }}"
tags: tags:
Environment: Algo Environment: Algo
register: stack register: stack

View file

@ -0,0 +1,29 @@
---
# Ensure an encrypted copy of the source AMI exists in this region,
# creating one on first run. Registered results:
#   search_crypt         - pre-existing encrypted AMIs tagged by Algo
#   ami_search_encrypted - the freshly copied AMI (only when created)
- name: Check if the encrypted image already exists
  ec2_ami_facts:
    aws_access_key: "{{ access_key }}"
    aws_secret_key: "{{ secret_key }}"
    owners: self
    region: "{{ algo_region }}"
    filters:
      state: available
      "tag:Algo": encrypted
      "tag:image": "{{ cloud_providers.ec2.image.name }}"
  register: search_crypt

- name: Copy to an encrypted image
  ec2_ami_copy:
    aws_access_key: "{{ access_key }}"
    aws_secret_key: "{{ secret_key }}"
    encrypted: true
    name: "algo/{{ cloud_providers.ec2.image.name }}"
    # Use the account default EBS KMS key unless one was provided.
    kms_key_id: "{{ kms_key_id | default(omit) }}"
    region: "{{ algo_region }}"
    source_image_id: "{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}"
    source_region: "{{ algo_region }}"
    wait: true
    tags:
      Algo: "encrypted"
      image: "{{ cloud_providers.ec2.image.name }}"
  register: ami_search_encrypted
  when: search_crypt.images|length|int == 0

View file

@ -2,29 +2,35 @@
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
- name: Include prompts - block:
import_tasks: prompts.yml - name: Include prompts
import_tasks: prompts.yml
- name: Locate official AMI for region - name: Locate official AMI for region
ec2_ami_info: ec2_ami_facts:
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
owners: "{{ cloud_providers.ec2.image.owner }}" owners: "{{ cloud_providers.ec2.image.owner }}"
region: "{{ algo_region }}" region: "{{ algo_region }}"
filters: filters:
architecture: "{{ cloud_providers.ec2.image.arch }}" name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
name: ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-*64-server-* register: ami_search
register: ami_search
- name: Set the ami id as a fact - import_tasks: encrypt_image.yml
set_fact: when: encrypted
ami_image: "{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}"
- name: Deploy the stack - name: Set the ami id as a fact
import_tasks: cloudformation.yml set_fact:
ami_image: >-
{% if ami_search_encrypted.image_id is defined %}{{ ami_search_encrypted.image_id }}
{%- elif search_crypt.images is defined and search_crypt.images|length >= 1 %}{{ (search_crypt.images | sort(attribute='creation_date') | last)['image_id'] }}
{%- else %}{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}{% endif %}
- set_fact: - name: Deploy the stack
cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}" import_tasks: cloudformation.yml
ansible_ssh_user: algo
ansible_ssh_port: "{{ ssh_port }}" - set_fact:
cloudinit: true cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}"
ansible_ssh_user: ubuntu
environment:
PYTHONPATH: "{{ ec2_venv }}/lib/python2.7/site-packages/"

View file

@ -1,17 +1,17 @@
--- ---
- pause: - pause:
prompt: | prompt: |
Enter your AWS Access Key ID (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md) Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md)
echo: false echo: false
register: _aws_access_key register: _aws_access_key
when: when:
- aws_access_key is undefined - aws_access_key is undefined
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0 - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
- pause: - pause:
prompt: | prompt: |
Enter your AWS Secret Access Key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
echo: false echo: false
register: _aws_secret_key register: _aws_secret_key
when: when:
@ -23,35 +23,35 @@
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}" secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
- block: - block:
- name: Get regions - name: Get regions
aws_region_info: aws_region_facts:
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
region: us-east-1 region: us-east-1
register: _aws_regions register: _aws_regions
- name: Set facts about the regions - name: Set facts about the regions
set_fact: set_fact:
aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}" aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}"
- name: Set the default region - name: Set the default region
set_fact: set_fact:
default_region: >- default_region: >-
{% for r in aws_regions %}
{%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
(https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
{% for r in aws_regions %} {% for r in aws_regions %}
{%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %} {{ loop.index }}. {{ r['region_name'] }}
{%- endfor %} {% endfor %}
- pause: Enter the number of your desired region
prompt: | [{{ default_region }}]
What region should the server be located in? register: _algo_region
(https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
{% for r in aws_regions %}
{{ loop.index }}. {{ r['region_name'] }}
{% endfor %}
Enter the number of your desired region
[{{ default_region }}]
register: _algo_region
when: region is undefined when: region is undefined
- name: Set algo_region and stack_name facts - name: Set algo_region and stack_name facts
@ -63,26 +63,26 @@
stack_name: "{{ algo_server_name | replace('.', '-') }}" stack_name: "{{ algo_server_name | replace('.', '-') }}"
- block: - block:
- name: Get existing available Elastic IPs - name: Get existing available Elastic IPs
ec2_eip_info: ec2_eip_facts:
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
region: "{{ algo_region }}" region: "{{ algo_region }}"
register: raw_eip_addresses register: raw_eip_addresses
- set_fact: - set_fact:
available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}" available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}"
- pause: - pause:
prompt: >- prompt: >-
What Elastic IP would you like to use? What Elastic IP would you like to use?
{% for eip in available_eip_addresses %} {% for eip in available_eip_addresses %}
{{ loop.index }}. {{ eip['public_ip'] }} {{ loop.index }}. {{ eip['public_ip'] }}
{% endfor %} {% endfor %}
Enter the number of your desired Elastic IP Enter the number of your desired Elastic IP
register: _use_existing_eip register: _use_existing_eip
- set_fact: - set_fact:
existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}" existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}"
when: cloud_providers.ec2.use_existing_eip when: cloud_providers.ec2.use_existing_eip

View file

@ -1,8 +1,15 @@
--- ---
- name: Clean up the environment
file:
dest: "{{ ec2_venv }}"
state: absent
when: clean_environment
- name: Install requirements - name: Install requirements
pip: pip:
name: name:
- boto>=2.5 - boto>=2.5
- boto3 - boto3
state: latest state: latest
virtualenv_python: python3 virtualenv: "{{ ec2_venv }}"
virtualenv_python: python2.7

View file

@ -0,0 +1,2 @@
---
gce_venv: "{{ playbook_dir }}/configs/.venvs/gce"

View file

@ -2,83 +2,56 @@
- name: Build python virtual environment - name: Build python virtual environment
import_tasks: venv.yml import_tasks: venv.yml
- name: Include prompts
import_tasks: prompts.yml
- name: Network configured
gcp_compute_network:
auth_kind: serviceaccount
service_account_file: "{{ credentials_file_path }}"
project: "{{ project_id }}"
name: algovpn
auto_create_subnetworks: true
routing_config:
routing_mode: REGIONAL
register: gcp_compute_network
- name: Firewall configured
gcp_compute_firewall:
auth_kind: serviceaccount
service_account_file: "{{ credentials_file_path }}"
project: "{{ project_id }}"
name: algovpn
network: "{{ gcp_compute_network }}"
direction: INGRESS
allowed:
- ip_protocol: udp
ports:
- "500"
- "4500"
- "{{ wireguard_port|string }}"
- ip_protocol: tcp
ports:
- "{{ ssh_port }}"
- ip_protocol: icmp
- block: - block:
- name: Include prompts
import_tasks: prompts.yml
- name: Network configured
gce_net:
name: "{{ algo_server_name }}"
fwname: "{{ algo_server_name }}-fw"
allowed: "udp:500,4500,{{ wireguard_port }};tcp:22"
state: "present"
mode: auto
src_range: 0.0.0.0/0
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file_path }}"
project_id: "{{ project_id }}"
- block:
- name: External IP allocated - name: External IP allocated
gcp_compute_address: gce_eip:
auth_kind: serviceaccount service_account_email: "{{ service_account_email }}"
service_account_file: "{{ credentials_file_path }}" credentials_file: "{{ credentials_file_path }}"
project: "{{ project_id }}" project_id: "{{ project_id }}"
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
region: "{{ algo_region }}" region: "{{ algo_region.split('-')[0:2] | join('-') }}"
register: gcp_compute_address state: present
register: gce_eip
- name: Set External IP as a fact - name: Set External IP as a fact
set_fact: set_fact:
external_ip: "{{ gcp_compute_address.address }}" external_ip: "{{ gce_eip.address }}"
when: cloud_providers.gce.external_static_ip when: cloud_providers.gce.external_static_ip
- name: Instance created - name: "Creating a new instance..."
gcp_compute_instance: gce:
auth_kind: serviceaccount instance_names: "{{ algo_server_name }}"
service_account_file: "{{ credentials_file_path }}" zone: "{{ algo_region }}"
project: "{{ project_id }}" external_ip: "{{ external_ip | default('ephemeral') }}"
name: "{{ algo_server_name }}" machine_type: "{{ cloud_providers.gce.size }}"
zone: "{{ algo_zone }}" image: "{{ cloud_providers.gce.image }}"
machine_type: "{{ cloud_providers.gce.size }}" service_account_email: "{{ service_account_email }}"
disks: credentials_file: "{{ credentials_file_path }}"
- auto_delete: true project_id: "{{ project_id }}"
boot: true metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
initialize_params: network: "{{ algo_server_name }}"
source_image: projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }} tags:
metadata: - "environment-algo"
ssh-keys: algo:{{ ssh_public_key_lookup }} register: google_vm
user-data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
network_interfaces:
- network: "{{ gcp_compute_network }}"
access_configs:
- name: "{{ algo_server_name }}"
nat_ip: "{{ gcp_compute_address|default(None) }}"
type: ONE_TO_ONE_NAT
tags:
items:
- environment-algo
register: gcp_compute_instance
- set_fact: - set_fact:
cloud_instance_ip: "{{ gcp_compute_instance.networkInterfaces[0].accessConfigs[0].natIP }}" cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"
ansible_ssh_user: algo ansible_ssh_user: ubuntu
ansible_ssh_port: "{{ ssh_port }}" environment:
cloudinit: true PYTHONPATH: "{{ gce_venv }}/lib/python2.7/site-packages/"

View file

@ -9,8 +9,7 @@
- lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0 - lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0
- set_fact: - set_fact:
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'),\ credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
\ true) }}"
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}" ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
- set_fact: - set_fact:
@ -21,60 +20,48 @@
project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}" project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"
- block: - block:
- name: Get regions - name: Get regions
gcp_compute_location_info: gce_region_facts:
auth_kind: serviceaccount service_account_email: "{{ credentials_file_lookup.client_email }}"
service_account_file: "{{ credentials_file_path }}" credentials_file: "{{ credentials_file_path }}"
project: "{{ project_id }}" project_id: "{{ credentials_file_lookup.project_id }}"
scope: regions register: _gce_regions
filters: status=UP
register: gcp_compute_regions_info
- name: Set facts about the regions - name: Set facts about the regions
set_fact: set_fact:
gce_regions: >- gce_regions: >-
[{%- for region in gcp_compute_regions_info.resources | sort(attribute='name') -%} [{%- for region in _gce_regions.results.regions | sort(attribute='name') -%}
'{{ region.name }}'{% if not loop.last %},{% endif %} {% if region.status == "UP" %}
{%- endfor -%}] {% for zone in region.zones | sort(attribute='name') %}
{% if zone.status == "UP" %}
- name: Set facts about the default region '{{ zone.name }}'
set_fact: {% endif %}{% if not loop.last %},{% endif %}
default_region: >-
{% for region in gce_regions %}
{%- if region == "us-east1" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
(https://cloud.google.com/compute/docs/regions-zones/#locations)
{% for r in gce_regions %}
{{ loop.index }}. {{ r }}
{% endfor %} {% endfor %}
{% endif %}{% if not loop.last %},{% endif %}
{%- endfor -%}]
Enter the number of your desired region - name: Set facts about the default region
[{{ default_region }}] set_fact:
register: _gce_region default_region: >-
{% for region in gce_regions %}
{%- if region == "us-east1-b" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
(https://cloud.google.com/compute/docs/regions-zones/)
{% for r in gce_regions %}
{{ loop.index }}. {{ r }}
{% endfor %}
Enter the number of your desired region
[{{ default_region }}]
register: _gce_region
when: region is undefined when: region is undefined
- name: Set region as a fact - set_fact:
set_fact:
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _gce_region.user_input %}{{ gce_regions[_gce_region.user_input | int -1 ] }} {%- elif _gce_region.user_input %}{{ gce_regions[_gce_region.user_input | int -1 ] }}
{%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %} {%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %}
- name: Get zones
gcp_compute_location_info:
auth_kind: serviceaccount
service_account_file: "{{ credentials_file_path }}"
project: "{{ project_id }}"
scope: zones
filters:
- name={{ algo_region }}-*
- status=UP
register: gcp_compute_zone_info
- name: Set random available zone as a fact
set_fact:
algo_zone: "{{ (gcp_compute_zone_info.resources | random(seed=algo_server_name + algo_region + project_id) ).name }}"

View file

@ -1,8 +1,14 @@
--- ---
- name: Clean up the environment
file:
dest: "{{ gce_venv }}"
state: absent
when: clean_environment
- name: Install requirements - name: Install requirements
pip: pip:
name: name:
- requests>=2.18.4 - apache-libcloud
- google-auth>=1.3.0
state: latest state: latest
virtualenv_python: python3 virtualenv: "{{ gce_venv }}"
virtualenv_python: python2.7

View file

@ -1,34 +0,0 @@
---
# Provision an Algo VPN server on Hetzner Cloud: upload the SSH key,
# create the server with cloud-init user data, then export connection
# facts for the rest of the play.
- name: Build python virtual environment
  import_tasks: venv.yml

- name: Include prompts
  import_tasks: prompts.yml

- name: Create an ssh key
  hetzner.hcloud.ssh_key:
    # Seed the random suffix with the public key itself so the key name
    # is stable across runs for the same key pair.
    name: algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }}
    public_key: "{{ lookup('file', SSH_keys.public) }}"
    state: present
    api_token: "{{ algo_hcloud_token }}"
  register: hcloud_ssh_key

- name: Create a server...
  hetzner.hcloud.server:
    name: "{{ algo_server_name }}"
    location: "{{ algo_hcloud_region }}"
    server_type: "{{ cloud_providers.hetzner.server_type }}"
    image: "{{ cloud_providers.hetzner.image }}"
    state: present
    api_token: "{{ algo_hcloud_token }}"
    ssh_keys: "{{ hcloud_ssh_key.hcloud_ssh_key.name }}"
    user_data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
    labels:
      Environment: algo
  register: hcloud_server

- name: Set facts about the new server
  set_fact:
    cloud_instance_ip: "{{ hcloud_server.hcloud_server.ipv4_address }}"
    ansible_ssh_user: algo
    ansible_ssh_port: "{{ ssh_port }}"
    cloudinit: true

Some files were not shown because too many files have changed in this diff Show more